diff --git a/go.mod b/go.mod
index fdd80bb1f..ce8c50647 100644
--- a/go.mod
+++ b/go.mod
@@ -1,70 +1,95 @@
 module github.com/bpfman/bpfman-operator
 
-go 1.24.0
+go 1.25.0
 
 require (
 	github.com/bpfman/bpfman v0.5.7-0.20250305151919-a74c631c3643
-	github.com/containers/image/v5 v5.34.2
-	github.com/go-logr/logr v1.4.2
-	github.com/kong/kubernetes-testing-framework v0.47.2
-	github.com/netobserv/netobserv-ebpf-agent v1.7.0-community.0.20250402125041-1fca7614320e
+	github.com/containers/image/v5 v5.36.2
+	github.com/go-logr/logr v1.4.3
+	github.com/kong/kubernetes-testing-framework v0.48.0
+	github.com/netobserv/netobserv-ebpf-agent v1.10.1-community
 	github.com/openshift/api v0.0.0-20240605201059-cefcda60d938
-	github.com/stretchr/testify v1.10.0
-	go.uber.org/zap v1.27.0
-	google.golang.org/grpc v1.71.0
-	k8s.io/api v0.32.3
-	k8s.io/apimachinery v0.32.3
-	k8s.io/client-go v0.32.3
-	k8s.io/code-generator v0.32.3
-	k8s.io/cri-api v0.33.2
-	sigs.k8s.io/controller-runtime v0.20.4
+	github.com/stretchr/testify v1.11.1
+	go.uber.org/zap v1.27.1
+	google.golang.org/grpc v1.78.0
+	k8s.io/api v0.34.3
+	k8s.io/apimachinery v0.34.3
+	k8s.io/client-go v0.34.3
+	k8s.io/code-generator v0.34.3
+	k8s.io/cri-api v0.35.0
+	sigs.k8s.io/controller-runtime v0.22.4
 )
 
 require (
-	cel.dev/expr v0.19.1 // indirect
+	cel.dev/expr v0.24.0 // indirect
 	github.com/antlr4-go/antlr/v4 v4.13.0 // indirect
-	github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect
 	github.com/cenkalti/backoff/v4 v4.3.0 // indirect
-	github.com/containers/storage v1.57.2 // indirect
+	github.com/cenkalti/backoff/v5 v5.0.2 // indirect
+	github.com/cenkalti/hub v1.0.2 // indirect
+	github.com/cenkalti/rpc2 v0.0.0-20210604223624-c1acbc6ec984 // indirect
+	github.com/cilium/ebpf v0.20.0 // indirect
+	github.com/containernetworking/cni v1.1.2 // indirect
+	github.com/containernetworking/plugins v1.2.0 // indirect
+	github.com/containers/storage v1.59.1 // indirect
+	github.com/coreos/go-iptables v0.6.0 // indirect
+	github.com/cpuguy83/go-md2man/v2 v2.0.7 // indirect
 	github.com/felixge/httpsnoop v1.0.4 // indirect
-	github.com/fxamacker/cbor/v2 v2.7.0 // indirect
+	github.com/fxamacker/cbor/v2 v2.9.0 // indirect
+	github.com/gavv/monotime v0.0.0-20190418164738-30dba4353424 // indirect
 	github.com/go-logr/stdr v1.2.2 // indirect
+	github.com/go-openapi/swag/jsonname v0.25.1 // indirect
 	github.com/google/btree v1.1.3 // indirect
-	github.com/google/cel-go v0.22.0 // indirect
-	github.com/google/gnostic-models v0.6.8 // indirect
+	github.com/google/cel-go v0.26.0 // indirect
+	github.com/google/gnostic-models v0.7.0 // indirect
 	github.com/google/go-cmp v0.7.0 // indirect
-	github.com/gorilla/websocket v1.5.1 // indirect
-	github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.1 // indirect
+	github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 // indirect
+	github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.1 // indirect
 	github.com/inconshreveable/mousetrap v1.1.0 // indirect
-	github.com/klauspost/compress v1.17.11 // indirect
+	github.com/jpillora/backoff v1.0.0 // indirect
 	github.com/moby/spdystream v0.5.0 // indirect
+	github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f // indirect
 	github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect
+	github.com/netobserv/flowlogs-pipeline v1.10.0-community // indirect
+	github.com/netsampler/goflow2 v1.3.7 // indirect
+	github.com/ovn-org/libovsdb v0.7.1-0.20240820095311-ce1951614a20 // indirect
+	github.com/ovn-org/ovn-kubernetes/go-controller v0.0.0-20250227173154-57a2590a1d16 // indirect
+	github.com/russross/blackfriday/v2 v2.1.0 // indirect
+	github.com/safchain/ethtool v0.5.10 // indirect
 	github.com/sirupsen/logrus v1.9.3 // indirect
-	github.com/spf13/cobra v1.8.1 // indirect
+	github.com/spf13/cobra v1.10.2 // indirect
 	github.com/stoewer/go-strcase v1.3.0 // indirect
-	github.com/vishvananda/netlink v1.3.0 // indirect
+	github.com/urfave/cli/v2 v2.27.6 // indirect
+	github.com/vishvananda/netlink v1.3.1-0.20250425193846-9d88d8385bf9 // indirect
 	github.com/vishvananda/netns v0.0.5 // indirect
 	github.com/x448/float16 v0.8.4 // indirect
-	go.opentelemetry.io/auto/sdk v1.1.0 // indirect
-	go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 // indirect
-	go.opentelemetry.io/otel v1.34.0 // indirect
-	go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.34.0 // indirect
-	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.33.0 // indirect
-	go.opentelemetry.io/otel/metric v1.34.0 // indirect
-	go.opentelemetry.io/otel/sdk v1.34.0 // indirect
-	go.opentelemetry.io/otel/trace v1.34.0 // indirect
-	go.opentelemetry.io/proto/otlp v1.5.0 // indirect
-	golang.org/x/exp v0.0.0-20241217172543-b2144cdd0a67 // indirect
-	google.golang.org/genproto/googleapis/api v0.0.0-20250207221924-e9438ea467c6 // indirect
-	google.golang.org/genproto/googleapis/rpc v0.0.0-20250207221924-e9438ea467c6 // indirect
-	google.golang.org/protobuf v1.36.5 // indirect
-	gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
-	k8s.io/apiserver v0.32.1 // indirect
-	k8s.io/component-base v0.32.1 // indirect
-	k8s.io/gengo/v2 v2.0.0-20240911193312-2b36238f13e9 // indirect
-	sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.0 // indirect
-	sigs.k8s.io/gateway-api v1.1.0 // indirect
-	sigs.k8s.io/yaml v1.4.0 // indirect
+	github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1 // indirect
+	go.opentelemetry.io/auto/sdk v1.2.1 // indirect
+	go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 // indirect
+	go.opentelemetry.io/otel v1.38.0 // indirect
+	go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.37.0 // indirect
+	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.37.0 // indirect
+	go.opentelemetry.io/otel/metric v1.38.0 // indirect
+	go.opentelemetry.io/otel/sdk v1.38.0 // indirect
+	go.opentelemetry.io/otel/trace v1.38.0 // indirect
+	go.opentelemetry.io/proto/otlp v1.7.0 // 
indirect + go.yaml.in/yaml/v2 v2.4.3 // indirect + go.yaml.in/yaml/v3 v3.0.4 // indirect + golang.org/x/exp v0.0.0-20250408133849-7e4ce0ab07d0 // indirect + golang.org/x/tools/go/expect v0.1.1-deprecated // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20251029180050-ab9386a59fda // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20251124214823-79d6a2a48846 // indirect + google.golang.org/protobuf v1.36.11 // indirect + gopkg.in/evanphx/json-patch.v4 v4.13.0 // indirect + gopkg.in/gcfg.v1 v1.2.3 // indirect + gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect + gopkg.in/warnings.v0 v0.1.2 // indirect + k8s.io/apiserver v0.34.3 // indirect + k8s.io/component-base v0.34.3 // indirect + k8s.io/gengo/v2 v2.0.0-20250820003526-c297c0c1eb9d // indirect + sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.2 // indirect + sigs.k8s.io/randfill v1.0.0 // indirect + sigs.k8s.io/structured-merge-diff/v6 v6.3.0 // indirect + sigs.k8s.io/yaml v1.6.0 // indirect ) require ( @@ -72,49 +97,46 @@ require ( github.com/blang/semver/v4 v4.0.0 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect - github.com/emicklei/go-restful/v3 v3.12.1 // indirect + github.com/emicklei/go-restful/v3 v3.13.0 // indirect github.com/evanphx/json-patch/v5 v5.9.11 // indirect - github.com/fsnotify/fsnotify v1.8.0 // indirect + github.com/fsnotify/fsnotify v1.9.0 // indirect github.com/go-logr/zapr v1.3.0 // indirect - github.com/go-openapi/jsonpointer v0.21.0 // indirect - github.com/go-openapi/jsonreference v0.21.0 // indirect - github.com/go-openapi/swag v0.23.0 // indirect + github.com/go-openapi/jsonpointer v0.22.1 // indirect + github.com/go-openapi/jsonreference v0.21.2 // indirect + github.com/go-openapi/swag v0.23.1 // indirect github.com/gogo/protobuf v1.3.2 // indirect - github.com/golang/protobuf v1.5.4 // indirect - github.com/google/gofuzz v1.2.0 // indirect 
github.com/google/uuid v1.6.0 github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect - github.com/mailru/easyjson v0.7.7 // indirect + github.com/mailru/easyjson v0.9.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect - github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/opencontainers/go-digest v1.0.0 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect - github.com/prometheus/client_golang v1.21.1 - github.com/prometheus/client_model v0.6.1 // indirect - github.com/prometheus/common v0.62.0 // indirect - github.com/prometheus/procfs v0.15.1 // indirect - github.com/spf13/pflag v1.0.6 // indirect + github.com/prometheus/client_golang v1.23.2 + github.com/prometheus/client_model v0.6.2 // indirect + github.com/prometheus/common v0.67.1 // indirect + github.com/prometheus/procfs v0.17.0 // indirect + github.com/spf13/pflag v1.0.10 // indirect go.uber.org/multierr v1.11.0 // indirect - golang.org/x/mod v0.22.0 // indirect - golang.org/x/net v0.38.0 // indirect - golang.org/x/oauth2 v0.26.0 // indirect - golang.org/x/sync v0.12.0 - golang.org/x/sys v0.31.0 // indirect - golang.org/x/term v0.30.0 // indirect - golang.org/x/text v0.23.0 // indirect - golang.org/x/time v0.7.0 // indirect - golang.org/x/tools v0.28.0 // indirect + golang.org/x/mod v0.29.0 // indirect + golang.org/x/net v0.47.0 // indirect + golang.org/x/oauth2 v0.34.0 // indirect + golang.org/x/sync v0.19.0 + golang.org/x/sys v0.40.0 // indirect + golang.org/x/term v0.37.0 // indirect + golang.org/x/text v0.31.0 // indirect + golang.org/x/time v0.14.0 // indirect + golang.org/x/tools v0.38.0 // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect 
gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/apiextensions-apiserver v0.32.1 // indirect + k8s.io/apiextensions-apiserver v0.34.3 // indirect k8s.io/klog/v2 v2.130.1 // indirect - k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f // indirect - k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 - sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect - sigs.k8s.io/kind v0.24.0 // indirect - sigs.k8s.io/structured-merge-diff/v4 v4.4.2 // indirect + k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912 // indirect + k8s.io/utils v0.0.0-20250820121507-0af2bda4dd1d + sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 // indirect + sigs.k8s.io/kind v0.30.0 // indirect ) diff --git a/go.sum b/go.sum index e9e1d16f1..86e3e143a 100644 --- a/go.sum +++ b/go.sum @@ -1,11 +1,11 @@ -cel.dev/expr v0.19.1 h1:NciYrtDRIR0lNCnH1LFJegdjspNx9fI59O7TWcua/W4= -cel.dev/expr v0.19.1/go.mod h1:MrpN08Q+lEBs+bGYdLxxHkZoUSsCp0nSKTs0nTymJgw= +cel.dev/expr v0.24.0 h1:56OvJKSH3hDGL0ml5uSxZmz3/3Pq4tJ+fb1unVLAFcY= +cel.dev/expr v0.24.0/go.mod h1:hLPLo1W4QUmuYdA72RBX06QTs6MXw941piREPl3Yfiw= +github.com/alexflint/go-filemutex v1.2.0 h1:1v0TJPDtlhgpW4nJ+GvxCLSlUDC3+gW0CQQvlmfDR/s= +github.com/alexflint/go-filemutex v1.2.0/go.mod h1:mYyQSWvw9Tx2/H2n9qXPb52tTYfE0pZAWcBq5mK025c= github.com/antlr4-go/antlr/v4 v4.13.0 h1:lxCg3LAv+EUK6t1i0y1V6/SLeUi0eKEKdhQAlS8TVTI= github.com/antlr4-go/antlr/v4 v4.13.0/go.mod h1:pfChB/xh/Unjila75QW7+VU4TSnWnnk9UTnmpPaOR2g= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= -github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so= -github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= github.com/beorn7/perks v1.0.1 
h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= @@ -14,282 +14,468 @@ github.com/bpfman/bpfman v0.5.7-0.20250305151919-a74c631c3643 h1:+5L55JZucclVLZq github.com/bpfman/bpfman v0.5.7-0.20250305151919-a74c631c3643/go.mod h1:ZBZc5wo+lEQA+w/89Sbjaw5VhuvuLTDS3a3yDjOl81o= github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= +github.com/cenkalti/backoff/v5 v5.0.2 h1:rIfFVxEf1QsI7E1ZHfp/B4DF/6QBAUhmgkxc0H7Zss8= +github.com/cenkalti/backoff/v5 v5.0.2/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw= +github.com/cenkalti/hub v1.0.2 h1:Nqv9TNaA9boeO2wQFW8o87BY3zKthtnzXmWGmJqhAV8= +github.com/cenkalti/hub v1.0.2/go.mod h1:8LAFAZcCasb83vfxatMUnZHRoQcffho2ELpHb+kaTJU= +github.com/cenkalti/rpc2 v0.0.0-20210604223624-c1acbc6ec984 h1:CNwZyGS6KpfaOWbh2yLkSy3rSTUh3jub9CzpFpP6PVQ= +github.com/cenkalti/rpc2 v0.0.0-20210604223624-c1acbc6ec984/go.mod h1:v2npkhrXyk5BCnkNIiPdRI23Uq6uWPUQGL2hnRcRr/M= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/containers/image/v5 v5.34.2 h1:3r1etun4uJYq5197tcymUcI1h6+zyzKS9PtRtBlEKMI= -github.com/containers/image/v5 v5.34.2/go.mod h1:MG++slvQSZVq5ejAcLdu4APGsKGMb0YHHnAo7X28fdE= -github.com/containers/storage v1.57.2 h1:2roCtTyE9pzIaBDHibK72DTnYkPmwWaq5uXxZdaWK4U= -github.com/containers/storage v1.57.2/go.mod h1:i/Hb4lu7YgFr9G0K6BMjqW0BLJO1sFsnWQwj2UoWCUM= -github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod 
h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/cilium/ebpf v0.20.0 h1:atwWj9d3NffHyPZzVlx3hmw1on5CLe9eljR8VuHTwhM= +github.com/cilium/ebpf v0.20.0/go.mod h1:pzLjFymM+uZPLk/IXZUL63xdx5VXEo+enTzxkZXdycw= +github.com/containernetworking/cni v1.1.2 h1:wtRGZVv7olUHMOqouPpn3cXJWpJgM6+EUl31EQbXALQ= +github.com/containernetworking/cni v1.1.2/go.mod h1:sDpYKmGVENF3s6uvMvGgldDWeG8dMxakj/u+i9ht9vw= +github.com/containernetworking/plugins v1.2.0 h1:SWgg3dQG1yzUo4d9iD8cwSVh1VqI+bP7mkPDoSfP9VU= +github.com/containernetworking/plugins v1.2.0/go.mod h1:/VjX4uHecW5vVimFa1wkG4s+r/s9qIfPdqlLF4TW8c4= +github.com/containers/image/v5 v5.36.2 h1:GcxYQyAHRF/pLqR4p4RpvKllnNL8mOBn0eZnqJbfTwk= +github.com/containers/image/v5 v5.36.2/go.mod h1:b4GMKH2z/5t6/09utbse2ZiLK/c72GuGLFdp7K69eA4= +github.com/containers/storage v1.59.1 h1:11Zu68MXsEQGBBd+GadPrHPpWeqjKS8hJDGiAHgIqDs= +github.com/containers/storage v1.59.1/go.mod h1:KoAYHnAjP3/cTsRS+mmWZGkufSY2GACiKQ4V3ZLQnR0= +github.com/coreos/go-iptables v0.6.0 h1:is9qnZMPYjLd8LYqmm/qlE+wwEgJIkTYdhV3rfZo4jk= +github.com/coreos/go-iptables v0.6.0/go.mod h1:Qe8Bv2Xik5FyTXwgIbLAnv2sWSBmvWdFETJConOQ//Q= +github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= +github.com/cpuguy83/go-md2man/v2 v2.0.7 h1:zbFlGlXEAKlwXpmvle3d8Oe3YnkKIK4xSRTd3sHPnBo= +github.com/cpuguy83/go-md2man/v2 v2.0.7/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/emicklei/go-restful/v3 v3.12.1 
h1:PJMDIM/ak7btuL8Ex0iYET9hxM3CI2sjZtzpL63nKAU= -github.com/emicklei/go-restful/v3 v3.12.1/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= -github.com/evanphx/json-patch v5.7.0+incompatible h1:vgGkfT/9f8zE6tvSCe74nfpAVDQ2tG6yudJd8LBksgI= -github.com/evanphx/json-patch v5.7.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/emicklei/go-restful/v3 v3.13.0 h1:C4Bl2xDndpU6nJ4bc1jXd+uTmYPVUwkD6bFY/oTyCes= +github.com/emicklei/go-restful/v3 v3.13.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/evanphx/json-patch v0.5.2 h1:xVCHIVMUu1wtM/VkR9jVZ45N3FhZfYMMYGorLCR8P3k= +github.com/evanphx/json-patch v0.5.2/go.mod h1:ZWS5hhDbVDyob71nXKNL0+PWn6ToqBHMikGIFbs31qQ= github.com/evanphx/json-patch/v5 v5.9.11 h1:/8HVnzMq13/3x9TPvjG08wUGqBTmZBsCWzjTM0wiaDU= github.com/evanphx/json-patch/v5 v5.9.11/go.mod h1:3j+LviiESTElxA4p3EMKAB9HXj3/XEtnUf6OZxqIQTM= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= -github.com/fsnotify/fsnotify v1.8.0 h1:dAwr6QBTBZIkG8roQaJjGof0pp0EeF+tNV7YBP3F/8M= -github.com/fsnotify/fsnotify v1.8.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= -github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= -github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= +github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k= +github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= +github.com/fxamacker/cbor/v2 v2.9.0 h1:NpKPmjDBgUfBms6tr6JZkTHtfFGcMKsw3eGcmD/sapM= +github.com/fxamacker/cbor/v2 v2.9.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ= 
+github.com/gaissmai/cidrtree v0.1.4 h1:/aYnv1LIwjtSDHNr1eNN99WJeh6vLrB+Sgr1tRMhHDc= +github.com/gaissmai/cidrtree v0.1.4/go.mod h1:nrjEeeMZmvoJpLcSvZ3qIVFxw/+9GHKi7wDHHmHKGRI= +github.com/gavv/monotime v0.0.0-20190418164738-30dba4353424 h1:Vh7rylVZRZCj6W41lRlP17xPk4Nq260H4Xo/DDYmEZk= +github.com/gavv/monotime v0.0.0-20190418164738-30dba4353424/go.mod h1:vmp8DIyckQMXOPl0AQVHt+7n5h7Gb7hS6CUydiV8QeA= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= -github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= +github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ= github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg= -github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ= -github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY= -github.com/go-openapi/jsonreference v0.21.0 h1:Rs+Y7hSXT83Jacb7kFyjn4ijOuVGSvOdF2+tg1TRrwQ= -github.com/go-openapi/jsonreference v0.21.0/go.mod h1:LmZmgsrTkVg9LG4EaHeY8cBDslNPMo06cago5JNLkm4= -github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE= -github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ= +github.com/go-openapi/jsonpointer v0.22.1 h1:sHYI1He3b9NqJ4wXLoJDKmUmHkWy/L7rtEo92JUxBNk= +github.com/go-openapi/jsonpointer v0.22.1/go.mod h1:pQT9OsLkfz1yWoMgYFy4x3U5GY5nUlsOn1qSBH5MkCM= +github.com/go-openapi/jsonreference v0.21.2 h1:Wxjda4M/BBQllegefXrY/9aq1fxBA8sI5M/lFU6tSWU= 
+github.com/go-openapi/jsonreference v0.21.2/go.mod h1:pp3PEjIsJ9CZDGCNOyXIQxsNuroxm8FAJ/+quA0yKzQ= +github.com/go-openapi/swag v0.23.1 h1:lpsStH0n2ittzTnbaSloVZLuB5+fvSY/+hnagBjSNZU= +github.com/go-openapi/swag v0.23.1/go.mod h1:STZs8TbRvEQQKUA+JZNAm3EWlgaOBGpyFDqQnDHMef0= +github.com/go-openapi/swag/jsonname v0.25.1 h1:Sgx+qbwa4ej6AomWC6pEfXrA6uP2RkaNjA9BR8a1RJU= +github.com/go-openapi/swag/jsonname v0.25.1/go.mod h1:71Tekow6UOLBD3wS7XhdT98g5J5GR13NOTQ9/6Q11Zo= +github.com/go-quicktest/qt v1.101.1-0.20240301121107-c6c8733fa1e6 h1:teYtXy9B7y5lHTp8V9KPxpYRAVA7dozigQcMiBust1s= +github.com/go-quicktest/qt v1.101.1-0.20240301121107-c6c8733fa1e6/go.mod h1:p4lGIVX+8Wa6ZPNDvqcxq36XpUDLh42FLetFU7odllI= +github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 h1:p104kn46Q8WdvHunIJ9dAyjPVtrBPhSr3KT2yUst43I= +github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= +github.com/go-viper/mapstructure/v2 v2.4.0 h1:EBsztssimR/CONLSZZ04E8qAkxNYq4Qp9LvH92wZUgs= +github.com/go-viper/mapstructure/v2 v2.4.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod 
h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg= github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= -github.com/google/cel-go v0.22.0 h1:b3FJZxpiv1vTMo2/5RDUqAHPxkT8mmMfJIrq1llbf7g= -github.com/google/cel-go v0.22.0/go.mod h1:BuznPXXfQDpXKWQ9sPW3TzlAJN5zzFe+i9tIs0yC4s8= -github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= -github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= -github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/cel-go v0.26.0 h1:DPGjXackMpJWH680oGY4lZhYjIameYmR+/6RBdDGmaI= +github.com/google/cel-go v0.26.0/go.mod h1:A9O8OU9rdvrK5MQyrqfIxo1a0u4g3sF8KB6PUIaryMM= +github.com/google/gnostic-models v0.7.0 h1:qwTtogB15McXDaNqTZdzPJRHvaVJlAl+HVQnLmJEJxo= +github.com/google/gnostic-models v0.7.0/go.mod h1:whL5G0m6dmc5cPxKc5bdKdEN3UjI7OUGxBlw57miDrQ= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= 
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db h1:097atOisP2aRj7vFgYQBbFN4U4JNXUNYpxael3UzMyo= github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/gorilla/websocket v1.5.1 h1:gmztn0JnHVt9JZquRuzLw3g4wouNVzKL15iLr/zn/QY= -github.com/gorilla/websocket v1.5.1/go.mod h1:x3kM2JMyaluk02fnUJpQuwD2dCS5NDG2ZHL0uE0tcaY= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.1 h1:e9Rjr40Z98/clHv5Yg79Is0NtosR5LXRvdr7o/6NwbA= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.1/go.mod h1:tIxuGz/9mpox++sgp9fJjHO0+q1X9/UOWd798aAm22M= +github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 h1:JeSE6pjso5THxAzdVpqr6/geYxZytqFMBCOtn/ujyeo= +github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674/go.mod h1:r4w70xmWCQKmi1ONH4KIaBptdivuRPyosB9RmPlGEwA= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.1 h1:X5VWvz21y3gzm9Nw/kaUeku/1+uBhcekkmy4IkffJww= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.1/go.mod h1:Zanoh4+gvIgluNqcfMVTJueD4wSS5hT7zTt4Mrutd90= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= 
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= +github.com/josharian/native v1.1.0 h1:uuaP0hAbW7Y4l0ZRQ6C9zfb7Mg1mbFKry/xzDAfmtLA= +github.com/josharian/native v1.1.0/go.mod h1:7X/raswPFr05uY3HiLlYeyQntB6OO7E/d2Cu7qoaN2w= +github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA= +github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/k8snetworkplumbingwg/govdpa v0.1.5-0.20230926073613-07c1031aea47 h1:iSncnlC+rtlNOIpPa3fbqQMhpTscGJIlkiWaPl1VcS4= +github.com/k8snetworkplumbingwg/govdpa v0.1.5-0.20230926073613-07c1031aea47/go.mod h1:SPaDIyUmwN03Bgn0u/mhoiE4o/+koeKh11VUsdsUX0U= +github.com/k8snetworkplumbingwg/ipamclaims v0.4.0-alpha h1:ss+EP77GlQmh90hGKpnAG4Q3VVxRlB7GoncemaPtO4g= +github.com/k8snetworkplumbingwg/ipamclaims v0.4.0-alpha/go.mod h1:qlR+sKxQ2OGfwhFCuXSd7rJ/GgC38vQBeHKQ7f2YnpI= +github.com/k8snetworkplumbingwg/multi-networkpolicy v0.0.0-20200914073308-0f33b9190170 h1:rtPle+U5e7Fia0j44gm+p5QMgOIXXB3A8GtFeCCh8Kk= +github.com/k8snetworkplumbingwg/multi-networkpolicy v0.0.0-20200914073308-0f33b9190170/go.mod h1:CF9uYILB8GY25A/6Hhi1AWKc29qbyLu8r7Gs+uINGZE= +github.com/k8snetworkplumbingwg/network-attachment-definition-client v1.6.0 h1:BT3ghAY0q7lWib9rz+tVXDFkm27dJV6SLCn7TunZwo4= +github.com/k8snetworkplumbingwg/network-attachment-definition-client v1.6.0/go.mod h1:wxt2YWRVItDtaQmVSmaN5ubE2L1c9CiNoHQwSJnM8Ko= +github.com/k8snetworkplumbingwg/sriovnet v1.2.1-0.20230427090635-4929697df2dc h1:v6+jUd70AayPbIRgTYUNpnBLG5cBPTY0+10y80CZeMk= +github.com/k8snetworkplumbingwg/sriovnet 
v1.2.1-0.20230427090635-4929697df2dc/go.mod h1:jyWzGe6ZtYiPq6ih6aXCOy6mZ49Y9mNyBOLBBXnli+k= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc= -github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0= -github.com/kong/kubernetes-testing-framework v0.47.2 h1:+2Z9anTpbV/hwNeN+NFQz53BMU+g3QJydkweBp3tULo= -github.com/kong/kubernetes-testing-framework v0.47.2/go.mod h1:DJ5btl/srdIM03tg3f+jS9Izu7xkRkciAM69Ptqun1I= +github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= +github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= +github.com/kong/kubernetes-testing-framework v0.48.0 h1:ce80CT1CW/VCbS+jbvFW7Q/hRSkvVijxmreg+1lCZkU= +github.com/kong/kubernetes-testing-framework v0.48.0/go.mod h1:aUs4Pxb/4R7o8TNh33jiTLdNZD81cYjDBt5rmiT5ZG0= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= -github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= -github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/mailru/easyjson v0.9.0 h1:PrnmzHw7262yW8sTBwxi1PdJA3Iw/EKBa8psRf7d9a4= +github.com/mailru/easyjson v0.9.0/go.mod h1:1+xMtQp2MRNVL/V1bOzuP3aP8VNwRW55fQUto+XFtTU= +github.com/mdlayher/arp v0.0.0-20220512170110-6706a2966875 
h1:ql8x//rJsHMjS+qqEag8n3i4azw1QneKh5PieH9UEbY= +github.com/mdlayher/arp v0.0.0-20220512170110-6706a2966875/go.mod h1:kfOoFJuHWp76v1RgZCb9/gVUc7XdY877S2uVYbNliGc= +github.com/mdlayher/ethernet v0.0.0-20220221185849-529eae5b6118 h1:2oDp6OOhLxQ9JBoUuysVz9UZ9uI6oLUbvAZu0x8o+vE= +github.com/mdlayher/ethernet v0.0.0-20220221185849-529eae5b6118/go.mod h1:ZFUnHIVchZ9lJoWoEGUg8Q3M4U8aNNWA3CVSUTkW4og= +github.com/mdlayher/ndp v1.0.1 h1:+yAD79/BWyFlvAoeG5ncPS0ItlHP/eVbH7bQ6/+LVA4= +github.com/mdlayher/ndp v1.0.1/go.mod h1:rf3wKaWhAYJEXFKpgF8kQ2AxypxVbfNcZbqoAo6fVzk= +github.com/mdlayher/packet v1.0.0 h1:InhZJbdShQYt6XV2GPj5XHxChzOfhJJOMbvnGAmOfQ8= +github.com/mdlayher/packet v1.0.0/go.mod h1:eE7/ctqDhoiRhQ44ko5JZU2zxB88g+JH/6jmnjzPjOU= +github.com/mdlayher/socket v0.4.1 h1:eM9y2/jlbs1M615oshPQOHZzj6R6wMT7bX5NPiQvn2U= +github.com/mdlayher/socket v0.4.1/go.mod h1:cAqeGjoufqdxWkD7DkpyS+wcefOtmu5OQ8KuoJGIReA= +github.com/metallb/frr-k8s v0.0.15 h1:6M3UGhovX1EFoaSGjrRD7djUAx3w2I+g81FH8OVtHkM= +github.com/metallb/frr-k8s v0.0.15/go.mod h1:TjrGoAf+v00hYGlI8jUdyDxY5udMAOs2GWwrvLWnA4E= +github.com/miekg/dns v1.1.31 h1:sJFOl9BgwbYAWOGEwr61FU28pqsBNdpRBnhGXtO06Oo= +github.com/miekg/dns v1.1.31/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM= +github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= +github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= +github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= +github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/moby/spdystream v0.5.0 h1:7r0J1Si3QO/kjRitvSLVVFUjxMEb/YLj6S9FF62JBCU= github.com/moby/spdystream v0.5.0/go.mod h1:xBAYlnt/ay+11ShkdFKNAG7LsyK/tmNBVvVOwrfMgdI= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd 
h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee h1:W5t00kpgFdJifH4BDsTlE89Zl93FEloxaWZfGcifgq8= +github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU= +github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/netobserv/netobserv-ebpf-agent v1.7.0-community.0.20250402125041-1fca7614320e h1:+w28am18Fb7cBRBsxMPL98mhz3utkGdJisuWb5cTU+8= -github.com/netobserv/netobserv-ebpf-agent v1.7.0-community.0.20250402125041-1fca7614320e/go.mod h1:+oGWsT85ryKIE/8gcsehciPFr3LPVypeQNRGkLnL/Jc= +github.com/netobserv/flowlogs-pipeline v1.10.0-community h1:3Km5B+UmDrYPeiwhWY43YYEIlzjbRfErKMQ67PtMZWQ= +github.com/netobserv/flowlogs-pipeline v1.10.0-community/go.mod h1:CuhvsFU2p15A9RPWSOGZaaCfjsocn7ZYtxKH2hAehZY= +github.com/netobserv/netobserv-ebpf-agent v1.10.1-community h1:9elSOcMkEQeztWH6w8eB3cRZUApaBvexZNQ/xExXXnA= +github.com/netobserv/netobserv-ebpf-agent v1.10.1-community/go.mod 
h1:l9igyOCKLjPGD49AOuMWPbwdUTPHqid1fvC8dSYzOR4= +github.com/netsampler/goflow2 v1.3.7 h1:XZaTy8kkMnGXpJ9hS3KbO1McyrFTpVNhVFEx9rNhMmc= +github.com/netsampler/goflow2 v1.3.7/go.mod h1:4UZsVGVAs//iMCptUHn3WNScztJeUhZH7kDW2+/vDdQ= +github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= +github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= +github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= +github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= +github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= +github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= +github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= github.com/onsi/ginkgo/v2 v2.22.0 h1:Yed107/8DjTr0lKCNt7Dn8yQ6ybuDRQoMGrNFKzMfHg= github.com/onsi/ginkgo/v2 v2.22.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= +github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= +github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= +github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= github.com/onsi/gomega v1.36.1 h1:bJDPBO7ibjxcbHMgSCoo4Yj18UWbKDlLwX1x9sybDcw= github.com/onsi/gomega v1.36.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/openshift/api v0.0.0-20240605201059-cefcda60d938 h1:3Y0E8q6pFb8PgzW0N52jVZBollMeW1kI13iU/vXPCnc= github.com/openshift/api v0.0.0-20240605201059-cefcda60d938/go.mod h1:OOh6Qopf21pSzqNVCB5gomomBXb8o5sGKZxG2KNpaXM= 
+github.com/openshift/client-go v0.0.0-20231121143148-910ca30a1a9a h1:4FVrw8hz0Wb3izbf6JfOEK+pJTYpEvteRR73mCh2g/A= +github.com/openshift/client-go v0.0.0-20231121143148-910ca30a1a9a/go.mod h1:arApQobmOjZqtxw44TwnQdUCH+t9DgZ8geYPFqksHws= +github.com/ovn-org/libovsdb v0.7.1-0.20240820095311-ce1951614a20 h1:OoDvzyaK7F/ZANIIFOgb4Haj7mye3Hle0fYZZNdidSs= +github.com/ovn-org/libovsdb v0.7.1-0.20240820095311-ce1951614a20/go.mod h1:dJbxEaalQl83nn904K32FaMjlH/qOObZ0bj4ejQ78AI= +github.com/ovn-org/ovn-kubernetes/go-controller v0.0.0-20250227173154-57a2590a1d16 h1:t4NphP6IIFRe5/2NGc1MD0e72pLYIzaG9YizrYyk84Y= +github.com/ovn-org/ovn-kubernetes/go-controller v0.0.0-20250227173154-57a2590a1d16/go.mod h1:MzFM3OEsLM2w/4MBMOCsxGR6ZBUvJfOxvQHB8LIKSv4= +github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8= +github.com/pelletier/go-toml/v2 v2.2.4 h1:mye9XuhQ6gvn5h28+VilKrrPoQVanw5PMw/TB0t5Ec4= +github.com/pelletier/go-toml/v2 v2.2.4/go.mod h1:2gIqNv+qfxSVS7cM2xJQKtLSTLUE9V8t9Stt+h56mCY= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_golang v1.21.1 h1:DOvXXTqVzvkIewV/CDPFdejpMCGeMcbGCQ8YOmu+Ibk= -github.com/prometheus/client_golang v1.21.1/go.mod h1:U9NM32ykUErtVBxdvD3zfi+EuFkkaBvMb09mIfe0Zgg= -github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= -github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= -github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io= -github.com/prometheus/common 
v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I= -github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= -github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= -github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= -github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= +github.com/prometheus/client_golang v1.23.2 h1:Je96obch5RDVy3FDMndoUsjAhG5Edi49h0RJWRi/o0o= +github.com/prometheus/client_golang v1.23.2/go.mod h1:Tb1a6LWHB3/SPIzCoaDXI4I8UHKeFTEQ1YCr+0Gyqmg= +github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk= +github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE= +github.com/prometheus/common v0.67.1 h1:OTSON1P4DNxzTg4hmKCc37o4ZAZDv0cfXLkOt0oEowI= +github.com/prometheus/common v0.67.1/go.mod h1:RpmT9v35q2Y+lsieQsdOh5sXZ6ajUGC8NjZAmr8vb0Q= +github.com/prometheus/procfs v0.17.0 h1:FuLQ+05u4ZI+SS/w9+BWEM2TXiHKsUQ9TADiRH7DuK0= +github.com/prometheus/procfs v0.17.0/go.mod h1:oPQLaDAMRbA+u8H5Pbfq+dl3VDAvHxMUOVhe0wYB2zw= +github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ= +github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc= +github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/safchain/ethtool v0.5.10 h1:Im294gZtuf4pSGJRAOGKaASNi3wMeFaGaWuSaomedpc= +github.com/safchain/ethtool v0.5.10/go.mod h1:w9jh2Lx7YBR4UwzLkzCmWl85UY0W2uZdd7/DckVE5+c= +github.com/sagikazarmark/locafero v0.11.0 h1:1iurJgmM9G3PA/I+wWYIOw/5SyBtxapeHDcg+AAIFXc= +github.com/sagikazarmark/locafero v0.11.0/go.mod h1:nVIGvgyzw595SUSUE6tvCp3YYTeHs15MvlmU87WwIik= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= 
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= -github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= -github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= -github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o= -github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8 h1:+jumHNA0Wrelhe64i8F6HNlS8pkoyMv5sreGx2Ry5Rw= +github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8/go.mod h1:3n1Cwaq1E1/1lhQhtRK2ts/ZwZEhjcQeJQ1RuC6Q/8U= +github.com/spf13/afero v1.15.0 h1:b/YBCLWAJdFWJTN9cLhiXXcD7mzKn9Dm86dNnfyQw1I= +github.com/spf13/afero v1.15.0/go.mod h1:NC2ByUVxtQs4b3sIUphxK0NioZnmxgyCrfzeuq8lxMg= +github.com/spf13/cast v1.10.0 h1:h2x0u2shc1QuLHfxi+cTJvs30+ZAHOGRic8uyGTDWxY= +github.com/spf13/cast v1.10.0/go.mod h1:jNfB8QC9IA6ZuY2ZjDp0KtFO2LZZlg4S/7bzP6qqeHo= +github.com/spf13/cobra v1.10.2 h1:DMTTonx5m65Ic0GOoRY2c16WCbHxOOw6xxezuLaBpcU= +github.com/spf13/cobra v1.10.2/go.mod h1:7C1pvHqHw5A4vrJfjNwvOdzYu0Gml16OCs2GRiTUUS4= +github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/pflag v1.0.10 h1:4EBh2KAYBwaONj6b2Ye1GiHfwjqyROoF4RwYO+vPwFk= +github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.21.0 h1:x5S+0EU27Lbphp4UKm1C+1oQO+rKx36vfCoaVebLFSU= +github.com/spf13/viper v1.21.0/go.mod h1:P0lhsswPGWD/1lZJ9ny3fYnVqxiegrlNrEmgLjbTCAY= github.com/stoewer/go-strcase v1.3.0 h1:g0eASXYtp+yvN9fK8sH94oCIk0fau9uV1/ZdJ0AVEzs= github.com/stoewer/go-strcase v1.3.0/go.mod h1:fAH5hQ5pehh+j3nZfvwdk2RgEgQjAoM8wodgtPmh1xo= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod 
h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= -github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= -github.com/vishvananda/netlink v1.3.0 h1:X7l42GfcV4S6E4vHTsw48qbrV+9PVojNfIhZcwQdrZk= -github.com/vishvananda/netlink v1.3.0/go.mod h1:i6NetklAujEcC6fK0JPjT8qSwWyO0HLn4UKG+hGqeJs= -github.com/vishvananda/netns v0.0.4/go.mod h1:SpkAiCQRtJ6TvvxPnOSyH3BMl6unz3xZlaprSwhNNJM= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= +github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= +github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= +github.com/urfave/cli/v2 v2.27.6 h1:VdRdS98FNhKZ8/Az8B7MTyGQmpIr36O1EHybx/LaZ4g= +github.com/urfave/cli/v2 v2.27.6/go.mod h1:3Sevf16NykTbInEnD0yKkjDAeZDS0A6bzhBH5hrMvTQ= +github.com/vishvananda/netlink v1.3.1-0.20250425193846-9d88d8385bf9 h1:ZEjCI2kamoTYIx348/Nfco4c4NPvpq972DM2HMgnBgI= +github.com/vishvananda/netlink 
v1.3.1-0.20250425193846-9d88d8385bf9/go.mod h1:ARtKouGSTGchR8aMwmkzC0qiNPrrWO5JS/XMVl45+b4= github.com/vishvananda/netns v0.0.5 h1:DfiHV+j8bA32MFM7bfEunvT8IAqQ/NzSJHtcmW5zdEY= github.com/vishvananda/netns v0.0.5/go.mod h1:SpkAiCQRtJ6TvvxPnOSyH3BMl6unz3xZlaprSwhNNJM= github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= +github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1 h1:gEOO8jv9F4OT7lGCjxCBTO/36wtF6j2nSip77qHd4x4= +github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1/go.mod h1:Ohn+xnUBiLI6FVj/9LpzZWtj1/D6lUovWYBkxHVV3aM= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= -go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 h1:TT4fX+nBOA/+LUkobKGW1ydGcn+G3vRw9+g5HwCphpk= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0/go.mod h1:L7UH0GbB0p47T4Rri3uHjbpCFYrVrwc1I25QhNPiGK8= -go.opentelemetry.io/otel v1.34.0 h1:zRLXxLCgL1WyKsPVrgbSdMN4c0FMkDAskSTQP+0hdUY= -go.opentelemetry.io/otel v1.34.0/go.mod h1:OWFPOQ+h4G8xpyjgqo4SxJYdDQ/qmRH+wivy7zzx9oI= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.34.0 h1:OeNbIYk/2C15ckl7glBlOBp5+WlYsOElzTNmiPW/x60= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.34.0/go.mod h1:7Bept48yIeqxP2OZ9/AqIpYS94h2or0aB4FypJTc8ZM= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.33.0 h1:5pojmb1U1AogINhN3SurB+zm/nIcusopeBNp42f45QM= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.33.0/go.mod h1:57gTHJSE5S1tqg+EKsLPlTWhpHMsWlVmer+LA926XiA= -go.opentelemetry.io/otel/metric v1.34.0 h1:+eTR3U0MyfWjRDhmFMxe2SsW64QrZ84AOhvqS7Y+PoQ= 
-go.opentelemetry.io/otel/metric v1.34.0/go.mod h1:CEDrp0fy2D0MvkXE+dPV7cMi8tWZwX3dmaIhwPOaqHE= -go.opentelemetry.io/otel/sdk v1.34.0 h1:95zS4k/2GOy069d321O8jWgYsW3MzVV+KuSPKp7Wr1A= -go.opentelemetry.io/otel/sdk v1.34.0/go.mod h1:0e/pNiaMAqaykJGKbi+tSjWfNNHMTxoC9qANsCzbyxU= -go.opentelemetry.io/otel/sdk/metric v1.34.0 h1:5CeK9ujjbFVL5c1PhLuStg1wxA7vQv7ce1EK0Gyvahk= -go.opentelemetry.io/otel/sdk/metric v1.34.0/go.mod h1:jQ/r8Ze28zRKoNRdkjCZxfs6YvBTG1+YIqyFVFYec5w= -go.opentelemetry.io/otel/trace v1.34.0 h1:+ouXS2V8Rd4hp4580a8q23bg0azF2nI8cqLYnC8mh/k= -go.opentelemetry.io/otel/trace v1.34.0/go.mod h1:Svm7lSjQD7kG7KJ/MUHPVXSDGz2OX4h0M2jHBhmSfRE= -go.opentelemetry.io/proto/otlp v1.5.0 h1:xJvq7gMzB31/d406fB8U5CBdyQGw4P399D1aQWU/3i4= -go.opentelemetry.io/proto/otlp v1.5.0/go.mod h1:keN8WnHxOy8PG0rQZjJJ5A2ebUoafqWp0eVQ4yIXvJ4= +go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64= +go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 h1:F7Jx+6hwnZ41NSFTO5q4LYDtJRXBf2PD0rNBkeB/lus= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0/go.mod h1:UHB22Z8QsdRDrnAtX4PntOl36ajSxcdUMt1sF7Y6E7Q= +go.opentelemetry.io/otel v1.38.0 h1:RkfdswUDRimDg0m2Az18RKOsnI8UDzppJAtj01/Ymk8= +go.opentelemetry.io/otel v1.38.0/go.mod h1:zcmtmQ1+YmQM9wrNsTGV/q/uyusom3P8RxwExxkZhjM= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.37.0 h1:Ahq7pZmv87yiyn3jeFz/LekZmPLLdKejuO3NcK9MssM= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.37.0/go.mod h1:MJTqhM0im3mRLw1i8uGHnCvUEeS7VwRyxlLC78PA18M= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.37.0 h1:EtFWSnwW9hGObjkIdmlnWSydO+Qs8OwzfzXLUPg4xOc= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.37.0/go.mod h1:QjUEoiGCPkvFZ/MjK6ZZfNOS6mfVEVKYE99dFhuN2LI= +go.opentelemetry.io/otel/metric v1.38.0 h1:Kl6lzIYGAh5M159u9NgiRkmoMKjvbsKtYRwgfrA6WpA= 
+go.opentelemetry.io/otel/metric v1.38.0/go.mod h1:kB5n/QoRM8YwmUahxvI3bO34eVtQf2i4utNVLr9gEmI= +go.opentelemetry.io/otel/sdk v1.38.0 h1:l48sr5YbNf2hpCUj/FoGhW9yDkl+Ma+LrVl8qaM5b+E= +go.opentelemetry.io/otel/sdk v1.38.0/go.mod h1:ghmNdGlVemJI3+ZB5iDEuk4bWA3GkTpW+DOoZMYBVVg= +go.opentelemetry.io/otel/sdk/metric v1.38.0 h1:aSH66iL0aZqo//xXzQLYozmWrXxyFkBJ6qT5wthqPoM= +go.opentelemetry.io/otel/sdk/metric v1.38.0/go.mod h1:dg9PBnW9XdQ1Hd6ZnRz689CbtrUp0wMMs9iPcgT9EZA= +go.opentelemetry.io/otel/trace v1.38.0 h1:Fxk5bKrDZJUH+AMyyIXGcFAPah0oRcT+LuNtJrmcNLE= +go.opentelemetry.io/otel/trace v1.38.0/go.mod h1:j1P9ivuFsTceSWe1oY+EeW3sc+Pp42sO++GHkg4wwhs= +go.opentelemetry.io/proto/otlp v1.7.0 h1:jX1VolD6nHuFzOYso2E73H85i92Mv8JQYk0K9vz09os= +go.opentelemetry.io/proto/otlp v1.7.0/go.mod h1:fSKjH6YJ7HDlwzltzyMj036AJ3ejJLCgCSHGj4efDDo= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= -go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= -go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= +go.uber.org/zap v1.27.1 h1:08RqriUEv8+ArZRYSTXy1LeBScaMpVSTBhCeaZYfMYc= +go.uber.org/zap v1.27.1/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= +go.yaml.in/yaml/v2 v2.4.3 h1:6gvOSjQoTB3vt1l+CU+tSyi/HOjfOjRLJ4YwYZGwRO0= +go.yaml.in/yaml/v2 v2.4.3/go.mod h1:zSxWcmIDjOzPXpjlTTbAsKokqkDNAVtZO0WOMiT90s8= +go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc= +go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= 
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/exp v0.0.0-20241217172543-b2144cdd0a67 h1:1UoZQm6f0P/ZO0w1Ri+f+ifG/gXhegadRdwBIXEFWDo= -golang.org/x/exp v0.0.0-20241217172543-b2144cdd0a67/go.mod h1:qj5a5QZpwLU2NLQudwIN5koi3beDhSAlJwa67PuM98c= +golang.org/x/crypto v0.45.0 h1:jMBrvKuj23MTlT0bQEOBcAE0mjg8mK9RXFhRH6nyF3Q= +golang.org/x/crypto v0.45.0/go.mod h1:XTGrrkGJve7CYK7J8PEww4aY7gM3qMCElcJQ8n8JdX4= +golang.org/x/exp v0.0.0-20250408133849-7e4ce0ab07d0 h1:R84qjqJb5nVJMxqWYb3np9L5ZsaDtB+a39EqjV0JSUM= +golang.org/x/exp v0.0.0-20250408133849-7e4ce0ab07d0/go.mod h1:S9Xr4PYopiDyqSyp5NjCrhFrqg6A5zA2E/iPHPhqnS8= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.22.0 h1:D4nJWe9zXqHOmWqj4VMOJhvzj7bEZg4wEYa759z1pH4= -golang.org/x/mod v0.22.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= +golang.org/x/mod v0.29.0 h1:HV8lRxZC4l2cr3Zq1LvtOsi/ThTgWnUk/y64QSs8GwA= +golang.org/x/mod v0.29.0/go.mod h1:NyhrlYXJ2H4eJiRy/WDBO6HMqZQ6q9nk4JzS3NuCK+w= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.38.0 h1:vRMAPTMaeGqVhG5QyLJHqNDwecKTomGeqbnfZyKlBI8= -golang.org/x/net v0.38.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8= -golang.org/x/oauth2 v0.26.0 
h1:afQXWNNaeC4nvZ0Ed9XvCCzXM6UHJG7iCg0W4fPqSBE= -golang.org/x/oauth2 v0.26.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= +golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY= +golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU= +golang.org/x/oauth2 v0.34.0 h1:hqK/t4AKgbqWkdkcAeI8XLmbK+4m4G5YeQRrmiotGlw= +golang.org/x/oauth2 v0.34.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw= -golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= +golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4= +golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys 
v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik= -golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= -golang.org/x/term v0.30.0 h1:PQ39fJZ+mfadBm0y5WlL4vlM7Sx1Hgf13sMIY2+QS9Y= -golang.org/x/term v0.30.0/go.mod h1:NYYFdzHoI5wRh/h5tDMdMqCqPJZEuNqVR5xJLd/n67g= +golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.40.0 h1:DBZZqJ2Rkml6QMQsZywtnjnnGvHza6BTfYFWY9kjEWQ= +golang.org/x/sys v0.40.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.37.0 h1:8EGAD0qCmHYZg6J17DvsMy9/wJ7/D/4pV/wfnld5lTU= +golang.org/x/term v0.37.0/go.mod h1:5pB4lxRNYYVZuTLmy8oR2BH8dflOR+IbTYFD8fi3254= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY= -golang.org/x/text v0.23.0/go.mod 
h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4= -golang.org/x/time v0.7.0 h1:ntUhktv3OPE6TgYxXWv9vKvUSJyIFJlyohwbkEwPrKQ= -golang.org/x/time v0.7.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM= +golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM= +golang.org/x/time v0.14.0 h1:MRx4UaLrDotUKUdCIqzPC48t1Y9hANFKIRpNx+Te8PI= +golang.org/x/time v0.14.0/go.mod h1:eL/Oa2bBBK0TkX57Fyni+NgnyQQN4LitPmob2Hjnqw4= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.28.0 h1:WuB6qZ4RPCQo5aP3WdKZS7i595EdWqWR8vqJTlwTVK8= -golang.org/x/tools v0.28.0/go.mod h1:dcIOrVd3mfQKTgrDVQHqCPMWy6lnhfhtX3hLXYVLfRw= +golang.org/x/tools v0.38.0 h1:Hx2Xv8hISq8Lm16jvBZ2VQf+RLmbd7wVUsALibYI/IQ= +golang.org/x/tools v0.38.0/go.mod h1:yEsQ/d/YK8cjh0L6rZlY8tgtlKiBNTL14pGDJPJpYQs= +golang.org/x/tools/go/expect v0.1.1-deprecated h1:jpBZDwmgPhXsKZC6WhL20P4b/wmnpsEAGHaNy0n/rJM= +golang.org/x/tools/go/expect v0.1.1-deprecated/go.mod h1:eihoPOH+FgIqa3FpoTwguz/bVUSGBlGQU67vpBeOrBY= +golang.org/x/tools/go/packages/packagestest v0.1.1-deprecated h1:1h2MnaIAIXISqTFKdENegdpAgUXz6NrPEsbIeWaBRvM= +golang.org/x/tools/go/packages/packagestest v0.1.1-deprecated/go.mod h1:RVAQXBGNv1ib0J382/DPCRS/BPnsGebyM1Gj5VSDpG8= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod 
h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw= gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY= -google.golang.org/genproto/googleapis/api v0.0.0-20250207221924-e9438ea467c6 h1:L9JNMl/plZH9wmzQUHleO/ZZDSN+9Gh41wPczNy+5Fk= -google.golang.org/genproto/googleapis/api v0.0.0-20250207221924-e9438ea467c6/go.mod h1:iYONQfRdizDB8JJBybql13nArx91jcUk7zCXEsOofM4= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250207221924-e9438ea467c6 h1:2duwAxN2+k0xLNpjnHTXoMUgnv6VPSp5fiqTuwSxjmI= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250207221924-e9438ea467c6/go.mod h1:8BS3B93F/U1juMFq9+EDk+qOT5CO1R9IzXxG3PTqiRk= -google.golang.org/grpc v1.71.0 h1:kF77BGdPTQ4/JZWMlb9VpJ5pa25aqvVqogsxNHHdeBg= -google.golang.org/grpc v1.71.0/go.mod h1:H0GRtasmQOh9LkFoCPDu3ZrwUtD1YGE+b2vYBYd/8Ec= -google.golang.org/protobuf v1.36.5 h1:tPhr+woSbjfYvY6/GPufUoYizxw1cF/yFoxJ2fmpwlM= -google.golang.org/protobuf v1.36.5/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk= +gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E= +google.golang.org/genproto/googleapis/api v0.0.0-20251029180050-ab9386a59fda h1:+2XxjfsAu6vqFxwGBRcHiMaDCuZiqXGDUDVWVtrFAnE= +google.golang.org/genproto/googleapis/api v0.0.0-20251029180050-ab9386a59fda/go.mod h1:fDMmzKV90WSg1NbozdqrE64fkuTv6mlq2zxo9ad+3yo= +google.golang.org/genproto/googleapis/rpc v0.0.0-20251124214823-79d6a2a48846 h1:Wgl1rcDNThT+Zn47YyCXOXyX/COgMTIdhJ717F0l4xk= +google.golang.org/genproto/googleapis/rpc 
v0.0.0-20251124214823-79d6a2a48846/go.mod h1:7i2o+ce6H/6BluujYR+kqX3GKH+dChPTQU19wjRPiGk= +google.golang.org/grpc v1.78.0 h1:K1XZG/yGDJnzMdd/uZHAkVqJE+xIDOcmdSFZkBUicNc= +google.golang.org/grpc v1.78.0/go.mod h1:I47qjTo4OKbMkjA/aOOwxDIiPSBofUtQUI5EfpWvW7U= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.36.11 h1:fV6ZwhNocDyBLK0dj+fg8ektcVegBBuEolpbTQyBNVE= +google.golang.org/protobuf v1.36.11/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= -gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4= -gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M= +gopkg.in/evanphx/json-patch.v4 v4.13.0 h1:czT3CmqEaQ1aanPc5SdlgQrrEIb8w/wwCvWWnfEbYzo= +gopkg.in/evanphx/json-patch.v4 v4.13.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M= +gopkg.in/fsnotify.v1 v1.4.7/go.mod 
h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/fsnotify/fsnotify.v1 v1.4.7 h1:XNNYLJHt73EyYiCZi6+xjupS9CpvmiDgjPTAjrBlQbo= +gopkg.in/fsnotify/fsnotify.v1 v1.4.7/go.mod h1:Fyux9zXlo4rWoMSIzpn9fDAYjalPqJ/K1qJ27s+7ltE= +gopkg.in/gcfg.v1 v1.2.3 h1:m8OOJ4ccYHnx2f4gQwpno8nAX5OGOh7RLaaz0pj3Ogs= +gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc= +gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME= +gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -k8s.io/api v0.32.3 h1:Hw7KqxRusq+6QSplE3NYG4MBxZw1BZnq4aP4cJVINls= -k8s.io/api v0.32.3/go.mod h1:2wEDTXADtm/HA7CCMD8D8bK4yuBUptzaRhYcYEEYA3k= -k8s.io/apiextensions-apiserver v0.32.1 h1:hjkALhRUeCariC8DiVmb5jj0VjIc1N0DREP32+6UXZw= -k8s.io/apiextensions-apiserver 
v0.32.1/go.mod h1:sxWIGuGiYov7Io1fAS2X06NjMIk5CbRHc2StSmbaQto= -k8s.io/apimachinery v0.32.3 h1:JmDuDarhDmA/Li7j3aPrwhpNBA94Nvk5zLeOge9HH1U= -k8s.io/apimachinery v0.32.3/go.mod h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE= -k8s.io/apiserver v0.32.1 h1:oo0OozRos66WFq87Zc5tclUX2r0mymoVHRq8JmR7Aak= -k8s.io/apiserver v0.32.1/go.mod h1:UcB9tWjBY7aryeI5zAgzVJB/6k7E97bkr1RgqDz0jPw= -k8s.io/client-go v0.32.3 h1:RKPVltzopkSgHS7aS98QdscAgtgah/+zmpAogooIqVU= -k8s.io/client-go v0.32.3/go.mod h1:3v0+3k4IcT9bXTc4V2rt+d2ZPPG700Xy6Oi0Gdl2PaY= -k8s.io/code-generator v0.32.3 h1:31p2TVzC9+hVdSkAFruAk3JY+iSfzrJ83Qij1yZutyw= -k8s.io/code-generator v0.32.3/go.mod h1:+mbiYID5NLsBuqxjQTygKM/DAdKpAjvBzrJd64NU1G8= -k8s.io/component-base v0.32.1 h1:/5IfJ0dHIKBWysGV0yKTFfacZ5yNV1sulPh3ilJjRZk= -k8s.io/component-base v0.32.1/go.mod h1:j1iMMHi/sqAHeG5z+O9BFNCF698a1u0186zkjMZQ28w= -k8s.io/cri-api v0.33.2 h1:1OiWm6gUx7JrN+xqxMzGDCPfPxVT8b6n7B6SeYl5luM= -k8s.io/cri-api v0.33.2/go.mod h1:OLQvT45OpIA+tv91ZrpuFIGY+Y2Ho23poS7n115Aocs= -k8s.io/gengo/v2 v2.0.0-20240911193312-2b36238f13e9 h1:si3PfKm8dDYxgfbeA6orqrtLkvvIeH8UqffFJDl0bz4= -k8s.io/gengo/v2 v2.0.0-20240911193312-2b36238f13e9/go.mod h1:EJykeLsmFC60UQbYJezXkEsG2FLrt0GPNkU5iK5GWxU= +k8s.io/api v0.34.3 h1:D12sTP257/jSH2vHV2EDYrb16bS7ULlHpdNdNhEw2S4= +k8s.io/api v0.34.3/go.mod h1:PyVQBF886Q5RSQZOim7DybQjAbVs8g7gwJNhGtY5MBk= +k8s.io/apiextensions-apiserver v0.34.3 h1:p10fGlkDY09eWKOTeUSioxwLukJnm+KuDZdrW71y40g= +k8s.io/apiextensions-apiserver v0.34.3/go.mod h1:aujxvqGFRdb/cmXYfcRTeppN7S2XV/t7WMEc64zB5A0= +k8s.io/apimachinery v0.34.3 h1:/TB+SFEiQvN9HPldtlWOTp0hWbJ+fjU+wkxysf/aQnE= +k8s.io/apimachinery v0.34.3/go.mod h1:/GwIlEcWuTX9zKIg2mbw0LRFIsXwrfoVxn+ef0X13lw= +k8s.io/apiserver v0.34.3 h1:uGH1qpDvSiYG4HVFqc6A3L4CKiX+aBWDrrsxHYK0Bdo= +k8s.io/apiserver v0.34.3/go.mod h1:QPnnahMO5C2m3lm6fPW3+JmyQbvHZQ8uudAu/493P2w= +k8s.io/client-go v0.34.3 h1:wtYtpzy/OPNYf7WyNBTj3iUA0XaBHVqhv4Iv3tbrF5A= +k8s.io/client-go v0.34.3/go.mod 
h1:OxxeYagaP9Kdf78UrKLa3YZixMCfP6bgPwPwNBQBzpM= +k8s.io/code-generator v0.34.3 h1:6ipJKsJZZ9q21BO8I2jEj4OLN3y8/1n4aihKN0xKmQk= +k8s.io/code-generator v0.34.3/go.mod h1:oW73UPYpGLsbRN8Ozkhd6ZzkF8hzFCiYmvEuWZDroI4= +k8s.io/component-base v0.34.3 h1:zsEgw6ELqK0XncCQomgO9DpUIzlrYuZYA0Cgo+JWpVk= +k8s.io/component-base v0.34.3/go.mod h1:5iIlD8wPfWE/xSHTRfbjuvUul2WZbI2nOUK65XL0E/c= +k8s.io/cri-api v0.35.0 h1:fxLSKyJHqbyCSUsg1rW4DRpmjSEM/elZ1GXzYTSLoDQ= +k8s.io/cri-api v0.35.0/go.mod h1:Cnt29u/tYl1Se1cBRL30uSZ/oJ5TaIp4sZm1xDLvcMc= +k8s.io/gengo/v2 v2.0.0-20250820003526-c297c0c1eb9d h1:qUrYOinhdAUL0xxhA4gPqogPBaS9nIq2l2kTb6pmeB0= +k8s.io/gengo/v2 v2.0.0-20250820003526-c297c0c1eb9d/go.mod h1:EJykeLsmFC60UQbYJezXkEsG2FLrt0GPNkU5iK5GWxU= k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= -k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f h1:GA7//TjRY9yWGy1poLzYYJJ4JRdzg3+O6e8I+e+8T5Y= -k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f/go.mod h1:R/HEjbvWI0qdfb8viZUeVZm0X6IZnxAydC7YU42CMw4= -k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 h1:M3sRQVHv7vB20Xc2ybTt7ODCeFj6JSWYFzOFnYeS6Ro= -k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.0 h1:CPT0ExVicCzcpeN4baWEV2ko2Z/AsiZgEdwgcfwLgMo= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.0/go.mod h1:Ve9uj1L+deCXFrPOk1LpFXqTg7LCFzFso6PA48q/XZw= -sigs.k8s.io/controller-runtime v0.20.4 h1:X3c+Odnxz+iPTRobG4tp092+CvBU9UK0t/bRf+n0DGU= -sigs.k8s.io/controller-runtime v0.20.4/go.mod h1:xg2XB0K5ShQzAgsoujxuKN4LNXR2LfwwHsPj7Iaw+XY= -sigs.k8s.io/gateway-api v1.1.0 h1:DsLDXCi6jR+Xz8/xd0Z1PYl2Pn0TyaFMOPPZIj4inDM= -sigs.k8s.io/gateway-api v1.1.0/go.mod h1:ZH4lHrL2sDi0FHZ9jjneb8kKnGzFWyrTya35sWUTrRs= -sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1Ray9eB4DBr+K+/sCJ8= 
-sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3/go.mod h1:18nIHnGi6636UCz6m8i4DhaJ65T6EruyzmoQqI2BVDo= -sigs.k8s.io/kind v0.24.0 h1:g4y4eu0qa+SCeKESLpESgMmVFBebL0BDa6f777OIWrg= -sigs.k8s.io/kind v0.24.0/go.mod h1:t7ueEpzPYJvHA8aeLtI52rtFftNgUYUaCwvxjk7phfw= -sigs.k8s.io/structured-merge-diff/v4 v4.4.2 h1:MdmvkGuXi/8io6ixD5wud3vOLwc1rj0aNqRlpuvjmwA= -sigs.k8s.io/structured-merge-diff/v4 v4.4.2/go.mod h1:N8f93tFZh9U6vpxwRArLiikrE5/2tiu1w1AGfACIGE4= -sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= -sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= +k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912 h1:Y3gxNAuB0OBLImH611+UDZcmKS3g6CthxToOb37KgwE= +k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912/go.mod h1:kdmbQkyfwUagLfXIad1y2TdrjPFWp2Q89B3qkRwf/pQ= +k8s.io/utils v0.0.0-20250820121507-0af2bda4dd1d h1:wAhiDyZ4Tdtt7e46e9M5ZSAJ/MnPGPs+Ki1gHw4w1R0= +k8s.io/utils v0.0.0-20250820121507-0af2bda4dd1d/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.2 h1:jpcvIRr3GLoUoEKRkHKSmGjxb6lWwrBlJsXc+eUYQHM= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.2/go.mod h1:Ve9uj1L+deCXFrPOk1LpFXqTg7LCFzFso6PA48q/XZw= +sigs.k8s.io/controller-runtime v0.22.4 h1:GEjV7KV3TY8e+tJ2LCTxUTanW4z/FmNB7l327UfMq9A= +sigs.k8s.io/controller-runtime v0.22.4/go.mod h1:+QX1XUpTXN4mLoblf4tqr5CQcyHPAki2HLXqQMY6vh8= +sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 h1:IpInykpT6ceI+QxKBbEflcR5EXP7sU1kvOlxwZh5txg= +sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg= +sigs.k8s.io/kind v0.30.0 h1:2Xi1KFEfSMm0XDcvKnUt15ZfgRPCT0OnCBbpgh8DztY= +sigs.k8s.io/kind v0.30.0/go.mod h1:FSqriGaoTPruiXWfRnUXNykF8r2t+fHtK0P0m1AbGF8= +sigs.k8s.io/network-policy-api v0.1.5 h1:xyS7VAaM9EfyB428oFk7WjWaCK6B129i+ILUF4C8l6E= +sigs.k8s.io/network-policy-api v0.1.5/go.mod h1:D7Nkr43VLNd7iYryemnj8qf0N/WjBzTZDxYA+g4u1/Y= 
+sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU= +sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY= +sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= +sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08= +sigs.k8s.io/structured-merge-diff/v6 v6.3.0 h1:jTijUJbW353oVOd9oTlifJqOGEkUw2jB/fXCbTiQEco= +sigs.k8s.io/structured-merge-diff/v6 v6.3.0/go.mod h1:M3W8sfWvn2HhQDIbGWj3S099YozAsymCo/wrT5ohRUE= +sigs.k8s.io/yaml v1.6.0 h1:G8fkbMSAFqgEFgh4b1wmtzDnioxFCUgTZhlbj5P9QYs= +sigs.k8s.io/yaml v1.6.0/go.mod h1:796bPqUfzR/0jLAl6XjHl3Ck7MiyVv8dbTdyT3/pMf4= diff --git a/vendor/cel.dev/expr/.bazelversion b/vendor/cel.dev/expr/.bazelversion index 26bc914a3..13c50892b 100644 --- a/vendor/cel.dev/expr/.bazelversion +++ b/vendor/cel.dev/expr/.bazelversion @@ -1,2 +1,2 @@ -7.0.1 +7.3.2 # Keep this pinned version in parity with cel-go diff --git a/vendor/cel.dev/expr/MODULE.bazel b/vendor/cel.dev/expr/MODULE.bazel index 9794266f5..85ac9ff61 100644 --- a/vendor/cel.dev/expr/MODULE.bazel +++ b/vendor/cel.dev/expr/MODULE.bazel @@ -8,26 +8,38 @@ bazel_dep( ) bazel_dep( name = "gazelle", - version = "0.36.0", + version = "0.39.1", repo_name = "bazel_gazelle", ) bazel_dep( name = "googleapis", - version = "0.0.0-20240819-fe8ba054a", + version = "0.0.0-20241220-5e258e33.bcr.1", repo_name = "com_google_googleapis", ) +bazel_dep( + name = "googleapis-cc", + version = "1.0.0", +) +bazel_dep( + name = "googleapis-java", + version = "1.0.0", +) +bazel_dep( + name = "googleapis-go", + version = "1.0.0", +) bazel_dep( name = "protobuf", - version = "26.0", + version = "27.0", repo_name = "com_google_protobuf", ) bazel_dep( name = "rules_cc", - version = "0.0.9", + version = "0.0.17", ) bazel_dep( name = "rules_go", - version = "0.49.0", + version = "0.53.0", repo_name = "io_bazel_rules_go", ) bazel_dep( @@ -36,7 +48,7 @@ bazel_dep( ) bazel_dep( name = 
"rules_proto", - version = "6.0.0", + version = "7.0.2", ) bazel_dep( name = "rules_python", @@ -50,16 +62,8 @@ python.toolchain( python_version = "3.11", ) -switched_rules = use_extension("@com_google_googleapis//:extensions.bzl", "switched_rules") -switched_rules.use_languages( - cc = True, - go = True, - java = True, -) -use_repo(switched_rules, "com_google_googleapis_imports") - go_sdk = use_extension("@io_bazel_rules_go//go:extensions.bzl", "go_sdk") -go_sdk.download(version = "1.21.1") +go_sdk.download(version = "1.22.0") go_deps = use_extension("@bazel_gazelle//:extensions.bzl", "go_deps") go_deps.from_file(go_mod = "//:go.mod") diff --git a/vendor/cel.dev/expr/README.md b/vendor/cel.dev/expr/README.md index 7930c0b75..42d67f87c 100644 --- a/vendor/cel.dev/expr/README.md +++ b/vendor/cel.dev/expr/README.md @@ -69,5 +69,3 @@ For more detail, see: * [Language Definition](doc/langdef.md) Released under the [Apache License](LICENSE). - -Disclaimer: This is not an official Google product. diff --git a/vendor/cel.dev/expr/cloudbuild.yaml b/vendor/cel.dev/expr/cloudbuild.yaml index c40881f12..e3e533a04 100644 --- a/vendor/cel.dev/expr/cloudbuild.yaml +++ b/vendor/cel.dev/expr/cloudbuild.yaml @@ -1,5 +1,5 @@ steps: -- name: 'gcr.io/cloud-builders/bazel:7.0.1' +- name: 'gcr.io/cloud-builders/bazel:7.3.2' entrypoint: bazel args: ['build', '...'] id: bazel-build diff --git a/vendor/cel.dev/expr/eval.pb.go b/vendor/cel.dev/expr/eval.pb.go index 8f651f9cc..a7aae0900 100644 --- a/vendor/cel.dev/expr/eval.pb.go +++ b/vendor/cel.dev/expr/eval.pb.go @@ -1,15 +1,15 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.28.1 -// protoc v3.21.5 +// protoc-gen-go v1.36.3 +// protoc v5.27.1 // source: cel/expr/eval.proto package expr import ( - status "google.golang.org/genproto/googleapis/rpc/status" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" reflect "reflect" sync "sync" ) @@ -22,21 +22,18 @@ const ( ) type EvalState struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Values []*ExprValue `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"` + Results []*EvalState_Result `protobuf:"bytes,3,rep,name=results,proto3" json:"results,omitempty"` unknownFields protoimpl.UnknownFields - - Values []*ExprValue `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"` - Results []*EvalState_Result `protobuf:"bytes,3,rep,name=results,proto3" json:"results,omitempty"` + sizeCache protoimpl.SizeCache } func (x *EvalState) Reset() { *x = EvalState{} - if protoimpl.UnsafeEnabled { - mi := &file_cel_expr_eval_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_cel_expr_eval_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *EvalState) String() string { @@ -47,7 +44,7 @@ func (*EvalState) ProtoMessage() {} func (x *EvalState) ProtoReflect() protoreflect.Message { mi := &file_cel_expr_eval_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -77,25 +74,22 @@ func (x *EvalState) GetResults() []*EvalState_Result { } type ExprValue struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Types that are assignable to Kind: 
+ state protoimpl.MessageState `protogen:"open.v1"` + // Types that are valid to be assigned to Kind: // // *ExprValue_Value // *ExprValue_Error // *ExprValue_Unknown - Kind isExprValue_Kind `protobuf_oneof:"kind"` + Kind isExprValue_Kind `protobuf_oneof:"kind"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *ExprValue) Reset() { *x = ExprValue{} - if protoimpl.UnsafeEnabled { - mi := &file_cel_expr_eval_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_cel_expr_eval_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ExprValue) String() string { @@ -106,7 +100,7 @@ func (*ExprValue) ProtoMessage() {} func (x *ExprValue) ProtoReflect() protoreflect.Message { mi := &file_cel_expr_eval_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -121,30 +115,36 @@ func (*ExprValue) Descriptor() ([]byte, []int) { return file_cel_expr_eval_proto_rawDescGZIP(), []int{1} } -func (m *ExprValue) GetKind() isExprValue_Kind { - if m != nil { - return m.Kind +func (x *ExprValue) GetKind() isExprValue_Kind { + if x != nil { + return x.Kind } return nil } func (x *ExprValue) GetValue() *Value { - if x, ok := x.GetKind().(*ExprValue_Value); ok { - return x.Value + if x != nil { + if x, ok := x.Kind.(*ExprValue_Value); ok { + return x.Value + } } return nil } func (x *ExprValue) GetError() *ErrorSet { - if x, ok := x.GetKind().(*ExprValue_Error); ok { - return x.Error + if x != nil { + if x, ok := x.Kind.(*ExprValue_Error); ok { + return x.Error + } } return nil } func (x *ExprValue) GetUnknown() *UnknownSet { - if x, ok := x.GetKind().(*ExprValue_Unknown); ok { - return x.Unknown + if x != nil { + if x, ok := x.Kind.(*ExprValue_Unknown); ok { + return x.Unknown + } } return nil } @@ 
-172,20 +172,17 @@ func (*ExprValue_Error) isExprValue_Kind() {} func (*ExprValue_Unknown) isExprValue_Kind() {} type ErrorSet struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Errors []*Status `protobuf:"bytes,1,rep,name=errors,proto3" json:"errors,omitempty"` unknownFields protoimpl.UnknownFields - - Errors []*status.Status `protobuf:"bytes,1,rep,name=errors,proto3" json:"errors,omitempty"` + sizeCache protoimpl.SizeCache } func (x *ErrorSet) Reset() { *x = ErrorSet{} - if protoimpl.UnsafeEnabled { - mi := &file_cel_expr_eval_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_cel_expr_eval_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ErrorSet) String() string { @@ -196,7 +193,7 @@ func (*ErrorSet) ProtoMessage() {} func (x *ErrorSet) ProtoReflect() protoreflect.Message { mi := &file_cel_expr_eval_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -211,28 +208,85 @@ func (*ErrorSet) Descriptor() ([]byte, []int) { return file_cel_expr_eval_proto_rawDescGZIP(), []int{2} } -func (x *ErrorSet) GetErrors() []*status.Status { +func (x *ErrorSet) GetErrors() []*Status { if x != nil { return x.Errors } return nil } -type UnknownSet struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache +type Status struct { + state protoimpl.MessageState `protogen:"open.v1"` + Code int32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"` + Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` + Details []*anypb.Any `protobuf:"bytes,3,rep,name=details,proto3" json:"details,omitempty"` unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} - Exprs []int64 
`protobuf:"varint,1,rep,packed,name=exprs,proto3" json:"exprs,omitempty"` +func (x *Status) Reset() { + *x = Status{} + mi := &file_cel_expr_eval_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *UnknownSet) Reset() { - *x = UnknownSet{} - if protoimpl.UnsafeEnabled { - mi := &file_cel_expr_eval_proto_msgTypes[3] +func (x *Status) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Status) ProtoMessage() {} + +func (x *Status) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_eval_proto_msgTypes[3] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Status.ProtoReflect.Descriptor instead. +func (*Status) Descriptor() ([]byte, []int) { + return file_cel_expr_eval_proto_rawDescGZIP(), []int{3} +} + +func (x *Status) GetCode() int32 { + if x != nil { + return x.Code } + return 0 +} + +func (x *Status) GetMessage() string { + if x != nil { + return x.Message + } + return "" +} + +func (x *Status) GetDetails() []*anypb.Any { + if x != nil { + return x.Details + } + return nil +} + +type UnknownSet struct { + state protoimpl.MessageState `protogen:"open.v1"` + Exprs []int64 `protobuf:"varint,1,rep,packed,name=exprs,proto3" json:"exprs,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *UnknownSet) Reset() { + *x = UnknownSet{} + mi := &file_cel_expr_eval_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *UnknownSet) String() string { @@ -242,8 +296,8 @@ func (x *UnknownSet) String() string { func (*UnknownSet) ProtoMessage() {} func (x *UnknownSet) ProtoReflect() protoreflect.Message { - mi := &file_cel_expr_eval_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { + mi := 
&file_cel_expr_eval_proto_msgTypes[4] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -255,7 +309,7 @@ func (x *UnknownSet) ProtoReflect() protoreflect.Message { // Deprecated: Use UnknownSet.ProtoReflect.Descriptor instead. func (*UnknownSet) Descriptor() ([]byte, []int) { - return file_cel_expr_eval_proto_rawDescGZIP(), []int{3} + return file_cel_expr_eval_proto_rawDescGZIP(), []int{4} } func (x *UnknownSet) GetExprs() []int64 { @@ -266,21 +320,18 @@ func (x *UnknownSet) GetExprs() []int64 { } type EvalState_Result struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Expr int64 `protobuf:"varint,1,opt,name=expr,proto3" json:"expr,omitempty"` + Value int64 `protobuf:"varint,2,opt,name=value,proto3" json:"value,omitempty"` unknownFields protoimpl.UnknownFields - - Expr int64 `protobuf:"varint,1,opt,name=expr,proto3" json:"expr,omitempty"` - Value int64 `protobuf:"varint,2,opt,name=value,proto3" json:"value,omitempty"` + sizeCache protoimpl.SizeCache } func (x *EvalState_Result) Reset() { *x = EvalState_Result{} - if protoimpl.UnsafeEnabled { - mi := &file_cel_expr_eval_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_cel_expr_eval_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *EvalState_Result) String() string { @@ -290,8 +341,8 @@ func (x *EvalState_Result) String() string { func (*EvalState_Result) ProtoMessage() {} func (x *EvalState_Result) ProtoReflect() protoreflect.Message { - mi := &file_cel_expr_eval_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { + mi := &file_cel_expr_eval_proto_msgTypes[5] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -325,39 +376,45 @@ var 
File_cel_expr_eval_proto protoreflect.FileDescriptor var file_cel_expr_eval_proto_rawDesc = []byte{ 0x0a, 0x13, 0x63, 0x65, 0x6c, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x65, 0x76, 0x61, 0x6c, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x08, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x1a, - 0x14, 0x63, 0x65, 0x6c, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x72, 0x70, - 0x63, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xa2, - 0x01, 0x0a, 0x09, 0x45, 0x76, 0x61, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x2b, 0x0a, 0x06, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x63, - 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x56, 0x61, 0x6c, 0x75, - 0x65, 0x52, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x12, 0x34, 0x0a, 0x07, 0x72, 0x65, 0x73, - 0x75, 0x6c, 0x74, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x63, 0x65, 0x6c, - 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x76, 0x61, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x65, 0x2e, - 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x1a, - 0x32, 0x0a, 0x06, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x65, 0x78, 0x70, - 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x04, 0x65, 0x78, 0x70, 0x72, 0x12, 0x14, 0x0a, - 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x76, 0x61, - 0x6c, 0x75, 0x65, 0x22, 0x9a, 0x01, 0x0a, 0x09, 0x45, 0x78, 0x70, 0x72, 0x56, 0x61, 0x6c, 0x75, - 0x65, 0x12, 0x27, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x0f, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x56, 0x61, 0x6c, 0x75, - 0x65, 0x48, 0x00, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x2a, 0x0a, 0x05, 0x65, 0x72, - 0x72, 0x6f, 0x72, 0x18, 0x02, 0x20, 
0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x65, 0x6c, 0x2e, - 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x53, 0x65, 0x74, 0x48, 0x00, 0x52, - 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x30, 0x0a, 0x07, 0x75, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, - 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, - 0x70, 0x72, 0x2e, 0x55, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x53, 0x65, 0x74, 0x48, 0x00, 0x52, - 0x07, 0x75, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x42, 0x06, 0x0a, 0x04, 0x6b, 0x69, 0x6e, 0x64, - 0x22, 0x36, 0x0a, 0x08, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x53, 0x65, 0x74, 0x12, 0x2a, 0x0a, 0x06, - 0x65, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, - 0x52, 0x06, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x22, 0x22, 0x0a, 0x0a, 0x55, 0x6e, 0x6b, 0x6e, - 0x6f, 0x77, 0x6e, 0x53, 0x65, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x78, 0x70, 0x72, 0x73, 0x18, - 0x01, 0x20, 0x03, 0x28, 0x03, 0x52, 0x05, 0x65, 0x78, 0x70, 0x72, 0x73, 0x42, 0x2c, 0x0a, 0x0c, - 0x64, 0x65, 0x76, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x42, 0x09, 0x45, 0x76, - 0x61, 0x6c, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x0c, 0x63, 0x65, 0x6c, 0x2e, 0x64, - 0x65, 0x76, 0x2f, 0x65, 0x78, 0x70, 0x72, 0xf8, 0x01, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x33, + 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x14, 0x63, 0x65, 0x6c, 0x2f, + 0x65, 0x78, 0x70, 0x72, 0x2f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x22, 0xa2, 0x01, 0x0a, 0x09, 0x45, 0x76, 0x61, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x2b, + 0x0a, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x13, + 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 
0x45, 0x78, 0x70, 0x72, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x52, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x12, 0x34, 0x0a, 0x07, 0x72, + 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x63, + 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x76, 0x61, 0x6c, 0x53, 0x74, 0x61, 0x74, + 0x65, 0x2e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, + 0x73, 0x1a, 0x32, 0x0a, 0x06, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x65, + 0x78, 0x70, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x04, 0x65, 0x78, 0x70, 0x72, 0x12, + 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x9a, 0x01, 0x0a, 0x09, 0x45, 0x78, 0x70, 0x72, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x12, 0x27, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x48, 0x00, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x2a, 0x0a, 0x05, + 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x65, + 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x53, 0x65, 0x74, 0x48, + 0x00, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x30, 0x0a, 0x07, 0x75, 0x6e, 0x6b, 0x6e, + 0x6f, 0x77, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x63, 0x65, 0x6c, 0x2e, + 0x65, 0x78, 0x70, 0x72, 0x2e, 0x55, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x53, 0x65, 0x74, 0x48, + 0x00, 0x52, 0x07, 0x75, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x42, 0x06, 0x0a, 0x04, 0x6b, 0x69, + 0x6e, 0x64, 0x22, 0x34, 0x0a, 0x08, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x53, 0x65, 0x74, 0x12, 0x28, + 0x0a, 0x06, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x10, + 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, + 
0x52, 0x06, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x22, 0x66, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, + 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x12, 0x2e, 0x0a, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, + 0x22, 0x22, 0x0a, 0x0a, 0x55, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x53, 0x65, 0x74, 0x12, 0x14, + 0x0a, 0x05, 0x65, 0x78, 0x70, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x03, 0x52, 0x05, 0x65, + 0x78, 0x70, 0x72, 0x73, 0x42, 0x2c, 0x0a, 0x0c, 0x64, 0x65, 0x76, 0x2e, 0x63, 0x65, 0x6c, 0x2e, + 0x65, 0x78, 0x70, 0x72, 0x42, 0x09, 0x45, 0x76, 0x61, 0x6c, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, + 0x01, 0x5a, 0x0c, 0x63, 0x65, 0x6c, 0x2e, 0x64, 0x65, 0x76, 0x2f, 0x65, 0x78, 0x70, 0x72, 0xf8, + 0x01, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -372,28 +429,30 @@ func file_cel_expr_eval_proto_rawDescGZIP() []byte { return file_cel_expr_eval_proto_rawDescData } -var file_cel_expr_eval_proto_msgTypes = make([]protoimpl.MessageInfo, 5) -var file_cel_expr_eval_proto_goTypes = []interface{}{ +var file_cel_expr_eval_proto_msgTypes = make([]protoimpl.MessageInfo, 6) +var file_cel_expr_eval_proto_goTypes = []any{ (*EvalState)(nil), // 0: cel.expr.EvalState (*ExprValue)(nil), // 1: cel.expr.ExprValue (*ErrorSet)(nil), // 2: cel.expr.ErrorSet - (*UnknownSet)(nil), // 3: cel.expr.UnknownSet - (*EvalState_Result)(nil), // 4: cel.expr.EvalState.Result - (*Value)(nil), // 5: cel.expr.Value - (*status.Status)(nil), // 6: google.rpc.Status + (*Status)(nil), // 3: cel.expr.Status + 
(*UnknownSet)(nil), // 4: cel.expr.UnknownSet + (*EvalState_Result)(nil), // 5: cel.expr.EvalState.Result + (*Value)(nil), // 6: cel.expr.Value + (*anypb.Any)(nil), // 7: google.protobuf.Any } var file_cel_expr_eval_proto_depIdxs = []int32{ 1, // 0: cel.expr.EvalState.values:type_name -> cel.expr.ExprValue - 4, // 1: cel.expr.EvalState.results:type_name -> cel.expr.EvalState.Result - 5, // 2: cel.expr.ExprValue.value:type_name -> cel.expr.Value + 5, // 1: cel.expr.EvalState.results:type_name -> cel.expr.EvalState.Result + 6, // 2: cel.expr.ExprValue.value:type_name -> cel.expr.Value 2, // 3: cel.expr.ExprValue.error:type_name -> cel.expr.ErrorSet - 3, // 4: cel.expr.ExprValue.unknown:type_name -> cel.expr.UnknownSet - 6, // 5: cel.expr.ErrorSet.errors:type_name -> google.rpc.Status - 6, // [6:6] is the sub-list for method output_type - 6, // [6:6] is the sub-list for method input_type - 6, // [6:6] is the sub-list for extension type_name - 6, // [6:6] is the sub-list for extension extendee - 0, // [0:6] is the sub-list for field type_name + 4, // 4: cel.expr.ExprValue.unknown:type_name -> cel.expr.UnknownSet + 3, // 5: cel.expr.ErrorSet.errors:type_name -> cel.expr.Status + 7, // 6: cel.expr.Status.details:type_name -> google.protobuf.Any + 7, // [7:7] is the sub-list for method output_type + 7, // [7:7] is the sub-list for method input_type + 7, // [7:7] is the sub-list for extension type_name + 7, // [7:7] is the sub-list for extension extendee + 0, // [0:7] is the sub-list for field type_name } func init() { file_cel_expr_eval_proto_init() } @@ -402,69 +461,7 @@ func file_cel_expr_eval_proto_init() { return } file_cel_expr_value_proto_init() - if !protoimpl.UnsafeEnabled { - file_cel_expr_eval_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*EvalState); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - 
file_cel_expr_eval_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ExprValue); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_cel_expr_eval_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ErrorSet); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_cel_expr_eval_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*UnknownSet); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_cel_expr_eval_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*EvalState_Result); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - file_cel_expr_eval_proto_msgTypes[1].OneofWrappers = []interface{}{ + file_cel_expr_eval_proto_msgTypes[1].OneofWrappers = []any{ (*ExprValue_Value)(nil), (*ExprValue_Error)(nil), (*ExprValue_Unknown)(nil), @@ -475,7 +472,7 @@ func file_cel_expr_eval_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_cel_expr_eval_proto_rawDesc, NumEnums: 0, - NumMessages: 5, + NumMessages: 6, NumExtensions: 0, NumServices: 0, }, diff --git a/vendor/github.com/asaskevich/govalidator/.gitignore b/vendor/github.com/asaskevich/govalidator/.gitignore deleted file mode 100644 index 8d69a9418..000000000 --- a/vendor/github.com/asaskevich/govalidator/.gitignore +++ /dev/null @@ -1,15 +0,0 @@ -bin/ -.idea/ -# Binaries for programs and plugins -*.exe -*.exe~ -*.dll -*.so -*.dylib - -# Test binary, built with `go test -c` -*.test - -# Output of the go coverage tool, specifically when used with LiteIDE -*.out - diff --git 
a/vendor/github.com/asaskevich/govalidator/.travis.yml b/vendor/github.com/asaskevich/govalidator/.travis.yml deleted file mode 100644 index bb83c6670..000000000 --- a/vendor/github.com/asaskevich/govalidator/.travis.yml +++ /dev/null @@ -1,12 +0,0 @@ -language: go -dist: xenial -go: - - '1.10' - - '1.11' - - '1.12' - - '1.13' - - 'tip' - -script: - - go test -coverpkg=./... -coverprofile=coverage.info -timeout=5s - - bash <(curl -s https://codecov.io/bash) diff --git a/vendor/github.com/asaskevich/govalidator/CODE_OF_CONDUCT.md b/vendor/github.com/asaskevich/govalidator/CODE_OF_CONDUCT.md deleted file mode 100644 index 4b462b0d8..000000000 --- a/vendor/github.com/asaskevich/govalidator/CODE_OF_CONDUCT.md +++ /dev/null @@ -1,43 +0,0 @@ -# Contributor Code of Conduct - -This project adheres to [The Code Manifesto](http://codemanifesto.com) -as its guidelines for contributor interactions. - -## The Code Manifesto - -We want to work in an ecosystem that empowers developers to reach their -potential — one that encourages growth and effective collaboration. A space -that is safe for all. - -A space such as this benefits everyone that participates in it. It encourages -new developers to enter our field. It is through discussion and collaboration -that we grow, and through growth that we improve. - -In the effort to create such a place, we hold to these values: - -1. **Discrimination limits us.** This includes discrimination on the basis of - race, gender, sexual orientation, gender identity, age, nationality, - technology and any other arbitrary exclusion of a group of people. -2. **Boundaries honor us.** Your comfort levels are not everyone’s comfort - levels. Remember that, and if brought to your attention, heed it. -3. **We are our biggest assets.** None of us were born masters of our trade. - Each of us has been helped along the way. Return that favor, when and where - you can. -4. **We are resources for the future.** As an extension of #3, share what you - know. 
Make yourself a resource to help those that come after you. -5. **Respect defines us.** Treat others as you wish to be treated. Make your - discussions, criticisms and debates from a position of respectfulness. Ask - yourself, is it true? Is it necessary? Is it constructive? Anything less is - unacceptable. -6. **Reactions require grace.** Angry responses are valid, but abusive language - and vindictive actions are toxic. When something happens that offends you, - handle it assertively, but be respectful. Escalate reasonably, and try to - allow the offender an opportunity to explain themselves, and possibly - correct the issue. -7. **Opinions are just that: opinions.** Each and every one of us, due to our - background and upbringing, have varying opinions. That is perfectly - acceptable. Remember this: if you respect your own opinions, you should - respect the opinions of others. -8. **To err is human.** You might not intend it, but mistakes do happen and - contribute to build experience. Tolerate honest mistakes, and don't - hesitate to apologize if you make one yourself. diff --git a/vendor/github.com/asaskevich/govalidator/CONTRIBUTING.md b/vendor/github.com/asaskevich/govalidator/CONTRIBUTING.md deleted file mode 100644 index 7ed268a1e..000000000 --- a/vendor/github.com/asaskevich/govalidator/CONTRIBUTING.md +++ /dev/null @@ -1,63 +0,0 @@ -#### Support -If you do have a contribution to the package, feel free to create a Pull Request or an Issue. 
- -#### What to contribute -If you don't know what to do, there are some features and functions that need to be done - -- [ ] Refactor code -- [ ] Edit docs and [README](https://github.com/asaskevich/govalidator/README.md): spellcheck, grammar and typo check -- [ ] Create actual list of contributors and projects that currently using this package -- [ ] Resolve [issues and bugs](https://github.com/asaskevich/govalidator/issues) -- [ ] Update actual [list of functions](https://github.com/asaskevich/govalidator#list-of-functions) -- [ ] Update [list of validators](https://github.com/asaskevich/govalidator#validatestruct-2) that available for `ValidateStruct` and add new -- [ ] Implement new validators: `IsFQDN`, `IsIMEI`, `IsPostalCode`, `IsISIN`, `IsISRC` etc -- [x] Implement [validation by maps](https://github.com/asaskevich/govalidator/issues/224) -- [ ] Implement fuzzing testing -- [ ] Implement some struct/map/array utilities -- [ ] Implement map/array validation -- [ ] Implement benchmarking -- [ ] Implement batch of examples -- [ ] Look at forks for new features and fixes - -#### Advice -Feel free to create what you want, but keep in mind when you implement new features: -- Code must be clear and readable, names of variables/constants clearly describes what they are doing -- Public functions must be documented and described in source file and added to README.md to the list of available functions -- There are must be unit-tests for any new functions and improvements - -## Financial contributions - -We also welcome financial contributions in full transparency on our [open collective](https://opencollective.com/govalidator). -Anyone can file an expense. If the expense makes sense for the development of the community, it will be "merged" in the ledger of our open collective by the core contributors and the person who filed the expense will be reimbursed. 
- - -## Credits - - -### Contributors - -Thank you to all the people who have already contributed to govalidator! - - - -### Backers - -Thank you to all our backers! [[Become a backer](https://opencollective.com/govalidator#backer)] - - - - -### Sponsors - -Thank you to all our sponsors! (please ask your company to also support this open source project by [becoming a sponsor](https://opencollective.com/govalidator#sponsor)) - - - - - - - - - - - \ No newline at end of file diff --git a/vendor/github.com/asaskevich/govalidator/LICENSE b/vendor/github.com/asaskevich/govalidator/LICENSE deleted file mode 100644 index cacba9102..000000000 --- a/vendor/github.com/asaskevich/govalidator/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2014-2020 Alex Saskevich - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. 
\ No newline at end of file diff --git a/vendor/github.com/asaskevich/govalidator/README.md b/vendor/github.com/asaskevich/govalidator/README.md deleted file mode 100644 index 2c3fc35eb..000000000 --- a/vendor/github.com/asaskevich/govalidator/README.md +++ /dev/null @@ -1,622 +0,0 @@ -govalidator -=========== -[![Gitter](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/asaskevich/govalidator?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge) [![GoDoc](https://godoc.org/github.com/asaskevich/govalidator?status.png)](https://godoc.org/github.com/asaskevich/govalidator) -[![Build Status](https://travis-ci.org/asaskevich/govalidator.svg?branch=master)](https://travis-ci.org/asaskevich/govalidator) -[![Coverage](https://codecov.io/gh/asaskevich/govalidator/branch/master/graph/badge.svg)](https://codecov.io/gh/asaskevich/govalidator) [![Go Report Card](https://goreportcard.com/badge/github.com/asaskevich/govalidator)](https://goreportcard.com/report/github.com/asaskevich/govalidator) [![GoSearch](http://go-search.org/badge?id=github.com%2Fasaskevich%2Fgovalidator)](http://go-search.org/view?id=github.com%2Fasaskevich%2Fgovalidator) [![Backers on Open Collective](https://opencollective.com/govalidator/backers/badge.svg)](#backers) [![Sponsors on Open Collective](https://opencollective.com/govalidator/sponsors/badge.svg)](#sponsors) [![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fasaskevich%2Fgovalidator.svg?type=shield)](https://app.fossa.io/projects/git%2Bgithub.com%2Fasaskevich%2Fgovalidator?ref=badge_shield) - -A package of validators and sanitizers for strings, structs and collections. Based on [validator.js](https://github.com/chriso/validator.js). - -#### Installation -Make sure that Go is installed on your computer. 
-Type the following command in your terminal: - - go get github.com/asaskevich/govalidator - -or you can get specified release of the package with `gopkg.in`: - - go get gopkg.in/asaskevich/govalidator.v10 - -After it the package is ready to use. - - -#### Import package in your project -Add following line in your `*.go` file: -```go -import "github.com/asaskevich/govalidator" -``` -If you are unhappy to use long `govalidator`, you can do something like this: -```go -import ( - valid "github.com/asaskevich/govalidator" -) -``` - -#### Activate behavior to require all fields have a validation tag by default -`SetFieldsRequiredByDefault` causes validation to fail when struct fields do not include validations or are not explicitly marked as exempt (using `valid:"-"` or `valid:"email,optional"`). A good place to activate this is a package init function or the main() function. - -`SetNilPtrAllowedByRequired` causes validation to pass when struct fields marked by `required` are set to nil. This is disabled by default for consistency, but some packages that need to be able to determine between `nil` and `zero value` state can use this. If disabled, both `nil` and `zero` values cause validation errors. 
- -```go -import "github.com/asaskevich/govalidator" - -func init() { - govalidator.SetFieldsRequiredByDefault(true) -} -``` - -Here's some code to explain it: -```go -// this struct definition will fail govalidator.ValidateStruct() (and the field values do not matter): -type exampleStruct struct { - Name string `` - Email string `valid:"email"` -} - -// this, however, will only fail when Email is empty or an invalid email address: -type exampleStruct2 struct { - Name string `valid:"-"` - Email string `valid:"email"` -} - -// lastly, this will only fail when Email is an invalid email address but not when it's empty: -type exampleStruct2 struct { - Name string `valid:"-"` - Email string `valid:"email,optional"` -} -``` - -#### Recent breaking changes (see [#123](https://github.com/asaskevich/govalidator/pull/123)) -##### Custom validator function signature -A context was added as the second parameter, for structs this is the object being validated – this makes dependent validation possible. -```go -import "github.com/asaskevich/govalidator" - -// old signature -func(i interface{}) bool - -// new signature -func(i interface{}, o interface{}) bool -``` - -##### Adding a custom validator -This was changed to prevent data races when accessing custom validators. -```go -import "github.com/asaskevich/govalidator" - -// before -govalidator.CustomTypeTagMap["customByteArrayValidator"] = func(i interface{}, o interface{}) bool { - // ... -} - -// after -govalidator.CustomTypeTagMap.Set("customByteArrayValidator", func(i interface{}, o interface{}) bool { - // ... 
-}) -``` - -#### List of functions: -```go -func Abs(value float64) float64 -func BlackList(str, chars string) string -func ByteLength(str string, params ...string) bool -func CamelCaseToUnderscore(str string) string -func Contains(str, substring string) bool -func Count(array []interface{}, iterator ConditionIterator) int -func Each(array []interface{}, iterator Iterator) -func ErrorByField(e error, field string) string -func ErrorsByField(e error) map[string]string -func Filter(array []interface{}, iterator ConditionIterator) []interface{} -func Find(array []interface{}, iterator ConditionIterator) interface{} -func GetLine(s string, index int) (string, error) -func GetLines(s string) []string -func HasLowerCase(str string) bool -func HasUpperCase(str string) bool -func HasWhitespace(str string) bool -func HasWhitespaceOnly(str string) bool -func InRange(value interface{}, left interface{}, right interface{}) bool -func InRangeFloat32(value, left, right float32) bool -func InRangeFloat64(value, left, right float64) bool -func InRangeInt(value, left, right interface{}) bool -func IsASCII(str string) bool -func IsAlpha(str string) bool -func IsAlphanumeric(str string) bool -func IsBase64(str string) bool -func IsByteLength(str string, min, max int) bool -func IsCIDR(str string) bool -func IsCRC32(str string) bool -func IsCRC32b(str string) bool -func IsCreditCard(str string) bool -func IsDNSName(str string) bool -func IsDataURI(str string) bool -func IsDialString(str string) bool -func IsDivisibleBy(str, num string) bool -func IsEmail(str string) bool -func IsExistingEmail(email string) bool -func IsFilePath(str string) (bool, int) -func IsFloat(str string) bool -func IsFullWidth(str string) bool -func IsHalfWidth(str string) bool -func IsHash(str string, algorithm string) bool -func IsHexadecimal(str string) bool -func IsHexcolor(str string) bool -func IsHost(str string) bool -func IsIP(str string) bool -func IsIPv4(str string) bool -func IsIPv6(str string) bool 
-func IsISBN(str string, version int) bool -func IsISBN10(str string) bool -func IsISBN13(str string) bool -func IsISO3166Alpha2(str string) bool -func IsISO3166Alpha3(str string) bool -func IsISO4217(str string) bool -func IsISO693Alpha2(str string) bool -func IsISO693Alpha3b(str string) bool -func IsIn(str string, params ...string) bool -func IsInRaw(str string, params ...string) bool -func IsInt(str string) bool -func IsJSON(str string) bool -func IsLatitude(str string) bool -func IsLongitude(str string) bool -func IsLowerCase(str string) bool -func IsMAC(str string) bool -func IsMD4(str string) bool -func IsMD5(str string) bool -func IsMagnetURI(str string) bool -func IsMongoID(str string) bool -func IsMultibyte(str string) bool -func IsNatural(value float64) bool -func IsNegative(value float64) bool -func IsNonNegative(value float64) bool -func IsNonPositive(value float64) bool -func IsNotNull(str string) bool -func IsNull(str string) bool -func IsNumeric(str string) bool -func IsPort(str string) bool -func IsPositive(value float64) bool -func IsPrintableASCII(str string) bool -func IsRFC3339(str string) bool -func IsRFC3339WithoutZone(str string) bool -func IsRGBcolor(str string) bool -func IsRegex(str string) bool -func IsRequestURI(rawurl string) bool -func IsRequestURL(rawurl string) bool -func IsRipeMD128(str string) bool -func IsRipeMD160(str string) bool -func IsRsaPub(str string, params ...string) bool -func IsRsaPublicKey(str string, keylen int) bool -func IsSHA1(str string) bool -func IsSHA256(str string) bool -func IsSHA384(str string) bool -func IsSHA512(str string) bool -func IsSSN(str string) bool -func IsSemver(str string) bool -func IsTiger128(str string) bool -func IsTiger160(str string) bool -func IsTiger192(str string) bool -func IsTime(str string, format string) bool -func IsType(v interface{}, params ...string) bool -func IsURL(str string) bool -func IsUTFDigit(str string) bool -func IsUTFLetter(str string) bool -func 
IsUTFLetterNumeric(str string) bool -func IsUTFNumeric(str string) bool -func IsUUID(str string) bool -func IsUUIDv3(str string) bool -func IsUUIDv4(str string) bool -func IsUUIDv5(str string) bool -func IsULID(str string) bool -func IsUnixTime(str string) bool -func IsUpperCase(str string) bool -func IsVariableWidth(str string) bool -func IsWhole(value float64) bool -func LeftTrim(str, chars string) string -func Map(array []interface{}, iterator ResultIterator) []interface{} -func Matches(str, pattern string) bool -func MaxStringLength(str string, params ...string) bool -func MinStringLength(str string, params ...string) bool -func NormalizeEmail(str string) (string, error) -func PadBoth(str string, padStr string, padLen int) string -func PadLeft(str string, padStr string, padLen int) string -func PadRight(str string, padStr string, padLen int) string -func PrependPathToErrors(err error, path string) error -func Range(str string, params ...string) bool -func RemoveTags(s string) string -func ReplacePattern(str, pattern, replace string) string -func Reverse(s string) string -func RightTrim(str, chars string) string -func RuneLength(str string, params ...string) bool -func SafeFileName(str string) string -func SetFieldsRequiredByDefault(value bool) -func SetNilPtrAllowedByRequired(value bool) -func Sign(value float64) float64 -func StringLength(str string, params ...string) bool -func StringMatches(s string, params ...string) bool -func StripLow(str string, keepNewLines bool) string -func ToBoolean(str string) (bool, error) -func ToFloat(str string) (float64, error) -func ToInt(value interface{}) (res int64, err error) -func ToJSON(obj interface{}) (string, error) -func ToString(obj interface{}) string -func Trim(str, chars string) string -func Truncate(str string, length int, ending string) string -func TruncatingErrorf(str string, args ...interface{}) error -func UnderscoreToCamelCase(s string) string -func ValidateMap(inputMap map[string]interface{}, 
validationMap map[string]interface{}) (bool, error) -func ValidateStruct(s interface{}) (bool, error) -func WhiteList(str, chars string) string -type ConditionIterator -type CustomTypeValidator -type Error -func (e Error) Error() string -type Errors -func (es Errors) Error() string -func (es Errors) Errors() []error -type ISO3166Entry -type ISO693Entry -type InterfaceParamValidator -type Iterator -type ParamValidator -type ResultIterator -type UnsupportedTypeError -func (e *UnsupportedTypeError) Error() string -type Validator -``` - -#### Examples -###### IsURL -```go -println(govalidator.IsURL(`http://user@pass:domain.com/path/page`)) -``` -###### IsType -```go -println(govalidator.IsType("Bob", "string")) -println(govalidator.IsType(1, "int")) -i := 1 -println(govalidator.IsType(&i, "*int")) -``` - -IsType can be used through the tag `type` which is essential for map validation: -```go -type User struct { - Name string `valid:"type(string)"` - Age int `valid:"type(int)"` - Meta interface{} `valid:"type(string)"` -} -result, err := govalidator.ValidateStruct(User{"Bob", 20, "meta"}) -if err != nil { - println("error: " + err.Error()) -} -println(result) -``` -###### ToString -```go -type User struct { - FirstName string - LastName string -} - -str := govalidator.ToString(&User{"John", "Juan"}) -println(str) -``` -###### Each, Map, Filter, Count for slices -Each iterates over the slice/array and calls Iterator for every item -```go -data := []interface{}{1, 2, 3, 4, 5} -var fn govalidator.Iterator = func(value interface{}, index int) { - println(value.(int)) -} -govalidator.Each(data, fn) -``` -```go -data := []interface{}{1, 2, 3, 4, 5} -var fn govalidator.ResultIterator = func(value interface{}, index int) interface{} { - return value.(int) * 3 -} -_ = govalidator.Map(data, fn) // result = []interface{}{1, 6, 9, 12, 15} -``` -```go -data := []interface{}{1, 2, 3, 4, 5, 6, 7, 8, 9, 10} -var fn govalidator.ConditionIterator = func(value interface{}, index int) bool 
{ - return value.(int)%2 == 0 -} -_ = govalidator.Filter(data, fn) // result = []interface{}{2, 4, 6, 8, 10} -_ = govalidator.Count(data, fn) // result = 5 -``` -###### ValidateStruct [#2](https://github.com/asaskevich/govalidator/pull/2) -If you want to validate structs, you can use tag `valid` for any field in your structure. All validators used with this field in one tag are separated by comma. If you want to skip validation, place `-` in your tag. If you need a validator that is not on the list below, you can add it like this: -```go -govalidator.TagMap["duck"] = govalidator.Validator(func(str string) bool { - return str == "duck" -}) -``` -For completely custom validators (interface-based), see below. - -Here is a list of available validators for struct fields (validator - used function): -```go -"email": IsEmail, -"url": IsURL, -"dialstring": IsDialString, -"requrl": IsRequestURL, -"requri": IsRequestURI, -"alpha": IsAlpha, -"utfletter": IsUTFLetter, -"alphanum": IsAlphanumeric, -"utfletternum": IsUTFLetterNumeric, -"numeric": IsNumeric, -"utfnumeric": IsUTFNumeric, -"utfdigit": IsUTFDigit, -"hexadecimal": IsHexadecimal, -"hexcolor": IsHexcolor, -"rgbcolor": IsRGBcolor, -"lowercase": IsLowerCase, -"uppercase": IsUpperCase, -"int": IsInt, -"float": IsFloat, -"null": IsNull, -"uuid": IsUUID, -"uuidv3": IsUUIDv3, -"uuidv4": IsUUIDv4, -"uuidv5": IsUUIDv5, -"creditcard": IsCreditCard, -"isbn10": IsISBN10, -"isbn13": IsISBN13, -"json": IsJSON, -"multibyte": IsMultibyte, -"ascii": IsASCII, -"printableascii": IsPrintableASCII, -"fullwidth": IsFullWidth, -"halfwidth": IsHalfWidth, -"variablewidth": IsVariableWidth, -"base64": IsBase64, -"datauri": IsDataURI, -"ip": IsIP, -"port": IsPort, -"ipv4": IsIPv4, -"ipv6": IsIPv6, -"dns": IsDNSName, -"host": IsHost, -"mac": IsMAC, -"latitude": IsLatitude, -"longitude": IsLongitude, -"ssn": IsSSN, -"semver": IsSemver, -"rfc3339": IsRFC3339, -"rfc3339WithoutZone": IsRFC3339WithoutZone, -"ISO3166Alpha2": IsISO3166Alpha2, 
-"ISO3166Alpha3": IsISO3166Alpha3, -"ulid": IsULID, -``` -Validators with parameters - -```go -"range(min|max)": Range, -"length(min|max)": ByteLength, -"runelength(min|max)": RuneLength, -"stringlength(min|max)": StringLength, -"matches(pattern)": StringMatches, -"in(string1|string2|...|stringN)": IsIn, -"rsapub(keylength)" : IsRsaPub, -"minstringlength(int): MinStringLength, -"maxstringlength(int): MaxStringLength, -``` -Validators with parameters for any type - -```go -"type(type)": IsType, -``` - -And here is small example of usage: -```go -type Post struct { - Title string `valid:"alphanum,required"` - Message string `valid:"duck,ascii"` - Message2 string `valid:"animal(dog)"` - AuthorIP string `valid:"ipv4"` - Date string `valid:"-"` -} -post := &Post{ - Title: "My Example Post", - Message: "duck", - Message2: "dog", - AuthorIP: "123.234.54.3", -} - -// Add your own struct validation tags -govalidator.TagMap["duck"] = govalidator.Validator(func(str string) bool { - return str == "duck" -}) - -// Add your own struct validation tags with parameter -govalidator.ParamTagMap["animal"] = govalidator.ParamValidator(func(str string, params ...string) bool { - species := params[0] - return str == species -}) -govalidator.ParamTagRegexMap["animal"] = regexp.MustCompile("^animal\\((\\w+)\\)$") - -result, err := govalidator.ValidateStruct(post) -if err != nil { - println("error: " + err.Error()) -} -println(result) -``` -###### ValidateMap [#2](https://github.com/asaskevich/govalidator/pull/338) -If you want to validate maps, you can use the map to be validated and a validation map that contain the same tags used in ValidateStruct, both maps have to be in the form `map[string]interface{}` - -So here is small example of usage: -```go -var mapTemplate = map[string]interface{}{ - "name":"required,alpha", - "family":"required,alpha", - "email":"required,email", - "cell-phone":"numeric", - "address":map[string]interface{}{ - "line1":"required,alphanum", - "line2":"alphanum", 
- "postal-code":"numeric", - }, -} - -var inputMap = map[string]interface{}{ - "name":"Bob", - "family":"Smith", - "email":"foo@bar.baz", - "address":map[string]interface{}{ - "line1":"", - "line2":"", - "postal-code":"", - }, -} - -result, err := govalidator.ValidateMap(inputMap, mapTemplate) -if err != nil { - println("error: " + err.Error()) -} -println(result) -``` - -###### WhiteList -```go -// Remove all characters from string ignoring characters between "a" and "z" -println(govalidator.WhiteList("a3a43a5a4a3a2a23a4a5a4a3a4", "a-z") == "aaaaaaaaaaaa") -``` - -###### Custom validation functions -Custom validation using your own domain specific validators is also available - here's an example of how to use it: -```go -import "github.com/asaskevich/govalidator" - -type CustomByteArray [6]byte // custom types are supported and can be validated - -type StructWithCustomByteArray struct { - ID CustomByteArray `valid:"customByteArrayValidator,customMinLengthValidator"` // multiple custom validators are possible as well and will be evaluated in sequence - Email string `valid:"email"` - CustomMinLength int `valid:"-"` -} - -govalidator.CustomTypeTagMap.Set("customByteArrayValidator", func(i interface{}, context interface{}) bool { - switch v := context.(type) { // you can type switch on the context interface being validated - case StructWithCustomByteArray: - // you can check and validate against some other field in the context, - // return early or not validate against the context at all – your choice - case SomeOtherType: - // ... - default: - // expecting some other type? Throw/panic here or continue - } - - switch v := i.(type) { // type switch on the struct field being validated - case CustomByteArray: - for _, e := range v { // this validator checks that the byte array is not empty, i.e. 
not all zeroes - if e != 0 { - return true - } - } - } - return false -}) -govalidator.CustomTypeTagMap.Set("customMinLengthValidator", func(i interface{}, context interface{}) bool { - switch v := context.(type) { // this validates a field against the value in another field, i.e. dependent validation - case StructWithCustomByteArray: - return len(v.ID) >= v.CustomMinLength - } - return false -}) -``` - -###### Loop over Error() -By default .Error() returns all errors in a single String. To access each error you can do this: -```go - if err != nil { - errs := err.(govalidator.Errors).Errors() - for _, e := range errs { - fmt.Println(e.Error()) - } - } -``` - -###### Custom error messages -Custom error messages are supported via annotations by adding the `~` separator - here's an example of how to use it: -```go -type Ticket struct { - Id int64 `json:"id"` - FirstName string `json:"firstname" valid:"required~First name is blank"` -} -``` - -#### Notes -Documentation is available here: [godoc.org](https://godoc.org/github.com/asaskevich/govalidator). -Full information about code coverage is also available here: [govalidator on gocover.io](http://gocover.io/github.com/asaskevich/govalidator). - -#### Support -If you do have a contribution to the package, feel free to create a Pull Request or an Issue. 
- -#### What to contribute -If you don't know what to do, there are some features and functions that need to be done - -- [ ] Refactor code -- [ ] Edit docs and [README](https://github.com/asaskevich/govalidator/README.md): spellcheck, grammar and typo check -- [ ] Create actual list of contributors and projects that currently using this package -- [ ] Resolve [issues and bugs](https://github.com/asaskevich/govalidator/issues) -- [ ] Update actual [list of functions](https://github.com/asaskevich/govalidator#list-of-functions) -- [ ] Update [list of validators](https://github.com/asaskevich/govalidator#validatestruct-2) that available for `ValidateStruct` and add new -- [ ] Implement new validators: `IsFQDN`, `IsIMEI`, `IsPostalCode`, `IsISIN`, `IsISRC` etc -- [x] Implement [validation by maps](https://github.com/asaskevich/govalidator/issues/224) -- [ ] Implement fuzzing testing -- [ ] Implement some struct/map/array utilities -- [ ] Implement map/array validation -- [ ] Implement benchmarking -- [ ] Implement batch of examples -- [ ] Look at forks for new features and fixes - -#### Advice -Feel free to create what you want, but keep in mind when you implement new features: -- Code must be clear and readable, names of variables/constants clearly describes what they are doing -- Public functions must be documented and described in source file and added to README.md to the list of available functions -- There are must be unit-tests for any new functions and improvements - -## Credits -### Contributors - -This project exists thanks to all the people who contribute. [[Contribute](CONTRIBUTING.md)]. 
- -#### Special thanks to [contributors](https://github.com/asaskevich/govalidator/graphs/contributors) -* [Daniel Lohse](https://github.com/annismckenzie) -* [Attila Oláh](https://github.com/attilaolah) -* [Daniel Korner](https://github.com/Dadie) -* [Steven Wilkin](https://github.com/stevenwilkin) -* [Deiwin Sarjas](https://github.com/deiwin) -* [Noah Shibley](https://github.com/slugmobile) -* [Nathan Davies](https://github.com/nathj07) -* [Matt Sanford](https://github.com/mzsanford) -* [Simon ccl1115](https://github.com/ccl1115) - - - - -### Backers - -Thank you to all our backers! 🙏 [[Become a backer](https://opencollective.com/govalidator#backer)] - - - - -### Sponsors - -Support this project by becoming a sponsor. Your logo will show up here with a link to your website. [[Become a sponsor](https://opencollective.com/govalidator#sponsor)] - - - - - - - - - - - - - - - -## License -[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fasaskevich%2Fgovalidator.svg?type=large)](https://app.fossa.io/projects/git%2Bgithub.com%2Fasaskevich%2Fgovalidator?ref=badge_large) diff --git a/vendor/github.com/asaskevich/govalidator/arrays.go b/vendor/github.com/asaskevich/govalidator/arrays.go deleted file mode 100644 index 3e1da7cb4..000000000 --- a/vendor/github.com/asaskevich/govalidator/arrays.go +++ /dev/null @@ -1,87 +0,0 @@ -package govalidator - -// Iterator is the function that accepts element of slice/array and its index -type Iterator func(interface{}, int) - -// ResultIterator is the function that accepts element of slice/array and its index and returns any result -type ResultIterator func(interface{}, int) interface{} - -// ConditionIterator is the function that accepts element of slice/array and its index and returns boolean -type ConditionIterator func(interface{}, int) bool - -// ReduceIterator is the function that accepts two element of slice/array and returns result of merging those values -type ReduceIterator func(interface{}, interface{}) 
interface{} - -// Some validates that any item of array corresponds to ConditionIterator. Returns boolean. -func Some(array []interface{}, iterator ConditionIterator) bool { - res := false - for index, data := range array { - res = res || iterator(data, index) - } - return res -} - -// Every validates that every item of array corresponds to ConditionIterator. Returns boolean. -func Every(array []interface{}, iterator ConditionIterator) bool { - res := true - for index, data := range array { - res = res && iterator(data, index) - } - return res -} - -// Reduce boils down a list of values into a single value by ReduceIterator -func Reduce(array []interface{}, iterator ReduceIterator, initialValue interface{}) interface{} { - for _, data := range array { - initialValue = iterator(initialValue, data) - } - return initialValue -} - -// Each iterates over the slice and apply Iterator to every item -func Each(array []interface{}, iterator Iterator) { - for index, data := range array { - iterator(data, index) - } -} - -// Map iterates over the slice and apply ResultIterator to every item. Returns new slice as a result. -func Map(array []interface{}, iterator ResultIterator) []interface{} { - var result = make([]interface{}, len(array)) - for index, data := range array { - result[index] = iterator(data, index) - } - return result -} - -// Find iterates over the slice and apply ConditionIterator to every item. Returns first item that meet ConditionIterator or nil otherwise. -func Find(array []interface{}, iterator ConditionIterator) interface{} { - for index, data := range array { - if iterator(data, index) { - return data - } - } - return nil -} - -// Filter iterates over the slice and apply ConditionIterator to every item. Returns new slice. 
-func Filter(array []interface{}, iterator ConditionIterator) []interface{} { - var result = make([]interface{}, 0) - for index, data := range array { - if iterator(data, index) { - result = append(result, data) - } - } - return result -} - -// Count iterates over the slice and apply ConditionIterator to every item. Returns count of items that meets ConditionIterator. -func Count(array []interface{}, iterator ConditionIterator) int { - count := 0 - for index, data := range array { - if iterator(data, index) { - count = count + 1 - } - } - return count -} diff --git a/vendor/github.com/asaskevich/govalidator/converter.go b/vendor/github.com/asaskevich/govalidator/converter.go deleted file mode 100644 index d68e990fc..000000000 --- a/vendor/github.com/asaskevich/govalidator/converter.go +++ /dev/null @@ -1,81 +0,0 @@ -package govalidator - -import ( - "encoding/json" - "fmt" - "reflect" - "strconv" -) - -// ToString convert the input to a string. -func ToString(obj interface{}) string { - res := fmt.Sprintf("%v", obj) - return res -} - -// ToJSON convert the input to a valid JSON string -func ToJSON(obj interface{}) (string, error) { - res, err := json.Marshal(obj) - if err != nil { - res = []byte("") - } - return string(res), err -} - -// ToFloat convert the input string to a float, or 0.0 if the input is not a float. -func ToFloat(value interface{}) (res float64, err error) { - val := reflect.ValueOf(value) - - switch value.(type) { - case int, int8, int16, int32, int64: - res = float64(val.Int()) - case uint, uint8, uint16, uint32, uint64: - res = float64(val.Uint()) - case float32, float64: - res = val.Float() - case string: - res, err = strconv.ParseFloat(val.String(), 64) - if err != nil { - res = 0 - } - default: - err = fmt.Errorf("ToInt: unknown interface type %T", value) - res = 0 - } - - return -} - -// ToInt convert the input string or any int type to an integer type 64, or 0 if the input is not an integer. 
-func ToInt(value interface{}) (res int64, err error) { - val := reflect.ValueOf(value) - - switch value.(type) { - case int, int8, int16, int32, int64: - res = val.Int() - case uint, uint8, uint16, uint32, uint64: - res = int64(val.Uint()) - case float32, float64: - res = int64(val.Float()) - case string: - if IsInt(val.String()) { - res, err = strconv.ParseInt(val.String(), 0, 64) - if err != nil { - res = 0 - } - } else { - err = fmt.Errorf("ToInt: invalid numeric format %g", value) - res = 0 - } - default: - err = fmt.Errorf("ToInt: unknown interface type %T", value) - res = 0 - } - - return -} - -// ToBoolean convert the input string to a boolean. -func ToBoolean(str string) (bool, error) { - return strconv.ParseBool(str) -} diff --git a/vendor/github.com/asaskevich/govalidator/doc.go b/vendor/github.com/asaskevich/govalidator/doc.go deleted file mode 100644 index 55dce62dc..000000000 --- a/vendor/github.com/asaskevich/govalidator/doc.go +++ /dev/null @@ -1,3 +0,0 @@ -package govalidator - -// A package of validators and sanitizers for strings, structures and collections. diff --git a/vendor/github.com/asaskevich/govalidator/error.go b/vendor/github.com/asaskevich/govalidator/error.go deleted file mode 100644 index 1da2336f4..000000000 --- a/vendor/github.com/asaskevich/govalidator/error.go +++ /dev/null @@ -1,47 +0,0 @@ -package govalidator - -import ( - "sort" - "strings" -) - -// Errors is an array of multiple errors and conforms to the error interface. -type Errors []error - -// Errors returns itself. -func (es Errors) Errors() []error { - return es -} - -func (es Errors) Error() string { - var errs []string - for _, e := range es { - errs = append(errs, e.Error()) - } - sort.Strings(errs) - return strings.Join(errs, ";") -} - -// Error encapsulates a name, an error and whether there's a custom error message or not. 
-type Error struct { - Name string - Err error - CustomErrorMessageExists bool - - // Validator indicates the name of the validator that failed - Validator string - Path []string -} - -func (e Error) Error() string { - if e.CustomErrorMessageExists { - return e.Err.Error() - } - - errName := e.Name - if len(e.Path) > 0 { - errName = strings.Join(append(e.Path, e.Name), ".") - } - - return errName + ": " + e.Err.Error() -} diff --git a/vendor/github.com/asaskevich/govalidator/numerics.go b/vendor/github.com/asaskevich/govalidator/numerics.go deleted file mode 100644 index 5041d9e86..000000000 --- a/vendor/github.com/asaskevich/govalidator/numerics.go +++ /dev/null @@ -1,100 +0,0 @@ -package govalidator - -import ( - "math" -) - -// Abs returns absolute value of number -func Abs(value float64) float64 { - return math.Abs(value) -} - -// Sign returns signum of number: 1 in case of value > 0, -1 in case of value < 0, 0 otherwise -func Sign(value float64) float64 { - if value > 0 { - return 1 - } else if value < 0 { - return -1 - } else { - return 0 - } -} - -// IsNegative returns true if value < 0 -func IsNegative(value float64) bool { - return value < 0 -} - -// IsPositive returns true if value > 0 -func IsPositive(value float64) bool { - return value > 0 -} - -// IsNonNegative returns true if value >= 0 -func IsNonNegative(value float64) bool { - return value >= 0 -} - -// IsNonPositive returns true if value <= 0 -func IsNonPositive(value float64) bool { - return value <= 0 -} - -// InRangeInt returns true if value lies between left and right border -func InRangeInt(value, left, right interface{}) bool { - value64, _ := ToInt(value) - left64, _ := ToInt(left) - right64, _ := ToInt(right) - if left64 > right64 { - left64, right64 = right64, left64 - } - return value64 >= left64 && value64 <= right64 -} - -// InRangeFloat32 returns true if value lies between left and right border -func InRangeFloat32(value, left, right float32) bool { - if left > right { - left, right 
= right, left - } - return value >= left && value <= right -} - -// InRangeFloat64 returns true if value lies between left and right border -func InRangeFloat64(value, left, right float64) bool { - if left > right { - left, right = right, left - } - return value >= left && value <= right -} - -// InRange returns true if value lies between left and right border, generic type to handle int, float32, float64 and string. -// All types must the same type. -// False if value doesn't lie in range or if it incompatible or not comparable -func InRange(value interface{}, left interface{}, right interface{}) bool { - switch value.(type) { - case int: - intValue, _ := ToInt(value) - intLeft, _ := ToInt(left) - intRight, _ := ToInt(right) - return InRangeInt(intValue, intLeft, intRight) - case float32, float64: - intValue, _ := ToFloat(value) - intLeft, _ := ToFloat(left) - intRight, _ := ToFloat(right) - return InRangeFloat64(intValue, intLeft, intRight) - case string: - return value.(string) >= left.(string) && value.(string) <= right.(string) - default: - return false - } -} - -// IsWhole returns true if value is whole number -func IsWhole(value float64) bool { - return math.Remainder(value, 1) == 0 -} - -// IsNatural returns true if value is natural number (positive and whole) -func IsNatural(value float64) bool { - return IsWhole(value) && IsPositive(value) -} diff --git a/vendor/github.com/asaskevich/govalidator/patterns.go b/vendor/github.com/asaskevich/govalidator/patterns.go deleted file mode 100644 index bafc3765e..000000000 --- a/vendor/github.com/asaskevich/govalidator/patterns.go +++ /dev/null @@ -1,113 +0,0 @@ -package govalidator - -import "regexp" - -// Basic regular expressions for validating strings -const ( - Email string = 
"^(((([a-zA-Z]|\\d|[!#\\$%&'\\*\\+\\-\\/=\\?\\^_`{\\|}~]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])+(\\.([a-zA-Z]|\\d|[!#\\$%&'\\*\\+\\-\\/=\\?\\^_`{\\|}~]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])+)*)|((\\x22)((((\\x20|\\x09)*(\\x0d\\x0a))?(\\x20|\\x09)+)?(([\\x01-\\x08\\x0b\\x0c\\x0e-\\x1f\\x7f]|\\x21|[\\x23-\\x5b]|[\\x5d-\\x7e]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(\\([\\x01-\\x09\\x0b\\x0c\\x0d-\\x7f]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}]))))*(((\\x20|\\x09)*(\\x0d\\x0a))?(\\x20|\\x09)+)?(\\x22)))@((([a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(([a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])([a-zA-Z]|\\d|-|\\.|_|~|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])*([a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])))\\.)+(([a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(([a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])([a-zA-Z]|\\d|-|_|~|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])*([a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])))\\.?$" - CreditCard string = "^(?:4[0-9]{12}(?:[0-9]{3})?|5[1-5][0-9]{14}|(222[1-9]|22[3-9][0-9]|2[3-6][0-9]{2}|27[01][0-9]|2720)[0-9]{12}|6(?:011|5[0-9][0-9])[0-9]{12}|3[47][0-9]{13}|3(?:0[0-5]|[68][0-9])[0-9]{11}|(?:2131|1800|35\\d{3})\\d{11}|6[27][0-9]{14})$" - ISBN10 string = "^(?:[0-9]{9}X|[0-9]{10})$" - ISBN13 string = "^(?:[0-9]{13})$" - UUID3 string = "^[0-9a-f]{8}-[0-9a-f]{4}-3[0-9a-f]{3}-[0-9a-f]{4}-[0-9a-f]{12}$" - UUID4 string = "^[0-9a-f]{8}-[0-9a-f]{4}-4[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$" - UUID5 string = "^[0-9a-f]{8}-[0-9a-f]{4}-5[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$" - UUID string = "^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$" - Alpha string = "^[a-zA-Z]+$" - Alphanumeric string = "^[a-zA-Z0-9]+$" - Numeric string = "^[0-9]+$" - Int 
string = "^(?:[-+]?(?:0|[1-9][0-9]*))$" - Float string = "^(?:[-+]?(?:[0-9]+))?(?:\\.[0-9]*)?(?:[eE][\\+\\-]?(?:[0-9]+))?$" - Hexadecimal string = "^[0-9a-fA-F]+$" - Hexcolor string = "^#?([0-9a-fA-F]{3}|[0-9a-fA-F]{6})$" - RGBcolor string = "^rgb\\(\\s*(0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*,\\s*(0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*,\\s*(0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*\\)$" - ASCII string = "^[\x00-\x7F]+$" - Multibyte string = "[^\x00-\x7F]" - FullWidth string = "[^\u0020-\u007E\uFF61-\uFF9F\uFFA0-\uFFDC\uFFE8-\uFFEE0-9a-zA-Z]" - HalfWidth string = "[\u0020-\u007E\uFF61-\uFF9F\uFFA0-\uFFDC\uFFE8-\uFFEE0-9a-zA-Z]" - Base64 string = "^(?:[A-Za-z0-9+\\/]{4})*(?:[A-Za-z0-9+\\/]{2}==|[A-Za-z0-9+\\/]{3}=|[A-Za-z0-9+\\/]{4})$" - PrintableASCII string = "^[\x20-\x7E]+$" - DataURI string = "^data:.+\\/(.+);base64$" - MagnetURI string = "^magnet:\\?xt=urn:[a-zA-Z0-9]+:[a-zA-Z0-9]{32,40}&dn=.+&tr=.+$" - Latitude string = "^[-+]?([1-8]?\\d(\\.\\d+)?|90(\\.0+)?)$" - Longitude string = "^[-+]?(180(\\.0+)?|((1[0-7]\\d)|([1-9]?\\d))(\\.\\d+)?)$" - DNSName string = `^([a-zA-Z0-9_]{1}[a-zA-Z0-9_-]{0,62}){1}(\.[a-zA-Z0-9_]{1}[a-zA-Z0-9_-]{0,62})*[\._]?$` - IP string = `(([0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7}|:)|fe80:(:[0-9a-fA-F]{0,4}){0,4}%[0-9a-zA-Z]{1,}|::(ffff(:0{1,4}){0,1}:){0,1}((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])|([0-9a-fA-F]{1,4}:){1,4}:((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9]))` - URLSchema string = `((ftp|tcp|udp|wss?|https?):\/\/)` - URLUsername string = `(\S+(:\S*)?@)` - URLPath string = `((\/|\?|#)[^\s]*)` - URLPort 
string = `(:(\d{1,5}))` - URLIP string = `([1-9]\d?|1\d\d|2[01]\d|22[0-3]|24\d|25[0-5])(\.(\d{1,2}|1\d\d|2[0-4]\d|25[0-5])){2}(?:\.([0-9]\d?|1\d\d|2[0-4]\d|25[0-5]))` - URLSubdomain string = `((www\.)|([a-zA-Z0-9]+([-_\.]?[a-zA-Z0-9])*[a-zA-Z0-9]\.[a-zA-Z0-9]+))` - URL = `^` + URLSchema + `?` + URLUsername + `?` + `((` + URLIP + `|(\[` + IP + `\])|(([a-zA-Z0-9]([a-zA-Z0-9-_]+)?[a-zA-Z0-9]([-\.][a-zA-Z0-9]+)*)|(` + URLSubdomain + `?))?(([a-zA-Z\x{00a1}-\x{ffff}0-9]+-?-?)*[a-zA-Z\x{00a1}-\x{ffff}0-9]+)(?:\.([a-zA-Z\x{00a1}-\x{ffff}]{1,}))?))\.?` + URLPort + `?` + URLPath + `?$` - SSN string = `^\d{3}[- ]?\d{2}[- ]?\d{4}$` - WinPath string = `^[a-zA-Z]:\\(?:[^\\/:*?"<>|\r\n]+\\)*[^\\/:*?"<>|\r\n]*$` - UnixPath string = `^(/[^/\x00]*)+/?$` - WinARPath string = `^(?:(?:[a-zA-Z]:|\\\\[a-z0-9_.$●-]+\\[a-z0-9_.$●-]+)\\|\\?[^\\/:*?"<>|\r\n]+\\?)(?:[^\\/:*?"<>|\r\n]+\\)*[^\\/:*?"<>|\r\n]*$` - UnixARPath string = `^((\.{0,2}/)?([^/\x00]*))+/?$` - Semver string = "^v?(?:0|[1-9]\\d*)\\.(?:0|[1-9]\\d*)\\.(?:0|[1-9]\\d*)(-(0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*)(\\.(0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*))*)?(\\+[0-9a-zA-Z-]+(\\.[0-9a-zA-Z-]+)*)?$" - tagName string = "valid" - hasLowerCase string = ".*[[:lower:]]" - hasUpperCase string = ".*[[:upper:]]" - hasWhitespace string = ".*[[:space:]]" - hasWhitespaceOnly string = "^[[:space:]]+$" - IMEI string = "^[0-9a-f]{14}$|^\\d{15}$|^\\d{18}$" - IMSI string = "^\\d{14,15}$" - E164 string = `^\+?[1-9]\d{1,14}$` -) - -// Used by IsFilePath func -const ( - // Unknown is unresolved OS type - Unknown = iota - // Win is Windows type - Win - // Unix is *nix OS types - Unix -) - -var ( - userRegexp = regexp.MustCompile("^[a-zA-Z0-9!#$%&'*+/=?^_`{|}~.-]+$") - hostRegexp = regexp.MustCompile("^[^\\s]+\\.[^\\s]+$") - userDotRegexp = regexp.MustCompile("(^[.]{1})|([.]{1}$)|([.]{2,})") - rxEmail = regexp.MustCompile(Email) - rxCreditCard = regexp.MustCompile(CreditCard) - rxISBN10 = regexp.MustCompile(ISBN10) - rxISBN13 = 
regexp.MustCompile(ISBN13) - rxUUID3 = regexp.MustCompile(UUID3) - rxUUID4 = regexp.MustCompile(UUID4) - rxUUID5 = regexp.MustCompile(UUID5) - rxUUID = regexp.MustCompile(UUID) - rxAlpha = regexp.MustCompile(Alpha) - rxAlphanumeric = regexp.MustCompile(Alphanumeric) - rxNumeric = regexp.MustCompile(Numeric) - rxInt = regexp.MustCompile(Int) - rxFloat = regexp.MustCompile(Float) - rxHexadecimal = regexp.MustCompile(Hexadecimal) - rxHexcolor = regexp.MustCompile(Hexcolor) - rxRGBcolor = regexp.MustCompile(RGBcolor) - rxASCII = regexp.MustCompile(ASCII) - rxPrintableASCII = regexp.MustCompile(PrintableASCII) - rxMultibyte = regexp.MustCompile(Multibyte) - rxFullWidth = regexp.MustCompile(FullWidth) - rxHalfWidth = regexp.MustCompile(HalfWidth) - rxBase64 = regexp.MustCompile(Base64) - rxDataURI = regexp.MustCompile(DataURI) - rxMagnetURI = regexp.MustCompile(MagnetURI) - rxLatitude = regexp.MustCompile(Latitude) - rxLongitude = regexp.MustCompile(Longitude) - rxDNSName = regexp.MustCompile(DNSName) - rxURL = regexp.MustCompile(URL) - rxSSN = regexp.MustCompile(SSN) - rxWinPath = regexp.MustCompile(WinPath) - rxUnixPath = regexp.MustCompile(UnixPath) - rxARWinPath = regexp.MustCompile(WinARPath) - rxARUnixPath = regexp.MustCompile(UnixARPath) - rxSemver = regexp.MustCompile(Semver) - rxHasLowerCase = regexp.MustCompile(hasLowerCase) - rxHasUpperCase = regexp.MustCompile(hasUpperCase) - rxHasWhitespace = regexp.MustCompile(hasWhitespace) - rxHasWhitespaceOnly = regexp.MustCompile(hasWhitespaceOnly) - rxIMEI = regexp.MustCompile(IMEI) - rxIMSI = regexp.MustCompile(IMSI) - rxE164 = regexp.MustCompile(E164) -) diff --git a/vendor/github.com/asaskevich/govalidator/types.go b/vendor/github.com/asaskevich/govalidator/types.go deleted file mode 100644 index c573abb51..000000000 --- a/vendor/github.com/asaskevich/govalidator/types.go +++ /dev/null @@ -1,656 +0,0 @@ -package govalidator - -import ( - "reflect" - "regexp" - "sort" - "sync" -) - -// Validator is a wrapper for a 
validator function that returns bool and accepts string. -type Validator func(str string) bool - -// CustomTypeValidator is a wrapper for validator functions that returns bool and accepts any type. -// The second parameter should be the context (in the case of validating a struct: the whole object being validated). -type CustomTypeValidator func(i interface{}, o interface{}) bool - -// ParamValidator is a wrapper for validator functions that accept additional parameters. -type ParamValidator func(str string, params ...string) bool - -// InterfaceParamValidator is a wrapper for functions that accept variants parameters for an interface value -type InterfaceParamValidator func(in interface{}, params ...string) bool -type tagOptionsMap map[string]tagOption - -func (t tagOptionsMap) orderedKeys() []string { - var keys []string - for k := range t { - keys = append(keys, k) - } - - sort.Slice(keys, func(a, b int) bool { - return t[keys[a]].order < t[keys[b]].order - }) - - return keys -} - -type tagOption struct { - name string - customErrorMessage string - order int -} - -// UnsupportedTypeError is a wrapper for reflect.Type -type UnsupportedTypeError struct { - Type reflect.Type -} - -// stringValues is a slice of reflect.Value holding *reflect.StringValue. -// It implements the methods to sort by string. -type stringValues []reflect.Value - -// InterfaceParamTagMap is a map of functions accept variants parameters for an interface value -var InterfaceParamTagMap = map[string]InterfaceParamValidator{ - "type": IsType, -} - -// InterfaceParamTagRegexMap maps interface param tags to their respective regexes. 
-var InterfaceParamTagRegexMap = map[string]*regexp.Regexp{ - "type": regexp.MustCompile(`^type\((.*)\)$`), -} - -// ParamTagMap is a map of functions accept variants parameters -var ParamTagMap = map[string]ParamValidator{ - "length": ByteLength, - "range": Range, - "runelength": RuneLength, - "stringlength": StringLength, - "matches": StringMatches, - "in": IsInRaw, - "rsapub": IsRsaPub, - "minstringlength": MinStringLength, - "maxstringlength": MaxStringLength, -} - -// ParamTagRegexMap maps param tags to their respective regexes. -var ParamTagRegexMap = map[string]*regexp.Regexp{ - "range": regexp.MustCompile("^range\\((\\d+)\\|(\\d+)\\)$"), - "length": regexp.MustCompile("^length\\((\\d+)\\|(\\d+)\\)$"), - "runelength": regexp.MustCompile("^runelength\\((\\d+)\\|(\\d+)\\)$"), - "stringlength": regexp.MustCompile("^stringlength\\((\\d+)\\|(\\d+)\\)$"), - "in": regexp.MustCompile(`^in\((.*)\)`), - "matches": regexp.MustCompile(`^matches\((.+)\)$`), - "rsapub": regexp.MustCompile("^rsapub\\((\\d+)\\)$"), - "minstringlength": regexp.MustCompile("^minstringlength\\((\\d+)\\)$"), - "maxstringlength": regexp.MustCompile("^maxstringlength\\((\\d+)\\)$"), -} - -type customTypeTagMap struct { - validators map[string]CustomTypeValidator - - sync.RWMutex -} - -func (tm *customTypeTagMap) Get(name string) (CustomTypeValidator, bool) { - tm.RLock() - defer tm.RUnlock() - v, ok := tm.validators[name] - return v, ok -} - -func (tm *customTypeTagMap) Set(name string, ctv CustomTypeValidator) { - tm.Lock() - defer tm.Unlock() - tm.validators[name] = ctv -} - -// CustomTypeTagMap is a map of functions that can be used as tags for ValidateStruct function. -// Use this to validate compound or custom types that need to be handled as a whole, e.g. -// `type UUID [16]byte` (this would be handled as an array of bytes). 
-var CustomTypeTagMap = &customTypeTagMap{validators: make(map[string]CustomTypeValidator)} - -// TagMap is a map of functions, that can be used as tags for ValidateStruct function. -var TagMap = map[string]Validator{ - "email": IsEmail, - "url": IsURL, - "dialstring": IsDialString, - "requrl": IsRequestURL, - "requri": IsRequestURI, - "alpha": IsAlpha, - "utfletter": IsUTFLetter, - "alphanum": IsAlphanumeric, - "utfletternum": IsUTFLetterNumeric, - "numeric": IsNumeric, - "utfnumeric": IsUTFNumeric, - "utfdigit": IsUTFDigit, - "hexadecimal": IsHexadecimal, - "hexcolor": IsHexcolor, - "rgbcolor": IsRGBcolor, - "lowercase": IsLowerCase, - "uppercase": IsUpperCase, - "int": IsInt, - "float": IsFloat, - "null": IsNull, - "notnull": IsNotNull, - "uuid": IsUUID, - "uuidv3": IsUUIDv3, - "uuidv4": IsUUIDv4, - "uuidv5": IsUUIDv5, - "creditcard": IsCreditCard, - "isbn10": IsISBN10, - "isbn13": IsISBN13, - "json": IsJSON, - "multibyte": IsMultibyte, - "ascii": IsASCII, - "printableascii": IsPrintableASCII, - "fullwidth": IsFullWidth, - "halfwidth": IsHalfWidth, - "variablewidth": IsVariableWidth, - "base64": IsBase64, - "datauri": IsDataURI, - "ip": IsIP, - "port": IsPort, - "ipv4": IsIPv4, - "ipv6": IsIPv6, - "dns": IsDNSName, - "host": IsHost, - "mac": IsMAC, - "latitude": IsLatitude, - "longitude": IsLongitude, - "ssn": IsSSN, - "semver": IsSemver, - "rfc3339": IsRFC3339, - "rfc3339WithoutZone": IsRFC3339WithoutZone, - "ISO3166Alpha2": IsISO3166Alpha2, - "ISO3166Alpha3": IsISO3166Alpha3, - "ISO4217": IsISO4217, - "IMEI": IsIMEI, - "ulid": IsULID, -} - -// ISO3166Entry stores country codes -type ISO3166Entry struct { - EnglishShortName string - FrenchShortName string - Alpha2Code string - Alpha3Code string - Numeric string -} - -//ISO3166List based on https://www.iso.org/obp/ui/#search/code/ Code Type "Officially Assigned Codes" -var ISO3166List = []ISO3166Entry{ - {"Afghanistan", "Afghanistan (l')", "AF", "AFG", "004"}, - {"Albania", "Albanie (l')", "AL", "ALB", "008"}, - 
{"Antarctica", "Antarctique (l')", "AQ", "ATA", "010"}, - {"Algeria", "Algérie (l')", "DZ", "DZA", "012"}, - {"American Samoa", "Samoa américaines (les)", "AS", "ASM", "016"}, - {"Andorra", "Andorre (l')", "AD", "AND", "020"}, - {"Angola", "Angola (l')", "AO", "AGO", "024"}, - {"Antigua and Barbuda", "Antigua-et-Barbuda", "AG", "ATG", "028"}, - {"Azerbaijan", "Azerbaïdjan (l')", "AZ", "AZE", "031"}, - {"Argentina", "Argentine (l')", "AR", "ARG", "032"}, - {"Australia", "Australie (l')", "AU", "AUS", "036"}, - {"Austria", "Autriche (l')", "AT", "AUT", "040"}, - {"Bahamas (the)", "Bahamas (les)", "BS", "BHS", "044"}, - {"Bahrain", "Bahreïn", "BH", "BHR", "048"}, - {"Bangladesh", "Bangladesh (le)", "BD", "BGD", "050"}, - {"Armenia", "Arménie (l')", "AM", "ARM", "051"}, - {"Barbados", "Barbade (la)", "BB", "BRB", "052"}, - {"Belgium", "Belgique (la)", "BE", "BEL", "056"}, - {"Bermuda", "Bermudes (les)", "BM", "BMU", "060"}, - {"Bhutan", "Bhoutan (le)", "BT", "BTN", "064"}, - {"Bolivia (Plurinational State of)", "Bolivie (État plurinational de)", "BO", "BOL", "068"}, - {"Bosnia and Herzegovina", "Bosnie-Herzégovine (la)", "BA", "BIH", "070"}, - {"Botswana", "Botswana (le)", "BW", "BWA", "072"}, - {"Bouvet Island", "Bouvet (l'Île)", "BV", "BVT", "074"}, - {"Brazil", "Brésil (le)", "BR", "BRA", "076"}, - {"Belize", "Belize (le)", "BZ", "BLZ", "084"}, - {"British Indian Ocean Territory (the)", "Indien (le Territoire britannique de l'océan)", "IO", "IOT", "086"}, - {"Solomon Islands", "Salomon (Îles)", "SB", "SLB", "090"}, - {"Virgin Islands (British)", "Vierges britanniques (les Îles)", "VG", "VGB", "092"}, - {"Brunei Darussalam", "Brunéi Darussalam (le)", "BN", "BRN", "096"}, - {"Bulgaria", "Bulgarie (la)", "BG", "BGR", "100"}, - {"Myanmar", "Myanmar (le)", "MM", "MMR", "104"}, - {"Burundi", "Burundi (le)", "BI", "BDI", "108"}, - {"Belarus", "Bélarus (le)", "BY", "BLR", "112"}, - {"Cambodia", "Cambodge (le)", "KH", "KHM", "116"}, - {"Cameroon", "Cameroun (le)", "CM", 
"CMR", "120"}, - {"Canada", "Canada (le)", "CA", "CAN", "124"}, - {"Cabo Verde", "Cabo Verde", "CV", "CPV", "132"}, - {"Cayman Islands (the)", "Caïmans (les Îles)", "KY", "CYM", "136"}, - {"Central African Republic (the)", "République centrafricaine (la)", "CF", "CAF", "140"}, - {"Sri Lanka", "Sri Lanka", "LK", "LKA", "144"}, - {"Chad", "Tchad (le)", "TD", "TCD", "148"}, - {"Chile", "Chili (le)", "CL", "CHL", "152"}, - {"China", "Chine (la)", "CN", "CHN", "156"}, - {"Taiwan (Province of China)", "Taïwan (Province de Chine)", "TW", "TWN", "158"}, - {"Christmas Island", "Christmas (l'Île)", "CX", "CXR", "162"}, - {"Cocos (Keeling) Islands (the)", "Cocos (les Îles)/ Keeling (les Îles)", "CC", "CCK", "166"}, - {"Colombia", "Colombie (la)", "CO", "COL", "170"}, - {"Comoros (the)", "Comores (les)", "KM", "COM", "174"}, - {"Mayotte", "Mayotte", "YT", "MYT", "175"}, - {"Congo (the)", "Congo (le)", "CG", "COG", "178"}, - {"Congo (the Democratic Republic of the)", "Congo (la République démocratique du)", "CD", "COD", "180"}, - {"Cook Islands (the)", "Cook (les Îles)", "CK", "COK", "184"}, - {"Costa Rica", "Costa Rica (le)", "CR", "CRI", "188"}, - {"Croatia", "Croatie (la)", "HR", "HRV", "191"}, - {"Cuba", "Cuba", "CU", "CUB", "192"}, - {"Cyprus", "Chypre", "CY", "CYP", "196"}, - {"Czech Republic (the)", "tchèque (la République)", "CZ", "CZE", "203"}, - {"Benin", "Bénin (le)", "BJ", "BEN", "204"}, - {"Denmark", "Danemark (le)", "DK", "DNK", "208"}, - {"Dominica", "Dominique (la)", "DM", "DMA", "212"}, - {"Dominican Republic (the)", "dominicaine (la République)", "DO", "DOM", "214"}, - {"Ecuador", "Équateur (l')", "EC", "ECU", "218"}, - {"El Salvador", "El Salvador", "SV", "SLV", "222"}, - {"Equatorial Guinea", "Guinée équatoriale (la)", "GQ", "GNQ", "226"}, - {"Ethiopia", "Éthiopie (l')", "ET", "ETH", "231"}, - {"Eritrea", "Érythrée (l')", "ER", "ERI", "232"}, - {"Estonia", "Estonie (l')", "EE", "EST", "233"}, - {"Faroe Islands (the)", "Féroé (les Îles)", "FO", "FRO", "234"}, 
- {"Falkland Islands (the) [Malvinas]", "Falkland (les Îles)/Malouines (les Îles)", "FK", "FLK", "238"}, - {"South Georgia and the South Sandwich Islands", "Géorgie du Sud-et-les Îles Sandwich du Sud (la)", "GS", "SGS", "239"}, - {"Fiji", "Fidji (les)", "FJ", "FJI", "242"}, - {"Finland", "Finlande (la)", "FI", "FIN", "246"}, - {"Åland Islands", "Åland(les Îles)", "AX", "ALA", "248"}, - {"France", "France (la)", "FR", "FRA", "250"}, - {"French Guiana", "Guyane française (la )", "GF", "GUF", "254"}, - {"French Polynesia", "Polynésie française (la)", "PF", "PYF", "258"}, - {"French Southern Territories (the)", "Terres australes françaises (les)", "TF", "ATF", "260"}, - {"Djibouti", "Djibouti", "DJ", "DJI", "262"}, - {"Gabon", "Gabon (le)", "GA", "GAB", "266"}, - {"Georgia", "Géorgie (la)", "GE", "GEO", "268"}, - {"Gambia (the)", "Gambie (la)", "GM", "GMB", "270"}, - {"Palestine, State of", "Palestine, État de", "PS", "PSE", "275"}, - {"Germany", "Allemagne (l')", "DE", "DEU", "276"}, - {"Ghana", "Ghana (le)", "GH", "GHA", "288"}, - {"Gibraltar", "Gibraltar", "GI", "GIB", "292"}, - {"Kiribati", "Kiribati", "KI", "KIR", "296"}, - {"Greece", "Grèce (la)", "GR", "GRC", "300"}, - {"Greenland", "Groenland (le)", "GL", "GRL", "304"}, - {"Grenada", "Grenade (la)", "GD", "GRD", "308"}, - {"Guadeloupe", "Guadeloupe (la)", "GP", "GLP", "312"}, - {"Guam", "Guam", "GU", "GUM", "316"}, - {"Guatemala", "Guatemala (le)", "GT", "GTM", "320"}, - {"Guinea", "Guinée (la)", "GN", "GIN", "324"}, - {"Guyana", "Guyana (le)", "GY", "GUY", "328"}, - {"Haiti", "Haïti", "HT", "HTI", "332"}, - {"Heard Island and McDonald Islands", "Heard-et-Îles MacDonald (l'Île)", "HM", "HMD", "334"}, - {"Holy See (the)", "Saint-Siège (le)", "VA", "VAT", "336"}, - {"Honduras", "Honduras (le)", "HN", "HND", "340"}, - {"Hong Kong", "Hong Kong", "HK", "HKG", "344"}, - {"Hungary", "Hongrie (la)", "HU", "HUN", "348"}, - {"Iceland", "Islande (l')", "IS", "ISL", "352"}, - {"India", "Inde (l')", "IN", "IND", "356"}, - 
{"Indonesia", "Indonésie (l')", "ID", "IDN", "360"}, - {"Iran (Islamic Republic of)", "Iran (République Islamique d')", "IR", "IRN", "364"}, - {"Iraq", "Iraq (l')", "IQ", "IRQ", "368"}, - {"Ireland", "Irlande (l')", "IE", "IRL", "372"}, - {"Israel", "Israël", "IL", "ISR", "376"}, - {"Italy", "Italie (l')", "IT", "ITA", "380"}, - {"Côte d'Ivoire", "Côte d'Ivoire (la)", "CI", "CIV", "384"}, - {"Jamaica", "Jamaïque (la)", "JM", "JAM", "388"}, - {"Japan", "Japon (le)", "JP", "JPN", "392"}, - {"Kazakhstan", "Kazakhstan (le)", "KZ", "KAZ", "398"}, - {"Jordan", "Jordanie (la)", "JO", "JOR", "400"}, - {"Kenya", "Kenya (le)", "KE", "KEN", "404"}, - {"Korea (the Democratic People's Republic of)", "Corée (la République populaire démocratique de)", "KP", "PRK", "408"}, - {"Korea (the Republic of)", "Corée (la République de)", "KR", "KOR", "410"}, - {"Kuwait", "Koweït (le)", "KW", "KWT", "414"}, - {"Kyrgyzstan", "Kirghizistan (le)", "KG", "KGZ", "417"}, - {"Lao People's Democratic Republic (the)", "Lao, République démocratique populaire", "LA", "LAO", "418"}, - {"Lebanon", "Liban (le)", "LB", "LBN", "422"}, - {"Lesotho", "Lesotho (le)", "LS", "LSO", "426"}, - {"Latvia", "Lettonie (la)", "LV", "LVA", "428"}, - {"Liberia", "Libéria (le)", "LR", "LBR", "430"}, - {"Libya", "Libye (la)", "LY", "LBY", "434"}, - {"Liechtenstein", "Liechtenstein (le)", "LI", "LIE", "438"}, - {"Lithuania", "Lituanie (la)", "LT", "LTU", "440"}, - {"Luxembourg", "Luxembourg (le)", "LU", "LUX", "442"}, - {"Macao", "Macao", "MO", "MAC", "446"}, - {"Madagascar", "Madagascar", "MG", "MDG", "450"}, - {"Malawi", "Malawi (le)", "MW", "MWI", "454"}, - {"Malaysia", "Malaisie (la)", "MY", "MYS", "458"}, - {"Maldives", "Maldives (les)", "MV", "MDV", "462"}, - {"Mali", "Mali (le)", "ML", "MLI", "466"}, - {"Malta", "Malte", "MT", "MLT", "470"}, - {"Martinique", "Martinique (la)", "MQ", "MTQ", "474"}, - {"Mauritania", "Mauritanie (la)", "MR", "MRT", "478"}, - {"Mauritius", "Maurice", "MU", "MUS", "480"}, - {"Mexico", 
"Mexique (le)", "MX", "MEX", "484"}, - {"Monaco", "Monaco", "MC", "MCO", "492"}, - {"Mongolia", "Mongolie (la)", "MN", "MNG", "496"}, - {"Moldova (the Republic of)", "Moldova , République de", "MD", "MDA", "498"}, - {"Montenegro", "Monténégro (le)", "ME", "MNE", "499"}, - {"Montserrat", "Montserrat", "MS", "MSR", "500"}, - {"Morocco", "Maroc (le)", "MA", "MAR", "504"}, - {"Mozambique", "Mozambique (le)", "MZ", "MOZ", "508"}, - {"Oman", "Oman", "OM", "OMN", "512"}, - {"Namibia", "Namibie (la)", "NA", "NAM", "516"}, - {"Nauru", "Nauru", "NR", "NRU", "520"}, - {"Nepal", "Népal (le)", "NP", "NPL", "524"}, - {"Netherlands (the)", "Pays-Bas (les)", "NL", "NLD", "528"}, - {"Curaçao", "Curaçao", "CW", "CUW", "531"}, - {"Aruba", "Aruba", "AW", "ABW", "533"}, - {"Sint Maarten (Dutch part)", "Saint-Martin (partie néerlandaise)", "SX", "SXM", "534"}, - {"Bonaire, Sint Eustatius and Saba", "Bonaire, Saint-Eustache et Saba", "BQ", "BES", "535"}, - {"New Caledonia", "Nouvelle-Calédonie (la)", "NC", "NCL", "540"}, - {"Vanuatu", "Vanuatu (le)", "VU", "VUT", "548"}, - {"New Zealand", "Nouvelle-Zélande (la)", "NZ", "NZL", "554"}, - {"Nicaragua", "Nicaragua (le)", "NI", "NIC", "558"}, - {"Niger (the)", "Niger (le)", "NE", "NER", "562"}, - {"Nigeria", "Nigéria (le)", "NG", "NGA", "566"}, - {"Niue", "Niue", "NU", "NIU", "570"}, - {"Norfolk Island", "Norfolk (l'Île)", "NF", "NFK", "574"}, - {"Norway", "Norvège (la)", "NO", "NOR", "578"}, - {"Northern Mariana Islands (the)", "Mariannes du Nord (les Îles)", "MP", "MNP", "580"}, - {"United States Minor Outlying Islands (the)", "Îles mineures éloignées des États-Unis (les)", "UM", "UMI", "581"}, - {"Micronesia (Federated States of)", "Micronésie (États fédérés de)", "FM", "FSM", "583"}, - {"Marshall Islands (the)", "Marshall (Îles)", "MH", "MHL", "584"}, - {"Palau", "Palaos (les)", "PW", "PLW", "585"}, - {"Pakistan", "Pakistan (le)", "PK", "PAK", "586"}, - {"Panama", "Panama (le)", "PA", "PAN", "591"}, - {"Papua New Guinea", 
"Papouasie-Nouvelle-Guinée (la)", "PG", "PNG", "598"}, - {"Paraguay", "Paraguay (le)", "PY", "PRY", "600"}, - {"Peru", "Pérou (le)", "PE", "PER", "604"}, - {"Philippines (the)", "Philippines (les)", "PH", "PHL", "608"}, - {"Pitcairn", "Pitcairn", "PN", "PCN", "612"}, - {"Poland", "Pologne (la)", "PL", "POL", "616"}, - {"Portugal", "Portugal (le)", "PT", "PRT", "620"}, - {"Guinea-Bissau", "Guinée-Bissau (la)", "GW", "GNB", "624"}, - {"Timor-Leste", "Timor-Leste (le)", "TL", "TLS", "626"}, - {"Puerto Rico", "Porto Rico", "PR", "PRI", "630"}, - {"Qatar", "Qatar (le)", "QA", "QAT", "634"}, - {"Réunion", "Réunion (La)", "RE", "REU", "638"}, - {"Romania", "Roumanie (la)", "RO", "ROU", "642"}, - {"Russian Federation (the)", "Russie (la Fédération de)", "RU", "RUS", "643"}, - {"Rwanda", "Rwanda (le)", "RW", "RWA", "646"}, - {"Saint Barthélemy", "Saint-Barthélemy", "BL", "BLM", "652"}, - {"Saint Helena, Ascension and Tristan da Cunha", "Sainte-Hélène, Ascension et Tristan da Cunha", "SH", "SHN", "654"}, - {"Saint Kitts and Nevis", "Saint-Kitts-et-Nevis", "KN", "KNA", "659"}, - {"Anguilla", "Anguilla", "AI", "AIA", "660"}, - {"Saint Lucia", "Sainte-Lucie", "LC", "LCA", "662"}, - {"Saint Martin (French part)", "Saint-Martin (partie française)", "MF", "MAF", "663"}, - {"Saint Pierre and Miquelon", "Saint-Pierre-et-Miquelon", "PM", "SPM", "666"}, - {"Saint Vincent and the Grenadines", "Saint-Vincent-et-les Grenadines", "VC", "VCT", "670"}, - {"San Marino", "Saint-Marin", "SM", "SMR", "674"}, - {"Sao Tome and Principe", "Sao Tomé-et-Principe", "ST", "STP", "678"}, - {"Saudi Arabia", "Arabie saoudite (l')", "SA", "SAU", "682"}, - {"Senegal", "Sénégal (le)", "SN", "SEN", "686"}, - {"Serbia", "Serbie (la)", "RS", "SRB", "688"}, - {"Seychelles", "Seychelles (les)", "SC", "SYC", "690"}, - {"Sierra Leone", "Sierra Leone (la)", "SL", "SLE", "694"}, - {"Singapore", "Singapour", "SG", "SGP", "702"}, - {"Slovakia", "Slovaquie (la)", "SK", "SVK", "703"}, - {"Viet Nam", "Viet Nam (le)", 
"VN", "VNM", "704"}, - {"Slovenia", "Slovénie (la)", "SI", "SVN", "705"}, - {"Somalia", "Somalie (la)", "SO", "SOM", "706"}, - {"South Africa", "Afrique du Sud (l')", "ZA", "ZAF", "710"}, - {"Zimbabwe", "Zimbabwe (le)", "ZW", "ZWE", "716"}, - {"Spain", "Espagne (l')", "ES", "ESP", "724"}, - {"South Sudan", "Soudan du Sud (le)", "SS", "SSD", "728"}, - {"Sudan (the)", "Soudan (le)", "SD", "SDN", "729"}, - {"Western Sahara*", "Sahara occidental (le)*", "EH", "ESH", "732"}, - {"Suriname", "Suriname (le)", "SR", "SUR", "740"}, - {"Svalbard and Jan Mayen", "Svalbard et l'Île Jan Mayen (le)", "SJ", "SJM", "744"}, - {"Swaziland", "Swaziland (le)", "SZ", "SWZ", "748"}, - {"Sweden", "Suède (la)", "SE", "SWE", "752"}, - {"Switzerland", "Suisse (la)", "CH", "CHE", "756"}, - {"Syrian Arab Republic", "République arabe syrienne (la)", "SY", "SYR", "760"}, - {"Tajikistan", "Tadjikistan (le)", "TJ", "TJK", "762"}, - {"Thailand", "Thaïlande (la)", "TH", "THA", "764"}, - {"Togo", "Togo (le)", "TG", "TGO", "768"}, - {"Tokelau", "Tokelau (les)", "TK", "TKL", "772"}, - {"Tonga", "Tonga (les)", "TO", "TON", "776"}, - {"Trinidad and Tobago", "Trinité-et-Tobago (la)", "TT", "TTO", "780"}, - {"United Arab Emirates (the)", "Émirats arabes unis (les)", "AE", "ARE", "784"}, - {"Tunisia", "Tunisie (la)", "TN", "TUN", "788"}, - {"Turkey", "Turquie (la)", "TR", "TUR", "792"}, - {"Turkmenistan", "Turkménistan (le)", "TM", "TKM", "795"}, - {"Turks and Caicos Islands (the)", "Turks-et-Caïcos (les Îles)", "TC", "TCA", "796"}, - {"Tuvalu", "Tuvalu (les)", "TV", "TUV", "798"}, - {"Uganda", "Ouganda (l')", "UG", "UGA", "800"}, - {"Ukraine", "Ukraine (l')", "UA", "UKR", "804"}, - {"Macedonia (the former Yugoslav Republic of)", "Macédoine (l'ex‑République yougoslave de)", "MK", "MKD", "807"}, - {"Egypt", "Égypte (l')", "EG", "EGY", "818"}, - {"United Kingdom of Great Britain and Northern Ireland (the)", "Royaume-Uni de Grande-Bretagne et d'Irlande du Nord (le)", "GB", "GBR", "826"}, - {"Guernsey", 
"Guernesey", "GG", "GGY", "831"}, - {"Jersey", "Jersey", "JE", "JEY", "832"}, - {"Isle of Man", "Île de Man", "IM", "IMN", "833"}, - {"Tanzania, United Republic of", "Tanzanie, République-Unie de", "TZ", "TZA", "834"}, - {"United States of America (the)", "États-Unis d'Amérique (les)", "US", "USA", "840"}, - {"Virgin Islands (U.S.)", "Vierges des États-Unis (les Îles)", "VI", "VIR", "850"}, - {"Burkina Faso", "Burkina Faso (le)", "BF", "BFA", "854"}, - {"Uruguay", "Uruguay (l')", "UY", "URY", "858"}, - {"Uzbekistan", "Ouzbékistan (l')", "UZ", "UZB", "860"}, - {"Venezuela (Bolivarian Republic of)", "Venezuela (République bolivarienne du)", "VE", "VEN", "862"}, - {"Wallis and Futuna", "Wallis-et-Futuna", "WF", "WLF", "876"}, - {"Samoa", "Samoa (le)", "WS", "WSM", "882"}, - {"Yemen", "Yémen (le)", "YE", "YEM", "887"}, - {"Zambia", "Zambie (la)", "ZM", "ZMB", "894"}, -} - -// ISO4217List is the list of ISO currency codes -var ISO4217List = []string{ - "AED", "AFN", "ALL", "AMD", "ANG", "AOA", "ARS", "AUD", "AWG", "AZN", - "BAM", "BBD", "BDT", "BGN", "BHD", "BIF", "BMD", "BND", "BOB", "BOV", "BRL", "BSD", "BTN", "BWP", "BYN", "BZD", - "CAD", "CDF", "CHE", "CHF", "CHW", "CLF", "CLP", "CNY", "COP", "COU", "CRC", "CUC", "CUP", "CVE", "CZK", - "DJF", "DKK", "DOP", "DZD", - "EGP", "ERN", "ETB", "EUR", - "FJD", "FKP", - "GBP", "GEL", "GHS", "GIP", "GMD", "GNF", "GTQ", "GYD", - "HKD", "HNL", "HRK", "HTG", "HUF", - "IDR", "ILS", "INR", "IQD", "IRR", "ISK", - "JMD", "JOD", "JPY", - "KES", "KGS", "KHR", "KMF", "KPW", "KRW", "KWD", "KYD", "KZT", - "LAK", "LBP", "LKR", "LRD", "LSL", "LYD", - "MAD", "MDL", "MGA", "MKD", "MMK", "MNT", "MOP", "MRO", "MUR", "MVR", "MWK", "MXN", "MXV", "MYR", "MZN", - "NAD", "NGN", "NIO", "NOK", "NPR", "NZD", - "OMR", - "PAB", "PEN", "PGK", "PHP", "PKR", "PLN", "PYG", - "QAR", - "RON", "RSD", "RUB", "RWF", - "SAR", "SBD", "SCR", "SDG", "SEK", "SGD", "SHP", "SLL", "SOS", "SRD", "SSP", "STD", "STN", "SVC", "SYP", "SZL", - "THB", "TJS", "TMT", "TND", 
"TOP", "TRY", "TTD", "TWD", "TZS", - "UAH", "UGX", "USD", "USN", "UYI", "UYU", "UYW", "UZS", - "VEF", "VES", "VND", "VUV", - "WST", - "XAF", "XAG", "XAU", "XBA", "XBB", "XBC", "XBD", "XCD", "XDR", "XOF", "XPD", "XPF", "XPT", "XSU", "XTS", "XUA", "XXX", - "YER", - "ZAR", "ZMW", "ZWL", -} - -// ISO693Entry stores ISO language codes -type ISO693Entry struct { - Alpha3bCode string - Alpha2Code string - English string -} - -//ISO693List based on http://data.okfn.org/data/core/language-codes/r/language-codes-3b2.json -var ISO693List = []ISO693Entry{ - {Alpha3bCode: "aar", Alpha2Code: "aa", English: "Afar"}, - {Alpha3bCode: "abk", Alpha2Code: "ab", English: "Abkhazian"}, - {Alpha3bCode: "afr", Alpha2Code: "af", English: "Afrikaans"}, - {Alpha3bCode: "aka", Alpha2Code: "ak", English: "Akan"}, - {Alpha3bCode: "alb", Alpha2Code: "sq", English: "Albanian"}, - {Alpha3bCode: "amh", Alpha2Code: "am", English: "Amharic"}, - {Alpha3bCode: "ara", Alpha2Code: "ar", English: "Arabic"}, - {Alpha3bCode: "arg", Alpha2Code: "an", English: "Aragonese"}, - {Alpha3bCode: "arm", Alpha2Code: "hy", English: "Armenian"}, - {Alpha3bCode: "asm", Alpha2Code: "as", English: "Assamese"}, - {Alpha3bCode: "ava", Alpha2Code: "av", English: "Avaric"}, - {Alpha3bCode: "ave", Alpha2Code: "ae", English: "Avestan"}, - {Alpha3bCode: "aym", Alpha2Code: "ay", English: "Aymara"}, - {Alpha3bCode: "aze", Alpha2Code: "az", English: "Azerbaijani"}, - {Alpha3bCode: "bak", Alpha2Code: "ba", English: "Bashkir"}, - {Alpha3bCode: "bam", Alpha2Code: "bm", English: "Bambara"}, - {Alpha3bCode: "baq", Alpha2Code: "eu", English: "Basque"}, - {Alpha3bCode: "bel", Alpha2Code: "be", English: "Belarusian"}, - {Alpha3bCode: "ben", Alpha2Code: "bn", English: "Bengali"}, - {Alpha3bCode: "bih", Alpha2Code: "bh", English: "Bihari languages"}, - {Alpha3bCode: "bis", Alpha2Code: "bi", English: "Bislama"}, - {Alpha3bCode: "bos", Alpha2Code: "bs", English: "Bosnian"}, - {Alpha3bCode: "bre", Alpha2Code: "br", English: "Breton"}, - 
{Alpha3bCode: "bul", Alpha2Code: "bg", English: "Bulgarian"}, - {Alpha3bCode: "bur", Alpha2Code: "my", English: "Burmese"}, - {Alpha3bCode: "cat", Alpha2Code: "ca", English: "Catalan; Valencian"}, - {Alpha3bCode: "cha", Alpha2Code: "ch", English: "Chamorro"}, - {Alpha3bCode: "che", Alpha2Code: "ce", English: "Chechen"}, - {Alpha3bCode: "chi", Alpha2Code: "zh", English: "Chinese"}, - {Alpha3bCode: "chu", Alpha2Code: "cu", English: "Church Slavic; Old Slavonic; Church Slavonic; Old Bulgarian; Old Church Slavonic"}, - {Alpha3bCode: "chv", Alpha2Code: "cv", English: "Chuvash"}, - {Alpha3bCode: "cor", Alpha2Code: "kw", English: "Cornish"}, - {Alpha3bCode: "cos", Alpha2Code: "co", English: "Corsican"}, - {Alpha3bCode: "cre", Alpha2Code: "cr", English: "Cree"}, - {Alpha3bCode: "cze", Alpha2Code: "cs", English: "Czech"}, - {Alpha3bCode: "dan", Alpha2Code: "da", English: "Danish"}, - {Alpha3bCode: "div", Alpha2Code: "dv", English: "Divehi; Dhivehi; Maldivian"}, - {Alpha3bCode: "dut", Alpha2Code: "nl", English: "Dutch; Flemish"}, - {Alpha3bCode: "dzo", Alpha2Code: "dz", English: "Dzongkha"}, - {Alpha3bCode: "eng", Alpha2Code: "en", English: "English"}, - {Alpha3bCode: "epo", Alpha2Code: "eo", English: "Esperanto"}, - {Alpha3bCode: "est", Alpha2Code: "et", English: "Estonian"}, - {Alpha3bCode: "ewe", Alpha2Code: "ee", English: "Ewe"}, - {Alpha3bCode: "fao", Alpha2Code: "fo", English: "Faroese"}, - {Alpha3bCode: "fij", Alpha2Code: "fj", English: "Fijian"}, - {Alpha3bCode: "fin", Alpha2Code: "fi", English: "Finnish"}, - {Alpha3bCode: "fre", Alpha2Code: "fr", English: "French"}, - {Alpha3bCode: "fry", Alpha2Code: "fy", English: "Western Frisian"}, - {Alpha3bCode: "ful", Alpha2Code: "ff", English: "Fulah"}, - {Alpha3bCode: "geo", Alpha2Code: "ka", English: "Georgian"}, - {Alpha3bCode: "ger", Alpha2Code: "de", English: "German"}, - {Alpha3bCode: "gla", Alpha2Code: "gd", English: "Gaelic; Scottish Gaelic"}, - {Alpha3bCode: "gle", Alpha2Code: "ga", English: "Irish"}, - {Alpha3bCode: 
"glg", Alpha2Code: "gl", English: "Galician"}, - {Alpha3bCode: "glv", Alpha2Code: "gv", English: "Manx"}, - {Alpha3bCode: "gre", Alpha2Code: "el", English: "Greek, Modern (1453-)"}, - {Alpha3bCode: "grn", Alpha2Code: "gn", English: "Guarani"}, - {Alpha3bCode: "guj", Alpha2Code: "gu", English: "Gujarati"}, - {Alpha3bCode: "hat", Alpha2Code: "ht", English: "Haitian; Haitian Creole"}, - {Alpha3bCode: "hau", Alpha2Code: "ha", English: "Hausa"}, - {Alpha3bCode: "heb", Alpha2Code: "he", English: "Hebrew"}, - {Alpha3bCode: "her", Alpha2Code: "hz", English: "Herero"}, - {Alpha3bCode: "hin", Alpha2Code: "hi", English: "Hindi"}, - {Alpha3bCode: "hmo", Alpha2Code: "ho", English: "Hiri Motu"}, - {Alpha3bCode: "hrv", Alpha2Code: "hr", English: "Croatian"}, - {Alpha3bCode: "hun", Alpha2Code: "hu", English: "Hungarian"}, - {Alpha3bCode: "ibo", Alpha2Code: "ig", English: "Igbo"}, - {Alpha3bCode: "ice", Alpha2Code: "is", English: "Icelandic"}, - {Alpha3bCode: "ido", Alpha2Code: "io", English: "Ido"}, - {Alpha3bCode: "iii", Alpha2Code: "ii", English: "Sichuan Yi; Nuosu"}, - {Alpha3bCode: "iku", Alpha2Code: "iu", English: "Inuktitut"}, - {Alpha3bCode: "ile", Alpha2Code: "ie", English: "Interlingue; Occidental"}, - {Alpha3bCode: "ina", Alpha2Code: "ia", English: "Interlingua (International Auxiliary Language Association)"}, - {Alpha3bCode: "ind", Alpha2Code: "id", English: "Indonesian"}, - {Alpha3bCode: "ipk", Alpha2Code: "ik", English: "Inupiaq"}, - {Alpha3bCode: "ita", Alpha2Code: "it", English: "Italian"}, - {Alpha3bCode: "jav", Alpha2Code: "jv", English: "Javanese"}, - {Alpha3bCode: "jpn", Alpha2Code: "ja", English: "Japanese"}, - {Alpha3bCode: "kal", Alpha2Code: "kl", English: "Kalaallisut; Greenlandic"}, - {Alpha3bCode: "kan", Alpha2Code: "kn", English: "Kannada"}, - {Alpha3bCode: "kas", Alpha2Code: "ks", English: "Kashmiri"}, - {Alpha3bCode: "kau", Alpha2Code: "kr", English: "Kanuri"}, - {Alpha3bCode: "kaz", Alpha2Code: "kk", English: "Kazakh"}, - {Alpha3bCode: "khm", 
Alpha2Code: "km", English: "Central Khmer"}, - {Alpha3bCode: "kik", Alpha2Code: "ki", English: "Kikuyu; Gikuyu"}, - {Alpha3bCode: "kin", Alpha2Code: "rw", English: "Kinyarwanda"}, - {Alpha3bCode: "kir", Alpha2Code: "ky", English: "Kirghiz; Kyrgyz"}, - {Alpha3bCode: "kom", Alpha2Code: "kv", English: "Komi"}, - {Alpha3bCode: "kon", Alpha2Code: "kg", English: "Kongo"}, - {Alpha3bCode: "kor", Alpha2Code: "ko", English: "Korean"}, - {Alpha3bCode: "kua", Alpha2Code: "kj", English: "Kuanyama; Kwanyama"}, - {Alpha3bCode: "kur", Alpha2Code: "ku", English: "Kurdish"}, - {Alpha3bCode: "lao", Alpha2Code: "lo", English: "Lao"}, - {Alpha3bCode: "lat", Alpha2Code: "la", English: "Latin"}, - {Alpha3bCode: "lav", Alpha2Code: "lv", English: "Latvian"}, - {Alpha3bCode: "lim", Alpha2Code: "li", English: "Limburgan; Limburger; Limburgish"}, - {Alpha3bCode: "lin", Alpha2Code: "ln", English: "Lingala"}, - {Alpha3bCode: "lit", Alpha2Code: "lt", English: "Lithuanian"}, - {Alpha3bCode: "ltz", Alpha2Code: "lb", English: "Luxembourgish; Letzeburgesch"}, - {Alpha3bCode: "lub", Alpha2Code: "lu", English: "Luba-Katanga"}, - {Alpha3bCode: "lug", Alpha2Code: "lg", English: "Ganda"}, - {Alpha3bCode: "mac", Alpha2Code: "mk", English: "Macedonian"}, - {Alpha3bCode: "mah", Alpha2Code: "mh", English: "Marshallese"}, - {Alpha3bCode: "mal", Alpha2Code: "ml", English: "Malayalam"}, - {Alpha3bCode: "mao", Alpha2Code: "mi", English: "Maori"}, - {Alpha3bCode: "mar", Alpha2Code: "mr", English: "Marathi"}, - {Alpha3bCode: "may", Alpha2Code: "ms", English: "Malay"}, - {Alpha3bCode: "mlg", Alpha2Code: "mg", English: "Malagasy"}, - {Alpha3bCode: "mlt", Alpha2Code: "mt", English: "Maltese"}, - {Alpha3bCode: "mon", Alpha2Code: "mn", English: "Mongolian"}, - {Alpha3bCode: "nau", Alpha2Code: "na", English: "Nauru"}, - {Alpha3bCode: "nav", Alpha2Code: "nv", English: "Navajo; Navaho"}, - {Alpha3bCode: "nbl", Alpha2Code: "nr", English: "Ndebele, South; South Ndebele"}, - {Alpha3bCode: "nde", Alpha2Code: "nd", English: 
"Ndebele, North; North Ndebele"}, - {Alpha3bCode: "ndo", Alpha2Code: "ng", English: "Ndonga"}, - {Alpha3bCode: "nep", Alpha2Code: "ne", English: "Nepali"}, - {Alpha3bCode: "nno", Alpha2Code: "nn", English: "Norwegian Nynorsk; Nynorsk, Norwegian"}, - {Alpha3bCode: "nob", Alpha2Code: "nb", English: "Bokmål, Norwegian; Norwegian Bokmål"}, - {Alpha3bCode: "nor", Alpha2Code: "no", English: "Norwegian"}, - {Alpha3bCode: "nya", Alpha2Code: "ny", English: "Chichewa; Chewa; Nyanja"}, - {Alpha3bCode: "oci", Alpha2Code: "oc", English: "Occitan (post 1500); Provençal"}, - {Alpha3bCode: "oji", Alpha2Code: "oj", English: "Ojibwa"}, - {Alpha3bCode: "ori", Alpha2Code: "or", English: "Oriya"}, - {Alpha3bCode: "orm", Alpha2Code: "om", English: "Oromo"}, - {Alpha3bCode: "oss", Alpha2Code: "os", English: "Ossetian; Ossetic"}, - {Alpha3bCode: "pan", Alpha2Code: "pa", English: "Panjabi; Punjabi"}, - {Alpha3bCode: "per", Alpha2Code: "fa", English: "Persian"}, - {Alpha3bCode: "pli", Alpha2Code: "pi", English: "Pali"}, - {Alpha3bCode: "pol", Alpha2Code: "pl", English: "Polish"}, - {Alpha3bCode: "por", Alpha2Code: "pt", English: "Portuguese"}, - {Alpha3bCode: "pus", Alpha2Code: "ps", English: "Pushto; Pashto"}, - {Alpha3bCode: "que", Alpha2Code: "qu", English: "Quechua"}, - {Alpha3bCode: "roh", Alpha2Code: "rm", English: "Romansh"}, - {Alpha3bCode: "rum", Alpha2Code: "ro", English: "Romanian; Moldavian; Moldovan"}, - {Alpha3bCode: "run", Alpha2Code: "rn", English: "Rundi"}, - {Alpha3bCode: "rus", Alpha2Code: "ru", English: "Russian"}, - {Alpha3bCode: "sag", Alpha2Code: "sg", English: "Sango"}, - {Alpha3bCode: "san", Alpha2Code: "sa", English: "Sanskrit"}, - {Alpha3bCode: "sin", Alpha2Code: "si", English: "Sinhala; Sinhalese"}, - {Alpha3bCode: "slo", Alpha2Code: "sk", English: "Slovak"}, - {Alpha3bCode: "slv", Alpha2Code: "sl", English: "Slovenian"}, - {Alpha3bCode: "sme", Alpha2Code: "se", English: "Northern Sami"}, - {Alpha3bCode: "smo", Alpha2Code: "sm", English: "Samoan"}, - 
{Alpha3bCode: "sna", Alpha2Code: "sn", English: "Shona"}, - {Alpha3bCode: "snd", Alpha2Code: "sd", English: "Sindhi"}, - {Alpha3bCode: "som", Alpha2Code: "so", English: "Somali"}, - {Alpha3bCode: "sot", Alpha2Code: "st", English: "Sotho, Southern"}, - {Alpha3bCode: "spa", Alpha2Code: "es", English: "Spanish; Castilian"}, - {Alpha3bCode: "srd", Alpha2Code: "sc", English: "Sardinian"}, - {Alpha3bCode: "srp", Alpha2Code: "sr", English: "Serbian"}, - {Alpha3bCode: "ssw", Alpha2Code: "ss", English: "Swati"}, - {Alpha3bCode: "sun", Alpha2Code: "su", English: "Sundanese"}, - {Alpha3bCode: "swa", Alpha2Code: "sw", English: "Swahili"}, - {Alpha3bCode: "swe", Alpha2Code: "sv", English: "Swedish"}, - {Alpha3bCode: "tah", Alpha2Code: "ty", English: "Tahitian"}, - {Alpha3bCode: "tam", Alpha2Code: "ta", English: "Tamil"}, - {Alpha3bCode: "tat", Alpha2Code: "tt", English: "Tatar"}, - {Alpha3bCode: "tel", Alpha2Code: "te", English: "Telugu"}, - {Alpha3bCode: "tgk", Alpha2Code: "tg", English: "Tajik"}, - {Alpha3bCode: "tgl", Alpha2Code: "tl", English: "Tagalog"}, - {Alpha3bCode: "tha", Alpha2Code: "th", English: "Thai"}, - {Alpha3bCode: "tib", Alpha2Code: "bo", English: "Tibetan"}, - {Alpha3bCode: "tir", Alpha2Code: "ti", English: "Tigrinya"}, - {Alpha3bCode: "ton", Alpha2Code: "to", English: "Tonga (Tonga Islands)"}, - {Alpha3bCode: "tsn", Alpha2Code: "tn", English: "Tswana"}, - {Alpha3bCode: "tso", Alpha2Code: "ts", English: "Tsonga"}, - {Alpha3bCode: "tuk", Alpha2Code: "tk", English: "Turkmen"}, - {Alpha3bCode: "tur", Alpha2Code: "tr", English: "Turkish"}, - {Alpha3bCode: "twi", Alpha2Code: "tw", English: "Twi"}, - {Alpha3bCode: "uig", Alpha2Code: "ug", English: "Uighur; Uyghur"}, - {Alpha3bCode: "ukr", Alpha2Code: "uk", English: "Ukrainian"}, - {Alpha3bCode: "urd", Alpha2Code: "ur", English: "Urdu"}, - {Alpha3bCode: "uzb", Alpha2Code: "uz", English: "Uzbek"}, - {Alpha3bCode: "ven", Alpha2Code: "ve", English: "Venda"}, - {Alpha3bCode: "vie", Alpha2Code: "vi", English: 
"Vietnamese"}, - {Alpha3bCode: "vol", Alpha2Code: "vo", English: "Volapük"}, - {Alpha3bCode: "wel", Alpha2Code: "cy", English: "Welsh"}, - {Alpha3bCode: "wln", Alpha2Code: "wa", English: "Walloon"}, - {Alpha3bCode: "wol", Alpha2Code: "wo", English: "Wolof"}, - {Alpha3bCode: "xho", Alpha2Code: "xh", English: "Xhosa"}, - {Alpha3bCode: "yid", Alpha2Code: "yi", English: "Yiddish"}, - {Alpha3bCode: "yor", Alpha2Code: "yo", English: "Yoruba"}, - {Alpha3bCode: "zha", Alpha2Code: "za", English: "Zhuang; Chuang"}, - {Alpha3bCode: "zul", Alpha2Code: "zu", English: "Zulu"}, -} diff --git a/vendor/github.com/asaskevich/govalidator/utils.go b/vendor/github.com/asaskevich/govalidator/utils.go deleted file mode 100644 index f4c30f824..000000000 --- a/vendor/github.com/asaskevich/govalidator/utils.go +++ /dev/null @@ -1,270 +0,0 @@ -package govalidator - -import ( - "errors" - "fmt" - "html" - "math" - "path" - "regexp" - "strings" - "unicode" - "unicode/utf8" -) - -// Contains checks if the string contains the substring. -func Contains(str, substring string) bool { - return strings.Contains(str, substring) -} - -// Matches checks if string matches the pattern (pattern is regular expression) -// In case of error return false -func Matches(str, pattern string) bool { - match, _ := regexp.MatchString(pattern, str) - return match -} - -// LeftTrim trims characters from the left side of the input. -// If second argument is empty, it will remove leading spaces. -func LeftTrim(str, chars string) string { - if chars == "" { - return strings.TrimLeftFunc(str, unicode.IsSpace) - } - r, _ := regexp.Compile("^[" + chars + "]+") - return r.ReplaceAllString(str, "") -} - -// RightTrim trims characters from the right side of the input. -// If second argument is empty, it will remove trailing spaces. 
-func RightTrim(str, chars string) string { - if chars == "" { - return strings.TrimRightFunc(str, unicode.IsSpace) - } - r, _ := regexp.Compile("[" + chars + "]+$") - return r.ReplaceAllString(str, "") -} - -// Trim trims characters from both sides of the input. -// If second argument is empty, it will remove spaces. -func Trim(str, chars string) string { - return LeftTrim(RightTrim(str, chars), chars) -} - -// WhiteList removes characters that do not appear in the whitelist. -func WhiteList(str, chars string) string { - pattern := "[^" + chars + "]+" - r, _ := regexp.Compile(pattern) - return r.ReplaceAllString(str, "") -} - -// BlackList removes characters that appear in the blacklist. -func BlackList(str, chars string) string { - pattern := "[" + chars + "]+" - r, _ := regexp.Compile(pattern) - return r.ReplaceAllString(str, "") -} - -// StripLow removes characters with a numerical value < 32 and 127, mostly control characters. -// If keep_new_lines is true, newline characters are preserved (\n and \r, hex 0xA and 0xD). -func StripLow(str string, keepNewLines bool) string { - chars := "" - if keepNewLines { - chars = "\x00-\x09\x0B\x0C\x0E-\x1F\x7F" - } else { - chars = "\x00-\x1F\x7F" - } - return BlackList(str, chars) -} - -// ReplacePattern replaces regular expression pattern in string -func ReplacePattern(str, pattern, replace string) string { - r, _ := regexp.Compile(pattern) - return r.ReplaceAllString(str, replace) -} - -// Escape replaces <, >, & and " with HTML entities. -var Escape = html.EscapeString - -func addSegment(inrune, segment []rune) []rune { - if len(segment) == 0 { - return inrune - } - if len(inrune) != 0 { - inrune = append(inrune, '_') - } - inrune = append(inrune, segment...) - return inrune -} - -// UnderscoreToCamelCase converts from underscore separated form to camel case form. 
-// Ex.: my_func => MyFunc -func UnderscoreToCamelCase(s string) string { - return strings.Replace(strings.Title(strings.Replace(strings.ToLower(s), "_", " ", -1)), " ", "", -1) -} - -// CamelCaseToUnderscore converts from camel case form to underscore separated form. -// Ex.: MyFunc => my_func -func CamelCaseToUnderscore(str string) string { - var output []rune - var segment []rune - for _, r := range str { - - // not treat number as separate segment - if !unicode.IsLower(r) && string(r) != "_" && !unicode.IsNumber(r) { - output = addSegment(output, segment) - segment = nil - } - segment = append(segment, unicode.ToLower(r)) - } - output = addSegment(output, segment) - return string(output) -} - -// Reverse returns reversed string -func Reverse(s string) string { - r := []rune(s) - for i, j := 0, len(r)-1; i < j; i, j = i+1, j-1 { - r[i], r[j] = r[j], r[i] - } - return string(r) -} - -// GetLines splits string by "\n" and return array of lines -func GetLines(s string) []string { - return strings.Split(s, "\n") -} - -// GetLine returns specified line of multiline string -func GetLine(s string, index int) (string, error) { - lines := GetLines(s) - if index < 0 || index >= len(lines) { - return "", errors.New("line index out of bounds") - } - return lines[index], nil -} - -// RemoveTags removes all tags from HTML string -func RemoveTags(s string) string { - return ReplacePattern(s, "<[^>]*>", "") -} - -// SafeFileName returns safe string that can be used in file names -func SafeFileName(str string) string { - name := strings.ToLower(str) - name = path.Clean(path.Base(name)) - name = strings.Trim(name, " ") - separators, err := regexp.Compile(`[ &_=+:]`) - if err == nil { - name = separators.ReplaceAllString(name, "-") - } - legal, err := regexp.Compile(`[^[:alnum:]-.]`) - if err == nil { - name = legal.ReplaceAllString(name, "") - } - for strings.Contains(name, "--") { - name = strings.Replace(name, "--", "-", -1) - } - return name -} - -// NormalizeEmail 
canonicalize an email address. -// The local part of the email address is lowercased for all domains; the hostname is always lowercased and -// the local part of the email address is always lowercased for hosts that are known to be case-insensitive (currently only GMail). -// Normalization follows special rules for known providers: currently, GMail addresses have dots removed in the local part and -// are stripped of tags (e.g. some.one+tag@gmail.com becomes someone@gmail.com) and all @googlemail.com addresses are -// normalized to @gmail.com. -func NormalizeEmail(str string) (string, error) { - if !IsEmail(str) { - return "", fmt.Errorf("%s is not an email", str) - } - parts := strings.Split(str, "@") - parts[0] = strings.ToLower(parts[0]) - parts[1] = strings.ToLower(parts[1]) - if parts[1] == "gmail.com" || parts[1] == "googlemail.com" { - parts[1] = "gmail.com" - parts[0] = strings.Split(ReplacePattern(parts[0], `\.`, ""), "+")[0] - } - return strings.Join(parts, "@"), nil -} - -// Truncate a string to the closest length without breaking words. 
-func Truncate(str string, length int, ending string) string { - var aftstr, befstr string - if len(str) > length { - words := strings.Fields(str) - before, present := 0, 0 - for i := range words { - befstr = aftstr - before = present - aftstr = aftstr + words[i] + " " - present = len(aftstr) - if present > length && i != 0 { - if (length - before) < (present - length) { - return Trim(befstr, " /\\.,\"'#!?&@+-") + ending - } - return Trim(aftstr, " /\\.,\"'#!?&@+-") + ending - } - } - } - - return str -} - -// PadLeft pads left side of a string if size of string is less then indicated pad length -func PadLeft(str string, padStr string, padLen int) string { - return buildPadStr(str, padStr, padLen, true, false) -} - -// PadRight pads right side of a string if size of string is less then indicated pad length -func PadRight(str string, padStr string, padLen int) string { - return buildPadStr(str, padStr, padLen, false, true) -} - -// PadBoth pads both sides of a string if size of string is less then indicated pad length -func PadBoth(str string, padStr string, padLen int) string { - return buildPadStr(str, padStr, padLen, true, true) -} - -// PadString either left, right or both sides. 
-// Note that padding string can be unicode and more then one character -func buildPadStr(str string, padStr string, padLen int, padLeft bool, padRight bool) string { - - // When padded length is less then the current string size - if padLen < utf8.RuneCountInString(str) { - return str - } - - padLen -= utf8.RuneCountInString(str) - - targetLen := padLen - - targetLenLeft := targetLen - targetLenRight := targetLen - if padLeft && padRight { - targetLenLeft = padLen / 2 - targetLenRight = padLen - targetLenLeft - } - - strToRepeatLen := utf8.RuneCountInString(padStr) - - repeatTimes := int(math.Ceil(float64(targetLen) / float64(strToRepeatLen))) - repeatedString := strings.Repeat(padStr, repeatTimes) - - leftSide := "" - if padLeft { - leftSide = repeatedString[0:targetLenLeft] - } - - rightSide := "" - if padRight { - rightSide = repeatedString[0:targetLenRight] - } - - return leftSide + str + rightSide -} - -// TruncatingErrorf removes extra args from fmt.Errorf if not formatted in the str object -func TruncatingErrorf(str string, args ...interface{}) error { - n := strings.Count(str, "%s") - return fmt.Errorf(str, args[:n]...) -} diff --git a/vendor/github.com/asaskevich/govalidator/validator.go b/vendor/github.com/asaskevich/govalidator/validator.go deleted file mode 100644 index c9c4fac06..000000000 --- a/vendor/github.com/asaskevich/govalidator/validator.go +++ /dev/null @@ -1,1768 +0,0 @@ -// Package govalidator is package of validators and sanitizers for strings, structs and collections. 
-package govalidator - -import ( - "bytes" - "crypto/rsa" - "crypto/x509" - "encoding/base64" - "encoding/json" - "encoding/pem" - "fmt" - "io/ioutil" - "net" - "net/url" - "reflect" - "regexp" - "sort" - "strconv" - "strings" - "time" - "unicode" - "unicode/utf8" -) - -var ( - fieldsRequiredByDefault bool - nilPtrAllowedByRequired = false - notNumberRegexp = regexp.MustCompile("[^0-9]+") - whiteSpacesAndMinus = regexp.MustCompile(`[\s-]+`) - paramsRegexp = regexp.MustCompile(`\(.*\)$`) -) - -const maxURLRuneCount = 2083 -const minURLRuneCount = 3 -const rfc3339WithoutZone = "2006-01-02T15:04:05" - -// SetFieldsRequiredByDefault causes validation to fail when struct fields -// do not include validations or are not explicitly marked as exempt (using `valid:"-"` or `valid:"email,optional"`). -// This struct definition will fail govalidator.ValidateStruct() (and the field values do not matter): -// type exampleStruct struct { -// Name string `` -// Email string `valid:"email"` -// This, however, will only fail when Email is empty or an invalid email address: -// type exampleStruct2 struct { -// Name string `valid:"-"` -// Email string `valid:"email"` -// Lastly, this will only fail when Email is an invalid email address but not when it's empty: -// type exampleStruct2 struct { -// Name string `valid:"-"` -// Email string `valid:"email,optional"` -func SetFieldsRequiredByDefault(value bool) { - fieldsRequiredByDefault = value -} - -// SetNilPtrAllowedByRequired causes validation to pass for nil ptrs when a field is set to required. -// The validation will still reject ptr fields in their zero value state. Example with this enabled: -// type exampleStruct struct { -// Name *string `valid:"required"` -// With `Name` set to "", this will be considered invalid input and will cause a validation error. -// With `Name` set to nil, this will be considered valid by validation. -// By default this is disabled. 
-func SetNilPtrAllowedByRequired(value bool) { - nilPtrAllowedByRequired = value -} - -// IsEmail checks if the string is an email. -func IsEmail(str string) bool { - // TODO uppercase letters are not supported - return rxEmail.MatchString(str) -} - -// IsExistingEmail checks if the string is an email of existing domain -func IsExistingEmail(email string) bool { - - if len(email) < 6 || len(email) > 254 { - return false - } - at := strings.LastIndex(email, "@") - if at <= 0 || at > len(email)-3 { - return false - } - user := email[:at] - host := email[at+1:] - if len(user) > 64 { - return false - } - switch host { - case "localhost", "example.com": - return true - } - if userDotRegexp.MatchString(user) || !userRegexp.MatchString(user) || !hostRegexp.MatchString(host) { - return false - } - if _, err := net.LookupMX(host); err != nil { - if _, err := net.LookupIP(host); err != nil { - return false - } - } - - return true -} - -// IsURL checks if the string is an URL. -func IsURL(str string) bool { - if str == "" || utf8.RuneCountInString(str) >= maxURLRuneCount || len(str) <= minURLRuneCount || strings.HasPrefix(str, ".") { - return false - } - strTemp := str - if strings.Contains(str, ":") && !strings.Contains(str, "://") { - // support no indicated urlscheme but with colon for port number - // http:// is appended so url.Parse will succeed, strTemp used so it does not impact rxURL.MatchString - strTemp = "http://" + str - } - u, err := url.Parse(strTemp) - if err != nil { - return false - } - if strings.HasPrefix(u.Host, ".") { - return false - } - if u.Host == "" && (u.Path != "" && !strings.Contains(u.Path, ".")) { - return false - } - return rxURL.MatchString(str) -} - -// IsRequestURL checks if the string rawurl, assuming -// it was received in an HTTP request, is a valid -// URL confirm to RFC 3986 -func IsRequestURL(rawurl string) bool { - url, err := url.ParseRequestURI(rawurl) - if err != nil { - return false //Couldn't even parse the rawurl - } - if 
len(url.Scheme) == 0 { - return false //No Scheme found - } - return true -} - -// IsRequestURI checks if the string rawurl, assuming -// it was received in an HTTP request, is an -// absolute URI or an absolute path. -func IsRequestURI(rawurl string) bool { - _, err := url.ParseRequestURI(rawurl) - return err == nil -} - -// IsAlpha checks if the string contains only letters (a-zA-Z). Empty string is valid. -func IsAlpha(str string) bool { - if IsNull(str) { - return true - } - return rxAlpha.MatchString(str) -} - -//IsUTFLetter checks if the string contains only unicode letter characters. -//Similar to IsAlpha but for all languages. Empty string is valid. -func IsUTFLetter(str string) bool { - if IsNull(str) { - return true - } - - for _, c := range str { - if !unicode.IsLetter(c) { - return false - } - } - return true - -} - -// IsAlphanumeric checks if the string contains only letters and numbers. Empty string is valid. -func IsAlphanumeric(str string) bool { - if IsNull(str) { - return true - } - return rxAlphanumeric.MatchString(str) -} - -// IsUTFLetterNumeric checks if the string contains only unicode letters and numbers. Empty string is valid. -func IsUTFLetterNumeric(str string) bool { - if IsNull(str) { - return true - } - for _, c := range str { - if !unicode.IsLetter(c) && !unicode.IsNumber(c) { //letters && numbers are ok - return false - } - } - return true - -} - -// IsNumeric checks if the string contains only numbers. Empty string is valid. -func IsNumeric(str string) bool { - if IsNull(str) { - return true - } - return rxNumeric.MatchString(str) -} - -// IsUTFNumeric checks if the string contains only unicode numbers of any kind. -// Numbers can be 0-9 but also Fractions ¾,Roman Ⅸ and Hangzhou 〩. Empty string is valid. 
-func IsUTFNumeric(str string) bool { - if IsNull(str) { - return true - } - if strings.IndexAny(str, "+-") > 0 { - return false - } - if len(str) > 1 { - str = strings.TrimPrefix(str, "-") - str = strings.TrimPrefix(str, "+") - } - for _, c := range str { - if !unicode.IsNumber(c) { //numbers && minus sign are ok - return false - } - } - return true - -} - -// IsUTFDigit checks if the string contains only unicode radix-10 decimal digits. Empty string is valid. -func IsUTFDigit(str string) bool { - if IsNull(str) { - return true - } - if strings.IndexAny(str, "+-") > 0 { - return false - } - if len(str) > 1 { - str = strings.TrimPrefix(str, "-") - str = strings.TrimPrefix(str, "+") - } - for _, c := range str { - if !unicode.IsDigit(c) { //digits && minus sign are ok - return false - } - } - return true - -} - -// IsHexadecimal checks if the string is a hexadecimal number. -func IsHexadecimal(str string) bool { - return rxHexadecimal.MatchString(str) -} - -// IsHexcolor checks if the string is a hexadecimal color. -func IsHexcolor(str string) bool { - return rxHexcolor.MatchString(str) -} - -// IsRGBcolor checks if the string is a valid RGB color in form rgb(RRR, GGG, BBB). -func IsRGBcolor(str string) bool { - return rxRGBcolor.MatchString(str) -} - -// IsLowerCase checks if the string is lowercase. Empty string is valid. -func IsLowerCase(str string) bool { - if IsNull(str) { - return true - } - return str == strings.ToLower(str) -} - -// IsUpperCase checks if the string is uppercase. Empty string is valid. -func IsUpperCase(str string) bool { - if IsNull(str) { - return true - } - return str == strings.ToUpper(str) -} - -// HasLowerCase checks if the string contains at least 1 lowercase. Empty string is valid. -func HasLowerCase(str string) bool { - if IsNull(str) { - return true - } - return rxHasLowerCase.MatchString(str) -} - -// HasUpperCase checks if the string contains as least 1 uppercase. Empty string is valid. 
-func HasUpperCase(str string) bool { - if IsNull(str) { - return true - } - return rxHasUpperCase.MatchString(str) -} - -// IsInt checks if the string is an integer. Empty string is valid. -func IsInt(str string) bool { - if IsNull(str) { - return true - } - return rxInt.MatchString(str) -} - -// IsFloat checks if the string is a float. -func IsFloat(str string) bool { - return str != "" && rxFloat.MatchString(str) -} - -// IsDivisibleBy checks if the string is a number that's divisible by another. -// If second argument is not valid integer or zero, it's return false. -// Otherwise, if first argument is not valid integer or zero, it's return true (Invalid string converts to zero). -func IsDivisibleBy(str, num string) bool { - f, _ := ToFloat(str) - p := int64(f) - q, _ := ToInt(num) - if q == 0 { - return false - } - return (p == 0) || (p%q == 0) -} - -// IsNull checks if the string is null. -func IsNull(str string) bool { - return len(str) == 0 -} - -// IsNotNull checks if the string is not null. -func IsNotNull(str string) bool { - return !IsNull(str) -} - -// HasWhitespaceOnly checks the string only contains whitespace -func HasWhitespaceOnly(str string) bool { - return len(str) > 0 && rxHasWhitespaceOnly.MatchString(str) -} - -// HasWhitespace checks if the string contains any whitespace -func HasWhitespace(str string) bool { - return len(str) > 0 && rxHasWhitespace.MatchString(str) -} - -// IsByteLength checks if the string's length (in bytes) falls in a range. -func IsByteLength(str string, min, max int) bool { - return len(str) >= min && len(str) <= max -} - -// IsUUIDv3 checks if the string is a UUID version 3. -func IsUUIDv3(str string) bool { - return rxUUID3.MatchString(str) -} - -// IsUUIDv4 checks if the string is a UUID version 4. -func IsUUIDv4(str string) bool { - return rxUUID4.MatchString(str) -} - -// IsUUIDv5 checks if the string is a UUID version 5. 
-func IsUUIDv5(str string) bool { - return rxUUID5.MatchString(str) -} - -// IsUUID checks if the string is a UUID (version 3, 4 or 5). -func IsUUID(str string) bool { - return rxUUID.MatchString(str) -} - -// Byte to index table for O(1) lookups when unmarshaling. -// We use 0xFF as sentinel value for invalid indexes. -var ulidDec = [...]byte{ - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x01, - 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, - 0x0F, 0x10, 0x11, 0xFF, 0x12, 0x13, 0xFF, 0x14, 0x15, 0xFF, - 0x16, 0x17, 0x18, 0x19, 0x1A, 0xFF, 0x1B, 0x1C, 0x1D, 0x1E, - 0x1F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0A, 0x0B, 0x0C, - 0x0D, 0x0E, 0x0F, 0x10, 0x11, 0xFF, 0x12, 0x13, 0xFF, 0x14, - 0x15, 0xFF, 0x16, 0x17, 0x18, 0x19, 0x1A, 0xFF, 0x1B, 0x1C, - 0x1D, 0x1E, 0x1F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, -} - -// EncodedSize is the length of a text encoded ULID. 
-const ulidEncodedSize = 26 - -// IsULID checks if the string is a ULID. -// -// Implementation got from: -// https://github.com/oklog/ulid (Apache-2.0 License) -// -func IsULID(str string) bool { - // Check if a base32 encoded ULID is the right length. - if len(str) != ulidEncodedSize { - return false - } - - // Check if all the characters in a base32 encoded ULID are part of the - // expected base32 character set. - if ulidDec[str[0]] == 0xFF || - ulidDec[str[1]] == 0xFF || - ulidDec[str[2]] == 0xFF || - ulidDec[str[3]] == 0xFF || - ulidDec[str[4]] == 0xFF || - ulidDec[str[5]] == 0xFF || - ulidDec[str[6]] == 0xFF || - ulidDec[str[7]] == 0xFF || - ulidDec[str[8]] == 0xFF || - ulidDec[str[9]] == 0xFF || - ulidDec[str[10]] == 0xFF || - ulidDec[str[11]] == 0xFF || - ulidDec[str[12]] == 0xFF || - ulidDec[str[13]] == 0xFF || - ulidDec[str[14]] == 0xFF || - ulidDec[str[15]] == 0xFF || - ulidDec[str[16]] == 0xFF || - ulidDec[str[17]] == 0xFF || - ulidDec[str[18]] == 0xFF || - ulidDec[str[19]] == 0xFF || - ulidDec[str[20]] == 0xFF || - ulidDec[str[21]] == 0xFF || - ulidDec[str[22]] == 0xFF || - ulidDec[str[23]] == 0xFF || - ulidDec[str[24]] == 0xFF || - ulidDec[str[25]] == 0xFF { - return false - } - - // Check if the first character in a base32 encoded ULID will overflow. This - // happens because the base32 representation encodes 130 bits, while the - // ULID is only 128 bits. - // - // See https://github.com/oklog/ulid/issues/9 for details. - if str[0] > '7' { - return false - } - return true -} - -// IsCreditCard checks if the string is a credit card. 
-func IsCreditCard(str string) bool { - sanitized := whiteSpacesAndMinus.ReplaceAllString(str, "") - if !rxCreditCard.MatchString(sanitized) { - return false - } - - number, _ := ToInt(sanitized) - number, lastDigit := number / 10, number % 10 - - var sum int64 - for i:=0; number > 0; i++ { - digit := number % 10 - - if i % 2 == 0 { - digit *= 2 - if digit > 9 { - digit -= 9 - } - } - - sum += digit - number = number / 10 - } - - return (sum + lastDigit) % 10 == 0 -} - -// IsISBN10 checks if the string is an ISBN version 10. -func IsISBN10(str string) bool { - return IsISBN(str, 10) -} - -// IsISBN13 checks if the string is an ISBN version 13. -func IsISBN13(str string) bool { - return IsISBN(str, 13) -} - -// IsISBN checks if the string is an ISBN (version 10 or 13). -// If version value is not equal to 10 or 13, it will be checks both variants. -func IsISBN(str string, version int) bool { - sanitized := whiteSpacesAndMinus.ReplaceAllString(str, "") - var checksum int32 - var i int32 - if version == 10 { - if !rxISBN10.MatchString(sanitized) { - return false - } - for i = 0; i < 9; i++ { - checksum += (i + 1) * int32(sanitized[i]-'0') - } - if sanitized[9] == 'X' { - checksum += 10 * 10 - } else { - checksum += 10 * int32(sanitized[9]-'0') - } - if checksum%11 == 0 { - return true - } - return false - } else if version == 13 { - if !rxISBN13.MatchString(sanitized) { - return false - } - factor := []int32{1, 3} - for i = 0; i < 12; i++ { - checksum += factor[i%2] * int32(sanitized[i]-'0') - } - return (int32(sanitized[12]-'0'))-((10-(checksum%10))%10) == 0 - } - return IsISBN(str, 10) || IsISBN(str, 13) -} - -// IsJSON checks if the string is valid JSON (note: uses json.Unmarshal). -func IsJSON(str string) bool { - var js json.RawMessage - return json.Unmarshal([]byte(str), &js) == nil -} - -// IsMultibyte checks if the string contains one or more multibyte chars. Empty string is valid. 
-func IsMultibyte(str string) bool { - if IsNull(str) { - return true - } - return rxMultibyte.MatchString(str) -} - -// IsASCII checks if the string contains ASCII chars only. Empty string is valid. -func IsASCII(str string) bool { - if IsNull(str) { - return true - } - return rxASCII.MatchString(str) -} - -// IsPrintableASCII checks if the string contains printable ASCII chars only. Empty string is valid. -func IsPrintableASCII(str string) bool { - if IsNull(str) { - return true - } - return rxPrintableASCII.MatchString(str) -} - -// IsFullWidth checks if the string contains any full-width chars. Empty string is valid. -func IsFullWidth(str string) bool { - if IsNull(str) { - return true - } - return rxFullWidth.MatchString(str) -} - -// IsHalfWidth checks if the string contains any half-width chars. Empty string is valid. -func IsHalfWidth(str string) bool { - if IsNull(str) { - return true - } - return rxHalfWidth.MatchString(str) -} - -// IsVariableWidth checks if the string contains a mixture of full and half-width chars. Empty string is valid. -func IsVariableWidth(str string) bool { - if IsNull(str) { - return true - } - return rxHalfWidth.MatchString(str) && rxFullWidth.MatchString(str) -} - -// IsBase64 checks if a string is base64 encoded. -func IsBase64(str string) bool { - return rxBase64.MatchString(str) -} - -// IsFilePath checks is a string is Win or Unix file path and returns it's type. 
-func IsFilePath(str string) (bool, int) { - if rxWinPath.MatchString(str) { - //check windows path limit see: - // http://msdn.microsoft.com/en-us/library/aa365247(VS.85).aspx#maxpath - if len(str[3:]) > 32767 { - return false, Win - } - return true, Win - } else if rxUnixPath.MatchString(str) { - return true, Unix - } - return false, Unknown -} - -//IsWinFilePath checks both relative & absolute paths in Windows -func IsWinFilePath(str string) bool { - if rxARWinPath.MatchString(str) { - //check windows path limit see: - // http://msdn.microsoft.com/en-us/library/aa365247(VS.85).aspx#maxpath - if len(str[3:]) > 32767 { - return false - } - return true - } - return false -} - -//IsUnixFilePath checks both relative & absolute paths in Unix -func IsUnixFilePath(str string) bool { - if rxARUnixPath.MatchString(str) { - return true - } - return false -} - -// IsDataURI checks if a string is base64 encoded data URI such as an image -func IsDataURI(str string) bool { - dataURI := strings.Split(str, ",") - if !rxDataURI.MatchString(dataURI[0]) { - return false - } - return IsBase64(dataURI[1]) -} - -// IsMagnetURI checks if a string is valid magnet URI -func IsMagnetURI(str string) bool { - return rxMagnetURI.MatchString(str) -} - -// IsISO3166Alpha2 checks if a string is valid two-letter country code -func IsISO3166Alpha2(str string) bool { - for _, entry := range ISO3166List { - if str == entry.Alpha2Code { - return true - } - } - return false -} - -// IsISO3166Alpha3 checks if a string is valid three-letter country code -func IsISO3166Alpha3(str string) bool { - for _, entry := range ISO3166List { - if str == entry.Alpha3Code { - return true - } - } - return false -} - -// IsISO693Alpha2 checks if a string is valid two-letter language code -func IsISO693Alpha2(str string) bool { - for _, entry := range ISO693List { - if str == entry.Alpha2Code { - return true - } - } - return false -} - -// IsISO693Alpha3b checks if a string is valid three-letter language code -func 
IsISO693Alpha3b(str string) bool { - for _, entry := range ISO693List { - if str == entry.Alpha3bCode { - return true - } - } - return false -} - -// IsDNSName will validate the given string as a DNS name -func IsDNSName(str string) bool { - if str == "" || len(strings.Replace(str, ".", "", -1)) > 255 { - // constraints already violated - return false - } - return !IsIP(str) && rxDNSName.MatchString(str) -} - -// IsHash checks if a string is a hash of type algorithm. -// Algorithm is one of ['md4', 'md5', 'sha1', 'sha256', 'sha384', 'sha512', 'ripemd128', 'ripemd160', 'tiger128', 'tiger160', 'tiger192', 'crc32', 'crc32b'] -func IsHash(str string, algorithm string) bool { - var len string - algo := strings.ToLower(algorithm) - - if algo == "crc32" || algo == "crc32b" { - len = "8" - } else if algo == "md5" || algo == "md4" || algo == "ripemd128" || algo == "tiger128" { - len = "32" - } else if algo == "sha1" || algo == "ripemd160" || algo == "tiger160" { - len = "40" - } else if algo == "tiger192" { - len = "48" - } else if algo == "sha3-224" { - len = "56" - } else if algo == "sha256" || algo == "sha3-256" { - len = "64" - } else if algo == "sha384" || algo == "sha3-384" { - len = "96" - } else if algo == "sha512" || algo == "sha3-512" { - len = "128" - } else { - return false - } - - return Matches(str, "^[a-f0-9]{"+len+"}$") -} - -// IsSHA3224 checks is a string is a SHA3-224 hash. Alias for `IsHash(str, "sha3-224")` -func IsSHA3224(str string) bool { - return IsHash(str, "sha3-224") -} - -// IsSHA3256 checks is a string is a SHA3-256 hash. Alias for `IsHash(str, "sha3-256")` -func IsSHA3256(str string) bool { - return IsHash(str, "sha3-256") -} - -// IsSHA3384 checks is a string is a SHA3-384 hash. Alias for `IsHash(str, "sha3-384")` -func IsSHA3384(str string) bool { - return IsHash(str, "sha3-384") -} - -// IsSHA3512 checks is a string is a SHA3-512 hash. 
Alias for `IsHash(str, "sha3-512")` -func IsSHA3512(str string) bool { - return IsHash(str, "sha3-512") -} - -// IsSHA512 checks is a string is a SHA512 hash. Alias for `IsHash(str, "sha512")` -func IsSHA512(str string) bool { - return IsHash(str, "sha512") -} - -// IsSHA384 checks is a string is a SHA384 hash. Alias for `IsHash(str, "sha384")` -func IsSHA384(str string) bool { - return IsHash(str, "sha384") -} - -// IsSHA256 checks is a string is a SHA256 hash. Alias for `IsHash(str, "sha256")` -func IsSHA256(str string) bool { - return IsHash(str, "sha256") -} - -// IsTiger192 checks is a string is a Tiger192 hash. Alias for `IsHash(str, "tiger192")` -func IsTiger192(str string) bool { - return IsHash(str, "tiger192") -} - -// IsTiger160 checks is a string is a Tiger160 hash. Alias for `IsHash(str, "tiger160")` -func IsTiger160(str string) bool { - return IsHash(str, "tiger160") -} - -// IsRipeMD160 checks is a string is a RipeMD160 hash. Alias for `IsHash(str, "ripemd160")` -func IsRipeMD160(str string) bool { - return IsHash(str, "ripemd160") -} - -// IsSHA1 checks is a string is a SHA-1 hash. Alias for `IsHash(str, "sha1")` -func IsSHA1(str string) bool { - return IsHash(str, "sha1") -} - -// IsTiger128 checks is a string is a Tiger128 hash. Alias for `IsHash(str, "tiger128")` -func IsTiger128(str string) bool { - return IsHash(str, "tiger128") -} - -// IsRipeMD128 checks is a string is a RipeMD128 hash. Alias for `IsHash(str, "ripemd128")` -func IsRipeMD128(str string) bool { - return IsHash(str, "ripemd128") -} - -// IsCRC32 checks is a string is a CRC32 hash. Alias for `IsHash(str, "crc32")` -func IsCRC32(str string) bool { - return IsHash(str, "crc32") -} - -// IsCRC32b checks is a string is a CRC32b hash. Alias for `IsHash(str, "crc32b")` -func IsCRC32b(str string) bool { - return IsHash(str, "crc32b") -} - -// IsMD5 checks is a string is a MD5 hash. 
Alias for `IsHash(str, "md5")` -func IsMD5(str string) bool { - return IsHash(str, "md5") -} - -// IsMD4 checks is a string is a MD4 hash. Alias for `IsHash(str, "md4")` -func IsMD4(str string) bool { - return IsHash(str, "md4") -} - -// IsDialString validates the given string for usage with the various Dial() functions -func IsDialString(str string) bool { - if h, p, err := net.SplitHostPort(str); err == nil && h != "" && p != "" && (IsDNSName(h) || IsIP(h)) && IsPort(p) { - return true - } - - return false -} - -// IsIP checks if a string is either IP version 4 or 6. Alias for `net.ParseIP` -func IsIP(str string) bool { - return net.ParseIP(str) != nil -} - -// IsPort checks if a string represents a valid port -func IsPort(str string) bool { - if i, err := strconv.Atoi(str); err == nil && i > 0 && i < 65536 { - return true - } - return false -} - -// IsIPv4 checks if the string is an IP version 4. -func IsIPv4(str string) bool { - ip := net.ParseIP(str) - return ip != nil && strings.Contains(str, ".") -} - -// IsIPv6 checks if the string is an IP version 6. -func IsIPv6(str string) bool { - ip := net.ParseIP(str) - return ip != nil && strings.Contains(str, ":") -} - -// IsCIDR checks if the string is an valid CIDR notiation (IPV4 & IPV6) -func IsCIDR(str string) bool { - _, _, err := net.ParseCIDR(str) - return err == nil -} - -// IsMAC checks if a string is valid MAC address. -// Possible MAC formats: -// 01:23:45:67:89:ab -// 01:23:45:67:89:ab:cd:ef -// 01-23-45-67-89-ab -// 01-23-45-67-89-ab-cd-ef -// 0123.4567.89ab -// 0123.4567.89ab.cdef -func IsMAC(str string) bool { - _, err := net.ParseMAC(str) - return err == nil -} - -// IsHost checks if the string is a valid IP (both v4 and v6) or a valid DNS name -func IsHost(str string) bool { - return IsIP(str) || IsDNSName(str) -} - -// IsMongoID checks if the string is a valid hex-encoded representation of a MongoDB ObjectId. 
-func IsMongoID(str string) bool { - return rxHexadecimal.MatchString(str) && (len(str) == 24) -} - -// IsLatitude checks if a string is valid latitude. -func IsLatitude(str string) bool { - return rxLatitude.MatchString(str) -} - -// IsLongitude checks if a string is valid longitude. -func IsLongitude(str string) bool { - return rxLongitude.MatchString(str) -} - -// IsIMEI checks if a string is valid IMEI -func IsIMEI(str string) bool { - return rxIMEI.MatchString(str) -} - -// IsIMSI checks if a string is valid IMSI -func IsIMSI(str string) bool { - if !rxIMSI.MatchString(str) { - return false - } - - mcc, err := strconv.ParseInt(str[0:3], 10, 32) - if err != nil { - return false - } - - switch mcc { - case 202, 204, 206, 208, 212, 213, 214, 216, 218, 219: - case 220, 221, 222, 226, 228, 230, 231, 232, 234, 235: - case 238, 240, 242, 244, 246, 247, 248, 250, 255, 257: - case 259, 260, 262, 266, 268, 270, 272, 274, 276, 278: - case 280, 282, 283, 284, 286, 288, 289, 290, 292, 293: - case 294, 295, 297, 302, 308, 310, 311, 312, 313, 314: - case 315, 316, 330, 332, 334, 338, 340, 342, 344, 346: - case 348, 350, 352, 354, 356, 358, 360, 362, 363, 364: - case 365, 366, 368, 370, 372, 374, 376, 400, 401, 402: - case 404, 405, 406, 410, 412, 413, 414, 415, 416, 417: - case 418, 419, 420, 421, 422, 424, 425, 426, 427, 428: - case 429, 430, 431, 432, 434, 436, 437, 438, 440, 441: - case 450, 452, 454, 455, 456, 457, 460, 461, 466, 467: - case 470, 472, 502, 505, 510, 514, 515, 520, 525, 528: - case 530, 536, 537, 539, 540, 541, 542, 543, 544, 545: - case 546, 547, 548, 549, 550, 551, 552, 553, 554, 555: - case 602, 603, 604, 605, 606, 607, 608, 609, 610, 611: - case 612, 613, 614, 615, 616, 617, 618, 619, 620, 621: - case 622, 623, 624, 625, 626, 627, 628, 629, 630, 631: - case 632, 633, 634, 635, 636, 637, 638, 639, 640, 641: - case 642, 643, 645, 646, 647, 648, 649, 650, 651, 652: - case 653, 654, 655, 657, 658, 659, 702, 704, 706, 708: - case 710, 712, 714, 716, 722, 
724, 730, 732, 734, 736: - case 738, 740, 742, 744, 746, 748, 750, 995: - return true - default: - return false - } - return true -} - -// IsRsaPublicKey checks if a string is valid public key with provided length -func IsRsaPublicKey(str string, keylen int) bool { - bb := bytes.NewBufferString(str) - pemBytes, err := ioutil.ReadAll(bb) - if err != nil { - return false - } - block, _ := pem.Decode(pemBytes) - if block != nil && block.Type != "PUBLIC KEY" { - return false - } - var der []byte - - if block != nil { - der = block.Bytes - } else { - der, err = base64.StdEncoding.DecodeString(str) - if err != nil { - return false - } - } - - key, err := x509.ParsePKIXPublicKey(der) - if err != nil { - return false - } - pubkey, ok := key.(*rsa.PublicKey) - if !ok { - return false - } - bitlen := len(pubkey.N.Bytes()) * 8 - return bitlen == int(keylen) -} - -// IsRegex checks if a give string is a valid regex with RE2 syntax or not -func IsRegex(str string) bool { - if _, err := regexp.Compile(str); err == nil { - return true - } - return false -} - -func toJSONName(tag string) string { - if tag == "" { - return "" - } - - // JSON name always comes first. If there's no options then split[0] is - // JSON name, if JSON name is not set, then split[0] is an empty string. - split := strings.SplitN(tag, ",", 2) - - name := split[0] - - // However it is possible that the field is skipped when - // (de-)serializing from/to JSON, in which case assume that there is no - // tag name to use - if name == "-" { - return "" - } - return name -} - -func prependPathToErrors(err error, path string) error { - switch err2 := err.(type) { - case Error: - err2.Path = append([]string{path}, err2.Path...) 
- return err2 - case Errors: - errors := err2.Errors() - for i, err3 := range errors { - errors[i] = prependPathToErrors(err3, path) - } - return err2 - } - return err -} - -// ValidateArray performs validation according to condition iterator that validates every element of the array -func ValidateArray(array []interface{}, iterator ConditionIterator) bool { - return Every(array, iterator) -} - -// ValidateMap use validation map for fields. -// result will be equal to `false` if there are any errors. -// s is the map containing the data to be validated. -// m is the validation map in the form: -// map[string]interface{}{"name":"required,alpha","address":map[string]interface{}{"line1":"required,alphanum"}} -func ValidateMap(s map[string]interface{}, m map[string]interface{}) (bool, error) { - if s == nil { - return true, nil - } - result := true - var err error - var errs Errors - var index int - val := reflect.ValueOf(s) - for key, value := range s { - presentResult := true - validator, ok := m[key] - if !ok { - presentResult = false - var err error - err = fmt.Errorf("all map keys has to be present in the validation map; got %s", key) - err = prependPathToErrors(err, key) - errs = append(errs, err) - } - valueField := reflect.ValueOf(value) - mapResult := true - typeResult := true - structResult := true - resultField := true - switch subValidator := validator.(type) { - case map[string]interface{}: - var err error - if v, ok := value.(map[string]interface{}); !ok { - mapResult = false - err = fmt.Errorf("map validator has to be for the map type only; got %s", valueField.Type().String()) - err = prependPathToErrors(err, key) - errs = append(errs, err) - } else { - mapResult, err = ValidateMap(v, subValidator) - if err != nil { - mapResult = false - err = prependPathToErrors(err, key) - errs = append(errs, err) - } - } - case string: - if (valueField.Kind() == reflect.Struct || - (valueField.Kind() == reflect.Ptr && valueField.Elem().Kind() == reflect.Struct)) && - 
subValidator != "-" { - var err error - structResult, err = ValidateStruct(valueField.Interface()) - if err != nil { - err = prependPathToErrors(err, key) - errs = append(errs, err) - } - } - resultField, err = typeCheck(valueField, reflect.StructField{ - Name: key, - PkgPath: "", - Type: val.Type(), - Tag: reflect.StructTag(fmt.Sprintf("%s:%q", tagName, subValidator)), - Offset: 0, - Index: []int{index}, - Anonymous: false, - }, val, nil) - if err != nil { - errs = append(errs, err) - } - case nil: - // already handlerd when checked before - default: - typeResult = false - err = fmt.Errorf("map validator has to be either map[string]interface{} or string; got %s", valueField.Type().String()) - err = prependPathToErrors(err, key) - errs = append(errs, err) - } - result = result && presentResult && typeResult && resultField && structResult && mapResult - index++ - } - // checks required keys - requiredResult := true - for key, value := range m { - if schema, ok := value.(string); ok { - tags := parseTagIntoMap(schema) - if required, ok := tags["required"]; ok { - if _, ok := s[key]; !ok { - requiredResult = false - if required.customErrorMessage != "" { - err = Error{key, fmt.Errorf(required.customErrorMessage), true, "required", []string{}} - } else { - err = Error{key, fmt.Errorf("required field missing"), false, "required", []string{}} - } - errs = append(errs, err) - } - } - } - } - - if len(errs) > 0 { - err = errs - } - return result && requiredResult, err -} - -// ValidateStruct use tags for fields. -// result will be equal to `false` if there are any errors. 
-// todo currently there is no guarantee that errors will be returned in predictable order (tests may to fail) -func ValidateStruct(s interface{}) (bool, error) { - if s == nil { - return true, nil - } - result := true - var err error - val := reflect.ValueOf(s) - if val.Kind() == reflect.Interface || val.Kind() == reflect.Ptr { - val = val.Elem() - } - // we only accept structs - if val.Kind() != reflect.Struct { - return false, fmt.Errorf("function only accepts structs; got %s", val.Kind()) - } - var errs Errors - for i := 0; i < val.NumField(); i++ { - valueField := val.Field(i) - typeField := val.Type().Field(i) - if typeField.PkgPath != "" { - continue // Private field - } - structResult := true - if valueField.Kind() == reflect.Interface { - valueField = valueField.Elem() - } - if (valueField.Kind() == reflect.Struct || - (valueField.Kind() == reflect.Ptr && valueField.Elem().Kind() == reflect.Struct)) && - typeField.Tag.Get(tagName) != "-" { - var err error - structResult, err = ValidateStruct(valueField.Interface()) - if err != nil { - err = prependPathToErrors(err, typeField.Name) - errs = append(errs, err) - } - } - resultField, err2 := typeCheck(valueField, typeField, val, nil) - if err2 != nil { - - // Replace structure name with JSON name if there is a tag on the variable - jsonTag := toJSONName(typeField.Tag.Get("json")) - if jsonTag != "" { - switch jsonError := err2.(type) { - case Error: - jsonError.Name = jsonTag - err2 = jsonError - case Errors: - for i2, err3 := range jsonError { - switch customErr := err3.(type) { - case Error: - customErr.Name = jsonTag - jsonError[i2] = customErr - } - } - - err2 = jsonError - } - } - - errs = append(errs, err2) - } - result = result && resultField && structResult - } - if len(errs) > 0 { - err = errs - } - return result, err -} - -// ValidateStructAsync performs async validation of the struct and returns results through the channels -func ValidateStructAsync(s interface{}) (<-chan bool, <-chan error) { - res 
:= make(chan bool) - errors := make(chan error) - - go func() { - defer close(res) - defer close(errors) - - isValid, isFailed := ValidateStruct(s) - - res <- isValid - errors <- isFailed - }() - - return res, errors -} - -// ValidateMapAsync performs async validation of the map and returns results through the channels -func ValidateMapAsync(s map[string]interface{}, m map[string]interface{}) (<-chan bool, <-chan error) { - res := make(chan bool) - errors := make(chan error) - - go func() { - defer close(res) - defer close(errors) - - isValid, isFailed := ValidateMap(s, m) - - res <- isValid - errors <- isFailed - }() - - return res, errors -} - -// parseTagIntoMap parses a struct tag `valid:required~Some error message,length(2|3)` into map[string]string{"required": "Some error message", "length(2|3)": ""} -func parseTagIntoMap(tag string) tagOptionsMap { - optionsMap := make(tagOptionsMap) - options := strings.Split(tag, ",") - - for i, option := range options { - option = strings.TrimSpace(option) - - validationOptions := strings.Split(option, "~") - if !isValidTag(validationOptions[0]) { - continue - } - if len(validationOptions) == 2 { - optionsMap[validationOptions[0]] = tagOption{validationOptions[0], validationOptions[1], i} - } else { - optionsMap[validationOptions[0]] = tagOption{validationOptions[0], "", i} - } - } - return optionsMap -} - -func isValidTag(s string) bool { - if s == "" { - return false - } - for _, c := range s { - switch { - case strings.ContainsRune("\\'\"!#$%&()*+-./:<=>?@[]^_{|}~ ", c): - // Backslash and quote chars are reserved, but - // otherwise any punctuation chars are allowed - // in a tag name. - default: - if !unicode.IsLetter(c) && !unicode.IsDigit(c) { - return false - } - } - } - return true -} - -// IsSSN will validate the given string as a U.S. 
Social Security Number -func IsSSN(str string) bool { - if str == "" || len(str) != 11 { - return false - } - return rxSSN.MatchString(str) -} - -// IsSemver checks if string is valid semantic version -func IsSemver(str string) bool { - return rxSemver.MatchString(str) -} - -// IsType checks if interface is of some type -func IsType(v interface{}, params ...string) bool { - if len(params) == 1 { - typ := params[0] - return strings.Replace(reflect.TypeOf(v).String(), " ", "", -1) == strings.Replace(typ, " ", "", -1) - } - return false -} - -// IsTime checks if string is valid according to given format -func IsTime(str string, format string) bool { - _, err := time.Parse(format, str) - return err == nil -} - -// IsUnixTime checks if string is valid unix timestamp value -func IsUnixTime(str string) bool { - if _, err := strconv.Atoi(str); err == nil { - return true - } - return false -} - -// IsRFC3339 checks if string is valid timestamp value according to RFC3339 -func IsRFC3339(str string) bool { - return IsTime(str, time.RFC3339) -} - -// IsRFC3339WithoutZone checks if string is valid timestamp value according to RFC3339 which excludes the timezone. -func IsRFC3339WithoutZone(str string) bool { - return IsTime(str, rfc3339WithoutZone) -} - -// IsISO4217 checks if string is valid ISO currency code -func IsISO4217(str string) bool { - for _, currency := range ISO4217List { - if str == currency { - return true - } - } - - return false -} - -// ByteLength checks string's length -func ByteLength(str string, params ...string) bool { - if len(params) == 2 { - min, _ := ToInt(params[0]) - max, _ := ToInt(params[1]) - return len(str) >= int(min) && len(str) <= int(max) - } - - return false -} - -// RuneLength checks string's length -// Alias for StringLength -func RuneLength(str string, params ...string) bool { - return StringLength(str, params...) 
-} - -// IsRsaPub checks whether string is valid RSA key -// Alias for IsRsaPublicKey -func IsRsaPub(str string, params ...string) bool { - if len(params) == 1 { - len, _ := ToInt(params[0]) - return IsRsaPublicKey(str, int(len)) - } - - return false -} - -// StringMatches checks if a string matches a given pattern. -func StringMatches(s string, params ...string) bool { - if len(params) == 1 { - pattern := params[0] - return Matches(s, pattern) - } - return false -} - -// StringLength checks string's length (including multi byte strings) -func StringLength(str string, params ...string) bool { - - if len(params) == 2 { - strLength := utf8.RuneCountInString(str) - min, _ := ToInt(params[0]) - max, _ := ToInt(params[1]) - return strLength >= int(min) && strLength <= int(max) - } - - return false -} - -// MinStringLength checks string's minimum length (including multi byte strings) -func MinStringLength(str string, params ...string) bool { - - if len(params) == 1 { - strLength := utf8.RuneCountInString(str) - min, _ := ToInt(params[0]) - return strLength >= int(min) - } - - return false -} - -// MaxStringLength checks string's maximum length (including multi byte strings) -func MaxStringLength(str string, params ...string) bool { - - if len(params) == 1 { - strLength := utf8.RuneCountInString(str) - max, _ := ToInt(params[0]) - return strLength <= int(max) - } - - return false -} - -// Range checks string's length -func Range(str string, params ...string) bool { - if len(params) == 2 { - value, _ := ToFloat(str) - min, _ := ToFloat(params[0]) - max, _ := ToFloat(params[1]) - return InRange(value, min, max) - } - - return false -} - -// IsInRaw checks if string is in list of allowed values -func IsInRaw(str string, params ...string) bool { - if len(params) == 1 { - rawParams := params[0] - - parsedParams := strings.Split(rawParams, "|") - - return IsIn(str, parsedParams...) 
- } - - return false -} - -// IsIn checks if string str is a member of the set of strings params -func IsIn(str string, params ...string) bool { - for _, param := range params { - if str == param { - return true - } - } - - return false -} - -func checkRequired(v reflect.Value, t reflect.StructField, options tagOptionsMap) (bool, error) { - if nilPtrAllowedByRequired { - k := v.Kind() - if (k == reflect.Ptr || k == reflect.Interface) && v.IsNil() { - return true, nil - } - } - - if requiredOption, isRequired := options["required"]; isRequired { - if len(requiredOption.customErrorMessage) > 0 { - return false, Error{t.Name, fmt.Errorf(requiredOption.customErrorMessage), true, "required", []string{}} - } - return false, Error{t.Name, fmt.Errorf("non zero value required"), false, "required", []string{}} - } else if _, isOptional := options["optional"]; fieldsRequiredByDefault && !isOptional { - return false, Error{t.Name, fmt.Errorf("Missing required field"), false, "required", []string{}} - } - // not required and empty is valid - return true, nil -} - -func typeCheck(v reflect.Value, t reflect.StructField, o reflect.Value, options tagOptionsMap) (isValid bool, resultErr error) { - if !v.IsValid() { - return false, nil - } - - tag := t.Tag.Get(tagName) - - // checks if the field should be ignored - switch tag { - case "": - if v.Kind() != reflect.Slice && v.Kind() != reflect.Map { - if !fieldsRequiredByDefault { - return true, nil - } - return false, Error{t.Name, fmt.Errorf("All fields are required to at least have one validation defined"), false, "required", []string{}} - } - case "-": - return true, nil - } - - isRootType := false - if options == nil { - isRootType = true - options = parseTagIntoMap(tag) - } - - if isEmptyValue(v) { - // an empty value is not validated, checks only required - isValid, resultErr = checkRequired(v, t, options) - for key := range options { - delete(options, key) - } - return isValid, resultErr - } - - var customTypeErrors Errors - 
optionsOrder := options.orderedKeys() - for _, validatorName := range optionsOrder { - validatorStruct := options[validatorName] - if validatefunc, ok := CustomTypeTagMap.Get(validatorName); ok { - delete(options, validatorName) - - if result := validatefunc(v.Interface(), o.Interface()); !result { - if len(validatorStruct.customErrorMessage) > 0 { - customTypeErrors = append(customTypeErrors, Error{Name: t.Name, Err: TruncatingErrorf(validatorStruct.customErrorMessage, fmt.Sprint(v), validatorName), CustomErrorMessageExists: true, Validator: stripParams(validatorName)}) - continue - } - customTypeErrors = append(customTypeErrors, Error{Name: t.Name, Err: fmt.Errorf("%s does not validate as %s", fmt.Sprint(v), validatorName), CustomErrorMessageExists: false, Validator: stripParams(validatorName)}) - } - } - } - - if len(customTypeErrors.Errors()) > 0 { - return false, customTypeErrors - } - - if isRootType { - // Ensure that we've checked the value by all specified validators before report that the value is valid - defer func() { - delete(options, "optional") - delete(options, "required") - - if isValid && resultErr == nil && len(options) != 0 { - optionsOrder := options.orderedKeys() - for _, validator := range optionsOrder { - isValid = false - resultErr = Error{t.Name, fmt.Errorf( - "The following validator is invalid or can't be applied to the field: %q", validator), false, stripParams(validator), []string{}} - return - } - } - }() - } - - for _, validatorSpec := range optionsOrder { - validatorStruct := options[validatorSpec] - var negate bool - validator := validatorSpec - customMsgExists := len(validatorStruct.customErrorMessage) > 0 - - // checks whether the tag looks like '!something' or 'something' - if validator[0] == '!' 
{ - validator = validator[1:] - negate = true - } - - // checks for interface param validators - for key, value := range InterfaceParamTagRegexMap { - ps := value.FindStringSubmatch(validator) - if len(ps) == 0 { - continue - } - - validatefunc, ok := InterfaceParamTagMap[key] - if !ok { - continue - } - - delete(options, validatorSpec) - - field := fmt.Sprint(v) - if result := validatefunc(v.Interface(), ps[1:]...); (!result && !negate) || (result && negate) { - if customMsgExists { - return false, Error{t.Name, TruncatingErrorf(validatorStruct.customErrorMessage, field, validator), customMsgExists, stripParams(validatorSpec), []string{}} - } - if negate { - return false, Error{t.Name, fmt.Errorf("%s does validate as %s", field, validator), customMsgExists, stripParams(validatorSpec), []string{}} - } - return false, Error{t.Name, fmt.Errorf("%s does not validate as %s", field, validator), customMsgExists, stripParams(validatorSpec), []string{}} - } - } - } - - switch v.Kind() { - case reflect.Bool, - reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, - reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr, - reflect.Float32, reflect.Float64, - reflect.String: - // for each tag option checks the map of validator functions - for _, validatorSpec := range optionsOrder { - validatorStruct := options[validatorSpec] - var negate bool - validator := validatorSpec - customMsgExists := len(validatorStruct.customErrorMessage) > 0 - - // checks whether the tag looks like '!something' or 'something' - if validator[0] == '!' 
{ - validator = validator[1:] - negate = true - } - - // checks for param validators - for key, value := range ParamTagRegexMap { - ps := value.FindStringSubmatch(validator) - if len(ps) == 0 { - continue - } - - validatefunc, ok := ParamTagMap[key] - if !ok { - continue - } - - delete(options, validatorSpec) - - switch v.Kind() { - case reflect.String, - reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, - reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, - reflect.Float32, reflect.Float64: - - field := fmt.Sprint(v) // make value into string, then validate with regex - if result := validatefunc(field, ps[1:]...); (!result && !negate) || (result && negate) { - if customMsgExists { - return false, Error{t.Name, TruncatingErrorf(validatorStruct.customErrorMessage, field, validator), customMsgExists, stripParams(validatorSpec), []string{}} - } - if negate { - return false, Error{t.Name, fmt.Errorf("%s does validate as %s", field, validator), customMsgExists, stripParams(validatorSpec), []string{}} - } - return false, Error{t.Name, fmt.Errorf("%s does not validate as %s", field, validator), customMsgExists, stripParams(validatorSpec), []string{}} - } - default: - // type not yet supported, fail - return false, Error{t.Name, fmt.Errorf("Validator %s doesn't support kind %s", validator, v.Kind()), false, stripParams(validatorSpec), []string{}} - } - } - - if validatefunc, ok := TagMap[validator]; ok { - delete(options, validatorSpec) - - switch v.Kind() { - case reflect.String, - reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, - reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, - reflect.Float32, reflect.Float64: - field := fmt.Sprint(v) // make value into string, then validate with regex - if result := validatefunc(field); !result && !negate || result && negate { - if customMsgExists { - return false, Error{t.Name, TruncatingErrorf(validatorStruct.customErrorMessage, 
field, validator), customMsgExists, stripParams(validatorSpec), []string{}} - } - if negate { - return false, Error{t.Name, fmt.Errorf("%s does validate as %s", field, validator), customMsgExists, stripParams(validatorSpec), []string{}} - } - return false, Error{t.Name, fmt.Errorf("%s does not validate as %s", field, validator), customMsgExists, stripParams(validatorSpec), []string{}} - } - default: - //Not Yet Supported Types (Fail here!) - err := fmt.Errorf("Validator %s doesn't support kind %s for value %v", validator, v.Kind(), v) - return false, Error{t.Name, err, false, stripParams(validatorSpec), []string{}} - } - } - } - return true, nil - case reflect.Map: - if v.Type().Key().Kind() != reflect.String { - return false, &UnsupportedTypeError{v.Type()} - } - var sv stringValues - sv = v.MapKeys() - sort.Sort(sv) - result := true - for i, k := range sv { - var resultItem bool - var err error - if v.MapIndex(k).Kind() != reflect.Struct { - resultItem, err = typeCheck(v.MapIndex(k), t, o, options) - if err != nil { - return false, err - } - } else { - resultItem, err = ValidateStruct(v.MapIndex(k).Interface()) - if err != nil { - err = prependPathToErrors(err, t.Name+"."+sv[i].Interface().(string)) - return false, err - } - } - result = result && resultItem - } - return result, nil - case reflect.Slice, reflect.Array: - result := true - for i := 0; i < v.Len(); i++ { - var resultItem bool - var err error - if v.Index(i).Kind() != reflect.Struct { - resultItem, err = typeCheck(v.Index(i), t, o, options) - if err != nil { - return false, err - } - } else { - resultItem, err = ValidateStruct(v.Index(i).Interface()) - if err != nil { - err = prependPathToErrors(err, t.Name+"."+strconv.Itoa(i)) - return false, err - } - } - result = result && resultItem - } - return result, nil - case reflect.Interface: - // If the value is an interface then encode its element - if v.IsNil() { - return true, nil - } - return ValidateStruct(v.Interface()) - case reflect.Ptr: - // If 
the value is a pointer then checks its element - if v.IsNil() { - return true, nil - } - return typeCheck(v.Elem(), t, o, options) - case reflect.Struct: - return true, nil - default: - return false, &UnsupportedTypeError{v.Type()} - } -} - -func stripParams(validatorString string) string { - return paramsRegexp.ReplaceAllString(validatorString, "") -} - -// isEmptyValue checks whether value empty or not -func isEmptyValue(v reflect.Value) bool { - switch v.Kind() { - case reflect.String, reflect.Array: - return v.Len() == 0 - case reflect.Map, reflect.Slice: - return v.Len() == 0 || v.IsNil() - case reflect.Bool: - return !v.Bool() - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return v.Int() == 0 - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return v.Uint() == 0 - case reflect.Float32, reflect.Float64: - return v.Float() == 0 - case reflect.Interface, reflect.Ptr: - return v.IsNil() - } - - return reflect.DeepEqual(v.Interface(), reflect.Zero(v.Type()).Interface()) -} - -// ErrorByField returns error for specified field of the struct -// validated by ValidateStruct or empty string if there are no errors -// or this field doesn't exists or doesn't have any errors. -func ErrorByField(e error, field string) string { - if e == nil { - return "" - } - return ErrorsByField(e)[field] -} - -// ErrorsByField returns map of errors of the struct validated -// by ValidateStruct or empty map if there are no errors. 
-func ErrorsByField(e error) map[string]string { - m := make(map[string]string) - if e == nil { - return m - } - // prototype for ValidateStruct - - switch e := e.(type) { - case Error: - m[e.Name] = e.Err.Error() - case Errors: - for _, item := range e.Errors() { - n := ErrorsByField(item) - for k, v := range n { - m[k] = v - } - } - } - - return m -} - -// Error returns string equivalent for reflect.Type -func (e *UnsupportedTypeError) Error() string { - return "validator: unsupported type: " + e.Type.String() -} - -func (sv stringValues) Len() int { return len(sv) } -func (sv stringValues) Swap(i, j int) { sv[i], sv[j] = sv[j], sv[i] } -func (sv stringValues) Less(i, j int) bool { return sv.get(i) < sv.get(j) } -func (sv stringValues) get(i int) string { return sv[i].String() } - -func IsE164(str string) bool { - return rxE164.MatchString(str) -} diff --git a/vendor/github.com/asaskevich/govalidator/wercker.yml b/vendor/github.com/asaskevich/govalidator/wercker.yml deleted file mode 100644 index bc5f7b086..000000000 --- a/vendor/github.com/asaskevich/govalidator/wercker.yml +++ /dev/null @@ -1,15 +0,0 @@ -box: golang -build: - steps: - - setup-go-workspace - - - script: - name: go get - code: | - go version - go get -t ./... - - - script: - name: go test - code: | - go test -race -v ./... 
diff --git a/vendor/github.com/cenkalti/backoff/v5/.gitignore b/vendor/github.com/cenkalti/backoff/v5/.gitignore new file mode 100644 index 000000000..50d95c548 --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v5/.gitignore @@ -0,0 +1,25 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe + +# IDEs +.idea/ diff --git a/vendor/github.com/cenkalti/backoff/v5/CHANGELOG.md b/vendor/github.com/cenkalti/backoff/v5/CHANGELOG.md new file mode 100644 index 000000000..658c37436 --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v5/CHANGELOG.md @@ -0,0 +1,29 @@ +# Changelog + +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/), +and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). + +## [5.0.0] - 2024-12-19 + +### Added + +- RetryAfterError can be returned from an operation to indicate how long to wait before the next retry. + +### Changed + +- Retry function now accepts additional options for specifying max number of tries and max elapsed time. +- Retry function now accepts a context.Context. +- Operation function signature changed to return result (any type) and error. + +### Removed + +- RetryNotify* and RetryWithData functions. Only single Retry function remains. +- Optional arguments from ExponentialBackoff constructor. +- Clock and Timer interfaces. + +### Fixed + +- The original error is returned from Retry if there's a PermanentError. (#144) +- The Retry function respects the wrapped PermanentError. 
(#140) diff --git a/vendor/github.com/cenkalti/backoff/v5/LICENSE b/vendor/github.com/cenkalti/backoff/v5/LICENSE new file mode 100644 index 000000000..89b817996 --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v5/LICENSE @@ -0,0 +1,20 @@ +The MIT License (MIT) + +Copyright (c) 2014 Cenk Altı + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/cenkalti/backoff/v5/README.md b/vendor/github.com/cenkalti/backoff/v5/README.md new file mode 100644 index 000000000..4611b1d17 --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v5/README.md @@ -0,0 +1,31 @@ +# Exponential Backoff [![GoDoc][godoc image]][godoc] + +This is a Go port of the exponential backoff algorithm from [Google's HTTP Client Library for Java][google-http-java-client]. + +[Exponential backoff][exponential backoff wiki] +is an algorithm that uses feedback to multiplicatively decrease the rate of some process, +in order to gradually find an acceptable rate. 
+The retries exponentially increase and stop increasing when a certain threshold is met. + +## Usage + +Import path is `github.com/cenkalti/backoff/v5`. Please note the version part at the end. + +For most cases, use `Retry` function. See [example_test.go][example] for an example. + +If you have specific needs, copy `Retry` function (from [retry.go][retry-src]) into your code and modify it as needed. + +## Contributing + +* I would like to keep this library as small as possible. +* Please don't send a PR without opening an issue and discussing it first. +* If proposed change is not a common use case, I will probably not accept it. + +[godoc]: https://pkg.go.dev/github.com/cenkalti/backoff/v5 +[godoc image]: https://godoc.org/github.com/cenkalti/backoff?status.png + +[google-http-java-client]: https://github.com/google/google-http-java-client/blob/da1aa993e90285ec18579f1553339b00e19b3ab5/google-http-client/src/main/java/com/google/api/client/util/ExponentialBackOff.java +[exponential backoff wiki]: http://en.wikipedia.org/wiki/Exponential_backoff + +[retry-src]: https://github.com/cenkalti/backoff/blob/v5/retry.go +[example]: https://github.com/cenkalti/backoff/blob/v5/example_test.go diff --git a/vendor/github.com/cenkalti/backoff/v5/backoff.go b/vendor/github.com/cenkalti/backoff/v5/backoff.go new file mode 100644 index 000000000..dd2b24ca7 --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v5/backoff.go @@ -0,0 +1,66 @@ +// Package backoff implements backoff algorithms for retrying operations. +// +// Use Retry function for retrying operations that may fail. +// If Retry does not meet your needs, +// copy/paste the function into your project and modify as you wish. +// +// There is also Ticker type similar to time.Ticker. +// You can use it if you need to work with channels. +// +// See Examples section below for usage examples. +package backoff + +import "time" + +// BackOff is a backoff policy for retrying an operation. 
+type BackOff interface { + // NextBackOff returns the duration to wait before retrying the operation, + // backoff.Stop to indicate that no more retries should be made. + // + // Example usage: + // + // duration := backoff.NextBackOff() + // if duration == backoff.Stop { + // // Do not retry operation. + // } else { + // // Sleep for duration and retry operation. + // } + // + NextBackOff() time.Duration + + // Reset to initial state. + Reset() +} + +// Stop indicates that no more retries should be made for use in NextBackOff(). +const Stop time.Duration = -1 + +// ZeroBackOff is a fixed backoff policy whose backoff time is always zero, +// meaning that the operation is retried immediately without waiting, indefinitely. +type ZeroBackOff struct{} + +func (b *ZeroBackOff) Reset() {} + +func (b *ZeroBackOff) NextBackOff() time.Duration { return 0 } + +// StopBackOff is a fixed backoff policy that always returns backoff.Stop for +// NextBackOff(), meaning that the operation should never be retried. +type StopBackOff struct{} + +func (b *StopBackOff) Reset() {} + +func (b *StopBackOff) NextBackOff() time.Duration { return Stop } + +// ConstantBackOff is a backoff policy that always returns the same backoff delay. +// This is in contrast to an exponential backoff policy, +// which returns a delay that grows longer as you call NextBackOff() over and over again. 
+type ConstantBackOff struct { + Interval time.Duration +} + +func (b *ConstantBackOff) Reset() {} +func (b *ConstantBackOff) NextBackOff() time.Duration { return b.Interval } + +func NewConstantBackOff(d time.Duration) *ConstantBackOff { + return &ConstantBackOff{Interval: d} +} diff --git a/vendor/github.com/cenkalti/backoff/v5/error.go b/vendor/github.com/cenkalti/backoff/v5/error.go new file mode 100644 index 000000000..beb2b38a2 --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v5/error.go @@ -0,0 +1,46 @@ +package backoff + +import ( + "fmt" + "time" +) + +// PermanentError signals that the operation should not be retried. +type PermanentError struct { + Err error +} + +// Permanent wraps the given err in a *PermanentError. +func Permanent(err error) error { + if err == nil { + return nil + } + return &PermanentError{ + Err: err, + } +} + +// Error returns a string representation of the Permanent error. +func (e *PermanentError) Error() string { + return e.Err.Error() +} + +// Unwrap returns the wrapped error. +func (e *PermanentError) Unwrap() error { + return e.Err +} + +// RetryAfterError signals that the operation should be retried after the given duration. +type RetryAfterError struct { + Duration time.Duration +} + +// RetryAfter returns a RetryAfter error that specifies how long to wait before retrying. +func RetryAfter(seconds int) error { + return &RetryAfterError{Duration: time.Duration(seconds) * time.Second} +} + +// Error returns a string representation of the RetryAfter error. 
+func (e *RetryAfterError) Error() string { + return fmt.Sprintf("retry after %s", e.Duration) +} diff --git a/vendor/github.com/cenkalti/backoff/v5/exponential.go b/vendor/github.com/cenkalti/backoff/v5/exponential.go new file mode 100644 index 000000000..c1f3e442d --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v5/exponential.go @@ -0,0 +1,125 @@ +package backoff + +import ( + "math/rand" + "time" +) + +/* +ExponentialBackOff is a backoff implementation that increases the backoff +period for each retry attempt using a randomization function that grows exponentially. + +NextBackOff() is calculated using the following formula: + + randomized interval = + RetryInterval * (random value in range [1 - RandomizationFactor, 1 + RandomizationFactor]) + +In other words NextBackOff() will range between the randomization factor +percentage below and above the retry interval. + +For example, given the following parameters: + + RetryInterval = 2 + RandomizationFactor = 0.5 + Multiplier = 2 + +the actual backoff period used in the next retry attempt will range between 1 and 3 seconds, +multiplied by the exponential, that is, between 2 and 6 seconds. + +Note: MaxInterval caps the RetryInterval and not the randomized interval. + +If the time elapsed since an ExponentialBackOff instance is created goes past the +MaxElapsedTime, then the method NextBackOff() starts returning backoff.Stop. + +The elapsed time can be reset by calling Reset(). + +Example: Given the following default arguments, for 10 tries the sequence will be, +and assuming we go over the MaxElapsedTime on the 10th try: + + Request # RetryInterval (seconds) Randomized Interval (seconds) + + 1 0.5 [0.25, 0.75] + 2 0.75 [0.375, 1.125] + 3 1.125 [0.562, 1.687] + 4 1.687 [0.8435, 2.53] + 5 2.53 [1.265, 3.795] + 6 3.795 [1.897, 5.692] + 7 5.692 [2.846, 8.538] + 8 8.538 [4.269, 12.807] + 9 12.807 [6.403, 19.210] + 10 19.210 backoff.Stop + +Note: Implementation is not thread-safe. 
+*/ +type ExponentialBackOff struct { + InitialInterval time.Duration + RandomizationFactor float64 + Multiplier float64 + MaxInterval time.Duration + + currentInterval time.Duration +} + +// Default values for ExponentialBackOff. +const ( + DefaultInitialInterval = 500 * time.Millisecond + DefaultRandomizationFactor = 0.5 + DefaultMultiplier = 1.5 + DefaultMaxInterval = 60 * time.Second +) + +// NewExponentialBackOff creates an instance of ExponentialBackOff using default values. +func NewExponentialBackOff() *ExponentialBackOff { + return &ExponentialBackOff{ + InitialInterval: DefaultInitialInterval, + RandomizationFactor: DefaultRandomizationFactor, + Multiplier: DefaultMultiplier, + MaxInterval: DefaultMaxInterval, + } +} + +// Reset the interval back to the initial retry interval and restarts the timer. +// Reset must be called before using b. +func (b *ExponentialBackOff) Reset() { + b.currentInterval = b.InitialInterval +} + +// NextBackOff calculates the next backoff interval using the formula: +// +// Randomized interval = RetryInterval * (1 ± RandomizationFactor) +func (b *ExponentialBackOff) NextBackOff() time.Duration { + if b.currentInterval == 0 { + b.currentInterval = b.InitialInterval + } + + next := getRandomValueFromInterval(b.RandomizationFactor, rand.Float64(), b.currentInterval) + b.incrementCurrentInterval() + return next +} + +// Increments the current interval by multiplying it with the multiplier. +func (b *ExponentialBackOff) incrementCurrentInterval() { + // Check for overflow, if overflow is detected set the current interval to the max interval. + if float64(b.currentInterval) >= float64(b.MaxInterval)/b.Multiplier { + b.currentInterval = b.MaxInterval + } else { + b.currentInterval = time.Duration(float64(b.currentInterval) * b.Multiplier) + } +} + +// Returns a random value from the following interval: +// +// [currentInterval - randomizationFactor * currentInterval, currentInterval + randomizationFactor * currentInterval]. 
+func getRandomValueFromInterval(randomizationFactor, random float64, currentInterval time.Duration) time.Duration { + if randomizationFactor == 0 { + return currentInterval // make sure no randomness is used when randomizationFactor is 0. + } + var delta = randomizationFactor * float64(currentInterval) + var minInterval = float64(currentInterval) - delta + var maxInterval = float64(currentInterval) + delta + + // Get a random value from the range [minInterval, maxInterval]. + // The formula used below has a +1 because if the minInterval is 1 and the maxInterval is 3 then + // we want a 33% chance for selecting either 1, 2 or 3. + return time.Duration(minInterval + (random * (maxInterval - minInterval + 1))) +} diff --git a/vendor/github.com/cenkalti/backoff/v5/retry.go b/vendor/github.com/cenkalti/backoff/v5/retry.go new file mode 100644 index 000000000..e43f47fb8 --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v5/retry.go @@ -0,0 +1,139 @@ +package backoff + +import ( + "context" + "errors" + "time" +) + +// DefaultMaxElapsedTime sets a default limit for the total retry duration. +const DefaultMaxElapsedTime = 15 * time.Minute + +// Operation is a function that attempts an operation and may be retried. +type Operation[T any] func() (T, error) + +// Notify is a function called on operation error with the error and backoff duration. +type Notify func(error, time.Duration) + +// retryOptions holds configuration settings for the retry mechanism. +type retryOptions struct { + BackOff BackOff // Strategy for calculating backoff periods. + Timer timer // Timer to manage retry delays. + Notify Notify // Optional function to notify on each retry error. + MaxTries uint // Maximum number of retry attempts. + MaxElapsedTime time.Duration // Maximum total time for all retries. +} + +type RetryOption func(*retryOptions) + +// WithBackOff configures a custom backoff strategy. 
+func WithBackOff(b BackOff) RetryOption { + return func(args *retryOptions) { + args.BackOff = b + } +} + +// withTimer sets a custom timer for managing delays between retries. +func withTimer(t timer) RetryOption { + return func(args *retryOptions) { + args.Timer = t + } +} + +// WithNotify sets a notification function to handle retry errors. +func WithNotify(n Notify) RetryOption { + return func(args *retryOptions) { + args.Notify = n + } +} + +// WithMaxTries limits the number of retry attempts. +func WithMaxTries(n uint) RetryOption { + return func(args *retryOptions) { + args.MaxTries = n + } +} + +// WithMaxElapsedTime limits the total duration for retry attempts. +func WithMaxElapsedTime(d time.Duration) RetryOption { + return func(args *retryOptions) { + args.MaxElapsedTime = d + } +} + +// Retry attempts the operation until success, a permanent error, or backoff completion. +// It ensures the operation is executed at least once. +// +// Returns the operation result or error if retries are exhausted or context is cancelled. +func Retry[T any](ctx context.Context, operation Operation[T], opts ...RetryOption) (T, error) { + // Initialize default retry options. + args := &retryOptions{ + BackOff: NewExponentialBackOff(), + Timer: &defaultTimer{}, + MaxElapsedTime: DefaultMaxElapsedTime, + } + + // Apply user-provided options to the default settings. + for _, opt := range opts { + opt(args) + } + + defer args.Timer.Stop() + + startedAt := time.Now() + args.BackOff.Reset() + for numTries := uint(1); ; numTries++ { + // Execute the operation. + res, err := operation() + if err == nil { + return res, nil + } + + // Stop retrying if maximum tries exceeded. + if args.MaxTries > 0 && numTries >= args.MaxTries { + return res, err + } + + // Handle permanent errors without retrying. + var permanent *PermanentError + if errors.As(err, &permanent) { + return res, err + } + + // Stop retrying if context is cancelled. 
+ if cerr := context.Cause(ctx); cerr != nil { + return res, cerr + } + + // Calculate next backoff duration. + next := args.BackOff.NextBackOff() + if next == Stop { + return res, err + } + + // Reset backoff if RetryAfterError is encountered. + var retryAfter *RetryAfterError + if errors.As(err, &retryAfter) { + next = retryAfter.Duration + args.BackOff.Reset() + } + + // Stop retrying if maximum elapsed time exceeded. + if args.MaxElapsedTime > 0 && time.Since(startedAt)+next > args.MaxElapsedTime { + return res, err + } + + // Notify on error if a notifier function is provided. + if args.Notify != nil { + args.Notify(err, next) + } + + // Wait for the next backoff period or context cancellation. + args.Timer.Start(next) + select { + case <-args.Timer.C(): + case <-ctx.Done(): + return res, context.Cause(ctx) + } + } +} diff --git a/vendor/github.com/cenkalti/backoff/v5/ticker.go b/vendor/github.com/cenkalti/backoff/v5/ticker.go new file mode 100644 index 000000000..f0d4b2ae7 --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v5/ticker.go @@ -0,0 +1,83 @@ +package backoff + +import ( + "sync" + "time" +) + +// Ticker holds a channel that delivers `ticks' of a clock at times reported by a BackOff. +// +// Ticks will continue to arrive when the previous operation is still running, +// so operations that take a while to fail could run in quick succession. +type Ticker struct { + C <-chan time.Time + c chan time.Time + b BackOff + timer timer + stop chan struct{} + stopOnce sync.Once +} + +// NewTicker returns a new Ticker containing a channel that will send +// the time at times specified by the BackOff argument. Ticker is +// guaranteed to tick at least once. The channel is closed when Stop +// method is called or BackOff stops. It is not safe to manipulate the +// provided backoff policy (notably calling NextBackOff or Reset) +// while the ticker is running. 
+func NewTicker(b BackOff) *Ticker { + c := make(chan time.Time) + t := &Ticker{ + C: c, + c: c, + b: b, + timer: &defaultTimer{}, + stop: make(chan struct{}), + } + t.b.Reset() + go t.run() + return t +} + +// Stop turns off a ticker. After Stop, no more ticks will be sent. +func (t *Ticker) Stop() { + t.stopOnce.Do(func() { close(t.stop) }) +} + +func (t *Ticker) run() { + c := t.c + defer close(c) + + // Ticker is guaranteed to tick at least once. + afterC := t.send(time.Now()) + + for { + if afterC == nil { + return + } + + select { + case tick := <-afterC: + afterC = t.send(tick) + case <-t.stop: + t.c = nil // Prevent future ticks from being sent to the channel. + return + } + } +} + +func (t *Ticker) send(tick time.Time) <-chan time.Time { + select { + case t.c <- tick: + case <-t.stop: + return nil + } + + next := t.b.NextBackOff() + if next == Stop { + t.Stop() + return nil + } + + t.timer.Start(next) + return t.timer.C() +} diff --git a/vendor/github.com/cenkalti/backoff/v5/timer.go b/vendor/github.com/cenkalti/backoff/v5/timer.go new file mode 100644 index 000000000..a89530974 --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v5/timer.go @@ -0,0 +1,35 @@ +package backoff + +import "time" + +type timer interface { + Start(duration time.Duration) + Stop() + C() <-chan time.Time +} + +// defaultTimer implements Timer interface using time.Timer +type defaultTimer struct { + timer *time.Timer +} + +// C returns the timers channel which receives the current time when the timer fires. +func (t *defaultTimer) C() <-chan time.Time { + return t.timer.C +} + +// Start starts the timer to fire after the given duration +func (t *defaultTimer) Start(duration time.Duration) { + if t.timer == nil { + t.timer = time.NewTimer(duration) + } else { + t.timer.Reset(duration) + } +} + +// Stop is called when the timer is not used anymore and resources may be freed. 
+func (t *defaultTimer) Stop() { + if t.timer != nil { + t.timer.Stop() + } +} diff --git a/vendor/github.com/cenkalti/hub/.gitignore b/vendor/github.com/cenkalti/hub/.gitignore new file mode 100644 index 000000000..00268614f --- /dev/null +++ b/vendor/github.com/cenkalti/hub/.gitignore @@ -0,0 +1,22 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe diff --git a/vendor/github.com/cenkalti/hub/.travis.yml b/vendor/github.com/cenkalti/hub/.travis.yml new file mode 100644 index 000000000..d8cecb0df --- /dev/null +++ b/vendor/github.com/cenkalti/hub/.travis.yml @@ -0,0 +1,5 @@ +language: go +go: 1.13 +arch: + - amd64 + - ppc64le diff --git a/vendor/github.com/cenkalti/hub/LICENSE b/vendor/github.com/cenkalti/hub/LICENSE new file mode 100644 index 000000000..89b817996 --- /dev/null +++ b/vendor/github.com/cenkalti/hub/LICENSE @@ -0,0 +1,20 @@ +The MIT License (MIT) + +Copyright (c) 2014 Cenk Altı + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/cenkalti/hub/README.md b/vendor/github.com/cenkalti/hub/README.md new file mode 100644 index 000000000..d3f211818 --- /dev/null +++ b/vendor/github.com/cenkalti/hub/README.md @@ -0,0 +1,5 @@ +hub +=== + +[![GoDoc](https://godoc.org/github.com/cenkalti/hub?status.png)](https://godoc.org/github.com/cenkalti/hub) +[![Build Status](https://travis-ci.org/cenkalti/hub.png)](https://travis-ci.org/cenkalti/hub) diff --git a/vendor/github.com/cenkalti/hub/hub.go b/vendor/github.com/cenkalti/hub/hub.go new file mode 100644 index 000000000..24c5efa86 --- /dev/null +++ b/vendor/github.com/cenkalti/hub/hub.go @@ -0,0 +1,82 @@ +// Package hub provides a simple event dispatcher for publish/subscribe pattern. +package hub + +import "sync" + +type Kind int + +// Event is an interface for published events. +type Event interface { + Kind() Kind +} + +// Hub is an event dispatcher, publishes events to the subscribers +// which are subscribed for a specific event type. +// Optimized for publish calls. +// The handlers may be called in order different than they are registered. +type Hub struct { + subscribers map[Kind][]handler + m sync.RWMutex + seq uint64 +} + +type handler struct { + f func(Event) + id uint64 +} + +// Subscribe registers f for the event of a specific kind. 
+func (h *Hub) Subscribe(kind Kind, f func(Event)) (cancel func()) { + var cancelled bool + h.m.Lock() + h.seq++ + id := h.seq + if h.subscribers == nil { + h.subscribers = make(map[Kind][]handler) + } + h.subscribers[kind] = append(h.subscribers[kind], handler{id: id, f: f}) + h.m.Unlock() + return func() { + h.m.Lock() + if cancelled { + h.m.Unlock() + return + } + cancelled = true + a := h.subscribers[kind] + for i, f := range a { + if f.id == id { + a[i], h.subscribers[kind] = a[len(a)-1], a[:len(a)-1] + break + } + } + if len(a) == 0 { + delete(h.subscribers, kind) + } + h.m.Unlock() + } +} + +// Publish an event to the subscribers. +func (h *Hub) Publish(e Event) { + h.m.RLock() + if handlers, ok := h.subscribers[e.Kind()]; ok { + for _, h := range handlers { + h.f(e) + } + } + h.m.RUnlock() +} + +// DefaultHub is the default Hub used by Publish and Subscribe. +var DefaultHub Hub + +// Subscribe registers f for the event of a specific kind in the DefaultHub. +func Subscribe(kind Kind, f func(Event)) (cancel func()) { + return DefaultHub.Subscribe(kind, f) +} + +// Publish an event to the subscribers in DefaultHub. 
+func Publish(e Event) { + DefaultHub.Publish(e) +} diff --git a/vendor/github.com/cenkalti/rpc2/.gitignore b/vendor/github.com/cenkalti/rpc2/.gitignore new file mode 100644 index 000000000..836562412 --- /dev/null +++ b/vendor/github.com/cenkalti/rpc2/.gitignore @@ -0,0 +1,23 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test diff --git a/vendor/github.com/cenkalti/rpc2/.travis.yml b/vendor/github.com/cenkalti/rpc2/.travis.yml new file mode 100644 index 000000000..ae8233c2b --- /dev/null +++ b/vendor/github.com/cenkalti/rpc2/.travis.yml @@ -0,0 +1,9 @@ +language: go + +go: + - 1.15 + - tip + +arch: + - amd64 + - ppc64le diff --git a/vendor/github.com/cenkalti/rpc2/LICENSE b/vendor/github.com/cenkalti/rpc2/LICENSE new file mode 100644 index 000000000..d565b1b1f --- /dev/null +++ b/vendor/github.com/cenkalti/rpc2/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 Cenk Altı + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. \ No newline at end of file diff --git a/vendor/github.com/cenkalti/rpc2/README.md b/vendor/github.com/cenkalti/rpc2/README.md new file mode 100644 index 000000000..3dffd26e4 --- /dev/null +++ b/vendor/github.com/cenkalti/rpc2/README.md @@ -0,0 +1,82 @@ +rpc2 +==== + +[![GoDoc](https://godoc.org/github.com/cenkalti/rpc2?status.png)](https://godoc.org/github.com/cenkalti/rpc2) +[![Build Status](https://travis-ci.org/cenkalti/rpc2.png)](https://travis-ci.org/cenkalti/rpc2) + +rpc2 is a fork of net/rpc package in the standard library. +The main goal is to add bi-directional support to calls. +That means server can call the methods of client. +This is not possible with net/rpc package. +In order to do this it adds a `*Client` argument to method signatures. 
+ +Install +-------- + + go get github.com/cenkalti/rpc2 + +Example server +--------------- + +```go +package main + +import ( + "fmt" + "net" + + "github.com/cenkalti/rpc2" +) + +type Args struct{ A, B int } +type Reply int + +func main() { + srv := rpc2.NewServer() + srv.Handle("add", func(client *rpc2.Client, args *Args, reply *Reply) error { + + // Reversed call (server to client) + var rep Reply + client.Call("mult", Args{2, 3}, &rep) + fmt.Println("mult result:", rep) + + *reply = Reply(args.A + args.B) + return nil + }) + + lis, _ := net.Listen("tcp", "127.0.0.1:5000") + srv.Accept(lis) +} +``` + +Example Client +--------------- + +```go +package main + +import ( + "fmt" + "net" + + "github.com/cenkalti/rpc2" +) + +type Args struct{ A, B int } +type Reply int + +func main() { + conn, _ := net.Dial("tcp", "127.0.0.1:5000") + + clt := rpc2.NewClient(conn) + clt.Handle("mult", func(client *rpc2.Client, args *Args, reply *Reply) error { + *reply = Reply(args.A * args.B) + return nil + }) + go clt.Run() + + var rep Reply + clt.Call("add", Args{1, 2}, &rep) + fmt.Println("add result:", rep) +} +``` diff --git a/vendor/github.com/cenkalti/rpc2/client.go b/vendor/github.com/cenkalti/rpc2/client.go new file mode 100644 index 000000000..cc9956976 --- /dev/null +++ b/vendor/github.com/cenkalti/rpc2/client.go @@ -0,0 +1,364 @@ +// Package rpc2 provides bi-directional RPC client and server similar to net/rpc. +package rpc2 + +import ( + "context" + "errors" + "io" + "log" + "reflect" + "sync" +) + +// Client represents an RPC Client. +// There may be multiple outstanding Calls associated +// with a single Client, and a Client may be used by +// multiple goroutines simultaneously. 
+type Client struct { + mutex sync.Mutex // protects pending, seq, request + sending sync.Mutex + request Request // temp area used in send() + seq uint64 + pending map[uint64]*Call + closing bool + shutdown bool + server bool + codec Codec + handlers map[string]*handler + disconnect chan struct{} + State *State // additional information to associate with client + blocking bool // whether to block request handling +} + +// NewClient returns a new Client to handle requests to the +// set of services at the other end of the connection. +// It adds a buffer to the write side of the connection so +// the header and payload are sent as a unit. +func NewClient(conn io.ReadWriteCloser) *Client { + return NewClientWithCodec(NewGobCodec(conn)) +} + +// NewClientWithCodec is like NewClient but uses the specified +// codec to encode requests and decode responses. +func NewClientWithCodec(codec Codec) *Client { + return &Client{ + codec: codec, + pending: make(map[uint64]*Call), + handlers: make(map[string]*handler), + disconnect: make(chan struct{}), + seq: 1, // 0 means notification. + } +} + +// SetBlocking puts the client in blocking mode. +// In blocking mode, received requests are processes synchronously. +// If you have methods that may take a long time, other subsequent requests may time out. +func (c *Client) SetBlocking(blocking bool) { + c.blocking = blocking +} + +// Run the client's read loop. +// You must run this method before calling any methods on the server. +func (c *Client) Run() { + c.readLoop() +} + +// DisconnectNotify returns a channel that is closed +// when the client connection has gone away. +func (c *Client) DisconnectNotify() chan struct{} { + return c.disconnect +} + +// Handle registers the handler function for the given method. If a handler already exists for method, Handle panics. +func (c *Client) Handle(method string, handlerFunc interface{}) { + addHandler(c.handlers, method, handlerFunc) +} + +// readLoop reads messages from codec. 
+// It reads a reqeust or a response to the previous request. +// If the message is request, calls the handler function. +// If the message is response, sends the reply to the associated call. +func (c *Client) readLoop() { + var err error + var req Request + var resp Response + for err == nil { + req = Request{} + resp = Response{} + if err = c.codec.ReadHeader(&req, &resp); err != nil { + break + } + + if req.Method != "" { + // request comes to server + if err = c.readRequest(&req); err != nil { + debugln("rpc2: error reading request:", err.Error()) + } + } else { + // response comes to client + if err = c.readResponse(&resp); err != nil { + debugln("rpc2: error reading response:", err.Error()) + } + } + } + // Terminate pending calls. + c.sending.Lock() + c.mutex.Lock() + c.shutdown = true + closing := c.closing + if err == io.EOF { + if closing { + err = ErrShutdown + } else { + err = io.ErrUnexpectedEOF + } + } + for _, call := range c.pending { + call.Error = err + call.done() + } + c.mutex.Unlock() + c.sending.Unlock() + if err != io.EOF && !closing && !c.server { + debugln("rpc2: client protocol error:", err) + } + close(c.disconnect) + if !closing { + c.codec.Close() + } +} + +func (c *Client) handleRequest(req Request, method *handler, argv reflect.Value) { + // Invoke the method, providing a new value for the reply. + replyv := reflect.New(method.replyType.Elem()) + + returnValues := method.fn.Call([]reflect.Value{reflect.ValueOf(c), argv, replyv}) + + // Do not send response if request is a notification. + if req.Seq == 0 { + return + } + + // The return value for the method is an error. 
+ errInter := returnValues[0].Interface() + errmsg := "" + if errInter != nil { + errmsg = errInter.(error).Error() + } + resp := &Response{ + Seq: req.Seq, + Error: errmsg, + } + if err := c.codec.WriteResponse(resp, replyv.Interface()); err != nil { + debugln("rpc2: error writing response:", err.Error()) + } +} + +func (c *Client) readRequest(req *Request) error { + method, ok := c.handlers[req.Method] + if !ok { + resp := &Response{ + Seq: req.Seq, + Error: "rpc2: can't find method " + req.Method, + } + return c.codec.WriteResponse(resp, resp) + } + + // Decode the argument value. + var argv reflect.Value + argIsValue := false // if true, need to indirect before calling. + if method.argType.Kind() == reflect.Ptr { + argv = reflect.New(method.argType.Elem()) + } else { + argv = reflect.New(method.argType) + argIsValue = true + } + // argv guaranteed to be a pointer now. + if err := c.codec.ReadRequestBody(argv.Interface()); err != nil { + return err + } + if argIsValue { + argv = argv.Elem() + } + + if c.blocking { + c.handleRequest(*req, method, argv) + } else { + go c.handleRequest(*req, method, argv) + } + + return nil +} + +func (c *Client) readResponse(resp *Response) error { + seq := resp.Seq + c.mutex.Lock() + call := c.pending[seq] + delete(c.pending, seq) + c.mutex.Unlock() + + var err error + switch { + case call == nil: + // We've got no pending call. That usually means that + // WriteRequest partially failed, and call was already + // removed; response is a server telling us about an + // error reading request body. We should still attempt + // to read error body, but there's no one to give it to. + err = c.codec.ReadResponseBody(nil) + if err != nil { + err = errors.New("reading error body: " + err.Error()) + } + case resp.Error != "": + // We've got an error response. Give this to the request; + // any subsequent requests will get the ReadResponseBody + // error if there is one. 
+ call.Error = ServerError(resp.Error) + err = c.codec.ReadResponseBody(nil) + if err != nil { + err = errors.New("reading error body: " + err.Error()) + } + call.done() + default: + err = c.codec.ReadResponseBody(call.Reply) + if err != nil { + call.Error = errors.New("reading body " + err.Error()) + } + call.done() + } + + return err +} + +// Close waits for active calls to finish and closes the codec. +func (c *Client) Close() error { + c.mutex.Lock() + if c.shutdown || c.closing { + c.mutex.Unlock() + return ErrShutdown + } + c.closing = true + c.mutex.Unlock() + return c.codec.Close() +} + +// Go invokes the function asynchronously. It returns the Call structure representing +// the invocation. The done channel will signal when the call is complete by returning +// the same Call object. If done is nil, Go will allocate a new channel. +// If non-nil, done must be buffered or Go will deliberately crash. +func (c *Client) Go(method string, args interface{}, reply interface{}, done chan *Call) *Call { + call := new(Call) + call.Method = method + call.Args = args + call.Reply = reply + if done == nil { + done = make(chan *Call, 10) // buffered. + } else { + // If caller passes done != nil, it must arrange that + // done has enough buffer for the number of simultaneous + // RPCs that will be using that channel. If the channel + // is totally unbuffered, it's best not to run at all. + if cap(done) == 0 { + log.Panic("rpc2: done channel is unbuffered") + } + } + call.Done = done + c.send(call) + return call +} + +// CallWithContext invokes the named function, waits for it to complete, and +// returns its error status, or an error from Context timeout. 
+func (c *Client) CallWithContext(ctx context.Context, method string, args interface{}, reply interface{}) error { + call := c.Go(method, args, reply, make(chan *Call, 1)) + select { + case <-call.Done: + return call.Error + case <-ctx.Done(): + return ctx.Err() + } + return nil +} + +// Call invokes the named function, waits for it to complete, and returns its error status. +func (c *Client) Call(method string, args interface{}, reply interface{}) error { + return c.CallWithContext(context.Background(), method, args, reply) +} + +func (call *Call) done() { + select { + case call.Done <- call: + // ok + default: + // We don't want to block here. It is the caller's responsibility to make + // sure the channel has enough buffer space. See comment in Go(). + debugln("rpc2: discarding Call reply due to insufficient Done chan capacity") + } +} + +// ServerError represents an error that has been returned from +// the remote side of the RPC connection. +type ServerError string + +func (e ServerError) Error() string { + return string(e) +} + +// ErrShutdown is returned when the connection is closing or closed. +var ErrShutdown = errors.New("connection is shut down") + +// Call represents an active RPC. +type Call struct { + Method string // The name of the service and method to call. + Args interface{} // The argument to the function (*struct). + Reply interface{} // The reply from the function (*struct). + Error error // After completion, the error status. + Done chan *Call // Strobes when call is complete. +} + +func (c *Client) send(call *Call) { + c.sending.Lock() + defer c.sending.Unlock() + + // Register this call. + c.mutex.Lock() + if c.shutdown || c.closing { + call.Error = ErrShutdown + c.mutex.Unlock() + call.done() + return + } + seq := c.seq + c.seq++ + c.pending[seq] = call + c.mutex.Unlock() + + // Encode and send the request. 
+ c.request.Seq = seq + c.request.Method = call.Method + err := c.codec.WriteRequest(&c.request, call.Args) + if err != nil { + c.mutex.Lock() + call = c.pending[seq] + delete(c.pending, seq) + c.mutex.Unlock() + if call != nil { + call.Error = err + call.done() + } + } +} + +// Notify sends a request to the receiver but does not wait for a return value. +func (c *Client) Notify(method string, args interface{}) error { + c.sending.Lock() + defer c.sending.Unlock() + + if c.shutdown || c.closing { + return ErrShutdown + } + + c.request.Seq = 0 + c.request.Method = method + return c.codec.WriteRequest(&c.request, args) +} diff --git a/vendor/github.com/cenkalti/rpc2/codec.go b/vendor/github.com/cenkalti/rpc2/codec.go new file mode 100644 index 000000000..b097d9aaa --- /dev/null +++ b/vendor/github.com/cenkalti/rpc2/codec.go @@ -0,0 +1,125 @@ +package rpc2 + +import ( + "bufio" + "encoding/gob" + "io" + "sync" +) + +// A Codec implements reading and writing of RPC requests and responses. +// The client calls ReadHeader to read a message header. +// The implementation must populate either Request or Response argument. +// Depending on which argument is populated, ReadRequestBody or +// ReadResponseBody is called right after ReadHeader. +// ReadRequestBody and ReadResponseBody may be called with a nil +// argument to force the body to be read and then discarded. +type Codec interface { + // ReadHeader must read a message and populate either the request + // or the response by inspecting the incoming message. + ReadHeader(*Request, *Response) error + + // ReadRequestBody into args argument of handler function. + ReadRequestBody(interface{}) error + + // ReadResponseBody into reply argument of handler function. + ReadResponseBody(interface{}) error + + // WriteRequest must be safe for concurrent use by multiple goroutines. + WriteRequest(*Request, interface{}) error + + // WriteResponse must be safe for concurrent use by multiple goroutines. 
+ WriteResponse(*Response, interface{}) error + + // Close is called when client/server finished with the connection. + Close() error +} + +// Request is a header written before every RPC call. +type Request struct { + Seq uint64 // sequence number chosen by client + Method string +} + +// Response is a header written before every RPC return. +type Response struct { + Seq uint64 // echoes that of the request + Error string // error, if any. +} + +type gobCodec struct { + rwc io.ReadWriteCloser + dec *gob.Decoder + enc *gob.Encoder + encBuf *bufio.Writer + mutex sync.Mutex +} + +type message struct { + Seq uint64 + Method string + Error string +} + +// NewGobCodec returns a new rpc2.Codec using gob encoding/decoding on conn. +func NewGobCodec(conn io.ReadWriteCloser) Codec { + buf := bufio.NewWriter(conn) + return &gobCodec{ + rwc: conn, + dec: gob.NewDecoder(conn), + enc: gob.NewEncoder(buf), + encBuf: buf, + } +} + +func (c *gobCodec) ReadHeader(req *Request, resp *Response) error { + var msg message + if err := c.dec.Decode(&msg); err != nil { + return err + } + + if msg.Method != "" { + req.Seq = msg.Seq + req.Method = msg.Method + } else { + resp.Seq = msg.Seq + resp.Error = msg.Error + } + return nil +} + +func (c *gobCodec) ReadRequestBody(body interface{}) error { + return c.dec.Decode(body) +} + +func (c *gobCodec) ReadResponseBody(body interface{}) error { + return c.dec.Decode(body) +} + +func (c *gobCodec) WriteRequest(r *Request, body interface{}) (err error) { + c.mutex.Lock() + defer c.mutex.Unlock() + if err = c.enc.Encode(r); err != nil { + return + } + if err = c.enc.Encode(body); err != nil { + return + } + return c.encBuf.Flush() +} + +func (c *gobCodec) WriteResponse(r *Response, body interface{}) (err error) { + c.mutex.Lock() + defer c.mutex.Unlock() + if err = c.enc.Encode(r); err != nil { + return + } + if err = c.enc.Encode(body); err != nil { + return + } + return c.encBuf.Flush() +} + +func (c *gobCodec) Close() error { + return 
c.rwc.Close() +} diff --git a/vendor/github.com/cenkalti/rpc2/debug.go b/vendor/github.com/cenkalti/rpc2/debug.go new file mode 100644 index 000000000..ec1b62521 --- /dev/null +++ b/vendor/github.com/cenkalti/rpc2/debug.go @@ -0,0 +1,12 @@ +package rpc2 + +import "log" + +// DebugLog controls the printing of internal and I/O errors. +var DebugLog = false + +func debugln(v ...interface{}) { + if DebugLog { + log.Println(v...) + } +} diff --git a/vendor/github.com/cenkalti/rpc2/jsonrpc/jsonrpc.go b/vendor/github.com/cenkalti/rpc2/jsonrpc/jsonrpc.go new file mode 100644 index 000000000..87e116887 --- /dev/null +++ b/vendor/github.com/cenkalti/rpc2/jsonrpc/jsonrpc.go @@ -0,0 +1,226 @@ +// Package jsonrpc implements a JSON-RPC ClientCodec and ServerCodec for the rpc2 package. +// +// Beside struct types, JSONCodec allows using positional arguments. +// Use []interface{} as the type of argument when sending and receiving methods. +// +// Positional arguments example: +// server.Handle("add", func(client *rpc2.Client, args []interface{}, result *float64) error { +// *result = args[0].(float64) + args[1].(float64) +// return nil +// }) +// +// var result float64 +// client.Call("add", []interface{}{1, 2}, &result) +// +package jsonrpc + +import ( + "encoding/json" + "errors" + "fmt" + "io" + "reflect" + "sync" + + "github.com/cenkalti/rpc2" +) + +type jsonCodec struct { + dec *json.Decoder // for reading JSON values + enc *json.Encoder // for writing JSON values + c io.Closer + + // temporary work space + msg message + serverRequest serverRequest + clientResponse clientResponse + + // JSON-RPC clients can use arbitrary json values as request IDs. + // Package rpc expects uint64 request IDs. + // We assign uint64 sequence numbers to incoming requests + // but save the original request ID in the pending map. + // When rpc responds, we use the sequence number in + // the response to find the original request ID. 
+ mutex sync.Mutex // protects seq, pending + pending map[uint64]*json.RawMessage + seq uint64 +} + +// NewJSONCodec returns a new rpc2.Codec using JSON-RPC on conn. +func NewJSONCodec(conn io.ReadWriteCloser) rpc2.Codec { + return &jsonCodec{ + dec: json.NewDecoder(conn), + enc: json.NewEncoder(conn), + c: conn, + pending: make(map[uint64]*json.RawMessage), + } +} + +// serverRequest and clientResponse combined +type message struct { + Method string `json:"method"` + Params *json.RawMessage `json:"params"` + Id *json.RawMessage `json:"id"` + Result *json.RawMessage `json:"result"` + Error interface{} `json:"error"` +} + +// Unmarshal to +type serverRequest struct { + Method string `json:"method"` + Params *json.RawMessage `json:"params"` + Id *json.RawMessage `json:"id"` +} +type clientResponse struct { + Id uint64 `json:"id"` + Result *json.RawMessage `json:"result"` + Error interface{} `json:"error"` +} + +// to Marshal +type serverResponse struct { + Id *json.RawMessage `json:"id"` + Result interface{} `json:"result"` + Error interface{} `json:"error"` +} +type clientRequest struct { + Method string `json:"method"` + Params interface{} `json:"params"` + Id *uint64 `json:"id"` +} + +func (c *jsonCodec) ReadHeader(req *rpc2.Request, resp *rpc2.Response) error { + c.msg = message{} + if err := c.dec.Decode(&c.msg); err != nil { + return err + } + + if c.msg.Method != "" { + // request comes to server + c.serverRequest.Id = c.msg.Id + c.serverRequest.Method = c.msg.Method + c.serverRequest.Params = c.msg.Params + + req.Method = c.serverRequest.Method + + // JSON request id can be any JSON value; + // RPC package expects uint64. Translate to + // internal uint64 and save JSON on the side. 
+ if c.serverRequest.Id == nil { + // Notification + } else { + c.mutex.Lock() + c.seq++ + c.pending[c.seq] = c.serverRequest.Id + c.serverRequest.Id = nil + req.Seq = c.seq + c.mutex.Unlock() + } + } else { + // response comes to client + err := json.Unmarshal(*c.msg.Id, &c.clientResponse.Id) + if err != nil { + return err + } + c.clientResponse.Result = c.msg.Result + c.clientResponse.Error = c.msg.Error + + resp.Error = "" + resp.Seq = c.clientResponse.Id + if c.clientResponse.Error != nil || c.clientResponse.Result == nil { + x, ok := c.clientResponse.Error.(string) + if !ok { + return fmt.Errorf("invalid error %v", c.clientResponse.Error) + } + if x == "" { + x = "unspecified error" + } + resp.Error = x + } + } + return nil +} + +var errMissingParams = errors.New("jsonrpc: request body missing params") + +func (c *jsonCodec) ReadRequestBody(x interface{}) error { + if x == nil { + return nil + } + if c.serverRequest.Params == nil { + return errMissingParams + } + + var err error + + // Check if x points to a slice of any kind + rt := reflect.TypeOf(x) + if rt.Kind() == reflect.Ptr && rt.Elem().Kind() == reflect.Slice { + // If it's a slice, unmarshal as is + err = json.Unmarshal(*c.serverRequest.Params, x) + } else { + // Anything else unmarshal into a slice containing x + params := &[]interface{}{x} + err = json.Unmarshal(*c.serverRequest.Params, params) + } + + return err +} + +func (c *jsonCodec) ReadResponseBody(x interface{}) error { + if x == nil { + return nil + } + return json.Unmarshal(*c.clientResponse.Result, x) +} + +func (c *jsonCodec) WriteRequest(r *rpc2.Request, param interface{}) error { + req := &clientRequest{Method: r.Method} + + // Check if param is a slice of any kind + if param != nil && reflect.TypeOf(param).Kind() == reflect.Slice { + // If it's a slice, leave as is + req.Params = param + } else { + // Put anything else into a slice + req.Params = []interface{}{param} + } + + if r.Seq == 0 { + // Notification + req.Id = nil + } else { 
+ seq := r.Seq + req.Id = &seq + } + return c.enc.Encode(req) +} + +var null = json.RawMessage([]byte("null")) + +func (c *jsonCodec) WriteResponse(r *rpc2.Response, x interface{}) error { + c.mutex.Lock() + b, ok := c.pending[r.Seq] + if !ok { + c.mutex.Unlock() + return errors.New("invalid sequence number in response") + } + delete(c.pending, r.Seq) + c.mutex.Unlock() + + if b == nil { + // Invalid request so no id. Use JSON null. + b = &null + } + resp := serverResponse{Id: b} + if r.Error == "" { + resp.Result = x + } else { + resp.Error = r.Error + } + return c.enc.Encode(resp) +} + +func (c *jsonCodec) Close() error { + return c.c.Close() +} diff --git a/vendor/github.com/cenkalti/rpc2/server.go b/vendor/github.com/cenkalti/rpc2/server.go new file mode 100644 index 000000000..2a5be7ed6 --- /dev/null +++ b/vendor/github.com/cenkalti/rpc2/server.go @@ -0,0 +1,181 @@ +package rpc2 + +import ( + "io" + "log" + "net" + "reflect" + "unicode" + "unicode/utf8" + + "github.com/cenkalti/hub" +) + +// Precompute the reflect type for error. Can't use error directly +// because Typeof takes an empty interface value. This is annoying. +var typeOfError = reflect.TypeOf((*error)(nil)).Elem() +var typeOfClient = reflect.TypeOf((*Client)(nil)) + +const ( + clientConnected hub.Kind = iota + clientDisconnected +) + +// Server responds to RPC requests made by Client. +type Server struct { + handlers map[string]*handler + eventHub *hub.Hub +} + +type handler struct { + fn reflect.Value + argType reflect.Type + replyType reflect.Type +} + +type connectionEvent struct { + Client *Client +} + +type disconnectionEvent struct { + Client *Client +} + +func (connectionEvent) Kind() hub.Kind { return clientConnected } +func (disconnectionEvent) Kind() hub.Kind { return clientDisconnected } + +// NewServer returns a new Server. 
+func NewServer() *Server { + return &Server{ + handlers: make(map[string]*handler), + eventHub: &hub.Hub{}, + } +} + +// Handle registers the handler function for the given method. If a handler already exists for method, Handle panics. +func (s *Server) Handle(method string, handlerFunc interface{}) { + addHandler(s.handlers, method, handlerFunc) +} + +func addHandler(handlers map[string]*handler, mname string, handlerFunc interface{}) { + if _, ok := handlers[mname]; ok { + panic("rpc2: multiple registrations for " + mname) + } + + method := reflect.ValueOf(handlerFunc) + mtype := method.Type() + // Method needs three ins: *client, *args, *reply. + if mtype.NumIn() != 3 { + log.Panicln("method", mname, "has wrong number of ins:", mtype.NumIn()) + } + // First arg must be a pointer to rpc2.Client. + clientType := mtype.In(0) + if clientType.Kind() != reflect.Ptr { + log.Panicln("method", mname, "client type not a pointer:", clientType) + } + if clientType != typeOfClient { + log.Panicln("method", mname, "first argument", clientType.String(), "not *rpc2.Client") + } + // Second arg need not be a pointer. + argType := mtype.In(1) + if !isExportedOrBuiltinType(argType) { + log.Panicln(mname, "argument type not exported:", argType) + } + // Third arg must be a pointer. + replyType := mtype.In(2) + if replyType.Kind() != reflect.Ptr { + log.Panicln("method", mname, "reply type not a pointer:", replyType) + } + // Reply type must be exported. + if !isExportedOrBuiltinType(replyType) { + log.Panicln("method", mname, "reply type not exported:", replyType) + } + // Method needs one out. + if mtype.NumOut() != 1 { + log.Panicln("method", mname, "has wrong number of outs:", mtype.NumOut()) + } + // The return type of the method must be error. 
+ if returnType := mtype.Out(0); returnType != typeOfError { + log.Panicln("method", mname, "returns", returnType.String(), "not error") + } + handlers[mname] = &handler{ + fn: method, + argType: argType, + replyType: replyType, + } +} + +// Is this type exported or a builtin? +func isExportedOrBuiltinType(t reflect.Type) bool { + for t.Kind() == reflect.Ptr { + t = t.Elem() + } + // PkgPath will be non-empty even for an exported type, + // so we need to check the type name as well. + return isExported(t.Name()) || t.PkgPath() == "" +} + +// Is this an exported - upper case - name? +func isExported(name string) bool { + rune, _ := utf8.DecodeRuneInString(name) + return unicode.IsUpper(rune) +} + +// OnConnect registers a function to run when a client connects. +func (s *Server) OnConnect(f func(*Client)) { + s.eventHub.Subscribe(clientConnected, func(e hub.Event) { + go f(e.(connectionEvent).Client) + }) +} + +// OnDisconnect registers a function to run when a client disconnects. +func (s *Server) OnDisconnect(f func(*Client)) { + s.eventHub.Subscribe(clientDisconnected, func(e hub.Event) { + go f(e.(disconnectionEvent).Client) + }) +} + +// Accept accepts connections on the listener and serves requests +// for each incoming connection. Accept blocks; the caller typically +// invokes it in a go statement. +func (s *Server) Accept(lis net.Listener) { + for { + conn, err := lis.Accept() + if err != nil { + log.Print("rpc.Serve: accept:", err.Error()) + return + } + go s.ServeConn(conn) + } +} + +// ServeConn runs the server on a single connection. +// ServeConn blocks, serving the connection until the client hangs up. +// The caller typically invokes ServeConn in a go statement. +// ServeConn uses the gob wire format (see package gob) on the +// connection. To use an alternate codec, use ServeCodec. 
+func (s *Server) ServeConn(conn io.ReadWriteCloser) { + s.ServeCodec(NewGobCodec(conn)) +} + +// ServeCodec is like ServeConn but uses the specified codec to +// decode requests and encode responses. +func (s *Server) ServeCodec(codec Codec) { + s.ServeCodecWithState(codec, NewState()) +} + +// ServeCodecWithState is like ServeCodec but also gives the ability to +// associate a state variable with the client that persists across RPC calls. +func (s *Server) ServeCodecWithState(codec Codec, state *State) { + defer codec.Close() + + // Client also handles the incoming connections. + c := NewClientWithCodec(codec) + c.server = true + c.handlers = s.handlers + c.State = state + + s.eventHub.Publish(connectionEvent{c}) + c.Run() + s.eventHub.Publish(disconnectionEvent{c}) +} diff --git a/vendor/github.com/cenkalti/rpc2/state.go b/vendor/github.com/cenkalti/rpc2/state.go new file mode 100644 index 000000000..7a4f23e6d --- /dev/null +++ b/vendor/github.com/cenkalti/rpc2/state.go @@ -0,0 +1,25 @@ +package rpc2 + +import "sync" + +type State struct { + store map[string]interface{} + m sync.RWMutex +} + +func NewState() *State { + return &State{store: make(map[string]interface{})} +} + +func (s *State) Get(key string) (value interface{}, ok bool) { + s.m.RLock() + value, ok = s.store[key] + s.m.RUnlock() + return +} + +func (s *State) Set(key string, value interface{}) { + s.m.Lock() + s.store[key] = value + s.m.Unlock() +} diff --git a/vendor/github.com/cilium/ebpf/.clang-format b/vendor/github.com/cilium/ebpf/.clang-format new file mode 100644 index 000000000..0ff425760 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/.clang-format @@ -0,0 +1,25 @@ +--- +Language: Cpp +BasedOnStyle: LLVM +AlignAfterOpenBracket: DontAlign +AlignConsecutiveAssignments: true +AlignEscapedNewlines: DontAlign +# mkdocs annotations in source code are written as trailing comments +# and alignment pushes these really far away from the content. 
+AlignTrailingComments: false +AlwaysBreakBeforeMultilineStrings: true +AlwaysBreakTemplateDeclarations: false +AllowAllParametersOfDeclarationOnNextLine: false +AllowShortFunctionsOnASingleLine: false +BreakBeforeBraces: Attach +IndentWidth: 4 +KeepEmptyLinesAtTheStartOfBlocks: false +TabWidth: 4 +UseTab: ForContinuationAndIndentation +ColumnLimit: 1000 +# Go compiler comments need to stay unindented. +CommentPragmas: '^go:.*' +# linux/bpf.h needs to be included before bpf/bpf_helpers.h for types like __u64 +# and sorting makes this impossible. +SortIncludes: false +... diff --git a/vendor/github.com/cilium/ebpf/.gitattributes b/vendor/github.com/cilium/ebpf/.gitattributes new file mode 100644 index 000000000..ea7c9a89c --- /dev/null +++ b/vendor/github.com/cilium/ebpf/.gitattributes @@ -0,0 +1,4 @@ +# Force line ending normalisation +* text=auto +# Show types.go in the PR diff view by default +internal/sys/types.go linguist-generated=false diff --git a/vendor/github.com/cilium/ebpf/.gitignore b/vendor/github.com/cilium/ebpf/.gitignore new file mode 100644 index 000000000..b46162b8e --- /dev/null +++ b/vendor/github.com/cilium/ebpf/.gitignore @@ -0,0 +1,14 @@ +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib +*.o +!*_bpf*.o + +# Test binary, build with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out diff --git a/vendor/github.com/cilium/ebpf/.golangci.yaml b/vendor/github.com/cilium/ebpf/.golangci.yaml new file mode 100644 index 000000000..8f88708b2 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/.golangci.yaml @@ -0,0 +1,29 @@ +version: "2" +linters: + default: none + enable: + - depguard + - govet + - ineffassign + - misspell + - unused + settings: + depguard: + rules: + no-x-sys-unix: + files: + - '!**/internal/unix/*.go' + - '!**/examples/**/*.go' + - '!**/docs/**/*.go' + deny: + - pkg: golang.org/x/sys/unix + desc: use internal/unix instead + +formatters: + enable: + - gofmt + - 
goimports + settings: + goimports: + local-prefixes: + - github.com/cilium/ebpf diff --git a/vendor/github.com/cilium/ebpf/.vimto.toml b/vendor/github.com/cilium/ebpf/.vimto.toml new file mode 100644 index 000000000..49a12dbc0 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/.vimto.toml @@ -0,0 +1,12 @@ +kernel="ghcr.io/cilium/ci-kernels:stable" +smp="cpus=2" +memory="1G" +user="root" +setup=[ + "mount -t cgroup2 -o nosuid,noexec,nodev cgroup2 /sys/fs/cgroup", + "/bin/sh -c 'modprobe bpf_testmod || true'", + "dmesg --clear", +] +teardown=[ + "dmesg --read-clear", +] diff --git a/vendor/github.com/cilium/ebpf/CODEOWNERS b/vendor/github.com/cilium/ebpf/CODEOWNERS new file mode 100644 index 000000000..bd0a61158 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/CODEOWNERS @@ -0,0 +1,11 @@ +* @cilium/ebpf-lib-maintainers + +features/ @rgo3 +link/ @mmat11 + +perf/ @florianl +ringbuf/ @florianl + +btf/ @dylandreimerink + +docs/ @ti-mo diff --git a/vendor/github.com/cilium/ebpf/CODE_OF_CONDUCT.md b/vendor/github.com/cilium/ebpf/CODE_OF_CONDUCT.md new file mode 100644 index 000000000..8e42838c5 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/CODE_OF_CONDUCT.md @@ -0,0 +1,46 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation. 
+ +## Our Standards + +Examples of behavior that contributes to creating a positive environment include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers. 
+ +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at nathanjsweet at gmail dot com or i at lmb dot io. The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [http://contributor-covenant.org/version/1/4][version] + +[homepage]: http://contributor-covenant.org +[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/github.com/cilium/ebpf/CONTRIBUTING.md b/vendor/github.com/cilium/ebpf/CONTRIBUTING.md new file mode 100644 index 000000000..673a9ac29 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/CONTRIBUTING.md @@ -0,0 +1,5 @@ +# Contributing to ebpf-go + +Want to contribute to ebpf-go? There are a few things you need to know. + +We wrote a [contribution guide](https://ebpf-go.dev/contributing/) to help you get started. 
diff --git a/vendor/github.com/cilium/ebpf/LICENSE b/vendor/github.com/cilium/ebpf/LICENSE new file mode 100644 index 000000000..c637ae99c --- /dev/null +++ b/vendor/github.com/cilium/ebpf/LICENSE @@ -0,0 +1,23 @@ +MIT License + +Copyright (c) 2017 Nathan Sweet +Copyright (c) 2018, 2019 Cloudflare +Copyright (c) 2019 Authors of Cilium + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/vendor/github.com/cilium/ebpf/MAINTAINERS.md b/vendor/github.com/cilium/ebpf/MAINTAINERS.md new file mode 100644 index 000000000..a56a03e39 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/MAINTAINERS.md @@ -0,0 +1,3 @@ +# Maintainers + +Maintainers can be found in the [Cilium Maintainers file](https://github.com/cilium/community/blob/main/roles/Maintainers.md) diff --git a/vendor/github.com/cilium/ebpf/Makefile b/vendor/github.com/cilium/ebpf/Makefile new file mode 100644 index 000000000..4f53b37f3 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/Makefile @@ -0,0 +1,118 @@ +# The development version of clang is distributed as the 'clang' binary, +# while stable/released versions have a version number attached. +# Pin the default clang to a stable version. +CLANG ?= clang-20 +STRIP ?= llvm-strip-20 +OBJCOPY ?= llvm-objcopy-20 +CFLAGS := -O2 -g -Wall -Werror -mcpu=v2 $(CFLAGS) + +CI_KERNEL_URL ?= https://github.com/cilium/ci-kernels/raw/master/ + +# Obtain an absolute path to the directory of the Makefile. +# Assume the Makefile is in the root of the repository. +REPODIR := $(shell dirname $(realpath $(firstword $(MAKEFILE_LIST)))) +UIDGID := $(shell stat -c '%u:%g' ${REPODIR}) + +# Prefer podman if installed, otherwise use docker. +# Note: Setting the var at runtime will always override. 
+CONTAINER_ENGINE ?= $(if $(shell command -v podman), podman, docker) +CONTAINER_RUN_ARGS ?= $(if $(filter ${CONTAINER_ENGINE}, podman), \ + --log-driver=none \ + -v "$(shell go env GOCACHE)":/root/.cache/go-build \ + -v "$(shell go env GOMODCACHE)":/go/pkg/mod, --user "${UIDGID}") + +IMAGE := $(shell cat ${REPODIR}/testdata/docker/IMAGE) +VERSION := $(shell cat ${REPODIR}/testdata/docker/VERSION) + +TARGETS := \ + testdata/loader-clang-14 \ + testdata/loader-clang-17 \ + testdata/loader-$(CLANG) \ + testdata/manyprogs \ + testdata/btf_map_init \ + testdata/invalid_map \ + testdata/raw_tracepoint \ + testdata/invalid_map_static \ + testdata/invalid_btf_map_init \ + testdata/strings \ + testdata/freplace \ + testdata/fentry_fexit \ + testdata/iproute2_map_compat \ + testdata/map_spin_lock \ + testdata/subprog_reloc \ + testdata/fwd_decl \ + testdata/kconfig \ + testdata/ksym \ + testdata/kfunc \ + testdata/invalid-kfunc \ + testdata/kfunc-kmod \ + testdata/constants \ + testdata/errors \ + testdata/variables \ + testdata/arena \ + btf/testdata/relocs \ + btf/testdata/relocs_read \ + btf/testdata/relocs_read_tgt \ + btf/testdata/relocs_enum \ + btf/testdata/tags \ + cmd/bpf2go/testdata/minimal + +.PHONY: all clean container-all container-shell generate + +.DEFAULT_TARGET = container-all + +# Build all ELF binaries using a containerized LLVM toolchain. +container-all: + +${CONTAINER_ENGINE} run --rm -ti ${CONTAINER_RUN_ARGS} \ + -v "${REPODIR}":/ebpf -w /ebpf --env MAKEFLAGS \ + --env HOME="/tmp" \ + --env BPF2GO_CC="$(CLANG)" \ + --env BPF2GO_CFLAGS="$(CFLAGS)" \ + "${IMAGE}:${VERSION}" \ + make all + +# (debug) Drop the user into a shell inside the container as root. +# Set BPF2GO_ envs to make 'make generate' just work. 
+container-shell: + ${CONTAINER_ENGINE} run --rm -ti ${CONTAINER_RUN_ARGS} \ + -v "${REPODIR}":/ebpf -w /ebpf \ + --env BPF2GO_CC="$(CLANG)" \ + --env BPF2GO_CFLAGS="$(CFLAGS)" \ + "${IMAGE}:${VERSION}" + +clean: + find "$(CURDIR)" -name "*.elf" -delete + find "$(CURDIR)" -name "*.o" -delete + +format: + find . -type f -name "*.c" | xargs clang-format -i + +all: format $(addsuffix -el.elf,$(TARGETS)) $(addsuffix -eb.elf,$(TARGETS)) generate + ln -srf testdata/loader-$(CLANG)-el.elf testdata/loader-el.elf + ln -srf testdata/loader-$(CLANG)-eb.elf testdata/loader-eb.elf + +generate: + go generate -run "gentypes" ./... + go generate -skip "gentypes" ./... + +testdata/loader-%-el.elf: testdata/loader.c + $* $(CFLAGS) -target bpfel -c $< -o $@ + $(STRIP) -g $@ + +testdata/loader-%-eb.elf: testdata/loader.c + $* $(CFLAGS) -target bpfeb -c $< -o $@ + $(STRIP) -g $@ + +%-el.elf: %.c + $(CLANG) $(CFLAGS) -target bpfel -c $< -o $@ + $(STRIP) -g $@ + +%-eb.elf : %.c + $(CLANG) $(CFLAGS) -target bpfeb -c $< -o $@ + $(STRIP) -g $@ + +.PHONY: update-kernel-deps +update-kernel-deps: export KERNEL_VERSION?=6.8 +update-kernel-deps: + ./testdata/sh/update-kernel-deps.sh + $(MAKE) container-all diff --git a/vendor/github.com/cilium/ebpf/README.md b/vendor/github.com/cilium/ebpf/README.md new file mode 100644 index 000000000..01a154c61 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/README.md @@ -0,0 +1,76 @@ +# eBPF + +[![PkgGoDev](https://pkg.go.dev/badge/github.com/cilium/ebpf)](https://pkg.go.dev/github.com/cilium/ebpf) + +![HoneyGopher](docs/ebpf/ebpf-go.png) + +ebpf-go is a pure Go library that provides utilities for loading, compiling, and +debugging eBPF programs. It has minimal external dependencies and is intended to +be used in long running processes. + +See [ebpf.io](https://ebpf.io) for complementary projects from the wider eBPF +ecosystem. + +## Getting Started + +Please take a look at our [Getting Started] guide. 
+ +[Contributions](https://ebpf-go.dev/contributing) are highly encouraged, as they highlight certain use cases of +eBPF and the library, and help shape the future of the project. + +## Getting Help + +The community actively monitors our [GitHub Discussions](https://github.com/cilium/ebpf/discussions) page. +Please search for existing threads before starting a new one. Refrain from +opening issues on the bug tracker if you're just starting out or if you're not +sure if something is a bug in the library code. + +Alternatively, [join](https://ebpf.io/slack) the +[#ebpf-go](https://cilium.slack.com/messages/ebpf-go) channel on Slack if you +have other questions regarding the project. Note that this channel is ephemeral +and has its history erased past a certain point, which is less helpful for +others running into the same problem later. + +## Packages + +This library includes the following packages: + +* [asm](https://pkg.go.dev/github.com/cilium/ebpf/asm) contains a basic + assembler, allowing you to write eBPF assembly instructions directly + within your Go code. (You don't need to use this if you prefer to write your eBPF program in C.) +* [cmd/bpf2go](https://pkg.go.dev/github.com/cilium/ebpf/cmd/bpf2go) allows + compiling and embedding eBPF programs written in C within Go code. As well as + compiling the C code, it auto-generates Go code for loading and manipulating + the eBPF program and map objects. +* [link](https://pkg.go.dev/github.com/cilium/ebpf/link) allows attaching eBPF + to various hooks +* [perf](https://pkg.go.dev/github.com/cilium/ebpf/perf) allows reading from a + `PERF_EVENT_ARRAY` +* [ringbuf](https://pkg.go.dev/github.com/cilium/ebpf/ringbuf) allows reading from a + `BPF_MAP_TYPE_RINGBUF` map +* [features](https://pkg.go.dev/github.com/cilium/ebpf/features) implements the equivalent + of `bpftool feature probe` for discovering BPF-related kernel features using native Go. 
+* [rlimit](https://pkg.go.dev/github.com/cilium/ebpf/rlimit) provides a convenient API to lift + the `RLIMIT_MEMLOCK` constraint on kernels before 5.11. +* [btf](https://pkg.go.dev/github.com/cilium/ebpf/btf) allows reading the BPF Type Format. +* [pin](https://pkg.go.dev/github.com/cilium/ebpf/pin) provides APIs for working with pinned objects on bpffs. + +## Requirements + +* A version of Go that is [supported by + upstream](https://golang.org/doc/devel/release.html#policy) +* Linux (amd64, arm64): CI is run against kernel.org LTS releases. >= 4.4 should work but EOL'ed + versions are not supported. +* Windows (amd64): CI is run against Windows Server 2022. Only the latest eBPF for Windows + release is supported. +* Other architectures are best effort. 32bit arches are not supported. + +## License + +MIT + +### eBPF Gopher + +The eBPF honeygopher is based on the Go gopher designed by Renee French. + +[Getting Started]: https://ebpf-go.dev/guides/getting-started/ diff --git a/vendor/github.com/cilium/ebpf/asm/alu.go b/vendor/github.com/cilium/ebpf/asm/alu.go new file mode 100644 index 000000000..a4ae72212 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/asm/alu.go @@ -0,0 +1,180 @@ +package asm + +//go:generate go tool stringer -output alu_string.go -type=Source,Endianness,ALUOp + +// Source of ALU / ALU64 / Branch operations +// +// msb lsb +// +------------+-+---+ +// | op |S|cls| +// +------------+-+---+ +type Source uint16 + +const sourceMask OpCode = 0x0008 + +// Source bitmask +const ( + // InvalidSource is returned by getters when invoked + // on non ALU / branch OpCodes. + InvalidSource Source = 0xffff + // ImmSource src is from constant + ImmSource Source = 0x0000 + // RegSource src is from register + RegSource Source = 0x0008 +) + +// The Endianness of a byte swap instruction. 
+type Endianness uint8 + +const endianMask = sourceMask + +// Endian flags +const ( + InvalidEndian Endianness = 0xff + // Convert to little endian + LE Endianness = 0x00 + // Convert to big endian + BE Endianness = 0x08 +) + +// ALUOp are ALU / ALU64 operations +// +// msb lsb +// +-------+----+-+---+ +// | EXT | OP |s|cls| +// +-------+----+-+---+ +type ALUOp uint16 + +const aluMask OpCode = 0x3ff0 + +const ( + // InvalidALUOp is returned by getters when invoked + // on non ALU OpCodes + InvalidALUOp ALUOp = 0xffff + // Add - addition + Add ALUOp = 0x0000 + // Sub - subtraction + Sub ALUOp = 0x0010 + // Mul - multiplication + Mul ALUOp = 0x0020 + // Div - division + Div ALUOp = 0x0030 + // SDiv - signed division + SDiv ALUOp = Div + 0x0100 + // Or - bitwise or + Or ALUOp = 0x0040 + // And - bitwise and + And ALUOp = 0x0050 + // LSh - bitwise shift left + LSh ALUOp = 0x0060 + // RSh - bitwise shift right + RSh ALUOp = 0x0070 + // Neg - sign/unsign signing bit + Neg ALUOp = 0x0080 + // Mod - modulo + Mod ALUOp = 0x0090 + // SMod - signed modulo + SMod ALUOp = Mod + 0x0100 + // Xor - bitwise xor + Xor ALUOp = 0x00a0 + // Mov - move value from one place to another + Mov ALUOp = 0x00b0 + // MovSX8 - move lower 8 bits, sign extended upper bits of target + MovSX8 ALUOp = Mov + 0x0100 + // MovSX16 - move lower 16 bits, sign extended upper bits of target + MovSX16 ALUOp = Mov + 0x0200 + // MovSX32 - move lower 32 bits, sign extended upper bits of target + MovSX32 ALUOp = Mov + 0x0300 + // ArSh - arithmetic shift + ArSh ALUOp = 0x00c0 + // Swap - endian conversions + Swap ALUOp = 0x00d0 +) + +// HostTo converts from host to another endianness. 
+func HostTo(endian Endianness, dst Register, size Size) Instruction { + var imm int64 + switch size { + case Half: + imm = 16 + case Word: + imm = 32 + case DWord: + imm = 64 + default: + return Instruction{OpCode: InvalidOpCode} + } + + return Instruction{ + OpCode: OpCode(ALUClass).SetALUOp(Swap).SetSource(Source(endian)), + Dst: dst, + Constant: imm, + } +} + +// BSwap unconditionally reverses the order of bytes in a register. +func BSwap(dst Register, size Size) Instruction { + var imm int64 + switch size { + case Half: + imm = 16 + case Word: + imm = 32 + case DWord: + imm = 64 + default: + return Instruction{OpCode: InvalidOpCode} + } + + return Instruction{ + OpCode: OpCode(ALU64Class).SetALUOp(Swap), + Dst: dst, + Constant: imm, + } +} + +// Op returns the OpCode for an ALU operation with a given source. +func (op ALUOp) Op(source Source) OpCode { + return OpCode(ALU64Class).SetALUOp(op).SetSource(source) +} + +// Reg emits `dst (op) src`. +func (op ALUOp) Reg(dst, src Register) Instruction { + return Instruction{ + OpCode: op.Op(RegSource), + Dst: dst, + Src: src, + } +} + +// Imm emits `dst (op) value`. +func (op ALUOp) Imm(dst Register, value int32) Instruction { + return Instruction{ + OpCode: op.Op(ImmSource), + Dst: dst, + Constant: int64(value), + } +} + +// Op32 returns the OpCode for a 32-bit ALU operation with a given source. +func (op ALUOp) Op32(source Source) OpCode { + return OpCode(ALUClass).SetALUOp(op).SetSource(source) +} + +// Reg32 emits `dst (op) src`, zeroing the upper 32 bit of dst. +func (op ALUOp) Reg32(dst, src Register) Instruction { + return Instruction{ + OpCode: op.Op32(RegSource), + Dst: dst, + Src: src, + } +} + +// Imm32 emits `dst (op) value`, zeroing the upper 32 bit of dst. 
+func (op ALUOp) Imm32(dst Register, value int32) Instruction { + return Instruction{ + OpCode: op.Op32(ImmSource), + Dst: dst, + Constant: int64(value), + } +} diff --git a/vendor/github.com/cilium/ebpf/asm/alu_string.go b/vendor/github.com/cilium/ebpf/asm/alu_string.go new file mode 100644 index 000000000..35b406bf3 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/asm/alu_string.go @@ -0,0 +1,117 @@ +// Code generated by "stringer -output alu_string.go -type=Source,Endianness,ALUOp"; DO NOT EDIT. + +package asm + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[InvalidSource-65535] + _ = x[ImmSource-0] + _ = x[RegSource-8] +} + +const ( + _Source_name_0 = "ImmSource" + _Source_name_1 = "RegSource" + _Source_name_2 = "InvalidSource" +) + +func (i Source) String() string { + switch { + case i == 0: + return _Source_name_0 + case i == 8: + return _Source_name_1 + case i == 65535: + return _Source_name_2 + default: + return "Source(" + strconv.FormatInt(int64(i), 10) + ")" + } +} +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[InvalidEndian-255] + _ = x[LE-0] + _ = x[BE-8] +} + +const ( + _Endianness_name_0 = "LE" + _Endianness_name_1 = "BE" + _Endianness_name_2 = "InvalidEndian" +) + +func (i Endianness) String() string { + switch { + case i == 0: + return _Endianness_name_0 + case i == 8: + return _Endianness_name_1 + case i == 255: + return _Endianness_name_2 + default: + return "Endianness(" + strconv.FormatInt(int64(i), 10) + ")" + } +} +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. 
+ var x [1]struct{} + _ = x[InvalidALUOp-65535] + _ = x[Add-0] + _ = x[Sub-16] + _ = x[Mul-32] + _ = x[Div-48] + _ = x[SDiv-304] + _ = x[Or-64] + _ = x[And-80] + _ = x[LSh-96] + _ = x[RSh-112] + _ = x[Neg-128] + _ = x[Mod-144] + _ = x[SMod-400] + _ = x[Xor-160] + _ = x[Mov-176] + _ = x[MovSX8-432] + _ = x[MovSX16-688] + _ = x[MovSX32-944] + _ = x[ArSh-192] + _ = x[Swap-208] +} + +const _ALUOp_name = "AddSubMulDivOrAndLShRShNegModXorMovArShSwapSDivSModMovSX8MovSX16MovSX32InvalidALUOp" + +var _ALUOp_map = map[ALUOp]string{ + 0: _ALUOp_name[0:3], + 16: _ALUOp_name[3:6], + 32: _ALUOp_name[6:9], + 48: _ALUOp_name[9:12], + 64: _ALUOp_name[12:14], + 80: _ALUOp_name[14:17], + 96: _ALUOp_name[17:20], + 112: _ALUOp_name[20:23], + 128: _ALUOp_name[23:26], + 144: _ALUOp_name[26:29], + 160: _ALUOp_name[29:32], + 176: _ALUOp_name[32:35], + 192: _ALUOp_name[35:39], + 208: _ALUOp_name[39:43], + 304: _ALUOp_name[43:47], + 400: _ALUOp_name[47:51], + 432: _ALUOp_name[51:57], + 688: _ALUOp_name[57:64], + 944: _ALUOp_name[64:71], + 65535: _ALUOp_name[71:83], +} + +func (i ALUOp) String() string { + if str, ok := _ALUOp_map[i]; ok { + return str + } + return "ALUOp(" + strconv.FormatInt(int64(i), 10) + ")" +} diff --git a/vendor/github.com/cilium/ebpf/asm/doc.go b/vendor/github.com/cilium/ebpf/asm/doc.go new file mode 100644 index 000000000..7031bdc27 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/asm/doc.go @@ -0,0 +1,2 @@ +// Package asm is an assembler for eBPF bytecode. +package asm diff --git a/vendor/github.com/cilium/ebpf/asm/func.go b/vendor/github.com/cilium/ebpf/asm/func.go new file mode 100644 index 000000000..fe75c7578 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/asm/func.go @@ -0,0 +1,23 @@ +package asm + +import "github.com/cilium/ebpf/internal/platform" + +//go:generate go tool stringer -output func_string.go -type=BuiltinFunc + +// BuiltinFunc is a built-in eBPF function. 
+type BuiltinFunc uint32 + +// BuiltinFuncForPlatform returns a platform specific function constant. +// +// Use this if the library doesn't provide a constant yet. +func BuiltinFuncForPlatform(plat string, value uint32) (BuiltinFunc, error) { + return platform.EncodeConstant[BuiltinFunc](plat, value) +} + +// Call emits a function call. +func (fn BuiltinFunc) Call() Instruction { + return Instruction{ + OpCode: OpCode(JumpClass).SetJumpOp(Call), + Constant: int64(fn), + } +} diff --git a/vendor/github.com/cilium/ebpf/asm/func_lin.go b/vendor/github.com/cilium/ebpf/asm/func_lin.go new file mode 100644 index 000000000..1dd026d62 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/asm/func_lin.go @@ -0,0 +1,223 @@ +// Code generated by internal/cmd/genfunctions.awk; DO NOT EDIT. + +package asm + +// Code in this file is derived from Linux, available under the GPL-2.0 WITH Linux-syscall-note. + +import "github.com/cilium/ebpf/internal/platform" + +// Built-in functions (Linux). +const ( + FnUnspec = BuiltinFunc(platform.LinuxTag | 0) //lint:ignore SA4016 consistency + FnMapLookupElem = BuiltinFunc(platform.LinuxTag | 1) + FnMapUpdateElem = BuiltinFunc(platform.LinuxTag | 2) + FnMapDeleteElem = BuiltinFunc(platform.LinuxTag | 3) + FnProbeRead = BuiltinFunc(platform.LinuxTag | 4) + FnKtimeGetNs = BuiltinFunc(platform.LinuxTag | 5) + FnTracePrintk = BuiltinFunc(platform.LinuxTag | 6) + FnGetPrandomU32 = BuiltinFunc(platform.LinuxTag | 7) + FnGetSmpProcessorId = BuiltinFunc(platform.LinuxTag | 8) + FnSkbStoreBytes = BuiltinFunc(platform.LinuxTag | 9) + FnL3CsumReplace = BuiltinFunc(platform.LinuxTag | 10) + FnL4CsumReplace = BuiltinFunc(platform.LinuxTag | 11) + FnTailCall = BuiltinFunc(platform.LinuxTag | 12) + FnCloneRedirect = BuiltinFunc(platform.LinuxTag | 13) + FnGetCurrentPidTgid = BuiltinFunc(platform.LinuxTag | 14) + FnGetCurrentUidGid = BuiltinFunc(platform.LinuxTag | 15) + FnGetCurrentComm = BuiltinFunc(platform.LinuxTag | 16) + FnGetCgroupClassid = 
BuiltinFunc(platform.LinuxTag | 17) + FnSkbVlanPush = BuiltinFunc(platform.LinuxTag | 18) + FnSkbVlanPop = BuiltinFunc(platform.LinuxTag | 19) + FnSkbGetTunnelKey = BuiltinFunc(platform.LinuxTag | 20) + FnSkbSetTunnelKey = BuiltinFunc(platform.LinuxTag | 21) + FnPerfEventRead = BuiltinFunc(platform.LinuxTag | 22) + FnRedirect = BuiltinFunc(platform.LinuxTag | 23) + FnGetRouteRealm = BuiltinFunc(platform.LinuxTag | 24) + FnPerfEventOutput = BuiltinFunc(platform.LinuxTag | 25) + FnSkbLoadBytes = BuiltinFunc(platform.LinuxTag | 26) + FnGetStackid = BuiltinFunc(platform.LinuxTag | 27) + FnCsumDiff = BuiltinFunc(platform.LinuxTag | 28) + FnSkbGetTunnelOpt = BuiltinFunc(platform.LinuxTag | 29) + FnSkbSetTunnelOpt = BuiltinFunc(platform.LinuxTag | 30) + FnSkbChangeProto = BuiltinFunc(platform.LinuxTag | 31) + FnSkbChangeType = BuiltinFunc(platform.LinuxTag | 32) + FnSkbUnderCgroup = BuiltinFunc(platform.LinuxTag | 33) + FnGetHashRecalc = BuiltinFunc(platform.LinuxTag | 34) + FnGetCurrentTask = BuiltinFunc(platform.LinuxTag | 35) + FnProbeWriteUser = BuiltinFunc(platform.LinuxTag | 36) + FnCurrentTaskUnderCgroup = BuiltinFunc(platform.LinuxTag | 37) + FnSkbChangeTail = BuiltinFunc(platform.LinuxTag | 38) + FnSkbPullData = BuiltinFunc(platform.LinuxTag | 39) + FnCsumUpdate = BuiltinFunc(platform.LinuxTag | 40) + FnSetHashInvalid = BuiltinFunc(platform.LinuxTag | 41) + FnGetNumaNodeId = BuiltinFunc(platform.LinuxTag | 42) + FnSkbChangeHead = BuiltinFunc(platform.LinuxTag | 43) + FnXdpAdjustHead = BuiltinFunc(platform.LinuxTag | 44) + FnProbeReadStr = BuiltinFunc(platform.LinuxTag | 45) + FnGetSocketCookie = BuiltinFunc(platform.LinuxTag | 46) + FnGetSocketUid = BuiltinFunc(platform.LinuxTag | 47) + FnSetHash = BuiltinFunc(platform.LinuxTag | 48) + FnSetsockopt = BuiltinFunc(platform.LinuxTag | 49) + FnSkbAdjustRoom = BuiltinFunc(platform.LinuxTag | 50) + FnRedirectMap = BuiltinFunc(platform.LinuxTag | 51) + FnSkRedirectMap = BuiltinFunc(platform.LinuxTag | 52) + 
FnSockMapUpdate = BuiltinFunc(platform.LinuxTag | 53) + FnXdpAdjustMeta = BuiltinFunc(platform.LinuxTag | 54) + FnPerfEventReadValue = BuiltinFunc(platform.LinuxTag | 55) + FnPerfProgReadValue = BuiltinFunc(platform.LinuxTag | 56) + FnGetsockopt = BuiltinFunc(platform.LinuxTag | 57) + FnOverrideReturn = BuiltinFunc(platform.LinuxTag | 58) + FnSockOpsCbFlagsSet = BuiltinFunc(platform.LinuxTag | 59) + FnMsgRedirectMap = BuiltinFunc(platform.LinuxTag | 60) + FnMsgApplyBytes = BuiltinFunc(platform.LinuxTag | 61) + FnMsgCorkBytes = BuiltinFunc(platform.LinuxTag | 62) + FnMsgPullData = BuiltinFunc(platform.LinuxTag | 63) + FnBind = BuiltinFunc(platform.LinuxTag | 64) + FnXdpAdjustTail = BuiltinFunc(platform.LinuxTag | 65) + FnSkbGetXfrmState = BuiltinFunc(platform.LinuxTag | 66) + FnGetStack = BuiltinFunc(platform.LinuxTag | 67) + FnSkbLoadBytesRelative = BuiltinFunc(platform.LinuxTag | 68) + FnFibLookup = BuiltinFunc(platform.LinuxTag | 69) + FnSockHashUpdate = BuiltinFunc(platform.LinuxTag | 70) + FnMsgRedirectHash = BuiltinFunc(platform.LinuxTag | 71) + FnSkRedirectHash = BuiltinFunc(platform.LinuxTag | 72) + FnLwtPushEncap = BuiltinFunc(platform.LinuxTag | 73) + FnLwtSeg6StoreBytes = BuiltinFunc(platform.LinuxTag | 74) + FnLwtSeg6AdjustSrh = BuiltinFunc(platform.LinuxTag | 75) + FnLwtSeg6Action = BuiltinFunc(platform.LinuxTag | 76) + FnRcRepeat = BuiltinFunc(platform.LinuxTag | 77) + FnRcKeydown = BuiltinFunc(platform.LinuxTag | 78) + FnSkbCgroupId = BuiltinFunc(platform.LinuxTag | 79) + FnGetCurrentCgroupId = BuiltinFunc(platform.LinuxTag | 80) + FnGetLocalStorage = BuiltinFunc(platform.LinuxTag | 81) + FnSkSelectReuseport = BuiltinFunc(platform.LinuxTag | 82) + FnSkbAncestorCgroupId = BuiltinFunc(platform.LinuxTag | 83) + FnSkLookupTcp = BuiltinFunc(platform.LinuxTag | 84) + FnSkLookupUdp = BuiltinFunc(platform.LinuxTag | 85) + FnSkRelease = BuiltinFunc(platform.LinuxTag | 86) + FnMapPushElem = BuiltinFunc(platform.LinuxTag | 87) + FnMapPopElem = 
BuiltinFunc(platform.LinuxTag | 88) + FnMapPeekElem = BuiltinFunc(platform.LinuxTag | 89) + FnMsgPushData = BuiltinFunc(platform.LinuxTag | 90) + FnMsgPopData = BuiltinFunc(platform.LinuxTag | 91) + FnRcPointerRel = BuiltinFunc(platform.LinuxTag | 92) + FnSpinLock = BuiltinFunc(platform.LinuxTag | 93) + FnSpinUnlock = BuiltinFunc(platform.LinuxTag | 94) + FnSkFullsock = BuiltinFunc(platform.LinuxTag | 95) + FnTcpSock = BuiltinFunc(platform.LinuxTag | 96) + FnSkbEcnSetCe = BuiltinFunc(platform.LinuxTag | 97) + FnGetListenerSock = BuiltinFunc(platform.LinuxTag | 98) + FnSkcLookupTcp = BuiltinFunc(platform.LinuxTag | 99) + FnTcpCheckSyncookie = BuiltinFunc(platform.LinuxTag | 100) + FnSysctlGetName = BuiltinFunc(platform.LinuxTag | 101) + FnSysctlGetCurrentValue = BuiltinFunc(platform.LinuxTag | 102) + FnSysctlGetNewValue = BuiltinFunc(platform.LinuxTag | 103) + FnSysctlSetNewValue = BuiltinFunc(platform.LinuxTag | 104) + FnStrtol = BuiltinFunc(platform.LinuxTag | 105) + FnStrtoul = BuiltinFunc(platform.LinuxTag | 106) + FnSkStorageGet = BuiltinFunc(platform.LinuxTag | 107) + FnSkStorageDelete = BuiltinFunc(platform.LinuxTag | 108) + FnSendSignal = BuiltinFunc(platform.LinuxTag | 109) + FnTcpGenSyncookie = BuiltinFunc(platform.LinuxTag | 110) + FnSkbOutput = BuiltinFunc(platform.LinuxTag | 111) + FnProbeReadUser = BuiltinFunc(platform.LinuxTag | 112) + FnProbeReadKernel = BuiltinFunc(platform.LinuxTag | 113) + FnProbeReadUserStr = BuiltinFunc(platform.LinuxTag | 114) + FnProbeReadKernelStr = BuiltinFunc(platform.LinuxTag | 115) + FnTcpSendAck = BuiltinFunc(platform.LinuxTag | 116) + FnSendSignalThread = BuiltinFunc(platform.LinuxTag | 117) + FnJiffies64 = BuiltinFunc(platform.LinuxTag | 118) + FnReadBranchRecords = BuiltinFunc(platform.LinuxTag | 119) + FnGetNsCurrentPidTgid = BuiltinFunc(platform.LinuxTag | 120) + FnXdpOutput = BuiltinFunc(platform.LinuxTag | 121) + FnGetNetnsCookie = BuiltinFunc(platform.LinuxTag | 122) + FnGetCurrentAncestorCgroupId = 
BuiltinFunc(platform.LinuxTag | 123) + FnSkAssign = BuiltinFunc(platform.LinuxTag | 124) + FnKtimeGetBootNs = BuiltinFunc(platform.LinuxTag | 125) + FnSeqPrintf = BuiltinFunc(platform.LinuxTag | 126) + FnSeqWrite = BuiltinFunc(platform.LinuxTag | 127) + FnSkCgroupId = BuiltinFunc(platform.LinuxTag | 128) + FnSkAncestorCgroupId = BuiltinFunc(platform.LinuxTag | 129) + FnRingbufOutput = BuiltinFunc(platform.LinuxTag | 130) + FnRingbufReserve = BuiltinFunc(platform.LinuxTag | 131) + FnRingbufSubmit = BuiltinFunc(platform.LinuxTag | 132) + FnRingbufDiscard = BuiltinFunc(platform.LinuxTag | 133) + FnRingbufQuery = BuiltinFunc(platform.LinuxTag | 134) + FnCsumLevel = BuiltinFunc(platform.LinuxTag | 135) + FnSkcToTcp6Sock = BuiltinFunc(platform.LinuxTag | 136) + FnSkcToTcpSock = BuiltinFunc(platform.LinuxTag | 137) + FnSkcToTcpTimewaitSock = BuiltinFunc(platform.LinuxTag | 138) + FnSkcToTcpRequestSock = BuiltinFunc(platform.LinuxTag | 139) + FnSkcToUdp6Sock = BuiltinFunc(platform.LinuxTag | 140) + FnGetTaskStack = BuiltinFunc(platform.LinuxTag | 141) + FnLoadHdrOpt = BuiltinFunc(platform.LinuxTag | 142) + FnStoreHdrOpt = BuiltinFunc(platform.LinuxTag | 143) + FnReserveHdrOpt = BuiltinFunc(platform.LinuxTag | 144) + FnInodeStorageGet = BuiltinFunc(platform.LinuxTag | 145) + FnInodeStorageDelete = BuiltinFunc(platform.LinuxTag | 146) + FnDPath = BuiltinFunc(platform.LinuxTag | 147) + FnCopyFromUser = BuiltinFunc(platform.LinuxTag | 148) + FnSnprintfBtf = BuiltinFunc(platform.LinuxTag | 149) + FnSeqPrintfBtf = BuiltinFunc(platform.LinuxTag | 150) + FnSkbCgroupClassid = BuiltinFunc(platform.LinuxTag | 151) + FnRedirectNeigh = BuiltinFunc(platform.LinuxTag | 152) + FnPerCpuPtr = BuiltinFunc(platform.LinuxTag | 153) + FnThisCpuPtr = BuiltinFunc(platform.LinuxTag | 154) + FnRedirectPeer = BuiltinFunc(platform.LinuxTag | 155) + FnTaskStorageGet = BuiltinFunc(platform.LinuxTag | 156) + FnTaskStorageDelete = BuiltinFunc(platform.LinuxTag | 157) + FnGetCurrentTaskBtf = 
BuiltinFunc(platform.LinuxTag | 158) + FnBprmOptsSet = BuiltinFunc(platform.LinuxTag | 159) + FnKtimeGetCoarseNs = BuiltinFunc(platform.LinuxTag | 160) + FnImaInodeHash = BuiltinFunc(platform.LinuxTag | 161) + FnSockFromFile = BuiltinFunc(platform.LinuxTag | 162) + FnCheckMtu = BuiltinFunc(platform.LinuxTag | 163) + FnForEachMapElem = BuiltinFunc(platform.LinuxTag | 164) + FnSnprintf = BuiltinFunc(platform.LinuxTag | 165) + FnSysBpf = BuiltinFunc(platform.LinuxTag | 166) + FnBtfFindByNameKind = BuiltinFunc(platform.LinuxTag | 167) + FnSysClose = BuiltinFunc(platform.LinuxTag | 168) + FnTimerInit = BuiltinFunc(platform.LinuxTag | 169) + FnTimerSetCallback = BuiltinFunc(platform.LinuxTag | 170) + FnTimerStart = BuiltinFunc(platform.LinuxTag | 171) + FnTimerCancel = BuiltinFunc(platform.LinuxTag | 172) + FnGetFuncIp = BuiltinFunc(platform.LinuxTag | 173) + FnGetAttachCookie = BuiltinFunc(platform.LinuxTag | 174) + FnTaskPtRegs = BuiltinFunc(platform.LinuxTag | 175) + FnGetBranchSnapshot = BuiltinFunc(platform.LinuxTag | 176) + FnTraceVprintk = BuiltinFunc(platform.LinuxTag | 177) + FnSkcToUnixSock = BuiltinFunc(platform.LinuxTag | 178) + FnKallsymsLookupName = BuiltinFunc(platform.LinuxTag | 179) + FnFindVma = BuiltinFunc(platform.LinuxTag | 180) + FnLoop = BuiltinFunc(platform.LinuxTag | 181) + FnStrncmp = BuiltinFunc(platform.LinuxTag | 182) + FnGetFuncArg = BuiltinFunc(platform.LinuxTag | 183) + FnGetFuncRet = BuiltinFunc(platform.LinuxTag | 184) + FnGetFuncArgCnt = BuiltinFunc(platform.LinuxTag | 185) + FnGetRetval = BuiltinFunc(platform.LinuxTag | 186) + FnSetRetval = BuiltinFunc(platform.LinuxTag | 187) + FnXdpGetBuffLen = BuiltinFunc(platform.LinuxTag | 188) + FnXdpLoadBytes = BuiltinFunc(platform.LinuxTag | 189) + FnXdpStoreBytes = BuiltinFunc(platform.LinuxTag | 190) + FnCopyFromUserTask = BuiltinFunc(platform.LinuxTag | 191) + FnSkbSetTstamp = BuiltinFunc(platform.LinuxTag | 192) + FnImaFileHash = BuiltinFunc(platform.LinuxTag | 193) + FnKptrXchg = 
BuiltinFunc(platform.LinuxTag | 194) + FnMapLookupPercpuElem = BuiltinFunc(platform.LinuxTag | 195) + FnSkcToMptcpSock = BuiltinFunc(platform.LinuxTag | 196) + FnDynptrFromMem = BuiltinFunc(platform.LinuxTag | 197) + FnRingbufReserveDynptr = BuiltinFunc(platform.LinuxTag | 198) + FnRingbufSubmitDynptr = BuiltinFunc(platform.LinuxTag | 199) + FnRingbufDiscardDynptr = BuiltinFunc(platform.LinuxTag | 200) + FnDynptrRead = BuiltinFunc(platform.LinuxTag | 201) + FnDynptrWrite = BuiltinFunc(platform.LinuxTag | 202) + FnDynptrData = BuiltinFunc(platform.LinuxTag | 203) + FnTcpRawGenSyncookieIpv4 = BuiltinFunc(platform.LinuxTag | 204) + FnTcpRawGenSyncookieIpv6 = BuiltinFunc(platform.LinuxTag | 205) + FnTcpRawCheckSyncookieIpv4 = BuiltinFunc(platform.LinuxTag | 206) + FnTcpRawCheckSyncookieIpv6 = BuiltinFunc(platform.LinuxTag | 207) + FnKtimeGetTaiNs = BuiltinFunc(platform.LinuxTag | 208) + FnUserRingbufDrain = BuiltinFunc(platform.LinuxTag | 209) + FnCgrpStorageGet = BuiltinFunc(platform.LinuxTag | 210) + FnCgrpStorageDelete = BuiltinFunc(platform.LinuxTag | 211) +) diff --git a/vendor/github.com/cilium/ebpf/asm/func_string.go b/vendor/github.com/cilium/ebpf/asm/func_string.go new file mode 100644 index 000000000..d5d624f09 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/asm/func_string.go @@ -0,0 +1,276 @@ +// Code generated by "stringer -output func_string.go -type=BuiltinFunc"; DO NOT EDIT. + +package asm + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. 
+ var x [1]struct{} + _ = x[FnUnspec-0] + _ = x[FnMapLookupElem-1] + _ = x[FnMapUpdateElem-2] + _ = x[FnMapDeleteElem-3] + _ = x[FnProbeRead-4] + _ = x[FnKtimeGetNs-5] + _ = x[FnTracePrintk-6] + _ = x[FnGetPrandomU32-7] + _ = x[FnGetSmpProcessorId-8] + _ = x[FnSkbStoreBytes-9] + _ = x[FnL3CsumReplace-10] + _ = x[FnL4CsumReplace-11] + _ = x[FnTailCall-12] + _ = x[FnCloneRedirect-13] + _ = x[FnGetCurrentPidTgid-14] + _ = x[FnGetCurrentUidGid-15] + _ = x[FnGetCurrentComm-16] + _ = x[FnGetCgroupClassid-17] + _ = x[FnSkbVlanPush-18] + _ = x[FnSkbVlanPop-19] + _ = x[FnSkbGetTunnelKey-20] + _ = x[FnSkbSetTunnelKey-21] + _ = x[FnPerfEventRead-22] + _ = x[FnRedirect-23] + _ = x[FnGetRouteRealm-24] + _ = x[FnPerfEventOutput-25] + _ = x[FnSkbLoadBytes-26] + _ = x[FnGetStackid-27] + _ = x[FnCsumDiff-28] + _ = x[FnSkbGetTunnelOpt-29] + _ = x[FnSkbSetTunnelOpt-30] + _ = x[FnSkbChangeProto-31] + _ = x[FnSkbChangeType-32] + _ = x[FnSkbUnderCgroup-33] + _ = x[FnGetHashRecalc-34] + _ = x[FnGetCurrentTask-35] + _ = x[FnProbeWriteUser-36] + _ = x[FnCurrentTaskUnderCgroup-37] + _ = x[FnSkbChangeTail-38] + _ = x[FnSkbPullData-39] + _ = x[FnCsumUpdate-40] + _ = x[FnSetHashInvalid-41] + _ = x[FnGetNumaNodeId-42] + _ = x[FnSkbChangeHead-43] + _ = x[FnXdpAdjustHead-44] + _ = x[FnProbeReadStr-45] + _ = x[FnGetSocketCookie-46] + _ = x[FnGetSocketUid-47] + _ = x[FnSetHash-48] + _ = x[FnSetsockopt-49] + _ = x[FnSkbAdjustRoom-50] + _ = x[FnRedirectMap-51] + _ = x[FnSkRedirectMap-52] + _ = x[FnSockMapUpdate-53] + _ = x[FnXdpAdjustMeta-54] + _ = x[FnPerfEventReadValue-55] + _ = x[FnPerfProgReadValue-56] + _ = x[FnGetsockopt-57] + _ = x[FnOverrideReturn-58] + _ = x[FnSockOpsCbFlagsSet-59] + _ = x[FnMsgRedirectMap-60] + _ = x[FnMsgApplyBytes-61] + _ = x[FnMsgCorkBytes-62] + _ = x[FnMsgPullData-63] + _ = x[FnBind-64] + _ = x[FnXdpAdjustTail-65] + _ = x[FnSkbGetXfrmState-66] + _ = x[FnGetStack-67] + _ = x[FnSkbLoadBytesRelative-68] + _ = x[FnFibLookup-69] + _ = x[FnSockHashUpdate-70] + _ = 
x[FnMsgRedirectHash-71] + _ = x[FnSkRedirectHash-72] + _ = x[FnLwtPushEncap-73] + _ = x[FnLwtSeg6StoreBytes-74] + _ = x[FnLwtSeg6AdjustSrh-75] + _ = x[FnLwtSeg6Action-76] + _ = x[FnRcRepeat-77] + _ = x[FnRcKeydown-78] + _ = x[FnSkbCgroupId-79] + _ = x[FnGetCurrentCgroupId-80] + _ = x[FnGetLocalStorage-81] + _ = x[FnSkSelectReuseport-82] + _ = x[FnSkbAncestorCgroupId-83] + _ = x[FnSkLookupTcp-84] + _ = x[FnSkLookupUdp-85] + _ = x[FnSkRelease-86] + _ = x[FnMapPushElem-87] + _ = x[FnMapPopElem-88] + _ = x[FnMapPeekElem-89] + _ = x[FnMsgPushData-90] + _ = x[FnMsgPopData-91] + _ = x[FnRcPointerRel-92] + _ = x[FnSpinLock-93] + _ = x[FnSpinUnlock-94] + _ = x[FnSkFullsock-95] + _ = x[FnTcpSock-96] + _ = x[FnSkbEcnSetCe-97] + _ = x[FnGetListenerSock-98] + _ = x[FnSkcLookupTcp-99] + _ = x[FnTcpCheckSyncookie-100] + _ = x[FnSysctlGetName-101] + _ = x[FnSysctlGetCurrentValue-102] + _ = x[FnSysctlGetNewValue-103] + _ = x[FnSysctlSetNewValue-104] + _ = x[FnStrtol-105] + _ = x[FnStrtoul-106] + _ = x[FnSkStorageGet-107] + _ = x[FnSkStorageDelete-108] + _ = x[FnSendSignal-109] + _ = x[FnTcpGenSyncookie-110] + _ = x[FnSkbOutput-111] + _ = x[FnProbeReadUser-112] + _ = x[FnProbeReadKernel-113] + _ = x[FnProbeReadUserStr-114] + _ = x[FnProbeReadKernelStr-115] + _ = x[FnTcpSendAck-116] + _ = x[FnSendSignalThread-117] + _ = x[FnJiffies64-118] + _ = x[FnReadBranchRecords-119] + _ = x[FnGetNsCurrentPidTgid-120] + _ = x[FnXdpOutput-121] + _ = x[FnGetNetnsCookie-122] + _ = x[FnGetCurrentAncestorCgroupId-123] + _ = x[FnSkAssign-124] + _ = x[FnKtimeGetBootNs-125] + _ = x[FnSeqPrintf-126] + _ = x[FnSeqWrite-127] + _ = x[FnSkCgroupId-128] + _ = x[FnSkAncestorCgroupId-129] + _ = x[FnRingbufOutput-130] + _ = x[FnRingbufReserve-131] + _ = x[FnRingbufSubmit-132] + _ = x[FnRingbufDiscard-133] + _ = x[FnRingbufQuery-134] + _ = x[FnCsumLevel-135] + _ = x[FnSkcToTcp6Sock-136] + _ = x[FnSkcToTcpSock-137] + _ = x[FnSkcToTcpTimewaitSock-138] + _ = x[FnSkcToTcpRequestSock-139] + _ = x[FnSkcToUdp6Sock-140] + 
_ = x[FnGetTaskStack-141] + _ = x[FnLoadHdrOpt-142] + _ = x[FnStoreHdrOpt-143] + _ = x[FnReserveHdrOpt-144] + _ = x[FnInodeStorageGet-145] + _ = x[FnInodeStorageDelete-146] + _ = x[FnDPath-147] + _ = x[FnCopyFromUser-148] + _ = x[FnSnprintfBtf-149] + _ = x[FnSeqPrintfBtf-150] + _ = x[FnSkbCgroupClassid-151] + _ = x[FnRedirectNeigh-152] + _ = x[FnPerCpuPtr-153] + _ = x[FnThisCpuPtr-154] + _ = x[FnRedirectPeer-155] + _ = x[FnTaskStorageGet-156] + _ = x[FnTaskStorageDelete-157] + _ = x[FnGetCurrentTaskBtf-158] + _ = x[FnBprmOptsSet-159] + _ = x[FnKtimeGetCoarseNs-160] + _ = x[FnImaInodeHash-161] + _ = x[FnSockFromFile-162] + _ = x[FnCheckMtu-163] + _ = x[FnForEachMapElem-164] + _ = x[FnSnprintf-165] + _ = x[FnSysBpf-166] + _ = x[FnBtfFindByNameKind-167] + _ = x[FnSysClose-168] + _ = x[FnTimerInit-169] + _ = x[FnTimerSetCallback-170] + _ = x[FnTimerStart-171] + _ = x[FnTimerCancel-172] + _ = x[FnGetFuncIp-173] + _ = x[FnGetAttachCookie-174] + _ = x[FnTaskPtRegs-175] + _ = x[FnGetBranchSnapshot-176] + _ = x[FnTraceVprintk-177] + _ = x[FnSkcToUnixSock-178] + _ = x[FnKallsymsLookupName-179] + _ = x[FnFindVma-180] + _ = x[FnLoop-181] + _ = x[FnStrncmp-182] + _ = x[FnGetFuncArg-183] + _ = x[FnGetFuncRet-184] + _ = x[FnGetFuncArgCnt-185] + _ = x[FnGetRetval-186] + _ = x[FnSetRetval-187] + _ = x[FnXdpGetBuffLen-188] + _ = x[FnXdpLoadBytes-189] + _ = x[FnXdpStoreBytes-190] + _ = x[FnCopyFromUserTask-191] + _ = x[FnSkbSetTstamp-192] + _ = x[FnImaFileHash-193] + _ = x[FnKptrXchg-194] + _ = x[FnMapLookupPercpuElem-195] + _ = x[FnSkcToMptcpSock-196] + _ = x[FnDynptrFromMem-197] + _ = x[FnRingbufReserveDynptr-198] + _ = x[FnRingbufSubmitDynptr-199] + _ = x[FnRingbufDiscardDynptr-200] + _ = x[FnDynptrRead-201] + _ = x[FnDynptrWrite-202] + _ = x[FnDynptrData-203] + _ = x[FnTcpRawGenSyncookieIpv4-204] + _ = x[FnTcpRawGenSyncookieIpv6-205] + _ = x[FnTcpRawCheckSyncookieIpv4-206] + _ = x[FnTcpRawCheckSyncookieIpv6-207] + _ = x[FnKtimeGetTaiNs-208] + _ = x[FnUserRingbufDrain-209] + _ = 
x[FnCgrpStorageGet-210] + _ = x[FnCgrpStorageDelete-211] + _ = x[WindowsFnMapLookupElem-268435457] + _ = x[WindowsFnMapUpdateElem-268435458] + _ = x[WindowsFnMapDeleteElem-268435459] + _ = x[WindowsFnMapLookupAndDeleteElem-268435460] + _ = x[WindowsFnTailCall-268435461] + _ = x[WindowsFnGetPrandomU32-268435462] + _ = x[WindowsFnKtimeGetBootNs-268435463] + _ = x[WindowsFnGetSmpProcessorId-268435464] + _ = x[WindowsFnKtimeGetNs-268435465] + _ = x[WindowsFnCsumDiff-268435466] + _ = x[WindowsFnRingbufOutput-268435467] + _ = x[WindowsFnTracePrintk2-268435468] + _ = x[WindowsFnTracePrintk3-268435469] + _ = x[WindowsFnTracePrintk4-268435470] + _ = x[WindowsFnTracePrintk5-268435471] + _ = x[WindowsFnMapPushElem-268435472] + _ = x[WindowsFnMapPopElem-268435473] + _ = x[WindowsFnMapPeekElem-268435474] + _ = x[WindowsFnGetCurrentPidTgid-268435475] + _ = x[WindowsFnGetCurrentLogonId-268435476] + _ = x[WindowsFnIsCurrentAdmin-268435477] + _ = x[WindowsFnMemcpy-268435478] + _ = x[WindowsFnMemcmp-268435479] + _ = x[WindowsFnMemset-268435480] + _ = x[WindowsFnMemmove-268435481] + _ = x[WindowsFnGetSocketCookie-268435482] + _ = x[WindowsFnStrncpyS-268435483] + _ = x[WindowsFnStrncatS-268435484] + _ = x[WindowsFnStrnlenS-268435485] + _ = x[WindowsFnKtimeGetBootMs-268435486] + _ = x[WindowsFnKtimeGetMs-268435487] +} + +const ( + _BuiltinFunc_name_0 = 
"FnUnspecFnMapLookupElemFnMapUpdateElemFnMapDeleteElemFnProbeReadFnKtimeGetNsFnTracePrintkFnGetPrandomU32FnGetSmpProcessorIdFnSkbStoreBytesFnL3CsumReplaceFnL4CsumReplaceFnTailCallFnCloneRedirectFnGetCurrentPidTgidFnGetCurrentUidGidFnGetCurrentCommFnGetCgroupClassidFnSkbVlanPushFnSkbVlanPopFnSkbGetTunnelKeyFnSkbSetTunnelKeyFnPerfEventReadFnRedirectFnGetRouteRealmFnPerfEventOutputFnSkbLoadBytesFnGetStackidFnCsumDiffFnSkbGetTunnelOptFnSkbSetTunnelOptFnSkbChangeProtoFnSkbChangeTypeFnSkbUnderCgroupFnGetHashRecalcFnGetCurrentTaskFnProbeWriteUserFnCurrentTaskUnderCgroupFnSkbChangeTailFnSkbPullDataFnCsumUpdateFnSetHashInvalidFnGetNumaNodeIdFnSkbChangeHeadFnXdpAdjustHeadFnProbeReadStrFnGetSocketCookieFnGetSocketUidFnSetHashFnSetsockoptFnSkbAdjustRoomFnRedirectMapFnSkRedirectMapFnSockMapUpdateFnXdpAdjustMetaFnPerfEventReadValueFnPerfProgReadValueFnGetsockoptFnOverrideReturnFnSockOpsCbFlagsSetFnMsgRedirectMapFnMsgApplyBytesFnMsgCorkBytesFnMsgPullDataFnBindFnXdpAdjustTailFnSkbGetXfrmStateFnGetStackFnSkbLoadBytesRelativeFnFibLookupFnSockHashUpdateFnMsgRedirectHashFnSkRedirectHashFnLwtPushEncapFnLwtSeg6StoreBytesFnLwtSeg6AdjustSrhFnLwtSeg6ActionFnRcRepeatFnRcKeydownFnSkbCgroupIdFnGetCurrentCgroupIdFnGetLocalStorageFnSkSelectReuseportFnSkbAncestorCgroupIdFnSkLookupTcpFnSkLookupUdpFnSkReleaseFnMapPushElemFnMapPopElemFnMapPeekElemFnMsgPushDataFnMsgPopDataFnRcPointerRelFnSpinLockFnSpinUnlockFnSkFullsockFnTcpSockFnSkbEcnSetCeFnGetListenerSockFnSkcLookupTcpFnTcpCheckSyncookieFnSysctlGetNameFnSysctlGetCurrentValueFnSysctlGetNewValueFnSysctlSetNewValueFnStrtolFnStrtoulFnSkStorageGetFnSkStorageDeleteFnSendSignalFnTcpGenSyncookieFnSkbOutputFnProbeReadUserFnProbeReadKernelFnProbeReadUserStrFnProbeReadKernelStrFnTcpSendAckFnSendSignalThreadFnJiffies64FnReadBranchRecordsFnGetNsCurrentPidTgidFnXdpOutputFnGetNetnsCookieFnGetCurrentAncestorCgroupIdFnSkAssignFnKtimeGetBootNsFnSeqPrintfFnSeqWriteFnSkCgroupIdFnSkAncestorCgroupIdFnRingbufOutputFnRingbufReserveFnRingbufSubmitFnRingbufDiscardFnRingbuf
QueryFnCsumLevelFnSkcToTcp6SockFnSkcToTcpSockFnSkcToTcpTimewaitSockFnSkcToTcpRequestSockFnSkcToUdp6SockFnGetTaskStackFnLoadHdrOptFnStoreHdrOptFnReserveHdrOptFnInodeStorageGetFnInodeStorageDeleteFnDPathFnCopyFromUserFnSnprintfBtfFnSeqPrintfBtfFnSkbCgroupClassidFnRedirectNeighFnPerCpuPtrFnThisCpuPtrFnRedirectPeerFnTaskStorageGetFnTaskStorageDeleteFnGetCurrentTaskBtfFnBprmOptsSetFnKtimeGetCoarseNsFnImaInodeHashFnSockFromFileFnCheckMtuFnForEachMapElemFnSnprintfFnSysBpfFnBtfFindByNameKindFnSysCloseFnTimerInitFnTimerSetCallbackFnTimerStartFnTimerCancelFnGetFuncIpFnGetAttachCookieFnTaskPtRegsFnGetBranchSnapshotFnTraceVprintkFnSkcToUnixSockFnKallsymsLookupNameFnFindVmaFnLoopFnStrncmpFnGetFuncArgFnGetFuncRetFnGetFuncArgCntFnGetRetvalFnSetRetvalFnXdpGetBuffLenFnXdpLoadBytesFnXdpStoreBytesFnCopyFromUserTaskFnSkbSetTstampFnImaFileHashFnKptrXchgFnMapLookupPercpuElemFnSkcToMptcpSockFnDynptrFromMemFnRingbufReserveDynptrFnRingbufSubmitDynptrFnRingbufDiscardDynptrFnDynptrReadFnDynptrWriteFnDynptrDataFnTcpRawGenSyncookieIpv4FnTcpRawGenSyncookieIpv6FnTcpRawCheckSyncookieIpv4FnTcpRawCheckSyncookieIpv6FnKtimeGetTaiNsFnUserRingbufDrainFnCgrpStorageGetFnCgrpStorageDelete" + _BuiltinFunc_name_1 = "WindowsFnMapLookupElemWindowsFnMapUpdateElemWindowsFnMapDeleteElemWindowsFnMapLookupAndDeleteElemWindowsFnTailCallWindowsFnGetPrandomU32WindowsFnKtimeGetBootNsWindowsFnGetSmpProcessorIdWindowsFnKtimeGetNsWindowsFnCsumDiffWindowsFnRingbufOutputWindowsFnTracePrintk2WindowsFnTracePrintk3WindowsFnTracePrintk4WindowsFnTracePrintk5WindowsFnMapPushElemWindowsFnMapPopElemWindowsFnMapPeekElemWindowsFnGetCurrentPidTgidWindowsFnGetCurrentLogonIdWindowsFnIsCurrentAdminWindowsFnMemcpyWindowsFnMemcmpWindowsFnMemsetWindowsFnMemmoveWindowsFnGetSocketCookieWindowsFnStrncpySWindowsFnStrncatSWindowsFnStrnlenSWindowsFnKtimeGetBootMsWindowsFnKtimeGetMs" +) + +var ( + _BuiltinFunc_index_0 = [...]uint16{0, 8, 23, 38, 53, 64, 76, 89, 104, 123, 138, 153, 168, 178, 193, 212, 230, 246, 264, 277, 289, 306, 323, 338, 348, 
363, 380, 394, 406, 416, 433, 450, 466, 481, 497, 512, 528, 544, 568, 583, 596, 608, 624, 639, 654, 669, 683, 700, 714, 723, 735, 750, 763, 778, 793, 808, 828, 847, 859, 875, 894, 910, 925, 939, 952, 958, 973, 990, 1000, 1022, 1033, 1049, 1066, 1082, 1096, 1115, 1133, 1148, 1158, 1169, 1182, 1202, 1219, 1238, 1259, 1272, 1285, 1296, 1309, 1321, 1334, 1347, 1359, 1373, 1383, 1395, 1407, 1416, 1429, 1446, 1460, 1479, 1494, 1517, 1536, 1555, 1563, 1572, 1586, 1603, 1615, 1632, 1643, 1658, 1675, 1693, 1713, 1725, 1743, 1754, 1773, 1794, 1805, 1821, 1849, 1859, 1875, 1886, 1896, 1908, 1928, 1943, 1959, 1974, 1990, 2004, 2015, 2030, 2044, 2066, 2087, 2102, 2116, 2128, 2141, 2156, 2173, 2193, 2200, 2214, 2227, 2241, 2259, 2274, 2285, 2297, 2311, 2327, 2346, 2365, 2378, 2396, 2410, 2424, 2434, 2450, 2460, 2468, 2487, 2497, 2508, 2526, 2538, 2551, 2562, 2579, 2591, 2610, 2624, 2639, 2659, 2668, 2674, 2683, 2695, 2707, 2722, 2733, 2744, 2759, 2773, 2788, 2806, 2820, 2833, 2843, 2864, 2880, 2895, 2917, 2938, 2960, 2972, 2985, 2997, 3021, 3045, 3071, 3097, 3112, 3130, 3146, 3165} + _BuiltinFunc_index_1 = [...]uint16{0, 22, 44, 66, 97, 114, 136, 159, 185, 204, 221, 243, 264, 285, 306, 327, 347, 366, 386, 412, 438, 461, 476, 491, 506, 522, 546, 563, 580, 597, 620, 639} +) + +func (i BuiltinFunc) String() string { + switch { + case i <= 211: + return _BuiltinFunc_name_0[_BuiltinFunc_index_0[i]:_BuiltinFunc_index_0[i+1]] + case 268435457 <= i && i <= 268435487: + i -= 268435457 + return _BuiltinFunc_name_1[_BuiltinFunc_index_1[i]:_BuiltinFunc_index_1[i+1]] + default: + return "BuiltinFunc(" + strconv.FormatInt(int64(i), 10) + ")" + } +} diff --git a/vendor/github.com/cilium/ebpf/asm/func_win.go b/vendor/github.com/cilium/ebpf/asm/func_win.go new file mode 100644 index 000000000..b016f0086 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/asm/func_win.go @@ -0,0 +1,44 @@ +// Code generated by internal/cmd/genwinfunctions.awk; DO NOT EDIT. 
+ +package asm + +// Code in this file is derived from eBPF for Windows, available under the MIT License. + +import ( + "github.com/cilium/ebpf/internal/platform" +) + +// Built-in functions (Windows). +const ( + WindowsFnMapLookupElem = BuiltinFunc(platform.WindowsTag | 1) + WindowsFnMapUpdateElem = BuiltinFunc(platform.WindowsTag | 2) + WindowsFnMapDeleteElem = BuiltinFunc(platform.WindowsTag | 3) + WindowsFnMapLookupAndDeleteElem = BuiltinFunc(platform.WindowsTag | 4) + WindowsFnTailCall = BuiltinFunc(platform.WindowsTag | 5) + WindowsFnGetPrandomU32 = BuiltinFunc(platform.WindowsTag | 6) + WindowsFnKtimeGetBootNs = BuiltinFunc(platform.WindowsTag | 7) + WindowsFnGetSmpProcessorId = BuiltinFunc(platform.WindowsTag | 8) + WindowsFnKtimeGetNs = BuiltinFunc(platform.WindowsTag | 9) + WindowsFnCsumDiff = BuiltinFunc(platform.WindowsTag | 10) + WindowsFnRingbufOutput = BuiltinFunc(platform.WindowsTag | 11) + WindowsFnTracePrintk2 = BuiltinFunc(platform.WindowsTag | 12) + WindowsFnTracePrintk3 = BuiltinFunc(platform.WindowsTag | 13) + WindowsFnTracePrintk4 = BuiltinFunc(platform.WindowsTag | 14) + WindowsFnTracePrintk5 = BuiltinFunc(platform.WindowsTag | 15) + WindowsFnMapPushElem = BuiltinFunc(platform.WindowsTag | 16) + WindowsFnMapPopElem = BuiltinFunc(platform.WindowsTag | 17) + WindowsFnMapPeekElem = BuiltinFunc(platform.WindowsTag | 18) + WindowsFnGetCurrentPidTgid = BuiltinFunc(platform.WindowsTag | 19) + WindowsFnGetCurrentLogonId = BuiltinFunc(platform.WindowsTag | 20) + WindowsFnIsCurrentAdmin = BuiltinFunc(platform.WindowsTag | 21) + WindowsFnMemcpy = BuiltinFunc(platform.WindowsTag | 22) + WindowsFnMemcmp = BuiltinFunc(platform.WindowsTag | 23) + WindowsFnMemset = BuiltinFunc(platform.WindowsTag | 24) + WindowsFnMemmove = BuiltinFunc(platform.WindowsTag | 25) + WindowsFnGetSocketCookie = BuiltinFunc(platform.WindowsTag | 26) + WindowsFnStrncpyS = BuiltinFunc(platform.WindowsTag | 27) + WindowsFnStrncatS = BuiltinFunc(platform.WindowsTag | 28) + 
WindowsFnStrnlenS = BuiltinFunc(platform.WindowsTag | 29) + WindowsFnKtimeGetBootMs = BuiltinFunc(platform.WindowsTag | 30) + WindowsFnKtimeGetMs = BuiltinFunc(platform.WindowsTag | 31) +) diff --git a/vendor/github.com/cilium/ebpf/asm/instruction.go b/vendor/github.com/cilium/ebpf/asm/instruction.go new file mode 100644 index 000000000..b2ce72ca8 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/asm/instruction.go @@ -0,0 +1,978 @@ +package asm + +import ( + "crypto/sha1" + "encoding/binary" + "encoding/hex" + "errors" + "fmt" + "io" + "math" + "sort" + "strings" + + "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/platform" + "github.com/cilium/ebpf/internal/sys" +) + +// InstructionSize is the size of a BPF instruction in bytes +const InstructionSize = 8 + +// RawInstructionOffset is an offset in units of raw BPF instructions. +type RawInstructionOffset uint64 + +var ErrUnreferencedSymbol = errors.New("unreferenced symbol") +var ErrUnsatisfiedMapReference = errors.New("unsatisfied map reference") +var ErrUnsatisfiedProgramReference = errors.New("unsatisfied program reference") + +// Bytes returns the offset of an instruction in bytes. +func (rio RawInstructionOffset) Bytes() uint64 { + return uint64(rio) * InstructionSize +} + +// Instruction is a single eBPF instruction. +type Instruction struct { + OpCode OpCode + Dst Register + Src Register + Offset int16 + Constant int64 + + // Metadata contains optional metadata about this instruction. + Metadata Metadata +} + +// Unmarshal decodes a BPF instruction. 
+func (ins *Instruction) Unmarshal(r io.Reader, bo binary.ByteOrder, platform string) error { + data := make([]byte, InstructionSize) + if _, err := io.ReadFull(r, data); err != nil { + return err + } + + ins.OpCode = OpCode(data[0]) + + regs := data[1] + switch bo { + case binary.LittleEndian: + ins.Dst, ins.Src = Register(regs&0xF), Register(regs>>4) + case binary.BigEndian: + ins.Dst, ins.Src = Register(regs>>4), Register(regs&0xf) + } + + ins.Offset = int16(bo.Uint16(data[2:4])) + + // Convert to int32 before widening to int64 + // to ensure the signed bit is carried over. + ins.Constant = int64(int32(bo.Uint32(data[4:8]))) + + if ins.IsBuiltinCall() { + if ins.Constant >= 0 { + // Leave negative constants from the instruction stream + // unchanged. These are sometimes used as placeholders for later + // patching. + // This relies on not having a valid platform tag with a high bit set. + fn, err := BuiltinFuncForPlatform(platform, uint32(ins.Constant)) + if err != nil { + return err + } + ins.Constant = int64(fn) + } + } else if ins.OpCode.Class().IsALU() { + switch ins.OpCode.ALUOp() { + case Div: + if ins.Offset == 1 { + ins.OpCode = ins.OpCode.SetALUOp(SDiv) + ins.Offset = 0 + } + case Mod: + if ins.Offset == 1 { + ins.OpCode = ins.OpCode.SetALUOp(SMod) + ins.Offset = 0 + } + case Mov: + switch ins.Offset { + case 8: + ins.OpCode = ins.OpCode.SetALUOp(MovSX8) + ins.Offset = 0 + case 16: + ins.OpCode = ins.OpCode.SetALUOp(MovSX16) + ins.Offset = 0 + case 32: + ins.OpCode = ins.OpCode.SetALUOp(MovSX32) + ins.Offset = 0 + } + } + } else if ins.OpCode.Class() == StXClass && + ins.OpCode.Mode() == AtomicMode { + // For atomic ops, part of the opcode is stored in the + // constant field. Shift over 8 bytes so we can OR with the actual opcode and + // apply `atomicMask` to avoid merging unknown bits that may be added in the future. 
+ ins.OpCode |= (OpCode((ins.Constant << 8)) & atomicMask) + } + + if !ins.OpCode.IsDWordLoad() { + return nil + } + + // Pull another instruction from the stream to retrieve the second + // half of the 64-bit immediate value. + if _, err := io.ReadFull(r, data); err != nil { + // No Wrap, to avoid io.EOF clash + return errors.New("64bit immediate is missing second half") + } + + // Require that all fields other than the value are zero. + if bo.Uint32(data[0:4]) != 0 { + return errors.New("64bit immediate has non-zero fields") + } + + cons1 := uint32(ins.Constant) + cons2 := int32(bo.Uint32(data[4:8])) + ins.Constant = int64(cons2)<<32 | int64(cons1) + + return nil +} + +// Marshal encodes a BPF instruction. +func (ins Instruction) Marshal(w io.Writer, bo binary.ByteOrder) (uint64, error) { + if ins.OpCode == InvalidOpCode { + return 0, errors.New("invalid opcode") + } + + isDWordLoad := ins.OpCode.IsDWordLoad() + + cons := int32(ins.Constant) + if isDWordLoad { + // Encode least significant 32bit first for 64bit operations. 
+ cons = int32(uint32(ins.Constant)) + } + + regs, err := newBPFRegisters(ins.Dst, ins.Src, bo) + if err != nil { + return 0, fmt.Errorf("can't marshal registers: %s", err) + } + + if ins.IsBuiltinCall() { + fn := BuiltinFunc(ins.Constant) + plat, value := platform.DecodeConstant(fn) + if plat != platform.Native { + return 0, fmt.Errorf("function %s (%s): %w", fn, plat, internal.ErrNotSupportedOnOS) + } + cons = int32(value) + } else if ins.OpCode.Class().IsALU() { + newOffset := int16(0) + switch ins.OpCode.ALUOp() { + case SDiv: + ins.OpCode = ins.OpCode.SetALUOp(Div) + newOffset = 1 + case SMod: + ins.OpCode = ins.OpCode.SetALUOp(Mod) + newOffset = 1 + case MovSX8: + ins.OpCode = ins.OpCode.SetALUOp(Mov) + newOffset = 8 + case MovSX16: + ins.OpCode = ins.OpCode.SetALUOp(Mov) + newOffset = 16 + case MovSX32: + ins.OpCode = ins.OpCode.SetALUOp(Mov) + newOffset = 32 + } + if newOffset != 0 && ins.Offset != 0 { + return 0, fmt.Errorf("extended ALU opcodes should have an .Offset of 0: %s", ins) + } + ins.Offset = newOffset + } else if atomic := ins.OpCode.AtomicOp(); atomic != InvalidAtomic { + ins.OpCode = ins.OpCode &^ atomicMask + ins.Constant = int64(atomic >> 8) + } + + op, err := ins.OpCode.bpfOpCode() + if err != nil { + return 0, err + } + + data := make([]byte, InstructionSize) + data[0] = op + data[1] = byte(regs) + bo.PutUint16(data[2:4], uint16(ins.Offset)) + bo.PutUint32(data[4:8], uint32(cons)) + if _, err := w.Write(data); err != nil { + return 0, err + } + + if !isDWordLoad { + return InstructionSize, nil + } + + // The first half of the second part of a double-wide instruction + // must be zero. The second half carries the value. + bo.PutUint32(data[0:4], 0) + bo.PutUint32(data[4:8], uint32(ins.Constant>>32)) + if _, err := w.Write(data); err != nil { + return 0, err + } + + return 2 * InstructionSize, nil +} + +// AssociateMap associates a Map with this Instruction. +// +// Implicitly clears the Instruction's Reference field. 
+// +// Returns an error if the Instruction is not a map load. +func (ins *Instruction) AssociateMap(m FDer) error { + if !ins.IsLoadFromMap() { + return errors.New("not a load from a map") + } + + ins.Metadata.Set(referenceMeta{}, nil) + ins.Metadata.Set(mapMeta{}, m) + + return nil +} + +// RewriteMapPtr changes an instruction to use a new map fd. +// +// Returns an error if the instruction doesn't load a map. +// +// Deprecated: use AssociateMap instead. If you cannot provide a Map, +// wrap an fd in a type implementing FDer. +func (ins *Instruction) RewriteMapPtr(fd int) error { + if !ins.IsLoadFromMap() { + return errors.New("not a load from a map") + } + + ins.encodeMapFD(fd) + + return nil +} + +func (ins *Instruction) encodeMapFD(fd int) { + // Preserve the offset value for direct map loads. + offset := uint64(ins.Constant) & (math.MaxUint32 << 32) + rawFd := uint64(uint32(fd)) + ins.Constant = int64(offset | rawFd) +} + +// MapPtr returns the map fd for this instruction. +// +// The result is undefined if the instruction is not a load from a map, +// see IsLoadFromMap. +// +// Deprecated: use Map() instead. +func (ins *Instruction) MapPtr() int { + // If there is a map associated with the instruction, return its FD. + if fd := ins.Metadata.Get(mapMeta{}); fd != nil { + return fd.(FDer).FD() + } + + // Fall back to the fd stored in the Constant field + return ins.mapFd() +} + +// mapFd returns the map file descriptor stored in the 32 least significant +// bits of ins' Constant field. +func (ins *Instruction) mapFd() int { + return int(int32(ins.Constant)) +} + +// RewriteMapOffset changes the offset of a direct load from a map. +// +// Returns an error if the instruction is not a direct load. 
+func (ins *Instruction) RewriteMapOffset(offset uint32) error { + if !ins.OpCode.IsDWordLoad() { + return fmt.Errorf("%s is not a 64 bit load", ins.OpCode) + } + + if ins.Src != PseudoMapValue { + return errors.New("not a direct load from a map") + } + + fd := uint64(ins.Constant) & math.MaxUint32 + ins.Constant = int64(uint64(offset)<<32 | fd) + return nil +} + +func (ins *Instruction) mapOffset() uint32 { + return uint32(uint64(ins.Constant) >> 32) +} + +// IsLoadFromMap returns true if the instruction loads from a map. +// +// This covers both loading the map pointer and direct map value loads. +func (ins *Instruction) IsLoadFromMap() bool { + return ins.OpCode == LoadImmOp(DWord) && (ins.Src == PseudoMapFD || ins.Src == PseudoMapValue) +} + +// IsFunctionCall returns true if the instruction calls another BPF function. +// +// This is not the same thing as a BPF helper call. +func (ins *Instruction) IsFunctionCall() bool { + return ins.OpCode.JumpOp() == Call && ins.Src == PseudoCall +} + +// IsKfuncCall returns true if the instruction calls a kfunc. +// +// This is not the same thing as a BPF helper call. +func (ins *Instruction) IsKfuncCall() bool { + return ins.OpCode.JumpOp() == Call && ins.Src == PseudoKfuncCall +} + +// IsLoadOfFunctionPointer returns true if the instruction loads a function pointer. +func (ins *Instruction) IsLoadOfFunctionPointer() bool { + return ins.OpCode.IsDWordLoad() && ins.Src == PseudoFunc +} + +// IsFunctionReference returns true if the instruction references another BPF +// function, either by invoking a Call jump operation or by loading a function +// pointer. +func (ins *Instruction) IsFunctionReference() bool { + return ins.IsFunctionCall() || ins.IsLoadOfFunctionPointer() +} + +// IsBuiltinCall returns true if the instruction is a built-in call, i.e. BPF helper call. 
+func (ins *Instruction) IsBuiltinCall() bool { + return ins.OpCode.JumpOp() == Call && ins.Src == R0 && ins.Dst == R0 +} + +// IsConstantLoad returns true if the instruction loads a constant of the +// given size. +func (ins *Instruction) IsConstantLoad(size Size) bool { + return ins.OpCode == LoadImmOp(size) && ins.Src == R0 && ins.Offset == 0 +} + +// Format implements fmt.Formatter. +func (ins Instruction) Format(f fmt.State, c rune) { + if c != 'v' { + fmt.Fprintf(f, "{UNRECOGNIZED: %c}", c) + return + } + + op := ins.OpCode + + if op == InvalidOpCode { + fmt.Fprint(f, "INVALID") + return + } + + // Omit trailing space for Exit + if op.JumpOp() == Exit { + fmt.Fprint(f, op) + return + } + + if ins.IsLoadFromMap() { + fd := ins.mapFd() + m := ins.Map() + switch ins.Src { + case PseudoMapFD: + if m != nil { + fmt.Fprintf(f, "LoadMapPtr dst: %s map: %s", ins.Dst, m) + } else { + fmt.Fprintf(f, "LoadMapPtr dst: %s fd: %d", ins.Dst, fd) + } + + case PseudoMapValue: + if m != nil { + fmt.Fprintf(f, "LoadMapValue dst: %s, map: %s off: %d", ins.Dst, m, ins.mapOffset()) + } else { + fmt.Fprintf(f, "LoadMapValue dst: %s, fd: %d off: %d", ins.Dst, fd, ins.mapOffset()) + } + } + + goto ref + } + + switch cls := op.Class(); { + case cls.isLoadOrStore(): + fmt.Fprintf(f, "%v ", op) + switch op.Mode() { + case ImmMode: + fmt.Fprintf(f, "dst: %s imm: %d", ins.Dst, ins.Constant) + case AbsMode: + fmt.Fprintf(f, "imm: %d", ins.Constant) + case IndMode: + fmt.Fprintf(f, "dst: %s src: %s imm: %d", ins.Dst, ins.Src, ins.Constant) + case MemMode, MemSXMode: + fmt.Fprintf(f, "dst: %s src: %s off: %d imm: %d", ins.Dst, ins.Src, ins.Offset, ins.Constant) + case AtomicMode: + fmt.Fprintf(f, "dst: %s src: %s off: %d", ins.Dst, ins.Src, ins.Offset) + } + + case cls.IsALU(): + fmt.Fprintf(f, "%v", op) + if op == Swap.Op(ImmSource) { + fmt.Fprintf(f, "%d", ins.Constant) + } + + fmt.Fprintf(f, " dst: %s ", ins.Dst) + switch { + case op.ALUOp() == Swap: + break + case op.Source() == 
ImmSource: + fmt.Fprintf(f, "imm: %d", ins.Constant) + default: + fmt.Fprintf(f, "src: %s", ins.Src) + } + + case cls.IsJump(): + fmt.Fprintf(f, "%v ", op) + switch jop := op.JumpOp(); jop { + case Call: + switch ins.Src { + case PseudoCall: + // bpf-to-bpf call + fmt.Fprint(f, ins.Constant) + case PseudoKfuncCall: + // kfunc call + fmt.Fprintf(f, "Kfunc(%d)", ins.Constant) + default: + fmt.Fprint(f, BuiltinFunc(ins.Constant)) + } + + case Ja: + if ins.OpCode.Class() == Jump32Class { + fmt.Fprintf(f, "imm: %d", ins.Constant) + } else { + fmt.Fprintf(f, "off: %d", ins.Offset) + } + + default: + fmt.Fprintf(f, "dst: %s off: %d ", ins.Dst, ins.Offset) + if op.Source() == ImmSource { + fmt.Fprintf(f, "imm: %d", ins.Constant) + } else { + fmt.Fprintf(f, "src: %s", ins.Src) + } + } + default: + fmt.Fprintf(f, "%v ", op) + } + +ref: + if ins.Reference() != "" { + fmt.Fprintf(f, " <%s>", ins.Reference()) + } +} + +func (ins Instruction) equal(other Instruction) bool { + return ins.OpCode == other.OpCode && + ins.Dst == other.Dst && + ins.Src == other.Src && + ins.Offset == other.Offset && + ins.Constant == other.Constant +} + +// Size returns the amount of bytes ins would occupy in binary form. +func (ins Instruction) Size() uint64 { + return uint64(InstructionSize * ins.OpCode.rawInstructions()) +} + +// WithMetadata sets the given Metadata on the Instruction. e.g. to copy +// Metadata from another Instruction when replacing it. +func (ins Instruction) WithMetadata(meta Metadata) Instruction { + ins.Metadata = meta + return ins +} + +type symbolMeta struct{} + +// WithSymbol marks the Instruction as a Symbol, which other Instructions +// can point to using corresponding calls to WithReference. +func (ins Instruction) WithSymbol(name string) Instruction { + ins.Metadata.Set(symbolMeta{}, name) + return ins +} + +// Sym creates a symbol. +// +// Deprecated: use WithSymbol instead. 
+func (ins Instruction) Sym(name string) Instruction { + return ins.WithSymbol(name) +} + +// Symbol returns the value ins has been marked with using WithSymbol, +// otherwise returns an empty string. A symbol is often an Instruction +// at the start of a function body. +func (ins Instruction) Symbol() string { + sym, _ := ins.Metadata.Get(symbolMeta{}).(string) + return sym +} + +type referenceMeta struct{} + +// WithReference makes ins reference another Symbol or map by name. +func (ins Instruction) WithReference(ref string) Instruction { + ins.Metadata.Set(referenceMeta{}, ref) + return ins +} + +// Reference returns the Symbol or map name referenced by ins, if any. +func (ins Instruction) Reference() string { + ref, _ := ins.Metadata.Get(referenceMeta{}).(string) + return ref +} + +type mapMeta struct{} + +// Map returns the Map referenced by ins, if any. +// An Instruction will contain a Map if e.g. it references an existing, +// pinned map that was opened during ELF loading. +func (ins Instruction) Map() FDer { + fd, _ := ins.Metadata.Get(mapMeta{}).(FDer) + return fd +} + +type sourceMeta struct{} + +// WithSource adds source information about the Instruction. +func (ins Instruction) WithSource(src fmt.Stringer) Instruction { + ins.Metadata.Set(sourceMeta{}, src) + return ins +} + +// Source returns source information about the Instruction. The field is +// present when the compiler emits BTF line info about the Instruction and +// usually contains the line of source code responsible for it. +func (ins Instruction) Source() fmt.Stringer { + str, _ := ins.Metadata.Get(sourceMeta{}).(fmt.Stringer) + return str +} + +// A Comment can be passed to Instruction.WithSource to add a comment +// to an instruction. +type Comment string + +func (s Comment) String() string { + return string(s) +} + +// FDer represents a resource tied to an underlying file descriptor. +// Used as a stand-in for e.g. 
ebpf.Map since that type cannot be +// imported here and FD() is the only method we rely on. +type FDer interface { + FD() int +} + +// Instructions is an eBPF program. +type Instructions []Instruction + +// AppendInstructions decodes [Instruction] from r and appends them to insns. +func AppendInstructions(insns Instructions, r io.Reader, bo binary.ByteOrder, platform string) (Instructions, error) { + var offset uint64 + for { + var ins Instruction + err := ins.Unmarshal(r, bo, platform) + if errors.Is(err, io.EOF) { + break + } + if err != nil { + return nil, fmt.Errorf("offset %d: %w", offset, err) + } + + insns = append(insns, ins) + offset += ins.Size() + } + + return insns, nil +} + +// Name returns the name of the function insns belongs to, if any. +func (insns Instructions) Name() string { + if len(insns) == 0 { + return "" + } + return insns[0].Symbol() +} + +func (insns Instructions) String() string { + return fmt.Sprint(insns) +} + +// Size returns the amount of bytes insns would occupy in binary form. +func (insns Instructions) Size() uint64 { + var sum uint64 + for _, ins := range insns { + sum += ins.Size() + } + return sum +} + +// AssociateMap updates all Instructions that Reference the given symbol +// to point to an existing Map m instead. +// +// Returns ErrUnreferencedSymbol error if no references to symbol are found +// in insns. If symbol is anything else than the symbol name of map (e.g. +// a bpf2bpf subprogram), an error is returned. +func (insns Instructions) AssociateMap(symbol string, m FDer) error { + if symbol == "" { + return errors.New("empty symbol") + } + + var found bool + for i := range insns { + ins := &insns[i] + if ins.Reference() != symbol { + continue + } + + if err := ins.AssociateMap(m); err != nil { + return err + } + + found = true + } + + if !found { + return fmt.Errorf("symbol %s: %w", symbol, ErrUnreferencedSymbol) + } + + return nil +} + +// RewriteMapPtr rewrites all loads of a specific map pointer to a new fd. 
+// +// Returns ErrUnreferencedSymbol if the symbol isn't used. +// +// Deprecated: use AssociateMap instead. +func (insns Instructions) RewriteMapPtr(symbol string, fd int) error { + if symbol == "" { + return errors.New("empty symbol") + } + + var found bool + for i := range insns { + ins := &insns[i] + if ins.Reference() != symbol { + continue + } + + if !ins.IsLoadFromMap() { + return errors.New("not a load from a map") + } + + ins.encodeMapFD(fd) + + found = true + } + + if !found { + return fmt.Errorf("symbol %s: %w", symbol, ErrUnreferencedSymbol) + } + + return nil +} + +// SymbolOffsets returns the set of symbols and their offset in +// the instructions. +func (insns Instructions) SymbolOffsets() (map[string]int, error) { + offsets := make(map[string]int) + + for i, ins := range insns { + if ins.Symbol() == "" { + continue + } + + if _, ok := offsets[ins.Symbol()]; ok { + return nil, fmt.Errorf("duplicate symbol %s", ins.Symbol()) + } + + offsets[ins.Symbol()] = i + } + + return offsets, nil +} + +// FunctionReferences returns a set of symbol names these Instructions make +// bpf-to-bpf calls to. +func (insns Instructions) FunctionReferences() []string { + calls := make(map[string]struct{}) + for _, ins := range insns { + if ins.Constant != -1 { + // BPF-to-BPF calls have -1 constants. + continue + } + + if ins.Reference() == "" { + continue + } + + if !ins.IsFunctionReference() { + continue + } + + calls[ins.Reference()] = struct{}{} + } + + result := make([]string, 0, len(calls)) + for call := range calls { + result = append(result, call) + } + + sort.Strings(result) + return result +} + +// ReferenceOffsets returns the set of references and their offset in +// the instructions. 
+func (insns Instructions) ReferenceOffsets() map[string][]int { + offsets := make(map[string][]int) + + for i, ins := range insns { + if ins.Reference() == "" { + continue + } + + offsets[ins.Reference()] = append(offsets[ins.Reference()], i) + } + + return offsets +} + +// Format implements fmt.Formatter. +// +// You can control indentation of symbols by +// specifying a width. Setting a precision controls the indentation of +// instructions. +// The default character is a tab, which can be overridden by specifying +// the ' ' space flag. +func (insns Instructions) Format(f fmt.State, c rune) { + if c != 's' && c != 'v' { + fmt.Fprintf(f, "{UNKNOWN FORMAT '%c'}", c) + return + } + + // Precision is better in this case, because it allows + // specifying 0 padding easily. + padding, ok := f.Precision() + if !ok { + padding = 1 + } + + indent := strings.Repeat("\t", padding) + if f.Flag(' ') { + indent = strings.Repeat(" ", padding) + } + + symPadding, ok := f.Width() + if !ok { + symPadding = padding - 1 + } + if symPadding < 0 { + symPadding = 0 + } + + symIndent := strings.Repeat("\t", symPadding) + if f.Flag(' ') { + symIndent = strings.Repeat(" ", symPadding) + } + + // Guess how many digits we need at most, by assuming that all instructions + // are double wide. + highestOffset := len(insns) * 2 + offsetWidth := int(math.Ceil(math.Log10(float64(highestOffset)))) + + iter := insns.Iterate() + for iter.Next() { + if iter.Ins.Symbol() != "" { + fmt.Fprintf(f, "%s%s:\n", symIndent, iter.Ins.Symbol()) + } + if src := iter.Ins.Source(); src != nil { + line := strings.TrimSpace(src.String()) + if line != "" { + fmt.Fprintf(f, "%s%*s; %s\n", indent, offsetWidth, " ", line) + } + } + fmt.Fprintf(f, "%s%*d: %v\n", indent, offsetWidth, iter.Offset, iter.Ins) + } +} + +// Marshal encodes a BPF program into the kernel format. +// +// insns may be modified if there are unresolved jumps or bpf2bpf calls. 
+// +// Returns ErrUnsatisfiedProgramReference if there is a Reference Instruction +// without a matching Symbol Instruction within insns. +func (insns Instructions) Marshal(w io.Writer, bo binary.ByteOrder) error { + if err := insns.encodeFunctionReferences(); err != nil { + return err + } + + if err := insns.encodeMapPointers(); err != nil { + return err + } + + for i, ins := range insns { + if _, err := ins.Marshal(w, bo); err != nil { + return fmt.Errorf("instruction %d: %w", i, err) + } + } + return nil +} + +// Tag calculates the kernel tag for a series of instructions. +// +// It mirrors bpf_prog_calc_tag in the kernel and so can be compared +// to ProgramInfo.Tag to figure out whether a loaded program matches +// certain instructions. +func (insns Instructions) Tag(bo binary.ByteOrder) (string, error) { + h := sha1.New() + for i, ins := range insns { + if ins.IsLoadFromMap() { + ins.Constant = 0 + } + _, err := ins.Marshal(h, bo) + if err != nil { + return "", fmt.Errorf("instruction %d: %w", i, err) + } + } + return hex.EncodeToString(h.Sum(nil)[:sys.BPF_TAG_SIZE]), nil +} + +// encodeFunctionReferences populates the Offset (or Constant, depending on +// the instruction type) field of instructions with a Reference field to point +// to the offset of the corresponding instruction with a matching Symbol field. +// +// Only Reference Instructions that are either jumps or BPF function references +// (calls or function pointer loads) are populated. +// +// Returns ErrUnsatisfiedProgramReference if there is a Reference Instruction +// without at least one corresponding Symbol Instruction within insns. +func (insns Instructions) encodeFunctionReferences() error { + // Index the offsets of instructions tagged as a symbol. 
+ symbolOffsets := make(map[string]RawInstructionOffset) + iter := insns.Iterate() + for iter.Next() { + ins := iter.Ins + + if ins.Symbol() == "" { + continue + } + + if _, ok := symbolOffsets[ins.Symbol()]; ok { + return fmt.Errorf("duplicate symbol %s", ins.Symbol()) + } + + symbolOffsets[ins.Symbol()] = iter.Offset + } + + // Find all instructions tagged as references to other symbols. + // Depending on the instruction type, populate their constant or offset + // fields to point to the symbol they refer to within the insn stream. + iter = insns.Iterate() + for iter.Next() { + i := iter.Index + offset := iter.Offset + ins := iter.Ins + + if ins.Reference() == "" { + continue + } + + switch { + case ins.IsFunctionReference() && ins.Constant == -1, + ins.OpCode == Ja.opCode(Jump32Class, ImmSource) && ins.Constant == -1: + symOffset, ok := symbolOffsets[ins.Reference()] + if !ok { + return fmt.Errorf("%s at insn %d: symbol %q: %w", ins.OpCode, i, ins.Reference(), ErrUnsatisfiedProgramReference) + } + + ins.Constant = int64(symOffset - offset - 1) + + case ins.OpCode.Class().IsJump() && ins.Offset == -1: + symOffset, ok := symbolOffsets[ins.Reference()] + if !ok { + return fmt.Errorf("%s at insn %d: symbol %q: %w", ins.OpCode, i, ins.Reference(), ErrUnsatisfiedProgramReference) + } + + ins.Offset = int16(symOffset - offset - 1) + } + } + + return nil +} + +// encodeMapPointers finds all Map Instructions and encodes their FDs +// into their Constant fields. +func (insns Instructions) encodeMapPointers() error { + iter := insns.Iterate() + for iter.Next() { + ins := iter.Ins + + if !ins.IsLoadFromMap() { + continue + } + + m := ins.Map() + if m == nil { + continue + } + + fd := m.FD() + if fd < 0 { + return fmt.Errorf("map %s: %w", m, sys.ErrClosedFd) + } + + ins.encodeMapFD(m.FD()) + } + + return nil +} + +// Iterate allows iterating a BPF program while keeping track of +// various offsets. +// +// Modifying the instruction slice will lead to undefined behaviour. 
+func (insns Instructions) Iterate() *InstructionIterator { + return &InstructionIterator{insns: insns} +} + +// InstructionIterator iterates over a BPF program. +type InstructionIterator struct { + insns Instructions + // The instruction in question. + Ins *Instruction + // The index of the instruction in the original instruction slice. + Index int + // The offset of the instruction in raw BPF instructions. This accounts + // for double-wide instructions. + Offset RawInstructionOffset +} + +// Next returns true as long as there are any instructions remaining. +func (iter *InstructionIterator) Next() bool { + if len(iter.insns) == 0 { + return false + } + + if iter.Ins != nil { + iter.Index++ + iter.Offset += RawInstructionOffset(iter.Ins.OpCode.rawInstructions()) + } + iter.Ins = &iter.insns[0] + iter.insns = iter.insns[1:] + return true +} + +type bpfRegisters uint8 + +func newBPFRegisters(dst, src Register, bo binary.ByteOrder) (bpfRegisters, error) { + switch bo { + case binary.LittleEndian: + return bpfRegisters((src << 4) | (dst & 0xF)), nil + case binary.BigEndian: + return bpfRegisters((dst << 4) | (src & 0xF)), nil + default: + return 0, fmt.Errorf("unrecognized ByteOrder %T", bo) + } +} + +// IsUnreferencedSymbol returns true if err was caused by +// an unreferenced symbol. +// +// Deprecated: use errors.Is(err, asm.ErrUnreferencedSymbol). +func IsUnreferencedSymbol(err error) bool { + return errors.Is(err, ErrUnreferencedSymbol) +} diff --git a/vendor/github.com/cilium/ebpf/asm/jump.go b/vendor/github.com/cilium/ebpf/asm/jump.go new file mode 100644 index 000000000..a14bc4c89 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/asm/jump.go @@ -0,0 +1,135 @@ +package asm + +//go:generate go tool stringer -output jump_string.go -type=JumpOp + +// JumpOp affect control flow. 
+// +// msb lsb +// +----+-+---+ +// |OP |s|cls| +// +----+-+---+ +type JumpOp uint8 + +const jumpMask OpCode = 0xf0 + +const ( + // InvalidJumpOp is returned by getters when invoked + // on non branch OpCodes + InvalidJumpOp JumpOp = 0xff + // Ja jumps by offset unconditionally + Ja JumpOp = 0x00 + // JEq jumps by offset if r == imm + JEq JumpOp = 0x10 + // JGT jumps by offset if r > imm + JGT JumpOp = 0x20 + // JGE jumps by offset if r >= imm + JGE JumpOp = 0x30 + // JSet jumps by offset if r & imm + JSet JumpOp = 0x40 + // JNE jumps by offset if r != imm + JNE JumpOp = 0x50 + // JSGT jumps by offset if signed r > signed imm + JSGT JumpOp = 0x60 + // JSGE jumps by offset if signed r >= signed imm + JSGE JumpOp = 0x70 + // Call builtin or user defined function from imm + Call JumpOp = 0x80 + // Exit ends execution, with value in r0 + Exit JumpOp = 0x90 + // JLT jumps by offset if r < imm + JLT JumpOp = 0xa0 + // JLE jumps by offset if r <= imm + JLE JumpOp = 0xb0 + // JSLT jumps by offset if signed r < signed imm + JSLT JumpOp = 0xc0 + // JSLE jumps by offset if signed r <= signed imm + JSLE JumpOp = 0xd0 +) + +// Return emits an exit instruction. +// +// Requires a return value in R0. +func Return() Instruction { + return Instruction{ + OpCode: OpCode(JumpClass).SetJumpOp(Exit), + } +} + +// Op returns the OpCode for a given jump source. +func (op JumpOp) Op(source Source) OpCode { + return OpCode(JumpClass).SetJumpOp(op).SetSource(source) +} + +// Imm compares 64 bit dst to 64 bit value (sign extended), and adjusts PC by offset if the condition is fulfilled. +func (op JumpOp) Imm(dst Register, value int32, label string) Instruction { + return Instruction{ + OpCode: op.opCode(JumpClass, ImmSource), + Dst: dst, + Offset: -1, + Constant: int64(value), + }.WithReference(label) +} + +// Imm32 compares 32 bit dst to 32 bit value, and adjusts PC by offset if the condition is fulfilled. +// Requires kernel 5.1. 
+func (op JumpOp) Imm32(dst Register, value int32, label string) Instruction { + return Instruction{ + OpCode: op.opCode(Jump32Class, ImmSource), + Dst: dst, + Offset: -1, + Constant: int64(value), + }.WithReference(label) +} + +// Reg compares 64 bit dst to 64 bit src, and adjusts PC by offset if the condition is fulfilled. +func (op JumpOp) Reg(dst, src Register, label string) Instruction { + return Instruction{ + OpCode: op.opCode(JumpClass, RegSource), + Dst: dst, + Src: src, + Offset: -1, + }.WithReference(label) +} + +// Reg32 compares 32 bit dst to 32 bit src, and adjusts PC by offset if the condition is fulfilled. +// Requires kernel 5.1. +func (op JumpOp) Reg32(dst, src Register, label string) Instruction { + return Instruction{ + OpCode: op.opCode(Jump32Class, RegSource), + Dst: dst, + Src: src, + Offset: -1, + }.WithReference(label) +} + +func (op JumpOp) opCode(class Class, source Source) OpCode { + if op == Exit || op == Call { + return InvalidOpCode + } + + return OpCode(class).SetJumpOp(op).SetSource(source) +} + +// LongJump returns a jump always instruction with a range of [-2^31, 2^31 - 1]. +func LongJump(label string) Instruction { + return Instruction{ + OpCode: Ja.opCode(Jump32Class, ImmSource), + Constant: -1, + }.WithReference(label) +} + +// Label adjusts PC to the address of the label. +func (op JumpOp) Label(label string) Instruction { + if op == Call { + return Instruction{ + OpCode: OpCode(JumpClass).SetJumpOp(Call), + Src: PseudoCall, + Constant: -1, + }.WithReference(label) + } + + return Instruction{ + OpCode: OpCode(JumpClass).SetJumpOp(op), + Offset: -1, + }.WithReference(label) +} diff --git a/vendor/github.com/cilium/ebpf/asm/jump_string.go b/vendor/github.com/cilium/ebpf/asm/jump_string.go new file mode 100644 index 000000000..85a4aaffa --- /dev/null +++ b/vendor/github.com/cilium/ebpf/asm/jump_string.go @@ -0,0 +1,53 @@ +// Code generated by "stringer -output jump_string.go -type=JumpOp"; DO NOT EDIT. 
+ +package asm + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[InvalidJumpOp-255] + _ = x[Ja-0] + _ = x[JEq-16] + _ = x[JGT-32] + _ = x[JGE-48] + _ = x[JSet-64] + _ = x[JNE-80] + _ = x[JSGT-96] + _ = x[JSGE-112] + _ = x[Call-128] + _ = x[Exit-144] + _ = x[JLT-160] + _ = x[JLE-176] + _ = x[JSLT-192] + _ = x[JSLE-208] +} + +const _JumpOp_name = "JaJEqJGTJGEJSetJNEJSGTJSGECallExitJLTJLEJSLTJSLEInvalidJumpOp" + +var _JumpOp_map = map[JumpOp]string{ + 0: _JumpOp_name[0:2], + 16: _JumpOp_name[2:5], + 32: _JumpOp_name[5:8], + 48: _JumpOp_name[8:11], + 64: _JumpOp_name[11:15], + 80: _JumpOp_name[15:18], + 96: _JumpOp_name[18:22], + 112: _JumpOp_name[22:26], + 128: _JumpOp_name[26:30], + 144: _JumpOp_name[30:34], + 160: _JumpOp_name[34:37], + 176: _JumpOp_name[37:40], + 192: _JumpOp_name[40:44], + 208: _JumpOp_name[44:48], + 255: _JumpOp_name[48:61], +} + +func (i JumpOp) String() string { + if str, ok := _JumpOp_map[i]; ok { + return str + } + return "JumpOp(" + strconv.FormatInt(int64(i), 10) + ")" +} diff --git a/vendor/github.com/cilium/ebpf/asm/load_store.go b/vendor/github.com/cilium/ebpf/asm/load_store.go new file mode 100644 index 000000000..a32a9b318 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/asm/load_store.go @@ -0,0 +1,336 @@ +package asm + +import "fmt" + +//go:generate go tool stringer -output load_store_string.go -type=Mode,Size + +// Mode for load and store operations +// +// msb lsb +// +---+--+---+ +// |MDE|sz|cls| +// +---+--+---+ +type Mode uint8 + +const modeMask OpCode = 0xe0 + +const ( + // InvalidMode is returned by getters when invoked + // on non load / store OpCodes + InvalidMode Mode = 0xff + // ImmMode - immediate value + ImmMode Mode = 0x00 + // AbsMode - immediate value + offset + AbsMode Mode = 0x20 + // IndMode - indirect (imm+src) + IndMode Mode = 0x40 + // MemMode - load 
from memory + MemMode Mode = 0x60 + // MemSXMode - load from memory, sign extension + MemSXMode Mode = 0x80 + // AtomicMode - add atomically across processors. + AtomicMode Mode = 0xc0 +) + +const atomicMask OpCode = 0x0001_ff00 + +type AtomicOp uint32 + +const ( + InvalidAtomic AtomicOp = 0xffff_ffff + + // AddAtomic - add src to memory address dst atomically + AddAtomic AtomicOp = AtomicOp(Add) << 8 + // FetchAdd - add src to memory address dst atomically, store result in src + FetchAdd AtomicOp = AddAtomic | fetch + // AndAtomic - bitwise AND src with memory address at dst atomically + AndAtomic AtomicOp = AtomicOp(And) << 8 + // FetchAnd - bitwise AND src with memory address at dst atomically, store result in src + FetchAnd AtomicOp = AndAtomic | fetch + // OrAtomic - bitwise OR src with memory address at dst atomically + OrAtomic AtomicOp = AtomicOp(Or) << 8 + // FetchOr - bitwise OR src with memory address at dst atomically, store result in src + FetchOr AtomicOp = OrAtomic | fetch + // XorAtomic - bitwise XOR src with memory address at dst atomically + XorAtomic AtomicOp = AtomicOp(Xor) << 8 + // FetchXor - bitwise XOR src with memory address at dst atomically, store result in src + FetchXor AtomicOp = XorAtomic | fetch + + // Xchg - atomically exchange the old value with the new value + // + // src gets populated with the old value of *(size *)(dst + offset). + Xchg AtomicOp = 0x0000_e000 | fetch + // CmpXchg - atomically compare and exchange the old value with the new value + // + // Compares R0 and *(size *)(dst + offset), writes src to *(size *)(dst + offset) on match. + // R0 gets populated with the old value of *(size *)(dst + offset), even if no exchange occurs. 
+ CmpXchg AtomicOp = 0x0000_f000 | fetch + + // fetch modifier for copy-modify-write atomics + fetch AtomicOp = 0x0000_0100 + // loadAcquire - atomically load with acquire semantics + loadAcquire AtomicOp = 0x0001_0000 + // storeRelease - atomically store with release semantics + storeRelease AtomicOp = 0x0001_1000 +) + +func (op AtomicOp) String() string { + var name string + switch op { + case AddAtomic, AndAtomic, OrAtomic, XorAtomic: + name = ALUOp(op >> 8).String() + case FetchAdd, FetchAnd, FetchOr, FetchXor: + name = "Fetch" + ALUOp((op^fetch)>>8).String() + case Xchg: + name = "Xchg" + case CmpXchg: + name = "CmpXchg" + case loadAcquire: + name = "LdAcq" + case storeRelease: + name = "StRel" + default: + name = fmt.Sprintf("AtomicOp(%#x)", uint32(op)) + } + + return name +} + +func (op AtomicOp) OpCode(size Size) OpCode { + switch op { + case AddAtomic, AndAtomic, OrAtomic, XorAtomic, + FetchAdd, FetchAnd, FetchOr, FetchXor, + Xchg, CmpXchg: + switch size { + case Byte, Half: + // 8-bit and 16-bit atomic copy-modify-write atomics are not supported + return InvalidOpCode + } + } + + return OpCode(StXClass).SetMode(AtomicMode).SetSize(size).SetAtomicOp(op) +} + +// Mem emits `*(size *)(dst + offset) (op) src`. +func (op AtomicOp) Mem(dst, src Register, size Size, offset int16) Instruction { + return Instruction{ + OpCode: op.OpCode(size), + Dst: dst, + Src: src, + Offset: offset, + } +} + +// Emits `lock-acquire dst = *(size *)(src + offset)`. +func LoadAcquire(dst, src Register, size Size, offset int16) Instruction { + return Instruction{ + OpCode: loadAcquire.OpCode(size), + Dst: dst, + Src: src, + Offset: offset, + } +} + +// Emits `lock-release *(size *)(dst + offset) = src`. 
+func StoreRelease(dst, src Register, size Size, offset int16) Instruction { + return Instruction{ + OpCode: storeRelease.OpCode(size), + Dst: dst, + Src: src, + Offset: offset, + } +} + +// Size of load and store operations +// +// msb lsb +// +---+--+---+ +// |mde|SZ|cls| +// +---+--+---+ +type Size uint8 + +const sizeMask OpCode = 0x18 + +const ( + // InvalidSize is returned by getters when invoked + // on non load / store OpCodes + InvalidSize Size = 0xff + // DWord - double word; 64 bits + DWord Size = 0x18 + // Word - word; 32 bits + Word Size = 0x00 + // Half - half-word; 16 bits + Half Size = 0x08 + // Byte - byte; 8 bits + Byte Size = 0x10 +) + +// Sizeof returns the size in bytes. +func (s Size) Sizeof() int { + switch s { + case DWord: + return 8 + case Word: + return 4 + case Half: + return 2 + case Byte: + return 1 + default: + return -1 + } +} + +// LoadMemOp returns the OpCode to load a value of given size from memory. +func LoadMemOp(size Size) OpCode { + return OpCode(LdXClass).SetMode(MemMode).SetSize(size) +} + +// LoadMemSXOp returns the OpCode to load a value of given size from memory sign extended. +func LoadMemSXOp(size Size) OpCode { + return OpCode(LdXClass).SetMode(MemSXMode).SetSize(size) +} + +// LoadMem emits `dst = *(size *)(src + offset)`. +func LoadMem(dst, src Register, offset int16, size Size) Instruction { + return Instruction{ + OpCode: LoadMemOp(size), + Dst: dst, + Src: src, + Offset: offset, + } +} + +// LoadMemSX emits `dst = *(size *)(src + offset)` but sign extends dst. +func LoadMemSX(dst, src Register, offset int16, size Size) Instruction { + if size == DWord { + return Instruction{OpCode: InvalidOpCode} + } + + return Instruction{ + OpCode: LoadMemSXOp(size), + Dst: dst, + Src: src, + Offset: offset, + } +} + +// LoadImmOp returns the OpCode to load an immediate of given size. +// +// As of kernel 4.20, only DWord size is accepted. 
+func LoadImmOp(size Size) OpCode { + return OpCode(LdClass).SetMode(ImmMode).SetSize(size) +} + +// LoadImm emits `dst = (size)value`. +// +// As of kernel 4.20, only DWord size is accepted. +func LoadImm(dst Register, value int64, size Size) Instruction { + return Instruction{ + OpCode: LoadImmOp(size), + Dst: dst, + Constant: value, + } +} + +// LoadMapPtr stores a pointer to a map in dst. +func LoadMapPtr(dst Register, fd int) Instruction { + if fd < 0 { + return Instruction{OpCode: InvalidOpCode} + } + + return Instruction{ + OpCode: LoadImmOp(DWord), + Dst: dst, + Src: PseudoMapFD, + Constant: int64(uint32(fd)), + } +} + +// LoadMapValue stores a pointer to the value at a certain offset of a map. +func LoadMapValue(dst Register, fd int, offset uint32) Instruction { + if fd < 0 { + return Instruction{OpCode: InvalidOpCode} + } + + fdAndOffset := (uint64(offset) << 32) | uint64(uint32(fd)) + return Instruction{ + OpCode: LoadImmOp(DWord), + Dst: dst, + Src: PseudoMapValue, + Constant: int64(fdAndOffset), + } +} + +// LoadIndOp returns the OpCode for loading a value of given size from an sk_buff. +func LoadIndOp(size Size) OpCode { + return OpCode(LdClass).SetMode(IndMode).SetSize(size) +} + +// LoadInd emits `dst = ntoh(*(size *)(((sk_buff *)R6)->data + src + offset))`. +func LoadInd(dst, src Register, offset int32, size Size) Instruction { + return Instruction{ + OpCode: LoadIndOp(size), + Dst: dst, + Src: src, + Constant: int64(offset), + } +} + +// LoadAbsOp returns the OpCode for loading a value of given size from an sk_buff. +func LoadAbsOp(size Size) OpCode { + return OpCode(LdClass).SetMode(AbsMode).SetSize(size) +} + +// LoadAbs emits `r0 = ntoh(*(size *)(((sk_buff *)R6)->data + offset))`. +func LoadAbs(offset int32, size Size) Instruction { + return Instruction{ + OpCode: LoadAbsOp(size), + Dst: R0, + Constant: int64(offset), + } +} + +// StoreMemOp returns the OpCode for storing a register of given size in memory. 
+func StoreMemOp(size Size) OpCode { + return OpCode(StXClass).SetMode(MemMode).SetSize(size) +} + +// StoreMem emits `*(size *)(dst + offset) = src` +func StoreMem(dst Register, offset int16, src Register, size Size) Instruction { + return Instruction{ + OpCode: StoreMemOp(size), + Dst: dst, + Src: src, + Offset: offset, + } +} + +// StoreImmOp returns the OpCode for storing an immediate of given size in memory. +func StoreImmOp(size Size) OpCode { + return OpCode(StClass).SetMode(MemMode).SetSize(size) +} + +// StoreImm emits `*(size *)(dst + offset) = value`. +func StoreImm(dst Register, offset int16, value int64, size Size) Instruction { + if size == DWord { + return Instruction{OpCode: InvalidOpCode} + } + + return Instruction{ + OpCode: StoreImmOp(size), + Dst: dst, + Offset: offset, + Constant: value, + } +} + +// StoreXAddOp returns the OpCode to atomically add a register to a value in memory. +func StoreXAddOp(size Size) OpCode { + return AddAtomic.OpCode(size) +} + +// StoreXAdd atomically adds src to *dst. +func StoreXAdd(dst, src Register, size Size) Instruction { + return AddAtomic.Mem(dst, src, size, 0) +} diff --git a/vendor/github.com/cilium/ebpf/asm/load_store_string.go b/vendor/github.com/cilium/ebpf/asm/load_store_string.go new file mode 100644 index 000000000..bbed58b66 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/asm/load_store_string.go @@ -0,0 +1,84 @@ +// Code generated by "stringer -output load_store_string.go -type=Mode,Size"; DO NOT EDIT. + +package asm + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. 
+ var x [1]struct{} + _ = x[InvalidMode-255] + _ = x[ImmMode-0] + _ = x[AbsMode-32] + _ = x[IndMode-64] + _ = x[MemMode-96] + _ = x[MemSXMode-128] + _ = x[AtomicMode-192] +} + +const ( + _Mode_name_0 = "ImmMode" + _Mode_name_1 = "AbsMode" + _Mode_name_2 = "IndMode" + _Mode_name_3 = "MemMode" + _Mode_name_4 = "MemSXMode" + _Mode_name_5 = "AtomicMode" + _Mode_name_6 = "InvalidMode" +) + +func (i Mode) String() string { + switch { + case i == 0: + return _Mode_name_0 + case i == 32: + return _Mode_name_1 + case i == 64: + return _Mode_name_2 + case i == 96: + return _Mode_name_3 + case i == 128: + return _Mode_name_4 + case i == 192: + return _Mode_name_5 + case i == 255: + return _Mode_name_6 + default: + return "Mode(" + strconv.FormatInt(int64(i), 10) + ")" + } +} +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[InvalidSize-255] + _ = x[DWord-24] + _ = x[Word-0] + _ = x[Half-8] + _ = x[Byte-16] +} + +const ( + _Size_name_0 = "Word" + _Size_name_1 = "Half" + _Size_name_2 = "Byte" + _Size_name_3 = "DWord" + _Size_name_4 = "InvalidSize" +) + +func (i Size) String() string { + switch { + case i == 0: + return _Size_name_0 + case i == 8: + return _Size_name_1 + case i == 16: + return _Size_name_2 + case i == 24: + return _Size_name_3 + case i == 255: + return _Size_name_4 + default: + return "Size(" + strconv.FormatInt(int64(i), 10) + ")" + } +} diff --git a/vendor/github.com/cilium/ebpf/asm/metadata.go b/vendor/github.com/cilium/ebpf/asm/metadata.go new file mode 100644 index 000000000..dd368a936 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/asm/metadata.go @@ -0,0 +1,80 @@ +package asm + +// Metadata contains metadata about an instruction. +type Metadata struct { + head *metaElement +} + +type metaElement struct { + next *metaElement + key, value interface{} +} + +// Find the element containing key. 
+// +// Returns nil if there is no such element. +func (m *Metadata) find(key interface{}) *metaElement { + for e := m.head; e != nil; e = e.next { + if e.key == key { + return e + } + } + return nil +} + +// Remove an element from the linked list. +// +// Copies as many elements of the list as necessary to remove r, but doesn't +// perform a full copy. +func (m *Metadata) remove(r *metaElement) { + current := &m.head + for e := m.head; e != nil; e = e.next { + if e == r { + // We've found the element we want to remove. + *current = e.next + + // No need to copy the tail. + return + } + + // There is another element in front of the one we want to remove. + // We have to copy it to be able to change metaElement.next. + cpy := &metaElement{key: e.key, value: e.value} + *current = cpy + current = &cpy.next + } +} + +// Set a key to a value. +// +// If value is nil, the key is removed. Avoids modifying old metadata by +// copying if necessary. +func (m *Metadata) Set(key, value interface{}) { + if e := m.find(key); e != nil { + if e.value == value { + // Key is present and the value is the same. Nothing to do. + return + } + + // Key is present with a different value. Create a copy of the list + // which doesn't have the element in it. + m.remove(e) + } + + // m.head is now a linked list that doesn't contain key. + if value == nil { + return + } + + m.head = &metaElement{key: key, value: value, next: m.head} +} + +// Get the value of a key. +// +// Returns nil if no value with the given key is present. 
+func (m *Metadata) Get(key interface{}) interface{} { + if e := m.find(key); e != nil { + return e.value + } + return nil +} diff --git a/vendor/github.com/cilium/ebpf/asm/opcode.go b/vendor/github.com/cilium/ebpf/asm/opcode.go new file mode 100644 index 000000000..9b2f80f0a --- /dev/null +++ b/vendor/github.com/cilium/ebpf/asm/opcode.go @@ -0,0 +1,327 @@ +package asm + +import ( + "fmt" + "strings" +) + +//go:generate go tool stringer -output opcode_string.go -type=Class + +// Class of operations +// +// msb lsb +// +---+--+---+ +// | ?? |CLS| +// +---+--+---+ +type Class uint8 + +const classMask OpCode = 0x07 + +const ( + // LdClass loads immediate values into registers. + // Also used for non-standard load operations from cBPF. + LdClass Class = 0x00 + // LdXClass loads memory into registers. + LdXClass Class = 0x01 + // StClass stores immediate values to memory. + StClass Class = 0x02 + // StXClass stores registers to memory. + StXClass Class = 0x03 + // ALUClass describes arithmetic operators. + ALUClass Class = 0x04 + // JumpClass describes jump operators. + JumpClass Class = 0x05 + // Jump32Class describes jump operators with 32-bit comparisons. + // Requires kernel 5.1. + Jump32Class Class = 0x06 + // ALU64Class describes arithmetic operators in 64-bit mode. + ALU64Class Class = 0x07 +) + +// IsLoad checks if this is either LdClass or LdXClass. +func (cls Class) IsLoad() bool { + return cls == LdClass || cls == LdXClass +} + +// IsStore checks if this is either StClass or StXClass. +func (cls Class) IsStore() bool { + return cls == StClass || cls == StXClass +} + +func (cls Class) isLoadOrStore() bool { + return cls.IsLoad() || cls.IsStore() +} + +// IsALU checks if this is either ALUClass or ALU64Class. +func (cls Class) IsALU() bool { + return cls == ALUClass || cls == ALU64Class +} + +// IsJump checks if this is either JumpClass or Jump32Class. 
+func (cls Class) IsJump() bool { + return cls == JumpClass || cls == Jump32Class +} + +func (cls Class) isJumpOrALU() bool { + return cls.IsJump() || cls.IsALU() +} + +// OpCode represents a single operation. +// It is not a 1:1 mapping to real eBPF opcodes. +// +// The encoding varies based on a 3-bit Class: +// +// 7 6 5 4 3 2 1 0 7 6 5 4 3 2 1 0 7 6 5 4 3 2 1 0 7 6 5 4 3 2 1 0 +// ??? | CLS +// +// For ALUClass and ALUCLass32: +// +// 7 6 5 4 3 2 1 0 7 6 5 4 3 2 1 0 7 6 5 4 3 2 1 0 7 6 5 4 3 2 1 0 +// 0 | OPC |S| CLS +// +// For LdClass, LdXclass, StClass and StXClass: +// +// 7 6 5 4 3 2 1 0 7 6 5 4 3 2 1 0 7 6 5 4 3 2 1 0 7 6 5 4 3 2 1 0 +// 0 | MDE |SIZ| CLS +// +// For StXClass where MDE == AtomicMode: +// +// 7 6 5 4 3 2 1 0 7 6 5 4 3 2 1 0 7 6 5 4 3 2 1 0 7 6 5 4 3 2 1 0 +// 0 | ATOMIC OP | MDE |SIZ| CLS +// +// For JumpClass, Jump32Class: +// +// 7 6 5 4 3 2 1 0 7 6 5 4 3 2 1 0 7 6 5 4 3 2 1 0 7 6 5 4 3 2 1 0 +// 0 | OPC |S| CLS +type OpCode uint32 + +// InvalidOpCode is returned by setters on OpCode +const InvalidOpCode OpCode = 0xffff + +// bpfOpCode returns the actual BPF opcode. +func (op OpCode) bpfOpCode() (byte, error) { + const opCodeMask = 0xff + + if !valid(op, opCodeMask) { + return 0, fmt.Errorf("invalid opcode %x", op) + } + + return byte(op & opCodeMask), nil +} + +// rawInstructions returns the number of BPF instructions required +// to encode this opcode. +func (op OpCode) rawInstructions() int { + if op.IsDWordLoad() { + return 2 + } + return 1 +} + +func (op OpCode) IsDWordLoad() bool { + return op == LoadImmOp(DWord) +} + +// Class returns the class of operation. +func (op OpCode) Class() Class { + return Class(op & classMask) +} + +// Mode returns the mode for load and store operations. +func (op OpCode) Mode() Mode { + if !op.Class().isLoadOrStore() { + return InvalidMode + } + return Mode(op & modeMask) +} + +// Size returns the size for load and store operations. 
+func (op OpCode) Size() Size { + if !op.Class().isLoadOrStore() { + return InvalidSize + } + return Size(op & sizeMask) +} + +// AtomicOp returns the type of atomic operation. +func (op OpCode) AtomicOp() AtomicOp { + if op.Class() != StXClass || op.Mode() != AtomicMode { + return InvalidAtomic + } + return AtomicOp(op & atomicMask) +} + +// Source returns the source for branch and ALU operations. +func (op OpCode) Source() Source { + if !op.Class().isJumpOrALU() || op.ALUOp() == Swap { + return InvalidSource + } + return Source(op & sourceMask) +} + +// ALUOp returns the ALUOp. +func (op OpCode) ALUOp() ALUOp { + if !op.Class().IsALU() { + return InvalidALUOp + } + return ALUOp(op & aluMask) +} + +// Endianness returns the Endianness for a byte swap instruction. +func (op OpCode) Endianness() Endianness { + if op.ALUOp() != Swap { + return InvalidEndian + } + return Endianness(op & endianMask) +} + +// JumpOp returns the JumpOp. +// Returns InvalidJumpOp if it doesn't encode a jump. +func (op OpCode) JumpOp() JumpOp { + if !op.Class().IsJump() { + return InvalidJumpOp + } + + jumpOp := JumpOp(op & jumpMask) + + // Some JumpOps are only supported by JumpClass, not Jump32Class. + if op.Class() == Jump32Class && (jumpOp == Exit || jumpOp == Call) { + return InvalidJumpOp + } + + return jumpOp +} + +// SetMode sets the mode on load and store operations. +// +// Returns InvalidOpCode if op is of the wrong class. +func (op OpCode) SetMode(mode Mode) OpCode { + if !op.Class().isLoadOrStore() || !valid(OpCode(mode), modeMask) { + return InvalidOpCode + } + return (op & ^modeMask) | OpCode(mode) +} + +// SetSize sets the size on load and store operations. +// +// Returns InvalidOpCode if op is of the wrong class. 
+func (op OpCode) SetSize(size Size) OpCode { + if !op.Class().isLoadOrStore() || !valid(OpCode(size), sizeMask) { + return InvalidOpCode + } + return (op & ^sizeMask) | OpCode(size) +} + +func (op OpCode) SetAtomicOp(atomic AtomicOp) OpCode { + if op.Class() != StXClass || op.Mode() != AtomicMode || !valid(OpCode(atomic), atomicMask) { + return InvalidOpCode + } + return (op & ^atomicMask) | OpCode(atomic) +} + +// SetSource sets the source on jump and ALU operations. +// +// Returns InvalidOpCode if op is of the wrong class. +func (op OpCode) SetSource(source Source) OpCode { + if !op.Class().isJumpOrALU() || !valid(OpCode(source), sourceMask) { + return InvalidOpCode + } + return (op & ^sourceMask) | OpCode(source) +} + +// SetALUOp sets the ALUOp on ALU operations. +// +// Returns InvalidOpCode if op is of the wrong class. +func (op OpCode) SetALUOp(alu ALUOp) OpCode { + if !op.Class().IsALU() || !valid(OpCode(alu), aluMask) { + return InvalidOpCode + } + return (op & ^aluMask) | OpCode(alu) +} + +// SetJumpOp sets the JumpOp on jump operations. +// +// Returns InvalidOpCode if op is of the wrong class. +func (op OpCode) SetJumpOp(jump JumpOp) OpCode { + if !op.Class().IsJump() || !valid(OpCode(jump), jumpMask) { + return InvalidOpCode + } + + newOp := (op & ^jumpMask) | OpCode(jump) + + // Check newOp is legal. 
+ if newOp.JumpOp() == InvalidJumpOp { + return InvalidOpCode + } + + return newOp +} + +func (op OpCode) String() string { + var f strings.Builder + + switch class := op.Class(); { + case class.isLoadOrStore(): + f.WriteString(strings.TrimSuffix(class.String(), "Class")) + + mode := op.Mode() + f.WriteString(strings.TrimSuffix(mode.String(), "Mode")) + + if atomic := op.AtomicOp(); atomic != InvalidAtomic { + f.WriteString(strings.TrimSuffix(atomic.String(), "Atomic")) + } + + switch op.Size() { + case DWord: + f.WriteString("DW") + case Word: + f.WriteString("W") + case Half: + f.WriteString("H") + case Byte: + f.WriteString("B") + } + + case class.IsALU(): + if op.ALUOp() == Swap && op.Class() == ALU64Class { + // B to make BSwap, uncontitional byte swap + f.WriteString("B") + } + + f.WriteString(op.ALUOp().String()) + + if op.ALUOp() == Swap { + if op.Class() == ALUClass { + // Width for Endian is controlled by Constant + f.WriteString(op.Endianness().String()) + } + } else { + f.WriteString(strings.TrimSuffix(op.Source().String(), "Source")) + + if class == ALUClass { + f.WriteString("32") + } + } + + case class.IsJump(): + f.WriteString(op.JumpOp().String()) + + if class == Jump32Class { + f.WriteString("32") + } + + if jop := op.JumpOp(); jop != Exit && jop != Call && jop != Ja { + f.WriteString(strings.TrimSuffix(op.Source().String(), "Source")) + } + + default: + fmt.Fprintf(&f, "OpCode(%#x)", uint8(op)) + } + + return f.String() +} + +// valid returns true if all bits in value are covered by mask. +func valid(value, mask OpCode) bool { + return value & ^mask == 0 +} diff --git a/vendor/github.com/cilium/ebpf/asm/opcode_string.go b/vendor/github.com/cilium/ebpf/asm/opcode_string.go new file mode 100644 index 000000000..07825e0dd --- /dev/null +++ b/vendor/github.com/cilium/ebpf/asm/opcode_string.go @@ -0,0 +1,31 @@ +// Code generated by "stringer -output opcode_string.go -type=Class"; DO NOT EDIT. 
+ +package asm + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[LdClass-0] + _ = x[LdXClass-1] + _ = x[StClass-2] + _ = x[StXClass-3] + _ = x[ALUClass-4] + _ = x[JumpClass-5] + _ = x[Jump32Class-6] + _ = x[ALU64Class-7] +} + +const _Class_name = "LdClassLdXClassStClassStXClassALUClassJumpClassJump32ClassALU64Class" + +var _Class_index = [...]uint8{0, 7, 15, 22, 30, 38, 47, 58, 68} + +func (i Class) String() string { + idx := int(i) - 0 + if i < 0 || idx >= len(_Class_index)-1 { + return "Class(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _Class_name[_Class_index[idx]:_Class_index[idx+1]] +} diff --git a/vendor/github.com/cilium/ebpf/asm/register.go b/vendor/github.com/cilium/ebpf/asm/register.go new file mode 100644 index 000000000..457a3b8a8 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/asm/register.go @@ -0,0 +1,51 @@ +package asm + +import ( + "fmt" +) + +// Register is the source or destination of most operations. +type Register uint8 + +// R0 contains return values. +const R0 Register = 0 + +// Registers for function arguments. +const ( + R1 Register = R0 + 1 + iota + R2 + R3 + R4 + R5 +) + +// Callee saved registers preserved by function calls. +const ( + R6 Register = R5 + 1 + iota + R7 + R8 + R9 +) + +// Read-only frame pointer to access stack. 
+const ( + R10 Register = R9 + 1 + RFP = R10 +) + +// Pseudo registers used by 64bit loads and jumps +const ( + PseudoMapFD = R1 // BPF_PSEUDO_MAP_FD + PseudoMapValue = R2 // BPF_PSEUDO_MAP_VALUE + PseudoCall = R1 // BPF_PSEUDO_CALL + PseudoFunc = R4 // BPF_PSEUDO_FUNC + PseudoKfuncCall = R2 // BPF_PSEUDO_KFUNC_CALL +) + +func (r Register) String() string { + v := uint8(r) + if v == 10 { + return "rfp" + } + return fmt.Sprintf("r%d", v) +} diff --git a/vendor/github.com/cilium/ebpf/attachtype_string.go b/vendor/github.com/cilium/ebpf/attachtype_string.go new file mode 100644 index 000000000..efed516b6 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/attachtype_string.go @@ -0,0 +1,100 @@ +// Code generated by "stringer -type AttachType -trimprefix Attach"; DO NOT EDIT. + +package ebpf + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[AttachNone-0] + _ = x[AttachCGroupInetIngress-0] + _ = x[AttachCGroupInetEgress-1] + _ = x[AttachCGroupInetSockCreate-2] + _ = x[AttachCGroupSockOps-3] + _ = x[AttachSkSKBStreamParser-4] + _ = x[AttachSkSKBStreamVerdict-5] + _ = x[AttachCGroupDevice-6] + _ = x[AttachSkMsgVerdict-7] + _ = x[AttachCGroupInet4Bind-8] + _ = x[AttachCGroupInet6Bind-9] + _ = x[AttachCGroupInet4Connect-10] + _ = x[AttachCGroupInet6Connect-11] + _ = x[AttachCGroupInet4PostBind-12] + _ = x[AttachCGroupInet6PostBind-13] + _ = x[AttachCGroupUDP4Sendmsg-14] + _ = x[AttachCGroupUDP6Sendmsg-15] + _ = x[AttachLircMode2-16] + _ = x[AttachFlowDissector-17] + _ = x[AttachCGroupSysctl-18] + _ = x[AttachCGroupUDP4Recvmsg-19] + _ = x[AttachCGroupUDP6Recvmsg-20] + _ = x[AttachCGroupGetsockopt-21] + _ = x[AttachCGroupSetsockopt-22] + _ = x[AttachTraceRawTp-23] + _ = x[AttachTraceFEntry-24] + _ = x[AttachTraceFExit-25] + _ = x[AttachModifyReturn-26] + _ = x[AttachLSMMac-27] + _ = x[AttachTraceIter-28] + _ = 
x[AttachCgroupInet4GetPeername-29] + _ = x[AttachCgroupInet6GetPeername-30] + _ = x[AttachCgroupInet4GetSockname-31] + _ = x[AttachCgroupInet6GetSockname-32] + _ = x[AttachXDPDevMap-33] + _ = x[AttachCgroupInetSockRelease-34] + _ = x[AttachXDPCPUMap-35] + _ = x[AttachSkLookup-36] + _ = x[AttachXDP-37] + _ = x[AttachSkSKBVerdict-38] + _ = x[AttachSkReuseportSelect-39] + _ = x[AttachSkReuseportSelectOrMigrate-40] + _ = x[AttachPerfEvent-41] + _ = x[AttachTraceKprobeMulti-42] + _ = x[AttachTraceKprobeSession-56] + _ = x[AttachLSMCgroup-43] + _ = x[AttachStructOps-44] + _ = x[AttachNetfilter-45] + _ = x[AttachTCXIngress-46] + _ = x[AttachTCXEgress-47] + _ = x[AttachTraceUprobeMulti-48] + _ = x[AttachCgroupUnixConnect-49] + _ = x[AttachCgroupUnixSendmsg-50] + _ = x[AttachCgroupUnixRecvmsg-51] + _ = x[AttachCgroupUnixGetpeername-52] + _ = x[AttachCgroupUnixGetsockname-53] + _ = x[AttachNetkitPrimary-54] + _ = x[AttachNetkitPeer-55] + _ = x[AttachWindowsXDP-268435457] + _ = x[AttachWindowsBind-268435458] + _ = x[AttachWindowsCGroupInet4Connect-268435459] + _ = x[AttachWindowsCGroupInet6Connect-268435460] + _ = x[AttachWindowsCgroupInet4RecvAccept-268435461] + _ = x[AttachWindowsCgroupInet6RecvAccept-268435462] + _ = x[AttachWindowsCGroupSockOps-268435463] + _ = x[AttachWindowsSample-268435464] + _ = x[AttachWindowsXDPTest-268435465] +} + +const ( + _AttachType_name_0 = 
"NoneCGroupInetEgressCGroupInetSockCreateCGroupSockOpsSkSKBStreamParserSkSKBStreamVerdictCGroupDeviceSkMsgVerdictCGroupInet4BindCGroupInet6BindCGroupInet4ConnectCGroupInet6ConnectCGroupInet4PostBindCGroupInet6PostBindCGroupUDP4SendmsgCGroupUDP6SendmsgLircMode2FlowDissectorCGroupSysctlCGroupUDP4RecvmsgCGroupUDP6RecvmsgCGroupGetsockoptCGroupSetsockoptTraceRawTpTraceFEntryTraceFExitModifyReturnLSMMacTraceIterCgroupInet4GetPeernameCgroupInet6GetPeernameCgroupInet4GetSocknameCgroupInet6GetSocknameXDPDevMapCgroupInetSockReleaseXDPCPUMapSkLookupXDPSkSKBVerdictSkReuseportSelectSkReuseportSelectOrMigratePerfEventTraceKprobeMultiLSMCgroupStructOpsNetfilterTCXIngressTCXEgressTraceUprobeMultiCgroupUnixConnectCgroupUnixSendmsgCgroupUnixRecvmsgCgroupUnixGetpeernameCgroupUnixGetsocknameNetkitPrimaryNetkitPeerTraceKprobeSession" + _AttachType_name_1 = "WindowsXDPWindowsBindWindowsCGroupInet4ConnectWindowsCGroupInet6ConnectWindowsCgroupInet4RecvAcceptWindowsCgroupInet6RecvAcceptWindowsCGroupSockOpsWindowsSampleWindowsXDPTest" +) + +var ( + _AttachType_index_0 = [...]uint16{0, 4, 20, 40, 53, 70, 88, 100, 112, 127, 142, 160, 178, 197, 216, 233, 250, 259, 272, 284, 301, 318, 334, 350, 360, 371, 381, 393, 399, 408, 430, 452, 474, 496, 505, 526, 535, 543, 546, 558, 575, 601, 610, 626, 635, 644, 653, 663, 672, 688, 705, 722, 739, 760, 781, 794, 804, 822} + _AttachType_index_1 = [...]uint8{0, 10, 21, 46, 71, 99, 127, 147, 160, 174} +) + +func (i AttachType) String() string { + switch { + case i <= 56: + return _AttachType_name_0[_AttachType_index_0[i]:_AttachType_index_0[i+1]] + case 268435457 <= i && i <= 268435465: + i -= 268435457 + return _AttachType_name_1[_AttachType_index_1[i]:_AttachType_index_1[i+1]] + default: + return "AttachType(" + strconv.FormatInt(int64(i), 10) + ")" + } +} diff --git a/vendor/github.com/cilium/ebpf/btf/btf.go b/vendor/github.com/cilium/ebpf/btf/btf.go new file mode 100644 index 000000000..41e1f8a6f --- /dev/null +++ 
b/vendor/github.com/cilium/ebpf/btf/btf.go @@ -0,0 +1,550 @@ +package btf + +import ( + "debug/elf" + "errors" + "fmt" + "io" + "iter" + "maps" + "math" + "os" + "reflect" + "slices" + + "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/sys" +) + +const btfMagic = 0xeB9F + +// Errors returned by BTF functions. +var ( + ErrNotSupported = internal.ErrNotSupported + ErrNotFound = errors.New("not found") + ErrNoExtendedInfo = errors.New("no extended info") + ErrMultipleMatches = errors.New("multiple matching types") +) + +// ID represents the unique ID of a BTF object. +type ID = sys.BTFID + +type elfData struct { + sectionSizes map[string]uint32 + symbolOffsets map[elfSymbol]uint32 + fixups map[Type]bool +} + +type elfSymbol struct { + section string + name string +} + +// Spec allows querying a set of Types and loading the set into the +// kernel. +type Spec struct { + *decoder + + // Additional data from ELF, may be nil. + elf *elfData +} + +// LoadSpec opens file and calls LoadSpecFromReader on it. +func LoadSpec(file string) (*Spec, error) { + fh, err := os.Open(file) + if err != nil { + return nil, err + } + defer fh.Close() + + return LoadSpecFromReader(fh) +} + +// LoadSpecFromReader reads from an ELF or a raw BTF blob. +// +// Returns ErrNotFound if reading from an ELF which contains no BTF. ExtInfos +// may be nil. +func LoadSpecFromReader(rd io.ReaderAt) (*Spec, error) { + file, err := internal.NewSafeELFFile(rd) + if err != nil { + raw, err := io.ReadAll(io.NewSectionReader(rd, 0, math.MaxInt64)) + if err != nil { + return nil, fmt.Errorf("read raw BTF: %w", err) + } + + return loadRawSpec(raw, nil) + } + + return loadSpecFromELF(file) +} + +// LoadSpecAndExtInfosFromReader reads from an ELF. +// +// ExtInfos may be nil if the ELF doesn't contain section metadata. +// Returns ErrNotFound if the ELF contains no BTF. 
+func LoadSpecAndExtInfosFromReader(rd io.ReaderAt) (*Spec, *ExtInfos, error) { + file, err := internal.NewSafeELFFile(rd) + if err != nil { + return nil, nil, err + } + + spec, err := loadSpecFromELF(file) + if err != nil { + return nil, nil, err + } + + extInfos, err := loadExtInfosFromELF(file, spec) + if err != nil && !errors.Is(err, ErrNotFound) { + return nil, nil, err + } + + return spec, extInfos, nil +} + +// symbolOffsets extracts all symbols offsets from an ELF and indexes them by +// section and variable name. +// +// References to variables in BTF data sections carry unsigned 32-bit offsets. +// Some ELF symbols (e.g. in vmlinux) may point to virtual memory that is well +// beyond this range. Since these symbols cannot be described by BTF info, +// ignore them here. +func symbolOffsets(file *internal.SafeELFFile) (map[elfSymbol]uint32, error) { + symbols, err := file.Symbols() + if err != nil { + return nil, fmt.Errorf("can't read symbols: %v", err) + } + + offsets := make(map[elfSymbol]uint32) + for _, sym := range symbols { + if idx := sym.Section; idx >= elf.SHN_LORESERVE && idx <= elf.SHN_HIRESERVE { + // Ignore things like SHN_ABS + continue + } + + if sym.Value > math.MaxUint32 { + // VarSecinfo offset is u32, cannot reference symbols in higher regions. 
+ continue + } + + if int(sym.Section) >= len(file.Sections) { + return nil, fmt.Errorf("symbol %s: invalid section %d", sym.Name, sym.Section) + } + + secName := file.Sections[sym.Section].Name + offsets[elfSymbol{secName, sym.Name}] = uint32(sym.Value) + } + + return offsets, nil +} + +func loadSpecFromELF(file *internal.SafeELFFile) (*Spec, error) { + var ( + btfSection *elf.Section + sectionSizes = make(map[string]uint32) + ) + + for _, sec := range file.Sections { + switch sec.Name { + case ".BTF": + btfSection = sec + default: + if sec.Type != elf.SHT_PROGBITS && sec.Type != elf.SHT_NOBITS { + break + } + + if sec.Size > math.MaxUint32 { + return nil, fmt.Errorf("section %s exceeds maximum size", sec.Name) + } + + sectionSizes[sec.Name] = uint32(sec.Size) + } + } + + if btfSection == nil { + return nil, fmt.Errorf("btf: %w", ErrNotFound) + } + + offsets, err := symbolOffsets(file) + if err != nil { + return nil, err + } + + rawBTF, err := btfSection.Data() + if err != nil { + return nil, fmt.Errorf("reading .BTF section: %w", err) + } + + spec, err := loadRawSpec(rawBTF, nil) + if err != nil { + return nil, err + } + + if spec.decoder.byteOrder != file.ByteOrder { + return nil, fmt.Errorf("BTF byte order %s does not match ELF byte order %s", spec.decoder.byteOrder, file.ByteOrder) + } + + spec.elf = &elfData{ + sectionSizes, + offsets, + make(map[Type]bool), + } + + return spec, nil +} + +func loadRawSpec(btf []byte, base *Spec) (*Spec, error) { + var ( + baseDecoder *decoder + baseStrings *stringTable + err error + ) + + if base != nil { + baseDecoder = base.decoder + baseStrings = base.strings + } + + header, bo, err := parseBTFHeader(btf) + if err != nil { + return nil, fmt.Errorf("parsing .BTF header: %v", err) + } + + if header.HdrLen > uint32(len(btf)) { + return nil, fmt.Errorf("BTF header length is out of bounds") + } + btf = btf[header.HdrLen:] + + if int(header.StringOff+header.StringLen) > len(btf) { + return nil, fmt.Errorf("string table is out of 
bounds") + } + stringsSection := btf[header.StringOff : header.StringOff+header.StringLen] + + rawStrings, err := newStringTable(stringsSection, baseStrings) + if err != nil { + return nil, fmt.Errorf("read string section: %w", err) + } + + if int(header.TypeOff+header.TypeLen) > len(btf) { + return nil, fmt.Errorf("types section is out of bounds") + } + typesSection := btf[header.TypeOff : header.TypeOff+header.TypeLen] + + decoder, err := newDecoder(typesSection, bo, rawStrings, baseDecoder) + if err != nil { + return nil, err + } + + return &Spec{decoder, nil}, nil +} + +// fixupDatasec attempts to patch up missing info in Datasecs and its members by +// supplementing them with information from the ELF headers and symbol table. +func (elf *elfData) fixupDatasec(typ Type) error { + if elf == nil { + return nil + } + + if ds, ok := typ.(*Datasec); ok { + if elf.fixups[ds] { + return nil + } + elf.fixups[ds] = true + + name := ds.Name + + // Some Datasecs are virtual and don't have corresponding ELF sections. + switch name { + case ".ksyms": + // .ksyms describes forward declarations of kfunc signatures, as well as + // references to kernel symbols. + // Nothing to fix up, all sizes and offsets are 0. + for _, vsi := range ds.Vars { + switch t := vsi.Type.(type) { + case *Func: + continue + case *Var: + if _, ok := t.Type.(*Void); !ok { + return fmt.Errorf("data section %s: expected %s to be *Void, not %T: %w", name, vsi.Type.TypeName(), vsi.Type, ErrNotSupported) + } + default: + return fmt.Errorf("data section %s: expected to be either *btf.Func or *btf.Var, not %T: %w", name, vsi.Type, ErrNotSupported) + } + } + + return nil + case ".kconfig": + // .kconfig has a size of 0 and has all members' offsets set to 0. + // Fix up all offsets and set the Datasec's size. + if err := fixupDatasecLayout(ds); err != nil { + return err + } + + // Fix up extern to global linkage to avoid a BTF verifier error. 
+ for _, vsi := range ds.Vars { + vsi.Type.(*Var).Linkage = GlobalVar + } + + return nil + } + + if ds.Size != 0 { + return nil + } + + ds.Size, ok = elf.sectionSizes[name] + if !ok { + return fmt.Errorf("data section %s: missing size", name) + } + + for i := range ds.Vars { + symName := ds.Vars[i].Type.TypeName() + ds.Vars[i].Offset, ok = elf.symbolOffsets[elfSymbol{name, symName}] + if !ok { + return fmt.Errorf("data section %s: missing offset for symbol %s", name, symName) + } + } + } + + return nil +} + +// fixupDatasecLayout populates ds.Vars[].Offset according to var sizes and +// alignment. Calculate and set ds.Size. +func fixupDatasecLayout(ds *Datasec) error { + var off uint32 + + for i, vsi := range ds.Vars { + v, ok := vsi.Type.(*Var) + if !ok { + return fmt.Errorf("member %d: unsupported type %T", i, vsi.Type) + } + + size, err := Sizeof(v.Type) + if err != nil { + return fmt.Errorf("variable %s: getting size: %w", v.Name, err) + } + align, err := alignof(v.Type) + if err != nil { + return fmt.Errorf("variable %s: getting alignment: %w", v.Name, err) + } + + // Align the current member based on the offset of the end of the previous + // member and the alignment of the current member. + off = internal.Align(off, uint32(align)) + + ds.Vars[i].Offset = off + + off += uint32(size) + } + + ds.Size = off + + return nil +} + +// Copy a Spec. +// +// All contained types are duplicated while preserving any modifications made +// to them. +func (s *Spec) Copy() *Spec { + if s == nil { + return nil + } + + cpy := &Spec{ + s.decoder.Copy(), + nil, + } + + if s.elf != nil { + cpy.elf = &elfData{ + s.elf.sectionSizes, + s.elf.symbolOffsets, + maps.Clone(s.elf.fixups), + } + } + + return cpy +} + +// TypeByID returns the BTF Type with the given type ID. +// +// Returns an error wrapping ErrNotFound if a Type with the given ID +// does not exist in the Spec. 
+func (s *Spec) TypeByID(id TypeID) (Type, error) { + typ, err := s.decoder.TypeByID(id) + if err != nil { + return nil, fmt.Errorf("inflate type: %w", err) + } + + if err := s.elf.fixupDatasec(typ); err != nil { + return nil, err + } + + return typ, nil +} + +// TypeID returns the ID for a given Type. +// +// Returns an error wrapping [ErrNotFound] if the type isn't part of the Spec. +func (s *Spec) TypeID(typ Type) (TypeID, error) { + return s.decoder.TypeID(typ) +} + +// AnyTypesByName returns a list of BTF Types with the given name. +// +// If the BTF blob describes multiple compilation units like vmlinux, multiple +// Types with the same name and kind can exist, but might not describe the same +// data structure. +// +// Returns an error wrapping ErrNotFound if no matching Type exists in the Spec. +func (s *Spec) AnyTypesByName(name string) ([]Type, error) { + types, err := s.TypesByName(newEssentialName(name)) + if err != nil { + return nil, err + } + + for i := 0; i < len(types); i++ { + // Match against the full name, not just the essential one + // in case the type being looked up is a struct flavor. + if types[i].TypeName() != name { + types = slices.Delete(types, i, i+1) + continue + } + + if err := s.elf.fixupDatasec(types[i]); err != nil { + return nil, err + } + } + + return types, nil +} + +// AnyTypeByName returns a Type with the given name. +// +// Returns an error if multiple types of that name exist. +func (s *Spec) AnyTypeByName(name string) (Type, error) { + types, err := s.AnyTypesByName(name) + if err != nil { + return nil, err + } + + if len(types) > 1 { + return nil, fmt.Errorf("found multiple types: %v", types) + } + + return types[0], nil +} + +// TypeByName searches for a Type with a specific name. Since multiple Types +// with the same name can exist, the parameter typ is taken to narrow down the +// search in case of a clash. +// +// typ must be a non-nil pointer to an implementation of a Type. 
On success, the +// address of the found Type will be copied to typ. +// +// Returns an error wrapping ErrNotFound if no matching Type exists in the Spec. +// Returns an error wrapping ErrMultipleTypes if multiple candidates are found. +func (s *Spec) TypeByName(name string, typ interface{}) error { + typeInterface := reflect.TypeOf((*Type)(nil)).Elem() + + // typ may be **T or *Type + typValue := reflect.ValueOf(typ) + if typValue.Kind() != reflect.Ptr { + return fmt.Errorf("%T is not a pointer", typ) + } + + typPtr := typValue.Elem() + if !typPtr.CanSet() { + return fmt.Errorf("%T cannot be set", typ) + } + + wanted := typPtr.Type() + if wanted == typeInterface { + // This is *Type. Unwrap the value's type. + wanted = typPtr.Elem().Type() + } + + if !wanted.AssignableTo(typeInterface) { + return fmt.Errorf("%T does not satisfy Type interface", typ) + } + + types, err := s.AnyTypesByName(name) + if err != nil { + return err + } + + var candidate Type + for _, typ := range types { + if reflect.TypeOf(typ) != wanted { + continue + } + + if candidate != nil { + return fmt.Errorf("type %s(%T): %w", name, typ, ErrMultipleMatches) + } + + candidate = typ + } + + if candidate == nil { + return fmt.Errorf("%s %s: %w", wanted, name, ErrNotFound) + } + + typPtr.Set(reflect.ValueOf(candidate)) + + return nil +} + +// LoadSplitSpec loads split BTF from the given file. +// +// Types from base are used to resolve references in the split BTF. +// The returned Spec only contains types from the split BTF, not from the base. +func LoadSplitSpec(file string, base *Spec) (*Spec, error) { + fh, err := os.Open(file) + if err != nil { + return nil, err + } + defer fh.Close() + + return LoadSplitSpecFromReader(fh, base) +} + +// LoadSplitSpecFromReader loads split BTF from a reader. +// +// Types from base are used to resolve references in the split BTF. +// The returned Spec only contains types from the split BTF, not from the base. 
+func LoadSplitSpecFromReader(r io.ReaderAt, base *Spec) (*Spec, error) { + raw, err := io.ReadAll(io.NewSectionReader(r, 0, math.MaxInt64)) + if err != nil { + return nil, fmt.Errorf("read raw BTF: %w", err) + } + + return loadRawSpec(raw, base) +} + +// All iterates over all types. +func (s *Spec) All() iter.Seq2[Type, error] { + return func(yield func(Type, error) bool) { + for id := s.firstTypeID; ; id++ { + typ, err := s.TypeByID(id) + if errors.Is(err, ErrNotFound) { + return + } else if err != nil { + yield(nil, err) + return + } + + // Skip declTags, during unmarshaling declTags become `Tags` fields of other types. + // We keep them in the spec to avoid holes in the ID space, but for the purposes of + // iteration, they are not useful to the user. + if _, ok := typ.(*declTag); ok { + continue + } + + if !yield(typ, nil) { + return + } + } + } +} diff --git a/vendor/github.com/cilium/ebpf/btf/btf_types.go b/vendor/github.com/cilium/ebpf/btf/btf_types.go new file mode 100644 index 000000000..c957f5970 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/btf/btf_types.go @@ -0,0 +1,512 @@ +package btf + +import ( + "encoding/binary" + "errors" + "fmt" + "unsafe" +) + +//go:generate go tool stringer -linecomment -output=btf_types_string.go -type=FuncLinkage,VarLinkage,btfKind + +// btfKind describes a Type. +type btfKind uint8 + +// Equivalents of the BTF_KIND_* constants. 
+const ( + kindUnknown btfKind = iota // Unknown + kindInt // Int + kindPointer // Pointer + kindArray // Array + kindStruct // Struct + kindUnion // Union + kindEnum // Enum + kindForward // Forward + kindTypedef // Typedef + kindVolatile // Volatile + kindConst // Const + kindRestrict // Restrict + // Added ~4.20 + kindFunc // Func + kindFuncProto // FuncProto + // Added ~5.1 + kindVar // Var + kindDatasec // Datasec + // Added ~5.13 + kindFloat // Float + // Added 5.16 + kindDeclTag // DeclTag + // Added 5.17 + kindTypeTag // TypeTag + // Added 6.0 + kindEnum64 // Enum64 +) + +// FuncLinkage describes BTF function linkage metadata. +type FuncLinkage int + +// Equivalent of enum btf_func_linkage. +const ( + StaticFunc FuncLinkage = iota // static + GlobalFunc // global + ExternFunc // extern +) + +// VarLinkage describes BTF variable linkage metadata. +type VarLinkage int + +const ( + StaticVar VarLinkage = iota // static + GlobalVar // global + ExternVar // extern +) + +const ( + btfTypeKindShift = 24 + btfTypeKindLen = 5 + btfTypeVlenShift = 0 + btfTypeVlenMask = 16 + btfTypeKindFlagShift = 31 + btfTypeKindFlagMask = 1 +) + +var btfHeaderLen = binary.Size(&btfHeader{}) + +type btfHeader struct { + Magic uint16 + Version uint8 + Flags uint8 + HdrLen uint32 + + TypeOff uint32 + TypeLen uint32 + StringOff uint32 + StringLen uint32 +} + +// parseBTFHeader parses the header of the .BTF section. 
+func parseBTFHeader(buf []byte) (*btfHeader, binary.ByteOrder, error) { + var header btfHeader + var bo binary.ByteOrder + for _, order := range []binary.ByteOrder{binary.LittleEndian, binary.BigEndian} { + n, err := binary.Decode(buf, order, &header) + if err != nil { + return nil, nil, fmt.Errorf("read header: %v", err) + } + + if header.Magic != btfMagic { + continue + } + + buf = buf[n:] + bo = order + break + } + + if bo == nil { + return nil, nil, fmt.Errorf("no valid BTF header") + } + + if header.Version != 1 { + return nil, nil, fmt.Errorf("unexpected version %v", header.Version) + } + + if header.Flags != 0 { + return nil, nil, fmt.Errorf("unsupported flags %v", header.Flags) + } + + remainder := int64(header.HdrLen) - int64(binary.Size(&header)) + if remainder < 0 { + return nil, nil, errors.New("header length shorter than btfHeader size") + } + + for _, b := range buf[:remainder] { + if b != 0 { + return nil, nil, errors.New("header contains non-zero trailer") + } + } + + return &header, bo, nil +} + +// btfType is equivalent to struct btf_type in Documentation/bpf/btf.rst. +type btfType struct { + NameOff uint32 + /* "info" bits arrangement + * bits 0-15: vlen (e.g. # of struct's members), linkage + * bits 16-23: unused + * bits 24-28: kind (e.g. int, ptr, array...etc) + * bits 29-30: unused + * bit 31: kind_flag, currently used by + * struct, union and fwd + */ + Info uint32 + /* "size" is used by INT, ENUM, STRUCT and UNION. + * "size" tells the size of the type it is describing. + * + * "type" is used by PTR, TYPEDEF, VOLATILE, CONST, RESTRICT, + * FUNC and FUNC_PROTO. + * "type" is a type_id referring to another type. 
+ */ + SizeType uint32 +} + +var btfTypeSize = int(unsafe.Sizeof(btfType{})) + +func unmarshalBtfType(bt *btfType, b []byte, bo binary.ByteOrder) (int, error) { + if len(b) < btfTypeSize { + return 0, fmt.Errorf("not enough bytes to unmarshal btfType") + } + + bt.NameOff = bo.Uint32(b[0:]) + bt.Info = bo.Uint32(b[4:]) + bt.SizeType = bo.Uint32(b[8:]) + return btfTypeSize, nil +} + +func mask(len uint32) uint32 { + return (1 << len) - 1 +} + +func readBits(value, len, shift uint32) uint32 { + return (value >> shift) & mask(len) +} + +func writeBits(value, len, shift, new uint32) uint32 { + value &^= mask(len) << shift + value |= (new & mask(len)) << shift + return value +} + +func (bt *btfType) info(len, shift uint32) uint32 { + return readBits(bt.Info, len, shift) +} + +func (bt *btfType) setInfo(value, len, shift uint32) { + bt.Info = writeBits(bt.Info, len, shift, value) +} + +func (bt *btfType) Kind() btfKind { + return btfKind(bt.info(btfTypeKindLen, btfTypeKindShift)) +} + +func (bt *btfType) SetKind(kind btfKind) { + bt.setInfo(uint32(kind), btfTypeKindLen, btfTypeKindShift) +} + +func (bt *btfType) Vlen() int { + return int(bt.info(btfTypeVlenMask, btfTypeVlenShift)) +} + +func (bt *btfType) SetVlen(vlen int) { + bt.setInfo(uint32(vlen), btfTypeVlenMask, btfTypeVlenShift) +} + +func (bt *btfType) kindFlagBool() bool { + return bt.info(btfTypeKindFlagMask, btfTypeKindFlagShift) == 1 +} + +func (bt *btfType) setKindFlagBool(set bool) { + var value uint32 + if set { + value = 1 + } + bt.setInfo(value, btfTypeKindFlagMask, btfTypeKindFlagShift) +} + +// Bitfield returns true if the struct or union contain a bitfield. 
+func (bt *btfType) Bitfield() bool { + return bt.kindFlagBool() +} + +func (bt *btfType) SetBitfield(isBitfield bool) { + bt.setKindFlagBool(isBitfield) +} + +func (bt *btfType) FwdKind() FwdKind { + return FwdKind(bt.info(btfTypeKindFlagMask, btfTypeKindFlagShift)) +} + +func (bt *btfType) SetFwdKind(kind FwdKind) { + bt.setInfo(uint32(kind), btfTypeKindFlagMask, btfTypeKindFlagShift) +} + +func (bt *btfType) Signed() bool { + return bt.kindFlagBool() +} + +func (bt *btfType) SetSigned(signed bool) { + bt.setKindFlagBool(signed) +} + +func (bt *btfType) Linkage() FuncLinkage { + return FuncLinkage(bt.info(btfTypeVlenMask, btfTypeVlenShift)) +} + +func (bt *btfType) SetLinkage(linkage FuncLinkage) { + bt.setInfo(uint32(linkage), btfTypeVlenMask, btfTypeVlenShift) +} + +func (bt *btfType) Type() TypeID { + // TODO: Panic here if wrong kind? + return TypeID(bt.SizeType) +} + +func (bt *btfType) SetType(id TypeID) { + bt.SizeType = uint32(id) +} + +func (bt *btfType) Size() uint32 { + // TODO: Panic here if wrong kind? + return bt.SizeType +} + +func (bt *btfType) SetSize(size uint32) { + bt.SizeType = size +} + +func (bt *btfType) Encode(buf []byte, bo binary.ByteOrder) (int, error) { + if len(buf) < btfTypeSize { + return 0, fmt.Errorf("not enough bytes to marshal btfType") + } + bo.PutUint32(buf[0:], bt.NameOff) + bo.PutUint32(buf[4:], bt.Info) + bo.PutUint32(buf[8:], bt.SizeType) + return btfTypeSize, nil +} + +// DataLen returns the length of additional type specific data in bytes. 
+func (bt *btfType) DataLen() (int, error) { + switch bt.Kind() { + case kindInt: + return int(unsafe.Sizeof(btfInt{})), nil + case kindPointer: + case kindArray: + return int(unsafe.Sizeof(btfArray{})), nil + case kindStruct: + fallthrough + case kindUnion: + return int(unsafe.Sizeof(btfMember{})) * bt.Vlen(), nil + case kindEnum: + return int(unsafe.Sizeof(btfEnum{})) * bt.Vlen(), nil + case kindForward: + case kindTypedef: + case kindVolatile: + case kindConst: + case kindRestrict: + case kindFunc: + case kindFuncProto: + return int(unsafe.Sizeof(btfParam{})) * bt.Vlen(), nil + case kindVar: + return int(unsafe.Sizeof(btfVariable{})), nil + case kindDatasec: + return int(unsafe.Sizeof(btfVarSecinfo{})) * bt.Vlen(), nil + case kindFloat: + case kindDeclTag: + return int(unsafe.Sizeof(btfDeclTag{})), nil + case kindTypeTag: + case kindEnum64: + return int(unsafe.Sizeof(btfEnum64{})) * bt.Vlen(), nil + default: + return 0, fmt.Errorf("unknown kind: %v", bt.Kind()) + } + + return 0, nil +} + +// btfInt encodes additional data for integers. +// +// ? ? ? ? e e e e o o o o o o o o ? ? ? ? ? ? ? ? b b b b b b b b +// ? = undefined +// e = encoding +// o = offset (bitfields?) 
+// b = bits (bitfields) +type btfInt struct { + Raw uint32 +} + +const ( + btfIntEncodingLen = 4 + btfIntEncodingShift = 24 + btfIntOffsetLen = 8 + btfIntOffsetShift = 16 + btfIntBitsLen = 8 + btfIntBitsShift = 0 +) + +var btfIntLen = int(unsafe.Sizeof(btfInt{})) + +func unmarshalBtfInt(bi *btfInt, b []byte, bo binary.ByteOrder) (int, error) { + if len(b) < btfIntLen { + return 0, fmt.Errorf("not enough bytes to unmarshal btfInt") + } + + bi.Raw = bo.Uint32(b[0:]) + return btfIntLen, nil +} + +func (bi btfInt) Encoding() IntEncoding { + return IntEncoding(readBits(bi.Raw, btfIntEncodingLen, btfIntEncodingShift)) +} + +func (bi *btfInt) SetEncoding(e IntEncoding) { + bi.Raw = writeBits(uint32(bi.Raw), btfIntEncodingLen, btfIntEncodingShift, uint32(e)) +} + +func (bi btfInt) Offset() Bits { + return Bits(readBits(bi.Raw, btfIntOffsetLen, btfIntOffsetShift)) +} + +func (bi *btfInt) SetOffset(offset uint32) { + bi.Raw = writeBits(bi.Raw, btfIntOffsetLen, btfIntOffsetShift, offset) +} + +func (bi btfInt) Bits() Bits { + return Bits(readBits(bi.Raw, btfIntBitsLen, btfIntBitsShift)) +} + +func (bi *btfInt) SetBits(bits byte) { + bi.Raw = writeBits(bi.Raw, btfIntBitsLen, btfIntBitsShift, uint32(bits)) +} + +type btfArray struct { + Type TypeID + IndexType TypeID + Nelems uint32 +} + +var btfArrayLen = int(unsafe.Sizeof(btfArray{})) + +func unmarshalBtfArray(ba *btfArray, b []byte, bo binary.ByteOrder) (int, error) { + if len(b) < btfArrayLen { + return 0, fmt.Errorf("not enough bytes to unmarshal btfArray") + } + + ba.Type = TypeID(bo.Uint32(b[0:])) + ba.IndexType = TypeID(bo.Uint32(b[4:])) + ba.Nelems = bo.Uint32(b[8:]) + return btfArrayLen, nil +} + +type btfMember struct { + NameOff uint32 + Type TypeID + Offset uint32 +} + +var btfMemberLen = int(unsafe.Sizeof(btfMember{})) + +func unmarshalBtfMember(bm *btfMember, b []byte, bo binary.ByteOrder) (int, error) { + if btfMemberLen > len(b) { + return 0, fmt.Errorf("not enough bytes to unmarshal btfMember") + } + + 
bm.NameOff = bo.Uint32(b[0:]) + bm.Type = TypeID(bo.Uint32(b[4:])) + bm.Offset = bo.Uint32(b[8:]) + return btfMemberLen, nil +} + +type btfVarSecinfo struct { + Type TypeID + Offset uint32 + Size uint32 +} + +var btfVarSecinfoLen = int(unsafe.Sizeof(btfVarSecinfo{})) + +func unmarshalBtfVarSecInfo(bvsi *btfVarSecinfo, b []byte, bo binary.ByteOrder) (int, error) { + if len(b) < btfVarSecinfoLen { + return 0, fmt.Errorf("not enough bytes to unmarshal btfVarSecinfo") + } + + bvsi.Type = TypeID(bo.Uint32(b[0:])) + bvsi.Offset = bo.Uint32(b[4:]) + bvsi.Size = bo.Uint32(b[8:]) + return btfVarSecinfoLen, nil +} + +type btfVariable struct { + Linkage uint32 +} + +var btfVariableLen = int(unsafe.Sizeof(btfVariable{})) + +func unmarshalBtfVariable(bv *btfVariable, b []byte, bo binary.ByteOrder) (int, error) { + if len(b) < btfVariableLen { + return 0, fmt.Errorf("not enough bytes to unmarshal btfVariable") + } + + bv.Linkage = bo.Uint32(b[0:]) + return btfVariableLen, nil +} + +type btfEnum struct { + NameOff uint32 + Val uint32 +} + +var btfEnumLen = int(unsafe.Sizeof(btfEnum{})) + +func unmarshalBtfEnum(be *btfEnum, b []byte, bo binary.ByteOrder) (int, error) { + if btfEnumLen > len(b) { + return 0, fmt.Errorf("not enough bytes to unmarshal btfEnum") + } + + be.NameOff = bo.Uint32(b[0:]) + be.Val = bo.Uint32(b[4:]) + return btfEnumLen, nil +} + +type btfEnum64 struct { + NameOff uint32 + ValLo32 uint32 + ValHi32 uint32 +} + +var btfEnum64Len = int(unsafe.Sizeof(btfEnum64{})) + +func unmarshalBtfEnum64(enum *btfEnum64, b []byte, bo binary.ByteOrder) (int, error) { + if len(b) < btfEnum64Len { + return 0, fmt.Errorf("not enough bytes to unmarshal btfEnum64") + } + + enum.NameOff = bo.Uint32(b[0:]) + enum.ValLo32 = bo.Uint32(b[4:]) + enum.ValHi32 = bo.Uint32(b[8:]) + + return btfEnum64Len, nil +} + +type btfParam struct { + NameOff uint32 + Type TypeID +} + +var btfParamLen = int(unsafe.Sizeof(btfParam{})) + +func unmarshalBtfParam(param *btfParam, b []byte, bo 
binary.ByteOrder) (int, error) { + if len(b) < btfParamLen { + return 0, fmt.Errorf("not enough bytes to unmarshal btfParam") + } + + param.NameOff = bo.Uint32(b[0:]) + param.Type = TypeID(bo.Uint32(b[4:])) + + return btfParamLen, nil +} + +type btfDeclTag struct { + ComponentIdx uint32 +} + +var btfDeclTagLen = int(unsafe.Sizeof(btfDeclTag{})) + +func unmarshalBtfDeclTag(bdt *btfDeclTag, b []byte, bo binary.ByteOrder) (int, error) { + if len(b) < btfDeclTagLen { + return 0, fmt.Errorf("not enough bytes to unmarshal btfDeclTag") + } + + bdt.ComponentIdx = bo.Uint32(b[0:]) + return btfDeclTagLen, nil +} diff --git a/vendor/github.com/cilium/ebpf/btf/btf_types_string.go b/vendor/github.com/cilium/ebpf/btf/btf_types_string.go new file mode 100644 index 000000000..a9d2d82b6 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/btf/btf_types_string.go @@ -0,0 +1,83 @@ +// Code generated by "stringer -linecomment -output=btf_types_string.go -type=FuncLinkage,VarLinkage,btfKind"; DO NOT EDIT. + +package btf + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[StaticFunc-0] + _ = x[GlobalFunc-1] + _ = x[ExternFunc-2] +} + +const _FuncLinkage_name = "staticglobalextern" + +var _FuncLinkage_index = [...]uint8{0, 6, 12, 18} + +func (i FuncLinkage) String() string { + idx := int(i) - 0 + if i < 0 || idx >= len(_FuncLinkage_index)-1 { + return "FuncLinkage(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _FuncLinkage_name[_FuncLinkage_index[idx]:_FuncLinkage_index[idx+1]] +} +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. 
+ var x [1]struct{} + _ = x[StaticVar-0] + _ = x[GlobalVar-1] + _ = x[ExternVar-2] +} + +const _VarLinkage_name = "staticglobalextern" + +var _VarLinkage_index = [...]uint8{0, 6, 12, 18} + +func (i VarLinkage) String() string { + idx := int(i) - 0 + if i < 0 || idx >= len(_VarLinkage_index)-1 { + return "VarLinkage(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _VarLinkage_name[_VarLinkage_index[idx]:_VarLinkage_index[idx+1]] +} +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[kindUnknown-0] + _ = x[kindInt-1] + _ = x[kindPointer-2] + _ = x[kindArray-3] + _ = x[kindStruct-4] + _ = x[kindUnion-5] + _ = x[kindEnum-6] + _ = x[kindForward-7] + _ = x[kindTypedef-8] + _ = x[kindVolatile-9] + _ = x[kindConst-10] + _ = x[kindRestrict-11] + _ = x[kindFunc-12] + _ = x[kindFuncProto-13] + _ = x[kindVar-14] + _ = x[kindDatasec-15] + _ = x[kindFloat-16] + _ = x[kindDeclTag-17] + _ = x[kindTypeTag-18] + _ = x[kindEnum64-19] +} + +const _btfKind_name = "UnknownIntPointerArrayStructUnionEnumForwardTypedefVolatileConstRestrictFuncFuncProtoVarDatasecFloatDeclTagTypeTagEnum64" + +var _btfKind_index = [...]uint8{0, 7, 10, 17, 22, 28, 33, 37, 44, 51, 59, 64, 72, 76, 85, 88, 95, 100, 107, 114, 120} + +func (i btfKind) String() string { + idx := int(i) - 0 + if i < 0 || idx >= len(_btfKind_index)-1 { + return "btfKind(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _btfKind_name[_btfKind_index[idx]:_btfKind_index[idx+1]] +} diff --git a/vendor/github.com/cilium/ebpf/btf/core.go b/vendor/github.com/cilium/ebpf/btf/core.go new file mode 100644 index 000000000..f128011dd --- /dev/null +++ b/vendor/github.com/cilium/ebpf/btf/core.go @@ -0,0 +1,1264 @@ +package btf + +import ( + "encoding/binary" + "errors" + "fmt" + "math" + "reflect" + "strconv" + "strings" + + "github.com/cilium/ebpf/asm" + "github.com/cilium/ebpf/internal" + 
"github.com/cilium/ebpf/internal/platform" +) + +// Code in this file is derived from libbpf, which is available under a BSD +// 2-Clause license. + +// A constant used when CO-RE relocation has to remove instructions. +// +// Taken from libbpf. +const COREBadRelocationSentinel = 0xbad2310 + +// COREFixup is the result of computing a CO-RE relocation for a target. +type COREFixup struct { + kind coreKind + local uint64 + target uint64 + // True if there is no valid fixup. The instruction is replaced with an + // invalid dummy. + poison bool + // True if the validation of the local value should be skipped. Used by + // some kinds of bitfield relocations. + skipLocalValidation bool +} + +func (f *COREFixup) equal(other COREFixup) bool { + return f.local == other.local && f.target == other.target +} + +func (f *COREFixup) String() string { + if f.poison { + return fmt.Sprintf("%s=poison", f.kind) + } + return fmt.Sprintf("%s=%d->%d", f.kind, f.local, f.target) +} + +func (f *COREFixup) Apply(ins *asm.Instruction) error { + if !platform.IsLinux { + return fmt.Errorf("CO-RE fixup: %w", internal.ErrNotSupportedOnOS) + } + + if f.poison { + // Relocation is poisoned, replace the instruction with an invalid one. + if ins.OpCode.IsDWordLoad() { + // Replace a dword load with a invalid dword load to preserve instruction size. + *ins = asm.LoadImm(asm.R10, COREBadRelocationSentinel, asm.DWord) + } else { + // Replace all single size instruction with a invalid call instruction. + *ins = asm.BuiltinFunc(COREBadRelocationSentinel).Call() + } + + // Add context to the kernel verifier output. 
+ if source := ins.Source(); source != nil { + *ins = ins.WithSource(asm.Comment(fmt.Sprintf("instruction poisoned by CO-RE: %s", source))) + } else { + *ins = ins.WithSource(asm.Comment("instruction poisoned by CO-RE")) + } + + return nil + } + + switch class := ins.OpCode.Class(); class { + case asm.LdXClass, asm.StClass, asm.StXClass: + if want := int16(f.local); !f.skipLocalValidation && want != ins.Offset { + return fmt.Errorf("invalid offset %d, expected %d", ins.Offset, f.local) + } + + if f.target > math.MaxInt16 { + return fmt.Errorf("offset %d exceeds MaxInt16", f.target) + } + + ins.Offset = int16(f.target) + + case asm.LdClass: + if !ins.IsConstantLoad(asm.DWord) { + return fmt.Errorf("not a dword-sized immediate load") + } + + if want := int64(f.local); !f.skipLocalValidation && want != ins.Constant { + return fmt.Errorf("invalid immediate %d, expected %d (fixup: %v)", ins.Constant, want, f) + } + + ins.Constant = int64(f.target) + + case asm.ALUClass: + if ins.OpCode.ALUOp() == asm.Swap { + return fmt.Errorf("relocation against swap") + } + + fallthrough + + case asm.ALU64Class: + if src := ins.OpCode.Source(); src != asm.ImmSource { + return fmt.Errorf("invalid source %s", src) + } + + if want := int64(f.local); !f.skipLocalValidation && want != ins.Constant { + return fmt.Errorf("invalid immediate %d, expected %d (fixup: %v, kind: %v, ins: %v)", ins.Constant, want, f, f.kind, ins) + } + + if f.target > math.MaxInt32 { + return fmt.Errorf("immediate %d exceeds MaxInt32", f.target) + } + + ins.Constant = int64(f.target) + + default: + return fmt.Errorf("invalid class %s", class) + } + + return nil +} + +func (f COREFixup) isNonExistant() bool { + return f.kind.checksForExistence() && f.target == 0 +} + +// coreKind is the type of CO-RE relocation as specified in BPF source code. 
+type coreKind uint32 + +const ( + reloFieldByteOffset coreKind = iota /* field byte offset */ + reloFieldByteSize /* field size in bytes */ + reloFieldExists /* field existence in target kernel */ + reloFieldSigned /* field signedness (0 - unsigned, 1 - signed) */ + reloFieldLShiftU64 /* bitfield-specific left bitshift */ + reloFieldRShiftU64 /* bitfield-specific right bitshift */ + reloTypeIDLocal /* type ID in local BPF object */ + reloTypeIDTarget /* type ID in target kernel */ + reloTypeExists /* type existence in target kernel */ + reloTypeSize /* type size in bytes */ + reloEnumvalExists /* enum value existence in target kernel */ + reloEnumvalValue /* enum value integer value */ + reloTypeMatches /* type matches kernel type */ +) + +func (k coreKind) checksForExistence() bool { + return k == reloEnumvalExists || k == reloTypeExists || k == reloFieldExists || k == reloTypeMatches +} + +func (k coreKind) String() string { + switch k { + case reloFieldByteOffset: + return "byte_off" + case reloFieldByteSize: + return "byte_sz" + case reloFieldExists: + return "field_exists" + case reloFieldSigned: + return "signed" + case reloFieldLShiftU64: + return "lshift_u64" + case reloFieldRShiftU64: + return "rshift_u64" + case reloTypeIDLocal: + return "local_type_id" + case reloTypeIDTarget: + return "target_type_id" + case reloTypeExists: + return "type_exists" + case reloTypeSize: + return "type_size" + case reloEnumvalExists: + return "enumval_exists" + case reloEnumvalValue: + return "enumval_value" + case reloTypeMatches: + return "type_matches" + default: + return fmt.Sprintf("unknown (%d)", k) + } +} + +// CORERelocate calculates changes needed to adjust eBPF instructions for differences +// in types. +// +// targets forms the set of types to relocate against. The first element has to be +// BTF for vmlinux, the following must be types for kernel modules. +// +// resolveLocalTypeID is called for each local type which requires a stable TypeID. 
+// Calling the function with the same type multiple times must produce the same +// result. It is the callers responsibility to ensure that the relocated instructions +// are loaded with matching BTF. +// +// Returns a list of fixups which can be applied to instructions to make them +// match the target type(s). +// +// Fixups are returned in the order of relos, e.g. fixup[i] is the solution +// for relos[i]. +func CORERelocate(relos []*CORERelocation, targets []*Spec, bo binary.ByteOrder, resolveLocalTypeID func(Type) (TypeID, error)) ([]COREFixup, error) { + if len(targets) == 0 { + // Explicitly check for nil here since the argument used to be optional. + return nil, fmt.Errorf("targets must be provided") + } + + // We can't encode type IDs that aren't for vmlinux into instructions at the + // moment. + resolveTargetTypeID := targets[0].TypeID + + for _, target := range targets { + if bo != target.byteOrder { + return nil, fmt.Errorf("can't relocate %s against %s", bo, target.byteOrder) + } + } + + type reloGroup struct { + relos []*CORERelocation + // Position of each relocation in relos. + indices []int + } + + // Split relocations into per Type lists. + relosByType := make(map[Type]*reloGroup) + result := make([]COREFixup, len(relos)) + for i, relo := range relos { + if relo.kind == reloTypeIDLocal { + // Filtering out reloTypeIDLocal here makes our lives a lot easier + // down the line, since it doesn't have a target at all. 
+ if len(relo.accessor) > 1 || relo.accessor[0] != 0 { + return nil, fmt.Errorf("%s: unexpected accessor %v", relo.kind, relo.accessor) + } + + id, err := resolveLocalTypeID(relo.typ) + if err != nil { + return nil, fmt.Errorf("%s: get type id: %w", relo.kind, err) + } + + result[i] = COREFixup{ + kind: relo.kind, + local: uint64(relo.id), + target: uint64(id), + } + continue + } + + group, ok := relosByType[relo.typ] + if !ok { + group = &reloGroup{} + relosByType[relo.typ] = group + } + group.relos = append(group.relos, relo) + group.indices = append(group.indices, i) + } + + for localType, group := range relosByType { + localTypeName := localType.TypeName() + if localTypeName == "" { + return nil, fmt.Errorf("relocate unnamed or anonymous type %s: %w", localType, ErrNotSupported) + } + + essentialName := newEssentialName(localTypeName) + + var targetTypes []Type + for _, target := range targets { + namedTypes, err := target.TypesByName(essentialName) + if errors.Is(err, ErrNotFound) { + continue + } else if err != nil { + return nil, err + } + + targetTypes = append(targetTypes, namedTypes...) + } + + fixups, err := coreCalculateFixups(group.relos, targetTypes, bo, resolveTargetTypeID) + if err != nil { + return nil, fmt.Errorf("relocate %s: %w", localType, err) + } + + for j, index := range group.indices { + result[index] = fixups[j] + } + } + + return result, nil +} + +var errAmbiguousRelocation = errors.New("ambiguous relocation") +var errImpossibleRelocation = errors.New("impossible relocation") +var errIncompatibleTypes = errors.New("incompatible types") + +// coreCalculateFixups finds the target type that best matches all relocations. +// +// All relos must target the same type. +// +// The best target is determined by scoring: the less poisoning we have to do +// the better the target is. 
+func coreCalculateFixups(relos []*CORERelocation, targets []Type, bo binary.ByteOrder, resolveTargetTypeID func(Type) (TypeID, error)) ([]COREFixup, error) { + bestScore := len(relos) + var bestFixups []COREFixup + for _, target := range targets { + score := 0 // lower is better + fixups := make([]COREFixup, 0, len(relos)) + for _, relo := range relos { + fixup, err := coreCalculateFixup(relo, target, bo, resolveTargetTypeID) + if err != nil { + return nil, fmt.Errorf("target %s: %s: %w", target, relo.kind, err) + } + if fixup.poison || fixup.isNonExistant() { + score++ + } + fixups = append(fixups, fixup) + } + + if score > bestScore { + // We have a better target already, ignore this one. + continue + } + + if score < bestScore { + // This is the best target yet, use it. + bestScore = score + bestFixups = fixups + continue + } + + // Some other target has the same score as the current one. Make sure + // the fixups agree with each other. + for i, fixup := range bestFixups { + if !fixup.equal(fixups[i]) { + return nil, fmt.Errorf("%s: multiple types match: %w", fixup.kind, errAmbiguousRelocation) + } + } + } + + if bestFixups == nil { + // Nothing at all matched, probably because there are no suitable + // targets at all. + // + // Poison everything except checksForExistence. + bestFixups = make([]COREFixup, len(relos)) + for i, relo := range relos { + if relo.kind.checksForExistence() { + bestFixups[i] = COREFixup{kind: relo.kind, local: 1, target: 0} + } else { + bestFixups[i] = COREFixup{kind: relo.kind, poison: true} + } + } + } + + return bestFixups, nil +} + +var errNoSignedness = errors.New("no signedness") + +// coreCalculateFixup calculates the fixup given a relocation and a target type. 
func coreCalculateFixup(relo *CORERelocation, target Type, bo binary.ByteOrder, resolveTargetTypeID func(Type) (TypeID, error)) (COREFixup, error) {
	// Helpers to build the three possible outcomes: a validated fixup, a
	// fixup whose local value must not be checked against the instruction,
	// and a poisoned (or "does not exist") result.
	fixup := func(local, target uint64) (COREFixup, error) {
		return COREFixup{kind: relo.kind, local: local, target: target}, nil
	}
	fixupWithoutValidation := func(local, target uint64) (COREFixup, error) {
		return COREFixup{kind: relo.kind, local: local, target: target, skipLocalValidation: true}, nil
	}
	poison := func() (COREFixup, error) {
		if relo.kind.checksForExistence() {
			return fixup(1, 0)
		}
		return COREFixup{kind: relo.kind, poison: true}, nil
	}
	zero := COREFixup{}

	local := relo.typ

	switch relo.kind {
	case reloTypeMatches:
		// Type-based relocations always use the accessor "0".
		if len(relo.accessor) > 1 || relo.accessor[0] != 0 {
			return zero, fmt.Errorf("unexpected accessor %v", relo.accessor)
		}

		err := coreTypesMatch(local, target, nil)
		if errors.Is(err, errIncompatibleTypes) {
			return poison()
		}
		if err != nil {
			return zero, err
		}

		return fixup(1, 1)

	case reloTypeIDTarget, reloTypeSize, reloTypeExists:
		// Type-based relocations always use the accessor "0".
		if len(relo.accessor) > 1 || relo.accessor[0] != 0 {
			return zero, fmt.Errorf("unexpected accessor %v", relo.accessor)
		}

		err := CheckTypeCompatibility(local, target)
		if errors.Is(err, errIncompatibleTypes) {
			return poison()
		}
		if err != nil {
			return zero, err
		}

		switch relo.kind {
		case reloTypeExists:
			return fixup(1, 1)

		case reloTypeIDTarget:
			targetID, err := resolveTargetTypeID(target)
			if errors.Is(err, ErrNotFound) {
				// Probably a relocation trying to get the ID
				// of a type from a kmod.
				return poison()
			}
			if err != nil {
				return zero, err
			}
			return fixup(uint64(relo.id), uint64(targetID))

		case reloTypeSize:
			localSize, err := Sizeof(local)
			if err != nil {
				return zero, err
			}

			targetSize, err := Sizeof(target)
			if err != nil {
				return zero, err
			}

			return fixup(uint64(localSize), uint64(targetSize))
		}

	case reloEnumvalValue, reloEnumvalExists:
		localValue, targetValue, err := coreFindEnumValue(local, relo.accessor, target)
		if errors.Is(err, errImpossibleRelocation) {
			return poison()
		}
		if err != nil {
			return zero, err
		}

		switch relo.kind {
		case reloEnumvalExists:
			return fixup(1, 1)

		case reloEnumvalValue:
			return fixup(localValue.Value, targetValue.Value)
		}

	case reloFieldByteOffset, reloFieldByteSize, reloFieldExists, reloFieldLShiftU64, reloFieldRShiftU64, reloFieldSigned:
		if _, ok := As[*Fwd](target); ok {
			// We can't relocate fields using a forward declaration, so
			// skip it. If a non-forward declaration is present in the BTF
			// we'll find it in one of the other iterations.
			return poison()
		}

		localField, targetField, err := coreFindField(local, relo.accessor, target)
		if errors.Is(err, errImpossibleRelocation) {
			return poison()
		}
		if err != nil {
			return zero, err
		}

		// Bitfield loads can't be validated against the instruction's
		// immediate, so validation is skipped whenever the local field is a
		// bitfield.
		maybeSkipValidation := func(f COREFixup, err error) (COREFixup, error) {
			f.skipLocalValidation = localField.bitfieldSize > 0
			return f, err
		}

		switch relo.kind {
		case reloFieldExists:
			return fixup(1, 1)

		case reloFieldByteOffset:
			return maybeSkipValidation(fixup(uint64(localField.offset), uint64(targetField.offset)))

		case reloFieldByteSize:
			localSize, err := Sizeof(localField.Type)
			if err != nil {
				return zero, err
			}

			targetSize, err := Sizeof(targetField.Type)
			if err != nil {
				return zero, err
			}
			return maybeSkipValidation(fixup(uint64(localSize), uint64(targetSize)))

		case reloFieldLShiftU64:
			// Left-shift amount that moves the (bit)field into the top bits
			// of a 64-bit load; the computation depends on the byte order
			// the program was compiled for.
			var target uint64
			if bo == binary.LittleEndian {
				targetSize, err := targetField.sizeBits()
				if err != nil {
					return zero, err
				}

				target = uint64(64 - targetField.bitfieldOffset - targetSize)
			} else {
				loadWidth, err := Sizeof(targetField.Type)
				if err != nil {
					return zero, err
				}

				target = uint64(64 - Bits(loadWidth*8) + targetField.bitfieldOffset)
			}
			return fixupWithoutValidation(0, target)

		case reloFieldRShiftU64:
			// Arithmetic right shift that sign-/zero-extends the field after
			// the preceding left shift.
			targetSize, err := targetField.sizeBits()
			if err != nil {
				return zero, err
			}

			return fixupWithoutValidation(0, uint64(64-targetSize))

		case reloFieldSigned:
			switch local := UnderlyingType(localField.Type).(type) {
			case *Enum:
				target, ok := As[*Enum](targetField.Type)
				if !ok {
					return zero, fmt.Errorf("target isn't *Enum but %T", targetField.Type)
				}

				return fixup(boolToUint64(local.Signed), boolToUint64(target.Signed))
			case *Int:
				target, ok := As[*Int](targetField.Type)
				if !ok {
					return zero, fmt.Errorf("target isn't *Int but %T", targetField.Type)
				}

				return fixup(
					uint64(local.Encoding&Signed),
					uint64(target.Encoding&Signed),
				)

			default:
				return zero, fmt.Errorf("type %T: %w", local, errNoSignedness)
			}
		}
	}

	// Unknown or unhandled relocation kind.
	return zero, ErrNotSupported
}

// boolToUint64 converts a bool into the 0/1 encoding used in fixup values.
func boolToUint64(val bool) uint64 {
	if val {
		return 1
	}
	return 0
}

/* coreAccessor contains a path through a struct. It contains at least one index.
 *
 * The interpretation depends on the kind of the relocation. The following is
 * taken from struct bpf_core_relo in libbpf_internal.h:
 *
 * - for field-based relocations, string encodes an accessed field using
 *   a sequence of field and array indices, separated by colon (:). It's
 *   conceptually very close to LLVM's getelementptr ([0]) instruction's
 *   arguments for identifying offset to a field.
 * - for type-based relocations, strings is expected to be just "0";
 * - for enum value-based relocations, string contains an index of enum
 *   value within its enum type;
 *
 * Example to provide a better feel.
 *
 *   struct sample {
 *       int a;
 *       struct {
 *           int b[10];
 *       };
 *   };
 *
 *   struct sample s = ...;
 *   int x = &s->a;     // encoded as "0:0" (a is field #0)
 *   int y = &s->b[5];  // encoded as "0:1:0:5" (anon struct is field #1,
 *                      // b is field #0 inside anon struct, accessing elem #5)
 *   int z = &s[10]->b; // encoded as "10:1" (ptr is used as an array)
 */
type coreAccessor []int

// parseCOREAccessor parses a colon-separated accessor string like "0:1:5"
// into a list of non-negative indices.
func parseCOREAccessor(accessor string) (coreAccessor, error) {
	if accessor == "" {
		return nil, fmt.Errorf("empty accessor")
	}

	parts := strings.Split(accessor, ":")
	result := make(coreAccessor, 0, len(parts))
	for _, part := range parts {
		// 31 bits to avoid overflowing int on 32 bit platforms.
		index, err := strconv.ParseUint(part, 10, 31)
		if err != nil {
			return nil, fmt.Errorf("accessor index %q: %s", part, err)
		}

		result = append(result, int(index))
	}

	return result, nil
}

// String renders the accessor back into its "0:1:5" textual form.
func (ca coreAccessor) String() string {
	strs := make([]string, 0, len(ca))
	for _, i := range ca {
		strs = append(strs, strconv.Itoa(i))
	}
	return strings.Join(strs, ":")
}

// enumValue resolves a single-index accessor against an enum type and
// returns the addressed enum value.
func (ca coreAccessor) enumValue(t Type) (*EnumValue, error) {
	e, ok := As[*Enum](t)
	if !ok {
		return nil, fmt.Errorf("not an enum: %s", t)
	}

	if len(ca) > 1 {
		return nil, fmt.Errorf("invalid accessor %s for enum", ca)
	}

	i := ca[0]
	if i >= len(e.Values) {
		return nil, fmt.Errorf("invalid index %d for %s", i, e)
	}

	return &e.Values[i], nil
}

// coreField represents the position of a "child" of a composite type from the
// start of that type.
//
//	/- start of composite
//	| offset * 8 | bitfieldOffset | bitfieldSize | ... |
//	\- start of field       end of field -/
type coreField struct {
	Type Type

	// The position of the field from the start of the composite type in bytes.
	offset uint32

	// The offset of the bitfield in bits from the start of the field.
	bitfieldOffset Bits

	// The size of the bitfield in bits.
	//
	// Zero if the field is not a bitfield.
	bitfieldSize Bits
}

// adjustOffsetToNthElement advances the byte offset by n elements of
// cf.Type's size, as when indexing a pointer like an array. n == 0 is a
// no-op and avoids the Sizeof call.
func (cf *coreField) adjustOffsetToNthElement(n int) error {
	if n == 0 {
		return nil
	}

	size, err := Sizeof(cf.Type)
	if err != nil {
		return err
	}

	cf.offset += uint32(n) * uint32(size)
	return nil
}

// adjustOffsetBits splits a bit offset into an aligned byte offset (added to
// cf.offset) and a remaining in-field bit offset (stored in bitfieldOffset).
func (cf *coreField) adjustOffsetBits(offset Bits) error {
	align, err := alignof(cf.Type)
	if err != nil {
		return err
	}

	// We can compute the load offset by:
	// 1) converting the bit offset to bytes with a flooring division.
	// 2) dividing and multiplying that offset by the alignment, yielding the
	//    load size aligned offset.
	offsetBytes := uint32(offset/8) / uint32(align) * uint32(align)

	// The number of bits remaining is the bit offset less the number of bits
	// we can "skip" with the aligned offset.
	cf.bitfieldOffset = offset - Bits(offsetBytes*8)

	// We know that cf.offset is aligned to at least align since we get it
	// from the compiler via BTF. Adding an aligned offsetBytes preserves the
	// alignment.
	cf.offset += offsetBytes
	return nil
}

// sizeBits returns the field's size in bits, synthesising one from the
// type's byte size when the field is not a bitfield.
func (cf *coreField) sizeBits() (Bits, error) {
	if cf.bitfieldSize > 0 {
		return cf.bitfieldSize, nil
	}

	// Someone is trying to access a non-bitfield via a bit shift relocation.
	// This happens when a field changes from a bitfield to a regular field
	// between kernel versions. Synthesise the size to make the shifts work.
	size, err := Sizeof(cf.Type)
	if err != nil {
		return 0, err
	}
	return Bits(size * 8), nil
}

// coreFindField descends into the local type using the accessor and tries to
// find an equivalent field in target at each step.
//
// Returns the field and the offset of the field from the start of
// target in bits.
func coreFindField(localT Type, localAcc coreAccessor, targetT Type) (coreField, coreField, error) {
	local := coreField{Type: localT}
	target := coreField{Type: targetT}

	if err := coreAreMembersCompatible(local.Type, target.Type); err != nil {
		return coreField{}, coreField{}, fmt.Errorf("fields: %w", err)
	}

	// The first index is used to offset a pointer of the base type like
	// when accessing an array.
	if err := local.adjustOffsetToNthElement(localAcc[0]); err != nil {
		return coreField{}, coreField{}, err
	}

	if err := target.adjustOffsetToNthElement(localAcc[0]); err != nil {
		return coreField{}, coreField{}, err
	}

	var localMaybeFlex, targetMaybeFlex bool
	for i, acc := range localAcc[1:] {
		switch localType := UnderlyingType(local.Type).(type) {
		case composite:
			// For composite types acc is used to find the field in the local type,
			// and then we try to find a field in target with the same name.
			localMembers := localType.members()
			if acc >= len(localMembers) {
				return coreField{}, coreField{}, fmt.Errorf("invalid accessor %d for %s", acc, localType)
			}

			localMember := localMembers[acc]
			if localMember.Name == "" {
				localMemberType, ok := As[composite](localMember.Type)
				if !ok {
					return coreField{}, coreField{}, fmt.Errorf("unnamed field with type %s: %s", localMember.Type, ErrNotSupported)
				}

				// This is an anonymous struct or union, ignore it.
				local = coreField{
					Type:   localMemberType,
					offset: local.offset + localMember.Offset.Bytes(),
				}
				localMaybeFlex = false
				continue
			}

			targetType, ok := As[composite](target.Type)
			if !ok {
				return coreField{}, coreField{}, fmt.Errorf("target not composite: %w", errImpossibleRelocation)
			}

			targetMember, last, err := coreFindMember(targetType, localMember.Name)
			if err != nil {
				return coreField{}, coreField{}, err
			}

			local = coreField{
				Type:         localMember.Type,
				offset:       local.offset,
				bitfieldSize: localMember.BitfieldSize,
			}
			// Only the last member of the composite may be a flexible array.
			localMaybeFlex = acc == len(localMembers)-1

			target = coreField{
				Type:         targetMember.Type,
				offset:       target.offset,
				bitfieldSize: targetMember.BitfieldSize,
			}
			targetMaybeFlex = last

			if local.bitfieldSize == 0 && target.bitfieldSize == 0 {
				local.offset += localMember.Offset.Bytes()
				target.offset += targetMember.Offset.Bytes()
				// NB: break leaves the type switch, not the loop; the
				// compatibility check below still runs for this step.
				break
			}

			// Either of the members is a bitfield. Make sure we're at the
			// end of the accessor.
			if next := i + 1; next < len(localAcc[1:]) {
				return coreField{}, coreField{}, fmt.Errorf("can't descend into bitfield")
			}

			if err := local.adjustOffsetBits(localMember.Offset); err != nil {
				return coreField{}, coreField{}, err
			}

			if err := target.adjustOffsetBits(targetMember.Offset); err != nil {
				return coreField{}, coreField{}, err
			}

		case *Array:
			// For arrays, acc is the index in the target.
			targetType, ok := As[*Array](target.Type)
			if !ok {
				return coreField{}, coreField{}, fmt.Errorf("target not array: %w", errImpossibleRelocation)
			}

			// Zero-length arrays are only valid as trailing flexible arrays.
			if localType.Nelems == 0 && !localMaybeFlex {
				return coreField{}, coreField{}, fmt.Errorf("local type has invalid flexible array")
			}
			if targetType.Nelems == 0 && !targetMaybeFlex {
				return coreField{}, coreField{}, fmt.Errorf("target type has invalid flexible array")
			}

			if localType.Nelems > 0 && acc >= int(localType.Nelems) {
				return coreField{}, coreField{}, fmt.Errorf("invalid access of %s at index %d", localType, acc)
			}
			if targetType.Nelems > 0 && acc >= int(targetType.Nelems) {
				return coreField{}, coreField{}, fmt.Errorf("out of bounds access of target: %w", errImpossibleRelocation)
			}

			local = coreField{
				Type:   localType.Type,
				offset: local.offset,
			}
			localMaybeFlex = false

			if err := local.adjustOffsetToNthElement(acc); err != nil {
				return coreField{}, coreField{}, err
			}

			target = coreField{
				Type:   targetType.Type,
				offset: target.offset,
			}
			targetMaybeFlex = false

			if err := target.adjustOffsetToNthElement(acc); err != nil {
				return coreField{}, coreField{}, err
			}

		default:
			return coreField{}, coreField{}, fmt.Errorf("relocate field of %T: %w", localType, ErrNotSupported)
		}

		if err := coreAreMembersCompatible(local.Type, target.Type); err != nil {
			return coreField{}, coreField{}, err
		}
	}

	return local, target, nil
}

// coreFindMember finds a member in a composite type while handling anonymous
// structs and unions.
func coreFindMember(typ composite, name string) (Member, bool, error) {
	if name == "" {
		return Member{}, false, errors.New("can't search for anonymous member")
	}

	// Breadth-first worklist of composites to search, each carrying the
	// accumulated offset of the anonymous members leading to it.
	type offsetTarget struct {
		composite
		offset Bits
	}

	targets := []offsetTarget{{typ, 0}}
	visited := make(map[composite]bool)

	for i := 0; i < len(targets); i++ {
		target := targets[i]

		// Only visit targets once to prevent infinite recursion.
		if visited[target] {
			continue
		}
		if len(visited) >= maxResolveDepth {
			// This check is different than libbpf, which restricts the entire
			// path to BPF_CORE_SPEC_MAX_LEN items.
			return Member{}, false, fmt.Errorf("type is nested too deep")
		}
		visited[target] = true

		members := target.members()
		for j, member := range members {
			if member.Name == name {
				// NB: This is safe because member is a copy.
				member.Offset += target.offset
				// The second return value reports whether this is the last
				// member, which is what makes a flexible array valid.
				return member, j == len(members)-1, nil
			}

			// The names don't match, but this member could be an anonymous struct
			// or union.
			if member.Name != "" {
				continue
			}

			comp, ok := As[composite](member.Type)
			if !ok {
				return Member{}, false, fmt.Errorf("anonymous non-composite type %T not allowed", member.Type)
			}

			targets = append(targets, offsetTarget{comp, target.offset + member.Offset})
		}
	}

	return Member{}, false, fmt.Errorf("no matching member: %w", errImpossibleRelocation)
}

// coreFindEnumValue follows localAcc to find the equivalent enum value in target.
func coreFindEnumValue(local Type, localAcc coreAccessor, target Type) (localValue, targetValue *EnumValue, _ error) {
	localValue, err := localAcc.enumValue(local)
	if err != nil {
		return nil, nil, err
	}

	targetEnum, ok := As[*Enum](target)
	if !ok {
		return nil, nil, errImpossibleRelocation
	}

	// Match on the essential name, so suffixed names (e.g. "___v2" style
	// flavours) still relocate against the plain target name.
	localName := newEssentialName(localValue.Name)
	for i, targetValue := range targetEnum.Values {
		if newEssentialName(targetValue.Name) != localName {
			continue
		}

		return localValue, &targetEnum.Values[i], nil
	}

	return nil, nil, errImpossibleRelocation
}

// CheckTypeCompatibility checks local and target types for Compatibility according to CO-RE rules.
//
// Only layout compatibility is checked, ignoring names of the root type.
func CheckTypeCompatibility(localType Type, targetType Type) error {
	return coreAreTypesCompatible(localType, targetType, nil)
}

// pair is a comparable key of two types — presumably used to track visited
// type pairs during the recursive compatibility walk; its use is outside
// this chunk (TODO confirm against coreAreTypesCompatible).
type pair struct {
	A, B Type
}

/* The comment below is from bpf_core_types_are_compat in libbpf.c:
 *
 * Check local and target types for compatibility. This check is used for
 * type-based CO-RE relocations and follow slightly different rules than
 * field-based relocations. This function assumes that root types were already
 * checked for name match. Beyond that initial root-level name check, names
 * are completely ignored.
Compatibility rules are as follows: + * - any two STRUCTs/UNIONs/FWDs/ENUMs/INTs are considered compatible, but + * kind should match for local and target types (i.e., STRUCT is not + * compatible with UNION); + * - for ENUMs, the size is ignored; + * - for INT, size and signedness are ignored; + * - for ARRAY, dimensionality is ignored, element types are checked for + * compatibility recursively; + * - CONST/VOLATILE/RESTRICT modifiers are ignored; + * - TYPEDEFs/PTRs are compatible if types they pointing to are compatible; + * - FUNC_PROTOs are compatible if they have compatible signature: same + * number of input args and compatible return and argument types. + * These rules are not set in stone and probably will be adjusted as we get + * more experience with using BPF CO-RE relocations. + * + * Returns errIncompatibleTypes if types are not compatible. + */ +func coreAreTypesCompatible(localType Type, targetType Type, visited map[pair]struct{}) error { + localType = UnderlyingType(localType) + targetType = UnderlyingType(targetType) + + if reflect.TypeOf(localType) != reflect.TypeOf(targetType) { + return fmt.Errorf("type mismatch between %v and %v: %w", localType, targetType, errIncompatibleTypes) + } + + if _, ok := visited[pair{localType, targetType}]; ok { + return nil + } + if visited == nil { + visited = make(map[pair]struct{}) + } + visited[pair{localType, targetType}] = struct{}{} + + switch lv := localType.(type) { + case *Void, *Struct, *Union, *Enum, *Fwd, *Int: + return nil + + case *Pointer: + tv := targetType.(*Pointer) + return coreAreTypesCompatible(lv.Target, tv.Target, visited) + + case *Array: + tv := targetType.(*Array) + if err := coreAreTypesCompatible(lv.Index, tv.Index, visited); err != nil { + return err + } + + return coreAreTypesCompatible(lv.Type, tv.Type, visited) + + case *FuncProto: + tv := targetType.(*FuncProto) + if err := coreAreTypesCompatible(lv.Return, tv.Return, visited); err != nil { + return err + } + + if len(lv.Params) 
!= len(tv.Params) { + return fmt.Errorf("function param mismatch: %w", errIncompatibleTypes) + } + + for i, localParam := range lv.Params { + targetParam := tv.Params[i] + if err := coreAreTypesCompatible(localParam.Type, targetParam.Type, visited); err != nil { + return err + } + } + + return nil + + default: + return fmt.Errorf("unsupported type %T", localType) + } +} + +/* coreAreMembersCompatible checks two types for field-based relocation compatibility. + * + * The comment below is from bpf_core_fields_are_compat in libbpf.c: + * + * Check two types for compatibility for the purpose of field access + * relocation. const/volatile/restrict and typedefs are skipped to ensure we + * are relocating semantically compatible entities: + * - any two STRUCTs/UNIONs are compatible and can be mixed; + * - any two FWDs are compatible, if their names match (modulo flavor suffix); + * - any two PTRs are always compatible; + * - for ENUMs, names should be the same (ignoring flavor suffix) or at + * least one of enums should be anonymous; + * - for ENUMs, check sizes, names are ignored; + * - for INT, size and signedness are ignored; + * - any two FLOATs are always compatible; + * - for ARRAY, dimensionality is ignored, element types are checked for + * compatibility recursively; + * [ NB: coreAreMembersCompatible doesn't recurse, this check is done + * by coreFindField. ] + * - everything else shouldn't be ever a target of relocation. + * These rules are not set in stone and probably will be adjusted as we get + * more experience with using BPF CO-RE relocations. + * + * Returns errImpossibleRelocation if the members are not compatible. 
+ */ +func coreAreMembersCompatible(localType Type, targetType Type) error { + localType = UnderlyingType(localType) + targetType = UnderlyingType(targetType) + + _, lok := localType.(composite) + _, tok := targetType.(composite) + if lok && tok { + return nil + } + + if reflect.TypeOf(localType) != reflect.TypeOf(targetType) { + return fmt.Errorf("type mismatch: %w", errImpossibleRelocation) + } + + switch lv := localType.(type) { + case *Array, *Pointer, *Float, *Int: + return nil + + case *Enum: + tv := targetType.(*Enum) + if !coreEssentialNamesMatch(lv.Name, tv.Name) { + return fmt.Errorf("names %q and %q don't match: %w", lv.Name, tv.Name, errImpossibleRelocation) + } + + return nil + + case *Fwd: + tv := targetType.(*Fwd) + if !coreEssentialNamesMatch(lv.Name, tv.Name) { + return fmt.Errorf("names %q and %q don't match: %w", lv.Name, tv.Name, errImpossibleRelocation) + } + + return nil + + default: + return fmt.Errorf("type %s: %w", localType, ErrNotSupported) + } +} + +// coreEssentialNamesMatch compares two names while ignoring their flavour suffix. +// +// This should only be used on names which are in the global scope, like struct +// names, typedefs or enum values. +func coreEssentialNamesMatch(a, b string) bool { + if a == "" || b == "" { + // allow anonymous and named type to match + return true + } + + return newEssentialName(a) == newEssentialName(b) +} + +/* The comment below is from __bpf_core_types_match in relo_core.c: + * + * Check that two types "match". This function assumes that root types were + * already checked for name match. + * + * The matching relation is defined as follows: + * - modifiers and typedefs are stripped (and, hence, effectively ignored) + * - generally speaking types need to be of same kind (struct vs. struct, union + * vs. union, etc.) + * - exceptions are struct/union behind a pointer which could also match a + * forward declaration of a struct or union, respectively, and enum vs. 
+ * enum64 (see below) + * Then, depending on type: + * - integers: + * - match if size and signedness match + * - arrays & pointers: + * - target types are recursively matched + * - structs & unions: + * - local members need to exist in target with the same name + * - for each member we recursively check match unless it is already behind a + * pointer, in which case we only check matching names and compatible kind + * - enums: + * - local variants have to have a match in target by symbolic name (but not + * numeric value) + * - size has to match (but enum may match enum64 and vice versa) + * - function pointers: + * - number and position of arguments in local type has to match target + * - for each argument and the return value we recursively check match + */ +func coreTypesMatch(localType Type, targetType Type, visited map[pair]struct{}) error { + localType = UnderlyingType(localType) + targetType = UnderlyingType(targetType) + + if !coreEssentialNamesMatch(localType.TypeName(), targetType.TypeName()) { + return fmt.Errorf("type name %q don't match %q: %w", localType.TypeName(), targetType.TypeName(), errIncompatibleTypes) + } + + if reflect.TypeOf(localType) != reflect.TypeOf(targetType) { + return fmt.Errorf("type mismatch between %v and %v: %w", localType, targetType, errIncompatibleTypes) + } + + if _, ok := visited[pair{localType, targetType}]; ok { + return nil + } + if visited == nil { + visited = make(map[pair]struct{}) + } + visited[pair{localType, targetType}] = struct{}{} + + switch lv := (localType).(type) { + case *Void: + + case *Fwd: + if targetType.(*Fwd).Kind != lv.Kind { + return fmt.Errorf("fwd kind mismatch between %v and %v: %w", localType, targetType, errIncompatibleTypes) + } + + case *Enum: + return coreEnumsMatch(lv, targetType.(*Enum)) + + case composite: + tv := targetType.(composite) + + if len(lv.members()) > len(tv.members()) { + return errIncompatibleTypes + } + + localMembers := lv.members() + targetMembers := map[string]Member{} + 
for _, member := range tv.members() { + targetMembers[member.Name] = member + } + + for _, localMember := range localMembers { + targetMember, found := targetMembers[localMember.Name] + if !found { + return fmt.Errorf("no field %q in %v: %w", localMember.Name, targetType, errIncompatibleTypes) + } + + err := coreTypesMatch(localMember.Type, targetMember.Type, visited) + if err != nil { + return err + } + } + + case *Int: + if !coreEncodingMatches(lv, targetType.(*Int)) { + return fmt.Errorf("int mismatch between %v and %v: %w", localType, targetType, errIncompatibleTypes) + } + + case *Pointer: + tv := targetType.(*Pointer) + + // Allow a pointer to a forward declaration to match a struct + // or union. + if fwd, ok := As[*Fwd](lv.Target); ok && fwd.matches(tv.Target) { + return nil + } + + if fwd, ok := As[*Fwd](tv.Target); ok && fwd.matches(lv.Target) { + return nil + } + + return coreTypesMatch(lv.Target, tv.Target, visited) + + case *Array: + tv := targetType.(*Array) + + if lv.Nelems != tv.Nelems { + return fmt.Errorf("array mismatch between %v and %v: %w", localType, targetType, errIncompatibleTypes) + } + + return coreTypesMatch(lv.Type, tv.Type, visited) + + case *FuncProto: + tv := targetType.(*FuncProto) + + if len(lv.Params) != len(tv.Params) { + return fmt.Errorf("function param mismatch: %w", errIncompatibleTypes) + } + + for i, lparam := range lv.Params { + if err := coreTypesMatch(lparam.Type, tv.Params[i].Type, visited); err != nil { + return err + } + } + + return coreTypesMatch(lv.Return, tv.Return, visited) + + default: + return fmt.Errorf("unsupported type %T", localType) + } + + return nil +} + +// coreEncodingMatches returns true if both ints have the same size and signedness. +// All encodings other than `Signed` are considered unsigned. 
+func coreEncodingMatches(local, target *Int) bool { + return local.Size == target.Size && (local.Encoding == Signed) == (target.Encoding == Signed) +} + +// coreEnumsMatch checks two enums match, which is considered to be the case if the following is true: +// - size has to match (but enum may match enum64 and vice versa) +// - local variants have to have a match in target by symbolic name (but not numeric value) +func coreEnumsMatch(local *Enum, target *Enum) error { + if local.Size != target.Size { + return fmt.Errorf("size mismatch between %v and %v: %w", local, target, errIncompatibleTypes) + } + + // If there are more values in the local than the target, there must be at least one value in the local + // that isn't in the target, and therefor the types are incompatible. + if len(local.Values) > len(target.Values) { + return fmt.Errorf("local has more values than target: %w", errIncompatibleTypes) + } + +outer: + for _, lv := range local.Values { + for _, rv := range target.Values { + if coreEssentialNamesMatch(lv.Name, rv.Name) { + continue outer + } + } + + return fmt.Errorf("no match for %v in %v: %w", lv, target, errIncompatibleTypes) + } + + return nil +} diff --git a/vendor/github.com/cilium/ebpf/btf/doc.go b/vendor/github.com/cilium/ebpf/btf/doc.go new file mode 100644 index 000000000..b1f4b1fc3 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/btf/doc.go @@ -0,0 +1,5 @@ +// Package btf handles data encoded according to the BPF Type Format. 
+// +// The canonical documentation lives in the Linux kernel repository and is +// available at https://www.kernel.org/doc/html/latest/bpf/btf.html +package btf diff --git a/vendor/github.com/cilium/ebpf/btf/ext_info.go b/vendor/github.com/cilium/ebpf/btf/ext_info.go new file mode 100644 index 000000000..6ff5e2b90 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/btf/ext_info.go @@ -0,0 +1,832 @@ +package btf + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + "io" + "math" + "sort" + + "github.com/cilium/ebpf/asm" + "github.com/cilium/ebpf/internal" +) + +// ExtInfos contains ELF section metadata. +type ExtInfos struct { + // The slices are sorted by offset in ascending order. + funcInfos map[string]FuncOffsets + lineInfos map[string]LineOffsets + relocationInfos map[string]CORERelocationInfos +} + +// loadExtInfosFromELF parses ext infos from the .BTF.ext section in an ELF. +// +// Returns an error wrapping ErrNotFound if no ext infos are present. +func loadExtInfosFromELF(file *internal.SafeELFFile, spec *Spec) (*ExtInfos, error) { + section := file.Section(".BTF.ext") + if section == nil { + return nil, fmt.Errorf("btf ext infos: %w", ErrNotFound) + } + + if section.ReaderAt == nil { + return nil, fmt.Errorf("compressed ext_info is not supported") + } + + return loadExtInfos(section.ReaderAt, file.ByteOrder, spec) +} + +// loadExtInfos parses bare ext infos. +func loadExtInfos(r io.ReaderAt, bo binary.ByteOrder, spec *Spec) (*ExtInfos, error) { + // Open unbuffered section reader. binary.Read() calls io.ReadFull on + // the header structs, resulting in one syscall per header. 
+ headerRd := io.NewSectionReader(r, 0, math.MaxInt64) + extHeader, err := parseBTFExtHeader(headerRd, bo) + if err != nil { + return nil, fmt.Errorf("parsing BTF extension header: %w", err) + } + + coreHeader, err := parseBTFExtCOREHeader(headerRd, bo, extHeader) + if err != nil { + return nil, fmt.Errorf("parsing BTF CO-RE header: %w", err) + } + + buf := internal.NewBufferedSectionReader(r, extHeader.funcInfoStart(), int64(extHeader.FuncInfoLen)) + btfFuncInfos, err := parseFuncInfos(buf, bo, spec.strings) + if err != nil { + return nil, fmt.Errorf("parsing BTF function info: %w", err) + } + + funcInfos := make(map[string]FuncOffsets, len(btfFuncInfos)) + for section, bfis := range btfFuncInfos { + funcInfos[section], err = newFuncOffsets(bfis, spec) + if err != nil { + return nil, fmt.Errorf("section %s: func infos: %w", section, err) + } + } + + buf = internal.NewBufferedSectionReader(r, extHeader.lineInfoStart(), int64(extHeader.LineInfoLen)) + btfLineInfos, err := parseLineInfos(buf, bo, spec.strings) + if err != nil { + return nil, fmt.Errorf("parsing BTF line info: %w", err) + } + + lineInfos := make(map[string]LineOffsets, len(btfLineInfos)) + for section, blis := range btfLineInfos { + lineInfos[section], err = newLineInfos(blis, spec.strings) + if err != nil { + return nil, fmt.Errorf("section %s: line infos: %w", section, err) + } + } + + if coreHeader == nil || coreHeader.COREReloLen == 0 { + return &ExtInfos{funcInfos, lineInfos, nil}, nil + } + + var btfCORERelos map[string][]bpfCORERelo + buf = internal.NewBufferedSectionReader(r, extHeader.coreReloStart(coreHeader), int64(coreHeader.COREReloLen)) + btfCORERelos, err = parseCORERelos(buf, bo, spec.strings) + if err != nil { + return nil, fmt.Errorf("parsing CO-RE relocation info: %w", err) + } + + coreRelos := make(map[string]CORERelocationInfos, len(btfCORERelos)) + for section, brs := range btfCORERelos { + coreRelos[section], err = newRelocationInfos(brs, spec, spec.strings) + if err != nil { + 
return nil, fmt.Errorf("section %s: CO-RE relocations: %w", section, err) + } + } + + return &ExtInfos{funcInfos, lineInfos, coreRelos}, nil +} + +type ( + funcInfoMeta struct{} + coreRelocationMeta struct{} +) + +// Assign per-section metadata from BTF to a section's instructions. +func (ei *ExtInfos) Assign(insns asm.Instructions, section string) { + funcInfos := ei.funcInfos[section] + lineInfos := ei.lineInfos[section] + reloInfos := ei.relocationInfos[section] + + AssignMetadataToInstructions(insns, funcInfos, lineInfos, reloInfos) +} + +// Assign per-instruction metadata to the instructions in insns. +func AssignMetadataToInstructions( + insns asm.Instructions, + funcInfos FuncOffsets, + lineInfos LineOffsets, + reloInfos CORERelocationInfos, +) { + iter := insns.Iterate() + for iter.Next() { + if len(funcInfos) > 0 && funcInfos[0].Offset == iter.Offset { + *iter.Ins = WithFuncMetadata(*iter.Ins, funcInfos[0].Func) + funcInfos = funcInfos[1:] + } + + if len(lineInfos) > 0 && lineInfos[0].Offset == iter.Offset { + *iter.Ins = iter.Ins.WithSource(lineInfos[0].Line) + lineInfos = lineInfos[1:] + } + + if len(reloInfos.infos) > 0 && reloInfos.infos[0].offset == iter.Offset { + iter.Ins.Metadata.Set(coreRelocationMeta{}, reloInfos.infos[0].relo) + reloInfos.infos = reloInfos.infos[1:] + } + } +} + +// MarshalExtInfos encodes function and line info embedded in insns into kernel +// wire format. +// +// If an instruction has an [asm.Comment], it will be synthesized into a mostly +// empty line info. 
+func MarshalExtInfos(insns asm.Instructions, b *Builder) (funcInfos, lineInfos []byte, _ error) { + iter := insns.Iterate() + for iter.Next() { + if iter.Ins.Source() != nil || FuncMetadata(iter.Ins) != nil { + goto marshal + } + } + + return nil, nil, nil + +marshal: + var fiBuf, liBuf bytes.Buffer + for { + if fn := FuncMetadata(iter.Ins); fn != nil { + fi := &FuncOffset{ + Func: fn, + Offset: iter.Offset, + } + if err := fi.marshal(&fiBuf, b); err != nil { + return nil, nil, fmt.Errorf("write func info: %w", err) + } + } + + if source := iter.Ins.Source(); source != nil { + var line *Line + if l, ok := source.(*Line); ok { + line = l + } else { + line = &Line{ + line: source.String(), + } + } + + li := &LineOffset{ + Offset: iter.Offset, + Line: line, + } + if err := li.marshal(&liBuf, b); err != nil { + return nil, nil, fmt.Errorf("write line info: %w", err) + } + } + + if !iter.Next() { + break + } + } + + return fiBuf.Bytes(), liBuf.Bytes(), nil +} + +// btfExtHeader is found at the start of the .BTF.ext section. +type btfExtHeader struct { + Magic uint16 + Version uint8 + Flags uint8 + + // HdrLen is larger than the size of struct btfExtHeader when it is + // immediately followed by a btfExtCOREHeader. + HdrLen uint32 + + FuncInfoOff uint32 + FuncInfoLen uint32 + LineInfoOff uint32 + LineInfoLen uint32 +} + +// parseBTFExtHeader parses the header of the .BTF.ext section. 
+func parseBTFExtHeader(r io.Reader, bo binary.ByteOrder) (*btfExtHeader, error) { + var header btfExtHeader + if err := binary.Read(r, bo, &header); err != nil { + return nil, fmt.Errorf("can't read header: %v", err) + } + + if header.Magic != btfMagic { + return nil, fmt.Errorf("incorrect magic value %v", header.Magic) + } + + if header.Version != 1 { + return nil, fmt.Errorf("unexpected version %v", header.Version) + } + + if header.Flags != 0 { + return nil, fmt.Errorf("unsupported flags %v", header.Flags) + } + + if int64(header.HdrLen) < int64(binary.Size(&header)) { + return nil, fmt.Errorf("header length shorter than btfExtHeader size") + } + + return &header, nil +} + +// funcInfoStart returns the offset from the beginning of the .BTF.ext section +// to the start of its func_info entries. +func (h *btfExtHeader) funcInfoStart() int64 { + return int64(h.HdrLen + h.FuncInfoOff) +} + +// lineInfoStart returns the offset from the beginning of the .BTF.ext section +// to the start of its line_info entries. +func (h *btfExtHeader) lineInfoStart() int64 { + return int64(h.HdrLen + h.LineInfoOff) +} + +// coreReloStart returns the offset from the beginning of the .BTF.ext section +// to the start of its CO-RE relocation entries. +func (h *btfExtHeader) coreReloStart(ch *btfExtCOREHeader) int64 { + return int64(h.HdrLen + ch.COREReloOff) +} + +// btfExtCOREHeader is found right after the btfExtHeader when its HdrLen +// field is larger than its size. +type btfExtCOREHeader struct { + COREReloOff uint32 + COREReloLen uint32 +} + +// parseBTFExtCOREHeader parses the tail of the .BTF.ext header. If additional +// header bytes are present, extHeader.HdrLen will be larger than the struct, +// indicating the presence of a CO-RE extension header. 
+func parseBTFExtCOREHeader(r io.Reader, bo binary.ByteOrder, extHeader *btfExtHeader) (*btfExtCOREHeader, error) { + extHdrSize := int64(binary.Size(&extHeader)) + remainder := int64(extHeader.HdrLen) - extHdrSize + + if remainder == 0 { + return nil, nil + } + + var coreHeader btfExtCOREHeader + if err := binary.Read(r, bo, &coreHeader); err != nil { + return nil, fmt.Errorf("can't read header: %v", err) + } + + return &coreHeader, nil +} + +type btfExtInfoSec struct { + SecNameOff uint32 + NumInfo uint32 +} + +// parseExtInfoSec parses a btf_ext_info_sec header within .BTF.ext, +// appearing within func_info and line_info sub-sections. +// These headers appear once for each program section in the ELF and are +// followed by one or more func/line_info records for the section. +func parseExtInfoSec(r io.Reader, bo binary.ByteOrder, strings *stringTable) (string, *btfExtInfoSec, error) { + var infoHeader btfExtInfoSec + if err := binary.Read(r, bo, &infoHeader); err != nil { + return "", nil, fmt.Errorf("read ext info header: %w", err) + } + + secName, err := strings.Lookup(infoHeader.SecNameOff) + if err != nil { + return "", nil, fmt.Errorf("get section name: %w", err) + } + if secName == "" { + return "", nil, fmt.Errorf("extinfo header refers to empty section name") + } + + if infoHeader.NumInfo == 0 { + return "", nil, fmt.Errorf("section %s has zero records", secName) + } + + return secName, &infoHeader, nil +} + +// parseExtInfoRecordSize parses the uint32 at the beginning of a func_infos +// or line_infos segment that describes the length of all extInfoRecords in +// that segment. +func parseExtInfoRecordSize(r io.Reader, bo binary.ByteOrder) (uint32, error) { + const maxRecordSize = 256 + + var recordSize uint32 + if err := binary.Read(r, bo, &recordSize); err != nil { + return 0, fmt.Errorf("can't read record size: %v", err) + } + + if recordSize < 4 { + // Need at least InsnOff worth of bytes per record. 
+ return 0, errors.New("record size too short") + } + if recordSize > maxRecordSize { + return 0, fmt.Errorf("record size %v exceeds %v", recordSize, maxRecordSize) + } + + return recordSize, nil +} + +// FuncOffsets is a sorted slice of FuncOffset. +type FuncOffsets []FuncOffset + +// The size of a FuncInfo in BTF wire format. +var FuncInfoSize = uint32(binary.Size(bpfFuncInfo{})) + +// FuncOffset represents a [btf.Func] and its raw instruction offset within a +// BPF program. +type FuncOffset struct { + Offset asm.RawInstructionOffset + Func *Func +} + +type bpfFuncInfo struct { + // Instruction offset of the function within an ELF section. + InsnOff uint32 + TypeID TypeID +} + +func newFuncOffset(fi bpfFuncInfo, spec *Spec) (*FuncOffset, error) { + typ, err := spec.TypeByID(fi.TypeID) + if err != nil { + return nil, err + } + + fn, ok := typ.(*Func) + if !ok { + return nil, fmt.Errorf("type ID %d is a %T, but expected a Func", fi.TypeID, typ) + } + + // C doesn't have anonymous functions, but check just in case. + if fn.Name == "" { + return nil, fmt.Errorf("func with type ID %d doesn't have a name", fi.TypeID) + } + + return &FuncOffset{ + asm.RawInstructionOffset(fi.InsnOff), + fn, + }, nil +} + +func newFuncOffsets(bfis []bpfFuncInfo, spec *Spec) (FuncOffsets, error) { + fos := make(FuncOffsets, 0, len(bfis)) + + for _, bfi := range bfis { + fi, err := newFuncOffset(bfi, spec) + if err != nil { + return FuncOffsets{}, fmt.Errorf("offset %d: %w", bfi.InsnOff, err) + } + fos = append(fos, *fi) + } + sort.Slice(fos, func(i, j int) bool { + return fos[i].Offset <= fos[j].Offset + }) + return fos, nil +} + +// LoadFuncInfos parses BTF func info from kernel wire format into a +// [FuncOffsets], a sorted slice of [btf.Func]s of (sub)programs within a BPF +// program with their corresponding raw instruction offsets. 
+func LoadFuncInfos(reader io.Reader, bo binary.ByteOrder, recordNum uint32, spec *Spec) (FuncOffsets, error) { + fis, err := parseFuncInfoRecords( + reader, + bo, + FuncInfoSize, + recordNum, + false, + ) + if err != nil { + return FuncOffsets{}, fmt.Errorf("parsing BTF func info: %w", err) + } + + return newFuncOffsets(fis, spec) +} + +// marshal into the BTF wire format. +func (fi *FuncOffset) marshal(w *bytes.Buffer, b *Builder) error { + id, err := b.Add(fi.Func) + if err != nil { + return err + } + bfi := bpfFuncInfo{ + InsnOff: uint32(fi.Offset), + TypeID: id, + } + buf := make([]byte, FuncInfoSize) + internal.NativeEndian.PutUint32(buf, bfi.InsnOff) + internal.NativeEndian.PutUint32(buf[4:], uint32(bfi.TypeID)) + _, err = w.Write(buf) + return err +} + +// parseFuncInfos parses a func_info sub-section within .BTF.ext ito a map of +// func infos indexed by section name. +func parseFuncInfos(r io.Reader, bo binary.ByteOrder, strings *stringTable) (map[string][]bpfFuncInfo, error) { + recordSize, err := parseExtInfoRecordSize(r, bo) + if err != nil { + return nil, err + } + + result := make(map[string][]bpfFuncInfo) + for { + secName, infoHeader, err := parseExtInfoSec(r, bo, strings) + if errors.Is(err, io.EOF) { + return result, nil + } + if err != nil { + return nil, err + } + + records, err := parseFuncInfoRecords(r, bo, recordSize, infoHeader.NumInfo, true) + if err != nil { + return nil, fmt.Errorf("section %v: %w", secName, err) + } + + result[secName] = records + } +} + +// parseFuncInfoRecords parses a stream of func_infos into a funcInfos. +// These records appear after a btf_ext_info_sec header in the func_info +// sub-section of .BTF.ext. +func parseFuncInfoRecords(r io.Reader, bo binary.ByteOrder, recordSize uint32, recordNum uint32, offsetInBytes bool) ([]bpfFuncInfo, error) { + var out []bpfFuncInfo + var fi bpfFuncInfo + + if exp, got := FuncInfoSize, recordSize; exp != got { + // BTF blob's record size is longer than we know how to parse. 
+ return nil, fmt.Errorf("expected FuncInfo record size %d, but BTF blob contains %d", exp, got) + } + + for i := uint32(0); i < recordNum; i++ { + if err := binary.Read(r, bo, &fi); err != nil { + return nil, fmt.Errorf("can't read function info: %v", err) + } + + if offsetInBytes { + if fi.InsnOff%asm.InstructionSize != 0 { + return nil, fmt.Errorf("offset %v is not aligned with instruction size", fi.InsnOff) + } + + // ELF tracks offset in bytes, the kernel expects raw BPF instructions. + // Convert as early as possible. + fi.InsnOff /= asm.InstructionSize + } + + out = append(out, fi) + } + + return out, nil +} + +var LineInfoSize = uint32(binary.Size(bpfLineInfo{})) + +// Line represents the location and contents of a single line of source +// code a BPF ELF was compiled from. +type Line struct { + fileName string + line string + lineNumber uint32 + lineColumn uint32 +} + +func (li *Line) FileName() string { + return li.fileName +} + +func (li *Line) Line() string { + return li.line +} + +func (li *Line) LineNumber() uint32 { + return li.lineNumber +} + +func (li *Line) LineColumn() uint32 { + return li.lineColumn +} + +func (li *Line) String() string { + return li.line +} + +// LineOffsets contains a sorted list of line infos. +type LineOffsets []LineOffset + +// LineOffset represents a line info and its raw instruction offset. +type LineOffset struct { + Offset asm.RawInstructionOffset + Line *Line +} + +// Constants for the format of bpfLineInfo.LineCol. +const ( + bpfLineShift = 10 + bpfLineMax = (1 << (32 - bpfLineShift)) - 1 + bpfColumnMax = (1 << bpfLineShift) - 1 +) + +type bpfLineInfo struct { + // Instruction offset of the line within the whole instruction stream, in instructions. + InsnOff uint32 + FileNameOff uint32 + LineOff uint32 + LineCol uint32 +} + +// LoadLineInfos parses BTF line info in kernel wire format. 
+func LoadLineInfos(reader io.Reader, bo binary.ByteOrder, recordNum uint32, spec *Spec) (LineOffsets, error) { + lis, err := parseLineInfoRecords( + reader, + bo, + LineInfoSize, + recordNum, + false, + ) + if err != nil { + return LineOffsets{}, fmt.Errorf("parsing BTF line info: %w", err) + } + + return newLineInfos(lis, spec.strings) +} + +func newLineInfo(li bpfLineInfo, strings *stringTable) (LineOffset, error) { + line, err := strings.LookupCached(li.LineOff) + if err != nil { + return LineOffset{}, fmt.Errorf("lookup of line: %w", err) + } + + fileName, err := strings.LookupCached(li.FileNameOff) + if err != nil { + return LineOffset{}, fmt.Errorf("lookup of filename: %w", err) + } + + lineNumber := li.LineCol >> bpfLineShift + lineColumn := li.LineCol & bpfColumnMax + + return LineOffset{ + asm.RawInstructionOffset(li.InsnOff), + &Line{ + fileName, + line, + lineNumber, + lineColumn, + }, + }, nil +} + +func newLineInfos(blis []bpfLineInfo, strings *stringTable) (LineOffsets, error) { + lis := make([]LineOffset, 0, len(blis)) + for _, bli := range blis { + li, err := newLineInfo(bli, strings) + if err != nil { + return LineOffsets{}, fmt.Errorf("offset %d: %w", bli.InsnOff, err) + } + lis = append(lis, li) + } + sort.Slice(lis, func(i, j int) bool { + return lis[i].Offset <= lis[j].Offset + }) + return lis, nil +} + +// marshal writes the binary representation of the LineInfo to w. 
+func (li *LineOffset) marshal(w *bytes.Buffer, b *Builder) error { + line := li.Line + if line.lineNumber > bpfLineMax { + return fmt.Errorf("line %d exceeds %d", line.lineNumber, bpfLineMax) + } + + if line.lineColumn > bpfColumnMax { + return fmt.Errorf("column %d exceeds %d", line.lineColumn, bpfColumnMax) + } + + fileNameOff, err := b.addString(line.fileName) + if err != nil { + return fmt.Errorf("file name %q: %w", line.fileName, err) + } + + lineOff, err := b.addString(line.line) + if err != nil { + return fmt.Errorf("line %q: %w", line.line, err) + } + + bli := bpfLineInfo{ + uint32(li.Offset), + fileNameOff, + lineOff, + (line.lineNumber << bpfLineShift) | line.lineColumn, + } + + buf := make([]byte, LineInfoSize) + internal.NativeEndian.PutUint32(buf, bli.InsnOff) + internal.NativeEndian.PutUint32(buf[4:], bli.FileNameOff) + internal.NativeEndian.PutUint32(buf[8:], bli.LineOff) + internal.NativeEndian.PutUint32(buf[12:], bli.LineCol) + _, err = w.Write(buf) + return err +} + +// parseLineInfos parses a line_info sub-section within .BTF.ext ito a map of +// line infos indexed by section name. +func parseLineInfos(r io.Reader, bo binary.ByteOrder, strings *stringTable) (map[string][]bpfLineInfo, error) { + recordSize, err := parseExtInfoRecordSize(r, bo) + if err != nil { + return nil, err + } + + result := make(map[string][]bpfLineInfo) + for { + secName, infoHeader, err := parseExtInfoSec(r, bo, strings) + if errors.Is(err, io.EOF) { + return result, nil + } + if err != nil { + return nil, err + } + + records, err := parseLineInfoRecords(r, bo, recordSize, infoHeader.NumInfo, true) + if err != nil { + return nil, fmt.Errorf("section %v: %w", secName, err) + } + + result[secName] = records + } +} + +// parseLineInfoRecords parses a stream of line_infos into a lineInfos. +// These records appear after a btf_ext_info_sec header in the line_info +// sub-section of .BTF.ext. 
+func parseLineInfoRecords(r io.Reader, bo binary.ByteOrder, recordSize uint32, recordNum uint32, offsetInBytes bool) ([]bpfLineInfo, error) { + if exp, got := uint32(binary.Size(bpfLineInfo{})), recordSize; exp != got { + // BTF blob's record size is longer than we know how to parse. + return nil, fmt.Errorf("expected LineInfo record size %d, but BTF blob contains %d", exp, got) + } + + out := make([]bpfLineInfo, recordNum) + if err := binary.Read(r, bo, out); err != nil { + return nil, fmt.Errorf("can't read line info: %v", err) + } + + if offsetInBytes { + for i := range out { + li := &out[i] + if li.InsnOff%asm.InstructionSize != 0 { + return nil, fmt.Errorf("offset %v is not aligned with instruction size", li.InsnOff) + } + + // ELF tracks offset in bytes, the kernel expects raw BPF instructions. + // Convert as early as possible. + li.InsnOff /= asm.InstructionSize + } + } + + return out, nil +} + +// bpfCORERelo matches the kernel's struct bpf_core_relo. +type bpfCORERelo struct { + InsnOff uint32 + TypeID TypeID + AccessStrOff uint32 + Kind coreKind +} + +type CORERelocation struct { + // The local type of the relocation, stripped of typedefs and qualifiers. + typ Type + accessor coreAccessor + kind coreKind + // The ID of the local type in the source BTF. + id TypeID +} + +func (cr *CORERelocation) String() string { + return fmt.Sprintf("CORERelocation(%s, %s[%s], local_id=%d)", cr.kind, cr.typ, cr.accessor, cr.id) +} + +func CORERelocationMetadata(ins *asm.Instruction) *CORERelocation { + relo, _ := ins.Metadata.Get(coreRelocationMeta{}).(*CORERelocation) + return relo +} + +// CORERelocationInfos contains a sorted list of co:re relocation infos. 
+type CORERelocationInfos struct { + infos []coreRelocationInfo +} + +type coreRelocationInfo struct { + relo *CORERelocation + offset asm.RawInstructionOffset +} + +func newRelocationInfo(relo bpfCORERelo, spec *Spec, strings *stringTable) (*coreRelocationInfo, error) { + typ, err := spec.TypeByID(relo.TypeID) + if err != nil { + return nil, err + } + + accessorStr, err := strings.Lookup(relo.AccessStrOff) + if err != nil { + return nil, err + } + + accessor, err := parseCOREAccessor(accessorStr) + if err != nil { + return nil, fmt.Errorf("accessor %q: %s", accessorStr, err) + } + + return &coreRelocationInfo{ + &CORERelocation{ + typ, + accessor, + relo.Kind, + relo.TypeID, + }, + asm.RawInstructionOffset(relo.InsnOff), + }, nil +} + +func newRelocationInfos(brs []bpfCORERelo, spec *Spec, strings *stringTable) (CORERelocationInfos, error) { + rs := CORERelocationInfos{ + infos: make([]coreRelocationInfo, 0, len(brs)), + } + for _, br := range brs { + relo, err := newRelocationInfo(br, spec, strings) + if err != nil { + return CORERelocationInfos{}, fmt.Errorf("offset %d: %w", br.InsnOff, err) + } + rs.infos = append(rs.infos, *relo) + } + sort.Slice(rs.infos, func(i, j int) bool { + return rs.infos[i].offset < rs.infos[j].offset + }) + return rs, nil +} + +var extInfoReloSize = binary.Size(bpfCORERelo{}) + +// parseCORERelos parses a core_relos sub-section within .BTF.ext ito a map of +// CO-RE relocations indexed by section name. 
+func parseCORERelos(r io.Reader, bo binary.ByteOrder, strings *stringTable) (map[string][]bpfCORERelo, error) { + recordSize, err := parseExtInfoRecordSize(r, bo) + if err != nil { + return nil, err + } + + if recordSize != uint32(extInfoReloSize) { + return nil, fmt.Errorf("expected record size %d, got %d", extInfoReloSize, recordSize) + } + + result := make(map[string][]bpfCORERelo) + for { + secName, infoHeader, err := parseExtInfoSec(r, bo, strings) + if errors.Is(err, io.EOF) { + return result, nil + } + if err != nil { + return nil, err + } + + records, err := parseCOREReloRecords(r, bo, infoHeader.NumInfo) + if err != nil { + return nil, fmt.Errorf("section %v: %w", secName, err) + } + + result[secName] = records + } +} + +// parseCOREReloRecords parses a stream of CO-RE relocation entries into a +// coreRelos. These records appear after a btf_ext_info_sec header in the +// core_relos sub-section of .BTF.ext. +func parseCOREReloRecords(r io.Reader, bo binary.ByteOrder, recordNum uint32) ([]bpfCORERelo, error) { + var out []bpfCORERelo + + var relo bpfCORERelo + for i := uint32(0); i < recordNum; i++ { + if err := binary.Read(r, bo, &relo); err != nil { + return nil, fmt.Errorf("can't read CO-RE relocation: %v", err) + } + + if relo.InsnOff%asm.InstructionSize != 0 { + return nil, fmt.Errorf("offset %v is not aligned with instruction size", relo.InsnOff) + } + + // ELF tracks offset in bytes, the kernel expects raw BPF instructions. + // Convert as early as possible. 
+ relo.InsnOff /= asm.InstructionSize + + out = append(out, relo) + } + + return out, nil +} diff --git a/vendor/github.com/cilium/ebpf/btf/feature.go b/vendor/github.com/cilium/ebpf/btf/feature.go new file mode 100644 index 000000000..5b427f5d3 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/btf/feature.go @@ -0,0 +1,158 @@ +package btf + +import ( + "errors" + "math" + + "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/sys" + "github.com/cilium/ebpf/internal/unix" +) + +// haveBTF attempts to load a BTF blob containing an Int. It should pass on any +// kernel that supports BPF_BTF_LOAD. +var haveBTF = internal.NewFeatureTest("BTF", func() error { + // 0-length anonymous integer + err := probeBTF(&Int{}) + if errors.Is(err, unix.EINVAL) || errors.Is(err, unix.EPERM) { + return internal.ErrNotSupported + } + return err +}, "4.18") + +// haveMapBTF attempts to load a minimal BTF blob containing a Var. It is +// used as a proxy for .bss, .data and .rodata map support, which generally +// come with a Var and Datasec. These were introduced in Linux 5.2. +var haveMapBTF = internal.NewFeatureTest("Map BTF (Var/Datasec)", func() error { + if err := haveBTF(); err != nil { + return err + } + + v := &Var{ + Name: "a", + Type: &Pointer{(*Void)(nil)}, + } + + err := probeBTF(v) + if errors.Is(err, unix.EINVAL) || errors.Is(err, unix.EPERM) { + // Treat both EINVAL and EPERM as not supported: creating the map may still + // succeed without Btf* attrs. + return internal.ErrNotSupported + } + return err +}, "5.2") + +// haveProgBTF attempts to load a BTF blob containing a Func and FuncProto. It +// is used as a proxy for ext_info (func_info) support, which depends on +// Func(Proto) by definition. 
+var haveProgBTF = internal.NewFeatureTest("Program BTF (func/line_info)", func() error { + if err := haveBTF(); err != nil { + return err + } + + fn := &Func{ + Name: "a", + Type: &FuncProto{Return: (*Void)(nil)}, + } + + err := probeBTF(fn) + if errors.Is(err, unix.EINVAL) || errors.Is(err, unix.EPERM) { + return internal.ErrNotSupported + } + return err +}, "5.0") + +var haveFuncLinkage = internal.NewFeatureTest("BTF func linkage", func() error { + if err := haveProgBTF(); err != nil { + return err + } + + fn := &Func{ + Name: "a", + Type: &FuncProto{Return: (*Void)(nil)}, + Linkage: GlobalFunc, + } + + err := probeBTF(fn) + if errors.Is(err, unix.EINVAL) { + return internal.ErrNotSupported + } + return err +}, "5.6") + +var haveDeclTags = internal.NewFeatureTest("BTF decl tags", func() error { + if err := haveBTF(); err != nil { + return err + } + + t := &Typedef{ + Name: "a", + Type: &Int{}, + Tags: []string{"a"}, + } + + err := probeBTF(t) + if errors.Is(err, unix.EINVAL) { + return internal.ErrNotSupported + } + return err +}, "5.16") + +var haveTypeTags = internal.NewFeatureTest("BTF type tags", func() error { + if err := haveBTF(); err != nil { + return err + } + + t := &TypeTag{ + Type: &Int{}, + Value: "a", + } + + err := probeBTF(t) + if errors.Is(err, unix.EINVAL) { + return internal.ErrNotSupported + } + return err +}, "5.17") + +var haveEnum64 = internal.NewFeatureTest("ENUM64", func() error { + if err := haveBTF(); err != nil { + return err + } + + enum := &Enum{ + Size: 8, + Values: []EnumValue{ + {"TEST", math.MaxUint32 + 1}, + }, + } + + err := probeBTF(enum) + if errors.Is(err, unix.EINVAL) { + return internal.ErrNotSupported + } + return err +}, "6.0") + +func probeBTF(typ Type) error { + b, err := NewBuilder([]Type{typ}) + if err != nil { + return err + } + + buf, err := b.Marshal(nil, nil) + if err != nil { + return err + } + + fd, err := sys.BtfLoad(&sys.BtfLoadAttr{ + Btf: sys.SlicePointer(buf), + BtfSize: uint32(len(buf)), + }) + + if err 
== nil { + fd.Close() + } + + return err +} diff --git a/vendor/github.com/cilium/ebpf/btf/format.go b/vendor/github.com/cilium/ebpf/btf/format.go new file mode 100644 index 000000000..7deca334a --- /dev/null +++ b/vendor/github.com/cilium/ebpf/btf/format.go @@ -0,0 +1,353 @@ +package btf + +import ( + "errors" + "fmt" + "strings" +) + +var errNestedTooDeep = errors.New("nested too deep") + +// GoFormatter converts a Type to Go syntax. +// +// A zero GoFormatter is valid to use. +type GoFormatter struct { + w strings.Builder + + // Types present in this map are referred to using the given name if they + // are encountered when outputting another type. + Names map[Type]string + + // Identifier is called for each field of struct-like types. By default the + // field name is used as is. + Identifier func(string) string + + // EnumIdentifier is called for each element of an enum. By default the + // name of the enum type is concatenated with Identifier(element). + EnumIdentifier func(name, element string) string +} + +// TypeDeclaration generates a Go type declaration for a BTF type. +func (gf *GoFormatter) TypeDeclaration(name string, typ Type) (string, error) { + gf.w.Reset() + if err := gf.writeTypeDecl(name, typ); err != nil { + return "", err + } + return gf.w.String(), nil +} + +func (gf *GoFormatter) identifier(s string) string { + if gf.Identifier != nil { + return gf.Identifier(s) + } + + return s +} + +func (gf *GoFormatter) enumIdentifier(name, element string) string { + if gf.EnumIdentifier != nil { + return gf.EnumIdentifier(name, element) + } + + return name + gf.identifier(element) +} + +// writeTypeDecl outputs a declaration of the given type. 
+// +// It encodes https://golang.org/ref/spec#Type_declarations: +// +// type foo struct { _ structs.HostLayout; bar uint32; } +// type bar int32 +func (gf *GoFormatter) writeTypeDecl(name string, typ Type) error { + if name == "" { + return fmt.Errorf("need a name for type %s", typ) + } + + typ = skipQualifiers(typ) + fmt.Fprintf(&gf.w, "type %s ", name) + if err := gf.writeTypeLit(typ, 0); err != nil { + return err + } + + e, ok := typ.(*Enum) + if !ok || len(e.Values) == 0 { + return nil + } + + gf.w.WriteString("; const ( ") + for _, ev := range e.Values { + id := gf.enumIdentifier(name, ev.Name) + var value any + if e.Signed { + value = int64(ev.Value) + } else { + value = ev.Value + } + fmt.Fprintf(&gf.w, "%s %s = %d; ", id, name, value) + } + gf.w.WriteString(")") + + return nil +} + +// writeType outputs the name of a named type or a literal describing the type. +// +// It encodes https://golang.org/ref/spec#Types. +// +// foo (if foo is a named type) +// uint32 +func (gf *GoFormatter) writeType(typ Type, depth int) error { + typ = skipQualifiers(typ) + + name := gf.Names[typ] + if name != "" { + gf.w.WriteString(name) + return nil + } + + return gf.writeTypeLit(typ, depth) +} + +// writeTypeLit outputs a literal describing the type. +// +// The function ignores named types. +// +// It encodes https://golang.org/ref/spec#TypeLit. 
+// +// struct { _ structs.HostLayout; bar uint32; } +// uint32 +func (gf *GoFormatter) writeTypeLit(typ Type, depth int) error { + depth++ + if depth > maxResolveDepth { + return errNestedTooDeep + } + + var err error + switch v := skipQualifiers(typ).(type) { + case *Int: + err = gf.writeIntLit(v) + + case *Enum: + if !v.Signed { + gf.w.WriteRune('u') + } + switch v.Size { + case 1: + gf.w.WriteString("int8") + case 2: + gf.w.WriteString("int16") + case 4: + gf.w.WriteString("int32") + case 8: + gf.w.WriteString("int64") + default: + err = fmt.Errorf("invalid enum size %d", v.Size) + } + + case *Typedef: + err = gf.writeType(v.Type, depth) + + case *Array: + fmt.Fprintf(&gf.w, "[%d]", v.Nelems) + err = gf.writeType(v.Type, depth) + + case *Struct: + err = gf.writeStructLit(v.Size, v.Members, depth) + + case *Union: + // Always choose the first member to represent the union in Go. + err = gf.writeStructLit(v.Size, v.Members[:1], depth) + + case *Datasec: + err = gf.writeDatasecLit(v, depth) + + case *Var: + err = gf.writeTypeLit(v.Type, depth) + + default: + return fmt.Errorf("type %T: %w", v, ErrNotSupported) + } + + if err != nil { + return fmt.Errorf("%s: %w", typ, err) + } + + return nil +} + +func (gf *GoFormatter) writeIntLit(i *Int) error { + bits := i.Size * 8 + switch i.Encoding { + case Bool: + if i.Size != 1 { + return fmt.Errorf("bool with size %d", i.Size) + } + gf.w.WriteString("bool") + case Char: + if i.Size != 1 { + return fmt.Errorf("char with size %d", i.Size) + } + // BTF doesn't have a way to specify the signedness of a char. Assume + // we are dealing with unsigned, since this works nicely with []byte + // in Go code. 
+ fallthrough + case Unsigned, Signed: + stem := "uint" + if i.Encoding == Signed { + stem = "int" + } + if i.Size > 8 { + fmt.Fprintf(&gf.w, "[%d]byte /* %s%d */", i.Size, stem, i.Size*8) + } else { + fmt.Fprintf(&gf.w, "%s%d", stem, bits) + } + default: + return fmt.Errorf("can't encode %s", i.Encoding) + } + return nil +} + +func (gf *GoFormatter) writeStructLit(size uint32, members []Member, depth int) error { + gf.w.WriteString("struct { _ structs.HostLayout; ") + + prevOffset := uint32(0) + skippedBitfield := false + for i, m := range members { + if m.BitfieldSize > 0 { + skippedBitfield = true + continue + } + + offset := m.Offset.Bytes() + if n := offset - prevOffset; skippedBitfield && n > 0 { + fmt.Fprintf(&gf.w, "_ [%d]byte /* unsupported bitfield */; ", n) + } else { + gf.writePadding(n) + } + + fieldSize, err := Sizeof(m.Type) + if err != nil { + return fmt.Errorf("field %d: %w", i, err) + } + + prevOffset = offset + uint32(fieldSize) + if prevOffset > size { + return fmt.Errorf("field %d of size %d exceeds type size %d", i, fieldSize, size) + } + + if err := gf.writeStructField(m, depth); err != nil { + return fmt.Errorf("field %d: %w", i, err) + } + } + + gf.writePadding(size - prevOffset) + gf.w.WriteString("}") + return nil +} + +func (gf *GoFormatter) writeStructField(m Member, depth int) error { + if m.BitfieldSize > 0 { + return fmt.Errorf("bitfields are not supported") + } + if m.Offset%8 != 0 { + return fmt.Errorf("unsupported offset %d", m.Offset) + } + + if m.Name == "" { + // Special case a nested anonymous union like + // struct foo { union { int bar; int baz }; } + // by replacing the whole union with its first member. 
+ union, ok := m.Type.(*Union) + if !ok { + return fmt.Errorf("anonymous fields are not supported") + + } + + if len(union.Members) == 0 { + return errors.New("empty anonymous union") + } + + depth++ + if depth > maxResolveDepth { + return errNestedTooDeep + } + + m := union.Members[0] + size, err := Sizeof(m.Type) + if err != nil { + return err + } + + if err := gf.writeStructField(m, depth); err != nil { + return err + } + + gf.writePadding(union.Size - uint32(size)) + return nil + + } + + fmt.Fprintf(&gf.w, "%s ", gf.identifier(m.Name)) + + if err := gf.writeType(m.Type, depth); err != nil { + return err + } + + gf.w.WriteString("; ") + return nil +} + +func (gf *GoFormatter) writeDatasecLit(ds *Datasec, depth int) error { + gf.w.WriteString("struct { _ structs.HostLayout; ") + + prevOffset := uint32(0) + for i, vsi := range ds.Vars { + v, ok := vsi.Type.(*Var) + if !ok { + return fmt.Errorf("can't format %s as part of data section", vsi.Type) + } + + if v.Linkage != GlobalVar { + // Ignore static, extern, etc. for now. 
+ continue + } + + if v.Name == "" { + return fmt.Errorf("variable %d: empty name", i) + } + + gf.writePadding(vsi.Offset - prevOffset) + prevOffset = vsi.Offset + vsi.Size + + fmt.Fprintf(&gf.w, "%s ", gf.identifier(v.Name)) + + if err := gf.writeType(v.Type, depth); err != nil { + return fmt.Errorf("variable %d: %w", i, err) + } + + gf.w.WriteString("; ") + } + + gf.writePadding(ds.Size - prevOffset) + gf.w.WriteString("}") + return nil +} + +func (gf *GoFormatter) writePadding(bytes uint32) { + if bytes > 0 { + fmt.Fprintf(&gf.w, "_ [%d]byte; ", bytes) + } +} + +func skipQualifiers(typ Type) Type { + result := typ + for depth := 0; depth <= maxResolveDepth; depth++ { + switch v := (result).(type) { + case qualifier: + result = v.qualify() + default: + return result + } + } + return &cycle{typ} +} diff --git a/vendor/github.com/cilium/ebpf/btf/handle.go b/vendor/github.com/cilium/ebpf/btf/handle.go new file mode 100644 index 000000000..89e09a3b8 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/btf/handle.go @@ -0,0 +1,332 @@ +package btf + +import ( + "errors" + "fmt" + "math" + "os" + + "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/platform" + "github.com/cilium/ebpf/internal/sys" + "github.com/cilium/ebpf/internal/unix" +) + +// Handle is a reference to BTF loaded into the kernel. +type Handle struct { + fd *sys.FD + + // Size of the raw BTF in bytes. + size uint32 + + needsKernelBase bool +} + +// NewHandle loads the contents of a [Builder] into the kernel. +// +// Returns an error wrapping ErrNotSupported if the kernel doesn't support BTF. +func NewHandle(b *Builder) (*Handle, error) { + small := getByteSlice() + defer putByteSlice(small) + + buf, err := b.Marshal(*small, KernelMarshalOptions()) + if err != nil { + return nil, fmt.Errorf("marshal BTF: %w", err) + } + + return NewHandleFromRawBTF(buf) +} + +// NewHandleFromRawBTF loads raw BTF into the kernel. 
+// +// Returns an error wrapping ErrNotSupported if the kernel doesn't support BTF. +func NewHandleFromRawBTF(btf []byte) (*Handle, error) { + const minLogSize = 64 * 1024 + + if platform.IsWindows { + return nil, fmt.Errorf("btf: handle: %w", internal.ErrNotSupportedOnOS) + } + + if uint64(len(btf)) > math.MaxUint32 { + return nil, errors.New("BTF exceeds the maximum size") + } + + attr := &sys.BtfLoadAttr{ + Btf: sys.SlicePointer(btf), + BtfSize: uint32(len(btf)), + } + + var ( + logBuf []byte + err error + ) + for { + var fd *sys.FD + fd, err = sys.BtfLoad(attr) + if err == nil { + return &Handle{fd, attr.BtfSize, false}, nil + } + + if attr.BtfLogTrueSize != 0 && attr.BtfLogSize >= attr.BtfLogTrueSize { + // The log buffer already has the correct size. + break + } + + if attr.BtfLogSize != 0 && !errors.Is(err, unix.ENOSPC) { + // Up until at least kernel 6.0, the BTF verifier does not return ENOSPC + // if there are other verification errors. ENOSPC is only returned when + // the BTF blob is correct, a log was requested, and the provided buffer + // is too small. We're therefore not sure whether we got the full + // log or not. + break + } + + // Make an educated guess how large the buffer should be. Start + // at a reasonable minimum and then double the size. + logSize := uint32(max(len(logBuf)*2, minLogSize)) + if int(logSize) < len(logBuf) { + return nil, errors.New("overflow while probing log buffer size") + } + + if attr.BtfLogTrueSize != 0 { + // The kernel has given us a hint how large the log buffer has to be. + logSize = attr.BtfLogTrueSize + } + + logBuf = make([]byte, logSize) + attr.BtfLogSize = logSize + attr.BtfLogBuf = sys.SlicePointer(logBuf) + attr.BtfLogLevel = 1 + } + + if err := haveBTF(); err != nil { + return nil, err + } + + return nil, internal.ErrorWithLog("load btf", err, logBuf) +} + +// NewHandleFromID returns the BTF handle for a given id. +// +// Prefer calling [ebpf.Program.Handle] or [ebpf.Map.Handle] if possible. 
+// +// Returns ErrNotExist, if there is no BTF with the given id. +// +// Requires CAP_SYS_ADMIN. +func NewHandleFromID(id ID) (*Handle, error) { + if platform.IsWindows { + return nil, fmt.Errorf("btf: handle: %w", internal.ErrNotSupportedOnOS) + } + + fd, err := sys.BtfGetFdById(&sys.BtfGetFdByIdAttr{ + Id: uint32(id), + }) + if err != nil { + return nil, fmt.Errorf("get FD for ID %d: %w", id, err) + } + + info, err := newHandleInfoFromFD(fd) + if err != nil { + _ = fd.Close() + return nil, err + } + + return &Handle{fd, info.size, info.IsModule()}, nil +} + +// Spec parses the kernel BTF into Go types. +// +// base must contain type information for vmlinux if the handle is for +// a kernel module. It may be nil otherwise. +func (h *Handle) Spec(base *Spec) (*Spec, error) { + var btfInfo sys.BtfInfo + btfBuffer := make([]byte, h.size) + btfInfo.Btf = sys.SlicePointer(btfBuffer) + btfInfo.BtfSize = uint32(len(btfBuffer)) + + if err := sys.ObjInfo(h.fd, &btfInfo); err != nil { + return nil, err + } + + if h.needsKernelBase && base == nil { + return nil, fmt.Errorf("missing base types") + } + + return loadRawSpec(btfBuffer, base) +} + +// Close destroys the handle. +// +// Subsequent calls to FD will return an invalid value. +func (h *Handle) Close() error { + if h == nil { + return nil + } + + return h.fd.Close() +} + +// FD returns the file descriptor for the handle. +func (h *Handle) FD() int { + return h.fd.Int() +} + +// Info returns metadata about the handle. +func (h *Handle) Info() (*HandleInfo, error) { + return newHandleInfoFromFD(h.fd) +} + +// HandleInfo describes a Handle. +type HandleInfo struct { + // ID of this handle in the kernel. The ID is only valid as long as the + // associated handle is kept alive. + ID ID + + // Name is an identifying name for the BTF, currently only used by the + // kernel. + Name string + + // IsKernel is true if the BTF originated with the kernel and not + // userspace. + IsKernel bool + + // Size of the raw BTF in bytes. 
+ size uint32 +} + +func newHandleInfoFromFD(fd *sys.FD) (*HandleInfo, error) { + // We invoke the syscall once with a empty BTF and name buffers to get size + // information to allocate buffers. Then we invoke it a second time with + // buffers to receive the data. + var btfInfo sys.BtfInfo + if err := sys.ObjInfo(fd, &btfInfo); err != nil { + return nil, fmt.Errorf("get BTF info for fd %s: %w", fd, err) + } + + if btfInfo.NameLen > 0 { + // NameLen doesn't account for the terminating NUL. + btfInfo.NameLen++ + } + + // Don't pull raw BTF by default, since it may be quite large. + btfSize := btfInfo.BtfSize + btfInfo.BtfSize = 0 + + nameBuffer := make([]byte, btfInfo.NameLen) + btfInfo.Name = sys.SlicePointer(nameBuffer) + btfInfo.NameLen = uint32(len(nameBuffer)) + if err := sys.ObjInfo(fd, &btfInfo); err != nil { + return nil, err + } + + return &HandleInfo{ + ID: ID(btfInfo.Id), + Name: unix.ByteSliceToString(nameBuffer), + IsKernel: btfInfo.KernelBtf != 0, + size: btfSize, + }, nil +} + +// IsVmlinux returns true if the BTF is for the kernel itself. +func (i *HandleInfo) IsVmlinux() bool { + return i.IsKernel && i.Name == "vmlinux" +} + +// IsModule returns true if the BTF is for a kernel module. +func (i *HandleInfo) IsModule() bool { + return i.IsKernel && i.Name != "vmlinux" +} + +// HandleIterator allows enumerating BTF blobs loaded into the kernel. +type HandleIterator struct { + // The ID of the current handle. Only valid after a call to Next. + ID ID + // The current Handle. Only valid until a call to Next. + // See Take if you want to retain the handle. + Handle *Handle + err error +} + +// Next retrieves a handle for the next BTF object. +// +// Returns true if another BTF object was found. Call [HandleIterator.Err] after +// the function returns false. 
+func (it *HandleIterator) Next() bool { + if platform.IsWindows { + it.err = fmt.Errorf("btf: %w", internal.ErrNotSupportedOnOS) + return false + } + + id := it.ID + for { + attr := &sys.BtfGetNextIdAttr{Id: id} + err := sys.BtfGetNextId(attr) + if errors.Is(err, os.ErrNotExist) { + // There are no more BTF objects. + break + } else if err != nil { + it.err = fmt.Errorf("get next BTF ID: %w", err) + break + } + + id = attr.NextId + handle, err := NewHandleFromID(id) + if errors.Is(err, os.ErrNotExist) { + // Try again with the next ID. + continue + } else if err != nil { + it.err = fmt.Errorf("retrieve handle for ID %d: %w", id, err) + break + } + + it.Handle.Close() + it.ID, it.Handle = id, handle + return true + } + + // No more handles or we encountered an error. + it.Handle.Close() + it.Handle = nil + return false +} + +// Take the ownership of the current handle. +// +// It's the callers responsibility to close the handle. +func (it *HandleIterator) Take() *Handle { + handle := it.Handle + it.Handle = nil + return handle +} + +// Err returns an error if iteration failed for some reason. +func (it *HandleIterator) Err() error { + return it.err +} + +// FindHandle returns the first handle for which predicate returns true. +// +// Requires CAP_SYS_ADMIN. +// +// Returns an error wrapping ErrNotFound if predicate never returns true or if +// there is no BTF loaded into the kernel. 
+func FindHandle(predicate func(info *HandleInfo) bool) (*Handle, error) { + it := new(HandleIterator) + defer it.Handle.Close() + + for it.Next() { + info, err := it.Handle.Info() + if err != nil { + return nil, fmt.Errorf("info for ID %d: %w", it.ID, err) + } + + if predicate(info) { + return it.Take(), nil + } + } + if err := it.Err(); err != nil { + return nil, fmt.Errorf("iterate handles: %w", err) + } + + return nil, fmt.Errorf("find handle: %w", ErrNotFound) +} diff --git a/vendor/github.com/cilium/ebpf/btf/kernel.go b/vendor/github.com/cilium/ebpf/btf/kernel.go new file mode 100644 index 000000000..bb7368bfc --- /dev/null +++ b/vendor/github.com/cilium/ebpf/btf/kernel.go @@ -0,0 +1,333 @@ +package btf + +import ( + "errors" + "fmt" + "os" + "path/filepath" + "runtime" + "slices" + "sort" + "sync" + + "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/linux" + "github.com/cilium/ebpf/internal/platform" + "github.com/cilium/ebpf/internal/unix" +) + +// globalCache amortises decoding BTF across all users of the library. +var globalCache = struct { + sync.RWMutex + kernel *Spec + modules map[string]*Spec +}{ + modules: make(map[string]*Spec), +} + +// FlushKernelSpec removes any cached kernel type information. +func FlushKernelSpec() { + globalCache.Lock() + defer globalCache.Unlock() + + globalCache.kernel = nil + globalCache.modules = make(map[string]*Spec) +} + +// LoadKernelSpec returns the current kernel's BTF information. +// +// Defaults to /sys/kernel/btf/vmlinux and falls back to scanning the file system +// for vmlinux ELFs. Returns an error wrapping ErrNotSupported if BTF is not enabled. +// +// Consider using [Cache] instead. +func LoadKernelSpec() (*Spec, error) { + spec, err := loadCachedKernelSpec() + return spec.Copy(), err +} + +// load (and cache) the kernel spec. +// +// Does not copy Spec. 
+func loadCachedKernelSpec() (*Spec, error) { + globalCache.RLock() + spec := globalCache.kernel + globalCache.RUnlock() + + if spec != nil { + return spec, nil + } + + globalCache.Lock() + defer globalCache.Unlock() + + // check again, to prevent race between multiple callers + if globalCache.kernel != nil { + return globalCache.kernel, nil + } + + spec, err := loadKernelSpec() + if err != nil { + return nil, err + } + + globalCache.kernel = spec + return spec, nil +} + +// LoadKernelModuleSpec returns the BTF information for the named kernel module. +// +// Using [Cache.Module] is faster when loading BTF for more than one module. +// +// Defaults to /sys/kernel/btf/. +// Returns an error wrapping ErrNotSupported if BTF is not enabled. +// Returns an error wrapping fs.ErrNotExist if BTF for the specific module doesn't exist. +func LoadKernelModuleSpec(module string) (*Spec, error) { + spec, err := loadCachedKernelModuleSpec(module) + return spec.Copy(), err +} + +// load (and cache) a module spec. +// +// Does not copy Spec. +func loadCachedKernelModuleSpec(module string) (*Spec, error) { + globalCache.RLock() + spec := globalCache.modules[module] + globalCache.RUnlock() + + if spec != nil { + return spec, nil + } + + base, err := loadCachedKernelSpec() + if err != nil { + return nil, err + } + + // NB: This only allows a single module to be parsed at a time. Not sure + // it makes a difference. 
+ globalCache.Lock() + defer globalCache.Unlock() + + // check again, to prevent race between multiple callers + if spec := globalCache.modules[module]; spec != nil { + return spec, nil + } + + spec, err = loadKernelModuleSpec(module, base) + if err != nil { + return nil, err + } + + globalCache.modules[module] = spec + return spec, nil +} + +func loadKernelSpec() (*Spec, error) { + if platform.IsWindows { + return nil, internal.ErrNotSupportedOnOS + } + + fh, err := os.Open("/sys/kernel/btf/vmlinux") + if err == nil { + defer fh.Close() + + info, err := fh.Stat() + if err != nil { + return nil, fmt.Errorf("stat vmlinux: %w", err) + } + + // NB: It's not safe to mmap arbitrary files because mmap(2) doesn't + // guarantee that changes made after mmap are not visible in the mapping. + // + // This is not a problem for vmlinux, since it is always a read-only file. + raw, err := unix.Mmap(int(fh.Fd()), 0, int(info.Size()), unix.PROT_READ, unix.MAP_PRIVATE) + if err != nil { + return LoadSplitSpecFromReader(fh, nil) + } + + spec, err := loadRawSpec(raw, nil) + if err != nil { + _ = unix.Munmap(raw) + return nil, fmt.Errorf("load vmlinux: %w", err) + } + + runtime.AddCleanup(spec.decoder.sharedBuf, func(b []byte) { + _ = unix.Munmap(b) + }, raw) + + return spec, nil + } + + file, err := findVMLinux() + if err != nil { + return nil, err + } + defer file.Close() + + spec, err := LoadSpecFromReader(file) + return spec, err +} + +func loadKernelModuleSpec(module string, base *Spec) (*Spec, error) { + if platform.IsWindows { + return nil, internal.ErrNotSupportedOnOS + } + + dir, file := filepath.Split(module) + if dir != "" || filepath.Ext(file) != "" { + return nil, fmt.Errorf("invalid module name %q", module) + } + + fh, err := os.Open(filepath.Join("/sys/kernel/btf", module)) + if err != nil { + return nil, err + } + defer fh.Close() + + return LoadSplitSpecFromReader(fh, base) +} + +// findVMLinux scans multiple well-known paths for vmlinux kernel images. 
+func findVMLinux() (*os.File, error) { + if platform.IsWindows { + return nil, fmt.Errorf("find vmlinux: %w", internal.ErrNotSupportedOnOS) + } + + release, err := linux.KernelRelease() + if err != nil { + return nil, err + } + + // use same list of locations as libbpf + // https://github.com/libbpf/libbpf/blob/9a3a42608dbe3731256a5682a125ac1e23bced8f/src/btf.c#L3114-L3122 + locations := []string{ + "/boot/vmlinux-%s", + "/lib/modules/%s/vmlinux-%[1]s", + "/lib/modules/%s/build/vmlinux", + "/usr/lib/modules/%s/kernel/vmlinux", + "/usr/lib/debug/boot/vmlinux-%s", + "/usr/lib/debug/boot/vmlinux-%s.debug", + "/usr/lib/debug/lib/modules/%s/vmlinux", + } + + for _, loc := range locations { + file, err := os.Open(fmt.Sprintf(loc, release)) + if errors.Is(err, os.ErrNotExist) { + continue + } + return file, err + } + + return nil, fmt.Errorf("no BTF found for kernel version %s: %w", release, internal.ErrNotSupported) +} + +// Cache allows to amortise the cost of decoding BTF across multiple call-sites. +// +// It is not safe for concurrent use. +type Cache struct { + kernelTypes *Spec + moduleTypes map[string]*Spec + loadedModules []string +} + +// NewCache creates a new Cache. +// +// Opportunistically reuses a global cache if possible. +func NewCache() *Cache { + globalCache.RLock() + defer globalCache.RUnlock() + + // This copy is either a no-op or very cheap, since the spec won't contain + // any inflated types. + kernel := globalCache.kernel.Copy() + if kernel == nil { + return &Cache{} + } + + modules := make(map[string]*Spec, len(globalCache.modules)) + for name, spec := range globalCache.modules { + decoder, _ := rebaseDecoder(spec.decoder, kernel.decoder) + // NB: Kernel module BTF can't contain ELF fixups because it is always + // read from sysfs. 
+ modules[name] = &Spec{decoder: decoder} + } + + if len(modules) == 0 { + return &Cache{kernel, nil, nil} + } + + return &Cache{kernel, modules, nil} +} + +// Kernel is equivalent to [LoadKernelSpec], except that repeated calls do +// not copy the Spec. +func (c *Cache) Kernel() (*Spec, error) { + if c.kernelTypes != nil { + return c.kernelTypes, nil + } + + var err error + c.kernelTypes, err = LoadKernelSpec() + return c.kernelTypes, err +} + +// Module is equivalent to [LoadKernelModuleSpec], except that repeated calls do +// not copy the spec. +// +// All modules also share the return value of [Kernel] as their base. +func (c *Cache) Module(name string) (*Spec, error) { + if spec := c.moduleTypes[name]; spec != nil { + return spec, nil + } + + if c.moduleTypes == nil { + c.moduleTypes = make(map[string]*Spec) + } + + base, err := c.Kernel() + if err != nil { + return nil, err + } + + spec, err := loadCachedKernelModuleSpec(name) + if err != nil { + return nil, err + } + + // Important: base is shared between modules. This allows inflating common + // types only once. + decoder, err := rebaseDecoder(spec.decoder, base.decoder) + if err != nil { + return nil, err + } + + spec = &Spec{decoder: decoder} + c.moduleTypes[name] = spec + return spec, err +} + +// Modules returns a sorted list of all loaded modules. 
+func (c *Cache) Modules() ([]string, error) { + if c.loadedModules != nil { + return c.loadedModules, nil + } + + btfDir, err := os.Open("/sys/kernel/btf") + if err != nil { + return nil, err + } + defer btfDir.Close() + + entries, err := btfDir.Readdirnames(-1) + if err != nil { + return nil, err + } + + entries = slices.DeleteFunc(entries, func(s string) bool { + return s == "vmlinux" + }) + + sort.Strings(entries) + c.loadedModules = entries + return entries, nil +} diff --git a/vendor/github.com/cilium/ebpf/btf/marshal.go b/vendor/github.com/cilium/ebpf/btf/marshal.go new file mode 100644 index 000000000..308ce8d34 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/btf/marshal.go @@ -0,0 +1,696 @@ +package btf + +import ( + "encoding/binary" + "errors" + "fmt" + "maps" + "math" + "slices" + "sync" + "unsafe" + + "github.com/cilium/ebpf/internal" +) + +type MarshalOptions struct { + // Target byte order. Defaults to the system's native endianness. + Order binary.ByteOrder + // Remove function linkage information for compatibility with <5.6 kernels. + StripFuncLinkage bool + // Replace decl tags with a placeholder for compatibility with <5.16 kernels. + ReplaceDeclTags bool + // Replace TypeTags with a placeholder for compatibility with <5.17 kernels. + ReplaceTypeTags bool + // Replace Enum64 with a placeholder for compatibility with <6.0 kernels. + ReplaceEnum64 bool + // Prevent the "No type found" error when loading BTF without any types. + PreventNoTypeFound bool +} + +// KernelMarshalOptions will generate BTF suitable for the current kernel. +func KernelMarshalOptions() *MarshalOptions { + return &MarshalOptions{ + Order: internal.NativeEndian, + StripFuncLinkage: haveFuncLinkage() != nil, + ReplaceDeclTags: haveDeclTags() != nil, + ReplaceTypeTags: haveTypeTags() != nil, + ReplaceEnum64: haveEnum64() != nil, + PreventNoTypeFound: true, // All current kernels require this. + } +} + +// encoder turns Types into raw BTF. 
+type encoder struct { + MarshalOptions + + pending internal.Deque[Type] + strings *stringTableBuilder + ids map[Type]TypeID + visited map[Type]struct{} + lastID TypeID +} + +var bufferPool = sync.Pool{ + New: func() any { + buf := make([]byte, btfHeaderLen+128) + return &buf + }, +} + +func getByteSlice() *[]byte { + return bufferPool.Get().(*[]byte) +} + +func putByteSlice(buf *[]byte) { + *buf = (*buf)[:0] + bufferPool.Put(buf) +} + +// Builder turns Types into raw BTF. +// +// The default value may be used and represents an empty BTF blob. Void is +// added implicitly if necessary. +type Builder struct { + // Explicitly added types. + types []Type + // IDs for all added types which the user knows about. + stableIDs map[Type]TypeID + // Explicitly added strings. + strings *stringTableBuilder +} + +// NewBuilder creates a Builder from a list of types. +// +// It is more efficient than calling [Add] individually. +// +// Returns an error if adding any of the types fails. +func NewBuilder(types []Type) (*Builder, error) { + b := &Builder{ + make([]Type, 0, len(types)), + make(map[Type]TypeID, len(types)), + nil, + } + + for _, typ := range types { + _, err := b.Add(typ) + if err != nil { + return nil, fmt.Errorf("add %s: %w", typ, err) + } + } + + return b, nil +} + +// Empty returns true if neither types nor strings have been added. +func (b *Builder) Empty() bool { + return len(b.types) == 0 && (b.strings == nil || b.strings.Length() == 0) +} + +// Add a Type and allocate a stable ID for it. +// +// Adding the identical Type multiple times is valid and will return the same ID. +// +// See [Type] for details on identity. +func (b *Builder) Add(typ Type) (TypeID, error) { + if b.stableIDs == nil { + b.stableIDs = make(map[Type]TypeID) + } + + if _, ok := typ.(*Void); ok { + // Equality is weird for void, since it is a zero sized type. 
+ return 0, nil + } + + if ds, ok := typ.(*Datasec); ok { + if err := datasecResolveWorkaround(b, ds); err != nil { + return 0, err + } + } + + id, ok := b.stableIDs[typ] + if ok { + return id, nil + } + + b.types = append(b.types, typ) + + id = TypeID(len(b.types)) + if int(id) != len(b.types) { + return 0, fmt.Errorf("no more type IDs") + } + + b.stableIDs[typ] = id + return id, nil +} + +// Marshal encodes all types in the Marshaler into BTF wire format. +// +// opts may be nil. +func (b *Builder) Marshal(buf []byte, opts *MarshalOptions) ([]byte, error) { + stb := b.strings + if stb == nil { + // Assume that most types are named. This makes encoding large BTF like + // vmlinux a lot cheaper. + stb = newStringTableBuilder(len(b.types)) + } else { + // Avoid modifying the Builder's string table. + stb = b.strings.Copy() + } + + if opts == nil { + opts = &MarshalOptions{Order: internal.NativeEndian} + } + + // Reserve space for the BTF header. + buf = slices.Grow(buf, btfHeaderLen)[:btfHeaderLen] + + e := encoder{ + MarshalOptions: *opts, + strings: stb, + lastID: TypeID(len(b.types)), + visited: make(map[Type]struct{}, len(b.types)), + ids: maps.Clone(b.stableIDs), + } + + if e.ids == nil { + e.ids = make(map[Type]TypeID) + } + + types := b.types + if len(types) == 0 && stb.Length() > 0 && opts.PreventNoTypeFound { + // We have strings that need to be written out, + // but no types (besides the implicit Void). + // Kernels as recent as v6.7 refuse to load such BTF + // with a "No type found" error in the log. + // Fix this by adding a dummy type. + types = []Type{&Int{Size: 0}} + } + + // Ensure that types are marshaled in the exact order they were Add()ed. + // Otherwise the ID returned from Add() won't match. 
+ e.pending.Grow(len(types)) + for _, typ := range types { + e.pending.Push(typ) + } + + buf, err := e.deflatePending(buf) + if err != nil { + return nil, err + } + + length := len(buf) + typeLen := uint32(length - btfHeaderLen) + + stringLen := e.strings.Length() + buf = e.strings.AppendEncoded(buf) + + // Fill out the header, and write it out. + header := &btfHeader{ + Magic: btfMagic, + Version: 1, + Flags: 0, + HdrLen: uint32(btfHeaderLen), + TypeOff: 0, + TypeLen: typeLen, + StringOff: typeLen, + StringLen: uint32(stringLen), + } + + _, err = binary.Encode(buf[:btfHeaderLen], e.Order, header) + if err != nil { + return nil, fmt.Errorf("write header: %v", err) + } + + return buf, nil +} + +// addString adds a string to the resulting BTF. +// +// Adding the same string multiple times will return the same result. +// +// Returns an identifier into the string table or an error if the string +// contains invalid characters. +func (b *Builder) addString(str string) (uint32, error) { + if b.strings == nil { + b.strings = newStringTableBuilder(0) + } + + return b.strings.Add(str) +} + +func (e *encoder) allocateIDs(root Type) error { + for typ := range postorder(root, e.visited) { + if _, ok := typ.(*Void); ok { + continue + } + + if _, ok := e.ids[typ]; ok { + continue + } + + id := e.lastID + 1 + if id < e.lastID { + return errors.New("type ID overflow") + } + + e.pending.Push(typ) + e.ids[typ] = id + e.lastID = id + } + + return nil +} + +// id returns the ID for the given type or panics with an error. +func (e *encoder) id(typ Type) TypeID { + if _, ok := typ.(*Void); ok { + return 0 + } + + id, ok := e.ids[typ] + if !ok { + panic(fmt.Errorf("no ID for type %v", typ)) + } + + return id +} + +func (e *encoder) deflatePending(buf []byte) ([]byte, error) { + // Declare root outside of the loop to avoid repeated heap allocations. 
+ var root Type + + for !e.pending.Empty() { + root = e.pending.Shift() + + // Allocate IDs for all children of typ, including transitive dependencies. + err := e.allocateIDs(root) + if err != nil { + return nil, err + } + + buf, err = e.deflateType(buf, root) + if err != nil { + id := e.ids[root] + return nil, fmt.Errorf("deflate %v with ID %d: %w", root, id, err) + } + } + + return buf, nil +} + +func (e *encoder) deflateType(buf []byte, typ Type) (_ []byte, err error) { + defer func() { + if r := recover(); r != nil { + var ok bool + err, ok = r.(error) + if !ok { + panic(r) + } + } + }() + + var raw btfType + raw.NameOff, err = e.strings.Add(typ.TypeName()) + if err != nil { + return nil, err + } + + // Reserve space for the btfType header. + start := len(buf) + buf = append(buf, make([]byte, unsafe.Sizeof(raw))...) + + switch v := typ.(type) { + case *Void: + return nil, errors.New("Void is implicit in BTF wire format") + + case *Int: + buf, err = e.deflateInt(buf, &raw, v) + + case *Pointer: + raw.SetKind(kindPointer) + raw.SetType(e.id(v.Target)) + + case *Array: + raw.SetKind(kindArray) + buf, err = binary.Append(buf, e.Order, &btfArray{ + e.id(v.Type), + e.id(v.Index), + v.Nelems, + }) + + case *Struct: + raw.SetKind(kindStruct) + raw.SetSize(v.Size) + buf, err = e.deflateMembers(buf, &raw, v.Members) + + case *Union: + buf, err = e.deflateUnion(buf, &raw, v) + + case *Enum: + if v.Size == 8 { + buf, err = e.deflateEnum64(buf, &raw, v) + } else { + buf, err = e.deflateEnum(buf, &raw, v) + } + + case *Fwd: + raw.SetKind(kindForward) + raw.SetFwdKind(v.Kind) + + case *Typedef: + raw.SetKind(kindTypedef) + raw.SetType(e.id(v.Type)) + + case *Volatile: + raw.SetKind(kindVolatile) + raw.SetType(e.id(v.Type)) + + case *Const: + e.deflateConst(&raw, v) + + case *Restrict: + raw.SetKind(kindRestrict) + raw.SetType(e.id(v.Type)) + + case *Func: + raw.SetKind(kindFunc) + raw.SetType(e.id(v.Type)) + if !e.StripFuncLinkage { + raw.SetLinkage(v.Linkage) + } + + case 
*FuncProto: + raw.SetKind(kindFuncProto) + raw.SetType(e.id(v.Return)) + raw.SetVlen(len(v.Params)) + buf, err = e.deflateFuncParams(buf, v.Params) + + case *Var: + raw.SetKind(kindVar) + raw.SetType(e.id(v.Type)) + buf, err = binary.Append(buf, e.Order, btfVariable{uint32(v.Linkage)}) + + case *Datasec: + raw.SetKind(kindDatasec) + raw.SetSize(v.Size) + raw.SetVlen(len(v.Vars)) + buf, err = e.deflateVarSecinfos(buf, v.Vars) + + case *Float: + raw.SetKind(kindFloat) + raw.SetSize(v.Size) + + case *declTag: + buf, err = e.deflateDeclTag(buf, &raw, v) + + case *TypeTag: + err = e.deflateTypeTag(&raw, v) + + default: + return nil, fmt.Errorf("don't know how to deflate %T", v) + } + + if err != nil { + return nil, err + } + + header := buf[start : start+int(unsafe.Sizeof(raw))] + if _, err = raw.Encode(header, e.Order); err != nil { + return nil, err + } + + return buf, nil +} + +func (e *encoder) deflateInt(buf []byte, raw *btfType, i *Int) ([]byte, error) { + raw.SetKind(kindInt) + raw.SetSize(i.Size) + + var bi btfInt + bi.SetEncoding(i.Encoding) + // We need to set bits in addition to size, since btf_type_int_is_regular + // otherwise flags this as a bitfield. + bi.SetBits(byte(i.Size) * 8) + return binary.Append(buf, e.Order, bi) +} + +func (e *encoder) deflateDeclTag(buf []byte, raw *btfType, tag *declTag) ([]byte, error) { + // Replace a decl tag with an integer for compatibility with <5.16 kernels, + // following libbpf behaviour. + if e.ReplaceDeclTags { + typ := &Int{"decl_tag_placeholder", 1, Unsigned} + buf, err := e.deflateInt(buf, raw, typ) + if err != nil { + return nil, err + } + + // Add the placeholder type name to the string table. The encoder added the + // original type name before this call. 
+ raw.NameOff, err = e.strings.Add(typ.TypeName()) + return buf, err + } + + var err error + raw.SetKind(kindDeclTag) + raw.SetType(e.id(tag.Type)) + raw.NameOff, err = e.strings.Add(tag.Value) + if err != nil { + return nil, err + } + + return binary.Append(buf, e.Order, btfDeclTag{uint32(tag.Index)}) +} + +func (e *encoder) deflateConst(raw *btfType, c *Const) { + raw.SetKind(kindConst) + raw.SetType(e.id(c.Type)) +} + +func (e *encoder) deflateTypeTag(raw *btfType, tag *TypeTag) (err error) { + // Replace a type tag with a const qualifier for compatibility with <5.17 + // kernels, following libbpf behaviour. + if e.ReplaceTypeTags { + e.deflateConst(raw, &Const{tag.Type}) + return nil + } + + raw.SetKind(kindTypeTag) + raw.SetType(e.id(tag.Type)) + raw.NameOff, err = e.strings.Add(tag.Value) + return +} + +func (e *encoder) deflateUnion(buf []byte, raw *btfType, union *Union) ([]byte, error) { + raw.SetKind(kindUnion) + raw.SetSize(union.Size) + return e.deflateMembers(buf, raw, union.Members) +} + +func (e *encoder) deflateMembers(buf []byte, header *btfType, members []Member) ([]byte, error) { + var bm btfMember + isBitfield := false + + buf = slices.Grow(buf, len(members)*int(unsafe.Sizeof(bm))) + for _, member := range members { + isBitfield = isBitfield || member.BitfieldSize > 0 + + offset := member.Offset + if isBitfield { + offset = member.BitfieldSize<<24 | (member.Offset & 0xffffff) + } + + nameOff, err := e.strings.Add(member.Name) + if err != nil { + return nil, err + } + + bm = btfMember{ + nameOff, + e.id(member.Type), + uint32(offset), + } + + buf, err = binary.Append(buf, e.Order, &bm) + if err != nil { + return nil, err + } + } + + header.SetVlen(len(members)) + header.SetBitfield(isBitfield) + return buf, nil +} + +func (e *encoder) deflateEnum(buf []byte, raw *btfType, enum *Enum) ([]byte, error) { + raw.SetKind(kindEnum) + raw.SetSize(enum.Size) + raw.SetVlen(len(enum.Values)) + // Signedness appeared together with ENUM64 support. 
+ raw.SetSigned(enum.Signed && !e.ReplaceEnum64) + return e.deflateEnumValues(buf, enum) +} + +func (e *encoder) deflateEnumValues(buf []byte, enum *Enum) ([]byte, error) { + var be btfEnum + buf = slices.Grow(buf, len(enum.Values)*int(unsafe.Sizeof(be))) + for _, value := range enum.Values { + nameOff, err := e.strings.Add(value.Name) + if err != nil { + return nil, err + } + + if enum.Signed { + if signedValue := int64(value.Value); signedValue < math.MinInt32 || signedValue > math.MaxInt32 { + return nil, fmt.Errorf("value %d of enum %q exceeds 32 bits", signedValue, value.Name) + } + } else { + if value.Value > math.MaxUint32 { + return nil, fmt.Errorf("value %d of enum %q exceeds 32 bits", value.Value, value.Name) + } + } + + be = btfEnum{ + nameOff, + uint32(value.Value), + } + + buf, err = binary.Append(buf, e.Order, &be) + if err != nil { + return nil, err + } + } + + return buf, nil +} + +func (e *encoder) deflateEnum64(buf []byte, raw *btfType, enum *Enum) ([]byte, error) { + if e.ReplaceEnum64 { + // Replace the ENUM64 with a union of fields with the correct size. + // This matches libbpf behaviour on purpose. 
+ placeholder := &Int{ + "enum64_placeholder", + enum.Size, + Unsigned, + } + if enum.Signed { + placeholder.Encoding = Signed + } + if err := e.allocateIDs(placeholder); err != nil { + return nil, fmt.Errorf("add enum64 placeholder: %w", err) + } + + members := make([]Member, 0, len(enum.Values)) + for _, v := range enum.Values { + members = append(members, Member{ + Name: v.Name, + Type: placeholder, + }) + } + + return e.deflateUnion(buf, raw, &Union{enum.Name, enum.Size, members, nil}) + } + + raw.SetKind(kindEnum64) + raw.SetSize(enum.Size) + raw.SetVlen(len(enum.Values)) + raw.SetSigned(enum.Signed) + return e.deflateEnum64Values(buf, enum.Values) +} + +func (e *encoder) deflateEnum64Values(buf []byte, values []EnumValue) ([]byte, error) { + var be btfEnum64 + buf = slices.Grow(buf, len(values)*int(unsafe.Sizeof(be))) + for _, value := range values { + nameOff, err := e.strings.Add(value.Name) + if err != nil { + return nil, err + } + + be = btfEnum64{ + nameOff, + uint32(value.Value), + uint32(value.Value >> 32), + } + + buf, err = binary.Append(buf, e.Order, &be) + if err != nil { + return nil, err + } + } + + return buf, nil +} + +func (e *encoder) deflateFuncParams(buf []byte, params []FuncParam) ([]byte, error) { + var bp btfParam + buf = slices.Grow(buf, len(params)*int(unsafe.Sizeof(bp))) + for _, param := range params { + nameOff, err := e.strings.Add(param.Name) + if err != nil { + return nil, err + } + + bp = btfParam{ + nameOff, + e.id(param.Type), + } + + buf, err = binary.Append(buf, e.Order, &bp) + if err != nil { + return nil, err + } + } + return buf, nil +} + +func (e *encoder) deflateVarSecinfos(buf []byte, vars []VarSecinfo) ([]byte, error) { + var vsi btfVarSecinfo + var err error + buf = slices.Grow(buf, len(vars)*int(unsafe.Sizeof(vsi))) + for _, v := range vars { + vsi = btfVarSecinfo{ + e.id(v.Type), + v.Offset, + v.Size, + } + + buf, err = binary.Append(buf, e.Order, vsi) + if err != nil { + return nil, err + } + } + return buf, nil 
+} + +// MarshalMapKV creates a BTF object containing a map key and value. +// +// The function is intended for the use of the ebpf package and may be removed +// at any point in time. +func MarshalMapKV(key, value Type) (_ *Handle, keyID, valueID TypeID, err error) { + var b Builder + + if key != nil { + keyID, err = b.Add(key) + if err != nil { + return nil, 0, 0, fmt.Errorf("add key type: %w", err) + } + } + + if value != nil { + valueID, err = b.Add(value) + if err != nil { + return nil, 0, 0, fmt.Errorf("add value type: %w", err) + } + } + + handle, err := NewHandle(&b) + if err != nil { + // Check for 'full' map BTF support, since kernels between 4.18 and 5.2 + // already support BTF blobs for maps without Var or Datasec just fine. + if err := haveMapBTF(); err != nil { + return nil, 0, 0, err + } + } + return handle, keyID, valueID, err +} diff --git a/vendor/github.com/cilium/ebpf/btf/strings.go b/vendor/github.com/cilium/ebpf/btf/strings.go new file mode 100644 index 000000000..482f93bef --- /dev/null +++ b/vendor/github.com/cilium/ebpf/btf/strings.go @@ -0,0 +1,208 @@ +package btf + +import ( + "bytes" + "errors" + "fmt" + "io" + "maps" + "strings" + "sync" +) + +// stringTable contains a sequence of null-terminated strings. +// +// It is safe for concurrent use. +type stringTable struct { + base *stringTable + bytes []byte + + mu sync.Mutex + cache map[uint32]string +} + +// sizedReader is implemented by bytes.Reader, io.SectionReader, strings.Reader, etc. +type sizedReader interface { + io.Reader + Size() int64 +} + +func readStringTable(r sizedReader, base *stringTable) (*stringTable, error) { + bytes := make([]byte, r.Size()) + if _, err := io.ReadFull(r, bytes); err != nil { + return nil, err + } + + return newStringTable(bytes, base) +} + +func newStringTable(bytes []byte, base *stringTable) (*stringTable, error) { + // When parsing split BTF's string table, the first entry offset is derived + // from the last entry offset of the base BTF. 
+ firstStringOffset := uint32(0) + if base != nil { + firstStringOffset = uint32(len(base.bytes)) + } + + if len(bytes) > 0 { + if bytes[len(bytes)-1] != 0 { + return nil, errors.New("string table isn't null terminated") + } + + if firstStringOffset == 0 && bytes[0] != 0 { + return nil, errors.New("first item in string table is non-empty") + } + } + + return &stringTable{base: base, bytes: bytes}, nil +} + +func (st *stringTable) Lookup(offset uint32) (string, error) { + // Fast path: zero offset is the empty string, looked up frequently. + if offset == 0 { + return "", nil + } + + b, err := st.lookupSlow(offset) + return string(b), err +} + +func (st *stringTable) LookupBytes(offset uint32) ([]byte, error) { + // Fast path: zero offset is the empty string, looked up frequently. + if offset == 0 { + return nil, nil + } + + return st.lookupSlow(offset) +} + +func (st *stringTable) lookupSlow(offset uint32) ([]byte, error) { + if st.base != nil { + n := uint32(len(st.base.bytes)) + if offset < n { + return st.base.lookupSlow(offset) + } + offset -= n + } + + if offset > uint32(len(st.bytes)) { + return nil, fmt.Errorf("offset %d is out of bounds of string table", offset) + } + + if offset > 0 && st.bytes[offset-1] != 0 { + return nil, fmt.Errorf("offset %d is not the beginning of a string", offset) + } + + i := bytes.IndexByte(st.bytes[offset:], 0) + return st.bytes[offset : offset+uint32(i)], nil +} + +// LookupCache returns the string at the given offset, caching the result +// for future lookups. +func (cst *stringTable) LookupCached(offset uint32) (string, error) { + // Fast path: zero offset is the empty string, looked up frequently. 
+ if offset == 0 { + return "", nil + } + + cst.mu.Lock() + defer cst.mu.Unlock() + + if str, ok := cst.cache[offset]; ok { + return str, nil + } + + str, err := cst.Lookup(offset) + if err != nil { + return "", err + } + + if cst.cache == nil { + cst.cache = make(map[uint32]string) + } + cst.cache[offset] = str + return str, nil +} + +// stringTableBuilder builds BTF string tables. +type stringTableBuilder struct { + length uint32 + strings map[string]uint32 +} + +// newStringTableBuilder creates a builder with the given capacity. +// +// capacity may be zero. +func newStringTableBuilder(capacity int) *stringTableBuilder { + var stb stringTableBuilder + + if capacity == 0 { + // Use the runtime's small default size. + stb.strings = make(map[string]uint32) + } else { + stb.strings = make(map[string]uint32, capacity) + } + + // Ensure that the empty string is at index 0. + stb.append("") + return &stb +} + +// Add a string to the table. +// +// Adding the same string multiple times will only store it once. +func (stb *stringTableBuilder) Add(str string) (uint32, error) { + if strings.IndexByte(str, 0) != -1 { + return 0, fmt.Errorf("string contains null: %q", str) + } + + offset, ok := stb.strings[str] + if ok { + return offset, nil + } + + return stb.append(str), nil +} + +func (stb *stringTableBuilder) append(str string) uint32 { + offset := stb.length + stb.length += uint32(len(str)) + 1 + stb.strings[str] = offset + return offset +} + +// Lookup finds the offset of a string in the table. +// +// Returns an error if str hasn't been added yet. +func (stb *stringTableBuilder) Lookup(str string) (uint32, error) { + offset, ok := stb.strings[str] + if !ok { + return 0, fmt.Errorf("string %q is not in table", str) + } + + return offset, nil +} + +// Length returns the length in bytes. +func (stb *stringTableBuilder) Length() int { + return int(stb.length) +} + +// AppendEncoded appends the string table to the end of the provided buffer. 
+func (stb *stringTableBuilder) AppendEncoded(buf []byte) []byte { + n := len(buf) + buf = append(buf, make([]byte, stb.Length())...) + strings := buf[n:] + for str, offset := range stb.strings { + copy(strings[offset:], str) + } + return buf +} + +// Copy the string table builder. +func (stb *stringTableBuilder) Copy() *stringTableBuilder { + return &stringTableBuilder{ + stb.length, + maps.Clone(stb.strings), + } +} diff --git a/vendor/github.com/cilium/ebpf/btf/traversal.go b/vendor/github.com/cilium/ebpf/btf/traversal.go new file mode 100644 index 000000000..57c1dc27e --- /dev/null +++ b/vendor/github.com/cilium/ebpf/btf/traversal.go @@ -0,0 +1,159 @@ +package btf + +import ( + "fmt" + "iter" +) + +// Functions to traverse a cyclic graph of types. The below was very useful: +// https://eli.thegreenplace.net/2015/directed-graph-traversal-orderings-and-applications-to-data-flow-analysis/#post-order-and-reverse-post-order + +// postorder yields all types reachable from root in post order. +func postorder(root Type, visited map[Type]struct{}) iter.Seq[Type] { + return func(yield func(Type) bool) { + visitInPostorder(root, visited, yield) + } +} + +// visitInPostorder is a separate function to avoid arguments escaping +// to the heap. Don't change the setup without re-running the benchmarks. +func visitInPostorder(root Type, visited map[Type]struct{}, yield func(typ Type) bool) bool { + if _, ok := visited[root]; ok { + return true + } + if visited == nil { + visited = make(map[Type]struct{}) + } + visited[root] = struct{}{} + + for child := range children(root) { + if !visitInPostorder(*child, visited, yield) { + return false + } + } + + return yield(root) +} + +// children yields all direct descendants of typ. +func children(typ Type) iter.Seq[*Type] { + return func(yield func(*Type) bool) { + // Explicitly type switch on the most common types to allow the inliner to + // do its work. This avoids allocating intermediate slices from walk() on + // the heap. 
+ var tags []string + switch v := typ.(type) { + case *Void, *Int, *Enum, *Fwd, *Float, *declTag: + // No children to traverse. + // declTags is declared as a leaf type since it's parsed into .Tags fields of other types + // during unmarshaling. + case *Pointer: + if !yield(&v.Target) { + return + } + case *Array: + if !yield(&v.Index) { + return + } + if !yield(&v.Type) { + return + } + case *Struct: + for i := range v.Members { + if !yield(&v.Members[i].Type) { + return + } + for _, t := range v.Members[i].Tags { + var tag Type = &declTag{v, t, i} + if !yield(&tag) { + return + } + } + } + tags = v.Tags + case *Union: + for i := range v.Members { + if !yield(&v.Members[i].Type) { + return + } + for _, t := range v.Members[i].Tags { + var tag Type = &declTag{v, t, i} + if !yield(&tag) { + return + } + } + } + tags = v.Tags + case *Typedef: + if !yield(&v.Type) { + return + } + tags = v.Tags + case *Volatile: + if !yield(&v.Type) { + return + } + case *Const: + if !yield(&v.Type) { + return + } + case *Restrict: + if !yield(&v.Type) { + return + } + case *Func: + if !yield(&v.Type) { + return + } + if fp, ok := v.Type.(*FuncProto); ok { + for i := range fp.Params { + if len(v.ParamTags) <= i { + continue + } + for _, t := range v.ParamTags[i] { + var tag Type = &declTag{v, t, i} + if !yield(&tag) { + return + } + } + } + } + tags = v.Tags + case *FuncProto: + if !yield(&v.Return) { + return + } + for i := range v.Params { + if !yield(&v.Params[i].Type) { + return + } + } + case *Var: + if !yield(&v.Type) { + return + } + tags = v.Tags + case *Datasec: + for i := range v.Vars { + if !yield(&v.Vars[i].Type) { + return + } + } + case *TypeTag: + if !yield(&v.Type) { + return + } + case *cycle: + // cycle has children, but we ignore them deliberately. 
+ default: + panic(fmt.Sprintf("don't know how to walk Type %T", v)) + } + + for _, t := range tags { + var tag Type = &declTag{typ, t, -1} + if !yield(&tag) { + return + } + } + } +} diff --git a/vendor/github.com/cilium/ebpf/btf/types.go b/vendor/github.com/cilium/ebpf/btf/types.go new file mode 100644 index 000000000..fc0a59744 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/btf/types.go @@ -0,0 +1,910 @@ +package btf + +import ( + "errors" + "fmt" + "io" + "math" + "strings" + + "github.com/cilium/ebpf/asm" + "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/sys" +) + +// Mirrors MAX_RESOLVE_DEPTH in libbpf. +// https://github.com/libbpf/libbpf/blob/e26b84dc330c9644c07428c271ab491b0f01f4e1/src/btf.c#L761 +const maxResolveDepth = 32 + +// TypeID identifies a type in a BTF section. +type TypeID = sys.TypeID + +// Type represents a type described by BTF. +// +// Identity of Type follows the [Go specification]: two Types are considered +// equal if they have the same concrete type and the same dynamic value, aka +// they point at the same location in memory. This means that the following +// Types are considered distinct even though they have the same "shape". +// +// a := &Int{Size: 1} +// b := &Int{Size: 1} +// a != b +// +// [Go specification]: https://go.dev/ref/spec#Comparison_operators +type Type interface { + // Type can be formatted using the %s and %v verbs. %s outputs only the + // identity of the type, without any detail. %v outputs additional detail. + // + // Use the '+' flag to include the address of the type. + // + // Use the width to specify how many levels of detail to output, for example + // %1v will output detail for the root type and a short description of its + // children. %2v would output details of the root type and its children + // as well as a short description of the grandchildren. + fmt.Formatter + + // Name of the type, empty for anonymous types and types that cannot + // carry a name, like Void and Pointer. 
+ TypeName() string + + // Make a copy of the type, without copying Type members. + copy() Type + + // New implementations must update walkType. +} + +var ( + _ Type = (*Int)(nil) + _ Type = (*Struct)(nil) + _ Type = (*Union)(nil) + _ Type = (*Enum)(nil) + _ Type = (*Fwd)(nil) + _ Type = (*Func)(nil) + _ Type = (*Typedef)(nil) + _ Type = (*Var)(nil) + _ Type = (*Datasec)(nil) + _ Type = (*Float)(nil) + _ Type = (*declTag)(nil) + _ Type = (*TypeTag)(nil) + _ Type = (*cycle)(nil) +) + +// Void is the unit type of BTF. +type Void struct{} + +func (v *Void) Format(fs fmt.State, verb rune) { formatType(fs, verb, v) } +func (v *Void) TypeName() string { return "" } +func (v *Void) size() uint32 { return 0 } +func (v *Void) copy() Type { return (*Void)(nil) } + +type IntEncoding byte + +// Valid IntEncodings. +// +// These may look like they are flags, but they aren't. +const ( + Unsigned IntEncoding = 0 + Signed IntEncoding = 1 + Char IntEncoding = 2 + Bool IntEncoding = 4 +) + +func (ie IntEncoding) String() string { + switch ie { + case Char: + // NB: There is no way to determine signedness for char. + return "char" + case Bool: + return "bool" + case Signed: + return "signed" + case Unsigned: + return "unsigned" + default: + return fmt.Sprintf("IntEncoding(%d)", byte(ie)) + } +} + +// Int is an integer of a given length. +// +// See https://www.kernel.org/doc/html/latest/bpf/btf.html#btf-kind-int +type Int struct { + Name string + + // The size of the integer in bytes. + Size uint32 + Encoding IntEncoding +} + +func (i *Int) Format(fs fmt.State, verb rune) { + formatType(fs, verb, i, i.Encoding, "size=", i.Size) +} + +func (i *Int) TypeName() string { return i.Name } +func (i *Int) size() uint32 { return i.Size } +func (i *Int) copy() Type { + cpy := *i + return &cpy +} + +// Pointer is a pointer to another type. 
+type Pointer struct { + Target Type +} + +func (p *Pointer) Format(fs fmt.State, verb rune) { + formatType(fs, verb, p, "target=", p.Target) +} + +func (p *Pointer) TypeName() string { return "" } +func (p *Pointer) size() uint32 { return 8 } +func (p *Pointer) copy() Type { + cpy := *p + return &cpy +} + +// Array is an array with a fixed number of elements. +type Array struct { + Index Type + Type Type + Nelems uint32 +} + +func (arr *Array) Format(fs fmt.State, verb rune) { + formatType(fs, verb, arr, "index=", arr.Index, "type=", arr.Type, "n=", arr.Nelems) +} + +func (arr *Array) TypeName() string { return "" } + +func (arr *Array) copy() Type { + cpy := *arr + return &cpy +} + +// Struct is a compound type of consecutive members. +type Struct struct { + Name string + // The size of the struct including padding, in bytes + Size uint32 + Members []Member + Tags []string +} + +func (s *Struct) Format(fs fmt.State, verb rune) { + formatType(fs, verb, s, "fields=", len(s.Members)) +} + +func (s *Struct) TypeName() string { return s.Name } + +func (s *Struct) size() uint32 { return s.Size } + +func (s *Struct) copy() Type { + cpy := *s + cpy.Members = copyMembers(s.Members) + cpy.Tags = copyTags(cpy.Tags) + return &cpy +} + +func (s *Struct) members() []Member { + return s.Members +} + +// Union is a compound type where members occupy the same memory. +type Union struct { + Name string + // The size of the union including padding, in bytes. 
+ Size uint32 + Members []Member + Tags []string +} + +func (u *Union) Format(fs fmt.State, verb rune) { + formatType(fs, verb, u, "fields=", len(u.Members)) +} + +func (u *Union) TypeName() string { return u.Name } + +func (u *Union) size() uint32 { return u.Size } + +func (u *Union) copy() Type { + cpy := *u + cpy.Members = copyMembers(u.Members) + cpy.Tags = copyTags(cpy.Tags) + return &cpy +} + +func (u *Union) members() []Member { + return u.Members +} + +func copyMembers(orig []Member) []Member { + cpy := make([]Member, len(orig)) + copy(cpy, orig) + for i, member := range cpy { + cpy[i].Tags = copyTags(member.Tags) + } + return cpy +} + +func copyTags(orig []string) []string { + if orig == nil { // preserve nil vs zero-len slice distinction + return nil + } + cpy := make([]string, len(orig)) + copy(cpy, orig) + return cpy +} + +type composite interface { + Type + members() []Member +} + +var ( + _ composite = (*Struct)(nil) + _ composite = (*Union)(nil) +) + +// A value in bits. +type Bits uint32 + +// Bytes converts a bit value into bytes. +func (b Bits) Bytes() uint32 { + return uint32(b / 8) +} + +// Member is part of a Struct or Union. +// +// It is not a valid Type. +type Member struct { + Name string + Type Type + Offset Bits + BitfieldSize Bits + Tags []string +} + +// Enum lists possible values. +type Enum struct { + Name string + // Size of the enum value in bytes. + Size uint32 + // True if the values should be interpreted as signed integers. 
+ Signed bool + Values []EnumValue +} + +func (e *Enum) Format(fs fmt.State, verb rune) { + formatType(fs, verb, e, "size=", e.Size, "values=", len(e.Values)) +} + +func (e *Enum) TypeName() string { return e.Name } + +// EnumValue is part of an Enum +// +// Is is not a valid Type +type EnumValue struct { + Name string + Value uint64 +} + +func (e *Enum) size() uint32 { return e.Size } +func (e *Enum) copy() Type { + cpy := *e + cpy.Values = make([]EnumValue, len(e.Values)) + copy(cpy.Values, e.Values) + return &cpy +} + +// FwdKind is the type of forward declaration. +type FwdKind int + +// Valid types of forward declaration. +const ( + FwdStruct FwdKind = iota + FwdUnion +) + +func (fk FwdKind) String() string { + switch fk { + case FwdStruct: + return "struct" + case FwdUnion: + return "union" + default: + return fmt.Sprintf("%T(%d)", fk, int(fk)) + } +} + +// Fwd is a forward declaration of a Type. +type Fwd struct { + Name string + Kind FwdKind +} + +func (f *Fwd) Format(fs fmt.State, verb rune) { + formatType(fs, verb, f, f.Kind) +} + +func (f *Fwd) TypeName() string { return f.Name } + +func (f *Fwd) copy() Type { + cpy := *f + return &cpy +} + +func (f *Fwd) matches(typ Type) bool { + if _, ok := As[*Struct](typ); ok && f.Kind == FwdStruct { + return true + } + + if _, ok := As[*Union](typ); ok && f.Kind == FwdUnion { + return true + } + + return false +} + +// Typedef is an alias of a Type. +type Typedef struct { + Name string + Type Type + Tags []string +} + +func (td *Typedef) Format(fs fmt.State, verb rune) { + formatType(fs, verb, td, td.Type) +} + +func (td *Typedef) TypeName() string { return td.Name } + +func (td *Typedef) copy() Type { + cpy := *td + cpy.Tags = copyTags(td.Tags) + return &cpy +} + +// Volatile is a qualifier. 
+type Volatile struct { + Type Type +} + +func (v *Volatile) Format(fs fmt.State, verb rune) { + formatType(fs, verb, v, v.Type) +} + +func (v *Volatile) TypeName() string { return "" } + +func (v *Volatile) qualify() Type { return v.Type } +func (v *Volatile) copy() Type { + cpy := *v + return &cpy +} + +// Const is a qualifier. +type Const struct { + Type Type +} + +func (c *Const) Format(fs fmt.State, verb rune) { + formatType(fs, verb, c, c.Type) +} + +func (c *Const) TypeName() string { return "" } + +func (c *Const) qualify() Type { return c.Type } +func (c *Const) copy() Type { + cpy := *c + return &cpy +} + +// Restrict is a qualifier. +type Restrict struct { + Type Type +} + +func (r *Restrict) Format(fs fmt.State, verb rune) { + formatType(fs, verb, r, r.Type) +} + +func (r *Restrict) TypeName() string { return "" } + +func (r *Restrict) qualify() Type { return r.Type } +func (r *Restrict) copy() Type { + cpy := *r + return &cpy +} + +// Func is a function definition. +type Func struct { + Name string + Type Type + Linkage FuncLinkage + Tags []string + // ParamTags holds a list of tags for each parameter of the FuncProto to which `Type` points. + // If no tags are present for any param, the outer slice will be nil/len(ParamTags)==0. + // If at least 1 param has a tag, the outer slice will have the same length as the number of params. + // The inner slice contains the tags and may be nil/len(ParamTags[i])==0 if no tags are present for that param. + ParamTags [][]string +} + +func FuncMetadata(ins *asm.Instruction) *Func { + fn, _ := ins.Metadata.Get(funcInfoMeta{}).(*Func) + return fn +} + +// WithFuncMetadata adds a btf.Func to the Metadata of asm.Instruction. 
+func WithFuncMetadata(ins asm.Instruction, fn *Func) asm.Instruction { + ins.Metadata.Set(funcInfoMeta{}, fn) + return ins +} + +func (f *Func) Format(fs fmt.State, verb rune) { + formatType(fs, verb, f, f.Linkage, "proto=", f.Type) +} + +func (f *Func) TypeName() string { return f.Name } + +func (f *Func) copy() Type { + cpy := *f + cpy.Tags = copyTags(f.Tags) + if f.ParamTags != nil { // preserve nil vs zero-len slice distinction + ptCopy := make([][]string, len(f.ParamTags)) + for i, tags := range f.ParamTags { + ptCopy[i] = copyTags(tags) + } + cpy.ParamTags = ptCopy + } + return &cpy +} + +// FuncProto is a function declaration. +type FuncProto struct { + Return Type + Params []FuncParam +} + +func (fp *FuncProto) Format(fs fmt.State, verb rune) { + formatType(fs, verb, fp, "args=", len(fp.Params), "return=", fp.Return) +} + +func (fp *FuncProto) TypeName() string { return "" } + +func (fp *FuncProto) copy() Type { + cpy := *fp + cpy.Params = make([]FuncParam, len(fp.Params)) + copy(cpy.Params, fp.Params) + return &cpy +} + +type FuncParam struct { + Name string + Type Type +} + +// Var is a global variable. +type Var struct { + Name string + Type Type + Linkage VarLinkage + Tags []string +} + +func (v *Var) Format(fs fmt.State, verb rune) { + formatType(fs, verb, v, v.Linkage) +} + +func (v *Var) TypeName() string { return v.Name } + +func (v *Var) copy() Type { + cpy := *v + cpy.Tags = copyTags(v.Tags) + return &cpy +} + +// Datasec is a global program section containing data. +type Datasec struct { + Name string + Size uint32 + Vars []VarSecinfo +} + +func (ds *Datasec) Format(fs fmt.State, verb rune) { + formatType(fs, verb, ds) +} + +func (ds *Datasec) TypeName() string { return ds.Name } + +func (ds *Datasec) size() uint32 { return ds.Size } + +func (ds *Datasec) copy() Type { + cpy := *ds + cpy.Vars = make([]VarSecinfo, len(ds.Vars)) + copy(cpy.Vars, ds.Vars) + return &cpy +} + +// VarSecinfo describes variable in a Datasec. 
+// +// It is not a valid Type. +type VarSecinfo struct { + // Var or Func. + Type Type + Offset uint32 + Size uint32 +} + +// Float is a float of a given length. +type Float struct { + Name string + + // The size of the float in bytes. + Size uint32 +} + +func (f *Float) Format(fs fmt.State, verb rune) { + formatType(fs, verb, f, "size=", f.Size*8) +} + +func (f *Float) TypeName() string { return f.Name } +func (f *Float) size() uint32 { return f.Size } +func (f *Float) copy() Type { + cpy := *f + return &cpy +} + +// declTag associates metadata with a declaration. +type declTag struct { + Type Type + Value string + // The index this tag refers to in the target type. For composite types, + // a value of -1 indicates that the tag refers to the whole type. Otherwise + // it indicates which member or argument the tag applies to. + Index int +} + +func (dt *declTag) Format(fs fmt.State, verb rune) { + formatType(fs, verb, dt, "type=", dt.Type, "value=", dt.Value, "index=", dt.Index) +} + +func (dt *declTag) TypeName() string { return "" } +func (dt *declTag) copy() Type { + cpy := *dt + return &cpy +} + +// TypeTag associates metadata with a pointer type. Tag types act as a custom +// modifier(const, restrict, volatile) for the target type. Unlike declTags, +// TypeTags are ordered so the order in which they are added matters. +// +// One of their uses is to mark pointers as `__kptr` meaning a pointer points +// to kernel memory. Adding a `__kptr` to pointers in map values allows you +// to store pointers to kernel memory in maps. +type TypeTag struct { + Type Type + Value string +} + +func (tt *TypeTag) Format(fs fmt.State, verb rune) { + formatType(fs, verb, tt, "type=", tt.Type, "value=", tt.Value) +} + +func (tt *TypeTag) TypeName() string { return "" } +func (tt *TypeTag) qualify() Type { return tt.Type } +func (tt *TypeTag) copy() Type { + cpy := *tt + return &cpy +} + +// cycle is a type which had to be elided since it exceeded maxTypeDepth. 
+type cycle struct { + root Type +} + +func (c *cycle) ID() TypeID { return math.MaxUint32 } +func (c *cycle) Format(fs fmt.State, verb rune) { formatType(fs, verb, c, "root=", c.root) } +func (c *cycle) TypeName() string { return "" } +func (c *cycle) copy() Type { + cpy := *c + return &cpy +} + +type sizer interface { + size() uint32 +} + +var ( + _ sizer = (*Int)(nil) + _ sizer = (*Pointer)(nil) + _ sizer = (*Struct)(nil) + _ sizer = (*Union)(nil) + _ sizer = (*Enum)(nil) + _ sizer = (*Datasec)(nil) +) + +type qualifier interface { + qualify() Type +} + +var ( + _ qualifier = (*Const)(nil) + _ qualifier = (*Restrict)(nil) + _ qualifier = (*Volatile)(nil) + _ qualifier = (*TypeTag)(nil) +) + +var errUnsizedType = errors.New("type is unsized") + +// Sizeof returns the size of a type in bytes. +// +// Returns an error if the size can't be computed. +func Sizeof(typ Type) (int, error) { + var ( + n = int64(1) + elem int64 + ) + + for i := 0; i < maxResolveDepth; i++ { + switch v := typ.(type) { + case *Array: + if n > 0 && int64(v.Nelems) > math.MaxInt64/n { + return 0, fmt.Errorf("type %s: overflow", typ) + } + + // Arrays may be of zero length, which allows + // n to be zero as well. + n *= int64(v.Nelems) + typ = v.Type + continue + + case sizer: + elem = int64(v.size()) + + case *Typedef: + typ = v.Type + continue + + case qualifier: + typ = v.qualify() + continue + + default: + return 0, fmt.Errorf("type %T: %w", typ, errUnsizedType) + } + + if n > 0 && elem > math.MaxInt64/n { + return 0, fmt.Errorf("type %s: overflow", typ) + } + + size := n * elem + if int64(int(size)) != size { + return 0, fmt.Errorf("type %s: overflow", typ) + } + + return int(size), nil + } + + return 0, fmt.Errorf("type %s: exceeded type depth", typ) +} + +// alignof returns the alignment of a type. +// +// Returns an error if the Type can't be aligned, like an integer with an uneven +// size. Currently only supports the subset of types necessary for bitfield +// relocations. 
+func alignof(typ Type) (int, error) { + var n int + + switch t := UnderlyingType(typ).(type) { + case *Enum: + n = int(t.size()) + case *Int: + n = int(t.Size) + case *Array: + return alignof(t.Type) + default: + return 0, fmt.Errorf("can't calculate alignment of %T", t) + } + + if !internal.IsPow(n) { + return 0, fmt.Errorf("alignment value %d is not a power of two", n) + } + + return n, nil +} + +// Copy a Type recursively. +// +// typ may form a cycle. +func Copy(typ Type) Type { + return copyType(typ, nil, make(map[Type]Type), nil) +} + +func copyType(typ Type, ids map[Type]TypeID, copies map[Type]Type, copiedIDs map[Type]TypeID) Type { + if typ == nil { + return nil + } + + cpy, ok := copies[typ] + if ok { + // This has been copied previously, no need to continue. + return cpy + } + + cpy = typ.copy() + copies[typ] = cpy + + if id, ok := ids[typ]; ok { + copiedIDs[cpy] = id + } + + for child := range children(cpy) { + *child = copyType(*child, ids, copies, copiedIDs) + } + + return cpy +} + +type typeDeque = internal.Deque[*Type] + +// essentialName represents the name of a BTF type stripped of any flavor +// suffixes after a ___ delimiter. +type essentialName string + +// newEssentialName returns name without a ___ suffix. +// +// CO-RE has the concept of 'struct flavors', which are used to deal with +// changes in kernel data structures. Anything after three underscores +// in a type name is ignored for the purpose of finding a candidate type +// in the kernel's BTF. +func newEssentialName(name string) essentialName { + if name == "" { + return "" + } + lastIdx := strings.LastIndex(name, "___") + if lastIdx > 0 { + return essentialName(name[:lastIdx]) + } + return essentialName(name) +} + +// UnderlyingType skips qualifiers and Typedefs. 
+func UnderlyingType(typ Type) Type { + result := typ + for depth := 0; depth <= maxResolveDepth; depth++ { + switch v := (result).(type) { + case qualifier: + result = v.qualify() + case *Typedef: + result = v.Type + default: + return result + } + } + return &cycle{typ} +} + +// QualifiedType returns the type with all qualifiers removed. +func QualifiedType(typ Type) Type { + result := typ + for depth := 0; depth <= maxResolveDepth; depth++ { + switch v := (result).(type) { + case qualifier: + result = v.qualify() + default: + return result + } + } + return &cycle{typ} +} + +// As returns typ if is of type T. Otherwise it peels qualifiers and Typedefs +// until it finds a T. +// +// Returns the zero value and false if there is no T or if the type is nested +// too deeply. +func As[T Type](typ Type) (T, bool) { + // NB: We can't make this function return (*T) since then + // we can't assert that a type matches an interface which + // embeds Type: as[composite](T). + for depth := 0; depth <= maxResolveDepth; depth++ { + switch v := (typ).(type) { + case T: + return v, true + case qualifier: + typ = v.qualify() + case *Typedef: + typ = v.Type + default: + goto notFound + } + } +notFound: + var zero T + return zero, false +} + +type formatState struct { + fmt.State + depth int +} + +// formattableType is a subset of Type, to ease unit testing of formatType. +type formattableType interface { + fmt.Formatter + TypeName() string +} + +// formatType formats a type in a canonical form. +// +// Handles cyclical types by only printing cycles up to a certain depth. Elements +// in extra are separated by spaces unless the preceding element is a string +// ending in '='. +func formatType(f fmt.State, verb rune, t formattableType, extra ...interface{}) { + if verb != 'v' && verb != 's' { + fmt.Fprintf(f, "{UNRECOGNIZED: %c}", verb) + return + } + + _, _ = io.WriteString(f, internal.GoTypeName(t)) + + if name := t.TypeName(); name != "" { + // Output BTF type name if present. 
+ fmt.Fprintf(f, ":%q", name) + } + + if f.Flag('+') { + // Output address if requested. + fmt.Fprintf(f, ":%#p", t) + } + + if verb == 's' { + // %s omits details. + return + } + + var depth int + if ps, ok := f.(*formatState); ok { + depth = ps.depth + f = ps.State + } + + maxDepth, ok := f.Width() + if !ok { + maxDepth = 0 + } + + if depth > maxDepth { + // We've reached the maximum depth. This avoids infinite recursion even + // for cyclical types. + return + } + + if len(extra) == 0 { + return + } + + wantSpace := false + _, _ = io.WriteString(f, "[") + for _, arg := range extra { + if wantSpace { + _, _ = io.WriteString(f, " ") + } + + switch v := arg.(type) { + case string: + _, _ = io.WriteString(f, v) + wantSpace = len(v) > 0 && v[len(v)-1] != '=' + continue + + case formattableType: + v.Format(&formatState{f, depth + 1}, verb) + + default: + fmt.Fprint(f, arg) + } + + wantSpace = true + } + _, _ = io.WriteString(f, "]") +} diff --git a/vendor/github.com/cilium/ebpf/btf/unmarshal.go b/vendor/github.com/cilium/ebpf/btf/unmarshal.go new file mode 100644 index 000000000..26ae320d2 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/btf/unmarshal.go @@ -0,0 +1,789 @@ +package btf + +import ( + "bytes" + "encoding/binary" + "fmt" + "hash/maphash" + "io" + "iter" + "maps" + "math" + "slices" + "sync" +) + +// sharedBuf is a buffer which may be shared between multiple decoders. +// +// It must not be modified. Some sharedBuf may be backed by an mmap-ed file, in +// which case the sharedBuf has a finalizer. sharedBuf must therefore always be +// passed as a pointer. +type sharedBuf struct { + raw []byte +} + +type decoder struct { + // Immutable fields, may be shared. + + base *decoder + byteOrder binary.ByteOrder + *sharedBuf + strings *stringTable + // The ID for offsets[0]. + firstTypeID TypeID + // Map from TypeID to offset of the marshaled data in raw. Contains an entry + // for each TypeID, including 0 aka Void. The offset for Void is invalid. 
+ offsets []int + declTags map[TypeID][]TypeID + // An index from essentialName to TypeID. + namedTypes *fuzzyStringIndex + + // Protection for mutable fields below. + mu sync.Mutex + types map[TypeID]Type + typeIDs map[Type]TypeID + legacyBitfields map[TypeID][2]Bits // offset, size +} + +func newDecoder(raw []byte, bo binary.ByteOrder, strings *stringTable, base *decoder) (*decoder, error) { + firstTypeID := TypeID(0) + if base != nil { + if base.byteOrder != bo { + return nil, fmt.Errorf("can't use %v base with %v split BTF", base.byteOrder, bo) + } + + if base.firstTypeID != 0 { + return nil, fmt.Errorf("can't use split BTF as base") + } + + firstTypeID = TypeID(len(base.offsets)) + } + + var header btfType + var numTypes, numDeclTags, numNamedTypes int + + for _, err := range allBtfTypeOffsets(raw, bo, &header) { + if err != nil { + return nil, err + } + + numTypes++ + + if header.Kind() == kindDeclTag { + numDeclTags++ + } + + if header.NameOff != 0 { + numNamedTypes++ + } + } + + if firstTypeID == 0 { + // Allocate an extra slot for Void so we don't have to deal with + // constant off by one issues. + numTypes++ + } + + offsets := make([]int, 0, numTypes) + declTags := make(map[TypeID][]TypeID, numDeclTags) + namedTypes := newFuzzyStringIndex(numNamedTypes) + + if firstTypeID == 0 { + // Add a sentinel for Void. + offsets = append(offsets, math.MaxInt) + } + + id := firstTypeID + TypeID(len(offsets)) + for offset := range allBtfTypeOffsets(raw, bo, &header) { + if id < firstTypeID { + return nil, fmt.Errorf("no more type IDs") + } + + offsets = append(offsets, offset) + + if header.Kind() == kindDeclTag { + declTags[header.Type()] = append(declTags[header.Type()], id) + } + + // Build named type index. + name, err := strings.LookupBytes(header.NameOff) + if err != nil { + return nil, fmt.Errorf("lookup type name for id %v: %w", id, err) + } + + if len(name) > 0 { + if i := bytes.Index(name, []byte("___")); i != -1 { + // Flavours are rare. 
It's cheaper to find the first index for some + // reason. + i = bytes.LastIndex(name, []byte("___")) + name = name[:i] + } + + namedTypes.Add(name, id) + } + + id++ + } + + namedTypes.Build() + + return &decoder{ + base, + bo, + &sharedBuf{raw}, + strings, + firstTypeID, + offsets, + declTags, + namedTypes, + sync.Mutex{}, + make(map[TypeID]Type), + make(map[Type]TypeID), + make(map[TypeID][2]Bits), + }, nil +} + +func allBtfTypeOffsets(buf []byte, bo binary.ByteOrder, header *btfType) iter.Seq2[int, error] { + return func(yield func(int, error) bool) { + for offset := 0; offset < len(buf); { + start := offset + + n, err := unmarshalBtfType(header, buf[offset:], bo) + if err != nil { + yield(-1, fmt.Errorf("unmarshal type header: %w", err)) + return + } + offset += n + + n, err = header.DataLen() + if err != nil { + yield(-1, err) + return + } + offset += n + + if offset > len(buf) { + yield(-1, fmt.Errorf("auxiliary type data: %w", io.ErrUnexpectedEOF)) + return + } + + if !yield(start, nil) { + return + } + } + } +} + +func rebaseDecoder(d *decoder, base *decoder) (*decoder, error) { + if d.base == nil { + return nil, fmt.Errorf("rebase split spec: not a split spec") + } + + if len(d.base.raw) != len(base.raw) || (len(d.base.raw) > 0 && &d.base.raw[0] != &base.raw[0]) { + return nil, fmt.Errorf("rebase split spec: raw BTF differs") + } + + return &decoder{ + base, + d.byteOrder, + d.sharedBuf, + d.strings, + d.firstTypeID, + d.offsets, + d.declTags, + d.namedTypes, + sync.Mutex{}, + make(map[TypeID]Type), + make(map[Type]TypeID), + make(map[TypeID][2]Bits), + }, nil +} + +// Copy performs a deep copy of a decoder and its base. 
+func (d *decoder) Copy() *decoder { + if d == nil { + return nil + } + + return d.copy(nil) +} + +func (d *decoder) copy(copiedTypes map[Type]Type) *decoder { + if d == nil { + return nil + } + + d.mu.Lock() + defer d.mu.Unlock() + + if copiedTypes == nil { + copiedTypes = make(map[Type]Type, len(d.types)) + } + + types := make(map[TypeID]Type, len(d.types)) + typeIDs := make(map[Type]TypeID, len(d.typeIDs)) + for id, typ := range d.types { + types[id] = copyType(typ, d.typeIDs, copiedTypes, typeIDs) + } + + return &decoder{ + d.base.copy(copiedTypes), + d.byteOrder, + d.sharedBuf, + d.strings, + d.firstTypeID, + d.offsets, + d.declTags, + d.namedTypes, + sync.Mutex{}, + types, + typeIDs, + maps.Clone(d.legacyBitfields), + } +} + +// TypeID returns the ID for a Type previously obtained via [TypeByID]. +func (d *decoder) TypeID(typ Type) (TypeID, error) { + if _, ok := typ.(*Void); ok { + // Equality is weird for void, since it is a zero sized type. + return 0, nil + } + + d.mu.Lock() + defer d.mu.Unlock() + + id, ok := d.typeIDs[typ] + if !ok { + return 0, fmt.Errorf("no ID for type %s: %w", typ, ErrNotFound) + } + + return id, nil +} + +// TypesByName returns all types which have the given essential name. +// +// Returns ErrNotFound if no matching Type exists. +func (d *decoder) TypesByName(name essentialName) ([]Type, error) { + var types []Type + for id := range d.namedTypes.Find(string(name)) { + typ, err := d.TypeByID(id) + if err != nil { + return nil, err + } + + if newEssentialName(typ.TypeName()) == name { + // Deal with hash collisions by checking against the name. + types = append(types, typ) + } + } + + if len(types) == 0 { + // Return an unwrapped error because this is on the hot path + // for CO-RE. + return nil, ErrNotFound + } + + return types, nil +} + +// TypeByID decodes a type and any of its descendants. 
+func (d *decoder) TypeByID(id TypeID) (Type, error) { + d.mu.Lock() + defer d.mu.Unlock() + + return d.inflateType(id) +} + +func (d *decoder) inflateType(id TypeID) (typ Type, err error) { + defer func() { + if r := recover(); r != nil { + err = r.(error) + } + + // err is the return value of the enclosing function, even if an explicit + // return is used. + // See https://go.dev/ref/spec#Defer_statements + if err != nil { + // Remove partially inflated type so that d.types only contains + // fully inflated ones. + delete(d.types, id) + } else { + // Populate reverse index. + d.typeIDs[typ] = id + } + }() + + if id < d.firstTypeID { + return d.base.inflateType(id) + } + + if id == 0 { + // Void is defined to always be type ID 0, and is thus omitted from BTF. + // Fast-path because it is looked up frequently. + return (*Void)(nil), nil + } + + if typ, ok := d.types[id]; ok { + return typ, nil + } + + fixup := func(id TypeID, typ *Type) { + fixup, err := d.inflateType(id) + if err != nil { + panic(err) + } + *typ = fixup + } + + convertMembers := func(header *btfType, buf []byte) ([]Member, error) { + var bm btfMember + members := make([]Member, 0, header.Vlen()) + for i := range header.Vlen() { + n, err := unmarshalBtfMember(&bm, buf, d.byteOrder) + if err != nil { + return nil, fmt.Errorf("unmarshal member: %w", err) + } + buf = buf[n:] + + name, err := d.strings.Lookup(bm.NameOff) + if err != nil { + return nil, fmt.Errorf("can't get name for member %d: %w", i, err) + } + + members = append(members, Member{ + Name: name, + Offset: Bits(bm.Offset), + }) + + m := &members[i] + fixup(bm.Type, &m.Type) + + if header.Bitfield() { + m.BitfieldSize = Bits(bm.Offset >> 24) + m.Offset &= 0xffffff + // We ignore legacy bitfield definitions if the current composite + // is a new-style bitfield. This is kind of safe since offset and + // size on the type of the member must be zero if kindFlat is set + // according to spec. 
+ continue + } + + // This may be a legacy bitfield, try to fix it up. + data, ok := d.legacyBitfields[bm.Type] + if ok { + // Bingo! + m.Offset += data[0] + m.BitfieldSize = data[1] + continue + } + } + return members, nil + } + + idx := int(id - d.firstTypeID) + if idx >= len(d.offsets) { + return nil, fmt.Errorf("type id %v: %w", id, ErrNotFound) + } + + offset := d.offsets[idx] + if offset >= len(d.raw) { + return nil, fmt.Errorf("offset out of bounds") + } + + var ( + header btfType + bInt btfInt + bArr btfArray + bVariable btfVariable + bDeclTag btfDeclTag + pos = d.raw[offset:] + ) + + { + if n, err := unmarshalBtfType(&header, pos, d.byteOrder); err != nil { + return nil, fmt.Errorf("can't unmarshal type info for id %v: %v", id, err) + } else { + pos = pos[n:] + } + + name, err := d.strings.Lookup(header.NameOff) + if err != nil { + return nil, fmt.Errorf("get name for type id %d: %w", id, err) + } + + switch header.Kind() { + case kindInt: + size := header.Size() + if _, err := unmarshalBtfInt(&bInt, pos, d.byteOrder); err != nil { + return nil, fmt.Errorf("can't unmarshal btfInt, id: %d: %w", id, err) + } + if bInt.Offset() > 0 || bInt.Bits().Bytes() != size { + d.legacyBitfields[id] = [2]Bits{bInt.Offset(), bInt.Bits()} + } + typ = &Int{name, header.Size(), bInt.Encoding()} + d.types[id] = typ + + case kindPointer: + ptr := &Pointer{nil} + d.types[id] = ptr + + fixup(header.Type(), &ptr.Target) + typ = ptr + + case kindArray: + if _, err := unmarshalBtfArray(&bArr, pos, d.byteOrder); err != nil { + return nil, fmt.Errorf("can't unmarshal btfArray, id: %d: %w", id, err) + } + + arr := &Array{nil, nil, bArr.Nelems} + d.types[id] = arr + + fixup(bArr.IndexType, &arr.Index) + fixup(bArr.Type, &arr.Type) + typ = arr + + case kindStruct: + str := &Struct{name, header.Size(), nil, nil} + d.types[id] = str + typ = str + + str.Members, err = convertMembers(&header, pos) + if err != nil { + return nil, fmt.Errorf("struct %s (id %d): %w", name, id, err) + } + + 
case kindUnion: + uni := &Union{name, header.Size(), nil, nil} + d.types[id] = uni + typ = uni + + uni.Members, err = convertMembers(&header, pos) + if err != nil { + return nil, fmt.Errorf("union %s (id %d): %w", name, id, err) + } + + case kindEnum: + enum := &Enum{name, header.Size(), header.Signed(), nil} + d.types[id] = enum + typ = enum + + var be btfEnum + enum.Values = make([]EnumValue, 0, header.Vlen()) + for i := range header.Vlen() { + n, err := unmarshalBtfEnum(&be, pos, d.byteOrder) + if err != nil { + return nil, fmt.Errorf("unmarshal btfEnum %d, id: %d: %w", i, id, err) + } + pos = pos[n:] + + name, err := d.strings.Lookup(be.NameOff) + if err != nil { + return nil, fmt.Errorf("get name for enum value %d: %s", i, err) + } + + value := uint64(be.Val) + if enum.Signed { + // Sign extend values to 64 bit. + value = uint64(int32(be.Val)) + } + enum.Values = append(enum.Values, EnumValue{name, value}) + } + + case kindForward: + typ = &Fwd{name, header.FwdKind()} + d.types[id] = typ + + case kindTypedef: + typedef := &Typedef{name, nil, nil} + d.types[id] = typedef + + fixup(header.Type(), &typedef.Type) + typ = typedef + + case kindVolatile: + volatile := &Volatile{nil} + d.types[id] = volatile + + fixup(header.Type(), &volatile.Type) + typ = volatile + + case kindConst: + cnst := &Const{nil} + d.types[id] = cnst + + fixup(header.Type(), &cnst.Type) + typ = cnst + + case kindRestrict: + restrict := &Restrict{nil} + d.types[id] = restrict + + fixup(header.Type(), &restrict.Type) + typ = restrict + + case kindFunc: + fn := &Func{name, nil, header.Linkage(), nil, nil} + d.types[id] = fn + + fixup(header.Type(), &fn.Type) + typ = fn + + case kindFuncProto: + fp := &FuncProto{} + d.types[id] = fp + + params := make([]FuncParam, 0, header.Vlen()) + var bParam btfParam + for i := range header.Vlen() { + n, err := unmarshalBtfParam(&bParam, pos, d.byteOrder) + if err != nil { + return nil, fmt.Errorf("can't unmarshal btfParam %d, id: %d: %w", i, id, err) + } + 
pos = pos[n:] + + name, err := d.strings.Lookup(bParam.NameOff) + if err != nil { + return nil, fmt.Errorf("get name for func proto parameter %d: %s", i, err) + } + + param := FuncParam{Name: name} + fixup(bParam.Type, ¶m.Type) + params = append(params, param) + } + + fixup(header.Type(), &fp.Return) + fp.Params = params + typ = fp + + case kindVar: + if _, err := unmarshalBtfVariable(&bVariable, pos, d.byteOrder); err != nil { + return nil, fmt.Errorf("can't read btfVariable, id: %d: %w", id, err) + } + + v := &Var{name, nil, VarLinkage(bVariable.Linkage), nil} + d.types[id] = v + + fixup(header.Type(), &v.Type) + typ = v + + case kindDatasec: + ds := &Datasec{name, header.Size(), nil} + d.types[id] = ds + + vlen := header.Vlen() + vars := make([]VarSecinfo, 0, vlen) + var bSecInfo btfVarSecinfo + for i := 0; i < vlen; i++ { + n, err := unmarshalBtfVarSecInfo(&bSecInfo, pos, d.byteOrder) + if err != nil { + return nil, fmt.Errorf("can't unmarshal btfVarSecinfo %d, id: %d: %w", i, id, err) + } + pos = pos[n:] + + vs := VarSecinfo{ + Offset: bSecInfo.Offset, + Size: bSecInfo.Size, + } + fixup(bSecInfo.Type, &vs.Type) + vars = append(vars, vs) + } + ds.Vars = vars + typ = ds + + case kindFloat: + typ = &Float{name, header.Size()} + d.types[id] = typ + + case kindDeclTag: + if _, err := unmarshalBtfDeclTag(&bDeclTag, pos, d.byteOrder); err != nil { + return nil, fmt.Errorf("can't read btfDeclTag, id: %d: %w", id, err) + } + + btfIndex := bDeclTag.ComponentIdx + if uint64(btfIndex) > math.MaxInt { + return nil, fmt.Errorf("type id %d: index exceeds int", id) + } + + dt := &declTag{nil, name, int(int32(btfIndex))} + d.types[id] = dt + + fixup(header.Type(), &dt.Type) + typ = dt + + case kindTypeTag: + tt := &TypeTag{nil, name} + d.types[id] = tt + + fixup(header.Type(), &tt.Type) + typ = tt + + case kindEnum64: + enum := &Enum{name, header.Size(), header.Signed(), nil} + d.types[id] = enum + typ = enum + + enum.Values = make([]EnumValue, 0, header.Vlen()) + var bEnum64 
btfEnum64 + for i := range header.Vlen() { + n, err := unmarshalBtfEnum64(&bEnum64, pos, d.byteOrder) + if err != nil { + return nil, fmt.Errorf("can't unmarshal btfEnum64 %d, id: %d: %w", i, id, err) + } + pos = pos[n:] + + name, err := d.strings.Lookup(bEnum64.NameOff) + if err != nil { + return nil, fmt.Errorf("get name for enum64 value %d: %s", i, err) + } + value := (uint64(bEnum64.ValHi32) << 32) | uint64(bEnum64.ValLo32) + enum.Values = append(enum.Values, EnumValue{name, value}) + } + + default: + return nil, fmt.Errorf("type id %d: unknown kind: %v", id, header.Kind()) + } + } + + for _, tagID := range d.declTags[id] { + dtType, err := d.inflateType(tagID) + if err != nil { + return nil, err + } + + dt, ok := dtType.(*declTag) + if !ok { + return nil, fmt.Errorf("type id %v: not a declTag", tagID) + } + + switch t := typ.(type) { + case *Var: + if dt.Index != -1 { + return nil, fmt.Errorf("type %s: component idx %d is not -1", dt, dt.Index) + } + t.Tags = append(t.Tags, dt.Value) + + case *Typedef: + if dt.Index != -1 { + return nil, fmt.Errorf("type %s: component idx %d is not -1", dt, dt.Index) + } + t.Tags = append(t.Tags, dt.Value) + + case composite: + if dt.Index >= 0 { + members := t.members() + if dt.Index >= len(members) { + return nil, fmt.Errorf("type %s: component idx %d exceeds members of %s", dt, dt.Index, t) + } + + members[dt.Index].Tags = append(members[dt.Index].Tags, dt.Value) + } else if dt.Index == -1 { + switch t2 := t.(type) { + case *Struct: + t2.Tags = append(t2.Tags, dt.Value) + case *Union: + t2.Tags = append(t2.Tags, dt.Value) + } + } else { + return nil, fmt.Errorf("type %s: decl tag for type %s has invalid component idx", dt, t) + } + + case *Func: + fp, ok := t.Type.(*FuncProto) + if !ok { + return nil, fmt.Errorf("type %s: %s is not a FuncProto", dt, t.Type) + } + + // Ensure the number of argument tag lists equals the number of arguments + if len(t.ParamTags) == 0 { + t.ParamTags = make([][]string, len(fp.Params)) + } + + 
if dt.Index >= 0 { + if dt.Index >= len(fp.Params) { + return nil, fmt.Errorf("type %s: component idx %d exceeds params of %s", dt, dt.Index, t) + } + + t.ParamTags[dt.Index] = append(t.ParamTags[dt.Index], dt.Value) + } else if dt.Index == -1 { + t.Tags = append(t.Tags, dt.Value) + } else { + return nil, fmt.Errorf("type %s: decl tag for type %s has invalid component idx", dt, t) + } + + default: + return nil, fmt.Errorf("type %s: decl tag for type %s is not supported", dt, t) + } + } + + return typ, nil +} + +// An index from string to TypeID. +// +// Fuzzy because it may return false positive matches. +type fuzzyStringIndex struct { + seed maphash.Seed + entries []fuzzyStringIndexEntry +} + +func newFuzzyStringIndex(capacity int) *fuzzyStringIndex { + return &fuzzyStringIndex{ + maphash.MakeSeed(), + make([]fuzzyStringIndexEntry, 0, capacity), + } +} + +// Add a string to the index. +// +// Calling the method with identical arguments will create duplicate entries. +func (idx *fuzzyStringIndex) Add(name []byte, id TypeID) { + hash := uint32(maphash.Bytes(idx.seed, name)) + idx.entries = append(idx.entries, newFuzzyStringIndexEntry(hash, id)) +} + +// Build the index. +// +// Must be called after [Add] and before [Match]. +func (idx *fuzzyStringIndex) Build() { + slices.Sort(idx.entries) +} + +// Find TypeIDs which may match the name. +// +// May return false positives, but is guaranteed to not have false negatives. +// +// You must call [Build] at least once before calling this method. +func (idx *fuzzyStringIndex) Find(name string) iter.Seq[TypeID] { + return func(yield func(TypeID) bool) { + hash := uint32(maphash.String(idx.seed, name)) + + // We match only on the first 32 bits here, so ignore found. 
+ i, _ := slices.BinarySearch(idx.entries, fuzzyStringIndexEntry(hash)<<32) + for i := i; i < len(idx.entries); i++ { + if idx.entries[i].hash() != hash { + break + } + + if !yield(idx.entries[i].id()) { + return + } + } + } +} + +// Tuple mapping the hash of an essential name to a type. +// +// Encoded in an uint64 so that it implements cmp.Ordered. +type fuzzyStringIndexEntry uint64 + +func newFuzzyStringIndexEntry(hash uint32, id TypeID) fuzzyStringIndexEntry { + return fuzzyStringIndexEntry(hash)<<32 | fuzzyStringIndexEntry(id) +} + +func (e fuzzyStringIndexEntry) hash() uint32 { + return uint32(e >> 32) +} + +func (e fuzzyStringIndexEntry) id() TypeID { + return TypeID(e) +} diff --git a/vendor/github.com/cilium/ebpf/btf/workarounds.go b/vendor/github.com/cilium/ebpf/btf/workarounds.go new file mode 100644 index 000000000..eb09047fb --- /dev/null +++ b/vendor/github.com/cilium/ebpf/btf/workarounds.go @@ -0,0 +1,26 @@ +package btf + +// datasecResolveWorkaround ensures that certain vars in a Datasec are added +// to a Spec before the Datasec. This avoids a bug in kernel BTF validation. +// +// See https://lore.kernel.org/bpf/20230302123440.1193507-1-lmb@isovalent.com/ +func datasecResolveWorkaround(b *Builder, ds *Datasec) error { + for _, vsi := range ds.Vars { + v, ok := vsi.Type.(*Var) + if !ok { + continue + } + + switch v.Type.(type) { + case *Typedef, *Volatile, *Const, *Restrict, *TypeTag: + // NB: We must never call Add on a Datasec, otherwise we risk + // infinite recursion. 
+ _, err := b.Add(v.Type) + if err != nil { + return err + } + } + } + + return nil +} diff --git a/vendor/github.com/cilium/ebpf/collection.go b/vendor/github.com/cilium/ebpf/collection.go new file mode 100644 index 000000000..f99f354d4 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/collection.go @@ -0,0 +1,1154 @@ +package ebpf + +import ( + "encoding/binary" + "errors" + "fmt" + "path/filepath" + "reflect" + "runtime" + "slices" + "strings" + + "github.com/cilium/ebpf/asm" + "github.com/cilium/ebpf/btf" + "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/kallsyms" + "github.com/cilium/ebpf/internal/kconfig" + "github.com/cilium/ebpf/internal/linux" + "github.com/cilium/ebpf/internal/platform" + "github.com/cilium/ebpf/internal/sys" +) + +// CollectionOptions control loading a collection into the kernel. +// +// Maps and Programs are passed to NewMapWithOptions and NewProgramsWithOptions. +type CollectionOptions struct { + Maps MapOptions + Programs ProgramOptions + + // MapReplacements takes a set of Maps that will be used instead of + // creating new ones when loading the CollectionSpec. + // + // For each given Map, there must be a corresponding MapSpec in + // CollectionSpec.Maps, and its type, key/value size, max entries and flags + // must match the values of the MapSpec. + // + // The given Maps are Clone()d before being used in the Collection, so the + // caller can Close() them freely when they are no longer needed. + MapReplacements map[string]*Map +} + +// CollectionSpec describes a collection. +type CollectionSpec struct { + Maps map[string]*MapSpec + Programs map[string]*ProgramSpec + + // Variables refer to global variables declared in the ELF. They can be read + // and modified freely before loading the Collection. Modifying them after + // loading has no effect on a running eBPF program. + Variables map[string]*VariableSpec + + // Types holds type information about Maps and Programs. 
+ // Modifications to Types are currently undefined behaviour. + Types *btf.Spec + + // ByteOrder specifies whether the ELF was compiled for + // big-endian or little-endian architectures. + ByteOrder binary.ByteOrder +} + +// Copy returns a recursive copy of the spec. +func (cs *CollectionSpec) Copy() *CollectionSpec { + if cs == nil { + return nil + } + + cpy := CollectionSpec{ + Maps: copyMapOfSpecs(cs.Maps), + Programs: copyMapOfSpecs(cs.Programs), + Variables: make(map[string]*VariableSpec, len(cs.Variables)), + ByteOrder: cs.ByteOrder, + Types: cs.Types.Copy(), + } + + for name, spec := range cs.Variables { + cpy.Variables[name] = spec.copy(&cpy) + } + if cs.Variables == nil { + cpy.Variables = nil + } + + return &cpy +} + +func copyMapOfSpecs[T interface{ Copy() T }](m map[string]T) map[string]T { + if m == nil { + return nil + } + + cpy := make(map[string]T, len(m)) + for k, v := range m { + cpy[k] = v.Copy() + } + + return cpy +} + +// RewriteMaps replaces all references to specific maps. +// +// Use this function to use pre-existing maps instead of creating new ones +// when calling NewCollection. Any named maps are removed from CollectionSpec.Maps. +// +// Returns an error if a named map isn't used in at least one program. +// +// Deprecated: Pass CollectionOptions.MapReplacements when loading the Collection +// instead. 
+func (cs *CollectionSpec) RewriteMaps(maps map[string]*Map) error { + for symbol, m := range maps { + // have we seen a program that uses this symbol / map + seen := false + for progName, progSpec := range cs.Programs { + err := progSpec.Instructions.AssociateMap(symbol, m) + + switch { + case err == nil: + seen = true + + case errors.Is(err, asm.ErrUnreferencedSymbol): + // Not all programs need to use the map + + default: + return fmt.Errorf("program %s: %w", progName, err) + } + } + + if !seen { + return fmt.Errorf("map %s not referenced by any programs", symbol) + } + + // Prevent NewCollection from creating rewritten maps + delete(cs.Maps, symbol) + } + + return nil +} + +// MissingConstantsError is returned by [CollectionSpec.RewriteConstants]. +type MissingConstantsError struct { + // The constants missing from .rodata. + Constants []string +} + +func (m *MissingConstantsError) Error() string { + return fmt.Sprintf("some constants are missing from .rodata: %s", strings.Join(m.Constants, ", ")) +} + +// RewriteConstants replaces the value of multiple constants. +// +// The constant must be defined like so in the C program: +// +// volatile const type foobar; +// volatile const type foobar = default; +// +// Replacement values must be of the same length as the C sizeof(type). +// If necessary, they are marshalled according to the same rules as +// map values. +// +// From Linux 5.5 the verifier will use constants to eliminate dead code. +// +// Returns an error wrapping [MissingConstantsError] if a constant doesn't exist. +// +// Deprecated: Use [CollectionSpec.Variables] to interact with constants instead. +// RewriteConstants is now a wrapper around the VariableSpec API. 
+func (cs *CollectionSpec) RewriteConstants(consts map[string]interface{}) error { + var missing []string + for n, c := range consts { + v, ok := cs.Variables[n] + if !ok { + missing = append(missing, n) + continue + } + + if !v.Constant() { + return fmt.Errorf("variable %s is not a constant", n) + } + + if err := v.Set(c); err != nil { + return fmt.Errorf("rewriting constant %s: %w", n, err) + } + } + + if len(missing) != 0 { + return fmt.Errorf("rewrite constants: %w", &MissingConstantsError{Constants: missing}) + } + + return nil +} + +// Assign the contents of a CollectionSpec to a struct. +// +// This function is a shortcut to manually checking the presence +// of maps and programs in a CollectionSpec. Consider using bpf2go +// if this sounds useful. +// +// 'to' must be a pointer to a struct. A field of the +// struct is updated with values from Programs, Maps or Variables if it +// has an `ebpf` tag and its type is *ProgramSpec, *MapSpec or *VariableSpec. +// The tag's value specifies the name of the program or map as +// found in the CollectionSpec. +// +// struct { +// Foo *ebpf.ProgramSpec `ebpf:"xdp_foo"` +// Bar *ebpf.MapSpec `ebpf:"bar_map"` +// Var *ebpf.VariableSpec `ebpf:"some_var"` +// Ignored int +// } +// +// Returns an error if any of the eBPF objects can't be found, or +// if the same Spec is assigned multiple times. 
+func (cs *CollectionSpec) Assign(to interface{}) error { + getValue := func(typ reflect.Type, name string) (interface{}, error) { + switch typ { + case reflect.TypeOf((*ProgramSpec)(nil)): + if p := cs.Programs[name]; p != nil { + return p, nil + } + return nil, fmt.Errorf("missing program %q", name) + + case reflect.TypeOf((*MapSpec)(nil)): + if m := cs.Maps[name]; m != nil { + return m, nil + } + return nil, fmt.Errorf("missing map %q", name) + + case reflect.TypeOf((*VariableSpec)(nil)): + if v := cs.Variables[name]; v != nil { + return v, nil + } + return nil, fmt.Errorf("missing variable %q", name) + + default: + return nil, fmt.Errorf("unsupported type %s", typ) + } + } + + return assignValues(to, getValue) +} + +// LoadAndAssign loads Maps and Programs into the kernel and assigns them +// to a struct. +// +// Omitting Map/Program.Close() during application shutdown is an error. +// See the package documentation for details around Map and Program lifecycle. +// +// This function is a shortcut to manually checking the presence +// of maps and programs in a CollectionSpec. Consider using bpf2go +// if this sounds useful. +// +// 'to' must be a pointer to a struct. A field of the struct is updated with +// a Program or Map if it has an `ebpf` tag and its type is *Program or *Map. +// The tag's value specifies the name of the program or map as found in the +// CollectionSpec. Before updating the struct, the requested objects and their +// dependent resources are loaded into the kernel and populated with values if +// specified. +// +// struct { +// Foo *ebpf.Program `ebpf:"xdp_foo"` +// Bar *ebpf.Map `ebpf:"bar_map"` +// Ignored int +// } +// +// opts may be nil. +// +// Returns an error if any of the fields can't be found, or +// if the same Map or Program is assigned multiple times. 
+func (cs *CollectionSpec) LoadAndAssign(to interface{}, opts *CollectionOptions) error { + loader, err := newCollectionLoader(cs, opts) + if err != nil { + return err + } + defer loader.close() + + // Support assigning Programs and Maps, lazy-loading the required objects. + assignedMaps := make(map[string]bool) + assignedProgs := make(map[string]bool) + assignedVars := make(map[string]bool) + + getValue := func(typ reflect.Type, name string) (interface{}, error) { + switch typ { + + case reflect.TypeOf((*Program)(nil)): + assignedProgs[name] = true + return loader.loadProgram(name) + + case reflect.TypeOf((*Map)(nil)): + assignedMaps[name] = true + return loader.loadMap(name) + + case reflect.TypeOf((*Variable)(nil)): + assignedVars[name] = true + return loader.loadVariable(name) + + default: + return nil, fmt.Errorf("unsupported type %s", typ) + } + } + + // Load the Maps and Programs requested by the annotated struct. + if err := assignValues(to, getValue); err != nil { + return err + } + + // Populate the requested maps. Has a chance of lazy-loading other dependent maps. + if err := loader.populateDeferredMaps(); err != nil { + return err + } + + // Evaluate the loader's objects after all (lazy)loading has taken place. + for n, m := range loader.maps { + if m.typ.canStoreProgram() { + // Require all lazy-loaded ProgramArrays to be assigned to the given object. + // The kernel empties a ProgramArray once the last user space reference + // to it closes, which leads to failed tail calls. Combined with the library + // closing map fds via GC finalizers this can lead to surprising behaviour. + // Only allow unassigned ProgramArrays when the library hasn't pre-populated + // any entries from static value declarations. At this point, we know the map + // is empty and there's no way for the caller to interact with the map going + // forward. 
+ if !assignedMaps[n] && len(cs.Maps[n].Contents) > 0 { + return fmt.Errorf("ProgramArray %s must be assigned to prevent missed tail calls", n) + } + } + } + + // Prevent loader.cleanup() from closing assigned Maps and Programs. + for m := range assignedMaps { + delete(loader.maps, m) + } + for p := range assignedProgs { + delete(loader.programs, p) + } + for p := range assignedVars { + delete(loader.vars, p) + } + + return nil +} + +// Collection is a collection of live BPF resources present in the kernel. +type Collection struct { + Programs map[string]*Program + Maps map[string]*Map + + // Variables contains global variables used by the Collection's program(s). On + // kernels older than 5.5, most interactions with Variables return + // [ErrNotSupported]. + Variables map[string]*Variable +} + +// NewCollection creates a Collection from the given spec, creating and +// loading its declared resources into the kernel. +// +// Omitting Collection.Close() during application shutdown is an error. +// See the package documentation for details around Map and Program lifecycle. +func NewCollection(spec *CollectionSpec) (*Collection, error) { + return NewCollectionWithOptions(spec, CollectionOptions{}) +} + +// NewCollectionWithOptions creates a Collection from the given spec using +// options, creating and loading its declared resources into the kernel. +// +// Omitting Collection.Close() during application shutdown is an error. +// See the package documentation for details around Map and Program lifecycle. +func NewCollectionWithOptions(spec *CollectionSpec, opts CollectionOptions) (*Collection, error) { + loader, err := newCollectionLoader(spec, &opts) + if err != nil { + return nil, err + } + defer loader.close() + + // Create maps first, as their fds need to be linked into programs. 
+ for mapName := range spec.Maps { + if _, err := loader.loadMap(mapName); err != nil { + return nil, err + } + } + + for progName, prog := range spec.Programs { + if prog.Type == UnspecifiedProgram { + continue + } + + if _, err := loader.loadProgram(progName); err != nil { + return nil, err + } + } + + for varName := range spec.Variables { + if _, err := loader.loadVariable(varName); err != nil { + return nil, err + } + } + + // Maps can contain Program and Map stubs, so populate them after + // all Maps and Programs have been successfully loaded. + if err := loader.populateDeferredMaps(); err != nil { + return nil, err + } + + // Prevent loader.cleanup from closing maps, programs and vars. + maps, progs, vars := loader.maps, loader.programs, loader.vars + loader.maps, loader.programs, loader.vars = nil, nil, nil + + return &Collection{ + progs, + maps, + vars, + }, nil +} + +type collectionLoader struct { + coll *CollectionSpec + opts *CollectionOptions + maps map[string]*Map + programs map[string]*Program + vars map[string]*Variable + types *btf.Cache +} + +func newCollectionLoader(coll *CollectionSpec, opts *CollectionOptions) (*collectionLoader, error) { + if opts == nil { + opts = &CollectionOptions{} + } + + // Check for existing MapSpecs in the CollectionSpec for all provided replacement maps. + for name := range opts.MapReplacements { + if _, ok := coll.Maps[name]; !ok { + return nil, fmt.Errorf("replacement map %s not found in CollectionSpec", name) + } + } + + if err := populateKallsyms(coll.Programs); err != nil { + return nil, fmt.Errorf("populating kallsyms caches: %w", err) + } + + return &collectionLoader{ + coll, + opts, + make(map[string]*Map), + make(map[string]*Program), + make(map[string]*Variable), + btf.NewCache(), + }, nil +} + +// populateKallsyms populates kallsyms caches, making lookups cheaper later on +// during individual program loading. 
Since we have less context available +// at those stages, we batch the lookups here instead to avoid redundant work. +func populateKallsyms(progs map[string]*ProgramSpec) error { + // Look up addresses of all kernel symbols referenced by all programs. + addrs := make(map[string]uint64) + for _, p := range progs { + iter := p.Instructions.Iterate() + for iter.Next() { + ins := iter.Ins + meta, _ := ins.Metadata.Get(ksymMetaKey{}).(*ksymMeta) + if meta != nil { + addrs[meta.Name] = 0 + } + } + } + if len(addrs) != 0 { + if err := kallsyms.AssignAddresses(addrs); err != nil { + return fmt.Errorf("getting addresses from kallsyms: %w", err) + } + } + + return nil +} + +// close all resources left over in the collectionLoader. +func (cl *collectionLoader) close() { + for _, m := range cl.maps { + m.Close() + } + for _, p := range cl.programs { + p.Close() + } +} + +func (cl *collectionLoader) loadMap(mapName string) (*Map, error) { + if m := cl.maps[mapName]; m != nil { + return m, nil + } + + mapSpec := cl.coll.Maps[mapName] + if mapSpec == nil { + return nil, fmt.Errorf("missing map %s", mapName) + } + + mapSpec = mapSpec.Copy() + + // Defer setting the mmapable flag on maps until load time. This avoids the + // MapSpec having different flags on some kernel versions. Also avoid running + // syscalls during ELF loading, so platforms like wasm can also parse an ELF. + if isDataSection(mapSpec.Name) && haveMmapableMaps() == nil { + mapSpec.Flags |= sys.BPF_F_MMAPABLE + } + + if replaceMap, ok := cl.opts.MapReplacements[mapName]; ok { + // Check compatibility with the replacement map after setting + // feature-dependent map flags. + if err := mapSpec.Compatible(replaceMap); err != nil { + return nil, fmt.Errorf("using replacement map %s: %w", mapSpec.Name, err) + } + + // Clone the map to avoid closing user's map later on. 
+ m, err := replaceMap.Clone() + if err != nil { + return nil, err + } + + cl.maps[mapName] = m + return m, nil + } + + m, err := newMapWithOptions(mapSpec, cl.opts.Maps, cl.types) + if err != nil { + return nil, fmt.Errorf("map %s: %w", mapName, err) + } + + // Finalize 'scalar' maps that don't refer to any other eBPF resources + // potentially pending creation. This is needed for frozen maps like .rodata + // that need to be finalized before invoking the verifier. + if !mapSpec.Type.canStoreMapOrProgram() { + if err := m.finalize(mapSpec); err != nil { + _ = m.Close() + return nil, fmt.Errorf("finalizing map %s: %w", mapName, err) + } + } + + cl.maps[mapName] = m + return m, nil +} + +func (cl *collectionLoader) loadProgram(progName string) (*Program, error) { + if prog := cl.programs[progName]; prog != nil { + return prog, nil + } + + progSpec := cl.coll.Programs[progName] + if progSpec == nil { + return nil, fmt.Errorf("unknown program %s", progName) + } + + // Bail out early if we know the kernel is going to reject the program. + // This skips loading map dependencies, saving some cleanup work later. + if progSpec.Type == UnspecifiedProgram { + return nil, fmt.Errorf("cannot load program %s: program type is unspecified", progName) + } + + progSpec = progSpec.Copy() + + // Rewrite any reference to a valid map in the program's instructions, + // which includes all of its dependencies. + for i := range progSpec.Instructions { + ins := &progSpec.Instructions[i] + + if !ins.IsLoadFromMap() || ins.Reference() == "" { + continue + } + + // Don't overwrite map loads containing non-zero map fd's, + // they can be manually included by the caller. + // Map FDs/IDs are placed in the lower 32 bits of Constant. 
+ if int32(ins.Constant) > 0 { + continue + } + + m, err := cl.loadMap(ins.Reference()) + if err != nil { + return nil, fmt.Errorf("program %s: %w", progName, err) + } + + if err := ins.AssociateMap(m); err != nil { + return nil, fmt.Errorf("program %s: map %s: %w", progName, ins.Reference(), err) + } + } + + prog, err := newProgramWithOptions(progSpec, cl.opts.Programs, cl.types) + if err != nil { + return nil, fmt.Errorf("program %s: %w", progName, err) + } + + cl.programs[progName] = prog + + return prog, nil +} + +func (cl *collectionLoader) loadVariable(varName string) (*Variable, error) { + if v := cl.vars[varName]; v != nil { + return v, nil + } + + varSpec := cl.coll.Variables[varName] + if varSpec == nil { + return nil, fmt.Errorf("unknown variable %s", varName) + } + + // Get the key of the VariableSpec's MapSpec in the CollectionSpec. + var mapName string + for n, ms := range cl.coll.Maps { + if ms == varSpec.m { + mapName = n + break + } + } + if mapName == "" { + return nil, fmt.Errorf("variable %s: underlying MapSpec %s was removed from CollectionSpec", varName, varSpec.m.Name) + } + + m, err := cl.loadMap(mapName) + if err != nil { + return nil, fmt.Errorf("variable %s: %w", varName, err) + } + + // If the kernel is too old or the underlying map was created without + // BPF_F_MMAPABLE, [Map.Memory] will return ErrNotSupported. In this case, + // emit a Variable with a nil Memory. This keeps Collection{Spec}.Variables + // consistent across systems with different feature sets without breaking + // LoadAndAssign. 
+ var mm *Memory + if unsafeMemory { + mm, err = m.unsafeMemory() + } else { + mm, err = m.Memory() + } + if err != nil && !errors.Is(err, ErrNotSupported) { + return nil, fmt.Errorf("variable %s: getting memory for map %s: %w", varName, mapName, err) + } + + v, err := newVariable( + varSpec.name, + varSpec.offset, + varSpec.size, + varSpec.t, + mm, + ) + if err != nil { + return nil, fmt.Errorf("variable %s: %w", varName, err) + } + + cl.vars[varName] = v + return v, nil +} + +// populateDeferredMaps iterates maps holding programs or other maps and loads +// any dependencies. Populates all maps in cl and freezes them if specified. +func (cl *collectionLoader) populateDeferredMaps() error { + for mapName, m := range cl.maps { + mapSpec, ok := cl.coll.Maps[mapName] + if !ok { + return fmt.Errorf("missing map spec %s", mapName) + } + + // Scalar maps without Map or Program references are finalized during + // creation. Don't finalize them again. + if !mapSpec.Type.canStoreMapOrProgram() { + continue + } + + mapSpec = mapSpec.Copy() + + // MapSpecs that refer to inner maps or programs within the same + // CollectionSpec do so using strings. These strings are used as the key + // to look up the respective object in the Maps or Programs fields. + // Resolve those references to actual Map or Program resources that + // have been loaded into the kernel. + for i, kv := range mapSpec.Contents { + objName, ok := kv.Value.(string) + if !ok { + continue + } + + switch t := mapSpec.Type; { + case t.canStoreProgram(): + // loadProgram is idempotent and could return an existing Program. + prog, err := cl.loadProgram(objName) + if err != nil { + return fmt.Errorf("loading program %s, for map %s: %w", objName, mapName, err) + } + mapSpec.Contents[i] = MapKV{kv.Key, prog} + + case t.canStoreMap(): + // loadMap is idempotent and could return an existing Map. 
+ innerMap, err := cl.loadMap(objName) + if err != nil { + return fmt.Errorf("loading inner map %s, for map %s: %w", objName, mapName, err) + } + mapSpec.Contents[i] = MapKV{kv.Key, innerMap} + } + } + + if mapSpec.Type == StructOpsMap { + // populate StructOps data into `kernVData` + if err := cl.populateStructOps(m, mapSpec); err != nil { + return err + } + } + + // Populate and freeze the map if specified. + if err := m.finalize(mapSpec); err != nil { + return fmt.Errorf("populating map %s: %w", mapName, err) + } + } + + return nil +} + +// populateStructOps translates the user struct bytes into the kernel value struct +// layout for a struct_ops map and writes the result back to mapSpec.Contents[0]. +func (cl *collectionLoader) populateStructOps(m *Map, mapSpec *MapSpec) error { + userType, ok := btf.As[*btf.Struct](mapSpec.Value) + if !ok { + return fmt.Errorf("value should be a *Struct") + } + + userData, ok := mapSpec.Contents[0].Value.([]byte) + if !ok { + return fmt.Errorf("value should be an array of byte") + } + if len(userData) < int(userType.Size) { + return fmt.Errorf("user data too short: have %d, need at least %d", len(userData), userType.Size) + } + + vType, _, module, err := structOpsFindTarget(userType, cl.types) + if err != nil { + return fmt.Errorf("struct_ops value type %q: %w", userType.Name, err) + } + defer module.Close() + + // Find the inner ops struct embedded in the value struct. + kType, kTypeOff, err := structOpsFindInnerType(vType) + if err != nil { + return err + } + + kernVData := make([]byte, int(vType.Size)) + for _, m := range userType.Members { + i := slices.IndexFunc(kType.Members, func(km btf.Member) bool { + return km.Name == m.Name + }) + + // Allow field to not exist in target as long as the source is zero. 
+ if i == -1 { + mSize, err := btf.Sizeof(m.Type) + if err != nil { + return fmt.Errorf("sizeof(user.%s): %w", m.Name, err) + } + srcOff := int(m.Offset.Bytes()) + if srcOff < 0 || srcOff+mSize > len(userData) { + return fmt.Errorf("member %q: userdata is too small", m.Name) + } + + // let fail if the field in type user type is missing in type kern type + if !structOpsIsMemZeroed(userData[srcOff : srcOff+mSize]) { + return fmt.Errorf("%s doesn't exist in %s, but it has non-zero value", m.Name, kType.Name) + } + + continue + } + + km := kType.Members[i] + + switch btf.UnderlyingType(m.Type).(type) { + case *btf.Pointer: + // If this is a pointer → resolve struct_ops program. + psKey := kType.Name + ":" + m.Name + for k, ps := range cl.coll.Programs { + if ps.AttachTo == psKey { + p, ok := cl.programs[k] + if !ok || p == nil { + return nil + } + if err := structOpsPopulateValue(km, kernVData[kTypeOff:], p); err != nil { + return err + } + } + } + + default: + // Otherwise → memcpy the field contents. + if err := structOpsCopyMember(m, km, userData, kernVData[kTypeOff:]); err != nil { + return fmt.Errorf("field %s: %w", kType.Name, err) + } + } + } + + // Populate the map explicitly and keep a reference on cl.programs. + // This is necessary because we may inline fds into kernVData which + // may become invalid if the GC frees them. + if err := m.Put(uint32(0), kernVData); err != nil { + return err + } + mapSpec.Contents = nil + runtime.KeepAlive(cl.programs) + + return nil +} + +// resolveKconfig resolves all variables declared in .kconfig and populates +// m.Contents. Does nothing if the given m.Contents is non-empty. 
+func resolveKconfig(m *MapSpec) error { + ds, ok := m.Value.(*btf.Datasec) + if !ok { + return errors.New("map value is not a Datasec") + } + + if platform.IsWindows { + return fmt.Errorf(".kconfig: %w", internal.ErrNotSupportedOnOS) + } + + type configInfo struct { + offset uint32 + size uint32 + typ btf.Type + } + + configs := make(map[string]configInfo) + + data := make([]byte, ds.Size) + for _, vsi := range ds.Vars { + v := vsi.Type.(*btf.Var) + n := v.TypeName() + + switch n { + case "LINUX_KERNEL_VERSION": + if integer, ok := v.Type.(*btf.Int); !ok || integer.Size != 4 { + return fmt.Errorf("variable %s must be a 32 bits integer, got %s", n, v.Type) + } + + kv, err := linux.KernelVersion() + if err != nil { + return fmt.Errorf("getting kernel version: %w", err) + } + internal.NativeEndian.PutUint32(data[vsi.Offset:], kv.Kernel()) + + case "LINUX_HAS_SYSCALL_WRAPPER": + integer, ok := v.Type.(*btf.Int) + if !ok { + return fmt.Errorf("variable %s must be an integer, got %s", n, v.Type) + } + var value uint64 = 1 + if err := haveSyscallWrapper(); errors.Is(err, ErrNotSupported) { + value = 0 + } else if err != nil { + return fmt.Errorf("unable to derive a value for LINUX_HAS_SYSCALL_WRAPPER: %w", err) + } + + if err := kconfig.PutInteger(data[vsi.Offset:], integer, value); err != nil { + return fmt.Errorf("set LINUX_HAS_SYSCALL_WRAPPER: %w", err) + } + + default: // Catch CONFIG_*. + configs[n] = configInfo{ + offset: vsi.Offset, + size: vsi.Size, + typ: v.Type, + } + } + } + + // We only parse kconfig file if a CONFIG_* variable was found. 
+ if len(configs) > 0 { + f, err := linux.FindKConfig() + if err != nil { + return fmt.Errorf("cannot find a kconfig file: %w", err) + } + defer f.Close() + + filter := make(map[string]struct{}, len(configs)) + for config := range configs { + filter[config] = struct{}{} + } + + kernelConfig, err := kconfig.Parse(f, filter) + if err != nil { + return fmt.Errorf("cannot parse kconfig file: %w", err) + } + + for n, info := range configs { + value, ok := kernelConfig[n] + if !ok { + return fmt.Errorf("config option %q does not exist on this kernel", n) + } + + err := kconfig.PutValue(data[info.offset:info.offset+info.size], info.typ, value) + if err != nil { + return fmt.Errorf("problem adding value for %s: %w", n, err) + } + } + } + + m.Contents = []MapKV{{uint32(0), data}} + + return nil +} + +// LoadCollection reads an object file and creates and loads its declared +// resources into the kernel. +// +// Omitting Collection.Close() during application shutdown is an error. +// See the package documentation for details around Map and Program lifecycle. +func LoadCollection(file string) (*Collection, error) { + if platform.IsWindows { + // This mirrors a check in efW. + if ext := filepath.Ext(file); ext == ".sys" { + return loadCollectionFromNativeImage(file) + } + } + + spec, err := LoadCollectionSpec(file) + if err != nil { + return nil, err + } + return NewCollection(spec) +} + +// Assign the contents of a Collection to a struct. +// +// This function bridges functionality between bpf2go generated +// code and any functionality better implemented in Collection. +// +// 'to' must be a pointer to a struct. A field of the +// struct is updated with values from Programs or Maps if it +// has an `ebpf` tag and its type is *Program or *Map. +// The tag's value specifies the name of the program or map as +// found in the CollectionSpec. 
+// +// struct { +// Foo *ebpf.Program `ebpf:"xdp_foo"` +// Bar *ebpf.Map `ebpf:"bar_map"` +// Ignored int +// } +// +// Returns an error if any of the eBPF objects can't be found, or +// if the same Map or Program is assigned multiple times. +// +// Ownership and Close()ing responsibility is transferred to `to` +// for any successful assigns. On error `to` is left in an undefined state. +func (coll *Collection) Assign(to interface{}) error { + assignedMaps := make(map[string]bool) + assignedProgs := make(map[string]bool) + assignedVars := make(map[string]bool) + + // Assign() only transfers already-loaded Maps and Programs. No extra + // loading is done. + getValue := func(typ reflect.Type, name string) (interface{}, error) { + switch typ { + + case reflect.TypeOf((*Program)(nil)): + if p := coll.Programs[name]; p != nil { + assignedProgs[name] = true + return p, nil + } + return nil, fmt.Errorf("missing program %q", name) + + case reflect.TypeOf((*Map)(nil)): + if m := coll.Maps[name]; m != nil { + assignedMaps[name] = true + return m, nil + } + return nil, fmt.Errorf("missing map %q", name) + + case reflect.TypeOf((*Variable)(nil)): + if v := coll.Variables[name]; v != nil { + assignedVars[name] = true + return v, nil + } + return nil, fmt.Errorf("missing variable %q", name) + + default: + return nil, fmt.Errorf("unsupported type %s", typ) + } + } + + if err := assignValues(to, getValue); err != nil { + return err + } + + // Finalize ownership transfer + for p := range assignedProgs { + delete(coll.Programs, p) + } + for m := range assignedMaps { + delete(coll.Maps, m) + } + for s := range assignedVars { + delete(coll.Variables, s) + } + + return nil +} + +// Close frees all maps and programs associated with the collection. +// +// The collection mustn't be used afterwards. 
+func (coll *Collection) Close() { + for _, prog := range coll.Programs { + prog.Close() + } + for _, m := range coll.Maps { + m.Close() + } +} + +// DetachMap removes the named map from the Collection. +// +// This means that a later call to Close() will not affect this map. +// +// Returns nil if no map of that name exists. +func (coll *Collection) DetachMap(name string) *Map { + m := coll.Maps[name] + delete(coll.Maps, name) + return m +} + +// DetachProgram removes the named program from the Collection. +// +// This means that a later call to Close() will not affect this program. +// +// Returns nil if no program of that name exists. +func (coll *Collection) DetachProgram(name string) *Program { + p := coll.Programs[name] + delete(coll.Programs, name) + return p +} + +// structField represents a struct field containing the ebpf struct tag. +type structField struct { + reflect.StructField + value reflect.Value +} + +// ebpfFields extracts field names tagged with 'ebpf' from a struct type. +// Keep track of visited types to avoid infinite recursion. +func ebpfFields(structVal reflect.Value, visited map[reflect.Type]bool) ([]structField, error) { + if visited == nil { + visited = make(map[reflect.Type]bool) + } + + structType := structVal.Type() + if structType.Kind() != reflect.Struct { + return nil, fmt.Errorf("%s is not a struct", structType) + } + + if visited[structType] { + return nil, fmt.Errorf("recursion on type %s", structType) + } + + fields := make([]structField, 0, structType.NumField()) + for i := 0; i < structType.NumField(); i++ { + field := structField{structType.Field(i), structVal.Field(i)} + + // If the field is tagged, gather it and move on. + name := field.Tag.Get("ebpf") + if name != "" { + fields = append(fields, field) + continue + } + + // If the field does not have an ebpf tag, but is a struct or a pointer + // to a struct, attempt to gather its fields as well. 
+ var v reflect.Value + switch field.Type.Kind() { + case reflect.Ptr: + if field.Type.Elem().Kind() != reflect.Struct { + continue + } + + if field.value.IsNil() { + return nil, fmt.Errorf("nil pointer to %s", structType) + } + + // Obtain the destination type of the pointer. + v = field.value.Elem() + + case reflect.Struct: + // Reference the value's type directly. + v = field.value + + default: + continue + } + + inner, err := ebpfFields(v, visited) + if err != nil { + return nil, fmt.Errorf("field %s: %w", field.Name, err) + } + + fields = append(fields, inner...) + } + + return fields, nil +} + +// assignValues attempts to populate all fields of 'to' tagged with 'ebpf'. +// +// getValue is called for every tagged field of 'to' and must return the value +// to be assigned to the field with the given typ and name. +func assignValues(to interface{}, + getValue func(typ reflect.Type, name string) (interface{}, error)) error { + + toValue := reflect.ValueOf(to) + if toValue.Type().Kind() != reflect.Ptr { + return fmt.Errorf("%T is not a pointer to struct", to) + } + + if toValue.IsNil() { + return fmt.Errorf("nil pointer to %T", to) + } + + fields, err := ebpfFields(toValue.Elem(), nil) + if err != nil { + return err + } + + type elem struct { + // Either *Map or *Program + typ reflect.Type + name string + } + + assigned := make(map[elem]string) + for _, field := range fields { + // Get string value the field is tagged with. + tag := field.Tag.Get("ebpf") + if strings.Contains(tag, ",") { + return fmt.Errorf("field %s: ebpf tag contains a comma", field.Name) + } + + // Check if the eBPF object with the requested + // type and tag was already assigned elsewhere. + e := elem{field.Type, tag} + if af := assigned[e]; af != "" { + return fmt.Errorf("field %s: object %q was already assigned to %s", field.Name, tag, af) + } + + // Get the eBPF object referred to by the tag. 
+ value, err := getValue(field.Type, tag) + if err != nil { + return fmt.Errorf("field %s: %w", field.Name, err) + } + + if !field.value.CanSet() { + return fmt.Errorf("field %s: can't set value", field.Name) + } + field.value.Set(reflect.ValueOf(value)) + + assigned[e] = field.Name + } + + return nil +} diff --git a/vendor/github.com/cilium/ebpf/collection_other.go b/vendor/github.com/cilium/ebpf/collection_other.go new file mode 100644 index 000000000..0e69bb83a --- /dev/null +++ b/vendor/github.com/cilium/ebpf/collection_other.go @@ -0,0 +1,9 @@ +//go:build !windows + +package ebpf + +import "github.com/cilium/ebpf/internal" + +func loadCollectionFromNativeImage(_ string) (*Collection, error) { + return nil, internal.ErrNotSupportedOnOS +} diff --git a/vendor/github.com/cilium/ebpf/collection_windows.go b/vendor/github.com/cilium/ebpf/collection_windows.go new file mode 100644 index 000000000..c1bbaa21d --- /dev/null +++ b/vendor/github.com/cilium/ebpf/collection_windows.go @@ -0,0 +1,136 @@ +package ebpf + +import ( + "errors" + "fmt" + "unsafe" + + "github.com/cilium/ebpf/internal/efw" + "github.com/cilium/ebpf/internal/sys" + "github.com/cilium/ebpf/internal/unix" +) + +func loadCollectionFromNativeImage(file string) (_ *Collection, err error) { + mapFds := make([]efw.FD, 16) + programFds := make([]efw.FD, 16) + var maps map[string]*Map + var programs map[string]*Program + + defer func() { + if err == nil { + return + } + + for _, fd := range append(mapFds, programFds...) { + // efW never uses fd 0. 
+ if fd != 0 { + _ = efw.EbpfCloseFd(int(fd)) + } + } + + for _, m := range maps { + _ = m.Close() + } + + for _, p := range programs { + _ = p.Close() + } + }() + + nMaps, nPrograms, err := efw.EbpfObjectLoadNativeFds(file, mapFds, programFds) + if errors.Is(err, efw.EBPF_NO_MEMORY) && (nMaps > len(mapFds) || nPrograms > len(programFds)) { + mapFds = make([]efw.FD, nMaps) + programFds = make([]efw.FD, nPrograms) + + nMaps, nPrograms, err = efw.EbpfObjectLoadNativeFds(file, mapFds, programFds) + } + if err != nil { + return nil, err + } + + mapFds = mapFds[:nMaps] + programFds = programFds[:nPrograms] + + // The maximum length of a name is only 16 bytes on Linux, longer names + // are truncated. This is not a problem when loading from an ELF, since + // we get the full object name from the symbol table. + // When loading a native image we do not have this luxury. Use an efW native + // API to retrieve up to 64 bytes of the object name. + + maps = make(map[string]*Map, len(mapFds)) + for _, raw := range mapFds { + fd, err := sys.NewFD(int(raw)) + if err != nil { + return nil, err + } + + m, mapErr := newMapFromFD(fd) + if mapErr != nil { + _ = fd.Close() + return nil, mapErr + } + + var efwMapInfo efw.BpfMapInfo + size := uint32(unsafe.Sizeof(efwMapInfo)) + _, err = efw.EbpfObjectGetInfoByFd(m.FD(), unsafe.Pointer(&efwMapInfo), &size) + if err != nil { + _ = m.Close() + return nil, err + } + + if size >= uint32(unsafe.Offsetof(efwMapInfo.Name)+unsafe.Sizeof(efwMapInfo.Name)) { + m.name = unix.ByteSliceToString(efwMapInfo.Name[:]) + } + + if m.name == "" { + _ = m.Close() + return nil, fmt.Errorf("unnamed map") + } + + if _, ok := maps[m.name]; ok { + return nil, fmt.Errorf("duplicate map with the same name: %s", m.name) + } + + maps[m.name] = m + } + + programs = make(map[string]*Program, len(programFds)) + for _, raw := range programFds { + fd, err := sys.NewFD(int(raw)) + if err != nil { + return nil, err + } + + program, err := newProgramFromFD(fd) + if err != 
nil { + _ = fd.Close() + return nil, err + } + + var efwProgInfo efw.BpfProgInfo + size := uint32(unsafe.Sizeof(efwProgInfo)) + _, err = efw.EbpfObjectGetInfoByFd(program.FD(), unsafe.Pointer(&efwProgInfo), &size) + if err != nil { + _ = program.Close() + return nil, err + } + + if size >= uint32(unsafe.Offsetof(efwProgInfo.Name)+unsafe.Sizeof(efwProgInfo.Name)) { + program.name = unix.ByteSliceToString(efwProgInfo.Name[:]) + } + + if program.name == "" { + _ = program.Close() + return nil, fmt.Errorf("unnamed program") + } + + if _, ok := programs[program.name]; ok { + _ = program.Close() + return nil, fmt.Errorf("duplicate program with the same name: %s", program.name) + } + + programs[program.name] = program + } + + return &Collection{programs, maps, nil}, nil +} diff --git a/vendor/github.com/cilium/ebpf/cpu.go b/vendor/github.com/cilium/ebpf/cpu.go new file mode 100644 index 000000000..3bcdc386d --- /dev/null +++ b/vendor/github.com/cilium/ebpf/cpu.go @@ -0,0 +1,17 @@ +package ebpf + +// PossibleCPU returns the max number of CPUs a system may possibly have +// Logical CPU numbers must be of the form 0-n +func PossibleCPU() (int, error) { + return possibleCPU() +} + +// MustPossibleCPU is a helper that wraps a call to PossibleCPU and panics if +// the error is non-nil. 
+func MustPossibleCPU() int { + cpus, err := PossibleCPU() + if err != nil { + panic(err) + } + return cpus +} diff --git a/vendor/github.com/cilium/ebpf/cpu_other.go b/vendor/github.com/cilium/ebpf/cpu_other.go new file mode 100644 index 000000000..eca5164c1 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/cpu_other.go @@ -0,0 +1,13 @@ +//go:build !windows + +package ebpf + +import ( + "sync" + + "github.com/cilium/ebpf/internal/linux" +) + +var possibleCPU = sync.OnceValues(func() (int, error) { + return linux.ParseCPUsFromFile("/sys/devices/system/cpu/possible") +}) diff --git a/vendor/github.com/cilium/ebpf/cpu_windows.go b/vendor/github.com/cilium/ebpf/cpu_windows.go new file mode 100644 index 000000000..9448b0916 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/cpu_windows.go @@ -0,0 +1,11 @@ +package ebpf + +import ( + "sync" + + "golang.org/x/sys/windows" +) + +var possibleCPU = sync.OnceValues(func() (int, error) { + return int(windows.GetMaximumProcessorCount(windows.ALL_PROCESSOR_GROUPS)), nil +}) diff --git a/vendor/github.com/cilium/ebpf/doc.go b/vendor/github.com/cilium/ebpf/doc.go new file mode 100644 index 000000000..396b3394d --- /dev/null +++ b/vendor/github.com/cilium/ebpf/doc.go @@ -0,0 +1,25 @@ +// Package ebpf is a toolkit for working with eBPF programs. +// +// eBPF programs are small snippets of code which are executed directly +// in a VM in the Linux kernel, which makes them very fast and flexible. +// Many Linux subsystems now accept eBPF programs. This makes it possible +// to implement highly application specific logic inside the kernel, +// without having to modify the actual kernel itself. +// +// This package is designed for long-running processes which +// want to use eBPF to implement part of their application logic. It has no +// run-time dependencies outside of the library and the Linux kernel itself. +// eBPF code should be compiled ahead of time using clang, and shipped with +// your application as any other resource. 
+// +// Use the link subpackage to attach a loaded program to a hook in the kernel. +// +// Note that losing all references to Map and Program resources will cause +// their underlying file descriptors to be closed, potentially removing those +// objects from the kernel. Always retain a reference by e.g. deferring a +// Close() of a Collection or LoadAndAssign object until application exit. +// +// Special care needs to be taken when handling maps of type ProgramArray, +// as the kernel erases its contents when the last userspace or bpffs +// reference disappears, regardless of the map being in active use. +package ebpf diff --git a/vendor/github.com/cilium/ebpf/elf_reader.go b/vendor/github.com/cilium/ebpf/elf_reader.go new file mode 100644 index 000000000..f2c9196b7 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/elf_reader.go @@ -0,0 +1,1508 @@ +package ebpf + +import ( + "bufio" + "bytes" + "debug/elf" + "encoding/binary" + "errors" + "fmt" + "io" + "math" + "os" + "slices" + "strings" + + "github.com/cilium/ebpf/asm" + "github.com/cilium/ebpf/btf" + "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/platform" + "github.com/cilium/ebpf/internal/sys" +) + +type kconfigMetaKey struct{} + +type kconfigMeta struct { + Map *MapSpec + Offset uint32 +} + +type kfuncMetaKey struct{} + +type kfuncMeta struct { + Binding elf.SymBind + Func *btf.Func +} + +type ksymMetaKey struct{} + +type ksymMeta struct { + Binding elf.SymBind + Name string +} + +// elfCode is a convenience to reduce the amount of arguments that have to +// be passed around explicitly. You should treat its contents as immutable. 
+type elfCode struct { + *internal.SafeELFFile + sections map[elf.SectionIndex]*elfSection + license string + version uint32 + btf *btf.Spec + extInfo *btf.ExtInfos + maps map[string]*MapSpec + vars map[string]*VariableSpec + kfuncs map[string]*btf.Func + ksyms map[string]struct{} + kconfig *MapSpec +} + +// LoadCollectionSpec parses an ELF file into a CollectionSpec. +func LoadCollectionSpec(file string) (*CollectionSpec, error) { + f, err := os.Open(file) + if err != nil { + return nil, err + } + defer f.Close() + + spec, err := LoadCollectionSpecFromReader(f) + if err != nil { + return nil, fmt.Errorf("file %s: %w", file, err) + } + return spec, nil +} + +// LoadCollectionSpecFromReader parses an ELF file into a CollectionSpec. +func LoadCollectionSpecFromReader(rd io.ReaderAt) (*CollectionSpec, error) { + f, err := internal.NewSafeELFFile(rd) + if err != nil { + return nil, err + } + + // Checks if the ELF file is for BPF data. + // Old LLVM versions set e_machine to EM_NONE. + if f.Machine != elf.EM_NONE && f.Machine != elf.EM_BPF { + return nil, fmt.Errorf("unexpected machine type for BPF ELF: %s", f.Machine) + } + + var ( + licenseSection *elf.Section + versionSection *elf.Section + sections = make(map[elf.SectionIndex]*elfSection) + relSections = make(map[elf.SectionIndex]*elf.Section) + ) + + // This is the target of relocations generated by inline assembly. + sections[elf.SHN_UNDEF] = newElfSection(new(elf.Section), undefSection) + + // Collect all the sections we're interested in. This includes relocations + // which we parse later. + // + // Keep the documentation at docs/ebpf/loading/elf-sections.md up-to-date. 
+ for i, sec := range f.Sections { + idx := elf.SectionIndex(i) + + switch { + case strings.HasPrefix(sec.Name, "license"): + licenseSection = sec + case strings.HasPrefix(sec.Name, "version"): + versionSection = sec + case strings.HasPrefix(sec.Name, "maps"): + sections[idx] = newElfSection(sec, mapSection) + case sec.Name == ".maps": + sections[idx] = newElfSection(sec, btfMapSection) + case isDataSection(sec.Name): + sections[idx] = newElfSection(sec, dataSection) + case sec.Type == elf.SHT_REL: + // Store relocations under the section index of the target + relSections[elf.SectionIndex(sec.Info)] = sec + case sec.Type == elf.SHT_PROGBITS && (sec.Flags&elf.SHF_EXECINSTR) != 0 && sec.Size > 0: + sections[idx] = newElfSection(sec, programSection) + } + } + + license, err := loadLicense(licenseSection) + if err != nil { + return nil, fmt.Errorf("load license: %w", err) + } + + version, err := loadVersion(versionSection, f.ByteOrder) + if err != nil { + return nil, fmt.Errorf("load version: %w", err) + } + + btfSpec, btfExtInfo, err := btf.LoadSpecAndExtInfosFromReader(rd) + if err != nil && !errors.Is(err, btf.ErrNotFound) { + return nil, fmt.Errorf("load BTF: %w", err) + } + + ec := &elfCode{ + SafeELFFile: f, + sections: sections, + license: license, + version: version, + btf: btfSpec, + extInfo: btfExtInfo, + maps: make(map[string]*MapSpec), + vars: make(map[string]*VariableSpec), + kfuncs: make(map[string]*btf.Func), + ksyms: make(map[string]struct{}), + } + + symbols, err := f.Symbols() + if err != nil { + return nil, fmt.Errorf("load symbols: %v", err) + } + + ec.assignSymbols(symbols) + + if err := ec.loadRelocations(relSections, symbols); err != nil { + return nil, fmt.Errorf("load relocations: %w", err) + } + + if err := ec.loadMaps(); err != nil { + return nil, fmt.Errorf("load maps: %w", err) + } + + if err := ec.loadBTFMaps(); err != nil { + return nil, fmt.Errorf("load BTF maps: %w", err) + } + + if err := ec.loadDataSections(); err != nil { + return 
nil, fmt.Errorf("load data sections: %w", err) + } + + if err := ec.loadKconfigSection(); err != nil { + return nil, fmt.Errorf("load virtual .kconfig section: %w", err) + } + + if err := ec.loadKsymsSection(); err != nil { + return nil, fmt.Errorf("load virtual .ksyms section: %w", err) + } + + // Finally, collect programs and link them. + progs, err := ec.loadProgramSections() + if err != nil { + return nil, fmt.Errorf("load programs: %w", err) + } + + return &CollectionSpec{ + ec.maps, + progs, + ec.vars, + btfSpec, + ec.ByteOrder, + }, nil +} + +func loadLicense(sec *elf.Section) (string, error) { + if sec == nil { + return "", nil + } + + data, err := sec.Data() + if err != nil { + return "", fmt.Errorf("section %s: %v", sec.Name, err) + } + return string(bytes.TrimRight(data, "\000")), nil +} + +func loadVersion(sec *elf.Section, bo binary.ByteOrder) (uint32, error) { + if sec == nil { + return 0, nil + } + + var version uint32 + if err := binary.Read(sec.Open(), bo, &version); err != nil { + return 0, fmt.Errorf("section %s: %v", sec.Name, err) + } + return version, nil +} + +func isDataSection(name string) bool { + return name == ".bss" || strings.HasPrefix(name, ".data") || strings.HasPrefix(name, ".rodata") +} + +func isConstantDataSection(name string) bool { + return strings.HasPrefix(name, ".rodata") +} + +func isKconfigSection(name string) bool { + return name == ".kconfig" +} + +type elfSectionKind int + +const ( + undefSection elfSectionKind = iota + mapSection + btfMapSection + programSection + dataSection +) + +type elfSection struct { + *elf.Section + kind elfSectionKind + // Offset from the start of the section to a symbol + symbols map[uint64]elf.Symbol + // Offset from the start of the section to a relocation, which points at + // a symbol in another section. + relocations map[uint64]elf.Symbol + // The number of relocations pointing at this section. 
+ references int +} + +func newElfSection(section *elf.Section, kind elfSectionKind) *elfSection { + return &elfSection{ + section, + kind, + make(map[uint64]elf.Symbol), + make(map[uint64]elf.Symbol), + 0, + } +} + +// assignSymbols takes a list of symbols and assigns them to their +// respective sections, indexed by name. +func (ec *elfCode) assignSymbols(symbols []elf.Symbol) { + for _, symbol := range symbols { + symType := elf.ST_TYPE(symbol.Info) + symSection := ec.sections[symbol.Section] + if symSection == nil { + continue + } + + // Anonymous symbols only occur in debug sections which we don't process + // relocations for. Anonymous symbols are not referenced from other sections. + if symbol.Name == "" { + continue + } + + // Older versions of LLVM don't tag symbols correctly, so keep + // all NOTYPE ones. + switch symSection.kind { + case mapSection, btfMapSection, dataSection: + if symType != elf.STT_NOTYPE && symType != elf.STT_OBJECT { + continue + } + case programSection: + if symType != elf.STT_NOTYPE && symType != elf.STT_FUNC { + continue + } + // LLVM emits LBB_ (Local Basic Block) symbols that seem to be jump + // targets within sections, but BPF has no use for them. + if symType == elf.STT_NOTYPE && elf.ST_BIND(symbol.Info) == elf.STB_LOCAL && + strings.HasPrefix(symbol.Name, "LBB") { + continue + } + // Only collect symbols that occur in program/maps/data sections. + default: + continue + } + + symSection.symbols[symbol.Value] = symbol + } +} + +// loadRelocations iterates .rel* sections and extracts relocation entries for +// sections of interest. Makes sure relocations point at valid sections. 
+func (ec *elfCode) loadRelocations(relSections map[elf.SectionIndex]*elf.Section, symbols []elf.Symbol) error { + for idx, relSection := range relSections { + section := ec.sections[idx] + if section == nil { + continue + } + + rels, err := ec.loadSectionRelocations(relSection, symbols) + if err != nil { + return fmt.Errorf("relocation for section %q: %w", section.Name, err) + } + + for _, rel := range rels { + target := ec.sections[rel.Section] + if target == nil { + return fmt.Errorf("section %q: reference to %q in section %s: %w", section.Name, rel.Name, rel.Section, ErrNotSupported) + } + + target.references++ + } + + section.relocations = rels + } + + return nil +} + +// loadProgramSections iterates ec's sections and emits a ProgramSpec +// for each function it finds. +// +// The resulting map is indexed by function name. +func (ec *elfCode) loadProgramSections() (map[string]*ProgramSpec, error) { + + progs := make(map[string]*ProgramSpec) + + // Generate a ProgramSpec for each function found in each program section. + var export []string + for _, sec := range ec.sections { + if sec.kind != programSection { + continue + } + + if len(sec.symbols) == 0 { + return nil, fmt.Errorf("section %v: missing symbols", sec.Name) + } + + funcs, err := ec.loadFunctions(sec) + if err != nil { + return nil, fmt.Errorf("section %v: %w", sec.Name, err) + } + + progType, attachType, progFlags, attachTo := getProgType(sec.Name) + + for name, insns := range funcs { + spec := &ProgramSpec{ + Name: name, + Type: progType, + Flags: progFlags, + AttachType: attachType, + AttachTo: attachTo, + SectionName: sec.Name, + License: ec.license, + KernelVersion: ec.version, + Instructions: insns, + ByteOrder: ec.ByteOrder, + } + + // Function names must be unique within a single ELF blob. 
+ if progs[name] != nil { + return nil, fmt.Errorf("duplicate program name %s", name) + } + progs[name] = spec + + if spec.SectionName != ".text" { + export = append(export, name) + } + } + } + + flattenPrograms(progs, export) + + // Hide programs (e.g. library functions) that were not explicitly emitted + // to an ELF section. These could be exposed in a separate CollectionSpec + // field later to allow them to be modified. + for n, p := range progs { + if p.SectionName == ".text" { + delete(progs, n) + } + } + + return progs, nil +} + +// loadFunctions extracts instruction streams from the given program section +// starting at each symbol in the section. The section's symbols must already +// be narrowed down to STT_NOTYPE (emitted by clang <8) or STT_FUNC. +// +// The resulting map is indexed by function name. +func (ec *elfCode) loadFunctions(section *elfSection) (map[string]asm.Instructions, error) { + r := bufio.NewReader(section.Open()) + + // Decode the section's instruction stream. + insns := make(asm.Instructions, 0, section.Size/asm.InstructionSize) + insns, err := asm.AppendInstructions(insns, r, ec.ByteOrder, platform.Linux) + if err != nil { + return nil, fmt.Errorf("decoding instructions for section %s: %w", section.Name, err) + } + if len(insns) == 0 { + return nil, fmt.Errorf("no instructions found in section %s", section.Name) + } + + iter := insns.Iterate() + for iter.Next() { + ins := iter.Ins + offset := iter.Offset.Bytes() + + // Tag Symbol Instructions. + if sym, ok := section.symbols[offset]; ok { + *ins = ins.WithSymbol(sym.Name) + } + + // Apply any relocations for the current instruction. + // If no relocation is present, resolve any section-relative function calls. 
+ if rel, ok := section.relocations[offset]; ok { + if err := ec.relocateInstruction(ins, rel); err != nil { + return nil, fmt.Errorf("offset %d: relocating instruction: %w", offset, err) + } + } else { + if err := referenceRelativeJump(ins, offset, section.symbols); err != nil { + return nil, fmt.Errorf("offset %d: resolving relative jump: %w", offset, err) + } + } + } + + if ec.extInfo != nil { + ec.extInfo.Assign(insns, section.Name) + } + + return splitSymbols(insns) +} + +// referenceRelativeJump turns a relative jump to another bpf subprogram within +// the same ELF section into a Reference Instruction. +// +// Up to LLVM 9, calls to subprograms within the same ELF section are sometimes +// encoded using relative jumps instead of relocation entries. These jumps go +// out of bounds of the current program, so their targets must be memoized +// before the section's instruction stream is split. +// +// The relative jump Constant is blinded to -1 and the target Symbol is set as +// the Instruction's Reference so it can be resolved by the linker. +func referenceRelativeJump(ins *asm.Instruction, offset uint64, symbols map[uint64]elf.Symbol) error { + if !ins.IsFunctionReference() || ins.Constant == -1 { + return nil + } + + tgt := jumpTarget(offset, *ins) + sym := symbols[tgt].Name + if sym == "" { + return fmt.Errorf("no jump target found at offset %d", tgt) + } + + *ins = ins.WithReference(sym) + ins.Constant = -1 + + return nil +} + +// jumpTarget takes ins' offset within an instruction stream (in bytes) +// and returns its absolute jump destination (in bytes) within the +// instruction stream. +func jumpTarget(offset uint64, ins asm.Instruction) uint64 { + // A relative jump instruction describes the amount of raw BPF instructions + // to jump, convert the offset into bytes. + dest := ins.Constant * asm.InstructionSize + + // The starting point of the jump is the end of the current instruction. 
+ dest += int64(offset + asm.InstructionSize) + + if dest < 0 { + return 0 + } + + return uint64(dest) +} + +var errUnsupportedBinding = errors.New("unsupported binding") + +func (ec *elfCode) relocateInstruction(ins *asm.Instruction, rel elf.Symbol) error { + var ( + typ = elf.ST_TYPE(rel.Info) + bind = elf.ST_BIND(rel.Info) + name = rel.Name + ) + + target := ec.sections[rel.Section] + + switch target.kind { + case mapSection, btfMapSection: + if bind == elf.STB_LOCAL { + return fmt.Errorf("possible erroneous static qualifier on map definition: found reference to %q", name) + } + + if bind != elf.STB_GLOBAL { + return fmt.Errorf("map %q: %w: %s", name, errUnsupportedBinding, bind) + } + + if typ != elf.STT_OBJECT && typ != elf.STT_NOTYPE { + // STT_NOTYPE is generated on clang < 8 which doesn't tag + // relocations appropriately. + return fmt.Errorf("map load: incorrect relocation type %v", typ) + } + + ins.Src = asm.PseudoMapFD + + case dataSection: + var offset uint32 + switch typ { + case elf.STT_SECTION: + if bind != elf.STB_LOCAL { + return fmt.Errorf("direct load: %s: %w: %s", name, errUnsupportedBinding, bind) + } + + // This is really a reference to a static symbol, which clang doesn't + // emit a symbol table entry for. Instead it encodes the offset in + // the instruction itself. + offset = uint32(uint64(ins.Constant)) + + case elf.STT_OBJECT: + // LLVM 9 emits OBJECT-LOCAL symbols for anonymous constants. + if bind != elf.STB_GLOBAL && bind != elf.STB_LOCAL && bind != elf.STB_WEAK { + return fmt.Errorf("direct load: %s: %w: %s", name, errUnsupportedBinding, bind) + } + + offset = uint32(rel.Value) + + case elf.STT_NOTYPE: + // LLVM 7 emits NOTYPE-LOCAL symbols for anonymous constants. 
+ if bind != elf.STB_LOCAL { + return fmt.Errorf("direct load: %s: %w: %s", name, errUnsupportedBinding, bind) + } + + offset = uint32(rel.Value) + + default: + return fmt.Errorf("incorrect relocation type %v for direct map load", typ) + } + + // We rely on using the name of the data section as the reference. It + // would be nicer to keep the real name in case of an STT_OBJECT, but + // it's not clear how to encode that into Instruction. + name = target.Name + + // The kernel expects the offset in the second basic BPF instruction. + ins.Constant = int64(uint64(offset) << 32) + ins.Src = asm.PseudoMapValue + + case programSection: + switch opCode := ins.OpCode; { + case opCode.JumpOp() == asm.Call: + if ins.Src != asm.PseudoCall { + return fmt.Errorf("call: %s: incorrect source register", name) + } + + switch typ { + case elf.STT_NOTYPE, elf.STT_FUNC: + if bind != elf.STB_GLOBAL { + return fmt.Errorf("call: %s: %w: %s", name, errUnsupportedBinding, bind) + } + + case elf.STT_SECTION: + if bind != elf.STB_LOCAL { + return fmt.Errorf("call: %s: %w: %s", name, errUnsupportedBinding, bind) + } + + // The function we want to call is in the indicated section, + // at the offset encoded in the instruction itself. Reverse + // the calculation to find the real function we're looking for. + // A value of -1 references the first instruction in the section. 
+ offset := int64(int32(ins.Constant)+1) * asm.InstructionSize + sym, ok := target.symbols[uint64(offset)] + if !ok { + return fmt.Errorf("call: no symbol at offset %d", offset) + } + + name = sym.Name + ins.Constant = -1 + + default: + return fmt.Errorf("call: %s: invalid symbol type %s", name, typ) + } + case opCode.IsDWordLoad(): + switch typ { + case elf.STT_FUNC: + if bind != elf.STB_GLOBAL { + return fmt.Errorf("load: %s: %w: %s", name, errUnsupportedBinding, bind) + } + + case elf.STT_SECTION: + if bind != elf.STB_LOCAL { + return fmt.Errorf("load: %s: %w: %s", name, errUnsupportedBinding, bind) + } + + // ins.Constant already contains the offset in bytes from the + // start of the section. This is different than a call to a + // static function. + + default: + return fmt.Errorf("load: %s: invalid symbol type %s", name, typ) + } + + sym, ok := target.symbols[uint64(ins.Constant)] + if !ok { + return fmt.Errorf("load: no symbol at offset %d", ins.Constant) + } + + name = sym.Name + ins.Constant = -1 + ins.Src = asm.PseudoFunc + + default: + return fmt.Errorf("neither a call nor a load instruction: %v", ins) + } + + // The Undefined section is used for 'virtual' symbols that aren't backed by + // an ELF section. This includes symbol references from inline asm, forward + // function declarations, as well as extern kfunc declarations using __ksym + // and extern kconfig variables declared using __kconfig. + case undefSection: + if bind != elf.STB_GLOBAL && bind != elf.STB_WEAK { + return fmt.Errorf("asm relocation: %s: %w: %s", name, errUnsupportedBinding, bind) + } + + if typ != elf.STT_NOTYPE { + return fmt.Errorf("asm relocation: %s: unsupported type %s", name, typ) + } + + kf := ec.kfuncs[name] + _, ks := ec.ksyms[name] + + switch { + // If a Call / DWordLoad instruction is found and the datasec has a btf.Func with a Name + // that matches the symbol name we mark the instruction as a referencing a kfunc. 
+ case kf != nil && ins.OpCode.JumpOp() == asm.Call: + ins.Metadata.Set(kfuncMetaKey{}, &kfuncMeta{ + Func: kf, + Binding: bind, + }) + + ins.Src = asm.PseudoKfuncCall + ins.Constant = -1 + + case kf != nil && ins.OpCode.IsDWordLoad(): + ins.Metadata.Set(kfuncMetaKey{}, &kfuncMeta{ + Func: kf, + Binding: bind, + }) + + ins.Constant = 0 + + case ks && ins.OpCode.IsDWordLoad(): + if bind != elf.STB_GLOBAL && bind != elf.STB_WEAK { + return fmt.Errorf("asm relocation: %s: %w: %s", name, errUnsupportedBinding, bind) + } + ins.Metadata.Set(ksymMetaKey{}, &ksymMeta{ + Binding: bind, + Name: name, + }) + + // If no kconfig map is found, this must be a symbol reference from inline + // asm (see testdata/loader.c:asm_relocation()) or a call to a forward + // function declaration (see testdata/fwd_decl.c). Don't interfere, These + // remain standard symbol references. + // extern __kconfig reads are represented as dword loads that need to be + // rewritten to pseudo map loads from .kconfig. If the map is present, + // require it to contain the symbol to disambiguate between inline asm + // relos and kconfigs. 
+ case ec.kconfig != nil && ins.OpCode.IsDWordLoad(): + if bind != elf.STB_GLOBAL { + return fmt.Errorf("asm relocation: %s: %w: %s", name, errUnsupportedBinding, bind) + } + + for _, vsi := range ec.kconfig.Value.(*btf.Datasec).Vars { + if vsi.Type.(*btf.Var).Name != rel.Name { + continue + } + + ins.Src = asm.PseudoMapValue + ins.Metadata.Set(kconfigMetaKey{}, &kconfigMeta{ec.kconfig, vsi.Offset}) + return nil + } + + return fmt.Errorf("kconfig %s not found in .kconfig", rel.Name) + } + + default: + return fmt.Errorf("relocation to %q: %w", target.Name, ErrNotSupported) + } + + *ins = ins.WithReference(name) + return nil +} + +func (ec *elfCode) loadMaps() error { + for _, sec := range ec.sections { + if sec.kind != mapSection { + continue + } + + nSym := len(sec.symbols) + if nSym == 0 { + return fmt.Errorf("section %v: no symbols", sec.Name) + } + + if sec.Size%uint64(nSym) != 0 { + return fmt.Errorf("section %v: map descriptors are not of equal size", sec.Name) + } + + // If the ELF has BTF, pull out the btf.Var for each map definition to + // extract decl tags from. 
+ varsByName := make(map[string]*btf.Var) + if ec.btf != nil { + var ds *btf.Datasec + if err := ec.btf.TypeByName(sec.Name, &ds); err == nil { + for _, vsi := range ds.Vars { + v, ok := btf.As[*btf.Var](vsi.Type) + if !ok { + return fmt.Errorf("section %v: btf.VarSecInfo doesn't point to a *btf.Var: %T", sec.Name, vsi.Type) + } + varsByName[string(v.Name)] = v + } + } + } + + var ( + r = bufio.NewReader(sec.Open()) + size = sec.Size / uint64(nSym) + ) + for i, offset := 0, uint64(0); i < nSym; i, offset = i+1, offset+size { + mapSym, ok := sec.symbols[offset] + if !ok { + return fmt.Errorf("section %s: missing symbol for map at offset %d", sec.Name, offset) + } + + mapName := mapSym.Name + if ec.maps[mapName] != nil { + return fmt.Errorf("section %v: map %v already exists", sec.Name, mapSym) + } + + lr := io.LimitReader(r, int64(size)) + + spec := MapSpec{ + Name: sanitizeName(mapName, -1), + } + switch { + case binary.Read(lr, ec.ByteOrder, &spec.Type) != nil: + return fmt.Errorf("map %s: missing type", mapName) + case binary.Read(lr, ec.ByteOrder, &spec.KeySize) != nil: + return fmt.Errorf("map %s: missing key size", mapName) + case binary.Read(lr, ec.ByteOrder, &spec.ValueSize) != nil: + return fmt.Errorf("map %s: missing value size", mapName) + case binary.Read(lr, ec.ByteOrder, &spec.MaxEntries) != nil: + return fmt.Errorf("map %s: missing max entries", mapName) + case binary.Read(lr, ec.ByteOrder, &spec.Flags) != nil: + return fmt.Errorf("map %s: missing flags", mapName) + } + + extra, err := io.ReadAll(lr) + if err != nil { + return fmt.Errorf("map %s: reading map tail: %w", mapName, err) + } + if len(extra) > 0 { + spec.Extra = bytes.NewReader(extra) + } + + if v, ok := varsByName[mapName]; ok { + spec.Tags = slices.Clone(v.Tags) + } + + ec.maps[mapName] = &spec + } + } + + return nil +} + +// loadBTFMaps iterates over all ELF sections marked as BTF map sections +// (like .maps) and parses them into MapSpecs. 
Dump the .maps section and +// any relocations with `readelf -x .maps -r `. +func (ec *elfCode) loadBTFMaps() error { + for _, sec := range ec.sections { + if sec.kind != btfMapSection { + continue + } + + if ec.btf == nil { + return fmt.Errorf("missing BTF") + } + + // Each section must appear as a DataSec in the ELF's BTF blob. + var ds *btf.Datasec + if err := ec.btf.TypeByName(sec.Name, &ds); err != nil { + return fmt.Errorf("cannot find section '%s' in BTF: %w", sec.Name, err) + } + + // Open a Reader to the ELF's raw section bytes so we can assert that all + // of them are zero on a per-map (per-Var) basis. For now, the section's + // sole purpose is to receive relocations, so all must be zero. + rs := sec.Open() + + for _, vs := range ds.Vars { + // BPF maps are declared as and assigned to global variables, + // so iterate over each Var in the DataSec and validate their types. + v, ok := vs.Type.(*btf.Var) + if !ok { + return fmt.Errorf("section %v: unexpected type %s", sec.Name, vs.Type) + } + name := string(v.Name) + + // The BTF metadata for each Var contains the full length of the map + // declaration, so read the corresponding amount of bytes from the ELF. + // This way, we can pinpoint which map declaration contains unexpected + // (and therefore unsupported) data. + _, err := io.Copy(internal.DiscardZeroes{}, io.LimitReader(rs, int64(vs.Size))) + if err != nil { + return fmt.Errorf("section %v: map %s: initializing BTF map definitions: %w", sec.Name, name, internal.ErrNotSupported) + } + + if ec.maps[name] != nil { + return fmt.Errorf("section %v: map %s already exists", sec.Name, name) + } + + // Each Var representing a BTF map definition contains a Struct. 
+ mapStruct, ok := btf.UnderlyingType(v.Type).(*btf.Struct) + if !ok { + return fmt.Errorf("expected struct, got %s", v.Type) + } + + mapSpec, err := mapSpecFromBTF(sec, &vs, mapStruct, ec.btf, name, false) + if err != nil { + return fmt.Errorf("map %v: %w", name, err) + } + + ec.maps[name] = mapSpec + } + + // Drain the ELF section reader to make sure all bytes are accounted for + // with BTF metadata. + i, err := io.Copy(io.Discard, rs) + if err != nil { + return fmt.Errorf("section %v: unexpected error reading remainder of ELF section: %w", sec.Name, err) + } + if i > 0 { + return fmt.Errorf("section %v: %d unexpected remaining bytes in ELF section, invalid BTF?", sec.Name, i) + } + } + + return nil +} + +// mapSpecFromBTF produces a MapSpec based on a btf.Struct def representing +// a BTF map definition. The name and spec arguments will be copied to the +// resulting MapSpec, and inner must be true on any recursive invocations. +func mapSpecFromBTF(es *elfSection, vs *btf.VarSecinfo, def *btf.Struct, spec *btf.Spec, name string, inner bool) (*MapSpec, error) { + var ( + key, value btf.Type + keySize, valueSize uint64 + mapType MapType + flags, maxEntries uint64 + pinType PinType + mapExtra uint64 + innerMapSpec *MapSpec + contents []MapKV + err error + ) + + for i, member := range def.Members { + switch member.Name { + case "type": + mt, err := uintFromBTF(member.Type) + if err != nil { + return nil, fmt.Errorf("can't get type: %w", err) + } + mapType = MapType(mt) + + case "map_flags": + flags, err = uintFromBTF(member.Type) + if err != nil { + return nil, fmt.Errorf("can't get BTF map flags: %w", err) + } + + case "max_entries": + maxEntries, err = uintFromBTF(member.Type) + if err != nil { + return nil, fmt.Errorf("can't get BTF map max entries: %w", err) + } + + case "key": + if keySize != 0 { + return nil, errors.New("both key and key_size given") + } + + pk, ok := member.Type.(*btf.Pointer) + if !ok { + return nil, fmt.Errorf("key type is not a pointer: 
%T", member.Type) + } + + key = pk.Target + + size, err := btf.Sizeof(pk.Target) + if err != nil { + return nil, fmt.Errorf("can't get size of BTF key: %w", err) + } + + keySize = uint64(size) + + case "value": + if valueSize != 0 { + return nil, errors.New("both value and value_size given") + } + + vk, ok := member.Type.(*btf.Pointer) + if !ok { + return nil, fmt.Errorf("value type is not a pointer: %T", member.Type) + } + + value = vk.Target + + size, err := btf.Sizeof(vk.Target) + if err != nil { + return nil, fmt.Errorf("can't get size of BTF value: %w", err) + } + + valueSize = uint64(size) + + case "key_size": + // Key needs to be nil and keySize needs to be 0 for key_size to be + // considered a valid member. + if key != nil || keySize != 0 { + return nil, errors.New("both key and key_size given") + } + + keySize, err = uintFromBTF(member.Type) + if err != nil { + return nil, fmt.Errorf("can't get BTF key size: %w", err) + } + + case "value_size": + // Value needs to be nil and valueSize needs to be 0 for value_size to be + // considered a valid member. + if value != nil || valueSize != 0 { + return nil, errors.New("both value and value_size given") + } + + valueSize, err = uintFromBTF(member.Type) + if err != nil { + return nil, fmt.Errorf("can't get BTF value size: %w", err) + } + + case "pinning": + if inner { + return nil, errors.New("inner maps can't be pinned") + } + + pinning, err := uintFromBTF(member.Type) + if err != nil { + return nil, fmt.Errorf("can't get pinning: %w", err) + } + + pinType = PinType(pinning) + + case "values": + // The 'values' field in BTF map definitions is used for declaring map + // value types that are references to other BPF objects, like other maps + // or programs. It is always expected to be an array of pointers. 
+ if i != len(def.Members)-1 { + return nil, errors.New("'values' must be the last member in a BTF map definition") + } + + if valueSize != 0 && valueSize != 4 { + return nil, errors.New("value_size must be 0 or 4") + } + valueSize = 4 + + valueType, err := resolveBTFArrayMacro(member.Type) + if err != nil { + return nil, fmt.Errorf("can't resolve type of member 'values': %w", err) + } + + switch t := valueType.(type) { + case *btf.Struct: + // The values member pointing to an array of structs means we're expecting + // a map-in-map declaration. + if mapType != ArrayOfMaps && mapType != HashOfMaps { + return nil, errors.New("outer map needs to be an array or a hash of maps") + } + if inner { + return nil, fmt.Errorf("nested inner maps are not supported") + } + + // This inner map spec is used as a map template, but it needs to be + // created as a traditional map before it can be used to do so. + // libbpf names the inner map template '.inner', but we + // opted for _inner to simplify validation logic. (dots only supported + // on kernels 5.2 and up) + // Pass the BTF spec from the parent object, since both parent and + // child must be created from the same BTF blob (on kernels that support BTF). + innerMapSpec, err = mapSpecFromBTF(es, vs, t, spec, name+"_inner", true) + if err != nil { + return nil, fmt.Errorf("can't parse BTF map definition of inner map: %w", err) + } + + case *btf.FuncProto: + // The values member contains an array of function pointers, meaning an + // autopopulated PROG_ARRAY. 
+ if mapType != ProgramArray { + return nil, errors.New("map needs to be a program array") + } + + default: + return nil, fmt.Errorf("unsupported value type %q in 'values' field", t) + } + + contents, err = resolveBTFValuesContents(es, vs, member) + if err != nil { + return nil, fmt.Errorf("resolving values contents: %w", err) + } + + case "map_extra": + mapExtra, err = uintFromBTF(member.Type) + if err != nil { + return nil, fmt.Errorf("resolving map_extra: %w", err) + } + + default: + return nil, fmt.Errorf("unrecognized field %s in BTF map definition", member.Name) + } + } + + // Some maps don't support value sizes, but annotating their map definitions + // with __type macros can still be useful, especially to let bpf2go generate + // type definitions for them. + if value != nil && !mapType.canHaveValueSize() { + valueSize = 0 + } + + v, ok := btf.As[*btf.Var](vs.Type) + if !ok { + return nil, fmt.Errorf("BTF map definition: btf.VarSecInfo doesn't point to a *btf.Var: %T", vs.Type) + } + + return &MapSpec{ + Name: sanitizeName(name, -1), + Type: MapType(mapType), + KeySize: uint32(keySize), + ValueSize: uint32(valueSize), + MaxEntries: uint32(maxEntries), + Flags: uint32(flags), + Key: key, + Value: value, + Pinning: pinType, + InnerMap: innerMapSpec, + Contents: contents, + Tags: slices.Clone(v.Tags), + MapExtra: mapExtra, + }, nil +} + +// uintFromBTF resolves the __uint and __ulong macros. +// +// __uint emits a pointer to a sized array. For int (*foo)[10], this function +// will return 10. +// +// __ulong emits an enum with a single value that can represent a 64-bit +// integer. The first (and only) enum value is returned. 
+func uintFromBTF(typ btf.Type) (uint64, error) { + switch t := typ.(type) { + case *btf.Pointer: + arr, ok := t.Target.(*btf.Array) + if !ok { + return 0, fmt.Errorf("not a pointer to array: %v", typ) + } + return uint64(arr.Nelems), nil + + case *btf.Enum: + if len(t.Values) == 0 { + return 0, errors.New("enum has no values") + } + return t.Values[0].Value, nil + + default: + return 0, fmt.Errorf("not a pointer or enum: %v", typ) + } +} + +// resolveBTFArrayMacro resolves the __array macro, which declares an array +// of pointers to a given type. This function returns the target Type of +// the pointers in the array. +func resolveBTFArrayMacro(typ btf.Type) (btf.Type, error) { + arr, ok := typ.(*btf.Array) + if !ok { + return nil, fmt.Errorf("not an array: %v", typ) + } + + ptr, ok := arr.Type.(*btf.Pointer) + if !ok { + return nil, fmt.Errorf("not an array of pointers: %v", typ) + } + + return ptr.Target, nil +} + +// resolveBTFValuesContents resolves relocations into ELF sections belonging +// to btf.VarSecinfo's. This can be used on the 'values' member in BTF map +// definitions to extract static declarations of map contents. +func resolveBTFValuesContents(es *elfSection, vs *btf.VarSecinfo, member btf.Member) ([]MapKV, error) { + // The elements of a .values pointer array are not encoded in BTF. + // Instead, relocations are generated into each array index. + // However, it's possible to leave certain array indices empty, so all + // indices' offsets need to be checked for emitted relocations. + + // The offset of the 'values' member within the _struct_ (in bits) + // is the starting point of the array. Convert to bytes. Add VarSecinfo + // offset to get the absolute position in the ELF blob. + start := member.Offset.Bytes() + vs.Offset + // 'values' is encoded in BTF as a zero (variable) length struct + // member, and its contents run until the end of the VarSecinfo. + // Add VarSecinfo offset to get the absolute position in the ELF blob. 
+ end := vs.Size + vs.Offset + // The size of an address in this section. This determines the width of + // an index in the array. + align := uint32(es.Addralign) + + // Check if variable-length section is aligned. + if (end-start)%align != 0 { + return nil, errors.New("unaligned static values section") + } + elems := (end - start) / align + + if elems == 0 { + return nil, nil + } + + contents := make([]MapKV, 0, elems) + + // k is the array index, off is its corresponding ELF section offset. + for k, off := uint32(0), start; k < elems; k, off = k+1, off+align { + r, ok := es.relocations[uint64(off)] + if !ok { + continue + } + + // Relocation exists for the current offset in the ELF section. + // Emit a value stub based on the type of relocation to be replaced by + // a real fd later in the pipeline before populating the map. + // Map keys are encoded in MapKV entries, so empty array indices are + // skipped here. + switch t := elf.ST_TYPE(r.Info); t { + case elf.STT_FUNC: + contents = append(contents, MapKV{uint32(k), r.Name}) + case elf.STT_OBJECT: + contents = append(contents, MapKV{uint32(k), r.Name}) + default: + return nil, fmt.Errorf("unknown relocation type %v for symbol %s", t, r.Name) + } + } + + return contents, nil +} + +func (ec *elfCode) loadDataSections() error { + for _, sec := range ec.sections { + if sec.kind != dataSection { + continue + } + + // If a section has no references, it will be freed as soon as the + // Collection closes, so creating and populating it is wasteful. If it has + // no symbols, it is likely an ephemeral section used during compilation + // that wasn't sanitized by the bpf linker. (like .rodata.str1.1) + // + // No symbols means no VariableSpecs can be generated from it, making it + // pointless to emit a data section for. 
+ if sec.references == 0 && len(sec.symbols) == 0 { + continue + } + + if sec.Size > math.MaxUint32 { + return fmt.Errorf("data section %s: contents exceed maximum size", sec.Name) + } + + mapSpec := &MapSpec{ + Name: sanitizeName(sec.Name, -1), + Type: Array, + KeySize: 4, + ValueSize: uint32(sec.Size), + MaxEntries: 1, + } + + if isConstantDataSection(sec.Name) { + mapSpec.Flags = sys.BPF_F_RDONLY_PROG + } + + switch sec.Type { + // Only open the section if we know there's actual data to be read. + case elf.SHT_PROGBITS: + data, err := sec.Data() + if err != nil { + return fmt.Errorf("data section %s: can't get contents: %w", sec.Name, err) + } + mapSpec.Contents = []MapKV{{uint32(0), data}} + + case elf.SHT_NOBITS: + // NOBITS sections like .bss contain only zeroes and are not allocated in + // the ELF. Since data sections are Arrays, the kernel can preallocate + // them. Don't attempt reading zeroes from the ELF, instead allocate the + // zeroed memory to support getting and setting VariableSpecs for sections + // like .bss. + mapSpec.Contents = []MapKV{{uint32(0), make([]byte, sec.Size)}} + + default: + return fmt.Errorf("data section %s: unknown section type %s", sec.Name, sec.Type) + } + + for off, sym := range sec.symbols { + // Skip symbols marked with the 'hidden' attribute. + if elf.ST_VISIBILITY(sym.Other) == elf.STV_HIDDEN || + elf.ST_VISIBILITY(sym.Other) == elf.STV_INTERNAL { + continue + } + + // Only accept symbols with global or weak bindings. The common + // alternative is STB_LOCAL, which are either function-scoped or declared + // 'static'. + if elf.ST_BIND(sym.Info) != elf.STB_GLOBAL && + elf.ST_BIND(sym.Info) != elf.STB_WEAK { + continue + } + + if ec.vars[sym.Name] != nil { + return fmt.Errorf("data section %s: duplicate variable %s", sec.Name, sym.Name) + } + + // Skip symbols starting with a dot, they are compiler-internal symbols + // emitted by clang 11 and earlier and are not cleaned up by the bpf + // compiler backend (e.g. 
symbols named .Lconstinit.1 in sections like + // .rodata.cst32). Variables in C cannot start with a dot, so filter these + // out. + if strings.HasPrefix(sym.Name, ".") { + continue + } + + ec.vars[sym.Name] = &VariableSpec{ + name: sym.Name, + offset: off, + size: sym.Size, + m: mapSpec, + } + } + + // It is possible for a data section to exist without a corresponding BTF Datasec + // if it only contains anonymous values like macro-defined arrays. + if ec.btf != nil { + var ds *btf.Datasec + if ec.btf.TypeByName(sec.Name, &ds) == nil { + // Assign the spec's key and BTF only if the Datasec lookup was successful. + mapSpec.Key = &btf.Void{} + mapSpec.Value = ds + + // Populate VariableSpecs with type information, if available. + for _, v := range ds.Vars { + name := v.Type.TypeName() + if name == "" { + return fmt.Errorf("data section %s: anonymous variable %v", sec.Name, v) + } + + vt, ok := v.Type.(*btf.Var) + if !ok { + return fmt.Errorf("data section %s: unexpected type %T for variable %s", sec.Name, v.Type, name) + } + + ev := ec.vars[name] + if ev == nil { + // Hidden symbols appear in the BTF Datasec but don't receive a VariableSpec. + continue + } + + if uint64(v.Offset) != ev.offset { + return fmt.Errorf("data section %s: variable %s datasec offset (%d) doesn't match ELF symbol offset (%d)", sec.Name, name, v.Offset, ev.offset) + } + + if uint64(v.Size) != ev.size { + return fmt.Errorf("data section %s: variable %s size in datasec (%d) doesn't match ELF symbol size (%d)", sec.Name, name, v.Size, ev.size) + } + + // Decouple the Var in the VariableSpec from the underlying DataSec in + // the MapSpec to avoid modifications from affecting map loads later on. + ev.t = btf.Copy(vt).(*btf.Var) + } + } + } + + ec.maps[sec.Name] = mapSpec + } + + return nil +} + +// loadKconfigSection handles the 'virtual' Datasec .kconfig that doesn't +// have a corresponding ELF section and exist purely in BTF. 
+func (ec *elfCode) loadKconfigSection() error { + if ec.btf == nil { + return nil + } + + var ds *btf.Datasec + err := ec.btf.TypeByName(".kconfig", &ds) + if errors.Is(err, btf.ErrNotFound) { + return nil + } + if err != nil { + return err + } + + if ds.Size == 0 { + return errors.New("zero-length .kconfig") + } + + ec.kconfig = &MapSpec{ + Name: ".kconfig", + Type: Array, + KeySize: uint32(4), + ValueSize: ds.Size, + MaxEntries: 1, + Flags: sys.BPF_F_RDONLY_PROG, + Key: &btf.Int{Size: 4}, + Value: ds, + } + + return nil +} + +// loadKsymsSection handles the 'virtual' Datasec .ksyms that doesn't +// have a corresponding ELF section and exist purely in BTF. +func (ec *elfCode) loadKsymsSection() error { + if ec.btf == nil { + return nil + } + + var ds *btf.Datasec + err := ec.btf.TypeByName(".ksyms", &ds) + if errors.Is(err, btf.ErrNotFound) { + return nil + } + if err != nil { + return err + } + + for _, v := range ds.Vars { + switch t := v.Type.(type) { + case *btf.Func: + ec.kfuncs[t.TypeName()] = t + case *btf.Var: + ec.ksyms[t.TypeName()] = struct{}{} + default: + return fmt.Errorf("unexpected variable type in .ksyms: %T", v) + } + } + + return nil +} + +type libbpfElfSectionDef struct { + pattern string + programType sys.ProgType + attachType sys.AttachType + flags libbpfElfSectionFlag +} + +type libbpfElfSectionFlag uint32 + +// The values correspond to enum sec_def_flags in libbpf. +const ( + _SEC_NONE libbpfElfSectionFlag = 0 + + _SEC_EXP_ATTACH_OPT libbpfElfSectionFlag = 1 << (iota - 1) + _SEC_ATTACHABLE + _SEC_ATTACH_BTF + _SEC_SLEEPABLE + _SEC_XDP_FRAGS + _SEC_USDT + + // Ignore any present extra in order to preserve backwards compatibility + // with earlier versions of the library. + ignoreExtra + + _SEC_ATTACHABLE_OPT = _SEC_ATTACHABLE | _SEC_EXP_ATTACH_OPT +) + +func init() { + // Compatibility with older versions of the library. + // We prepend libbpf definitions since they contain a prefix match + // for "xdp". 
+ elfSectionDefs = append([]libbpfElfSectionDef{ + {"xdp.frags/", sys.BPF_PROG_TYPE_XDP, sys.BPF_XDP, _SEC_XDP_FRAGS | ignoreExtra}, + {"xdp.frags_devmap/", sys.BPF_PROG_TYPE_XDP, sys.BPF_XDP_DEVMAP, _SEC_XDP_FRAGS}, + {"xdp_devmap/", sys.BPF_PROG_TYPE_XDP, sys.BPF_XDP_DEVMAP, 0}, + {"xdp.frags_cpumap/", sys.BPF_PROG_TYPE_XDP, sys.BPF_XDP_CPUMAP, _SEC_XDP_FRAGS}, + {"xdp_cpumap/", sys.BPF_PROG_TYPE_XDP, sys.BPF_XDP_CPUMAP, 0}, + // This has been in the library since the beginning of time. Not sure + // where it came from. + {"seccomp", sys.BPF_PROG_TYPE_SOCKET_FILTER, 0, _SEC_NONE}, + }, elfSectionDefs...) +} + +func getProgType(sectionName string) (ProgramType, AttachType, uint32, string) { + // Skip optional program marking for now. + sectionName = strings.TrimPrefix(sectionName, "?") + + for _, t := range elfSectionDefs { + extra, ok := matchSectionName(sectionName, t.pattern) + if !ok { + continue + } + + programType := ProgramType(t.programType) + attachType := AttachType(t.attachType) + + var flags uint32 + if t.flags&_SEC_SLEEPABLE > 0 { + flags |= sys.BPF_F_SLEEPABLE + } + if t.flags&_SEC_XDP_FRAGS > 0 { + flags |= sys.BPF_F_XDP_HAS_FRAGS + } + if t.flags&_SEC_EXP_ATTACH_OPT > 0 { + if programType == XDP { + // The library doesn't yet have code to fallback to not specifying + // attach type. Only do this for XDP since we've enforced correct + // attach type for all other program types. + attachType = AttachNone + } + } + if t.flags&ignoreExtra > 0 { + extra = "" + } + + return programType, attachType, flags, extra + } + + return UnspecifiedProgram, AttachNone, 0, "" +} + +// matchSectionName checks a section name against a pattern. +// +// It's behaviour mirrors that of libbpf's sec_def_matches. 
+func matchSectionName(sectionName, pattern string) (extra string, found bool) { + have, extra, found := strings.Cut(sectionName, "/") + want := strings.TrimRight(pattern, "+/") + + if strings.HasSuffix(pattern, "/") { + // Section name must have a slash and extra may be empty. + return extra, have == want && found + } else if strings.HasSuffix(pattern, "+") { + // Section name may have a slash and extra may be empty. + return extra, have == want + } + + // Section name must have a prefix. extra is ignored. + return "", strings.HasPrefix(sectionName, pattern) +} + +func (ec *elfCode) loadSectionRelocations(sec *elf.Section, symbols []elf.Symbol) (map[uint64]elf.Symbol, error) { + rels := make(map[uint64]elf.Symbol) + + if sec.Entsize < 16 { + return nil, fmt.Errorf("section %s: relocations are less than 16 bytes", sec.Name) + } + + r := bufio.NewReader(sec.Open()) + for off := uint64(0); off < sec.Size; off += sec.Entsize { + ent := io.LimitReader(r, int64(sec.Entsize)) + + var rel elf.Rel64 + if binary.Read(ent, ec.ByteOrder, &rel) != nil { + return nil, fmt.Errorf("can't parse relocation at offset %v", off) + } + + symNo := int(elf.R_SYM64(rel.Info) - 1) + if symNo >= len(symbols) { + return nil, fmt.Errorf("offset %d: symbol %d doesn't exist", off, symNo) + } + + symbol := symbols[symNo] + rels[rel.Off] = symbol + } + + return rels, nil +} diff --git a/vendor/github.com/cilium/ebpf/elf_sections.go b/vendor/github.com/cilium/ebpf/elf_sections.go new file mode 100644 index 000000000..43dcfb103 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/elf_sections.go @@ -0,0 +1,111 @@ +// Code generated by internal/cmd/gensections.awk; DO NOT EDIT. + +package ebpf + +// Code in this file is derived from libbpf, available under BSD-2-Clause. 
+ +import "github.com/cilium/ebpf/internal/sys" + +var elfSectionDefs = []libbpfElfSectionDef{ + {"socket", sys.BPF_PROG_TYPE_SOCKET_FILTER, 0, _SEC_NONE}, + {"sk_reuseport/migrate", sys.BPF_PROG_TYPE_SK_REUSEPORT, sys.BPF_SK_REUSEPORT_SELECT_OR_MIGRATE, _SEC_ATTACHABLE}, + {"sk_reuseport", sys.BPF_PROG_TYPE_SK_REUSEPORT, sys.BPF_SK_REUSEPORT_SELECT, _SEC_ATTACHABLE}, + {"kprobe+", sys.BPF_PROG_TYPE_KPROBE, 0, _SEC_NONE}, + {"uprobe+", sys.BPF_PROG_TYPE_KPROBE, 0, _SEC_NONE}, + {"uprobe.s+", sys.BPF_PROG_TYPE_KPROBE, 0, _SEC_SLEEPABLE}, + {"kretprobe+", sys.BPF_PROG_TYPE_KPROBE, 0, _SEC_NONE}, + {"uretprobe+", sys.BPF_PROG_TYPE_KPROBE, 0, _SEC_NONE}, + {"uretprobe.s+", sys.BPF_PROG_TYPE_KPROBE, 0, _SEC_SLEEPABLE}, + {"kprobe.multi+", sys.BPF_PROG_TYPE_KPROBE, sys.BPF_TRACE_KPROBE_MULTI, _SEC_NONE}, + {"kretprobe.multi+", sys.BPF_PROG_TYPE_KPROBE, sys.BPF_TRACE_KPROBE_MULTI, _SEC_NONE}, + {"kprobe.session+", sys.BPF_PROG_TYPE_KPROBE, sys.BPF_TRACE_KPROBE_SESSION, _SEC_NONE}, + {"uprobe.multi+", sys.BPF_PROG_TYPE_KPROBE, sys.BPF_TRACE_UPROBE_MULTI, _SEC_NONE}, + {"uretprobe.multi+", sys.BPF_PROG_TYPE_KPROBE, sys.BPF_TRACE_UPROBE_MULTI, _SEC_NONE}, + {"uprobe.multi.s+", sys.BPF_PROG_TYPE_KPROBE, sys.BPF_TRACE_UPROBE_MULTI, _SEC_SLEEPABLE}, + {"uretprobe.multi.s+", sys.BPF_PROG_TYPE_KPROBE, sys.BPF_TRACE_UPROBE_MULTI, _SEC_SLEEPABLE}, + {"ksyscall+", sys.BPF_PROG_TYPE_KPROBE, 0, _SEC_NONE}, + {"kretsyscall+", sys.BPF_PROG_TYPE_KPROBE, 0, _SEC_NONE}, + {"usdt+", sys.BPF_PROG_TYPE_KPROBE, 0, _SEC_USDT}, + {"usdt.s+", sys.BPF_PROG_TYPE_KPROBE, 0, _SEC_USDT | _SEC_SLEEPABLE}, + {"tc/ingress", sys.BPF_PROG_TYPE_SCHED_CLS, sys.BPF_TCX_INGRESS, _SEC_NONE}, + {"tc/egress", sys.BPF_PROG_TYPE_SCHED_CLS, sys.BPF_TCX_EGRESS, _SEC_NONE}, + {"tcx/ingress", sys.BPF_PROG_TYPE_SCHED_CLS, sys.BPF_TCX_INGRESS, _SEC_NONE}, + {"tcx/egress", sys.BPF_PROG_TYPE_SCHED_CLS, sys.BPF_TCX_EGRESS, _SEC_NONE}, + {"tc", sys.BPF_PROG_TYPE_SCHED_CLS, 0, _SEC_NONE}, + {"classifier", 
sys.BPF_PROG_TYPE_SCHED_CLS, 0, _SEC_NONE}, + {"action", sys.BPF_PROG_TYPE_SCHED_ACT, 0, _SEC_NONE}, + {"netkit/primary", sys.BPF_PROG_TYPE_SCHED_CLS, sys.BPF_NETKIT_PRIMARY, _SEC_NONE}, + {"netkit/peer", sys.BPF_PROG_TYPE_SCHED_CLS, sys.BPF_NETKIT_PEER, _SEC_NONE}, + {"tracepoint+", sys.BPF_PROG_TYPE_TRACEPOINT, 0, _SEC_NONE}, + {"tp+", sys.BPF_PROG_TYPE_TRACEPOINT, 0, _SEC_NONE}, + {"raw_tracepoint+", sys.BPF_PROG_TYPE_RAW_TRACEPOINT, 0, _SEC_NONE}, + {"raw_tp+", sys.BPF_PROG_TYPE_RAW_TRACEPOINT, 0, _SEC_NONE}, + {"raw_tracepoint.w+", sys.BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE, 0, _SEC_NONE}, + {"raw_tp.w+", sys.BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE, 0, _SEC_NONE}, + {"tp_btf+", sys.BPF_PROG_TYPE_TRACING, sys.BPF_TRACE_RAW_TP, _SEC_ATTACH_BTF}, + {"fentry+", sys.BPF_PROG_TYPE_TRACING, sys.BPF_TRACE_FENTRY, _SEC_ATTACH_BTF}, + {"fmod_ret+", sys.BPF_PROG_TYPE_TRACING, sys.BPF_MODIFY_RETURN, _SEC_ATTACH_BTF}, + {"fexit+", sys.BPF_PROG_TYPE_TRACING, sys.BPF_TRACE_FEXIT, _SEC_ATTACH_BTF}, + {"fentry.s+", sys.BPF_PROG_TYPE_TRACING, sys.BPF_TRACE_FENTRY, _SEC_ATTACH_BTF | _SEC_SLEEPABLE}, + {"fmod_ret.s+", sys.BPF_PROG_TYPE_TRACING, sys.BPF_MODIFY_RETURN, _SEC_ATTACH_BTF | _SEC_SLEEPABLE}, + {"fexit.s+", sys.BPF_PROG_TYPE_TRACING, sys.BPF_TRACE_FEXIT, _SEC_ATTACH_BTF | _SEC_SLEEPABLE}, + {"freplace+", sys.BPF_PROG_TYPE_EXT, 0, _SEC_ATTACH_BTF}, + {"lsm+", sys.BPF_PROG_TYPE_LSM, sys.BPF_LSM_MAC, _SEC_ATTACH_BTF}, + {"lsm.s+", sys.BPF_PROG_TYPE_LSM, sys.BPF_LSM_MAC, _SEC_ATTACH_BTF | _SEC_SLEEPABLE}, + {"lsm_cgroup+", sys.BPF_PROG_TYPE_LSM, sys.BPF_LSM_CGROUP, _SEC_ATTACH_BTF}, + {"iter+", sys.BPF_PROG_TYPE_TRACING, sys.BPF_TRACE_ITER, _SEC_ATTACH_BTF}, + {"iter.s+", sys.BPF_PROG_TYPE_TRACING, sys.BPF_TRACE_ITER, _SEC_ATTACH_BTF | _SEC_SLEEPABLE}, + {"syscall", sys.BPF_PROG_TYPE_SYSCALL, 0, _SEC_SLEEPABLE}, + {"xdp.frags/devmap", sys.BPF_PROG_TYPE_XDP, sys.BPF_XDP_DEVMAP, _SEC_XDP_FRAGS}, + {"xdp/devmap", sys.BPF_PROG_TYPE_XDP, sys.BPF_XDP_DEVMAP, _SEC_ATTACHABLE}, + 
{"xdp.frags/cpumap", sys.BPF_PROG_TYPE_XDP, sys.BPF_XDP_CPUMAP, _SEC_XDP_FRAGS}, + {"xdp/cpumap", sys.BPF_PROG_TYPE_XDP, sys.BPF_XDP_CPUMAP, _SEC_ATTACHABLE}, + {"xdp.frags", sys.BPF_PROG_TYPE_XDP, sys.BPF_XDP, _SEC_XDP_FRAGS}, + {"xdp", sys.BPF_PROG_TYPE_XDP, sys.BPF_XDP, _SEC_ATTACHABLE_OPT}, + {"perf_event", sys.BPF_PROG_TYPE_PERF_EVENT, 0, _SEC_NONE}, + {"lwt_in", sys.BPF_PROG_TYPE_LWT_IN, 0, _SEC_NONE}, + {"lwt_out", sys.BPF_PROG_TYPE_LWT_OUT, 0, _SEC_NONE}, + {"lwt_xmit", sys.BPF_PROG_TYPE_LWT_XMIT, 0, _SEC_NONE}, + {"lwt_seg6local", sys.BPF_PROG_TYPE_LWT_SEG6LOCAL, 0, _SEC_NONE}, + {"sockops", sys.BPF_PROG_TYPE_SOCK_OPS, sys.BPF_CGROUP_SOCK_OPS, _SEC_ATTACHABLE_OPT}, + {"sk_skb/stream_parser", sys.BPF_PROG_TYPE_SK_SKB, sys.BPF_SK_SKB_STREAM_PARSER, _SEC_ATTACHABLE_OPT}, + {"sk_skb/stream_verdict", sys.BPF_PROG_TYPE_SK_SKB, sys.BPF_SK_SKB_STREAM_VERDICT, _SEC_ATTACHABLE_OPT}, + {"sk_skb/verdict", sys.BPF_PROG_TYPE_SK_SKB, sys.BPF_SK_SKB_VERDICT, _SEC_ATTACHABLE_OPT}, + {"sk_skb", sys.BPF_PROG_TYPE_SK_SKB, 0, _SEC_NONE}, + {"sk_msg", sys.BPF_PROG_TYPE_SK_MSG, sys.BPF_SK_MSG_VERDICT, _SEC_ATTACHABLE_OPT}, + {"lirc_mode2", sys.BPF_PROG_TYPE_LIRC_MODE2, sys.BPF_LIRC_MODE2, _SEC_ATTACHABLE_OPT}, + {"flow_dissector", sys.BPF_PROG_TYPE_FLOW_DISSECTOR, sys.BPF_FLOW_DISSECTOR, _SEC_ATTACHABLE_OPT}, + {"cgroup_skb/ingress", sys.BPF_PROG_TYPE_CGROUP_SKB, sys.BPF_CGROUP_INET_INGRESS, _SEC_ATTACHABLE_OPT}, + {"cgroup_skb/egress", sys.BPF_PROG_TYPE_CGROUP_SKB, sys.BPF_CGROUP_INET_EGRESS, _SEC_ATTACHABLE_OPT}, + {"cgroup/skb", sys.BPF_PROG_TYPE_CGROUP_SKB, 0, _SEC_NONE}, + {"cgroup/sock_create", sys.BPF_PROG_TYPE_CGROUP_SOCK, sys.BPF_CGROUP_INET_SOCK_CREATE, _SEC_ATTACHABLE}, + {"cgroup/sock_release", sys.BPF_PROG_TYPE_CGROUP_SOCK, sys.BPF_CGROUP_INET_SOCK_RELEASE, _SEC_ATTACHABLE}, + {"cgroup/sock", sys.BPF_PROG_TYPE_CGROUP_SOCK, sys.BPF_CGROUP_INET_SOCK_CREATE, _SEC_ATTACHABLE_OPT}, + {"cgroup/post_bind4", sys.BPF_PROG_TYPE_CGROUP_SOCK, sys.BPF_CGROUP_INET4_POST_BIND, 
_SEC_ATTACHABLE}, + {"cgroup/post_bind6", sys.BPF_PROG_TYPE_CGROUP_SOCK, sys.BPF_CGROUP_INET6_POST_BIND, _SEC_ATTACHABLE}, + {"cgroup/bind4", sys.BPF_PROG_TYPE_CGROUP_SOCK_ADDR, sys.BPF_CGROUP_INET4_BIND, _SEC_ATTACHABLE}, + {"cgroup/bind6", sys.BPF_PROG_TYPE_CGROUP_SOCK_ADDR, sys.BPF_CGROUP_INET6_BIND, _SEC_ATTACHABLE}, + {"cgroup/connect4", sys.BPF_PROG_TYPE_CGROUP_SOCK_ADDR, sys.BPF_CGROUP_INET4_CONNECT, _SEC_ATTACHABLE}, + {"cgroup/connect6", sys.BPF_PROG_TYPE_CGROUP_SOCK_ADDR, sys.BPF_CGROUP_INET6_CONNECT, _SEC_ATTACHABLE}, + {"cgroup/connect_unix", sys.BPF_PROG_TYPE_CGROUP_SOCK_ADDR, sys.BPF_CGROUP_UNIX_CONNECT, _SEC_ATTACHABLE}, + {"cgroup/sendmsg4", sys.BPF_PROG_TYPE_CGROUP_SOCK_ADDR, sys.BPF_CGROUP_UDP4_SENDMSG, _SEC_ATTACHABLE}, + {"cgroup/sendmsg6", sys.BPF_PROG_TYPE_CGROUP_SOCK_ADDR, sys.BPF_CGROUP_UDP6_SENDMSG, _SEC_ATTACHABLE}, + {"cgroup/sendmsg_unix", sys.BPF_PROG_TYPE_CGROUP_SOCK_ADDR, sys.BPF_CGROUP_UNIX_SENDMSG, _SEC_ATTACHABLE}, + {"cgroup/recvmsg4", sys.BPF_PROG_TYPE_CGROUP_SOCK_ADDR, sys.BPF_CGROUP_UDP4_RECVMSG, _SEC_ATTACHABLE}, + {"cgroup/recvmsg6", sys.BPF_PROG_TYPE_CGROUP_SOCK_ADDR, sys.BPF_CGROUP_UDP6_RECVMSG, _SEC_ATTACHABLE}, + {"cgroup/recvmsg_unix", sys.BPF_PROG_TYPE_CGROUP_SOCK_ADDR, sys.BPF_CGROUP_UNIX_RECVMSG, _SEC_ATTACHABLE}, + {"cgroup/getpeername4", sys.BPF_PROG_TYPE_CGROUP_SOCK_ADDR, sys.BPF_CGROUP_INET4_GETPEERNAME, _SEC_ATTACHABLE}, + {"cgroup/getpeername6", sys.BPF_PROG_TYPE_CGROUP_SOCK_ADDR, sys.BPF_CGROUP_INET6_GETPEERNAME, _SEC_ATTACHABLE}, + {"cgroup/getpeername_unix", sys.BPF_PROG_TYPE_CGROUP_SOCK_ADDR, sys.BPF_CGROUP_UNIX_GETPEERNAME, _SEC_ATTACHABLE}, + {"cgroup/getsockname4", sys.BPF_PROG_TYPE_CGROUP_SOCK_ADDR, sys.BPF_CGROUP_INET4_GETSOCKNAME, _SEC_ATTACHABLE}, + {"cgroup/getsockname6", sys.BPF_PROG_TYPE_CGROUP_SOCK_ADDR, sys.BPF_CGROUP_INET6_GETSOCKNAME, _SEC_ATTACHABLE}, + {"cgroup/getsockname_unix", sys.BPF_PROG_TYPE_CGROUP_SOCK_ADDR, sys.BPF_CGROUP_UNIX_GETSOCKNAME, _SEC_ATTACHABLE}, + {"cgroup/sysctl", 
sys.BPF_PROG_TYPE_CGROUP_SYSCTL, sys.BPF_CGROUP_SYSCTL, _SEC_ATTACHABLE}, + {"cgroup/getsockopt", sys.BPF_PROG_TYPE_CGROUP_SOCKOPT, sys.BPF_CGROUP_GETSOCKOPT, _SEC_ATTACHABLE}, + {"cgroup/setsockopt", sys.BPF_PROG_TYPE_CGROUP_SOCKOPT, sys.BPF_CGROUP_SETSOCKOPT, _SEC_ATTACHABLE}, + {"cgroup/dev", sys.BPF_PROG_TYPE_CGROUP_DEVICE, sys.BPF_CGROUP_DEVICE, _SEC_ATTACHABLE_OPT}, + {"struct_ops+", sys.BPF_PROG_TYPE_STRUCT_OPS, 0, _SEC_NONE}, + {"struct_ops.s+", sys.BPF_PROG_TYPE_STRUCT_OPS, 0, _SEC_SLEEPABLE}, + {"sk_lookup", sys.BPF_PROG_TYPE_SK_LOOKUP, sys.BPF_SK_LOOKUP, _SEC_ATTACHABLE}, + {"netfilter", sys.BPF_PROG_TYPE_NETFILTER, sys.BPF_NETFILTER, _SEC_NONE}, +} diff --git a/vendor/github.com/cilium/ebpf/info.go b/vendor/github.com/cilium/ebpf/info.go new file mode 100644 index 000000000..23c819aaa --- /dev/null +++ b/vendor/github.com/cilium/ebpf/info.go @@ -0,0 +1,989 @@ +package ebpf + +import ( + "bufio" + "bytes" + "encoding/hex" + "errors" + "fmt" + "io" + "os" + "reflect" + "time" + + "github.com/cilium/ebpf/asm" + "github.com/cilium/ebpf/btf" + "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/platform" + "github.com/cilium/ebpf/internal/sys" + "github.com/cilium/ebpf/internal/unix" +) + +// The *Info structs expose metadata about a program or map. Most +// fields are exposed via a getter: +// +// func (*MapInfo) ID() (MapID, bool) +// +// This is because the metadata available changes based on kernel version. +// The second boolean return value indicates whether a particular field is +// available on the current kernel. +// +// Always add new metadata as such a getter, unless you can somehow get the +// value of the field on all supported kernels. Also document which version +// a particular field first appeared in. +// +// Some metadata is a buffer which needs additional parsing. In this case, +// store the undecoded data in the Info struct and provide a getter which +// decodes it when necessary. 
See ProgramInfo.Instructions for an example. + +// MapInfo describes a map. +type MapInfo struct { + // Type of the map. + Type MapType + // KeySize is the size of the map key in bytes. + KeySize uint32 + // ValueSize is the size of the map value in bytes. + ValueSize uint32 + // MaxEntries is the maximum number of entries the map can hold. Its meaning + // is map-specific. + MaxEntries uint32 + // Flags used during map creation. + Flags uint32 + // Name as supplied by user space at load time. Available from 4.15. + Name string + + id MapID + btf btf.ID + mapExtra uint64 + memlock uint64 + frozen bool +} + +// minimalMapInfoFromFd queries the minimum information needed to create a Map +// based on a file descriptor. This requires the map type, key/value sizes, +// maxentries and flags. +// +// Does not fall back to fdinfo since the version gap between fdinfo (4.10) and +// [sys.ObjInfo] (4.13) is small and both kernels are EOL since at least Nov +// 2017. +// +// Requires at least Linux 4.13. +func minimalMapInfoFromFd(fd *sys.FD) (*MapInfo, error) { + var info sys.MapInfo + if err := sys.ObjInfo(fd, &info); err != nil { + return nil, fmt.Errorf("getting object info: %w", err) + } + + typ, err := MapTypeForPlatform(platform.Native, info.Type) + if err != nil { + return nil, fmt.Errorf("map type: %w", err) + } + + return &MapInfo{ + Type: typ, + KeySize: info.KeySize, + ValueSize: info.ValueSize, + MaxEntries: info.MaxEntries, + Flags: uint32(info.MapFlags), + Name: unix.ByteSliceToString(info.Name[:]), + }, nil +} + +// newMapInfoFromFd queries map information about the given fd. [sys.ObjInfo] is +// attempted first, supplementing any missing values with information from +// /proc/self/fdinfo. Ignores EINVAL from ObjInfo as well as ErrNotSupported +// from reading fdinfo (indicating the file exists, but no fields of interest +// were found). If both fail, an error is always returned. 
+func newMapInfoFromFd(fd *sys.FD) (*MapInfo, error) { + var info sys.MapInfo + err1 := sys.ObjInfo(fd, &info) + // EINVAL means the kernel doesn't support BPF_OBJ_GET_INFO_BY_FD. Continue + // with fdinfo if that's the case. + if err1 != nil && !errors.Is(err1, unix.EINVAL) { + return nil, fmt.Errorf("getting object info: %w", err1) + } + + typ, err := MapTypeForPlatform(platform.Native, info.Type) + if err != nil { + return nil, fmt.Errorf("map type: %w", err) + } + + mi := &MapInfo{ + typ, + info.KeySize, + info.ValueSize, + info.MaxEntries, + uint32(info.MapFlags), + unix.ByteSliceToString(info.Name[:]), + MapID(info.Id), + btf.ID(info.BtfId), + info.MapExtra, + 0, + false, + } + + // Supplement OBJ_INFO with data from /proc/self/fdinfo. It contains fields + // like memlock and frozen that are not present in OBJ_INFO. + err2 := readMapInfoFromProc(fd, mi) + if err2 != nil && !errors.Is(err2, ErrNotSupported) { + return nil, fmt.Errorf("getting map info from fdinfo: %w", err2) + } + + if err1 != nil && err2 != nil { + return nil, fmt.Errorf("ObjInfo and fdinfo both failed: objinfo: %w, fdinfo: %w", err1, err2) + } + + return mi, nil +} + +// readMapInfoFromProc queries map information about the given fd from +// /proc/self/fdinfo. It only writes data into fields that have a zero value. +func readMapInfoFromProc(fd *sys.FD, mi *MapInfo) error { + var mapType uint32 + err := scanFdInfo(fd, map[string]interface{}{ + "map_type": &mapType, + "map_id": &mi.id, + "key_size": &mi.KeySize, + "value_size": &mi.ValueSize, + "max_entries": &mi.MaxEntries, + "map_flags": &mi.Flags, + "map_extra": &mi.mapExtra, + "memlock": &mi.memlock, + "frozen": &mi.frozen, + }) + if err != nil { + return err + } + + if mi.Type == 0 { + mi.Type, err = MapTypeForPlatform(platform.Linux, mapType) + if err != nil { + return fmt.Errorf("map type: %w", err) + } + } + + return nil +} + +// ID returns the map ID. +// +// Available from 4.13. 
+// +// The bool return value indicates whether this optional field is available. +func (mi *MapInfo) ID() (MapID, bool) { + return mi.id, mi.id > 0 +} + +// BTFID returns the BTF ID associated with the Map. +// +// The ID is only valid as long as the associated Map is kept alive. +// Available from 4.18. +// +// The bool return value indicates whether this optional field is available and +// populated. (The field may be available but not populated if the kernel +// supports the field but the Map was loaded without BTF information.) +func (mi *MapInfo) BTFID() (btf.ID, bool) { + return mi.btf, mi.btf > 0 +} + +// MapExtra returns an opaque field whose meaning is map-specific. +// +// Available from 5.16. +// +// The bool return value indicates whether this optional field is available and +// populated, if it was specified during Map creation. +func (mi *MapInfo) MapExtra() (uint64, bool) { + return mi.mapExtra, mi.mapExtra > 0 +} + +// Memlock returns an approximate number of bytes allocated to this map. +// +// Available from 4.10. +// +// The bool return value indicates whether this optional field is available. +func (mi *MapInfo) Memlock() (uint64, bool) { + return mi.memlock, mi.memlock > 0 +} + +// Frozen indicates whether [Map.Freeze] was called on this map. If true, +// modifications from user space are not allowed. +// +// Available from 5.2. Requires access to procfs. +// +// If the kernel doesn't support map freezing, this field will always be false. +func (mi *MapInfo) Frozen() bool { + return mi.frozen +} + +// ProgramStats contains runtime statistics for a single [Program], returned by +// [Program.Stats]. +// +// Will contain mostly zero values if the collection of statistics is not +// enabled, see [EnableStats]. +type ProgramStats struct { + // Total accumulated runtime of the Program. + // + // Requires at least Linux 5.8. + Runtime time.Duration + + // Total number of times the Program has executed. + // + // Requires at least Linux 5.8. 
+ RunCount uint64 + + // Total number of times the program was not executed due to recursion. This + // can happen when another bpf program is already running on the cpu, when bpf + // program execution is interrupted, for example. + // + // Requires at least Linux 5.12. + RecursionMisses uint64 +} + +func newProgramStatsFromFd(fd *sys.FD) (*ProgramStats, error) { + var info sys.ProgInfo + if err := sys.ObjInfo(fd, &info); err != nil { + return nil, fmt.Errorf("getting program info: %w", err) + } + + return &ProgramStats{ + Runtime: time.Duration(info.RunTimeNs), + RunCount: info.RunCnt, + RecursionMisses: info.RecursionMisses, + }, nil +} + +// programJitedInfo holds information about JITed info of a program. +type programJitedInfo struct { + // ksyms holds the ksym addresses of the BPF program, including those of its + // subprograms. + // + // Available from 4.18. + ksyms []uint64 + numKsyms uint32 + + // insns holds the JITed machine native instructions of the program, + // including those of its subprograms. + // + // Available from 4.13. + insns []byte + numInsns uint32 + + // lineInfos holds the JITed line infos, which are kernel addresses. + // + // Available from 5.0. + lineInfos []uint64 + numLineInfos uint32 + + // lineInfoRecSize is the size of a single line info record. + // + // Available from 5.0. + lineInfoRecSize uint32 + + // funcLens holds the insns length of each function. + // + // Available from 4.18. + funcLens []uint32 + numFuncLens uint32 +} + +// ProgramInfo describes a Program's immutable metadata. For runtime statistics, +// see [ProgramStats]. +type ProgramInfo struct { + Type ProgramType + id ProgramID + // Truncated hash of the BPF bytecode. Available from 4.13. + Tag string + // Name as supplied by user space at load time. Available from 4.15. 
+ Name string + + createdByUID uint32 + haveCreatedByUID bool + btf btf.ID + loadTime time.Duration + + restricted bool + + maps []MapID + insns []byte + jitedSize uint32 + verifiedInstructions uint32 + + jitedInfo programJitedInfo + + lineInfos []byte + numLineInfos uint32 + funcInfos []byte + numFuncInfos uint32 + + memlock uint64 +} + +// minimalProgramFromFd queries the minimum information needed to create a +// Program based on a file descriptor, requiring at least the program type. +// +// Does not fall back to fdinfo since the version gap between fdinfo (4.10) and +// [sys.ObjInfo] (4.13) is small and both kernels are EOL since at least Nov +// 2017. +// +// Requires at least Linux 4.13. +func minimalProgramInfoFromFd(fd *sys.FD) (*ProgramInfo, error) { + var info sys.ProgInfo + if err := sys.ObjInfo(fd, &info); err != nil { + return nil, fmt.Errorf("getting object info: %w", err) + } + + typ, err := ProgramTypeForPlatform(platform.Native, info.Type) + if err != nil { + return nil, fmt.Errorf("program type: %w", err) + } + + return &ProgramInfo{ + Type: typ, + Name: unix.ByteSliceToString(info.Name[:]), + }, nil +} + +// newProgramInfoFromFd queries program information about the given fd. +// +// [sys.ObjInfo] is attempted first, supplementing any missing values with +// information from /proc/self/fdinfo. Ignores EINVAL from ObjInfo as well as +// ErrNotSupported from reading fdinfo (indicating the file exists, but no +// fields of interest were found). If both fail, an error is always returned. +func newProgramInfoFromFd(fd *sys.FD) (*ProgramInfo, error) { + var info sys.ProgInfo + err1 := sys.ObjInfo(fd, &info) + // EINVAL means the kernel doesn't support BPF_OBJ_GET_INFO_BY_FD. Continue + // with fdinfo if that's the case. 
+ if err1 != nil && !errors.Is(err1, unix.EINVAL) { + return nil, fmt.Errorf("getting object info: %w", err1) + } + + typ, err := ProgramTypeForPlatform(platform.Native, info.Type) + if err != nil { + return nil, fmt.Errorf("program type: %w", err) + } + + pi := ProgramInfo{ + Type: typ, + id: ProgramID(info.Id), + Tag: hex.EncodeToString(info.Tag[:]), + Name: unix.ByteSliceToString(info.Name[:]), + btf: btf.ID(info.BtfId), + jitedSize: info.JitedProgLen, + loadTime: time.Duration(info.LoadTime), + verifiedInstructions: info.VerifiedInsns, + } + + // Supplement OBJ_INFO with data from /proc/self/fdinfo. It contains fields + // like memlock that is not present in OBJ_INFO. + err2 := readProgramInfoFromProc(fd, &pi) + if err2 != nil && !errors.Is(err2, ErrNotSupported) { + return nil, fmt.Errorf("getting map info from fdinfo: %w", err2) + } + + if err1 != nil && err2 != nil { + return nil, fmt.Errorf("ObjInfo and fdinfo both failed: objinfo: %w, fdinfo: %w", err1, err2) + } + + if platform.IsWindows && info.Tag == [8]uint8{} { + // Windows doesn't support the tag field, clear it for now. + pi.Tag = "" + } + + // Start with a clean struct for the second call, otherwise we may get EFAULT. + var info2 sys.ProgInfo + + makeSecondCall := false + + if info.NrMapIds > 0 { + pi.maps = make([]MapID, info.NrMapIds) + info2.NrMapIds = info.NrMapIds + info2.MapIds = sys.SlicePointer(pi.maps) + makeSecondCall = true + } else if haveProgramInfoMapIDs() == nil { + // This program really has no associated maps. + pi.maps = make([]MapID, 0) + } else { + // The kernel doesn't report associated maps. + pi.maps = nil + } + + // createdByUID and NrMapIds were introduced in the same kernel version. 
+ if pi.maps != nil && platform.IsLinux { + pi.createdByUID = info.CreatedByUid + pi.haveCreatedByUID = true + } + + if info.XlatedProgLen > 0 { + pi.insns = make([]byte, info.XlatedProgLen) + info2.XlatedProgLen = info.XlatedProgLen + info2.XlatedProgInsns = sys.SlicePointer(pi.insns) + makeSecondCall = true + } + + if info.NrLineInfo > 0 { + pi.lineInfos = make([]byte, btf.LineInfoSize*info.NrLineInfo) + info2.LineInfo = sys.SlicePointer(pi.lineInfos) + info2.LineInfoRecSize = btf.LineInfoSize + info2.NrLineInfo = info.NrLineInfo + pi.numLineInfos = info.NrLineInfo + makeSecondCall = true + } + + if info.NrFuncInfo > 0 { + pi.funcInfos = make([]byte, btf.FuncInfoSize*info.NrFuncInfo) + info2.FuncInfo = sys.SlicePointer(pi.funcInfos) + info2.FuncInfoRecSize = btf.FuncInfoSize + info2.NrFuncInfo = info.NrFuncInfo + pi.numFuncInfos = info.NrFuncInfo + makeSecondCall = true + } + + pi.jitedInfo.lineInfoRecSize = info.JitedLineInfoRecSize + if info.JitedProgLen > 0 { + pi.jitedInfo.numInsns = info.JitedProgLen + pi.jitedInfo.insns = make([]byte, info.JitedProgLen) + info2.JitedProgLen = info.JitedProgLen + info2.JitedProgInsns = sys.SlicePointer(pi.jitedInfo.insns) + makeSecondCall = true + } + + if info.NrJitedFuncLens > 0 { + pi.jitedInfo.numFuncLens = info.NrJitedFuncLens + pi.jitedInfo.funcLens = make([]uint32, info.NrJitedFuncLens) + info2.NrJitedFuncLens = info.NrJitedFuncLens + info2.JitedFuncLens = sys.SlicePointer(pi.jitedInfo.funcLens) + makeSecondCall = true + } + + if info.NrJitedLineInfo > 0 { + pi.jitedInfo.numLineInfos = info.NrJitedLineInfo + pi.jitedInfo.lineInfos = make([]uint64, info.NrJitedLineInfo) + info2.NrJitedLineInfo = info.NrJitedLineInfo + info2.JitedLineInfo = sys.SlicePointer(pi.jitedInfo.lineInfos) + info2.JitedLineInfoRecSize = info.JitedLineInfoRecSize + makeSecondCall = true + } + + if info.NrJitedKsyms > 0 { + pi.jitedInfo.numKsyms = info.NrJitedKsyms + pi.jitedInfo.ksyms = make([]uint64, info.NrJitedKsyms) + info2.JitedKsyms = 
sys.SlicePointer(pi.jitedInfo.ksyms) + info2.NrJitedKsyms = info.NrJitedKsyms + makeSecondCall = true + } + + if makeSecondCall { + if err := sys.ObjInfo(fd, &info2); err != nil { + return nil, err + } + } + + if info.XlatedProgLen > 0 && info2.XlatedProgInsns.IsNil() { + pi.restricted = true + pi.insns = nil + pi.lineInfos = nil + pi.funcInfos = nil + pi.jitedInfo = programJitedInfo{} + } + + return &pi, nil +} + +func readProgramInfoFromProc(fd *sys.FD, pi *ProgramInfo) error { + var progType uint32 + err := scanFdInfo(fd, map[string]interface{}{ + "prog_type": &progType, + "prog_tag": &pi.Tag, + "memlock": &pi.memlock, + }) + if errors.Is(err, ErrNotSupported) && !errors.Is(err, internal.ErrNotSupportedOnOS) { + return &internal.UnsupportedFeatureError{ + Name: "reading program info from /proc/self/fdinfo", + MinimumVersion: internal.Version{4, 10, 0}, + } + } + if err != nil { + return err + } + + pi.Type, err = ProgramTypeForPlatform(platform.Linux, progType) + if err != nil { + return fmt.Errorf("program type: %w", err) + } + + return nil +} + +// ID returns the program ID. +// +// Available from 4.13. +// +// The bool return value indicates whether this optional field is available. +func (pi *ProgramInfo) ID() (ProgramID, bool) { + return pi.id, pi.id > 0 +} + +// CreatedByUID returns the Uid that created the program. +// +// Available from 4.15. +// +// The bool return value indicates whether this optional field is available. +func (pi *ProgramInfo) CreatedByUID() (uint32, bool) { + return pi.createdByUID, pi.haveCreatedByUID +} + +// BTFID returns the BTF ID associated with the program. +// +// The ID is only valid as long as the associated program is kept alive. +// Available from 5.0. +// +// The bool return value indicates whether this optional field is available and +// populated. (The field may be available but not populated if the kernel +// supports the field but the program was loaded without BTF information.) 
+func (pi *ProgramInfo) BTFID() (btf.ID, bool) { + return pi.btf, pi.btf > 0 +} + +// btfSpec returns the BTF spec associated with the program. +func (pi *ProgramInfo) btfSpec() (*btf.Spec, error) { + id, ok := pi.BTFID() + if !ok { + return nil, fmt.Errorf("program created without BTF or unsupported kernel: %w", ErrNotSupported) + } + + h, err := btf.NewHandleFromID(id) + if err != nil { + return nil, fmt.Errorf("get BTF handle: %w", err) + } + defer h.Close() + + spec, err := h.Spec(nil) + if err != nil { + return nil, fmt.Errorf("get BTF spec: %w", err) + } + + return spec, nil +} + +// ErrRestrictedKernel is returned when kernel address information is restricted +// by kernel.kptr_restrict and/or net.core.bpf_jit_harden sysctls. +var ErrRestrictedKernel = internal.ErrRestrictedKernel + +// LineInfos returns the BTF line information of the program. +// +// Available from 5.0. +// +// Returns an error wrapping [ErrRestrictedKernel] if line infos are restricted +// by sysctls. +// +// Requires CAP_SYS_ADMIN or equivalent for reading BTF information. Returns +// ErrNotSupported if the program was created without BTF or if the kernel +// doesn't support the field. +func (pi *ProgramInfo) LineInfos() (btf.LineOffsets, error) { + if pi.restricted { + return nil, fmt.Errorf("line infos: %w", ErrRestrictedKernel) + } + + if len(pi.lineInfos) == 0 { + return nil, fmt.Errorf("insufficient permissions or unsupported kernel: %w", ErrNotSupported) + } + + spec, err := pi.btfSpec() + if err != nil { + return nil, err + } + + return btf.LoadLineInfos( + bytes.NewReader(pi.lineInfos), + internal.NativeEndian, + pi.numLineInfos, + spec, + ) +} + +// Instructions returns the 'xlated' instruction stream of the program +// after it has been verified and rewritten by the kernel. These instructions +// cannot be loaded back into the kernel as-is, this is mainly used for +// inspecting loaded programs for troubleshooting, dumping, etc. 
+// +// For example, map accesses are made to reference their kernel map IDs, +// not the FDs they had when the program was inserted. Note that before +// the introduction of bpf_insn_prepare_dump in kernel 4.16, xlated +// instructions were not sanitized, making the output even less reusable +// and less likely to round-trip or evaluate to the same program Tag. +// +// The first instruction is marked as a symbol using the Program's name. +// +// If available, the instructions will be annotated with metadata from the +// BTF. This includes line information and function information. Reading +// this metadata requires CAP_SYS_ADMIN or equivalent. If capability is +// unavailable, the instructions will be returned without metadata. +// +// Returns an error wrapping [ErrRestrictedKernel] if instructions are +// restricted by sysctls. +// +// Available from 4.13. Requires CAP_BPF or equivalent for plain instructions. +// Requires CAP_SYS_ADMIN for instructions with metadata. +func (pi *ProgramInfo) Instructions() (asm.Instructions, error) { + if platform.IsWindows && len(pi.insns) == 0 { + return nil, fmt.Errorf("read instructions: %w", internal.ErrNotSupportedOnOS) + } + + if pi.restricted { + return nil, fmt.Errorf("instructions: %w", ErrRestrictedKernel) + } + + // If the calling process is not BPF-capable or if the kernel doesn't + // support getting xlated instructions, the field will be zero. + if len(pi.insns) == 0 { + return nil, fmt.Errorf("insufficient permissions or unsupported kernel: %w", ErrNotSupported) + } + + r := bytes.NewReader(pi.insns) + insns, err := asm.AppendInstructions(nil, r, internal.NativeEndian, platform.Native) + if err != nil { + return nil, fmt.Errorf("unmarshaling instructions: %w", err) + } + + if pi.btf != 0 { + btfh, err := btf.NewHandleFromID(pi.btf) + if err != nil { + // Getting a BTF handle requires CAP_SYS_ADMIN, if not available we get an -EPERM. + // Ignore it and fall back to instructions without metadata. 
+ if !errors.Is(err, unix.EPERM) { + return nil, fmt.Errorf("unable to get BTF handle: %w", err) + } + } + + // If we have a BTF handle, we can use it to assign metadata to the instructions. + if btfh != nil { + defer btfh.Close() + + spec, err := btfh.Spec(nil) + if err != nil { + return nil, fmt.Errorf("unable to get BTF spec: %w", err) + } + + lineInfos, err := btf.LoadLineInfos( + bytes.NewReader(pi.lineInfos), + internal.NativeEndian, + pi.numLineInfos, + spec, + ) + if err != nil { + return nil, fmt.Errorf("parse line info: %w", err) + } + + funcInfos, err := btf.LoadFuncInfos( + bytes.NewReader(pi.funcInfos), + internal.NativeEndian, + pi.numFuncInfos, + spec, + ) + if err != nil { + return nil, fmt.Errorf("parse func info: %w", err) + } + + btf.AssignMetadataToInstructions(insns, funcInfos, lineInfos, btf.CORERelocationInfos{}) + } + } + + fn := btf.FuncMetadata(&insns[0]) + name := pi.Name + if fn != nil { + name = fn.Name + } + insns[0] = insns[0].WithSymbol(name) + + return insns, nil +} + +// JitedSize returns the size of the program's JIT-compiled machine code in +// bytes, which is the actual code executed on the host's CPU. This field +// requires the BPF JIT compiler to be enabled. +// +// Returns an error wrapping [ErrRestrictedKernel] if jited program size is +// restricted by sysctls. +// +// Available from 4.13. Reading this metadata requires CAP_BPF or equivalent. +func (pi *ProgramInfo) JitedSize() (uint32, error) { + if pi.restricted { + return 0, fmt.Errorf("jited size: %w", ErrRestrictedKernel) + } + + if pi.jitedSize == 0 { + return 0, fmt.Errorf("insufficient permissions, unsupported kernel, or JIT compiler disabled: %w", ErrNotSupported) + } + return pi.jitedSize, nil +} + +// TranslatedSize returns the size of the program's translated instructions in +// bytes, after it has been verified and rewritten by the kernel. +// +// Returns an error wrapping [ErrRestrictedKernel] if translated instructions +// are restricted by sysctls. 
+// +// Available from 4.13. Reading this metadata requires CAP_BPF or equivalent. +func (pi *ProgramInfo) TranslatedSize() (int, error) { + if pi.restricted { + return 0, fmt.Errorf("xlated size: %w", ErrRestrictedKernel) + } + + insns := len(pi.insns) + if insns == 0 { + return 0, fmt.Errorf("insufficient permissions or unsupported kernel: %w", ErrNotSupported) + } + return insns, nil +} + +// MapIDs returns the maps related to the program. +// +// Available from 4.15. +// +// The bool return value indicates whether this optional field is available. +func (pi *ProgramInfo) MapIDs() ([]MapID, bool) { + return pi.maps, pi.maps != nil +} + +// LoadTime returns when the program was loaded since boot time. +// +// Available from 4.15. +// +// The bool return value indicates whether this optional field is available. +func (pi *ProgramInfo) LoadTime() (time.Duration, bool) { + // loadTime and NrMapIds were introduced in the same kernel version. + return pi.loadTime, pi.loadTime > 0 +} + +// VerifiedInstructions returns the number verified instructions in the program. +// +// Available from 5.16. +// +// The bool return value indicates whether this optional field is available. +func (pi *ProgramInfo) VerifiedInstructions() (uint32, bool) { + return pi.verifiedInstructions, pi.verifiedInstructions > 0 +} + +// JitedKsymAddrs returns the ksym addresses of the BPF program, including its +// subprograms. The addresses correspond to their symbols in /proc/kallsyms. +// +// Available from 4.18. Note that before 5.x, this field can be empty for +// programs without subprograms (bpf2bpf calls). +// +// The bool return value indicates whether this optional field is available. +// +// When a kernel address can't fit into uintptr (which is usually the case when +// running 32 bit program on a 64 bit kernel), this returns an empty slice and +// a false. 
+func (pi *ProgramInfo) JitedKsymAddrs() ([]uintptr, bool) { + ksyms := make([]uintptr, 0, len(pi.jitedInfo.ksyms)) + if cap(ksyms) == 0 { + return ksyms, false + } + // Check if a kernel address fits into uintptr (it might not when + // using a 32 bit binary on a 64 bit kernel). This check should work + // with any kernel address, since they have 1s at the highest bits. + if a := pi.jitedInfo.ksyms[0]; uint64(uintptr(a)) != a { + return nil, false + } + for _, ksym := range pi.jitedInfo.ksyms { + ksyms = append(ksyms, uintptr(ksym)) + } + return ksyms, true +} + +// JitedInsns returns the JITed machine native instructions of the program. +// +// Available from 4.13. +// +// The bool return value indicates whether this optional field is available. +func (pi *ProgramInfo) JitedInsns() ([]byte, bool) { + return pi.jitedInfo.insns, len(pi.jitedInfo.insns) > 0 +} + +// JitedLineInfos returns the JITed line infos of the program. +// +// Available from 5.0. +// +// The bool return value indicates whether this optional field is available. +func (pi *ProgramInfo) JitedLineInfos() ([]uint64, bool) { + return pi.jitedInfo.lineInfos, len(pi.jitedInfo.lineInfos) > 0 +} + +// JitedFuncLens returns the insns length of each function in the JITed program. +// +// Available from 4.18. +// +// The bool return value indicates whether this optional field is available. +func (pi *ProgramInfo) JitedFuncLens() ([]uint32, bool) { + return pi.jitedInfo.funcLens, len(pi.jitedInfo.funcLens) > 0 +} + +// FuncInfos returns the offset and function information of all (sub)programs in +// a BPF program. +// +// Available from 5.0. +// +// Returns an error wrapping [ErrRestrictedKernel] if function information is +// restricted by sysctls. +// +// Requires CAP_SYS_ADMIN or equivalent for reading BTF information. Returns +// ErrNotSupported if the program was created without BTF or if the kernel +// doesn't support the field. 
+func (pi *ProgramInfo) FuncInfos() (btf.FuncOffsets, error) { + if pi.restricted { + return nil, fmt.Errorf("func infos: %w", ErrRestrictedKernel) + } + + if len(pi.funcInfos) == 0 { + return nil, fmt.Errorf("insufficient permissions or unsupported kernel: %w", ErrNotSupported) + } + + spec, err := pi.btfSpec() + if err != nil { + return nil, err + } + + return btf.LoadFuncInfos( + bytes.NewReader(pi.funcInfos), + internal.NativeEndian, + pi.numFuncInfos, + spec, + ) +} + +// ProgramInfo returns an approximate number of bytes allocated to this program. +// +// Available from 4.10. +// +// The bool return value indicates whether this optional field is available. +func (pi *ProgramInfo) Memlock() (uint64, bool) { + return pi.memlock, pi.memlock > 0 +} + +func scanFdInfo(fd *sys.FD, fields map[string]interface{}) error { + if platform.IsWindows { + return fmt.Errorf("read fdinfo: %w", internal.ErrNotSupportedOnOS) + } + + fh, err := os.Open(fmt.Sprintf("/proc/self/fdinfo/%d", fd.Int())) + if err != nil { + return err + } + defer fh.Close() + + if err := scanFdInfoReader(fh, fields); err != nil { + return fmt.Errorf("%s: %w", fh.Name(), err) + } + return nil +} + +func scanFdInfoReader(r io.Reader, fields map[string]interface{}) error { + var ( + scanner = bufio.NewScanner(r) + scanned int + reader bytes.Reader + ) + + for scanner.Scan() { + key, rest, found := bytes.Cut(scanner.Bytes(), []byte(":")) + if !found { + // Line doesn't contain a colon, skip. + continue + } + field, ok := fields[string(key)] + if !ok { + continue + } + // If field already contains a non-zero value, don't overwrite it with fdinfo. + if !zero(field) { + scanned++ + continue + } + + // Cut the \t following the : as well as any potential trailing whitespace. 
+ rest = bytes.TrimSpace(rest) + + reader.Reset(rest) + if n, err := fmt.Fscan(&reader, field); err != nil || n != 1 { + return fmt.Errorf("can't parse field %s: %v", key, err) + } + + scanned++ + } + + if err := scanner.Err(); err != nil { + return fmt.Errorf("scanning fdinfo: %w", err) + } + + if len(fields) > 0 && scanned == 0 { + return ErrNotSupported + } + + return nil +} + +func zero(arg any) bool { + v := reflect.ValueOf(arg) + + // Unwrap pointers and interfaces. + for v.Kind() == reflect.Pointer || + v.Kind() == reflect.Interface { + v = v.Elem() + } + + return v.IsZero() +} + +// EnableStats starts collecting runtime statistics of eBPF programs, like the +// amount of program executions and the cumulative runtime. +// +// Specify a BPF_STATS_* constant to select which statistics to collect, like +// [unix.BPF_STATS_RUN_TIME]. Closing the returned [io.Closer] will stop +// collecting statistics. +// +// Collecting statistics may have a performance impact. +// +// Requires at least Linux 5.8. +func EnableStats(which uint32) (io.Closer, error) { + fd, err := sys.EnableStats(&sys.EnableStatsAttr{ + Type: which, + }) + if err != nil { + return nil, err + } + return fd, nil +} + +var haveProgramInfoMapIDs = internal.NewFeatureTest("map IDs in program info", func() error { + if platform.IsWindows { + // We only support efW versions which have this feature, no need to probe. + return nil + } + + prog, err := progLoad(asm.Instructions{ + asm.LoadImm(asm.R0, 0, asm.DWord), + asm.Return(), + }, SocketFilter, "MIT") + if err != nil { + return err + } + defer prog.Close() + + err = sys.ObjInfo(prog, &sys.ProgInfo{ + // NB: Don't need to allocate MapIds since the program isn't using + // any maps. + NrMapIds: 1, + }) + if errors.Is(err, unix.EINVAL) { + // Most likely the syscall doesn't exist. + return internal.ErrNotSupported + } + if errors.Is(err, unix.E2BIG) { + // We've hit check_uarg_tail_zero on older kernels. 
+ return internal.ErrNotSupported + } + + return err +}, "4.15", "windows:0.21.0") diff --git a/vendor/github.com/cilium/ebpf/internal/deque.go b/vendor/github.com/cilium/ebpf/internal/deque.go new file mode 100644 index 000000000..ed113ddd7 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/deque.go @@ -0,0 +1,88 @@ +package internal + +import "math/bits" + +// Deque implements a double ended queue. +type Deque[T any] struct { + elems []T + read, write uint64 + mask uint64 +} + +// Reset clears the contents of the deque while retaining the backing buffer. +func (dq *Deque[T]) Reset() { + var zero T + + for i := dq.read; i < dq.write; i++ { + dq.elems[i&dq.mask] = zero + } + + dq.read, dq.write = 0, 0 +} + +func (dq *Deque[T]) Empty() bool { + return dq.read == dq.write +} + +// Push adds an element to the end. +func (dq *Deque[T]) Push(e T) { + dq.Grow(1) + dq.elems[dq.write&dq.mask] = e + dq.write++ +} + +// Shift returns the first element or the zero value. +func (dq *Deque[T]) Shift() T { + var zero T + + if dq.Empty() { + return zero + } + + index := dq.read & dq.mask + t := dq.elems[index] + dq.elems[index] = zero + dq.read++ + return t +} + +// Pop returns the last element or the zero value. +func (dq *Deque[T]) Pop() T { + var zero T + + if dq.Empty() { + return zero + } + + dq.write-- + index := dq.write & dq.mask + t := dq.elems[index] + dq.elems[index] = zero + return t +} + +// Grow the deque's capacity, if necessary, to guarantee space for another n +// elements. +func (dq *Deque[T]) Grow(n int) { + have := dq.write - dq.read + need := have + uint64(n) + if need < have { + panic("overflow") + } + if uint64(len(dq.elems)) >= need { + return + } + + // Round up to the new power of two which is at least 8. 
+ // See https://jameshfisher.com/2018/03/30/round-up-power-2/ + capacity := max(1<<(64-bits.LeadingZeros64(need-1)), 8) + + elems := make([]T, have, capacity) + pivot := dq.read & dq.mask + copied := copy(elems, dq.elems[pivot:]) + copy(elems[copied:], dq.elems[:pivot]) + + dq.elems = elems[:capacity] + dq.mask = uint64(capacity) - 1 + dq.read, dq.write = 0, have +} diff --git a/vendor/github.com/cilium/ebpf/internal/efw/enums.go b/vendor/github.com/cilium/ebpf/internal/efw/enums.go new file mode 100644 index 000000000..71320b631 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/efw/enums.go @@ -0,0 +1,65 @@ +//go:build windows + +package efw + +import ( + "syscall" + "unsafe" + + "golang.org/x/sys/windows" +) + +/* +Converts an attach type enum into a GUID. + + ebpf_result_t ebpf_get_ebpf_attach_type( + bpf_attach_type_t bpf_attach_type, + _Out_ ebpf_attach_type_t* ebpf_attach_type_t *ebpf_attach_type) +*/ +var ebpfGetEbpfAttachTypeProc = newProc("ebpf_get_ebpf_attach_type") + +func EbpfGetEbpfAttachType(attachType uint32) (windows.GUID, error) { + addr, err := ebpfGetEbpfAttachTypeProc.Find() + if err != nil { + return windows.GUID{}, err + } + + var attachTypeGUID windows.GUID + err = errorResult(syscall.SyscallN(addr, + uintptr(attachType), + uintptr(unsafe.Pointer(&attachTypeGUID)), + )) + return attachTypeGUID, err +} + +/* +Retrieve a program type given a GUID. + + bpf_prog_type_t ebpf_get_bpf_program_type(_In_ const ebpf_program_type_t* program_type) +*/ +var ebpfGetBpfProgramTypeProc = newProc("ebpf_get_bpf_program_type") + +func EbpfGetBpfProgramType(programType windows.GUID) (uint32, error) { + addr, err := ebpfGetBpfProgramTypeProc.Find() + if err != nil { + return 0, err + } + + return uint32Result(syscall.SyscallN(addr, uintptr(unsafe.Pointer(&programType)))), nil +} + +/* +Retrieve an attach type given a GUID. 
+ + bpf_attach_type_t ebpf_get_bpf_attach_type(_In_ const ebpf_attach_type_t* ebpf_attach_type) +*/ +var ebpfGetBpfAttachTypeProc = newProc("ebpf_get_bpf_attach_type") + +func EbpfGetBpfAttachType(attachType windows.GUID) (uint32, error) { + addr, err := ebpfGetBpfAttachTypeProc.Find() + if err != nil { + return 0, err + } + + return uint32Result(syscall.SyscallN(addr, uintptr(unsafe.Pointer(&attachType)))), nil +} diff --git a/vendor/github.com/cilium/ebpf/internal/efw/error_reporting.go b/vendor/github.com/cilium/ebpf/internal/efw/error_reporting.go new file mode 100644 index 000000000..83b9a265e --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/efw/error_reporting.go @@ -0,0 +1,155 @@ +//go:build windows + +package efw + +import ( + "errors" + "fmt" + "os" + "syscall" + "testing" + + "golang.org/x/sys/windows" +) + +func init() { + if !testing.Testing() { + return + } + + if isDebuggerPresent() { + return + } + + if err := configureCRTErrorReporting(); err != nil { + fmt.Fprintln(os.Stderr, "WARNING: Could not configure CRT error reporting, tests may hang:", err) + } +} + +var errErrorReportingAlreadyConfigured = errors.New("error reporting already configured") + +// Configure built-in error reporting of the C runtime library. +// +// The C runtime emits assertion failures into a graphical message box by default. +// This causes a hang in CI environments. This function configures the CRT to +// log to stderr instead. +func configureCRTErrorReporting() error { + const ucrtDebug = "ucrtbased.dll" + + // Constants from crtdbg.h + // + // See https://doxygen.reactos.org/da/d40/crt_2crtdbg_8h_source.html + const ( + _CRT_ERROR = 1 + _CRT_ASSERT = 2 + _CRTDBG_MODE_FILE = 0x1 + _CRTDBG_MODE_WNDW = 0x4 + _CRTDBG_HFILE_ERROR = -2 + _CRTDBG_FILE_STDERR = -4 + ) + + // Load the efW API to trigger loading the CRT. This may fail, in which case + // we can't figure out which CRT is being used. + // In that case we rely on the error bubbling up via some other path. 
+ _ = module.Load() + + ucrtHandle, err := syscall.UTF16PtrFromString(ucrtDebug) + if err != nil { + return err + } + + var handle windows.Handle + err = windows.GetModuleHandleEx(0, ucrtHandle, &handle) + if errors.Is(err, windows.ERROR_MOD_NOT_FOUND) { + // Loading the ebpf api did not pull in the debug UCRT, so there is + // nothing to configure. + return nil + } else if err != nil { + return err + } + defer windows.FreeLibrary(handle) + + setReportModeAddr, err := windows.GetProcAddress(handle, "_CrtSetReportMode") + if err != nil { + return err + } + + setReportMode := func(reportType int, reportMode int) (int, error) { + // See https://learn.microsoft.com/en-us/cpp/c-runtime-library/reference/crtsetreportmode?view=msvc-170 + r1, _, err := syscall.SyscallN(setReportModeAddr, uintptr(reportType), uintptr(reportMode)) + if int(r1) == -1 { + return 0, fmt.Errorf("set report mode for type %d: %w", reportType, err) + } + return int(r1), nil + } + + setReportFileAddr, err := windows.GetProcAddress(handle, "_CrtSetReportFile") + if err != nil { + return err + } + + setReportFile := func(reportType int, reportFile int) (int, error) { + // See https://learn.microsoft.com/en-us/cpp/c-runtime-library/reference/crtsetreportfile?view=msvc-170 + r1, _, err := syscall.SyscallN(setReportFileAddr, uintptr(reportType), uintptr(reportFile)) + if int(r1) == _CRTDBG_HFILE_ERROR { + return 0, fmt.Errorf("set report file for type %d: %w", reportType, err) + } + return int(r1), nil + } + + reportToFile := func(reportType, defaultMode int) error { + oldMode, err := setReportMode(reportType, _CRTDBG_MODE_FILE) + if err != nil { + return err + } + + if oldMode != defaultMode { + // Attempt to restore old mode if it was different from the expected default. 
+ _, _ = setReportMode(reportType, oldMode) + return errErrorReportingAlreadyConfigured + } + + oldFile, err := setReportFile(reportType, _CRTDBG_FILE_STDERR) + if err != nil { + return err + } + + if oldFile != -1 { + // Attempt to restore old file if it was different from the expected default. + _, _ = setReportFile(reportType, oldFile) + return errErrorReportingAlreadyConfigured + } + + return nil + } + + // See https://learn.microsoft.com/en-us/cpp/c-runtime-library/reference/crtsetreportmode?view=msvc-170#remarks + // for defaults. + if err := reportToFile(_CRT_ASSERT, _CRTDBG_MODE_WNDW); err != nil { + return err + } + + if err := reportToFile(_CRT_ERROR, _CRTDBG_MODE_WNDW); err != nil { + return err + } + + return nil +} + +// isDebuggerPresent returns true if the current process is being debugged. +// +// See https://learn.microsoft.com/en-us/windows/win32/api/debugapi/nf-debugapi-isdebuggerpresent +func isDebuggerPresent() bool { + kernel32Handle, err := windows.LoadLibrary("kernel32.dll") + if err != nil { + return false + } + + isDebuggerPresentAddr, err := windows.GetProcAddress(kernel32Handle, "IsDebuggerPresent") + if err != nil { + return false + } + + r1, _, _ := syscall.SyscallN(isDebuggerPresentAddr) + return r1 != 0 +} diff --git a/vendor/github.com/cilium/ebpf/internal/efw/fd.go b/vendor/github.com/cilium/ebpf/internal/efw/fd.go new file mode 100644 index 000000000..b0d0bcdd4 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/efw/fd.go @@ -0,0 +1,34 @@ +//go:build windows + +package efw + +import ( + "syscall" + "unsafe" +) + +// ebpf_result_t ebpf_close_fd(fd_t fd) +var ebpfCloseFdProc = newProc("ebpf_close_fd") + +func EbpfCloseFd(fd int) error { + addr, err := ebpfCloseFdProc.Find() + if err != nil { + return err + } + + return errorResult(syscall.SyscallN(addr, uintptr(fd))) +} + +// ebpf_result_t ebpf_duplicate_fd(fd_t fd, _Out_ fd_t* dup) +var ebpfDuplicateFdProc = newProc("ebpf_duplicate_fd") + +func EbpfDuplicateFd(fd int) (int, 
error) { + addr, err := ebpfDuplicateFdProc.Find() + if err != nil { + return -1, err + } + + var dup FD + err = errorResult(syscall.SyscallN(addr, uintptr(fd), uintptr(unsafe.Pointer(&dup)))) + return int(dup), err +} diff --git a/vendor/github.com/cilium/ebpf/internal/efw/map.go b/vendor/github.com/cilium/ebpf/internal/efw/map.go new file mode 100644 index 000000000..82f510fef --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/efw/map.go @@ -0,0 +1,109 @@ +//go:build windows + +package efw + +import ( + "runtime" + "syscall" + "unsafe" + + "golang.org/x/sys/windows" +) + +/* +ebpf_ring_buffer_map_map_buffer( + + fd_t map_fd, + _Outptr_result_maybenull_ void** consumer, + _Outptr_result_maybenull_ const void** producer, + _Outptr_result_buffer_maybenull_(*data_size) const uint8_t** data, + _Out_ size_t* data_size) EBPF_NO_EXCEPT; +*/ +var ebpfRingBufferMapMapBufferProc = newProc("ebpf_ring_buffer_map_map_buffer") + +func EbpfRingBufferMapMapBuffer(mapFd int) (consumer, producer, data *uint8, dataLen Size, _ error) { + addr, err := ebpfRingBufferMapMapBufferProc.Find() + if err != nil { + return nil, nil, nil, 0, err + } + + err = errorResult(syscall.SyscallN(addr, + uintptr(mapFd), + uintptr(unsafe.Pointer(&consumer)), + uintptr(unsafe.Pointer(&producer)), + uintptr(unsafe.Pointer(&data)), + uintptr(unsafe.Pointer(&dataLen)), + )) + if err != nil { + return nil, nil, nil, 0, err + } + + return consumer, producer, data, dataLen, nil +} + +/* +ebpf_ring_buffer_map_unmap_buffer( + + fd_t map_fd, _In_ void* consumer, _In_ const void* producer, _In_ const void* data) EBPF_NO_EXCEPT; +*/ +var ebpfRingBufferMapUnmapBufferProc = newProc("ebpf_ring_buffer_map_unmap_buffer") + +func EbpfRingBufferMapUnmapBuffer(mapFd int, consumer, producer, data *uint8) error { + addr, err := ebpfRingBufferMapUnmapBufferProc.Find() + if err != nil { + return err + } + + return errorResult(syscall.SyscallN(addr, + uintptr(mapFd), + uintptr(unsafe.Pointer(consumer)), + 
uintptr(unsafe.Pointer(producer)), + uintptr(unsafe.Pointer(data)), + )) +} + +/* +ebpf_result_t ebpf_map_set_wait_handle( + + fd_t map_fd, + uint64_t index, + ebpf_handle_t handle) +*/ +var ebpfMapSetWaitHandleProc = newProc("ebpf_map_set_wait_handle") + +func EbpfMapSetWaitHandle(mapFd int, index uint64, handle windows.Handle) error { + addr, err := ebpfMapSetWaitHandleProc.Find() + if err != nil { + return err + } + + return errorResult(syscall.SyscallN(addr, + uintptr(mapFd), + uintptr(index), + uintptr(handle), + )) +} + +/* +ebpf_result_t ebpf_ring_buffer_map_write( + + fd_t ring_buffer_map_fd, + const void* data, + size_t data_length) +*/ +var ebpfRingBufferMapWriteProc = newProc("ebpf_ring_buffer_map_write") + +func EbpfRingBufferMapWrite(ringBufferMapFd int, data []byte) error { + addr, err := ebpfRingBufferMapWriteProc.Find() + if err != nil { + return err + } + + err = errorResult(syscall.SyscallN(addr, + uintptr(ringBufferMapFd), + uintptr(unsafe.Pointer(&data[0])), + uintptr(len(data)), + )) + runtime.KeepAlive(data) + return err +} diff --git a/vendor/github.com/cilium/ebpf/internal/efw/module.go b/vendor/github.com/cilium/ebpf/internal/efw/module.go new file mode 100644 index 000000000..606d83930 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/efw/module.go @@ -0,0 +1,36 @@ +//go:build windows + +// Package efw contains support code for eBPF for Windows. +package efw + +import ( + "golang.org/x/sys/windows" +) + +// module is the global handle for the eBPF for Windows user-space API. +var module = windows.NewLazyDLL("ebpfapi.dll") + +// FD is the equivalent of fd_t. +// +// See https://github.com/microsoft/ebpf-for-windows/blob/54632eb360c560ebef2f173be1a4a4625d540744/include/ebpf_api.h#L24 +type FD int32 + +// Size is the equivalent of size_t. +// +// This is correct on amd64 and arm64 according to tests on godbolt.org. +type Size uint64 + +// Int is the equivalent of int on MSVC (am64, arm64) and MinGW (gcc, clang). 
+type Int int32 + +// ObjectType is the equivalent of ebpf_object_type_t. +// +// See https://github.com/microsoft/ebpf-for-windows/blob/44f5de09ec0f3f7ad176c00a290c1cb7106cdd5e/include/ebpf_core_structs.h#L41 +type ObjectType uint32 + +const ( + EBPF_OBJECT_UNKNOWN ObjectType = iota + EBPF_OBJECT_MAP + EBPF_OBJECT_LINK + EBPF_OBJECT_PROGRAM +) diff --git a/vendor/github.com/cilium/ebpf/internal/efw/native.go b/vendor/github.com/cilium/ebpf/internal/efw/native.go new file mode 100644 index 000000000..04f796abb --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/efw/native.go @@ -0,0 +1,44 @@ +//go:build windows + +package efw + +import ( + "syscall" + "unsafe" + + "golang.org/x/sys/windows" +) + +/* +ebpf_result_t ebpf_object_load_native_by_fds( + + _In_z_ const char* file_name, + _Inout_ size_t* count_of_maps, + _Out_writes_opt_(count_of_maps) fd_t* map_fds, + _Inout_ size_t* count_of_programs, + _Out_writes_opt_(count_of_programs) fd_t* program_fds) +*/ +var ebpfObjectLoadNativeByFdsProc = newProc("ebpf_object_load_native_by_fds") + +func EbpfObjectLoadNativeFds(fileName string, mapFds []FD, programFds []FD) (int, int, error) { + addr, err := ebpfObjectLoadNativeByFdsProc.Find() + if err != nil { + return 0, 0, err + } + + fileBytes, err := windows.ByteSliceFromString(fileName) + if err != nil { + return 0, 0, err + } + + countOfMaps := Size(len(mapFds)) + countOfPrograms := Size(len(programFds)) + err = errorResult(syscall.SyscallN(addr, + uintptr(unsafe.Pointer(&fileBytes[0])), + uintptr(unsafe.Pointer(&countOfMaps)), + uintptr(unsafe.Pointer(&mapFds[0])), + uintptr(unsafe.Pointer(&countOfPrograms)), + uintptr(unsafe.Pointer(&programFds[0])), + )) + return int(countOfMaps), int(countOfPrograms), err +} diff --git a/vendor/github.com/cilium/ebpf/internal/efw/object.go b/vendor/github.com/cilium/ebpf/internal/efw/object.go new file mode 100644 index 000000000..560e2f09b --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/efw/object.go @@ -0,0 +1,117 
@@ +//go:build windows + +package efw + +import ( + "syscall" + "unsafe" + + "golang.org/x/sys/windows" +) + +// https://github.com/microsoft/ebpf-for-windows/blob/9d9003c39c3fd75be5225ac0fce30077d6bf0604/include/ebpf_core_structs.h#L15 +const _EBPF_MAX_PIN_PATH_LENGTH = 256 + +/* +Retrieve object info and type from a fd. + + ebpf_result_t ebpf_object_get_info_by_fd( + fd_t bpf_fd, + _Inout_updates_bytes_to_opt_(*info_size, *info_size) void* info, + _Inout_opt_ uint32_t* info_size, + _Out_opt_ ebpf_object_type_t* type) +*/ +var ebpfObjectGetInfoByFdProc = newProc("ebpf_object_get_info_by_fd") + +func EbpfObjectGetInfoByFd(fd int, info unsafe.Pointer, info_size *uint32) (ObjectType, error) { + addr, err := ebpfObjectGetInfoByFdProc.Find() + if err != nil { + return 0, err + } + + var objectType ObjectType + err = errorResult(syscall.SyscallN(addr, + uintptr(fd), + uintptr(info), + uintptr(unsafe.Pointer(info_size)), + uintptr(unsafe.Pointer(&objectType)), + )) + return objectType, err +} + +// ebpf_result_t ebpf_object_unpin(_In_z_ const char* path) +var ebpfObjectUnpinProc = newProc("ebpf_object_unpin") + +func EbpfObjectUnpin(path string) error { + addr, err := ebpfObjectUnpinProc.Find() + if err != nil { + return err + } + + pathBytes, err := windows.ByteSliceFromString(path) + if err != nil { + return err + } + + return errorResult(syscall.SyscallN(addr, uintptr(unsafe.Pointer(&pathBytes[0])))) +} + +/* +Retrieve the next pinned object path. 
+ + ebpf_result_t ebpf_get_next_pinned_object_path( + _In_opt_z_ const char* start_path, + _Out_writes_z_(next_path_len) char* next_path, + size_t next_path_len, + _Inout_opt_ ebpf_object_type_t* type) +*/ +var ebpfGetNextPinnedObjectPath = newProc("ebpf_get_next_pinned_object_path") + +func EbpfGetNextPinnedObjectPath(startPath string, objectType ObjectType) (string, ObjectType, error) { + addr, err := ebpfGetNextPinnedObjectPath.Find() + if err != nil { + return "", 0, err + } + + ptr, err := windows.BytePtrFromString(startPath) + if err != nil { + return "", 0, err + } + + tmp := make([]byte, _EBPF_MAX_PIN_PATH_LENGTH) + err = errorResult(syscall.SyscallN(addr, + uintptr(unsafe.Pointer(ptr)), + uintptr(unsafe.Pointer(&tmp[0])), + uintptr(len(tmp)), + uintptr(unsafe.Pointer(&objectType)), + )) + return windows.ByteSliceToString(tmp), objectType, err +} + +/* +Canonicalize a path using filesystem canonicalization rules. + + _Must_inspect_result_ ebpf_result_t + ebpf_canonicalize_pin_path(_Out_writes_(output_size) char* output, size_t output_size, _In_z_ const char* input) +*/ +var ebpfCanonicalizePinPath = newProc("ebpf_canonicalize_pin_path") + +func EbpfCanonicalizePinPath(input string) (string, error) { + addr, err := ebpfCanonicalizePinPath.Find() + if err != nil { + return "", err + } + + inputBytes, err := windows.ByteSliceFromString(input) + if err != nil { + return "", err + } + + output := make([]byte, _EBPF_MAX_PIN_PATH_LENGTH) + err = errorResult(syscall.SyscallN(addr, + uintptr(unsafe.Pointer(&output[0])), + uintptr(len(output)), + uintptr(unsafe.Pointer(&inputBytes[0])), + )) + return windows.ByteSliceToString(output), err +} diff --git a/vendor/github.com/cilium/ebpf/internal/efw/proc.go b/vendor/github.com/cilium/ebpf/internal/efw/proc.go new file mode 100644 index 000000000..81329905f --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/efw/proc.go @@ -0,0 +1,50 @@ +//go:build windows + +package efw + +import ( + "errors" + "fmt" + "syscall" 
+ + "golang.org/x/sys/windows" +) + +/* +The BPF syscall wrapper which is ABI compatible with Linux. + + int bpf(int cmd, union bpf_attr* attr, unsigned int size) +*/ +var BPF = newProc("bpf") + +type proc struct { + proc *windows.LazyProc +} + +func newProc(name string) proc { + return proc{module.NewProc(name)} +} + +func (p proc) Find() (uintptr, error) { + if err := p.proc.Find(); err != nil { + if errors.Is(err, windows.ERROR_MOD_NOT_FOUND) { + return 0, fmt.Errorf("load %s: not found", module.Name) + } + return 0, err + } + return p.proc.Addr(), nil +} + +// uint32Result wraps a function which returns a uint32_t. +func uint32Result(r1, _ uintptr, _ syscall.Errno) uint32 { + return uint32(r1) +} + +// errorResult wraps a function which returns ebpf_result_t. +func errorResult(r1, _ uintptr, errNo syscall.Errno) error { + err := resultToError(Result(r1)) + if err != nil && errNo != 0 { + return fmt.Errorf("%w (errno: %v)", err, errNo) + } + return err +} diff --git a/vendor/github.com/cilium/ebpf/internal/efw/program.go b/vendor/github.com/cilium/ebpf/internal/efw/program.go new file mode 100644 index 000000000..6202acf32 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/efw/program.go @@ -0,0 +1,39 @@ +//go:build windows + +package efw + +import ( + "syscall" + "unsafe" + + "golang.org/x/sys/windows" +) + +/* +Attach a program. 
+ + ebpf_result_t ebpf_program_attach_by_fds( + fd_t program_fd, + _In_opt_ const ebpf_attach_type_t* attach_type, + _In_reads_bytes_opt_(attach_parameters_size) void* attach_parameters, + size_t attach_parameters_size, + _Out_ fd_t* link) +*/ +var ebpfProgramAttachByFdsProc = newProc("ebpf_program_attach_by_fds") + +func EbpfProgramAttachFds(fd int, attachType windows.GUID, params unsafe.Pointer, params_size uintptr) (int, error) { + addr, err := ebpfProgramAttachByFdsProc.Find() + if err != nil { + return 0, err + } + + var link FD + err = errorResult(syscall.SyscallN(addr, + uintptr(fd), + uintptr(unsafe.Pointer(&attachType)), + uintptr(params), + params_size, + uintptr(unsafe.Pointer(&link)), + )) + return int(link), err +} diff --git a/vendor/github.com/cilium/ebpf/internal/efw/result.go b/vendor/github.com/cilium/ebpf/internal/efw/result.go new file mode 100644 index 000000000..4c68da931 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/efw/result.go @@ -0,0 +1,57 @@ +//go:build windows + +package efw + +// See https://github.com/microsoft/ebpf-for-windows/blob/main/include/ebpf_result.h +type Result int32 + +//go:generate go tool stringer -tags windows -output result_string_windows.go -type=Result + +const ( + EBPF_SUCCESS Result = iota + EBPF_VERIFICATION_FAILED + EBPF_JIT_COMPILATION_FAILED + EBPF_PROGRAM_LOAD_FAILED + EBPF_INVALID_FD + EBPF_INVALID_OBJECT + EBPF_INVALID_ARGUMENT + EBPF_OBJECT_NOT_FOUND + EBPF_OBJECT_ALREADY_EXISTS + EBPF_FILE_NOT_FOUND + EBPF_ALREADY_PINNED + EBPF_NOT_PINNED + EBPF_NO_MEMORY + EBPF_PROGRAM_TOO_LARGE + EBPF_RPC_EXCEPTION + EBPF_ALREADY_INITIALIZED + EBPF_ELF_PARSING_FAILED + EBPF_FAILED + EBPF_OPERATION_NOT_SUPPORTED + EBPF_KEY_NOT_FOUND + EBPF_ACCESS_DENIED + EBPF_BLOCKED_BY_POLICY + EBPF_ARITHMETIC_OVERFLOW + EBPF_EXTENSION_FAILED_TO_LOAD + EBPF_INSUFFICIENT_BUFFER + EBPF_NO_MORE_KEYS + EBPF_KEY_ALREADY_EXISTS + EBPF_NO_MORE_TAIL_CALLS + EBPF_PENDING + EBPF_OUT_OF_SPACE + EBPF_CANCELED + EBPF_INVALID_POINTER + 
EBPF_TIMEOUT + EBPF_STALE_ID + EBPF_INVALID_STATE +) + +func (r Result) Error() string { + return r.String() +} + +func resultToError(res Result) error { + if res == EBPF_SUCCESS { + return nil + } + return res +} diff --git a/vendor/github.com/cilium/ebpf/internal/efw/result_string_windows.go b/vendor/github.com/cilium/ebpf/internal/efw/result_string_windows.go new file mode 100644 index 000000000..1e55b5186 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/efw/result_string_windows.go @@ -0,0 +1,57 @@ +// Code generated by "stringer -tags windows -output result_string_windows.go -type=Result"; DO NOT EDIT. + +package efw + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[EBPF_SUCCESS-0] + _ = x[EBPF_VERIFICATION_FAILED-1] + _ = x[EBPF_JIT_COMPILATION_FAILED-2] + _ = x[EBPF_PROGRAM_LOAD_FAILED-3] + _ = x[EBPF_INVALID_FD-4] + _ = x[EBPF_INVALID_OBJECT-5] + _ = x[EBPF_INVALID_ARGUMENT-6] + _ = x[EBPF_OBJECT_NOT_FOUND-7] + _ = x[EBPF_OBJECT_ALREADY_EXISTS-8] + _ = x[EBPF_FILE_NOT_FOUND-9] + _ = x[EBPF_ALREADY_PINNED-10] + _ = x[EBPF_NOT_PINNED-11] + _ = x[EBPF_NO_MEMORY-12] + _ = x[EBPF_PROGRAM_TOO_LARGE-13] + _ = x[EBPF_RPC_EXCEPTION-14] + _ = x[EBPF_ALREADY_INITIALIZED-15] + _ = x[EBPF_ELF_PARSING_FAILED-16] + _ = x[EBPF_FAILED-17] + _ = x[EBPF_OPERATION_NOT_SUPPORTED-18] + _ = x[EBPF_KEY_NOT_FOUND-19] + _ = x[EBPF_ACCESS_DENIED-20] + _ = x[EBPF_BLOCKED_BY_POLICY-21] + _ = x[EBPF_ARITHMETIC_OVERFLOW-22] + _ = x[EBPF_EXTENSION_FAILED_TO_LOAD-23] + _ = x[EBPF_INSUFFICIENT_BUFFER-24] + _ = x[EBPF_NO_MORE_KEYS-25] + _ = x[EBPF_KEY_ALREADY_EXISTS-26] + _ = x[EBPF_NO_MORE_TAIL_CALLS-27] + _ = x[EBPF_PENDING-28] + _ = x[EBPF_OUT_OF_SPACE-29] + _ = x[EBPF_CANCELED-30] + _ = x[EBPF_INVALID_POINTER-31] + _ = x[EBPF_TIMEOUT-32] + _ = x[EBPF_STALE_ID-33] + _ = x[EBPF_INVALID_STATE-34] +} + +const _Result_name 
= "EBPF_SUCCESSEBPF_VERIFICATION_FAILEDEBPF_JIT_COMPILATION_FAILEDEBPF_PROGRAM_LOAD_FAILEDEBPF_INVALID_FDEBPF_INVALID_OBJECTEBPF_INVALID_ARGUMENTEBPF_OBJECT_NOT_FOUNDEBPF_OBJECT_ALREADY_EXISTSEBPF_FILE_NOT_FOUNDEBPF_ALREADY_PINNEDEBPF_NOT_PINNEDEBPF_NO_MEMORYEBPF_PROGRAM_TOO_LARGEEBPF_RPC_EXCEPTIONEBPF_ALREADY_INITIALIZEDEBPF_ELF_PARSING_FAILEDEBPF_FAILEDEBPF_OPERATION_NOT_SUPPORTEDEBPF_KEY_NOT_FOUNDEBPF_ACCESS_DENIEDEBPF_BLOCKED_BY_POLICYEBPF_ARITHMETIC_OVERFLOWEBPF_EXTENSION_FAILED_TO_LOADEBPF_INSUFFICIENT_BUFFEREBPF_NO_MORE_KEYSEBPF_KEY_ALREADY_EXISTSEBPF_NO_MORE_TAIL_CALLSEBPF_PENDINGEBPF_OUT_OF_SPACEEBPF_CANCELEDEBPF_INVALID_POINTEREBPF_TIMEOUTEBPF_STALE_IDEBPF_INVALID_STATE" + +var _Result_index = [...]uint16{0, 12, 36, 63, 87, 102, 121, 142, 163, 189, 208, 227, 242, 256, 278, 296, 320, 343, 354, 382, 400, 418, 440, 464, 493, 517, 534, 557, 580, 592, 609, 622, 642, 654, 667, 685} + +func (i Result) String() string { + if i < 0 || i >= Result(len(_Result_index)-1) { + return "Result(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _Result_name[_Result_index[i]:_Result_index[i+1]] +} diff --git a/vendor/github.com/cilium/ebpf/internal/efw/structs.go b/vendor/github.com/cilium/ebpf/internal/efw/structs.go new file mode 100644 index 000000000..558dbb865 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/efw/structs.go @@ -0,0 +1,36 @@ +//go:build windows + +package efw + +import "golang.org/x/sys/windows" + +// https://github.com/microsoft/ebpf-for-windows/blob/95267a53b26c68a94145d1731e2a4c8b546034c3/include/ebpf_structs.h#L366 +const _BPF_OBJ_NAME_LEN = 64 + +// See https://github.com/microsoft/ebpf-for-windows/blob/95267a53b26c68a94145d1731e2a4c8b546034c3/include/ebpf_structs.h#L372-L386 +type BpfMapInfo struct { + _ uint32 ///< Map ID. + _ uint32 ///< Type of map. + _ uint32 ///< Size in bytes of a map key. + _ uint32 ///< Size in bytes of a map value. + _ uint32 ///< Maximum number of entries allowed in the map. 
+ Name [_BPF_OBJ_NAME_LEN]byte ///< Null-terminated map name. + _ uint32 ///< Map flags. + + _ uint32 ///< ID of inner map template. + _ uint32 ///< Number of pinned paths. +} + +// See https://github.com/microsoft/ebpf-for-windows/blob/95267a53b26c68a94145d1731e2a4c8b546034c3/include/ebpf_structs.h#L396-L410 +type BpfProgInfo struct { + _ uint32 ///< Program ID. + _ uint32 ///< Program type, if a cross-platform type. + _ uint32 ///< Number of maps associated with this program. + _ uintptr ///< Pointer to caller-allocated array to fill map IDs into. + Name [_BPF_OBJ_NAME_LEN]byte ///< Null-terminated map name. + + _ windows.GUID ///< Program type UUID. + _ windows.GUID ///< Attach type UUID. + _ uint32 ///< Number of pinned paths. + _ uint32 ///< Number of attached links. +} diff --git a/vendor/github.com/cilium/ebpf/internal/elf.go b/vendor/github.com/cilium/ebpf/internal/elf.go new file mode 100644 index 000000000..011581938 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/elf.go @@ -0,0 +1,102 @@ +package internal + +import ( + "debug/elf" + "fmt" + "io" +) + +type SafeELFFile struct { + *elf.File +} + +// NewSafeELFFile reads an ELF safely. +// +// Any panic during parsing is turned into an error. This is necessary since +// there are a bunch of unfixed bugs in debug/elf. +// +// https://github.com/golang/go/issues?q=is%3Aissue+is%3Aopen+debug%2Felf+in%3Atitle +func NewSafeELFFile(r io.ReaderAt) (safe *SafeELFFile, err error) { + defer func() { + r := recover() + if r == nil { + return + } + + safe = nil + err = fmt.Errorf("reading ELF file panicked: %s", r) + }() + + file, err := elf.NewFile(r) + if err != nil { + return nil, err + } + + return &SafeELFFile{file}, nil +} + +// OpenSafeELFFile reads an ELF from a file. +// +// It works like NewSafeELFFile, with the exception that safe.Close will +// close the underlying file. 
+func OpenSafeELFFile(path string) (safe *SafeELFFile, err error) { + defer func() { + r := recover() + if r == nil { + return + } + + safe = nil + err = fmt.Errorf("reading ELF file panicked: %s", r) + }() + + file, err := elf.Open(path) + if err != nil { + return nil, err + } + + return &SafeELFFile{file}, nil +} + +// Symbols is the safe version of elf.File.Symbols. +func (se *SafeELFFile) Symbols() (syms []elf.Symbol, err error) { + defer func() { + r := recover() + if r == nil { + return + } + + syms = nil + err = fmt.Errorf("reading ELF symbols panicked: %s", r) + }() + + syms, err = se.File.Symbols() + return +} + +// DynamicSymbols is the safe version of elf.File.DynamicSymbols. +func (se *SafeELFFile) DynamicSymbols() (syms []elf.Symbol, err error) { + defer func() { + r := recover() + if r == nil { + return + } + + syms = nil + err = fmt.Errorf("reading ELF dynamic symbols panicked: %s", r) + }() + + syms, err = se.File.DynamicSymbols() + return +} + +// SectionsByType returns all sections in the file with the specified section type. +func (se *SafeELFFile) SectionsByType(typ elf.SectionType) []*elf.Section { + sections := make([]*elf.Section, 0, 1) + for _, section := range se.Sections { + if section.Type == typ { + sections = append(sections, section) + } + } + return sections +} diff --git a/vendor/github.com/cilium/ebpf/internal/endian_be.go b/vendor/github.com/cilium/ebpf/internal/endian_be.go new file mode 100644 index 000000000..a37777f21 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/endian_be.go @@ -0,0 +1,9 @@ +//go:build armbe || arm64be || mips || mips64 || mips64p32 || ppc64 || s390 || s390x || sparc || sparc64 + +package internal + +import "encoding/binary" + +// NativeEndian is set to either binary.BigEndian or binary.LittleEndian, +// depending on the host's endianness. 
+var NativeEndian = binary.BigEndian diff --git a/vendor/github.com/cilium/ebpf/internal/endian_le.go b/vendor/github.com/cilium/ebpf/internal/endian_le.go new file mode 100644 index 000000000..d833ea764 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/endian_le.go @@ -0,0 +1,9 @@ +//go:build 386 || amd64 || amd64p32 || arm || arm64 || loong64 || mipsle || mips64le || mips64p32le || ppc64le || riscv64 || wasm + +package internal + +import "encoding/binary" + +// NativeEndian is set to either binary.BigEndian or binary.LittleEndian, +// depending on the host's endianness. +var NativeEndian = binary.LittleEndian diff --git a/vendor/github.com/cilium/ebpf/internal/errors.go b/vendor/github.com/cilium/ebpf/internal/errors.go new file mode 100644 index 000000000..19d5294ca --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/errors.go @@ -0,0 +1,179 @@ +package internal + +import ( + "bytes" + "fmt" + "io" + "strings" +) + +// ErrorWithLog wraps err in a VerifierError that includes the parsed verifier +// log buffer. +// +// The default error output is a summary of the full log. The latter can be +// accessed via VerifierError.Log or by formatting the error, see Format. +func ErrorWithLog(source string, err error, log []byte) *VerifierError { + const whitespace = "\t\r\v\n " + + // Convert verifier log C string by truncating it on the first 0 byte + // and trimming trailing whitespace before interpreting as a Go string. + if i := bytes.IndexByte(log, 0); i != -1 { + log = log[:i] + } + + log = bytes.Trim(log, whitespace) + if len(log) == 0 { + return &VerifierError{source, err, nil} + } + + logLines := bytes.Split(log, []byte{'\n'}) + lines := make([]string, 0, len(logLines)) + for _, line := range logLines { + // Don't remove leading white space on individual lines. We rely on it + // when outputting logs. 
+ lines = append(lines, string(bytes.TrimRight(line, whitespace))) + } + + return &VerifierError{source, err, lines} +} + +// VerifierError includes information from the eBPF verifier. +// +// It summarises the log output, see Format if you want to output the full contents. +type VerifierError struct { + source string + // The error which caused this error. + Cause error + // The verifier output split into lines. + Log []string +} + +func (le *VerifierError) Unwrap() error { + return le.Cause +} + +func (le *VerifierError) Error() string { + log := le.Log + if n := len(log); n > 0 && strings.HasPrefix(log[n-1], "processed ") { + // Get rid of "processed 39 insns (limit 1000000) ..." from summary. + log = log[:n-1] + } + + var b strings.Builder + fmt.Fprintf(&b, "%s: %s", le.source, le.Cause.Error()) + + n := len(log) + if n == 0 { + return b.String() + } + + lines := log[n-1:] + if n >= 2 && includePreviousLine(log[n-1]) { + // Add one more line of context if it aids understanding the error. + lines = log[n-2:] + } + + for _, line := range lines { + b.WriteString(": ") + b.WriteString(strings.TrimSpace(line)) + } + + omitted := len(le.Log) - len(lines) + if omitted > 0 { + fmt.Fprintf(&b, " (%d line(s) omitted)", omitted) + } + + return b.String() +} + +// includePreviousLine returns true if the given line likely is better +// understood with additional context from the preceding line. +func includePreviousLine(line string) bool { + // We need to find a good trade off between understandable error messages + // and too much complexity here. Checking the string prefix is ok, requiring + // regular expressions to do it is probably overkill. 
+ + if strings.HasPrefix(line, "\t") { + // [13] STRUCT drm_rect size=16 vlen=4 + // \tx1 type_id=2 + return true + } + + if len(line) >= 2 && line[0] == 'R' && line[1] >= '0' && line[1] <= '9' { + // 0: (95) exit + // R0 !read_ok + return true + } + + if strings.HasPrefix(line, "invalid bpf_context access") { + // 0: (79) r6 = *(u64 *)(r1 +0) + // func '__x64_sys_recvfrom' arg0 type FWD is not a struct + // invalid bpf_context access off=0 size=8 + return true + } + + return false +} + +// Format the error. +// +// Understood verbs are %s and %v, which are equivalent to calling Error(). %v +// allows outputting additional information using the following flags: +// +// %+v: Output the first lines, or all lines if no width is given. +// %-v: Output the last lines, or all lines if no width is given. +// +// Use width to specify how many lines to output. Use the '-' flag to output +// lines from the end of the log instead of the beginning. +func (le *VerifierError) Format(f fmt.State, verb rune) { + switch verb { + case 's': + _, _ = io.WriteString(f, le.Error()) + + case 'v': + n, haveWidth := f.Width() + if !haveWidth || n > len(le.Log) { + n = len(le.Log) + } + + if !f.Flag('+') && !f.Flag('-') { + if haveWidth { + _, _ = io.WriteString(f, "%!v(BADWIDTH)") + return + } + + _, _ = io.WriteString(f, le.Error()) + return + } + + if f.Flag('+') && f.Flag('-') { + _, _ = io.WriteString(f, "%!v(BADFLAG)") + return + } + + fmt.Fprintf(f, "%s: %s:", le.source, le.Cause.Error()) + + omitted := len(le.Log) - n + lines := le.Log[:n] + if f.Flag('-') { + // Print last instead of first lines. 
+ lines = le.Log[len(le.Log)-n:] + if omitted > 0 { + fmt.Fprintf(f, "\n\t(%d line(s) omitted)", omitted) + } + } + + for _, line := range lines { + fmt.Fprintf(f, "\n\t%s", line) + } + + if !f.Flag('-') { + if omitted > 0 { + fmt.Fprintf(f, "\n\t(%d line(s) omitted)", omitted) + } + } + + default: + fmt.Fprintf(f, "%%!%c(BADVERB)", verb) + } +} diff --git a/vendor/github.com/cilium/ebpf/internal/feature.go b/vendor/github.com/cilium/ebpf/internal/feature.go new file mode 100644 index 000000000..e27064c23 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/feature.go @@ -0,0 +1,227 @@ +package internal + +import ( + "errors" + "fmt" + "runtime" + "sync" + + "github.com/cilium/ebpf/internal/platform" +) + +// ErrNotSupported indicates that a feature is not supported. +var ErrNotSupported = errors.New("not supported") + +// ErrNotSupportedOnOS indicates that a feature is not supported on the current +// operating system. +var ErrNotSupportedOnOS = fmt.Errorf("%w on %s", ErrNotSupported, runtime.GOOS) + +// ErrRestrictedKernel is returned when kernel address information is restricted +// by kernel.kptr_restrict and/or net.core.bpf_jit_harden sysctls. +var ErrRestrictedKernel = errors.New("restricted by kernel.kptr_restrict and/or net.core.bpf_jit_harden sysctls") + +// UnsupportedFeatureError is returned by FeatureTest() functions. +type UnsupportedFeatureError struct { + // The minimum version required for this feature. + // + // On Linux this refers to the mainline kernel version, on other platforms + // to the version of the runtime. + // + // Used for the error string, and for sanity checking during testing. + MinimumVersion Version + + // The name of the feature that isn't supported. 
+ Name string +} + +func (ufe *UnsupportedFeatureError) Error() string { + if ufe.MinimumVersion.Unspecified() { + return fmt.Sprintf("%s not supported", ufe.Name) + } + return fmt.Sprintf("%s not supported (requires >= %s)", ufe.Name, ufe.MinimumVersion) +} + +// Is indicates that UnsupportedFeatureError is ErrNotSupported. +func (ufe *UnsupportedFeatureError) Is(target error) bool { + return target == ErrNotSupported +} + +// FeatureTest caches the result of a [FeatureTestFn]. +// +// Fields should not be modified after creation. +type FeatureTest struct { + // The name of the feature being detected. + Name string + // Version in the form Major.Minor[.Patch]. + Version string + // The feature test itself. + Fn FeatureTestFn + + mu sync.RWMutex + done bool + result error +} + +// FeatureTestFn is used to determine whether the kernel supports +// a certain feature. +// +// The return values have the following semantics: +// +// err == ErrNotSupported: the feature is not available +// err == nil: the feature is available +// err != nil: the test couldn't be executed +type FeatureTestFn func() error + +// NewFeatureTest is a convenient way to create a single [FeatureTest]. +// +// versions specifies in which version of a BPF runtime a feature appeared. +// The format is "GOOS:Major.Minor[.Patch]". GOOS may be omitted when targeting +// Linux. Returns [ErrNotSupportedOnOS] if there is no version specified for the +// current OS. +func NewFeatureTest(name string, fn FeatureTestFn, versions ...string) func() error { + version, err := platform.SelectVersion(versions) + if err != nil { + return func() error { return err } + } + + if version == "" { + return func() error { + // We don't return an UnsupportedFeatureError here, since that will + // trigger version checks which don't make sense. 
+ return fmt.Errorf("%s: %w", name, ErrNotSupportedOnOS) + } + } + + ft := &FeatureTest{ + Name: name, + Version: version, + Fn: fn, + } + + return ft.execute +} + +// execute the feature test. +// +// The result is cached if the test is conclusive. +// +// See [FeatureTestFn] for the meaning of the returned error. +func (ft *FeatureTest) execute() error { + ft.mu.RLock() + result, done := ft.result, ft.done + ft.mu.RUnlock() + + if done { + return result + } + + ft.mu.Lock() + defer ft.mu.Unlock() + + // The test may have been executed by another caller while we were + // waiting to acquire ft.mu. + if ft.done { + return ft.result + } + + err := ft.Fn() + if err == nil { + ft.done = true + return nil + } + + if errors.Is(err, ErrNotSupported) { + var v Version + if ft.Version != "" { + v, err = NewVersion(ft.Version) + if err != nil { + return fmt.Errorf("feature %s: %w", ft.Name, err) + } + } + + ft.done = true + ft.result = &UnsupportedFeatureError{ + MinimumVersion: v, + Name: ft.Name, + } + + return ft.result + } + + // We couldn't execute the feature test to a point + // where it could make a determination. + // Don't cache the result, just return it. + return fmt.Errorf("detect support for %s: %w", ft.Name, err) +} + +// FeatureMatrix groups multiple related feature tests into a map. +// +// Useful when there is a small number of discrete features which are known +// at compile time. +// +// It must not be modified concurrently with calling [FeatureMatrix.Result]. +type FeatureMatrix[K comparable] map[K]*FeatureTest + +// Result returns the outcome of the feature test for the given key. +// +// It's safe to call this function concurrently. +// +// Always returns [ErrNotSupportedOnOS] on Windows. 
+func (fm FeatureMatrix[K]) Result(key K) error { + ft, ok := fm[key] + if !ok { + return fmt.Errorf("no feature probe for %v", key) + } + + if platform.IsWindows { + return fmt.Errorf("%s: %w", ft.Name, ErrNotSupportedOnOS) + } + + return ft.execute() +} + +// FeatureCache caches a potentially unlimited number of feature probes. +// +// Useful when there is a high cardinality for a feature test. +type FeatureCache[K comparable] struct { + mu sync.RWMutex + newTest func(K) *FeatureTest + features map[K]*FeatureTest +} + +func NewFeatureCache[K comparable](newTest func(K) *FeatureTest) *FeatureCache[K] { + return &FeatureCache[K]{ + newTest: newTest, + features: make(map[K]*FeatureTest), + } +} + +func (fc *FeatureCache[K]) Result(key K) error { + if platform.IsWindows { + return fmt.Errorf("feature probe for %v: %w", key, ErrNotSupportedOnOS) + } + + // NB: Executing the feature test happens without fc.mu taken. + return fc.retrieve(key).execute() +} + +func (fc *FeatureCache[K]) retrieve(key K) *FeatureTest { + fc.mu.RLock() + ft := fc.features[key] + fc.mu.RUnlock() + + if ft != nil { + return ft + } + + fc.mu.Lock() + defer fc.mu.Unlock() + + if ft := fc.features[key]; ft != nil { + return ft + } + + ft = fc.newTest(key) + fc.features[key] = ft + return ft +} diff --git a/vendor/github.com/cilium/ebpf/internal/io.go b/vendor/github.com/cilium/ebpf/internal/io.go new file mode 100644 index 000000000..1eaf4775a --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/io.go @@ -0,0 +1,128 @@ +package internal + +import ( + "bufio" + "bytes" + "compress/gzip" + "errors" + "fmt" + "io" + "os" + "path/filepath" + "sync" +) + +// NewBufferedSectionReader wraps an io.ReaderAt in an appropriately-sized +// buffered reader. It is a convenience function for reading subsections of +// ELF sections while minimizing the amount of read() syscalls made. 
+// +// Syscall overhead is non-negligible in continuous integration context +// where ELFs might be accessed over virtual filesystems with poor random +// access performance. Buffering reads makes sense because (sub)sections +// end up being read completely anyway. +// +// Use instead of the r.Seek() + io.LimitReader() pattern. +func NewBufferedSectionReader(ra io.ReaderAt, off, n int64) *bufio.Reader { + // Clamp the size of the buffer to one page to avoid slurping large parts + // of a file into memory. bufio.NewReader uses a hardcoded default buffer + // of 4096. Allow arches with larger pages to allocate more, but don't + // allocate a fixed 4k buffer if we only need to read a small segment. + buf := n + if ps := int64(os.Getpagesize()); n > ps { + buf = ps + } + + return bufio.NewReaderSize(io.NewSectionReader(ra, off, n), int(buf)) +} + +// DiscardZeroes makes sure that all written bytes are zero +// before discarding them. +type DiscardZeroes struct{} + +func (DiscardZeroes) Write(p []byte) (int, error) { + for _, b := range p { + if b != 0 { + return 0, errors.New("encountered non-zero byte") + } + } + return len(p), nil +} + +// ReadAllCompressed decompresses a gzipped file into memory. +func ReadAllCompressed(file string) ([]byte, error) { + fh, err := os.Open(file) + if err != nil { + return nil, err + } + defer fh.Close() + + gz, err := gzip.NewReader(fh) + if err != nil { + return nil, err + } + defer gz.Close() + + return io.ReadAll(gz) +} + +// ReadUint64FromFile reads a uint64 from a file. +// +// format specifies the contents of the file in fmt.Scanf syntax. +func ReadUint64FromFile(format string, path ...string) (uint64, error) { + filename := filepath.Join(path...) 
+ data, err := os.ReadFile(filename) + if err != nil { + return 0, fmt.Errorf("reading file %q: %w", filename, err) + } + + var value uint64 + n, err := fmt.Fscanf(bytes.NewReader(data), format, &value) + if err != nil { + return 0, fmt.Errorf("parsing file %q: %w", filename, err) + } + if n != 1 { + return 0, fmt.Errorf("parsing file %q: expected 1 item, got %d", filename, n) + } + + return value, nil +} + +type uint64FromFileKey struct { + format, path string +} + +var uint64FromFileCache = struct { + sync.RWMutex + values map[uint64FromFileKey]uint64 +}{ + values: map[uint64FromFileKey]uint64{}, +} + +// ReadUint64FromFileOnce is like readUint64FromFile but memoizes the result. +func ReadUint64FromFileOnce(format string, path ...string) (uint64, error) { + filename := filepath.Join(path...) + key := uint64FromFileKey{format, filename} + + uint64FromFileCache.RLock() + if value, ok := uint64FromFileCache.values[key]; ok { + uint64FromFileCache.RUnlock() + return value, nil + } + uint64FromFileCache.RUnlock() + + value, err := ReadUint64FromFile(format, filename) + if err != nil { + return 0, err + } + + uint64FromFileCache.Lock() + defer uint64FromFileCache.Unlock() + + if value, ok := uint64FromFileCache.values[key]; ok { + // Someone else got here before us, use what is cached. 
+ return value, nil + } + + uint64FromFileCache.values[key] = value + return value, nil +} diff --git a/vendor/github.com/cilium/ebpf/internal/kallsyms/cache.go b/vendor/github.com/cilium/ebpf/internal/kallsyms/cache.go new file mode 100644 index 000000000..b7f3e0b78 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/kallsyms/cache.go @@ -0,0 +1,20 @@ +package kallsyms + +import "sync" + +type cache[K, V comparable] struct { + m sync.Map +} + +func (c *cache[K, V]) Load(key K) (value V, _ bool) { + v, ok := c.m.Load(key) + if !ok { + return value, false + } + value = v.(V) + return value, true +} + +func (c *cache[K, V]) Store(key K, value V) { + c.m.Store(key, value) +} diff --git a/vendor/github.com/cilium/ebpf/internal/kallsyms/kallsyms.go b/vendor/github.com/cilium/ebpf/internal/kallsyms/kallsyms.go new file mode 100644 index 000000000..efc64a503 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/kallsyms/kallsyms.go @@ -0,0 +1,161 @@ +package kallsyms + +import ( + "bytes" + "errors" + "fmt" + "io" + "os" + "slices" + "strconv" + + "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/platform" +) + +var errAmbiguousKsym = errors.New("multiple kernel symbols with the same name") + +var symAddrs cache[string, uint64] + +// AssignAddresses looks up the addresses of the requested symbols in the kernel +// and assigns them to their corresponding values in the symbols map. Results +// of all lookups are cached, successful or otherwise. +// +// Any symbols missing in the kernel are ignored. Returns an error if multiple +// addresses were found for a symbol. +func AssignAddresses(symbols map[string]uint64) error { + if !platform.IsLinux { + return fmt.Errorf("read /proc/kallsyms: %w", internal.ErrNotSupportedOnOS) + } + + if len(symbols) == 0 { + return nil + } + + // Attempt to fetch symbols from cache. 
+ request := make(map[string]uint64) + for name := range symbols { + if addr, ok := symAddrs.Load(name); ok { + symbols[name] = addr + continue + } + + // Mark the symbol to be read from /proc/kallsyms. + request[name] = 0 + } + if len(request) == 0 { + // All symbols satisfied from cache. + return nil + } + + f, err := os.Open("/proc/kallsyms") + if err != nil { + return err + } + defer f.Close() + + if err := assignAddresses(f, request); err != nil { + return fmt.Errorf("loading symbol addresses: %w", err) + } + + // Update the cache with the new symbols. Cache all requested symbols even if + // they weren't found, to avoid repeated lookups. + for name, addr := range request { + symAddrs.Store(name, addr) + symbols[name] = addr + } + + return nil +} + +// assignAddresses assigns kernel symbol addresses read from f to values +// requested by symbols. Always scans the whole input to make sure the user +// didn't request an ambiguous symbol. +func assignAddresses(f io.Reader, symbols map[string]uint64) error { + if len(symbols) == 0 { + return nil + } + r := newReader(f) + for r.Line() { + s, err, skip := parseSymbol(r, nil) + if err != nil { + return fmt.Errorf("parsing kallsyms line: %w", err) + } + if skip { + continue + } + + existing, requested := symbols[string(s.name)] + if existing != 0 { + // Multiple addresses for a symbol have been found. Return a friendly + // error to avoid silently attaching to the wrong symbol. libbpf also + // rejects referring to ambiguous symbols. + return fmt.Errorf("symbol %s(0x%x): duplicate found at address 0x%x: %w", s.name, existing, s.addr, errAmbiguousKsym) + } + if requested { + // Reading a symbol with a zero address is a strong indication that + // kptr_restrict is set and the process doesn't have CAP_SYSLOG, or + // kptr_restrict is set to 2 (never show addresses). + // + // When running the kernel with KASLR disabled (like CI kernels running in + // microVMs), kallsyms will display many absolute symbols at address 0. 
+ // This memory is unlikely to contain anything useful, and production + // machines are unlikely to run without KASLR. + // + // Return a helpful error instead of silently returning zero addresses. + if s.addr == 0 { + return fmt.Errorf("symbol %s: %w", s.name, internal.ErrRestrictedKernel) + } + symbols[string(s.name)] = s.addr + } + } + if err := r.Err(); err != nil { + return fmt.Errorf("reading kallsyms: %w", err) + } + + return nil +} + +type ksym struct { + addr uint64 + name []byte + mod []byte +} + +// parseSymbol parses a line from /proc/kallsyms into an address, type, name and +// module. Skip will be true if the symbol doesn't match any of the given symbol +// types. See `man 1 nm` for all available types. +// +// Only yields symbols whose type is contained in types. An empty value for types +// disables this filtering. +// +// Example line: `ffffffffc1682010 T nf_nat_init\t[nf_nat]` +func parseSymbol(r *reader, types []rune) (s ksym, err error, skip bool) { + for i := 0; r.Word(); i++ { + switch i { + // Address of the symbol. + case 0: + s.addr, err = strconv.ParseUint(r.Text(), 16, 64) + if err != nil { + return s, fmt.Errorf("parsing address: %w", err), false + } + // Type of the symbol. Assume the character is ASCII-encoded by converting + // it directly to a rune, since it's a fixed field controlled by the kernel. + case 1: + if len(types) > 0 && !slices.Contains(types, rune(r.Bytes()[0])) { + return s, nil, true + } + // Name of the symbol. + case 2: + s.name = r.Bytes() + // Kernel module the symbol is provided by. + case 3: + s.mod = bytes.Trim(r.Bytes(), "[]") + // Ignore any future fields. 
+ default: + return + } + } + + return +} diff --git a/vendor/github.com/cilium/ebpf/internal/kallsyms/reader.go b/vendor/github.com/cilium/ebpf/internal/kallsyms/reader.go new file mode 100644 index 000000000..3011e83f6 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/kallsyms/reader.go @@ -0,0 +1,89 @@ +package kallsyms + +import ( + "bufio" + "bytes" + "io" +) + +// reader is a line and word-oriented reader built for reading /proc/kallsyms. +// It takes an io.Reader and iterates its contents line by line, then word by +// word. +// +// It's designed to allow partial reading of lines without paying the cost of +// allocating objects that will never be accessed, resulting in less work for +// the garbage collector. +type reader struct { + s *bufio.Scanner + line []byte + word []byte + + err error +} + +func newReader(r io.Reader) *reader { + return &reader{ + s: bufio.NewScanner(r), + } +} + +// Bytes returns the current word as a byte slice. +func (r *reader) Bytes() []byte { + return r.word +} + +// Text returns the output of Bytes as a string. +func (r *reader) Text() string { + return string(r.Bytes()) +} + +// Line advances the reader to the next line in the input. Calling Line resets +// the current word, making [reader.Bytes] and [reader.Text] return empty +// values. Follow this up with a call to [reader.Word]. +// +// Like [bufio.Scanner], [reader.Err] needs to be checked after Line returns +// false to determine if an error occurred during reading. +// +// Returns true if Line can be called again. Returns false if all lines in the +// input have been read. +func (r *reader) Line() bool { + for r.s.Scan() { + line := r.s.Bytes() + if len(line) == 0 { + continue + } + + r.line = line + r.word = nil + + return true + } + if err := r.s.Err(); err != nil { + r.err = err + } + + return false +} + +// Word advances the reader to the next word in the current line. +// +// Returns true if a word is found and Word should be called again. 
Returns +// false when all words on the line have been read. +func (r *reader) Word() bool { + line := bytes.TrimSpace(r.line) + + if len(line) == 0 { + return false + } + + var found bool + r.word, r.line, found = bytes.Cut(line, []byte{' '}) + if !found { + r.word, r.line, _ = bytes.Cut(line, []byte{'\t'}) + } + return true +} + +func (r *reader) Err() error { + return r.err +} diff --git a/vendor/github.com/cilium/ebpf/internal/kconfig/kconfig.go b/vendor/github.com/cilium/ebpf/internal/kconfig/kconfig.go new file mode 100644 index 000000000..29c62b626 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/kconfig/kconfig.go @@ -0,0 +1,274 @@ +// Package kconfig implements a parser for the format of Linux's .config file. +package kconfig + +import ( + "bufio" + "bytes" + "compress/gzip" + "fmt" + "io" + "math" + "strconv" + "strings" + + "github.com/cilium/ebpf/btf" + "github.com/cilium/ebpf/internal" +) + +// Parse parses the kconfig file for which a reader is given. +// All the CONFIG_* which are in filter and which are set set will be +// put in the returned map as key with their corresponding value as map value. +// If filter is nil, no filtering will occur. +// If the kconfig file is not valid, error will be returned. +func Parse(source io.ReaderAt, filter map[string]struct{}) (map[string]string, error) { + var r io.Reader + zr, err := gzip.NewReader(io.NewSectionReader(source, 0, math.MaxInt64)) + if err != nil { + r = io.NewSectionReader(source, 0, math.MaxInt64) + } else { + // Source is gzip compressed, transparently decompress. 
+ r = zr + } + + ret := make(map[string]string, len(filter)) + + s := bufio.NewScanner(r) + + for s.Scan() { + line := s.Bytes() + err = processKconfigLine(line, ret, filter) + if err != nil { + return nil, fmt.Errorf("cannot parse line: %w", err) + } + + if filter != nil && len(ret) == len(filter) { + break + } + } + + if err := s.Err(); err != nil { + return nil, fmt.Errorf("cannot parse: %w", err) + } + + if zr != nil { + return ret, zr.Close() + } + + return ret, nil +} + +// Golang translation of libbpf bpf_object__process_kconfig_line(): +// https://github.com/libbpf/libbpf/blob/fbd60dbff51c870f5e80a17c4f2fd639eb80af90/src/libbpf.c#L1874 +// It does the same checks but does not put the data inside the BPF map. +func processKconfigLine(line []byte, m map[string]string, filter map[string]struct{}) error { + // Ignore empty lines and "# CONFIG_* is not set". + if !bytes.HasPrefix(line, []byte("CONFIG_")) { + return nil + } + + key, value, found := bytes.Cut(line, []byte{'='}) + if !found { + return fmt.Errorf("line %q does not contain separator '='", line) + } + + if len(value) == 0 { + return fmt.Errorf("line %q has no value", line) + } + + if filter != nil { + // NB: map[string(key)] gets special optimisation help from the compiler + // and doesn't allocate. Don't turn this into a variable. + _, ok := filter[string(key)] + if !ok { + return nil + } + } + + // This can seem odd, but libbpf only sets the value the first time the key is + // met: + // https://github.com/torvalds/linux/blob/0d85b27b0cc6/tools/lib/bpf/libbpf.c#L1906-L1908 + _, ok := m[string(key)] + if !ok { + m[string(key)] = string(value) + } + + return nil +} + +// PutValue translates the value given as parameter depending on the BTF +// type, the translated value is then written to the byte array. 
+func PutValue(data []byte, typ btf.Type, value string) error { + typ = btf.UnderlyingType(typ) + + switch value { + case "y", "n", "m": + return putValueTri(data, typ, value) + } + + if strings.HasPrefix(value, `"`) { + return putValueString(data, typ, value) + } + + return putValueNumber(data, typ, value) +} + +// Golang translation of libbpf_tristate enum: +// https://github.com/libbpf/libbpf/blob/fbd60dbff51c870f5e80a17c4f2fd639eb80af90/src/bpf_helpers.h#L169 +type triState int + +const ( + TriNo triState = 0 + TriYes triState = 1 + TriModule triState = 2 +) + +func putValueTri(data []byte, typ btf.Type, value string) error { + switch v := typ.(type) { + case *btf.Int: + if v.Encoding != btf.Bool { + return fmt.Errorf("cannot add tri value, expected btf.Bool, got: %v", v.Encoding) + } + + if v.Size != 1 { + return fmt.Errorf("cannot add tri value, expected size of 1 byte, got: %d", v.Size) + } + + switch value { + case "y": + data[0] = 1 + case "n": + data[0] = 0 + default: + return fmt.Errorf("cannot use %q for btf.Bool", value) + } + case *btf.Enum: + if v.Name != "libbpf_tristate" { + return fmt.Errorf("cannot use enum %q, only libbpf_tristate is supported", v.Name) + } + + if len(data) != 4 { + return fmt.Errorf("expected enum value to occupy 4 bytes in datasec, got: %d", len(data)) + } + + var tri triState + switch value { + case "y": + tri = TriYes + case "m": + tri = TriModule + case "n": + tri = TriNo + default: + return fmt.Errorf("value %q is not supported for libbpf_tristate", value) + } + + internal.NativeEndian.PutUint32(data, uint32(tri)) + default: + return fmt.Errorf("cannot add number value, expected btf.Int or btf.Enum, got: %T", v) + } + + return nil +} + +func putValueString(data []byte, typ btf.Type, value string) error { + array, ok := typ.(*btf.Array) + if !ok { + return fmt.Errorf("cannot add string value, expected btf.Array, got %T", array) + } + + contentType, ok := btf.UnderlyingType(array.Type).(*btf.Int) + if !ok { + return 
fmt.Errorf("cannot add string value, expected array of btf.Int, got %T", contentType) + } + + // Any Int, which is not bool, of one byte could be used to store char: + // https://github.com/torvalds/linux/blob/1a5304fecee5/tools/lib/bpf/libbpf.c#L3637-L3638 + if contentType.Size != 1 && contentType.Encoding != btf.Bool { + return fmt.Errorf("cannot add string value, expected array of btf.Int of size 1, got array of btf.Int of size: %v", contentType.Size) + } + + if !strings.HasPrefix(value, `"`) || !strings.HasSuffix(value, `"`) { + return fmt.Errorf(`value %q must start and finish with '"'`, value) + } + + str := strings.Trim(value, `"`) + + // We need to trim string if the bpf array is smaller. + if uint32(len(str)) >= array.Nelems { + str = str[:array.Nelems] + } + + // Write the string content to .kconfig. + copy(data, str) + + return nil +} + +func putValueNumber(data []byte, typ btf.Type, value string) error { + integer, ok := typ.(*btf.Int) + if !ok { + return fmt.Errorf("cannot add number value, expected *btf.Int, got: %T", integer) + } + + size := integer.Size + sizeInBits := size * 8 + + var n uint64 + var err error + if integer.Encoding == btf.Signed { + parsed, e := strconv.ParseInt(value, 0, int(sizeInBits)) + + n = uint64(parsed) + err = e + } else { + parsed, e := strconv.ParseUint(value, 0, int(sizeInBits)) + + n = uint64(parsed) + err = e + } + + if err != nil { + return fmt.Errorf("cannot parse value: %w", err) + } + + return PutInteger(data, integer, n) +} + +// PutInteger writes n into data. +// +// integer determines how much is written into data and what the valid values +// are. +func PutInteger(data []byte, integer *btf.Int, n uint64) error { + // This function should match set_kcfg_value_num in libbpf. 
+ if integer.Encoding == btf.Bool && n > 1 { + return fmt.Errorf("invalid boolean value: %d", n) + } + + if len(data) < int(integer.Size) { + return fmt.Errorf("can't fit an integer of size %d into a byte slice of length %d", integer.Size, len(data)) + } + + switch integer.Size { + case 1: + if integer.Encoding == btf.Signed && (int64(n) > math.MaxInt8 || int64(n) < math.MinInt8) { + return fmt.Errorf("can't represent %d as a signed integer of size %d", int64(n), integer.Size) + } + data[0] = byte(n) + case 2: + if integer.Encoding == btf.Signed && (int64(n) > math.MaxInt16 || int64(n) < math.MinInt16) { + return fmt.Errorf("can't represent %d as a signed integer of size %d", int64(n), integer.Size) + } + internal.NativeEndian.PutUint16(data, uint16(n)) + case 4: + if integer.Encoding == btf.Signed && (int64(n) > math.MaxInt32 || int64(n) < math.MinInt32) { + return fmt.Errorf("can't represent %d as a signed integer of size %d", int64(n), integer.Size) + } + internal.NativeEndian.PutUint32(data, uint32(n)) + case 8: + internal.NativeEndian.PutUint64(data, uint64(n)) + default: + return fmt.Errorf("size (%d) is not valid, expected: 1, 2, 4 or 8", integer.Size) + } + + return nil +} diff --git a/vendor/github.com/cilium/ebpf/internal/linux/auxv.go b/vendor/github.com/cilium/ebpf/internal/linux/auxv.go new file mode 100644 index 000000000..a864d6b4a --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/linux/auxv.go @@ -0,0 +1,63 @@ +package linux + +import ( + "fmt" + "io" + + "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/platform" + "github.com/cilium/ebpf/internal/unix" +) + +type auxvPairReader interface { + Close() error + ReadAuxvPair() (uint64, uint64, error) +} + +// See https://elixir.bootlin.com/linux/v6.5.5/source/include/uapi/linux/auxvec.h +const ( + _AT_NULL = 0 // End of vector + _AT_SYSINFO_EHDR = 33 // Offset to vDSO blob in process image +) + +type auxvRuntimeReader struct { + data [][2]uintptr + index int +} + +func (r 
*auxvRuntimeReader) Close() error { + return nil +} + +func (r *auxvRuntimeReader) ReadAuxvPair() (uint64, uint64, error) { + if r.index >= len(r.data)+2 { + return 0, 0, io.EOF + } + + // we manually add the (_AT_NULL, _AT_NULL) pair at the end + // that is not provided by the go runtime + var tag, value uintptr + if r.index < len(r.data) { + tag, value = r.data[r.index][0], r.data[r.index][1] + } else { + tag, value = _AT_NULL, _AT_NULL + } + r.index += 1 + return uint64(tag), uint64(value), nil +} + +func newAuxvRuntimeReader() (auxvPairReader, error) { + if !platform.IsLinux { + return nil, fmt.Errorf("read auxv from runtime: %w", internal.ErrNotSupportedOnOS) + } + + data, err := unix.Auxv() + if err != nil { + return nil, fmt.Errorf("read auxv from runtime: %w", err) + } + + return &auxvRuntimeReader{ + data: data, + index: 0, + }, nil +} diff --git a/vendor/github.com/cilium/ebpf/internal/linux/cpu.go b/vendor/github.com/cilium/ebpf/internal/linux/cpu.go new file mode 100644 index 000000000..bd55ac915 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/linux/cpu.go @@ -0,0 +1,45 @@ +package linux + +import ( + "fmt" + "os" + "strings" +) + +func ParseCPUsFromFile(path string) (int, error) { + spec, err := os.ReadFile(path) + if err != nil { + return 0, err + } + + n, err := parseCPUs(string(spec)) + if err != nil { + return 0, fmt.Errorf("can't parse %s: %v", path, err) + } + + return n, nil +} + +// parseCPUs parses the number of cpus from a string produced +// by bitmap_list_string() in the Linux kernel. +// Multiple ranges are rejected, since they can't be unified +// into a single number. +// This is the format of /sys/devices/system/cpu/possible, it +// is not suitable for /sys/devices/system/cpu/online, etc. 
+func parseCPUs(spec string) (int, error) { + if strings.Trim(spec, "\n") == "0" { + return 1, nil + } + + var low, high int + n, err := fmt.Sscanf(spec, "%d-%d\n", &low, &high) + if n != 2 || err != nil { + return 0, fmt.Errorf("invalid format: %s", spec) + } + if low != 0 { + return 0, fmt.Errorf("CPU spec doesn't start at zero: %s", spec) + } + + // cpus is 0 indexed + return high + 1, nil +} diff --git a/vendor/github.com/cilium/ebpf/internal/linux/doc.go b/vendor/github.com/cilium/ebpf/internal/linux/doc.go new file mode 100644 index 000000000..064e75437 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/linux/doc.go @@ -0,0 +1,2 @@ +// Package linux contains OS specific wrappers around package unix. +package linux diff --git a/vendor/github.com/cilium/ebpf/internal/linux/kconfig.go b/vendor/github.com/cilium/ebpf/internal/linux/kconfig.go new file mode 100644 index 000000000..1488ecb35 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/linux/kconfig.go @@ -0,0 +1,31 @@ +package linux + +import ( + "fmt" + "os" +) + +// FindKConfig searches for a kconfig file on the host. +// +// It first reads from /boot/config- of the current running kernel and tries +// /proc/config.gz if nothing was found in /boot. +// If none of the file provide a kconfig, it returns an error. 
+func FindKConfig() (*os.File, error) { + kernelRelease, err := KernelRelease() + if err != nil { + return nil, fmt.Errorf("cannot get kernel release: %w", err) + } + + path := "/boot/config-" + kernelRelease + f, err := os.Open(path) + if err == nil { + return f, nil + } + + f, err = os.Open("/proc/config.gz") + if err == nil { + return f, nil + } + + return nil, fmt.Errorf("neither %s nor /proc/config.gz provide a kconfig", path) +} diff --git a/vendor/github.com/cilium/ebpf/internal/linux/platform.go b/vendor/github.com/cilium/ebpf/internal/linux/platform.go new file mode 100644 index 000000000..39bdcc51f --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/linux/platform.go @@ -0,0 +1,43 @@ +package linux + +import ( + "runtime" +) + +// PlatformPrefix returns the platform-dependent syscall wrapper prefix used by +// the linux kernel. +// +// Based on https://github.com/golang/go/blob/master/src/go/build/syslist.go +// and https://github.com/libbpf/libbpf/blob/master/src/libbpf.c#L10047 +func PlatformPrefix() string { + switch runtime.GOARCH { + case "386": + return "__ia32_" + case "amd64", "amd64p32": + return "__x64_" + + case "arm", "armbe": + return "__arm_" + case "arm64", "arm64be": + return "__arm64_" + + case "mips", "mipsle", "mips64", "mips64le", "mips64p32", "mips64p32le": + return "__mips_" + + case "s390": + return "__s390_" + case "s390x": + return "__s390x_" + + case "riscv", "riscv64": + return "__riscv_" + + case "ppc": + return "__powerpc_" + case "ppc64", "ppc64le": + return "__powerpc64_" + + default: + return "" + } +} diff --git a/vendor/github.com/cilium/ebpf/internal/linux/statfs.go b/vendor/github.com/cilium/ebpf/internal/linux/statfs.go new file mode 100644 index 000000000..e268c06fa --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/linux/statfs.go @@ -0,0 +1,23 @@ +package linux + +import ( + "unsafe" + + "github.com/cilium/ebpf/internal/unix" +) + +func FSType(path string) (int64, error) { + var statfs unix.Statfs_t + 
if err := unix.Statfs(path, &statfs); err != nil { + return 0, err + } + + fsType := int64(statfs.Type) + if unsafe.Sizeof(statfs.Type) == 4 { + // We're on a 32 bit arch, where statfs.Type is int32. bpfFSType is a + // negative number when interpreted as int32 so we need to cast via + // uint32 to avoid sign extension. + fsType = int64(uint32(statfs.Type)) + } + return fsType, nil +} diff --git a/vendor/github.com/cilium/ebpf/internal/linux/vdso.go b/vendor/github.com/cilium/ebpf/internal/linux/vdso.go new file mode 100644 index 000000000..1d8d0ef6b --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/linux/vdso.go @@ -0,0 +1,144 @@ +package linux + +import ( + "debug/elf" + "encoding/binary" + "errors" + "fmt" + "io" + "math" + "os" + + "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/unix" +) + +var ( + errAuxvNoVDSO = errors.New("no vdso address found in auxv") +) + +// vdsoVersion returns the LINUX_VERSION_CODE embedded in the vDSO library +// linked into the current process image. +func vdsoVersion() (uint32, error) { + av, err := newAuxvRuntimeReader() + if err != nil { + return 0, err + } + + defer av.Close() + + vdsoAddr, err := vdsoMemoryAddress(av) + if err != nil { + return 0, fmt.Errorf("finding vDSO memory address: %w", err) + } + + // Use /proc/self/mem rather than unsafe.Pointer tricks. + mem, err := os.Open("/proc/self/mem") + if err != nil { + return 0, fmt.Errorf("opening mem: %w", err) + } + defer mem.Close() + + // Open ELF at provided memory address, as offset into /proc/self/mem. + c, err := vdsoLinuxVersionCode(io.NewSectionReader(mem, int64(vdsoAddr), math.MaxInt64)) + if err != nil { + return 0, fmt.Errorf("reading linux version code: %w", err) + } + + return c, nil +} + +// vdsoMemoryAddress returns the memory address of the vDSO library +// linked into the current process image. r is an io.Reader into an auxv blob. 
+func vdsoMemoryAddress(r auxvPairReader) (uintptr, error) { + // Loop through all tag/value pairs in auxv until we find `AT_SYSINFO_EHDR`, + // the address of a page containing the virtual Dynamic Shared Object (vDSO). + for { + tag, value, err := r.ReadAuxvPair() + if err != nil { + return 0, err + } + + switch tag { + case _AT_SYSINFO_EHDR: + if value != 0 { + return uintptr(value), nil + } + return 0, fmt.Errorf("invalid vDSO address in auxv") + // _AT_NULL is always the last tag/val pair in the aux vector + // and can be treated like EOF. + case _AT_NULL: + return 0, errAuxvNoVDSO + } + } +} + +// format described at https://www.man7.org/linux/man-pages/man5/elf.5.html in section 'Notes (Nhdr)' +type elfNoteHeader struct { + NameSize int32 + DescSize int32 + Type int32 +} + +// vdsoLinuxVersionCode returns the LINUX_VERSION_CODE embedded in +// the ELF notes section of the binary provided by the reader. +func vdsoLinuxVersionCode(r io.ReaderAt) (uint32, error) { + hdr, err := internal.NewSafeELFFile(r) + if err != nil { + return 0, fmt.Errorf("reading vDSO ELF: %w", err) + } + + sections := hdr.SectionsByType(elf.SHT_NOTE) + if len(sections) == 0 { + return 0, fmt.Errorf("no note section found in vDSO ELF") + } + + for _, sec := range sections { + sr := sec.Open() + var n elfNoteHeader + + // Read notes until we find one named 'Linux'. + for { + if err := binary.Read(sr, hdr.ByteOrder, &n); err != nil { + if errors.Is(err, io.EOF) { + // We looked at all the notes in this section + break + } + return 0, fmt.Errorf("reading note header: %w", err) + } + + // If a note name is defined, it follows the note header. + var name string + if n.NameSize > 0 { + // Read the note name, aligned to 4 bytes. + buf := make([]byte, internal.Align(n.NameSize, 4)) + if err := binary.Read(sr, hdr.ByteOrder, &buf); err != nil { + return 0, fmt.Errorf("reading note name: %w", err) + } + + // Read nul-terminated string. 
+ name = unix.ByteSliceToString(buf[:n.NameSize]) + } + + // If a note descriptor is defined, it follows the name. + // It is possible for a note to have a descriptor but not a name. + if n.DescSize > 0 { + // LINUX_VERSION_CODE is a uint32 value. + if name == "Linux" && n.DescSize == 4 && n.Type == 0 { + var version uint32 + if err := binary.Read(sr, hdr.ByteOrder, &version); err != nil { + return 0, fmt.Errorf("reading note descriptor: %w", err) + } + return version, nil + } + + // Discard the note descriptor if it exists but we're not interested in it. + if _, err := io.CopyN(io.Discard, sr, int64(internal.Align(n.DescSize, 4))); err != nil { + return 0, err + } + } + } + } + + return 0, fmt.Errorf("no Linux note in ELF") +} diff --git a/vendor/github.com/cilium/ebpf/internal/linux/version.go b/vendor/github.com/cilium/ebpf/internal/linux/version.go new file mode 100644 index 000000000..798dd3fed --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/linux/version.go @@ -0,0 +1,34 @@ +package linux + +import ( + "fmt" + "sync" + + "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/unix" +) + +// KernelVersion returns the version of the currently running kernel. +var KernelVersion = sync.OnceValues(detectKernelVersion) + +// detectKernelVersion returns the version of the running kernel. +func detectKernelVersion() (internal.Version, error) { + vc, err := vdsoVersion() + if err != nil { + return internal.Version{}, err + } + return internal.NewVersionFromCode(vc), nil +} + +// KernelRelease returns the release string of the running kernel. +// Its format depends on the Linux distribution and corresponds to directory +// names in /lib/modules by convention. Some examples are 5.15.17-1-lts and +// 4.19.0-16-amd64. 
+func KernelRelease() (string, error) { + var uname unix.Utsname + if err := unix.Uname(&uname); err != nil { + return "", fmt.Errorf("uname failed: %w", err) + } + + return unix.ByteSliceToString(uname.Release[:]), nil +} diff --git a/vendor/github.com/cilium/ebpf/internal/math.go b/vendor/github.com/cilium/ebpf/internal/math.go new file mode 100644 index 000000000..10cde6686 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/math.go @@ -0,0 +1,33 @@ +package internal + +// Align returns 'n' updated to 'alignment' boundary. +func Align[I Integer](n, alignment I) I { + return (n + alignment - 1) / alignment * alignment +} + +// IsPow returns true if n is a power of two. +func IsPow[I Integer](n I) bool { + return n != 0 && (n&(n-1)) == 0 +} + +// Between returns the value clamped between a and b. +func Between[I Integer](val, a, b I) I { + lower, upper := a, b + if lower > upper { + upper, lower = a, b + } + + val = min(val, upper) + return max(val, lower) +} + +// Integer represents all possible integer types. +// Remove when x/exp/constraints is moved to the standard library. +type Integer interface { + ~int | ~int8 | ~int16 | ~int32 | ~int64 | ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr +} + +// List of integer types known by the Go compiler. Used by TestIntegerConstraint +// to warn if a new integer type is introduced. Remove when x/exp/constraints +// is moved to the standard library. 
+var integers = []string{"int", "int8", "int16", "int32", "int64", "uint", "uint8", "uint16", "uint32", "uint64", "uintptr"} diff --git a/vendor/github.com/cilium/ebpf/internal/output.go b/vendor/github.com/cilium/ebpf/internal/output.go new file mode 100644 index 000000000..bcbb6818d --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/output.go @@ -0,0 +1,102 @@ +package internal + +import ( + "bytes" + "errors" + "go/format" + "go/scanner" + "io" + "reflect" + "strings" + "unicode" +) + +// Identifier turns a C style type or field name into an exportable Go equivalent. +func Identifier(str string) string { + prev := rune(-1) + return strings.Map(func(r rune) rune { + // See https://golang.org/ref/spec#Identifiers + switch { + case unicode.IsLetter(r): + if prev == -1 { + r = unicode.ToUpper(r) + } + + case r == '_': + switch { + // The previous rune was deleted, or we are at the + // beginning of the string. + case prev == -1: + fallthrough + + // The previous rune is a lower case letter or a digit. + case unicode.IsDigit(prev) || (unicode.IsLetter(prev) && unicode.IsLower(prev)): + // delete the current rune, and force the + // next character to be uppercased. + r = -1 + } + + case unicode.IsDigit(r): + + default: + // Delete the current rune. prev is unchanged. + return -1 + } + + prev = r + return r + }, str) +} + +// WriteFormatted outputs a formatted src into out. +// +// If formatting fails it returns an informative error message. 
+func WriteFormatted(src []byte, out io.Writer) error { + formatted, err := format.Source(src) + if err == nil { + _, err = out.Write(formatted) + return err + } + + var el scanner.ErrorList + if !errors.As(err, &el) { + return err + } + + var nel scanner.ErrorList + for _, err := range el { + if !err.Pos.IsValid() { + nel = append(nel, err) + continue + } + + buf := src[err.Pos.Offset:] + nl := bytes.IndexRune(buf, '\n') + if nl == -1 { + nel = append(nel, err) + continue + } + + err.Msg += ": " + string(buf[:nl]) + nel = append(nel, err) + } + + return nel +} + +// GoTypeName is like %T, but elides the package name. +// +// Pointers to a type are peeled off. +func GoTypeName(t any) string { + rT := reflect.TypeOf(t) + for rT.Kind() == reflect.Pointer { + rT = rT.Elem() + } + + name := rT.Name() + if pkgPath := rT.PkgPath(); pkgPath != "" { + name = strings.ReplaceAll(name, pkgPath+".", "") + } + + return name +} diff --git a/vendor/github.com/cilium/ebpf/internal/platform/constants.go b/vendor/github.com/cilium/ebpf/internal/platform/constants.go new file mode 100644 index 000000000..b57ae1e59 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/platform/constants.go @@ -0,0 +1,62 @@ +package platform + +import "fmt" + +// Values used to tag platform specific constants. +// +// The value for Linux is zero so that existing constants do not change. 
+const ( + LinuxTag = uint32(iota) << platformShift + WindowsTag +) + +const ( + platformMax = 1<<3 - 1 // most not exceed 3 bits to avoid setting the high bit + platformShift = 28 + platformMask = platformMax << platformShift +) + +func tagForPlatform(platform string) (uint32, error) { + switch platform { + case Linux: + return LinuxTag, nil + case Windows: + return WindowsTag, nil + default: + return 0, fmt.Errorf("unrecognized platform: %s", platform) + } +} + +func platformForConstant(c uint32) string { + tag := uint32(c & platformMask) + switch tag { + case LinuxTag: + return Linux + case WindowsTag: + return Windows + default: + return "" + } +} + +// Encode a platform and a value into a tagged constant. +// +// Returns an error if platform is unknown or c is out of bounds. +func EncodeConstant[T ~uint32](platform string, c uint32) (T, error) { + if c>>platformShift > 0 { + return 0, fmt.Errorf("invalid constant 0x%x", c) + } + + tag, err := tagForPlatform(platform) + if err != nil { + return 0, err + } + + return T(tag | c), nil +} + +// Decode a platform and a value from a tagged constant. +func DecodeConstant[T ~uint32](c T) (string, uint32) { + v := uint32(c) & ^uint32(platformMask) + return platformForConstant(uint32(c)), v +} diff --git a/vendor/github.com/cilium/ebpf/internal/platform/platform.go b/vendor/github.com/cilium/ebpf/internal/platform/platform.go new file mode 100644 index 000000000..1c5bad396 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/platform/platform.go @@ -0,0 +1,42 @@ +package platform + +import ( + "errors" + "runtime" + "strings" +) + +const ( + Linux = "linux" + Windows = "windows" +) + +const ( + IsLinux = runtime.GOOS == "linux" + IsWindows = runtime.GOOS == "windows" +) + +// SelectVersion extracts the platform-appropriate version from a list of strings like +// `linux:6.1` or `windows:0.20.0`. +// +// Returns an empty string and nil if no version matched or an error if no strings were passed. 
+func SelectVersion(versions []string) (string, error) { + const prefix = runtime.GOOS + ":" + + if len(versions) == 0 { + return "", errors.New("no versions specified") + } + + for _, version := range versions { + if after, ok := strings.CutPrefix(version, prefix); ok { + return after, nil + } + + if IsLinux && !strings.ContainsRune(version, ':') { + // Allow version numbers without a GOOS prefix on Linux. + return version, nil + } + } + + return "", nil +} diff --git a/vendor/github.com/cilium/ebpf/internal/platform/platform_linux.go b/vendor/github.com/cilium/ebpf/internal/platform/platform_linux.go new file mode 100644 index 000000000..f0aa240dc --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/platform/platform_linux.go @@ -0,0 +1,3 @@ +package platform + +const Native = Linux diff --git a/vendor/github.com/cilium/ebpf/internal/platform/platform_other.go b/vendor/github.com/cilium/ebpf/internal/platform/platform_other.go new file mode 100644 index 000000000..cd33b3f68 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/platform/platform_other.go @@ -0,0 +1,5 @@ +//go:build !linux && !windows + +package platform + +const Native = "" diff --git a/vendor/github.com/cilium/ebpf/internal/platform/platform_windows.go b/vendor/github.com/cilium/ebpf/internal/platform/platform_windows.go new file mode 100644 index 000000000..26b4a8ecb --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/platform/platform_windows.go @@ -0,0 +1,3 @@ +package platform + +const Native = Windows diff --git a/vendor/github.com/cilium/ebpf/internal/prog.go b/vendor/github.com/cilium/ebpf/internal/prog.go new file mode 100644 index 000000000..d629145b6 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/prog.go @@ -0,0 +1,11 @@ +package internal + +// EmptyBPFContext is the smallest-possible BPF input context to be used for +// invoking `Program.{Run,Benchmark,Test}`. +// +// Programs require a context input buffer of at least 15 bytes. 
Looking in +// net/bpf/test_run.c, bpf_test_init() requires that the input is at least +// ETH_HLEN (14) bytes. As of Linux commit fd18942 ("bpf: Don't redirect packets +// with invalid pkt_len"), it also requires the skb to be non-empty after +// removing the Layer 2 header. +var EmptyBPFContext = make([]byte, 15) diff --git a/vendor/github.com/cilium/ebpf/internal/sys/doc.go b/vendor/github.com/cilium/ebpf/internal/sys/doc.go new file mode 100644 index 000000000..75d7e4013 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/sys/doc.go @@ -0,0 +1,6 @@ +// Package sys contains bindings for the BPF syscall. +package sys + +// Regenerate types.go by invoking go generate in the current directory. + +//go:generate go tool gentypes ../../btf/testdata/vmlinux.btf.gz diff --git a/vendor/github.com/cilium/ebpf/internal/sys/fd.go b/vendor/github.com/cilium/ebpf/internal/sys/fd.go new file mode 100644 index 000000000..f12d11c20 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/sys/fd.go @@ -0,0 +1,59 @@ +package sys + +import ( + "math" + "runtime" + "strconv" + + "github.com/cilium/ebpf/internal/testutils/testmain" + "github.com/cilium/ebpf/internal/unix" +) + +var ErrClosedFd = unix.EBADF + +// A value for an invalid fd. +// +// Luckily this is consistent across Linux and Windows. +// +// See https://github.com/microsoft/ebpf-for-windows/blob/54632eb360c560ebef2f173be1a4a4625d540744/include/ebpf_api.h#L25 +const invalidFd = -1 + +func newFD(value int) *FD { + testmain.TraceFD(value, 1) + + fd := &FD{raw: value} + fd.cleanup = runtime.AddCleanup(fd, func(raw int) { + testmain.LeakFD(raw) + _ = unix.Close(raw) + }, fd.raw) + return fd +} + +func (fd *FD) String() string { + return strconv.FormatInt(int64(fd.raw), 10) +} + +func (fd *FD) Int() int { + return int(fd.raw) +} + +func (fd *FD) Uint() uint32 { + if fd.raw == invalidFd { + // Best effort: this is the number most likely to be an invalid file + // descriptor. 
It is equal to -1 (on two's complement arches). + return math.MaxUint32 + } + return uint32(fd.raw) +} + +// Disown destroys the FD and returns its raw file descriptor without closing +// it. After this call, the underlying fd is no longer tied to the FD's +// lifecycle. +func (fd *FD) Disown() int { + value := fd.raw + testmain.ForgetFD(value) + fd.raw = invalidFd + + fd.cleanup.Stop() + return value +} diff --git a/vendor/github.com/cilium/ebpf/internal/sys/fd_other.go b/vendor/github.com/cilium/ebpf/internal/sys/fd_other.go new file mode 100644 index 000000000..2a6423a59 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/sys/fd_other.go @@ -0,0 +1,72 @@ +//go:build !windows + +package sys + +import ( + "fmt" + "os" + "runtime" + + "github.com/cilium/ebpf/internal/unix" +) + +type FD struct { + raw int + cleanup runtime.Cleanup +} + +// NewFD wraps a raw fd with a finalizer. +// +// You must not use the raw fd after calling this function, since the underlying +// file descriptor number may change. This is because the BPF UAPI assumes that +// zero is not a valid fd value. +func NewFD(value int) (*FD, error) { + if value < 0 { + return nil, fmt.Errorf("invalid fd %d", value) + } + + fd := newFD(value) + if value != 0 { + return fd, nil + } + + dup, err := fd.Dup() + _ = fd.Close() + return dup, err +} + +func (fd *FD) Close() error { + if fd.raw < 0 { + return nil + } + + return unix.Close(fd.Disown()) +} + +func (fd *FD) Dup() (*FD, error) { + if fd.raw < 0 { + return nil, ErrClosedFd + } + + // Always require the fd to be larger than zero: the BPF API treats the value + // as "no argument provided". + dup, err := unix.FcntlInt(uintptr(fd.raw), unix.F_DUPFD_CLOEXEC, 1) + if err != nil { + return nil, fmt.Errorf("can't dup fd: %v", err) + } + + return newFD(dup), nil +} + +// File takes ownership of FD and turns it into an [*os.File]. +// +// You must not use the FD after the call returns. +// +// Returns [ErrClosedFd] if the fd is not valid. 
+func (fd *FD) File(name string) (*os.File, error) { + if fd.raw == invalidFd { + return nil, ErrClosedFd + } + + return os.NewFile(uintptr(fd.Disown()), name), nil +} diff --git a/vendor/github.com/cilium/ebpf/internal/sys/fd_windows.go b/vendor/github.com/cilium/ebpf/internal/sys/fd_windows.go new file mode 100644 index 000000000..1291c763f --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/sys/fd_windows.go @@ -0,0 +1,60 @@ +package sys + +import ( + "fmt" + "os" + "runtime" + + "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/efw" +) + +// FD wraps a handle which is managed by the eBPF for Windows runtime. +// +// It is not equivalent to a real file descriptor or handle. +type FD struct { + raw int + cleanup runtime.Cleanup +} + +// NewFD wraps a raw fd with a finalizer. +// +// You must not use the raw fd after calling this function. +func NewFD(value int) (*FD, error) { + if value == invalidFd { + return nil, fmt.Errorf("invalid fd %d", value) + } + + if value == 0 { + // The efW runtime never uses zero fd it seems. No need to dup it. + return nil, fmt.Errorf("invalid zero fd") + } + + return newFD(value), nil +} + +func (fd *FD) Close() error { + if fd.raw == invalidFd { + return nil + } + + return efw.EbpfCloseFd(fd.Disown()) +} + +func (fd *FD) Dup() (*FD, error) { + if fd.raw == invalidFd { + return nil, ErrClosedFd + } + + dup, err := efw.EbpfDuplicateFd(fd.raw) + if err != nil { + return nil, err + } + + return NewFD(int(dup)) +} + +// File is not implemented. 
+func (fd *FD) File(name string) (*os.File, error) { + return nil, fmt.Errorf("file from fd: %w", internal.ErrNotSupportedOnOS) +} diff --git a/vendor/github.com/cilium/ebpf/internal/sys/pinning_other.go b/vendor/github.com/cilium/ebpf/internal/sys/pinning_other.go new file mode 100644 index 000000000..96ad43abd --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/sys/pinning_other.go @@ -0,0 +1,67 @@ +//go:build !windows + +package sys + +import ( + "errors" + "fmt" + "os" + "path/filepath" + "runtime" + + "github.com/cilium/ebpf/internal/linux" + "github.com/cilium/ebpf/internal/unix" +) + +func Pin(currentPath, newPath string, fd *FD) error { + if newPath == "" { + return errors.New("given pinning path cannot be empty") + } + if currentPath == newPath { + return nil + } + + fsType, err := linux.FSType(filepath.Dir(newPath)) + if err != nil { + return err + } + if fsType != unix.BPF_FS_MAGIC { + return fmt.Errorf("%s is not on a bpf filesystem", newPath) + } + + defer runtime.KeepAlive(fd) + + if currentPath == "" { + return ObjPin(&ObjPinAttr{ + Pathname: NewStringPointer(newPath), + BpfFd: fd.Uint(), + }) + } + + // Renameat2 is used instead of os.Rename to disallow the new path replacing + // an existing path. + err = unix.Renameat2(unix.AT_FDCWD, currentPath, unix.AT_FDCWD, newPath, unix.RENAME_NOREPLACE) + if err == nil { + // Object is now moved to the new pinning path. + return nil + } + if !os.IsNotExist(err) { + return fmt.Errorf("unable to move pinned object to new path %v: %w", newPath, err) + } + // Internal state not in sync with the file system so let's fix it. 
+ return ObjPin(&ObjPinAttr{ + Pathname: NewStringPointer(newPath), + BpfFd: fd.Uint(), + }) +} + +func Unpin(pinnedPath string) error { + if pinnedPath == "" { + return nil + } + err := os.Remove(pinnedPath) + if err == nil || os.IsNotExist(err) { + return nil + } + return err +} diff --git a/vendor/github.com/cilium/ebpf/internal/sys/pinning_windows.go b/vendor/github.com/cilium/ebpf/internal/sys/pinning_windows.go new file mode 100644 index 000000000..c8ab68550 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/sys/pinning_windows.go @@ -0,0 +1,44 @@ +package sys + +import ( + "errors" + "runtime" + + "github.com/cilium/ebpf/internal/efw" +) + +func Pin(currentPath, newPath string, fd *FD) error { + defer runtime.KeepAlive(fd) + + if newPath == "" { + return errors.New("given pinning path cannot be empty") + } + if currentPath == newPath { + return nil + } + + if currentPath == "" { + return ObjPin(&ObjPinAttr{ + Pathname: NewStringPointer(newPath), + BpfFd: fd.Uint(), + }) + } + + return ObjPin(&ObjPinAttr{ + Pathname: NewStringPointer(newPath), + BpfFd: fd.Uint(), + }) +} + +func Unpin(pinnedPath string) error { + if pinnedPath == "" { + return nil + } + + err := efw.EbpfObjectUnpin(pinnedPath) + if err != nil && !errors.Is(err, efw.EBPF_KEY_NOT_FOUND) { + return err + } + + return nil +} diff --git a/vendor/github.com/cilium/ebpf/internal/sys/ptr.go b/vendor/github.com/cilium/ebpf/internal/sys/ptr.go new file mode 100644 index 000000000..aa6c2e91a --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/sys/ptr.go @@ -0,0 +1,74 @@ +package sys + +import ( + "unsafe" + + "github.com/cilium/ebpf/internal/unix" +) + +// UnsafePointer creates a 64-bit pointer from an unsafe Pointer. +func UnsafePointer(ptr unsafe.Pointer) Pointer { + return Pointer{ptr: ptr} +} + +// UnsafeSlicePointer creates an untyped [Pointer] from a slice. 
+func UnsafeSlicePointer[T comparable](buf []T) Pointer { + if len(buf) == 0 { + return Pointer{} + } + + return Pointer{ptr: unsafe.Pointer(unsafe.SliceData(buf))} +} + +// TypedPointer points to typed memory. +// +// It is like a *T except that it accounts for the BPF syscall interface. +type TypedPointer[T any] struct { + _ [0]*T // prevent TypedPointer[a] to be convertible to TypedPointer[b] + ptr Pointer +} + +func (p TypedPointer[T]) IsNil() bool { + return p.ptr.ptr == nil +} + +// SlicePointer creates a [TypedPointer] from a slice. +func SlicePointer[T comparable](s []T) TypedPointer[T] { + return TypedPointer[T]{ptr: UnsafeSlicePointer(s)} +} + +// StringPointer points to a null-terminated string. +type StringPointer struct { + _ [0]string + ptr Pointer +} + +// NewStringPointer creates a [StringPointer] from a string. +func NewStringPointer(str string) StringPointer { + slice, err := unix.ByteSliceFromString(str) + if err != nil { + return StringPointer{} + } + + return StringPointer{ptr: Pointer{ptr: unsafe.Pointer(&slice[0])}} +} + +// StringSlicePointer points to a slice of [StringPointer]. +type StringSlicePointer struct { + _ [0][]string + ptr Pointer +} + +// NewStringSlicePointer allocates an array of Pointers to each string in the +// given slice of strings and returns a 64-bit pointer to the start of the +// resulting array. +// +// Use this function to pass arrays of strings as syscall arguments. 
+func NewStringSlicePointer(strings []string) StringSlicePointer { + sp := make([]StringPointer, 0, len(strings)) + for _, s := range strings { + sp = append(sp, NewStringPointer(s)) + } + + return StringSlicePointer{ptr: Pointer{ptr: unsafe.Pointer(&sp[0])}} +} diff --git a/vendor/github.com/cilium/ebpf/internal/sys/ptr_32_be.go b/vendor/github.com/cilium/ebpf/internal/sys/ptr_32_be.go new file mode 100644 index 000000000..0b0feeb7a --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/sys/ptr_32_be.go @@ -0,0 +1,16 @@ +//go:build armbe || mips || mips64p32 + +package sys + +import ( + "structs" + "unsafe" +) + +// Pointer wraps an unsafe.Pointer to be 64bit to +// conform to the syscall specification. +type Pointer struct { + structs.HostLayout + pad uint32 + ptr unsafe.Pointer +} diff --git a/vendor/github.com/cilium/ebpf/internal/sys/ptr_32_le.go b/vendor/github.com/cilium/ebpf/internal/sys/ptr_32_le.go new file mode 100644 index 000000000..f9007fe84 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/sys/ptr_32_le.go @@ -0,0 +1,16 @@ +//go:build 386 || amd64p32 || arm || mipsle || mips64p32le + +package sys + +import ( + "structs" + "unsafe" +) + +// Pointer wraps an unsafe.Pointer to be 64bit to +// conform to the syscall specification. +type Pointer struct { + structs.HostLayout + ptr unsafe.Pointer + pad uint32 +} diff --git a/vendor/github.com/cilium/ebpf/internal/sys/ptr_64.go b/vendor/github.com/cilium/ebpf/internal/sys/ptr_64.go new file mode 100644 index 000000000..05196cca7 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/sys/ptr_64.go @@ -0,0 +1,15 @@ +//go:build !386 && !amd64p32 && !arm && !mipsle && !mips64p32le && !armbe && !mips && !mips64p32 + +package sys + +import ( + "structs" + "unsafe" +) + +// Pointer wraps an unsafe.Pointer to be 64bit to +// conform to the syscall specification. 
+type Pointer struct { + structs.HostLayout + ptr unsafe.Pointer +} diff --git a/vendor/github.com/cilium/ebpf/internal/sys/signals.go b/vendor/github.com/cilium/ebpf/internal/sys/signals.go new file mode 100644 index 000000000..e75e96052 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/sys/signals.go @@ -0,0 +1,85 @@ +//go:build !windows + +package sys + +import ( + "fmt" + "runtime" + "unsafe" + + "github.com/cilium/ebpf/internal/unix" +) + +// A sigset containing only SIGPROF. +var profSet unix.Sigset_t + +func init() { + // See sigsetAdd for details on the implementation. Open coded here so + // that the compiler will check the constant calculations for us. + profSet.Val[sigprofBit/wordBits] |= 1 << (sigprofBit % wordBits) +} + +// maskProfilerSignal locks the calling goroutine to its underlying OS thread +// and adds SIGPROF to the thread's signal mask. This prevents pprof from +// interrupting expensive syscalls like e.g. BPF_PROG_LOAD. +// +// The caller must defer unmaskProfilerSignal() to reverse the operation. +func maskProfilerSignal() { + runtime.LockOSThread() + + if err := unix.PthreadSigmask(unix.SIG_BLOCK, &profSet, nil); err != nil { + runtime.UnlockOSThread() + panic(fmt.Errorf("masking profiler signal: %w", err)) + } +} + +// unmaskProfilerSignal removes SIGPROF from the underlying thread's signal +// mask, allowing it to be interrupted for profiling once again. +// +// It also unlocks the current goroutine from its underlying OS thread. +func unmaskProfilerSignal() { + defer runtime.UnlockOSThread() + + if err := unix.PthreadSigmask(unix.SIG_UNBLOCK, &profSet, nil); err != nil { + panic(fmt.Errorf("unmasking profiler signal: %w", err)) + } +} + +const ( + // Signal is the nth bit in the bitfield. + sigprofBit = int(unix.SIGPROF - 1) + // The number of bits in one Sigset_t word. + wordBits = int(unsafe.Sizeof(unix.Sigset_t{}.Val[0])) * 8 +) + +// sigsetAdd adds signal to set. 
+// +// Note: Sigset_t.Val's value type is uint32 or uint64 depending on the arch. +// This function must be able to deal with both and so must avoid any direct +// references to u32 or u64 types. +func sigsetAdd(set *unix.Sigset_t, signal unix.Signal) error { + if signal < 1 { + return fmt.Errorf("signal %d must be larger than 0", signal) + } + + // For amd64, runtime.sigaddset() performs the following operation: + // set[(signal-1)/32] |= 1 << ((uint32(signal) - 1) & 31) + // + // This trick depends on sigset being two u32's, causing a signal in the + // bottom 31 bits to be written to the low word if bit 32 is low, or the high + // word if bit 32 is high. + + // Signal is the nth bit in the bitfield. + bit := int(signal - 1) + // Word within the sigset the bit needs to be written to. + word := bit / wordBits + + if word >= len(set.Val) { + return fmt.Errorf("signal %d does not fit within unix.Sigset_t", signal) + } + + // Write the signal bit into its corresponding word at the corrected offset. + set.Val[word] |= 1 << (bit % wordBits) + + return nil +} diff --git a/vendor/github.com/cilium/ebpf/internal/sys/syscall.go b/vendor/github.com/cilium/ebpf/internal/sys/syscall.go new file mode 100644 index 000000000..f2fffd26b --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/sys/syscall.go @@ -0,0 +1,191 @@ +package sys + +import ( + "runtime" + "unsafe" + + "github.com/cilium/ebpf/internal/unix" +) + +// ENOTSUPP is a Linux internal error code that has leaked into UAPI. +// +// It is not the same as ENOTSUP or EOPNOTSUPP. +const ENOTSUPP = unix.Errno(524) + +// Info is implemented by all structs that can be passed to the ObjInfo syscall. 
+// +// MapInfo +// ProgInfo +// LinkInfo +// BtfInfo +type Info interface { + info() (unsafe.Pointer, uint32) +} + +var _ Info = (*MapInfo)(nil) + +func (i *MapInfo) info() (unsafe.Pointer, uint32) { + return unsafe.Pointer(i), uint32(unsafe.Sizeof(*i)) +} + +var _ Info = (*ProgInfo)(nil) + +func (i *ProgInfo) info() (unsafe.Pointer, uint32) { + return unsafe.Pointer(i), uint32(unsafe.Sizeof(*i)) +} + +var _ Info = (*LinkInfo)(nil) + +func (i *LinkInfo) info() (unsafe.Pointer, uint32) { + return unsafe.Pointer(i), uint32(unsafe.Sizeof(*i)) +} + +func (i *TracingLinkInfo) info() (unsafe.Pointer, uint32) { + return unsafe.Pointer(i), uint32(unsafe.Sizeof(*i)) +} + +func (i *CgroupLinkInfo) info() (unsafe.Pointer, uint32) { + return unsafe.Pointer(i), uint32(unsafe.Sizeof(*i)) +} + +func (i *NetNsLinkInfo) info() (unsafe.Pointer, uint32) { + return unsafe.Pointer(i), uint32(unsafe.Sizeof(*i)) +} + +func (i *XDPLinkInfo) info() (unsafe.Pointer, uint32) { + return unsafe.Pointer(i), uint32(unsafe.Sizeof(*i)) +} + +func (i *TcxLinkInfo) info() (unsafe.Pointer, uint32) { + return unsafe.Pointer(i), uint32(unsafe.Sizeof(*i)) +} + +func (i *NetfilterLinkInfo) info() (unsafe.Pointer, uint32) { + return unsafe.Pointer(i), uint32(unsafe.Sizeof(*i)) +} + +func (i *NetkitLinkInfo) info() (unsafe.Pointer, uint32) { + return unsafe.Pointer(i), uint32(unsafe.Sizeof(*i)) +} + +func (i *KprobeMultiLinkInfo) info() (unsafe.Pointer, uint32) { + return unsafe.Pointer(i), uint32(unsafe.Sizeof(*i)) +} + +func (i *KprobeLinkInfo) info() (unsafe.Pointer, uint32) { + return unsafe.Pointer(i), uint32(unsafe.Sizeof(*i)) +} + +var _ Info = (*BtfInfo)(nil) + +func (i *BtfInfo) info() (unsafe.Pointer, uint32) { + return unsafe.Pointer(i), uint32(unsafe.Sizeof(*i)) +} + +func (i *PerfEventLinkInfo) info() (unsafe.Pointer, uint32) { + return unsafe.Pointer(i), uint32(unsafe.Sizeof(*i)) +} + +// ObjInfo retrieves information about a BPF Fd. 
+// +// info may be one of MapInfo, ProgInfo, LinkInfo and BtfInfo. +func ObjInfo(fd *FD, info Info) error { + ptr, len := info.info() + err := ObjGetInfoByFd(&ObjGetInfoByFdAttr{ + BpfFd: fd.Uint(), + InfoLen: len, + Info: UnsafePointer(ptr), + }) + runtime.KeepAlive(fd) + return err +} + +// BPFObjName is a null-terminated string made up of +// 'A-Za-z0-9_' characters. +type ObjName [BPF_OBJ_NAME_LEN]byte + +// NewObjName truncates the result if it is too long. +func NewObjName(name string) ObjName { + var result ObjName + copy(result[:BPF_OBJ_NAME_LEN-1], name) + return result +} + +// LogLevel controls the verbosity of the kernel's eBPF program verifier. +type LogLevel uint32 + +const ( + BPF_LOG_LEVEL1 LogLevel = 1 << iota + BPF_LOG_LEVEL2 + BPF_LOG_STATS +) + +// MapID uniquely identifies a bpf_map. +type MapID uint32 + +// ProgramID uniquely identifies a bpf_map. +type ProgramID uint32 + +// LinkID uniquely identifies a bpf_link. +type LinkID uint32 + +// BTFID uniquely identifies a BTF blob loaded into the kernel. +type BTFID uint32 + +// TypeID identifies a type in a BTF blob. +type TypeID uint32 + +// Flags used by bpf_mprog. +const ( + BPF_F_REPLACE = 1 << (iota + 2) + BPF_F_BEFORE + BPF_F_AFTER + BPF_F_ID + BPF_F_LINK_MPROG = 1 << 13 // aka BPF_F_LINK +) + +// Flags used by BPF_PROG_LOAD. +const ( + BPF_F_SLEEPABLE = 1 << 4 + BPF_F_XDP_HAS_FRAGS = 1 << 5 + BPF_F_XDP_DEV_BOUND_ONLY = 1 << 6 +) + +const BPF_TAG_SIZE = 8 +const BPF_OBJ_NAME_LEN = 16 + +// wrappedErrno wraps [unix.Errno] to prevent direct comparisons with +// syscall.E* or unix.E* constants. +// +// You should never export an error of this type. 
+type wrappedErrno struct { + unix.Errno +} + +func (we wrappedErrno) Unwrap() error { + return we.Errno +} + +func (we wrappedErrno) Error() string { + if we.Errno == ENOTSUPP { + return "operation not supported" + } + return we.Errno.Error() +} + +type syscallError struct { + error + errno unix.Errno +} + +func Error(err error, errno unix.Errno) error { + return &syscallError{err, errno} +} + +func (se *syscallError) Is(target error) bool { + return target == se.error +} + +func (se *syscallError) Unwrap() error { + return se.errno +} diff --git a/vendor/github.com/cilium/ebpf/internal/sys/syscall_other.go b/vendor/github.com/cilium/ebpf/internal/sys/syscall_other.go new file mode 100644 index 000000000..b99e6e462 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/sys/syscall_other.go @@ -0,0 +1,84 @@ +//go:build !windows + +package sys + +import ( + "fmt" + "os" + "path/filepath" + "runtime" + "strings" + "unsafe" + + "github.com/cilium/ebpf/internal/unix" +) + +// BPF wraps SYS_BPF. +// +// Any pointers contained in attr must use the Pointer type from this package. +func BPF(cmd Cmd, attr unsafe.Pointer, size uintptr) (uintptr, error) { + // Prevent the Go profiler from repeatedly interrupting the verifier, + // which could otherwise lead to a livelock due to receiving EAGAIN. + if cmd == BPF_PROG_LOAD || cmd == BPF_PROG_RUN { + maskProfilerSignal() + defer unmaskProfilerSignal() + } + + for { + r1, _, errNo := unix.Syscall(unix.SYS_BPF, uintptr(cmd), uintptr(attr), size) + runtime.KeepAlive(attr) + + // As of ~4.20 the verifier can be interrupted by a signal, + // and returns EAGAIN in that case. + if errNo == unix.EAGAIN && cmd == BPF_PROG_LOAD { + continue + } + + var err error + if errNo != 0 { + err = wrappedErrno{errNo} + } + + return r1, err + } +} + +// ObjGetTyped wraps [ObjGet] with a readlink call to extract the type of the +// underlying bpf object. 
+func ObjGetTyped(attr *ObjGetAttr) (*FD, ObjType, error) { + fd, err := ObjGet(attr) + if err != nil { + return nil, 0, err + } + + typ, err := readType(fd) + if err != nil { + _ = fd.Close() + return nil, 0, fmt.Errorf("reading fd type: %w", err) + } + + return fd, typ, nil +} + +// readType returns the bpf object type of the file descriptor by calling +// readlink(3). Returns an error if the file descriptor does not represent a bpf +// object. +func readType(fd *FD) (ObjType, error) { + s, err := os.Readlink(filepath.Join("/proc/self/fd/", fd.String())) + if err != nil { + return 0, fmt.Errorf("readlink fd %d: %w", fd.Int(), err) + } + + s = strings.TrimPrefix(s, "anon_inode:") + + switch s { + case "bpf-map": + return BPF_TYPE_MAP, nil + case "bpf-prog": + return BPF_TYPE_PROG, nil + case "bpf-link": + return BPF_TYPE_LINK, nil + } + + return 0, fmt.Errorf("unknown type %s of fd %d", s, fd.Int()) +} diff --git a/vendor/github.com/cilium/ebpf/internal/sys/syscall_windows.go b/vendor/github.com/cilium/ebpf/internal/sys/syscall_windows.go new file mode 100644 index 000000000..08f73805c --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/sys/syscall_windows.go @@ -0,0 +1,69 @@ +package sys + +import ( + "fmt" + "syscall" + "unsafe" + + "golang.org/x/sys/windows" + + "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/efw" + "github.com/cilium/ebpf/internal/unix" +) + +// BPF calls the BPF syscall wrapper in ebpfapi.dll. +// +// Any pointers contained in attr must use the Pointer type from this package. +// +// The implementation lives in https://github.com/microsoft/ebpf-for-windows/blob/main/libs/api/bpf_syscall.cpp +func BPF(cmd Cmd, attr unsafe.Pointer, size uintptr) (uintptr, error) { + // On Linux we need to guard against preemption by the profiler here. 
On + // Windows it seems like a cgocall may not be preempted: + // https://github.com/golang/go/blob/8b51146c698bcfcc2c2b73fa9390db5230f2ce0a/src/runtime/os_windows.go#L1240-L1246 + + addr, err := efw.BPF.Find() + if err != nil { + return 0, err + } + + // Using [LazyProc.Call] forces attr to escape, which isn't the case when using syscall.Syscall directly. + r1, _, lastError := syscall.SyscallN(addr, uintptr(cmd), uintptr(attr), size) + + if ret := int(efw.Int(r1)); ret < 0 { + errNo := unix.Errno(-ret) + if errNo == unix.EINVAL && lastError == windows.ERROR_CALL_NOT_IMPLEMENTED { + return 0, internal.ErrNotSupportedOnOS + } + return 0, wrappedErrno{errNo} + } + + return r1, nil +} + +// ObjGetTyped retrieves an pinned object and its type. +func ObjGetTyped(attr *ObjGetAttr) (*FD, ObjType, error) { + fd, err := ObjGet(attr) + if err != nil { + return nil, 0, err + } + + efwType, err := efw.EbpfObjectGetInfoByFd(fd.Int(), nil, nil) + if err != nil { + _ = fd.Close() + return nil, 0, err + } + + switch efwType { + case efw.EBPF_OBJECT_UNKNOWN: + return fd, BPF_TYPE_UNSPEC, nil + case efw.EBPF_OBJECT_MAP: + return fd, BPF_TYPE_MAP, nil + case efw.EBPF_OBJECT_LINK: + return fd, BPF_TYPE_LINK, nil + case efw.EBPF_OBJECT_PROGRAM: + return fd, BPF_TYPE_PROG, nil + default: + return nil, 0, fmt.Errorf("unrecognized object type %v", efwType) + } +} diff --git a/vendor/github.com/cilium/ebpf/internal/sys/types.go b/vendor/github.com/cilium/ebpf/internal/sys/types.go new file mode 100644 index 000000000..2e6674862 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/sys/types.go @@ -0,0 +1,1662 @@ +// Code generated by internal/cmd/gentypes; DO NOT EDIT. 
+ +package sys + +import ( + "structs" + "unsafe" +) + +const ( + BPF_ADJ_ROOM_ENCAP_L2_MASK = 255 + BPF_ADJ_ROOM_ENCAP_L2_SHIFT = 56 + BPF_ANY = 0 + BPF_CSUM_LEVEL_DEC = 2 + BPF_CSUM_LEVEL_INC = 1 + BPF_CSUM_LEVEL_QUERY = 0 + BPF_CSUM_LEVEL_RESET = 3 + BPF_EXIST = 2 + BPF_FIB_LKUP_RET_BLACKHOLE = 1 + BPF_FIB_LKUP_RET_FRAG_NEEDED = 8 + BPF_FIB_LKUP_RET_FWD_DISABLED = 5 + BPF_FIB_LKUP_RET_NOT_FWDED = 4 + BPF_FIB_LKUP_RET_NO_NEIGH = 7 + BPF_FIB_LKUP_RET_NO_SRC_ADDR = 9 + BPF_FIB_LKUP_RET_PROHIBIT = 3 + BPF_FIB_LKUP_RET_SUCCESS = 0 + BPF_FIB_LKUP_RET_UNREACHABLE = 2 + BPF_FIB_LKUP_RET_UNSUPP_LWT = 6 + BPF_FIB_LOOKUP_DIRECT = 1 + BPF_FIB_LOOKUP_MARK = 32 + BPF_FIB_LOOKUP_OUTPUT = 2 + BPF_FIB_LOOKUP_SKIP_NEIGH = 4 + BPF_FIB_LOOKUP_SRC = 16 + BPF_FIB_LOOKUP_TBID = 8 + BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG = 1 + BPF_FLOW_DISSECTOR_F_STOP_AT_ENCAP = 4 + BPF_FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL = 2 + BPF_F_ADJ_ROOM_DECAP_L3_IPV4 = 128 + BPF_F_ADJ_ROOM_DECAP_L3_IPV6 = 256 + BPF_F_ADJ_ROOM_ENCAP_L2_ETH = 64 + BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 = 2 + BPF_F_ADJ_ROOM_ENCAP_L3_IPV6 = 4 + BPF_F_ADJ_ROOM_ENCAP_L4_GRE = 8 + BPF_F_ADJ_ROOM_ENCAP_L4_UDP = 16 + BPF_F_ADJ_ROOM_FIXED_GSO = 1 + BPF_F_ADJ_ROOM_NO_CSUM_RESET = 32 + BPF_F_BPRM_SECUREEXEC = 1 + BPF_F_BROADCAST = 8 + BPF_F_CLONE = 512 + BPF_F_CTXLEN_MASK = 4503595332403200 + BPF_F_CURRENT_CPU = 4294967295 + BPF_F_CURRENT_NETNS = 18446744073709551615 + BPF_F_DONT_FRAGMENT = 4 + BPF_F_EXCLUDE_INGRESS = 16 + BPF_F_FAST_STACK_CMP = 512 + BPF_F_GET_BRANCH_RECORDS_SIZE = 1 + BPF_F_HDR_FIELD_MASK = 15 + BPF_F_INDEX_MASK = 4294967295 + BPF_F_INGRESS = 1 + BPF_F_INNER_MAP = 4096 + BPF_F_INVALIDATE_HASH = 2 + BPF_F_KPROBE_MULTI_RETURN = 1 + BPF_F_LINK = 8192 + BPF_F_LOCK = 4 + BPF_F_MARK_ENFORCE = 64 + BPF_F_MARK_MANGLED_0 = 32 + BPF_F_MMAPABLE = 1024 + BPF_F_NEIGH = 2 + BPF_F_NEXTHOP = 8 + BPF_F_NO_COMMON_LRU = 2 + BPF_F_NO_PREALLOC = 1 + BPF_F_NO_TUNNEL_KEY = 16 + BPF_F_NO_USER_CONV = 262144 + BPF_F_NUMA_NODE = 4 + BPF_F_PATH_FD = 16384 + 
BPF_F_PEER = 4 + BPF_F_PRESERVE_ELEMS = 2048 + BPF_F_PSEUDO_HDR = 16 + BPF_F_RDONLY = 8 + BPF_F_RDONLY_PROG = 128 + BPF_F_RECOMPUTE_CSUM = 1 + BPF_F_REUSE_STACKID = 1024 + BPF_F_SEGV_ON_FAULT = 131072 + BPF_F_SEQ_NUMBER = 8 + BPF_F_SKIP_FIELD_MASK = 255 + BPF_F_STACK_BUILD_ID = 32 + BPF_F_SYSCTL_BASE_NAME = 1 + BPF_F_TIMER_ABS = 1 + BPF_F_TIMER_CPU_PIN = 2 + BPF_F_TOKEN_FD = 65536 + BPF_F_TUNINFO_FLAGS = 16 + BPF_F_TUNINFO_IPV6 = 1 + BPF_F_UPROBE_MULTI_RETURN = 1 + BPF_F_USER_BUILD_ID = 2048 + BPF_F_USER_STACK = 256 + BPF_F_VTYPE_BTF_OBJ_FD = 32768 + BPF_F_WRONLY = 16 + BPF_F_WRONLY_PROG = 256 + BPF_F_ZERO_CSUM_TX = 2 + BPF_F_ZERO_SEED = 64 + BPF_LOAD_HDR_OPT_TCP_SYN = 1 + BPF_LOCAL_STORAGE_GET_F_CREATE = 1 + BPF_MAX_LOOPS = 8388608 + BPF_MAX_TRAMP_LINKS = 38 + BPF_NOEXIST = 1 + BPF_RB_AVAIL_DATA = 0 + BPF_RB_CONS_POS = 2 + BPF_RB_FORCE_WAKEUP = 2 + BPF_RB_NO_WAKEUP = 1 + BPF_RB_PROD_POS = 3 + BPF_RB_RING_SIZE = 1 + BPF_REG_0 = 0 + BPF_REG_1 = 1 + BPF_REG_10 = 10 + BPF_REG_2 = 2 + BPF_REG_3 = 3 + BPF_REG_4 = 4 + BPF_REG_5 = 5 + BPF_REG_6 = 6 + BPF_REG_7 = 7 + BPF_REG_8 = 8 + BPF_REG_9 = 9 + BPF_RINGBUF_BUSY_BIT = 2147483648 + BPF_RINGBUF_DISCARD_BIT = 1073741824 + BPF_RINGBUF_HDR_SZ = 8 + BPF_SKB_CLOCK_MONOTONIC = 1 + BPF_SKB_CLOCK_REALTIME = 0 + BPF_SKB_CLOCK_TAI = 2 + BPF_SKB_TSTAMP_DELIVERY_MONO = 1 + BPF_SKB_TSTAMP_UNSPEC = 0 + BPF_SK_LOOKUP_F_NO_REUSEPORT = 2 + BPF_SK_LOOKUP_F_REPLACE = 1 + BPF_SK_STORAGE_GET_F_CREATE = 1 + BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB = 4 + BPF_SOCK_OPS_ALL_CB_FLAGS = 127 + BPF_SOCK_OPS_BASE_RTT = 7 + BPF_SOCK_OPS_HDR_OPT_LEN_CB = 14 + BPF_SOCK_OPS_NEEDS_ECN = 6 + BPF_SOCK_OPS_PARSE_ALL_HDR_OPT_CB_FLAG = 16 + BPF_SOCK_OPS_PARSE_HDR_OPT_CB = 13 + BPF_SOCK_OPS_PARSE_UNKNOWN_HDR_OPT_CB_FLAG = 32 + BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB = 5 + BPF_SOCK_OPS_RETRANS_CB = 9 + BPF_SOCK_OPS_RETRANS_CB_FLAG = 2 + BPF_SOCK_OPS_RTO_CB = 8 + BPF_SOCK_OPS_RTO_CB_FLAG = 1 + BPF_SOCK_OPS_RTT_CB = 12 + BPF_SOCK_OPS_RTT_CB_FLAG = 8 + BPF_SOCK_OPS_RWND_INIT = 
2 + BPF_SOCK_OPS_STATE_CB = 10 + BPF_SOCK_OPS_STATE_CB_FLAG = 4 + BPF_SOCK_OPS_TCP_CONNECT_CB = 3 + BPF_SOCK_OPS_TCP_LISTEN_CB = 11 + BPF_SOCK_OPS_TIMEOUT_INIT = 1 + BPF_SOCK_OPS_VOID = 0 + BPF_SOCK_OPS_WRITE_HDR_OPT_CB = 15 + BPF_SOCK_OPS_WRITE_HDR_OPT_CB_FLAG = 64 + BPF_TASK_ITER_ALL_PROCS = 0 + BPF_TASK_ITER_ALL_THREADS = 1 + BPF_TASK_ITER_PROC_THREADS = 2 + BPF_TCP_BOUND_INACTIVE = 13 + BPF_TCP_CLOSE = 7 + BPF_TCP_CLOSE_WAIT = 8 + BPF_TCP_CLOSING = 11 + BPF_TCP_ESTABLISHED = 1 + BPF_TCP_FIN_WAIT1 = 4 + BPF_TCP_FIN_WAIT2 = 5 + BPF_TCP_LAST_ACK = 9 + BPF_TCP_LISTEN = 10 + BPF_TCP_MAX_STATES = 14 + BPF_TCP_NEW_SYN_RECV = 12 + BPF_TCP_SYN_RECV = 3 + BPF_TCP_SYN_SENT = 2 + BPF_TCP_TIME_WAIT = 6 + BPF_WRITE_HDR_TCP_CURRENT_MSS = 1 + BPF_WRITE_HDR_TCP_SYNACK_COOKIE = 2 + BPF_XFRM_STATE_OPTS_SZ = 36 +) + +type AdjRoomMode uint32 + +const ( + BPF_ADJ_ROOM_NET AdjRoomMode = 0 + BPF_ADJ_ROOM_MAC AdjRoomMode = 1 +) + +type AttachType uint32 + +const ( + BPF_CGROUP_INET_INGRESS AttachType = 0 + BPF_CGROUP_INET_EGRESS AttachType = 1 + BPF_CGROUP_INET_SOCK_CREATE AttachType = 2 + BPF_CGROUP_SOCK_OPS AttachType = 3 + BPF_SK_SKB_STREAM_PARSER AttachType = 4 + BPF_SK_SKB_STREAM_VERDICT AttachType = 5 + BPF_CGROUP_DEVICE AttachType = 6 + BPF_SK_MSG_VERDICT AttachType = 7 + BPF_CGROUP_INET4_BIND AttachType = 8 + BPF_CGROUP_INET6_BIND AttachType = 9 + BPF_CGROUP_INET4_CONNECT AttachType = 10 + BPF_CGROUP_INET6_CONNECT AttachType = 11 + BPF_CGROUP_INET4_POST_BIND AttachType = 12 + BPF_CGROUP_INET6_POST_BIND AttachType = 13 + BPF_CGROUP_UDP4_SENDMSG AttachType = 14 + BPF_CGROUP_UDP6_SENDMSG AttachType = 15 + BPF_LIRC_MODE2 AttachType = 16 + BPF_FLOW_DISSECTOR AttachType = 17 + BPF_CGROUP_SYSCTL AttachType = 18 + BPF_CGROUP_UDP4_RECVMSG AttachType = 19 + BPF_CGROUP_UDP6_RECVMSG AttachType = 20 + BPF_CGROUP_GETSOCKOPT AttachType = 21 + BPF_CGROUP_SETSOCKOPT AttachType = 22 + BPF_TRACE_RAW_TP AttachType = 23 + BPF_TRACE_FENTRY AttachType = 24 + BPF_TRACE_FEXIT AttachType = 25 + 
BPF_MODIFY_RETURN AttachType = 26 + BPF_LSM_MAC AttachType = 27 + BPF_TRACE_ITER AttachType = 28 + BPF_CGROUP_INET4_GETPEERNAME AttachType = 29 + BPF_CGROUP_INET6_GETPEERNAME AttachType = 30 + BPF_CGROUP_INET4_GETSOCKNAME AttachType = 31 + BPF_CGROUP_INET6_GETSOCKNAME AttachType = 32 + BPF_XDP_DEVMAP AttachType = 33 + BPF_CGROUP_INET_SOCK_RELEASE AttachType = 34 + BPF_XDP_CPUMAP AttachType = 35 + BPF_SK_LOOKUP AttachType = 36 + BPF_XDP AttachType = 37 + BPF_SK_SKB_VERDICT AttachType = 38 + BPF_SK_REUSEPORT_SELECT AttachType = 39 + BPF_SK_REUSEPORT_SELECT_OR_MIGRATE AttachType = 40 + BPF_PERF_EVENT AttachType = 41 + BPF_TRACE_KPROBE_MULTI AttachType = 42 + BPF_LSM_CGROUP AttachType = 43 + BPF_STRUCT_OPS AttachType = 44 + BPF_NETFILTER AttachType = 45 + BPF_TCX_INGRESS AttachType = 46 + BPF_TCX_EGRESS AttachType = 47 + BPF_TRACE_UPROBE_MULTI AttachType = 48 + BPF_CGROUP_UNIX_CONNECT AttachType = 49 + BPF_CGROUP_UNIX_SENDMSG AttachType = 50 + BPF_CGROUP_UNIX_RECVMSG AttachType = 51 + BPF_CGROUP_UNIX_GETPEERNAME AttachType = 52 + BPF_CGROUP_UNIX_GETSOCKNAME AttachType = 53 + BPF_NETKIT_PRIMARY AttachType = 54 + BPF_NETKIT_PEER AttachType = 55 + BPF_TRACE_KPROBE_SESSION AttachType = 56 + __MAX_BPF_ATTACH_TYPE AttachType = 57 +) + +type Cmd uint32 + +const ( + BPF_MAP_CREATE Cmd = 0 + BPF_MAP_LOOKUP_ELEM Cmd = 1 + BPF_MAP_UPDATE_ELEM Cmd = 2 + BPF_MAP_DELETE_ELEM Cmd = 3 + BPF_MAP_GET_NEXT_KEY Cmd = 4 + BPF_PROG_LOAD Cmd = 5 + BPF_OBJ_PIN Cmd = 6 + BPF_OBJ_GET Cmd = 7 + BPF_PROG_ATTACH Cmd = 8 + BPF_PROG_DETACH Cmd = 9 + BPF_PROG_TEST_RUN Cmd = 10 + BPF_PROG_RUN Cmd = 10 + BPF_PROG_GET_NEXT_ID Cmd = 11 + BPF_MAP_GET_NEXT_ID Cmd = 12 + BPF_PROG_GET_FD_BY_ID Cmd = 13 + BPF_MAP_GET_FD_BY_ID Cmd = 14 + BPF_OBJ_GET_INFO_BY_FD Cmd = 15 + BPF_PROG_QUERY Cmd = 16 + BPF_RAW_TRACEPOINT_OPEN Cmd = 17 + BPF_BTF_LOAD Cmd = 18 + BPF_BTF_GET_FD_BY_ID Cmd = 19 + BPF_TASK_FD_QUERY Cmd = 20 + BPF_MAP_LOOKUP_AND_DELETE_ELEM Cmd = 21 + BPF_MAP_FREEZE Cmd = 22 + BPF_BTF_GET_NEXT_ID Cmd = 23 
+ BPF_MAP_LOOKUP_BATCH Cmd = 24 + BPF_MAP_LOOKUP_AND_DELETE_BATCH Cmd = 25 + BPF_MAP_UPDATE_BATCH Cmd = 26 + BPF_MAP_DELETE_BATCH Cmd = 27 + BPF_LINK_CREATE Cmd = 28 + BPF_LINK_UPDATE Cmd = 29 + BPF_LINK_GET_FD_BY_ID Cmd = 30 + BPF_LINK_GET_NEXT_ID Cmd = 31 + BPF_ENABLE_STATS Cmd = 32 + BPF_ITER_CREATE Cmd = 33 + BPF_LINK_DETACH Cmd = 34 + BPF_PROG_BIND_MAP Cmd = 35 + BPF_TOKEN_CREATE Cmd = 36 + __MAX_BPF_CMD Cmd = 37 +) + +type FunctionId uint32 + +const ( + BPF_FUNC_unspec FunctionId = 0 + BPF_FUNC_map_lookup_elem FunctionId = 1 + BPF_FUNC_map_update_elem FunctionId = 2 + BPF_FUNC_map_delete_elem FunctionId = 3 + BPF_FUNC_probe_read FunctionId = 4 + BPF_FUNC_ktime_get_ns FunctionId = 5 + BPF_FUNC_trace_printk FunctionId = 6 + BPF_FUNC_get_prandom_u32 FunctionId = 7 + BPF_FUNC_get_smp_processor_id FunctionId = 8 + BPF_FUNC_skb_store_bytes FunctionId = 9 + BPF_FUNC_l3_csum_replace FunctionId = 10 + BPF_FUNC_l4_csum_replace FunctionId = 11 + BPF_FUNC_tail_call FunctionId = 12 + BPF_FUNC_clone_redirect FunctionId = 13 + BPF_FUNC_get_current_pid_tgid FunctionId = 14 + BPF_FUNC_get_current_uid_gid FunctionId = 15 + BPF_FUNC_get_current_comm FunctionId = 16 + BPF_FUNC_get_cgroup_classid FunctionId = 17 + BPF_FUNC_skb_vlan_push FunctionId = 18 + BPF_FUNC_skb_vlan_pop FunctionId = 19 + BPF_FUNC_skb_get_tunnel_key FunctionId = 20 + BPF_FUNC_skb_set_tunnel_key FunctionId = 21 + BPF_FUNC_perf_event_read FunctionId = 22 + BPF_FUNC_redirect FunctionId = 23 + BPF_FUNC_get_route_realm FunctionId = 24 + BPF_FUNC_perf_event_output FunctionId = 25 + BPF_FUNC_skb_load_bytes FunctionId = 26 + BPF_FUNC_get_stackid FunctionId = 27 + BPF_FUNC_csum_diff FunctionId = 28 + BPF_FUNC_skb_get_tunnel_opt FunctionId = 29 + BPF_FUNC_skb_set_tunnel_opt FunctionId = 30 + BPF_FUNC_skb_change_proto FunctionId = 31 + BPF_FUNC_skb_change_type FunctionId = 32 + BPF_FUNC_skb_under_cgroup FunctionId = 33 + BPF_FUNC_get_hash_recalc FunctionId = 34 + BPF_FUNC_get_current_task FunctionId = 35 + 
BPF_FUNC_probe_write_user FunctionId = 36 + BPF_FUNC_current_task_under_cgroup FunctionId = 37 + BPF_FUNC_skb_change_tail FunctionId = 38 + BPF_FUNC_skb_pull_data FunctionId = 39 + BPF_FUNC_csum_update FunctionId = 40 + BPF_FUNC_set_hash_invalid FunctionId = 41 + BPF_FUNC_get_numa_node_id FunctionId = 42 + BPF_FUNC_skb_change_head FunctionId = 43 + BPF_FUNC_xdp_adjust_head FunctionId = 44 + BPF_FUNC_probe_read_str FunctionId = 45 + BPF_FUNC_get_socket_cookie FunctionId = 46 + BPF_FUNC_get_socket_uid FunctionId = 47 + BPF_FUNC_set_hash FunctionId = 48 + BPF_FUNC_setsockopt FunctionId = 49 + BPF_FUNC_skb_adjust_room FunctionId = 50 + BPF_FUNC_redirect_map FunctionId = 51 + BPF_FUNC_sk_redirect_map FunctionId = 52 + BPF_FUNC_sock_map_update FunctionId = 53 + BPF_FUNC_xdp_adjust_meta FunctionId = 54 + BPF_FUNC_perf_event_read_value FunctionId = 55 + BPF_FUNC_perf_prog_read_value FunctionId = 56 + BPF_FUNC_getsockopt FunctionId = 57 + BPF_FUNC_override_return FunctionId = 58 + BPF_FUNC_sock_ops_cb_flags_set FunctionId = 59 + BPF_FUNC_msg_redirect_map FunctionId = 60 + BPF_FUNC_msg_apply_bytes FunctionId = 61 + BPF_FUNC_msg_cork_bytes FunctionId = 62 + BPF_FUNC_msg_pull_data FunctionId = 63 + BPF_FUNC_bind FunctionId = 64 + BPF_FUNC_xdp_adjust_tail FunctionId = 65 + BPF_FUNC_skb_get_xfrm_state FunctionId = 66 + BPF_FUNC_get_stack FunctionId = 67 + BPF_FUNC_skb_load_bytes_relative FunctionId = 68 + BPF_FUNC_fib_lookup FunctionId = 69 + BPF_FUNC_sock_hash_update FunctionId = 70 + BPF_FUNC_msg_redirect_hash FunctionId = 71 + BPF_FUNC_sk_redirect_hash FunctionId = 72 + BPF_FUNC_lwt_push_encap FunctionId = 73 + BPF_FUNC_lwt_seg6_store_bytes FunctionId = 74 + BPF_FUNC_lwt_seg6_adjust_srh FunctionId = 75 + BPF_FUNC_lwt_seg6_action FunctionId = 76 + BPF_FUNC_rc_repeat FunctionId = 77 + BPF_FUNC_rc_keydown FunctionId = 78 + BPF_FUNC_skb_cgroup_id FunctionId = 79 + BPF_FUNC_get_current_cgroup_id FunctionId = 80 + BPF_FUNC_get_local_storage FunctionId = 81 + 
BPF_FUNC_sk_select_reuseport FunctionId = 82 + BPF_FUNC_skb_ancestor_cgroup_id FunctionId = 83 + BPF_FUNC_sk_lookup_tcp FunctionId = 84 + BPF_FUNC_sk_lookup_udp FunctionId = 85 + BPF_FUNC_sk_release FunctionId = 86 + BPF_FUNC_map_push_elem FunctionId = 87 + BPF_FUNC_map_pop_elem FunctionId = 88 + BPF_FUNC_map_peek_elem FunctionId = 89 + BPF_FUNC_msg_push_data FunctionId = 90 + BPF_FUNC_msg_pop_data FunctionId = 91 + BPF_FUNC_rc_pointer_rel FunctionId = 92 + BPF_FUNC_spin_lock FunctionId = 93 + BPF_FUNC_spin_unlock FunctionId = 94 + BPF_FUNC_sk_fullsock FunctionId = 95 + BPF_FUNC_tcp_sock FunctionId = 96 + BPF_FUNC_skb_ecn_set_ce FunctionId = 97 + BPF_FUNC_get_listener_sock FunctionId = 98 + BPF_FUNC_skc_lookup_tcp FunctionId = 99 + BPF_FUNC_tcp_check_syncookie FunctionId = 100 + BPF_FUNC_sysctl_get_name FunctionId = 101 + BPF_FUNC_sysctl_get_current_value FunctionId = 102 + BPF_FUNC_sysctl_get_new_value FunctionId = 103 + BPF_FUNC_sysctl_set_new_value FunctionId = 104 + BPF_FUNC_strtol FunctionId = 105 + BPF_FUNC_strtoul FunctionId = 106 + BPF_FUNC_sk_storage_get FunctionId = 107 + BPF_FUNC_sk_storage_delete FunctionId = 108 + BPF_FUNC_send_signal FunctionId = 109 + BPF_FUNC_tcp_gen_syncookie FunctionId = 110 + BPF_FUNC_skb_output FunctionId = 111 + BPF_FUNC_probe_read_user FunctionId = 112 + BPF_FUNC_probe_read_kernel FunctionId = 113 + BPF_FUNC_probe_read_user_str FunctionId = 114 + BPF_FUNC_probe_read_kernel_str FunctionId = 115 + BPF_FUNC_tcp_send_ack FunctionId = 116 + BPF_FUNC_send_signal_thread FunctionId = 117 + BPF_FUNC_jiffies64 FunctionId = 118 + BPF_FUNC_read_branch_records FunctionId = 119 + BPF_FUNC_get_ns_current_pid_tgid FunctionId = 120 + BPF_FUNC_xdp_output FunctionId = 121 + BPF_FUNC_get_netns_cookie FunctionId = 122 + BPF_FUNC_get_current_ancestor_cgroup_id FunctionId = 123 + BPF_FUNC_sk_assign FunctionId = 124 + BPF_FUNC_ktime_get_boot_ns FunctionId = 125 + BPF_FUNC_seq_printf FunctionId = 126 + BPF_FUNC_seq_write FunctionId = 127 + 
BPF_FUNC_sk_cgroup_id FunctionId = 128 + BPF_FUNC_sk_ancestor_cgroup_id FunctionId = 129 + BPF_FUNC_ringbuf_output FunctionId = 130 + BPF_FUNC_ringbuf_reserve FunctionId = 131 + BPF_FUNC_ringbuf_submit FunctionId = 132 + BPF_FUNC_ringbuf_discard FunctionId = 133 + BPF_FUNC_ringbuf_query FunctionId = 134 + BPF_FUNC_csum_level FunctionId = 135 + BPF_FUNC_skc_to_tcp6_sock FunctionId = 136 + BPF_FUNC_skc_to_tcp_sock FunctionId = 137 + BPF_FUNC_skc_to_tcp_timewait_sock FunctionId = 138 + BPF_FUNC_skc_to_tcp_request_sock FunctionId = 139 + BPF_FUNC_skc_to_udp6_sock FunctionId = 140 + BPF_FUNC_get_task_stack FunctionId = 141 + BPF_FUNC_load_hdr_opt FunctionId = 142 + BPF_FUNC_store_hdr_opt FunctionId = 143 + BPF_FUNC_reserve_hdr_opt FunctionId = 144 + BPF_FUNC_inode_storage_get FunctionId = 145 + BPF_FUNC_inode_storage_delete FunctionId = 146 + BPF_FUNC_d_path FunctionId = 147 + BPF_FUNC_copy_from_user FunctionId = 148 + BPF_FUNC_snprintf_btf FunctionId = 149 + BPF_FUNC_seq_printf_btf FunctionId = 150 + BPF_FUNC_skb_cgroup_classid FunctionId = 151 + BPF_FUNC_redirect_neigh FunctionId = 152 + BPF_FUNC_per_cpu_ptr FunctionId = 153 + BPF_FUNC_this_cpu_ptr FunctionId = 154 + BPF_FUNC_redirect_peer FunctionId = 155 + BPF_FUNC_task_storage_get FunctionId = 156 + BPF_FUNC_task_storage_delete FunctionId = 157 + BPF_FUNC_get_current_task_btf FunctionId = 158 + BPF_FUNC_bprm_opts_set FunctionId = 159 + BPF_FUNC_ktime_get_coarse_ns FunctionId = 160 + BPF_FUNC_ima_inode_hash FunctionId = 161 + BPF_FUNC_sock_from_file FunctionId = 162 + BPF_FUNC_check_mtu FunctionId = 163 + BPF_FUNC_for_each_map_elem FunctionId = 164 + BPF_FUNC_snprintf FunctionId = 165 + BPF_FUNC_sys_bpf FunctionId = 166 + BPF_FUNC_btf_find_by_name_kind FunctionId = 167 + BPF_FUNC_sys_close FunctionId = 168 + BPF_FUNC_timer_init FunctionId = 169 + BPF_FUNC_timer_set_callback FunctionId = 170 + BPF_FUNC_timer_start FunctionId = 171 + BPF_FUNC_timer_cancel FunctionId = 172 + BPF_FUNC_get_func_ip FunctionId = 173 + 
BPF_FUNC_get_attach_cookie FunctionId = 174 + BPF_FUNC_task_pt_regs FunctionId = 175 + BPF_FUNC_get_branch_snapshot FunctionId = 176 + BPF_FUNC_trace_vprintk FunctionId = 177 + BPF_FUNC_skc_to_unix_sock FunctionId = 178 + BPF_FUNC_kallsyms_lookup_name FunctionId = 179 + BPF_FUNC_find_vma FunctionId = 180 + BPF_FUNC_loop FunctionId = 181 + BPF_FUNC_strncmp FunctionId = 182 + BPF_FUNC_get_func_arg FunctionId = 183 + BPF_FUNC_get_func_ret FunctionId = 184 + BPF_FUNC_get_func_arg_cnt FunctionId = 185 + BPF_FUNC_get_retval FunctionId = 186 + BPF_FUNC_set_retval FunctionId = 187 + BPF_FUNC_xdp_get_buff_len FunctionId = 188 + BPF_FUNC_xdp_load_bytes FunctionId = 189 + BPF_FUNC_xdp_store_bytes FunctionId = 190 + BPF_FUNC_copy_from_user_task FunctionId = 191 + BPF_FUNC_skb_set_tstamp FunctionId = 192 + BPF_FUNC_ima_file_hash FunctionId = 193 + BPF_FUNC_kptr_xchg FunctionId = 194 + BPF_FUNC_map_lookup_percpu_elem FunctionId = 195 + BPF_FUNC_skc_to_mptcp_sock FunctionId = 196 + BPF_FUNC_dynptr_from_mem FunctionId = 197 + BPF_FUNC_ringbuf_reserve_dynptr FunctionId = 198 + BPF_FUNC_ringbuf_submit_dynptr FunctionId = 199 + BPF_FUNC_ringbuf_discard_dynptr FunctionId = 200 + BPF_FUNC_dynptr_read FunctionId = 201 + BPF_FUNC_dynptr_write FunctionId = 202 + BPF_FUNC_dynptr_data FunctionId = 203 + BPF_FUNC_tcp_raw_gen_syncookie_ipv4 FunctionId = 204 + BPF_FUNC_tcp_raw_gen_syncookie_ipv6 FunctionId = 205 + BPF_FUNC_tcp_raw_check_syncookie_ipv4 FunctionId = 206 + BPF_FUNC_tcp_raw_check_syncookie_ipv6 FunctionId = 207 + BPF_FUNC_ktime_get_tai_ns FunctionId = 208 + BPF_FUNC_user_ringbuf_drain FunctionId = 209 + BPF_FUNC_cgrp_storage_get FunctionId = 210 + BPF_FUNC_cgrp_storage_delete FunctionId = 211 + __BPF_FUNC_MAX_ID FunctionId = 212 +) + +type HdrStartOff uint32 + +const ( + BPF_HDR_START_MAC HdrStartOff = 0 + BPF_HDR_START_NET HdrStartOff = 1 +) + +type LinkType uint32 + +const ( + BPF_LINK_TYPE_UNSPEC LinkType = 0 + BPF_LINK_TYPE_RAW_TRACEPOINT LinkType = 1 + BPF_LINK_TYPE_TRACING 
LinkType = 2 + BPF_LINK_TYPE_CGROUP LinkType = 3 + BPF_LINK_TYPE_ITER LinkType = 4 + BPF_LINK_TYPE_NETNS LinkType = 5 + BPF_LINK_TYPE_XDP LinkType = 6 + BPF_LINK_TYPE_PERF_EVENT LinkType = 7 + BPF_LINK_TYPE_KPROBE_MULTI LinkType = 8 + BPF_LINK_TYPE_STRUCT_OPS LinkType = 9 + BPF_LINK_TYPE_NETFILTER LinkType = 10 + BPF_LINK_TYPE_TCX LinkType = 11 + BPF_LINK_TYPE_UPROBE_MULTI LinkType = 12 + BPF_LINK_TYPE_NETKIT LinkType = 13 + BPF_LINK_TYPE_SOCKMAP LinkType = 14 + __MAX_BPF_LINK_TYPE LinkType = 15 +) + +type MapType uint32 + +const ( + BPF_MAP_TYPE_UNSPEC MapType = 0 + BPF_MAP_TYPE_HASH MapType = 1 + BPF_MAP_TYPE_ARRAY MapType = 2 + BPF_MAP_TYPE_PROG_ARRAY MapType = 3 + BPF_MAP_TYPE_PERF_EVENT_ARRAY MapType = 4 + BPF_MAP_TYPE_PERCPU_HASH MapType = 5 + BPF_MAP_TYPE_PERCPU_ARRAY MapType = 6 + BPF_MAP_TYPE_STACK_TRACE MapType = 7 + BPF_MAP_TYPE_CGROUP_ARRAY MapType = 8 + BPF_MAP_TYPE_LRU_HASH MapType = 9 + BPF_MAP_TYPE_LRU_PERCPU_HASH MapType = 10 + BPF_MAP_TYPE_LPM_TRIE MapType = 11 + BPF_MAP_TYPE_ARRAY_OF_MAPS MapType = 12 + BPF_MAP_TYPE_HASH_OF_MAPS MapType = 13 + BPF_MAP_TYPE_DEVMAP MapType = 14 + BPF_MAP_TYPE_SOCKMAP MapType = 15 + BPF_MAP_TYPE_CPUMAP MapType = 16 + BPF_MAP_TYPE_XSKMAP MapType = 17 + BPF_MAP_TYPE_SOCKHASH MapType = 18 + BPF_MAP_TYPE_CGROUP_STORAGE_DEPRECATED MapType = 19 + BPF_MAP_TYPE_CGROUP_STORAGE MapType = 19 + BPF_MAP_TYPE_REUSEPORT_SOCKARRAY MapType = 20 + BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE_DEPRECATED MapType = 21 + BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE MapType = 21 + BPF_MAP_TYPE_QUEUE MapType = 22 + BPF_MAP_TYPE_STACK MapType = 23 + BPF_MAP_TYPE_SK_STORAGE MapType = 24 + BPF_MAP_TYPE_DEVMAP_HASH MapType = 25 + BPF_MAP_TYPE_STRUCT_OPS MapType = 26 + BPF_MAP_TYPE_RINGBUF MapType = 27 + BPF_MAP_TYPE_INODE_STORAGE MapType = 28 + BPF_MAP_TYPE_TASK_STORAGE MapType = 29 + BPF_MAP_TYPE_BLOOM_FILTER MapType = 30 + BPF_MAP_TYPE_USER_RINGBUF MapType = 31 + BPF_MAP_TYPE_CGRP_STORAGE MapType = 32 + BPF_MAP_TYPE_ARENA MapType = 33 + __MAX_BPF_MAP_TYPE 
MapType = 34 +) + +type ObjType uint32 + +const ( + BPF_TYPE_UNSPEC ObjType = 0 + BPF_TYPE_PROG ObjType = 1 + BPF_TYPE_MAP ObjType = 2 + BPF_TYPE_LINK ObjType = 3 +) + +type PerfEventType uint32 + +const ( + BPF_PERF_EVENT_UNSPEC PerfEventType = 0 + BPF_PERF_EVENT_UPROBE PerfEventType = 1 + BPF_PERF_EVENT_URETPROBE PerfEventType = 2 + BPF_PERF_EVENT_KPROBE PerfEventType = 3 + BPF_PERF_EVENT_KRETPROBE PerfEventType = 4 + BPF_PERF_EVENT_TRACEPOINT PerfEventType = 5 + BPF_PERF_EVENT_EVENT PerfEventType = 6 +) + +type ProgType uint32 + +const ( + BPF_PROG_TYPE_UNSPEC ProgType = 0 + BPF_PROG_TYPE_SOCKET_FILTER ProgType = 1 + BPF_PROG_TYPE_KPROBE ProgType = 2 + BPF_PROG_TYPE_SCHED_CLS ProgType = 3 + BPF_PROG_TYPE_SCHED_ACT ProgType = 4 + BPF_PROG_TYPE_TRACEPOINT ProgType = 5 + BPF_PROG_TYPE_XDP ProgType = 6 + BPF_PROG_TYPE_PERF_EVENT ProgType = 7 + BPF_PROG_TYPE_CGROUP_SKB ProgType = 8 + BPF_PROG_TYPE_CGROUP_SOCK ProgType = 9 + BPF_PROG_TYPE_LWT_IN ProgType = 10 + BPF_PROG_TYPE_LWT_OUT ProgType = 11 + BPF_PROG_TYPE_LWT_XMIT ProgType = 12 + BPF_PROG_TYPE_SOCK_OPS ProgType = 13 + BPF_PROG_TYPE_SK_SKB ProgType = 14 + BPF_PROG_TYPE_CGROUP_DEVICE ProgType = 15 + BPF_PROG_TYPE_SK_MSG ProgType = 16 + BPF_PROG_TYPE_RAW_TRACEPOINT ProgType = 17 + BPF_PROG_TYPE_CGROUP_SOCK_ADDR ProgType = 18 + BPF_PROG_TYPE_LWT_SEG6LOCAL ProgType = 19 + BPF_PROG_TYPE_LIRC_MODE2 ProgType = 20 + BPF_PROG_TYPE_SK_REUSEPORT ProgType = 21 + BPF_PROG_TYPE_FLOW_DISSECTOR ProgType = 22 + BPF_PROG_TYPE_CGROUP_SYSCTL ProgType = 23 + BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE ProgType = 24 + BPF_PROG_TYPE_CGROUP_SOCKOPT ProgType = 25 + BPF_PROG_TYPE_TRACING ProgType = 26 + BPF_PROG_TYPE_STRUCT_OPS ProgType = 27 + BPF_PROG_TYPE_EXT ProgType = 28 + BPF_PROG_TYPE_LSM ProgType = 29 + BPF_PROG_TYPE_SK_LOOKUP ProgType = 30 + BPF_PROG_TYPE_SYSCALL ProgType = 31 + BPF_PROG_TYPE_NETFILTER ProgType = 32 + __MAX_BPF_PROG_TYPE ProgType = 33 +) + +type RetCode uint32 + +const ( + BPF_OK RetCode = 0 + BPF_DROP RetCode = 2 + 
BPF_REDIRECT RetCode = 7 + BPF_LWT_REROUTE RetCode = 128 + BPF_FLOW_DISSECTOR_CONTINUE RetCode = 129 +) + +type SkAction uint32 + +const ( + SK_DROP SkAction = 0 + SK_PASS SkAction = 1 +) + +type StackBuildIdStatus uint32 + +const ( + BPF_STACK_BUILD_ID_EMPTY StackBuildIdStatus = 0 + BPF_STACK_BUILD_ID_VALID StackBuildIdStatus = 1 + BPF_STACK_BUILD_ID_IP StackBuildIdStatus = 2 +) + +type StatsType uint32 + +const ( + BPF_STATS_RUN_TIME StatsType = 0 +) + +type TcxActionBase int32 + +const ( + TCX_NEXT TcxActionBase = -1 + TCX_PASS TcxActionBase = 0 + TCX_DROP TcxActionBase = 2 + TCX_REDIRECT TcxActionBase = 7 +) + +type XdpAction uint32 + +const ( + XDP_ABORTED XdpAction = 0 + XDP_DROP XdpAction = 1 + XDP_PASS XdpAction = 2 + XDP_TX XdpAction = 3 + XDP_REDIRECT XdpAction = 4 +) + +type BtfInfo struct { + _ structs.HostLayout + Btf TypedPointer[uint8] + BtfSize uint32 + Id BTFID + Name TypedPointer[uint8] + NameLen uint32 + KernelBtf uint32 +} + +type FuncInfo struct { + _ structs.HostLayout + InsnOff uint32 + TypeId uint32 +} + +type LineInfo struct { + _ structs.HostLayout + InsnOff uint32 + FileNameOff uint32 + LineOff uint32 + LineCol uint32 +} + +type LinkInfo struct { + _ structs.HostLayout + Type LinkType + Id LinkID + ProgId uint32 + _ [4]byte + Extra [48]uint8 +} + +type MapInfo struct { + _ structs.HostLayout + Type uint32 + Id MapID + KeySize uint32 + ValueSize uint32 + MaxEntries uint32 + MapFlags uint32 + Name ObjName + Ifindex uint32 + BtfVmlinuxValueTypeId TypeID + NetnsDev uint64 + NetnsIno uint64 + BtfId uint32 + BtfKeyTypeId TypeID + BtfValueTypeId TypeID + BtfVmlinuxId uint32 + MapExtra uint64 +} + +type ProgInfo struct { + _ structs.HostLayout + Type uint32 + Id uint32 + Tag [8]uint8 + JitedProgLen uint32 + XlatedProgLen uint32 + JitedProgInsns TypedPointer[uint8] + XlatedProgInsns TypedPointer[uint8] + LoadTime uint64 + CreatedByUid uint32 + NrMapIds uint32 + MapIds TypedPointer[MapID] + Name ObjName + Ifindex uint32 + _ [4]byte /* unsupported 
bitfield */ + NetnsDev uint64 + NetnsIno uint64 + NrJitedKsyms uint32 + NrJitedFuncLens uint32 + JitedKsyms TypedPointer[uint64] + JitedFuncLens TypedPointer[uint32] + BtfId BTFID + FuncInfoRecSize uint32 + FuncInfo TypedPointer[uint8] + NrFuncInfo uint32 + NrLineInfo uint32 + LineInfo TypedPointer[uint8] + JitedLineInfo TypedPointer[uint64] + NrJitedLineInfo uint32 + LineInfoRecSize uint32 + JitedLineInfoRecSize uint32 + NrProgTags uint32 + ProgTags uint64 + RunTimeNs uint64 + RunCnt uint64 + RecursionMisses uint64 + VerifiedInsns uint32 + AttachBtfObjId BTFID + AttachBtfId TypeID + _ [4]byte +} + +type SkLookup struct { + _ structs.HostLayout + Cookie uint64 + Family uint32 + Protocol uint32 + RemoteIp4 [4]uint8 + RemoteIp6 [16]uint8 + RemotePort uint16 + _ [2]byte + LocalIp4 [4]uint8 + LocalIp6 [16]uint8 + LocalPort uint32 + IngressIfindex uint32 + _ [4]byte +} + +type XdpMd struct { + _ structs.HostLayout + Data uint32 + DataEnd uint32 + DataMeta uint32 + IngressIfindex uint32 + RxQueueIndex uint32 + EgressIfindex uint32 +} + +type BtfGetFdByIdAttr struct { + _ structs.HostLayout + Id uint32 +} + +func BtfGetFdById(attr *BtfGetFdByIdAttr) (*FD, error) { + fd, err := BPF(BPF_BTF_GET_FD_BY_ID, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + if err != nil { + return nil, err + } + return NewFD(int(fd)) +} + +type BtfGetNextIdAttr struct { + _ structs.HostLayout + Id BTFID + NextId BTFID +} + +func BtfGetNextId(attr *BtfGetNextIdAttr) error { + _, err := BPF(BPF_BTF_GET_NEXT_ID, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + return err +} + +type BtfLoadAttr struct { + _ structs.HostLayout + Btf TypedPointer[uint8] + BtfLogBuf TypedPointer[uint8] + BtfSize uint32 + BtfLogSize uint32 + BtfLogLevel uint32 + BtfLogTrueSize uint32 + BtfFlags uint32 + BtfTokenFd int32 +} + +func BtfLoad(attr *BtfLoadAttr) (*FD, error) { + fd, err := BPF(BPF_BTF_LOAD, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + if err != nil { + return nil, err + } + return NewFD(int(fd)) +} + +type 
EnableStatsAttr struct { + _ structs.HostLayout + Type uint32 +} + +func EnableStats(attr *EnableStatsAttr) (*FD, error) { + fd, err := BPF(BPF_ENABLE_STATS, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + if err != nil { + return nil, err + } + return NewFD(int(fd)) +} + +type IterCreateAttr struct { + _ structs.HostLayout + LinkFd uint32 + Flags uint32 +} + +func IterCreate(attr *IterCreateAttr) (*FD, error) { + fd, err := BPF(BPF_ITER_CREATE, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + if err != nil { + return nil, err + } + return NewFD(int(fd)) +} + +type LinkCreateAttr struct { + _ structs.HostLayout + ProgFd uint32 + TargetFd uint32 + AttachType AttachType + Flags uint32 + TargetBtfId TypeID + _ [44]byte +} + +func LinkCreate(attr *LinkCreateAttr) (*FD, error) { + fd, err := BPF(BPF_LINK_CREATE, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + if err != nil { + return nil, err + } + return NewFD(int(fd)) +} + +type LinkCreateIterAttr struct { + _ structs.HostLayout + ProgFd uint32 + TargetFd uint32 + AttachType AttachType + Flags uint32 + IterInfo Pointer + IterInfoLen uint32 + _ [36]byte +} + +func LinkCreateIter(attr *LinkCreateIterAttr) (*FD, error) { + fd, err := BPF(BPF_LINK_CREATE, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + if err != nil { + return nil, err + } + return NewFD(int(fd)) +} + +type LinkCreateKprobeMultiAttr struct { + _ structs.HostLayout + ProgFd uint32 + TargetFd uint32 + AttachType AttachType + Flags uint32 + KprobeMultiFlags uint32 + Count uint32 + Syms StringSlicePointer + Addrs TypedPointer[uintptr] + Cookies TypedPointer[uint64] + _ [16]byte +} + +func LinkCreateKprobeMulti(attr *LinkCreateKprobeMultiAttr) (*FD, error) { + fd, err := BPF(BPF_LINK_CREATE, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + if err != nil { + return nil, err + } + return NewFD(int(fd)) +} + +type LinkCreateNetfilterAttr struct { + _ structs.HostLayout + ProgFd uint32 + TargetFd uint32 + AttachType AttachType + Flags uint32 + Pf uint32 + Hooknum uint32 + 
Priority int32 + NetfilterFlags uint32 + _ [32]byte +} + +func LinkCreateNetfilter(attr *LinkCreateNetfilterAttr) (*FD, error) { + fd, err := BPF(BPF_LINK_CREATE, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + if err != nil { + return nil, err + } + return NewFD(int(fd)) +} + +type LinkCreateNetkitAttr struct { + _ structs.HostLayout + ProgFd uint32 + TargetIfindex uint32 + AttachType AttachType + Flags uint32 + RelativeFdOrId uint32 + _ [4]byte + ExpectedRevision uint64 + _ [32]byte +} + +func LinkCreateNetkit(attr *LinkCreateNetkitAttr) (*FD, error) { + fd, err := BPF(BPF_LINK_CREATE, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + if err != nil { + return nil, err + } + return NewFD(int(fd)) +} + +type LinkCreatePerfEventAttr struct { + _ structs.HostLayout + ProgFd uint32 + TargetFd uint32 + AttachType AttachType + Flags uint32 + BpfCookie uint64 + _ [40]byte +} + +func LinkCreatePerfEvent(attr *LinkCreatePerfEventAttr) (*FD, error) { + fd, err := BPF(BPF_LINK_CREATE, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + if err != nil { + return nil, err + } + return NewFD(int(fd)) +} + +type LinkCreateTcxAttr struct { + _ structs.HostLayout + ProgFd uint32 + TargetIfindex uint32 + AttachType AttachType + Flags uint32 + RelativeFdOrId uint32 + _ [4]byte + ExpectedRevision uint64 + _ [32]byte +} + +func LinkCreateTcx(attr *LinkCreateTcxAttr) (*FD, error) { + fd, err := BPF(BPF_LINK_CREATE, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + if err != nil { + return nil, err + } + return NewFD(int(fd)) +} + +type LinkCreateTracingAttr struct { + _ structs.HostLayout + ProgFd uint32 + TargetFd uint32 + AttachType AttachType + Flags uint32 + TargetBtfId BTFID + _ [4]byte + Cookie uint64 + _ [32]byte +} + +func LinkCreateTracing(attr *LinkCreateTracingAttr) (*FD, error) { + fd, err := BPF(BPF_LINK_CREATE, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + if err != nil { + return nil, err + } + return NewFD(int(fd)) +} + +type LinkCreateUprobeMultiAttr struct { + _ structs.HostLayout + 
ProgFd uint32 + TargetFd uint32 + AttachType AttachType + Flags uint32 + Path StringPointer + Offsets TypedPointer[uint64] + RefCtrOffsets TypedPointer[uint64] + Cookies TypedPointer[uint64] + Count uint32 + UprobeMultiFlags uint32 + Pid uint32 + _ [4]byte +} + +func LinkCreateUprobeMulti(attr *LinkCreateUprobeMultiAttr) (*FD, error) { + fd, err := BPF(BPF_LINK_CREATE, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + if err != nil { + return nil, err + } + return NewFD(int(fd)) +} + +type LinkDetachAttr struct { + _ structs.HostLayout + LinkFd uint32 +} + +func LinkDetach(attr *LinkDetachAttr) error { + _, err := BPF(BPF_LINK_DETACH, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + return err +} + +type LinkGetFdByIdAttr struct { + _ structs.HostLayout + Id LinkID +} + +func LinkGetFdById(attr *LinkGetFdByIdAttr) (*FD, error) { + fd, err := BPF(BPF_LINK_GET_FD_BY_ID, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + if err != nil { + return nil, err + } + return NewFD(int(fd)) +} + +type LinkGetNextIdAttr struct { + _ structs.HostLayout + Id LinkID + NextId LinkID +} + +func LinkGetNextId(attr *LinkGetNextIdAttr) error { + _, err := BPF(BPF_LINK_GET_NEXT_ID, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + return err +} + +type LinkUpdateAttr struct { + _ structs.HostLayout + LinkFd uint32 + NewProgFd uint32 + Flags uint32 + OldProgFd uint32 +} + +func LinkUpdate(attr *LinkUpdateAttr) error { + _, err := BPF(BPF_LINK_UPDATE, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + return err +} + +type MapCreateAttr struct { + _ structs.HostLayout + MapType MapType + KeySize uint32 + ValueSize uint32 + MaxEntries uint32 + MapFlags uint32 + InnerMapFd uint32 + NumaNode uint32 + MapName ObjName + MapIfindex uint32 + BtfFd uint32 + BtfKeyTypeId TypeID + BtfValueTypeId TypeID + BtfVmlinuxValueTypeId TypeID + MapExtra uint64 + ValueTypeBtfObjFd int32 + MapTokenFd int32 +} + +func MapCreate(attr *MapCreateAttr) (*FD, error) { + fd, err := BPF(BPF_MAP_CREATE, unsafe.Pointer(attr), 
unsafe.Sizeof(*attr)) + if err != nil { + return nil, err + } + return NewFD(int(fd)) +} + +type MapDeleteBatchAttr struct { + _ structs.HostLayout + InBatch Pointer + OutBatch Pointer + Keys Pointer + Values Pointer + Count uint32 + MapFd uint32 + ElemFlags uint64 + Flags uint64 +} + +func MapDeleteBatch(attr *MapDeleteBatchAttr) error { + _, err := BPF(BPF_MAP_DELETE_BATCH, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + return err +} + +type MapDeleteElemAttr struct { + _ structs.HostLayout + MapFd uint32 + _ [4]byte + Key Pointer + Value Pointer + Flags uint64 +} + +func MapDeleteElem(attr *MapDeleteElemAttr) error { + _, err := BPF(BPF_MAP_DELETE_ELEM, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + return err +} + +type MapFreezeAttr struct { + _ structs.HostLayout + MapFd uint32 +} + +func MapFreeze(attr *MapFreezeAttr) error { + _, err := BPF(BPF_MAP_FREEZE, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + return err +} + +type MapGetFdByIdAttr struct { + _ structs.HostLayout + Id uint32 +} + +func MapGetFdById(attr *MapGetFdByIdAttr) (*FD, error) { + fd, err := BPF(BPF_MAP_GET_FD_BY_ID, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + if err != nil { + return nil, err + } + return NewFD(int(fd)) +} + +type MapGetNextIdAttr struct { + _ structs.HostLayout + Id uint32 + NextId uint32 +} + +func MapGetNextId(attr *MapGetNextIdAttr) error { + _, err := BPF(BPF_MAP_GET_NEXT_ID, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + return err +} + +type MapGetNextKeyAttr struct { + _ structs.HostLayout + MapFd uint32 + _ [4]byte + Key Pointer + NextKey Pointer +} + +func MapGetNextKey(attr *MapGetNextKeyAttr) error { + _, err := BPF(BPF_MAP_GET_NEXT_KEY, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + return err +} + +type MapLookupAndDeleteBatchAttr struct { + _ structs.HostLayout + InBatch Pointer + OutBatch Pointer + Keys Pointer + Values Pointer + Count uint32 + MapFd uint32 + ElemFlags uint64 + Flags uint64 +} + +func MapLookupAndDeleteBatch(attr *MapLookupAndDeleteBatchAttr) 
error { + _, err := BPF(BPF_MAP_LOOKUP_AND_DELETE_BATCH, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + return err +} + +type MapLookupAndDeleteElemAttr struct { + _ structs.HostLayout + MapFd uint32 + _ [4]byte + Key Pointer + Value Pointer + Flags uint64 +} + +func MapLookupAndDeleteElem(attr *MapLookupAndDeleteElemAttr) error { + _, err := BPF(BPF_MAP_LOOKUP_AND_DELETE_ELEM, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + return err +} + +type MapLookupBatchAttr struct { + _ structs.HostLayout + InBatch Pointer + OutBatch Pointer + Keys Pointer + Values Pointer + Count uint32 + MapFd uint32 + ElemFlags uint64 + Flags uint64 +} + +func MapLookupBatch(attr *MapLookupBatchAttr) error { + _, err := BPF(BPF_MAP_LOOKUP_BATCH, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + return err +} + +type MapLookupElemAttr struct { + _ structs.HostLayout + MapFd uint32 + _ [4]byte + Key Pointer + Value Pointer + Flags uint64 +} + +func MapLookupElem(attr *MapLookupElemAttr) error { + _, err := BPF(BPF_MAP_LOOKUP_ELEM, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + return err +} + +type MapUpdateBatchAttr struct { + _ structs.HostLayout + InBatch Pointer + OutBatch Pointer + Keys Pointer + Values Pointer + Count uint32 + MapFd uint32 + ElemFlags uint64 + Flags uint64 +} + +func MapUpdateBatch(attr *MapUpdateBatchAttr) error { + _, err := BPF(BPF_MAP_UPDATE_BATCH, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + return err +} + +type MapUpdateElemAttr struct { + _ structs.HostLayout + MapFd uint32 + _ [4]byte + Key Pointer + Value Pointer + Flags uint64 +} + +func MapUpdateElem(attr *MapUpdateElemAttr) error { + _, err := BPF(BPF_MAP_UPDATE_ELEM, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + return err +} + +type ObjGetAttr struct { + _ structs.HostLayout + Pathname StringPointer + BpfFd uint32 + FileFlags uint32 + PathFd int32 + _ [4]byte +} + +func ObjGet(attr *ObjGetAttr) (*FD, error) { + fd, err := BPF(BPF_OBJ_GET, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + if err != nil { + return 
nil, err + } + return NewFD(int(fd)) +} + +type ObjGetInfoByFdAttr struct { + _ structs.HostLayout + BpfFd uint32 + InfoLen uint32 + Info Pointer +} + +func ObjGetInfoByFd(attr *ObjGetInfoByFdAttr) error { + _, err := BPF(BPF_OBJ_GET_INFO_BY_FD, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + return err +} + +type ObjPinAttr struct { + _ structs.HostLayout + Pathname StringPointer + BpfFd uint32 + FileFlags uint32 + PathFd int32 + _ [4]byte +} + +func ObjPin(attr *ObjPinAttr) error { + _, err := BPF(BPF_OBJ_PIN, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + return err +} + +type ProgAttachAttr struct { + _ structs.HostLayout + TargetFdOrIfindex uint32 + AttachBpfFd uint32 + AttachType uint32 + AttachFlags uint32 + ReplaceBpfFd uint32 + RelativeFdOrId uint32 + ExpectedRevision uint64 +} + +func ProgAttach(attr *ProgAttachAttr) error { + _, err := BPF(BPF_PROG_ATTACH, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + return err +} + +type ProgBindMapAttr struct { + _ structs.HostLayout + ProgFd uint32 + MapFd uint32 + Flags uint32 +} + +func ProgBindMap(attr *ProgBindMapAttr) error { + _, err := BPF(BPF_PROG_BIND_MAP, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + return err +} + +type ProgDetachAttr struct { + _ structs.HostLayout + TargetFdOrIfindex uint32 + AttachBpfFd uint32 + AttachType uint32 + AttachFlags uint32 + _ [4]byte + RelativeFdOrId uint32 + ExpectedRevision uint64 +} + +func ProgDetach(attr *ProgDetachAttr) error { + _, err := BPF(BPF_PROG_DETACH, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + return err +} + +type ProgGetFdByIdAttr struct { + _ structs.HostLayout + Id uint32 +} + +func ProgGetFdById(attr *ProgGetFdByIdAttr) (*FD, error) { + fd, err := BPF(BPF_PROG_GET_FD_BY_ID, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + if err != nil { + return nil, err + } + return NewFD(int(fd)) +} + +type ProgGetNextIdAttr struct { + _ structs.HostLayout + Id uint32 + NextId uint32 +} + +func ProgGetNextId(attr *ProgGetNextIdAttr) error { + _, err := 
BPF(BPF_PROG_GET_NEXT_ID, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + return err +} + +type ProgLoadAttr struct { + _ structs.HostLayout + ProgType ProgType + InsnCnt uint32 + Insns TypedPointer[uint8] + License StringPointer + LogLevel LogLevel + LogSize uint32 + LogBuf TypedPointer[uint8] + KernVersion uint32 + ProgFlags uint32 + ProgName ObjName + ProgIfindex uint32 + ExpectedAttachType AttachType + ProgBtfFd uint32 + FuncInfoRecSize uint32 + FuncInfo TypedPointer[uint8] + FuncInfoCnt uint32 + LineInfoRecSize uint32 + LineInfo TypedPointer[uint8] + LineInfoCnt uint32 + AttachBtfId TypeID + AttachBtfObjFd uint32 + CoreReloCnt uint32 + FdArray TypedPointer[int32] + CoreRelos TypedPointer[uint8] + CoreReloRecSize uint32 + LogTrueSize uint32 + ProgTokenFd int32 + _ [4]byte +} + +func ProgLoad(attr *ProgLoadAttr) (*FD, error) { + fd, err := BPF(BPF_PROG_LOAD, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + if err != nil { + return nil, err + } + return NewFD(int(fd)) +} + +type ProgQueryAttr struct { + _ structs.HostLayout + TargetFdOrIfindex uint32 + AttachType AttachType + QueryFlags uint32 + AttachFlags uint32 + ProgIds TypedPointer[ProgramID] + Count uint32 + _ [4]byte + ProgAttachFlags TypedPointer[ProgramID] + LinkIds TypedPointer[LinkID] + LinkAttachFlags TypedPointer[LinkID] + Revision uint64 +} + +func ProgQuery(attr *ProgQueryAttr) error { + _, err := BPF(BPF_PROG_QUERY, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + return err +} + +type ProgRunAttr struct { + _ structs.HostLayout + ProgFd uint32 + Retval uint32 + DataSizeIn uint32 + DataSizeOut uint32 + DataIn TypedPointer[uint8] + DataOut TypedPointer[uint8] + Repeat uint32 + Duration uint32 + CtxSizeIn uint32 + CtxSizeOut uint32 + CtxIn TypedPointer[uint8] + CtxOut TypedPointer[uint8] + Flags uint32 + Cpu uint32 + BatchSize uint32 + _ [4]byte +} + +func ProgRun(attr *ProgRunAttr) error { + _, err := BPF(BPF_PROG_TEST_RUN, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + return err +} + +type 
RawTracepointOpenAttr struct { + _ structs.HostLayout + Name StringPointer + ProgFd uint32 + _ [4]byte + Cookie uint64 +} + +func RawTracepointOpen(attr *RawTracepointOpenAttr) (*FD, error) { + fd, err := BPF(BPF_RAW_TRACEPOINT_OPEN, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + if err != nil { + return nil, err + } + return NewFD(int(fd)) +} + +type CgroupLinkInfo struct { + _ structs.HostLayout + Type LinkType + Id LinkID + ProgId uint32 + _ [4]byte + CgroupId uint64 + AttachType AttachType + _ [36]byte +} + +type IterLinkInfo struct { + _ structs.HostLayout + Type LinkType + Id LinkID + ProgId uint32 + _ [4]byte + TargetName TypedPointer[uint8] + TargetNameLen uint32 +} + +type KprobeLinkInfo struct { + _ structs.HostLayout + Type LinkType + Id LinkID + ProgId uint32 + _ [4]byte + PerfEventType PerfEventType + _ [4]byte + FuncName TypedPointer[uint8] + NameLen uint32 + Offset uint32 + Addr uint64 + Missed uint64 + Cookie uint64 +} + +type KprobeMultiLinkInfo struct { + _ structs.HostLayout + Type LinkType + Id LinkID + ProgId uint32 + _ [4]byte + Addrs TypedPointer[uint64] + Count uint32 + Flags uint32 + Missed uint64 + Cookies TypedPointer[uint64] + _ [16]byte +} + +type NetNsLinkInfo struct { + _ structs.HostLayout + Type LinkType + Id LinkID + ProgId uint32 + _ [4]byte + NetnsIno uint32 + AttachType AttachType + _ [40]byte +} + +type NetfilterLinkInfo struct { + _ structs.HostLayout + Type LinkType + Id LinkID + ProgId uint32 + _ [4]byte + Pf uint32 + Hooknum uint32 + Priority int32 + Flags uint32 + _ [32]byte +} + +type NetkitLinkInfo struct { + _ structs.HostLayout + Type LinkType + Id LinkID + ProgId uint32 + _ [4]byte + Ifindex uint32 + AttachType AttachType + _ [40]byte +} + +type PerfEventLinkInfo struct { + _ structs.HostLayout + Type LinkType + Id LinkID + ProgId uint32 + _ [4]byte + PerfEventType PerfEventType +} + +type RawTracepointLinkInfo struct { + _ structs.HostLayout + Type LinkType + Id LinkID + ProgId uint32 + _ [4]byte + TpName 
TypedPointer[uint8] + TpNameLen uint32 + _ [36]byte +} + +type TcxLinkInfo struct { + _ structs.HostLayout + Type LinkType + Id LinkID + ProgId uint32 + _ [4]byte + Ifindex uint32 + AttachType AttachType + _ [40]byte +} + +type TracingLinkInfo struct { + _ structs.HostLayout + Type LinkType + Id LinkID + ProgId uint32 + _ [4]byte + AttachType AttachType + TargetObjId uint32 + TargetBtfId TypeID + _ [36]byte +} + +type XDPLinkInfo struct { + _ structs.HostLayout + Type LinkType + Id LinkID + ProgId uint32 + _ [4]byte + Ifindex uint32 + _ [44]byte +} diff --git a/vendor/github.com/cilium/ebpf/internal/sysenc/buffer.go b/vendor/github.com/cilium/ebpf/internal/sysenc/buffer.go new file mode 100644 index 000000000..62e483a1c --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/sysenc/buffer.go @@ -0,0 +1,85 @@ +package sysenc + +import ( + "unsafe" + + "github.com/cilium/ebpf/internal/sys" +) + +type Buffer struct { + ptr unsafe.Pointer + // Size of the buffer. syscallPointerOnly if created from UnsafeBuffer or when using + // zero-copy unmarshaling. + size int +} + +const syscallPointerOnly = -1 + +func newBuffer(buf []byte) Buffer { + if len(buf) == 0 { + return Buffer{} + } + return Buffer{unsafe.Pointer(&buf[0]), len(buf)} +} + +// UnsafeBuffer constructs a Buffer for zero-copy unmarshaling. +// +// [Pointer] is the only valid method to call on such a Buffer. +// Use [SyscallBuffer] instead if possible. +func UnsafeBuffer(ptr unsafe.Pointer) Buffer { + return Buffer{ptr, syscallPointerOnly} +} + +// SyscallOutput prepares a Buffer for a syscall to write into. +// +// size is the length of the desired buffer in bytes. +// The buffer may point at the underlying memory of dst, in which case [Unmarshal] +// becomes a no-op. +// +// The contents of the buffer are undefined and may be non-zero. 
+func SyscallOutput(dst any, size int) Buffer { + if dstBuf := unsafeBackingMemory(dst); len(dstBuf) == size { + buf := newBuffer(dstBuf) + buf.size = syscallPointerOnly + return buf + } + + return newBuffer(make([]byte, size)) +} + +// CopyTo copies the buffer into dst. +// +// Returns the number of copied bytes. +func (b Buffer) CopyTo(dst []byte) int { + return copy(dst, b.Bytes()) +} + +// AppendTo appends the buffer onto dst. +func (b Buffer) AppendTo(dst []byte) []byte { + return append(dst, b.Bytes()...) +} + +// Pointer returns the location where a syscall should write. +func (b Buffer) Pointer() sys.Pointer { + // NB: This deliberately ignores b.length to support zero-copy + // marshaling / unmarshaling using unsafe.Pointer. + return sys.UnsafePointer(b.ptr) +} + +// Unmarshal the buffer into the provided value. +func (b Buffer) Unmarshal(data any) error { + if b.size == syscallPointerOnly { + return nil + } + + return Unmarshal(data, b.Bytes()) +} + +// Bytes returns the buffer as a byte slice. Returns nil if the Buffer was +// created using UnsafeBuffer or by zero-copy unmarshaling. +func (b Buffer) Bytes() []byte { + if b.size == syscallPointerOnly { + return nil + } + return unsafe.Slice((*byte)(b.ptr), b.size) +} diff --git a/vendor/github.com/cilium/ebpf/internal/sysenc/doc.go b/vendor/github.com/cilium/ebpf/internal/sysenc/doc.go new file mode 100644 index 000000000..676ad98ba --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/sysenc/doc.go @@ -0,0 +1,3 @@ +// Package sysenc provides efficient conversion of Go values to system +// call interfaces. +package sysenc diff --git a/vendor/github.com/cilium/ebpf/internal/sysenc/layout.go b/vendor/github.com/cilium/ebpf/internal/sysenc/layout.go new file mode 100644 index 000000000..52d111e7a --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/sysenc/layout.go @@ -0,0 +1,41 @@ +// Copyright 2009 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found at https://go.dev/LICENSE. + +package sysenc + +import ( + "reflect" + "sync" +) + +var hasUnexportedFieldsCache sync.Map // map[reflect.Type]bool + +func hasUnexportedFields(typ reflect.Type) bool { + switch typ.Kind() { + case reflect.Slice, reflect.Array, reflect.Pointer: + return hasUnexportedFields(typ.Elem()) + + case reflect.Struct: + if unexported, ok := hasUnexportedFieldsCache.Load(typ); ok { + return unexported.(bool) + } + + unexported := false + for i, n := 0, typ.NumField(); i < n; i++ { + field := typ.Field(i) + // Package binary allows _ fields but always writes zeroes into them. + if (!field.IsExported() && field.Name != "_") || hasUnexportedFields(field.Type) { + unexported = true + break + } + } + + hasUnexportedFieldsCache.Store(typ, unexported) + return unexported + + default: + // NB: It's not clear what this means for Chan and so on. + return false + } +} diff --git a/vendor/github.com/cilium/ebpf/internal/sysenc/marshal.go b/vendor/github.com/cilium/ebpf/internal/sysenc/marshal.go new file mode 100644 index 000000000..3f7deb80f --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/sysenc/marshal.go @@ -0,0 +1,161 @@ +package sysenc + +import ( + "encoding" + "encoding/binary" + "errors" + "fmt" + "reflect" + "slices" + "unsafe" + + "github.com/cilium/ebpf/internal" +) + +// Marshal turns data into a byte slice using the system's native endianness. +// +// If possible, avoids allocations by directly using the backing memory +// of data. This means that the variable must not be modified for the lifetime +// of the returned [Buffer]. +// +// Returns an error if the data can't be turned into a byte slice according to +// the behaviour of [binary.Write]. 
+func Marshal(data any, size int) (Buffer, error) { + if data == nil { + return Buffer{}, errors.New("can't marshal a nil value") + } + + var buf []byte + var err error + switch value := data.(type) { + case encoding.BinaryMarshaler: + buf, err = value.MarshalBinary() + case string: + buf = unsafe.Slice(unsafe.StringData(value), len(value)) + case []byte: + buf = value + case int16: + buf = internal.NativeEndian.AppendUint16(make([]byte, 0, 2), uint16(value)) + case uint16: + buf = internal.NativeEndian.AppendUint16(make([]byte, 0, 2), value) + case int32: + buf = internal.NativeEndian.AppendUint32(make([]byte, 0, 4), uint32(value)) + case uint32: + buf = internal.NativeEndian.AppendUint32(make([]byte, 0, 4), value) + case int64: + buf = internal.NativeEndian.AppendUint64(make([]byte, 0, 8), uint64(value)) + case uint64: + buf = internal.NativeEndian.AppendUint64(make([]byte, 0, 8), value) + default: + if buf := unsafeBackingMemory(data); len(buf) == size { + return newBuffer(buf), nil + } + + buf, err = binary.Append(nil, internal.NativeEndian, value) + } + if err != nil { + return Buffer{}, err + } + + if len(buf) != size { + return Buffer{}, fmt.Errorf("%T doesn't marshal to %d bytes", data, size) + } + + return newBuffer(buf), nil +} + +// Unmarshal a byte slice in the system's native endianness into data. +// +// Returns an error if buf can't be unmarshalled according to the behaviour +// of [binary.Decode]. +func Unmarshal(data interface{}, buf []byte) error { + switch value := data.(type) { + case encoding.BinaryUnmarshaler: + return value.UnmarshalBinary(buf) + + case *string: + *value = string(buf) + return nil + + case *[]byte: + // Backwards compat: unmarshaling into a slice replaces the whole slice. 
+ *value = slices.Clone(buf) + return nil + + default: + if dataBuf := unsafeBackingMemory(data); len(dataBuf) == len(buf) { + copy(dataBuf, buf) + return nil + } + + n, err := binary.Decode(buf, internal.NativeEndian, value) + if err != nil { + return err + } + + if n != len(buf) { + return fmt.Errorf("unmarshaling %T doesn't consume all data", data) + } + + return nil + } +} + +// unsafeBackingMemory returns the backing memory of data if it can be used +// instead of calling into package binary. +// +// Returns nil if the value is not a pointer or a slice, or if it contains +// padding or unexported fields. +func unsafeBackingMemory(data any) []byte { + if data == nil { + return nil + } + + value := reflect.ValueOf(data) + var valueSize int + switch value.Kind() { + case reflect.Pointer: + if value.IsNil() { + return nil + } + + if elemType := value.Type().Elem(); elemType.Kind() != reflect.Slice { + valueSize = int(elemType.Size()) + break + } + + // We're dealing with a pointer to a slice. Dereference and + // handle it like a regular slice. + value = value.Elem() + fallthrough + + case reflect.Slice: + valueSize = int(value.Type().Elem().Size()) * value.Len() + + default: + // Prevent Value.UnsafePointer from panicking. + return nil + } + + // Some nil pointer types currently crash binary.Size. Call it after our own + // code so that the panic isn't reachable. + // See https://github.com/golang/go/issues/60892 + if size := binary.Size(data); size == -1 || size != valueSize { + // The type contains padding or unsupported types. + return nil + } + + if hasUnexportedFields(reflect.TypeOf(data)) { + return nil + } + + // Reinterpret the pointer as a byte slice. This violates the unsafe.Pointer + // rules because it's very unlikely that the source data has "an equivalent + // memory layout". However, we can make it safe-ish because of the + // following reasons: + // - There is no alignment mismatch since we cast to a type with an + // alignment of 1. 
+ // - There are no pointers in the source type so we don't upset the GC. + // - The length is verified at runtime. + return unsafe.Slice((*byte)(value.UnsafePointer()), valueSize) +} diff --git a/vendor/github.com/cilium/ebpf/internal/testutils/testmain/fd_trace.go b/vendor/github.com/cilium/ebpf/internal/testutils/testmain/fd_trace.go new file mode 100644 index 000000000..c47acf89c --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/testutils/testmain/fd_trace.go @@ -0,0 +1,103 @@ +package testmain + +import ( + "bytes" + "fmt" + "os" + "runtime" + "sync" + "sync/atomic" +) + +// foundLeak is atomic since the GC may collect objects in parallel. +var foundLeak atomic.Bool + +func onLeakFD(fs *runtime.Frames) { + foundLeak.Store(true) + fmt.Fprintln(os.Stderr, "leaked fd created at:") + fmt.Fprintln(os.Stderr, formatFrames(fs)) +} + +// fds is a registry of all file descriptors wrapped into sys.fds that were +// created while an fd tracer was active. +var fds *sync.Map // map[int]*runtime.Frames + +// TraceFD associates raw with the current execution stack. +// +// skip controls how many entries of the stack the function should skip. +func TraceFD(raw int, skip int) { + if fds == nil { + return + } + + // Attempt to store the caller's stack for the given fd value. + // Panic if fds contains an existing stack for the fd. + old, exist := fds.LoadOrStore(raw, callersFrames(skip)) + if exist { + f := old.(*runtime.Frames) + panic(fmt.Sprintf("found existing stack for fd %d:\n%s", raw, formatFrames(f))) + } +} + +// ForgetFD removes any existing association for raw. +func ForgetFD(raw int) { + if fds != nil { + fds.Delete(raw) + } +} + +// LeakFD indicates that raw was leaked. +// +// Calling the function with a value that was not passed to [TraceFD] before +// is undefined. +func LeakFD(raw int) { + if fds == nil { + return + } + + // Invoke the fd leak callback. 
Calls LoadAndDelete to guarantee the callback + // is invoked at most once for one sys.FD allocation, runtime.Frames can only + // be unwound once. + f, ok := fds.LoadAndDelete(raw) + if ok { + onLeakFD(f.(*runtime.Frames)) + } +} + +// flushFrames removes all elements from fds and returns them as a slice. This +// deals with the fact that a runtime.Frames can only be unwound once using +// Next(). +func flushFrames() []*runtime.Frames { + var frames []*runtime.Frames + fds.Range(func(key, value any) bool { + frames = append(frames, value.(*runtime.Frames)) + fds.Delete(key) + return true + }) + return frames +} + +func callersFrames(skip int) *runtime.Frames { + c := make([]uintptr, 32) + + // Skip runtime.Callers and this function. + i := runtime.Callers(skip+2, c) + if i == 0 { + return nil + } + + return runtime.CallersFrames(c) +} + +// formatFrames formats a runtime.Frames as a human-readable string. +func formatFrames(fs *runtime.Frames) string { + var b bytes.Buffer + for { + f, more := fs.Next() + b.WriteString(fmt.Sprintf("\t%s+%#x\n\t\t%s:%d\n", f.Function, f.PC-f.Entry, f.File, f.Line)) + if !more { + break + } + } + return b.String() +} diff --git a/vendor/github.com/cilium/ebpf/internal/testutils/testmain/main.go b/vendor/github.com/cilium/ebpf/internal/testutils/testmain/main.go new file mode 100644 index 000000000..53de97c86 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/testutils/testmain/main.go @@ -0,0 +1,58 @@ +package testmain + +import ( + "flag" + "fmt" + "os" + "sync" + + "github.com/cilium/ebpf/internal/platform" +) + +type testingM interface { + Run() int +} + +// Run m with various debug aids enabled. +// +// The function calls [os.Exit] and does not return. 
+func Run(m testingM) { + const traceLogFlag = "trace-log" + + var ts *traceSession + if platform.IsWindows { + traceLog := flag.Bool(traceLogFlag, false, "Output a trace of eBPF runtime activity") + flag.Parse() + + if *traceLog { + var err error + ts, err = newTraceSession() + if err != nil { + fmt.Fprintln(os.Stderr, "Disabling trace logging:", err) + } + } + } + defer ts.Close() + + fds = new(sync.Map) + ret := m.Run() + + for _, f := range flushFrames() { + onLeakFD(f) + } + + if foundLeak.Load() { + ret = 99 + } + + if err := ts.Dump(os.Stderr); err != nil { + fmt.Fprintln(os.Stderr, "Error while dumping trace log:", err) + ret = 99 + } + + if platform.IsWindows && ret != 0 && ts == nil { + fmt.Fprintf(os.Stderr, "Consider enabling trace logging with -%s\n", traceLogFlag) + } + + os.Exit(ret) +} diff --git a/vendor/github.com/cilium/ebpf/internal/testutils/testmain/windows.go b/vendor/github.com/cilium/ebpf/internal/testutils/testmain/windows.go new file mode 100644 index 000000000..533af9dbb --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/testutils/testmain/windows.go @@ -0,0 +1,219 @@ +package testmain + +import ( + "encoding/xml" + "fmt" + "io" + "os" + "os/exec" + "path/filepath" + "slices" + "strconv" + "strings" + "text/tabwriter" +) + +type tracelogKeywords uint64 + +// Know tracelog keywords. 
+// +// See https://github.com/microsoft/ebpf-for-windows/blob/main/libs/shared/ebpf_tracelog.h +var allKeywords = []string{ + "entry-exit", + "base", + "error", + "epoch", + "core", + "link", + "map", + "program", + "api", + "printk", + "native", +} + +func (kw *tracelogKeywords) UnmarshalText(text []byte) error { + decoded, err := strconv.ParseUint(string(text), 0, 64) + if err != nil { + return fmt.Errorf("foo: %w", err) + } + *kw = tracelogKeywords(decoded) + return nil +} + +func (kw tracelogKeywords) decode() []string { + var keywords []string + for _, keyword := range allKeywords { + if kw&1 > 0 { + keywords = append(keywords, keyword) + } + kw >>= 1 + } + if kw > 0 { + keywords = append(keywords, fmt.Sprintf("0x%x", kw)) + } + return keywords +} + +type traceSession struct { + session string +} + +// newTraceSession starts a trace log for eBPF for Windows related events. +// +// * https://github.com/microsoft/ebpf-for-windows/blob/main/docs/GettingStarted.md#using-tracing +// * https://devblogs.microsoft.com/performance-diagnostics/controlling-the-event-session-name-with-the-instance-name/ and +func newTraceSession() (*traceSession, error) { + def := filepath.Join(os.Getenv("ProgramFiles"), "ebpf-for-windows\\ebpfforwindows.wprp") + if _, err := os.Stat(def); err != nil { + return nil, err + } + + session := fmt.Sprintf("epbf-go-%d", os.Getpid()) + wpr := exec.Command("wpr.exe", "-start", def, "-filemode", "-instancename", session) + wpr.Stderr = os.Stderr + if err := wpr.Run(); err != nil { + return nil, err + } + + return &traceSession{session}, nil +} + +func (ts *traceSession) Close() error { + if ts == nil { + return nil + } + + return ts.stop(os.DevNull) +} + +func (ts *traceSession) stop(file string) error { + if ts.session == "" { + return nil + } + + wpr := exec.Command("wpr.exe", "-stop", file, "-instancename", ts.session) + if err := wpr.Run(); err != nil { + return err + } + + ts.session = "" + return nil +} + +func (ts *traceSession) Dump(w 
io.Writer) error { + if ts == nil { + return nil + } + + path, err := os.MkdirTemp("", "ebpf-go-trace") + if err != nil { + return err + } + defer os.RemoveAll(path) + + trace := filepath.Join(path, "trace.etl") + if err := ts.stop(trace); err != nil { + return fmt.Errorf("write trace: %w", err) + } + + netsh := exec.Command("netsh.exe", "trace", "convert", trace, "dump=XML") + if err := netsh.Run(); err != nil { + return err + } + + f, err := os.Open(filepath.Join(path, "trace.xml")) + if err != nil { + return err + } + defer f.Close() + + return summariseWPRTrace(f, w) +} + +func summariseWPRTrace(r io.Reader, w io.Writer) error { + type nameValue struct { + Name string `xml:"Name,attr"` + Value string `xml:",chardata"` + } + + type event struct { + XMLName xml.Name `xml:"Event"` + System struct { + Provider struct { + Name string `xml:"Name,attr"` + } `xml:"Provider"` + TimeCreated struct { + SystemTime string `xml:"SystemTime,attr"` + } `xml:"TimeCreated"` + Keywords tracelogKeywords `xml:"Keywords"` + Level uint64 `xml:"Level"` + } `xml:"System"` + EventData struct { + Data []nameValue `xml:"Data"` + } `xml:"EventData"` + RenderingInfo struct { + Task string `xml:"Task"` + } `xml:"RenderingInfo"` + } + + var events struct { + Events []event `xml:"Event"` + } + + err := xml.NewDecoder(r).Decode(&events) + if err != nil { + return fmt.Errorf("unmarshal trace XML: %w", err) + } + + tw := tabwriter.NewWriter(w, 0, 0, 1, ' ', 0) + for _, event := range events.Events { + if !strings.Contains(event.System.Provider.Name, "Ebpf") { + continue + } + + flag := " " + // See https://learn.microsoft.com/en-us/windows/win32/api/traceloggingprovider/nf-traceloggingprovider-tracelogginglevel#remarks + if event.System.Level > 0 && event.System.Level <= 3 { + flag = "!" 
+ } + + kw := event.System.Keywords.decode() + fmt.Fprintf(tw, "%s\t%s\t", flag, strings.Join(kw, ",")) + + data := event.EventData.Data + slices.SortFunc(data, func(a, b nameValue) int { + return strings.Compare(a.Name, b.Name) + }) + + var first string + for _, name := range []string{ + "Entry", + "Message", + "ErrorMessage", + } { + i := slices.IndexFunc(data, func(kv nameValue) bool { + return kv.Name == name + }) + + if i == -1 { + continue + } + + first = data[i].Value + data = slices.Delete(data, i, i+1) + break + } + + // NB: This may be empty. + fmt.Fprintf(tw, "%s\t", first) + + for _, data := range data { + fmt.Fprintf(tw, "%s=%s\t", data.Name, data.Value) + } + + fmt.Fprintln(tw) + } + + return tw.Flush() +} diff --git a/vendor/github.com/cilium/ebpf/internal/tracefs/kprobe.go b/vendor/github.com/cilium/ebpf/internal/tracefs/kprobe.go new file mode 100644 index 000000000..d0b5be66c --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/tracefs/kprobe.go @@ -0,0 +1,378 @@ +package tracefs + +import ( + "crypto/rand" + "errors" + "fmt" + "os" + "path/filepath" + "runtime" + "strings" + "sync" + "syscall" + + "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/linux" + "github.com/cilium/ebpf/internal/platform" + "github.com/cilium/ebpf/internal/unix" +) + +var ( + ErrInvalidInput = errors.New("invalid input") + + ErrInvalidMaxActive = errors.New("can only set maxactive on kretprobes") +) + +//go:generate go tool stringer -type=ProbeType -linecomment + +type ProbeType uint8 + +const ( + Kprobe ProbeType = iota // kprobe + Uprobe // uprobe +) + +func (pt ProbeType) eventsFile() (*os.File, error) { + path, err := sanitizeTracefsPath(fmt.Sprintf("%s_events", pt.String())) + if err != nil { + return nil, err + } + + return os.OpenFile(path, os.O_APPEND|os.O_WRONLY, 0666) +} + +type ProbeArgs struct { + Type ProbeType + Symbol, Group, Path string + Offset, RefCtrOffset, Cookie uint64 + Pid, RetprobeMaxActive int + Ret bool +} + +// 
RandomGroup generates a pseudorandom string for use as a tracefs group name. +// Returns an error when the output string would exceed 63 characters (kernel +// limitation), when rand.Read() fails or when prefix contains characters not +// allowed by IsValidTraceID. +func RandomGroup(prefix string) (string, error) { + if !validIdentifier(prefix) { + return "", fmt.Errorf("prefix '%s' must be alphanumeric or underscore: %w", prefix, ErrInvalidInput) + } + + b := make([]byte, 8) + if _, err := rand.Read(b); err != nil { + return "", fmt.Errorf("reading random bytes: %w", err) + } + + group := fmt.Sprintf("%s_%x", prefix, b) + if len(group) > 63 { + return "", fmt.Errorf("group name '%s' cannot be longer than 63 characters: %w", group, ErrInvalidInput) + } + + return group, nil +} + +// validIdentifier implements the equivalent of a regex match +// against "^[a-zA-Z_][0-9a-zA-Z_-]*$". +// +// Trace event groups, names and kernel symbols must adhere to this set of +// characters. Non-empty, first character must not be a number or hyphen, all +// characters must be alphanumeric, underscore or hyphen. +func validIdentifier(s string) bool { + if len(s) < 1 { + return false + } + for i, c := range []byte(s) { + switch { + case c >= 'a' && c <= 'z': + case c >= 'A' && c <= 'Z': + case c == '_': + case i > 0 && (c == '-' || c >= '0' && c <= '9'): + + default: + return false + } + } + + return true +} + +func sanitizeTracefsPath(path ...string) (string, error) { + base, err := getTracefsPath() + if err != nil { + return "", err + } + l := filepath.Join(path...) + p := filepath.Join(base, l) + if !strings.HasPrefix(p, base) { + return "", fmt.Errorf("path '%s' attempts to escape base path '%s': %w", l, base, ErrInvalidInput) + } + return p, nil +} + +// getTracefsPath will return a correct path to the tracefs mount point. 
+// Since kernel 4.1 tracefs should be mounted by default at /sys/kernel/tracing, +// but may be also be available at /sys/kernel/debug/tracing if debugfs is mounted. +// The available tracefs paths will depends on distribution choices. +var getTracefsPath = sync.OnceValues(func() (string, error) { + if !platform.IsLinux { + return "", fmt.Errorf("tracefs: %w", internal.ErrNotSupportedOnOS) + } + + for _, p := range []struct { + path string + fsType int64 + }{ + {"/sys/kernel/tracing", unix.TRACEFS_MAGIC}, + {"/sys/kernel/debug/tracing", unix.TRACEFS_MAGIC}, + // RHEL/CentOS + {"/sys/kernel/debug/tracing", unix.DEBUGFS_MAGIC}, + } { + if fsType, err := linux.FSType(p.path); err == nil && fsType == p.fsType { + return p.path, nil + } + } + + return "", errors.New("neither debugfs nor tracefs are mounted") +}) + +// sanitizeIdentifier replaces every invalid character for the tracefs api with an underscore. +// +// It is equivalent to calling regexp.MustCompile("[^a-zA-Z0-9]+").ReplaceAllString("_"). +func sanitizeIdentifier(s string) string { + var skip bool + return strings.Map(func(c rune) rune { + switch { + case c >= 'a' && c <= 'z', + c >= 'A' && c <= 'Z', + c >= '0' && c <= '9': + skip = false + return c + + case skip: + return -1 + + default: + skip = true + return '_' + } + }, s) +} + +// EventID reads a trace event's ID from tracefs given its group and name. +// The kernel requires group and name to be alphanumeric or underscore. 
+func EventID(group, name string) (uint64, error) { + if !validIdentifier(group) { + return 0, fmt.Errorf("invalid tracefs group: %q", group) + } + + if !validIdentifier(name) { + return 0, fmt.Errorf("invalid tracefs name: %q", name) + } + + path, err := sanitizeTracefsPath("events", group, name, "id") + if err != nil { + return 0, err + } + tid, err := internal.ReadUint64FromFile("%d\n", path) + if errors.Is(err, os.ErrNotExist) { + return 0, err + } + if err != nil { + return 0, fmt.Errorf("reading trace event ID of %s/%s: %w", group, name, err) + } + + return tid, nil +} + +func probePrefix(ret bool, maxActive int) string { + if ret { + if maxActive > 0 { + return fmt.Sprintf("r%d", maxActive) + } + return "r" + } + return "p" +} + +// Event represents an entry in a tracefs probe events file. +type Event struct { + typ ProbeType + group, name string + // event id allocated by the kernel. 0 if the event has already been removed. + id uint64 + + cleanup runtime.Cleanup +} + +// NewEvent creates a new ephemeral trace event. +// +// Returns os.ErrNotExist if symbol is not a valid +// kernel symbol, or if it is not traceable with kprobes. Returns os.ErrExist +// if a probe with the same group and symbol already exists. Returns an error if +// args.RetprobeMaxActive is used on non kprobe types. Returns ErrNotSupported if +// the kernel is too old to support kretprobe maxactive. +func NewEvent(args ProbeArgs) (*Event, error) { + // Before attempting to create a trace event through tracefs, + // check if an event with the same group and name already exists. + // Kernels 4.x and earlier don't return os.ErrExist on writing a duplicate + // entry, so we need to rely on reads for detecting uniqueness. 
+ eventName := sanitizeIdentifier(args.Symbol) + _, err := EventID(args.Group, eventName) + if err == nil { + return nil, fmt.Errorf("trace event %s/%s: %w", args.Group, eventName, os.ErrExist) + } + if errors.Is(err, unix.EINVAL) { + return nil, fmt.Errorf("trace event %s/%s: %w (unknown symbol?)", args.Group, eventName, err) + } + if !errors.Is(err, os.ErrNotExist) { + return nil, fmt.Errorf("checking trace event %s/%s: %w", args.Group, eventName, err) + } + + // Open the kprobe_events file in tracefs. + f, err := args.Type.eventsFile() + if err != nil { + return nil, err + } + defer f.Close() + + var pe, token string + switch args.Type { + case Kprobe: + // The kprobe_events syntax is as follows (see Documentation/trace/kprobetrace.txt): + // p[:[GRP/]EVENT] [MOD:]SYM[+offs]|MEMADDR [FETCHARGS] : Set a probe + // r[MAXACTIVE][:[GRP/]EVENT] [MOD:]SYM[+0] [FETCHARGS] : Set a return probe + // -:[GRP/]EVENT : Clear a probe + // + // Some examples: + // r:ebpf_1234/r_my_kretprobe nf_conntrack_destroy + // p:ebpf_5678/p_my_kprobe __x64_sys_execve + // + // Leaving the kretprobe's MAXACTIVE set to 0 (or absent) will make the + // kernel default to NR_CPUS. This is desired in most eBPF cases since + // subsampling or rate limiting logic can be more accurately implemented in + // the eBPF program itself. + // See Documentation/kprobes.txt for more details. 
+ if args.RetprobeMaxActive != 0 && !args.Ret { + return nil, ErrInvalidMaxActive + } + token = KprobeToken(args) + pe = fmt.Sprintf("%s:%s/%s %s", probePrefix(args.Ret, args.RetprobeMaxActive), args.Group, eventName, token) + case Uprobe: + // The uprobe_events syntax is as follows: + // p[:[GRP/]EVENT] PATH:OFFSET [FETCHARGS] : Set a probe + // r[:[GRP/]EVENT] PATH:OFFSET [FETCHARGS] : Set a return probe + // -:[GRP/]EVENT : Clear a probe + // + // Some examples: + // r:ebpf_1234/readline /bin/bash:0x12345 + // p:ebpf_5678/main_mySymbol /bin/mybin:0x12345(0x123) + // + // See Documentation/trace/uprobetracer.txt for more details. + if args.RetprobeMaxActive != 0 { + return nil, ErrInvalidMaxActive + } + token = UprobeToken(args) + pe = fmt.Sprintf("%s:%s/%s %s", probePrefix(args.Ret, 0), args.Group, eventName, token) + } + _, err = f.WriteString(pe) + + // Since commit 97c753e62e6c, ENOENT is correctly returned instead of EINVAL + // when trying to create a retprobe for a missing symbol. + if errors.Is(err, os.ErrNotExist) { + return nil, fmt.Errorf("token %s: not found: %w", token, err) + } + // Since commit ab105a4fb894, EILSEQ is returned when a kprobe sym+offset is resolved + // to an invalid insn boundary. The exact conditions that trigger this error are + // arch specific however. + if errors.Is(err, syscall.EILSEQ) { + return nil, fmt.Errorf("token %s: bad insn boundary: %w", token, os.ErrNotExist) + } + // ERANGE is returned when the `SYM[+offs]` token is too big and cannot + // be resolved. + if errors.Is(err, syscall.ERANGE) { + return nil, fmt.Errorf("token %s: offset too big: %w", token, os.ErrNotExist) + } + + if err != nil { + return nil, fmt.Errorf("token %s: writing '%s': %w", token, pe, err) + } + + // Get the newly-created trace event's id. 
+ tid, err := EventID(args.Group, eventName) + if args.RetprobeMaxActive != 0 && errors.Is(err, os.ErrNotExist) { + // Kernels < 4.12 don't support maxactive and therefore auto generate + // group and event names from the symbol and offset. The symbol is used + // without any sanitization. + // See https://elixir.bootlin.com/linux/v4.10/source/kernel/trace/trace_kprobe.c#L712 + event := fmt.Sprintf("kprobes/r_%s_%d", args.Symbol, args.Offset) + if err := removeEvent(args.Type, event); err != nil { + return nil, fmt.Errorf("failed to remove spurious maxactive event: %s", err) + } + + return nil, &internal.UnsupportedFeatureError{ + MinimumVersion: internal.Version{4, 12}, + Name: "trace event with non-default maxactive", + } + } + if err != nil { + return nil, fmt.Errorf("get trace event id: %w", err) + } + + evt := &Event{typ: args.Type, group: args.Group, name: eventName, id: tid} + evt.cleanup = runtime.AddCleanup(evt, func(*byte) { + _ = removeEvent(args.Type, fmt.Sprintf("%s/%s", args.Group, eventName)) + }, nil) + + return evt, nil +} + +// Close removes the event from tracefs. +// +// Returns os.ErrClosed if the event has already been closed before. +func (evt *Event) Close() error { + if evt.id == 0 { + return os.ErrClosed + } + + evt.id = 0 + evt.cleanup.Stop() + pe := fmt.Sprintf("%s/%s", evt.group, evt.name) + return removeEvent(evt.typ, pe) +} + +func removeEvent(typ ProbeType, pe string) error { + f, err := typ.eventsFile() + if err != nil { + return err + } + defer f.Close() + + // See [k,u]probe_events syntax above. The probe type does not need to be specified + // for removals. + if _, err = f.WriteString("-:" + pe); err != nil { + return fmt.Errorf("remove event %q from %s: %w", pe, f.Name(), err) + } + + return nil +} + +// ID returns the tracefs ID associated with the event. +func (evt *Event) ID() uint64 { + return evt.id +} + +// Group returns the tracefs group used by the event. 
+func (evt *Event) Group() string { + return evt.group +} + +// KprobeToken creates the SYM[+offs] token for the tracefs api. +func KprobeToken(args ProbeArgs) string { + po := args.Symbol + + if args.Offset != 0 { + po += fmt.Sprintf("+%#x", args.Offset) + } + + return po +} diff --git a/vendor/github.com/cilium/ebpf/internal/tracefs/probetype_string.go b/vendor/github.com/cilium/ebpf/internal/tracefs/probetype_string.go new file mode 100644 index 000000000..ed8471a89 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/tracefs/probetype_string.go @@ -0,0 +1,25 @@ +// Code generated by "stringer -type=ProbeType -linecomment"; DO NOT EDIT. + +package tracefs + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[Kprobe-0] + _ = x[Uprobe-1] +} + +const _ProbeType_name = "kprobeuprobe" + +var _ProbeType_index = [...]uint8{0, 6, 12} + +func (i ProbeType) String() string { + idx := int(i) - 0 + if i < 0 || idx >= len(_ProbeType_index)-1 { + return "ProbeType(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _ProbeType_name[_ProbeType_index[idx]:_ProbeType_index[idx+1]] +} diff --git a/vendor/github.com/cilium/ebpf/internal/tracefs/uprobe.go b/vendor/github.com/cilium/ebpf/internal/tracefs/uprobe.go new file mode 100644 index 000000000..994f31260 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/tracefs/uprobe.go @@ -0,0 +1,16 @@ +package tracefs + +import "fmt" + +// UprobeToken creates the PATH:OFFSET(REF_CTR_OFFSET) token for the tracefs api. +func UprobeToken(args ProbeArgs) string { + po := fmt.Sprintf("%s:%#x", args.Path, args.Offset) + + if args.RefCtrOffset != 0 { + // This is not documented in Documentation/trace/uprobetracer.txt. 
+ // elixir.bootlin.com/linux/v5.15-rc7/source/kernel/trace/trace.c#L5564 + po += fmt.Sprintf("(%#x)", args.RefCtrOffset) + } + + return po +} diff --git a/vendor/github.com/cilium/ebpf/internal/unix/doc.go b/vendor/github.com/cilium/ebpf/internal/unix/doc.go new file mode 100644 index 000000000..d168d36f1 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/unix/doc.go @@ -0,0 +1,11 @@ +// Package unix re-exports Linux specific parts of golang.org/x/sys/unix. +// +// It avoids breaking compilation on other OS by providing stubs as follows: +// - Invoking a function always returns an error. +// - Errnos have distinct, non-zero values. +// - Constants have distinct but meaningless values. +// - Types use the same names for members, but may or may not follow the +// Linux layout. +package unix + +// Note: please don't add any custom API to this package. Use internal/sys instead. diff --git a/vendor/github.com/cilium/ebpf/internal/unix/errno_linux.go b/vendor/github.com/cilium/ebpf/internal/unix/errno_linux.go new file mode 100644 index 000000000..0c4886bd1 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/unix/errno_linux.go @@ -0,0 +1,29 @@ +package unix + +import ( + "syscall" + + linux "golang.org/x/sys/unix" +) + +type Errno = syscall.Errno + +const ( + E2BIG = linux.E2BIG + EACCES = linux.EACCES + EAGAIN = linux.EAGAIN + EBADF = linux.EBADF + EEXIST = linux.EEXIST + EFAULT = linux.EFAULT + EILSEQ = linux.EILSEQ + EINTR = linux.EINTR + EINVAL = linux.EINVAL + ENODEV = linux.ENODEV + ENOENT = linux.ENOENT + ENOSPC = linux.ENOSPC + EOPNOTSUPP = linux.EOPNOTSUPP + EPERM = linux.EPERM + EPOLLIN = linux.EPOLLIN + ESRCH = linux.ESRCH + ESTALE = linux.ESTALE +) diff --git a/vendor/github.com/cilium/ebpf/internal/unix/errno_other.go b/vendor/github.com/cilium/ebpf/internal/unix/errno_other.go new file mode 100644 index 000000000..fc2b042b5 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/unix/errno_other.go @@ -0,0 +1,29 @@ +//go:build !linux && 
!windows + +package unix + +import "syscall" + +type Errno = syscall.Errno + +// Errnos are distinct and non-zero. +const ( + E2BIG Errno = iota + 1 + EACCES + EAGAIN + EBADF + EEXIST + EFAULT + EILSEQ + EINTR + EINVAL + ENODEV + ENOENT + ENOSPC + ENOTSUP + ENOTSUPP + EOPNOTSUPP + EPERM + ESRCH + ESTALE +) diff --git a/vendor/github.com/cilium/ebpf/internal/unix/errno_string_windows.go b/vendor/github.com/cilium/ebpf/internal/unix/errno_string_windows.go new file mode 100644 index 000000000..6077e983f --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/unix/errno_string_windows.go @@ -0,0 +1,59 @@ +// Code generated by "stringer -type=Errno -tags=windows -output=errno_string_windows.go"; DO NOT EDIT. + +package unix + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[EPERM-1] + _ = x[ENOENT-2] + _ = x[ESRCH-3] + _ = x[EINTR-4] + _ = x[E2BIG-7] + _ = x[EBADF-9] + _ = x[EAGAIN-11] + _ = x[EACCES-13] + _ = x[EFAULT-14] + _ = x[EEXIST-17] + _ = x[ENODEV-19] + _ = x[EINVAL-22] + _ = x[ENOSPC-28] + _ = x[EILSEQ-42] + _ = x[ENOTSUP-129] + _ = x[EOPNOTSUPP-130] + _ = x[ENOTSUPP-536870912] + _ = x[ESTALE-536870913] +} + +const _Errno_name = "EPERMENOENTESRCHEINTRE2BIGEBADFEAGAINEACCESEFAULTEEXISTENODEVEINVALENOSPCEILSEQENOTSUPEOPNOTSUPPENOTSUPPESTALE" + +var _Errno_map = map[Errno]string{ + 1: _Errno_name[0:5], + 2: _Errno_name[5:11], + 3: _Errno_name[11:16], + 4: _Errno_name[16:21], + 7: _Errno_name[21:26], + 9: _Errno_name[26:31], + 11: _Errno_name[31:37], + 13: _Errno_name[37:43], + 14: _Errno_name[43:49], + 17: _Errno_name[49:55], + 19: _Errno_name[55:61], + 22: _Errno_name[61:67], + 28: _Errno_name[67:73], + 42: _Errno_name[73:79], + 129: _Errno_name[79:86], + 130: _Errno_name[86:96], + 536870912: _Errno_name[96:104], + 536870913: _Errno_name[104:110], +} + +func (i Errno) String() string { + if str, ok 
:= _Errno_map[i]; ok { + return str + } + return "Errno(" + strconv.FormatInt(int64(i), 10) + ")" +} diff --git a/vendor/github.com/cilium/ebpf/internal/unix/errno_windows.go b/vendor/github.com/cilium/ebpf/internal/unix/errno_windows.go new file mode 100644 index 000000000..266e43daa --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/unix/errno_windows.go @@ -0,0 +1,79 @@ +package unix + +// The code in this file is derived from syscall_unix.go in the Go source code, +// licensed under the MIT license. + +import ( + "errors" + "os" + "syscall" +) + +//go:generate go tool stringer -type=Errno -tags=windows -output=errno_string_windows.go + +// Windows specific constants for Unix errnos. +// +// The values do not always match Linux, for example EILSEQ and EOPNOTSUPP. +// +// See https://learn.microsoft.com/en-us/cpp/c-runtime-library/errno-constants?view=msvc-170 +const ( + EPERM Errno = 1 + ENOENT Errno = 2 + ESRCH Errno = 3 + EINTR Errno = 4 + E2BIG Errno = 7 + EBADF Errno = 9 + EAGAIN Errno = 11 + EACCES Errno = 13 + EFAULT Errno = 14 + EEXIST Errno = 17 + ENODEV Errno = 19 + EINVAL Errno = 22 + ENFILE Errno = 23 + EMFILE Errno = 24 + ENOSPC Errno = 28 + ENOSYS Errno = 40 + ENOTEMPTY Errno = 41 + EILSEQ Errno = 42 + ENOTSUP Errno = 129 + EOPNOTSUPP Errno = 130 + EOTHER Errno = 131 + ETIMEDOUT Errno = 138 + EWOULDBLOCK Errno = 140 +) + +// These constants do not exist on Windows and therefore have a non-zero +// dummy value. +const ( + ENOTSUPP Errno = Errno(syscall.APPLICATION_ERROR) + iota + ESTALE +) + +// Errno is a Windows compatibility shim for Unix errnos. 
+type Errno uintptr + +func (e Errno) Error() string { + return e.String() +} + +func (e Errno) Is(target error) bool { + switch target { + case os.ErrPermission: + return e == EACCES || e == EPERM + case os.ErrExist: + return e == EEXIST || e == ENOTEMPTY + case os.ErrNotExist: + return e == ENOENT + case errors.ErrUnsupported: + return e == ENOSYS || e == ENOTSUP || e == EOPNOTSUPP + } + return false +} + +func (e Errno) Temporary() bool { + return e == EINTR || e == EMFILE || e == ENFILE || e.Timeout() +} + +func (e Errno) Timeout() bool { + return e == EAGAIN || e == EWOULDBLOCK || e == ETIMEDOUT +} diff --git a/vendor/github.com/cilium/ebpf/internal/unix/error.go b/vendor/github.com/cilium/ebpf/internal/unix/error.go new file mode 100644 index 000000000..48017c100 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/unix/error.go @@ -0,0 +1,23 @@ +package unix + +import ( + "fmt" + "runtime" + "strings" + + "github.com/cilium/ebpf/internal" +) + +// errNonLinux returns an error which wraps [internal.ErrNotSupportedOnOS] and +// includes the name of the calling function. 
+func errNonLinux() error { + name := "unknown" + pc, _, _, ok := runtime.Caller(1) + if ok { + name = runtime.FuncForPC(pc).Name() + if pos := strings.LastIndexByte(name, '.'); pos != -1 { + name = name[pos+1:] + } + } + return fmt.Errorf("unix: %s: %w", name, internal.ErrNotSupportedOnOS) +} diff --git a/vendor/github.com/cilium/ebpf/internal/unix/strings_other.go b/vendor/github.com/cilium/ebpf/internal/unix/strings_other.go new file mode 100644 index 000000000..76f367aa8 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/unix/strings_other.go @@ -0,0 +1,15 @@ +//go:build !linux && !windows + +package unix + +func BytePtrFromString(s string) (*byte, error) { + return nil, errNonLinux() +} + +func ByteSliceToString(s []byte) string { + return "" +} + +func ByteSliceFromString(s string) ([]byte, error) { + return nil, errNonLinux() +} diff --git a/vendor/github.com/cilium/ebpf/internal/unix/strings_windows.go b/vendor/github.com/cilium/ebpf/internal/unix/strings_windows.go new file mode 100644 index 000000000..00af5a968 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/unix/strings_windows.go @@ -0,0 +1,23 @@ +package unix + +import ( + "syscall" + + "golang.org/x/sys/windows" +) + +func BytePtrFromString(s string) (*byte, error) { + p, err := windows.BytePtrFromString(s) + if err == syscall.EINVAL { + err = EINVAL + } + return p, err +} + +func ByteSliceToString(s []byte) string { + return windows.ByteSliceToString(s) +} + +func ByteSliceFromString(s string) ([]byte, error) { + return windows.ByteSliceFromString(s) +} diff --git a/vendor/github.com/cilium/ebpf/internal/unix/types_linux.go b/vendor/github.com/cilium/ebpf/internal/unix/types_linux.go new file mode 100644 index 000000000..14a0a1929 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/unix/types_linux.go @@ -0,0 +1,212 @@ +//go:build linux + +package unix + +import ( + "syscall" + "unsafe" + + linux "golang.org/x/sys/unix" +) + +const ( + BPF_F_NO_PREALLOC = 
linux.BPF_F_NO_PREALLOC + BPF_F_NUMA_NODE = linux.BPF_F_NUMA_NODE + BPF_F_RDONLY = linux.BPF_F_RDONLY + BPF_F_WRONLY = linux.BPF_F_WRONLY + BPF_F_RDONLY_PROG = linux.BPF_F_RDONLY_PROG + BPF_F_WRONLY_PROG = linux.BPF_F_WRONLY_PROG + BPF_F_SLEEPABLE = linux.BPF_F_SLEEPABLE + BPF_F_XDP_HAS_FRAGS = linux.BPF_F_XDP_HAS_FRAGS + BPF_F_MMAPABLE = linux.BPF_F_MMAPABLE + BPF_F_INNER_MAP = linux.BPF_F_INNER_MAP + BPF_F_KPROBE_MULTI_RETURN = linux.BPF_F_KPROBE_MULTI_RETURN + BPF_F_UPROBE_MULTI_RETURN = linux.BPF_F_UPROBE_MULTI_RETURN + BPF_F_LOCK = linux.BPF_F_LOCK + BPF_OBJ_NAME_LEN = linux.BPF_OBJ_NAME_LEN + BPF_TAG_SIZE = linux.BPF_TAG_SIZE + BPF_RINGBUF_BUSY_BIT = linux.BPF_RINGBUF_BUSY_BIT + BPF_RINGBUF_DISCARD_BIT = linux.BPF_RINGBUF_DISCARD_BIT + BPF_RINGBUF_HDR_SZ = linux.BPF_RINGBUF_HDR_SZ + SYS_BPF = linux.SYS_BPF + F_DUPFD_CLOEXEC = linux.F_DUPFD_CLOEXEC + EPOLL_CTL_ADD = linux.EPOLL_CTL_ADD + EPOLL_CLOEXEC = linux.EPOLL_CLOEXEC + O_CLOEXEC = linux.O_CLOEXEC + O_NONBLOCK = linux.O_NONBLOCK + PROT_NONE = linux.PROT_NONE + PROT_READ = linux.PROT_READ + PROT_WRITE = linux.PROT_WRITE + MAP_ANON = linux.MAP_ANON + MAP_SHARED = linux.MAP_SHARED + MAP_FIXED = linux.MAP_FIXED + MAP_PRIVATE = linux.MAP_PRIVATE + PERF_ATTR_SIZE_VER1 = linux.PERF_ATTR_SIZE_VER1 + PERF_TYPE_SOFTWARE = linux.PERF_TYPE_SOFTWARE + PERF_TYPE_TRACEPOINT = linux.PERF_TYPE_TRACEPOINT + PERF_COUNT_SW_BPF_OUTPUT = linux.PERF_COUNT_SW_BPF_OUTPUT + PERF_EVENT_IOC_DISABLE = linux.PERF_EVENT_IOC_DISABLE + PERF_EVENT_IOC_ENABLE = linux.PERF_EVENT_IOC_ENABLE + PERF_EVENT_IOC_SET_BPF = linux.PERF_EVENT_IOC_SET_BPF + PerfBitWatermark = linux.PerfBitWatermark + PerfBitWriteBackward = linux.PerfBitWriteBackward + PERF_SAMPLE_RAW = linux.PERF_SAMPLE_RAW + PERF_FLAG_FD_CLOEXEC = linux.PERF_FLAG_FD_CLOEXEC + RLIM_INFINITY = linux.RLIM_INFINITY + RLIMIT_MEMLOCK = linux.RLIMIT_MEMLOCK + BPF_STATS_RUN_TIME = linux.BPF_STATS_RUN_TIME + PERF_RECORD_LOST = linux.PERF_RECORD_LOST + PERF_RECORD_SAMPLE = 
linux.PERF_RECORD_SAMPLE + AT_FDCWD = linux.AT_FDCWD + RENAME_NOREPLACE = linux.RENAME_NOREPLACE + SO_ATTACH_BPF = linux.SO_ATTACH_BPF + SO_DETACH_BPF = linux.SO_DETACH_BPF + SOL_SOCKET = linux.SOL_SOCKET + SIGPROF = linux.SIGPROF + SIGUSR1 = linux.SIGUSR1 + SIG_BLOCK = linux.SIG_BLOCK + SIG_UNBLOCK = linux.SIG_UNBLOCK + BPF_FS_MAGIC = linux.BPF_FS_MAGIC + TRACEFS_MAGIC = linux.TRACEFS_MAGIC + DEBUGFS_MAGIC = linux.DEBUGFS_MAGIC + BPF_RB_NO_WAKEUP = linux.BPF_RB_NO_WAKEUP + BPF_RB_FORCE_WAKEUP = linux.BPF_RB_FORCE_WAKEUP + AF_UNSPEC = linux.AF_UNSPEC + IFF_UP = linux.IFF_UP +) + +type Statfs_t = linux.Statfs_t +type Stat_t = linux.Stat_t +type Rlimit = linux.Rlimit +type Signal = linux.Signal +type Sigset_t = linux.Sigset_t +type PerfEventMmapPage = linux.PerfEventMmapPage +type EpollEvent = linux.EpollEvent +type PerfEventAttr = linux.PerfEventAttr +type Utsname = linux.Utsname +type CPUSet = linux.CPUSet + +func Syscall(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err syscall.Errno) { + return linux.Syscall(trap, a1, a2, a3) +} + +func PthreadSigmask(how int, set, oldset *Sigset_t) error { + return linux.PthreadSigmask(how, set, oldset) +} + +func FcntlInt(fd uintptr, cmd, arg int) (int, error) { + return linux.FcntlInt(fd, cmd, arg) +} + +func IoctlSetInt(fd int, req uint, value int) error { + return linux.IoctlSetInt(fd, req, value) +} + +func Statfs(path string, buf *Statfs_t) (err error) { + return linux.Statfs(path, buf) +} + +func Close(fd int) (err error) { + return linux.Close(fd) +} + +func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { + return linux.EpollWait(epfd, events, msec) +} + +func EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) { + return linux.EpollCtl(epfd, op, fd, event) +} + +func Eventfd(initval uint, flags int) (fd int, err error) { + return linux.Eventfd(initval, flags) +} + +func Write(fd int, p []byte) (n int, err error) { + return linux.Write(fd, p) +} + +func EpollCreate1(flag int) (fd int, 
err error) { + return linux.EpollCreate1(flag) +} + +func SetNonblock(fd int, nonblocking bool) (err error) { + return linux.SetNonblock(fd, nonblocking) +} + +func Mmap(fd int, offset int64, length int, prot int, flags int) (data []byte, err error) { + return linux.Mmap(fd, offset, length, prot, flags) +} + +//go:nocheckptr +func MmapPtr(fd int, offset int64, addr unsafe.Pointer, length uintptr, prot int, flags int) (ret unsafe.Pointer, err error) { + return linux.MmapPtr(fd, offset, addr, length, prot, flags) +} + +func Munmap(b []byte) (err error) { + return linux.Munmap(b) +} + +func PerfEventOpen(attr *PerfEventAttr, pid int, cpu int, groupFd int, flags int) (fd int, err error) { + return linux.PerfEventOpen(attr, pid, cpu, groupFd, flags) +} + +func Uname(buf *Utsname) (err error) { + return linux.Uname(buf) +} + +func Getpid() int { + return linux.Getpid() +} + +func Gettid() int { + return linux.Gettid() +} + +func Tgkill(tgid int, tid int, sig syscall.Signal) (err error) { + return linux.Tgkill(tgid, tid, sig) +} + +func BytePtrFromString(s string) (*byte, error) { + return linux.BytePtrFromString(s) +} + +func ByteSliceToString(s []byte) string { + return linux.ByteSliceToString(s) +} + +func ByteSliceFromString(s string) ([]byte, error) { + return linux.ByteSliceFromString(s) +} + +func Renameat2(olddirfd int, oldpath string, newdirfd int, newpath string, flags uint) error { + return linux.Renameat2(olddirfd, oldpath, newdirfd, newpath, flags) +} + +func Prlimit(pid, resource int, new, old *Rlimit) error { + return linux.Prlimit(pid, resource, new, old) +} + +func Open(path string, mode int, perm uint32) (int, error) { + return linux.Open(path, mode, perm) +} + +func Fstat(fd int, stat *Stat_t) error { + return linux.Fstat(fd, stat) +} + +func SetsockoptInt(fd, level, opt, value int) error { + return linux.SetsockoptInt(fd, level, opt, value) +} + +func SchedSetaffinity(pid int, set *CPUSet) error { + return linux.SchedSetaffinity(pid, set) +} + +func 
SchedGetaffinity(pid int, set *CPUSet) error { + return linux.SchedGetaffinity(pid, set) +} + +func Auxv() ([][2]uintptr, error) { + return linux.Auxv() +} diff --git a/vendor/github.com/cilium/ebpf/internal/unix/types_other.go b/vendor/github.com/cilium/ebpf/internal/unix/types_other.go new file mode 100644 index 000000000..f3f764ebe --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/unix/types_other.go @@ -0,0 +1,290 @@ +//go:build !linux + +package unix + +import ( + "syscall" + "unsafe" +) + +// Constants are distinct to avoid breaking switch statements. +const ( + BPF_F_NO_PREALLOC = iota + BPF_F_NUMA_NODE + BPF_F_RDONLY + BPF_F_WRONLY + BPF_F_RDONLY_PROG + BPF_F_WRONLY_PROG + BPF_F_SLEEPABLE + BPF_F_MMAPABLE + BPF_F_INNER_MAP + BPF_F_KPROBE_MULTI_RETURN + BPF_F_UPROBE_MULTI_RETURN + BPF_F_XDP_HAS_FRAGS + BPF_OBJ_NAME_LEN + BPF_TAG_SIZE + BPF_RINGBUF_BUSY_BIT + BPF_RINGBUF_DISCARD_BIT + BPF_RINGBUF_HDR_SZ + SYS_BPF + F_DUPFD_CLOEXEC + EPOLLIN + EPOLL_CTL_ADD + EPOLL_CLOEXEC + O_CLOEXEC + O_NONBLOCK + PROT_NONE + PROT_READ + PROT_WRITE + MAP_ANON + MAP_SHARED + MAP_FIXED + MAP_PRIVATE + PERF_ATTR_SIZE_VER1 + PERF_TYPE_SOFTWARE + PERF_TYPE_TRACEPOINT + PERF_COUNT_SW_BPF_OUTPUT + PERF_EVENT_IOC_DISABLE + PERF_EVENT_IOC_ENABLE + PERF_EVENT_IOC_SET_BPF + PerfBitWatermark + PerfBitWriteBackward + PERF_SAMPLE_RAW + PERF_FLAG_FD_CLOEXEC + RLIM_INFINITY + RLIMIT_MEMLOCK + BPF_STATS_RUN_TIME + PERF_RECORD_LOST + PERF_RECORD_SAMPLE + AT_FDCWD + RENAME_NOREPLACE + SO_ATTACH_BPF + SO_DETACH_BPF + SOL_SOCKET + SIGPROF + SIGUSR1 + SIG_BLOCK + SIG_UNBLOCK + BPF_FS_MAGIC + TRACEFS_MAGIC + DEBUGFS_MAGIC + BPF_RB_NO_WAKEUP + BPF_RB_FORCE_WAKEUP + BPF_F_LOCK + AF_UNSPEC + IFF_UP +) + +type Statfs_t struct { + Type int64 + Bsize int64 + Blocks uint64 + Bfree uint64 + Bavail uint64 + Files uint64 + Ffree uint64 + Fsid [2]int32 + Namelen int64 + Frsize int64 + Flags int64 + Spare [4]int64 +} + +type Stat_t struct { + Dev uint64 + Ino uint64 + Nlink uint64 + Mode uint32 + Uid 
uint32 + Gid uint32 + _ int32 + Rdev uint64 + Size int64 + Blksize int64 + Blocks int64 +} + +type Rlimit struct { + Cur uint64 + Max uint64 +} + +type Signal int + +type Sigset_t struct { + Val [4]uint64 +} + +func Syscall(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err Errno) { + return 0, 0, ENOTSUP +} + +func PthreadSigmask(how int, set, oldset *Sigset_t) error { + return errNonLinux() +} + +func FcntlInt(fd uintptr, cmd, arg int) (int, error) { + return -1, errNonLinux() +} + +func IoctlSetInt(fd int, req uint, value int) error { + return errNonLinux() +} + +func Statfs(path string, buf *Statfs_t) error { + return errNonLinux() +} + +func Close(fd int) (err error) { + return errNonLinux() +} + +type EpollEvent struct { + Events uint32 + Fd int32 + Pad int32 +} + +func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { + return 0, errNonLinux() +} + +func EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) { + return errNonLinux() +} + +func Eventfd(initval uint, flags int) (fd int, err error) { + return 0, errNonLinux() +} + +func Write(fd int, p []byte) (n int, err error) { + return 0, errNonLinux() +} + +func EpollCreate1(flag int) (fd int, err error) { + return 0, errNonLinux() +} + +type PerfEventMmapPage struct { + Version uint32 + Compat_version uint32 + Lock uint32 + Index uint32 + Offset int64 + Time_enabled uint64 + Time_running uint64 + Capabilities uint64 + Pmc_width uint16 + Time_shift uint16 + Time_mult uint32 + Time_offset uint64 + Time_zero uint64 + Size uint32 + + Data_head uint64 + Data_tail uint64 + Data_offset uint64 + Data_size uint64 + Aux_head uint64 + Aux_tail uint64 + Aux_offset uint64 + Aux_size uint64 +} + +func SetNonblock(fd int, nonblocking bool) (err error) { + return errNonLinux() +} + +func Mmap(fd int, offset int64, length int, prot int, flags int) (data []byte, err error) { + return []byte{}, errNonLinux() +} + +func MmapPtr(fd int, offset int64, addr unsafe.Pointer, length uintptr, prot int, 
flags int) (ret unsafe.Pointer, err error) { + return nil, errNonLinux() +} + +func Munmap(b []byte) (err error) { + return errNonLinux() +} + +type PerfEventAttr struct { + Type uint32 + Size uint32 + Config uint64 + Sample uint64 + Sample_type uint64 + Read_format uint64 + Bits uint64 + Wakeup uint32 + Bp_type uint32 + Ext1 uint64 + Ext2 uint64 + Branch_sample_type uint64 + Sample_regs_user uint64 + Sample_stack_user uint32 + Clockid int32 + Sample_regs_intr uint64 + Aux_watermark uint32 + Sample_max_stack uint16 +} + +func PerfEventOpen(attr *PerfEventAttr, pid int, cpu int, groupFd int, flags int) (fd int, err error) { + return 0, errNonLinux() +} + +type Utsname struct { + Release [65]byte + Version [65]byte +} + +func Uname(buf *Utsname) (err error) { + return errNonLinux() +} + +func Getpid() int { + return -1 +} + +func Gettid() int { + return -1 +} + +func Tgkill(tgid int, tid int, sig syscall.Signal) (err error) { + return errNonLinux() +} + +func Renameat2(olddirfd int, oldpath string, newdirfd int, newpath string, flags uint) error { + return errNonLinux() +} + +func Prlimit(pid, resource int, new, old *Rlimit) error { + return errNonLinux() +} + +func Open(path string, mode int, perm uint32) (int, error) { + return -1, errNonLinux() +} + +func Fstat(fd int, stat *Stat_t) error { + return errNonLinux() +} + +func SetsockoptInt(fd, level, opt, value int) error { + return errNonLinux() +} + +type CPUSet struct{} + +func (*CPUSet) Set(int) {} + +func SchedSetaffinity(pid int, set *CPUSet) error { + return errNonLinux() +} + +func SchedGetaffinity(pid int, set *CPUSet) error { + return errNonLinux() +} + +func Auxv() ([][2]uintptr, error) { + return nil, errNonLinux() +} diff --git a/vendor/github.com/cilium/ebpf/internal/version.go b/vendor/github.com/cilium/ebpf/internal/version.go new file mode 100644 index 000000000..3123dc9f0 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/version.go @@ -0,0 +1,74 @@ +package internal + +import ( + "fmt" +) 
+ +const ( + // Version constant used in ELF binaries indicating that the loader needs to + // substitute the eBPF program's version with the value of the kernel's + // KERNEL_VERSION compile-time macro. Used for compatibility with BCC, gobpf + // and RedSift. + MagicKernelVersion = 0xFFFFFFFE +) + +// A Version in the form Major.Minor.Patch. +type Version [3]uint16 + +// NewVersion creates a version from a string like "Major.Minor.Patch". +// +// Patch is optional. +func NewVersion(ver string) (Version, error) { + var major, minor, patch uint16 + n, _ := fmt.Sscanf(ver, "%d.%d.%d", &major, &minor, &patch) + if n < 2 { + return Version{}, fmt.Errorf("invalid version: %s", ver) + } + return Version{major, minor, patch}, nil +} + +// NewVersionFromCode creates a version from a LINUX_VERSION_CODE. +func NewVersionFromCode(code uint32) Version { + return Version{ + uint16(uint8(code >> 16)), + uint16(uint8(code >> 8)), + uint16(uint8(code)), + } +} + +func (v Version) String() string { + if v[2] == 0 { + return fmt.Sprintf("v%d.%d", v[0], v[1]) + } + return fmt.Sprintf("v%d.%d.%d", v[0], v[1], v[2]) +} + +// Less returns true if the version is less than another version. +func (v Version) Less(other Version) bool { + for i, a := range v { + if a == other[i] { + continue + } + return a < other[i] + } + return false +} + +// Unspecified returns true if the version is all zero. +func (v Version) Unspecified() bool { + return v[0] == 0 && v[1] == 0 && v[2] == 0 +} + +// Kernel implements the kernel's KERNEL_VERSION macro from linux/version.h. +// It represents the kernel version and patch level as a single value. +func (v Version) Kernel() uint32 { + + // Kernels 4.4 and 4.9 have their SUBLEVEL clamped to 255 to avoid + // overflowing into PATCHLEVEL. + // See kernel commit 9b82f13e7ef3 ("kbuild: clamp SUBLEVEL to 255"). + s := min(v[2], 255) + + // Truncate members to uint8 to prevent them from spilling over into + // each other when overflowing 8 bits. 
+ return uint32(uint8(v[0]))<<16 | uint32(uint8(v[1]))<<8 | uint32(uint8(s)) +} diff --git a/vendor/github.com/cilium/ebpf/linker.go b/vendor/github.com/cilium/ebpf/linker.go new file mode 100644 index 000000000..98c4a0d0b --- /dev/null +++ b/vendor/github.com/cilium/ebpf/linker.go @@ -0,0 +1,557 @@ +package ebpf + +import ( + "debug/elf" + "encoding/binary" + "errors" + "fmt" + "io" + "io/fs" + "math" + "slices" + "strings" + + "github.com/cilium/ebpf/asm" + "github.com/cilium/ebpf/btf" + "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/kallsyms" + "github.com/cilium/ebpf/internal/platform" +) + +// handles stores handle objects to avoid gc cleanup +type handles []*btf.Handle + +func (hs *handles) add(h *btf.Handle) (int, error) { + if h == nil { + return 0, nil + } + + if len(*hs) == math.MaxInt16 { + return 0, fmt.Errorf("can't add more than %d module FDs to fdArray", math.MaxInt16) + } + + *hs = append(*hs, h) + + // return length of slice so that indexes start at 1 + return len(*hs), nil +} + +func (hs handles) fdArray() []int32 { + // first element of fda is reserved as no module can be indexed with 0 + fda := []int32{0} + for _, h := range hs { + fda = append(fda, int32(h.FD())) + } + + return fda +} + +func (hs *handles) Close() error { + var errs []error + for _, h := range *hs { + errs = append(errs, h.Close()) + } + return errors.Join(errs...) +} + +// splitSymbols splits insns into subsections delimited by Symbol Instructions. +// insns cannot be empty and must start with a Symbol Instruction. +// +// The resulting map is indexed by Symbol name. 
+func splitSymbols(insns asm.Instructions) (map[string]asm.Instructions, error) { + if len(insns) == 0 { + return nil, errors.New("insns is empty") + } + + currentSym := insns[0].Symbol() + if currentSym == "" { + return nil, errors.New("insns must start with a Symbol") + } + + start := 0 + progs := make(map[string]asm.Instructions) + for i, ins := range insns[1:] { + i := i + 1 + + sym := ins.Symbol() + if sym == "" { + continue + } + + // New symbol, flush the old one out. + progs[currentSym] = slices.Clone(insns[start:i]) + + if progs[sym] != nil { + return nil, fmt.Errorf("insns contains duplicate Symbol %s", sym) + } + currentSym = sym + start = i + } + + if tail := insns[start:]; len(tail) > 0 { + progs[currentSym] = slices.Clone(tail) + } + + return progs, nil +} + +// The linker is responsible for resolving bpf-to-bpf calls between programs +// within an ELF. Each BPF program must be a self-contained binary blob, +// so when an instruction in one ELF program section wants to jump to +// a function in another, the linker needs to pull in the bytecode +// (and BTF info) of the target function and concatenate the instruction +// streams. +// +// Later on in the pipeline, all call sites are fixed up with relative jumps +// within this newly-created instruction stream to then finally hand off to +// the kernel with BPF_PROG_LOAD. +// +// Each function is denoted by an ELF symbol and the compiler takes care of +// register setup before each jump instruction. + +// hasFunctionReferences returns true if insns contains one or more bpf2bpf +// function references. +func hasFunctionReferences(insns asm.Instructions) bool { + for _, i := range insns { + if i.IsFunctionReference() { + return true + } + } + return false +} + +// applyRelocations collects and applies any CO-RE relocations in insns. +// +// insns are modified in place. 
+func applyRelocations(insns asm.Instructions, bo binary.ByteOrder, b *btf.Builder, c *btf.Cache, kernelOverride *btf.Spec, extraTargets []*btf.Spec) error { + var relos []*btf.CORERelocation + var reloInsns []*asm.Instruction + iter := insns.Iterate() + for iter.Next() { + if relo := btf.CORERelocationMetadata(iter.Ins); relo != nil { + relos = append(relos, relo) + reloInsns = append(reloInsns, iter.Ins) + } + } + + if len(relos) == 0 { + return nil + } + + if bo == nil { + bo = internal.NativeEndian + } + + var targets []*btf.Spec + if kernelOverride == nil { + kernel, err := c.Kernel() + if err != nil { + return fmt.Errorf("load kernel spec: %w", err) + } + + modules, err := c.Modules() + // Ignore ErrNotExists to cater to kernels which have CONFIG_DEBUG_INFO_BTF_MODULES + // or CONFIG_DEBUG_INFO_BTF disabled. + if err != nil && !errors.Is(err, fs.ErrNotExist) { + return err + } + + targets = make([]*btf.Spec, 0, 1+len(modules)+len(extraTargets)) + targets = append(targets, kernel) + + for _, kmod := range modules { + spec, err := c.Module(kmod) + if err != nil { + return fmt.Errorf("load BTF for kmod %s: %w", kmod, err) + } + + targets = append(targets, spec) + } + } else { + // We expect kernelOverride to contain the merged types + // of vmlinux and kernel modules, as distributed by btfhub. + targets = []*btf.Spec{kernelOverride} + } + + targets = append(targets, extraTargets...) + + fixups, err := btf.CORERelocate(relos, targets, bo, b.Add) + if err != nil { + return err + } + + for i, fixup := range fixups { + if err := fixup.Apply(reloInsns[i]); err != nil { + return fmt.Errorf("fixup for %s: %w", relos[i], err) + } + } + + return nil +} + +// flattenPrograms resolves bpf-to-bpf calls for a set of programs. +// +// Links all programs in names by modifying their ProgramSpec in progs. +func flattenPrograms(progs map[string]*ProgramSpec, names []string) { + // Pre-calculate all function references. 
+ refs := make(map[*ProgramSpec][]string) + for _, prog := range progs { + refs[prog] = prog.Instructions.FunctionReferences() + } + + // Create a flattened instruction stream, but don't modify progs yet to + // avoid linking multiple times. + flattened := make([]asm.Instructions, 0, len(names)) + for _, name := range names { + flattened = append(flattened, flattenInstructions(name, progs, refs)) + } + + // Finally, assign the flattened instructions. + for i, name := range names { + progs[name].Instructions = flattened[i] + } +} + +// flattenInstructions resolves bpf-to-bpf calls for a single program. +// +// Flattens the instructions of prog by concatenating the instructions of all +// direct and indirect dependencies. +// +// progs contains all referenceable programs, while refs contain the direct +// dependencies of each program. +func flattenInstructions(name string, progs map[string]*ProgramSpec, refs map[*ProgramSpec][]string) asm.Instructions { + prog := progs[name] + progRefs := refs[prog] + + if len(progRefs) == 0 { + // No references, nothing to do. + return prog.Instructions + } + + insns := make(asm.Instructions, len(prog.Instructions)) + copy(insns, prog.Instructions) + + // Add all direct references of prog to the list of to be linked programs. + pending := make([]string, len(progRefs)) + copy(pending, progRefs) + + // All references for which we've appended instructions. + linked := make(map[string]bool) + + // Iterate all pending references. We can't use a range since pending is + // modified in the body below. + for len(pending) > 0 { + var ref string + ref, pending = pending[0], pending[1:] + + if linked[ref] { + // We've already linked this ref, don't append instructions again. + continue + } + + progRef := progs[ref] + if progRef == nil { + // We don't have instructions that go with this reference. This + // happens when calling extern functions. + continue + } + + insns = append(insns, progRef.Instructions...) 
+ linked[ref] = true + + // Make sure we link indirect references. + pending = append(pending, refs[progRef]...) + } + + return insns +} + +// fixupAndValidate is called by the ELF reader right before marshaling the +// instruction stream. It performs last-minute adjustments to the program and +// runs some sanity checks before sending it off to the kernel. +func fixupAndValidate(insns asm.Instructions) error { + iter := insns.Iterate() + for iter.Next() { + ins := iter.Ins + + // Map load was tagged with a Reference, but does not contain a Map pointer. + needsMap := ins.Reference() != "" || ins.Metadata.Get(kconfigMetaKey{}) != nil + if ins.IsLoadFromMap() && needsMap && ins.Map() == nil { + return fmt.Errorf("instruction %d: %w", iter.Index, asm.ErrUnsatisfiedMapReference) + } + + fixupProbeReadKernel(ins) + } + + return nil +} + +// A constant used to poison calls to non-existent kfuncs. +// +// Similar POISON_CALL_KFUNC_BASE in libbpf, except that we use a value lower +// than 2^28 to fit into a tagged constant. +const kfuncCallPoisonBase = 0xdedc0de + +// fixupKfuncs loops over all instructions in search for kfunc calls. +// If at least one is found, the current kernels BTF and module BTFis are searched to set Instruction.Constant +// and Instruction.Offset to the correct values. +func fixupKfuncs(insns asm.Instructions, cache *btf.Cache) (_ handles, err error) { + closeOnError := func(c io.Closer) { + if err != nil { + c.Close() + } + } + + iter := insns.Iterate() + for iter.Next() { + ins := iter.Ins + if metadata := ins.Metadata.Get(kfuncMetaKey{}); metadata != nil { + goto fixups + } + } + + return nil, nil + +fixups: + // Only load kernel BTF if we found at least one kfunc call. kernelSpec can be + // nil if the kernel does not have BTF, in which case we poison all kfunc + // calls. + _, err = cache.Kernel() + // ErrNotSupportedOnOS wraps ErrNotSupported, check for it first. 
+ if errors.Is(err, internal.ErrNotSupportedOnOS) { + return nil, fmt.Errorf("kfuncs are not supported on this platform: %w", err) + } + if err != nil && !errors.Is(err, ErrNotSupported) { + return nil, err + } + + fdArray := make(handles, 0) + defer closeOnError(&fdArray) + + for { + ins := iter.Ins + + metadata := ins.Metadata.Get(kfuncMetaKey{}) + if metadata == nil { + if !iter.Next() { + // break loop if this was the last instruction in the stream. + break + } + continue + } + + // check meta, if no meta return err + kfm, _ := metadata.(*kfuncMeta) + if kfm == nil { + return nil, fmt.Errorf("kfuncMetaKey doesn't contain kfuncMeta") + } + + // findTargetInKernel returns btf.ErrNotFound if the input btf.Spec is nil. + target := btf.Type((*btf.Func)(nil)) + spec, module, err := findTargetInKernel(kfm.Func.Name, &target, cache) + if errors.Is(err, btf.ErrNotFound) { + if kfm.Binding == elf.STB_WEAK { + if ins.IsKfuncCall() { + // If the kfunc call is weak and not found, poison the call. Use a + // recognizable constant to make it easier to debug. + fn, err := asm.BuiltinFuncForPlatform(platform.Native, kfuncCallPoisonBase) + if err != nil { + return nil, err + } + *ins = fn.Call() + } else if ins.OpCode.IsDWordLoad() { + // If the kfunc DWordLoad is weak and not found, set its address to 0. + ins.Constant = 0 + ins.Src = 0 + } else { + return nil, fmt.Errorf("only kfunc calls and dword loads may have kfunc metadata") + } + + iter.Next() + continue + } + + // Error on non-weak kfunc not found. 
+ return nil, fmt.Errorf("kfunc %q: %w", kfm.Func.Name, ErrNotSupported) + } + if err != nil { + return nil, fmt.Errorf("finding kfunc in kernel: %w", err) + } + + idx, err := fdArray.add(module) + if err != nil { + return nil, err + } + + if err := btf.CheckTypeCompatibility(kfm.Func.Type, target.(*btf.Func).Type); err != nil { + return nil, &incompatibleKfuncError{kfm.Func.Name, err} + } + + id, err := spec.TypeID(target) + if err != nil { + return nil, err + } + + ins.Constant = int64(id) + ins.Offset = int16(idx) + + if !iter.Next() { + break + } + } + + return fdArray, nil +} + +type incompatibleKfuncError struct { + name string + err error +} + +func (ike *incompatibleKfuncError) Error() string { + return fmt.Sprintf("kfunc %q: %s", ike.name, ike.err) +} + +// fixupProbeReadKernel replaces calls to bpf_probe_read_{kernel,user}(_str) +// with bpf_probe_read(_str) on kernels that don't support it yet. +func fixupProbeReadKernel(ins *asm.Instruction) { + if !ins.IsBuiltinCall() { + return + } + + // Kernel supports bpf_probe_read_kernel, nothing to do. + if haveProbeReadKernel() == nil { + return + } + + switch asm.BuiltinFunc(ins.Constant) { + case asm.FnProbeReadKernel, asm.FnProbeReadUser: + ins.Constant = int64(asm.FnProbeRead) + case asm.FnProbeReadKernelStr, asm.FnProbeReadUserStr: + ins.Constant = int64(asm.FnProbeReadStr) + } +} + +// resolveKconfigReferences creates and populates a .kconfig map if necessary. +// +// Returns a nil Map and no error if no references exist. 
+func resolveKconfigReferences(insns asm.Instructions) (_ *Map, err error) { + closeOnError := func(c io.Closer) { + if err != nil { + c.Close() + } + } + + var spec *MapSpec + iter := insns.Iterate() + for iter.Next() { + meta, _ := iter.Ins.Metadata.Get(kconfigMetaKey{}).(*kconfigMeta) + if meta != nil { + spec = meta.Map + break + } + } + + if spec == nil { + return nil, nil + } + + cpy := spec.Copy() + if err := resolveKconfig(cpy); err != nil { + return nil, err + } + + kconfig, err := NewMap(cpy) + if err != nil { + return nil, err + } + defer closeOnError(kconfig) + + // Resolve all instructions which load from .kconfig map with actual map + // and offset inside it. + iter = insns.Iterate() + for iter.Next() { + meta, _ := iter.Ins.Metadata.Get(kconfigMetaKey{}).(*kconfigMeta) + if meta == nil { + continue + } + + if meta.Map != spec { + return nil, fmt.Errorf("instruction %d: reference to multiple .kconfig maps is not allowed", iter.Index) + } + + if err := iter.Ins.AssociateMap(kconfig); err != nil { + return nil, fmt.Errorf("instruction %d: %w", iter.Index, err) + } + + // Encode a map read at the offset of the var in the datasec. + iter.Ins.Constant = int64(uint64(meta.Offset) << 32) + iter.Ins.Metadata.Set(kconfigMetaKey{}, nil) + } + + return kconfig, nil +} + +func resolveKsymReferences(insns asm.Instructions) error { + type fixup struct { + *asm.Instruction + *ksymMeta + } + + var symbols map[string]uint64 + var fixups []fixup + + iter := insns.Iterate() + for iter.Next() { + ins := iter.Ins + meta, _ := ins.Metadata.Get(ksymMetaKey{}).(*ksymMeta) + if meta == nil { + continue + } + + if symbols == nil { + symbols = make(map[string]uint64) + } + + symbols[meta.Name] = 0 + fixups = append(fixups, fixup{ + iter.Ins, meta, + }) + } + + if len(symbols) == 0 { + return nil + } + + err := kallsyms.AssignAddresses(symbols) + // Tolerate ErrRestrictedKernel during initial lookup, user may have all weak + // ksyms and a fallback path. 
+ if err != nil && !errors.Is(err, ErrRestrictedKernel) { + return fmt.Errorf("resolve ksyms: %w", err) + } + + var missing []string + for _, fixup := range fixups { + addr := symbols[fixup.Name] + // A weak ksym variable in eBPF C means its resolution is optional. + if addr == 0 && fixup.Binding != elf.STB_WEAK { + if !slices.Contains(missing, fixup.Name) { + missing = append(missing, fixup.Name) + } + continue + } + + fixup.Constant = int64(addr) + } + + if len(missing) > 0 { + if err != nil { + // Program contains required ksyms, return the error from above. + return fmt.Errorf("resolve required ksyms: %s: %w", strings.Join(missing, ","), err) + } + + return fmt.Errorf("kernel is missing symbol: %s", strings.Join(missing, ",")) + } + + return nil +} diff --git a/vendor/github.com/cilium/ebpf/map.go b/vendor/github.com/cilium/ebpf/map.go new file mode 100644 index 000000000..f9499272b --- /dev/null +++ b/vendor/github.com/cilium/ebpf/map.go @@ -0,0 +1,1853 @@ +package ebpf + +import ( + "bytes" + "errors" + "fmt" + "io" + "math/rand" + "os" + "path/filepath" + "reflect" + "slices" + "strings" + "sync" + "time" + "unsafe" + + "github.com/cilium/ebpf/btf" + "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/platform" + "github.com/cilium/ebpf/internal/sys" + "github.com/cilium/ebpf/internal/sysenc" + "github.com/cilium/ebpf/internal/unix" +) + +// Errors returned by Map and MapIterator methods. 
+var ( + ErrKeyNotExist = errors.New("key does not exist") + ErrKeyExist = errors.New("key already exists") + ErrIterationAborted = errors.New("iteration aborted") + ErrMapIncompatible = errors.New("map spec is incompatible with existing map") + errMapNoBTFValue = errors.New("map spec does not contain a BTF Value") + + // pre-allocating these errors here since they may get called in hot code paths + // and cause unnecessary memory allocations + errMapLookupKeyNotExist = fmt.Errorf("lookup: %w", sysErrKeyNotExist) +) + +// MapOptions control loading a map into the kernel. +type MapOptions struct { + // The base path to pin maps in if requested via PinByName. + // Existing maps will be re-used if they are compatible, otherwise an + // error is returned. + PinPath string + LoadPinOptions LoadPinOptions +} + +// MapID represents the unique ID of an eBPF map +type MapID = sys.MapID + +// MapSpec defines a Map. +type MapSpec struct { + // Name is passed to the kernel as a debug aid. + // + // Unsupported characters will be stripped. + Name string + Type MapType + KeySize uint32 + ValueSize uint32 + MaxEntries uint32 + + // Flags is passed to the kernel and specifies additional map + // creation attributes. + Flags uint32 + + // Automatically pin and load a map from MapOptions.PinPath. + // Generates an error if an existing pinned map is incompatible with the MapSpec. + Pinning PinType + + // Specify numa node during map creation + // (effective only if sys.BPF_F_NUMA_NODE flag is set, + // which can be imported from golang.org/x/sys/unix) + NumaNode uint32 + + // The initial contents of the map. May be nil. + Contents []MapKV + + // InnerMap is used as a template for ArrayOfMaps and HashOfMaps + InnerMap *MapSpec + + // MapExtra is an opaque field whose meaning is map-specific. + // + // Available from 5.16. + MapExtra uint64 + + // Extra trailing bytes found in the ELF map definition when using structs + // larger than libbpf's bpf_map_def. 
nil if no trailing bytes were present. + // Must be nil or empty before instantiating the MapSpec into a Map. + Extra *bytes.Reader + + // The key and value type of this map. May be nil. + Key, Value btf.Type + + // Tags is a list of btf_decl_tag attributes set on the map definition. + // + // Decorate a map definition with `__attribute__((btf_decl_tag("foo")))`. + Tags []string +} + +func (ms *MapSpec) String() string { + return fmt.Sprintf("%s(keySize=%d, valueSize=%d, maxEntries=%d, flags=%d)", ms.Type, ms.KeySize, ms.ValueSize, ms.MaxEntries, ms.Flags) +} + +// Copy returns a copy of the spec. +// +// MapSpec.Contents is a shallow copy. +func (ms *MapSpec) Copy() *MapSpec { + if ms == nil { + return nil + } + + cpy := *ms + cpy.Contents = slices.Clone(cpy.Contents) + cpy.Key = btf.Copy(cpy.Key) + cpy.Value = btf.Copy(cpy.Value) + cpy.Tags = slices.Clone(cpy.Tags) + + if cpy.InnerMap == ms { + cpy.InnerMap = &cpy + } else { + cpy.InnerMap = ms.InnerMap.Copy() + } + + if cpy.Extra != nil { + extra := *cpy.Extra + cpy.Extra = &extra + } + + return &cpy +} + +// fixupMagicFields fills fields of MapSpec which are usually +// left empty in ELF or which depend on runtime information. +// +// The method doesn't modify Spec, instead returning a copy. +// The copy is only performed if fixups are necessary, so callers mustn't mutate +// the returned spec. 
+func (spec *MapSpec) fixupMagicFields() (*MapSpec, error) { + switch { + case spec.Type.canStoreMap(): + if spec.ValueSize != 0 && spec.ValueSize != 4 { + return nil, errors.New("ValueSize must be zero or four for map of map") + } + + spec = spec.Copy() + spec.ValueSize = 4 + + case spec.Type == PerfEventArray: + if spec.KeySize != 0 && spec.KeySize != 4 { + return nil, errors.New("KeySize must be zero or four for perf event array") + } + + if spec.ValueSize != 0 && spec.ValueSize != 4 { + return nil, errors.New("ValueSize must be zero or four for perf event array") + } + + spec = spec.Copy() + spec.KeySize = 4 + spec.ValueSize = 4 + + n, err := PossibleCPU() + if err != nil { + return nil, fmt.Errorf("fixup perf event array: %w", err) + } + + if n := uint32(n); spec.MaxEntries == 0 || spec.MaxEntries > n { + // MaxEntries should be zero most of the time, but there is code + // out there which hardcodes large constants. Clamp the number + // of entries to the number of CPUs at most. Allow creating maps with + // less than n items since some kernel selftests relied on this + // behaviour in the past. + spec.MaxEntries = n + } + + case spec.Type == CPUMap: + n, err := PossibleCPU() + if err != nil { + return nil, fmt.Errorf("fixup cpu map: %w", err) + } + + if n := uint32(n); spec.MaxEntries == 0 || spec.MaxEntries > n { + // Perform clamping similar to PerfEventArray. + spec.MaxEntries = n + } + } + + return spec, nil +} + +// dataSection returns the contents and BTF Datasec descriptor of the spec. 
+func (ms *MapSpec) dataSection() ([]byte, *btf.Datasec, error) { + if ms.Value == nil { + return nil, nil, errMapNoBTFValue + } + + ds, ok := ms.Value.(*btf.Datasec) + if !ok { + return nil, nil, fmt.Errorf("map value BTF is a %T, not a *btf.Datasec", ms.Value) + } + + if n := len(ms.Contents); n != 1 { + return nil, nil, fmt.Errorf("expected one key, found %d", n) + } + + kv := ms.Contents[0] + value, ok := kv.Value.([]byte) + if !ok { + return nil, nil, fmt.Errorf("value at first map key is %T, not []byte", kv.Value) + } + + return value, ds, nil +} + +func (ms *MapSpec) readOnly() bool { + return (ms.Flags & sys.BPF_F_RDONLY_PROG) > 0 +} + +func (ms *MapSpec) writeOnly() bool { + return (ms.Flags & sys.BPF_F_WRONLY_PROG) > 0 +} + +// MapKV is used to initialize the contents of a Map. +type MapKV struct { + Key interface{} + Value interface{} +} + +// Compatible returns nil if an existing map may be used instead of creating +// one from the spec. +// +// Returns an error wrapping [ErrMapIncompatible] otherwise. +func (ms *MapSpec) Compatible(m *Map) error { + ms, err := ms.fixupMagicFields() + if err != nil { + return err + } + + diffs := []string{} + if m.typ != ms.Type { + diffs = append(diffs, fmt.Sprintf("Type: %s changed to %s", m.typ, ms.Type)) + } + if m.keySize != ms.KeySize { + diffs = append(diffs, fmt.Sprintf("KeySize: %d changed to %d", m.keySize, ms.KeySize)) + } + if m.valueSize != ms.ValueSize { + diffs = append(diffs, fmt.Sprintf("ValueSize: %d changed to %d", m.valueSize, ms.ValueSize)) + } + if m.maxEntries != ms.MaxEntries { + diffs = append(diffs, fmt.Sprintf("MaxEntries: %d changed to %d", m.maxEntries, ms.MaxEntries)) + } + + flags := ms.Flags + if ms.Type == DevMap || ms.Type == DevMapHash { + // As of 0cdbb4b09a06 ("devmap: Allow map lookups from eBPF") + // BPF_F_RDONLY_PROG is set unconditionally for devmaps. Explicitly + // allow this mismatch. 
+ flags |= (m.flags & sys.BPF_F_RDONLY_PROG) + } + + if m.flags != flags { + diffs = append(diffs, fmt.Sprintf("Flags: %d changed to %d", m.flags, flags)) + } + + if len(diffs) == 0 { + return nil + } + + return fmt.Errorf("%s: %w", strings.Join(diffs, ", "), ErrMapIncompatible) +} + +// Map represents a Map file descriptor. +// +// It is not safe to close a map which is used by other goroutines. +// +// Methods which take interface{} arguments by default encode +// them using binary.Read/Write in the machine's native endianness. +// +// Implement encoding.BinaryMarshaler or encoding.BinaryUnmarshaler +// if you require custom encoding. +type Map struct { + name string + fd *sys.FD + typ MapType + keySize uint32 + valueSize uint32 + maxEntries uint32 + flags uint32 + pinnedPath string + // Per CPU maps return values larger than the size in the spec + fullValueSize int + + memory *Memory +} + +// NewMapFromFD creates a [Map] around a raw fd. +// +// You should not use fd after calling this function. +// +// Requires at least Linux 4.13. +func NewMapFromFD(fd int) (*Map, error) { + f, err := sys.NewFD(fd) + if err != nil { + return nil, err + } + + return newMapFromFD(f) +} + +func newMapFromFD(fd *sys.FD) (*Map, error) { + info, err := minimalMapInfoFromFd(fd) + if err != nil { + fd.Close() + return nil, fmt.Errorf("get map info: %w", err) + } + + return newMapFromParts(fd, info.Name, info.Type, info.KeySize, info.ValueSize, info.MaxEntries, info.Flags) +} + +// NewMap creates a new Map. +// +// It's equivalent to calling NewMapWithOptions with default options. +func NewMap(spec *MapSpec) (*Map, error) { + return NewMapWithOptions(spec, MapOptions{}) +} + +// NewMapWithOptions creates a new Map. +// +// Creating a map for the first time will perform feature detection +// by creating small, temporary maps. +// +// The caller is responsible for ensuring the process' rlimit is set +// sufficiently high for locking memory during map creation. 
This can be done +// by calling rlimit.RemoveMemlock() prior to calling NewMapWithOptions. +// +// May return an error wrapping ErrMapIncompatible. +func NewMapWithOptions(spec *MapSpec, opts MapOptions) (*Map, error) { + m, err := newMapWithOptions(spec, opts, btf.NewCache()) + if err != nil { + return nil, fmt.Errorf("creating map: %w", err) + } + + if err := m.finalize(spec); err != nil { + m.Close() + return nil, fmt.Errorf("populating map: %w", err) + } + + return m, nil +} + +func newMapWithOptions(spec *MapSpec, opts MapOptions, c *btf.Cache) (_ *Map, err error) { + closeOnError := func(c io.Closer) { + if err != nil { + c.Close() + } + } + + switch spec.Pinning { + case PinByName: + if spec.Name == "" { + return nil, fmt.Errorf("pin by name: missing Name") + } + + if opts.PinPath == "" { + return nil, fmt.Errorf("pin by name: missing MapOptions.PinPath") + } + + path := filepath.Join(opts.PinPath, spec.Name) + m, err := LoadPinnedMap(path, &opts.LoadPinOptions) + if errors.Is(err, unix.ENOENT) { + break + } + if err != nil { + return nil, fmt.Errorf("load pinned map: %w", err) + } + defer closeOnError(m) + + if err := spec.Compatible(m); err != nil { + return nil, fmt.Errorf("use pinned map %s: %w", spec.Name, err) + } + + return m, nil + + case PinNone: + // Nothing to do here + + default: + return nil, fmt.Errorf("pin type %d: %w", int(spec.Pinning), ErrNotSupported) + } + + var innerFd *sys.FD + if spec.Type.canStoreMap() { + if spec.InnerMap == nil { + return nil, fmt.Errorf("%s requires InnerMap", spec.Type) + } + + if spec.InnerMap.Pinning != PinNone { + return nil, errors.New("inner maps cannot be pinned") + } + + template, err := spec.InnerMap.createMap(nil, c) + if err != nil { + return nil, fmt.Errorf("inner map: %w", err) + } + defer template.Close() + + // Intentionally skip populating and freezing (finalizing) + // the inner map template since it will be removed shortly. 
+ + innerFd = template.fd + } + + m, err := spec.createMap(innerFd, c) + if err != nil { + return nil, err + } + defer closeOnError(m) + + if spec.Pinning == PinByName { + path := filepath.Join(opts.PinPath, spec.Name) + if err := m.Pin(path); err != nil { + return nil, fmt.Errorf("pin map to %s: %w", path, err) + } + } + + return m, nil +} + +// Memory returns a memory-mapped region for the Map. The Map must have been +// created with the BPF_F_MMAPABLE flag. Repeated calls to Memory return the +// same mapping. Callers are responsible for coordinating access to Memory. +func (m *Map) Memory() (*Memory, error) { + if m.memory != nil { + return m.memory, nil + } + + if m.flags&sys.BPF_F_MMAPABLE == 0 { + return nil, fmt.Errorf("Map was not created with the BPF_F_MMAPABLE flag: %w", ErrNotSupported) + } + + size, err := m.memorySize() + if err != nil { + return nil, err + } + + mm, err := newMemory(m.FD(), size) + if err != nil { + return nil, fmt.Errorf("creating new Memory: %w", err) + } + + m.memory = mm + + return mm, nil +} + +// unsafeMemory returns a heap-mapped memory region for the Map. The Map must +// have been created with the BPF_F_MMAPABLE flag. Repeated calls to Memory +// return the same mapping. Callers are responsible for coordinating access to +// Memory. 
+func (m *Map) unsafeMemory() (*Memory, error) { + if m.memory != nil { + if !m.memory.heap { + return nil, errors.New("unsafeMemory would return existing non-heap memory") + } + + return m.memory, nil + } + + if m.flags&sys.BPF_F_MMAPABLE == 0 { + return nil, fmt.Errorf("Map was not created with the BPF_F_MMAPABLE flag: %w", ErrNotSupported) + } + + size, err := m.memorySize() + if err != nil { + return nil, err + } + + mm, err := newUnsafeMemory(m.FD(), size) + if err != nil { + return nil, fmt.Errorf("creating new Memory: %w", err) + } + + m.memory = mm + + return mm, nil +} + +func (m *Map) memorySize() (int, error) { + switch m.Type() { + case Array: + // In Arrays, values are always laid out on 8-byte boundaries regardless of + // architecture. Multiply by MaxEntries and align the result to the host's + // page size. + size := int(internal.Align(m.ValueSize(), 8) * m.MaxEntries()) + size = internal.Align(size, os.Getpagesize()) + return size, nil + case Arena: + // For Arenas, MaxEntries denotes the maximum number of pages available to + // the arena. + return int(m.MaxEntries()) * os.Getpagesize(), nil + } + + return 0, fmt.Errorf("determine memory size of map type %s: %w", m.Type(), ErrNotSupported) +} + +// createMap validates the spec's properties and creates the map in the kernel +// using the given opts. It does not populate or freeze the map. +func (spec *MapSpec) createMap(inner *sys.FD, c *btf.Cache) (_ *Map, err error) { + closeOnError := func(closer io.Closer) { + if err != nil { + closer.Close() + } + } + + // Kernels 4.13 through 5.4 used a struct bpf_map_def that contained + // additional 'inner_map_idx' and later 'numa_node' fields. + // In order to support loading these definitions, tolerate the presence of + // extra bytes, but require them to be zeroes. 
+ if spec.Extra != nil { + if _, err := io.Copy(internal.DiscardZeroes{}, spec.Extra); err != nil { + return nil, errors.New("extra contains unhandled non-zero bytes, drain before creating map") + } + } + + spec, err = spec.fixupMagicFields() + if err != nil { + return nil, err + } + + p, sysMapType := platform.DecodeConstant(spec.Type) + if p != platform.Native { + return nil, fmt.Errorf("map type %s (%s): %w", spec.Type, p, internal.ErrNotSupportedOnOS) + } + + attr := sys.MapCreateAttr{ + MapName: maybeFillObjName(spec.Name), + MapType: sys.MapType(sysMapType), + KeySize: spec.KeySize, + ValueSize: spec.ValueSize, + MaxEntries: spec.MaxEntries, + MapFlags: spec.Flags, + NumaNode: spec.NumaNode, + MapExtra: spec.MapExtra, + } + + if inner != nil { + attr.InnerMapFd = inner.Uint() + } + + if spec.Key != nil || spec.Value != nil { + handle, keyTypeID, valueTypeID, err := btf.MarshalMapKV(spec.Key, spec.Value) + if err != nil && !errors.Is(err, btf.ErrNotSupported) { + return nil, fmt.Errorf("load BTF: %w", err) + } + + if handle != nil { + defer handle.Close() + + // Use BTF k/v during map creation. 
+ attr.BtfFd = uint32(handle.FD()) + attr.BtfKeyTypeId = keyTypeID + attr.BtfValueTypeId = valueTypeID + } + + if spec.Type == StructOpsMap { + if handle == nil { + return nil, fmt.Errorf("struct_ops requires BTF") + } + + localValue, ok := btf.As[*btf.Struct](spec.Value) + if !ok { + return nil, fmt.Errorf("struct_ops: value must be struct") + } + + targetValue, targetID, module, err := structOpsFindTarget(localValue, c) + if err != nil { + return nil, fmt.Errorf("struct_ops: %w", err) + } + defer module.Close() + + spec = spec.Copy() + spec.ValueSize = targetValue.Size + + attr.ValueSize = targetValue.Size + attr.BtfVmlinuxValueTypeId = targetID + + if module != nil { + // BPF_F_VTYPE_BTF_OBJ_FD is required if the type comes from a module + attr.MapFlags |= sys.BPF_F_VTYPE_BTF_OBJ_FD + // set FD for the kernel module + attr.ValueTypeBtfObjFd = int32(module.FD()) + } + + // StructOpsMap forbids passing BtfKeyTypeId or BtfValueTypeId, but + // requires BtfFd. Do the simple thing and just zero out the fields. + // See https://github.com/torvalds/linux/blob/9b332cece987ee1790b2ed4c989e28162fa47860/kernel/bpf/syscall.c#L1382-L1384 + attr.BtfKeyTypeId = 0 + attr.BtfValueTypeId = 0 + } + } + + fd, err := sys.MapCreate(&attr) + + // Some map types don't support BTF k/v in earlier kernel versions. + // Remove BTF metadata and retry map creation. 
+ if (errors.Is(err, sys.ENOTSUPP) || errors.Is(err, unix.EINVAL)) && attr.BtfFd != 0 { + attr.BtfFd, attr.BtfKeyTypeId, attr.BtfValueTypeId = 0, 0, 0 + fd, err = sys.MapCreate(&attr) + } + if err != nil { + return nil, handleMapCreateError(attr, spec, err) + } + + defer closeOnError(fd) + m, err := newMapFromParts(fd, spec.Name, spec.Type, spec.KeySize, spec.ValueSize, spec.MaxEntries, spec.Flags) + if err != nil { + return nil, fmt.Errorf("map create: %w", err) + } + return m, nil +} + +func handleMapCreateError(attr sys.MapCreateAttr, spec *MapSpec, err error) error { + if platform.IsWindows { + if errors.Is(err, unix.EINVAL) && attr.MapFlags != 0 { + return fmt.Errorf("map create: flags: %w", internal.ErrNotSupportedOnOS) + } + + return err + } + + if errors.Is(err, unix.EPERM) { + return fmt.Errorf("map create: %w (MEMLOCK may be too low, consider rlimit.RemoveMemlock)", err) + } + if errors.Is(err, unix.EINVAL) { + if spec.MaxEntries == 0 { + return fmt.Errorf("map create: %w (MaxEntries may be incorrectly set to zero)", err) + } + if spec.Type == UnspecifiedMap { + return fmt.Errorf("map create: cannot use type %s", UnspecifiedMap) + } + if spec.Flags&sys.BPF_F_NO_PREALLOC != 0 && !spec.Type.mustHaveNoPrealloc() { + return fmt.Errorf("map create: %w (BPF_F_NO_PREALLOC flag may be incompatible with map type %s)", err, spec.Type) + } + if spec.Flags&sys.BPF_F_NO_PREALLOC == 0 && spec.Type.mustHaveNoPrealloc() { + return fmt.Errorf("map create: %w (BPF_F_NO_PREALLOC flag may need to be set for map type %s)", err, spec.Type) + } + } + + if spec.Type.canStoreMap() { + if haveFeatErr := haveNestedMaps(); haveFeatErr != nil { + return fmt.Errorf("map create: %w", haveFeatErr) + } + } + + if spec.readOnly() || spec.writeOnly() { + if haveFeatErr := haveMapMutabilityModifiers(); haveFeatErr != nil { + return fmt.Errorf("map create: %w", haveFeatErr) + } + } + if spec.Flags&sys.BPF_F_MMAPABLE > 0 { + if haveFeatErr := haveMmapableMaps(); haveFeatErr != nil { + return 
fmt.Errorf("map create: %w", haveFeatErr) + } + } + if spec.Flags&sys.BPF_F_INNER_MAP > 0 { + if haveFeatErr := haveInnerMaps(); haveFeatErr != nil { + return fmt.Errorf("map create: %w", haveFeatErr) + } + } + if spec.Flags&sys.BPF_F_NO_PREALLOC > 0 { + if haveFeatErr := haveNoPreallocMaps(); haveFeatErr != nil { + return fmt.Errorf("map create: %w", haveFeatErr) + } + } + // BPF_MAP_TYPE_RINGBUF's max_entries must be a power-of-2 multiple of kernel's page size. + if errors.Is(err, unix.EINVAL) && + (attr.MapType == sys.BPF_MAP_TYPE_RINGBUF || attr.MapType == sys.BPF_MAP_TYPE_USER_RINGBUF) { + pageSize := uint32(os.Getpagesize()) + maxEntries := attr.MaxEntries + if maxEntries%pageSize != 0 || !internal.IsPow(maxEntries) { + return fmt.Errorf("map create: %w (ring map size %d not a multiple of page size %d)", err, maxEntries, pageSize) + } + } + + return fmt.Errorf("map create: %w", err) +} + +// newMapFromParts allocates and returns a new Map structure. +// Sets the fullValueSize on per-CPU maps. +func newMapFromParts(fd *sys.FD, name string, typ MapType, keySize, valueSize, maxEntries, flags uint32) (*Map, error) { + m := &Map{ + name, + fd, + typ, + keySize, + valueSize, + maxEntries, + flags, + "", + int(valueSize), + nil, + } + + if !typ.hasPerCPUValue() { + return m, nil + } + + possibleCPUs, err := PossibleCPU() + if err != nil { + return nil, err + } + + m.fullValueSize = int(internal.Align(valueSize, 8)) * possibleCPUs + return m, nil +} + +func (m *Map) String() string { + if m.name != "" { + return fmt.Sprintf("%s(%s)#%v", m.typ, m.name, m.fd) + } + return fmt.Sprintf("%s#%v", m.typ, m.fd) +} + +// Type returns the underlying type of the map. +func (m *Map) Type() MapType { + return m.typ +} + +// KeySize returns the size of the map key in bytes. +func (m *Map) KeySize() uint32 { + return m.keySize +} + +// ValueSize returns the size of the map value in bytes. 
+func (m *Map) ValueSize() uint32 { + return m.valueSize +} + +// MaxEntries returns the maximum number of elements the map can hold. +func (m *Map) MaxEntries() uint32 { + return m.maxEntries +} + +// Flags returns the flags of the map. +func (m *Map) Flags() uint32 { + return m.flags +} + +// Info returns metadata about the map. This was first introduced in Linux 4.5, +// but newer kernels support more MapInfo fields with the introduction of more +// features. See [MapInfo] and its methods for more details. +// +// Returns an error wrapping [ErrNotSupported] if the kernel supports neither +// BPF_OBJ_GET_INFO_BY_FD nor reading map information from /proc/self/fdinfo. +func (m *Map) Info() (*MapInfo, error) { + return newMapInfoFromFd(m.fd) +} + +// Handle returns a reference to the Map's type information in the kernel. +// +// Returns [ErrNotSupported] if the kernel has no BTF support, or if there is no +// BTF associated with the Map. +func (m *Map) Handle() (*btf.Handle, error) { + info, err := m.Info() + if err != nil { + return nil, err + } + + id, ok := info.BTFID() + if !ok { + return nil, fmt.Errorf("map %s: retrieve BTF ID: %w", m, ErrNotSupported) + } + + return btf.NewHandleFromID(id) +} + +// MapLookupFlags controls the behaviour of the map lookup calls. +type MapLookupFlags uint64 + +// LookupLock look up the value of a spin-locked map. +const LookupLock MapLookupFlags = sys.BPF_F_LOCK + +// Lookup retrieves a value from a Map. +// +// Calls Close() on valueOut if it is of type **Map or **Program, +// and *valueOut is not nil. +// +// Returns an error if the key doesn't exist, see ErrKeyNotExist. +func (m *Map) Lookup(key, valueOut interface{}) error { + return m.LookupWithFlags(key, valueOut, 0) +} + +// LookupWithFlags retrieves a value from a Map with flags. +// +// Passing LookupLock flag will look up the value of a spin-locked +// map without returning the lock. This must be specified if the +// elements contain a spinlock. 
+// +// Calls Close() on valueOut if it is of type **Map or **Program, +// and *valueOut is not nil. +// +// Returns an error if the key doesn't exist, see ErrKeyNotExist. +func (m *Map) LookupWithFlags(key, valueOut interface{}, flags MapLookupFlags) error { + if m.typ.hasPerCPUValue() { + return m.lookupPerCPU(key, valueOut, flags) + } + + valueBytes := makeMapSyscallOutput(valueOut, m.fullValueSize) + if err := m.lookup(key, valueBytes.Pointer(), flags); err != nil { + return err + } + + return m.unmarshalValue(valueOut, valueBytes) +} + +// LookupAndDelete retrieves and deletes a value from a Map. +// +// Returns ErrKeyNotExist if the key doesn't exist. +func (m *Map) LookupAndDelete(key, valueOut interface{}) error { + return m.LookupAndDeleteWithFlags(key, valueOut, 0) +} + +// LookupAndDeleteWithFlags retrieves and deletes a value from a Map. +// +// Passing LookupLock flag will look up and delete the value of a spin-locked +// map without returning the lock. This must be specified if the elements +// contain a spinlock. +// +// Returns ErrKeyNotExist if the key doesn't exist. +func (m *Map) LookupAndDeleteWithFlags(key, valueOut interface{}, flags MapLookupFlags) error { + if m.typ.hasPerCPUValue() { + return m.lookupAndDeletePerCPU(key, valueOut, flags) + } + + valueBytes := makeMapSyscallOutput(valueOut, m.fullValueSize) + if err := m.lookupAndDelete(key, valueBytes.Pointer(), flags); err != nil { + return err + } + return m.unmarshalValue(valueOut, valueBytes) +} + +// LookupBytes gets a value from Map. +// +// Returns a nil value if a key doesn't exist. 
+func (m *Map) LookupBytes(key interface{}) ([]byte, error) { + valueBytes := make([]byte, m.fullValueSize) + valuePtr := sys.UnsafeSlicePointer(valueBytes) + + err := m.lookup(key, valuePtr, 0) + if errors.Is(err, ErrKeyNotExist) { + return nil, nil + } + + return valueBytes, err +} + +func (m *Map) lookupPerCPU(key, valueOut any, flags MapLookupFlags) error { + slice, err := ensurePerCPUSlice(valueOut) + if err != nil { + return err + } + valueBytes := make([]byte, m.fullValueSize) + if err := m.lookup(key, sys.UnsafeSlicePointer(valueBytes), flags); err != nil { + return err + } + return unmarshalPerCPUValue(slice, int(m.valueSize), valueBytes) +} + +func (m *Map) lookup(key interface{}, valueOut sys.Pointer, flags MapLookupFlags) error { + keyPtr, err := m.marshalKey(key) + if err != nil { + return fmt.Errorf("can't marshal key: %w", err) + } + + attr := sys.MapLookupElemAttr{ + MapFd: m.fd.Uint(), + Key: keyPtr, + Value: valueOut, + Flags: uint64(flags), + } + + if err = sys.MapLookupElem(&attr); err != nil { + if errors.Is(err, unix.ENOENT) { + return errMapLookupKeyNotExist + } + return fmt.Errorf("lookup: %w", wrapMapError(err)) + } + return nil +} + +func (m *Map) lookupAndDeletePerCPU(key, valueOut any, flags MapLookupFlags) error { + slice, err := ensurePerCPUSlice(valueOut) + if err != nil { + return err + } + valueBytes := make([]byte, m.fullValueSize) + if err := m.lookupAndDelete(key, sys.UnsafeSlicePointer(valueBytes), flags); err != nil { + return err + } + return unmarshalPerCPUValue(slice, int(m.valueSize), valueBytes) +} + +// ensurePerCPUSlice allocates a slice for a per-CPU value if necessary. +func ensurePerCPUSlice(sliceOrPtr any) (any, error) { + sliceOrPtrType := reflect.TypeOf(sliceOrPtr) + if sliceOrPtrType.Kind() == reflect.Slice { + // The target is a slice, the caller is responsible for ensuring that + // size is correct. 
+ return sliceOrPtr, nil + } + + slicePtrType := sliceOrPtrType + if slicePtrType.Kind() != reflect.Ptr || slicePtrType.Elem().Kind() != reflect.Slice { + return nil, fmt.Errorf("per-cpu value requires a slice or a pointer to slice") + } + + possibleCPUs, err := PossibleCPU() + if err != nil { + return nil, err + } + + sliceType := slicePtrType.Elem() + slice := reflect.MakeSlice(sliceType, possibleCPUs, possibleCPUs) + + sliceElemType := sliceType.Elem() + sliceElemIsPointer := sliceElemType.Kind() == reflect.Ptr + reflect.ValueOf(sliceOrPtr).Elem().Set(slice) + if !sliceElemIsPointer { + return slice.Interface(), nil + } + sliceElemType = sliceElemType.Elem() + + for i := 0; i < possibleCPUs; i++ { + newElem := reflect.New(sliceElemType) + slice.Index(i).Set(newElem) + } + + return slice.Interface(), nil +} + +func (m *Map) lookupAndDelete(key any, valuePtr sys.Pointer, flags MapLookupFlags) error { + keyPtr, err := m.marshalKey(key) + if err != nil { + return fmt.Errorf("can't marshal key: %w", err) + } + + attr := sys.MapLookupAndDeleteElemAttr{ + MapFd: m.fd.Uint(), + Key: keyPtr, + Value: valuePtr, + Flags: uint64(flags), + } + + if err := sys.MapLookupAndDeleteElem(&attr); err != nil { + return fmt.Errorf("lookup and delete: %w", wrapMapError(err)) + } + + return nil +} + +// MapUpdateFlags controls the behaviour of the Map.Update call. +// +// The exact semantics depend on the specific MapType. +type MapUpdateFlags uint64 + +const ( + // UpdateAny creates a new element or update an existing one. + UpdateAny MapUpdateFlags = iota + // UpdateNoExist creates a new element. + UpdateNoExist MapUpdateFlags = 1 << (iota - 1) + // UpdateExist updates an existing element. + UpdateExist + // UpdateLock updates elements under bpf_spin_lock. + UpdateLock +) + +// Put replaces or creates a value in map. +// +// It is equivalent to calling Update with UpdateAny. 
+func (m *Map) Put(key, value interface{}) error { + return m.Update(key, value, UpdateAny) +} + +// Update changes the value of a key. +func (m *Map) Update(key, value any, flags MapUpdateFlags) error { + if m.typ.hasPerCPUValue() { + return m.updatePerCPU(key, value, flags) + } + + valuePtr, err := m.marshalValue(value) + if err != nil { + return fmt.Errorf("marshal value: %w", err) + } + + return m.update(key, valuePtr, flags) +} + +func (m *Map) updatePerCPU(key, value any, flags MapUpdateFlags) error { + valuePtr, err := marshalPerCPUValue(value, int(m.valueSize)) + if err != nil { + return fmt.Errorf("marshal value: %w", err) + } + + return m.update(key, valuePtr, flags) +} + +func (m *Map) update(key any, valuePtr sys.Pointer, flags MapUpdateFlags) error { + keyPtr, err := m.marshalKey(key) + if err != nil { + return fmt.Errorf("marshal key: %w", err) + } + + attr := sys.MapUpdateElemAttr{ + MapFd: m.fd.Uint(), + Key: keyPtr, + Value: valuePtr, + Flags: uint64(flags), + } + + if err = sys.MapUpdateElem(&attr); err != nil { + return fmt.Errorf("update: %w", wrapMapError(err)) + } + + return nil +} + +// Delete removes a value. +// +// Returns ErrKeyNotExist if the key does not exist. +func (m *Map) Delete(key interface{}) error { + keyPtr, err := m.marshalKey(key) + if err != nil { + return fmt.Errorf("can't marshal key: %w", err) + } + + attr := sys.MapDeleteElemAttr{ + MapFd: m.fd.Uint(), + Key: keyPtr, + } + + if err = sys.MapDeleteElem(&attr); err != nil { + return fmt.Errorf("delete: %w", wrapMapError(err)) + } + return nil +} + +// NextKey finds the key following an initial key. +// +// See NextKeyBytes for details. +// +// Returns ErrKeyNotExist if there is no next key. 
+func (m *Map) NextKey(key, nextKeyOut interface{}) error { + nextKeyBytes := makeMapSyscallOutput(nextKeyOut, int(m.keySize)) + + if err := m.nextKey(key, nextKeyBytes.Pointer()); err != nil { + return err + } + + if err := nextKeyBytes.Unmarshal(nextKeyOut); err != nil { + return fmt.Errorf("can't unmarshal next key: %w", err) + } + return nil +} + +// NextKeyBytes returns the key following an initial key as a byte slice. +// +// Passing nil will return the first key. +// +// Use Iterate if you want to traverse all entries in the map. +// +// Returns nil if there are no more keys. +func (m *Map) NextKeyBytes(key interface{}) ([]byte, error) { + nextKey := make([]byte, m.keySize) + nextKeyPtr := sys.UnsafeSlicePointer(nextKey) + + err := m.nextKey(key, nextKeyPtr) + if errors.Is(err, ErrKeyNotExist) { + return nil, nil + } + + return nextKey, err +} + +func (m *Map) nextKey(key interface{}, nextKeyOut sys.Pointer) error { + var ( + keyPtr sys.Pointer + err error + ) + + if key != nil { + keyPtr, err = m.marshalKey(key) + if err != nil { + return fmt.Errorf("can't marshal key: %w", err) + } + } + + attr := sys.MapGetNextKeyAttr{ + MapFd: m.fd.Uint(), + Key: keyPtr, + NextKey: nextKeyOut, + } + + if err = sys.MapGetNextKey(&attr); err != nil { + // Kernels 4.4.131 and earlier return EFAULT instead of a pointer to the + // first map element when a nil key pointer is specified. + if platform.IsLinux && key == nil && errors.Is(err, unix.EFAULT) { + var guessKey []byte + guessKey, err = m.guessNonExistentKey() + if err != nil { + return err + } + + // Retry the syscall with a valid non-existing key. 
+ attr.Key = sys.UnsafeSlicePointer(guessKey) + if err = sys.MapGetNextKey(&attr); err == nil { + return nil + } + } + + return fmt.Errorf("next key: %w", wrapMapError(err)) + } + + return nil +} + +var mmapProtectedPage = sync.OnceValues(func() ([]byte, error) { + return unix.Mmap(-1, 0, os.Getpagesize(), unix.PROT_NONE, unix.MAP_ANON|unix.MAP_SHARED) +}) + +// guessNonExistentKey attempts to perform a map lookup that returns ENOENT. +// This is necessary on kernels before 4.4.132, since those don't support +// iterating maps from the start by providing an invalid key pointer. +func (m *Map) guessNonExistentKey() ([]byte, error) { + // Map a protected page and use that as the value pointer. This saves some + // work copying out the value, which we're not interested in. + page, err := mmapProtectedPage() + if err != nil { + return nil, err + } + valuePtr := sys.UnsafeSlicePointer(page) + + randKey := make([]byte, int(m.keySize)) + + for i := 0; i < 4; i++ { + switch i { + // For hash maps, the 0 key is less likely to be occupied. They're often + // used for storing data related to pointers, and their access pattern is + // generally scattered across the keyspace. + case 0: + // An all-0xff key is guaranteed to be out of bounds of any array, since + // those have a fixed key size of 4 bytes. The only corner case being + // arrays with 2^32 max entries, but those are prohibitively expensive + // in many environments. + case 1: + for r := range randKey { + randKey[r] = 0xff + } + // Inspired by BCC, 0x55 is an alternating binary pattern (0101), so + // is unlikely to be taken. + case 2: + for r := range randKey { + randKey[r] = 0x55 + } + // Last ditch effort, generate a random key. 
+ case 3: + rand.New(rand.NewSource(time.Now().UnixNano())).Read(randKey) + } + + err := m.lookup(randKey, valuePtr, 0) + if errors.Is(err, ErrKeyNotExist) { + return randKey, nil + } + } + + return nil, errors.New("couldn't find non-existing key") +} + +// BatchLookup looks up many elements in a map at once. +// +// "keysOut" and "valuesOut" must be of type slice, a pointer +// to a slice or buffer will not work. +// "cursor" is an pointer to an opaque handle. It must be non-nil. Pass +// "cursor" to subsequent calls of this function to continue the batching +// operation in the case of chunking. +// +// Warning: This API is not very safe to use as the kernel implementation for +// batching relies on the user to be aware of subtle details with regarding to +// different map type implementations. +// +// ErrKeyNotExist is returned when the batch lookup has reached +// the end of all possible results, even when partial results +// are returned. It should be used to evaluate when lookup is "done". +func (m *Map) BatchLookup(cursor *MapBatchCursor, keysOut, valuesOut interface{}, opts *BatchOptions) (int, error) { + n, err := m.batchLookup(sys.BPF_MAP_LOOKUP_BATCH, cursor, keysOut, valuesOut, opts) + if err != nil { + return n, fmt.Errorf("map batch lookup: %w", err) + } + return n, nil +} + +// BatchLookupAndDelete looks up many elements in a map at once, +// +// It then deletes all those elements. +// "keysOut" and "valuesOut" must be of type slice, a pointer +// to a slice or buffer will not work. +// "cursor" is an pointer to an opaque handle. It must be non-nil. Pass +// "cursor" to subsequent calls of this function to continue the batching +// operation in the case of chunking. +// +// Warning: This API is not very safe to use as the kernel implementation for +// batching relies on the user to be aware of subtle details with regarding to +// different map type implementations. 
+// +// ErrKeyNotExist is returned when the batch lookup has reached +// the end of all possible results, even when partial results +// are returned. It should be used to evaluate when lookup is "done". +func (m *Map) BatchLookupAndDelete(cursor *MapBatchCursor, keysOut, valuesOut interface{}, opts *BatchOptions) (int, error) { + n, err := m.batchLookup(sys.BPF_MAP_LOOKUP_AND_DELETE_BATCH, cursor, keysOut, valuesOut, opts) + if err != nil { + return n, fmt.Errorf("map batch lookup and delete: %w", err) + } + return n, nil +} + +// MapBatchCursor represents a starting point for a batch operation. +type MapBatchCursor struct { + m *Map + opaque []byte +} + +func (m *Map) batchLookup(cmd sys.Cmd, cursor *MapBatchCursor, keysOut, valuesOut interface{}, opts *BatchOptions) (int, error) { + if m.typ.hasPerCPUValue() { + return m.batchLookupPerCPU(cmd, cursor, keysOut, valuesOut, opts) + } + + count, err := batchCount(keysOut, valuesOut) + if err != nil { + return 0, err + } + + valueBuf := sysenc.SyscallOutput(valuesOut, count*int(m.fullValueSize)) + + n, sysErr := m.batchLookupCmd(cmd, cursor, count, keysOut, valueBuf.Pointer(), opts) + if errors.Is(sysErr, unix.ENOSPC) { + // Hash tables return ENOSPC when the size of the batch is smaller than + // any bucket. 
+ return n, fmt.Errorf("%w (batch size too small?)", sysErr) + } else if sysErr != nil && !errors.Is(sysErr, unix.ENOENT) { + return 0, sysErr + } + + err = valueBuf.Unmarshal(valuesOut) + if err != nil { + return 0, err + } + + return n, sysErr +} + +func (m *Map) batchLookupPerCPU(cmd sys.Cmd, cursor *MapBatchCursor, keysOut, valuesOut interface{}, opts *BatchOptions) (int, error) { + count, err := sliceLen(keysOut) + if err != nil { + return 0, fmt.Errorf("keys: %w", err) + } + + valueBuf := sysenc.SyscallOutput(valuesOut, count*int(m.fullValueSize)) + + n, sysErr := m.batchLookupCmd(cmd, cursor, count, keysOut, valueBuf.Pointer(), opts) + if sysErr != nil && !errors.Is(sysErr, unix.ENOENT) { + return 0, sysErr + } + + if bytesBuf := valueBuf.Bytes(); bytesBuf != nil { + err = unmarshalBatchPerCPUValue(valuesOut, count, int(m.valueSize), bytesBuf) + if err != nil { + return 0, err + } + } + + return n, sysErr +} + +func (m *Map) batchLookupCmd(cmd sys.Cmd, cursor *MapBatchCursor, count int, keysOut any, valuePtr sys.Pointer, opts *BatchOptions) (int, error) { + // * generic_map_lookup_batch requires that batch_out is key_size bytes. + // This is used by array and LPM maps. + // + // * __htab_map_lookup_and_delete_batch requires u32. This is used by the + // various hash maps. + // + // Use a minimum of 4 bytes to avoid having to distinguish between the two. + cursorLen := max(int(m.keySize), 4) + + inBatch := cursor.opaque + if inBatch == nil { + // This is the first lookup, allocate a buffer to hold the cursor. + cursor.opaque = make([]byte, cursorLen) + cursor.m = m + } else if cursor.m != m { + // Prevent reuse of a cursor across maps. First, it's unlikely to work. + // Second, the maps may require different cursorLen and cursor.opaque + // may therefore be too short. This could lead to the kernel clobbering + // user space memory. 
+ return 0, errors.New("a cursor may not be reused across maps") + } + + if err := haveBatchAPI(); err != nil { + return 0, err + } + + keyBuf := sysenc.SyscallOutput(keysOut, count*int(m.keySize)) + + attr := sys.MapLookupBatchAttr{ + MapFd: m.fd.Uint(), + Keys: keyBuf.Pointer(), + Values: valuePtr, + Count: uint32(count), + InBatch: sys.UnsafeSlicePointer(inBatch), + OutBatch: sys.UnsafeSlicePointer(cursor.opaque), + } + + if opts != nil { + attr.ElemFlags = opts.ElemFlags + attr.Flags = opts.Flags + } + + _, sysErr := sys.BPF(cmd, unsafe.Pointer(&attr), unsafe.Sizeof(attr)) + sysErr = wrapMapError(sysErr) + if sysErr != nil && !errors.Is(sysErr, unix.ENOENT) { + return 0, sysErr + } + + if err := keyBuf.Unmarshal(keysOut); err != nil { + return 0, err + } + + return int(attr.Count), sysErr +} + +// BatchUpdate updates the map with multiple keys and values +// simultaneously. +// "keys" and "values" must be of type slice, a pointer +// to a slice or buffer will not work. +func (m *Map) BatchUpdate(keys, values interface{}, opts *BatchOptions) (int, error) { + if m.typ.hasPerCPUValue() { + return m.batchUpdatePerCPU(keys, values, opts) + } + + count, err := batchCount(keys, values) + if err != nil { + return 0, err + } + + valuePtr, err := marshalMapSyscallInput(values, count*int(m.valueSize)) + if err != nil { + return 0, err + } + + return m.batchUpdate(count, keys, valuePtr, opts) +} + +func (m *Map) batchUpdate(count int, keys any, valuePtr sys.Pointer, opts *BatchOptions) (int, error) { + keyPtr, err := marshalMapSyscallInput(keys, count*int(m.keySize)) + if err != nil { + return 0, err + } + + attr := sys.MapUpdateBatchAttr{ + MapFd: m.fd.Uint(), + Keys: keyPtr, + Values: valuePtr, + Count: uint32(count), + } + if opts != nil { + attr.ElemFlags = opts.ElemFlags + attr.Flags = opts.Flags + } + + err = sys.MapUpdateBatch(&attr) + if err != nil { + if haveFeatErr := haveBatchAPI(); haveFeatErr != nil { + return 0, haveFeatErr + } + return int(attr.Count), 
fmt.Errorf("batch update: %w", wrapMapError(err)) + } + + return int(attr.Count), nil +} + +func (m *Map) batchUpdatePerCPU(keys, values any, opts *BatchOptions) (int, error) { + count, err := sliceLen(keys) + if err != nil { + return 0, fmt.Errorf("keys: %w", err) + } + + valueBuf, err := marshalBatchPerCPUValue(values, count, int(m.valueSize)) + if err != nil { + return 0, err + } + + return m.batchUpdate(count, keys, sys.UnsafeSlicePointer(valueBuf), opts) +} + +// BatchDelete batch deletes entries in the map by keys. +// "keys" must be of type slice, a pointer to a slice or buffer will not work. +func (m *Map) BatchDelete(keys interface{}, opts *BatchOptions) (int, error) { + count, err := sliceLen(keys) + if err != nil { + return 0, fmt.Errorf("keys: %w", err) + } + + keyPtr, err := marshalMapSyscallInput(keys, count*int(m.keySize)) + if err != nil { + return 0, fmt.Errorf("cannot marshal keys: %v", err) + } + + attr := sys.MapDeleteBatchAttr{ + MapFd: m.fd.Uint(), + Keys: keyPtr, + Count: uint32(count), + } + + if opts != nil { + attr.ElemFlags = opts.ElemFlags + attr.Flags = opts.Flags + } + + if err = sys.MapDeleteBatch(&attr); err != nil { + if haveFeatErr := haveBatchAPI(); haveFeatErr != nil { + return 0, haveFeatErr + } + return int(attr.Count), fmt.Errorf("batch delete: %w", wrapMapError(err)) + } + + return int(attr.Count), nil +} + +func batchCount(keys, values any) (int, error) { + keysLen, err := sliceLen(keys) + if err != nil { + return 0, fmt.Errorf("keys: %w", err) + } + + valuesLen, err := sliceLen(values) + if err != nil { + return 0, fmt.Errorf("values: %w", err) + } + + if keysLen != valuesLen { + return 0, fmt.Errorf("keys and values must have the same length") + } + + return keysLen, nil +} + +// Iterate traverses a map. +// +// It's safe to create multiple iterators at the same time. +// +// It's not possible to guarantee that all keys in a map will be +// returned if there are concurrent modifications to the map. 
+func (m *Map) Iterate() *MapIterator { + return newMapIterator(m) +} + +// Close the Map's underlying file descriptor, which could unload the +// Map from the kernel if it is not pinned or in use by a loaded Program. +func (m *Map) Close() error { + if m == nil { + // This makes it easier to clean up when iterating maps + // of maps / programs. + return nil + } + + return m.fd.Close() +} + +// FD gets the file descriptor of the Map. +// +// Calling this function is invalid after Close has been called. +func (m *Map) FD() int { + return m.fd.Int() +} + +// Clone creates a duplicate of the Map. +// +// Closing the duplicate does not affect the original, and vice versa. +// Changes made to the map are reflected by both instances however. +// If the original map was pinned, the cloned map will not be pinned by default. +// +// Cloning a nil Map returns nil. +func (m *Map) Clone() (*Map, error) { + if m == nil { + return nil, nil + } + + dup, err := m.fd.Dup() + if err != nil { + return nil, fmt.Errorf("can't clone map: %w", err) + } + + return &Map{ + m.name, + dup, + m.typ, + m.keySize, + m.valueSize, + m.maxEntries, + m.flags, + "", + m.fullValueSize, + nil, + }, nil +} + +// Pin persists the map on the BPF virtual file system past the lifetime of +// the process that created it . +// +// Calling Pin on a previously pinned map will overwrite the path, except when +// the new path already exists. Re-pinning across filesystems is not supported. +// You can Clone a map to pin it to a different path. +// +// This requires bpffs to be mounted above fileName. +// See https://docs.cilium.io/en/stable/network/kubernetes/configuration/#mounting-bpffs-with-systemd +func (m *Map) Pin(fileName string) error { + if err := sys.Pin(m.pinnedPath, fileName, m.fd); err != nil { + return err + } + m.pinnedPath = fileName + return nil +} + +// Unpin removes the persisted state for the map from the BPF virtual filesystem. 
+// +// Failed calls to Unpin will not alter the state returned by IsPinned. +// +// Unpinning an unpinned Map returns nil. +func (m *Map) Unpin() error { + if err := sys.Unpin(m.pinnedPath); err != nil { + return err + } + m.pinnedPath = "" + return nil +} + +// IsPinned returns true if the map has a non-empty pinned path. +func (m *Map) IsPinned() bool { + return m.pinnedPath != "" +} + +// Freeze prevents a map to be modified from user space. +// +// It makes no changes to kernel-side restrictions. +func (m *Map) Freeze() error { + attr := sys.MapFreezeAttr{ + MapFd: m.fd.Uint(), + } + + if err := sys.MapFreeze(&attr); err != nil { + if haveFeatErr := haveMapMutabilityModifiers(); haveFeatErr != nil { + return fmt.Errorf("can't freeze map: %w", haveFeatErr) + } + return fmt.Errorf("can't freeze map: %w", err) + } + return nil +} + +// finalize populates the Map according to the Contents specified +// in spec and freezes the Map if requested by spec. +func (m *Map) finalize(spec *MapSpec) error { + for _, kv := range spec.Contents { + if err := m.Put(kv.Key, kv.Value); err != nil { + return fmt.Errorf("putting value: key %v: %w", kv.Key, err) + } + } + + if isConstantDataSection(spec.Name) || isKconfigSection(spec.Name) { + if err := m.Freeze(); err != nil { + return fmt.Errorf("freezing map: %w", err) + } + } + + return nil +} + +func (m *Map) marshalKey(data interface{}) (sys.Pointer, error) { + if data == nil { + if m.keySize == 0 { + // Queues have a key length of zero, so passing nil here is valid. 
+ return sys.UnsafePointer(nil), nil + } + return sys.Pointer{}, errors.New("can't use nil as key of map") + } + + return marshalMapSyscallInput(data, int(m.keySize)) +} + +func (m *Map) marshalValue(data interface{}) (sys.Pointer, error) { + var ( + buf []byte + err error + ) + + switch value := data.(type) { + case *Map: + if !m.typ.canStoreMap() { + return sys.Pointer{}, fmt.Errorf("can't store map in %s", m.typ) + } + buf, err = marshalMap(value, int(m.valueSize)) + + case *Program: + if !m.typ.canStoreProgram() { + return sys.Pointer{}, fmt.Errorf("can't store program in %s", m.typ) + } + buf, err = marshalProgram(value, int(m.valueSize)) + + default: + return marshalMapSyscallInput(data, int(m.valueSize)) + } + + if err != nil { + return sys.Pointer{}, err + } + + return sys.UnsafeSlicePointer(buf), nil +} + +func (m *Map) unmarshalValue(value any, buf sysenc.Buffer) error { + switch value := value.(type) { + case **Map: + if !m.typ.canStoreMap() { + return fmt.Errorf("can't read a map from %s", m.typ) + } + + other, err := unmarshalMap(buf) + if err != nil { + return err + } + + // The caller might close the map externally, so ignore errors. + _ = (*value).Close() + + *value = other + return nil + + case *Map: + if !m.typ.canStoreMap() { + return fmt.Errorf("can't read a map from %s", m.typ) + } + return errors.New("require pointer to *Map") + + case **Program: + if !m.typ.canStoreProgram() { + return fmt.Errorf("can't read a program from %s", m.typ) + } + + other, err := unmarshalProgram(buf) + if err != nil { + return err + } + + // The caller might close the program externally, so ignore errors. + _ = (*value).Close() + + *value = other + return nil + + case *Program: + if !m.typ.canStoreProgram() { + return fmt.Errorf("can't read a program from %s", m.typ) + } + return errors.New("require pointer to *Program") + } + + return buf.Unmarshal(value) +} + +// LoadPinnedMap opens a Map from a pin (file) on the BPF virtual filesystem. 
+// +// Requires at least Linux 4.5. +func LoadPinnedMap(fileName string, opts *LoadPinOptions) (*Map, error) { + fd, typ, err := sys.ObjGetTyped(&sys.ObjGetAttr{ + Pathname: sys.NewStringPointer(fileName), + FileFlags: opts.Marshal(), + }) + if err != nil { + return nil, err + } + + if typ != sys.BPF_TYPE_MAP { + _ = fd.Close() + return nil, fmt.Errorf("%s is not a Map", fileName) + } + + m, err := newMapFromFD(fd) + if err == nil { + m.pinnedPath = fileName + } + + return m, err +} + +// unmarshalMap creates a map from a map ID encoded in host endianness. +func unmarshalMap(buf sysenc.Buffer) (*Map, error) { + var id uint32 + if err := buf.Unmarshal(&id); err != nil { + return nil, err + } + return NewMapFromID(MapID(id)) +} + +// marshalMap marshals the fd of a map into a buffer in host endianness. +func marshalMap(m *Map, length int) ([]byte, error) { + if m == nil { + return nil, errors.New("can't marshal a nil Map") + } + + if length != 4 { + return nil, fmt.Errorf("can't marshal map to %d bytes", length) + } + + buf := make([]byte, 4) + internal.NativeEndian.PutUint32(buf, m.fd.Uint()) + return buf, nil +} + +// MapIterator iterates a Map. +// +// See Map.Iterate. +type MapIterator struct { + target *Map + // Temporary storage to avoid allocations in Next(). This is any instead + // of []byte to avoid allocations. + cursor any + count, maxEntries uint32 + done bool + err error +} + +func newMapIterator(target *Map) *MapIterator { + return &MapIterator{ + target: target, + maxEntries: target.maxEntries, + } +} + +// Next decodes the next key and value. +// +// Iterating a hash map from which keys are being deleted is not +// safe. You may see the same key multiple times. Iteration may +// also abort with an error, see IsIterationAborted. +// +// Returns false if there are no more entries. You must check +// the result of Err afterwards. +// +// See Map.Get for further caveats around valueOut. 
+func (mi *MapIterator) Next(keyOut, valueOut interface{}) bool { + if mi.err != nil || mi.done { + return false + } + + // For array-like maps NextKey returns nil only after maxEntries + // iterations. + for mi.count <= mi.maxEntries { + if mi.cursor == nil { + // Pass nil interface to NextKey to make sure the Map's first key + // is returned. If we pass an uninitialized []byte instead, it'll see a + // non-nil interface and try to marshal it. + mi.cursor = make([]byte, mi.target.keySize) + mi.err = mi.target.NextKey(nil, mi.cursor) + } else { + mi.err = mi.target.NextKey(mi.cursor, mi.cursor) + } + + if errors.Is(mi.err, ErrKeyNotExist) { + mi.done = true + mi.err = nil + return false + } else if mi.err != nil { + mi.err = fmt.Errorf("get next key: %w", mi.err) + return false + } + + mi.count++ + mi.err = mi.target.Lookup(mi.cursor, valueOut) + if errors.Is(mi.err, ErrKeyNotExist) { + // Even though the key should be valid, we couldn't look up + // its value. If we're iterating a hash map this is probably + // because a concurrent delete removed the value before we + // could get it. This means that the next call to NextKeyBytes + // is very likely to restart iteration. + // If we're iterating one of the fd maps like + // ProgramArray it means that a given slot doesn't have + // a valid fd associated. It's OK to continue to the next slot. + continue + } + if mi.err != nil { + mi.err = fmt.Errorf("look up next key: %w", mi.err) + return false + } + + buf := mi.cursor.([]byte) + if ptr, ok := keyOut.(unsafe.Pointer); ok { + copy(unsafe.Slice((*byte)(ptr), len(buf)), buf) + } else { + mi.err = sysenc.Unmarshal(keyOut, buf) + } + + return mi.err == nil + } + + mi.err = fmt.Errorf("%w", ErrIterationAborted) + return false +} + +// Err returns any encountered error. +// +// The method must be called after Next returns nil. +// +// Returns ErrIterationAborted if it wasn't possible to do a full iteration. 
+func (mi *MapIterator) Err() error { + return mi.err +} + +// MapGetNextID returns the ID of the next eBPF map. +// +// Returns ErrNotExist, if there is no next eBPF map. +func MapGetNextID(startID MapID) (MapID, error) { + attr := &sys.MapGetNextIdAttr{Id: uint32(startID)} + return MapID(attr.NextId), sys.MapGetNextId(attr) +} + +// NewMapFromID returns the [Map] for a given map id. Returns [ErrNotExist] if +// there is no eBPF map with the given id. +// +// Requires at least Linux 4.13. +func NewMapFromID(id MapID) (*Map, error) { + fd, err := sys.MapGetFdById(&sys.MapGetFdByIdAttr{ + Id: uint32(id), + }) + if err != nil { + return nil, err + } + + return newMapFromFD(fd) +} + +// sliceLen returns the length if the value is a slice or an error otherwise. +func sliceLen(slice any) (int, error) { + sliceValue := reflect.ValueOf(slice) + if sliceValue.Kind() != reflect.Slice { + return 0, fmt.Errorf("%T is not a slice", slice) + } + return sliceValue.Len(), nil +} diff --git a/vendor/github.com/cilium/ebpf/marshalers.go b/vendor/github.com/cilium/ebpf/marshalers.go new file mode 100644 index 000000000..d4e719c60 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/marshalers.go @@ -0,0 +1,210 @@ +package ebpf + +import ( + "encoding" + "errors" + "fmt" + "reflect" + "slices" + "unsafe" + + "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/sys" + "github.com/cilium/ebpf/internal/sysenc" +) + +// marshalMapSyscallInput converts an arbitrary value into a pointer suitable +// to be passed to the kernel. +// +// As an optimization, it returns the original value if it is an +// unsafe.Pointer. 
+func marshalMapSyscallInput(data any, length int) (sys.Pointer, error) { + if ptr, ok := data.(unsafe.Pointer); ok { + return sys.UnsafePointer(ptr), nil + } + + buf, err := sysenc.Marshal(data, length) + if err != nil { + return sys.Pointer{}, err + } + + return buf.Pointer(), nil +} + +func makeMapSyscallOutput(dst any, length int) sysenc.Buffer { + if ptr, ok := dst.(unsafe.Pointer); ok { + return sysenc.UnsafeBuffer(ptr) + } + + _, ok := dst.(encoding.BinaryUnmarshaler) + if ok { + return sysenc.SyscallOutput(nil, length) + } + + return sysenc.SyscallOutput(dst, length) +} + +// appendPerCPUSlice encodes a slice containing one value per +// possible CPU into a buffer of bytes. +// +// Values are initialized to zero if the slice has less elements than CPUs. +func appendPerCPUSlice(buf []byte, slice any, possibleCPUs, elemLength, alignedElemLength int) ([]byte, error) { + sliceType := reflect.TypeOf(slice) + if sliceType.Kind() != reflect.Slice { + return nil, errors.New("per-CPU value requires slice") + } + + sliceValue := reflect.ValueOf(slice) + sliceLen := sliceValue.Len() + if sliceLen > possibleCPUs { + return nil, fmt.Errorf("per-CPU value greater than number of CPUs") + } + + // Grow increases the slice's capacity, _if_necessary_ + buf = slices.Grow(buf, alignedElemLength*possibleCPUs) + for i := 0; i < sliceLen; i++ { + elem := sliceValue.Index(i).Interface() + elemBytes, err := sysenc.Marshal(elem, elemLength) + if err != nil { + return nil, err + } + + buf = elemBytes.AppendTo(buf) + buf = append(buf, make([]byte, alignedElemLength-elemLength)...) + } + + // Ensure buf is zero-padded full size. + buf = append(buf, make([]byte, (possibleCPUs-sliceLen)*alignedElemLength)...) + + return buf, nil +} + +// marshalPerCPUValue encodes a slice containing one value per +// possible CPU into a buffer of bytes. +// +// Values are initialized to zero if the slice has less elements than CPUs. 
+func marshalPerCPUValue(slice any, elemLength int) (sys.Pointer, error) { + possibleCPUs, err := PossibleCPU() + if err != nil { + return sys.Pointer{}, err + } + + alignedElemLength := internal.Align(elemLength, 8) + buf := make([]byte, 0, alignedElemLength*possibleCPUs) + buf, err = appendPerCPUSlice(buf, slice, possibleCPUs, elemLength, alignedElemLength) + if err != nil { + return sys.Pointer{}, err + } + + return sys.UnsafeSlicePointer(buf), nil +} + +// marshalBatchPerCPUValue encodes a batch-sized slice of slices containing +// one value per possible CPU into a buffer of bytes. +func marshalBatchPerCPUValue(slice any, batchLen, elemLength int) ([]byte, error) { + sliceType := reflect.TypeOf(slice) + if sliceType.Kind() != reflect.Slice { + return nil, fmt.Errorf("batch value requires a slice") + } + sliceValue := reflect.ValueOf(slice) + + possibleCPUs, err := PossibleCPU() + if err != nil { + return nil, err + } + if sliceValue.Len() != batchLen*possibleCPUs { + return nil, fmt.Errorf("per-CPU slice has incorrect length, expected %d, got %d", + batchLen*possibleCPUs, sliceValue.Len()) + } + alignedElemLength := internal.Align(elemLength, 8) + buf := make([]byte, 0, batchLen*alignedElemLength*possibleCPUs) + for i := 0; i < batchLen; i++ { + batch := sliceValue.Slice(i*possibleCPUs, (i+1)*possibleCPUs).Interface() + buf, err = appendPerCPUSlice(buf, batch, possibleCPUs, elemLength, alignedElemLength) + if err != nil { + return nil, fmt.Errorf("batch %d: %w", i, err) + } + } + return buf, nil +} + +// unmarshalPerCPUValue decodes a buffer into a slice containing one value per +// possible CPU. +// +// slice must be a literal slice and not a pointer. 
+func unmarshalPerCPUValue(slice any, elemLength int, buf []byte) error { + sliceType := reflect.TypeOf(slice) + if sliceType.Kind() != reflect.Slice { + return fmt.Errorf("per-CPU value requires a slice") + } + + possibleCPUs, err := PossibleCPU() + if err != nil { + return err + } + + sliceValue := reflect.ValueOf(slice) + if sliceValue.Len() != possibleCPUs { + return fmt.Errorf("per-CPU slice has incorrect length, expected %d, got %d", + possibleCPUs, sliceValue.Len()) + } + + sliceElemType := sliceType.Elem() + sliceElemIsPointer := sliceElemType.Kind() == reflect.Ptr + stride := internal.Align(elemLength, 8) + for i := 0; i < possibleCPUs; i++ { + var elem any + v := sliceValue.Index(i) + if sliceElemIsPointer { + if !v.Elem().CanAddr() { + return fmt.Errorf("per-CPU slice elements cannot be nil") + } + elem = v.Elem().Addr().Interface() + } else { + elem = v.Addr().Interface() + } + err := sysenc.Unmarshal(elem, buf[:elemLength]) + if err != nil { + return fmt.Errorf("cpu %d: %w", i, err) + } + + buf = buf[stride:] + } + return nil +} + +// unmarshalBatchPerCPUValue decodes a buffer into a batch-sized slice +// containing one value per possible CPU. +// +// slice must have length batchLen * PossibleCPUs(). 
+func unmarshalBatchPerCPUValue(slice any, batchLen, elemLength int, buf []byte) error { + sliceType := reflect.TypeOf(slice) + if sliceType.Kind() != reflect.Slice { + return fmt.Errorf("batch requires a slice") + } + + sliceValue := reflect.ValueOf(slice) + possibleCPUs, err := PossibleCPU() + if err != nil { + return err + } + if sliceValue.Len() != batchLen*possibleCPUs { + return fmt.Errorf("per-CPU slice has incorrect length, expected %d, got %d", + sliceValue.Len(), batchLen*possibleCPUs) + } + + fullValueSize := possibleCPUs * internal.Align(elemLength, 8) + if len(buf) != batchLen*fullValueSize { + return fmt.Errorf("input buffer has incorrect length, expected %d, got %d", + len(buf), batchLen*fullValueSize) + } + + for i := 0; i < batchLen; i++ { + elem := sliceValue.Slice(i*possibleCPUs, (i+1)*possibleCPUs).Interface() + if err := unmarshalPerCPUValue(elem, elemLength, buf[:fullValueSize]); err != nil { + return fmt.Errorf("batch %d: %w", i, err) + } + buf = buf[fullValueSize:] + } + return nil +} diff --git a/vendor/github.com/cilium/ebpf/memory.go b/vendor/github.com/cilium/ebpf/memory.go new file mode 100644 index 000000000..e470bf24f --- /dev/null +++ b/vendor/github.com/cilium/ebpf/memory.go @@ -0,0 +1,155 @@ +package ebpf + +import ( + "errors" + "fmt" + "io" + "runtime" + + "github.com/cilium/ebpf/internal/unix" +) + +// Memory is the building block for accessing the memory of specific bpf map +// types (Array and Arena at the time of writing) without going through the bpf +// syscall interface. +// +// Given the fd of a bpf map created with the BPF_F_MMAPABLE flag, a shared +// 'file'-based memory-mapped region can be allocated in the process' address +// space, exposing the bpf map's memory by simply accessing a memory location. + +var ErrReadOnly = errors.New("resource is read-only") + +// Memory implements accessing a Map's memory without making any syscalls. +// Pay attention to the difference between Go and C struct alignment rules. 
Use +// [structs.HostLayout] on supported Go versions to help with alignment. +// +// Note on memory coherence: avoid using packed structs in memory shared between +// user space and eBPF C programs. This drops a struct's memory alignment to 1, +// forcing the compiler to use single-byte loads and stores for field accesses. +// This may lead to partially-written data to be observed from user space. +// +// On most architectures, the memmove implementation used by Go's copy() will +// access data in word-sized chunks. If paired with a matching access pattern on +// the eBPF C side (and if using default memory alignment), accessing shared +// memory without atomics or other synchronization primitives should be sound +// for individual values. For accesses beyond a single value, the usual +// concurrent programming rules apply. +type Memory struct { + b []byte + ro bool + heap bool + + cleanup runtime.Cleanup +} + +func newMemory(fd, size int) (*Memory, error) { + // Typically, maps created with BPF_F_RDONLY_PROG remain writable from user + // space until frozen. As a security precaution, the kernel doesn't allow + // mapping bpf map memory as read-write into user space if the bpf map was + // frozen, or if it was created using the RDONLY_PROG flag. + // + // The user would be able to write to the map after freezing (since the kernel + // can't change the protection mode of an already-mapped page), while the + // verifier assumes the contents to be immutable. + b, err := unix.Mmap(fd, 0, size, unix.PROT_READ|unix.PROT_WRITE, unix.MAP_SHARED) + + // If the map is frozen when an rw mapping is requested, expect EPERM. If the + // map was created with BPF_F_RDONLY_PROG, expect EACCES. 
+ var ro bool + if errors.Is(err, unix.EPERM) || errors.Is(err, unix.EACCES) { + ro = true + b, err = unix.Mmap(fd, 0, size, unix.PROT_READ, unix.MAP_SHARED) + } + if err != nil { + return nil, fmt.Errorf("setting up memory-mapped region: %w", err) + } + + mm := &Memory{b: b, ro: ro, heap: false} + mm.cleanup = runtime.AddCleanup(mm, memoryCleanupFunc(), b) + + return mm, nil +} + +func memoryCleanupFunc() func([]byte) { + return func(b []byte) { + if err := unix.Munmap(b); err != nil { + panic(fmt.Errorf("unmapping memory: %w", err)) + } + } +} + +func (mm *Memory) close() { + mm.cleanup.Stop() + memoryCleanupFunc()(mm.b) + mm.b = nil +} + +// Size returns the size of the memory-mapped region in bytes. +func (mm *Memory) Size() int { + return len(mm.b) +} + +// ReadOnly returns true if the memory-mapped region is read-only. +func (mm *Memory) ReadOnly() bool { + return mm.ro +} + +// bounds returns true if an access at off of the given size is within bounds. +func (mm *Memory) bounds(off uint64, size uint64) bool { + if off+size < off { + return false + } + return off+size <= uint64(len(mm.b)) +} + +// ReadAt implements [io.ReaderAt]. Useful for creating a new [io.OffsetWriter]. +// +// See [Memory] for details around memory coherence. +func (mm *Memory) ReadAt(p []byte, off int64) (int, error) { + if mm.b == nil { + return 0, fmt.Errorf("memory-mapped region closed") + } + + if p == nil { + return 0, fmt.Errorf("input buffer p is nil") + } + + if off < 0 || off >= int64(len(mm.b)) { + return 0, fmt.Errorf("read offset out of range") + } + + n := copy(p, mm.b[off:]) + if n < len(p) { + return n, io.EOF + } + + return n, nil +} + +// WriteAt implements [io.WriterAt]. Useful for creating a new +// [io.SectionReader]. +// +// See [Memory] for details around memory coherence. 
+func (mm *Memory) WriteAt(p []byte, off int64) (int, error) { + if mm.b == nil { + return 0, fmt.Errorf("memory-mapped region closed") + } + if mm.ro { + return 0, fmt.Errorf("memory-mapped region not writable: %w", ErrReadOnly) + } + + if p == nil { + return 0, fmt.Errorf("output buffer p is nil") + } + + if off < 0 || off >= int64(len(mm.b)) { + return 0, fmt.Errorf("write offset out of range") + } + + n := copy(mm.b[off:], p) + if n < len(p) { + return n, io.EOF + } + + return n, nil +} diff --git a/vendor/github.com/cilium/ebpf/memory_unsafe.go b/vendor/github.com/cilium/ebpf/memory_unsafe.go new file mode 100644 index 000000000..9518ff35d --- /dev/null +++ b/vendor/github.com/cilium/ebpf/memory_unsafe.go @@ -0,0 +1,343 @@ +package ebpf + +import ( + "errors" + "fmt" + "os" + "reflect" + "runtime" + "unsafe" + + "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/unix" +) + +// This file contains an experimental, unsafe implementation of Memory that +// allows taking a Go pointer to a memory-mapped region. This currently does not +// have first-class support from the Go runtime, so it may break in future Go +// versions. The Go proposal for the runtime to track off-heap pointers is here: +// https://github.com/golang/go/issues/70224. +// +// In Go, the programmer should not have to worry about freeing memory. Since +// this API synthesizes Go variables around global variables declared in a BPF +// C program, we want to lean on the runtime for making sure accessing them is +// safe at all times. Unfortunately, Go (as of 1.24) does not have the ability +// of automatically managing memory that was not allocated by the runtime. +// +// This led to a solution that requests regular Go heap memory by allocating a +// slice (making the runtime track pointers into the slice's backing array) and +// memory-mapping the bpf map's memory over it. 
Then, before returning the +// Memory to the caller, a finalizer is set on the backing array, making sure +// the bpf map's memory is unmapped from the heap before releasing the backing +// array to the runtime for reallocation. +// +// This obviates the need to maintain a reference to the *Memory at all times, +// which is difficult for the caller to achieve if the variable access is done +// through another object (like a sync.Atomic) that can potentially be passed +// around the Go application. Accidentally losing the reference to the *Memory +// would result in hard-to-debug segfaults, which are always unexpected in Go. + +//go:linkname heapObjectsCanMove runtime.heapObjectsCanMove +func heapObjectsCanMove() bool + +// Set from a file behind the ebpf_unsafe_memory_experiment build tag to enable +// features that require mapping bpf map memory over the Go heap. +var unsafeMemory = false + +// ErrInvalidType is returned when the given type cannot be used as a Memory or +// Variable pointer. +var ErrInvalidType = errors.New("invalid type") + +func newUnsafeMemory(fd, size int) (*Memory, error) { + // Some architectures need the size to be page-aligned to work with MAP_FIXED. + if size%os.Getpagesize() != 0 { + return nil, fmt.Errorf("memory: must be a multiple of page size (requested %d bytes)", size) + } + + // Allocate a page-aligned span of memory on the Go heap. + alloc, err := allocate(size) + if err != nil { + return nil, fmt.Errorf("allocating memory: %w", err) + } + + // Typically, maps created with BPF_F_RDONLY_PROG remain writable from user + // space until frozen. As a security precaution, the kernel doesn't allow + // mapping bpf map memory as read-write into user space if the bpf map was + // frozen, or if it was created using the RDONLY_PROG flag. 
+ // + // The user would be able to write to the map after freezing (since the kernel + // can't change the protection mode of an already-mapped page), while the + // verifier assumes the contents to be immutable. + // + // Map the bpf map memory over a page-aligned allocation on the Go heap. + err = mapmap(fd, alloc, size, unix.PROT_READ|unix.PROT_WRITE) + + // If the map is frozen when an rw mapping is requested, expect EPERM. If the + // map was created with BPF_F_RDONLY_PROG, expect EACCES. + var ro bool + if errors.Is(err, unix.EPERM) || errors.Is(err, unix.EACCES) { + ro = true + err = mapmap(fd, alloc, size, unix.PROT_READ) + } + if err != nil { + return nil, fmt.Errorf("setting up memory-mapped region: %w", err) + } + + mm := &Memory{ + unsafe.Slice((*byte)(alloc), size), + ro, + true, + runtime.Cleanup{}, + } + + return mm, nil +} + +// allocate returns a pointer to a page-aligned section of memory on the Go +// heap, managed by the runtime. +// +//go:nocheckptr +func allocate(size int) (unsafe.Pointer, error) { + // Memory-mapping over a piece of the Go heap is unsafe when the GC can + // randomly decide to move objects around, in which case the mapped region + // will not move along with it. + if heapObjectsCanMove() { + return nil, errors.New("this Go runtime has a moving garbage collector") + } + + if size == 0 { + return nil, errors.New("size must be greater than 0") + } + + // Request at least two pages of memory from the runtime to ensure we can + // align the requested allocation to a page boundary. This is needed for + // MAP_FIXED and makes sure we don't mmap over some other allocation on the Go + // heap. + size = internal.Align(size+os.Getpagesize(), os.Getpagesize()) + + // Allocate a new slice and store a pointer to its backing array. + alloc := unsafe.Pointer(unsafe.SliceData(make([]byte, size))) + + // nolint:govet + // + // Align the pointer to a page boundary within the allocation. 
This may alias + // the initial pointer if it was already page-aligned. Ignore govet warnings + // since we're calling [runtime.KeepAlive] on the original Go memory. + aligned := unsafe.Pointer(internal.Align(uintptr(alloc), uintptr(os.Getpagesize()))) + runtime.KeepAlive(alloc) + + // Return an aligned pointer into the backing array, losing the original + // reference. The runtime.SetFinalizer docs specify that its argument 'must be + // a pointer to an object, complit or local var', but this is still somewhat + // vague and not enforced by the current implementation. + // + // Currently, finalizers can be set and triggered from any address within a + // heap allocation, even individual struct fields or arbitrary offsets within + // a slice. In this case, finalizers set on struct fields or slice offsets + // will only run when the whole struct or backing array are collected. The + // accepted runtime.AddCleanup proposal makes this behaviour more explicit and + // is set to deprecate runtime.SetFinalizer. + // + // Alternatively, we'd have to track the original allocation and the aligned + // pointer separately, which severely complicates finalizer setup and makes it + // prone to human error. For now, just bump the pointer and treat it as the + // new and only reference to the backing array. + return aligned, nil +} + +// mapmap memory-maps the given file descriptor at the given address and sets a +// finalizer on addr to unmap it when it's no longer reachable. +func mapmap(fd int, addr unsafe.Pointer, size, flags int) error { + // Map the bpf map memory over the Go heap. 
This will result in the following + // mmap layout in the process' address space (0xc000000000 is a span of Go + // heap), visualized using pmap: + // + // Address Kbytes RSS Dirty Mode Mapping + // 000000c000000000 1824 864 864 rw--- [ anon ] + // 000000c0001c8000 4 4 4 rw-s- [ anon ] + // 000000c0001c9000 2268 16 16 rw--- [ anon ] + // + // This will break up the Go heap, but as long as the runtime doesn't try to + // move our allocation around, this is safe for as long as we hold a reference + // to our allocated object. + // + // Use MAP_SHARED to make sure the kernel sees any writes we do, and MAP_FIXED + // to ensure the mapping starts exactly at the address we requested. If alloc + // isn't page-aligned, the mapping operation will fail. + if _, err := unix.MmapPtr(fd, 0, addr, uintptr(size), + flags, unix.MAP_SHARED|unix.MAP_FIXED); err != nil { + return fmt.Errorf("setting up memory-mapped region: %w", err) + } + + // Set a finalizer on the heap allocation to undo the mapping before the span + // is collected and reused by the runtime. This has a few reasons: + // + // - Avoid leaking memory/mappings. + // - Future writes to this memory should never clobber a bpf map's contents. + // - Some bpf maps are mapped read-only, causing a segfault if the runtime + // reallocates and zeroes the span later. + runtime.SetFinalizer((*byte)(addr), unmap(size)) + + return nil +} + +// unmap returns a function that takes a pointer to a memory-mapped region on +// the Go heap. The function undoes any mappings and discards the span's +// contents. +// +// Used as a finalizer in [newMemory], split off into a separate function for +// testing and to avoid accidentally closing over the unsafe.Pointer to the +// memory region, which would cause a cyclical reference. +// +// The resulting function panics if the mmap operation returns an error, since +// it would mean the integrity of the Go heap is compromised. 
+func unmap(size int) func(*byte) { + return func(a *byte) { + // Create another mapping at the same address to undo the original mapping. + // This will cause the kernel to repair the slab since we're using the same + // protection mode and flags as the original mapping for the Go heap. + // + // Address Kbytes RSS Dirty Mode Mapping + // 000000c000000000 4096 884 884 rw--- [ anon ] + // + // Using munmap here would leave an unmapped hole in the heap, compromising + // its integrity. + // + // MmapPtr allocates another unsafe.Pointer at the same address. Even though + // we discard it here, it may temporarily resurrect the backing array and + // delay its collection to the next GC cycle. + _, err := unix.MmapPtr(-1, 0, unsafe.Pointer(a), uintptr(size), + unix.PROT_READ|unix.PROT_WRITE, + unix.MAP_PRIVATE|unix.MAP_FIXED|unix.MAP_ANON) + if err != nil { + panic(fmt.Errorf("undoing bpf map memory mapping: %w", err)) + } + } +} + +// checkUnsafeMemory ensures value T can be accessed in mm at offset off. +// +// The comparable constraint narrows down the set of eligible types to exclude +// slices, maps and functions. These complex types cannot be mapped to memory +// directly. 
+func checkUnsafeMemory[T comparable](mm *Memory, off uint64) error { + if mm.b == nil { + return fmt.Errorf("memory-mapped region is nil") + } + if mm.ro { + return ErrReadOnly + } + if !mm.heap { + return fmt.Errorf("memory region is not heap-mapped, build with '-tags ebpf_unsafe_memory_experiment' to enable: %w", ErrNotSupported) + } + + t := reflect.TypeFor[T]() + if err := checkType(t.String(), t); err != nil { + return err + } + + size := t.Size() + if size == 0 { + return fmt.Errorf("zero-sized type %s: %w", t, ErrInvalidType) + } + + if off%uint64(t.Align()) != 0 { + return fmt.Errorf("unaligned access of memory-mapped region: %d-byte aligned read at offset %d", t.Align(), off) + } + + vs, bs := uint64(size), uint64(len(mm.b)) + if off+vs > bs { + return fmt.Errorf("%d-byte value at offset %d exceeds mmap size of %d bytes", vs, off, bs) + } + + return nil +} + +// checkType recursively checks if the given type is supported for memory +// mapping. Only fixed-size, non-Go-pointer types are supported: bools, floats, +// (u)int[8-64], arrays, and structs containing them. As an exception, uintptr +// is allowed since the backing memory is expected to contain 32-bit pointers on +// 32-bit systems despite BPF always allocating 64 bits for pointers in a data +// section. +// +// Doesn't check for loops since it rejects pointers. Should that ever change, a +// visited set would be needed. +func checkType(name string, t reflect.Type) error { + // Special-case atomic types to allow them to be used as root types as well as + // struct fields. Notably, omit atomic.Value and atomic.Pointer since those + // are pointer types. Also, atomic.Value embeds an interface value, which + // doesn't make sense to share with C land. 
+ if t.PkgPath() == "sync/atomic" { + switch t.Name() { + case "Bool", "Int32", "Int64", "Uint32", "Uint64", "Uintptr": + return nil + } + } + + switch t.Kind() { + case reflect.Uintptr, reflect.Bool, reflect.Float32, reflect.Float64, + reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, + reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return nil + + case reflect.Array: + at := t.Elem() + if err := checkType(fmt.Sprintf("%s.%s", name, at.String()), at); err != nil { + return err + } + + case reflect.Struct: + var hasHostLayout bool + for i := range t.NumField() { + at := t.Field(i).Type + + // Require [structs.HostLayout] to be embedded in all structs. Check the + // full package path to reject a user-defined HostLayout type. + if at.PkgPath() == "structs" && at.Name() == "HostLayout" { + hasHostLayout = true + continue + } + + if err := checkType(fmt.Sprintf("%s.%s", name, at.String()), at); err != nil { + return err + } + } + + if !hasHostLayout { + return fmt.Errorf("struct %s must embed structs.HostLayout: %w", name, ErrInvalidType) + } + + default: + // For basic types like int and bool, the kind name is the same as the type + // name, so the fallthrough case would print 'int type int not supported'. + // Omit the kind name if it matches the type name. + if t.String() == t.Kind().String() { + // Output: type int not supported + return fmt.Errorf("type %s not supported: %w", name, ErrInvalidType) + } + + // Output: interface value io.Reader not supported + return fmt.Errorf("%s type %s not supported: %w", t.Kind(), name, ErrInvalidType) + } + + return nil +} + +// memoryPointer returns a pointer to a value of type T at offset off in mm. +// Taking a pointer to a read-only Memory or to a Memory that is not heap-mapped +// is not supported. +// +// T must contain only fixed-size, non-Go-pointer types: bools, floats, +// (u)int[8-64], arrays, and structs containing them. Structs must embed +// [structs.HostLayout]. 
[ErrInvalidType] is returned if T is not a valid type. +// +// Memory must be writable, off must be aligned to the size of T, and the value +// must be within bounds of the Memory. +// +// To access read-only memory, use [Memory.ReadAt]. +func memoryPointer[T comparable](mm *Memory, off uint64) (*T, error) { + if err := checkUnsafeMemory[T](mm, off); err != nil { + return nil, fmt.Errorf("memory pointer: %w", err) + } + return (*T)(unsafe.Pointer(&mm.b[off])), nil +} diff --git a/vendor/github.com/cilium/ebpf/memory_unsafe_tag.go b/vendor/github.com/cilium/ebpf/memory_unsafe_tag.go new file mode 100644 index 000000000..e662065ed --- /dev/null +++ b/vendor/github.com/cilium/ebpf/memory_unsafe_tag.go @@ -0,0 +1,7 @@ +//go:build ebpf_unsafe_memory_experiment + +package ebpf + +func init() { + unsafeMemory = true +} diff --git a/vendor/github.com/cilium/ebpf/netlify.toml b/vendor/github.com/cilium/ebpf/netlify.toml new file mode 100644 index 000000000..764c3b447 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/netlify.toml @@ -0,0 +1,5 @@ +[build] + base = "docs/" + publish = "site/" + command = "mkdocs build" + environment = { PYTHON_VERSION = "3.13" } diff --git a/vendor/github.com/cilium/ebpf/prog.go b/vendor/github.com/cilium/ebpf/prog.go new file mode 100644 index 000000000..3e724234d --- /dev/null +++ b/vendor/github.com/cilium/ebpf/prog.go @@ -0,0 +1,1244 @@ +package ebpf + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + "math" + "path/filepath" + "runtime" + "slices" + "strings" + "time" + + "github.com/cilium/ebpf/asm" + "github.com/cilium/ebpf/btf" + "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/linux" + "github.com/cilium/ebpf/internal/platform" + "github.com/cilium/ebpf/internal/sys" + "github.com/cilium/ebpf/internal/sysenc" + "github.com/cilium/ebpf/internal/unix" +) + +// ErrNotSupported is returned whenever the kernel doesn't support a feature. 
+var ErrNotSupported = internal.ErrNotSupported + +// errBadRelocation is returned when the verifier rejects a program due to a +// bad CO-RE relocation. +// +// This error is detected based on heuristics and therefore may not be reliable. +var errBadRelocation = errors.New("bad CO-RE relocation") + +// errUnknownKfunc is returned when the verifier rejects a program due to an +// unknown kfunc. +// +// This error is detected based on heuristics and therefore may not be reliable. +var errUnknownKfunc = errors.New("unknown kfunc") + +// ProgramID represents the unique ID of an eBPF program. +type ProgramID = sys.ProgramID + +const ( + // Number of bytes to pad the output buffer for BPF_PROG_TEST_RUN. + // This is currently the maximum of spare space allocated for SKB + // and XDP programs, and equal to XDP_PACKET_HEADROOM + NET_IP_ALIGN. + outputPad = 256 + 2 +) + +// minVerifierLogSize is the default number of bytes allocated for the +// verifier log. +const minVerifierLogSize = 64 * 1024 + +// maxVerifierLogSize is the maximum size of verifier log buffer the kernel +// will accept before returning EINVAL. May be increased to MaxUint32 in the +// future, but avoid the unnecessary EINVAL for now. +const maxVerifierLogSize = math.MaxUint32 >> 2 + +// maxVerifierAttempts is the maximum number of times the verifier will retry +// loading a program with a growing log buffer before giving up. Since we double +// the log size on every attempt, this is the absolute maximum number of +// attempts before the buffer reaches [maxVerifierLogSize]. +const maxVerifierAttempts = 30 + +// ProgramOptions control loading a program into the kernel. +type ProgramOptions struct { + // Bitmap controlling the detail emitted by the kernel's eBPF verifier log. + // LogLevel-type values can be ORed together to request specific kinds of + // verifier output. See the documentation on [ebpf.LogLevel] for details. 
+ // + // opts.LogLevel = (ebpf.LogLevelBranch | ebpf.LogLevelStats) + // + // If left to its default value, the program will first be loaded without + // verifier output enabled. Upon error, the program load will be repeated + // with LogLevelBranch and the given (or default) LogSize value. + // + // Unless LogDisabled is set, setting this to a non-zero value will enable the verifier + // log, populating the [ebpf.Program.VerifierLog] field on successful loads + // and including detailed verifier errors if the program is rejected. This + // will always allocate an output buffer, but will result in only a single + // attempt at loading the program. + LogLevel LogLevel + + // Starting size of the verifier log buffer. If the verifier log is larger + // than this size, the buffer will be grown to fit the entire log. Leave at + // its default value unless troubleshooting. + LogSizeStart uint32 + + // Disables the verifier log completely, regardless of other options. + LogDisabled bool + + // Type information used for CO-RE relocations. + // + // This is useful in environments where the kernel BTF is not available + // (containers) or where it is in a non-standard location. Defaults to + // use the kernel BTF from a well-known location if nil. + KernelTypes *btf.Spec + + // Additional targets to consider for CO-RE relocations. This can be used to + // pass BTF information for kernel modules when it's not present on + // KernelTypes. + ExtraRelocationTargets []*btf.Spec +} + +// ProgramSpec defines a Program. +type ProgramSpec struct { + // Name is passed to the kernel as a debug aid. + // + // Unsupported characters will be stripped. + Name string + + // Type determines at which hook in the kernel a program will run. + Type ProgramType + + // Network interface index the user intends to attach this program to after + // loading. Only valid for some program types. 
+ // + // Provides driver-specific context about the target interface to the + // verifier, required when using certain BPF helpers. + Ifindex uint32 + + // AttachType of the program, needed to differentiate allowed context + // accesses in some newer program types like CGroupSockAddr. + // + // Available on kernels 4.17 and later. + AttachType AttachType + + // Name of a kernel data structure or function to attach to. Its + // interpretation depends on Type and AttachType. + AttachTo string + + // The program to attach to. Must be provided manually. + AttachTarget *Program + + // The name of the ELF section this program originated from. + SectionName string + + Instructions asm.Instructions + + // Flags is passed to the kernel and specifies additional program + // load attributes. + Flags uint32 + + // License of the program. Some helpers are only available if + // the license is deemed compatible with the GPL. + // + // See https://www.kernel.org/doc/html/latest/process/license-rules.html#id1 + License string + + // Version used by Kprobe programs. + // + // Deprecated on kernels 5.0 and later. Leave empty to let the library + // detect this value automatically. + KernelVersion uint32 + + // The byte order this program was compiled for, may be nil. + ByteOrder binary.ByteOrder +} + +// Copy returns a copy of the spec. +func (ps *ProgramSpec) Copy() *ProgramSpec { + if ps == nil { + return nil + } + + cpy := *ps + cpy.Instructions = make(asm.Instructions, len(ps.Instructions)) + copy(cpy.Instructions, ps.Instructions) + return &cpy +} + +// Tag calculates the kernel tag for a series of instructions. +// +// Use asm.Instructions.Tag if you need to calculate for non-native endianness. +func (ps *ProgramSpec) Tag() (string, error) { + return ps.Instructions.Tag(internal.NativeEndian) +} + +// targetsKernelModule returns true if the program supports being attached to a +// symbol provided by a kernel module. 
+func (ps *ProgramSpec) targetsKernelModule() bool { + if ps.AttachTo == "" { + return false + } + + switch ps.Type { + case Tracing: + switch ps.AttachType { + case AttachTraceFEntry, AttachTraceFExit: + return true + } + case Kprobe: + return true + } + + return false +} + +// VerifierError is returned by [NewProgram] and [NewProgramWithOptions] if a +// program is rejected by the verifier. +// +// Use [errors.As] to access the error. +type VerifierError = internal.VerifierError + +// Program represents BPF program loaded into the kernel. +// +// It is not safe to close a Program which is used by other goroutines. +type Program struct { + // Contains the output of the kernel verifier if enabled, + // otherwise it is empty. + VerifierLog string + + fd *sys.FD + name string + pinnedPath string + typ ProgramType +} + +// NewProgram creates a new Program. +// +// See [NewProgramWithOptions] for details. +// +// Returns a [VerifierError] containing the full verifier log if the program is +// rejected by the kernel. +func NewProgram(spec *ProgramSpec) (*Program, error) { + return NewProgramWithOptions(spec, ProgramOptions{}) +} + +// NewProgramWithOptions creates a new Program. +// +// Loading a program for the first time will perform +// feature detection by loading small, temporary programs. +// +// Returns a [VerifierError] containing the full verifier log if the program is +// rejected by the kernel. 
+func NewProgramWithOptions(spec *ProgramSpec, opts ProgramOptions) (*Program, error) { + if spec == nil { + return nil, errors.New("can't load a program from a nil spec") + } + + prog, err := newProgramWithOptions(spec, opts, btf.NewCache()) + if errors.Is(err, asm.ErrUnsatisfiedMapReference) { + return nil, fmt.Errorf("cannot load program without loading its whole collection: %w", err) + } + return prog, err +} + +var ( + coreBadLoad = []byte(fmt.Sprintf("(18) r10 = 0x%x\n", btf.COREBadRelocationSentinel)) + // This log message was introduced by ebb676daa1a3 ("bpf: Print function name in + // addition to function id") which first appeared in v4.10 and has remained + // unchanged since. + coreBadCall = []byte(fmt.Sprintf("invalid func unknown#%d\n", btf.COREBadRelocationSentinel)) + kfuncBadCall = []byte(fmt.Sprintf("invalid func unknown#%d\n", kfuncCallPoisonBase)) +) + +func newProgramWithOptions(spec *ProgramSpec, opts ProgramOptions, c *btf.Cache) (*Program, error) { + if len(spec.Instructions) == 0 { + return nil, errors.New("instructions cannot be empty") + } + + if spec.Type == UnspecifiedProgram { + return nil, errors.New("can't load program of unspecified type") + } + + if spec.ByteOrder != nil && spec.ByteOrder != internal.NativeEndian { + return nil, fmt.Errorf("can't load %s program on %s", spec.ByteOrder, internal.NativeEndian) + } + + // Kernels before 5.0 (6c4fc209fcf9 "bpf: remove useless version check for prog load") + // require the version field to be set to the value of the KERNEL_VERSION + // macro for kprobe-type programs. + // Overwrite Kprobe program version if set to zero or the magic version constant. 
+ kv := spec.KernelVersion + if spec.Type == Kprobe && (kv == 0 || kv == internal.MagicKernelVersion) { + v, err := linux.KernelVersion() + if err != nil { + return nil, fmt.Errorf("detecting kernel version: %w", err) + } + kv = v.Kernel() + } + + p, progType := platform.DecodeConstant(spec.Type) + if p != platform.Native { + return nil, fmt.Errorf("program type %s (%s): %w", spec.Type, p, internal.ErrNotSupportedOnOS) + } + + attr := &sys.ProgLoadAttr{ + ProgName: maybeFillObjName(spec.Name), + ProgType: sys.ProgType(progType), + ProgFlags: spec.Flags, + ProgIfindex: spec.Ifindex, + ExpectedAttachType: sys.AttachType(spec.AttachType), + License: sys.NewStringPointer(spec.License), + KernVersion: kv, + } + + insns := make(asm.Instructions, len(spec.Instructions)) + copy(insns, spec.Instructions) + + var b btf.Builder + if err := applyRelocations(insns, spec.ByteOrder, &b, c, opts.KernelTypes, opts.ExtraRelocationTargets); err != nil { + return nil, fmt.Errorf("apply CO-RE relocations: %w", err) + } + + errExtInfos := haveProgramExtInfos() + if !b.Empty() && errors.Is(errExtInfos, ErrNotSupported) { + // There is at least one CO-RE relocation which relies on a stable local + // type ID. + // Return ErrNotSupported instead of E2BIG if there is no BTF support. + return nil, errExtInfos + } + + if errExtInfos == nil { + // Only add func and line info if the kernel supports it. This allows + // BPF compiled with modern toolchains to work on old kernels. 
+ fib, lib, err := btf.MarshalExtInfos(insns, &b) + if err != nil { + return nil, fmt.Errorf("marshal ext_infos: %w", err) + } + + attr.FuncInfoRecSize = btf.FuncInfoSize + attr.FuncInfoCnt = uint32(len(fib)) / btf.FuncInfoSize + attr.FuncInfo = sys.SlicePointer(fib) + + attr.LineInfoRecSize = btf.LineInfoSize + attr.LineInfoCnt = uint32(len(lib)) / btf.LineInfoSize + attr.LineInfo = sys.SlicePointer(lib) + } + + if !b.Empty() { + handle, err := btf.NewHandle(&b) + if err != nil { + return nil, fmt.Errorf("load BTF: %w", err) + } + defer handle.Close() + + attr.ProgBtfFd = uint32(handle.FD()) + } + + kconfig, err := resolveKconfigReferences(insns) + if err != nil { + return nil, fmt.Errorf("resolve .kconfig: %w", err) + } + defer kconfig.Close() + + if err := resolveKsymReferences(insns); err != nil { + return nil, fmt.Errorf("resolve .ksyms: %w", err) + } + + if err := fixupAndValidate(insns); err != nil { + return nil, err + } + + handles, err := fixupKfuncs(insns, c) + if err != nil { + return nil, fmt.Errorf("fixing up kfuncs: %w", err) + } + defer handles.Close() + + if len(handles) > 0 { + fdArray := handles.fdArray() + attr.FdArray = sys.SlicePointer(fdArray) + } + + buf := bytes.NewBuffer(make([]byte, 0, insns.Size())) + err = insns.Marshal(buf, internal.NativeEndian) + if err != nil { + return nil, err + } + + bytecode := buf.Bytes() + attr.Insns = sys.SlicePointer(bytecode) + attr.InsnCnt = uint32(len(bytecode) / asm.InstructionSize) + + if spec.AttachTarget != nil { + targetID, err := findTargetInProgram(spec.AttachTarget, spec.AttachTo, spec.Type, spec.AttachType) + if err != nil { + return nil, fmt.Errorf("attach %s/%s: %w", spec.Type, spec.AttachType, err) + } + + attr.AttachBtfId = targetID + attr.AttachBtfObjFd = uint32(spec.AttachTarget.FD()) + defer runtime.KeepAlive(spec.AttachTarget) + } else if spec.AttachTo != "" { + var targetMember string + attachTo := spec.AttachTo + + if spec.Type == StructOps { + attachTo, targetMember, _ = 
strings.Cut(attachTo, ":") + if targetMember == "" { + return nil, fmt.Errorf("struct_ops: AttachTo must be ':' (got %s)", spec.AttachTo) + } + } + + module, targetID, err := findProgramTargetInKernel(attachTo, spec.Type, spec.AttachType, c) + if err != nil && !errors.Is(err, errUnrecognizedAttachType) { + // We ignore errUnrecognizedAttachType since AttachTo may be non-empty + // for programs that don't attach anywhere. + return nil, fmt.Errorf("attach %s/%s: %w", spec.Type, spec.AttachType, err) + } + + if spec.Type == StructOps { + var s *btf.Spec + + target := btf.Type((*btf.Struct)(nil)) + s, module, err = findTargetInKernel(attachTo, &target, c) + if err != nil { + return nil, fmt.Errorf("lookup struct_ops kern type %q: %w", attachTo, err) + } + kType := target.(*btf.Struct) + + targetID, err = s.TypeID(kType) + if err != nil { + return nil, fmt.Errorf("type id for %s: %w", kType.TypeName(), err) + } + + idx := slices.IndexFunc(kType.Members, func(m btf.Member) bool { + return m.Name == targetMember + }) + if idx < 0 { + return nil, fmt.Errorf("member %q not found in %s", targetMember, kType.Name) + } + + // ExpectedAttachType: index of the target member in the struct + attr.ExpectedAttachType = sys.AttachType(idx) + } + + attr.AttachBtfId = targetID + if module != nil && attr.AttachBtfObjFd == 0 { + attr.AttachBtfObjFd = uint32(module.FD()) + defer module.Close() + } + } + + if platform.IsWindows && opts.LogLevel != 0 { + return nil, fmt.Errorf("log level: %w", internal.ErrNotSupportedOnOS) + } + + var logBuf []byte + var fd *sys.FD + if opts.LogDisabled { + // Loading with logging disabled should never retry. + fd, err = sys.ProgLoad(attr) + if err == nil { + return &Program{"", fd, spec.Name, "", spec.Type}, nil + } + } else { + // Only specify log size if log level is also specified. Setting size + // without level results in EINVAL. Level will be bumped to LogLevelBranch + // if the first load fails. 
+ if opts.LogLevel != 0 { + attr.LogLevel = opts.LogLevel + attr.LogSize = internal.Between(opts.LogSizeStart, minVerifierLogSize, maxVerifierLogSize) + } + + attempts := 1 + for { + if attr.LogLevel != 0 { + logBuf = make([]byte, attr.LogSize) + attr.LogBuf = sys.SlicePointer(logBuf) + } + + fd, err = sys.ProgLoad(attr) + if err == nil { + return &Program{unix.ByteSliceToString(logBuf), fd, spec.Name, "", spec.Type}, nil + } + + if !retryLogAttrs(attr, opts.LogSizeStart, err) { + break + } + + if attempts >= maxVerifierAttempts { + return nil, fmt.Errorf("load program: %w (bug: hit %d verifier attempts)", err, maxVerifierAttempts) + } + attempts++ + } + } + + end := bytes.IndexByte(logBuf, 0) + if end < 0 { + end = len(logBuf) + } + + tail := logBuf[max(end-256, 0):end] + switch { + case errors.Is(err, unix.EPERM): + if len(logBuf) > 0 && logBuf[0] == 0 { + // EPERM due to RLIMIT_MEMLOCK happens before the verifier, so we can + // check that the log is empty to reduce false positives. + return nil, fmt.Errorf("load program: %w (MEMLOCK may be too low, consider rlimit.RemoveMemlock)", err) + } + + case errors.Is(err, unix.EFAULT): + // EFAULT is returned when the kernel hits a verifier bug, and always + // overrides ENOSPC, defeating the buffer growth strategy. Warn the user + // that they may need to increase the buffer size manually. + return nil, fmt.Errorf("load program: %w (hit verifier bug, increase LogSizeStart to fit the log and check dmesg)", err) + + case errors.Is(err, unix.EINVAL): + if bytes.Contains(tail, coreBadCall) { + err = errBadRelocation + break + } else if bytes.Contains(tail, kfuncBadCall) { + err = errUnknownKfunc + break + } + + case errors.Is(err, unix.EACCES): + if bytes.Contains(tail, coreBadLoad) { + err = errBadRelocation + break + } + } + + // hasFunctionReferences may be expensive, so check it last. 
+ if (errors.Is(err, unix.EINVAL) || errors.Is(err, unix.EPERM)) && + hasFunctionReferences(spec.Instructions) { + if err := haveBPFToBPFCalls(); err != nil { + return nil, fmt.Errorf("load program: %w", err) + } + } + + return nil, internal.ErrorWithLog("load program", err, logBuf) +} + +func retryLogAttrs(attr *sys.ProgLoadAttr, startSize uint32, err error) bool { + if attr.LogSize == maxVerifierLogSize { + // Maximum buffer size reached, don't grow or retry. + return false + } + + // ENOSPC means the log was enabled on the previous iteration, so we only + // need to grow the buffer. + if errors.Is(err, unix.ENOSPC) { + if attr.LogTrueSize != 0 { + // Kernel supports LogTrueSize and previous iteration undershot the buffer + // size. Try again with the given true size. + attr.LogSize = attr.LogTrueSize + return true + } + + // Ensure the size doesn't overflow. + const factor = 2 + if attr.LogSize >= maxVerifierLogSize/factor { + attr.LogSize = maxVerifierLogSize + return true + } + + // Make an educated guess how large the buffer should be by multiplying. Due + // to int division, this rounds down odd sizes. + attr.LogSize = internal.Between(attr.LogSize, minVerifierLogSize, maxVerifierLogSize/factor) + attr.LogSize *= factor + + return true + } + + if attr.LogLevel == 0 { + // Loading the program failed, it wasn't a buffer-related error, and the log + // was disabled the previous iteration. Enable basic logging and retry. + attr.LogLevel = LogLevelBranch + attr.LogSize = internal.Between(startSize, minVerifierLogSize, maxVerifierLogSize) + return true + } + + // Loading the program failed for a reason other than buffer size and the log + // was already enabled the previous iteration. Don't retry. + return false +} + +// NewProgramFromFD creates a [Program] around a raw fd. +// +// You should not use fd after calling this function. +// +// Requires at least Linux 4.13. Returns an error on Windows. 
+func NewProgramFromFD(fd int) (*Program, error) { + f, err := sys.NewFD(fd) + if err != nil { + return nil, err + } + + return newProgramFromFD(f) +} + +// NewProgramFromID returns the [Program] for a given program id. Returns +// [ErrNotExist] if there is no eBPF program with the given id. +// +// Requires at least Linux 4.13. +func NewProgramFromID(id ProgramID) (*Program, error) { + fd, err := sys.ProgGetFdById(&sys.ProgGetFdByIdAttr{ + Id: uint32(id), + }) + if err != nil { + return nil, fmt.Errorf("get program by id: %w", err) + } + + return newProgramFromFD(fd) +} + +func newProgramFromFD(fd *sys.FD) (*Program, error) { + info, err := minimalProgramInfoFromFd(fd) + if err != nil { + fd.Close() + return nil, fmt.Errorf("discover program type: %w", err) + } + + return &Program{"", fd, info.Name, "", info.Type}, nil +} + +func (p *Program) String() string { + if p.name != "" { + return fmt.Sprintf("%s(%s)#%v", p.typ, p.name, p.fd) + } + return fmt.Sprintf("%s(%v)", p.typ, p.fd) +} + +// Type returns the underlying type of the program. +func (p *Program) Type() ProgramType { + return p.typ +} + +// Info returns metadata about the program. +// +// Requires at least 4.10. +func (p *Program) Info() (*ProgramInfo, error) { + return newProgramInfoFromFd(p.fd) +} + +// Stats returns runtime statistics about the Program. Requires BPF statistics +// collection to be enabled, see [EnableStats]. +// +// Requires at least Linux 5.8. +func (p *Program) Stats() (*ProgramStats, error) { + return newProgramStatsFromFd(p.fd) +} + +// Handle returns a reference to the program's type information in the kernel. +// +// Returns ErrNotSupported if the kernel has no BTF support, or if there is no +// BTF associated with the program. 
+func (p *Program) Handle() (*btf.Handle, error) { + info, err := p.Info() + if err != nil { + return nil, err + } + + id, ok := info.BTFID() + if !ok { + return nil, fmt.Errorf("program %s: retrieve BTF ID: %w", p, ErrNotSupported) + } + + return btf.NewHandleFromID(id) +} + +// FD gets the file descriptor of the Program. +// +// It is invalid to call this function after Close has been called. +func (p *Program) FD() int { + return p.fd.Int() +} + +// Clone creates a duplicate of the Program. +// +// Closing the duplicate does not affect the original, and vice versa. +// +// Cloning a nil Program returns nil. +func (p *Program) Clone() (*Program, error) { + if p == nil { + return nil, nil + } + + dup, err := p.fd.Dup() + if err != nil { + return nil, fmt.Errorf("can't clone program: %w", err) + } + + return &Program{p.VerifierLog, dup, p.name, "", p.typ}, nil +} + +// Pin persists the Program on the BPF virtual file system past the lifetime of +// the process that created it +// +// Calling Pin on a previously pinned program will overwrite the path, except when +// the new path already exists. Re-pinning across filesystems is not supported. +// +// This requires bpffs to be mounted above fileName. +// See https://docs.cilium.io/en/stable/network/kubernetes/configuration/#mounting-bpffs-with-systemd +func (p *Program) Pin(fileName string) error { + if err := sys.Pin(p.pinnedPath, fileName, p.fd); err != nil { + return err + } + p.pinnedPath = fileName + return nil +} + +// Unpin removes the persisted state for the Program from the BPF virtual filesystem. +// +// Failed calls to Unpin will not alter the state returned by IsPinned. +// +// Unpinning an unpinned Program returns nil. +func (p *Program) Unpin() error { + if err := sys.Unpin(p.pinnedPath); err != nil { + return err + } + p.pinnedPath = "" + return nil +} + +// IsPinned returns true if the Program has a non-empty pinned path. 
+func (p *Program) IsPinned() bool { + return p.pinnedPath != "" +} + +// Close the Program's underlying file descriptor, which could unload +// the program from the kernel if it is not pinned or attached to a +// kernel hook. +func (p *Program) Close() error { + if p == nil { + return nil + } + + return p.fd.Close() +} + +// Various options for Run'ing a Program +type RunOptions struct { + // Program's data input. Required field. + // + // The kernel expects at least 14 bytes input for an ethernet header for + // XDP and SKB programs. + Data []byte + // Program's data after Program has run. Caller must allocate. Optional field. + DataOut []byte + // Program's context input. Optional field. + Context interface{} + // Program's context after Program has run. Must be a pointer or slice. Optional field. + ContextOut interface{} + // Minimum number of times to run Program. Optional field. Defaults to 1. + // + // The program may be executed more often than this due to interruptions, e.g. + // when runtime.AllThreadsSyscall is invoked. + Repeat uint32 + // Optional flags. + Flags uint32 + // CPU to run Program on. Optional field. + // Note not all program types support this field. + CPU uint32 + // Called whenever the syscall is interrupted, and should be set to testing.B.ResetTimer + // or similar. Typically used during benchmarking. Optional field. + // + // Deprecated: use [testing.B.ReportMetric] with unit "ns/op" instead. + Reset func() +} + +// Test runs the Program in the kernel with the given input and returns the +// value returned by the eBPF program. +// +// Note: the kernel expects at least 14 bytes input for an ethernet header for +// XDP and SKB programs. +// +// This function requires at least Linux 4.12. +func (p *Program) Test(in []byte) (uint32, []byte, error) { + // Older kernels ignore the dataSizeOut argument when copying to user space. + // Combined with things like bpf_xdp_adjust_head() we don't really know what the final + // size will be. 
Hence we allocate an output buffer which we hope will always be large + // enough, and panic if the kernel wrote past the end of the allocation. + // See https://patchwork.ozlabs.org/cover/1006822/ + var out []byte + if len(in) > 0 { + out = make([]byte, len(in)+outputPad) + } + + opts := RunOptions{ + Data: in, + DataOut: out, + Repeat: 1, + } + + ret, _, err := p.run(&opts) + if err != nil { + return ret, nil, fmt.Errorf("test program: %w", err) + } + return ret, opts.DataOut, nil +} + +// Run runs the Program in kernel with given RunOptions. +// +// Note: the same restrictions from Test apply. +func (p *Program) Run(opts *RunOptions) (uint32, error) { + if opts == nil { + opts = &RunOptions{} + } + + ret, _, err := p.run(opts) + if err != nil { + return ret, fmt.Errorf("run program: %w", err) + } + return ret, nil +} + +// Benchmark runs the Program with the given input for a number of times +// and returns the time taken per iteration. +// +// Returns the result of the last execution of the program and the time per +// run or an error. reset is called whenever the benchmark syscall is +// interrupted, and should be set to testing.B.ResetTimer or similar. +// +// This function requires at least Linux 4.12. +func (p *Program) Benchmark(in []byte, repeat int, reset func()) (uint32, time.Duration, error) { + if uint(repeat) > math.MaxUint32 { + return 0, 0, fmt.Errorf("repeat is too high") + } + + opts := RunOptions{ + Data: in, + Repeat: uint32(repeat), + Reset: reset, + } + + ret, total, err := p.run(&opts) + if err != nil { + return ret, total, fmt.Errorf("benchmark program: %w", err) + } + return ret, total, nil +} + +var haveProgRun = internal.NewFeatureTest("BPF_PROG_RUN", func() error { + if platform.IsWindows { + return nil + } + + prog, err := NewProgram(&ProgramSpec{ + // SocketFilter does not require privileges on newer kernels. 
+ Type: SocketFilter, + Instructions: asm.Instructions{ + asm.LoadImm(asm.R0, 0, asm.DWord), + asm.Return(), + }, + License: "MIT", + }) + if err != nil { + // This may be because we lack sufficient permissions, etc. + return err + } + defer prog.Close() + + in := internal.EmptyBPFContext + attr := sys.ProgRunAttr{ + ProgFd: uint32(prog.FD()), + DataSizeIn: uint32(len(in)), + DataIn: sys.SlicePointer(in), + } + + err = sys.ProgRun(&attr) + switch { + case errors.Is(err, unix.EINVAL): + // Check for EINVAL specifically, rather than err != nil since we + // otherwise misdetect due to insufficient permissions. + return internal.ErrNotSupported + + case errors.Is(err, unix.EINTR): + // We know that PROG_TEST_RUN is supported if we get EINTR. + return nil + + case errors.Is(err, sys.ENOTSUPP): + // The first PROG_TEST_RUN patches shipped in 4.12 didn't include + // a test runner for SocketFilter. ENOTSUPP means PROG_TEST_RUN is + // supported, but not for the program type used in the probe. + return nil + } + + return err +}, "4.12", "windows:0.20") + +func (p *Program) run(opts *RunOptions) (uint32, time.Duration, error) { + if uint(len(opts.Data)) > math.MaxUint32 { + return 0, 0, fmt.Errorf("input is too long") + } + + if err := haveProgRun(); err != nil { + return 0, 0, err + } + + var ctxIn []byte + if opts.Context != nil { + var err error + ctxIn, err = binary.Append(nil, internal.NativeEndian, opts.Context) + if err != nil { + return 0, 0, fmt.Errorf("cannot serialize context: %v", err) + } + } + + var ctxOut []byte + if opts.ContextOut != nil { + ctxOut = make([]byte, binary.Size(opts.ContextOut)) + } else if platform.IsWindows && len(ctxIn) > 0 { + // Windows rejects a non-zero ctxIn with a nil ctxOut. 
+ ctxOut = make([]byte, len(ctxIn)) + } + + attr := sys.ProgRunAttr{ + ProgFd: p.fd.Uint(), + DataSizeIn: uint32(len(opts.Data)), + DataSizeOut: uint32(len(opts.DataOut)), + DataIn: sys.SlicePointer(opts.Data), + DataOut: sys.SlicePointer(opts.DataOut), + Repeat: uint32(opts.Repeat), + CtxSizeIn: uint32(len(ctxIn)), + CtxSizeOut: uint32(len(ctxOut)), + CtxIn: sys.SlicePointer(ctxIn), + CtxOut: sys.SlicePointer(ctxOut), + Flags: opts.Flags, + Cpu: opts.CPU, + } + + if p.Type() == Syscall && ctxIn != nil && ctxOut != nil { + // Linux syscall program errors on non-nil ctxOut, uses ctxIn + // for both input and output. Shield the user from this wart. + if len(ctxIn) != len(ctxOut) { + return 0, 0, errors.New("length mismatch: Context and ContextOut") + } + attr.CtxOut, attr.CtxSizeOut = sys.TypedPointer[uint8]{}, 0 + ctxOut = ctxIn + } + +retry: + for { + err := sys.ProgRun(&attr) + if err == nil { + break retry + } + + if errors.Is(err, unix.EINTR) { + if attr.Repeat <= 1 { + // Older kernels check whether enough repetitions have been + // executed only after checking for pending signals. + // + // run signal? done? run ... + // + // As a result we can get EINTR for repeat==1 even though + // the program was run exactly once. Treat this as a + // successful run instead. + // + // Since commit 607b9cc92bd7 ("bpf: Consolidate shared test timing code") + // the conditions are reversed: + // run done? signal? ... + break retry + } + + if opts.Reset != nil { + opts.Reset() + } + continue retry + } + + if errors.Is(err, sys.ENOTSUPP) { + return 0, 0, fmt.Errorf("kernel doesn't support running %s: %w", p.Type(), ErrNotSupported) + } + + return 0, 0, err + } + + if opts.DataOut != nil { + if int(attr.DataSizeOut) > cap(opts.DataOut) { + // Houston, we have a problem. The program created more data than we allocated, + // and the kernel wrote past the end of our buffer. 
+ panic("kernel wrote past end of output buffer") + } + opts.DataOut = opts.DataOut[:int(attr.DataSizeOut)] + } + + if opts.ContextOut != nil { + b := bytes.NewReader(ctxOut) + if err := binary.Read(b, internal.NativeEndian, opts.ContextOut); err != nil { + return 0, 0, fmt.Errorf("failed to decode ContextOut: %v", err) + } + } + + total := time.Duration(attr.Duration) * time.Nanosecond + return attr.Retval, total, nil +} + +func unmarshalProgram(buf sysenc.Buffer) (*Program, error) { + var id uint32 + if err := buf.Unmarshal(&id); err != nil { + return nil, err + } + + // Looking up an entry in a nested map or prog array returns an id, + // not an fd. + return NewProgramFromID(ProgramID(id)) +} + +func marshalProgram(p *Program, length int) ([]byte, error) { + if p == nil { + return nil, errors.New("can't marshal a nil Program") + } + + if length != 4 { + return nil, fmt.Errorf("can't marshal program to %d bytes", length) + } + + buf := make([]byte, 4) + internal.NativeEndian.PutUint32(buf, p.fd.Uint()) + return buf, nil +} + +// LoadPinnedProgram loads a Program from a pin (file) on the BPF virtual +// filesystem. +// +// Requires at least Linux 4.11. +func LoadPinnedProgram(fileName string, opts *LoadPinOptions) (*Program, error) { + fd, typ, err := sys.ObjGetTyped(&sys.ObjGetAttr{ + Pathname: sys.NewStringPointer(fileName), + FileFlags: opts.Marshal(), + }) + if err != nil { + return nil, err + } + + if typ != sys.BPF_TYPE_PROG { + _ = fd.Close() + return nil, fmt.Errorf("%s is not a Program", fileName) + } + + p, err := newProgramFromFD(fd) + if err == nil { + p.pinnedPath = fileName + + if haveObjName() != nil { + p.name = filepath.Base(fileName) + } + } + + return p, err +} + +// ProgramGetNextID returns the ID of the next eBPF program. +// +// Returns ErrNotExist, if there is no next eBPF program. 
+func ProgramGetNextID(startID ProgramID) (ProgramID, error) { + attr := &sys.ProgGetNextIdAttr{Id: uint32(startID)} + return ProgramID(attr.NextId), sys.ProgGetNextId(attr) +} + +// BindMap binds map to the program and is only released once program is released. +// +// This may be used in cases where metadata should be associated with the program +// which otherwise does not contain any references to the map. +func (p *Program) BindMap(m *Map) error { + attr := &sys.ProgBindMapAttr{ + ProgFd: uint32(p.FD()), + MapFd: uint32(m.FD()), + } + + return sys.ProgBindMap(attr) +} + +var errUnrecognizedAttachType = errors.New("unrecognized attach type") + +// find an attach target type in the kernel. +// +// name, progType and attachType determine which type we need to attach to. +// +// The attach target may be in a loaded kernel module. +// In that case the returned handle will be non-nil. +// The caller is responsible for closing the handle. +// +// Returns errUnrecognizedAttachType if the combination of progType and attachType +// is not recognised. 
+func findProgramTargetInKernel(name string, progType ProgramType, attachType AttachType, cache *btf.Cache) (*btf.Handle, btf.TypeID, error) { + type match struct { + p ProgramType + a AttachType + } + + var ( + typeName, featureName string + target btf.Type + ) + + switch (match{progType, attachType}) { + case match{StructOps, AttachStructOps}: + typeName = name + featureName = "struct_ops " + name + target = (*btf.Struct)(nil) + case match{LSM, AttachLSMMac}: + typeName = "bpf_lsm_" + name + featureName = name + " LSM hook" + target = (*btf.Func)(nil) + case match{Tracing, AttachTraceIter}: + typeName = "bpf_iter_" + name + featureName = name + " iterator" + target = (*btf.Func)(nil) + case match{Tracing, AttachTraceFEntry}: + typeName = name + featureName = fmt.Sprintf("fentry %s", name) + target = (*btf.Func)(nil) + case match{Tracing, AttachTraceFExit}: + typeName = name + featureName = fmt.Sprintf("fexit %s", name) + target = (*btf.Func)(nil) + case match{Tracing, AttachModifyReturn}: + typeName = name + featureName = fmt.Sprintf("fmod_ret %s", name) + target = (*btf.Func)(nil) + case match{Tracing, AttachTraceRawTp}: + typeName = fmt.Sprintf("btf_trace_%s", name) + featureName = fmt.Sprintf("raw_tp %s", name) + target = (*btf.Typedef)(nil) + default: + return nil, 0, errUnrecognizedAttachType + } + + spec, module, err := findTargetInKernel(typeName, &target, cache) + if errors.Is(err, btf.ErrNotFound) { + return nil, 0, &internal.UnsupportedFeatureError{Name: featureName} + } + // See cilium/ebpf#894. Until we can disambiguate between equally-named kernel + // symbols, we should explicitly refuse program loads. They will not reliably + // do what the caller intended. 
+ if errors.Is(err, btf.ErrMultipleMatches) { + return nil, 0, fmt.Errorf("attaching to ambiguous kernel symbol is not supported: %w", err) + } + if err != nil { + return nil, 0, fmt.Errorf("find target for %s: %w", featureName, err) + } + + id, err := spec.TypeID(target) + if err != nil { + module.Close() + return nil, 0, err + } + + return module, id, nil +} + +// findTargetInKernel attempts to find a named type in the current kernel. +// +// target will point at the found type after a successful call. Searches both +// vmlinux and any loaded modules. +// +// Returns a non-nil handle if the type was found in a module, or btf.ErrNotFound +// if the type wasn't found at all. +func findTargetInKernel(typeName string, target *btf.Type, cache *btf.Cache) (*btf.Spec, *btf.Handle, error) { + kernelSpec, err := cache.Kernel() + if err != nil { + return nil, nil, err + } + + err = kernelSpec.TypeByName(typeName, target) + if errors.Is(err, btf.ErrNotFound) { + spec, module, err := findTargetInModule(typeName, target, cache) + if err != nil { + return nil, nil, fmt.Errorf("find target in modules: %w", err) + } + return spec, module, nil + } + if err != nil { + return nil, nil, fmt.Errorf("find target in vmlinux: %w", err) + } + return kernelSpec, nil, err +} + +// findTargetInModule attempts to find a named type in any loaded module. +// +// base must contain the kernel's types and is used to parse kmod BTF. Modules +// are searched in the order they were loaded. +// +// Returns btf.ErrNotFound if the target can't be found in any module. 
+func findTargetInModule(typeName string, target *btf.Type, cache *btf.Cache) (*btf.Spec, *btf.Handle, error) { + it := new(btf.HandleIterator) + defer it.Handle.Close() + + for it.Next() { + info, err := it.Handle.Info() + if err != nil { + return nil, nil, fmt.Errorf("get info for BTF ID %d: %w", it.ID, err) + } + + if !info.IsModule() { + continue + } + + spec, err := cache.Module(info.Name) + if err != nil { + return nil, nil, fmt.Errorf("parse types for module %s: %w", info.Name, err) + } + + err = spec.TypeByName(typeName, target) + if errors.Is(err, btf.ErrNotFound) { + continue + } + if err != nil { + return nil, nil, fmt.Errorf("lookup type in module %s: %w", info.Name, err) + } + + return spec, it.Take(), nil + } + if err := it.Err(); err != nil { + return nil, nil, fmt.Errorf("iterate modules: %w", err) + } + + return nil, nil, btf.ErrNotFound +} + +// find an attach target type in a program. +// +// Returns errUnrecognizedAttachType. +func findTargetInProgram(prog *Program, name string, progType ProgramType, attachType AttachType) (btf.TypeID, error) { + type match struct { + p ProgramType + a AttachType + } + + var typeName string + switch (match{progType, attachType}) { + case match{Extension, AttachNone}, + match{Tracing, AttachTraceFEntry}, + match{Tracing, AttachTraceFExit}: + typeName = name + default: + return 0, errUnrecognizedAttachType + } + + btfHandle, err := prog.Handle() + if err != nil { + return 0, fmt.Errorf("load target BTF: %w", err) + } + defer btfHandle.Close() + + spec, err := btfHandle.Spec(nil) + if err != nil { + return 0, err + } + + var targetFunc *btf.Func + err = spec.TypeByName(typeName, &targetFunc) + if err != nil { + return 0, fmt.Errorf("find target %s: %w", typeName, err) + } + + return spec.TypeID(targetFunc) +} diff --git a/vendor/github.com/cilium/ebpf/staticcheck.conf b/vendor/github.com/cilium/ebpf/staticcheck.conf new file mode 100644 index 000000000..cfc907da3 --- /dev/null +++ 
b/vendor/github.com/cilium/ebpf/staticcheck.conf @@ -0,0 +1,3 @@ +# Default configuration from https://staticcheck.dev/docs/configuration with +# SA4003 disabled. Remove when https://github.com/cilium/ebpf/issues/1876 is fixed. +checks = ["all", "-SA9003", "-ST1000", "-ST1003", "-ST1016", "-ST1020", "-ST1021", "-ST1022", "-ST1023", "-SA4003"] diff --git a/vendor/github.com/cilium/ebpf/struct_ops.go b/vendor/github.com/cilium/ebpf/struct_ops.go new file mode 100644 index 000000000..162f344ea --- /dev/null +++ b/vendor/github.com/cilium/ebpf/struct_ops.go @@ -0,0 +1,139 @@ +package ebpf + +import ( + "fmt" + "reflect" + "strings" + + "github.com/cilium/ebpf/btf" + "github.com/cilium/ebpf/internal" +) + +const structOpsValuePrefix = "bpf_struct_ops_" + +// structOpsFindInnerType returns the "inner" struct inside a value struct_ops type. +// +// Given a value like: +// +// struct bpf_struct_ops_bpf_testmod_ops { +// struct bpf_struct_ops_common common; +// struct bpf_testmod_ops data; +// }; +// +// this function returns the *btf.Struct for "bpf_testmod_ops" along with the +// byte offset of the "data" member inside the value type. +// +// The inner struct name is derived by trimming the "bpf_struct_ops_" prefix +// from the value's name. +func structOpsFindInnerType(vType *btf.Struct) (*btf.Struct, uint32, error) { + innerName := strings.TrimPrefix(vType.Name, structOpsValuePrefix) + + for _, m := range vType.Members { + if st, ok := btf.As[*btf.Struct](m.Type); ok && st.Name == innerName { + return st, m.Offset.Bytes(), nil + } + } + + return nil, 0, fmt.Errorf("inner struct %q not found in %s", innerName, vType.Name) +} + +// structOpsFindTarget resolves the kernel-side "value struct" for a struct_ops map. +func structOpsFindTarget(userType *btf.Struct, cache *btf.Cache) (vType *btf.Struct, id btf.TypeID, module *btf.Handle, err error) { + // the kernel value type name, e.g. 
"bpf_struct_ops_" + vTypeName := structOpsValuePrefix + userType.Name + + target := btf.Type((*btf.Struct)(nil)) + spec, module, err := findTargetInKernel(vTypeName, &target, cache) + if err != nil { + return nil, 0, nil, fmt.Errorf("lookup value type %q: %w", vTypeName, err) + } + + id, err = spec.TypeID(target) + if err != nil { + return nil, 0, nil, err + } + + return target.(*btf.Struct), id, module, nil +} + +// structOpsPopulateValue writes a `prog FD` which references to `p` into the +// struct_ops value buffer `kernVData` at byte offset `dstOff` corresponding to +// the member `km`. +func structOpsPopulateValue(km btf.Member, kernVData []byte, p *Program) error { + kmPtr, ok := btf.As[*btf.Pointer](km.Type) + if !ok { + return fmt.Errorf("member %s is not a func pointer", km.Name) + } + + if _, isFuncProto := btf.As[*btf.FuncProto](kmPtr.Target); !isFuncProto { + return fmt.Errorf("member %s is not a func pointer", km.Name) + } + + dstOff := int(km.Offset.Bytes()) + if dstOff < 0 || dstOff+8 > len(kernVData) { + return fmt.Errorf("member %q: value buffer too small for func ptr", km.Name) + } + + internal.NativeEndian.PutUint64(kernVData[dstOff:dstOff+8], uint64(p.FD())) + return nil +} + +// structOpsCopyMember copies a single member from the user struct (m) +// into the kernel value struct (km) for struct_ops. 
+func structOpsCopyMember(m, km btf.Member, data []byte, kernVData []byte) error { + mSize, err := btf.Sizeof(m.Type) + if err != nil { + return fmt.Errorf("sizeof(user.%s): %w", m.Name, err) + } + kSize, err := btf.Sizeof(km.Type) + if err != nil { + return fmt.Errorf("sizeof(kernel.%s): %w", km.Name, err) + } + if mSize != kSize { + return fmt.Errorf("size mismatch for %s: user=%d kernel=%d", m.Name, mSize, kSize) + } + if km.BitfieldSize > 0 || m.BitfieldSize > 0 { + return fmt.Errorf("bitfield %s not supported", m.Name) + } + + srcOff := int(m.Offset.Bytes()) + dstOff := int(km.Offset.Bytes()) + + if srcOff < 0 || srcOff+mSize > len(data) { + return fmt.Errorf("member %q: userdata is too small", m.Name) + } + + if dstOff < 0 || dstOff+mSize > len(kernVData) { + return fmt.Errorf("member %q: value type is too small", m.Name) + } + + // skip mods(const, restrict, volatile and typetag) + // and typedef to check type compatibility + mType := btf.UnderlyingType(m.Type) + kernMType := btf.UnderlyingType(km.Type) + if reflect.TypeOf(mType) != reflect.TypeOf(kernMType) { + return fmt.Errorf("unmatched member type %s != %s (kernel)", m.Name, km.Name) + } + + switch mType.(type) { + case *btf.Struct, *btf.Union: + if !structOpsIsMemZeroed(data[srcOff : srcOff+mSize]) { + return fmt.Errorf("non-zero nested struct %s: %w", m.Name, ErrNotSupported) + } + // the bytes has zeroed value, we simply skip the copy. + return nil + } + + copy(kernVData[dstOff:dstOff+mSize], data[srcOff:srcOff+mSize]) + return nil +} + +// structOpsIsMemZeroed() checks whether all bytes in data are zero. 
+func structOpsIsMemZeroed(data []byte) bool { + for _, b := range data { + if b != 0 { + return false + } + } + return true +} diff --git a/vendor/github.com/cilium/ebpf/syscalls.go b/vendor/github.com/cilium/ebpf/syscalls.go new file mode 100644 index 000000000..f0f42b77d --- /dev/null +++ b/vendor/github.com/cilium/ebpf/syscalls.go @@ -0,0 +1,371 @@ +package ebpf + +import ( + "bytes" + "errors" + "fmt" + "math" + "os" + "runtime" + "strings" + + "github.com/cilium/ebpf/asm" + "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/linux" + "github.com/cilium/ebpf/internal/platform" + "github.com/cilium/ebpf/internal/sys" + "github.com/cilium/ebpf/internal/tracefs" + "github.com/cilium/ebpf/internal/unix" +) + +var ( + // pre-allocating these here since they may + // get called in hot code paths and cause + // unnecessary memory allocations + sysErrKeyNotExist = sys.Error(ErrKeyNotExist, unix.ENOENT) + sysErrKeyExist = sys.Error(ErrKeyExist, unix.EEXIST) + sysErrNotSupported = sys.Error(ErrNotSupported, sys.ENOTSUPP) +) + +// sanitizeName replaces all invalid characters in name with replacement. +// Passing a negative value for replacement will delete characters instead +// of replacing them. +// +// The set of allowed characters may change over time. 
+func sanitizeName(name string, replacement rune) string { + return strings.Map(func(char rune) rune { + switch { + case char >= 'A' && char <= 'Z': + return char + case char >= 'a' && char <= 'z': + return char + case char >= '0' && char <= '9': + return char + case char == '.': + return char + case char == '_': + return char + default: + return replacement + } + }, name) +} + +func maybeFillObjName(name string) sys.ObjName { + if errors.Is(haveObjName(), ErrNotSupported) { + return sys.ObjName{} + } + + name = sanitizeName(name, -1) + if errors.Is(objNameAllowsDot(), ErrNotSupported) { + name = strings.ReplaceAll(name, ".", "") + } + + return sys.NewObjName(name) +} + +func progLoad(insns asm.Instructions, typ ProgramType, license string) (*sys.FD, error) { + buf := bytes.NewBuffer(make([]byte, 0, insns.Size())) + if err := insns.Marshal(buf, internal.NativeEndian); err != nil { + return nil, err + } + bytecode := buf.Bytes() + + return sys.ProgLoad(&sys.ProgLoadAttr{ + ProgType: sys.ProgType(typ), + License: sys.NewStringPointer(license), + Insns: sys.SlicePointer(bytecode), + InsnCnt: uint32(len(bytecode) / asm.InstructionSize), + }) +} + +var haveNestedMaps = internal.NewFeatureTest("nested maps", func() error { + if platform.IsWindows { + // We only support efW versions which have this feature, no need to probe. + return nil + } + + _, err := sys.MapCreate(&sys.MapCreateAttr{ + MapType: sys.MapType(ArrayOfMaps), + KeySize: 4, + ValueSize: 4, + MaxEntries: 1, + // Invalid file descriptor. + InnerMapFd: ^uint32(0), + }) + if errors.Is(err, unix.EINVAL) { + return internal.ErrNotSupported + } + if errors.Is(err, unix.EBADF) { + return nil + } + return err +}, "4.12", "windows:0.21.0") + +var haveMapMutabilityModifiers = internal.NewFeatureTest("read- and write-only maps", func() error { + // This checks BPF_F_RDONLY_PROG and BPF_F_WRONLY_PROG. Since + // BPF_MAP_FREEZE appeared in 5.2 as well we don't do a separate check. 
+ m, err := sys.MapCreate(&sys.MapCreateAttr{ + MapType: sys.MapType(Array), + KeySize: 4, + ValueSize: 4, + MaxEntries: 1, + MapFlags: sys.BPF_F_RDONLY_PROG, + }) + if err != nil { + return internal.ErrNotSupported + } + _ = m.Close() + return nil +}, "5.2") + +var haveMmapableMaps = internal.NewFeatureTest("mmapable maps", func() error { + // This checks BPF_F_MMAPABLE, which appeared in 5.5 for array maps. + m, err := sys.MapCreate(&sys.MapCreateAttr{ + MapType: sys.MapType(Array), + KeySize: 4, + ValueSize: 4, + MaxEntries: 1, + MapFlags: sys.BPF_F_MMAPABLE, + }) + if err != nil { + return internal.ErrNotSupported + } + _ = m.Close() + return nil +}, "5.5") + +var haveInnerMaps = internal.NewFeatureTest("inner maps", func() error { + // This checks BPF_F_INNER_MAP, which appeared in 5.10. + m, err := sys.MapCreate(&sys.MapCreateAttr{ + MapType: sys.MapType(Array), + KeySize: 4, + ValueSize: 4, + MaxEntries: 1, + MapFlags: sys.BPF_F_INNER_MAP, + }) + + if err != nil { + return internal.ErrNotSupported + } + _ = m.Close() + return nil +}, "5.10") + +var haveNoPreallocMaps = internal.NewFeatureTest("prealloc maps", func() error { + // This checks BPF_F_NO_PREALLOC, which appeared in 4.6. 
+ m, err := sys.MapCreate(&sys.MapCreateAttr{ + MapType: sys.MapType(Hash), + KeySize: 4, + ValueSize: 4, + MaxEntries: 1, + MapFlags: sys.BPF_F_NO_PREALLOC, + }) + + if err != nil { + return internal.ErrNotSupported + } + _ = m.Close() + return nil +}, "4.6") + +func wrapMapError(err error) error { + if err == nil { + return nil + } + + if errors.Is(err, unix.ENOENT) { + return sysErrKeyNotExist + } + + if errors.Is(err, unix.EEXIST) { + return sysErrKeyExist + } + + if errors.Is(err, sys.ENOTSUPP) { + return sysErrNotSupported + } + + if errors.Is(err, unix.E2BIG) { + return fmt.Errorf("key too big for map: %w", err) + } + + return err +} + +var haveObjName = internal.NewFeatureTest("object names", func() error { + if platform.IsWindows { + // We only support efW versions which have this feature, no need to probe. + return nil + } + + attr := sys.MapCreateAttr{ + MapType: sys.MapType(Array), + KeySize: 4, + ValueSize: 4, + MaxEntries: 1, + MapName: sys.NewObjName("feature_test"), + } + + fd, err := sys.MapCreate(&attr) + if err != nil { + return internal.ErrNotSupported + } + + _ = fd.Close() + return nil +}, "4.15", "windows:0.21.0") + +var objNameAllowsDot = internal.NewFeatureTest("dot in object names", func() error { + if platform.IsWindows { + // We only support efW versions which have this feature, no need to probe. 
+ return nil + } + + if err := haveObjName(); err != nil { + return err + } + + attr := sys.MapCreateAttr{ + MapType: sys.MapType(Array), + KeySize: 4, + ValueSize: 4, + MaxEntries: 1, + MapName: sys.NewObjName(".test"), + } + + fd, err := sys.MapCreate(&attr) + if err != nil { + return internal.ErrNotSupported + } + + _ = fd.Close() + return nil +}, "5.2", "windows:0.21.0") + +var haveBatchAPI = internal.NewFeatureTest("map batch api", func() error { + var maxEntries uint32 = 2 + attr := sys.MapCreateAttr{ + MapType: sys.MapType(Hash), + KeySize: 4, + ValueSize: 4, + MaxEntries: maxEntries, + } + + fd, err := sys.MapCreate(&attr) + if err != nil { + return internal.ErrNotSupported + } + defer fd.Close() + + keys := []uint32{1, 2} + values := []uint32{3, 4} + kp, _ := marshalMapSyscallInput(keys, 8) + vp, _ := marshalMapSyscallInput(values, 8) + + err = sys.MapUpdateBatch(&sys.MapUpdateBatchAttr{ + MapFd: fd.Uint(), + Keys: kp, + Values: vp, + Count: maxEntries, + }) + if err != nil { + return internal.ErrNotSupported + } + return nil +}, "5.6") + +var haveProbeReadKernel = internal.NewFeatureTest("bpf_probe_read_kernel", func() error { + insns := asm.Instructions{ + asm.Mov.Reg(asm.R1, asm.R10), + asm.Add.Imm(asm.R1, -8), + asm.Mov.Imm(asm.R2, 8), + asm.Mov.Imm(asm.R3, 0), + asm.FnProbeReadKernel.Call(), + asm.Return(), + } + + fd, err := progLoad(insns, Kprobe, "GPL") + if err != nil { + return internal.ErrNotSupported + } + _ = fd.Close() + return nil +}, "5.5") + +var haveBPFToBPFCalls = internal.NewFeatureTest("bpf2bpf calls", func() error { + insns := asm.Instructions{ + asm.Call.Label("prog2").WithSymbol("prog1"), + asm.Return(), + asm.Mov.Imm(asm.R0, 0).WithSymbol("prog2"), + asm.Return(), + } + + fd, err := progLoad(insns, SocketFilter, "MIT") + if err != nil { + return internal.ErrNotSupported + } + _ = fd.Close() + return nil +}, "4.16") + +var haveSyscallWrapper = internal.NewFeatureTest("syscall wrapper", func() error { + prefix := 
linux.PlatformPrefix() + if prefix == "" { + return fmt.Errorf("unable to find the platform prefix for (%s)", runtime.GOARCH) + } + + args := tracefs.ProbeArgs{ + Type: tracefs.Kprobe, + Symbol: prefix + "sys_bpf", + Pid: -1, + } + + var err error + args.Group, err = tracefs.RandomGroup("ebpf_probe") + if err != nil { + return err + } + + evt, err := tracefs.NewEvent(args) + if errors.Is(err, os.ErrNotExist) { + return internal.ErrNotSupported + } + if err != nil { + return err + } + + return evt.Close() +}, "4.17") + +var haveProgramExtInfos = internal.NewFeatureTest("program ext_infos", func() error { + insns := asm.Instructions{ + asm.Mov.Imm(asm.R0, 0), + asm.Return(), + } + + buf := bytes.NewBuffer(make([]byte, 0, insns.Size())) + if err := insns.Marshal(buf, internal.NativeEndian); err != nil { + return err + } + bytecode := buf.Bytes() + + _, err := sys.ProgLoad(&sys.ProgLoadAttr{ + ProgType: sys.ProgType(SocketFilter), + License: sys.NewStringPointer("MIT"), + Insns: sys.SlicePointer(bytecode), + InsnCnt: uint32(len(bytecode) / asm.InstructionSize), + FuncInfoCnt: 1, + ProgBtfFd: math.MaxUint32, + }) + + if errors.Is(err, unix.EBADF) { + return nil + } + + if errors.Is(err, unix.E2BIG) { + return ErrNotSupported + } + + return err +}, "5.0") diff --git a/vendor/github.com/cilium/ebpf/types.go b/vendor/github.com/cilium/ebpf/types.go new file mode 100644 index 000000000..52ff75b5c --- /dev/null +++ b/vendor/github.com/cilium/ebpf/types.go @@ -0,0 +1,410 @@ +package ebpf + +import ( + "github.com/cilium/ebpf/internal/platform" + "github.com/cilium/ebpf/internal/sys" +) + +//go:generate go tool stringer -output types_string.go -type=MapType,ProgramType,PinType + +// MapType indicates the type map structure +// that will be initialized in the kernel. 
+type MapType uint32 + +// All the various map types that can be created +const ( + UnspecifiedMap MapType = MapType(platform.LinuxTag | iota) + // Hash is a hash map + Hash + // Array is an array map + Array + // ProgramArray - A program array map is a special kind of array map whose map + // values contain only file descriptors referring to other eBPF + // programs. Thus, both the key_size and value_size must be + // exactly four bytes. This map is used in conjunction with the + // TailCall helper. + ProgramArray + // PerfEventArray - A perf event array is used in conjunction with PerfEventRead + // and PerfEventOutput calls, to read the raw bpf_perf_data from the registers. + PerfEventArray + // PerCPUHash - This data structure is useful for people who have high performance + // network needs and can reconcile adds at the end of some cycle, so that + // hashes can be lock free without the use of XAdd, which can be costly. + PerCPUHash + // PerCPUArray - This data structure is useful for people who have high performance + // network needs and can reconcile adds at the end of some cycle, so that + // hashes can be lock free without the use of XAdd, which can be costly. + // Each CPU gets a copy of this hash, the contents of all of which can be reconciled + // later. 
+ PerCPUArray + // StackTrace - This holds whole user and kernel stack traces, it can be retrieved with + // GetStackID + StackTrace + // CGroupArray - This is a very niche structure used to help SKBInCGroup determine + // if an skb is from a socket belonging to a specific cgroup + CGroupArray + // LRUHash - This allows you to create a small hash structure that will purge the + // least recently used items rather than throw an error when you run out of memory + LRUHash + // LRUCPUHash - This is NOT like PerCPUHash, this structure is shared among the CPUs, + // it has more to do with including the CPU id with the LRU calculation so that if a + // particular CPU is using a value over-and-over again, then it will be saved, but if + // a value is being retrieved a lot but sparsely across CPUs it is not as important, basically + // giving weight to CPU locality over overall usage. + LRUCPUHash + // LPMTrie - This is an implementation of Longest-Prefix-Match Trie structure. It is useful, + // for storing things like IP addresses which can be bit masked allowing for keys of differing + // values to refer to the same reference based on their masks. See wikipedia for more details. + LPMTrie + // ArrayOfMaps - Each item in the array is another map. The inner map mustn't be a map of maps + // itself. + ArrayOfMaps + // HashOfMaps - Each item in the hash map is another map. The inner map mustn't be a map of maps + // itself. + HashOfMaps + // DevMap - Specialized map to store references to network devices. + DevMap + // SockMap - Specialized map to store references to sockets. + SockMap + // CPUMap - Specialized map to store references to CPUs. + CPUMap + // XSKMap - Specialized map for XDP programs to store references to open sockets. + XSKMap + // SockHash - Specialized hash to store references to sockets. + SockHash + // CGroupStorage - Special map for CGroups. + CGroupStorage + // ReusePortSockArray - Specialized map to store references to sockets that can be reused. 
+ ReusePortSockArray + // PerCPUCGroupStorage - Special per CPU map for CGroups. + PerCPUCGroupStorage + // Queue - FIFO storage for BPF programs. + Queue + // Stack - LIFO storage for BPF programs. + Stack + // SkStorage - Specialized map for local storage at SK for BPF programs. + SkStorage + // DevMapHash - Hash-based indexing scheme for references to network devices. + DevMapHash + // StructOpsMap - This map holds a kernel struct with its function pointer implemented in a BPF + // program. + StructOpsMap + // RingBuf - Similar to PerfEventArray, but shared across all CPUs. + RingBuf + // InodeStorage - Specialized local storage map for inodes. + InodeStorage + // TaskStorage - Specialized local storage map for task_struct. + TaskStorage + // BloomFilter - Space-efficient data structure to quickly test whether an element exists in a set. + BloomFilter + // UserRingbuf - The reverse of RingBuf, used to send messages from user space to BPF programs. + UserRingbuf + // CgroupStorage - Store data keyed on a cgroup. If the cgroup disappears, the key is automatically removed. + CgroupStorage + // Arena - Sparse shared memory region between a BPF program and user space. + Arena +) + +// Map types (Windows). +const ( + WindowsHash MapType = MapType(platform.WindowsTag | iota + 1) + WindowsArray + WindowsProgramArray + WindowsPerCPUHash + WindowsPerCPUArray + WindowsHashOfMaps + WindowsArrayOfMaps + WindowsLRUHash + WindowsLPMTrie + WindowsQueue + WindowsLRUCPUHash + WindowsStack + WindowsRingBuf +) + +// MapTypeForPlatform returns a platform specific map type. +// +// Use this if the library doesn't provide a constant yet. +func MapTypeForPlatform(plat string, typ uint32) (MapType, error) { + return platform.EncodeConstant[MapType](plat, typ) +} + +// hasPerCPUValue returns true if the Map stores a value per CPU. 
+func (mt MapType) hasPerCPUValue() bool { + switch mt { + case PerCPUHash, PerCPUArray, LRUCPUHash, PerCPUCGroupStorage: + return true + case WindowsPerCPUHash, WindowsPerCPUArray, WindowsLRUCPUHash: + return true + default: + return false + } +} + +// canStoreMapOrProgram returns true if the Map stores references to another Map +// or Program. +func (mt MapType) canStoreMapOrProgram() bool { + return mt.canStoreMap() || mt.canStoreProgram() || mt == StructOpsMap +} + +// canStoreMap returns true if the map type accepts a map fd +// for update and returns a map id for lookup. +func (mt MapType) canStoreMap() bool { + return mt == ArrayOfMaps || mt == HashOfMaps || mt == WindowsArrayOfMaps || mt == WindowsHashOfMaps +} + +// canStoreProgram returns true if the map type accepts a program fd +// for update and returns a program id for lookup. +func (mt MapType) canStoreProgram() bool { + return mt == ProgramArray || mt == WindowsProgramArray +} + +// canHaveValueSize returns true if the map type supports setting a value size. +func (mt MapType) canHaveValueSize() bool { + switch mt { + case RingBuf, Arena: + return false + + // Special-case perf events since they require a value size of either 0 or 4 + // for historical reasons. Let the library fix this up later. + case PerfEventArray: + return false + } + + return true +} + +// mustHaveNoPrealloc returns true if the map type does not support +// preallocation and needs the BPF_F_NO_PREALLOC flag set to be created +// successfully. +func (mt MapType) mustHaveNoPrealloc() bool { + switch mt { + case CgroupStorage, InodeStorage, TaskStorage, SkStorage: + return true + case LPMTrie: + return true + } + + return false +} + +// ProgramType of the eBPF program +type ProgramType uint32 + +// eBPF program types (Linux). 
+const ( + UnspecifiedProgram = ProgramType(sys.BPF_PROG_TYPE_UNSPEC) + SocketFilter = ProgramType(sys.BPF_PROG_TYPE_SOCKET_FILTER) + Kprobe = ProgramType(sys.BPF_PROG_TYPE_KPROBE) + SchedCLS = ProgramType(sys.BPF_PROG_TYPE_SCHED_CLS) + SchedACT = ProgramType(sys.BPF_PROG_TYPE_SCHED_ACT) + TracePoint = ProgramType(sys.BPF_PROG_TYPE_TRACEPOINT) + XDP = ProgramType(sys.BPF_PROG_TYPE_XDP) + PerfEvent = ProgramType(sys.BPF_PROG_TYPE_PERF_EVENT) + CGroupSKB = ProgramType(sys.BPF_PROG_TYPE_CGROUP_SKB) + CGroupSock = ProgramType(sys.BPF_PROG_TYPE_CGROUP_SOCK) + LWTIn = ProgramType(sys.BPF_PROG_TYPE_LWT_IN) + LWTOut = ProgramType(sys.BPF_PROG_TYPE_LWT_OUT) + LWTXmit = ProgramType(sys.BPF_PROG_TYPE_LWT_XMIT) + SockOps = ProgramType(sys.BPF_PROG_TYPE_SOCK_OPS) + SkSKB = ProgramType(sys.BPF_PROG_TYPE_SK_SKB) + CGroupDevice = ProgramType(sys.BPF_PROG_TYPE_CGROUP_DEVICE) + SkMsg = ProgramType(sys.BPF_PROG_TYPE_SK_MSG) + RawTracepoint = ProgramType(sys.BPF_PROG_TYPE_RAW_TRACEPOINT) + CGroupSockAddr = ProgramType(sys.BPF_PROG_TYPE_CGROUP_SOCK_ADDR) + LWTSeg6Local = ProgramType(sys.BPF_PROG_TYPE_LWT_SEG6LOCAL) + LircMode2 = ProgramType(sys.BPF_PROG_TYPE_LIRC_MODE2) + SkReuseport = ProgramType(sys.BPF_PROG_TYPE_SK_REUSEPORT) + FlowDissector = ProgramType(sys.BPF_PROG_TYPE_FLOW_DISSECTOR) + CGroupSysctl = ProgramType(sys.BPF_PROG_TYPE_CGROUP_SYSCTL) + RawTracepointWritable = ProgramType(sys.BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE) + CGroupSockopt = ProgramType(sys.BPF_PROG_TYPE_CGROUP_SOCKOPT) + Tracing = ProgramType(sys.BPF_PROG_TYPE_TRACING) + StructOps = ProgramType(sys.BPF_PROG_TYPE_STRUCT_OPS) + Extension = ProgramType(sys.BPF_PROG_TYPE_EXT) + LSM = ProgramType(sys.BPF_PROG_TYPE_LSM) + SkLookup = ProgramType(sys.BPF_PROG_TYPE_SK_LOOKUP) + Syscall = ProgramType(sys.BPF_PROG_TYPE_SYSCALL) + Netfilter = ProgramType(sys.BPF_PROG_TYPE_NETFILTER) +) + +// eBPF program types (Windows). 
+// +// See https://github.com/microsoft/ebpf-for-windows/blob/main/include/ebpf_structs.h#L170 +const ( + WindowsXDP ProgramType = ProgramType(platform.WindowsTag) | (iota + 1) + WindowsBind + WindowsCGroupSockAddr + WindowsSockOps + WindowsXDPTest ProgramType = ProgramType(platform.WindowsTag) | 998 + WindowsSample ProgramType = ProgramType(platform.WindowsTag) | 999 +) + +// ProgramTypeForPlatform returns a platform specific program type. +// +// Use this if the library doesn't provide a constant yet. +func ProgramTypeForPlatform(plat string, value uint32) (ProgramType, error) { + return platform.EncodeConstant[ProgramType](plat, value) +} + +// AttachType of the eBPF program, needed to differentiate allowed context accesses in +// some newer program types like CGroupSockAddr. Should be set to AttachNone if not required. +// Will cause invalid argument (EINVAL) at program load time if set incorrectly. +type AttachType uint32 + +//go:generate go tool stringer -type AttachType -trimprefix Attach + +// AttachNone is an alias for AttachCGroupInetIngress for readability reasons. +const AttachNone AttachType = 0 + +// Attach types (Linux). 
+const ( + AttachCGroupInetIngress = AttachType(sys.BPF_CGROUP_INET_INGRESS) + AttachCGroupInetEgress = AttachType(sys.BPF_CGROUP_INET_EGRESS) + AttachCGroupInetSockCreate = AttachType(sys.BPF_CGROUP_INET_SOCK_CREATE) + AttachCGroupSockOps = AttachType(sys.BPF_CGROUP_SOCK_OPS) + AttachSkSKBStreamParser = AttachType(sys.BPF_SK_SKB_STREAM_PARSER) + AttachSkSKBStreamVerdict = AttachType(sys.BPF_SK_SKB_STREAM_VERDICT) + AttachCGroupDevice = AttachType(sys.BPF_CGROUP_DEVICE) + AttachSkMsgVerdict = AttachType(sys.BPF_SK_MSG_VERDICT) + AttachCGroupInet4Bind = AttachType(sys.BPF_CGROUP_INET4_BIND) + AttachCGroupInet6Bind = AttachType(sys.BPF_CGROUP_INET6_BIND) + AttachCGroupInet4Connect = AttachType(sys.BPF_CGROUP_INET4_CONNECT) + AttachCGroupInet6Connect = AttachType(sys.BPF_CGROUP_INET6_CONNECT) + AttachCGroupInet4PostBind = AttachType(sys.BPF_CGROUP_INET4_POST_BIND) + AttachCGroupInet6PostBind = AttachType(sys.BPF_CGROUP_INET6_POST_BIND) + AttachCGroupUDP4Sendmsg = AttachType(sys.BPF_CGROUP_UDP4_SENDMSG) + AttachCGroupUDP6Sendmsg = AttachType(sys.BPF_CGROUP_UDP6_SENDMSG) + AttachLircMode2 = AttachType(sys.BPF_LIRC_MODE2) + AttachFlowDissector = AttachType(sys.BPF_FLOW_DISSECTOR) + AttachCGroupSysctl = AttachType(sys.BPF_CGROUP_SYSCTL) + AttachCGroupUDP4Recvmsg = AttachType(sys.BPF_CGROUP_UDP4_RECVMSG) + AttachCGroupUDP6Recvmsg = AttachType(sys.BPF_CGROUP_UDP6_RECVMSG) + AttachCGroupGetsockopt = AttachType(sys.BPF_CGROUP_GETSOCKOPT) + AttachCGroupSetsockopt = AttachType(sys.BPF_CGROUP_SETSOCKOPT) + AttachTraceRawTp = AttachType(sys.BPF_TRACE_RAW_TP) + AttachTraceFEntry = AttachType(sys.BPF_TRACE_FENTRY) + AttachTraceFExit = AttachType(sys.BPF_TRACE_FEXIT) + AttachModifyReturn = AttachType(sys.BPF_MODIFY_RETURN) + AttachLSMMac = AttachType(sys.BPF_LSM_MAC) + AttachTraceIter = AttachType(sys.BPF_TRACE_ITER) + AttachCgroupInet4GetPeername = AttachType(sys.BPF_CGROUP_INET4_GETPEERNAME) + AttachCgroupInet6GetPeername = AttachType(sys.BPF_CGROUP_INET6_GETPEERNAME) + 
AttachCgroupInet4GetSockname = AttachType(sys.BPF_CGROUP_INET4_GETSOCKNAME) + AttachCgroupInet6GetSockname = AttachType(sys.BPF_CGROUP_INET6_GETSOCKNAME) + AttachXDPDevMap = AttachType(sys.BPF_XDP_DEVMAP) + AttachCgroupInetSockRelease = AttachType(sys.BPF_CGROUP_INET_SOCK_RELEASE) + AttachXDPCPUMap = AttachType(sys.BPF_XDP_CPUMAP) + AttachSkLookup = AttachType(sys.BPF_SK_LOOKUP) + AttachXDP = AttachType(sys.BPF_XDP) + AttachSkSKBVerdict = AttachType(sys.BPF_SK_SKB_VERDICT) + AttachSkReuseportSelect = AttachType(sys.BPF_SK_REUSEPORT_SELECT) + AttachSkReuseportSelectOrMigrate = AttachType(sys.BPF_SK_REUSEPORT_SELECT_OR_MIGRATE) + AttachPerfEvent = AttachType(sys.BPF_PERF_EVENT) + AttachTraceKprobeMulti = AttachType(sys.BPF_TRACE_KPROBE_MULTI) + AttachTraceKprobeSession = AttachType(sys.BPF_TRACE_KPROBE_SESSION) + AttachLSMCgroup = AttachType(sys.BPF_LSM_CGROUP) + AttachStructOps = AttachType(sys.BPF_STRUCT_OPS) + AttachNetfilter = AttachType(sys.BPF_NETFILTER) + AttachTCXIngress = AttachType(sys.BPF_TCX_INGRESS) + AttachTCXEgress = AttachType(sys.BPF_TCX_EGRESS) + AttachTraceUprobeMulti = AttachType(sys.BPF_TRACE_UPROBE_MULTI) + AttachCgroupUnixConnect = AttachType(sys.BPF_CGROUP_UNIX_CONNECT) + AttachCgroupUnixSendmsg = AttachType(sys.BPF_CGROUP_UNIX_SENDMSG) + AttachCgroupUnixRecvmsg = AttachType(sys.BPF_CGROUP_UNIX_RECVMSG) + AttachCgroupUnixGetpeername = AttachType(sys.BPF_CGROUP_UNIX_GETPEERNAME) + AttachCgroupUnixGetsockname = AttachType(sys.BPF_CGROUP_UNIX_GETSOCKNAME) + AttachNetkitPrimary = AttachType(sys.BPF_NETKIT_PRIMARY) + AttachNetkitPeer = AttachType(sys.BPF_NETKIT_PEER) +) + +// Attach types (Windows). 
+// +// See https://github.com/microsoft/ebpf-for-windows/blob/main/include/ebpf_structs.h#L260 +const ( + AttachWindowsXDP = AttachType(platform.WindowsTag | iota + 1) + AttachWindowsBind + AttachWindowsCGroupInet4Connect + AttachWindowsCGroupInet6Connect + AttachWindowsCgroupInet4RecvAccept + AttachWindowsCgroupInet6RecvAccept + AttachWindowsCGroupSockOps + AttachWindowsSample + AttachWindowsXDPTest +) + +// AttachTypeForPlatform returns a platform specific attach type. +// +// Use this if the library doesn't provide a constant yet. +func AttachTypeForPlatform(plat string, value uint32) (AttachType, error) { + return platform.EncodeConstant[AttachType](plat, value) +} + +// AttachFlags of the eBPF program used in BPF_PROG_ATTACH command +type AttachFlags uint32 + +// PinType determines whether a map is pinned into a BPFFS. +type PinType uint32 + +// Valid pin types. +// +// Mirrors enum libbpf_pin_type. +const ( + PinNone PinType = iota + // Pin an object by using its name as the filename. + PinByName +) + +// LoadPinOptions control how a pinned object is loaded. +type LoadPinOptions struct { + // Request a read-only or write-only object. The default is a read-write + // object. Only one of the flags may be set. + ReadOnly bool + WriteOnly bool + + // Raw flags for the syscall. Other fields of this struct take precedence. + Flags uint32 +} + +// Marshal returns a value suitable for BPF_OBJ_GET syscall file_flags parameter. +func (lpo *LoadPinOptions) Marshal() uint32 { + if lpo == nil { + return 0 + } + + flags := lpo.Flags + if lpo.ReadOnly { + flags |= sys.BPF_F_RDONLY + } + if lpo.WriteOnly { + flags |= sys.BPF_F_WRONLY + } + return flags +} + +// BatchOptions batch map operations options +// +// Mirrors libbpf struct bpf_map_batch_opts +// Currently BPF_F_FLAG is the only supported +// flag (for ElemFlags). +type BatchOptions struct { + ElemFlags uint64 + Flags uint64 +} + +// LogLevel controls the verbosity of the kernel's eBPF program verifier. 
+// These constants can be used for the ProgramOptions.LogLevel field. +type LogLevel = sys.LogLevel + +const ( + // Print verifier state at branch points. + LogLevelBranch = sys.BPF_LOG_LEVEL1 + + // Print verifier state for every instruction. + // Available since Linux v5.2. + LogLevelInstruction = sys.BPF_LOG_LEVEL2 + + // Print verifier errors and stats at the end of the verification process. + // Available since Linux v5.2. + LogLevelStats = sys.BPF_LOG_STATS +) diff --git a/vendor/github.com/cilium/ebpf/types_string.go b/vendor/github.com/cilium/ebpf/types_string.go new file mode 100644 index 000000000..94bc2e26c --- /dev/null +++ b/vendor/github.com/cilium/ebpf/types_string.go @@ -0,0 +1,170 @@ +// Code generated by "stringer -output types_string.go -type=MapType,ProgramType,PinType"; DO NOT EDIT. + +package ebpf + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. 
+ var x [1]struct{} + _ = x[UnspecifiedMap-0] + _ = x[Hash-1] + _ = x[Array-2] + _ = x[ProgramArray-3] + _ = x[PerfEventArray-4] + _ = x[PerCPUHash-5] + _ = x[PerCPUArray-6] + _ = x[StackTrace-7] + _ = x[CGroupArray-8] + _ = x[LRUHash-9] + _ = x[LRUCPUHash-10] + _ = x[LPMTrie-11] + _ = x[ArrayOfMaps-12] + _ = x[HashOfMaps-13] + _ = x[DevMap-14] + _ = x[SockMap-15] + _ = x[CPUMap-16] + _ = x[XSKMap-17] + _ = x[SockHash-18] + _ = x[CGroupStorage-19] + _ = x[ReusePortSockArray-20] + _ = x[PerCPUCGroupStorage-21] + _ = x[Queue-22] + _ = x[Stack-23] + _ = x[SkStorage-24] + _ = x[DevMapHash-25] + _ = x[StructOpsMap-26] + _ = x[RingBuf-27] + _ = x[InodeStorage-28] + _ = x[TaskStorage-29] + _ = x[BloomFilter-30] + _ = x[UserRingbuf-31] + _ = x[CgroupStorage-32] + _ = x[Arena-33] + _ = x[WindowsHash-268435457] + _ = x[WindowsArray-268435458] + _ = x[WindowsProgramArray-268435459] + _ = x[WindowsPerCPUHash-268435460] + _ = x[WindowsPerCPUArray-268435461] + _ = x[WindowsHashOfMaps-268435462] + _ = x[WindowsArrayOfMaps-268435463] + _ = x[WindowsLRUHash-268435464] + _ = x[WindowsLPMTrie-268435465] + _ = x[WindowsQueue-268435466] + _ = x[WindowsLRUCPUHash-268435467] + _ = x[WindowsStack-268435468] + _ = x[WindowsRingBuf-268435469] +} + +const ( + _MapType_name_0 = "UnspecifiedMapHashArrayProgramArrayPerfEventArrayPerCPUHashPerCPUArrayStackTraceCGroupArrayLRUHashLRUCPUHashLPMTrieArrayOfMapsHashOfMapsDevMapSockMapCPUMapXSKMapSockHashCGroupStorageReusePortSockArrayPerCPUCGroupStorageQueueStackSkStorageDevMapHashStructOpsMapRingBufInodeStorageTaskStorageBloomFilterUserRingbufCgroupStorageArena" + _MapType_name_1 = "WindowsHashWindowsArrayWindowsProgramArrayWindowsPerCPUHashWindowsPerCPUArrayWindowsHashOfMapsWindowsArrayOfMapsWindowsLRUHashWindowsLPMTrieWindowsQueueWindowsLRUCPUHashWindowsStackWindowsRingBuf" +) + +var ( + _MapType_index_0 = [...]uint16{0, 14, 18, 23, 35, 49, 59, 70, 80, 91, 98, 108, 115, 126, 136, 142, 149, 155, 161, 169, 182, 200, 219, 224, 229, 238, 248, 260, 267, 
279, 290, 301, 312, 325, 330} + _MapType_index_1 = [...]uint8{0, 11, 23, 42, 59, 77, 94, 112, 126, 140, 152, 169, 181, 195} +) + +func (i MapType) String() string { + switch { + case i <= 33: + return _MapType_name_0[_MapType_index_0[i]:_MapType_index_0[i+1]] + case 268435457 <= i && i <= 268435469: + i -= 268435457 + return _MapType_name_1[_MapType_index_1[i]:_MapType_index_1[i+1]] + default: + return "MapType(" + strconv.FormatInt(int64(i), 10) + ")" + } +} +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[UnspecifiedProgram-0] + _ = x[SocketFilter-1] + _ = x[Kprobe-2] + _ = x[SchedCLS-3] + _ = x[SchedACT-4] + _ = x[TracePoint-5] + _ = x[XDP-6] + _ = x[PerfEvent-7] + _ = x[CGroupSKB-8] + _ = x[CGroupSock-9] + _ = x[LWTIn-10] + _ = x[LWTOut-11] + _ = x[LWTXmit-12] + _ = x[SockOps-13] + _ = x[SkSKB-14] + _ = x[CGroupDevice-15] + _ = x[SkMsg-16] + _ = x[RawTracepoint-17] + _ = x[CGroupSockAddr-18] + _ = x[LWTSeg6Local-19] + _ = x[LircMode2-20] + _ = x[SkReuseport-21] + _ = x[FlowDissector-22] + _ = x[CGroupSysctl-23] + _ = x[RawTracepointWritable-24] + _ = x[CGroupSockopt-25] + _ = x[Tracing-26] + _ = x[StructOps-27] + _ = x[Extension-28] + _ = x[LSM-29] + _ = x[SkLookup-30] + _ = x[Syscall-31] + _ = x[Netfilter-32] + _ = x[WindowsXDP-268435457] + _ = x[WindowsBind-268435458] + _ = x[WindowsCGroupSockAddr-268435459] + _ = x[WindowsSockOps-268435460] + _ = x[WindowsXDPTest-268436454] + _ = x[WindowsSample-268436455] +} + +const ( + _ProgramType_name_0 = "UnspecifiedProgramSocketFilterKprobeSchedCLSSchedACTTracePointXDPPerfEventCGroupSKBCGroupSockLWTInLWTOutLWTXmitSockOpsSkSKBCGroupDeviceSkMsgRawTracepointCGroupSockAddrLWTSeg6LocalLircMode2SkReuseportFlowDissectorCGroupSysctlRawTracepointWritableCGroupSockoptTracingStructOpsExtensionLSMSkLookupSyscallNetfilter" + _ProgramType_name_1 = 
"WindowsXDPWindowsBindWindowsCGroupSockAddrWindowsSockOps" + _ProgramType_name_2 = "WindowsXDPTestWindowsSample" +) + +var ( + _ProgramType_index_0 = [...]uint16{0, 18, 30, 36, 44, 52, 62, 65, 74, 83, 93, 98, 104, 111, 118, 123, 135, 140, 153, 167, 179, 188, 199, 212, 224, 245, 258, 265, 274, 283, 286, 294, 301, 310} + _ProgramType_index_1 = [...]uint8{0, 10, 21, 42, 56} + _ProgramType_index_2 = [...]uint8{0, 14, 27} +) + +func (i ProgramType) String() string { + switch { + case i <= 32: + return _ProgramType_name_0[_ProgramType_index_0[i]:_ProgramType_index_0[i+1]] + case 268435457 <= i && i <= 268435460: + i -= 268435457 + return _ProgramType_name_1[_ProgramType_index_1[i]:_ProgramType_index_1[i+1]] + case 268436454 <= i && i <= 268436455: + i -= 268436454 + return _ProgramType_name_2[_ProgramType_index_2[i]:_ProgramType_index_2[i+1]] + default: + return "ProgramType(" + strconv.FormatInt(int64(i), 10) + ")" + } +} +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[PinNone-0] + _ = x[PinByName-1] +} + +const _PinType_name = "PinNonePinByName" + +var _PinType_index = [...]uint8{0, 7, 16} + +func (i PinType) String() string { + idx := int(i) - 0 + if i < 0 || idx >= len(_PinType_index)-1 { + return "PinType(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _PinType_name[_PinType_index[idx]:_PinType_index[idx+1]] +} diff --git a/vendor/github.com/cilium/ebpf/types_windows.go b/vendor/github.com/cilium/ebpf/types_windows.go new file mode 100644 index 000000000..0b7e836b0 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/types_windows.go @@ -0,0 +1,57 @@ +package ebpf + +import ( + "fmt" + "os" + + "golang.org/x/sys/windows" + + "github.com/cilium/ebpf/internal/efw" + "github.com/cilium/ebpf/internal/platform" +) + +// WindowsProgramTypeForGUID resolves a GUID to a ProgramType. 
+// +// The GUID must be in the form of "{XXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX}". +// +// Returns an error wrapping [os.ErrNotExist] if the GUID is not recognized. +func WindowsProgramTypeForGUID(guid string) (ProgramType, error) { + progTypeGUID, err := windows.GUIDFromString(guid) + if err != nil { + return 0, fmt.Errorf("parse GUID: %w", err) + } + + rawProgramType, err := efw.EbpfGetBpfProgramType(progTypeGUID) + if err != nil { + return 0, fmt.Errorf("get program type: %w", err) + } + + if rawProgramType == 0 { + return 0, fmt.Errorf("program type not found for GUID %v: %w", guid, os.ErrNotExist) + } + + return ProgramTypeForPlatform(platform.Windows, rawProgramType) +} + +// WindowsAttachTypeForGUID resolves a GUID to an AttachType. +// +// The GUID must be in the form of "{XXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX}". +// +// Returns an error wrapping [os.ErrNotExist] if the GUID is not recognized. +func WindowsAttachTypeForGUID(guid string) (AttachType, error) { + attachTypeGUID, err := windows.GUIDFromString(guid) + if err != nil { + return 0, fmt.Errorf("parse GUID: %w", err) + } + + rawAttachType, err := efw.EbpfGetBpfAttachType(attachTypeGUID) + if err != nil { + return 0, fmt.Errorf("get attach type: %w", err) + } + + if rawAttachType == 0 { + return 0, fmt.Errorf("attach type not found for GUID %v: %w", attachTypeGUID, os.ErrNotExist) + } + + return AttachTypeForPlatform(platform.Windows, rawAttachType) +} diff --git a/vendor/github.com/cilium/ebpf/variable.go b/vendor/github.com/cilium/ebpf/variable.go new file mode 100644 index 000000000..c6fd55cba --- /dev/null +++ b/vendor/github.com/cilium/ebpf/variable.go @@ -0,0 +1,270 @@ +package ebpf + +import ( + "fmt" + "io" + "reflect" + + "github.com/cilium/ebpf/btf" + "github.com/cilium/ebpf/internal/sysenc" +) + +// VariableSpec is a convenience wrapper for modifying global variables of a +// CollectionSpec before loading it into the kernel. 
+// +// All operations on a VariableSpec's underlying MapSpec are performed in the +// host's native endianness. +type VariableSpec struct { + name string + offset uint64 + size uint64 + + m *MapSpec + t *btf.Var +} + +// Set sets the value of the VariableSpec to the provided input using the host's +// native endianness. +func (s *VariableSpec) Set(in any) error { + buf, err := sysenc.Marshal(in, int(s.size)) + if err != nil { + return fmt.Errorf("marshaling value %s: %w", s.name, err) + } + + b, _, err := s.m.dataSection() + if err != nil { + return fmt.Errorf("getting data section of map %s: %w", s.m.Name, err) + } + + if int(s.offset+s.size) > len(b) { + return fmt.Errorf("offset %d(+%d) for variable %s is out of bounds", s.offset, s.size, s.name) + } + + // MapSpec.Copy() performs a shallow copy. Fully copy the byte slice + // to avoid any changes affecting other copies of the MapSpec. + cpy := make([]byte, len(b)) + copy(cpy, b) + + buf.CopyTo(cpy[s.offset : s.offset+s.size]) + + s.m.Contents[0] = MapKV{Key: uint32(0), Value: cpy} + + return nil +} + +// Get writes the value of the VariableSpec to the provided output using the +// host's native endianness. +func (s *VariableSpec) Get(out any) error { + b, _, err := s.m.dataSection() + if err != nil { + return fmt.Errorf("getting data section of map %s: %w", s.m.Name, err) + } + + if int(s.offset+s.size) > len(b) { + return fmt.Errorf("offset %d(+%d) for variable %s is out of bounds", s.offset, s.size, s.name) + } + + if err := sysenc.Unmarshal(out, b[s.offset:s.offset+s.size]); err != nil { + return fmt.Errorf("unmarshaling value: %w", err) + } + + return nil +} + +// Size returns the size of the variable in bytes. +func (s *VariableSpec) Size() uint64 { + return s.size +} + +// MapName returns the name of the underlying MapSpec. +func (s *VariableSpec) MapName() string { + return s.m.Name +} + +// Offset returns the offset of the variable in the underlying MapSpec. 
+func (s *VariableSpec) Offset() uint64 { + return s.offset +} + +// Constant returns true if the VariableSpec represents a variable that is +// read-only from the perspective of the BPF program. +func (s *VariableSpec) Constant() bool { + return s.m.readOnly() +} + +// Type returns the [btf.Var] representing the variable in its data section. +// This is useful for inspecting the variable's decl tags and the type +// information of the inner type. +// +// Returns nil if the original ELF object did not contain BTF information. +func (s *VariableSpec) Type() *btf.Var { + return s.t +} + +func (s *VariableSpec) String() string { + return fmt.Sprintf("%s (type=%v, map=%s, offset=%d, size=%d)", s.name, s.t, s.m.Name, s.offset, s.size) +} + +// copy returns a new VariableSpec with the same values as the original, +// but with a different underlying MapSpec. This is useful when copying a +// CollectionSpec. Returns nil if a MapSpec with the same name is not found. +func (s *VariableSpec) copy(cpy *CollectionSpec) *VariableSpec { + out := &VariableSpec{ + name: s.name, + offset: s.offset, + size: s.size, + } + if s.t != nil { + out.t = btf.Copy(s.t).(*btf.Var) + } + + // Attempt to find a MapSpec with the same name in the copied CollectionSpec. + for _, m := range cpy.Maps { + if m.Name == s.m.Name { + out.m = m + return out + } + } + + return nil +} + +// Variable is a convenience wrapper for modifying global variables of a +// Collection after loading it into the kernel. Operations on a Variable are +// performed using direct memory access, bypassing the BPF map syscall API. +// +// On kernels older than 5.5, most interactions with Variable return +// [ErrNotSupported]. 
+type Variable struct { + name string + offset uint64 + size uint64 + t *btf.Var + + mm *Memory +} + +func newVariable(name string, offset, size uint64, t *btf.Var, mm *Memory) (*Variable, error) { + if mm != nil { + if int(offset+size) > mm.Size() { + return nil, fmt.Errorf("offset %d(+%d) is out of bounds", offset, size) + } + } + + return &Variable{ + name: name, + offset: offset, + size: size, + t: t, + mm: mm, + }, nil +} + +// Size returns the size of the variable. +func (v *Variable) Size() uint64 { + return v.size +} + +// ReadOnly returns true if the Variable represents a variable that is read-only +// after loading the Collection into the kernel. +// +// On systems without BPF_F_MMAPABLE support, ReadOnly always returns true. +func (v *Variable) ReadOnly() bool { + if v.mm == nil { + return true + } + return v.mm.ReadOnly() +} + +// Type returns the [btf.Var] representing the variable in its data section. +// This is useful for inspecting the variable's decl tags and the type +// information of the inner type. +// +// Returns nil if the original ELF object did not contain BTF information. +func (v *Variable) Type() *btf.Var { + return v.t +} + +func (v *Variable) String() string { + return fmt.Sprintf("%s (type=%v)", v.name, v.t) +} + +// Set the value of the Variable to the provided input. The input must marshal +// to the same length as the size of the Variable. 
+func (v *Variable) Set(in any) error { + if v.mm == nil { + return fmt.Errorf("variable %s: direct access requires Linux 5.5 or later: %w", v.name, ErrNotSupported) + } + + if v.ReadOnly() { + return fmt.Errorf("variable %s: %w", v.name, ErrReadOnly) + } + + if !v.mm.bounds(v.offset, v.size) { + return fmt.Errorf("variable %s: access out of bounds: %w", v.name, io.EOF) + } + + buf, err := sysenc.Marshal(in, int(v.size)) + if err != nil { + return fmt.Errorf("marshaling value %s: %w", v.name, err) + } + + if _, err := v.mm.WriteAt(buf.Bytes(), int64(v.offset)); err != nil { + return fmt.Errorf("writing value to %s: %w", v.name, err) + } + + return nil +} + +// Get writes the value of the Variable to the provided output. The output must +// be a pointer to a value whose size matches the Variable. +func (v *Variable) Get(out any) error { + if v.mm == nil { + return fmt.Errorf("variable %s: direct access requires Linux 5.5 or later: %w", v.name, ErrNotSupported) + } + + if !v.mm.bounds(v.offset, v.size) { + return fmt.Errorf("variable %s: access out of bounds: %w", v.name, io.EOF) + } + + if err := sysenc.Unmarshal(out, v.mm.b[v.offset:v.offset+v.size]); err != nil { + return fmt.Errorf("unmarshaling value %s: %w", v.name, err) + } + + return nil +} + +func checkVariable[T any](v *Variable) error { + if v.ReadOnly() { + return ErrReadOnly + } + + t := reflect.TypeFor[T]() + size := uint64(t.Size()) + if t.Kind() == reflect.Uintptr && v.size == 8 { + // uintptr is 8 bytes on 64-bit and 4 on 32-bit. In BPF/BTF, pointers are + // always 8 bytes. For the sake of portability, allow accessing 8-byte BPF + // variables as uintptr on 32-bit systems, since the upper 32 bits of the + // pointer should be zero anyway. 
+ return nil + } + if v.size != size { + return fmt.Errorf("can't create %d-byte accessor to %d-byte variable: %w", size, v.size, ErrInvalidType) + } + + return nil +} + +// VariablePointer returns a pointer to a variable of type T backed by memory +// shared with the BPF program. Requires building the Go application with -tags +// ebpf_unsafe_memory_experiment. +// +// T must contain only fixed-size, non-Go-pointer types: bools, floats, +// (u)int[8-64], arrays, and structs containing them. Structs must embed +// [structs.HostLayout]. [ErrInvalidType] is returned if T is not a valid type. +func VariablePointer[T comparable](v *Variable) (*T, error) { + if err := checkVariable[T](v); err != nil { + return nil, fmt.Errorf("variable pointer %s: %w", v.name, err) + } + return memoryPointer[T](v.mm, v.offset) +} diff --git a/vendor/github.com/containernetworking/cni/LICENSE b/vendor/github.com/containernetworking/cni/LICENSE new file mode 100644 index 000000000..8f71f43fe --- /dev/null +++ b/vendor/github.com/containernetworking/cni/LICENSE @@ -0,0 +1,202 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. 
+ + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + diff --git a/vendor/github.com/containernetworking/cni/libcni/api.go b/vendor/github.com/containernetworking/cni/libcni/api.go new file mode 100644 index 000000000..0d82a2dd3 --- /dev/null +++ b/vendor/github.com/containernetworking/cni/libcni/api.go @@ -0,0 +1,679 @@ +// Copyright 2015 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package libcni + +// Note this is the actual implementation of the CNI specification, which +// is reflected in the https://github.com/containernetworking/cni/blob/master/SPEC.md file +// it is typically bundled into runtime providers (i.e. containerd or cri-o would use this +// before calling runc or hcsshim). It is also bundled into CNI providers as well, for example, +// to add an IP to a container, to parse the configuration of the CNI and so on. 
+ +import ( + "context" + "encoding/json" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "strings" + + "github.com/containernetworking/cni/pkg/invoke" + "github.com/containernetworking/cni/pkg/types" + "github.com/containernetworking/cni/pkg/types/create" + "github.com/containernetworking/cni/pkg/utils" + "github.com/containernetworking/cni/pkg/version" +) + +var ( + CacheDir = "/var/lib/cni" +) + +const ( + CNICacheV1 = "cniCacheV1" +) + +// A RuntimeConf holds the arguments to one invocation of a CNI plugin +// excepting the network configuration, with the nested exception that +// the `runtimeConfig` from the network configuration is included +// here. +type RuntimeConf struct { + ContainerID string + NetNS string + IfName string + Args [][2]string + // A dictionary of capability-specific data passed by the runtime + // to plugins as top-level keys in the 'runtimeConfig' dictionary + // of the plugin's stdin data. libcni will ensure that only keys + // in this map which match the capabilities of the plugin are passed + // to the plugin + CapabilityArgs map[string]interface{} + + // DEPRECATED. Will be removed in a future release. 
+ CacheDir string +} + +type NetworkConfig struct { + Network *types.NetConf + Bytes []byte +} + +type NetworkConfigList struct { + Name string + CNIVersion string + DisableCheck bool + Plugins []*NetworkConfig + Bytes []byte +} + +type CNI interface { + AddNetworkList(ctx context.Context, net *NetworkConfigList, rt *RuntimeConf) (types.Result, error) + CheckNetworkList(ctx context.Context, net *NetworkConfigList, rt *RuntimeConf) error + DelNetworkList(ctx context.Context, net *NetworkConfigList, rt *RuntimeConf) error + GetNetworkListCachedResult(net *NetworkConfigList, rt *RuntimeConf) (types.Result, error) + GetNetworkListCachedConfig(net *NetworkConfigList, rt *RuntimeConf) ([]byte, *RuntimeConf, error) + + AddNetwork(ctx context.Context, net *NetworkConfig, rt *RuntimeConf) (types.Result, error) + CheckNetwork(ctx context.Context, net *NetworkConfig, rt *RuntimeConf) error + DelNetwork(ctx context.Context, net *NetworkConfig, rt *RuntimeConf) error + GetNetworkCachedResult(net *NetworkConfig, rt *RuntimeConf) (types.Result, error) + GetNetworkCachedConfig(net *NetworkConfig, rt *RuntimeConf) ([]byte, *RuntimeConf, error) + + ValidateNetworkList(ctx context.Context, net *NetworkConfigList) ([]string, error) + ValidateNetwork(ctx context.Context, net *NetworkConfig) ([]string, error) +} + +type CNIConfig struct { + Path []string + exec invoke.Exec + cacheDir string +} + +// CNIConfig implements the CNI interface +var _ CNI = &CNIConfig{} + +// NewCNIConfig returns a new CNIConfig object that will search for plugins +// in the given paths and use the given exec interface to run those plugins, +// or if the exec interface is not given, will use a default exec handler. 
+func NewCNIConfig(path []string, exec invoke.Exec) *CNIConfig { + return NewCNIConfigWithCacheDir(path, "", exec) +} + +// NewCNIConfigWithCacheDir returns a new CNIConfig object that will search for plugins +// in the given paths use the given exec interface to run those plugins, +// or if the exec interface is not given, will use a default exec handler. +// The given cache directory will be used for temporary data storage when needed. +func NewCNIConfigWithCacheDir(path []string, cacheDir string, exec invoke.Exec) *CNIConfig { + return &CNIConfig{ + Path: path, + cacheDir: cacheDir, + exec: exec, + } +} + +func buildOneConfig(name, cniVersion string, orig *NetworkConfig, prevResult types.Result, rt *RuntimeConf) (*NetworkConfig, error) { + var err error + + inject := map[string]interface{}{ + "name": name, + "cniVersion": cniVersion, + } + // Add previous plugin result + if prevResult != nil { + inject["prevResult"] = prevResult + } + + // Ensure every config uses the same name and version + orig, err = InjectConf(orig, inject) + if err != nil { + return nil, err + } + + return injectRuntimeConfig(orig, rt) +} + +// This function takes a libcni RuntimeConf structure and injects values into +// a "runtimeConfig" dictionary in the CNI network configuration JSON that +// will be passed to the plugin on stdin. +// +// Only "capabilities arguments" passed by the runtime are currently injected. +// These capabilities arguments are filtered through the plugin's advertised +// capabilities from its config JSON, and any keys in the CapabilityArgs +// matching plugin capabilities are added to the "runtimeConfig" dictionary +// sent to the plugin via JSON on stdin. For example, if the plugin's +// capabilities include "portMappings", and the CapabilityArgs map includes a +// "portMappings" key, that key and its value are added to the "runtimeConfig" +// dictionary to be passed to the plugin's stdin. 
+func injectRuntimeConfig(orig *NetworkConfig, rt *RuntimeConf) (*NetworkConfig, error) { + var err error + + rc := make(map[string]interface{}) + for capability, supported := range orig.Network.Capabilities { + if !supported { + continue + } + if data, ok := rt.CapabilityArgs[capability]; ok { + rc[capability] = data + } + } + + if len(rc) > 0 { + orig, err = InjectConf(orig, map[string]interface{}{"runtimeConfig": rc}) + if err != nil { + return nil, err + } + } + + return orig, nil +} + +// ensure we have a usable exec if the CNIConfig was not given one +func (c *CNIConfig) ensureExec() invoke.Exec { + if c.exec == nil { + c.exec = &invoke.DefaultExec{ + RawExec: &invoke.RawExec{Stderr: os.Stderr}, + PluginDecoder: version.PluginDecoder{}, + } + } + return c.exec +} + +type cachedInfo struct { + Kind string `json:"kind"` + ContainerID string `json:"containerId"` + Config []byte `json:"config"` + IfName string `json:"ifName"` + NetworkName string `json:"networkName"` + CniArgs [][2]string `json:"cniArgs,omitempty"` + CapabilityArgs map[string]interface{} `json:"capabilityArgs,omitempty"` + RawResult map[string]interface{} `json:"result,omitempty"` + Result types.Result `json:"-"` +} + +// getCacheDir returns the cache directory in this order: +// 1) global cacheDir from CNIConfig object +// 2) deprecated cacheDir from RuntimeConf object +// 3) fall back to default cache directory +func (c *CNIConfig) getCacheDir(rt *RuntimeConf) string { + if c.cacheDir != "" { + return c.cacheDir + } + if rt.CacheDir != "" { + return rt.CacheDir + } + return CacheDir +} + +func (c *CNIConfig) getCacheFilePath(netName string, rt *RuntimeConf) (string, error) { + if netName == "" || rt.ContainerID == "" || rt.IfName == "" { + return "", fmt.Errorf("cache file path requires network name (%q), container ID (%q), and interface name (%q)", netName, rt.ContainerID, rt.IfName) + } + return filepath.Join(c.getCacheDir(rt), "results", fmt.Sprintf("%s-%s-%s", netName, rt.ContainerID, 
rt.IfName)), nil +} + +func (c *CNIConfig) cacheAdd(result types.Result, config []byte, netName string, rt *RuntimeConf) error { + cached := cachedInfo{ + Kind: CNICacheV1, + ContainerID: rt.ContainerID, + Config: config, + IfName: rt.IfName, + NetworkName: netName, + CniArgs: rt.Args, + CapabilityArgs: rt.CapabilityArgs, + } + + // We need to get type.Result into cachedInfo as JSON map + // Marshal to []byte, then Unmarshal into cached.RawResult + data, err := json.Marshal(result) + if err != nil { + return err + } + + err = json.Unmarshal(data, &cached.RawResult) + if err != nil { + return err + } + + newBytes, err := json.Marshal(&cached) + if err != nil { + return err + } + + fname, err := c.getCacheFilePath(netName, rt) + if err != nil { + return err + } + if err := os.MkdirAll(filepath.Dir(fname), 0700); err != nil { + return err + } + + return ioutil.WriteFile(fname, newBytes, 0600) +} + +func (c *CNIConfig) cacheDel(netName string, rt *RuntimeConf) error { + fname, err := c.getCacheFilePath(netName, rt) + if err != nil { + // Ignore error + return nil + } + return os.Remove(fname) +} + +func (c *CNIConfig) getCachedConfig(netName string, rt *RuntimeConf) ([]byte, *RuntimeConf, error) { + var bytes []byte + + fname, err := c.getCacheFilePath(netName, rt) + if err != nil { + return nil, nil, err + } + bytes, err = ioutil.ReadFile(fname) + if err != nil { + // Ignore read errors; the cached result may not exist on-disk + return nil, nil, nil + } + + unmarshaled := cachedInfo{} + if err := json.Unmarshal(bytes, &unmarshaled); err != nil { + return nil, nil, fmt.Errorf("failed to unmarshal cached network %q config: %w", netName, err) + } + if unmarshaled.Kind != CNICacheV1 { + return nil, nil, fmt.Errorf("read cached network %q config has wrong kind: %v", netName, unmarshaled.Kind) + } + + newRt := *rt + if unmarshaled.CniArgs != nil { + newRt.Args = unmarshaled.CniArgs + } + newRt.CapabilityArgs = unmarshaled.CapabilityArgs + + return unmarshaled.Config, 
&newRt, nil +} + +func (c *CNIConfig) getLegacyCachedResult(netName, cniVersion string, rt *RuntimeConf) (types.Result, error) { + fname, err := c.getCacheFilePath(netName, rt) + if err != nil { + return nil, err + } + data, err := ioutil.ReadFile(fname) + if err != nil { + // Ignore read errors; the cached result may not exist on-disk + return nil, nil + } + + // Load the cached result + result, err := create.CreateFromBytes(data) + if err != nil { + return nil, err + } + + // Convert to the config version to ensure plugins get prevResult + // in the same version as the config. The cached result version + // should match the config version unless the config was changed + // while the container was running. + result, err = result.GetAsVersion(cniVersion) + if err != nil { + return nil, fmt.Errorf("failed to convert cached result to config version %q: %w", cniVersion, err) + } + return result, nil +} + +func (c *CNIConfig) getCachedResult(netName, cniVersion string, rt *RuntimeConf) (types.Result, error) { + fname, err := c.getCacheFilePath(netName, rt) + if err != nil { + return nil, err + } + fdata, err := ioutil.ReadFile(fname) + if err != nil { + // Ignore read errors; the cached result may not exist on-disk + return nil, nil + } + + cachedInfo := cachedInfo{} + if err := json.Unmarshal(fdata, &cachedInfo); err != nil || cachedInfo.Kind != CNICacheV1 { + return c.getLegacyCachedResult(netName, cniVersion, rt) + } + + newBytes, err := json.Marshal(&cachedInfo.RawResult) + if err != nil { + return nil, fmt.Errorf("failed to marshal cached network %q config: %w", netName, err) + } + + // Load the cached result + result, err := create.CreateFromBytes(newBytes) + if err != nil { + return nil, err + } + + // Convert to the config version to ensure plugins get prevResult + // in the same version as the config. The cached result version + // should match the config version unless the config was changed + // while the container was running. 
+ result, err = result.GetAsVersion(cniVersion) + if err != nil { + return nil, fmt.Errorf("failed to convert cached result to config version %q: %w", cniVersion, err) + } + return result, nil +} + +// GetNetworkListCachedResult returns the cached Result of the previous +// AddNetworkList() operation for a network list, or an error. +func (c *CNIConfig) GetNetworkListCachedResult(list *NetworkConfigList, rt *RuntimeConf) (types.Result, error) { + return c.getCachedResult(list.Name, list.CNIVersion, rt) +} + +// GetNetworkCachedResult returns the cached Result of the previous +// AddNetwork() operation for a network, or an error. +func (c *CNIConfig) GetNetworkCachedResult(net *NetworkConfig, rt *RuntimeConf) (types.Result, error) { + return c.getCachedResult(net.Network.Name, net.Network.CNIVersion, rt) +} + +// GetNetworkListCachedConfig copies the input RuntimeConf to output +// RuntimeConf with fields updated with info from the cached Config. +func (c *CNIConfig) GetNetworkListCachedConfig(list *NetworkConfigList, rt *RuntimeConf) ([]byte, *RuntimeConf, error) { + return c.getCachedConfig(list.Name, rt) +} + +// GetNetworkCachedConfig copies the input RuntimeConf to output +// RuntimeConf with fields updated with info from the cached Config. 
+func (c *CNIConfig) GetNetworkCachedConfig(net *NetworkConfig, rt *RuntimeConf) ([]byte, *RuntimeConf, error) { + return c.getCachedConfig(net.Network.Name, rt) +} + +func (c *CNIConfig) addNetwork(ctx context.Context, name, cniVersion string, net *NetworkConfig, prevResult types.Result, rt *RuntimeConf) (types.Result, error) { + c.ensureExec() + pluginPath, err := c.exec.FindInPath(net.Network.Type, c.Path) + if err != nil { + return nil, err + } + if err := utils.ValidateContainerID(rt.ContainerID); err != nil { + return nil, err + } + if err := utils.ValidateNetworkName(name); err != nil { + return nil, err + } + if err := utils.ValidateInterfaceName(rt.IfName); err != nil { + return nil, err + } + + newConf, err := buildOneConfig(name, cniVersion, net, prevResult, rt) + if err != nil { + return nil, err + } + + return invoke.ExecPluginWithResult(ctx, pluginPath, newConf.Bytes, c.args("ADD", rt), c.exec) +} + +// AddNetworkList executes a sequence of plugins with the ADD command +func (c *CNIConfig) AddNetworkList(ctx context.Context, list *NetworkConfigList, rt *RuntimeConf) (types.Result, error) { + var err error + var result types.Result + for _, net := range list.Plugins { + result, err = c.addNetwork(ctx, list.Name, list.CNIVersion, net, result, rt) + if err != nil { + return nil, fmt.Errorf("plugin %s failed (add): %w", pluginDescription(net.Network), err) + } + } + + if err = c.cacheAdd(result, list.Bytes, list.Name, rt); err != nil { + return nil, fmt.Errorf("failed to set network %q cached result: %w", list.Name, err) + } + + return result, nil +} + +func (c *CNIConfig) checkNetwork(ctx context.Context, name, cniVersion string, net *NetworkConfig, prevResult types.Result, rt *RuntimeConf) error { + c.ensureExec() + pluginPath, err := c.exec.FindInPath(net.Network.Type, c.Path) + if err != nil { + return err + } + + newConf, err := buildOneConfig(name, cniVersion, net, prevResult, rt) + if err != nil { + return err + } + + return 
invoke.ExecPluginWithoutResult(ctx, pluginPath, newConf.Bytes, c.args("CHECK", rt), c.exec) +} + +// CheckNetworkList executes a sequence of plugins with the CHECK command +func (c *CNIConfig) CheckNetworkList(ctx context.Context, list *NetworkConfigList, rt *RuntimeConf) error { + // CHECK was added in CNI spec version 0.4.0 and higher + if gtet, err := version.GreaterThanOrEqualTo(list.CNIVersion, "0.4.0"); err != nil { + return err + } else if !gtet { + return fmt.Errorf("configuration version %q does not support the CHECK command", list.CNIVersion) + } + + if list.DisableCheck { + return nil + } + + cachedResult, err := c.getCachedResult(list.Name, list.CNIVersion, rt) + if err != nil { + return fmt.Errorf("failed to get network %q cached result: %w", list.Name, err) + } + + for _, net := range list.Plugins { + if err := c.checkNetwork(ctx, list.Name, list.CNIVersion, net, cachedResult, rt); err != nil { + return err + } + } + + return nil +} + +func (c *CNIConfig) delNetwork(ctx context.Context, name, cniVersion string, net *NetworkConfig, prevResult types.Result, rt *RuntimeConf) error { + c.ensureExec() + pluginPath, err := c.exec.FindInPath(net.Network.Type, c.Path) + if err != nil { + return err + } + + newConf, err := buildOneConfig(name, cniVersion, net, prevResult, rt) + if err != nil { + return err + } + + return invoke.ExecPluginWithoutResult(ctx, pluginPath, newConf.Bytes, c.args("DEL", rt), c.exec) +} + +// DelNetworkList executes a sequence of plugins with the DEL command +func (c *CNIConfig) DelNetworkList(ctx context.Context, list *NetworkConfigList, rt *RuntimeConf) error { + var cachedResult types.Result + + // Cached result on DEL was added in CNI spec version 0.4.0 and higher + if gtet, err := version.GreaterThanOrEqualTo(list.CNIVersion, "0.4.0"); err != nil { + return err + } else if gtet { + cachedResult, err = c.getCachedResult(list.Name, list.CNIVersion, rt) + if err != nil { + return fmt.Errorf("failed to get network %q cached result: 
%w", list.Name, err) + } + } + + for i := len(list.Plugins) - 1; i >= 0; i-- { + net := list.Plugins[i] + if err := c.delNetwork(ctx, list.Name, list.CNIVersion, net, cachedResult, rt); err != nil { + return fmt.Errorf("plugin %s failed (delete): %w", pluginDescription(net.Network), err) + } + } + _ = c.cacheDel(list.Name, rt) + + return nil +} + +func pluginDescription(net *types.NetConf) string { + if net == nil { + return "" + } + pluginType := net.Type + out := fmt.Sprintf("type=%q", pluginType) + name := net.Name + if name != "" { + out += fmt.Sprintf(" name=%q", name) + } + return out +} + +// AddNetwork executes the plugin with the ADD command +func (c *CNIConfig) AddNetwork(ctx context.Context, net *NetworkConfig, rt *RuntimeConf) (types.Result, error) { + result, err := c.addNetwork(ctx, net.Network.Name, net.Network.CNIVersion, net, nil, rt) + if err != nil { + return nil, err + } + + if err = c.cacheAdd(result, net.Bytes, net.Network.Name, rt); err != nil { + return nil, fmt.Errorf("failed to set network %q cached result: %w", net.Network.Name, err) + } + + return result, nil +} + +// CheckNetwork executes the plugin with the CHECK command +func (c *CNIConfig) CheckNetwork(ctx context.Context, net *NetworkConfig, rt *RuntimeConf) error { + // CHECK was added in CNI spec version 0.4.0 and higher + if gtet, err := version.GreaterThanOrEqualTo(net.Network.CNIVersion, "0.4.0"); err != nil { + return err + } else if !gtet { + return fmt.Errorf("configuration version %q does not support the CHECK command", net.Network.CNIVersion) + } + + cachedResult, err := c.getCachedResult(net.Network.Name, net.Network.CNIVersion, rt) + if err != nil { + return fmt.Errorf("failed to get network %q cached result: %w", net.Network.Name, err) + } + return c.checkNetwork(ctx, net.Network.Name, net.Network.CNIVersion, net, cachedResult, rt) +} + +// DelNetwork executes the plugin with the DEL command +func (c *CNIConfig) DelNetwork(ctx context.Context, net *NetworkConfig, rt 
*RuntimeConf) error { + var cachedResult types.Result + + // Cached result on DEL was added in CNI spec version 0.4.0 and higher + if gtet, err := version.GreaterThanOrEqualTo(net.Network.CNIVersion, "0.4.0"); err != nil { + return err + } else if gtet { + cachedResult, err = c.getCachedResult(net.Network.Name, net.Network.CNIVersion, rt) + if err != nil { + return fmt.Errorf("failed to get network %q cached result: %w", net.Network.Name, err) + } + } + + if err := c.delNetwork(ctx, net.Network.Name, net.Network.CNIVersion, net, cachedResult, rt); err != nil { + return err + } + _ = c.cacheDel(net.Network.Name, rt) + return nil +} + +// ValidateNetworkList checks that a configuration is reasonably valid. +// - all the specified plugins exist on disk +// - every plugin supports the desired version. +// +// Returns a list of all capabilities supported by the configuration, or error +func (c *CNIConfig) ValidateNetworkList(ctx context.Context, list *NetworkConfigList) ([]string, error) { + version := list.CNIVersion + + // holding map for seen caps (in case of duplicates) + caps := map[string]interface{}{} + + errs := []error{} + for _, net := range list.Plugins { + if err := c.validatePlugin(ctx, net.Network.Type, version); err != nil { + errs = append(errs, err) + } + for c, enabled := range net.Network.Capabilities { + if !enabled { + continue + } + caps[c] = struct{}{} + } + } + + if len(errs) > 0 { + return nil, fmt.Errorf("%v", errs) + } + + // make caps list + cc := make([]string, 0, len(caps)) + for c := range caps { + cc = append(cc, c) + } + + return cc, nil +} + +// ValidateNetwork checks that a configuration is reasonably valid. 
+// It uses the same logic as ValidateNetworkList) +// Returns a list of capabilities +func (c *CNIConfig) ValidateNetwork(ctx context.Context, net *NetworkConfig) ([]string, error) { + caps := []string{} + for c, ok := range net.Network.Capabilities { + if ok { + caps = append(caps, c) + } + } + if err := c.validatePlugin(ctx, net.Network.Type, net.Network.CNIVersion); err != nil { + return nil, err + } + return caps, nil +} + +// validatePlugin checks that an individual plugin's configuration is sane +func (c *CNIConfig) validatePlugin(ctx context.Context, pluginName, expectedVersion string) error { + c.ensureExec() + pluginPath, err := c.exec.FindInPath(pluginName, c.Path) + if err != nil { + return err + } + if expectedVersion == "" { + expectedVersion = "0.1.0" + } + + vi, err := invoke.GetVersionInfo(ctx, pluginPath, c.exec) + if err != nil { + return err + } + for _, vers := range vi.SupportedVersions() { + if vers == expectedVersion { + return nil + } + } + return fmt.Errorf("plugin %s does not support config version %q", pluginName, expectedVersion) +} + +// GetVersionInfo reports which versions of the CNI spec are supported by +// the given plugin. 
+func (c *CNIConfig) GetVersionInfo(ctx context.Context, pluginType string) (version.PluginInfo, error) { + c.ensureExec() + pluginPath, err := c.exec.FindInPath(pluginType, c.Path) + if err != nil { + return nil, err + } + + return invoke.GetVersionInfo(ctx, pluginPath, c.exec) +} + +// ===== +func (c *CNIConfig) args(action string, rt *RuntimeConf) *invoke.Args { + return &invoke.Args{ + Command: action, + ContainerID: rt.ContainerID, + NetNS: rt.NetNS, + PluginArgs: rt.Args, + IfName: rt.IfName, + Path: strings.Join(c.Path, string(os.PathListSeparator)), + } +} diff --git a/vendor/github.com/containernetworking/cni/libcni/conf.go b/vendor/github.com/containernetworking/cni/libcni/conf.go new file mode 100644 index 000000000..3cd6a59d1 --- /dev/null +++ b/vendor/github.com/containernetworking/cni/libcni/conf.go @@ -0,0 +1,270 @@ +// Copyright 2015 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package libcni + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "sort" + + "github.com/containernetworking/cni/pkg/types" +) + +type NotFoundError struct { + Dir string + Name string +} + +func (e NotFoundError) Error() string { + return fmt.Sprintf(`no net configuration with name "%s" in %s`, e.Name, e.Dir) +} + +type NoConfigsFoundError struct { + Dir string +} + +func (e NoConfigsFoundError) Error() string { + return fmt.Sprintf(`no net configurations found in %s`, e.Dir) +} + +func ConfFromBytes(bytes []byte) (*NetworkConfig, error) { + conf := &NetworkConfig{Bytes: bytes, Network: &types.NetConf{}} + if err := json.Unmarshal(bytes, conf.Network); err != nil { + return nil, fmt.Errorf("error parsing configuration: %w", err) + } + if conf.Network.Type == "" { + return nil, fmt.Errorf("error parsing configuration: missing 'type'") + } + return conf, nil +} + +func ConfFromFile(filename string) (*NetworkConfig, error) { + bytes, err := ioutil.ReadFile(filename) + if err != nil { + return nil, fmt.Errorf("error reading %s: %w", filename, err) + } + return ConfFromBytes(bytes) +} + +func ConfListFromBytes(bytes []byte) (*NetworkConfigList, error) { + rawList := make(map[string]interface{}) + if err := json.Unmarshal(bytes, &rawList); err != nil { + return nil, fmt.Errorf("error parsing configuration list: %w", err) + } + + rawName, ok := rawList["name"] + if !ok { + return nil, fmt.Errorf("error parsing configuration list: no name") + } + name, ok := rawName.(string) + if !ok { + return nil, fmt.Errorf("error parsing configuration list: invalid name type %T", rawName) + } + + var cniVersion string + rawVersion, ok := rawList["cniVersion"] + if ok { + cniVersion, ok = rawVersion.(string) + if !ok { + return nil, fmt.Errorf("error parsing configuration list: invalid cniVersion type %T", rawVersion) + } + } + + disableCheck := false + if rawDisableCheck, ok := rawList["disableCheck"]; ok { + disableCheck, ok = rawDisableCheck.(bool) + 
if !ok { + return nil, fmt.Errorf("error parsing configuration list: invalid disableCheck type %T", rawDisableCheck) + } + } + + list := &NetworkConfigList{ + Name: name, + DisableCheck: disableCheck, + CNIVersion: cniVersion, + Bytes: bytes, + } + + var plugins []interface{} + plug, ok := rawList["plugins"] + if !ok { + return nil, fmt.Errorf("error parsing configuration list: no 'plugins' key") + } + plugins, ok = plug.([]interface{}) + if !ok { + return nil, fmt.Errorf("error parsing configuration list: invalid 'plugins' type %T", plug) + } + if len(plugins) == 0 { + return nil, fmt.Errorf("error parsing configuration list: no plugins in list") + } + + for i, conf := range plugins { + newBytes, err := json.Marshal(conf) + if err != nil { + return nil, fmt.Errorf("failed to marshal plugin config %d: %w", i, err) + } + netConf, err := ConfFromBytes(newBytes) + if err != nil { + return nil, fmt.Errorf("failed to parse plugin config %d: %w", i, err) + } + list.Plugins = append(list.Plugins, netConf) + } + + return list, nil +} + +func ConfListFromFile(filename string) (*NetworkConfigList, error) { + bytes, err := ioutil.ReadFile(filename) + if err != nil { + return nil, fmt.Errorf("error reading %s: %w", filename, err) + } + return ConfListFromBytes(bytes) +} + +func ConfFiles(dir string, extensions []string) ([]string, error) { + // In part, adapted from rkt/networking/podenv.go#listFiles + files, err := ioutil.ReadDir(dir) + switch { + case err == nil: // break + case os.IsNotExist(err): + return nil, nil + default: + return nil, err + } + + confFiles := []string{} + for _, f := range files { + if f.IsDir() { + continue + } + fileExt := filepath.Ext(f.Name()) + for _, ext := range extensions { + if fileExt == ext { + confFiles = append(confFiles, filepath.Join(dir, f.Name())) + } + } + } + return confFiles, nil +} + +func LoadConf(dir, name string) (*NetworkConfig, error) { + files, err := ConfFiles(dir, []string{".conf", ".json"}) + switch { + case err != nil: + 
return nil, err + case len(files) == 0: + return nil, NoConfigsFoundError{Dir: dir} + } + sort.Strings(files) + + for _, confFile := range files { + conf, err := ConfFromFile(confFile) + if err != nil { + return nil, err + } + if conf.Network.Name == name { + return conf, nil + } + } + return nil, NotFoundError{dir, name} +} + +func LoadConfList(dir, name string) (*NetworkConfigList, error) { + files, err := ConfFiles(dir, []string{".conflist"}) + if err != nil { + return nil, err + } + sort.Strings(files) + + for _, confFile := range files { + conf, err := ConfListFromFile(confFile) + if err != nil { + return nil, err + } + if conf.Name == name { + return conf, nil + } + } + + // Try and load a network configuration file (instead of list) + // from the same name, then upconvert. + singleConf, err := LoadConf(dir, name) + if err != nil { + // A little extra logic so the error makes sense + if _, ok := err.(NoConfigsFoundError); len(files) != 0 && ok { + // Config lists found but no config files found + return nil, NotFoundError{dir, name} + } + + return nil, err + } + return ConfListFromConf(singleConf) +} + +func InjectConf(original *NetworkConfig, newValues map[string]interface{}) (*NetworkConfig, error) { + config := make(map[string]interface{}) + err := json.Unmarshal(original.Bytes, &config) + if err != nil { + return nil, fmt.Errorf("unmarshal existing network bytes: %w", err) + } + + for key, value := range newValues { + if key == "" { + return nil, fmt.Errorf("keys cannot be empty") + } + + if value == nil { + return nil, fmt.Errorf("key '%s' value must not be nil", key) + } + + config[key] = value + } + + newBytes, err := json.Marshal(config) + if err != nil { + return nil, err + } + + return ConfFromBytes(newBytes) +} + +// ConfListFromConf "upconverts" a network config in to a NetworkConfigList, +// with the single network as the only entry in the list. 
+func ConfListFromConf(original *NetworkConfig) (*NetworkConfigList, error) { + // Re-deserialize the config's json, then make a raw map configlist. + // This may seem a bit strange, but it's to make the Bytes fields + // actually make sense. Otherwise, the generated json is littered with + // golang default values. + + rawConfig := make(map[string]interface{}) + if err := json.Unmarshal(original.Bytes, &rawConfig); err != nil { + return nil, err + } + + rawConfigList := map[string]interface{}{ + "name": original.Network.Name, + "cniVersion": original.Network.CNIVersion, + "plugins": []interface{}{rawConfig}, + } + + b, err := json.Marshal(rawConfigList) + if err != nil { + return nil, err + } + return ConfListFromBytes(b) +} diff --git a/vendor/github.com/containernetworking/cni/pkg/invoke/args.go b/vendor/github.com/containernetworking/cni/pkg/invoke/args.go new file mode 100644 index 000000000..3cdb4bc8d --- /dev/null +++ b/vendor/github.com/containernetworking/cni/pkg/invoke/args.go @@ -0,0 +1,128 @@ +// Copyright 2015 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package invoke + +import ( + "fmt" + "os" + "strings" +) + +type CNIArgs interface { + // For use with os/exec; i.e., return nil to inherit the + // environment from this process + // For use in delegation; inherit the environment from this + // process and allow overrides + AsEnv() []string +} + +type inherited struct{} + +var inheritArgsFromEnv inherited + +func (*inherited) AsEnv() []string { + return nil +} + +func ArgsFromEnv() CNIArgs { + return &inheritArgsFromEnv +} + +type Args struct { + Command string + ContainerID string + NetNS string + PluginArgs [][2]string + PluginArgsStr string + IfName string + Path string +} + +// Args implements the CNIArgs interface +var _ CNIArgs = &Args{} + +func (args *Args) AsEnv() []string { + env := os.Environ() + pluginArgsStr := args.PluginArgsStr + if pluginArgsStr == "" { + pluginArgsStr = stringify(args.PluginArgs) + } + + // Duplicated values which come first will be overridden, so we must put the + // custom values in the end to avoid being overridden by the process environments. + env = append(env, + "CNI_COMMAND="+args.Command, + "CNI_CONTAINERID="+args.ContainerID, + "CNI_NETNS="+args.NetNS, + "CNI_ARGS="+pluginArgsStr, + "CNI_IFNAME="+args.IfName, + "CNI_PATH="+args.Path, + ) + return dedupEnv(env) +} + +// taken from rkt/networking/net_plugin.go +func stringify(pluginArgs [][2]string) string { + entries := make([]string, len(pluginArgs)) + + for i, kv := range pluginArgs { + entries[i] = strings.Join(kv[:], "=") + } + + return strings.Join(entries, ";") +} + +// DelegateArgs implements the CNIArgs interface +// used for delegation to inherit from environments +// and allow some overrides like CNI_COMMAND +var _ CNIArgs = &DelegateArgs{} + +type DelegateArgs struct { + Command string +} + +func (d *DelegateArgs) AsEnv() []string { + env := os.Environ() + + // The custom values should come in the end to override the existing + // process environment of the same key. 
+ env = append(env, + "CNI_COMMAND="+d.Command, + ) + return dedupEnv(env) +} + +// dedupEnv returns a copy of env with any duplicates removed, in favor of later values. +// Items not of the normal environment "key=value" form are preserved unchanged. +func dedupEnv(env []string) []string { + out := make([]string, 0, len(env)) + envMap := map[string]string{} + + for _, kv := range env { + // find the first "=" in environment, if not, just keep it + eq := strings.Index(kv, "=") + if eq < 0 { + out = append(out, kv) + continue + } + envMap[kv[:eq]] = kv[eq+1:] + } + + for k, v := range envMap { + out = append(out, fmt.Sprintf("%s=%s", k, v)) + } + + return out +} diff --git a/vendor/github.com/containernetworking/cni/pkg/invoke/delegate.go b/vendor/github.com/containernetworking/cni/pkg/invoke/delegate.go new file mode 100644 index 000000000..8defe4dd3 --- /dev/null +++ b/vendor/github.com/containernetworking/cni/pkg/invoke/delegate.go @@ -0,0 +1,80 @@ +// Copyright 2016 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package invoke + +import ( + "context" + "os" + "path/filepath" + + "github.com/containernetworking/cni/pkg/types" +) + +func delegateCommon(delegatePlugin string, exec Exec) (string, Exec, error) { + if exec == nil { + exec = defaultExec + } + + paths := filepath.SplitList(os.Getenv("CNI_PATH")) + pluginPath, err := exec.FindInPath(delegatePlugin, paths) + if err != nil { + return "", nil, err + } + + return pluginPath, exec, nil +} + +// DelegateAdd calls the given delegate plugin with the CNI ADD action and +// JSON configuration +func DelegateAdd(ctx context.Context, delegatePlugin string, netconf []byte, exec Exec) (types.Result, error) { + pluginPath, realExec, err := delegateCommon(delegatePlugin, exec) + if err != nil { + return nil, err + } + + // DelegateAdd will override the original "CNI_COMMAND" env from process with ADD + return ExecPluginWithResult(ctx, pluginPath, netconf, delegateArgs("ADD"), realExec) +} + +// DelegateCheck calls the given delegate plugin with the CNI CHECK action and +// JSON configuration +func DelegateCheck(ctx context.Context, delegatePlugin string, netconf []byte, exec Exec) error { + pluginPath, realExec, err := delegateCommon(delegatePlugin, exec) + if err != nil { + return err + } + + // DelegateCheck will override the original CNI_COMMAND env from process with CHECK + return ExecPluginWithoutResult(ctx, pluginPath, netconf, delegateArgs("CHECK"), realExec) +} + +// DelegateDel calls the given delegate plugin with the CNI DEL action and +// JSON configuration +func DelegateDel(ctx context.Context, delegatePlugin string, netconf []byte, exec Exec) error { + pluginPath, realExec, err := delegateCommon(delegatePlugin, exec) + if err != nil { + return err + } + + // DelegateDel will override the original CNI_COMMAND env from process with DEL + return ExecPluginWithoutResult(ctx, pluginPath, netconf, delegateArgs("DEL"), realExec) +} + +// return CNIArgs used by delegation +func delegateArgs(action string) *DelegateArgs { + 
return &DelegateArgs{ + Command: action, + } +} diff --git a/vendor/github.com/containernetworking/cni/pkg/invoke/exec.go b/vendor/github.com/containernetworking/cni/pkg/invoke/exec.go new file mode 100644 index 000000000..3ad07aa8f --- /dev/null +++ b/vendor/github.com/containernetworking/cni/pkg/invoke/exec.go @@ -0,0 +1,187 @@ +// Copyright 2015 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package invoke + +import ( + "context" + "encoding/json" + "fmt" + "os" + + "github.com/containernetworking/cni/pkg/types" + "github.com/containernetworking/cni/pkg/types/create" + "github.com/containernetworking/cni/pkg/version" +) + +// Exec is an interface encapsulates all operations that deal with finding +// and executing a CNI plugin. Tests may provide a fake implementation +// to avoid writing fake plugins to temporary directories during the test. +type Exec interface { + ExecPlugin(ctx context.Context, pluginPath string, stdinData []byte, environ []string) ([]byte, error) + FindInPath(plugin string, paths []string) (string, error) + Decode(jsonBytes []byte) (version.PluginInfo, error) +} + +// Plugin must return result in same version as specified in netconf; but +// for backwards compatibility reasons if the result version is empty use +// config version (rather than technically correct 0.1.0). 
+// https://github.com/containernetworking/cni/issues/895 +func fixupResultVersion(netconf, result []byte) (string, []byte, error) { + versionDecoder := &version.ConfigDecoder{} + confVersion, err := versionDecoder.Decode(netconf) + if err != nil { + return "", nil, err + } + + var rawResult map[string]interface{} + if err := json.Unmarshal(result, &rawResult); err != nil { + return "", nil, fmt.Errorf("failed to unmarshal raw result: %w", err) + } + + // plugin output of "null" is successfully unmarshalled, but results in a nil + // map which causes a panic when the confVersion is assigned below. + if rawResult == nil { + rawResult = make(map[string]interface{}) + } + + // Manually decode Result version; we need to know whether its cniVersion + // is empty, while built-in decoders (correctly) substitute 0.1.0 for an + // empty version per the CNI spec. + if resultVerRaw, ok := rawResult["cniVersion"]; ok { + resultVer, ok := resultVerRaw.(string) + if ok && resultVer != "" { + return resultVer, result, nil + } + } + + // If the cniVersion is not present or empty, assume the result is + // the same CNI spec version as the config + rawResult["cniVersion"] = confVersion + newBytes, err := json.Marshal(rawResult) + if err != nil { + return "", nil, fmt.Errorf("failed to remarshal fixed result: %w", err) + } + + return confVersion, newBytes, nil +} + +// For example, a testcase could pass an instance of the following fakeExec +// object to ExecPluginWithResult() to verify the incoming stdin and environment +// and provide a tailored response: +// +//import ( +// "encoding/json" +// "path" +// "strings" +//) +// +//type fakeExec struct { +// version.PluginDecoder +//} +// +//func (f *fakeExec) ExecPlugin(pluginPath string, stdinData []byte, environ []string) ([]byte, error) { +// net := &types.NetConf{} +// err := json.Unmarshal(stdinData, net) +// if err != nil { +// return nil, fmt.Errorf("failed to unmarshal configuration: %v", err) +// } +// pluginName := 
path.Base(pluginPath) +// if pluginName != net.Type { +// return nil, fmt.Errorf("plugin name %q did not match config type %q", pluginName, net.Type) +// } +// for _, e := range environ { +// // Check environment for forced failure request +// parts := strings.Split(e, "=") +// if len(parts) > 0 && parts[0] == "FAIL" { +// return nil, fmt.Errorf("failed to execute plugin %s", pluginName) +// } +// } +// return []byte("{\"CNIVersion\":\"0.4.0\"}"), nil +//} +// +//func (f *fakeExec) FindInPath(plugin string, paths []string) (string, error) { +// if len(paths) > 0 { +// return path.Join(paths[0], plugin), nil +// } +// return "", fmt.Errorf("failed to find plugin %s in paths %v", plugin, paths) +//} + +func ExecPluginWithResult(ctx context.Context, pluginPath string, netconf []byte, args CNIArgs, exec Exec) (types.Result, error) { + if exec == nil { + exec = defaultExec + } + + stdoutBytes, err := exec.ExecPlugin(ctx, pluginPath, netconf, args.AsEnv()) + if err != nil { + return nil, err + } + + resultVersion, fixedBytes, err := fixupResultVersion(netconf, stdoutBytes) + if err != nil { + return nil, err + } + + return create.Create(resultVersion, fixedBytes) +} + +func ExecPluginWithoutResult(ctx context.Context, pluginPath string, netconf []byte, args CNIArgs, exec Exec) error { + if exec == nil { + exec = defaultExec + } + _, err := exec.ExecPlugin(ctx, pluginPath, netconf, args.AsEnv()) + return err +} + +// GetVersionInfo returns the version information available about the plugin. +// For recent-enough plugins, it uses the information returned by the VERSION +// command. 
For older plugins which do not recognize that command, it reports +// version 0.1.0 +func GetVersionInfo(ctx context.Context, pluginPath string, exec Exec) (version.PluginInfo, error) { + if exec == nil { + exec = defaultExec + } + args := &Args{ + Command: "VERSION", + + // set fake values required by plugins built against an older version of skel + NetNS: "dummy", + IfName: "dummy", + Path: "dummy", + } + stdin := []byte(fmt.Sprintf(`{"cniVersion":%q}`, version.Current())) + stdoutBytes, err := exec.ExecPlugin(ctx, pluginPath, stdin, args.AsEnv()) + if err != nil { + if err.Error() == "unknown CNI_COMMAND: VERSION" { + return version.PluginSupports("0.1.0"), nil + } + return nil, err + } + + return exec.Decode(stdoutBytes) +} + +// DefaultExec is an object that implements the Exec interface which looks +// for and executes plugins from disk. +type DefaultExec struct { + *RawExec + version.PluginDecoder +} + +// DefaultExec implements the Exec interface +var _ Exec = &DefaultExec{} + +var defaultExec = &DefaultExec{ + RawExec: &RawExec{Stderr: os.Stderr}, +} diff --git a/vendor/github.com/containernetworking/cni/pkg/invoke/find.go b/vendor/github.com/containernetworking/cni/pkg/invoke/find.go new file mode 100644 index 000000000..e62029eb7 --- /dev/null +++ b/vendor/github.com/containernetworking/cni/pkg/invoke/find.go @@ -0,0 +1,48 @@ +// Copyright 2015 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package invoke + +import ( + "fmt" + "os" + "path/filepath" + "strings" +) + +// FindInPath returns the full path of the plugin by searching in the provided path +func FindInPath(plugin string, paths []string) (string, error) { + if plugin == "" { + return "", fmt.Errorf("no plugin name provided") + } + + if strings.ContainsRune(plugin, os.PathSeparator) { + return "", fmt.Errorf("invalid plugin name: %s", plugin) + } + + if len(paths) == 0 { + return "", fmt.Errorf("no paths provided") + } + + for _, path := range paths { + for _, fe := range ExecutableFileExtensions { + fullpath := filepath.Join(path, plugin) + fe + if fi, err := os.Stat(fullpath); err == nil && fi.Mode().IsRegular() { + return fullpath, nil + } + } + } + + return "", fmt.Errorf("failed to find plugin %q in path %s", plugin, paths) +} diff --git a/vendor/github.com/containernetworking/cni/pkg/invoke/os_unix.go b/vendor/github.com/containernetworking/cni/pkg/invoke/os_unix.go new file mode 100644 index 000000000..9bcfb4553 --- /dev/null +++ b/vendor/github.com/containernetworking/cni/pkg/invoke/os_unix.go @@ -0,0 +1,20 @@ +// Copyright 2016 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build darwin dragonfly freebsd linux netbsd openbsd solaris + +package invoke + +// Valid file extensions for plugin executables. 
+var ExecutableFileExtensions = []string{""} diff --git a/vendor/github.com/containernetworking/cni/pkg/invoke/os_windows.go b/vendor/github.com/containernetworking/cni/pkg/invoke/os_windows.go new file mode 100644 index 000000000..7665125b1 --- /dev/null +++ b/vendor/github.com/containernetworking/cni/pkg/invoke/os_windows.go @@ -0,0 +1,18 @@ +// Copyright 2016 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package invoke + +// Valid file extensions for plugin executables. +var ExecutableFileExtensions = []string{".exe", ""} diff --git a/vendor/github.com/containernetworking/cni/pkg/invoke/raw_exec.go b/vendor/github.com/containernetworking/cni/pkg/invoke/raw_exec.go new file mode 100644 index 000000000..5ab5cc885 --- /dev/null +++ b/vendor/github.com/containernetworking/cni/pkg/invoke/raw_exec.go @@ -0,0 +1,88 @@ +// Copyright 2016 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package invoke + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io" + "os/exec" + "strings" + "time" + + "github.com/containernetworking/cni/pkg/types" +) + +type RawExec struct { + Stderr io.Writer +} + +func (e *RawExec) ExecPlugin(ctx context.Context, pluginPath string, stdinData []byte, environ []string) ([]byte, error) { + stdout := &bytes.Buffer{} + stderr := &bytes.Buffer{} + c := exec.CommandContext(ctx, pluginPath) + c.Env = environ + c.Stdin = bytes.NewBuffer(stdinData) + c.Stdout = stdout + c.Stderr = stderr + + // Retry the command on "text file busy" errors + for i := 0; i <= 5; i++ { + err := c.Run() + + // Command succeeded + if err == nil { + break + } + + // If the plugin is currently about to be written, then we wait a + // second and try it again + if strings.Contains(err.Error(), "text file busy") { + time.Sleep(time.Second) + continue + } + + // All other errors except than the busy text file + return nil, e.pluginErr(err, stdout.Bytes(), stderr.Bytes()) + } + + // Copy stderr to caller's buffer in case plugin printed to both + // stdout and stderr for some reason. Ignore failures as stderr is + // only informational. 
+ if e.Stderr != nil && stderr.Len() > 0 { + _, _ = stderr.WriteTo(e.Stderr) + } + return stdout.Bytes(), nil +} + +func (e *RawExec) pluginErr(err error, stdout, stderr []byte) error { + emsg := types.Error{} + if len(stdout) == 0 { + if len(stderr) == 0 { + emsg.Msg = fmt.Sprintf("netplugin failed with no error message: %v", err) + } else { + emsg.Msg = fmt.Sprintf("netplugin failed: %q", string(stderr)) + } + } else if perr := json.Unmarshal(stdout, &emsg); perr != nil { + emsg.Msg = fmt.Sprintf("netplugin failed but error parsing its diagnostic message %q: %v", string(stdout), perr) + } + return &emsg +} + +func (e *RawExec) FindInPath(plugin string, paths []string) (string, error) { + return FindInPath(plugin, paths) +} diff --git a/vendor/github.com/containernetworking/cni/pkg/types/020/types.go b/vendor/github.com/containernetworking/cni/pkg/types/020/types.go new file mode 100644 index 000000000..99b151ff2 --- /dev/null +++ b/vendor/github.com/containernetworking/cni/pkg/types/020/types.go @@ -0,0 +1,189 @@ +// Copyright 2016 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package types020 + +import ( + "encoding/json" + "fmt" + "io" + "net" + "os" + + "github.com/containernetworking/cni/pkg/types" + convert "github.com/containernetworking/cni/pkg/types/internal" +) + +const ImplementedSpecVersion string = "0.2.0" + +var supportedVersions = []string{"", "0.1.0", ImplementedSpecVersion} + +// Register converters for all versions less than the implemented spec version +func init() { + convert.RegisterConverter("0.1.0", []string{ImplementedSpecVersion}, convertFrom010) + convert.RegisterConverter(ImplementedSpecVersion, []string{"0.1.0"}, convertTo010) + + // Creator + convert.RegisterCreator(supportedVersions, NewResult) +} + +// Compatibility types for CNI version 0.1.0 and 0.2.0 + +// NewResult creates a new Result object from JSON data. The JSON data +// must be compatible with the CNI versions implemented by this type. +func NewResult(data []byte) (types.Result, error) { + result := &Result{} + if err := json.Unmarshal(data, result); err != nil { + return nil, err + } + for _, v := range supportedVersions { + if result.CNIVersion == v { + if result.CNIVersion == "" { + result.CNIVersion = "0.1.0" + } + return result, nil + } + } + return nil, fmt.Errorf("result type supports %v but unmarshalled CNIVersion is %q", + supportedVersions, result.CNIVersion) +} + +// GetResult converts the given Result object to the ImplementedSpecVersion +// and returns the concrete type or an error +func GetResult(r types.Result) (*Result, error) { + result020, err := convert.Convert(r, ImplementedSpecVersion) + if err != nil { + return nil, err + } + result, ok := result020.(*Result) + if !ok { + return nil, fmt.Errorf("failed to convert result") + } + return result, nil +} + +func convertFrom010(from types.Result, toVersion string) (types.Result, error) { + if toVersion != "0.2.0" { + panic("only converts to version 0.2.0") + } + fromResult := from.(*Result) + return &Result{ + CNIVersion: ImplementedSpecVersion, + IP4: fromResult.IP4.Copy(), + 
IP6: fromResult.IP6.Copy(), + DNS: *fromResult.DNS.Copy(), + }, nil +} + +func convertTo010(from types.Result, toVersion string) (types.Result, error) { + if toVersion != "0.1.0" { + panic("only converts to version 0.1.0") + } + fromResult := from.(*Result) + return &Result{ + CNIVersion: "0.1.0", + IP4: fromResult.IP4.Copy(), + IP6: fromResult.IP6.Copy(), + DNS: *fromResult.DNS.Copy(), + }, nil +} + +// Result is what gets returned from the plugin (via stdout) to the caller +type Result struct { + CNIVersion string `json:"cniVersion,omitempty"` + IP4 *IPConfig `json:"ip4,omitempty"` + IP6 *IPConfig `json:"ip6,omitempty"` + DNS types.DNS `json:"dns,omitempty"` +} + +func (r *Result) Version() string { + return r.CNIVersion +} + +func (r *Result) GetAsVersion(version string) (types.Result, error) { + // If the creator of the result did not set the CNIVersion, assume it + // should be the highest spec version implemented by this Result + if r.CNIVersion == "" { + r.CNIVersion = ImplementedSpecVersion + } + return convert.Convert(r, version) +} + +func (r *Result) Print() error { + return r.PrintTo(os.Stdout) +} + +func (r *Result) PrintTo(writer io.Writer) error { + data, err := json.MarshalIndent(r, "", " ") + if err != nil { + return err + } + _, err = writer.Write(data) + return err +} + +// IPConfig contains values necessary to configure an interface +type IPConfig struct { + IP net.IPNet + Gateway net.IP + Routes []types.Route +} + +func (i *IPConfig) Copy() *IPConfig { + if i == nil { + return nil + } + + var routes []types.Route + for _, fromRoute := range i.Routes { + routes = append(routes, *fromRoute.Copy()) + } + return &IPConfig{ + IP: i.IP, + Gateway: i.Gateway, + Routes: routes, + } +} + +// net.IPNet is not JSON (un)marshallable so this duality is needed +// for our custom IPNet type + +// JSON (un)marshallable types +type ipConfig struct { + IP types.IPNet `json:"ip"` + Gateway net.IP `json:"gateway,omitempty"` + Routes []types.Route 
`json:"routes,omitempty"` +} + +func (c *IPConfig) MarshalJSON() ([]byte, error) { + ipc := ipConfig{ + IP: types.IPNet(c.IP), + Gateway: c.Gateway, + Routes: c.Routes, + } + + return json.Marshal(ipc) +} + +func (c *IPConfig) UnmarshalJSON(data []byte) error { + ipc := ipConfig{} + if err := json.Unmarshal(data, &ipc); err != nil { + return err + } + + c.IP = net.IPNet(ipc.IP) + c.Gateway = ipc.Gateway + c.Routes = ipc.Routes + return nil +} diff --git a/vendor/github.com/containernetworking/cni/pkg/types/040/types.go b/vendor/github.com/containernetworking/cni/pkg/types/040/types.go new file mode 100644 index 000000000..3633b0eaa --- /dev/null +++ b/vendor/github.com/containernetworking/cni/pkg/types/040/types.go @@ -0,0 +1,306 @@ +// Copyright 2016 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package types040 + +import ( + "encoding/json" + "fmt" + "io" + "net" + "os" + + "github.com/containernetworking/cni/pkg/types" + types020 "github.com/containernetworking/cni/pkg/types/020" + convert "github.com/containernetworking/cni/pkg/types/internal" +) + +const ImplementedSpecVersion string = "0.4.0" + +var supportedVersions = []string{"0.3.0", "0.3.1", ImplementedSpecVersion} + +// Register converters for all versions less than the implemented spec version +func init() { + // Up-converters + convert.RegisterConverter("0.1.0", supportedVersions, convertFrom02x) + convert.RegisterConverter("0.2.0", supportedVersions, convertFrom02x) + convert.RegisterConverter("0.3.0", supportedVersions, convertInternal) + convert.RegisterConverter("0.3.1", supportedVersions, convertInternal) + + // Down-converters + convert.RegisterConverter("0.4.0", []string{"0.3.0", "0.3.1"}, convertInternal) + convert.RegisterConverter("0.4.0", []string{"0.1.0", "0.2.0"}, convertTo02x) + convert.RegisterConverter("0.3.1", []string{"0.1.0", "0.2.0"}, convertTo02x) + convert.RegisterConverter("0.3.0", []string{"0.1.0", "0.2.0"}, convertTo02x) + + // Creator + convert.RegisterCreator(supportedVersions, NewResult) +} + +func NewResult(data []byte) (types.Result, error) { + result := &Result{} + if err := json.Unmarshal(data, result); err != nil { + return nil, err + } + for _, v := range supportedVersions { + if result.CNIVersion == v { + return result, nil + } + } + return nil, fmt.Errorf("result type supports %v but unmarshalled CNIVersion is %q", + supportedVersions, result.CNIVersion) +} + +func GetResult(r types.Result) (*Result, error) { + resultCurrent, err := r.GetAsVersion(ImplementedSpecVersion) + if err != nil { + return nil, err + } + result, ok := resultCurrent.(*Result) + if !ok { + return nil, fmt.Errorf("failed to convert result") + } + return result, nil +} + +func NewResultFromResult(result types.Result) (*Result, error) { + newResult, err := convert.Convert(result, 
ImplementedSpecVersion) + if err != nil { + return nil, err + } + return newResult.(*Result), nil +} + +// Result is what gets returned from the plugin (via stdout) to the caller +type Result struct { + CNIVersion string `json:"cniVersion,omitempty"` + Interfaces []*Interface `json:"interfaces,omitempty"` + IPs []*IPConfig `json:"ips,omitempty"` + Routes []*types.Route `json:"routes,omitempty"` + DNS types.DNS `json:"dns,omitempty"` +} + +func convert020IPConfig(from *types020.IPConfig, ipVersion string) *IPConfig { + return &IPConfig{ + Version: ipVersion, + Address: from.IP, + Gateway: from.Gateway, + } +} + +func convertFrom02x(from types.Result, toVersion string) (types.Result, error) { + fromResult := from.(*types020.Result) + toResult := &Result{ + CNIVersion: toVersion, + DNS: *fromResult.DNS.Copy(), + Routes: []*types.Route{}, + } + if fromResult.IP4 != nil { + toResult.IPs = append(toResult.IPs, convert020IPConfig(fromResult.IP4, "4")) + for _, fromRoute := range fromResult.IP4.Routes { + toResult.Routes = append(toResult.Routes, fromRoute.Copy()) + } + } + + if fromResult.IP6 != nil { + toResult.IPs = append(toResult.IPs, convert020IPConfig(fromResult.IP6, "6")) + for _, fromRoute := range fromResult.IP6.Routes { + toResult.Routes = append(toResult.Routes, fromRoute.Copy()) + } + } + + return toResult, nil +} + +func convertInternal(from types.Result, toVersion string) (types.Result, error) { + fromResult := from.(*Result) + toResult := &Result{ + CNIVersion: toVersion, + DNS: *fromResult.DNS.Copy(), + Routes: []*types.Route{}, + } + for _, fromIntf := range fromResult.Interfaces { + toResult.Interfaces = append(toResult.Interfaces, fromIntf.Copy()) + } + for _, fromIPC := range fromResult.IPs { + toResult.IPs = append(toResult.IPs, fromIPC.Copy()) + } + for _, fromRoute := range fromResult.Routes { + toResult.Routes = append(toResult.Routes, fromRoute.Copy()) + } + return toResult, nil +} + +func convertTo02x(from types.Result, toVersion string) 
(types.Result, error) { + fromResult := from.(*Result) + toResult := &types020.Result{ + CNIVersion: toVersion, + DNS: *fromResult.DNS.Copy(), + } + + for _, fromIP := range fromResult.IPs { + // Only convert the first IP address of each version as 0.2.0 + // and earlier cannot handle multiple IP addresses + if fromIP.Version == "4" && toResult.IP4 == nil { + toResult.IP4 = &types020.IPConfig{ + IP: fromIP.Address, + Gateway: fromIP.Gateway, + } + } else if fromIP.Version == "6" && toResult.IP6 == nil { + toResult.IP6 = &types020.IPConfig{ + IP: fromIP.Address, + Gateway: fromIP.Gateway, + } + } + if toResult.IP4 != nil && toResult.IP6 != nil { + break + } + } + + for _, fromRoute := range fromResult.Routes { + is4 := fromRoute.Dst.IP.To4() != nil + if is4 && toResult.IP4 != nil { + toResult.IP4.Routes = append(toResult.IP4.Routes, types.Route{ + Dst: fromRoute.Dst, + GW: fromRoute.GW, + }) + } else if !is4 && toResult.IP6 != nil { + toResult.IP6.Routes = append(toResult.IP6.Routes, types.Route{ + Dst: fromRoute.Dst, + GW: fromRoute.GW, + }) + } + } + + // 0.2.0 and earlier require at least one IP address in the Result + if toResult.IP4 == nil && toResult.IP6 == nil { + return nil, fmt.Errorf("cannot convert: no valid IP addresses") + } + + return toResult, nil +} + +func (r *Result) Version() string { + return r.CNIVersion +} + +func (r *Result) GetAsVersion(version string) (types.Result, error) { + // If the creator of the result did not set the CNIVersion, assume it + // should be the highest spec version implemented by this Result + if r.CNIVersion == "" { + r.CNIVersion = ImplementedSpecVersion + } + return convert.Convert(r, version) +} + +func (r *Result) Print() error { + return r.PrintTo(os.Stdout) +} + +func (r *Result) PrintTo(writer io.Writer) error { + data, err := json.MarshalIndent(r, "", " ") + if err != nil { + return err + } + _, err = writer.Write(data) + return err +} + +// Interface contains values about the created interfaces +type Interface 
struct { + Name string `json:"name"` + Mac string `json:"mac,omitempty"` + Sandbox string `json:"sandbox,omitempty"` +} + +func (i *Interface) String() string { + return fmt.Sprintf("%+v", *i) +} + +func (i *Interface) Copy() *Interface { + if i == nil { + return nil + } + newIntf := *i + return &newIntf +} + +// Int returns a pointer to the int value passed in. Used to +// set the IPConfig.Interface field. +func Int(v int) *int { + return &v +} + +// IPConfig contains values necessary to configure an IP address on an interface +type IPConfig struct { + // IP version, either "4" or "6" + Version string + // Index into Result structs Interfaces list + Interface *int + Address net.IPNet + Gateway net.IP +} + +func (i *IPConfig) String() string { + return fmt.Sprintf("%+v", *i) +} + +func (i *IPConfig) Copy() *IPConfig { + if i == nil { + return nil + } + + ipc := &IPConfig{ + Version: i.Version, + Address: i.Address, + Gateway: i.Gateway, + } + if i.Interface != nil { + intf := *i.Interface + ipc.Interface = &intf + } + return ipc +} + +// JSON (un)marshallable types +type ipConfig struct { + Version string `json:"version"` + Interface *int `json:"interface,omitempty"` + Address types.IPNet `json:"address"` + Gateway net.IP `json:"gateway,omitempty"` +} + +func (c *IPConfig) MarshalJSON() ([]byte, error) { + ipc := ipConfig{ + Version: c.Version, + Interface: c.Interface, + Address: types.IPNet(c.Address), + Gateway: c.Gateway, + } + + return json.Marshal(ipc) +} + +func (c *IPConfig) UnmarshalJSON(data []byte) error { + ipc := ipConfig{} + if err := json.Unmarshal(data, &ipc); err != nil { + return err + } + + c.Version = ipc.Version + c.Interface = ipc.Interface + c.Address = net.IPNet(ipc.Address) + c.Gateway = ipc.Gateway + return nil +} diff --git a/vendor/github.com/containernetworking/cni/pkg/types/100/types.go b/vendor/github.com/containernetworking/cni/pkg/types/100/types.go new file mode 100644 index 000000000..0e1e8b857 --- /dev/null +++ 
b/vendor/github.com/containernetworking/cni/pkg/types/100/types.go @@ -0,0 +1,307 @@ +// Copyright 2016 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package types100 + +import ( + "encoding/json" + "fmt" + "io" + "net" + "os" + + "github.com/containernetworking/cni/pkg/types" + types040 "github.com/containernetworking/cni/pkg/types/040" + convert "github.com/containernetworking/cni/pkg/types/internal" +) + +const ImplementedSpecVersion string = "1.0.0" + +var supportedVersions = []string{ImplementedSpecVersion} + +// Register converters for all versions less than the implemented spec version +func init() { + // Up-converters + convert.RegisterConverter("0.1.0", supportedVersions, convertFrom02x) + convert.RegisterConverter("0.2.0", supportedVersions, convertFrom02x) + convert.RegisterConverter("0.3.0", supportedVersions, convertFrom04x) + convert.RegisterConverter("0.3.1", supportedVersions, convertFrom04x) + convert.RegisterConverter("0.4.0", supportedVersions, convertFrom04x) + + // Down-converters + convert.RegisterConverter("1.0.0", []string{"0.3.0", "0.3.1", "0.4.0"}, convertTo04x) + convert.RegisterConverter("1.0.0", []string{"0.1.0", "0.2.0"}, convertTo02x) + + // Creator + convert.RegisterCreator(supportedVersions, NewResult) +} + +func NewResult(data []byte) (types.Result, error) { + result := &Result{} + if err := json.Unmarshal(data, result); err != nil { + return nil, err + } + for _, v := range supportedVersions { + if 
result.CNIVersion == v { + return result, nil + } + } + return nil, fmt.Errorf("result type supports %v but unmarshalled CNIVersion is %q", + supportedVersions, result.CNIVersion) +} + +func GetResult(r types.Result) (*Result, error) { + resultCurrent, err := r.GetAsVersion(ImplementedSpecVersion) + if err != nil { + return nil, err + } + result, ok := resultCurrent.(*Result) + if !ok { + return nil, fmt.Errorf("failed to convert result") + } + return result, nil +} + +func NewResultFromResult(result types.Result) (*Result, error) { + newResult, err := convert.Convert(result, ImplementedSpecVersion) + if err != nil { + return nil, err + } + return newResult.(*Result), nil +} + +// Result is what gets returned from the plugin (via stdout) to the caller +type Result struct { + CNIVersion string `json:"cniVersion,omitempty"` + Interfaces []*Interface `json:"interfaces,omitempty"` + IPs []*IPConfig `json:"ips,omitempty"` + Routes []*types.Route `json:"routes,omitempty"` + DNS types.DNS `json:"dns,omitempty"` +} + +func convertFrom02x(from types.Result, toVersion string) (types.Result, error) { + result040, err := convert.Convert(from, "0.4.0") + if err != nil { + return nil, err + } + result100, err := convertFrom04x(result040, ImplementedSpecVersion) + if err != nil { + return nil, err + } + return result100, nil +} + +func convertIPConfigFrom040(from *types040.IPConfig) *IPConfig { + to := &IPConfig{ + Address: from.Address, + Gateway: from.Gateway, + } + if from.Interface != nil { + intf := *from.Interface + to.Interface = &intf + } + return to +} + +func convertInterfaceFrom040(from *types040.Interface) *Interface { + return &Interface{ + Name: from.Name, + Mac: from.Mac, + Sandbox: from.Sandbox, + } +} + +func convertFrom04x(from types.Result, toVersion string) (types.Result, error) { + fromResult := from.(*types040.Result) + toResult := &Result{ + CNIVersion: toVersion, + DNS: *fromResult.DNS.Copy(), + Routes: []*types.Route{}, + } + for _, fromIntf := range 
fromResult.Interfaces { + toResult.Interfaces = append(toResult.Interfaces, convertInterfaceFrom040(fromIntf)) + } + for _, fromIPC := range fromResult.IPs { + toResult.IPs = append(toResult.IPs, convertIPConfigFrom040(fromIPC)) + } + for _, fromRoute := range fromResult.Routes { + toResult.Routes = append(toResult.Routes, fromRoute.Copy()) + } + return toResult, nil +} + +func convertIPConfigTo040(from *IPConfig) *types040.IPConfig { + version := "6" + if from.Address.IP.To4() != nil { + version = "4" + } + to := &types040.IPConfig{ + Version: version, + Address: from.Address, + Gateway: from.Gateway, + } + if from.Interface != nil { + intf := *from.Interface + to.Interface = &intf + } + return to +} + +func convertInterfaceTo040(from *Interface) *types040.Interface { + return &types040.Interface{ + Name: from.Name, + Mac: from.Mac, + Sandbox: from.Sandbox, + } +} + +func convertTo04x(from types.Result, toVersion string) (types.Result, error) { + fromResult := from.(*Result) + toResult := &types040.Result{ + CNIVersion: toVersion, + DNS: *fromResult.DNS.Copy(), + Routes: []*types.Route{}, + } + for _, fromIntf := range fromResult.Interfaces { + toResult.Interfaces = append(toResult.Interfaces, convertInterfaceTo040(fromIntf)) + } + for _, fromIPC := range fromResult.IPs { + toResult.IPs = append(toResult.IPs, convertIPConfigTo040(fromIPC)) + } + for _, fromRoute := range fromResult.Routes { + toResult.Routes = append(toResult.Routes, fromRoute.Copy()) + } + return toResult, nil +} + +func convertTo02x(from types.Result, toVersion string) (types.Result, error) { + // First convert to 0.4.0 + result040, err := convertTo04x(from, "0.4.0") + if err != nil { + return nil, err + } + result02x, err := convert.Convert(result040, toVersion) + if err != nil { + return nil, err + } + return result02x, nil +} + +func (r *Result) Version() string { + return r.CNIVersion +} + +func (r *Result) GetAsVersion(version string) (types.Result, error) { + // If the creator of the 
result did not set the CNIVersion, assume it + // should be the highest spec version implemented by this Result + if r.CNIVersion == "" { + r.CNIVersion = ImplementedSpecVersion + } + return convert.Convert(r, version) +} + +func (r *Result) Print() error { + return r.PrintTo(os.Stdout) +} + +func (r *Result) PrintTo(writer io.Writer) error { + data, err := json.MarshalIndent(r, "", " ") + if err != nil { + return err + } + _, err = writer.Write(data) + return err +} + +// Interface contains values about the created interfaces +type Interface struct { + Name string `json:"name"` + Mac string `json:"mac,omitempty"` + Sandbox string `json:"sandbox,omitempty"` +} + +func (i *Interface) String() string { + return fmt.Sprintf("%+v", *i) +} + +func (i *Interface) Copy() *Interface { + if i == nil { + return nil + } + newIntf := *i + return &newIntf +} + +// Int returns a pointer to the int value passed in. Used to +// set the IPConfig.Interface field. +func Int(v int) *int { + return &v +} + +// IPConfig contains values necessary to configure an IP address on an interface +type IPConfig struct { + // Index into Result structs Interfaces list + Interface *int + Address net.IPNet + Gateway net.IP +} + +func (i *IPConfig) String() string { + return fmt.Sprintf("%+v", *i) +} + +func (i *IPConfig) Copy() *IPConfig { + if i == nil { + return nil + } + + ipc := &IPConfig{ + Address: i.Address, + Gateway: i.Gateway, + } + if i.Interface != nil { + intf := *i.Interface + ipc.Interface = &intf + } + return ipc +} + +// JSON (un)marshallable types +type ipConfig struct { + Interface *int `json:"interface,omitempty"` + Address types.IPNet `json:"address"` + Gateway net.IP `json:"gateway,omitempty"` +} + +func (c *IPConfig) MarshalJSON() ([]byte, error) { + ipc := ipConfig{ + Interface: c.Interface, + Address: types.IPNet(c.Address), + Gateway: c.Gateway, + } + + return json.Marshal(ipc) +} + +func (c *IPConfig) UnmarshalJSON(data []byte) error { + ipc := ipConfig{} + if err := 
json.Unmarshal(data, &ipc); err != nil { + return err + } + + c.Interface = ipc.Interface + c.Address = net.IPNet(ipc.Address) + c.Gateway = ipc.Gateway + return nil +} diff --git a/vendor/github.com/containernetworking/cni/pkg/types/args.go b/vendor/github.com/containernetworking/cni/pkg/types/args.go new file mode 100644 index 000000000..7516f03ef --- /dev/null +++ b/vendor/github.com/containernetworking/cni/pkg/types/args.go @@ -0,0 +1,122 @@ +// Copyright 2015 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import ( + "encoding" + "fmt" + "reflect" + "strings" +) + +// UnmarshallableBool typedef for builtin bool +// because builtin type's methods can't be declared +type UnmarshallableBool bool + +// UnmarshalText implements the encoding.TextUnmarshaler interface. +// Returns boolean true if the string is "1" or "[Tt]rue" +// Returns boolean false if the string is "0" or "[Ff]alse" +func (b *UnmarshallableBool) UnmarshalText(data []byte) error { + s := strings.ToLower(string(data)) + switch s { + case "1", "true": + *b = true + case "0", "false": + *b = false + default: + return fmt.Errorf("boolean unmarshal error: invalid input %s", s) + } + return nil +} + +// UnmarshallableString typedef for builtin string +type UnmarshallableString string + +// UnmarshalText implements the encoding.TextUnmarshaler interface. 
+// Returns the string +func (s *UnmarshallableString) UnmarshalText(data []byte) error { + *s = UnmarshallableString(data) + return nil +} + +// CommonArgs contains the IgnoreUnknown argument +// and must be embedded by all Arg structs +type CommonArgs struct { + IgnoreUnknown UnmarshallableBool `json:"ignoreunknown,omitempty"` +} + +// GetKeyField is a helper function to receive Values +// Values that represent a pointer to a struct +func GetKeyField(keyString string, v reflect.Value) reflect.Value { + return v.Elem().FieldByName(keyString) +} + +// UnmarshalableArgsError is used to indicate error unmarshalling args +// from the args-string in the form "K=V;K2=V2;..." +type UnmarshalableArgsError struct { + error +} + +// LoadArgs parses args from a string in the form "K=V;K2=V2;..." +func LoadArgs(args string, container interface{}) error { + if args == "" { + return nil + } + + containerValue := reflect.ValueOf(container) + + pairs := strings.Split(args, ";") + unknownArgs := []string{} + for _, pair := range pairs { + kv := strings.Split(pair, "=") + if len(kv) != 2 { + return fmt.Errorf("ARGS: invalid pair %q", pair) + } + keyString := kv[0] + valueString := kv[1] + keyField := GetKeyField(keyString, containerValue) + if !keyField.IsValid() { + unknownArgs = append(unknownArgs, pair) + continue + } + + var keyFieldInterface interface{} + switch { + case keyField.Kind() == reflect.Ptr: + keyField.Set(reflect.New(keyField.Type().Elem())) + keyFieldInterface = keyField.Interface() + case keyField.CanAddr() && keyField.Addr().CanInterface(): + keyFieldInterface = keyField.Addr().Interface() + default: + return UnmarshalableArgsError{fmt.Errorf("field '%s' has no valid interface", keyString)} + } + u, ok := keyFieldInterface.(encoding.TextUnmarshaler) + if !ok { + return UnmarshalableArgsError{fmt.Errorf( + "ARGS: cannot unmarshal into field '%s' - type '%s' does not implement encoding.TextUnmarshaler", + keyString, reflect.TypeOf(keyFieldInterface))} + } + err := 
u.UnmarshalText([]byte(valueString)) + if err != nil { + return fmt.Errorf("ARGS: error parsing value of pair %q: %w", pair, err) + } + } + + isIgnoreUnknown := GetKeyField("IgnoreUnknown", containerValue).Bool() + if len(unknownArgs) > 0 && !isIgnoreUnknown { + return fmt.Errorf("ARGS: unknown args %q", unknownArgs) + } + return nil +} diff --git a/vendor/github.com/containernetworking/cni/pkg/types/create/create.go b/vendor/github.com/containernetworking/cni/pkg/types/create/create.go new file mode 100644 index 000000000..ed28b33e8 --- /dev/null +++ b/vendor/github.com/containernetworking/cni/pkg/types/create/create.go @@ -0,0 +1,56 @@ +// Copyright 2016 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package create + +import ( + "encoding/json" + "fmt" + + "github.com/containernetworking/cni/pkg/types" + convert "github.com/containernetworking/cni/pkg/types/internal" +) + +// DecodeVersion returns the CNI version from CNI configuration or result JSON, +// or an error if the operation could not be performed. 
+func DecodeVersion(jsonBytes []byte) (string, error) { + var conf struct { + CNIVersion string `json:"cniVersion"` + } + err := json.Unmarshal(jsonBytes, &conf) + if err != nil { + return "", fmt.Errorf("decoding version from network config: %w", err) + } + if conf.CNIVersion == "" { + return "0.1.0", nil + } + return conf.CNIVersion, nil +} + +// Create creates a CNI Result using the given JSON with the expected +// version, or an error if the creation could not be performed +func Create(version string, bytes []byte) (types.Result, error) { + return convert.Create(version, bytes) +} + +// CreateFromBytes creates a CNI Result from the given JSON, automatically +// detecting the CNI spec version of the result. An error is returned if the +// operation could not be performed. +func CreateFromBytes(bytes []byte) (types.Result, error) { + version, err := DecodeVersion(bytes) + if err != nil { + return nil, err + } + return convert.Create(version, bytes) +} diff --git a/vendor/github.com/containernetworking/cni/pkg/types/internal/convert.go b/vendor/github.com/containernetworking/cni/pkg/types/internal/convert.go new file mode 100644 index 000000000..bdbe4b0a5 --- /dev/null +++ b/vendor/github.com/containernetworking/cni/pkg/types/internal/convert.go @@ -0,0 +1,92 @@ +// Copyright 2016 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package convert + +import ( + "fmt" + + "github.com/containernetworking/cni/pkg/types" +) + +// ConvertFn should convert from the given arbitrary Result type into a +// Result implementing CNI specification version passed in toVersion. +// The function is guaranteed to be passed a Result type matching the +// fromVersion it was registered with, and is guaranteed to be +// passed a toVersion matching one of the toVersions it was registered with. +type ConvertFn func(from types.Result, toVersion string) (types.Result, error) + +type converter struct { + // fromVersion is the CNI Result spec version that convertFn accepts + fromVersion string + // toVersions is a list of versions that convertFn can convert to + toVersions []string + convertFn ConvertFn +} + +var converters []*converter + +func findConverter(fromVersion, toVersion string) *converter { + for _, c := range converters { + if c.fromVersion == fromVersion { + for _, v := range c.toVersions { + if v == toVersion { + return c + } + } + } + } + return nil +} + +// Convert converts a CNI Result to the requested CNI specification version, +// or returns an error if the conversion could not be performed or failed +func Convert(from types.Result, toVersion string) (types.Result, error) { + if toVersion == "" { + toVersion = "0.1.0" + } + + fromVersion := from.Version() + + // Shortcut for same version + if fromVersion == toVersion { + return from, nil + } + + // Otherwise find the right converter + c := findConverter(fromVersion, toVersion) + if c == nil { + return nil, fmt.Errorf("no converter for CNI result version %s to %s", + fromVersion, toVersion) + } + return c.convertFn(from, toVersion) +} + +// RegisterConverter registers a CNI Result converter. SHOULD NOT BE CALLED +// EXCEPT FROM CNI ITSELF. 
+func RegisterConverter(fromVersion string, toVersions []string, convertFn ConvertFn) { + // Make sure there is no converter already registered for these + // from and to versions + for _, v := range toVersions { + if findConverter(fromVersion, v) != nil { + panic(fmt.Sprintf("converter already registered for %s to %s", + fromVersion, v)) + } + } + converters = append(converters, &converter{ + fromVersion: fromVersion, + toVersions: toVersions, + convertFn: convertFn, + }) +} diff --git a/vendor/github.com/containernetworking/cni/pkg/types/internal/create.go b/vendor/github.com/containernetworking/cni/pkg/types/internal/create.go new file mode 100644 index 000000000..963630912 --- /dev/null +++ b/vendor/github.com/containernetworking/cni/pkg/types/internal/create.go @@ -0,0 +1,66 @@ +// Copyright 2016 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package convert + +import ( + "fmt" + + "github.com/containernetworking/cni/pkg/types" +) + +type ResultFactoryFunc func([]byte) (types.Result, error) + +type creator struct { + // CNI Result spec versions that createFn can create a Result for + versions []string + createFn ResultFactoryFunc +} + +var creators []*creator + +func findCreator(version string) *creator { + for _, c := range creators { + for _, v := range c.versions { + if v == version { + return c + } + } + } + return nil +} + +// Create creates a CNI Result using the given JSON, or an error if the creation +// could not be performed +func Create(version string, bytes []byte) (types.Result, error) { + if c := findCreator(version); c != nil { + return c.createFn(bytes) + } + return nil, fmt.Errorf("unsupported CNI result version %q", version) +} + +// RegisterCreator registers a CNI Result creator. SHOULD NOT BE CALLED +// EXCEPT FROM CNI ITSELF. +func RegisterCreator(versions []string, createFn ResultFactoryFunc) { + // Make sure there is no creator already registered for these versions + for _, v := range versions { + if findCreator(v) != nil { + panic(fmt.Sprintf("creator already registered for %s", v)) + } + } + creators = append(creators, &creator{ + versions: versions, + createFn: createFn, + }) +} diff --git a/vendor/github.com/containernetworking/cni/pkg/types/types.go b/vendor/github.com/containernetworking/cni/pkg/types/types.go new file mode 100644 index 000000000..fba17dfc0 --- /dev/null +++ b/vendor/github.com/containernetworking/cni/pkg/types/types.go @@ -0,0 +1,234 @@ +// Copyright 2015 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import ( + "encoding/json" + "fmt" + "io" + "net" + "os" +) + +// like net.IPNet but adds JSON marshalling and unmarshalling +type IPNet net.IPNet + +// ParseCIDR takes a string like "10.2.3.1/24" and +// return IPNet with "10.2.3.1" and /24 mask +func ParseCIDR(s string) (*net.IPNet, error) { + ip, ipn, err := net.ParseCIDR(s) + if err != nil { + return nil, err + } + + ipn.IP = ip + return ipn, nil +} + +func (n IPNet) MarshalJSON() ([]byte, error) { + return json.Marshal((*net.IPNet)(&n).String()) +} + +func (n *IPNet) UnmarshalJSON(data []byte) error { + var s string + if err := json.Unmarshal(data, &s); err != nil { + return err + } + + tmp, err := ParseCIDR(s) + if err != nil { + return err + } + + *n = IPNet(*tmp) + return nil +} + +// NetConf describes a network. +type NetConf struct { + CNIVersion string `json:"cniVersion,omitempty"` + + Name string `json:"name,omitempty"` + Type string `json:"type,omitempty"` + Capabilities map[string]bool `json:"capabilities,omitempty"` + IPAM IPAM `json:"ipam,omitempty"` + DNS DNS `json:"dns"` + + RawPrevResult map[string]interface{} `json:"prevResult,omitempty"` + PrevResult Result `json:"-"` +} + +type IPAM struct { + Type string `json:"type,omitempty"` +} + +// NetConfList describes an ordered list of networks. 
+type NetConfList struct { + CNIVersion string `json:"cniVersion,omitempty"` + + Name string `json:"name,omitempty"` + DisableCheck bool `json:"disableCheck,omitempty"` + Plugins []*NetConf `json:"plugins,omitempty"` +} + +// Result is an interface that provides the result of plugin execution +type Result interface { + // The highest CNI specification result version the result supports + // without having to convert + Version() string + + // Returns the result converted into the requested CNI specification + // result version, or an error if conversion failed + GetAsVersion(version string) (Result, error) + + // Prints the result in JSON format to stdout + Print() error + + // Prints the result in JSON format to provided writer + PrintTo(writer io.Writer) error +} + +func PrintResult(result Result, version string) error { + newResult, err := result.GetAsVersion(version) + if err != nil { + return err + } + return newResult.Print() +} + +// DNS contains values interesting for DNS resolvers +type DNS struct { + Nameservers []string `json:"nameservers,omitempty"` + Domain string `json:"domain,omitempty"` + Search []string `json:"search,omitempty"` + Options []string `json:"options,omitempty"` +} + +func (d *DNS) Copy() *DNS { + if d == nil { + return nil + } + + to := &DNS{Domain: d.Domain} + for _, ns := range d.Nameservers { + to.Nameservers = append(to.Nameservers, ns) + } + for _, s := range d.Search { + to.Search = append(to.Search, s) + } + for _, o := range d.Options { + to.Options = append(to.Options, o) + } + return to +} + +type Route struct { + Dst net.IPNet + GW net.IP +} + +func (r *Route) String() string { + return fmt.Sprintf("%+v", *r) +} + +func (r *Route) Copy() *Route { + if r == nil { + return nil + } + + return &Route{ + Dst: r.Dst, + GW: r.GW, + } +} + +// Well known error codes +// see https://github.com/containernetworking/cni/blob/master/SPEC.md#well-known-error-codes +const ( + ErrUnknown uint = iota // 0 + ErrIncompatibleCNIVersion // 1 + 
ErrUnsupportedField // 2 + ErrUnknownContainer // 3 + ErrInvalidEnvironmentVariables // 4 + ErrIOFailure // 5 + ErrDecodingFailure // 6 + ErrInvalidNetworkConfig // 7 + ErrTryAgainLater uint = 11 + ErrInternal uint = 999 +) + +type Error struct { + Code uint `json:"code"` + Msg string `json:"msg"` + Details string `json:"details,omitempty"` +} + +func NewError(code uint, msg, details string) *Error { + return &Error{ + Code: code, + Msg: msg, + Details: details, + } +} + +func (e *Error) Error() string { + details := "" + if e.Details != "" { + details = fmt.Sprintf("; %v", e.Details) + } + return fmt.Sprintf("%v%v", e.Msg, details) +} + +func (e *Error) Print() error { + return prettyPrint(e) +} + +// net.IPNet is not JSON (un)marshallable so this duality is needed +// for our custom IPNet type + +// JSON (un)marshallable types +type route struct { + Dst IPNet `json:"dst"` + GW net.IP `json:"gw,omitempty"` +} + +func (r *Route) UnmarshalJSON(data []byte) error { + rt := route{} + if err := json.Unmarshal(data, &rt); err != nil { + return err + } + + r.Dst = net.IPNet(rt.Dst) + r.GW = rt.GW + return nil +} + +func (r Route) MarshalJSON() ([]byte, error) { + rt := route{ + Dst: IPNet(r.Dst), + GW: r.GW, + } + + return json.Marshal(rt) +} + +func prettyPrint(obj interface{}) error { + data, err := json.MarshalIndent(obj, "", " ") + if err != nil { + return err + } + _, err = os.Stdout.Write(data) + return err +} diff --git a/vendor/github.com/containernetworking/cni/pkg/utils/utils.go b/vendor/github.com/containernetworking/cni/pkg/utils/utils.go new file mode 100644 index 000000000..b8ec38874 --- /dev/null +++ b/vendor/github.com/containernetworking/cni/pkg/utils/utils.go @@ -0,0 +1,84 @@ +// Copyright 2019 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package utils + +import ( + "bytes" + "fmt" + "regexp" + "unicode" + + "github.com/containernetworking/cni/pkg/types" +) + +const ( + // cniValidNameChars is the regexp used to validate valid characters in + // containerID and networkName + cniValidNameChars = `[a-zA-Z0-9][a-zA-Z0-9_.\-]` + + // maxInterfaceNameLength is the length max of a valid interface name + maxInterfaceNameLength = 15 +) + +var cniReg = regexp.MustCompile(`^` + cniValidNameChars + `*$`) + +// ValidateContainerID will validate that the supplied containerID is not empty does not contain invalid characters +func ValidateContainerID(containerID string) *types.Error { + + if containerID == "" { + return types.NewError(types.ErrUnknownContainer, "missing containerID", "") + } + if !cniReg.MatchString(containerID) { + return types.NewError(types.ErrInvalidEnvironmentVariables, "invalid characters in containerID", containerID) + } + return nil +} + +// ValidateNetworkName will validate that the supplied networkName does not contain invalid characters +func ValidateNetworkName(networkName string) *types.Error { + + if networkName == "" { + return types.NewError(types.ErrInvalidNetworkConfig, "missing network name:", "") + } + if !cniReg.MatchString(networkName) { + return types.NewError(types.ErrInvalidNetworkConfig, "invalid characters found in network name", networkName) + } + return nil +} + +// ValidateInterfaceName will validate the interface name based on the three rules below +// 1. The name must not be empty +// 2. The name must be less than 16 characters +// 3. 
The name must not be "." or ".." +// 3. The name must not contain / or : or any whitespace characters +// ref to https://github.com/torvalds/linux/blob/master/net/core/dev.c#L1024 +func ValidateInterfaceName(ifName string) *types.Error { + if len(ifName) == 0 { + return types.NewError(types.ErrInvalidEnvironmentVariables, "interface name is empty", "") + } + if len(ifName) > maxInterfaceNameLength { + return types.NewError(types.ErrInvalidEnvironmentVariables, "interface name is too long", fmt.Sprintf("interface name should be less than %d characters", maxInterfaceNameLength+1)) + } + if ifName == "." || ifName == ".." { + return types.NewError(types.ErrInvalidEnvironmentVariables, "interface name is . or ..", "") + } + for _, r := range bytes.Runes([]byte(ifName)) { + if r == '/' || r == ':' || unicode.IsSpace(r) { + return types.NewError(types.ErrInvalidEnvironmentVariables, "interface name contains / or : or whitespace characters", "") + } + } + + return nil +} diff --git a/vendor/github.com/containernetworking/cni/pkg/version/conf.go b/vendor/github.com/containernetworking/cni/pkg/version/conf.go new file mode 100644 index 000000000..808c33b83 --- /dev/null +++ b/vendor/github.com/containernetworking/cni/pkg/version/conf.go @@ -0,0 +1,26 @@ +// Copyright 2016 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package version + +import ( + "github.com/containernetworking/cni/pkg/types/create" +) + +// ConfigDecoder can decode the CNI version available in network config data +type ConfigDecoder struct{} + +func (*ConfigDecoder) Decode(jsonBytes []byte) (string, error) { + return create.DecodeVersion(jsonBytes) +} diff --git a/vendor/github.com/containernetworking/cni/pkg/version/plugin.go b/vendor/github.com/containernetworking/cni/pkg/version/plugin.go new file mode 100644 index 000000000..17b22b6b0 --- /dev/null +++ b/vendor/github.com/containernetworking/cni/pkg/version/plugin.go @@ -0,0 +1,144 @@ +// Copyright 2016 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package version + +import ( + "encoding/json" + "fmt" + "io" + "strconv" + "strings" +) + +// PluginInfo reports information about CNI versioning +type PluginInfo interface { + // SupportedVersions returns one or more CNI spec versions that the plugin + // supports. 
If input is provided in one of these versions, then the plugin + // promises to use the same CNI version in its response + SupportedVersions() []string + + // Encode writes this CNI version information as JSON to the given Writer + Encode(io.Writer) error +} + +type pluginInfo struct { + CNIVersion_ string `json:"cniVersion"` + SupportedVersions_ []string `json:"supportedVersions,omitempty"` +} + +// pluginInfo implements the PluginInfo interface +var _ PluginInfo = &pluginInfo{} + +func (p *pluginInfo) Encode(w io.Writer) error { + return json.NewEncoder(w).Encode(p) +} + +func (p *pluginInfo) SupportedVersions() []string { + return p.SupportedVersions_ +} + +// PluginSupports returns a new PluginInfo that will report the given versions +// as supported +func PluginSupports(supportedVersions ...string) PluginInfo { + if len(supportedVersions) < 1 { + panic("programmer error: you must support at least one version") + } + return &pluginInfo{ + CNIVersion_: Current(), + SupportedVersions_: supportedVersions, + } +} + +// PluginDecoder can decode the response returned by a plugin's VERSION command +type PluginDecoder struct{} + +func (*PluginDecoder) Decode(jsonBytes []byte) (PluginInfo, error) { + var info pluginInfo + err := json.Unmarshal(jsonBytes, &info) + if err != nil { + return nil, fmt.Errorf("decoding version info: %w", err) + } + if info.CNIVersion_ == "" { + return nil, fmt.Errorf("decoding version info: missing field cniVersion") + } + if len(info.SupportedVersions_) == 0 { + if info.CNIVersion_ == "0.2.0" { + return PluginSupports("0.1.0", "0.2.0"), nil + } + return nil, fmt.Errorf("decoding version info: missing field supportedVersions") + } + return &info, nil +} + +// ParseVersion parses a version string like "3.0.1" or "0.4.5" into major, +// minor, and micro numbers or returns an error +func ParseVersion(version string) (int, int, int, error) { + var major, minor, micro int + if version == "" { // special case: no version declared == v0.1.0 + return 
0, 1, 0, nil + } + + parts := strings.Split(version, ".") + if len(parts) >= 4 { + return -1, -1, -1, fmt.Errorf("invalid version %q: too many parts", version) + } + + major, err := strconv.Atoi(parts[0]) + if err != nil { + return -1, -1, -1, fmt.Errorf("failed to convert major version part %q: %w", parts[0], err) + } + + if len(parts) >= 2 { + minor, err = strconv.Atoi(parts[1]) + if err != nil { + return -1, -1, -1, fmt.Errorf("failed to convert minor version part %q: %w", parts[1], err) + } + } + + if len(parts) >= 3 { + micro, err = strconv.Atoi(parts[2]) + if err != nil { + return -1, -1, -1, fmt.Errorf("failed to convert micro version part %q: %w", parts[2], err) + } + } + + return major, minor, micro, nil +} + +// GreaterThanOrEqualTo takes two string versions, parses them into major/minor/micro +// numbers, and compares them to determine whether the first version is greater +// than or equal to the second +func GreaterThanOrEqualTo(version, otherVersion string) (bool, error) { + firstMajor, firstMinor, firstMicro, err := ParseVersion(version) + if err != nil { + return false, err + } + + secondMajor, secondMinor, secondMicro, err := ParseVersion(otherVersion) + if err != nil { + return false, err + } + + if firstMajor > secondMajor { + return true, nil + } else if firstMajor == secondMajor { + if firstMinor > secondMinor { + return true, nil + } else if firstMinor == secondMinor && firstMicro >= secondMicro { + return true, nil + } + } + return false, nil +} diff --git a/vendor/github.com/containernetworking/cni/pkg/version/reconcile.go b/vendor/github.com/containernetworking/cni/pkg/version/reconcile.go new file mode 100644 index 000000000..25c3810b2 --- /dev/null +++ b/vendor/github.com/containernetworking/cni/pkg/version/reconcile.go @@ -0,0 +1,49 @@ +// Copyright 2016 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package version + +import "fmt" + +type ErrorIncompatible struct { + Config string + Supported []string +} + +func (e *ErrorIncompatible) Details() string { + return fmt.Sprintf("config is %q, plugin supports %q", e.Config, e.Supported) +} + +func (e *ErrorIncompatible) Error() string { + return fmt.Sprintf("incompatible CNI versions: %s", e.Details()) +} + +type Reconciler struct{} + +func (r *Reconciler) Check(configVersion string, pluginInfo PluginInfo) *ErrorIncompatible { + return r.CheckRaw(configVersion, pluginInfo.SupportedVersions()) +} + +func (*Reconciler) CheckRaw(configVersion string, supportedVersions []string) *ErrorIncompatible { + for _, supportedVersion := range supportedVersions { + if configVersion == supportedVersion { + return nil + } + } + + return &ErrorIncompatible{ + Config: configVersion, + Supported: supportedVersions, + } +} diff --git a/vendor/github.com/containernetworking/cni/pkg/version/version.go b/vendor/github.com/containernetworking/cni/pkg/version/version.go new file mode 100644 index 000000000..1326f8038 --- /dev/null +++ b/vendor/github.com/containernetworking/cni/pkg/version/version.go @@ -0,0 +1,89 @@ +// Copyright 2016 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package version + +import ( + "encoding/json" + "fmt" + + "github.com/containernetworking/cni/pkg/types" + types100 "github.com/containernetworking/cni/pkg/types/100" + "github.com/containernetworking/cni/pkg/types/create" +) + +// Current reports the version of the CNI spec implemented by this library +func Current() string { + return types100.ImplementedSpecVersion +} + +// Legacy PluginInfo describes a plugin that is backwards compatible with the +// CNI spec version 0.1.0. In particular, a runtime compiled against the 0.1.0 +// library ought to work correctly with a plugin that reports support for +// Legacy versions. +// +// Any future CNI spec versions which meet this definition should be added to +// this list. +var Legacy = PluginSupports("0.1.0", "0.2.0") +var All = PluginSupports("0.1.0", "0.2.0", "0.3.0", "0.3.1", "0.4.0", "1.0.0") + +// VersionsFrom returns a list of versions starting from min, inclusive +func VersionsStartingFrom(min string) PluginInfo { + out := []string{} + // cheat, just assume ordered + ok := false + for _, v := range All.SupportedVersions() { + if !ok && v == min { + ok = true + } + if ok { + out = append(out, v) + } + } + return PluginSupports(out...) +} + +// Finds a Result object matching the requested version (if any) and asks +// that object to parse the plugin result, returning an error if parsing failed. 
+func NewResult(version string, resultBytes []byte) (types.Result, error) { + return create.Create(version, resultBytes) +} + +// ParsePrevResult parses a prevResult in a NetConf structure and sets +// the NetConf's PrevResult member to the parsed Result object. +func ParsePrevResult(conf *types.NetConf) error { + if conf.RawPrevResult == nil { + return nil + } + + // Prior to 1.0.0, Result types may not marshal a CNIVersion. Since the + // result version must match the config version, if the Result's version + // is empty, inject the config version. + if ver, ok := conf.RawPrevResult["CNIVersion"]; !ok || ver == "" { + conf.RawPrevResult["CNIVersion"] = conf.CNIVersion + } + + resultBytes, err := json.Marshal(conf.RawPrevResult) + if err != nil { + return fmt.Errorf("could not serialize prevResult: %w", err) + } + + conf.RawPrevResult = nil + conf.PrevResult, err = create.Create(conf.CNIVersion, resultBytes) + if err != nil { + return fmt.Errorf("could not parse prevResult: %w", err) + } + + return nil +} diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/LICENSE b/vendor/github.com/containernetworking/plugins/LICENSE similarity index 100% rename from vendor/sigs.k8s.io/structured-merge-diff/v4/LICENSE rename to vendor/github.com/containernetworking/plugins/LICENSE diff --git a/vendor/github.com/containernetworking/plugins/pkg/ip/addr_linux.go b/vendor/github.com/containernetworking/plugins/pkg/ip/addr_linux.go new file mode 100644 index 000000000..b4db50b9a --- /dev/null +++ b/vendor/github.com/containernetworking/plugins/pkg/ip/addr_linux.go @@ -0,0 +1,68 @@ +// Copyright 2017 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ip + +import ( + "fmt" + "syscall" + "time" + + "github.com/vishvananda/netlink" +) + +const SETTLE_INTERVAL = 50 * time.Millisecond + +// SettleAddresses waits for all addresses on a link to leave tentative state. +// This is particularly useful for ipv6, where all addresses need to do DAD. +// There is no easy way to wait for this as an event, so just loop until the +// addresses are no longer tentative. +// If any addresses are still tentative after timeout seconds, then error. +func SettleAddresses(ifName string, timeout int) error { + link, err := netlink.LinkByName(ifName) + if err != nil { + return fmt.Errorf("failed to retrieve link: %v", err) + } + + deadline := time.Now().Add(time.Duration(timeout) * time.Second) + for { + addrs, err := netlink.AddrList(link, netlink.FAMILY_ALL) + if err != nil { + return fmt.Errorf("could not list addresses: %v", err) + } + + if len(addrs) == 0 { + return nil + } + + ok := true + for _, addr := range addrs { + if addr.Flags&(syscall.IFA_F_TENTATIVE|syscall.IFA_F_DADFAILED) > 0 { + ok = false + break // Break out of the `range addrs`, not the `for` + } + } + + if ok { + return nil + } + if time.Now().After(deadline) { + return fmt.Errorf("link %s still has tentative addresses after %d seconds", + ifName, + timeout) + } + + time.Sleep(SETTLE_INTERVAL) + } +} diff --git a/vendor/github.com/containernetworking/plugins/pkg/ip/cidr.go b/vendor/github.com/containernetworking/plugins/pkg/ip/cidr.go new file mode 100644 index 000000000..8b380fc74 --- /dev/null +++ 
b/vendor/github.com/containernetworking/plugins/pkg/ip/cidr.go @@ -0,0 +1,105 @@ +// Copyright 2015 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ip + +import ( + "math/big" + "net" +) + +// NextIP returns IP incremented by 1, if IP is invalid, return nil +func NextIP(ip net.IP) net.IP { + normalizedIP := normalizeIP(ip) + if normalizedIP == nil { + return nil + } + + i := ipToInt(normalizedIP) + return intToIP(i.Add(i, big.NewInt(1)), len(normalizedIP) == net.IPv6len) +} + +// PrevIP returns IP decremented by 1, if IP is invalid, return nil +func PrevIP(ip net.IP) net.IP { + normalizedIP := normalizeIP(ip) + if normalizedIP == nil { + return nil + } + + i := ipToInt(normalizedIP) + return intToIP(i.Sub(i, big.NewInt(1)), len(normalizedIP) == net.IPv6len) +} + +// Cmp compares two IPs, returning the usual ordering: +// a < b : -1 +// a == b : 0 +// a > b : 1 +// incomparable : -2 +func Cmp(a, b net.IP) int { + normalizedA := normalizeIP(a) + normalizedB := normalizeIP(b) + + if len(normalizedA) == len(normalizedB) && len(normalizedA) != 0 { + return ipToInt(normalizedA).Cmp(ipToInt(normalizedB)) + } + + return -2 +} + +func ipToInt(ip net.IP) *big.Int { + return big.NewInt(0).SetBytes(ip) +} + +func intToIP(i *big.Int, isIPv6 bool) net.IP { + intBytes := i.Bytes() + + if len(intBytes) == net.IPv4len || len(intBytes) == net.IPv6len { + return intBytes + } + + if isIPv6 { + return append(make([]byte, net.IPv6len-len(intBytes)), 
intBytes...) + } + + return append(make([]byte, net.IPv4len-len(intBytes)), intBytes...) +} + +// normalizeIP will normalize IP by family, +// IPv4 : 4-byte form +// IPv6 : 16-byte form +// others : nil +func normalizeIP(ip net.IP) net.IP { + if ipTo4 := ip.To4(); ipTo4 != nil { + return ipTo4 + } + return ip.To16() +} + +// Network masks off the host portion of the IP, if IPNet is invalid, +// return nil +func Network(ipn *net.IPNet) *net.IPNet { + if ipn == nil { + return nil + } + + maskedIP := ipn.IP.Mask(ipn.Mask) + if maskedIP == nil { + return nil + } + + return &net.IPNet{ + IP: maskedIP, + Mask: ipn.Mask, + } +} diff --git a/vendor/github.com/containernetworking/plugins/pkg/ip/ip.go b/vendor/github.com/containernetworking/plugins/pkg/ip/ip.go new file mode 100644 index 000000000..4469e1b5d --- /dev/null +++ b/vendor/github.com/containernetworking/plugins/pkg/ip/ip.go @@ -0,0 +1,105 @@ +// Copyright 2021 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ip + +import ( + "fmt" + "net" + "strings" +) + +// IP is a CNI maintained type inherited from net.IPNet which can +// represent a single IP address with or without prefix. +type IP struct { + net.IPNet +} + +// newIP will create an IP with net.IP and net.IPMask +func newIP(ip net.IP, mask net.IPMask) *IP { + return &IP{ + IPNet: net.IPNet{ + IP: ip, + Mask: mask, + }, + } +} + +// ParseIP will parse string s as an IP, and return it. +// The string s must be formed like [/]. 
+// If s is not a valid textual representation of an IP, +// will return nil. +func ParseIP(s string) *IP { + if strings.ContainsAny(s, "/") { + ip, ipNet, err := net.ParseCIDR(s) + if err != nil { + return nil + } + return newIP(ip, ipNet.Mask) + } else { + ip := net.ParseIP(s) + if ip == nil { + return nil + } + return newIP(ip, nil) + } +} + +// ToIP will return a net.IP in standard form from this IP. +// If this IP can not be converted to a valid net.IP, will return nil. +func (i *IP) ToIP() net.IP { + switch { + case i.IP.To4() != nil: + return i.IP.To4() + case i.IP.To16() != nil: + return i.IP.To16() + default: + return nil + } +} + +// String returns the string form of this IP. +func (i *IP) String() string { + if len(i.Mask) > 0 { + return i.IPNet.String() + } + return i.IP.String() +} + +// MarshalText implements the encoding.TextMarshaler interface. +// The encoding is the same as returned by String, +// But when len(ip) is zero, will return an empty slice. +func (i *IP) MarshalText() ([]byte, error) { + if len(i.IP) == 0 { + return []byte{}, nil + } + return []byte(i.String()), nil +} + +// UnmarshalText implements the encoding.TextUnmarshaler interface. +// The textual bytes are expected in a form accepted by Parse, +// But when len(b) is zero, will return an empty IP. 
+func (i *IP) UnmarshalText(b []byte) error { + if len(b) == 0 { + *i = IP{} + return nil + } + + ip := ParseIP(string(b)) + if ip == nil { + return fmt.Errorf("invalid IP address %s", string(b)) + } + *i = *ip + return nil +} diff --git a/vendor/github.com/containernetworking/plugins/pkg/ip/ipforward_linux.go b/vendor/github.com/containernetworking/plugins/pkg/ip/ipforward_linux.go new file mode 100644 index 000000000..0e8b6b691 --- /dev/null +++ b/vendor/github.com/containernetworking/plugins/pkg/ip/ipforward_linux.go @@ -0,0 +1,62 @@ +// Copyright 2015 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package ip + +import ( + "bytes" + "os" + + current "github.com/containernetworking/cni/pkg/types/100" +) + +func EnableIP4Forward() error { + return echo1("/proc/sys/net/ipv4/ip_forward") +} + +func EnableIP6Forward() error { + return echo1("/proc/sys/net/ipv6/conf/all/forwarding") +} + +// EnableForward will enable forwarding for all configured +// address families +func EnableForward(ips []*current.IPConfig) error { + v4 := false + v6 := false + + for _, ip := range ips { + isV4 := ip.Address.IP.To4() != nil + if isV4 && !v4 { + if err := EnableIP4Forward(); err != nil { + return err + } + v4 = true + } else if !isV4 && !v6 { + if err := EnableIP6Forward(); err != nil { + return err + } + v6 = true + } + } + return nil +} + +func echo1(f string) error { + if content, err := os.ReadFile(f); err == nil { + if bytes.Equal(bytes.TrimSpace(content), []byte("1")) { + return nil + } + } + return os.WriteFile(f, []byte("1"), 0644) +} diff --git a/vendor/github.com/containernetworking/plugins/pkg/ip/ipmasq_linux.go b/vendor/github.com/containernetworking/plugins/pkg/ip/ipmasq_linux.go new file mode 100644 index 000000000..cc640a605 --- /dev/null +++ b/vendor/github.com/containernetworking/plugins/pkg/ip/ipmasq_linux.go @@ -0,0 +1,126 @@ +// Copyright 2015 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package ip + +import ( + "fmt" + "net" + + "github.com/coreos/go-iptables/iptables" +) + +// SetupIPMasq installs iptables rules to masquerade traffic +// coming from ip of ipn and going outside of ipn +func SetupIPMasq(ipn *net.IPNet, chain string, comment string) error { + isV6 := ipn.IP.To4() == nil + + var ipt *iptables.IPTables + var err error + var multicastNet string + + if isV6 { + ipt, err = iptables.NewWithProtocol(iptables.ProtocolIPv6) + multicastNet = "ff00::/8" + } else { + ipt, err = iptables.NewWithProtocol(iptables.ProtocolIPv4) + multicastNet = "224.0.0.0/4" + } + if err != nil { + return fmt.Errorf("failed to locate iptables: %v", err) + } + + // Create chain if doesn't exist + exists := false + chains, err := ipt.ListChains("nat") + if err != nil { + return fmt.Errorf("failed to list chains: %v", err) + } + for _, ch := range chains { + if ch == chain { + exists = true + break + } + } + if !exists { + if err = ipt.NewChain("nat", chain); err != nil { + return err + } + } + + // Packets to this network should not be touched + if err := ipt.AppendUnique("nat", chain, "-d", ipn.String(), "-j", "ACCEPT", "-m", "comment", "--comment", comment); err != nil { + return err + } + + // Don't masquerade multicast - pods should be able to talk to other pods + // on the local network via multicast. 
+ if err := ipt.AppendUnique("nat", chain, "!", "-d", multicastNet, "-j", "MASQUERADE", "-m", "comment", "--comment", comment); err != nil { + return err + } + + // Packets from the specific IP of this network will hit the chain + return ipt.AppendUnique("nat", "POSTROUTING", "-s", ipn.IP.String(), "-j", chain, "-m", "comment", "--comment", comment) +} + +// TeardownIPMasq undoes the effects of SetupIPMasq +func TeardownIPMasq(ipn *net.IPNet, chain string, comment string) error { + isV6 := ipn.IP.To4() == nil + + var ipt *iptables.IPTables + var err error + + if isV6 { + ipt, err = iptables.NewWithProtocol(iptables.ProtocolIPv6) + } else { + ipt, err = iptables.NewWithProtocol(iptables.ProtocolIPv4) + } + if err != nil { + return fmt.Errorf("failed to locate iptables: %v", err) + } + + err = ipt.Delete("nat", "POSTROUTING", "-s", ipn.IP.String(), "-j", chain, "-m", "comment", "--comment", comment) + if err != nil && !isNotExist(err) { + return err + } + + // for downward compatibility + err = ipt.Delete("nat", "POSTROUTING", "-s", ipn.String(), "-j", chain, "-m", "comment", "--comment", comment) + if err != nil && !isNotExist(err) { + return err + } + + err = ipt.ClearChain("nat", chain) + if err != nil && !isNotExist(err) { + return err + + } + + err = ipt.DeleteChain("nat", chain) + if err != nil && !isNotExist(err) { + return err + } + + return nil +} + +// isNotExist returnst true if the error is from iptables indicating +// that the target does not exist. 
+func isNotExist(err error) bool { + e, ok := err.(*iptables.Error) + if !ok { + return false + } + return e.IsNotExist() +} diff --git a/vendor/github.com/containernetworking/plugins/pkg/ip/link_linux.go b/vendor/github.com/containernetworking/plugins/pkg/ip/link_linux.go new file mode 100644 index 000000000..91f931b57 --- /dev/null +++ b/vendor/github.com/containernetworking/plugins/pkg/ip/link_linux.go @@ -0,0 +1,261 @@ +// Copyright 2015 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ip + +import ( + "crypto/rand" + "errors" + "fmt" + "net" + "os" + + "github.com/safchain/ethtool" + "github.com/vishvananda/netlink" + + "github.com/containernetworking/plugins/pkg/ns" + "github.com/containernetworking/plugins/pkg/utils/sysctl" +) + +var ( + ErrLinkNotFound = errors.New("link not found") +) + +// makeVethPair is called from within the container's network namespace +func makeVethPair(name, peer string, mtu int, mac string, hostNS ns.NetNS) (netlink.Link, error) { + veth := &netlink.Veth{ + LinkAttrs: netlink.LinkAttrs{ + Name: name, + MTU: mtu, + }, + PeerName: peer, + PeerNamespace: netlink.NsFd(int(hostNS.Fd())), + } + if mac != "" { + m, err := net.ParseMAC(mac) + if err != nil { + return nil, err + } + veth.LinkAttrs.HardwareAddr = m + } + if err := netlink.LinkAdd(veth); err != nil { + return nil, err + } + // Re-fetch the container link to get its creation-time parameters, e.g. 
index and mac + veth2, err := netlink.LinkByName(name) + if err != nil { + netlink.LinkDel(veth) // try and clean up the link if possible. + return nil, err + } + + return veth2, nil +} + +func peerExists(name string) bool { + if _, err := netlink.LinkByName(name); err != nil { + return false + } + return true +} + +func makeVeth(name, vethPeerName string, mtu int, mac string, hostNS ns.NetNS) (peerName string, veth netlink.Link, err error) { + for i := 0; i < 10; i++ { + if vethPeerName != "" { + peerName = vethPeerName + } else { + peerName, err = RandomVethName() + if err != nil { + return + } + } + + veth, err = makeVethPair(name, peerName, mtu, mac, hostNS) + switch { + case err == nil: + return + + case os.IsExist(err): + if peerExists(peerName) && vethPeerName == "" { + continue + } + err = fmt.Errorf("container veth name provided (%v) already exists", name) + return + + default: + err = fmt.Errorf("failed to make veth pair: %v", err) + return + } + } + + // should really never be hit + err = fmt.Errorf("failed to find a unique veth name") + return +} + +// RandomVethName returns string "veth" with random prefix (hashed from entropy) +func RandomVethName() (string, error) { + entropy := make([]byte, 4) + _, err := rand.Read(entropy) + if err != nil { + return "", fmt.Errorf("failed to generate random veth name: %v", err) + } + + // NetworkManager (recent versions) will ignore veth devices that start with "veth" + return fmt.Sprintf("veth%x", entropy), nil +} + +func RenameLink(curName, newName string) error { + link, err := netlink.LinkByName(curName) + if err == nil { + err = netlink.LinkSetName(link, newName) + } + return err +} + +func ifaceFromNetlinkLink(l netlink.Link) net.Interface { + a := l.Attrs() + return net.Interface{ + Index: a.Index, + MTU: a.MTU, + Name: a.Name, + HardwareAddr: a.HardwareAddr, + Flags: a.Flags, + } +} + +// SetupVethWithName sets up a pair of virtual ethernet devices. 
+// Call SetupVethWithName from inside the container netns. It will create both veth +// devices and move the host-side veth into the provided hostNS namespace. +// hostVethName: If hostVethName is not specified, the host-side veth name will use a random string. +// On success, SetupVethWithName returns (hostVeth, containerVeth, nil) +func SetupVethWithName(contVethName, hostVethName string, mtu int, contVethMac string, hostNS ns.NetNS) (net.Interface, net.Interface, error) { + hostVethName, contVeth, err := makeVeth(contVethName, hostVethName, mtu, contVethMac, hostNS) + if err != nil { + return net.Interface{}, net.Interface{}, err + } + + var hostVeth netlink.Link + err = hostNS.Do(func(_ ns.NetNS) error { + hostVeth, err = netlink.LinkByName(hostVethName) + if err != nil { + return fmt.Errorf("failed to lookup %q in %q: %v", hostVethName, hostNS.Path(), err) + } + + if err = netlink.LinkSetUp(hostVeth); err != nil { + return fmt.Errorf("failed to set %q up: %v", hostVethName, err) + } + + // we want to own the routes for this interface + _, _ = sysctl.Sysctl(fmt.Sprintf("net/ipv6/conf/%s/accept_ra", hostVethName), "0") + return nil + }) + if err != nil { + return net.Interface{}, net.Interface{}, err + } + return ifaceFromNetlinkLink(hostVeth), ifaceFromNetlinkLink(contVeth), nil +} + +// SetupVeth sets up a pair of virtual ethernet devices. +// Call SetupVeth from inside the container netns. It will create both veth +// devices and move the host-side veth into the provided hostNS namespace. +// On success, SetupVeth returns (hostVeth, containerVeth, nil) +func SetupVeth(contVethName string, mtu int, contVethMac string, hostNS ns.NetNS) (net.Interface, net.Interface, error) { + return SetupVethWithName(contVethName, "", mtu, contVethMac, hostNS) +} + +// DelLinkByName removes an interface link. 
+func DelLinkByName(ifName string) error { + iface, err := netlink.LinkByName(ifName) + if err != nil { + if _, ok := err.(netlink.LinkNotFoundError); ok { + return ErrLinkNotFound + } + return fmt.Errorf("failed to lookup %q: %v", ifName, err) + } + + if err = netlink.LinkDel(iface); err != nil { + return fmt.Errorf("failed to delete %q: %v", ifName, err) + } + + return nil +} + +// DelLinkByNameAddr remove an interface and returns its addresses +func DelLinkByNameAddr(ifName string) ([]*net.IPNet, error) { + iface, err := netlink.LinkByName(ifName) + if err != nil { + if _, ok := err.(netlink.LinkNotFoundError); ok { + return nil, ErrLinkNotFound + } + return nil, fmt.Errorf("failed to lookup %q: %v", ifName, err) + } + + addrs, err := netlink.AddrList(iface, netlink.FAMILY_ALL) + if err != nil { + return nil, fmt.Errorf("failed to get IP addresses for %q: %v", ifName, err) + } + + if err = netlink.LinkDel(iface); err != nil { + return nil, fmt.Errorf("failed to delete %q: %v", ifName, err) + } + + out := []*net.IPNet{} + for _, addr := range addrs { + if addr.IP.IsGlobalUnicast() { + out = append(out, addr.IPNet) + } + } + + return out, nil +} + +// GetVethPeerIfindex returns the veth link object, the peer ifindex of the +// veth, or an error. This peer ifindex will only be valid in the peer's +// network namespace. 
+func GetVethPeerIfindex(ifName string) (netlink.Link, int, error) { + link, err := netlink.LinkByName(ifName) + if err != nil { + return nil, -1, fmt.Errorf("could not look up %q: %v", ifName, err) + } + if _, ok := link.(*netlink.Veth); !ok { + return nil, -1, fmt.Errorf("interface %q was not a veth interface", ifName) + } + + // veth supports IFLA_LINK (what vishvananda/netlink calls ParentIndex) + // on 4.1 and higher kernels + peerIndex := link.Attrs().ParentIndex + if peerIndex <= 0 { + // Fall back to ethtool for 4.0 and earlier kernels + e, err := ethtool.NewEthtool() + if err != nil { + return nil, -1, fmt.Errorf("failed to initialize ethtool: %v", err) + } + defer e.Close() + + stats, err := e.Stats(link.Attrs().Name) + if err != nil { + return nil, -1, fmt.Errorf("failed to request ethtool stats: %v", err) + } + n, ok := stats["peer_ifindex"] + if !ok { + return nil, -1, fmt.Errorf("failed to find 'peer_ifindex' in ethtool stats") + } + if n > 32767 || n == 0 { + return nil, -1, fmt.Errorf("invalid 'peer_ifindex' %d", n) + } + peerIndex = int(n) + } + + return link, peerIndex, nil +} diff --git a/vendor/github.com/containernetworking/plugins/pkg/ip/route_linux.go b/vendor/github.com/containernetworking/plugins/pkg/ip/route_linux.go new file mode 100644 index 000000000..e92b6c53e --- /dev/null +++ b/vendor/github.com/containernetworking/plugins/pkg/ip/route_linux.go @@ -0,0 +1,52 @@ +// Copyright 2015-2017 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package ip + +import ( + "net" + + "github.com/vishvananda/netlink" +) + +// AddRoute adds a universally-scoped route to a device. +func AddRoute(ipn *net.IPNet, gw net.IP, dev netlink.Link) error { + return netlink.RouteAdd(&netlink.Route{ + LinkIndex: dev.Attrs().Index, + Scope: netlink.SCOPE_UNIVERSE, + Dst: ipn, + Gw: gw, + }) +} + +// AddHostRoute adds a host-scoped route to a device. +func AddHostRoute(ipn *net.IPNet, gw net.IP, dev netlink.Link) error { + return netlink.RouteAdd(&netlink.Route{ + LinkIndex: dev.Attrs().Index, + Scope: netlink.SCOPE_HOST, + Dst: ipn, + Gw: gw, + }) +} + +// AddDefaultRoute sets the default route on the given gateway. +func AddDefaultRoute(gw net.IP, dev netlink.Link) error { + var defNet *net.IPNet + if gw.To4() != nil { + _, defNet, _ = net.ParseCIDR("0.0.0.0/0") + } else { + _, defNet, _ = net.ParseCIDR("::/0") + } + return AddRoute(defNet, gw, dev) +} diff --git a/vendor/github.com/containernetworking/plugins/pkg/ip/utils_linux.go b/vendor/github.com/containernetworking/plugins/pkg/ip/utils_linux.go new file mode 100644 index 000000000..943117e18 --- /dev/null +++ b/vendor/github.com/containernetworking/plugins/pkg/ip/utils_linux.go @@ -0,0 +1,116 @@ +//go:build linux +// +build linux + +// Copyright 2016 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package ip + +import ( + "fmt" + "net" + + "github.com/containernetworking/cni/pkg/types" + current "github.com/containernetworking/cni/pkg/types/100" + "github.com/vishvananda/netlink" +) + +func ValidateExpectedInterfaceIPs(ifName string, resultIPs []*current.IPConfig) error { + + // Ensure ips + for _, ips := range resultIPs { + ourAddr := netlink.Addr{IPNet: &ips.Address} + match := false + + link, err := netlink.LinkByName(ifName) + if err != nil { + return fmt.Errorf("Cannot find container link %v", ifName) + } + + addrList, err := netlink.AddrList(link, netlink.FAMILY_ALL) + if err != nil { + return fmt.Errorf("Cannot obtain List of IP Addresses") + } + + for _, addr := range addrList { + if addr.Equal(ourAddr) { + match = true + break + } + } + if match == false { + return fmt.Errorf("Failed to match addr %v on interface %v", ourAddr, ifName) + } + + // Convert the host/prefixlen to just prefix for route lookup. + _, ourPrefix, err := net.ParseCIDR(ourAddr.String()) + + findGwy := &netlink.Route{Dst: ourPrefix} + routeFilter := netlink.RT_FILTER_DST + + family := netlink.FAMILY_V6 + if ips.Address.IP.To4() != nil { + family = netlink.FAMILY_V4 + } + + gwy, err := netlink.RouteListFiltered(family, findGwy, routeFilter) + if err != nil { + return fmt.Errorf("Error %v trying to find Gateway %v for interface %v", err, ips.Gateway, ifName) + } + if gwy == nil { + return fmt.Errorf("Failed to find Gateway %v for interface %v", ips.Gateway, ifName) + } + } + + return nil +} + +func ValidateExpectedRoute(resultRoutes []*types.Route) error { + + // Ensure that each static route in prevResults is found in the routing table + for _, route := range resultRoutes { + find := &netlink.Route{Dst: &route.Dst, Gw: route.GW} + routeFilter := netlink.RT_FILTER_DST | netlink.RT_FILTER_GW + var family int + + switch { + case route.Dst.IP.To4() != nil: + family = netlink.FAMILY_V4 + // Default route needs Dst set to nil + if route.Dst.String() == "0.0.0.0/0" { + find = 
&netlink.Route{Dst: nil, Gw: route.GW} + routeFilter = netlink.RT_FILTER_DST + } + case len(route.Dst.IP) == net.IPv6len: + family = netlink.FAMILY_V6 + // Default route needs Dst set to nil + if route.Dst.String() == "::/0" { + find = &netlink.Route{Dst: nil, Gw: route.GW} + routeFilter = netlink.RT_FILTER_DST + } + default: + return fmt.Errorf("Invalid static route found %v", route) + } + + wasFound, err := netlink.RouteListFiltered(family, find, routeFilter) + if err != nil { + return fmt.Errorf("Expected Route %v not route table lookup error %v", route, err) + } + if wasFound == nil { + return fmt.Errorf("Expected Route %v not found in routing table", route) + } + } + + return nil +} diff --git a/vendor/github.com/containernetworking/plugins/pkg/ns/README.md b/vendor/github.com/containernetworking/plugins/pkg/ns/README.md new file mode 100644 index 000000000..1e265c7a0 --- /dev/null +++ b/vendor/github.com/containernetworking/plugins/pkg/ns/README.md @@ -0,0 +1,41 @@ +### Namespaces, Threads, and Go +On Linux each OS thread can have a different network namespace. Go's thread scheduling model switches goroutines between OS threads based on OS thread load and whether the goroutine would block other goroutines. This can result in a goroutine switching network namespaces without notice and lead to errors in your code. + +### Namespace Switching +Switching namespaces with the `ns.Set()` method is not recommended without additional strategies to prevent unexpected namespace changes when your goroutines switch OS threads. + +Go provides the `runtime.LockOSThread()` function to ensure a specific goroutine executes on its current OS thread and prevents any other goroutine from running in that thread until the locked one exits. Careful usage of `LockOSThread()` and goroutines can provide good control over which network namespace a given goroutine executes in. 
+ +For example, you cannot rely on the `ns.Set()` namespace being the current namespace after the `Set()` call unless you do two things. First, the goroutine calling `Set()` must have previously called `LockOSThread()`. Second, you must ensure `runtime.UnlockOSThread()` is not called somewhere in-between. You also cannot rely on the initial network namespace remaining the current network namespace if any other code in your program switches namespaces, unless you have already called `LockOSThread()` in that goroutine. Note that `LockOSThread()` prevents the Go scheduler from optimally scheduling goroutines for best performance, so `LockOSThread()` should only be used in small, isolated goroutines that release the lock quickly. + +### Do() The Recommended Thing +The `ns.Do()` method provides **partial** control over network namespaces for you by implementing these strategies. All code dependent on a particular network namespace (including the root namespace) should be wrapped in the `ns.Do()` method to ensure the correct namespace is selected for the duration of your code. For example: + +```go +err = targetNs.Do(func(hostNs ns.NetNS) error { + dummy := &netlink.Dummy{ + LinkAttrs: netlink.LinkAttrs{ + Name: "dummy0", + }, + } + return netlink.LinkAdd(dummy) +}) +``` + +Note this requirement to wrap every network call is very onerous - any libraries you call might call out to network services such as DNS, and all such calls need to be protected after you call `ns.Do()`. All goroutines spawned from within the `ns.Do` will not inherit the new namespace. The CNI plugins all exit very soon after calling `ns.Do()` which helps to minimize the problem. + +When a new thread is spawned in Linux, it inherits the namespace of its parent. In versions of go **prior to 1.10**, if the runtime spawns a new OS thread, it picks the parent randomly. 
If the chosen parent thread has been moved to a new namespace (even temporarily), the new OS thread will be permanently "stuck in the wrong namespace", and goroutines will non-deterministically switch namespaces as they are rescheduled. + +In short, **there was no safe way to change network namespaces, even temporarily, from within a long-lived, multithreaded Go process**. If you wish to do this, you must use go 1.10 or greater. + + +### Creating network namespaces +Earlier versions of this library managed namespace creation, but as CNI does not actually utilize this feature (and it was essentially unmaintained), it was removed. If you're writing a container runtime, you should implement namespace management yourself. However, there are some gotchas when doing so, especially around handling `/var/run/netns`. A reasonably correct reference implementation, borrowed from `rkt`, can be found in `pkg/testutils/netns_linux.go` if you're in need of a source of inspiration. + + +### Further Reading + - https://github.com/golang/go/wiki/LockOSThread + - http://morsmachine.dk/go-scheduler + - https://github.com/containernetworking/cni/issues/262 + - https://golang.org/pkg/runtime/ + - https://www.weave.works/blog/linux-namespaces-and-go-don-t-mix diff --git a/vendor/github.com/containernetworking/plugins/pkg/ns/ns_linux.go b/vendor/github.com/containernetworking/plugins/pkg/ns/ns_linux.go new file mode 100644 index 000000000..f260f2813 --- /dev/null +++ b/vendor/github.com/containernetworking/plugins/pkg/ns/ns_linux.go @@ -0,0 +1,234 @@ +// Copyright 2015-2017 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ns + +import ( + "fmt" + "os" + "runtime" + "sync" + "syscall" + + "golang.org/x/sys/unix" +) + +// Returns an object representing the current OS thread's network namespace +func GetCurrentNS() (NetNS, error) { + // Lock the thread in case other goroutine executes in it and changes its + // network namespace after getCurrentThreadNetNSPath(), otherwise it might + // return an unexpected network namespace. + runtime.LockOSThread() + defer runtime.UnlockOSThread() + return GetNS(getCurrentThreadNetNSPath()) +} + +func getCurrentThreadNetNSPath() string { + // /proc/self/ns/net returns the namespace of the main thread, not + // of whatever thread this goroutine is running on. Make sure we + // use the thread's net namespace since the thread is switching around + return fmt.Sprintf("/proc/%d/task/%d/ns/net", os.Getpid(), unix.Gettid()) +} + +func (ns *netNS) Close() error { + if err := ns.errorIfClosed(); err != nil { + return err + } + + if err := ns.file.Close(); err != nil { + return fmt.Errorf("Failed to close %q: %v", ns.file.Name(), err) + } + ns.closed = true + + return nil +} + +func (ns *netNS) Set() error { + if err := ns.errorIfClosed(); err != nil { + return err + } + + if err := unix.Setns(int(ns.Fd()), unix.CLONE_NEWNET); err != nil { + return fmt.Errorf("Error switching to ns %v: %v", ns.file.Name(), err) + } + + return nil +} + +type NetNS interface { + // Executes the passed closure in this object's network namespace, + // attempting to restore the original namespace before returning. 
+ // However, since each OS thread can have a different network namespace, + // and Go's thread scheduling is highly variable, callers cannot + // guarantee any specific namespace is set unless operations that + // require that namespace are wrapped with Do(). Also, no code called + // from Do() should call runtime.UnlockOSThread(), or the risk + // of executing code in an incorrect namespace will be greater. See + // https://github.com/golang/go/wiki/LockOSThread for further details. + Do(toRun func(NetNS) error) error + + // Sets the current network namespace to this object's network namespace. + // Note that since Go's thread scheduling is highly variable, callers + // cannot guarantee the requested namespace will be the current namespace + // after this function is called; to ensure this wrap operations that + // require the namespace with Do() instead. + Set() error + + // Returns the filesystem path representing this object's network namespace + Path() string + + // Returns a file descriptor representing this object's network namespace + Fd() uintptr + + // Cleans up this instance of the network namespace; if this instance + // is the last user the namespace will be destroyed + Close() error +} + +type netNS struct { + file *os.File + closed bool +} + +// netNS implements the NetNS interface +var _ NetNS = &netNS{} + +const ( + // https://github.com/torvalds/linux/blob/master/include/uapi/linux/magic.h + NSFS_MAGIC = unix.NSFS_MAGIC + PROCFS_MAGIC = unix.PROC_SUPER_MAGIC +) + +type NSPathNotExistErr struct{ msg string } + +func (e NSPathNotExistErr) Error() string { return e.msg } + +type NSPathNotNSErr struct{ msg string } + +func (e NSPathNotNSErr) Error() string { return e.msg } + +func IsNSorErr(nspath string) error { + stat := syscall.Statfs_t{} + if err := syscall.Statfs(nspath, &stat); err != nil { + if os.IsNotExist(err) { + err = NSPathNotExistErr{msg: fmt.Sprintf("failed to Statfs %q: %v", nspath, err)} + } else { + err = fmt.Errorf("failed to 
Statfs %q: %v", nspath, err) + } + return err + } + + switch stat.Type { + case PROCFS_MAGIC, NSFS_MAGIC: + return nil + default: + return NSPathNotNSErr{msg: fmt.Sprintf("unknown FS magic on %q: %x", nspath, stat.Type)} + } +} + +// Returns an object representing the namespace referred to by @path +func GetNS(nspath string) (NetNS, error) { + err := IsNSorErr(nspath) + if err != nil { + return nil, err + } + + fd, err := os.Open(nspath) + if err != nil { + return nil, err + } + + return &netNS{file: fd}, nil +} + +func (ns *netNS) Path() string { + return ns.file.Name() +} + +func (ns *netNS) Fd() uintptr { + return ns.file.Fd() +} + +func (ns *netNS) errorIfClosed() error { + if ns.closed { + return fmt.Errorf("%q has already been closed", ns.file.Name()) + } + return nil +} + +func (ns *netNS) Do(toRun func(NetNS) error) error { + if err := ns.errorIfClosed(); err != nil { + return err + } + + containedCall := func(hostNS NetNS) error { + threadNS, err := GetCurrentNS() + if err != nil { + return fmt.Errorf("failed to open current netns: %v", err) + } + defer threadNS.Close() + + // switch to target namespace + if err = ns.Set(); err != nil { + return fmt.Errorf("error switching to ns %v: %v", ns.file.Name(), err) + } + defer func() { + err := threadNS.Set() // switch back + if err == nil { + // Unlock the current thread only when we successfully switched back + // to the original namespace; otherwise leave the thread locked which + // will force the runtime to scrap the current thread, that is maybe + // not as optimal but at least always safe to do. 
+ runtime.UnlockOSThread() + } + }() + + return toRun(hostNS) + } + + // save a handle to current network namespace + hostNS, err := GetCurrentNS() + if err != nil { + return fmt.Errorf("Failed to open current namespace: %v", err) + } + defer hostNS.Close() + + var wg sync.WaitGroup + wg.Add(1) + + // Start the callback in a new green thread so that if we later fail + // to switch the namespace back to the original one, we can safely + // leave the thread locked to die without a risk of the current thread + // left lingering with incorrect namespace. + var innerError error + go func() { + defer wg.Done() + runtime.LockOSThread() + innerError = containedCall(hostNS) + }() + wg.Wait() + + return innerError +} + +// WithNetNSPath executes the passed closure under the given network +// namespace, restoring the original namespace afterwards. +func WithNetNSPath(nspath string, toRun func(NetNS) error) error { + ns, err := GetNS(nspath) + if err != nil { + return err + } + defer ns.Close() + return ns.Do(toRun) +} diff --git a/vendor/github.com/containernetworking/plugins/pkg/utils/sysctl/sysctl_linux.go b/vendor/github.com/containernetworking/plugins/pkg/utils/sysctl/sysctl_linux.go new file mode 100644 index 000000000..469e9be9e --- /dev/null +++ b/vendor/github.com/containernetworking/plugins/pkg/utils/sysctl/sysctl_linux.go @@ -0,0 +1,78 @@ +// Copyright 2016 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package sysctl + +import ( + "fmt" + "os" + "path/filepath" + "strings" +) + +// Sysctl provides a method to set/get values from /proc/sys - in linux systems +// new interface to set/get values of variables formerly handled by sysctl syscall +// If optional `params` have only one string value - this function will +// set this value into corresponding sysctl variable +func Sysctl(name string, params ...string) (string, error) { + if len(params) > 1 { + return "", fmt.Errorf("unexcepted additional parameters") + } else if len(params) == 1 { + return setSysctl(name, params[0]) + } + return getSysctl(name) +} + +func getSysctl(name string) (string, error) { + fullName := filepath.Join("/proc/sys", toNormalName(name)) + data, err := os.ReadFile(fullName) + if err != nil { + return "", err + } + + return string(data[:len(data)-1]), nil +} + +func setSysctl(name, value string) (string, error) { + fullName := filepath.Join("/proc/sys", toNormalName(name)) + if err := os.WriteFile(fullName, []byte(value), 0644); err != nil { + return "", err + } + + return getSysctl(name) +} + +// Normalize names by using slash as separator +// Sysctl names can use dots or slashes as separator: +// - if dots are used, dots and slashes are interchanged. +// - if slashes are used, slashes and dots are left intact. +// Separator in use is determined by first occurrence. +func toNormalName(name string) string { + interchange := false + for _, c := range name { + if c == '.' 
{ + interchange = true + break + } + if c == '/' { + break + } + } + + if interchange { + r := strings.NewReplacer(".", "/", "/", ".") + return r.Replace(name) + } + return name +} diff --git a/vendor/github.com/coreos/go-iptables/LICENSE b/vendor/github.com/coreos/go-iptables/LICENSE new file mode 100644 index 000000000..37ec93a14 --- /dev/null +++ b/vendor/github.com/coreos/go-iptables/LICENSE @@ -0,0 +1,191 @@ +Apache License +Version 2.0, January 2004 +http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + +"License" shall mean the terms and conditions for use, reproduction, and +distribution as defined by Sections 1 through 9 of this document. + +"Licensor" shall mean the copyright owner or entity authorized by the copyright +owner that is granting the License. + +"Legal Entity" shall mean the union of the acting entity and all other entities +that control, are controlled by, or are under common control with that entity. +For the purposes of this definition, "control" means (i) the power, direct or +indirect, to cause the direction or management of such entity, whether by +contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the +outstanding shares, or (iii) beneficial ownership of such entity. + +"You" (or "Your") shall mean an individual or Legal Entity exercising +permissions granted by this License. + +"Source" form shall mean the preferred form for making modifications, including +but not limited to software source code, documentation source, and configuration +files. + +"Object" form shall mean any form resulting from mechanical transformation or +translation of a Source form, including but not limited to compiled object code, +generated documentation, and conversions to other media types. 
+ +"Work" shall mean the work of authorship, whether in Source or Object form, made +available under the License, as indicated by a copyright notice that is included +in or attached to the work (an example is provided in the Appendix below). + +"Derivative Works" shall mean any work, whether in Source or Object form, that +is based on (or derived from) the Work and for which the editorial revisions, +annotations, elaborations, or other modifications represent, as a whole, an +original work of authorship. For the purposes of this License, Derivative Works +shall not include works that remain separable from, or merely link (or bind by +name) to the interfaces of, the Work and Derivative Works thereof. + +"Contribution" shall mean any work of authorship, including the original version +of the Work and any modifications or additions to that Work or Derivative Works +thereof, that is intentionally submitted to Licensor for inclusion in the Work +by the copyright owner or by an individual or Legal Entity authorized to submit +on behalf of the copyright owner. For the purposes of this definition, +"submitted" means any form of electronic, verbal, or written communication sent +to the Licensor or its representatives, including but not limited to +communication on electronic mailing lists, source code control systems, and +issue tracking systems that are managed by, or on behalf of, the Licensor for +the purpose of discussing and improving the Work, but excluding communication +that is conspicuously marked or otherwise designated in writing by the copyright +owner as "Not a Contribution." + +"Contributor" shall mean Licensor and any individual or Legal Entity on behalf +of whom a Contribution has been received by Licensor and subsequently +incorporated within the Work. + +2. Grant of Copyright License. 
+ +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable copyright license to reproduce, prepare Derivative Works of, +publicly display, publicly perform, sublicense, and distribute the Work and such +Derivative Works in Source or Object form. + +3. Grant of Patent License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable (except as stated in this section) patent license to make, have +made, use, offer to sell, sell, import, and otherwise transfer the Work, where +such license applies only to those patent claims licensable by such Contributor +that are necessarily infringed by their Contribution(s) alone or by combination +of their Contribution(s) with the Work to which such Contribution(s) was +submitted. If You institute patent litigation against any entity (including a +cross-claim or counterclaim in a lawsuit) alleging that the Work or a +Contribution incorporated within the Work constitutes direct or contributory +patent infringement, then any patent licenses granted to You under this License +for that Work shall terminate as of the date such litigation is filed. + +4. Redistribution. 
+ +You may reproduce and distribute copies of the Work or Derivative Works thereof +in any medium, with or without modifications, and in Source or Object form, +provided that You meet the following conditions: + +You must give any other recipients of the Work or Derivative Works a copy of +this License; and +You must cause any modified files to carry prominent notices stating that You +changed the files; and +You must retain, in the Source form of any Derivative Works that You distribute, +all copyright, patent, trademark, and attribution notices from the Source form +of the Work, excluding those notices that do not pertain to any part of the +Derivative Works; and +If the Work includes a "NOTICE" text file as part of its distribution, then any +Derivative Works that You distribute must include a readable copy of the +attribution notices contained within such NOTICE file, excluding those notices +that do not pertain to any part of the Derivative Works, in at least one of the +following places: within a NOTICE text file distributed as part of the +Derivative Works; within the Source form or documentation, if provided along +with the Derivative Works; or, within a display generated by the Derivative +Works, if and wherever such third-party notices normally appear. The contents of +the NOTICE file are for informational purposes only and do not modify the +License. You may add Your own attribution notices within Derivative Works that +You distribute, alongside or as an addendum to the NOTICE text from the Work, +provided that such additional attribution notices cannot be construed as +modifying the License. +You may add Your own copyright statement to Your modifications and may provide +additional or different license terms and conditions for use, reproduction, or +distribution of Your modifications, or for any such Derivative Works as a whole, +provided Your use, reproduction, and distribution of the Work otherwise complies +with the conditions stated in this License. 
+ +5. Submission of Contributions. + +Unless You explicitly state otherwise, any Contribution intentionally submitted +for inclusion in the Work by You to the Licensor shall be under the terms and +conditions of this License, without any additional terms or conditions. +Notwithstanding the above, nothing herein shall supersede or modify the terms of +any separate license agreement you may have executed with Licensor regarding +such Contributions. + +6. Trademarks. + +This License does not grant permission to use the trade names, trademarks, +service marks, or product names of the Licensor, except as required for +reasonable and customary use in describing the origin of the Work and +reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. + +Unless required by applicable law or agreed to in writing, Licensor provides the +Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, +including, without limitation, any warranties or conditions of TITLE, +NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are +solely responsible for determining the appropriateness of using or +redistributing the Work and assume any risks associated with Your exercise of +permissions under this License. + +8. Limitation of Liability. 
+ +In no event and under no legal theory, whether in tort (including negligence), +contract, or otherwise, unless required by applicable law (such as deliberate +and grossly negligent acts) or agreed to in writing, shall any Contributor be +liable to You for damages, including any direct, indirect, special, incidental, +or consequential damages of any character arising as a result of this License or +out of the use or inability to use the Work (including but not limited to +damages for loss of goodwill, work stoppage, computer failure or malfunction, or +any and all other commercial damages or losses), even if such Contributor has +been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. + +While redistributing the Work or Derivative Works thereof, You may choose to +offer, and charge a fee for, acceptance of support, warranty, indemnity, or +other liability obligations and/or rights consistent with this License. However, +in accepting such obligations, You may act only on Your own behalf and on Your +sole responsibility, not on behalf of any other Contributor, and only if You +agree to indemnify, defend, and hold each Contributor harmless for any liability +incurred by, or claims asserted against, such Contributor by reason of your +accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work + +To apply the Apache License to your work, attach the following boilerplate +notice, with the fields enclosed by brackets "[]" replaced with your own +identifying information. (Don't include the brackets!) The text should be +enclosed in the appropriate comment syntax for the file format. We also +recommend that a file or class name and description of purpose be included on +the same "printed page" as the copyright notice for easier identification within +third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/coreos/go-iptables/NOTICE b/vendor/github.com/coreos/go-iptables/NOTICE new file mode 100644 index 000000000..23a0ada2f --- /dev/null +++ b/vendor/github.com/coreos/go-iptables/NOTICE @@ -0,0 +1,5 @@ +CoreOS Project +Copyright 2018 CoreOS, Inc + +This product includes software developed at CoreOS, Inc. +(http://www.coreos.com/). diff --git a/vendor/github.com/coreos/go-iptables/iptables/iptables.go b/vendor/github.com/coreos/go-iptables/iptables/iptables.go new file mode 100644 index 000000000..85047e59d --- /dev/null +++ b/vendor/github.com/coreos/go-iptables/iptables/iptables.go @@ -0,0 +1,680 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package iptables + +import ( + "bytes" + "fmt" + "io" + "net" + "os/exec" + "regexp" + "strconv" + "strings" + "syscall" +) + +// Adds the output of stderr to exec.ExitError +type Error struct { + exec.ExitError + cmd exec.Cmd + msg string + exitStatus *int //for overriding +} + +func (e *Error) ExitStatus() int { + if e.exitStatus != nil { + return *e.exitStatus + } + return e.Sys().(syscall.WaitStatus).ExitStatus() +} + +func (e *Error) Error() string { + return fmt.Sprintf("running %v: exit status %v: %v", e.cmd.Args, e.ExitStatus(), e.msg) +} + +// IsNotExist returns true if the error is due to the chain or rule not existing +func (e *Error) IsNotExist() bool { + if e.ExitStatus() != 1 { + return false + } + msgNoRuleExist := "Bad rule (does a matching rule exist in that chain?).\n" + msgNoChainExist := "No chain/target/match by that name.\n" + return strings.Contains(e.msg, msgNoRuleExist) || strings.Contains(e.msg, msgNoChainExist) +} + +// Protocol to differentiate between IPv4 and IPv6 +type Protocol byte + +const ( + ProtocolIPv4 Protocol = iota + ProtocolIPv6 +) + +type IPTables struct { + path string + proto Protocol + hasCheck bool + hasWait bool + waitSupportSecond bool + hasRandomFully bool + v1 int + v2 int + v3 int + mode string // the underlying iptables operating mode, e.g. nf_tables + timeout int // time to wait for the iptables lock, default waits forever +} + +// Stat represents a structured statistic entry. 
+type Stat struct { + Packets uint64 `json:"pkts"` + Bytes uint64 `json:"bytes"` + Target string `json:"target"` + Protocol string `json:"prot"` + Opt string `json:"opt"` + Input string `json:"in"` + Output string `json:"out"` + Source *net.IPNet `json:"source"` + Destination *net.IPNet `json:"destination"` + Options string `json:"options"` +} + +type option func(*IPTables) + +func IPFamily(proto Protocol) option { + return func(ipt *IPTables) { + ipt.proto = proto + } +} + +func Timeout(timeout int) option { + return func(ipt *IPTables) { + ipt.timeout = timeout + } +} + +// New creates a new IPTables configured with the options passed as parameter. +// For backwards compatibility, by default always uses IPv4 and timeout 0. +// i.e. you can create an IPv6 IPTables using a timeout of 5 seconds passing +// the IPFamily and Timeout options as follow: +// ip6t := New(IPFamily(ProtocolIPv6), Timeout(5)) +func New(opts ...option) (*IPTables, error) { + + ipt := &IPTables{ + proto: ProtocolIPv4, + timeout: 0, + } + + for _, opt := range opts { + opt(ipt) + } + + path, err := exec.LookPath(getIptablesCommand(ipt.proto)) + if err != nil { + return nil, err + } + ipt.path = path + + vstring, err := getIptablesVersionString(path) + if err != nil { + return nil, fmt.Errorf("could not get iptables version: %v", err) + } + v1, v2, v3, mode, err := extractIptablesVersion(vstring) + if err != nil { + return nil, fmt.Errorf("failed to extract iptables version from [%s]: %v", vstring, err) + } + ipt.v1 = v1 + ipt.v2 = v2 + ipt.v3 = v3 + ipt.mode = mode + + checkPresent, waitPresent, waitSupportSecond, randomFullyPresent := getIptablesCommandSupport(v1, v2, v3) + ipt.hasCheck = checkPresent + ipt.hasWait = waitPresent + ipt.waitSupportSecond = waitSupportSecond + ipt.hasRandomFully = randomFullyPresent + + return ipt, nil +} + +// New creates a new IPTables for the given proto. +// The proto will determine which command is used, either "iptables" or "ip6tables". 
+func NewWithProtocol(proto Protocol) (*IPTables, error) { + return New(IPFamily(proto), Timeout(0)) +} + +// Proto returns the protocol used by this IPTables. +func (ipt *IPTables) Proto() Protocol { + return ipt.proto +} + +// Exists checks if given rulespec in specified table/chain exists +func (ipt *IPTables) Exists(table, chain string, rulespec ...string) (bool, error) { + if !ipt.hasCheck { + return ipt.existsForOldIptables(table, chain, rulespec) + + } + cmd := append([]string{"-t", table, "-C", chain}, rulespec...) + err := ipt.run(cmd...) + eerr, eok := err.(*Error) + switch { + case err == nil: + return true, nil + case eok && eerr.ExitStatus() == 1: + return false, nil + default: + return false, err + } +} + +// Insert inserts rulespec to specified table/chain (in specified pos) +func (ipt *IPTables) Insert(table, chain string, pos int, rulespec ...string) error { + cmd := append([]string{"-t", table, "-I", chain, strconv.Itoa(pos)}, rulespec...) + return ipt.run(cmd...) +} + +// Append appends rulespec to specified table/chain +func (ipt *IPTables) Append(table, chain string, rulespec ...string) error { + cmd := append([]string{"-t", table, "-A", chain}, rulespec...) + return ipt.run(cmd...) +} + +// AppendUnique acts like Append except that it won't add a duplicate +func (ipt *IPTables) AppendUnique(table, chain string, rulespec ...string) error { + exists, err := ipt.Exists(table, chain, rulespec...) + if err != nil { + return err + } + + if !exists { + return ipt.Append(table, chain, rulespec...) + } + + return nil +} + +// Delete removes rulespec in specified table/chain +func (ipt *IPTables) Delete(table, chain string, rulespec ...string) error { + cmd := append([]string{"-t", table, "-D", chain}, rulespec...) + return ipt.run(cmd...) +} + +func (ipt *IPTables) DeleteIfExists(table, chain string, rulespec ...string) error { + exists, err := ipt.Exists(table, chain, rulespec...) 
+ if err == nil && exists { + err = ipt.Delete(table, chain, rulespec...) + } + return err +} + +// List rules in specified table/chain +func (ipt *IPTables) List(table, chain string) ([]string, error) { + args := []string{"-t", table, "-S", chain} + return ipt.executeList(args) +} + +// List rules (with counters) in specified table/chain +func (ipt *IPTables) ListWithCounters(table, chain string) ([]string, error) { + args := []string{"-t", table, "-v", "-S", chain} + return ipt.executeList(args) +} + +// ListChains returns a slice containing the name of each chain in the specified table. +func (ipt *IPTables) ListChains(table string) ([]string, error) { + args := []string{"-t", table, "-S"} + + result, err := ipt.executeList(args) + if err != nil { + return nil, err + } + + // Iterate over rules to find all default (-P) and user-specified (-N) chains. + // Chains definition always come before rules. + // Format is the following: + // -P OUTPUT ACCEPT + // -N Custom + var chains []string + for _, val := range result { + if strings.HasPrefix(val, "-P") || strings.HasPrefix(val, "-N") { + chains = append(chains, strings.Fields(val)[1]) + } else { + break + } + } + return chains, nil +} + +// '-S' is fine with non existing rule index as long as the chain exists +// therefore pass index 1 to reduce overhead for large chains +func (ipt *IPTables) ChainExists(table, chain string) (bool, error) { + err := ipt.run("-t", table, "-S", chain, "1") + eerr, eok := err.(*Error) + switch { + case err == nil: + return true, nil + case eok && eerr.ExitStatus() == 1: + return false, nil + default: + return false, err + } +} + +// Stats lists rules including the byte and packet counts +func (ipt *IPTables) Stats(table, chain string) ([][]string, error) { + args := []string{"-t", table, "-L", chain, "-n", "-v", "-x"} + lines, err := ipt.executeList(args) + if err != nil { + return nil, err + } + + appendSubnet := func(addr string) string { + if strings.IndexByte(addr, byte('/')) < 0 
{ + if strings.IndexByte(addr, '.') < 0 { + return addr + "/128" + } + return addr + "/32" + } + return addr + } + + ipv6 := ipt.proto == ProtocolIPv6 + + rows := [][]string{} + for i, line := range lines { + // Skip over chain name and field header + if i < 2 { + continue + } + + // Fields: + // 0=pkts 1=bytes 2=target 3=prot 4=opt 5=in 6=out 7=source 8=destination 9=options + line = strings.TrimSpace(line) + fields := strings.Fields(line) + + // The ip6tables verbose output cannot be naively split due to the default "opt" + // field containing 2 single spaces. + if ipv6 { + // Check if field 6 is "opt" or "source" address + dest := fields[6] + ip, _, _ := net.ParseCIDR(dest) + if ip == nil { + ip = net.ParseIP(dest) + } + + // If we detected a CIDR or IP, the "opt" field is empty.. insert it. + if ip != nil { + f := []string{} + f = append(f, fields[:4]...) + f = append(f, " ") // Empty "opt" field for ip6tables + f = append(f, fields[4:]...) + fields = f + } + } + + // Adjust "source" and "destination" to include netmask, to match regular + // List output + fields[7] = appendSubnet(fields[7]) + fields[8] = appendSubnet(fields[8]) + + // Combine "options" fields 9... into a single space-delimited field. + options := fields[9:] + fields = fields[:9] + fields = append(fields, strings.Join(options, " ")) + rows = append(rows, fields) + } + return rows, nil +} + +// ParseStat parses a single statistic row into a Stat struct. The input should +// be a string slice that is returned from calling the Stat method. 
+func (ipt *IPTables) ParseStat(stat []string) (parsed Stat, err error) { + // For forward-compatibility, expect at least 10 fields in the stat + if len(stat) < 10 { + return parsed, fmt.Errorf("stat contained fewer fields than expected") + } + + // Convert the fields that are not plain strings + parsed.Packets, err = strconv.ParseUint(stat[0], 0, 64) + if err != nil { + return parsed, fmt.Errorf(err.Error(), "could not parse packets") + } + parsed.Bytes, err = strconv.ParseUint(stat[1], 0, 64) + if err != nil { + return parsed, fmt.Errorf(err.Error(), "could not parse bytes") + } + _, parsed.Source, err = net.ParseCIDR(stat[7]) + if err != nil { + return parsed, fmt.Errorf(err.Error(), "could not parse source") + } + _, parsed.Destination, err = net.ParseCIDR(stat[8]) + if err != nil { + return parsed, fmt.Errorf(err.Error(), "could not parse destination") + } + + // Put the fields that are strings + parsed.Target = stat[2] + parsed.Protocol = stat[3] + parsed.Opt = stat[4] + parsed.Input = stat[5] + parsed.Output = stat[6] + parsed.Options = stat[9] + + return parsed, nil +} + +// StructuredStats returns statistics as structured data which may be further +// parsed and marshaled. 
+func (ipt *IPTables) StructuredStats(table, chain string) ([]Stat, error) { + rawStats, err := ipt.Stats(table, chain) + if err != nil { + return nil, err + } + + structStats := []Stat{} + for _, rawStat := range rawStats { + stat, err := ipt.ParseStat(rawStat) + if err != nil { + return nil, err + } + structStats = append(structStats, stat) + } + + return structStats, nil +} + +func (ipt *IPTables) executeList(args []string) ([]string, error) { + var stdout bytes.Buffer + if err := ipt.runWithOutput(args, &stdout); err != nil { + return nil, err + } + + rules := strings.Split(stdout.String(), "\n") + + // strip trailing newline + if len(rules) > 0 && rules[len(rules)-1] == "" { + rules = rules[:len(rules)-1] + } + + for i, rule := range rules { + rules[i] = filterRuleOutput(rule) + } + + return rules, nil +} + +// NewChain creates a new chain in the specified table. +// If the chain already exists, it will result in an error. +func (ipt *IPTables) NewChain(table, chain string) error { + return ipt.run("-t", table, "-N", chain) +} + +const existsErr = 1 + +// ClearChain flushed (deletes all rules) in the specified table/chain. +// If the chain does not exist, a new one will be created +func (ipt *IPTables) ClearChain(table, chain string) error { + err := ipt.NewChain(table, chain) + + eerr, eok := err.(*Error) + switch { + case err == nil: + return nil + case eok && eerr.ExitStatus() == existsErr: + // chain already exists. Flush (clear) it. + return ipt.run("-t", table, "-F", chain) + default: + return err + } +} + +// RenameChain renames the old chain to the new one. +func (ipt *IPTables) RenameChain(table, oldChain, newChain string) error { + return ipt.run("-t", table, "-E", oldChain, newChain) +} + +// DeleteChain deletes the chain in the specified table. 
+// The chain must be empty +func (ipt *IPTables) DeleteChain(table, chain string) error { + return ipt.run("-t", table, "-X", chain) +} + +func (ipt *IPTables) ClearAndDeleteChain(table, chain string) error { + exists, err := ipt.ChainExists(table, chain) + if err != nil || !exists { + return err + } + err = ipt.run("-t", table, "-F", chain) + if err == nil { + err = ipt.run("-t", table, "-X", chain) + } + return err +} + +func (ipt *IPTables) ClearAll() error { + return ipt.run("-F") +} + +func (ipt *IPTables) DeleteAll() error { + return ipt.run("-X") +} + +// ChangePolicy changes policy on chain to target +func (ipt *IPTables) ChangePolicy(table, chain, target string) error { + return ipt.run("-t", table, "-P", chain, target) +} + +// Check if the underlying iptables command supports the --random-fully flag +func (ipt *IPTables) HasRandomFully() bool { + return ipt.hasRandomFully +} + +// Return version components of the underlying iptables command +func (ipt *IPTables) GetIptablesVersion() (int, int, int) { + return ipt.v1, ipt.v2, ipt.v3 +} + +// run runs an iptables command with the given arguments, ignoring +// any stdout output +func (ipt *IPTables) run(args ...string) error { + return ipt.runWithOutput(args, nil) +} + +// runWithOutput runs an iptables command with the given arguments, +// writing any stdout output to the given writer +func (ipt *IPTables) runWithOutput(args []string, stdout io.Writer) error { + args = append([]string{ipt.path}, args...) 
+ if ipt.hasWait { + args = append(args, "--wait") + if ipt.timeout != 0 && ipt.waitSupportSecond { + args = append(args, strconv.Itoa(ipt.timeout)) + } + } else { + fmu, err := newXtablesFileLock() + if err != nil { + return err + } + ul, err := fmu.tryLock() + if err != nil { + syscall.Close(fmu.fd) + return err + } + defer ul.Unlock() + } + + var stderr bytes.Buffer + cmd := exec.Cmd{ + Path: ipt.path, + Args: args, + Stdout: stdout, + Stderr: &stderr, + } + + if err := cmd.Run(); err != nil { + switch e := err.(type) { + case *exec.ExitError: + return &Error{*e, cmd, stderr.String(), nil} + default: + return err + } + } + + return nil +} + +// getIptablesCommand returns the correct command for the given protocol, either "iptables" or "ip6tables". +func getIptablesCommand(proto Protocol) string { + if proto == ProtocolIPv6 { + return "ip6tables" + } else { + return "iptables" + } +} + +// Checks if iptables has the "-C" and "--wait" flag +func getIptablesCommandSupport(v1 int, v2 int, v3 int) (bool, bool, bool, bool) { + return iptablesHasCheckCommand(v1, v2, v3), iptablesHasWaitCommand(v1, v2, v3), iptablesWaitSupportSecond(v1, v2, v3), iptablesHasRandomFully(v1, v2, v3) +} + +// getIptablesVersion returns the first three components of the iptables version +// and the operating mode (e.g. nf_tables or legacy) +// e.g. 
"iptables v1.3.66" would return (1, 3, 66, legacy, nil) +func extractIptablesVersion(str string) (int, int, int, string, error) { + versionMatcher := regexp.MustCompile(`v([0-9]+)\.([0-9]+)\.([0-9]+)(?:\s+\((\w+))?`) + result := versionMatcher.FindStringSubmatch(str) + if result == nil { + return 0, 0, 0, "", fmt.Errorf("no iptables version found in string: %s", str) + } + + v1, err := strconv.Atoi(result[1]) + if err != nil { + return 0, 0, 0, "", err + } + + v2, err := strconv.Atoi(result[2]) + if err != nil { + return 0, 0, 0, "", err + } + + v3, err := strconv.Atoi(result[3]) + if err != nil { + return 0, 0, 0, "", err + } + + mode := "legacy" + if result[4] != "" { + mode = result[4] + } + return v1, v2, v3, mode, nil +} + +// Runs "iptables --version" to get the version string +func getIptablesVersionString(path string) (string, error) { + cmd := exec.Command(path, "--version") + var out bytes.Buffer + cmd.Stdout = &out + err := cmd.Run() + if err != nil { + return "", err + } + return out.String(), nil +} + +// Checks if an iptables version is after 1.4.11, when --check was added +func iptablesHasCheckCommand(v1 int, v2 int, v3 int) bool { + if v1 > 1 { + return true + } + if v1 == 1 && v2 > 4 { + return true + } + if v1 == 1 && v2 == 4 && v3 >= 11 { + return true + } + return false +} + +// Checks if an iptables version is after 1.4.20, when --wait was added +func iptablesHasWaitCommand(v1 int, v2 int, v3 int) bool { + if v1 > 1 { + return true + } + if v1 == 1 && v2 > 4 { + return true + } + if v1 == 1 && v2 == 4 && v3 >= 20 { + return true + } + return false +} + +//Checks if an iptablse version is after 1.6.0, when --wait support second +func iptablesWaitSupportSecond(v1 int, v2 int, v3 int) bool { + if v1 > 1 { + return true + } + if v1 == 1 && v2 >= 6 { + return true + } + return false +} + +// Checks if an iptables version is after 1.6.2, when --random-fully was added +func iptablesHasRandomFully(v1 int, v2 int, v3 int) bool { + if v1 > 1 { + return 
true + } + if v1 == 1 && v2 > 6 { + return true + } + if v1 == 1 && v2 == 6 && v3 >= 2 { + return true + } + return false +} + +// Checks if a rule specification exists for a table +func (ipt *IPTables) existsForOldIptables(table, chain string, rulespec []string) (bool, error) { + rs := strings.Join(append([]string{"-A", chain}, rulespec...), " ") + args := []string{"-t", table, "-S"} + var stdout bytes.Buffer + err := ipt.runWithOutput(args, &stdout) + if err != nil { + return false, err + } + return strings.Contains(stdout.String(), rs), nil +} + +// counterRegex is the regex used to detect nftables counter format +var counterRegex = regexp.MustCompile(`^\[([0-9]+):([0-9]+)\] `) + +// filterRuleOutput works around some inconsistencies in output. +// For example, when iptables is in legacy vs. nftables mode, it produces +// different results. +func filterRuleOutput(rule string) string { + out := rule + + // work around an output difference in nftables mode where counters + // are output in iptables-save format, rather than iptables -S format + // The string begins with "[0:0]" + // + // Fixes #49 + if groups := counterRegex.FindStringSubmatch(out); groups != nil { + // drop the brackets + out = out[len(groups[0]):] + out = fmt.Sprintf("%s -c %s %s", out, groups[1], groups[2]) + } + + return out +} diff --git a/vendor/github.com/coreos/go-iptables/iptables/lock.go b/vendor/github.com/coreos/go-iptables/iptables/lock.go new file mode 100644 index 000000000..a88e92b4e --- /dev/null +++ b/vendor/github.com/coreos/go-iptables/iptables/lock.go @@ -0,0 +1,84 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package iptables + +import ( + "os" + "sync" + "syscall" +) + +const ( + // In earlier versions of iptables, the xtables lock was implemented + // via a Unix socket, but now flock is used via this lockfile: + // http://git.netfilter.org/iptables/commit/?id=aa562a660d1555b13cffbac1e744033e91f82707 + // Note the LSB-conforming "/run" directory does not exist on old + // distributions, so assume "/var" is symlinked + xtablesLockFilePath = "/var/run/xtables.lock" + + defaultFilePerm = 0600 +) + +type Unlocker interface { + Unlock() error +} + +type nopUnlocker struct{} + +func (_ nopUnlocker) Unlock() error { return nil } + +type fileLock struct { + // mu is used to protect against concurrent invocations from within this process + mu sync.Mutex + fd int +} + +// tryLock takes an exclusive lock on the xtables lock file without blocking. +// This is best-effort only: if the exclusive lock would block (i.e. because +// another process already holds it), no error is returned. Otherwise, any +// error encountered during the locking operation is returned. +// The returned Unlocker should be used to release the lock when the caller is +// done invoking iptables commands. +func (l *fileLock) tryLock() (Unlocker, error) { + l.mu.Lock() + err := syscall.Flock(l.fd, syscall.LOCK_EX|syscall.LOCK_NB) + switch err { + case syscall.EWOULDBLOCK: + l.mu.Unlock() + return nopUnlocker{}, nil + case nil: + return l, nil + default: + l.mu.Unlock() + return nil, err + } +} + +// Unlock closes the underlying file, which implicitly unlocks it as well. 
It +// also unlocks the associated mutex. +func (l *fileLock) Unlock() error { + defer l.mu.Unlock() + return syscall.Close(l.fd) +} + +// newXtablesFileLock opens a new lock on the xtables lockfile without +// acquiring the lock +func newXtablesFileLock() (*fileLock, error) { + fd, err := syscall.Open(xtablesLockFilePath, os.O_CREATE, defaultFilePerm) + if err != nil { + return nil, err + } + return &fileLock{fd: fd}, nil +} diff --git a/vendor/github.com/cpuguy83/go-md2man/v2/LICENSE.md b/vendor/github.com/cpuguy83/go-md2man/v2/LICENSE.md new file mode 100644 index 000000000..1cade6cef --- /dev/null +++ b/vendor/github.com/cpuguy83/go-md2man/v2/LICENSE.md @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 Brian Goff + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/vendor/github.com/cpuguy83/go-md2man/v2/md2man/debug.go b/vendor/github.com/cpuguy83/go-md2man/v2/md2man/debug.go new file mode 100644 index 000000000..0ec4b12c7 --- /dev/null +++ b/vendor/github.com/cpuguy83/go-md2man/v2/md2man/debug.go @@ -0,0 +1,62 @@ +package md2man + +import ( + "fmt" + "io" + "os" + "strings" + + "github.com/russross/blackfriday/v2" +) + +func fmtListFlags(flags blackfriday.ListType) string { + knownFlags := []struct { + name string + flag blackfriday.ListType + }{ + {"ListTypeOrdered", blackfriday.ListTypeOrdered}, + {"ListTypeDefinition", blackfriday.ListTypeDefinition}, + {"ListTypeTerm", blackfriday.ListTypeTerm}, + {"ListItemContainsBlock", blackfriday.ListItemContainsBlock}, + {"ListItemBeginningOfList", blackfriday.ListItemBeginningOfList}, + {"ListItemEndOfList", blackfriday.ListItemEndOfList}, + } + + var f []string + for _, kf := range knownFlags { + if flags&kf.flag != 0 { + f = append(f, kf.name) + flags &^= kf.flag + } + } + if flags != 0 { + f = append(f, fmt.Sprintf("Unknown(%#x)", flags)) + } + return strings.Join(f, "|") +} + +type debugDecorator struct { + blackfriday.Renderer +} + +func depth(node *blackfriday.Node) int { + d := 0 + for n := node.Parent; n != nil; n = n.Parent { + d++ + } + return d +} + +func (d *debugDecorator) RenderNode(w io.Writer, node *blackfriday.Node, entering bool) blackfriday.WalkStatus { + fmt.Fprintf(os.Stderr, "%s%s %v %v\n", + strings.Repeat(" ", depth(node)), + map[bool]string{true: "+", false: "-"}[entering], + node, + fmtListFlags(node.ListFlags)) + var b strings.Builder + status := d.Renderer.RenderNode(io.MultiWriter(&b, w), node, entering) + if b.Len() > 0 { + fmt.Fprintf(os.Stderr, ">> %q\n", b.String()) + } + return status +} diff --git a/vendor/github.com/cpuguy83/go-md2man/v2/md2man/md2man.go b/vendor/github.com/cpuguy83/go-md2man/v2/md2man/md2man.go new file mode 100644 index 000000000..5673f5c0b --- /dev/null +++ 
b/vendor/github.com/cpuguy83/go-md2man/v2/md2man/md2man.go @@ -0,0 +1,24 @@ +// Package md2man aims in converting markdown into roff (man pages). +package md2man + +import ( + "os" + "strconv" + + "github.com/russross/blackfriday/v2" +) + +// Render converts a markdown document into a roff formatted document. +func Render(doc []byte) []byte { + renderer := NewRoffRenderer() + var r blackfriday.Renderer = renderer + if v, _ := strconv.ParseBool(os.Getenv("MD2MAN_DEBUG")); v { + r = &debugDecorator{Renderer: r} + } + + return blackfriday.Run(doc, + []blackfriday.Option{ + blackfriday.WithRenderer(r), + blackfriday.WithExtensions(renderer.GetExtensions()), + }...) +} diff --git a/vendor/github.com/cpuguy83/go-md2man/v2/md2man/roff.go b/vendor/github.com/cpuguy83/go-md2man/v2/md2man/roff.go new file mode 100644 index 000000000..4f1070fc5 --- /dev/null +++ b/vendor/github.com/cpuguy83/go-md2man/v2/md2man/roff.go @@ -0,0 +1,416 @@ +package md2man + +import ( + "bufio" + "bytes" + "fmt" + "io" + "os" + "strings" + + "github.com/russross/blackfriday/v2" +) + +// roffRenderer implements the blackfriday.Renderer interface for creating +// roff format (manpages) from markdown text +type roffRenderer struct { + listCounters []int + firstHeader bool + listDepth int +} + +const ( + titleHeader = ".TH " + topLevelHeader = "\n\n.SH " + secondLevelHdr = "\n.SH " + otherHeader = "\n.SS " + crTag = "\n" + emphTag = "\\fI" + emphCloseTag = "\\fP" + strongTag = "\\fB" + strongCloseTag = "\\fP" + breakTag = "\n.br\n" + paraTag = "\n.PP\n" + hruleTag = "\n.ti 0\n\\l'\\n(.lu'\n" + linkTag = "\n\\[la]" + linkCloseTag = "\\[ra]" + codespanTag = "\\fB" + codespanCloseTag = "\\fR" + codeTag = "\n.EX\n" + codeCloseTag = ".EE\n" // Do not prepend a newline character since code blocks, by definition, include a newline already (or at least as how blackfriday gives us on). 
+ quoteTag = "\n.PP\n.RS\n" + quoteCloseTag = "\n.RE\n" + listTag = "\n.RS\n" + listCloseTag = ".RE\n" + dtTag = "\n.TP\n" + dd2Tag = "\n" + tableStart = "\n.TS\nallbox;\n" + tableEnd = ".TE\n" + tableCellStart = "T{\n" + tableCellEnd = "\nT}" + tablePreprocessor = `'\" t` +) + +// NewRoffRenderer creates a new blackfriday Renderer for generating roff documents +// from markdown +func NewRoffRenderer() *roffRenderer { + return &roffRenderer{} +} + +// GetExtensions returns the list of extensions used by this renderer implementation +func (*roffRenderer) GetExtensions() blackfriday.Extensions { + return blackfriday.NoIntraEmphasis | + blackfriday.Tables | + blackfriday.FencedCode | + blackfriday.SpaceHeadings | + blackfriday.Footnotes | + blackfriday.Titleblock | + blackfriday.DefinitionLists +} + +// RenderHeader handles outputting the header at document start +func (r *roffRenderer) RenderHeader(w io.Writer, ast *blackfriday.Node) { + // We need to walk the tree to check if there are any tables. + // If there are, we need to enable the roff table preprocessor. + ast.Walk(func(node *blackfriday.Node, entering bool) blackfriday.WalkStatus { + if node.Type == blackfriday.Table { + out(w, tablePreprocessor+"\n") + return blackfriday.Terminate + } + return blackfriday.GoToNext + }) + + // disable hyphenation + out(w, ".nh\n") +} + +// RenderFooter handles outputting the footer at the document end; the roff +// renderer has no footer information +func (r *roffRenderer) RenderFooter(w io.Writer, ast *blackfriday.Node) { +} + +// RenderNode is called for each node in a markdown document; based on the node +// type the equivalent roff output is sent to the writer +func (r *roffRenderer) RenderNode(w io.Writer, node *blackfriday.Node, entering bool) blackfriday.WalkStatus { + walkAction := blackfriday.GoToNext + + switch node.Type { + case blackfriday.Text: + // Special case: format the NAME section as required for proper whatis parsing. 
+ // Refer to the lexgrog(1) and groff_man(7) manual pages for details. + if node.Parent != nil && + node.Parent.Type == blackfriday.Paragraph && + node.Parent.Prev != nil && + node.Parent.Prev.Type == blackfriday.Heading && + node.Parent.Prev.FirstChild != nil && + bytes.EqualFold(node.Parent.Prev.FirstChild.Literal, []byte("NAME")) { + before, after, found := bytesCut(node.Literal, []byte(" - ")) + escapeSpecialChars(w, before) + if found { + out(w, ` \- `) + escapeSpecialChars(w, after) + } + } else { + escapeSpecialChars(w, node.Literal) + } + case blackfriday.Softbreak: + out(w, crTag) + case blackfriday.Hardbreak: + out(w, breakTag) + case blackfriday.Emph: + if entering { + out(w, emphTag) + } else { + out(w, emphCloseTag) + } + case blackfriday.Strong: + if entering { + out(w, strongTag) + } else { + out(w, strongCloseTag) + } + case blackfriday.Link: + // Don't render the link text for automatic links, because this + // will only duplicate the URL in the roff output. + // See https://daringfireball.net/projects/markdown/syntax#autolink + if !bytes.Equal(node.LinkData.Destination, node.FirstChild.Literal) { + out(w, string(node.FirstChild.Literal)) + } + // Hyphens in a link must be escaped to avoid word-wrap in the rendered man page. 
+ escapedLink := strings.ReplaceAll(string(node.LinkData.Destination), "-", "\\-") + out(w, linkTag+escapedLink+linkCloseTag) + walkAction = blackfriday.SkipChildren + case blackfriday.Image: + // ignore images + walkAction = blackfriday.SkipChildren + case blackfriday.Code: + out(w, codespanTag) + escapeSpecialChars(w, node.Literal) + out(w, codespanCloseTag) + case blackfriday.Document: + break + case blackfriday.Paragraph: + if entering { + if r.listDepth > 0 { + // roff .PP markers break lists + if node.Prev != nil { // continued paragraph + if node.Prev.Type == blackfriday.List && node.Prev.ListFlags&blackfriday.ListTypeDefinition == 0 { + out(w, ".IP\n") + } else { + out(w, crTag) + } + } + } else if node.Prev != nil && node.Prev.Type == blackfriday.Heading { + out(w, crTag) + } else { + out(w, paraTag) + } + } else { + if node.Next == nil || node.Next.Type != blackfriday.List { + out(w, crTag) + } + } + case blackfriday.BlockQuote: + if entering { + out(w, quoteTag) + } else { + out(w, quoteCloseTag) + } + case blackfriday.Heading: + r.handleHeading(w, node, entering) + case blackfriday.HorizontalRule: + out(w, hruleTag) + case blackfriday.List: + r.handleList(w, node, entering) + case blackfriday.Item: + r.handleItem(w, node, entering) + case blackfriday.CodeBlock: + out(w, codeTag) + escapeSpecialChars(w, node.Literal) + out(w, codeCloseTag) + case blackfriday.Table: + r.handleTable(w, node, entering) + case blackfriday.TableHead: + case blackfriday.TableBody: + case blackfriday.TableRow: + // no action as cell entries do all the nroff formatting + return blackfriday.GoToNext + case blackfriday.TableCell: + r.handleTableCell(w, node, entering) + case blackfriday.HTMLSpan: + // ignore other HTML tags + case blackfriday.HTMLBlock: + if bytes.HasPrefix(node.Literal, []byte(" +

CBOR Codec Go logo

[fxamacker/cbor](https://github.com/fxamacker/cbor) is a library for encoding and decoding [CBOR](https://www.rfc-editor.org/info/std94) and [CBOR Sequences](https://www.rfc-editor.org/rfc/rfc8742.html). CBOR is a [trusted alternative](https://www.rfc-editor.org/rfc/rfc8949.html#name-comparison-of-other-binary-) to JSON, MessagePack, Protocol Buffers, etc.  CBOR is an Internet Standard defined by [IETF STD 94 (RFC 8949)](https://www.rfc-editor.org/info/std94) and is designed to be relevant for decades. -`fxamacker/cbor` is used in projects by Arm Ltd., Cisco, EdgeX Foundry, Flow Foundation, Fraunhofer‑AISEC, Kubernetes, Let's Encrypt (ISRG), Linux Foundation, Microsoft, Mozilla, Oasis Protocol, Tailscale, Teleport, [etc](https://github.com/fxamacker/cbor#who-uses-fxamackercbor). +`fxamacker/cbor` is used in projects by Arm Ltd., EdgeX Foundry, Flow Foundation, Fraunhofer‑AISEC, IBM, Kubernetes[*](https://github.com/search?q=org%3Akubernetes%20fxamacker%2Fcbor&type=code), Let's Encrypt, Linux Foundation, Microsoft, Oasis Protocol, Red Hat[*](https://github.com/search?q=org%3Aopenshift+fxamacker%2Fcbor&type=code), Tailscale[*](https://github.com/search?q=org%3Atailscale+fxamacker%2Fcbor&type=code), Veraison[*](https://github.com/search?q=org%3Averaison+fxamacker%2Fcbor&type=code), [etc](https://github.com/fxamacker/cbor#who-uses-fxamackercbor). -See [Quick Start](#quick-start) and [Releases](https://github.com/fxamacker/cbor/releases/). 🆕 `UnmarshalFirst` and `DiagnoseFirst` can decode CBOR Sequences. `cbor.MarshalToBuffer()` and `UserBufferEncMode` accepts user-specified buffer. +See [Quick Start](#quick-start) and [Releases](https://github.com/fxamacker/cbor/releases/). 🆕 `UnmarshalFirst` and `DiagnoseFirst` can decode CBOR Sequences. `MarshalToBuffer` and `UserBufferEncMode` accepts user-specified buffer. 
## fxamacker/cbor [![](https://github.com/fxamacker/cbor/workflows/ci/badge.svg)](https://github.com/fxamacker/cbor/actions?query=workflow%3Aci) -[![](https://github.com/fxamacker/cbor/workflows/cover%20%E2%89%A596%25/badge.svg)](https://github.com/fxamacker/cbor/actions?query=workflow%3A%22cover+%E2%89%A596%25%22) +[![](https://github.com/fxamacker/cbor/workflows/cover%20%E2%89%A597%25/badge.svg)](https://github.com/fxamacker/cbor/actions?query=workflow%3A%22cover+%E2%89%A597%25%22) [![CodeQL](https://github.com/fxamacker/cbor/actions/workflows/codeql-analysis.yml/badge.svg)](https://github.com/fxamacker/cbor/actions/workflows/codeql-analysis.yml) [![](https://img.shields.io/badge/fuzzing-passing-44c010)](#fuzzing-and-code-coverage) [![Go Report Card](https://goreportcard.com/badge/github.com/fxamacker/cbor)](https://goreportcard.com/report/github.com/fxamacker/cbor) +[![](https://img.shields.io/ossf-scorecard/github.com/fxamacker/cbor?label=openssf%20scorecard)](https://github.com/fxamacker/cbor#fuzzing-and-code-coverage) `fxamacker/cbor` is a CBOR codec in full conformance with [IETF STD 94 (RFC 8949)](https://www.rfc-editor.org/info/std94). It also supports CBOR Sequences ([RFC 8742](https://www.rfc-editor.org/rfc/rfc8742.html)) and Extended Diagnostic Notation ([Appendix G of RFC 8610](https://www.rfc-editor.org/rfc/rfc8610.html#appendix-G)). Features include full support for CBOR tags, [Core Deterministic Encoding](https://www.rfc-editor.org/rfc/rfc8949.html#name-core-deterministic-encoding), duplicate map key detection, etc. +API is mostly same as `encoding/json`, plus interfaces that simplify concurrency and CBOR options. + Design balances trade-offs between security, speed, concurrency, encoded data size, usability, etc. -
Highlights

+

🔎  Highlights

__🚀  Speed__ @@ -38,7 +39,7 @@ Codec passed multiple confidential security assessments in 2022. No vulnerabili __🗜️  Data Size__ -Struct tags (`toarray`, `keyasint`, `omitempty`) automatically reduce size of encoded structs. Encoding optionally shrinks float64→32→16 when values fit. +Struct tag options (`toarray`, `keyasint`, `omitempty`, `omitzero`) and field tag "-" automatically reduce size of encoded structs. Encoding optionally shrinks float64→32→16 when values fit. __:jigsaw:  Usability__ @@ -58,164 +59,205 @@ Features include CBOR [extension points](https://www.rfc-editor.org/rfc/rfc8949. `fxamacker/cbor` has configurable limits, etc. that defend against malicious CBOR data. -By contrast, `encoding/gob` is [not designed to be hardened against adversarial inputs](https://pkg.go.dev/encoding/gob#hdr-Security). - -

Example decoding with encoding/gob 💥 fatal error (out of memory)

- -```Go -// Example of encoding/gob having "fatal error: runtime: out of memory" -// while decoding 181 bytes. -package main -import ( - "bytes" - "encoding/gob" - "encoding/hex" - "fmt" -) - -// Example data is from https://github.com/golang/go/issues/24446 -// (shortened to 181 bytes). -const data = "4dffb503010102303001ff30000109010130010800010130010800010130" + - "01ffb80001014a01ffb60001014b01ff860001013001ff860001013001ff" + - "860001013001ff860001013001ffb80000001eff850401010e3030303030" + - "30303030303030303001ff3000010c0104000016ffb70201010830303030" + - "3030303001ff3000010c000030ffb6040405fcff00303030303030303030" + - "303030303030303030303030303030303030303030303030303030303030" + - "30" - -type X struct { - J *X - K map[string]int -} - -func main() { - raw, _ := hex.DecodeString(data) - decoder := gob.NewDecoder(bytes.NewReader(raw)) - - var x X - decoder.Decode(&x) // fatal error: runtime: out of memory - fmt.Println("Decoding finished.") -} -``` - -


- -
- -`fxamacker/cbor` is fast at rejecting malformed CBOR data. E.g. attempts to -decode 10 bytes of malicious CBOR data to `[]byte` (with default settings): - -| Codec | Speed (ns/op) | Memory | Allocs | -| :---- | ------------: | -----: | -----: | -| fxamacker/cbor 2.5.0 | 44 ± 5% | 32 B/op | 2 allocs/op | -| ugorji/go 1.2.11 | 5353261 ± 4% | 67111321 B/op | 13 allocs/op | - -
Benchmark details

- -Latest comparison used: -- Input: `[]byte{0x9B, 0x00, 0x00, 0x42, 0xFA, 0x42, 0xFA, 0x42, 0xFA, 0x42}` -- go1.19.10, linux/amd64, i5-13600K (disabled all e-cores, DDR4 @2933) -- go test -bench=. -benchmem -count=20 - -#### Prior comparisons - -| Codec | Speed (ns/op) | Memory | Allocs | -| :---- | ------------: | -----: | -----: | -| fxamacker/cbor 2.5.0-beta2 | 44.33 ± 2% | 32 B/op | 2 allocs/op | -| fxamacker/cbor 0.1.0 - 2.4.0 | ~44.68 ± 6% | 32 B/op | 2 allocs/op | -| ugorji/go 1.2.10 | 5524792.50 ± 3% | 67110491 B/op | 12 allocs/op | -| ugorji/go 1.1.0 - 1.2.6 | 💥 runtime: | out of memory: | cannot allocate | - -- Input: `[]byte{0x9B, 0x00, 0x00, 0x42, 0xFA, 0x42, 0xFA, 0x42, 0xFA, 0x42}` -- go1.19.6, linux/amd64, i5-13600K (DDR4) -- go test -bench=. -benchmem -count=20 - -


- -
- -### Smaller Encodings with Struct Tags - -Struct tags (`toarray`, `keyasint`, `omitempty`) reduce encoded size of structs. - -
Example encoding 3-level nested Go struct to 1 byte CBOR

- -https://go.dev/play/p/YxwvfPdFQG2 - -```Go -// Example encoding nested struct (with omitempty tag) -// - encoding/json: 18 byte JSON -// - fxamacker/cbor: 1 byte CBOR -package main - -import ( - "encoding/hex" - "encoding/json" - "fmt" - - "github.com/fxamacker/cbor/v2" -) - -type GrandChild struct { - Quux int `json:",omitempty"` -} - -type Child struct { - Baz int `json:",omitempty"` - Qux GrandChild `json:",omitempty"` -} - -type Parent struct { - Foo Child `json:",omitempty"` - Bar int `json:",omitempty"` -} - -func cb() { - results, _ := cbor.Marshal(Parent{}) - fmt.Println("hex(CBOR): " + hex.EncodeToString(results)) - - text, _ := cbor.Diagnose(results) // Diagnostic Notation - fmt.Println("DN: " + text) -} - -func js() { - results, _ := json.Marshal(Parent{}) - fmt.Println("hex(JSON): " + hex.EncodeToString(results)) - - text := string(results) // JSON - fmt.Println("JSON: " + text) -} - -func main() { - cb() - fmt.Println("-------------") - js() -} -``` - -Output (DN is Diagnostic Notation): -``` -hex(CBOR): a0 -DN: {} -------------- -hex(JSON): 7b22466f6f223a7b22517578223a7b7d7d7d -JSON: {"Foo":{"Qux":{}}} -``` - -


- -
- -Example using different struct tags together: +Notably, `fxamacker/cbor` is fast at rejecting malformed CBOR data. + +> [!NOTE] +> Benchmarks rejecting 10 bytes of malicious CBOR data decoding to `[]byte`: +> +> | Codec | Speed (ns/op) | Memory | Allocs | +> | :---- | ------------: | -----: | -----: | +> | fxamacker/cbor 2.7.0 | 47 ± 7% | 32 B/op | 2 allocs/op | +> | ugorji/go 1.2.12 | 5878187 ± 3% | 67111556 B/op | 13 allocs/op | +> +> Faster hardware (overclocked DDR4 or DDR5) can reduce speed difference. +> +>
🔎  Benchmark details

+> +> Latest comparison for decoding CBOR data to Go `[]byte`: +> - Input: `[]byte{0x9B, 0x00, 0x00, 0x42, 0xFA, 0x42, 0xFA, 0x42, 0xFA, 0x42}` +> - go1.22.7, linux/amd64, i5-13600K (DDR4-2933, disabled e-cores) +> - go test -bench=. -benchmem -count=20 +> +> #### Prior comparisons +> +> | Codec | Speed (ns/op) | Memory | Allocs | +> | :---- | ------------: | -----: | -----: | +> | fxamacker/cbor 2.5.0-beta2 | 44.33 ± 2% | 32 B/op | 2 allocs/op | +> | fxamacker/cbor 0.1.0 - 2.4.0 | ~44.68 ± 6% | 32 B/op | 2 allocs/op | +> | ugorji/go 1.2.10 | 5524792.50 ± 3% | 67110491 B/op | 12 allocs/op | +> | ugorji/go 1.1.0 - 1.2.6 | 💥 runtime: | out of memory: | cannot allocate | +> +> - Input: `[]byte{0x9B, 0x00, 0x00, 0x42, 0xFA, 0x42, 0xFA, 0x42, 0xFA, 0x42}` +> - go1.19.6, linux/amd64, i5-13600K (DDR4) +> - go test -bench=. -benchmem -count=20 +> +>

+ +In contrast, some codecs can crash or use excessive resources while decoding bad data. + +> [!WARNING] +> Go's `encoding/gob` is [not designed to be hardened against adversarial inputs](https://pkg.go.dev/encoding/gob#hdr-Security). +> +>
🔎  gob fatal error (out of memory) 💥 decoding 181 bytes

+> +> ```Go +> // Example of encoding/gob having "fatal error: runtime: out of memory" +> // while decoding 181 bytes (all Go versions as of Dec. 8, 2024). +> package main +> import ( +> "bytes" +> "encoding/gob" +> "encoding/hex" +> "fmt" +> ) +> +> // Example data is from https://github.com/golang/go/issues/24446 +> // (shortened to 181 bytes). +> const data = "4dffb503010102303001ff30000109010130010800010130010800010130" + +> "01ffb80001014a01ffb60001014b01ff860001013001ff860001013001ff" + +> "860001013001ff860001013001ffb80000001eff850401010e3030303030" + +> "30303030303030303001ff3000010c0104000016ffb70201010830303030" + +> "3030303001ff3000010c000030ffb6040405fcff00303030303030303030" + +> "303030303030303030303030303030303030303030303030303030303030" + +> "30" +> +> type X struct { +> J *X +> K map[string]int +> } +> +> func main() { +> raw, _ := hex.DecodeString(data) +> decoder := gob.NewDecoder(bytes.NewReader(raw)) +> +> var x X +> decoder.Decode(&x) // fatal error: runtime: out of memory +> fmt.Println("Decoding finished.") +> } +> ``` +> +> +>

+ +### Smaller Encodings with Struct Tag Options + +Struct tags automatically reduce encoded size of structs and improve speed. + +We can write less code by using struct tag options: +- `toarray`: encode without field names (decode back to original struct) +- `keyasint`: encode field names as integers (decode back to original struct) +- `omitempty`: omit empty field when encoding +- `omitzero`: omit zero-value field when encoding + +As a special case, struct field tag "-" omits the field. + +NOTE: When a struct uses `toarray`, the encoder will ignore `omitempty` and `omitzero` to prevent position of encoded array elements from changing. This allows decoder to match encoded elements to their Go struct field. ![alt text](https://github.com/fxamacker/images/raw/master/cbor/v2.3.0/cbor_struct_tags_api.svg?sanitize=1 "CBOR API and Go Struct Tags") -API is mostly same as `encoding/json`, plus interfaces that simplify concurrency for CBOR options. +> [!NOTE] +> `fxamacker/cbor` can encode a 3-level nested Go struct to 1 byte! +> - `encoding/json`: 18 bytes of JSON +> - `fxamacker/cbor`: 1 byte of CBOR +> +>
🔎  Encoding 3-level nested Go struct with omitempty

+> +> https://go.dev/play/p/YxwvfPdFQG2 +> +> ```Go +> // Example encoding nested struct (with omitempty tag) +> // - encoding/json: 18 byte JSON +> // - fxamacker/cbor: 1 byte CBOR +> +> package main +> +> import ( +> "encoding/hex" +> "encoding/json" +> "fmt" +> +> "github.com/fxamacker/cbor/v2" +> ) +> +> type GrandChild struct { +> Quux int `json:",omitempty"` +> } +> +> type Child struct { +> Baz int `json:",omitempty"` +> Qux GrandChild `json:",omitempty"` +> } +> +> type Parent struct { +> Foo Child `json:",omitempty"` +> Bar int `json:",omitempty"` +> } +> +> func cb() { +> results, _ := cbor.Marshal(Parent{}) +> fmt.Println("hex(CBOR): " + hex.EncodeToString(results)) +> +> text, _ := cbor.Diagnose(results) // Diagnostic Notation +> fmt.Println("DN: " + text) +> } +> +> func js() { +> results, _ := json.Marshal(Parent{}) +> fmt.Println("hex(JSON): " + hex.EncodeToString(results)) +> +> text := string(results) // JSON +> fmt.Println("JSON: " + text) +> } +> +> func main() { +> cb() +> fmt.Println("-------------") +> js() +> } +> ``` +> +> Output (DN is Diagnostic Notation): +> ``` +> hex(CBOR): a0 +> DN: {} +> ------------- +> hex(JSON): 7b22466f6f223a7b22517578223a7b7d7d7d +> JSON: {"Foo":{"Qux":{}}} +> ``` +> +>

+ ## Quick Start __Install__: `go get github.com/fxamacker/cbor/v2` and `import "github.com/fxamacker/cbor/v2"`. +> [!TIP] +> +> Tinygo users can try beta/experimental branch [feature/cbor-tinygo-beta](https://github.com/fxamacker/cbor/tree/feature/cbor-tinygo-beta). +> +>
🔎  More about tinygo feature branch +> +> ### Tinygo +> +> Branch [feature/cbor-tinygo-beta](https://github.com/fxamacker/cbor/tree/feature/cbor-tinygo-beta) is based on fxamacker/cbor v2.7.0 and it can be compiled using tinygo v0.33 (also compiles with golang/go). +> +> It passes unit tests (with both go1.22 and tinygo v0.33) and is considered beta/experimental for tinygo. +> +> :warning: The `feature/cbor-tinygo-beta` branch does not get fuzz tested yet. +> +> Changes in this feature branch only affect tinygo compiled software. Summary of changes: +> - default `DecOptions.MaxNestedLevels` is reduced to 16 (was 32). User can specify higher limit but 24+ crashes tests when compiled with tinygo v0.33. +> - disabled decoding CBOR tag data to Go interface because tinygo v0.33 is missing needed feature. +> - encoding error message can be different when encoding function type. +> +> Related tinygo issues: +> - https://github.com/tinygo-org/tinygo/issues/4277 +> - https://github.com/tinygo-org/tinygo/issues/4458 +> +>
+ + ### Key Points This library can encode and decode CBOR (RFC 8949) and CBOR Sequences (RFC 8742). @@ -252,16 +294,17 @@ rest, err = cbor.UnmarshalFirst(b, &v) // decode []byte b to v // DiagnoseFirst translates first CBOR data item to text and returns remaining bytes. text, rest, err = cbor.DiagnoseFirst(b) // decode []byte b to Diagnostic Notation text -// NOTE: Unmarshal returns ExtraneousDataError if there are remaining bytes, -// but new funcs UnmarshalFirst and DiagnoseFirst do not. +// NOTE: Unmarshal() returns ExtraneousDataError if there are remaining bytes, but +// UnmarshalFirst() and DiagnoseFirst() allow trailing bytes. ``` -__IMPORTANT__: 👉 CBOR settings allow trade-offs between speed, security, encoding size, etc. - -- Different CBOR libraries may use different default settings. -- CBOR-based formats or protocols usually require specific settings. - -For example, WebAuthn uses "CTAP2 Canonical CBOR" which is available as a preset. +> [!IMPORTANT] +> CBOR settings allow trade-offs between speed, security, encoding size, etc. +> +> - Different CBOR libraries may use different default settings. +> - CBOR-based formats or protocols usually require specific settings. +> +> For example, WebAuthn uses "CTAP2 Canonical CBOR" which is available as a preset. ### Presets @@ -312,9 +355,63 @@ err = em.MarshalToBuffer(v, &buf) // encode v to provided buf ### Struct Tags -Struct tags (`toarray`, `keyasint`, `omitempty`) reduce encoded size of structs. +Struct tag options (`toarray`, `keyasint`, `omitempty`, `omitzero`) reduce encoded size of structs. + +As a special case, struct field tag "-" omits the field. + +
🔎  Example encoding with struct field tag "-"

+ +https://go.dev/play/p/aWEIFxd7InX + +```Go +// https://github.com/fxamacker/cbor/issues/652 +package main + +import ( + "encoding/json" + "fmt" + + "github.com/fxamacker/cbor/v2" +) + +// The `cbor:"-"` tag omits the Type field when encoding to CBOR. +type Entity struct { + _ struct{} `cbor:",toarray"` + ID uint64 `json:"id"` + Type string `cbor:"-" json:"typeOf"` + Name string `json:"name"` +} + +func main() { + entity := Entity{ + ID: 1, + Type: "int64", + Name: "Identifier", + } + + c, _ := cbor.Marshal(entity) + diag, _ := cbor.Diagnose(c) + fmt.Printf("CBOR in hex: %x\n", c) + fmt.Printf("CBOR in edn: %s\n", diag) + + j, _ := json.Marshal(entity) + fmt.Printf("JSON: %s\n", string(j)) + + fmt.Printf("JSON encoding is %d bytes\n", len(j)) + fmt.Printf("CBOR encoding is %d bytes\n", len(c)) + + // Output: + // CBOR in hex: 82016a4964656e746966696572 + // CBOR in edn: [1, "Identifier"] + // JSON: {"id":1,"typeOf":"int64","name":"Identifier"} + // JSON encoding is 45 bytes + // CBOR encoding is 13 bytes +} +``` + +

-
Example encoding 3-level nested Go struct to 1 byte CBOR

+

🔎  Example encoding 3-level nested Go struct to 1 byte CBOR

https://go.dev/play/p/YxwvfPdFQG2 @@ -382,13 +479,13 @@ JSON: {"Foo":{"Qux":{}}}

-
Example using several struct tags

+

🔎  Example using struct tag options

![alt text](https://github.com/fxamacker/images/raw/master/cbor/v2.3.0/cbor_struct_tags_api.svg?sanitize=1 "CBOR API and Go Struct Tags")

-Struct tags simplify use of CBOR-based protocols that require CBOR arrays or maps with integer keys. +Struct tag options simplify use of CBOR-based protocols that require CBOR arrays or maps with integer keys. ### CBOR Tags @@ -404,7 +501,7 @@ em, err := opts.EncModeWithSharedTags(ts) // mutable shared CBOR tags `TagSet` and modes using it are safe for concurrent use. Equivalent API is available for `DecMode`. -
Example using TagSet and TagOptions

+

🔎  Example using TagSet and TagOptions

```go // Use signedCWT struct defined in "Decoding CWT" example. @@ -430,16 +527,149 @@ if err := dm.Unmarshal(data, &v); err != nil { em, _ := cbor.EncOptions{}.EncModeWithTags(tags) // Marshal signedCWT with tag number. -if data, err := cbor.Marshal(v); err != nil { +if data, err := em.Marshal(v); err != nil { return err } ```

+👉 `fxamacker/cbor` allows user apps to use almost any current or future CBOR tag number by implementing `cbor.Marshaler` and `cbor.Unmarshaler` interfaces. + +Basically, `MarshalCBOR` and `UnmarshalCBOR` functions can be implemented by user apps and those functions will automatically be called by this CBOR codec's `Marshal`, `Unmarshal`, etc. + +The following [example](https://github.com/fxamacker/cbor/blob/master/example_embedded_json_tag_for_cbor_test.go) shows how to encode and decode a tagged CBOR data item with tag number 262. The tag content is a JSON object "embedded" as a CBOR byte string (major type 2). + +
🔎  Example using Embedded JSON Tag for CBOR (tag 262) + +```go +// https://github.com/fxamacker/cbor/issues/657 + +package cbor_test + +// NOTE: RFC 8949 does not mention tag number 262. IANA assigned +// CBOR tag number 262 as "Embedded JSON Object" specified by the +// document Embedded JSON Tag for CBOR: +// +// "Tag 262 can be applied to a byte string (major type 2) to indicate +// that the byte string is a JSON Object. The length of the byte string +// indicates the content." +// +// For more info, see Embedded JSON Tag for CBOR at: +// https://github.com/toravir/CBOR-Tag-Specs/blob/master/embeddedJSON.md + +import ( + "bytes" + "encoding/json" + "fmt" + + "github.com/fxamacker/cbor/v2" +) + +// cborTagNumForEmbeddedJSON is the CBOR tag number 262. +const cborTagNumForEmbeddedJSON = 262 + +// EmbeddedJSON represents a Go value to be encoded as a tagged CBOR data item +// with tag number 262 and the tag content is a JSON object "embedded" as a +// CBOR byte string (major type 2). +type EmbeddedJSON struct { + any +} + +func NewEmbeddedJSON(val any) EmbeddedJSON { + return EmbeddedJSON{val} +} + +// MarshalCBOR encodes EmbeddedJSON to a tagged CBOR data item with the +// tag number 262 and the tag content is a JSON object that is +// "embedded" as a CBOR byte string. +func (v EmbeddedJSON) MarshalCBOR() ([]byte, error) { + // Encode v to JSON object. + data, err := json.Marshal(v) + if err != nil { + return nil, err + } + + // Create cbor.Tag representing a tagged CBOR data item. + tag := cbor.Tag{ + Number: cborTagNumForEmbeddedJSON, + Content: data, + } + + // Marshal to a tagged CBOR data item. + return cbor.Marshal(tag) +} + +// UnmarshalCBOR decodes a tagged CBOR data item to EmbeddedJSON. +// The byte slice provided to this function must contain a single +// tagged CBOR data item with the tag number 262 and tag content +// must be a JSON object "embedded" as a CBOR byte string. 
+func (v *EmbeddedJSON) UnmarshalCBOR(b []byte) error { + // Unmarshal tagged CBOR data item. + var tag cbor.Tag + if err := cbor.Unmarshal(b, &tag); err != nil { + return err + } + + // Check tag number. + if tag.Number != cborTagNumForEmbeddedJSON { + return fmt.Errorf("got tag number %d, expect tag number %d", tag.Number, cborTagNumForEmbeddedJSON) + } + + // Check tag content. + jsonData, isByteString := tag.Content.([]byte) + if !isByteString { + return fmt.Errorf("got tag content type %T, expect tag content []byte", tag.Content) + } + + // Unmarshal JSON object. + return json.Unmarshal(jsonData, v) +} + +// MarshalJSON encodes EmbeddedJSON to a JSON object. +func (v EmbeddedJSON) MarshalJSON() ([]byte, error) { + return json.Marshal(v.any) +} + +// UnmarshalJSON decodes a JSON object. +func (v *EmbeddedJSON) UnmarshalJSON(b []byte) error { + dec := json.NewDecoder(bytes.NewReader(b)) + dec.UseNumber() + return dec.Decode(&v.any) +} + +func Example_embeddedJSONTagForCBOR() { + value := NewEmbeddedJSON(map[string]any{ + "name": "gopher", + "id": json.Number("42"), + }) + + data, err := cbor.Marshal(value) + if err != nil { + panic(err) + } + + fmt.Printf("cbor: %x\n", data) + + var v EmbeddedJSON + err = cbor.Unmarshal(data, &v) + if err != nil { + panic(err) + } + + fmt.Printf("%+v\n", v.any) + for k, v := range v.any.(map[string]any) { + fmt.Printf(" %s: %v (%T)\n", k, v, v) + } +} +``` + +
+ + ### Functions and Interfaces -
Functions and interfaces at a glance

+

🔎  Functions and interfaces at a glance

Common functions with same API as `encoding/json`: - `Marshal`, `Unmarshal` @@ -453,7 +683,7 @@ because RFC 8949 treats CBOR data item with remaining bytes as malformed. Other useful functions: - `Diagnose`, `DiagnoseFirst` produce human-readable [Extended Diagnostic Notation](https://www.rfc-editor.org/rfc/rfc8610.html#appendix-G) from CBOR data. - `UnmarshalFirst` decodes first CBOR data item and return any remaining bytes. -- `Wellformed` returns true if the the CBOR data item is well-formed. +- `Wellformed` returns true if the CBOR data item is well-formed. Interfaces identical or comparable to Go `encoding` packages include: `Marshaler`, `Unmarshaler`, `BinaryMarshaler`, and `BinaryUnmarshaler`. @@ -472,15 +702,28 @@ Default limits may need to be increased for systems handling very large data (e. ## Status -v2.7.0 (June 23, 2024) adds features and improvements that help large projects (e.g. Kubernetes) use CBOR as an alternative to JSON and Protocol Buffers. Other improvements include speedups, improved memory use, bug fixes, new serialization options, etc. It passed fuzz tests (5+ billion executions) and is production quality. +[v2.9.0](https://github.com/fxamacker/cbor/releases/tag/v2.9.0) (Jul 13, 2025) improved interoperability/transcoding between CBOR & JSON, refactored tests, and improved docs. +- Add opt-in support for `encoding.TextMarshaler` and `encoding.TextUnmarshaler` to encode and decode from CBOR text string. +- Add opt-in support for `json.Marshaler` and `json.Unmarshaler` via user-provided transcoding function. +- Update docs for TimeMode, Tag, RawTag, and add example for Embedded JSON Tag for CBOR. + +v2.9.0 passed fuzz tests and is production quality. + +The minimum version of Go required to build: +- v2.8.0 and newer releases require go 1.20+. +- v2.7.1 and older releases require go 1.17+. For more details, see [release notes](https://github.com/fxamacker/cbor/releases). 
-### Prior Release +### Prior Releases + +[v2.8.0](https://github.com/fxamacker/cbor/releases/tag/v2.8.0) (March 30, 2025) is a small release primarily to add `omitzero` option to struct field tags and fix bugs. It passed fuzz tests (billions of executions) and is production quality. + +[v2.7.0](https://github.com/fxamacker/cbor/releases/tag/v2.7.0) (June 23, 2024) adds features and improvements that help large projects (e.g. Kubernetes) use CBOR as an alternative to JSON and Protocol Buffers. Other improvements include speedups, improved memory use, bug fixes, new serialization options, etc. It passed fuzz tests (5+ billion executions) and is production quality. [v2.6.0](https://github.com/fxamacker/cbor/releases/tag/v2.6.0) (February 2024) adds important new features, optimizations, and bug fixes. It is especially useful to systems that need to convert data between CBOR and JSON. New options and optimizations improve handling of bignum, integers, maps, and strings. -v2.5.0 was released on Sunday, August 13, 2023 with new features and important bug fixes. It is fuzz tested and production quality after extended beta [v2.5.0-beta](https://github.com/fxamacker/cbor/releases/tag/v2.5.0-beta) (Dec 2022) -> [v2.5.0](https://github.com/fxamacker/cbor/releases/tag/v2.5.0) (Aug 2023). +[v2.5.0](https://github.com/fxamacker/cbor/releases/tag/v2.5.0) was released on Sunday, August 13, 2023 with new features and important bug fixes. It is fuzz tested and production quality after extended beta [v2.5.0-beta](https://github.com/fxamacker/cbor/releases/tag/v2.5.0-beta) (Dec 2022) -> [v2.5.0](https://github.com/fxamacker/cbor/releases/tag/v2.5.0) (Aug 2023). __IMPORTANT__: 👉 Before upgrading from v2.4 or older release, please read the notable changes highlighted in the release notes. v2.5.0 is a large release with bug fixes to error handling for extraneous data in `Unmarshal`, etc. that should be reviewed before upgrading. 
@@ -489,7 +732,7 @@ See [v2.5.0 release notes](https://github.com/fxamacker/cbor/releases/tag/v2.5.0 See ["Version and API Changes"](https://github.com/fxamacker/cbor#versions-and-api-changes) section for more info about version numbering, etc. |" + processingInstruction = "[<][?].*?[?][>]" + singleQuotedValue = "'[^']*'" + tagName = "[A-Za-z][A-Za-z0-9-]*" + unquotedValue = "[^\"'=<>`\\x00-\\x20]+" +) + +// HTMLRendererParameters is a collection of supplementary parameters tweaking +// the behavior of various parts of HTML renderer. +type HTMLRendererParameters struct { + // Prepend this text to each relative URL. + AbsolutePrefix string + // Add this text to each footnote anchor, to ensure uniqueness. + FootnoteAnchorPrefix string + // Show this text inside the tag for a footnote return link, if the + // HTML_FOOTNOTE_RETURN_LINKS flag is enabled. If blank, the string + // [return] is used. + FootnoteReturnLinkContents string + // If set, add this text to the front of each Heading ID, to ensure + // uniqueness. + HeadingIDPrefix string + // If set, add this text to the back of each Heading ID, to ensure uniqueness. + HeadingIDSuffix string + // Increase heading levels: if the offset is 1,

becomes

etc. + // Negative offset is also valid. + // Resulting levels are clipped between 1 and 6. + HeadingLevelOffset int + + Title string // Document title (used if CompletePage is set) + CSS string // Optional CSS file URL (used if CompletePage is set) + Icon string // Optional icon file URL (used if CompletePage is set) + + Flags HTMLFlags // Flags allow customizing this renderer's behavior +} + +// HTMLRenderer is a type that implements the Renderer interface for HTML output. +// +// Do not create this directly, instead use the NewHTMLRenderer function. +type HTMLRenderer struct { + HTMLRendererParameters + + closeTag string // how to end singleton tags: either " />" or ">" + + // Track heading IDs to prevent ID collision in a single generation. + headingIDs map[string]int + + lastOutputLen int + disableTags int + + sr *SPRenderer +} + +const ( + xhtmlClose = " />" + htmlClose = ">" +) + +// NewHTMLRenderer creates and configures an HTMLRenderer object, which +// satisfies the Renderer interface. +func NewHTMLRenderer(params HTMLRendererParameters) *HTMLRenderer { + // configure the rendering engine + closeTag := htmlClose + if params.Flags&UseXHTML != 0 { + closeTag = xhtmlClose + } + + if params.FootnoteReturnLinkContents == "" { + // U+FE0E is VARIATION SELECTOR-15. + // It suppresses automatic emoji presentation of the preceding + // U+21A9 LEFTWARDS ARROW WITH HOOK on iOS and iPadOS. 
+ params.FootnoteReturnLinkContents = "↩\ufe0e" + } + + return &HTMLRenderer{ + HTMLRendererParameters: params, + + closeTag: closeTag, + headingIDs: make(map[string]int), + + sr: NewSmartypantsRenderer(params.Flags), + } +} + +func isHTMLTag(tag []byte, tagname string) bool { + found, _ := findHTMLTagPos(tag, tagname) + return found +} + +// Look for a character, but ignore it when it's in any kind of quotes, it +// might be JavaScript +func skipUntilCharIgnoreQuotes(html []byte, start int, char byte) int { + inSingleQuote := false + inDoubleQuote := false + inGraveQuote := false + i := start + for i < len(html) { + switch { + case html[i] == char && !inSingleQuote && !inDoubleQuote && !inGraveQuote: + return i + case html[i] == '\'': + inSingleQuote = !inSingleQuote + case html[i] == '"': + inDoubleQuote = !inDoubleQuote + case html[i] == '`': + inGraveQuote = !inGraveQuote + } + i++ + } + return start +} + +func findHTMLTagPos(tag []byte, tagname string) (bool, int) { + i := 0 + if i < len(tag) && tag[0] != '<' { + return false, -1 + } + i++ + i = skipSpace(tag, i) + + if i < len(tag) && tag[i] == '/' { + i++ + } + + i = skipSpace(tag, i) + j := 0 + for ; i < len(tag); i, j = i+1, j+1 { + if j >= len(tagname) { + break + } + + if strings.ToLower(string(tag[i]))[0] != tagname[j] { + return false, -1 + } + } + + if i == len(tag) { + return false, -1 + } + + rightAngle := skipUntilCharIgnoreQuotes(tag, i, '>') + if rightAngle >= i { + return true, rightAngle + } + + return false, -1 +} + +func skipSpace(tag []byte, i int) int { + for i < len(tag) && isspace(tag[i]) { + i++ + } + return i +} + +func isRelativeLink(link []byte) (yes bool) { + // a tag begin with '#' + if link[0] == '#' { + return true + } + + // link begin with '/' but not '//', the second maybe a protocol relative link + if len(link) >= 2 && link[0] == '/' && link[1] != '/' { + return true + } + + // only the root '/' + if len(link) == 1 && link[0] == '/' { + return true + } + + // current directory 
: begin with "./" + if bytes.HasPrefix(link, []byte("./")) { + return true + } + + // parent directory : begin with "../" + if bytes.HasPrefix(link, []byte("../")) { + return true + } + + return false +} + +func (r *HTMLRenderer) ensureUniqueHeadingID(id string) string { + for count, found := r.headingIDs[id]; found; count, found = r.headingIDs[id] { + tmp := fmt.Sprintf("%s-%d", id, count+1) + + if _, tmpFound := r.headingIDs[tmp]; !tmpFound { + r.headingIDs[id] = count + 1 + id = tmp + } else { + id = id + "-1" + } + } + + if _, found := r.headingIDs[id]; !found { + r.headingIDs[id] = 0 + } + + return id +} + +func (r *HTMLRenderer) addAbsPrefix(link []byte) []byte { + if r.AbsolutePrefix != "" && isRelativeLink(link) && link[0] != '.' { + newDest := r.AbsolutePrefix + if link[0] != '/' { + newDest += "/" + } + newDest += string(link) + return []byte(newDest) + } + return link +} + +func appendLinkAttrs(attrs []string, flags HTMLFlags, link []byte) []string { + if isRelativeLink(link) { + return attrs + } + val := []string{} + if flags&NofollowLinks != 0 { + val = append(val, "nofollow") + } + if flags&NoreferrerLinks != 0 { + val = append(val, "noreferrer") + } + if flags&NoopenerLinks != 0 { + val = append(val, "noopener") + } + if flags&HrefTargetBlank != 0 { + attrs = append(attrs, "target=\"_blank\"") + } + if len(val) == 0 { + return attrs + } + attr := fmt.Sprintf("rel=%q", strings.Join(val, " ")) + return append(attrs, attr) +} + +func isMailto(link []byte) bool { + return bytes.HasPrefix(link, []byte("mailto:")) +} + +func needSkipLink(flags HTMLFlags, dest []byte) bool { + if flags&SkipLinks != 0 { + return true + } + return flags&Safelink != 0 && !isSafeLink(dest) && !isMailto(dest) +} + +func isSmartypantable(node *Node) bool { + pt := node.Parent.Type + return pt != Link && pt != CodeBlock && pt != Code +} + +func appendLanguageAttr(attrs []string, info []byte) []string { + if len(info) == 0 { + return attrs + } + endOfLang := bytes.IndexAny(info, 
"\t ") + if endOfLang < 0 { + endOfLang = len(info) + } + return append(attrs, fmt.Sprintf("class=\"language-%s\"", info[:endOfLang])) +} + +func (r *HTMLRenderer) tag(w io.Writer, name []byte, attrs []string) { + w.Write(name) + if len(attrs) > 0 { + w.Write(spaceBytes) + w.Write([]byte(strings.Join(attrs, " "))) + } + w.Write(gtBytes) + r.lastOutputLen = 1 +} + +func footnoteRef(prefix string, node *Node) []byte { + urlFrag := prefix + string(slugify(node.Destination)) + anchor := fmt.Sprintf(`%d`, urlFrag, node.NoteID) + return []byte(fmt.Sprintf(`%s`, urlFrag, anchor)) +} + +func footnoteItem(prefix string, slug []byte) []byte { + return []byte(fmt.Sprintf(`
  • `, prefix, slug)) +} + +func footnoteReturnLink(prefix, returnLink string, slug []byte) []byte { + const format = ` %s` + return []byte(fmt.Sprintf(format, prefix, slug, returnLink)) +} + +func itemOpenCR(node *Node) bool { + if node.Prev == nil { + return false + } + ld := node.Parent.ListData + return !ld.Tight && ld.ListFlags&ListTypeDefinition == 0 +} + +func skipParagraphTags(node *Node) bool { + grandparent := node.Parent.Parent + if grandparent == nil || grandparent.Type != List { + return false + } + tightOrTerm := grandparent.Tight || node.Parent.ListFlags&ListTypeTerm != 0 + return grandparent.Type == List && tightOrTerm +} + +func cellAlignment(align CellAlignFlags) string { + switch align { + case TableAlignmentLeft: + return "left" + case TableAlignmentRight: + return "right" + case TableAlignmentCenter: + return "center" + default: + return "" + } +} + +func (r *HTMLRenderer) out(w io.Writer, text []byte) { + if r.disableTags > 0 { + w.Write(htmlTagRe.ReplaceAll(text, []byte{})) + } else { + w.Write(text) + } + r.lastOutputLen = len(text) +} + +func (r *HTMLRenderer) cr(w io.Writer) { + if r.lastOutputLen > 0 { + r.out(w, nlBytes) + } +} + +var ( + nlBytes = []byte{'\n'} + gtBytes = []byte{'>'} + spaceBytes = []byte{' '} +) + +var ( + brTag = []byte("
    ") + brXHTMLTag = []byte("
    ") + emTag = []byte("") + emCloseTag = []byte("") + strongTag = []byte("") + strongCloseTag = []byte("") + delTag = []byte("") + delCloseTag = []byte("") + ttTag = []byte("") + ttCloseTag = []byte("") + aTag = []byte("") + preTag = []byte("
    ")
    +	preCloseTag        = []byte("
    ") + codeTag = []byte("") + codeCloseTag = []byte("") + pTag = []byte("

    ") + pCloseTag = []byte("

    ") + blockquoteTag = []byte("
    ") + blockquoteCloseTag = []byte("
    ") + hrTag = []byte("
    ") + hrXHTMLTag = []byte("
    ") + ulTag = []byte("
      ") + ulCloseTag = []byte("
    ") + olTag = []byte("
      ") + olCloseTag = []byte("
    ") + dlTag = []byte("
    ") + dlCloseTag = []byte("
    ") + liTag = []byte("
  • ") + liCloseTag = []byte("
  • ") + ddTag = []byte("
    ") + ddCloseTag = []byte("
    ") + dtTag = []byte("
    ") + dtCloseTag = []byte("
    ") + tableTag = []byte("") + tableCloseTag = []byte("
    ") + tdTag = []byte("") + thTag = []byte("") + theadTag = []byte("") + theadCloseTag = []byte("") + tbodyTag = []byte("") + tbodyCloseTag = []byte("") + trTag = []byte("") + trCloseTag = []byte("") + h1Tag = []byte("") + h2Tag = []byte("") + h3Tag = []byte("") + h4Tag = []byte("") + h5Tag = []byte("") + h6Tag = []byte("") + + footnotesDivBytes = []byte("\n
    \n\n") + footnotesCloseDivBytes = []byte("\n
    \n") +) + +func headingTagsFromLevel(level int) ([]byte, []byte) { + if level <= 1 { + return h1Tag, h1CloseTag + } + switch level { + case 2: + return h2Tag, h2CloseTag + case 3: + return h3Tag, h3CloseTag + case 4: + return h4Tag, h4CloseTag + case 5: + return h5Tag, h5CloseTag + } + return h6Tag, h6CloseTag +} + +func (r *HTMLRenderer) outHRTag(w io.Writer) { + if r.Flags&UseXHTML == 0 { + r.out(w, hrTag) + } else { + r.out(w, hrXHTMLTag) + } +} + +// RenderNode is a default renderer of a single node of a syntax tree. For +// block nodes it will be called twice: first time with entering=true, second +// time with entering=false, so that it could know when it's working on an open +// tag and when on close. It writes the result to w. +// +// The return value is a way to tell the calling walker to adjust its walk +// pattern: e.g. it can terminate the traversal by returning Terminate. Or it +// can ask the walker to skip a subtree of this node by returning SkipChildren. +// The typical behavior is to return GoToNext, which asks for the usual +// traversal to the next node. 
+func (r *HTMLRenderer) RenderNode(w io.Writer, node *Node, entering bool) WalkStatus { + attrs := []string{} + switch node.Type { + case Text: + if r.Flags&Smartypants != 0 { + var tmp bytes.Buffer + escapeHTML(&tmp, node.Literal) + r.sr.Process(w, tmp.Bytes()) + } else { + if node.Parent.Type == Link { + escLink(w, node.Literal) + } else { + escapeHTML(w, node.Literal) + } + } + case Softbreak: + r.cr(w) + // TODO: make it configurable via out(renderer.softbreak) + case Hardbreak: + if r.Flags&UseXHTML == 0 { + r.out(w, brTag) + } else { + r.out(w, brXHTMLTag) + } + r.cr(w) + case Emph: + if entering { + r.out(w, emTag) + } else { + r.out(w, emCloseTag) + } + case Strong: + if entering { + r.out(w, strongTag) + } else { + r.out(w, strongCloseTag) + } + case Del: + if entering { + r.out(w, delTag) + } else { + r.out(w, delCloseTag) + } + case HTMLSpan: + if r.Flags&SkipHTML != 0 { + break + } + r.out(w, node.Literal) + case Link: + // mark it but don't link it if it is not a safe link: no smartypants + dest := node.LinkData.Destination + if needSkipLink(r.Flags, dest) { + if entering { + r.out(w, ttTag) + } else { + r.out(w, ttCloseTag) + } + } else { + if entering { + dest = r.addAbsPrefix(dest) + var hrefBuf bytes.Buffer + hrefBuf.WriteString("href=\"") + escLink(&hrefBuf, dest) + hrefBuf.WriteByte('"') + attrs = append(attrs, hrefBuf.String()) + if node.NoteID != 0 { + r.out(w, footnoteRef(r.FootnoteAnchorPrefix, node)) + break + } + attrs = appendLinkAttrs(attrs, r.Flags, dest) + if len(node.LinkData.Title) > 0 { + var titleBuff bytes.Buffer + titleBuff.WriteString("title=\"") + escapeHTML(&titleBuff, node.LinkData.Title) + titleBuff.WriteByte('"') + attrs = append(attrs, titleBuff.String()) + } + r.tag(w, aTag, attrs) + } else { + if node.NoteID != 0 { + break + } + r.out(w, aCloseTag) + } + } + case Image: + if r.Flags&SkipImages != 0 { + return SkipChildren + } + if entering { + dest := node.LinkData.Destination + dest = r.addAbsPrefix(dest) + if 
r.disableTags == 0 { + //if options.safe && potentiallyUnsafe(dest) { + //out(w, ``)
+				//} else {
+				r.out(w, []byte(`<img src=`)) + } + } + case Code: + r.out(w, codeTag) + escapeAllHTML(w, node.Literal) + r.out(w, codeCloseTag) + case Document: + break + case Paragraph: + if skipParagraphTags(node) { + break + } + if entering { + // TODO: untangle this clusterfuck about when the newlines need + // to be added and when not. + if node.Prev != nil { + switch node.Prev.Type { + case HTMLBlock, List, Paragraph, Heading, CodeBlock, BlockQuote, HorizontalRule: + r.cr(w) + } + } + if node.Parent.Type == BlockQuote && node.Prev == nil { + r.cr(w) + } + r.out(w, pTag) + } else { + r.out(w, pCloseTag) + if !(node.Parent.Type == Item && node.Next == nil) { + r.cr(w) + } + } + case BlockQuote: + if entering { + r.cr(w) + r.out(w, blockquoteTag) + } else { + r.out(w, blockquoteCloseTag) + r.cr(w) + } + case HTMLBlock: + if r.Flags&SkipHTML != 0 { + break + } + r.cr(w) + r.out(w, node.Literal) + r.cr(w) + case Heading: + headingLevel := r.HTMLRendererParameters.HeadingLevelOffset + node.Level + openTag, closeTag := headingTagsFromLevel(headingLevel) + if entering { + if node.IsTitleblock { + attrs = append(attrs, `class="title"`) + } + if node.HeadingID != "" { + id := r.ensureUniqueHeadingID(node.HeadingID) + if r.HeadingIDPrefix != "" { + id = r.HeadingIDPrefix + id + } + if r.HeadingIDSuffix != "" { + id = id + r.HeadingIDSuffix + } + attrs = append(attrs, fmt.Sprintf(`id="%s"`, id)) + } + r.cr(w) + r.tag(w, openTag, attrs) + } else { + r.out(w, closeTag) + if !(node.Parent.Type == Item && node.Next == nil) { + r.cr(w) + } + } + case HorizontalRule: + r.cr(w) + r.outHRTag(w) + r.cr(w) + case List: + openTag := ulTag + closeTag := ulCloseTag + if node.ListFlags&ListTypeOrdered != 0 { + openTag = olTag + closeTag = olCloseTag + } + if node.ListFlags&ListTypeDefinition != 0 { + openTag = dlTag + closeTag = dlCloseTag + } + if entering { + if node.IsFootnotesList { + r.out(w, footnotesDivBytes) + r.outHRTag(w) + r.cr(w) + } + r.cr(w) + if node.Parent.Type == Item && 
node.Parent.Parent.Tight { + r.cr(w) + } + r.tag(w, openTag[:len(openTag)-1], attrs) + r.cr(w) + } else { + r.out(w, closeTag) + //cr(w) + //if node.parent.Type != Item { + // cr(w) + //} + if node.Parent.Type == Item && node.Next != nil { + r.cr(w) + } + if node.Parent.Type == Document || node.Parent.Type == BlockQuote { + r.cr(w) + } + if node.IsFootnotesList { + r.out(w, footnotesCloseDivBytes) + } + } + case Item: + openTag := liTag + closeTag := liCloseTag + if node.ListFlags&ListTypeDefinition != 0 { + openTag = ddTag + closeTag = ddCloseTag + } + if node.ListFlags&ListTypeTerm != 0 { + openTag = dtTag + closeTag = dtCloseTag + } + if entering { + if itemOpenCR(node) { + r.cr(w) + } + if node.ListData.RefLink != nil { + slug := slugify(node.ListData.RefLink) + r.out(w, footnoteItem(r.FootnoteAnchorPrefix, slug)) + break + } + r.out(w, openTag) + } else { + if node.ListData.RefLink != nil { + slug := slugify(node.ListData.RefLink) + if r.Flags&FootnoteReturnLinks != 0 { + r.out(w, footnoteReturnLink(r.FootnoteAnchorPrefix, r.FootnoteReturnLinkContents, slug)) + } + } + r.out(w, closeTag) + r.cr(w) + } + case CodeBlock: + attrs = appendLanguageAttr(attrs, node.Info) + r.cr(w) + r.out(w, preTag) + r.tag(w, codeTag[:len(codeTag)-1], attrs) + escapeAllHTML(w, node.Literal) + r.out(w, codeCloseTag) + r.out(w, preCloseTag) + if node.Parent.Type != Item { + r.cr(w) + } + case Table: + if entering { + r.cr(w) + r.out(w, tableTag) + } else { + r.out(w, tableCloseTag) + r.cr(w) + } + case TableCell: + openTag := tdTag + closeTag := tdCloseTag + if node.IsHeader { + openTag = thTag + closeTag = thCloseTag + } + if entering { + align := cellAlignment(node.Align) + if align != "" { + attrs = append(attrs, fmt.Sprintf(`align="%s"`, align)) + } + if node.Prev == nil { + r.cr(w) + } + r.tag(w, openTag, attrs) + } else { + r.out(w, closeTag) + r.cr(w) + } + case TableHead: + if entering { + r.cr(w) + r.out(w, theadTag) + } else { + r.out(w, theadCloseTag) + r.cr(w) + } + case 
TableBody: + if entering { + r.cr(w) + r.out(w, tbodyTag) + // XXX: this is to adhere to a rather silly test. Should fix test. + if node.FirstChild == nil { + r.cr(w) + } + } else { + r.out(w, tbodyCloseTag) + r.cr(w) + } + case TableRow: + if entering { + r.cr(w) + r.out(w, trTag) + } else { + r.out(w, trCloseTag) + r.cr(w) + } + default: + panic("Unknown node type " + node.Type.String()) + } + return GoToNext +} + +// RenderHeader writes HTML document preamble and TOC if requested. +func (r *HTMLRenderer) RenderHeader(w io.Writer, ast *Node) { + r.writeDocumentHeader(w) + if r.Flags&TOC != 0 { + r.writeTOC(w, ast) + } +} + +// RenderFooter writes HTML document footer. +func (r *HTMLRenderer) RenderFooter(w io.Writer, ast *Node) { + if r.Flags&CompletePage == 0 { + return + } + io.WriteString(w, "\n\n\n") +} + +func (r *HTMLRenderer) writeDocumentHeader(w io.Writer) { + if r.Flags&CompletePage == 0 { + return + } + ending := "" + if r.Flags&UseXHTML != 0 { + io.WriteString(w, "\n") + io.WriteString(w, "\n") + ending = " /" + } else { + io.WriteString(w, "\n") + io.WriteString(w, "\n") + } + io.WriteString(w, "\n") + io.WriteString(w, " ") + if r.Flags&Smartypants != 0 { + r.sr.Process(w, []byte(r.Title)) + } else { + escapeHTML(w, []byte(r.Title)) + } + io.WriteString(w, "\n") + io.WriteString(w, " \n") + io.WriteString(w, " \n") + if r.CSS != "" { + io.WriteString(w, " \n") + } + if r.Icon != "" { + io.WriteString(w, " \n") + } + io.WriteString(w, "\n") + io.WriteString(w, "\n\n") +} + +func (r *HTMLRenderer) writeTOC(w io.Writer, ast *Node) { + buf := bytes.Buffer{} + + inHeading := false + tocLevel := 0 + headingCount := 0 + + ast.Walk(func(node *Node, entering bool) WalkStatus { + if node.Type == Heading && !node.HeadingData.IsTitleblock { + inHeading = entering + if entering { + node.HeadingID = fmt.Sprintf("toc_%d", headingCount) + if node.Level == tocLevel { + buf.WriteString("\n\n
  • ") + } else if node.Level < tocLevel { + for node.Level < tocLevel { + tocLevel-- + buf.WriteString("
  • \n") + } + buf.WriteString("\n\n
  • ") + } else { + for node.Level > tocLevel { + tocLevel++ + buf.WriteString("\n") + } + + if buf.Len() > 0 { + io.WriteString(w, "\n") + } + r.lastOutputLen = buf.Len() +} diff --git a/vendor/github.com/russross/blackfriday/v2/inline.go b/vendor/github.com/russross/blackfriday/v2/inline.go new file mode 100644 index 000000000..d45bd9417 --- /dev/null +++ b/vendor/github.com/russross/blackfriday/v2/inline.go @@ -0,0 +1,1228 @@ +// +// Blackfriday Markdown Processor +// Available at http://github.com/russross/blackfriday +// +// Copyright © 2011 Russ Ross . +// Distributed under the Simplified BSD License. +// See README.md for details. +// + +// +// Functions to parse inline elements. +// + +package blackfriday + +import ( + "bytes" + "regexp" + "strconv" +) + +var ( + urlRe = `((https?|ftp):\/\/|\/)[-A-Za-z0-9+&@#\/%?=~_|!:,.;\(\)]+` + anchorRe = regexp.MustCompile(`^(]+")?\s?>` + urlRe + `<\/a>)`) + + // https://www.w3.org/TR/html5/syntax.html#character-references + // highest unicode code point in 17 planes (2^20): 1,114,112d = + // 7 dec digits or 6 hex digits + // named entity references can be 2-31 characters with stuff like < + // at one end and ∳ at the other. There + // are also sometimes numbers at the end, although this isn't inherent + // in the specification; there are never numbers anywhere else in + // current character references, though; see ¾ and ▒, etc. 
+ // https://www.w3.org/TR/html5/syntax.html#named-character-references + // + // entity := "&" (named group | number ref) ";" + // named group := [a-zA-Z]{2,31}[0-9]{0,2} + // number ref := "#" (dec ref | hex ref) + // dec ref := [0-9]{1,7} + // hex ref := ("x" | "X") [0-9a-fA-F]{1,6} + htmlEntityRe = regexp.MustCompile(`&([a-zA-Z]{2,31}[0-9]{0,2}|#([0-9]{1,7}|[xX][0-9a-fA-F]{1,6}));`) +) + +// Functions to parse text within a block +// Each function returns the number of chars taken care of +// data is the complete block being rendered +// offset is the number of valid chars before the current cursor + +func (p *Markdown) inline(currBlock *Node, data []byte) { + // handlers might call us recursively: enforce a maximum depth + if p.nesting >= p.maxNesting || len(data) == 0 { + return + } + p.nesting++ + beg, end := 0, 0 + for end < len(data) { + handler := p.inlineCallback[data[end]] + if handler != nil { + if consumed, node := handler(p, data, end); consumed == 0 { + // No action from the callback. + end++ + } else { + // Copy inactive chars into the output. + currBlock.AppendChild(text(data[beg:end])) + if node != nil { + currBlock.AppendChild(node) + } + // Skip past whatever the callback used. 
+ beg = end + consumed + end = beg + } + } else { + end++ + } + } + if beg < len(data) { + if data[end-1] == '\n' { + end-- + } + currBlock.AppendChild(text(data[beg:end])) + } + p.nesting-- +} + +// single and double emphasis parsing +func emphasis(p *Markdown, data []byte, offset int) (int, *Node) { + data = data[offset:] + c := data[0] + + if len(data) > 2 && data[1] != c { + // whitespace cannot follow an opening emphasis; + // strikethrough only takes two characters '~~' + if c == '~' || isspace(data[1]) { + return 0, nil + } + ret, node := helperEmphasis(p, data[1:], c) + if ret == 0 { + return 0, nil + } + + return ret + 1, node + } + + if len(data) > 3 && data[1] == c && data[2] != c { + if isspace(data[2]) { + return 0, nil + } + ret, node := helperDoubleEmphasis(p, data[2:], c) + if ret == 0 { + return 0, nil + } + + return ret + 2, node + } + + if len(data) > 4 && data[1] == c && data[2] == c && data[3] != c { + if c == '~' || isspace(data[3]) { + return 0, nil + } + ret, node := helperTripleEmphasis(p, data, 3, c) + if ret == 0 { + return 0, nil + } + + return ret + 3, node + } + + return 0, nil +} + +func codeSpan(p *Markdown, data []byte, offset int) (int, *Node) { + data = data[offset:] + + nb := 0 + + // count the number of backticks in the delimiter + for nb < len(data) && data[nb] == '`' { + nb++ + } + + // find the next delimiter + i, end := 0, 0 + for end = nb; end < len(data) && i < nb; end++ { + if data[end] == '`' { + i++ + } else { + i = 0 + } + } + + // no matching delimiter? + if i < nb && end >= len(data) { + return 0, nil + } + + // trim outside whitespace + fBegin := nb + for fBegin < end && data[fBegin] == ' ' { + fBegin++ + } + + fEnd := end - nb + for fEnd > fBegin && data[fEnd-1] == ' ' { + fEnd-- + } + + // render the code span + if fBegin != fEnd { + code := NewNode(Code) + code.Literal = data[fBegin:fEnd] + return end, code + } + + return end, nil +} + +// newline preceded by two spaces becomes
    +func maybeLineBreak(p *Markdown, data []byte, offset int) (int, *Node) { + origOffset := offset + for offset < len(data) && data[offset] == ' ' { + offset++ + } + + if offset < len(data) && data[offset] == '\n' { + if offset-origOffset >= 2 { + return offset - origOffset + 1, NewNode(Hardbreak) + } + return offset - origOffset, nil + } + return 0, nil +} + +// newline without two spaces works when HardLineBreak is enabled +func lineBreak(p *Markdown, data []byte, offset int) (int, *Node) { + if p.extensions&HardLineBreak != 0 { + return 1, NewNode(Hardbreak) + } + return 0, nil +} + +type linkType int + +const ( + linkNormal linkType = iota + linkImg + linkDeferredFootnote + linkInlineFootnote +) + +func isReferenceStyleLink(data []byte, pos int, t linkType) bool { + if t == linkDeferredFootnote { + return false + } + return pos < len(data)-1 && data[pos] == '[' && data[pos+1] != '^' +} + +func maybeImage(p *Markdown, data []byte, offset int) (int, *Node) { + if offset < len(data)-1 && data[offset+1] == '[' { + return link(p, data, offset) + } + return 0, nil +} + +func maybeInlineFootnote(p *Markdown, data []byte, offset int) (int, *Node) { + if offset < len(data)-1 && data[offset+1] == '[' { + return link(p, data, offset) + } + return 0, nil +} + +// '[': parse a link or an image or a footnote +func link(p *Markdown, data []byte, offset int) (int, *Node) { + // no links allowed inside regular links, footnote, and deferred footnotes + if p.insideLink && (offset > 0 && data[offset-1] == '[' || len(data)-1 > offset && data[offset+1] == '^') { + return 0, nil + } + + var t linkType + switch { + // special case: ![^text] == deferred footnote (that follows something with + // an exclamation point) + case p.extensions&Footnotes != 0 && len(data)-1 > offset && data[offset+1] == '^': + t = linkDeferredFootnote + // ![alt] == image + case offset >= 0 && data[offset] == '!': + t = linkImg + offset++ + // ^[text] == inline footnote + // [^refId] == deferred footnote + 
case p.extensions&Footnotes != 0: + if offset >= 0 && data[offset] == '^' { + t = linkInlineFootnote + offset++ + } else if len(data)-1 > offset && data[offset+1] == '^' { + t = linkDeferredFootnote + } + // [text] == regular link + default: + t = linkNormal + } + + data = data[offset:] + + var ( + i = 1 + noteID int + title, link, altContent []byte + textHasNl = false + ) + + if t == linkDeferredFootnote { + i++ + } + + // look for the matching closing bracket + for level := 1; level > 0 && i < len(data); i++ { + switch { + case data[i] == '\n': + textHasNl = true + + case isBackslashEscaped(data, i): + continue + + case data[i] == '[': + level++ + + case data[i] == ']': + level-- + if level <= 0 { + i-- // compensate for extra i++ in for loop + } + } + } + + if i >= len(data) { + return 0, nil + } + + txtE := i + i++ + var footnoteNode *Node + + // skip any amount of whitespace or newline + // (this is much more lax than original markdown syntax) + for i < len(data) && isspace(data[i]) { + i++ + } + + // inline style link + switch { + case i < len(data) && data[i] == '(': + // skip initial whitespace + i++ + + for i < len(data) && isspace(data[i]) { + i++ + } + + linkB := i + + // look for link end: ' " ) + findlinkend: + for i < len(data) { + switch { + case data[i] == '\\': + i += 2 + + case data[i] == ')' || data[i] == '\'' || data[i] == '"': + break findlinkend + + default: + i++ + } + } + + if i >= len(data) { + return 0, nil + } + linkE := i + + // look for title end if present + titleB, titleE := 0, 0 + if data[i] == '\'' || data[i] == '"' { + i++ + titleB = i + + findtitleend: + for i < len(data) { + switch { + case data[i] == '\\': + i += 2 + + case data[i] == ')': + break findtitleend + + default: + i++ + } + } + + if i >= len(data) { + return 0, nil + } + + // skip whitespace after title + titleE = i - 1 + for titleE > titleB && isspace(data[titleE]) { + titleE-- + } + + // check for closing quote presence + if data[titleE] != '\'' && data[titleE] != 
'"' { + titleB, titleE = 0, 0 + linkE = i + } + } + + // remove whitespace at the end of the link + for linkE > linkB && isspace(data[linkE-1]) { + linkE-- + } + + // remove optional angle brackets around the link + if data[linkB] == '<' { + linkB++ + } + if data[linkE-1] == '>' { + linkE-- + } + + // build escaped link and title + if linkE > linkB { + link = data[linkB:linkE] + } + + if titleE > titleB { + title = data[titleB:titleE] + } + + i++ + + // reference style link + case isReferenceStyleLink(data, i, t): + var id []byte + altContentConsidered := false + + // look for the id + i++ + linkB := i + for i < len(data) && data[i] != ']' { + i++ + } + if i >= len(data) { + return 0, nil + } + linkE := i + + // find the reference + if linkB == linkE { + if textHasNl { + var b bytes.Buffer + + for j := 1; j < txtE; j++ { + switch { + case data[j] != '\n': + b.WriteByte(data[j]) + case data[j-1] != ' ': + b.WriteByte(' ') + } + } + + id = b.Bytes() + } else { + id = data[1:txtE] + altContentConsidered = true + } + } else { + id = data[linkB:linkE] + } + + // find the reference with matching id + lr, ok := p.getRef(string(id)) + if !ok { + return 0, nil + } + + // keep link and title from reference + link = lr.link + title = lr.title + if altContentConsidered { + altContent = lr.text + } + i++ + + // shortcut reference style link or reference or inline footnote + default: + var id []byte + + // craft the id + if textHasNl { + var b bytes.Buffer + + for j := 1; j < txtE; j++ { + switch { + case data[j] != '\n': + b.WriteByte(data[j]) + case data[j-1] != ' ': + b.WriteByte(' ') + } + } + + id = b.Bytes() + } else { + if t == linkDeferredFootnote { + id = data[2:txtE] // get rid of the ^ + } else { + id = data[1:txtE] + } + } + + footnoteNode = NewNode(Item) + if t == linkInlineFootnote { + // create a new reference + noteID = len(p.notes) + 1 + + var fragment []byte + if len(id) > 0 { + if len(id) < 16 { + fragment = make([]byte, len(id)) + } else { + fragment = 
make([]byte, 16) + } + copy(fragment, slugify(id)) + } else { + fragment = append([]byte("footnote-"), []byte(strconv.Itoa(noteID))...) + } + + ref := &reference{ + noteID: noteID, + hasBlock: false, + link: fragment, + title: id, + footnote: footnoteNode, + } + + p.notes = append(p.notes, ref) + + link = ref.link + title = ref.title + } else { + // find the reference with matching id + lr, ok := p.getRef(string(id)) + if !ok { + return 0, nil + } + + if t == linkDeferredFootnote { + lr.noteID = len(p.notes) + 1 + lr.footnote = footnoteNode + p.notes = append(p.notes, lr) + } + + // keep link and title from reference + link = lr.link + // if inline footnote, title == footnote contents + title = lr.title + noteID = lr.noteID + } + + // rewind the whitespace + i = txtE + 1 + } + + var uLink []byte + if t == linkNormal || t == linkImg { + if len(link) > 0 { + var uLinkBuf bytes.Buffer + unescapeText(&uLinkBuf, link) + uLink = uLinkBuf.Bytes() + } + + // links need something to click on and somewhere to go + if len(uLink) == 0 || (t == linkNormal && txtE <= 1) { + return 0, nil + } + } + + // call the relevant rendering function + var linkNode *Node + switch t { + case linkNormal: + linkNode = NewNode(Link) + linkNode.Destination = normalizeURI(uLink) + linkNode.Title = title + if len(altContent) > 0 { + linkNode.AppendChild(text(altContent)) + } else { + // links cannot contain other links, so turn off link parsing + // temporarily and recurse + insideLink := p.insideLink + p.insideLink = true + p.inline(linkNode, data[1:txtE]) + p.insideLink = insideLink + } + + case linkImg: + linkNode = NewNode(Image) + linkNode.Destination = uLink + linkNode.Title = title + linkNode.AppendChild(text(data[1:txtE])) + i++ + + case linkInlineFootnote, linkDeferredFootnote: + linkNode = NewNode(Link) + linkNode.Destination = link + linkNode.Title = title + linkNode.NoteID = noteID + linkNode.Footnote = footnoteNode + if t == linkInlineFootnote { + i++ + } + + default: + return 0, nil 
+ } + + return i, linkNode +} + +func (p *Markdown) inlineHTMLComment(data []byte) int { + if len(data) < 5 { + return 0 + } + if data[0] != '<' || data[1] != '!' || data[2] != '-' || data[3] != '-' { + return 0 + } + i := 5 + // scan for an end-of-comment marker, across lines if necessary + for i < len(data) && !(data[i-2] == '-' && data[i-1] == '-' && data[i] == '>') { + i++ + } + // no end-of-comment marker + if i >= len(data) { + return 0 + } + return i + 1 +} + +func stripMailto(link []byte) []byte { + if bytes.HasPrefix(link, []byte("mailto://")) { + return link[9:] + } else if bytes.HasPrefix(link, []byte("mailto:")) { + return link[7:] + } else { + return link + } +} + +// autolinkType specifies a kind of autolink that gets detected. +type autolinkType int + +// These are the possible flag values for the autolink renderer. +const ( + notAutolink autolinkType = iota + normalAutolink + emailAutolink +) + +// '<' when tags or autolinks are allowed +func leftAngle(p *Markdown, data []byte, offset int) (int, *Node) { + data = data[offset:] + altype, end := tagLength(data) + if size := p.inlineHTMLComment(data); size > 0 { + end = size + } + if end > 2 { + if altype != notAutolink { + var uLink bytes.Buffer + unescapeText(&uLink, data[1:end+1-2]) + if uLink.Len() > 0 { + link := uLink.Bytes() + node := NewNode(Link) + node.Destination = link + if altype == emailAutolink { + node.Destination = append([]byte("mailto:"), link...) 
+ } + node.AppendChild(text(stripMailto(link))) + return end, node + } + } else { + htmlTag := NewNode(HTMLSpan) + htmlTag.Literal = data[:end] + return end, htmlTag + } + } + + return end, nil +} + +// '\\' backslash escape +var escapeChars = []byte("\\`*_{}[]()#+-.!:|&<>~") + +func escape(p *Markdown, data []byte, offset int) (int, *Node) { + data = data[offset:] + + if len(data) > 1 { + if p.extensions&BackslashLineBreak != 0 && data[1] == '\n' { + return 2, NewNode(Hardbreak) + } + if bytes.IndexByte(escapeChars, data[1]) < 0 { + return 0, nil + } + + return 2, text(data[1:2]) + } + + return 2, nil +} + +func unescapeText(ob *bytes.Buffer, src []byte) { + i := 0 + for i < len(src) { + org := i + for i < len(src) && src[i] != '\\' { + i++ + } + + if i > org { + ob.Write(src[org:i]) + } + + if i+1 >= len(src) { + break + } + + ob.WriteByte(src[i+1]) + i += 2 + } +} + +// '&' escaped when it doesn't belong to an entity +// valid entities are assumed to be anything matching &#?[A-Za-z0-9]+; +func entity(p *Markdown, data []byte, offset int) (int, *Node) { + data = data[offset:] + + end := 1 + + if end < len(data) && data[end] == '#' { + end++ + } + + for end < len(data) && isalnum(data[end]) { + end++ + } + + if end < len(data) && data[end] == ';' { + end++ // real entity + } else { + return 0, nil // lone '&' + } + + ent := data[:end] + // undo & escaping or it will be converted to &amp; by another + // escaper in the renderer + if bytes.Equal(ent, []byte("&")) { + ent = []byte{'&'} + } + + return end, text(ent) +} + +func linkEndsWithEntity(data []byte, linkEnd int) bool { + entityRanges := htmlEntityRe.FindAllIndex(data[:linkEnd], -1) + return entityRanges != nil && entityRanges[len(entityRanges)-1][1] == linkEnd +} + +// hasPrefixCaseInsensitive is a custom implementation of +// strings.HasPrefix(strings.ToLower(s), prefix) +// we rolled our own because ToLower pulls in a huge machinery of lowercasing +// anything from Unicode and that's very slow. 
Since this func will only be +// used on ASCII protocol prefixes, we can take shortcuts. +func hasPrefixCaseInsensitive(s, prefix []byte) bool { + if len(s) < len(prefix) { + return false + } + delta := byte('a' - 'A') + for i, b := range prefix { + if b != s[i] && b != s[i]+delta { + return false + } + } + return true +} + +var protocolPrefixes = [][]byte{ + []byte("http://"), + []byte("https://"), + []byte("ftp://"), + []byte("file://"), + []byte("mailto:"), +} + +const shortestPrefix = 6 // len("ftp://"), the shortest of the above + +func maybeAutoLink(p *Markdown, data []byte, offset int) (int, *Node) { + // quick check to rule out most false hits + if p.insideLink || len(data) < offset+shortestPrefix { + return 0, nil + } + for _, prefix := range protocolPrefixes { + endOfHead := offset + 8 // 8 is the len() of the longest prefix + if endOfHead > len(data) { + endOfHead = len(data) + } + if hasPrefixCaseInsensitive(data[offset:endOfHead], prefix) { + return autoLink(p, data, offset) + } + } + return 0, nil +} + +func autoLink(p *Markdown, data []byte, offset int) (int, *Node) { + // Now a more expensive check to see if we're not inside an anchor element + anchorStart := offset + offsetFromAnchor := 0 + for anchorStart > 0 && data[anchorStart] != '<' { + anchorStart-- + offsetFromAnchor++ + } + + anchorStr := anchorRe.Find(data[anchorStart:]) + if anchorStr != nil { + anchorClose := NewNode(HTMLSpan) + anchorClose.Literal = anchorStr[offsetFromAnchor:] + return len(anchorStr) - offsetFromAnchor, anchorClose + } + + // scan backward for a word boundary + rewind := 0 + for offset-rewind > 0 && rewind <= 7 && isletter(data[offset-rewind-1]) { + rewind++ + } + if rewind > 6 { // longest supported protocol is "mailto" which has 6 letters + return 0, nil + } + + origData := data + data = data[offset-rewind:] + + if !isSafeLink(data) { + return 0, nil + } + + linkEnd := 0 + for linkEnd < len(data) && !isEndOfLink(data[linkEnd]) { + linkEnd++ + } + + // Skip 
punctuation at the end of the link + if (data[linkEnd-1] == '.' || data[linkEnd-1] == ',') && data[linkEnd-2] != '\\' { + linkEnd-- + } + + // But don't skip semicolon if it's a part of escaped entity: + if data[linkEnd-1] == ';' && data[linkEnd-2] != '\\' && !linkEndsWithEntity(data, linkEnd) { + linkEnd-- + } + + // See if the link finishes with a punctuation sign that can be closed. + var copen byte + switch data[linkEnd-1] { + case '"': + copen = '"' + case '\'': + copen = '\'' + case ')': + copen = '(' + case ']': + copen = '[' + case '}': + copen = '{' + default: + copen = 0 + } + + if copen != 0 { + bufEnd := offset - rewind + linkEnd - 2 + + openDelim := 1 + + /* Try to close the final punctuation sign in this same line; + * if we managed to close it outside of the URL, that means that it's + * not part of the URL. If it closes inside the URL, that means it + * is part of the URL. + * + * Examples: + * + * foo http://www.pokemon.com/Pikachu_(Electric) bar + * => http://www.pokemon.com/Pikachu_(Electric) + * + * foo (http://www.pokemon.com/Pikachu_(Electric)) bar + * => http://www.pokemon.com/Pikachu_(Electric) + * + * foo http://www.pokemon.com/Pikachu_(Electric)) bar + * => http://www.pokemon.com/Pikachu_(Electric)) + * + * (foo http://www.pokemon.com/Pikachu_(Electric)) bar + * => foo http://www.pokemon.com/Pikachu_(Electric) + */ + + for bufEnd >= 0 && origData[bufEnd] != '\n' && openDelim != 0 { + if origData[bufEnd] == data[linkEnd-1] { + openDelim++ + } + + if origData[bufEnd] == copen { + openDelim-- + } + + bufEnd-- + } + + if openDelim == 0 { + linkEnd-- + } + } + + var uLink bytes.Buffer + unescapeText(&uLink, data[:linkEnd]) + + if uLink.Len() > 0 { + node := NewNode(Link) + node.Destination = uLink.Bytes() + node.AppendChild(text(uLink.Bytes())) + return linkEnd, node + } + + return linkEnd, nil +} + +func isEndOfLink(char byte) bool { + return isspace(char) || char == '<' +} + +var validUris = [][]byte{[]byte("http://"), []byte("https://"), 
[]byte("ftp://"), []byte("mailto://")} +var validPaths = [][]byte{[]byte("/"), []byte("./"), []byte("../")} + +func isSafeLink(link []byte) bool { + for _, path := range validPaths { + if len(link) >= len(path) && bytes.Equal(link[:len(path)], path) { + if len(link) == len(path) { + return true + } else if isalnum(link[len(path)]) { + return true + } + } + } + + for _, prefix := range validUris { + // TODO: handle unicode here + // case-insensitive prefix test + if len(link) > len(prefix) && bytes.Equal(bytes.ToLower(link[:len(prefix)]), prefix) && isalnum(link[len(prefix)]) { + return true + } + } + + return false +} + +// return the length of the given tag, or 0 is it's not valid +func tagLength(data []byte) (autolink autolinkType, end int) { + var i, j int + + // a valid tag can't be shorter than 3 chars + if len(data) < 3 { + return notAutolink, 0 + } + + // begins with a '<' optionally followed by '/', followed by letter or number + if data[0] != '<' { + return notAutolink, 0 + } + if data[1] == '/' { + i = 2 + } else { + i = 1 + } + + if !isalnum(data[i]) { + return notAutolink, 0 + } + + // scheme test + autolink = notAutolink + + // try to find the beginning of an URI + for i < len(data) && (isalnum(data[i]) || data[i] == '.' 
|| data[i] == '+' || data[i] == '-') { + i++ + } + + if i > 1 && i < len(data) && data[i] == '@' { + if j = isMailtoAutoLink(data[i:]); j != 0 { + return emailAutolink, i + j + } + } + + if i > 2 && i < len(data) && data[i] == ':' { + autolink = normalAutolink + i++ + } + + // complete autolink test: no whitespace or ' or " + switch { + case i >= len(data): + autolink = notAutolink + case autolink != notAutolink: + j = i + + for i < len(data) { + if data[i] == '\\' { + i += 2 + } else if data[i] == '>' || data[i] == '\'' || data[i] == '"' || isspace(data[i]) { + break + } else { + i++ + } + + } + + if i >= len(data) { + return autolink, 0 + } + if i > j && data[i] == '>' { + return autolink, i + 1 + } + + // one of the forbidden chars has been found + autolink = notAutolink + } + i += bytes.IndexByte(data[i:], '>') + if i < 0 { + return autolink, 0 + } + return autolink, i + 1 +} + +// look for the address part of a mail autolink and '>' +// this is less strict than the original markdown e-mail address matching +func isMailtoAutoLink(data []byte) int { + nb := 0 + + // address is assumed to be: [-@._a-zA-Z0-9]+ with exactly one '@' + for i := 0; i < len(data); i++ { + if isalnum(data[i]) { + continue + } + + switch data[i] { + case '@': + nb++ + + case '-', '.', '_': + break + + case '>': + if nb == 1 { + return i + 1 + } + return 0 + default: + return 0 + } + } + + return 0 +} + +// look for the next emph char, skipping other constructs +func helperFindEmphChar(data []byte, c byte) int { + i := 0 + + for i < len(data) { + for i < len(data) && data[i] != c && data[i] != '`' && data[i] != '[' { + i++ + } + if i >= len(data) { + return 0 + } + // do not count escaped chars + if i != 0 && data[i-1] == '\\' { + i++ + continue + } + if data[i] == c { + return i + } + + if data[i] == '`' { + // skip a code span + tmpI := 0 + i++ + for i < len(data) && data[i] != '`' { + if tmpI == 0 && data[i] == c { + tmpI = i + } + i++ + } + if i >= len(data) { + return tmpI + } + i++ 
+ } else if data[i] == '[' { + // skip a link + tmpI := 0 + i++ + for i < len(data) && data[i] != ']' { + if tmpI == 0 && data[i] == c { + tmpI = i + } + i++ + } + i++ + for i < len(data) && (data[i] == ' ' || data[i] == '\n') { + i++ + } + if i >= len(data) { + return tmpI + } + if data[i] != '[' && data[i] != '(' { // not a link + if tmpI > 0 { + return tmpI + } + continue + } + cc := data[i] + i++ + for i < len(data) && data[i] != cc { + if tmpI == 0 && data[i] == c { + return i + } + i++ + } + if i >= len(data) { + return tmpI + } + i++ + } + } + return 0 +} + +func helperEmphasis(p *Markdown, data []byte, c byte) (int, *Node) { + i := 0 + + // skip one symbol if coming from emph3 + if len(data) > 1 && data[0] == c && data[1] == c { + i = 1 + } + + for i < len(data) { + length := helperFindEmphChar(data[i:], c) + if length == 0 { + return 0, nil + } + i += length + if i >= len(data) { + return 0, nil + } + + if i+1 < len(data) && data[i+1] == c { + i++ + continue + } + + if data[i] == c && !isspace(data[i-1]) { + + if p.extensions&NoIntraEmphasis != 0 { + if !(i+1 == len(data) || isspace(data[i+1]) || ispunct(data[i+1])) { + continue + } + } + + emph := NewNode(Emph) + p.inline(emph, data[:i]) + return i + 1, emph + } + } + + return 0, nil +} + +func helperDoubleEmphasis(p *Markdown, data []byte, c byte) (int, *Node) { + i := 0 + + for i < len(data) { + length := helperFindEmphChar(data[i:], c) + if length == 0 { + return 0, nil + } + i += length + + if i+1 < len(data) && data[i] == c && data[i+1] == c && i > 0 && !isspace(data[i-1]) { + nodeType := Strong + if c == '~' { + nodeType = Del + } + node := NewNode(nodeType) + p.inline(node, data[:i]) + return i + 2, node + } + i++ + } + return 0, nil +} + +func helperTripleEmphasis(p *Markdown, data []byte, offset int, c byte) (int, *Node) { + i := 0 + origData := data + data = data[offset:] + + for i < len(data) { + length := helperFindEmphChar(data[i:], c) + if length == 0 { + return 0, nil + } + i += length + + 
// skip whitespace preceded symbols + if data[i] != c || isspace(data[i-1]) { + continue + } + + switch { + case i+2 < len(data) && data[i+1] == c && data[i+2] == c: + // triple symbol found + strong := NewNode(Strong) + em := NewNode(Emph) + strong.AppendChild(em) + p.inline(em, data[:i]) + return i + 3, strong + case (i+1 < len(data) && data[i+1] == c): + // double symbol found, hand over to emph1 + length, node := helperEmphasis(p, origData[offset-2:], c) + if length == 0 { + return 0, nil + } + return length - 2, node + default: + // single symbol found, hand over to emph2 + length, node := helperDoubleEmphasis(p, origData[offset-1:], c) + if length == 0 { + return 0, nil + } + return length - 1, node + } + } + return 0, nil +} + +func text(s []byte) *Node { + node := NewNode(Text) + node.Literal = s + return node +} + +func normalizeURI(s []byte) []byte { + return s // TODO: implement +} diff --git a/vendor/github.com/russross/blackfriday/v2/markdown.go b/vendor/github.com/russross/blackfriday/v2/markdown.go new file mode 100644 index 000000000..58d2e4538 --- /dev/null +++ b/vendor/github.com/russross/blackfriday/v2/markdown.go @@ -0,0 +1,950 @@ +// Blackfriday Markdown Processor +// Available at http://github.com/russross/blackfriday +// +// Copyright © 2011 Russ Ross . +// Distributed under the Simplified BSD License. +// See README.md for details. + +package blackfriday + +import ( + "bytes" + "fmt" + "io" + "strings" + "unicode/utf8" +) + +// +// Markdown parsing and processing +// + +// Version string of the package. Appears in the rendered document when +// CompletePage flag is on. +const Version = "2.0" + +// Extensions is a bitwise or'ed collection of enabled Blackfriday's +// extensions. +type Extensions int + +// These are the supported markdown parsing extensions. +// OR these values together to select multiple extensions. 
+const ( + NoExtensions Extensions = 0 + NoIntraEmphasis Extensions = 1 << iota // Ignore emphasis markers inside words + Tables // Render tables + FencedCode // Render fenced code blocks + Autolink // Detect embedded URLs that are not explicitly marked + Strikethrough // Strikethrough text using ~~test~~ + LaxHTMLBlocks // Loosen up HTML block parsing rules + SpaceHeadings // Be strict about prefix heading rules + HardLineBreak // Translate newlines into line breaks + TabSizeEight // Expand tabs to eight spaces instead of four + Footnotes // Pandoc-style footnotes + NoEmptyLineBeforeBlock // No need to insert an empty line to start a (code, quote, ordered list, unordered list) block + HeadingIDs // specify heading IDs with {#id} + Titleblock // Titleblock ala pandoc + AutoHeadingIDs // Create the heading ID from the text + BackslashLineBreak // Translate trailing backslashes into line breaks + DefinitionLists // Render definition lists + + CommonHTMLFlags HTMLFlags = UseXHTML | Smartypants | + SmartypantsFractions | SmartypantsDashes | SmartypantsLatexDashes + + CommonExtensions Extensions = NoIntraEmphasis | Tables | FencedCode | + Autolink | Strikethrough | SpaceHeadings | HeadingIDs | + BackslashLineBreak | DefinitionLists +) + +// ListType contains bitwise or'ed flags for list and list item objects. +type ListType int + +// These are the possible flag values for the ListItem renderer. +// Multiple flag values may be ORed together. +// These are mostly of interest if you are writing a new output format. +const ( + ListTypeOrdered ListType = 1 << iota + ListTypeDefinition + ListTypeTerm + + ListItemContainsBlock + ListItemBeginningOfList // TODO: figure out if this is of any use now + ListItemEndOfList +) + +// CellAlignFlags holds a type of alignment in a table cell. +type CellAlignFlags int + +// These are the possible flag values for the table cell renderer. +// Only a single one of these values will be used; they are not ORed together. 
+// These are mostly of interest if you are writing a new output format. +const ( + TableAlignmentLeft CellAlignFlags = 1 << iota + TableAlignmentRight + TableAlignmentCenter = (TableAlignmentLeft | TableAlignmentRight) +) + +// The size of a tab stop. +const ( + TabSizeDefault = 4 + TabSizeDouble = 8 +) + +// blockTags is a set of tags that are recognized as HTML block tags. +// Any of these can be included in markdown text without special escaping. +var blockTags = map[string]struct{}{ + "blockquote": {}, + "del": {}, + "div": {}, + "dl": {}, + "fieldset": {}, + "form": {}, + "h1": {}, + "h2": {}, + "h3": {}, + "h4": {}, + "h5": {}, + "h6": {}, + "iframe": {}, + "ins": {}, + "math": {}, + "noscript": {}, + "ol": {}, + "pre": {}, + "p": {}, + "script": {}, + "style": {}, + "table": {}, + "ul": {}, + + // HTML5 + "address": {}, + "article": {}, + "aside": {}, + "canvas": {}, + "figcaption": {}, + "figure": {}, + "footer": {}, + "header": {}, + "hgroup": {}, + "main": {}, + "nav": {}, + "output": {}, + "progress": {}, + "section": {}, + "video": {}, +} + +// Renderer is the rendering interface. This is mostly of interest if you are +// implementing a new rendering format. +// +// Only an HTML implementation is provided in this repository, see the README +// for external implementations. +type Renderer interface { + // RenderNode is the main rendering method. It will be called once for + // every leaf node and twice for every non-leaf node (first with + // entering=true, then with entering=false). The method should write its + // rendition of the node to the supplied writer w. + RenderNode(w io.Writer, node *Node, entering bool) WalkStatus + + // RenderHeader is a method that allows the renderer to produce some + // content preceding the main body of the output document. The header is + // understood in the broad sense here. 
For example, the default HTML + // renderer will write not only the HTML document preamble, but also the + // table of contents if it was requested. + // + // The method will be passed an entire document tree, in case a particular + // implementation needs to inspect it to produce output. + // + // The output should be written to the supplied writer w. If your + // implementation has no header to write, supply an empty implementation. + RenderHeader(w io.Writer, ast *Node) + + // RenderFooter is a symmetric counterpart of RenderHeader. + RenderFooter(w io.Writer, ast *Node) +} + +// Callback functions for inline parsing. One such function is defined +// for each character that triggers a response when parsing inline data. +type inlineParser func(p *Markdown, data []byte, offset int) (int, *Node) + +// Markdown is a type that holds extensions and the runtime state used by +// Parse, and the renderer. You can not use it directly, construct it with New. +type Markdown struct { + renderer Renderer + referenceOverride ReferenceOverrideFunc + refs map[string]*reference + inlineCallback [256]inlineParser + extensions Extensions + nesting int + maxNesting int + insideLink bool + + // Footnotes need to be ordered as well as available to quickly check for + // presence. If a ref is also a footnote, it's stored both in refs and here + // in notes. Slice is nil if footnotes not enabled. 
+ notes []*reference + + doc *Node + tip *Node // = doc + oldTip *Node + lastMatchedContainer *Node // = doc + allClosed bool +} + +func (p *Markdown) getRef(refid string) (ref *reference, found bool) { + if p.referenceOverride != nil { + r, overridden := p.referenceOverride(refid) + if overridden { + if r == nil { + return nil, false + } + return &reference{ + link: []byte(r.Link), + title: []byte(r.Title), + noteID: 0, + hasBlock: false, + text: []byte(r.Text)}, true + } + } + // refs are case insensitive + ref, found = p.refs[strings.ToLower(refid)] + return ref, found +} + +func (p *Markdown) finalize(block *Node) { + above := block.Parent + block.open = false + p.tip = above +} + +func (p *Markdown) addChild(node NodeType, offset uint32) *Node { + return p.addExistingChild(NewNode(node), offset) +} + +func (p *Markdown) addExistingChild(node *Node, offset uint32) *Node { + for !p.tip.canContain(node.Type) { + p.finalize(p.tip) + } + p.tip.AppendChild(node) + p.tip = node + return node +} + +func (p *Markdown) closeUnmatchedBlocks() { + if !p.allClosed { + for p.oldTip != p.lastMatchedContainer { + parent := p.oldTip.Parent + p.finalize(p.oldTip) + p.oldTip = parent + } + p.allClosed = true + } +} + +// +// +// Public interface +// +// + +// Reference represents the details of a link. +// See the documentation in Options for more details on use-case. +type Reference struct { + // Link is usually the URL the reference points to. + Link string + // Title is the alternate text describing the link in more detail. + Title string + // Text is the optional text to override the ref with if the syntax used was + // [refid][] + Text string +} + +// ReferenceOverrideFunc is expected to be called with a reference string and +// return either a valid Reference type that the reference string maps to or +// nil. If overridden is false, the default reference logic will be executed. +// See the documentation in Options for more details on use-case. 
+type ReferenceOverrideFunc func(reference string) (ref *Reference, overridden bool) + +// New constructs a Markdown processor. You can use the same With* functions as +// for Run() to customize parser's behavior and the renderer. +func New(opts ...Option) *Markdown { + var p Markdown + for _, opt := range opts { + opt(&p) + } + p.refs = make(map[string]*reference) + p.maxNesting = 16 + p.insideLink = false + docNode := NewNode(Document) + p.doc = docNode + p.tip = docNode + p.oldTip = docNode + p.lastMatchedContainer = docNode + p.allClosed = true + // register inline parsers + p.inlineCallback[' '] = maybeLineBreak + p.inlineCallback['*'] = emphasis + p.inlineCallback['_'] = emphasis + if p.extensions&Strikethrough != 0 { + p.inlineCallback['~'] = emphasis + } + p.inlineCallback['`'] = codeSpan + p.inlineCallback['\n'] = lineBreak + p.inlineCallback['['] = link + p.inlineCallback['<'] = leftAngle + p.inlineCallback['\\'] = escape + p.inlineCallback['&'] = entity + p.inlineCallback['!'] = maybeImage + p.inlineCallback['^'] = maybeInlineFootnote + if p.extensions&Autolink != 0 { + p.inlineCallback['h'] = maybeAutoLink + p.inlineCallback['m'] = maybeAutoLink + p.inlineCallback['f'] = maybeAutoLink + p.inlineCallback['H'] = maybeAutoLink + p.inlineCallback['M'] = maybeAutoLink + p.inlineCallback['F'] = maybeAutoLink + } + if p.extensions&Footnotes != 0 { + p.notes = make([]*reference, 0) + } + return &p +} + +// Option customizes the Markdown processor's default behavior. +type Option func(*Markdown) + +// WithRenderer allows you to override the default renderer. +func WithRenderer(r Renderer) Option { + return func(p *Markdown) { + p.renderer = r + } +} + +// WithExtensions allows you to pick some of the many extensions provided by +// Blackfriday. You can bitwise OR them. +func WithExtensions(e Extensions) Option { + return func(p *Markdown) { + p.extensions = e + } +} + +// WithNoExtensions turns off all extensions and custom behavior. 
+func WithNoExtensions() Option { + return func(p *Markdown) { + p.extensions = NoExtensions + p.renderer = NewHTMLRenderer(HTMLRendererParameters{ + Flags: HTMLFlagsNone, + }) + } +} + +// WithRefOverride sets an optional function callback that is called every +// time a reference is resolved. +// +// In Markdown, the link reference syntax can be made to resolve a link to +// a reference instead of an inline URL, in one of the following ways: +// +// * [link text][refid] +// * [refid][] +// +// Usually, the refid is defined at the bottom of the Markdown document. If +// this override function is provided, the refid is passed to the override +// function first, before consulting the defined refids at the bottom. If +// the override function indicates an override did not occur, the refids at +// the bottom will be used to fill in the link details. +func WithRefOverride(o ReferenceOverrideFunc) Option { + return func(p *Markdown) { + p.referenceOverride = o + } +} + +// Run is the main entry point to Blackfriday. It parses and renders a +// block of markdown-encoded text. +// +// The simplest invocation of Run takes one argument, input: +// output := Run(input) +// This will parse the input with CommonExtensions enabled and render it with +// the default HTMLRenderer (with CommonHTMLFlags). +// +// Variadic arguments opts can customize the default behavior. Since Markdown +// type does not contain exported fields, you can not use it directly. Instead, +// use the With* functions. For example, this will call the most basic +// functionality, with no extensions: +// output := Run(input, WithNoExtensions()) +// +// You can use any number of With* arguments, even contradicting ones. 
They +// will be applied in order of appearance and the latter will override the +// former: +// output := Run(input, WithNoExtensions(), WithExtensions(exts), +// WithRenderer(yourRenderer)) +func Run(input []byte, opts ...Option) []byte { + r := NewHTMLRenderer(HTMLRendererParameters{ + Flags: CommonHTMLFlags, + }) + optList := []Option{WithRenderer(r), WithExtensions(CommonExtensions)} + optList = append(optList, opts...) + parser := New(optList...) + ast := parser.Parse(input) + var buf bytes.Buffer + parser.renderer.RenderHeader(&buf, ast) + ast.Walk(func(node *Node, entering bool) WalkStatus { + return parser.renderer.RenderNode(&buf, node, entering) + }) + parser.renderer.RenderFooter(&buf, ast) + return buf.Bytes() +} + +// Parse is an entry point to the parsing part of Blackfriday. It takes an +// input markdown document and produces a syntax tree for its contents. This +// tree can then be rendered with a default or custom renderer, or +// analyzed/transformed by the caller to whatever non-standard needs they have. +// The return value is the root node of the syntax tree. +func (p *Markdown) Parse(input []byte) *Node { + p.block(input) + // Walk the tree and finish up some of unfinished blocks + for p.tip != nil { + p.finalize(p.tip) + } + // Walk the tree again and process inline markdown in each block + p.doc.Walk(func(node *Node, entering bool) WalkStatus { + if node.Type == Paragraph || node.Type == Heading || node.Type == TableCell { + p.inline(node, node.content) + node.content = nil + } + return GoToNext + }) + p.parseRefsToAST() + return p.doc +} + +func (p *Markdown) parseRefsToAST() { + if p.extensions&Footnotes == 0 || len(p.notes) == 0 { + return + } + p.tip = p.doc + block := p.addBlock(List, nil) + block.IsFootnotesList = true + block.ListFlags = ListTypeOrdered + flags := ListItemBeginningOfList + // Note: this loop is intentionally explicit, not range-form. 
This is + // because the body of the loop will append nested footnotes to p.notes and + // we need to process those late additions. Range form would only walk over + // the fixed initial set. + for i := 0; i < len(p.notes); i++ { + ref := p.notes[i] + p.addExistingChild(ref.footnote, 0) + block := ref.footnote + block.ListFlags = flags | ListTypeOrdered + block.RefLink = ref.link + if ref.hasBlock { + flags |= ListItemContainsBlock + p.block(ref.title) + } else { + p.inline(block, ref.title) + } + flags &^= ListItemBeginningOfList | ListItemContainsBlock + } + above := block.Parent + finalizeList(block) + p.tip = above + block.Walk(func(node *Node, entering bool) WalkStatus { + if node.Type == Paragraph || node.Type == Heading { + p.inline(node, node.content) + node.content = nil + } + return GoToNext + }) +} + +// +// Link references +// +// This section implements support for references that (usually) appear +// as footnotes in a document, and can be referenced anywhere in the document. +// The basic format is: +// +// [1]: http://www.google.com/ "Google" +// [2]: http://www.github.com/ "Github" +// +// Anywhere in the document, the reference can be linked by referring to its +// label, i.e., 1 and 2 in this example, as in: +// +// This library is hosted on [Github][2], a git hosting site. +// +// Actual footnotes as specified in Pandoc and supported by some other Markdown +// libraries such as php-markdown are also taken care of. They look like this: +// +// This sentence needs a bit of further explanation.[^note] +// +// [^note]: This is the explanation. +// +// Footnotes should be placed at the end of the document in an ordered list. +// Finally, there are inline footnotes such as: +// +// Inline footnotes^[Also supported.] provide a quick inline explanation, +// but are rendered at the bottom of the document. +// + +// reference holds all information necessary for a reference-style links or +// footnotes. 
+// +// Consider this markdown with reference-style links: +// +// [link][ref] +// +// [ref]: /url/ "tooltip title" +// +// It will be ultimately converted to this HTML: +// +//

    link

    +// +// And a reference structure will be populated as follows: +// +// p.refs["ref"] = &reference{ +// link: "/url/", +// title: "tooltip title", +// } +// +// Alternatively, reference can contain information about a footnote. Consider +// this markdown: +// +// Text needing a footnote.[^a] +// +// [^a]: This is the note +// +// A reference structure will be populated as follows: +// +// p.refs["a"] = &reference{ +// link: "a", +// title: "This is the note", +// noteID: , +// } +// +// TODO: As you can see, it begs for splitting into two dedicated structures +// for refs and for footnotes. +type reference struct { + link []byte + title []byte + noteID int // 0 if not a footnote ref + hasBlock bool + footnote *Node // a link to the Item node within a list of footnotes + + text []byte // only gets populated by refOverride feature with Reference.Text +} + +func (r *reference) String() string { + return fmt.Sprintf("{link: %q, title: %q, text: %q, noteID: %d, hasBlock: %v}", + r.link, r.title, r.text, r.noteID, r.hasBlock) +} + +// Check whether or not data starts with a reference link. +// If so, it is parsed and stored in the list of references +// (in the render struct). +// Returns the number of bytes to skip to move past it, +// or zero if the first line is not a reference. +func isReference(p *Markdown, data []byte, tabSize int) int { + // up to 3 optional leading spaces + if len(data) < 4 { + return 0 + } + i := 0 + for i < 3 && data[i] == ' ' { + i++ + } + + noteID := 0 + + // id part: anything but a newline between brackets + if data[i] != '[' { + return 0 + } + i++ + if p.extensions&Footnotes != 0 { + if i < len(data) && data[i] == '^' { + // we can set it to anything here because the proper noteIds will + // be assigned later during the second pass. 
It just has to be != 0 + noteID = 1 + i++ + } + } + idOffset := i + for i < len(data) && data[i] != '\n' && data[i] != '\r' && data[i] != ']' { + i++ + } + if i >= len(data) || data[i] != ']' { + return 0 + } + idEnd := i + // footnotes can have empty ID, like this: [^], but a reference can not be + // empty like this: []. Break early if it's not a footnote and there's no ID + if noteID == 0 && idOffset == idEnd { + return 0 + } + // spacer: colon (space | tab)* newline? (space | tab)* + i++ + if i >= len(data) || data[i] != ':' { + return 0 + } + i++ + for i < len(data) && (data[i] == ' ' || data[i] == '\t') { + i++ + } + if i < len(data) && (data[i] == '\n' || data[i] == '\r') { + i++ + if i < len(data) && data[i] == '\n' && data[i-1] == '\r' { + i++ + } + } + for i < len(data) && (data[i] == ' ' || data[i] == '\t') { + i++ + } + if i >= len(data) { + return 0 + } + + var ( + linkOffset, linkEnd int + titleOffset, titleEnd int + lineEnd int + raw []byte + hasBlock bool + ) + + if p.extensions&Footnotes != 0 && noteID != 0 { + linkOffset, linkEnd, raw, hasBlock = scanFootnote(p, data, i, tabSize) + lineEnd = linkEnd + } else { + linkOffset, linkEnd, titleOffset, titleEnd, lineEnd = scanLinkRef(p, data, i) + } + if lineEnd == 0 { + return 0 + } + + // a valid ref has been found + + ref := &reference{ + noteID: noteID, + hasBlock: hasBlock, + } + + if noteID > 0 { + // reusing the link field for the id since footnotes don't have links + ref.link = data[idOffset:idEnd] + // if footnote, it's not really a title, it's the contained text + ref.title = raw + } else { + ref.link = data[linkOffset:linkEnd] + ref.title = data[titleOffset:titleEnd] + } + + // id matches are case-insensitive + id := string(bytes.ToLower(data[idOffset:idEnd])) + + p.refs[id] = ref + + return lineEnd +} + +func scanLinkRef(p *Markdown, data []byte, i int) (linkOffset, linkEnd, titleOffset, titleEnd, lineEnd int) { + // link: whitespace-free sequence, optionally between angle brackets + if 
data[i] == '<' { + i++ + } + linkOffset = i + for i < len(data) && data[i] != ' ' && data[i] != '\t' && data[i] != '\n' && data[i] != '\r' { + i++ + } + linkEnd = i + if data[linkOffset] == '<' && data[linkEnd-1] == '>' { + linkOffset++ + linkEnd-- + } + + // optional spacer: (space | tab)* (newline | '\'' | '"' | '(' ) + for i < len(data) && (data[i] == ' ' || data[i] == '\t') { + i++ + } + if i < len(data) && data[i] != '\n' && data[i] != '\r' && data[i] != '\'' && data[i] != '"' && data[i] != '(' { + return + } + + // compute end-of-line + if i >= len(data) || data[i] == '\r' || data[i] == '\n' { + lineEnd = i + } + if i+1 < len(data) && data[i] == '\r' && data[i+1] == '\n' { + lineEnd++ + } + + // optional (space|tab)* spacer after a newline + if lineEnd > 0 { + i = lineEnd + 1 + for i < len(data) && (data[i] == ' ' || data[i] == '\t') { + i++ + } + } + + // optional title: any non-newline sequence enclosed in '"() alone on its line + if i+1 < len(data) && (data[i] == '\'' || data[i] == '"' || data[i] == '(') { + i++ + titleOffset = i + + // look for EOL + for i < len(data) && data[i] != '\n' && data[i] != '\r' { + i++ + } + if i+1 < len(data) && data[i] == '\n' && data[i+1] == '\r' { + titleEnd = i + 1 + } else { + titleEnd = i + } + + // step back + i-- + for i > titleOffset && (data[i] == ' ' || data[i] == '\t') { + i-- + } + if i > titleOffset && (data[i] == '\'' || data[i] == '"' || data[i] == ')') { + lineEnd = titleEnd + titleEnd = i + } + } + + return +} + +// The first bit of this logic is the same as Parser.listItem, but the rest +// is much simpler. This function simply finds the entire block and shifts it +// over by one tab if it is indeed a block (just returns the line if it's not). +// blockEnd is the end of the section in the input buffer, and contents is the +// extracted text that was shifted over one tab. It will need to be rendered at +// the end of the document. 
+func scanFootnote(p *Markdown, data []byte, i, indentSize int) (blockStart, blockEnd int, contents []byte, hasBlock bool) { + if i == 0 || len(data) == 0 { + return + } + + // skip leading whitespace on first line + for i < len(data) && data[i] == ' ' { + i++ + } + + blockStart = i + + // find the end of the line + blockEnd = i + for i < len(data) && data[i-1] != '\n' { + i++ + } + + // get working buffer + var raw bytes.Buffer + + // put the first line into the working buffer + raw.Write(data[blockEnd:i]) + blockEnd = i + + // process the following lines + containsBlankLine := false + +gatherLines: + for blockEnd < len(data) { + i++ + + // find the end of this line + for i < len(data) && data[i-1] != '\n' { + i++ + } + + // if it is an empty line, guess that it is part of this item + // and move on to the next line + if p.isEmpty(data[blockEnd:i]) > 0 { + containsBlankLine = true + blockEnd = i + continue + } + + n := 0 + if n = isIndented(data[blockEnd:i], indentSize); n == 0 { + // this is the end of the block. + // we don't want to include this last line in the index. + break gatherLines + } + + // if there were blank lines before this one, insert a new one now + if containsBlankLine { + raw.WriteByte('\n') + containsBlankLine = false + } + + // get rid of that first tab, write to buffer + raw.Write(data[blockEnd+n : i]) + hasBlock = true + + blockEnd = i + } + + if data[blockEnd-1] != '\n' { + raw.WriteByte('\n') + } + + contents = raw.Bytes() + + return +} + +// +// +// Miscellaneous helper functions +// +// + +// Test if a character is a punctuation symbol. +// Taken from a private function in regexp in the stdlib. +func ispunct(c byte) bool { + for _, r := range []byte("!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~") { + if c == r { + return true + } + } + return false +} + +// Test if a character is a whitespace character. 
// isspace reports whether c is any ASCII whitespace character.
func isspace(c byte) bool {
	return ishorizontalspace(c) || isverticalspace(c)
}

// ishorizontalspace reports whether c is a space or a tab.
func ishorizontalspace(c byte) bool {
	switch c {
	case ' ', '\t':
		return true
	}
	return false
}

// isverticalspace reports whether c is a vertical whitespace character.
func isverticalspace(c byte) bool {
	switch c {
	case '\n', '\r', '\f', '\v':
		return true
	}
	return false
}

// isletter reports whether c is an ASCII letter.
func isletter(c byte) bool {
	return ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z')
}

// isalnum reports whether c is an ASCII letter or digit.
// TODO: check when this is looking for ASCII alnum and when it should use unicode
func isalnum(c byte) bool {
	return ('0' <= c && c <= '9') || isletter(c)
}

// expandTabs writes line to out with every tab replaced by spaces, aligning
// to the next tabSize column. Columns are counted in runes, not bytes.
func expandTabs(out *bytes.Buffer, line []byte, tabSize int) {
	// Fast path: tabs, if any, appear only as a leading run.
	prefix := 0
	slowcase := false
	for i := 0; i < len(line); i++ {
		if line[i] != '\t' {
			continue
		}
		if prefix != i {
			slowcase = true
			break
		}
		prefix++
	}

	// No rune decoding needed when all tabs sit at the start of the line.
	if !slowcase {
		out.Write(bytes.Repeat([]byte{' '}, prefix*tabSize))
		out.Write(line[prefix:])
		return
	}

	// Slow path: count runes so interior tabs align to rune columns.
	column := 0
	i := 0
	for i < len(line) {
		start := i
		for i < len(line) && line[i] != '\t' {
			_, size := utf8.DecodeRune(line[i:])
			i += size
			column++
		}

		if i > start {
			out.Write(line[start:i])
		}

		if i >= len(line) {
			break
		}

		// Pad with at least one space, up to the next tab stop.
		for {
			out.WriteByte(' ')
			column++
			if column%tabSize == 0 {
				break
			}
		}

		i++
	}
}

// Find if a line counts as indented or not.
// Returns number of characters the indent is (0 = not indented).
+func isIndented(data []byte, indentSize int) int { + if len(data) == 0 { + return 0 + } + if data[0] == '\t' { + return 1 + } + if len(data) < indentSize { + return 0 + } + for i := 0; i < indentSize; i++ { + if data[i] != ' ' { + return 0 + } + } + return indentSize +} + +// Create a url-safe slug for fragments +func slugify(in []byte) []byte { + if len(in) == 0 { + return in + } + out := make([]byte, 0, len(in)) + sym := false + + for _, ch := range in { + if isalnum(ch) { + sym = false + out = append(out, ch) + } else if sym { + continue + } else { + out = append(out, '-') + sym = true + } + } + var a, b int + var ch byte + for a, ch = range out { + if ch != '-' { + break + } + } + for b = len(out) - 1; b > 0; b-- { + if out[b] != '-' { + break + } + } + return out[a : b+1] +} diff --git a/vendor/github.com/russross/blackfriday/v2/node.go b/vendor/github.com/russross/blackfriday/v2/node.go new file mode 100644 index 000000000..04e6050ce --- /dev/null +++ b/vendor/github.com/russross/blackfriday/v2/node.go @@ -0,0 +1,360 @@ +package blackfriday + +import ( + "bytes" + "fmt" +) + +// NodeType specifies a type of a single node of a syntax tree. Usually one +// node (and its type) corresponds to a single markdown feature, e.g. emphasis +// or code block. +type NodeType int + +// Constants for identifying different types of nodes. See NodeType. 
+const ( + Document NodeType = iota + BlockQuote + List + Item + Paragraph + Heading + HorizontalRule + Emph + Strong + Del + Link + Image + Text + HTMLBlock + CodeBlock + Softbreak + Hardbreak + Code + HTMLSpan + Table + TableCell + TableHead + TableBody + TableRow +) + +var nodeTypeNames = []string{ + Document: "Document", + BlockQuote: "BlockQuote", + List: "List", + Item: "Item", + Paragraph: "Paragraph", + Heading: "Heading", + HorizontalRule: "HorizontalRule", + Emph: "Emph", + Strong: "Strong", + Del: "Del", + Link: "Link", + Image: "Image", + Text: "Text", + HTMLBlock: "HTMLBlock", + CodeBlock: "CodeBlock", + Softbreak: "Softbreak", + Hardbreak: "Hardbreak", + Code: "Code", + HTMLSpan: "HTMLSpan", + Table: "Table", + TableCell: "TableCell", + TableHead: "TableHead", + TableBody: "TableBody", + TableRow: "TableRow", +} + +func (t NodeType) String() string { + return nodeTypeNames[t] +} + +// ListData contains fields relevant to a List and Item node type. +type ListData struct { + ListFlags ListType + Tight bool // Skip

    s around list item data if true + BulletChar byte // '*', '+' or '-' in bullet lists + Delimiter byte // '.' or ')' after the number in ordered lists + RefLink []byte // If not nil, turns this list item into a footnote item and triggers different rendering + IsFootnotesList bool // This is a list of footnotes +} + +// LinkData contains fields relevant to a Link node type. +type LinkData struct { + Destination []byte // Destination is what goes into a href + Title []byte // Title is the tooltip thing that goes in a title attribute + NoteID int // NoteID contains a serial number of a footnote, zero if it's not a footnote + Footnote *Node // If it's a footnote, this is a direct link to the footnote Node. Otherwise nil. +} + +// CodeBlockData contains fields relevant to a CodeBlock node type. +type CodeBlockData struct { + IsFenced bool // Specifies whether it's a fenced code block or an indented one + Info []byte // This holds the info string + FenceChar byte + FenceLength int + FenceOffset int +} + +// TableCellData contains fields relevant to a TableCell node type. +type TableCellData struct { + IsHeader bool // This tells if it's under the header row + Align CellAlignFlags // This holds the value for align attribute +} + +// HeadingData contains fields relevant to a Heading node type. +type HeadingData struct { + Level int // This holds the heading level number + HeadingID string // This might hold heading ID, if present + IsTitleblock bool // Specifies whether it's a title block +} + +// Node is a single element in the abstract syntax tree of the parsed document. +// It holds connections to the structurally neighboring nodes and, for certain +// types of nodes, additional information that might be needed when rendering. 
+type Node struct { + Type NodeType // Determines the type of the node + Parent *Node // Points to the parent + FirstChild *Node // Points to the first child, if any + LastChild *Node // Points to the last child, if any + Prev *Node // Previous sibling; nil if it's the first child + Next *Node // Next sibling; nil if it's the last child + + Literal []byte // Text contents of the leaf nodes + + HeadingData // Populated if Type is Heading + ListData // Populated if Type is List + CodeBlockData // Populated if Type is CodeBlock + LinkData // Populated if Type is Link + TableCellData // Populated if Type is TableCell + + content []byte // Markdown content of the block nodes + open bool // Specifies an open block node that has not been finished to process yet +} + +// NewNode allocates a node of a specified type. +func NewNode(typ NodeType) *Node { + return &Node{ + Type: typ, + open: true, + } +} + +func (n *Node) String() string { + ellipsis := "" + snippet := n.Literal + if len(snippet) > 16 { + snippet = snippet[:16] + ellipsis = "..." + } + return fmt.Sprintf("%s: '%s%s'", n.Type, snippet, ellipsis) +} + +// Unlink removes node 'n' from the tree. +// It panics if the node is nil. +func (n *Node) Unlink() { + if n.Prev != nil { + n.Prev.Next = n.Next + } else if n.Parent != nil { + n.Parent.FirstChild = n.Next + } + if n.Next != nil { + n.Next.Prev = n.Prev + } else if n.Parent != nil { + n.Parent.LastChild = n.Prev + } + n.Parent = nil + n.Next = nil + n.Prev = nil +} + +// AppendChild adds a node 'child' as a child of 'n'. +// It panics if either node is nil. +func (n *Node) AppendChild(child *Node) { + child.Unlink() + child.Parent = n + if n.LastChild != nil { + n.LastChild.Next = child + child.Prev = n.LastChild + n.LastChild = child + } else { + n.FirstChild = child + n.LastChild = child + } +} + +// InsertBefore inserts 'sibling' immediately before 'n'. +// It panics if either node is nil. 
+func (n *Node) InsertBefore(sibling *Node) { + sibling.Unlink() + sibling.Prev = n.Prev + if sibling.Prev != nil { + sibling.Prev.Next = sibling + } + sibling.Next = n + n.Prev = sibling + sibling.Parent = n.Parent + if sibling.Prev == nil { + sibling.Parent.FirstChild = sibling + } +} + +// IsContainer returns true if 'n' can contain children. +func (n *Node) IsContainer() bool { + switch n.Type { + case Document: + fallthrough + case BlockQuote: + fallthrough + case List: + fallthrough + case Item: + fallthrough + case Paragraph: + fallthrough + case Heading: + fallthrough + case Emph: + fallthrough + case Strong: + fallthrough + case Del: + fallthrough + case Link: + fallthrough + case Image: + fallthrough + case Table: + fallthrough + case TableHead: + fallthrough + case TableBody: + fallthrough + case TableRow: + fallthrough + case TableCell: + return true + default: + return false + } +} + +// IsLeaf returns true if 'n' is a leaf node. +func (n *Node) IsLeaf() bool { + return !n.IsContainer() +} + +func (n *Node) canContain(t NodeType) bool { + if n.Type == List { + return t == Item + } + if n.Type == Document || n.Type == BlockQuote || n.Type == Item { + return t != Item + } + if n.Type == Table { + return t == TableHead || t == TableBody + } + if n.Type == TableHead || n.Type == TableBody { + return t == TableRow + } + if n.Type == TableRow { + return t == TableCell + } + return false +} + +// WalkStatus allows NodeVisitor to have some control over the tree traversal. +// It is returned from NodeVisitor and different values allow Node.Walk to +// decide which node to go to next. +type WalkStatus int + +const ( + // GoToNext is the default traversal of every node. + GoToNext WalkStatus = iota + // SkipChildren tells walker to skip all children of current node. + SkipChildren + // Terminate tells walker to terminate the traversal. + Terminate +) + +// NodeVisitor is a callback to be called when traversing the syntax tree. 
+// Called twice for every node: once with entering=true when the branch is +// first visited, then with entering=false after all the children are done. +type NodeVisitor func(node *Node, entering bool) WalkStatus + +// Walk is a convenience method that instantiates a walker and starts a +// traversal of subtree rooted at n. +func (n *Node) Walk(visitor NodeVisitor) { + w := newNodeWalker(n) + for w.current != nil { + status := visitor(w.current, w.entering) + switch status { + case GoToNext: + w.next() + case SkipChildren: + w.entering = false + w.next() + case Terminate: + return + } + } +} + +type nodeWalker struct { + current *Node + root *Node + entering bool +} + +func newNodeWalker(root *Node) *nodeWalker { + return &nodeWalker{ + current: root, + root: root, + entering: true, + } +} + +func (nw *nodeWalker) next() { + if (!nw.current.IsContainer() || !nw.entering) && nw.current == nw.root { + nw.current = nil + return + } + if nw.entering && nw.current.IsContainer() { + if nw.current.FirstChild != nil { + nw.current = nw.current.FirstChild + nw.entering = true + } else { + nw.entering = false + } + } else if nw.current.Next == nil { + nw.current = nw.current.Parent + nw.entering = false + } else { + nw.current = nw.current.Next + nw.entering = true + } +} + +func dump(ast *Node) { + fmt.Println(dumpString(ast)) +} + +func dumpR(ast *Node, depth int) string { + if ast == nil { + return "" + } + indent := bytes.Repeat([]byte("\t"), depth) + content := ast.Literal + if content == nil { + content = ast.content + } + result := fmt.Sprintf("%s%s(%q)\n", indent, ast.Type, content) + for n := ast.FirstChild; n != nil; n = n.Next { + result += dumpR(n, depth+1) + } + return result +} + +func dumpString(ast *Node) string { + return dumpR(ast, 0) +} diff --git a/vendor/github.com/russross/blackfriday/v2/smartypants.go b/vendor/github.com/russross/blackfriday/v2/smartypants.go new file mode 100644 index 000000000..3a220e942 --- /dev/null +++ 
b/vendor/github.com/russross/blackfriday/v2/smartypants.go @@ -0,0 +1,457 @@ +// +// Blackfriday Markdown Processor +// Available at http://github.com/russross/blackfriday +// +// Copyright © 2011 Russ Ross . +// Distributed under the Simplified BSD License. +// See README.md for details. +// + +// +// +// SmartyPants rendering +// +// + +package blackfriday + +import ( + "bytes" + "io" +) + +// SPRenderer is a struct containing state of a Smartypants renderer. +type SPRenderer struct { + inSingleQuote bool + inDoubleQuote bool + callbacks [256]smartCallback +} + +func wordBoundary(c byte) bool { + return c == 0 || isspace(c) || ispunct(c) +} + +func tolower(c byte) byte { + if c >= 'A' && c <= 'Z' { + return c - 'A' + 'a' + } + return c +} + +func isdigit(c byte) bool { + return c >= '0' && c <= '9' +} + +func smartQuoteHelper(out *bytes.Buffer, previousChar byte, nextChar byte, quote byte, isOpen *bool, addNBSP bool) bool { + // edge of the buffer is likely to be a tag that we don't get to see, + // so we treat it like text sometimes + + // enumerate all sixteen possibilities for (previousChar, nextChar) + // each can be one of {0, space, punct, other} + switch { + case previousChar == 0 && nextChar == 0: + // context is not any help here, so toggle + *isOpen = !*isOpen + case isspace(previousChar) && nextChar == 0: + // [ "] might be [ "foo...] + *isOpen = true + case ispunct(previousChar) && nextChar == 0: + // [!"] hmm... could be [Run!"] or [("...] + *isOpen = false + case /* isnormal(previousChar) && */ nextChar == 0: + // [a"] is probably a close + *isOpen = false + case previousChar == 0 && isspace(nextChar): + // [" ] might be [...foo" ] + *isOpen = false + case isspace(previousChar) && isspace(nextChar): + // [ " ] context is not any help here, so toggle + *isOpen = !*isOpen + case ispunct(previousChar) && isspace(nextChar): + // [!" 
] is probably a close + *isOpen = false + case /* isnormal(previousChar) && */ isspace(nextChar): + // [a" ] this is one of the easy cases + *isOpen = false + case previousChar == 0 && ispunct(nextChar): + // ["!] hmm... could be ["$1.95] or ["!...] + *isOpen = false + case isspace(previousChar) && ispunct(nextChar): + // [ "!] looks more like [ "$1.95] + *isOpen = true + case ispunct(previousChar) && ispunct(nextChar): + // [!"!] context is not any help here, so toggle + *isOpen = !*isOpen + case /* isnormal(previousChar) && */ ispunct(nextChar): + // [a"!] is probably a close + *isOpen = false + case previousChar == 0 /* && isnormal(nextChar) */ : + // ["a] is probably an open + *isOpen = true + case isspace(previousChar) /* && isnormal(nextChar) */ : + // [ "a] this is one of the easy cases + *isOpen = true + case ispunct(previousChar) /* && isnormal(nextChar) */ : + // [!"a] is probably an open + *isOpen = true + default: + // [a'b] maybe a contraction? + *isOpen = false + } + + // Note that with the limited lookahead, this non-breaking + // space will also be appended to single double quotes. 
+ if addNBSP && !*isOpen { + out.WriteString(" ") + } + + out.WriteByte('&') + if *isOpen { + out.WriteByte('l') + } else { + out.WriteByte('r') + } + out.WriteByte(quote) + out.WriteString("quo;") + + if addNBSP && *isOpen { + out.WriteString(" ") + } + + return true +} + +func (r *SPRenderer) smartSingleQuote(out *bytes.Buffer, previousChar byte, text []byte) int { + if len(text) >= 2 { + t1 := tolower(text[1]) + + if t1 == '\'' { + nextChar := byte(0) + if len(text) >= 3 { + nextChar = text[2] + } + if smartQuoteHelper(out, previousChar, nextChar, 'd', &r.inDoubleQuote, false) { + return 1 + } + } + + if (t1 == 's' || t1 == 't' || t1 == 'm' || t1 == 'd') && (len(text) < 3 || wordBoundary(text[2])) { + out.WriteString("’") + return 0 + } + + if len(text) >= 3 { + t2 := tolower(text[2]) + + if ((t1 == 'r' && t2 == 'e') || (t1 == 'l' && t2 == 'l') || (t1 == 'v' && t2 == 'e')) && + (len(text) < 4 || wordBoundary(text[3])) { + out.WriteString("’") + return 0 + } + } + } + + nextChar := byte(0) + if len(text) > 1 { + nextChar = text[1] + } + if smartQuoteHelper(out, previousChar, nextChar, 's', &r.inSingleQuote, false) { + return 0 + } + + out.WriteByte(text[0]) + return 0 +} + +func (r *SPRenderer) smartParens(out *bytes.Buffer, previousChar byte, text []byte) int { + if len(text) >= 3 { + t1 := tolower(text[1]) + t2 := tolower(text[2]) + + if t1 == 'c' && t2 == ')' { + out.WriteString("©") + return 2 + } + + if t1 == 'r' && t2 == ')' { + out.WriteString("®") + return 2 + } + + if len(text) >= 4 && t1 == 't' && t2 == 'm' && text[3] == ')' { + out.WriteString("™") + return 3 + } + } + + out.WriteByte(text[0]) + return 0 +} + +func (r *SPRenderer) smartDash(out *bytes.Buffer, previousChar byte, text []byte) int { + if len(text) >= 2 { + if text[1] == '-' { + out.WriteString("—") + return 1 + } + + if wordBoundary(previousChar) && wordBoundary(text[1]) { + out.WriteString("–") + return 0 + } + } + + out.WriteByte(text[0]) + return 0 +} + +func (r *SPRenderer) 
smartDashLatex(out *bytes.Buffer, previousChar byte, text []byte) int { + if len(text) >= 3 && text[1] == '-' && text[2] == '-' { + out.WriteString("—") + return 2 + } + if len(text) >= 2 && text[1] == '-' { + out.WriteString("–") + return 1 + } + + out.WriteByte(text[0]) + return 0 +} + +func (r *SPRenderer) smartAmpVariant(out *bytes.Buffer, previousChar byte, text []byte, quote byte, addNBSP bool) int { + if bytes.HasPrefix(text, []byte(""")) { + nextChar := byte(0) + if len(text) >= 7 { + nextChar = text[6] + } + if smartQuoteHelper(out, previousChar, nextChar, quote, &r.inDoubleQuote, addNBSP) { + return 5 + } + } + + if bytes.HasPrefix(text, []byte("�")) { + return 3 + } + + out.WriteByte('&') + return 0 +} + +func (r *SPRenderer) smartAmp(angledQuotes, addNBSP bool) func(*bytes.Buffer, byte, []byte) int { + var quote byte = 'd' + if angledQuotes { + quote = 'a' + } + + return func(out *bytes.Buffer, previousChar byte, text []byte) int { + return r.smartAmpVariant(out, previousChar, text, quote, addNBSP) + } +} + +func (r *SPRenderer) smartPeriod(out *bytes.Buffer, previousChar byte, text []byte) int { + if len(text) >= 3 && text[1] == '.' && text[2] == '.' { + out.WriteString("…") + return 2 + } + + if len(text) >= 5 && text[1] == ' ' && text[2] == '.' && text[3] == ' ' && text[4] == '.' 
{ + out.WriteString("…") + return 4 + } + + out.WriteByte(text[0]) + return 0 +} + +func (r *SPRenderer) smartBacktick(out *bytes.Buffer, previousChar byte, text []byte) int { + if len(text) >= 2 && text[1] == '`' { + nextChar := byte(0) + if len(text) >= 3 { + nextChar = text[2] + } + if smartQuoteHelper(out, previousChar, nextChar, 'd', &r.inDoubleQuote, false) { + return 1 + } + } + + out.WriteByte(text[0]) + return 0 +} + +func (r *SPRenderer) smartNumberGeneric(out *bytes.Buffer, previousChar byte, text []byte) int { + if wordBoundary(previousChar) && previousChar != '/' && len(text) >= 3 { + // is it of the form digits/digits(word boundary)?, i.e., \d+/\d+\b + // note: check for regular slash (/) or fraction slash (⁄, 0x2044, or 0xe2 81 84 in utf-8) + // and avoid changing dates like 1/23/2005 into fractions. + numEnd := 0 + for len(text) > numEnd && isdigit(text[numEnd]) { + numEnd++ + } + if numEnd == 0 { + out.WriteByte(text[0]) + return 0 + } + denStart := numEnd + 1 + if len(text) > numEnd+3 && text[numEnd] == 0xe2 && text[numEnd+1] == 0x81 && text[numEnd+2] == 0x84 { + denStart = numEnd + 3 + } else if len(text) < numEnd+2 || text[numEnd] != '/' { + out.WriteByte(text[0]) + return 0 + } + denEnd := denStart + for len(text) > denEnd && isdigit(text[denEnd]) { + denEnd++ + } + if denEnd == denStart { + out.WriteByte(text[0]) + return 0 + } + if len(text) == denEnd || wordBoundary(text[denEnd]) && text[denEnd] != '/' { + out.WriteString("") + out.Write(text[:numEnd]) + out.WriteString("") + out.Write(text[denStart:denEnd]) + out.WriteString("") + return denEnd - 1 + } + } + + out.WriteByte(text[0]) + return 0 +} + +func (r *SPRenderer) smartNumber(out *bytes.Buffer, previousChar byte, text []byte) int { + if wordBoundary(previousChar) && previousChar != '/' && len(text) >= 3 { + if text[0] == '1' && text[1] == '/' && text[2] == '2' { + if len(text) < 4 || wordBoundary(text[3]) && text[3] != '/' { + out.WriteString("½") + return 2 + } + } + + if text[0] == 
'1' && text[1] == '/' && text[2] == '4' { + if len(text) < 4 || wordBoundary(text[3]) && text[3] != '/' || (len(text) >= 5 && tolower(text[3]) == 't' && tolower(text[4]) == 'h') { + out.WriteString("¼") + return 2 + } + } + + if text[0] == '3' && text[1] == '/' && text[2] == '4' { + if len(text) < 4 || wordBoundary(text[3]) && text[3] != '/' || (len(text) >= 6 && tolower(text[3]) == 't' && tolower(text[4]) == 'h' && tolower(text[5]) == 's') { + out.WriteString("¾") + return 2 + } + } + } + + out.WriteByte(text[0]) + return 0 +} + +func (r *SPRenderer) smartDoubleQuoteVariant(out *bytes.Buffer, previousChar byte, text []byte, quote byte) int { + nextChar := byte(0) + if len(text) > 1 { + nextChar = text[1] + } + if !smartQuoteHelper(out, previousChar, nextChar, quote, &r.inDoubleQuote, false) { + out.WriteString(""") + } + + return 0 +} + +func (r *SPRenderer) smartDoubleQuote(out *bytes.Buffer, previousChar byte, text []byte) int { + return r.smartDoubleQuoteVariant(out, previousChar, text, 'd') +} + +func (r *SPRenderer) smartAngledDoubleQuote(out *bytes.Buffer, previousChar byte, text []byte) int { + return r.smartDoubleQuoteVariant(out, previousChar, text, 'a') +} + +func (r *SPRenderer) smartLeftAngle(out *bytes.Buffer, previousChar byte, text []byte) int { + i := 0 + + for i < len(text) && text[i] != '>' { + i++ + } + + out.Write(text[:i+1]) + return i +} + +type smartCallback func(out *bytes.Buffer, previousChar byte, text []byte) int + +// NewSmartypantsRenderer constructs a Smartypants renderer object. 
+func NewSmartypantsRenderer(flags HTMLFlags) *SPRenderer { + var ( + r SPRenderer + + smartAmpAngled = r.smartAmp(true, false) + smartAmpAngledNBSP = r.smartAmp(true, true) + smartAmpRegular = r.smartAmp(false, false) + smartAmpRegularNBSP = r.smartAmp(false, true) + + addNBSP = flags&SmartypantsQuotesNBSP != 0 + ) + + if flags&SmartypantsAngledQuotes == 0 { + r.callbacks['"'] = r.smartDoubleQuote + if !addNBSP { + r.callbacks['&'] = smartAmpRegular + } else { + r.callbacks['&'] = smartAmpRegularNBSP + } + } else { + r.callbacks['"'] = r.smartAngledDoubleQuote + if !addNBSP { + r.callbacks['&'] = smartAmpAngled + } else { + r.callbacks['&'] = smartAmpAngledNBSP + } + } + r.callbacks['\''] = r.smartSingleQuote + r.callbacks['('] = r.smartParens + if flags&SmartypantsDashes != 0 { + if flags&SmartypantsLatexDashes == 0 { + r.callbacks['-'] = r.smartDash + } else { + r.callbacks['-'] = r.smartDashLatex + } + } + r.callbacks['.'] = r.smartPeriod + if flags&SmartypantsFractions == 0 { + r.callbacks['1'] = r.smartNumber + r.callbacks['3'] = r.smartNumber + } else { + for ch := '1'; ch <= '9'; ch++ { + r.callbacks[ch] = r.smartNumberGeneric + } + } + r.callbacks['<'] = r.smartLeftAngle + r.callbacks['`'] = r.smartBacktick + return &r +} + +// Process is the entry point of the Smartypants renderer. 
+func (r *SPRenderer) Process(w io.Writer, text []byte) { + mark := 0 + for i := 0; i < len(text); i++ { + if action := r.callbacks[text[i]]; action != nil { + if i > mark { + w.Write(text[mark:i]) + } + previousChar := byte(0) + if i > 0 { + previousChar = text[i-1] + } + var tmp bytes.Buffer + i += action(&tmp, previousChar, text[i:]) + w.Write(tmp.Bytes()) + mark = i + 1 + } + } + if mark < len(text) { + w.Write(text[mark:]) + } +} diff --git a/vendor/github.com/safchain/ethtool/.gitignore b/vendor/github.com/safchain/ethtool/.gitignore new file mode 100644 index 000000000..db6cadffd --- /dev/null +++ b/vendor/github.com/safchain/ethtool/.gitignore @@ -0,0 +1,27 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof + +# Skip compiled example binary file +/example/example diff --git a/vendor/github.com/safchain/ethtool/.golangci.yml b/vendor/github.com/safchain/ethtool/.golangci.yml new file mode 100644 index 000000000..65552c98a --- /dev/null +++ b/vendor/github.com/safchain/ethtool/.golangci.yml @@ -0,0 +1,18 @@ +linters: + enable: + - gosimple + - gci + - gofmt + - misspell + - goimports + - staticcheck + - errcheck + - govet + - misspell + - gocritic +linters-settings: + gci: + sections: + - standard + - default + - prefix(github.com/safchain/ethtool) diff --git a/vendor/github.com/safchain/ethtool/.yamllint b/vendor/github.com/safchain/ethtool/.yamllint new file mode 100644 index 000000000..9862c5f78 --- /dev/null +++ b/vendor/github.com/safchain/ethtool/.yamllint @@ -0,0 +1,7 @@ +--- +extends: default + +rules: + document-start: disable + truthy: + check-keys: false diff --git a/vendor/github.com/safchain/ethtool/LICENSE b/vendor/github.com/safchain/ethtool/LICENSE new file mode 100644 index 000000000..3c83e6b88 --- 
/dev/null +++ b/vendor/github.com/safchain/ethtool/LICENSE @@ -0,0 +1,202 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. 
Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative 
Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright (c) 2015 The Ethtool Authors + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ diff --git a/vendor/github.com/safchain/ethtool/Makefile b/vendor/github.com/safchain/ethtool/Makefile new file mode 100644 index 000000000..beb5ca2c0 --- /dev/null +++ b/vendor/github.com/safchain/ethtool/Makefile @@ -0,0 +1,5 @@ +all: build + +build: + CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build + CGO_ENABLED=0 GOOS=darwin GOARCH=amd64 go build diff --git a/vendor/github.com/safchain/ethtool/README.md b/vendor/github.com/safchain/ethtool/README.md new file mode 100644 index 000000000..e44367582 --- /dev/null +++ b/vendor/github.com/safchain/ethtool/README.md @@ -0,0 +1,55 @@ +# ethtool go package # + +![Build Status](https://github.com/safchain/ethtool/actions/workflows/unittests.yml/badge.svg) +[![GoDoc](https://godoc.org/github.com/safchain/ethtool?status.svg)](https://godoc.org/github.com/safchain/ethtool) + + +The ethtool package aims to provide a library that provides easy access to the Linux SIOCETHTOOL ioctl operations. It can be used to retrieve information from a network device such as statistics, driver related information or even the peer of a VETH interface. + +# Installation + +```shell +go get github.com/safchain/ethtool +``` + +# How to use + +```go +package main + +import ( + "fmt" + + "github.com/safchain/ethtool" +) + +func main() { + ethHandle, err := ethtool.NewEthtool() + if err != nil { + panic(err.Error()) + } + defer ethHandle.Close() + + // Retrieve tx from eth0 + stats, err := ethHandle.Stats("eth0") + if err != nil { + panic(err.Error()) + } + fmt.Printf("TX: %d\n", stats["tx_bytes"]) + + // Retrieve peer index of a veth interface + stats, err = ethHandle.Stats("veth0") + if err != nil { + panic(err.Error()) + } + fmt.Printf("Peer Index: %d\n", stats["peer_ifindex"]) +} +``` + +## LICENSE ## + +Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. diff --git a/vendor/github.com/safchain/ethtool/ethtool.go b/vendor/github.com/safchain/ethtool/ethtool.go new file mode 100644 index 000000000..62df2c10b --- /dev/null +++ b/vendor/github.com/safchain/ethtool/ethtool.go @@ -0,0 +1,1110 @@ +/* + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + * + */ + +// The ethtool package aims to provide a library that provides easy access +// to the Linux SIOCETHTOOL ioctl operations. It can be used to retrieve information +// from a network device such as statistics, driver related information or even +// the peer of a VETH interface. 
+package ethtool + +import ( + "bytes" + "encoding/hex" + "fmt" + "strings" + "unsafe" + + "golang.org/x/sys/unix" +) + +// Maximum size of an interface name +const ( + IFNAMSIZ = 16 +) + +// ioctl ethtool request +const ( + SIOCETHTOOL = 0x8946 +) + +// ethtool stats related constants. +const ( + ETH_GSTRING_LEN = 32 + ETH_SS_STATS = 1 + ETH_SS_PRIV_FLAGS = 2 + ETH_SS_FEATURES = 4 + + // CMD supported + ETHTOOL_GSET = 0x00000001 /* Get settings. */ + ETHTOOL_SSET = 0x00000002 /* Set settings. */ + ETHTOOL_GWOL = 0x00000005 /* Get wake-on-lan options. */ + ETHTOOL_SWOL = 0x00000006 /* Set wake-on-lan options. */ + ETHTOOL_GDRVINFO = 0x00000003 /* Get driver info. */ + ETHTOOL_GMSGLVL = 0x00000007 /* Get driver message level */ + ETHTOOL_SMSGLVL = 0x00000008 /* Set driver msg level. */ + + // Get link status for host, i.e. whether the interface *and* the + // physical port (if there is one) are up (ethtool_value). + ETHTOOL_GLINK = 0x0000000a + ETHTOOL_GCOALESCE = 0x0000000e /* Get coalesce config */ + ETHTOOL_SCOALESCE = 0x0000000f /* Set coalesce config */ + ETHTOOL_GRINGPARAM = 0x00000010 /* Get ring parameters */ + ETHTOOL_SRINGPARAM = 0x00000011 /* Set ring parameters. */ + ETHTOOL_GPAUSEPARAM = 0x00000012 /* Get pause parameters */ + ETHTOOL_SPAUSEPARAM = 0x00000013 /* Set pause parameters. 
*/ + ETHTOOL_GSTRINGS = 0x0000001b /* Get specified string set */ + ETHTOOL_GSTATS = 0x0000001d /* Get NIC-specific statistics */ + ETHTOOL_GPERMADDR = 0x00000020 /* Get permanent hardware address */ + ETHTOOL_GFLAGS = 0x00000025 /* Get flags bitmap(ethtool_value) */ + ETHTOOL_GPFLAGS = 0x00000027 /* Get driver-private flags bitmap */ + ETHTOOL_SPFLAGS = 0x00000028 /* Set driver-private flags bitmap */ + ETHTOOL_GSSET_INFO = 0x00000037 /* Get string set info */ + ETHTOOL_GFEATURES = 0x0000003a /* Get device offload settings */ + ETHTOOL_SFEATURES = 0x0000003b /* Change device offload settings */ + ETHTOOL_GCHANNELS = 0x0000003c /* Get no of channels */ + ETHTOOL_SCHANNELS = 0x0000003d /* Set no of channels */ + ETHTOOL_GET_TS_INFO = 0x00000041 /* Get time stamping and PHC info */ + ETHTOOL_GMODULEINFO = 0x00000042 /* Get plug-in module information */ + ETHTOOL_GMODULEEEPROM = 0x00000043 /* Get plug-in module eeprom */ +) + +// MAX_GSTRINGS maximum number of stats entries that ethtool can +// retrieve currently. 
+const ( + MAX_GSTRINGS = 32768 + MAX_FEATURE_BLOCKS = (MAX_GSTRINGS + 32 - 1) / 32 + EEPROM_LEN = 640 + PERMADDR_LEN = 32 +) + +// ethtool sset_info related constants +const ( + MAX_SSET_INFO = 64 +) + +type ifreq struct { + ifr_name [IFNAMSIZ]byte + ifr_data uintptr +} + +// following structures comes from uapi/linux/ethtool.h +type ethtoolSsetInfo struct { + cmd uint32 + reserved uint32 + sset_mask uint64 + data [MAX_SSET_INFO]uint32 +} + +type ethtoolGetFeaturesBlock struct { + available uint32 + requested uint32 + active uint32 + never_changed uint32 +} + +type ethtoolGfeatures struct { + cmd uint32 + size uint32 + blocks [MAX_FEATURE_BLOCKS]ethtoolGetFeaturesBlock +} + +type ethtoolSetFeaturesBlock struct { + valid uint32 + requested uint32 +} + +type ethtoolSfeatures struct { + cmd uint32 + size uint32 + blocks [MAX_FEATURE_BLOCKS]ethtoolSetFeaturesBlock +} + +type ethtoolDrvInfo struct { + cmd uint32 + driver [32]byte + version [32]byte + fw_version [32]byte + bus_info [32]byte + erom_version [32]byte + reserved2 [12]byte + n_priv_flags uint32 + n_stats uint32 + testinfo_len uint32 + eedump_len uint32 + regdump_len uint32 +} + +// DrvInfo contains driver information +// ethtool.h v3.5: struct ethtool_drvinfo +type DrvInfo struct { + Cmd uint32 + Driver string + Version string + FwVersion string + BusInfo string + EromVersion string + Reserved2 string + NPrivFlags uint32 + NStats uint32 + TestInfoLen uint32 + EedumpLen uint32 + RegdumpLen uint32 +} + +// Channels contains the number of channels for a given interface. 
+type Channels struct { + Cmd uint32 + MaxRx uint32 + MaxTx uint32 + MaxOther uint32 + MaxCombined uint32 + RxCount uint32 + TxCount uint32 + OtherCount uint32 + CombinedCount uint32 +} + +// Coalesce is a coalesce config for an interface +type Coalesce struct { + Cmd uint32 + RxCoalesceUsecs uint32 + RxMaxCoalescedFrames uint32 + RxCoalesceUsecsIrq uint32 + RxMaxCoalescedFramesIrq uint32 + TxCoalesceUsecs uint32 + TxMaxCoalescedFrames uint32 + TxCoalesceUsecsIrq uint32 + TxMaxCoalescedFramesIrq uint32 + StatsBlockCoalesceUsecs uint32 + UseAdaptiveRxCoalesce uint32 + UseAdaptiveTxCoalesce uint32 + PktRateLow uint32 + RxCoalesceUsecsLow uint32 + RxMaxCoalescedFramesLow uint32 + TxCoalesceUsecsLow uint32 + TxMaxCoalescedFramesLow uint32 + PktRateHigh uint32 + RxCoalesceUsecsHigh uint32 + RxMaxCoalescedFramesHigh uint32 + TxCoalesceUsecsHigh uint32 + TxMaxCoalescedFramesHigh uint32 + RateSampleInterval uint32 +} + +// WoL options +const ( + WAKE_PHY = 1 << 0 + WAKE_UCAST = 1 << 1 + WAKE_MCAST = 1 << 2 + WAKE_BCAST = 1 << 3 + WAKE_ARP = 1 << 4 + WAKE_MAGIC = 1 << 5 + WAKE_MAGICSECURE = 1 << 6 // only meaningful if WAKE_MAGIC +) + +var WoLMap = map[uint32]string{ + WAKE_PHY: "p", // Wake on PHY activity + WAKE_UCAST: "u", // Wake on unicast messages + WAKE_MCAST: "m", // Wake on multicast messages + WAKE_BCAST: "b", // Wake on broadcast messages + WAKE_ARP: "a", // Wake on ARP + WAKE_MAGIC: "g", // Wake on MagicPacket™ + WAKE_MAGICSECURE: "s", // Enable SecureOn™ password for MagicPacket™ + // f Wake on filter(s) + // d Disable (wake on nothing). This option clears all previous options. 
+} + +// WakeOnLan contains WoL config for an interface +type WakeOnLan struct { + Cmd uint32 // ETHTOOL_GWOL or ETHTOOL_SWOL + Supported uint32 // r/o bitmask of WAKE_* flags for supported WoL modes + Opts uint32 // Bitmask of WAKE_* flags for enabled WoL modes +} + +// Timestamping options +// see: https://www.kernel.org/doc/Documentation/networking/timestamping.txt +const ( + SOF_TIMESTAMPING_TX_HARDWARE = (1 << 0) /* Request tx timestamps generated by the network adapter. */ + SOF_TIMESTAMPING_TX_SOFTWARE = (1 << 1) /* Request tx timestamps when data leaves the kernel. */ + SOF_TIMESTAMPING_RX_HARDWARE = (1 << 2) /* Request rx timestamps generated by the network adapter. */ + SOF_TIMESTAMPING_RX_SOFTWARE = (1 << 3) /* Request rx timestamps when data enters the kernel. */ + SOF_TIMESTAMPING_SOFTWARE = (1 << 4) /* Report any software timestamps when available. */ + SOF_TIMESTAMPING_SYS_HARDWARE = (1 << 5) /* This option is deprecated and ignored. */ + SOF_TIMESTAMPING_RAW_HARDWARE = (1 << 6) /* Report hardware timestamps. */ + SOF_TIMESTAMPING_OPT_ID = (1 << 7) /* Generate a unique identifier along with each packet. */ + SOF_TIMESTAMPING_TX_SCHED = (1 << 8) /* Request tx timestamps prior to entering the packet scheduler. */ + SOF_TIMESTAMPING_TX_ACK = (1 << 9) /* Request tx timestamps when all data in the send buffer has been acknowledged. */ + SOF_TIMESTAMPING_OPT_CMSG = (1 << 10) /* Support recv() cmsg for all timestamped packets. */ + SOF_TIMESTAMPING_OPT_TSONLY = (1 << 11) /* Applies to transmit timestamps only. */ + SOF_TIMESTAMPING_OPT_STATS = (1 << 12) /* Optional stats that are obtained along with the transmit timestamps. */ + SOF_TIMESTAMPING_OPT_PKTINFO = (1 << 13) /* Enable the SCM_TIMESTAMPING_PKTINFO control message for incoming packets with hardware timestamps. 
*/ + SOF_TIMESTAMPING_OPT_TX_SWHW = (1 << 14) /* Request both hardware and software timestamps for outgoing packets when SOF_TIMESTAMPING_TX_HARDWARE and SOF_TIMESTAMPING_TX_SOFTWARE are enabled at the same time. */ + SOF_TIMESTAMPING_BIND_PHC = (1 << 15) /* Bind the socket to a specific PTP Hardware Clock. */ +) + +const ( + /* + * No outgoing packet will need hardware time stamping; + * should a packet arrive which asks for it, no hardware + * time stamping will be done. + */ + HWTSTAMP_TX_OFF = iota + + /* + * Enables hardware time stamping for outgoing packets; + * the sender of the packet decides which are to be + * time stamped by setting %SOF_TIMESTAMPING_TX_SOFTWARE + * before sending the packet. + */ + HWTSTAMP_TX_ON + + /* + * Enables time stamping for outgoing packets just as + * HWTSTAMP_TX_ON does, but also enables time stamp insertion + * directly into Sync packets. In this case, transmitted Sync + * packets will not received a time stamp via the socket error + * queue. + */ + HWTSTAMP_TX_ONESTEP_SYNC + + /* + * Same as HWTSTAMP_TX_ONESTEP_SYNC, but also enables time + * stamp insertion directly into PDelay_Resp packets. In this + * case, neither transmitted Sync nor PDelay_Resp packets will + * receive a time stamp via the socket error queue. 
+ */ + HWTSTAMP_TX_ONESTEP_P2P +) + +const ( + HWTSTAMP_FILTER_NONE = iota /* time stamp no incoming packet at all */ + HWTSTAMP_FILTER_ALL /* time stamp any incoming packet */ + HWTSTAMP_FILTER_SOME /* return value: time stamp all packets requested plus some others */ + HWTSTAMP_FILTER_PTP_V1_L4_EVENT /* PTP v1, UDP, any kind of event packet */ + HWTSTAMP_FILTER_PTP_V1_L4_SYNC /* PTP v1, UDP, Sync packet */ + HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ /* PTP v1, UDP, Delay_req packet */ + HWTSTAMP_FILTER_PTP_V2_L4_EVENT /* PTP v2, UDP, any kind of event packet */ + HWTSTAMP_FILTER_PTP_V2_L4_SYNC /* PTP v2, UDP, Sync packet */ + HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ /* PTP v2, UDP, Delay_req packet */ + HWTSTAMP_FILTER_PTP_V2_L2_EVENT /* 802.AS1, Ethernet, any kind of event packet */ + HWTSTAMP_FILTER_PTP_V2_L2_SYNC /* 802.AS1, Ethernet, Sync packet */ + HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ /* 802.AS1, Ethernet, Delay_req packet */ + HWTSTAMP_FILTER_PTP_V2_EVENT /* PTP v2/802.AS1, any layer, any kind of event packet */ + HWTSTAMP_FILTER_PTP_V2_SYNC /* PTP v2/802.AS1, any layer, Sync packet */ + HWTSTAMP_FILTER_PTP_V2_DELAY_REQ /* PTP v2/802.AS1, any layer, Delay_req packet */ + HWTSTAMP_FILTER_NTP_ALL /* NTP, UDP, all versions and packet modes */ +) + +// TimestampingInformation contains PTP timetstapming information +type TimestampingInformation struct { + Cmd uint32 + SoTimestamping uint32 /* SOF_TIMESTAMPING_* bitmask */ + PhcIndex int32 + TxTypes uint32 /* HWTSTAMP_TX_* */ + txReserved [3]uint32 + RxFilters uint32 /* HWTSTAMP_FILTER_ */ + rxReserved [3]uint32 +} + +type ethtoolGStrings struct { + cmd uint32 + string_set uint32 + len uint32 + data [MAX_GSTRINGS * ETH_GSTRING_LEN]byte +} + +type ethtoolStats struct { + cmd uint32 + n_stats uint32 + data [MAX_GSTRINGS]uint64 +} + +type ethtoolEeprom struct { + cmd uint32 + magic uint32 + offset uint32 + len uint32 + data [EEPROM_LEN]byte +} + +type ethtoolModInfo struct { + cmd uint32 + tpe uint32 + eeprom_len uint32 + 
reserved [8]uint32 +} + +type ethtoolLink struct { + cmd uint32 + data uint32 +} + +type ethtoolPermAddr struct { + cmd uint32 + size uint32 + data [PERMADDR_LEN]byte +} + +// Ring is a ring config for an interface +type Ring struct { + Cmd uint32 + RxMaxPending uint32 + RxMiniMaxPending uint32 + RxJumboMaxPending uint32 + TxMaxPending uint32 + RxPending uint32 + RxMiniPending uint32 + RxJumboPending uint32 + TxPending uint32 +} + +// Pause is a pause config for an interface +type Pause struct { + Cmd uint32 + Autoneg uint32 + RxPause uint32 + TxPause uint32 +} + +// Ethtool is a struct that contains the file descriptor for the ethtool +type Ethtool struct { + fd int +} + +// Convert zero-terminated array of chars (string in C) to a Go string. +func goString(s []byte) string { + strEnd := bytes.IndexByte(s, 0) + if strEnd == -1 { + return string(s) + } + return string(s[:strEnd]) +} + +// DriverName returns the driver name of the given interface name. +func (e *Ethtool) DriverName(intf string) (string, error) { + info, err := e.getDriverInfo(intf) + if err != nil { + return "", err + } + return goString(info.driver[:]), nil +} + +// BusInfo returns the bus information of the given interface name. +func (e *Ethtool) BusInfo(intf string) (string, error) { + info, err := e.getDriverInfo(intf) + if err != nil { + return "", err + } + return goString(info.bus_info[:]), nil +} + +// ModuleEeprom returns Eeprom information of the given interface name. +func (e *Ethtool) ModuleEeprom(intf string) ([]byte, error) { + eeprom, _, err := e.getModuleEeprom(intf) + if err != nil { + return nil, err + } + + return eeprom.data[:eeprom.len], nil +} + +// ModuleEeprom returns Eeprom information of the given interface name. 
+func (e *Ethtool) ModuleEepromHex(intf string) (string, error) { + eeprom, _, err := e.getModuleEeprom(intf) + if err != nil { + return "", err + } + + return hex.EncodeToString(eeprom.data[:eeprom.len]), nil +} + +// DriverInfo returns driver information of the given interface name. +func (e *Ethtool) DriverInfo(intf string) (DrvInfo, error) { + i, err := e.getDriverInfo(intf) + if err != nil { + return DrvInfo{}, err + } + + drvInfo := DrvInfo{ + Cmd: i.cmd, + Driver: goString(i.driver[:]), + Version: goString(i.version[:]), + FwVersion: goString(i.fw_version[:]), + BusInfo: goString(i.bus_info[:]), + EromVersion: goString(i.erom_version[:]), + Reserved2: goString(i.reserved2[:]), + NPrivFlags: i.n_priv_flags, + NStats: i.n_stats, + TestInfoLen: i.testinfo_len, + EedumpLen: i.eedump_len, + RegdumpLen: i.regdump_len, + } + + return drvInfo, nil +} + +// GetChannels returns the number of channels for the given interface name. +func (e *Ethtool) GetChannels(intf string) (Channels, error) { + channels, err := e.getChannels(intf) + if err != nil { + return Channels{}, err + } + + return channels, nil +} + +// SetChannels sets the number of channels for the given interface name and +// returns the new number of channels. +func (e *Ethtool) SetChannels(intf string, channels Channels) (Channels, error) { + channels, err := e.setChannels(intf, channels) + if err != nil { + return Channels{}, err + } + + return channels, nil +} + +// GetCoalesce returns the coalesce config for the given interface name. +func (e *Ethtool) GetCoalesce(intf string) (Coalesce, error) { + coalesce, err := e.getCoalesce(intf) + if err != nil { + return Coalesce{}, err + } + return coalesce, nil +} + +// SetCoalesce sets the coalesce config for the given interface name. 
+func (e *Ethtool) SetCoalesce(intf string, coalesce Coalesce) (Coalesce, error) { + coalesce, err := e.setCoalesce(intf, coalesce) + if err != nil { + return Coalesce{}, err + } + return coalesce, nil +} + +// GetTimestampingInformation returns the PTP timestamping information for the given interface name. +func (e *Ethtool) GetTimestampingInformation(intf string) (TimestampingInformation, error) { + ts, err := e.getTimestampingInformation(intf) + if err != nil { + return TimestampingInformation{}, err + } + return ts, nil +} + +// PermAddr returns permanent address of the given interface name. +func (e *Ethtool) PermAddr(intf string) (string, error) { + permAddr, err := e.getPermAddr(intf) + if err != nil { + return "", err + } + + if permAddr.data[0] == 0 && permAddr.data[1] == 0 && + permAddr.data[2] == 0 && permAddr.data[3] == 0 && + permAddr.data[4] == 0 && permAddr.data[5] == 0 { + return "", nil + } + + return fmt.Sprintf("%x:%x:%x:%x:%x:%x", + permAddr.data[0:1], + permAddr.data[1:2], + permAddr.data[2:3], + permAddr.data[3:4], + permAddr.data[4:5], + permAddr.data[5:6], + ), nil +} + +// GetWakeOnLan returns the WoL config for the given interface name. +func (e *Ethtool) GetWakeOnLan(intf string) (WakeOnLan, error) { + wol := WakeOnLan{ + Cmd: ETHTOOL_GWOL, + } + + if err := e.ioctl(intf, uintptr(unsafe.Pointer(&wol))); err != nil { + return WakeOnLan{}, err + } + + return wol, nil +} + +// SetWakeOnLan sets the WoL config for the given interface name and +// returns the new WoL config. 
+func (e *Ethtool) SetWakeOnLan(intf string, wol WakeOnLan) (WakeOnLan, error) { + wol.Cmd = ETHTOOL_SWOL + + if err := e.ioctl(intf, uintptr(unsafe.Pointer(&wol))); err != nil { + return WakeOnLan{}, err + } + + return wol, nil +} + +func (e *Ethtool) ioctl(intf string, data uintptr) error { + var name [IFNAMSIZ]byte + copy(name[:], []byte(intf)) + + ifr := ifreq{ + ifr_name: name, + ifr_data: data, + } + + _, _, ep := unix.Syscall(unix.SYS_IOCTL, uintptr(e.fd), SIOCETHTOOL, uintptr(unsafe.Pointer(&ifr))) + if ep != 0 { + return ep + } + + return nil +} + +func (e *Ethtool) getDriverInfo(intf string) (ethtoolDrvInfo, error) { + drvinfo := ethtoolDrvInfo{ + cmd: ETHTOOL_GDRVINFO, + } + + if err := e.ioctl(intf, uintptr(unsafe.Pointer(&drvinfo))); err != nil { + return ethtoolDrvInfo{}, err + } + + return drvinfo, nil +} + +func (e *Ethtool) getChannels(intf string) (Channels, error) { + channels := Channels{ + Cmd: ETHTOOL_GCHANNELS, + } + + if err := e.ioctl(intf, uintptr(unsafe.Pointer(&channels))); err != nil { + return Channels{}, err + } + + return channels, nil +} + +func (e *Ethtool) setChannels(intf string, channels Channels) (Channels, error) { + channels.Cmd = ETHTOOL_SCHANNELS + + if err := e.ioctl(intf, uintptr(unsafe.Pointer(&channels))); err != nil { + return Channels{}, err + } + + return channels, nil +} + +func (e *Ethtool) getCoalesce(intf string) (Coalesce, error) { + coalesce := Coalesce{ + Cmd: ETHTOOL_GCOALESCE, + } + + if err := e.ioctl(intf, uintptr(unsafe.Pointer(&coalesce))); err != nil { + return Coalesce{}, err + } + + return coalesce, nil +} + +func (e *Ethtool) setCoalesce(intf string, coalesce Coalesce) (Coalesce, error) { + coalesce.Cmd = ETHTOOL_SCOALESCE + + if err := e.ioctl(intf, uintptr(unsafe.Pointer(&coalesce))); err != nil { + return Coalesce{}, err + } + + return coalesce, nil +} + +func (e *Ethtool) getTimestampingInformation(intf string) (TimestampingInformation, error) { + ts := TimestampingInformation{ + Cmd: 
ETHTOOL_GET_TS_INFO, + } + + if err := e.ioctl(intf, uintptr(unsafe.Pointer(&ts))); err != nil { + return TimestampingInformation{}, err + } + + return ts, nil +} + +func (e *Ethtool) getPermAddr(intf string) (ethtoolPermAddr, error) { + permAddr := ethtoolPermAddr{ + cmd: ETHTOOL_GPERMADDR, + size: PERMADDR_LEN, + } + + if err := e.ioctl(intf, uintptr(unsafe.Pointer(&permAddr))); err != nil { + return ethtoolPermAddr{}, err + } + + return permAddr, nil +} + +func (e *Ethtool) getModuleEeprom(intf string) (ethtoolEeprom, ethtoolModInfo, error) { + modInfo := ethtoolModInfo{ + cmd: ETHTOOL_GMODULEINFO, + } + + if err := e.ioctl(intf, uintptr(unsafe.Pointer(&modInfo))); err != nil { + return ethtoolEeprom{}, ethtoolModInfo{}, err + } + + eeprom := ethtoolEeprom{ + cmd: ETHTOOL_GMODULEEEPROM, + len: modInfo.eeprom_len, + offset: 0, + } + + if modInfo.eeprom_len > EEPROM_LEN { + return ethtoolEeprom{}, ethtoolModInfo{}, fmt.Errorf("eeprom size: %d is larger than buffer size: %d", modInfo.eeprom_len, EEPROM_LEN) + } + + if err := e.ioctl(intf, uintptr(unsafe.Pointer(&eeprom))); err != nil { + return ethtoolEeprom{}, ethtoolModInfo{}, err + } + + return eeprom, modInfo, nil +} + +// GetRing retrieves ring parameters of the given interface name. +func (e *Ethtool) GetRing(intf string) (Ring, error) { + ring := Ring{ + Cmd: ETHTOOL_GRINGPARAM, + } + + if err := e.ioctl(intf, uintptr(unsafe.Pointer(&ring))); err != nil { + return Ring{}, err + } + + return ring, nil +} + +// SetRing sets ring parameters of the given interface name. +func (e *Ethtool) SetRing(intf string, ring Ring) (Ring, error) { + ring.Cmd = ETHTOOL_SRINGPARAM + + if err := e.ioctl(intf, uintptr(unsafe.Pointer(&ring))); err != nil { + return Ring{}, err + } + + return ring, nil +} + +// GetPause retrieves pause parameters of the given interface name. 
+func (e *Ethtool) GetPause(intf string) (Pause, error) { + pause := Pause{ + Cmd: ETHTOOL_GPAUSEPARAM, + } + + if err := e.ioctl(intf, uintptr(unsafe.Pointer(&pause))); err != nil { + return Pause{}, err + } + + return pause, nil +} + +// SetPause sets pause parameters of the given interface name. +func (e *Ethtool) SetPause(intf string, pause Pause) (Pause, error) { + pause.Cmd = ETHTOOL_SPAUSEPARAM + + if err := e.ioctl(intf, uintptr(unsafe.Pointer(&pause))); err != nil { + return Pause{}, err + } + + return pause, nil +} + +func isFeatureBitSet(blocks [MAX_FEATURE_BLOCKS]ethtoolGetFeaturesBlock, index uint) bool { + return (blocks)[index/32].active&(1<<(index%32)) != 0 +} + +// FeatureState contains the state of a feature. +type FeatureState struct { + Available bool + Requested bool + Active bool + NeverChanged bool +} + +func getFeatureStateBits(blocks [MAX_FEATURE_BLOCKS]ethtoolGetFeaturesBlock, index uint) FeatureState { + return FeatureState{ + Available: (blocks)[index/32].available&(1<<(index%32)) != 0, + Requested: (blocks)[index/32].requested&(1<<(index%32)) != 0, + Active: (blocks)[index/32].active&(1<<(index%32)) != 0, + NeverChanged: (blocks)[index/32].never_changed&(1<<(index%32)) != 0, + } +} + +func setFeatureBit(blocks *[MAX_FEATURE_BLOCKS]ethtoolSetFeaturesBlock, index uint, value bool) { + blockIndex, bitIndex := index/32, index%32 + + blocks[blockIndex].valid |= 1 << bitIndex + + if value { + blocks[blockIndex].requested |= 1 << bitIndex + } else { + blocks[blockIndex].requested &= ^(1 << bitIndex) + } +} + +func (e *Ethtool) getNames(intf string, mask int) (map[string]uint, error) { + ssetInfo := ethtoolSsetInfo{ + cmd: ETHTOOL_GSSET_INFO, + sset_mask: 1 << mask, + data: [MAX_SSET_INFO]uint32{}, + } + + if err := e.ioctl(intf, uintptr(unsafe.Pointer(&ssetInfo))); err != nil { + return nil, err + } + + /* we only read data on first index because single bit was set in sset_mask(0x10) */ + length := ssetInfo.data[0] + if length == 0 { + return 
map[string]uint{}, nil + } else if length > MAX_GSTRINGS { + return nil, fmt.Errorf("ethtool currently doesn't support more than %d entries, received %d", MAX_GSTRINGS, length) + } + + gstrings := ethtoolGStrings{ + cmd: ETHTOOL_GSTRINGS, + string_set: uint32(mask), + len: length, + data: [MAX_GSTRINGS * ETH_GSTRING_LEN]byte{}, + } + + if err := e.ioctl(intf, uintptr(unsafe.Pointer(&gstrings))); err != nil { + return nil, err + } + + result := make(map[string]uint) + for i := 0; i != int(length); i++ { + b := gstrings.data[i*ETH_GSTRING_LEN : i*ETH_GSTRING_LEN+ETH_GSTRING_LEN] + key := goString(b) + if key != "" { + result[key] = uint(i) + } + } + + return result, nil +} + +// FeatureNames shows supported features by their name. +func (e *Ethtool) FeatureNames(intf string) (map[string]uint, error) { + return e.getNames(intf, ETH_SS_FEATURES) +} + +// Features retrieves features of the given interface name. +func (e *Ethtool) Features(intf string) (map[string]bool, error) { + names, err := e.FeatureNames(intf) + if err != nil { + return nil, err + } + + length := uint32(len(names)) + if length == 0 { + return map[string]bool{}, nil + } + + features := ethtoolGfeatures{ + cmd: ETHTOOL_GFEATURES, + size: (length + 32 - 1) / 32, + } + + if err := e.ioctl(intf, uintptr(unsafe.Pointer(&features))); err != nil { + return nil, err + } + + result := make(map[string]bool, length) + for key, index := range names { + result[key] = isFeatureBitSet(features.blocks, index) + } + + return result, nil +} + +// FeaturesWithState retrieves features of the given interface name, +// with extra flags to explain if they can be enabled +func (e *Ethtool) FeaturesWithState(intf string) (map[string]FeatureState, error) { + names, err := e.FeatureNames(intf) + if err != nil { + return nil, err + } + + length := uint32(len(names)) + if length == 0 { + return map[string]FeatureState{}, nil + } + + features := ethtoolGfeatures{ + cmd: ETHTOOL_GFEATURES, + size: (length + 32 - 1) / 32, + } + + 
if err := e.ioctl(intf, uintptr(unsafe.Pointer(&features))); err != nil { + return nil, err + } + + var result = make(map[string]FeatureState, length) + for key, index := range names { + result[key] = getFeatureStateBits(features.blocks, index) + } + + return result, nil +} + +// Change requests a change in the given device's features. +func (e *Ethtool) Change(intf string, config map[string]bool) error { + names, err := e.FeatureNames(intf) + if err != nil { + return err + } + + length := uint32(len(names)) + + features := ethtoolSfeatures{ + cmd: ETHTOOL_SFEATURES, + size: (length + 32 - 1) / 32, + } + + for key, value := range config { + if index, ok := names[key]; ok { + setFeatureBit(&features.blocks, index, value) + } else { + return fmt.Errorf("unsupported feature %q", key) + } + } + + return e.ioctl(intf, uintptr(unsafe.Pointer(&features))) +} + +// PrivFlagsNames shows supported private flags by their name. +func (e *Ethtool) PrivFlagsNames(intf string) (map[string]uint, error) { + return e.getNames(intf, ETH_SS_PRIV_FLAGS) +} + +// PrivFlags retrieves private flags of the given interface name. 
+func (e *Ethtool) PrivFlags(intf string) (map[string]bool, error) { + names, err := e.PrivFlagsNames(intf) + if err != nil { + return nil, err + } + + length := uint32(len(names)) + if length == 0 { + return map[string]bool{}, nil + } + + var val ethtoolLink + val.cmd = ETHTOOL_GPFLAGS + if err := e.ioctl(intf, uintptr(unsafe.Pointer(&val))); err != nil { + return nil, err + } + + result := make(map[string]bool, length) + for name, mask := range names { + result[name] = val.data&(1< MAX_GSTRINGS*ETH_GSTRING_LEN { + return nil, fmt.Errorf("ethtool currently doesn't support more than %d entries, received %d", MAX_GSTRINGS, drvinfo.n_stats) + } + + gstrings := ethtoolGStrings{ + cmd: ETHTOOL_GSTRINGS, + string_set: ETH_SS_STATS, + len: drvinfo.n_stats, + data: [MAX_GSTRINGS * ETH_GSTRING_LEN]byte{}, + } + + if err := e.ioctl(intf, uintptr(unsafe.Pointer(&gstrings))); err != nil { + return nil, err + } + + stats := ethtoolStats{ + cmd: ETHTOOL_GSTATS, + n_stats: drvinfo.n_stats, + data: [MAX_GSTRINGS]uint64{}, + } + + if err := e.ioctl(intf, uintptr(unsafe.Pointer(&stats))); err != nil { + return nil, err + } + + result := make(map[string]uint64) + for i := 0; i != int(drvinfo.n_stats); i++ { + b := gstrings.data[i*ETH_GSTRING_LEN : i*ETH_GSTRING_LEN+ETH_GSTRING_LEN] + strEnd := strings.Index(string(b), "\x00") + if strEnd == -1 { + strEnd = ETH_GSTRING_LEN + } + key := string(b[:strEnd]) + if len(key) != 0 { + result[key] = stats.data[i] + } + } + + return result, nil +} + +// Close closes the ethool handler +func (e *Ethtool) Close() { + unix.Close(e.fd) +} + +// NewEthtool returns a new ethtool handler +func NewEthtool() (*Ethtool, error) { + fd, err := unix.Socket(unix.AF_INET, unix.SOCK_DGRAM|unix.SOCK_CLOEXEC, unix.IPPROTO_IP) + if err != nil { + return nil, err + } + + return &Ethtool{ + fd: int(fd), + }, nil +} + +// BusInfo returns bus information of the given interface name. 
+func BusInfo(intf string) (string, error) { + e, err := NewEthtool() + if err != nil { + return "", err + } + defer e.Close() + return e.BusInfo(intf) +} + +// DriverName returns the driver name of the given interface name. +func DriverName(intf string) (string, error) { + e, err := NewEthtool() + if err != nil { + return "", err + } + defer e.Close() + return e.DriverName(intf) +} + +// Stats retrieves stats of the given interface name. +func Stats(intf string) (map[string]uint64, error) { + e, err := NewEthtool() + if err != nil { + return nil, err + } + defer e.Close() + return e.Stats(intf) +} + +// PermAddr returns permanent address of the given interface name. +func PermAddr(intf string) (string, error) { + e, err := NewEthtool() + if err != nil { + return "", err + } + defer e.Close() + return e.PermAddr(intf) +} + +func supportedSpeeds(mask uint64) (ret []struct { + name string + mask uint64 + speed uint64 +}) { + for _, mode := range supportedCapabilities { + if ((1 << mode.mask) & mask) != 0 { + ret = append(ret, mode) + } + } + return ret +} + +// SupportedLinkModes returns the names of the link modes supported by the interface. +func SupportedLinkModes(mask uint64) []string { + var ret []string + for _, mode := range supportedSpeeds(mask) { + ret = append(ret, mode.name) + } + return ret +} + +// SupportedSpeed returns the maximum capacity of this interface. +func SupportedSpeed(mask uint64) uint64 { + var ret uint64 + for _, mode := range supportedSpeeds(mask) { + if mode.speed > ret { + ret = mode.speed + } + } + return ret +} diff --git a/vendor/github.com/safchain/ethtool/ethtool_cmd.go b/vendor/github.com/safchain/ethtool/ethtool_cmd.go new file mode 100644 index 000000000..09499fea8 --- /dev/null +++ b/vendor/github.com/safchain/ethtool/ethtool_cmd.go @@ -0,0 +1,207 @@ +/* + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + * + */ + +// Package ethtool aims to provide a library giving a simple access to the +// Linux SIOCETHTOOL ioctl operations. It can be used to retrieve informations +// from a network device like statistics, driver related informations or +// even the peer of a VETH interface. +package ethtool + +import ( + "math" + "reflect" + "unsafe" + + "golang.org/x/sys/unix" +) + +// EthtoolCmd is the Go version of the Linux kerne ethtool_cmd struct +// see ethtool.c +type EthtoolCmd struct { + Cmd uint32 + Supported uint32 + Advertising uint32 + Speed uint16 + Duplex uint8 + Port uint8 + Phy_address uint8 + Transceiver uint8 + Autoneg uint8 + Mdio_support uint8 + Maxtxpkt uint32 + Maxrxpkt uint32 + Speed_hi uint16 + Eth_tp_mdix uint8 + Reserved2 uint8 + Lp_advertising uint32 + Reserved [2]uint32 +} + +// CmdGet returns the interface settings in the receiver struct +// and returns speed +func (ecmd *EthtoolCmd) CmdGet(intf string) (uint32, error) { + e, err := NewEthtool() + if err != nil { + return 0, err + } + defer e.Close() + return e.CmdGet(ecmd, intf) +} + +// CmdSet sets and returns the settings in the receiver struct +// and returns speed +func (ecmd *EthtoolCmd) CmdSet(intf string) (uint32, error) { + e, err := NewEthtool() + if err != nil { + return 0, err + } + defer 
e.Close() + return e.CmdSet(ecmd, intf) +} + +func (f *EthtoolCmd) reflect(retv *map[string]uint64) { + val := reflect.ValueOf(f).Elem() + + for i := 0; i < val.NumField(); i++ { + valueField := val.Field(i) + typeField := val.Type().Field(i) + + t := valueField.Interface() + // tt := reflect.TypeOf(t) + // fmt.Printf(" t %T %v tt %T %v\n", t, t, tt, tt) + switch tt := t.(type) { + case uint32: + // fmt.Printf(" t is uint32\n") + (*retv)[typeField.Name] = uint64(tt) + case uint16: + (*retv)[typeField.Name] = uint64(tt) + case uint8: + (*retv)[typeField.Name] = uint64(tt) + case int32: + (*retv)[typeField.Name] = uint64(tt) + case int16: + (*retv)[typeField.Name] = uint64(tt) + case int8: + (*retv)[typeField.Name] = uint64(tt) + default: + (*retv)[typeField.Name+"_unknown_type"] = 0 + } + } +} + +// CmdGet returns the interface settings in the receiver struct +// and returns speed +func (e *Ethtool) CmdGet(ecmd *EthtoolCmd, intf string) (uint32, error) { + ecmd.Cmd = ETHTOOL_GSET + + var name [IFNAMSIZ]byte + copy(name[:], []byte(intf)) + + ifr := ifreq{ + ifr_name: name, + ifr_data: uintptr(unsafe.Pointer(ecmd)), + } + + _, _, ep := unix.Syscall(unix.SYS_IOCTL, uintptr(e.fd), + SIOCETHTOOL, uintptr(unsafe.Pointer(&ifr))) + if ep != 0 { + return 0, ep + } + + var speedval uint32 = (uint32(ecmd.Speed_hi) << 16) | + (uint32(ecmd.Speed) & 0xffff) + if speedval == math.MaxUint16 { + speedval = math.MaxUint32 + } + + return speedval, nil +} + +// CmdSet sets and returns the settings in the receiver struct +// and returns speed +func (e *Ethtool) CmdSet(ecmd *EthtoolCmd, intf string) (uint32, error) { + ecmd.Cmd = ETHTOOL_SSET + + var name [IFNAMSIZ]byte + copy(name[:], []byte(intf)) + + ifr := ifreq{ + ifr_name: name, + ifr_data: uintptr(unsafe.Pointer(ecmd)), + } + + _, _, ep := unix.Syscall(unix.SYS_IOCTL, uintptr(e.fd), + SIOCETHTOOL, uintptr(unsafe.Pointer(&ifr))) + if ep != 0 { + return 0, unix.Errno(ep) + } + + var speedval uint32 = (uint32(ecmd.Speed_hi) << 16) | 
+ (uint32(ecmd.Speed) & 0xffff) + if speedval == math.MaxUint16 { + speedval = math.MaxUint32 + } + + return speedval, nil +} + +// CmdGetMapped returns the interface settings in a map +func (e *Ethtool) CmdGetMapped(intf string) (map[string]uint64, error) { + ecmd := EthtoolCmd{ + Cmd: ETHTOOL_GSET, + } + + var name [IFNAMSIZ]byte + copy(name[:], []byte(intf)) + + ifr := ifreq{ + ifr_name: name, + ifr_data: uintptr(unsafe.Pointer(&ecmd)), + } + + _, _, ep := unix.Syscall(unix.SYS_IOCTL, uintptr(e.fd), + SIOCETHTOOL, uintptr(unsafe.Pointer(&ifr))) + if ep != 0 { + return nil, ep + } + + result := make(map[string]uint64) + + // ref https://gist.github.com/drewolson/4771479 + // Golang Reflection Example + ecmd.reflect(&result) + + var speedval uint32 = (uint32(ecmd.Speed_hi) << 16) | + (uint32(ecmd.Speed) & 0xffff) + result["speed"] = uint64(speedval) + + return result, nil +} + +// CmdGetMapped returns the interface settings in a map +func CmdGetMapped(intf string) (map[string]uint64, error) { + e, err := NewEthtool() + if err != nil { + return nil, err + } + defer e.Close() + return e.CmdGetMapped(intf) +} diff --git a/vendor/github.com/safchain/ethtool/ethtool_darwin.go b/vendor/github.com/safchain/ethtool/ethtool_darwin.go new file mode 100644 index 000000000..721a214c4 --- /dev/null +++ b/vendor/github.com/safchain/ethtool/ethtool_darwin.go @@ -0,0 +1,30 @@ +/* + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + * + */ + +package ethtool + +var supportedCapabilities = []struct { + name string + mask uint64 + speed uint64 +}{ + // no supported capabilities on darwin +} diff --git a/vendor/github.com/safchain/ethtool/ethtool_linux.go b/vendor/github.com/safchain/ethtool/ethtool_linux.go new file mode 100644 index 000000000..70fb8d718 --- /dev/null +++ b/vendor/github.com/safchain/ethtool/ethtool_linux.go @@ -0,0 +1,56 @@ +/* + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ * + */ + +package ethtool + +import ( + "golang.org/x/sys/unix" +) + +var supportedCapabilities = []struct { + name string + mask uint64 + speed uint64 +}{ + {"10baseT_Half", unix.ETHTOOL_LINK_MODE_10baseT_Half_BIT, 10_000_000}, + {"10baseT_Full", unix.ETHTOOL_LINK_MODE_10baseT_Full_BIT, 10_000_000}, + {"100baseT_Half", unix.ETHTOOL_LINK_MODE_100baseT_Half_BIT, 100_000_000}, + {"100baseT_Full", unix.ETHTOOL_LINK_MODE_100baseT_Full_BIT, 100_000_000}, + {"1000baseT_Half", unix.ETHTOOL_LINK_MODE_1000baseT_Half_BIT, 1_000_000_000}, + {"1000baseT_Full", unix.ETHTOOL_LINK_MODE_1000baseT_Full_BIT, 1_000_000_000}, + {"10000baseT_Full", unix.ETHTOOL_LINK_MODE_10000baseT_Full_BIT, 10_000_000_000}, + {"2500baseT_Full", unix.ETHTOOL_LINK_MODE_2500baseT_Full_BIT, 2_500_000_000}, + {"1000baseKX_Full", unix.ETHTOOL_LINK_MODE_1000baseKX_Full_BIT, 1_000_000_000}, + {"10000baseKX_Full", unix.ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT, 10_000_000_000}, + {"10000baseKR_Full", unix.ETHTOOL_LINK_MODE_10000baseKR_Full_BIT, 10_000_000_000}, + {"10000baseR_FEC", unix.ETHTOOL_LINK_MODE_10000baseR_FEC_BIT, 10_000_000_000}, + {"20000baseMLD2_Full", unix.ETHTOOL_LINK_MODE_20000baseMLD2_Full_BIT, 20_000_000_000}, + {"20000baseKR2_Full", unix.ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT, 20_000_000_000}, + {"40000baseKR4_Full", unix.ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT, 40_000_000_000}, + {"40000baseCR4_Full", unix.ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT, 40_000_000_000}, + {"40000baseSR4_Full", unix.ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT, 40_000_000_000}, + {"40000baseLR4_Full", unix.ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT, 40_000_000_000}, + {"56000baseKR4_Full", unix.ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT, 56_000_000_000}, + {"56000baseCR4_Full", unix.ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT, 56_000_000_000}, + {"56000baseSR4_Full", unix.ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT, 56_000_000_000}, + {"56000baseLR4_Full", unix.ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT, 56_000_000_000}, + 
{"25000baseCR_Full", unix.ETHTOOL_LINK_MODE_25000baseCR_Full_BIT, 25_000_000_000}, +} diff --git a/vendor/github.com/safchain/ethtool/ethtool_msglvl.go b/vendor/github.com/safchain/ethtool/ethtool_msglvl.go new file mode 100644 index 000000000..1f6e338cf --- /dev/null +++ b/vendor/github.com/safchain/ethtool/ethtool_msglvl.go @@ -0,0 +1,114 @@ +/* + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + * + */ + +// Package ethtool aims to provide a library giving a simple access to the +// Linux SIOCETHTOOL ioctl operations. It can be used to retrieve informations +// from a network device like statistics, driver related informations or +// even the peer of a VETH interface. +package ethtool + +import ( + "unsafe" + + "golang.org/x/sys/unix" +) + +type ethtoolValue struct { /* ethtool.c: struct ethtool_value */ + cmd uint32 + data uint32 +} + +// MsglvlGet returns the msglvl of the given interface. 
+func (e *Ethtool) MsglvlGet(intf string) (uint32, error) { + edata := ethtoolValue{ + cmd: ETHTOOL_GMSGLVL, + } + + var name [IFNAMSIZ]byte + copy(name[:], []byte(intf)) + + ifr := ifreq{ + ifr_name: name, + ifr_data: uintptr(unsafe.Pointer(&edata)), + } + + _, _, ep := unix.Syscall(unix.SYS_IOCTL, uintptr(e.fd), + SIOCETHTOOL, uintptr(unsafe.Pointer(&ifr))) + if ep != 0 { + return 0, ep + } + + return edata.data, nil +} + +// MsglvlSet returns the read-msglvl, post-set-msglvl of the given interface. +func (e *Ethtool) MsglvlSet(intf string, valset uint32) (uint32, uint32, error) { + edata := ethtoolValue{ + cmd: ETHTOOL_GMSGLVL, + } + + var name [IFNAMSIZ]byte + copy(name[:], []byte(intf)) + + ifr := ifreq{ + ifr_name: name, + ifr_data: uintptr(unsafe.Pointer(&edata)), + } + + _, _, ep := unix.Syscall(unix.SYS_IOCTL, uintptr(e.fd), + SIOCETHTOOL, uintptr(unsafe.Pointer(&ifr))) + if ep != 0 { + return 0, 0, ep + } + + readval := edata.data + + edata.cmd = ETHTOOL_SMSGLVL + edata.data = valset + + _, _, ep = unix.Syscall(unix.SYS_IOCTL, uintptr(e.fd), + SIOCETHTOOL, uintptr(unsafe.Pointer(&ifr))) + if ep != 0 { + return 0, 0, ep + } + + return readval, edata.data, nil +} + +// MsglvlGet returns the msglvl of the given interface. +func MsglvlGet(intf string) (uint32, error) { + e, err := NewEthtool() + if err != nil { + return 0, err + } + defer e.Close() + return e.MsglvlGet(intf) +} + +// MsglvlSet returns the read-msglvl, post-set-msglvl of the given interface. 
+func MsglvlSet(intf string, valset uint32) (uint32, uint32, error) { + e, err := NewEthtool() + if err != nil { + return 0, 0, err + } + defer e.Close() + return e.MsglvlSet(intf, valset) +} diff --git a/vendor/github.com/spf13/cobra/.golangci.yml b/vendor/github.com/spf13/cobra/.golangci.yml index 2c8f4808c..104dc2440 100644 --- a/vendor/github.com/spf13/cobra/.golangci.yml +++ b/vendor/github.com/spf13/cobra/.golangci.yml @@ -12,14 +12,20 @@ # See the License for the specific language governing permissions and # limitations under the License. +version: "2" + run: - deadline: 5m + timeout: 5m + +formatters: + enable: + - gofmt + - goimports linters: - disable-all: true + default: none enable: #- bodyclose - # - deadcode ! deprecated since v1.49.0; replaced by 'unused' #- depguard #- dogsled #- dupl @@ -30,28 +36,31 @@ linters: - goconst - gocritic #- gocyclo - - gofmt - - goimports - #- gomnd #- goprintffuncname - gosec - - gosimple - govet - ineffassign #- lll - misspell + #- mnd #- nakedret #- noctx - nolintlint #- rowserrcheck - #- scopelint - staticcheck - #- structcheck ! deprecated since v1.49.0; replaced by 'unused' - - stylecheck - #- typecheck - unconvert #- unparam - unused - # - varcheck ! deprecated since v1.49.0; replaced by 'unused' #- whitespace - fast: false + exclusions: + presets: + - common-false-positives + - legacy + - std-error-handling + settings: + govet: + # Disable buildtag check to allow dual build tag syntax (both //go:build and // +build). + # This is necessary for Go 1.15 compatibility since //go:build was introduced in Go 1.17. + # This can be removed once Cobra requires Go 1.17 or higher. + disable: + - buildtag diff --git a/vendor/github.com/spf13/cobra/README.md b/vendor/github.com/spf13/cobra/README.md index 6444f4b7f..8416275f4 100644 --- a/vendor/github.com/spf13/cobra/README.md +++ b/vendor/github.com/spf13/cobra/README.md @@ -1,7 +1,14 @@ -![cobra logo](assets/CobraMain.png) +

    + +cobra-logo + +
    Cobra is a library for creating powerful modern CLI applications. +Visit Cobra.dev for extensive documentation + + Cobra is used in many Go projects such as [Kubernetes](https://kubernetes.io/), [Hugo](https://gohugo.io), and [GitHub CLI](https://github.com/cli/cli) to name a few. [This list](site/content/projects_using_cobra.md) contains a more extensive list of projects using Cobra. @@ -10,6 +17,20 @@ name a few. [This list](site/content/projects_using_cobra.md) contains a more ex [![Go Reference](https://pkg.go.dev/badge/github.com/spf13/cobra.svg)](https://pkg.go.dev/github.com/spf13/cobra) [![Go Report Card](https://goreportcard.com/badge/github.com/spf13/cobra)](https://goreportcard.com/report/github.com/spf13/cobra) [![Slack](https://img.shields.io/badge/Slack-cobra-brightgreen)](https://gophers.slack.com/archives/CD3LP1199) +
    +
    + Supported by: +
    +
    + + Warp sponsorship + + +### [Warp, the AI terminal for devs](https://www.warp.dev/cobra) +[Try Cobra in Warp today](https://www.warp.dev/cobra)
    + +
    +
    # Overview @@ -105,7 +126,7 @@ go install github.com/spf13/cobra-cli@latest For complete details on using the Cobra-CLI generator, please read [The Cobra Generator README](https://github.com/spf13/cobra-cli/blob/main/README.md) -For complete details on using the Cobra library, please read the [The Cobra User Guide](site/content/user_guide.md). +For complete details on using the Cobra library, please read [The Cobra User Guide](site/content/user_guide.md). # License diff --git a/vendor/github.com/spf13/cobra/SECURITY.md b/vendor/github.com/spf13/cobra/SECURITY.md new file mode 100644 index 000000000..54e60c28c --- /dev/null +++ b/vendor/github.com/spf13/cobra/SECURITY.md @@ -0,0 +1,105 @@ +# Security Policy + +## Reporting a Vulnerability + +The `cobra` maintainers take security issues seriously and +we appreciate your efforts to _**responsibly**_ disclose your findings. +We will make every effort to swiftly respond and address concerns. + +To report a security vulnerability: + +1. **DO NOT** create a public GitHub issue for the vulnerability! +2. **DO NOT** create a public GitHub Pull Request with a fix for the vulnerability! +3. Send an email to `cobra-security@googlegroups.com`. +4. Include the following details in your report: + - Description of the vulnerability + - Steps to reproduce + - Potential impact of the vulnerability (to your downstream project, to the Go ecosystem, etc.) + - Any potential mitigations you've already identified +5. Allow up to 7 days for an initial response. + You should receive an acknowledgment of your report and an estimated timeline for a fix. +6. (Optional) If you have a fix and would like to contribute your patch, please work + directly with the maintainers via `cobra-security@googlegroups.com` to + coordinate pushing the patch to GitHub, cutting a new release, and disclosing the change. + +## Response Process + +When a security vulnerability report is received, the `cobra` maintainers will: + +1. 
Confirm receipt of the vulnerability report within 7 days. +2. Assess the report to determine if it constitutes a security vulnerability. +3. If confirmed, assign the vulnerability a severity level and create a timeline for addressing it. +4. Develop and test a fix. +5. Patch the vulnerability and make a new GitHub release: the maintainers will coordinate disclosure with the reporter. +6. Create a new GitHub Security Advisory to inform the broader Go ecosystem + +## Disclosure Policy + +The `cobra` maintainers follow a coordinated disclosure process: + +1. Security vulnerabilities will be addressed as quickly as possible. +2. A CVE (Common Vulnerabilities and Exposures) identifier will be requested for significant vulnerabilities + that are within `cobra` itself. +3. Once a fix is ready, the maintainers will: + - Release a new version containing the fix. + - Update the security advisory with details about the vulnerability. + - Credit the reporter (unless they wish to remain anonymous). + - Credit the fixer (unless they wish to remain anonymous, this may be the same as the reporter). + - Announce the vulnerability through appropriate channels + (GitHub Security Advisory, mailing lists, GitHub Releases, etc.) + +## Supported Versions + +Security fixes will typically only be released for the most recent major release. + +## Upstream Security Issues + +`cobra` generally will not accept vulnerability reports that originate in upstream +dependencies. I.e., if there is a problem in Go code that `cobra` depends on, +it is best to engage that project's maintainers and owners. + +This security policy primarily pertains only to `cobra` itself but if you believe you've +identified a problem that originates in an upstream dependency and is being widely +distributed by `cobra`, please follow the disclosure procedure above: the `cobra` +maintainers will work with you to determine the severity and ecosystem impact. 
+ +## Security Updates and CVEs + +Information about known security vulnerabilities and CVEs affecting `cobra` will +be published as GitHub Security Advisories at +https://github.com/spf13/cobra/security/advisories. + +All users are encouraged to watch the repository and upgrade promptly when +security releases are published. + +## `cobra` Security Best Practices for Users + +When using `cobra` in your CLIs, the `cobra` maintainers recommend the following: + +1. Always use the latest version of `cobra`. +2. [Use Go modules](https://go.dev/blog/using-go-modules) for dependency management. +3. Always use the latest possible version of Go. + +## Security Best Practices for Contributors + +When contributing to `cobra`: + +1. Be mindful of security implications when adding new features or modifying existing ones. +2. Be aware of `cobra`'s extremely large reach: it is used in nearly every Go CLI + (like Kubernetes, Docker, Prometheus, etc. etc.) +3. Write tests that explicitly cover edge cases and potential issues. +4. If you discover a security issue while working on `cobra`, please report it + following the process above rather than opening a public pull request or issue that + addresses the vulnerability. +5. Take personal sec-ops seriously and secure your GitHub account: use [two-factor authentication](https://docs.github.com/en/authentication/securing-your-account-with-two-factor-authentication-2fa), + [sign your commits with a GPG or SSH key](https://docs.github.com/en/authentication/managing-commit-signature-verification/about-commit-signature-verification), + etc. + +## Acknowledgments + +The `cobra` maintainers would like to thank all security researchers and +community members who help keep cobra, its users, and the entire Go ecosystem secure through responsible disclosures!! 
+ +--- + +*This security policy is inspired by the [Open Web Application Security Project (OWASP)](https://owasp.org/) guidelines and security best practices.* diff --git a/vendor/github.com/spf13/cobra/active_help.go b/vendor/github.com/spf13/cobra/active_help.go index 25c30e3cc..b3e2dadfe 100644 --- a/vendor/github.com/spf13/cobra/active_help.go +++ b/vendor/github.com/spf13/cobra/active_help.go @@ -35,7 +35,7 @@ const ( // This function can be called multiple times before and/or after completions are added to // the array. Each time this function is called with the same array, the new // ActiveHelp line will be shown below the previous ones when completion is triggered. -func AppendActiveHelp(compArray []string, activeHelpStr string) []string { +func AppendActiveHelp(compArray []Completion, activeHelpStr string) []Completion { return append(compArray, fmt.Sprintf("%s%s", activeHelpMarker, activeHelpStr)) } diff --git a/vendor/github.com/spf13/cobra/bash_completionsV2.go b/vendor/github.com/spf13/cobra/bash_completionsV2.go index 1cce5c329..d2397aa36 100644 --- a/vendor/github.com/spf13/cobra/bash_completionsV2.go +++ b/vendor/github.com/spf13/cobra/bash_completionsV2.go @@ -146,7 +146,7 @@ __%[1]s_process_completion_results() { if (((directive & shellCompDirectiveFilterFileExt) != 0)); then # File extension filtering - local fullFilter filter filteringCmd + local fullFilter="" filter filteringCmd # Do not use quotes around the $completions variable or else newline # characters will be kept. @@ -177,20 +177,71 @@ __%[1]s_process_completion_results() { __%[1]s_handle_special_char "$cur" = # Print the activeHelp statements before we finish + __%[1]s_handle_activeHelp +} + +__%[1]s_handle_activeHelp() { + # Print the activeHelp statements if ((${#activeHelp[*]} != 0)); then - printf "\n"; - printf "%%s\n" "${activeHelp[@]}" - printf "\n" - - # The prompt format is only available from bash 4.4. - # We test if it is available before using it. 
- if (x=${PS1@P}) 2> /dev/null; then - printf "%%s" "${PS1@P}${COMP_LINE[@]}" - else - # Can't print the prompt. Just print the - # text the user had typed, it is workable enough. - printf "%%s" "${COMP_LINE[@]}" + if [ -z $COMP_TYPE ]; then + # Bash v3 does not set the COMP_TYPE variable. + printf "\n"; + printf "%%s\n" "${activeHelp[@]}" + printf "\n" + __%[1]s_reprint_commandLine + return fi + + # Only print ActiveHelp on the second TAB press + if [ $COMP_TYPE -eq 63 ]; then + printf "\n" + printf "%%s\n" "${activeHelp[@]}" + + if ((${#COMPREPLY[*]} == 0)); then + # When there are no completion choices from the program, file completion + # may kick in if the program has not disabled it; in such a case, we want + # to know if any files will match what the user typed, so that we know if + # there will be completions presented, so that we know how to handle ActiveHelp. + # To find out, we actually trigger the file completion ourselves; + # the call to _filedir will fill COMPREPLY if files match. + if (((directive & shellCompDirectiveNoFileComp) == 0)); then + __%[1]s_debug "Listing files" + _filedir + fi + fi + + if ((${#COMPREPLY[*]} != 0)); then + # If there are completion choices to be shown, print a delimiter. + # Re-printing the command-line will automatically be done + # by the shell when it prints the completion choices. + printf -- "--" + else + # When there are no completion choices at all, we need + # to re-print the command-line since the shell will + # not be doing it itself. + __%[1]s_reprint_commandLine + fi + elif [ $COMP_TYPE -eq 37 ] || [ $COMP_TYPE -eq 42 ]; then + # For completion type: menu-complete/menu-complete-backward and insert-completions + # the completions are immediately inserted into the command-line, so we first + # print the activeHelp message and reprint the command-line since the shell won't. 
+ printf "\n" + printf "%%s\n" "${activeHelp[@]}" + + __%[1]s_reprint_commandLine + fi + fi +} + +__%[1]s_reprint_commandLine() { + # The prompt format is only available from bash 4.4. + # We test if it is available before using it. + if (x=${PS1@P}) 2> /dev/null; then + printf "%%s" "${PS1@P}${COMP_LINE[@]}" + else + # Can't print the prompt. Just print the + # text the user had typed, it is workable enough. + printf "%%s" "${COMP_LINE[@]}" fi } @@ -201,6 +252,8 @@ __%[1]s_extract_activeHelp() { local endIndex=${#activeHelpMarker} while IFS='' read -r comp; do + [[ -z $comp ]] && continue + if [[ ${comp:0:endIndex} == $activeHelpMarker ]]; then comp=${comp:endIndex} __%[1]s_debug "ActiveHelp found: $comp" @@ -223,16 +276,21 @@ __%[1]s_handle_completion_types() { # If the user requested inserting one completion at a time, or all # completions at once on the command-line we must remove the descriptions. # https://github.com/spf13/cobra/issues/1508 - local tab=$'\t' comp - while IFS='' read -r comp; do - [[ -z $comp ]] && continue - # Strip any description - comp=${comp%%%%$tab*} - # Only consider the completions that match - if [[ $comp == "$cur"* ]]; then - COMPREPLY+=("$comp") - fi - done < <(printf "%%s\n" "${completions[@]}") + + # If there are no completions, we don't need to do anything + (( ${#completions[@]} == 0 )) && return 0 + + local tab=$'\t' + + # Strip any description and escape the completion to handled special characters + IFS=$'\n' read -ra completions -d '' < <(printf "%%q\n" "${completions[@]%%%%$tab*}") + + # Only consider the completions that match + IFS=$'\n' read -ra COMPREPLY -d '' < <(IFS=$'\n'; compgen -W "${completions[*]}" -- "${cur}") + + # compgen looses the escaping so we need to escape all completions again since they will + # all be inserted on the command-line. 
+ IFS=$'\n' read -ra COMPREPLY -d '' < <(printf "%%q\n" "${COMPREPLY[@]}") ;; *) @@ -243,11 +301,25 @@ __%[1]s_handle_completion_types() { } __%[1]s_handle_standard_completion_case() { - local tab=$'\t' comp + local tab=$'\t' + + # If there are no completions, we don't need to do anything + (( ${#completions[@]} == 0 )) && return 0 # Short circuit to optimize if we don't have descriptions if [[ "${completions[*]}" != *$tab* ]]; then - IFS=$'\n' read -ra COMPREPLY -d '' < <(compgen -W "${completions[*]}" -- "$cur") + # First, escape the completions to handle special characters + IFS=$'\n' read -ra completions -d '' < <(printf "%%q\n" "${completions[@]}") + # Only consider the completions that match what the user typed + IFS=$'\n' read -ra COMPREPLY -d '' < <(IFS=$'\n'; compgen -W "${completions[*]}" -- "${cur}") + + # compgen looses the escaping so, if there is only a single completion, we need to + # escape it again because it will be inserted on the command-line. If there are multiple + # completions, we don't want to escape them because they will be printed in a list + # and we don't want to show escape characters in that list. + if (( ${#COMPREPLY[@]} == 1 )); then + COMPREPLY[0]=$(printf "%%q" "${COMPREPLY[0]}") + fi return 0 fi @@ -256,23 +328,39 @@ __%[1]s_handle_standard_completion_case() { # Look for the longest completion so that we can format things nicely while IFS='' read -r compline; do [[ -z $compline ]] && continue - # Strip any description before checking the length - comp=${compline%%%%$tab*} + + # Before checking if the completion matches what the user typed, + # we need to strip any description and escape the completion to handle special + # characters because those escape characters are part of what the user typed. + # Don't call "printf" in a sub-shell because it will be much slower + # since we are in a loop. 
+ printf -v comp "%%q" "${compline%%%%$tab*}" &>/dev/null || comp=$(printf "%%q" "${compline%%%%$tab*}") + # Only consider the completions that match [[ $comp == "$cur"* ]] || continue + + # The completions matches. Add it to the list of full completions including + # its description. We don't escape the completion because it may get printed + # in a list if there are more than one and we don't want show escape characters + # in that list. COMPREPLY+=("$compline") + + # Strip any description before checking the length, and again, don't escape + # the completion because this length is only used when printing the completions + # in a list and we don't want show escape characters in that list. + comp=${compline%%%%$tab*} if ((${#comp}>longest)); then longest=${#comp} fi done < <(printf "%%s\n" "${completions[@]}") - # If there is a single completion left, remove the description text + # If there is a single completion left, remove the description text and escape any special characters if ((${#COMPREPLY[*]} == 1)); then __%[1]s_debug "COMPREPLY[0]: ${COMPREPLY[0]}" - comp="${COMPREPLY[0]%%%%$tab*}" - __%[1]s_debug "Removed description from single completion, which is now: ${comp}" - COMPREPLY[0]=$comp - else # Format the descriptions + COMPREPLY[0]=$(printf "%%q" "${COMPREPLY[0]%%%%$tab*}") + __%[1]s_debug "Removed description from single completion, which is now: ${COMPREPLY[0]}" + else + # Format the descriptions __%[1]s_format_comp_descriptions $longest fi } diff --git a/vendor/github.com/spf13/cobra/cobra.go b/vendor/github.com/spf13/cobra/cobra.go index e0b0947b0..d9cd2414e 100644 --- a/vendor/github.com/spf13/cobra/cobra.go +++ b/vendor/github.com/spf13/cobra/cobra.go @@ -176,12 +176,16 @@ func rpad(s string, padding int) string { return fmt.Sprintf(formattedString, s) } -// tmpl executes the given template text on data, writing the result to w. 
-func tmpl(w io.Writer, text string, data interface{}) error { - t := template.New("top") - t.Funcs(templateFuncs) - template.Must(t.Parse(text)) - return t.Execute(w, data) +func tmpl(text string) *tmplFunc { + return &tmplFunc{ + tmpl: text, + fn: func(w io.Writer, data interface{}) error { + t := template.New("top") + t.Funcs(templateFuncs) + template.Must(t.Parse(text)) + return t.Execute(w, data) + }, + } } // ld compares two strings and returns the levenshtein distance between them. diff --git a/vendor/github.com/spf13/cobra/command.go b/vendor/github.com/spf13/cobra/command.go index 54748fc67..c05fed45a 100644 --- a/vendor/github.com/spf13/cobra/command.go +++ b/vendor/github.com/spf13/cobra/command.go @@ -33,10 +33,13 @@ import ( const ( FlagSetByCobraAnnotation = "cobra_annotation_flag_set_by_cobra" CommandDisplayNameAnnotation = "cobra_annotation_command_display_name" + + helpFlagName = "help" + helpCommandName = "help" ) // FParseErrWhitelist configures Flag parse errors to be ignored -type FParseErrWhitelist flag.ParseErrorsWhitelist +type FParseErrWhitelist flag.ParseErrorsAllowlist // Group Structure to manage groups for commands type Group struct { @@ -80,11 +83,11 @@ type Command struct { Example string // ValidArgs is list of all valid non-flag arguments that are accepted in shell completions - ValidArgs []string + ValidArgs []Completion // ValidArgsFunction is an optional function that provides valid non-flag arguments for shell completion. // It is a dynamic version of using ValidArgs. // Only one of ValidArgs and ValidArgsFunction can be used for a command. - ValidArgsFunction func(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective) + ValidArgsFunction CompletionFunc // Expected arguments Args PositionalArgs @@ -168,12 +171,12 @@ type Command struct { // usageFunc is usage func defined by user. usageFunc func(*Command) error // usageTemplate is usage template defined by user. 
- usageTemplate string + usageTemplate *tmplFunc // flagErrorFunc is func defined by user and it's called when the parsing of // flags returns an error. flagErrorFunc func(*Command, error) error // helpTemplate is help template defined by user. - helpTemplate string + helpTemplate *tmplFunc // helpFunc is help func defined by user. helpFunc func(*Command, []string) // helpCommand is command with usage 'help'. If it's not defined by user, @@ -186,7 +189,7 @@ type Command struct { completionCommandGroupID string // versionTemplate is the version template defined by user. - versionTemplate string + versionTemplate *tmplFunc // errPrefix is the error message prefix defined by user. errPrefix string @@ -281,6 +284,7 @@ func (c *Command) SetArgs(a []string) { // SetOutput sets the destination for usage and error messages. // If output is nil, os.Stderr is used. +// // Deprecated: Use SetOut and/or SetErr instead func (c *Command) SetOutput(output io.Writer) { c.outWriter = output @@ -312,7 +316,11 @@ func (c *Command) SetUsageFunc(f func(*Command) error) { // SetUsageTemplate sets usage template. Can be defined by Application. func (c *Command) SetUsageTemplate(s string) { - c.usageTemplate = s + if s == "" { + c.usageTemplate = nil + return + } + c.usageTemplate = tmpl(s) } // SetFlagErrorFunc sets a function to generate an error when flag parsing @@ -348,12 +356,20 @@ func (c *Command) SetCompletionCommandGroupID(groupID string) { // SetHelpTemplate sets help template to be used. Application can use it to set custom template. func (c *Command) SetHelpTemplate(s string) { - c.helpTemplate = s + if s == "" { + c.helpTemplate = nil + return + } + c.helpTemplate = tmpl(s) } // SetVersionTemplate sets version template to be used. Application can use it to set custom template. 
func (c *Command) SetVersionTemplate(s string) { - c.versionTemplate = s + if s == "" { + c.versionTemplate = nil + return + } + c.versionTemplate = tmpl(s) } // SetErrPrefix sets error message prefix to be used. Application can use it to set custom prefix. @@ -434,7 +450,8 @@ func (c *Command) UsageFunc() (f func(*Command) error) { } return func(c *Command) error { c.mergePersistentFlags() - err := tmpl(c.OutOrStderr(), c.UsageTemplate(), c) + fn := c.getUsageTemplateFunc() + err := fn(c.OutOrStderr(), c) if err != nil { c.PrintErrln(err) } @@ -442,6 +459,19 @@ func (c *Command) UsageFunc() (f func(*Command) error) { } } +// getUsageTemplateFunc returns the usage template function for the command +// going up the command tree if necessary. +func (c *Command) getUsageTemplateFunc() func(w io.Writer, data interface{}) error { + if c.usageTemplate != nil { + return c.usageTemplate.fn + } + + if c.HasParent() { + return c.parent.getUsageTemplateFunc() + } + return defaultUsageFunc +} + // Usage puts out the usage for the command. // Used when a user provides invalid input. // Can be defined by user by overriding UsageFunc. @@ -460,15 +490,30 @@ func (c *Command) HelpFunc() func(*Command, []string) { } return func(c *Command, a []string) { c.mergePersistentFlags() + fn := c.getHelpTemplateFunc() // The help should be sent to stdout // See https://github.com/spf13/cobra/issues/1002 - err := tmpl(c.OutOrStdout(), c.HelpTemplate(), c) + err := fn(c.OutOrStdout(), c) if err != nil { c.PrintErrln(err) } } } +// getHelpTemplateFunc returns the help template function for the command +// going up the command tree if necessary. +func (c *Command) getHelpTemplateFunc() func(w io.Writer, data interface{}) error { + if c.helpTemplate != nil { + return c.helpTemplate.fn + } + + if c.HasParent() { + return c.parent.getHelpTemplateFunc() + } + + return defaultHelpFunc +} + // Help puts out the help for the command. // Used when a user calls help [command]. 
// Can be defined by user by overriding HelpFunc. @@ -512,7 +557,7 @@ func (c *Command) FlagErrorFunc() (f func(*Command, error) error) { } } -var minUsagePadding = 25 +const minUsagePadding = 25 // UsagePadding return padding for the usage. func (c *Command) UsagePadding() int { @@ -522,7 +567,7 @@ func (c *Command) UsagePadding() int { return c.parent.commandsMaxUseLen } -var minCommandPathPadding = 11 +const minCommandPathPadding = 11 // CommandPathPadding return padding for the command path. func (c *Command) CommandPathPadding() int { @@ -532,7 +577,7 @@ func (c *Command) CommandPathPadding() int { return c.parent.commandsMaxCommandPathLen } -var minNamePadding = 11 +const minNamePadding = 11 // NamePadding returns padding for the name. func (c *Command) NamePadding() int { @@ -543,71 +588,55 @@ func (c *Command) NamePadding() int { } // UsageTemplate returns usage template for the command. +// This function is kept for backwards-compatibility reasons. func (c *Command) UsageTemplate() string { - if c.usageTemplate != "" { - return c.usageTemplate + if c.usageTemplate != nil { + return c.usageTemplate.tmpl } if c.HasParent() { return c.parent.UsageTemplate() } - return `Usage:{{if .Runnable}} - {{.UseLine}}{{end}}{{if .HasAvailableSubCommands}} - {{.CommandPath}} [command]{{end}}{{if gt (len .Aliases) 0}} - -Aliases: - {{.NameAndAliases}}{{end}}{{if .HasExample}} - -Examples: -{{.Example}}{{end}}{{if .HasAvailableSubCommands}}{{$cmds := .Commands}}{{if eq (len .Groups) 0}} - -Available Commands:{{range $cmds}}{{if (or .IsAvailableCommand (eq .Name "help"))}} - {{rpad .Name .NamePadding }} {{.Short}}{{end}}{{end}}{{else}}{{range $group := .Groups}} - -{{.Title}}{{range $cmds}}{{if (and (eq .GroupID $group.ID) (or .IsAvailableCommand (eq .Name "help")))}} - {{rpad .Name .NamePadding }} {{.Short}}{{end}}{{end}}{{end}}{{if not .AllChildCommandsHaveGroup}} - -Additional Commands:{{range $cmds}}{{if (and (eq .GroupID "") (or .IsAvailableCommand (eq .Name "help")))}} 
- {{rpad .Name .NamePadding }} {{.Short}}{{end}}{{end}}{{end}}{{end}}{{end}}{{if .HasAvailableLocalFlags}} - -Flags: -{{.LocalFlags.FlagUsages | trimTrailingWhitespaces}}{{end}}{{if .HasAvailableInheritedFlags}} - -Global Flags: -{{.InheritedFlags.FlagUsages | trimTrailingWhitespaces}}{{end}}{{if .HasHelpSubCommands}} - -Additional help topics:{{range .Commands}}{{if .IsAdditionalHelpTopicCommand}} - {{rpad .CommandPath .CommandPathPadding}} {{.Short}}{{end}}{{end}}{{end}}{{if .HasAvailableSubCommands}} - -Use "{{.CommandPath}} [command] --help" for more information about a command.{{end}} -` + return defaultUsageTemplate } // HelpTemplate return help template for the command. +// This function is kept for backwards-compatibility reasons. func (c *Command) HelpTemplate() string { - if c.helpTemplate != "" { - return c.helpTemplate + if c.helpTemplate != nil { + return c.helpTemplate.tmpl } if c.HasParent() { return c.parent.HelpTemplate() } - return `{{with (or .Long .Short)}}{{. | trimTrailingWhitespaces}} - -{{end}}{{if or .Runnable .HasSubCommands}}{{.UsageString}}{{end}}` + return defaultHelpTemplate } // VersionTemplate return version template for the command. +// This function is kept for backwards-compatibility reasons. func (c *Command) VersionTemplate() string { - if c.versionTemplate != "" { - return c.versionTemplate + if c.versionTemplate != nil { + return c.versionTemplate.tmpl } if c.HasParent() { return c.parent.VersionTemplate() } - return `{{with .Name}}{{printf "%s " .}}{{end}}{{printf "version %s" .Version}} -` + return defaultVersionTemplate +} + +// getVersionTemplateFunc returns the version template function for the command +// going up the command tree if necessary. 
+func (c *Command) getVersionTemplateFunc() func(w io.Writer, data interface{}) error { + if c.versionTemplate != nil { + return c.versionTemplate.fn + } + + if c.HasParent() { + return c.parent.getVersionTemplateFunc() + } + return defaultVersionFunc } // ErrPrefix return error message prefix for the command @@ -894,7 +923,7 @@ func (c *Command) execute(a []string) (err error) { // If help is called, regardless of other flags, return we want help. // Also say we need help if the command isn't runnable. - helpVal, err := c.Flags().GetBool("help") + helpVal, err := c.Flags().GetBool(helpFlagName) if err != nil { // should be impossible to get here as we always declare a help // flag in InitDefaultHelpFlag() @@ -914,7 +943,8 @@ func (c *Command) execute(a []string) (err error) { return err } if versionVal { - err := tmpl(c.OutOrStdout(), c.VersionTemplate(), c) + fn := c.getVersionTemplateFunc() + err := fn(c.OutOrStdout(), c) if err != nil { c.Println(err) } @@ -1068,12 +1098,6 @@ func (c *Command) ExecuteC() (cmd *Command, err error) { // initialize help at the last point to allow for user overriding c.InitDefaultHelpCmd() - // initialize completion at the last point to allow for user overriding - c.InitDefaultCompletionCmd() - - // Now that all commands have been created, let's make sure all groups - // are properly created also - c.checkCommandGroups() args := c.args @@ -1082,9 +1106,16 @@ func (c *Command) ExecuteC() (cmd *Command, err error) { args = os.Args[1:] } - // initialize the hidden command to be used for shell completion + // initialize the __complete command to be used for shell completion c.initCompleteCmd(args) + // initialize the default completion command + c.InitDefaultCompletionCmd(args...) 
+ + // Now that all commands have been created, let's make sure all groups + // are properly created also + c.checkCommandGroups() + var flags []string if c.TraverseChildren { cmd, flags, err = c.Traverse(args) @@ -1187,16 +1218,16 @@ func (c *Command) checkCommandGroups() { // If c already has help flag, it will do nothing. func (c *Command) InitDefaultHelpFlag() { c.mergePersistentFlags() - if c.Flags().Lookup("help") == nil { + if c.Flags().Lookup(helpFlagName) == nil { usage := "help for " - name := c.displayName() + name := c.DisplayName() if name == "" { usage += "this command" } else { usage += name } - c.Flags().BoolP("help", "h", false, usage) - _ = c.Flags().SetAnnotation("help", FlagSetByCobraAnnotation, []string{"true"}) + c.Flags().BoolP(helpFlagName, "h", false, usage) + _ = c.Flags().SetAnnotation(helpFlagName, FlagSetByCobraAnnotation, []string{"true"}) } } @@ -1215,7 +1246,7 @@ func (c *Command) InitDefaultVersionFlag() { if c.Name() == "" { usage += "this command" } else { - usage += c.Name() + usage += c.DisplayName() } if c.Flags().ShorthandLookup("v") == nil { c.Flags().BoolP("version", "v", false, usage) @@ -1239,9 +1270,9 @@ func (c *Command) InitDefaultHelpCmd() { Use: "help [command]", Short: "Help about any command", Long: `Help provides help for any command in the application. 
-Simply type ` + c.displayName() + ` help [path to command] for full details.`, - ValidArgsFunction: func(c *Command, args []string, toComplete string) ([]string, ShellCompDirective) { - var completions []string +Simply type ` + c.DisplayName() + ` help [path to command] for full details.`, + ValidArgsFunction: func(c *Command, args []string, toComplete string) ([]Completion, ShellCompDirective) { + var completions []Completion cmd, _, e := c.Root().Find(args) if e != nil { return nil, ShellCompDirectiveNoFileComp @@ -1253,7 +1284,7 @@ Simply type ` + c.displayName() + ` help [path to command] for full details.`, for _, subCmd := range cmd.Commands() { if subCmd.IsAvailableCommand() || subCmd == cmd.helpCommand { if strings.HasPrefix(subCmd.Name(), toComplete) { - completions = append(completions, fmt.Sprintf("%s\t%s", subCmd.Name(), subCmd.Short)) + completions = append(completions, CompletionWithDesc(subCmd.Name(), subCmd.Short)) } } } @@ -1265,6 +1296,11 @@ Simply type ` + c.displayName() + ` help [path to command] for full details.`, c.Printf("Unknown help topic %#q\n", args) CheckErr(c.Root().Usage()) } else { + // FLow the context down to be used in help text + if cmd.ctx == nil { + cmd.ctx = c.ctx + } + cmd.InitDefaultHelpFlag() // make possible 'help' flag to be shown cmd.InitDefaultVersionFlag() // make possible 'version' flag to be shown CheckErr(cmd.Help()) @@ -1430,10 +1466,12 @@ func (c *Command) CommandPath() string { if c.HasParent() { return c.Parent().CommandPath() + " " + c.Name() } - return c.displayName() + return c.DisplayName() } -func (c *Command) displayName() string { +// DisplayName returns the name to display in help text. 
Returns command Name() +// If CommandDisplayNameAnnoation is not set +func (c *Command) DisplayName() string { if displayName, ok := c.Annotations[CommandDisplayNameAnnotation]; ok { return displayName } @@ -1443,7 +1481,7 @@ func (c *Command) displayName() string { // UseLine puts out the full usage for a given command (including parents). func (c *Command) UseLine() string { var useline string - use := strings.Replace(c.Use, c.Name(), c.displayName(), 1) + use := strings.Replace(c.Use, c.Name(), c.DisplayName(), 1) if c.HasParent() { useline = c.parent.CommandPath() + " " + use } else { @@ -1649,7 +1687,7 @@ func (c *Command) GlobalNormalizationFunc() func(f *flag.FlagSet, name string) f // to this command (local and persistent declared here and by all parents). func (c *Command) Flags() *flag.FlagSet { if c.flags == nil { - c.flags = flag.NewFlagSet(c.displayName(), flag.ContinueOnError) + c.flags = flag.NewFlagSet(c.DisplayName(), flag.ContinueOnError) if c.flagErrorBuf == nil { c.flagErrorBuf = new(bytes.Buffer) } @@ -1664,7 +1702,7 @@ func (c *Command) Flags() *flag.FlagSet { func (c *Command) LocalNonPersistentFlags() *flag.FlagSet { persistentFlags := c.PersistentFlags() - out := flag.NewFlagSet(c.displayName(), flag.ContinueOnError) + out := flag.NewFlagSet(c.DisplayName(), flag.ContinueOnError) c.LocalFlags().VisitAll(func(f *flag.Flag) { if persistentFlags.Lookup(f.Name) == nil { out.AddFlag(f) @@ -1679,7 +1717,7 @@ func (c *Command) LocalFlags() *flag.FlagSet { c.mergePersistentFlags() if c.lflags == nil { - c.lflags = flag.NewFlagSet(c.displayName(), flag.ContinueOnError) + c.lflags = flag.NewFlagSet(c.DisplayName(), flag.ContinueOnError) if c.flagErrorBuf == nil { c.flagErrorBuf = new(bytes.Buffer) } @@ -1707,7 +1745,7 @@ func (c *Command) InheritedFlags() *flag.FlagSet { c.mergePersistentFlags() if c.iflags == nil { - c.iflags = flag.NewFlagSet(c.displayName(), flag.ContinueOnError) + c.iflags = flag.NewFlagSet(c.DisplayName(), flag.ContinueOnError) 
if c.flagErrorBuf == nil { c.flagErrorBuf = new(bytes.Buffer) } @@ -1736,7 +1774,7 @@ func (c *Command) NonInheritedFlags() *flag.FlagSet { // PersistentFlags returns the persistent FlagSet specifically set in the current command. func (c *Command) PersistentFlags() *flag.FlagSet { if c.pflags == nil { - c.pflags = flag.NewFlagSet(c.displayName(), flag.ContinueOnError) + c.pflags = flag.NewFlagSet(c.DisplayName(), flag.ContinueOnError) if c.flagErrorBuf == nil { c.flagErrorBuf = new(bytes.Buffer) } @@ -1749,9 +1787,9 @@ func (c *Command) PersistentFlags() *flag.FlagSet { func (c *Command) ResetFlags() { c.flagErrorBuf = new(bytes.Buffer) c.flagErrorBuf.Reset() - c.flags = flag.NewFlagSet(c.displayName(), flag.ContinueOnError) + c.flags = flag.NewFlagSet(c.DisplayName(), flag.ContinueOnError) c.flags.SetOutput(c.flagErrorBuf) - c.pflags = flag.NewFlagSet(c.displayName(), flag.ContinueOnError) + c.pflags = flag.NewFlagSet(c.DisplayName(), flag.ContinueOnError) c.pflags.SetOutput(c.flagErrorBuf) c.lflags = nil @@ -1839,7 +1877,7 @@ func (c *Command) ParseFlags(args []string) error { c.mergePersistentFlags() // do it here after merging all flags and just before parse - c.Flags().ParseErrorsWhitelist = flag.ParseErrorsWhitelist(c.FParseErrWhitelist) + c.Flags().ParseErrorsAllowlist = flag.ParseErrorsAllowlist(c.FParseErrWhitelist) err := c.Flags().Parse(args) // Print warnings if they occurred (e.g. deprecated flag messages). @@ -1868,7 +1906,7 @@ func (c *Command) mergePersistentFlags() { // If c.parentsPflags == nil, it makes new. 
func (c *Command) updateParentsPflags() { if c.parentsPflags == nil { - c.parentsPflags = flag.NewFlagSet(c.displayName(), flag.ContinueOnError) + c.parentsPflags = flag.NewFlagSet(c.DisplayName(), flag.ContinueOnError) c.parentsPflags.SetOutput(c.flagErrorBuf) c.parentsPflags.SortFlags = false } @@ -1894,3 +1932,141 @@ func commandNameMatches(s string, t string) bool { return s == t } + +// tmplFunc holds a template and a function that will execute said template. +type tmplFunc struct { + tmpl string + fn func(io.Writer, interface{}) error +} + +const defaultUsageTemplate = `Usage:{{if .Runnable}} + {{.UseLine}}{{end}}{{if .HasAvailableSubCommands}} + {{.CommandPath}} [command]{{end}}{{if gt (len .Aliases) 0}} + +Aliases: + {{.NameAndAliases}}{{end}}{{if .HasExample}} + +Examples: +{{.Example}}{{end}}{{if .HasAvailableSubCommands}}{{$cmds := .Commands}}{{if eq (len .Groups) 0}} + +Available Commands:{{range $cmds}}{{if (or .IsAvailableCommand (eq .Name "help"))}} + {{rpad .Name .NamePadding }} {{.Short}}{{end}}{{end}}{{else}}{{range $group := .Groups}} + +{{.Title}}{{range $cmds}}{{if (and (eq .GroupID $group.ID) (or .IsAvailableCommand (eq .Name "help")))}} + {{rpad .Name .NamePadding }} {{.Short}}{{end}}{{end}}{{end}}{{if not .AllChildCommandsHaveGroup}} + +Additional Commands:{{range $cmds}}{{if (and (eq .GroupID "") (or .IsAvailableCommand (eq .Name "help")))}} + {{rpad .Name .NamePadding }} {{.Short}}{{end}}{{end}}{{end}}{{end}}{{end}}{{if .HasAvailableLocalFlags}} + +Flags: +{{.LocalFlags.FlagUsages | trimTrailingWhitespaces}}{{end}}{{if .HasAvailableInheritedFlags}} + +Global Flags: +{{.InheritedFlags.FlagUsages | trimTrailingWhitespaces}}{{end}}{{if .HasHelpSubCommands}} + +Additional help topics:{{range .Commands}}{{if .IsAdditionalHelpTopicCommand}} + {{rpad .CommandPath .CommandPathPadding}} {{.Short}}{{end}}{{end}}{{end}}{{if .HasAvailableSubCommands}} + +Use "{{.CommandPath}} [command] --help" for more information about a command.{{end}} +` + +// 
defaultUsageFunc is equivalent to executing defaultUsageTemplate. The two should be changed in sync. +func defaultUsageFunc(w io.Writer, in interface{}) error { + c := in.(*Command) + fmt.Fprint(w, "Usage:") + if c.Runnable() { + fmt.Fprintf(w, "\n %s", c.UseLine()) + } + if c.HasAvailableSubCommands() { + fmt.Fprintf(w, "\n %s [command]", c.CommandPath()) + } + if len(c.Aliases) > 0 { + fmt.Fprintf(w, "\n\nAliases:\n") + fmt.Fprintf(w, " %s", c.NameAndAliases()) + } + if c.HasExample() { + fmt.Fprintf(w, "\n\nExamples:\n") + fmt.Fprintf(w, "%s", c.Example) + } + if c.HasAvailableSubCommands() { + cmds := c.Commands() + if len(c.Groups()) == 0 { + fmt.Fprintf(w, "\n\nAvailable Commands:") + for _, subcmd := range cmds { + if subcmd.IsAvailableCommand() || subcmd.Name() == helpCommandName { + fmt.Fprintf(w, "\n %s %s", rpad(subcmd.Name(), subcmd.NamePadding()), subcmd.Short) + } + } + } else { + for _, group := range c.Groups() { + fmt.Fprintf(w, "\n\n%s", group.Title) + for _, subcmd := range cmds { + if subcmd.GroupID == group.ID && (subcmd.IsAvailableCommand() || subcmd.Name() == helpCommandName) { + fmt.Fprintf(w, "\n %s %s", rpad(subcmd.Name(), subcmd.NamePadding()), subcmd.Short) + } + } + } + if !c.AllChildCommandsHaveGroup() { + fmt.Fprintf(w, "\n\nAdditional Commands:") + for _, subcmd := range cmds { + if subcmd.GroupID == "" && (subcmd.IsAvailableCommand() || subcmd.Name() == helpCommandName) { + fmt.Fprintf(w, "\n %s %s", rpad(subcmd.Name(), subcmd.NamePadding()), subcmd.Short) + } + } + } + } + } + if c.HasAvailableLocalFlags() { + fmt.Fprintf(w, "\n\nFlags:\n") + fmt.Fprint(w, trimRightSpace(c.LocalFlags().FlagUsages())) + } + if c.HasAvailableInheritedFlags() { + fmt.Fprintf(w, "\n\nGlobal Flags:\n") + fmt.Fprint(w, trimRightSpace(c.InheritedFlags().FlagUsages())) + } + if c.HasHelpSubCommands() { + fmt.Fprintf(w, "\n\nAdditional help topics:") + for _, subcmd := range c.Commands() { + if subcmd.IsAdditionalHelpTopicCommand() { + fmt.Fprintf(w, "\n %s 
%s", rpad(subcmd.CommandPath(), subcmd.CommandPathPadding()), subcmd.Short) + } + } + } + if c.HasAvailableSubCommands() { + fmt.Fprintf(w, "\n\nUse \"%s [command] --help\" for more information about a command.", c.CommandPath()) + } + fmt.Fprintln(w) + return nil +} + +const defaultHelpTemplate = `{{with (or .Long .Short)}}{{. | trimTrailingWhitespaces}} + +{{end}}{{if or .Runnable .HasSubCommands}}{{.UsageString}}{{end}}` + +// defaultHelpFunc is equivalent to executing defaultHelpTemplate. The two should be changed in sync. +func defaultHelpFunc(w io.Writer, in interface{}) error { + c := in.(*Command) + usage := c.Long + if usage == "" { + usage = c.Short + } + usage = trimRightSpace(usage) + if usage != "" { + fmt.Fprintln(w, usage) + fmt.Fprintln(w) + } + if c.Runnable() || c.HasSubCommands() { + fmt.Fprint(w, c.UsageString()) + } + return nil +} + +const defaultVersionTemplate = `{{with .DisplayName}}{{printf "%s " .}}{{end}}{{printf "version %s" .Version}} +` + +// defaultVersionFunc is equivalent to executing defaultVersionTemplate. The two should be changed in sync. +func defaultVersionFunc(w io.Writer, in interface{}) error { + c := in.(*Command) + _, err := fmt.Fprintf(w, "%s version %s\n", c.DisplayName(), c.Version) + return err +} diff --git a/vendor/github.com/spf13/cobra/completions.go b/vendor/github.com/spf13/cobra/completions.go index c0c08b057..d3607c2d2 100644 --- a/vendor/github.com/spf13/cobra/completions.go +++ b/vendor/github.com/spf13/cobra/completions.go @@ -35,7 +35,7 @@ const ( ) // Global map of flag completion functions. Make sure to use flagCompletionMutex before you try to read and write from it. 
-var flagCompletionFunctions = map[*pflag.Flag]func(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective){} +var flagCompletionFunctions = map[*pflag.Flag]CompletionFunc{} // lock for reading and writing from flagCompletionFunctions var flagCompletionMutex = &sync.RWMutex{} @@ -115,24 +115,59 @@ type CompletionOptions struct { DisableDescriptions bool // HiddenDefaultCmd makes the default 'completion' command hidden HiddenDefaultCmd bool + // DefaultShellCompDirective sets the ShellCompDirective that is returned + // if no special directive can be determined + DefaultShellCompDirective *ShellCompDirective +} + +func (receiver *CompletionOptions) SetDefaultShellCompDirective(directive ShellCompDirective) { + receiver.DefaultShellCompDirective = &directive +} + +// Completion is a string that can be used for completions +// +// two formats are supported: +// - the completion choice +// - the completion choice with a textual description (separated by a TAB). +// +// [CompletionWithDesc] can be used to create a completion string with a textual description. +// +// Note: Go type alias is used to provide a more descriptive name in the documentation, but any string can be used. +type Completion = string + +// CompletionFunc is a function that provides completion results. +type CompletionFunc = func(cmd *Command, args []string, toComplete string) ([]Completion, ShellCompDirective) + +// CompletionWithDesc returns a [Completion] with a description by using the TAB delimited format. +func CompletionWithDesc(choice string, description string) Completion { + return choice + "\t" + description } // NoFileCompletions can be used to disable file completion for commands that should // not trigger file completions. -func NoFileCompletions(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective) { +// +// This method satisfies [CompletionFunc]. 
+// It can be used with [Command.RegisterFlagCompletionFunc] and for [Command.ValidArgsFunction]. +func NoFileCompletions(cmd *Command, args []string, toComplete string) ([]Completion, ShellCompDirective) { return nil, ShellCompDirectiveNoFileComp } // FixedCompletions can be used to create a completion function which always // returns the same results. -func FixedCompletions(choices []string, directive ShellCompDirective) func(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective) { - return func(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective) { +// +// This method returns a function that satisfies [CompletionFunc] +// It can be used with [Command.RegisterFlagCompletionFunc] and for [Command.ValidArgsFunction]. +func FixedCompletions(choices []Completion, directive ShellCompDirective) CompletionFunc { + return func(cmd *Command, args []string, toComplete string) ([]Completion, ShellCompDirective) { return choices, directive } } // RegisterFlagCompletionFunc should be called to register a function to provide completion for a flag. -func (c *Command) RegisterFlagCompletionFunc(flagName string, f func(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective)) error { +// +// You can use pre-defined completion functions such as [FixedCompletions] or [NoFileCompletions], +// or you can define your own. +func (c *Command) RegisterFlagCompletionFunc(flagName string, f CompletionFunc) error { flag := c.Flag(flagName) if flag == nil { return fmt.Errorf("RegisterFlagCompletionFunc: flag '%s' does not exist", flagName) @@ -148,7 +183,7 @@ func (c *Command) RegisterFlagCompletionFunc(flagName string, f func(cmd *Comman } // GetFlagCompletionFunc returns the completion function for the given flag of the command, if available. 
-func (c *Command) GetFlagCompletionFunc(flagName string) (func(*Command, []string, string) ([]string, ShellCompDirective), bool) { +func (c *Command) GetFlagCompletionFunc(flagName string) (CompletionFunc, bool) { flag := c.Flag(flagName) if flag == nil { return nil, false @@ -270,7 +305,15 @@ func (c *Command) initCompleteCmd(args []string) { } } -func (c *Command) getCompletions(args []string) (*Command, []string, ShellCompDirective, error) { +// SliceValue is a reduced version of [pflag.SliceValue]. It is used to detect +// flags that accept multiple values and therefore can provide completion +// multiple times. +type SliceValue interface { + // GetSlice returns the flag value list as an array of strings. + GetSlice() []string +} + +func (c *Command) getCompletions(args []string) (*Command, []Completion, ShellCompDirective, error) { // The last argument, which is not completely typed by the user, // should not be part of the list of arguments toComplete := args[len(args)-1] @@ -298,7 +341,7 @@ func (c *Command) getCompletions(args []string) (*Command, []string, ShellCompDi } if err != nil { // Unable to find the real command. 
E.g., someInvalidCmd - return c, []string{}, ShellCompDirectiveDefault, fmt.Errorf("unable to find a command for arguments: %v", trimmedArgs) + return c, []Completion{}, ShellCompDirectiveDefault, fmt.Errorf("unable to find a command for arguments: %v", trimmedArgs) } finalCmd.ctx = c.ctx @@ -328,7 +371,7 @@ func (c *Command) getCompletions(args []string) (*Command, []string, ShellCompDi // Parse the flags early so we can check if required flags are set if err = finalCmd.ParseFlags(finalArgs); err != nil { - return finalCmd, []string{}, ShellCompDirectiveDefault, fmt.Errorf("Error while parsing flags from args %v: %s", finalArgs, err.Error()) + return finalCmd, []Completion{}, ShellCompDirectiveDefault, fmt.Errorf("Error while parsing flags from args %v: %s", finalArgs, err.Error()) } realArgCount := finalCmd.Flags().NArg() @@ -339,15 +382,15 @@ func (c *Command) getCompletions(args []string) (*Command, []string, ShellCompDi // Error while attempting to parse flags if flagErr != nil { // If error type is flagCompError and we don't want flagCompletion we should ignore the error - if _, ok := flagErr.(*flagCompError); !(ok && !flagCompletion) { - return finalCmd, []string{}, ShellCompDirectiveDefault, flagErr + if _, ok := flagErr.(*flagCompError); !ok || flagCompletion { + return finalCmd, []Completion{}, ShellCompDirectiveDefault, flagErr } } // Look for the --help or --version flags. If they are present, // there should be no further completions. if helpOrVersionFlagPresent(finalCmd) { - return finalCmd, []string{}, ShellCompDirectiveNoFileComp, nil + return finalCmd, []Completion{}, ShellCompDirectiveNoFileComp, nil } // We only remove the flags from the arguments if DisableFlagParsing is not set. 
@@ -376,11 +419,11 @@ func (c *Command) getCompletions(args []string) (*Command, []string, ShellCompDi return finalCmd, subDir, ShellCompDirectiveFilterDirs, nil } // Directory completion - return finalCmd, []string{}, ShellCompDirectiveFilterDirs, nil + return finalCmd, []Completion{}, ShellCompDirectiveFilterDirs, nil } } - var completions []string + var completions []Completion var directive ShellCompDirective // Enforce flag groups before doing flag completions @@ -399,10 +442,14 @@ func (c *Command) getCompletions(args []string) (*Command, []string, ShellCompDi // If we have not found any required flags, only then can we show regular flags if len(completions) == 0 { doCompleteFlags := func(flag *pflag.Flag) { - if !flag.Changed || + _, acceptsMultiple := flag.Value.(SliceValue) + acceptsMultiple = acceptsMultiple || strings.Contains(flag.Value.Type(), "Slice") || - strings.Contains(flag.Value.Type(), "Array") { - // If the flag is not already present, or if it can be specified multiple times (Array or Slice) + strings.Contains(flag.Value.Type(), "Array") || + strings.HasPrefix(flag.Value.Type(), "stringTo") + + if !flag.Changed || acceptsMultiple { + // If the flag is not already present, or if it can be specified multiple times (Array, Slice, or stringTo) // we suggest it as a completion completions = append(completions, getFlagNameCompletions(flag, toComplete)...) 
} @@ -440,6 +487,14 @@ func (c *Command) getCompletions(args []string) (*Command, []string, ShellCompDi } } else { directive = ShellCompDirectiveDefault + // check current and parent commands for a custom DefaultShellCompDirective + for cmd := finalCmd; cmd != nil; cmd = cmd.parent { + if cmd.CompletionOptions.DefaultShellCompDirective != nil { + directive = *cmd.CompletionOptions.DefaultShellCompDirective + break + } + } + if flag == nil { foundLocalNonPersistentFlag := false // If TraverseChildren is true on the root command we don't check for @@ -462,7 +517,7 @@ func (c *Command) getCompletions(args []string) (*Command, []string, ShellCompDi for _, subCmd := range finalCmd.Commands() { if subCmd.IsAvailableCommand() || subCmd == finalCmd.helpCommand { if strings.HasPrefix(subCmd.Name(), toComplete) { - completions = append(completions, fmt.Sprintf("%s\t%s", subCmd.Name(), subCmd.Short)) + completions = append(completions, CompletionWithDesc(subCmd.Name(), subCmd.Short)) } directive = ShellCompDirectiveNoFileComp } @@ -507,7 +562,7 @@ func (c *Command) getCompletions(args []string) (*Command, []string, ShellCompDi } // Find the completion function for the flag or command - var completionFn func(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective) + var completionFn CompletionFunc if flag != nil && flagCompletion { flagCompletionMutex.RLock() completionFn = flagCompletionFunctions[flag] @@ -518,7 +573,7 @@ func (c *Command) getCompletions(args []string) (*Command, []string, ShellCompDi if completionFn != nil { // Go custom completion defined for this flag or command. // Call the registered completion function to get the completions. - var comps []string + var comps []Completion comps, directive = completionFn(finalCmd, finalArgs, toComplete) completions = append(completions, comps...) 
} @@ -531,23 +586,23 @@ func helpOrVersionFlagPresent(cmd *Command) bool { len(versionFlag.Annotations[FlagSetByCobraAnnotation]) > 0 && versionFlag.Changed { return true } - if helpFlag := cmd.Flags().Lookup("help"); helpFlag != nil && + if helpFlag := cmd.Flags().Lookup(helpFlagName); helpFlag != nil && len(helpFlag.Annotations[FlagSetByCobraAnnotation]) > 0 && helpFlag.Changed { return true } return false } -func getFlagNameCompletions(flag *pflag.Flag, toComplete string) []string { +func getFlagNameCompletions(flag *pflag.Flag, toComplete string) []Completion { if nonCompletableFlag(flag) { - return []string{} + return []Completion{} } - var completions []string + var completions []Completion flagName := "--" + flag.Name if strings.HasPrefix(flagName, toComplete) { // Flag without the = - completions = append(completions, fmt.Sprintf("%s\t%s", flagName, flag.Usage)) + completions = append(completions, CompletionWithDesc(flagName, flag.Usage)) // Why suggest both long forms: --flag and --flag= ? // This forces the user to *always* have to type either an = or a space after the flag name. 
@@ -559,20 +614,20 @@ func getFlagNameCompletions(flag *pflag.Flag, toComplete string) []string { // if len(flag.NoOptDefVal) == 0 { // // Flag requires a value, so it can be suffixed with = // flagName += "=" - // completions = append(completions, fmt.Sprintf("%s\t%s", flagName, flag.Usage)) + // completions = append(completions, CompletionWithDesc(flagName, flag.Usage)) // } } flagName = "-" + flag.Shorthand if len(flag.Shorthand) > 0 && strings.HasPrefix(flagName, toComplete) { - completions = append(completions, fmt.Sprintf("%s\t%s", flagName, flag.Usage)) + completions = append(completions, CompletionWithDesc(flagName, flag.Usage)) } return completions } -func completeRequireFlags(finalCmd *Command, toComplete string) []string { - var completions []string +func completeRequireFlags(finalCmd *Command, toComplete string) []Completion { + var completions []Completion doCompleteRequiredFlags := func(flag *pflag.Flag) { if _, present := flag.Annotations[BashCompOneRequiredFlag]; present { @@ -687,8 +742,8 @@ func checkIfFlagCompletion(finalCmd *Command, args []string, lastArg string) (*p // 1- the feature has been explicitly disabled by the program, // 2- c has no subcommands (to avoid creating one), // 3- c already has a 'completion' command provided by the program. -func (c *Command) InitDefaultCompletionCmd() { - if c.CompletionOptions.DisableDefaultCmd || !c.HasSubCommands() { +func (c *Command) InitDefaultCompletionCmd(args ...string) { + if c.CompletionOptions.DisableDefaultCmd { return } @@ -701,6 +756,16 @@ func (c *Command) InitDefaultCompletionCmd() { haveNoDescFlag := !c.CompletionOptions.DisableNoDescFlag && !c.CompletionOptions.DisableDescriptions + // Special case to know if there are sub-commands or not. 
+ hasSubCommands := false + for _, cmd := range c.commands { + if cmd.Name() != ShellCompRequestCmd && cmd.Name() != helpCommandName { + // We found a real sub-command (not 'help' or '__complete') + hasSubCommands = true + break + } + } + completionCmd := &Command{ Use: compCmdName, Short: "Generate the autocompletion script for the specified shell", @@ -714,6 +779,22 @@ See each sub-command's help for details on how to use the generated script. } c.AddCommand(completionCmd) + if !hasSubCommands { + // If the 'completion' command will be the only sub-command, + // we only create it if it is actually being called. + // This avoids breaking programs that would suddenly find themselves with + // a subcommand, which would prevent them from accepting arguments. + // We also create the 'completion' command if the user is triggering + // shell completion for it (prog __complete completion '') + subCmd, cmdArgs, err := c.Find(args) + if err != nil || subCmd.Name() != compCmdName && + (subCmd.Name() != ShellCompRequestCmd || len(cmdArgs) <= 1 || cmdArgs[0] != compCmdName) { + // The completion command is not being called or being completed so we remove it. 
+ c.RemoveCommand(completionCmd) + return + } + } + out := c.OutOrStdout() noDesc := c.CompletionOptions.DisableDescriptions shortDesc := "Generate the autocompletion script for %s" diff --git a/vendor/github.com/spf13/cobra/powershell_completions.go b/vendor/github.com/spf13/cobra/powershell_completions.go index a830b7bca..746dcb92e 100644 --- a/vendor/github.com/spf13/cobra/powershell_completions.go +++ b/vendor/github.com/spf13/cobra/powershell_completions.go @@ -162,7 +162,10 @@ filter __%[1]s_escapeStringWithSpecialChars { if (-Not $Description) { $Description = " " } - @{Name="$Name";Description="$Description"} + New-Object -TypeName PSCustomObject -Property @{ + Name = "$Name" + Description = "$Description" + } } @@ -240,7 +243,12 @@ filter __%[1]s_escapeStringWithSpecialChars { __%[1]s_debug "Only one completion left" # insert space after value - [System.Management.Automation.CompletionResult]::new($($comp.Name | __%[1]s_escapeStringWithSpecialChars) + $Space, "$($comp.Name)", 'ParameterValue', "$($comp.Description)") + $CompletionText = $($comp.Name | __%[1]s_escapeStringWithSpecialChars) + $Space + if ($ExecutionContext.SessionState.LanguageMode -eq "FullLanguage"){ + [System.Management.Automation.CompletionResult]::new($CompletionText, "$($comp.Name)", 'ParameterValue', "$($comp.Description)") + } else { + $CompletionText + } } else { # Add the proper number of spaces to align the descriptions @@ -255,7 +263,12 @@ filter __%[1]s_escapeStringWithSpecialChars { $Description = " ($($comp.Description))" } - [System.Management.Automation.CompletionResult]::new("$($comp.Name)$Description", "$($comp.Name)$Description", 'ParameterValue', "$($comp.Description)") + $CompletionText = "$($comp.Name)$Description" + if ($ExecutionContext.SessionState.LanguageMode -eq "FullLanguage"){ + [System.Management.Automation.CompletionResult]::new($CompletionText, "$($comp.Name)$Description", 'ParameterValue', "$($comp.Description)") + } else { + $CompletionText + } } } @@ 
-264,7 +277,13 @@ filter __%[1]s_escapeStringWithSpecialChars { # insert space after value # MenuComplete will automatically show the ToolTip of # the highlighted value at the bottom of the suggestions. - [System.Management.Automation.CompletionResult]::new($($comp.Name | __%[1]s_escapeStringWithSpecialChars) + $Space, "$($comp.Name)", 'ParameterValue', "$($comp.Description)") + + $CompletionText = $($comp.Name | __%[1]s_escapeStringWithSpecialChars) + $Space + if ($ExecutionContext.SessionState.LanguageMode -eq "FullLanguage"){ + [System.Management.Automation.CompletionResult]::new($CompletionText, "$($comp.Name)", 'ParameterValue', "$($comp.Description)") + } else { + $CompletionText + } } # TabCompleteNext and in case we get something unknown @@ -272,7 +291,13 @@ filter __%[1]s_escapeStringWithSpecialChars { # Like MenuComplete but we don't want to add a space here because # the user need to press space anyway to get the completion. # Description will not be shown because that's not possible with TabCompleteNext - [System.Management.Automation.CompletionResult]::new($($comp.Name | __%[1]s_escapeStringWithSpecialChars), "$($comp.Name)", 'ParameterValue', "$($comp.Description)") + + $CompletionText = $($comp.Name | __%[1]s_escapeStringWithSpecialChars) + if ($ExecutionContext.SessionState.LanguageMode -eq "FullLanguage"){ + [System.Management.Automation.CompletionResult]::new($CompletionText, "$($comp.Name)", 'ParameterValue', "$($comp.Description)") + } else { + $CompletionText + } } } diff --git a/vendor/github.com/spf13/pflag/README.md b/vendor/github.com/spf13/pflag/README.md index 7eacc5bdb..388c4e5ea 100644 --- a/vendor/github.com/spf13/pflag/README.md +++ b/vendor/github.com/spf13/pflag/README.md @@ -284,6 +284,33 @@ func main() { } ``` +### Using pflag with go test +`pflag` does not parse the shorthand versions of go test's built-in flags (i.e., those starting with `-test.`). 
+For more context, see issues [#63](https://github.com/spf13/pflag/issues/63) and [#238](https://github.com/spf13/pflag/issues/238). + +For example, if you use pflag in your `TestMain` function and call `pflag.Parse()` after defining your custom flags, running a test like this: +```bash +go test /your/tests -run ^YourTest -v --your-test-pflags +``` +will result in the `-v` flag being ignored. This happens because of the way pflag handles flag parsing, skipping over go test's built-in shorthand flags. +To work around this, you can use the `ParseSkippedFlags` function, which ensures that go test's flags are parsed separately using the standard flag package. + +**Example**: You want to parse go test flags that are otherwise ignored by `pflag.Parse()` +```go +import ( + goflag "flag" + flag "github.com/spf13/pflag" +) + +var ip *int = flag.Int("flagname", 1234, "help message for flagname") + +func main() { + flag.CommandLine.AddGoFlagSet(goflag.CommandLine) + flag.ParseSkippedFlags(os.Args[1:], goflag.CommandLine) + flag.Parse() +} +``` + ## More info You can see the full reference documentation of the pflag package diff --git a/vendor/github.com/spf13/pflag/bool_func.go b/vendor/github.com/spf13/pflag/bool_func.go new file mode 100644 index 000000000..83d77afa8 --- /dev/null +++ b/vendor/github.com/spf13/pflag/bool_func.go @@ -0,0 +1,40 @@ +package pflag + +// -- func Value +type boolfuncValue func(string) error + +func (f boolfuncValue) Set(s string) error { return f(s) } + +func (f boolfuncValue) Type() string { return "boolfunc" } + +func (f boolfuncValue) String() string { return "" } // same behavior as stdlib 'flag' package + +func (f boolfuncValue) IsBoolFlag() bool { return true } + +// BoolFunc defines a func flag with specified name, callback function and usage string. +// +// The callback function will be called every time "--{name}" (or any form that matches the flag) is parsed +// on the command line.
+func (f *FlagSet) BoolFunc(name string, usage string, fn func(string) error) { + f.BoolFuncP(name, "", usage, fn) +} + +// BoolFuncP is like BoolFunc, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) BoolFuncP(name, shorthand string, usage string, fn func(string) error) { + var val Value = boolfuncValue(fn) + flag := f.VarPF(val, name, shorthand, usage) + flag.NoOptDefVal = "true" +} + +// BoolFunc defines a func flag with specified name, callback function and usage string. +// +// The callback function will be called every time "--{name}" (or any form that matches the flag) is parsed +// on the command line. +func BoolFunc(name string, usage string, fn func(string) error) { + CommandLine.BoolFuncP(name, "", usage, fn) +} + +// BoolFuncP is like BoolFunc, but accepts a shorthand letter that can be used after a single dash. +func BoolFuncP(name, shorthand string, usage string, fn func(string) error) { + CommandLine.BoolFuncP(name, shorthand, usage, fn) +} diff --git a/vendor/github.com/spf13/pflag/count.go b/vendor/github.com/spf13/pflag/count.go index a0b2679f7..d49c0143c 100644 --- a/vendor/github.com/spf13/pflag/count.go +++ b/vendor/github.com/spf13/pflag/count.go @@ -85,7 +85,7 @@ func (f *FlagSet) CountP(name, shorthand string, usage string) *int { // Count defines a count flag with specified name, default value, and usage string. // The return value is the address of an int variable that stores the value of the flag. 
-// A count flag will add 1 to its value evey time it is found on the command line +// A count flag will add 1 to its value every time it is found on the command line func Count(name string, usage string) *int { return CommandLine.CountP(name, "", usage) } diff --git a/vendor/github.com/spf13/pflag/errors.go b/vendor/github.com/spf13/pflag/errors.go new file mode 100644 index 000000000..ff11b66be --- /dev/null +++ b/vendor/github.com/spf13/pflag/errors.go @@ -0,0 +1,149 @@ +package pflag + +import "fmt" + +// notExistErrorMessageType specifies which flavor of "flag does not exist" +// is printed by NotExistError. This allows the related errors to be grouped +// under a single NotExistError struct without making a breaking change to +// the error message text. +type notExistErrorMessageType int + +const ( + flagNotExistMessage notExistErrorMessageType = iota + flagNotDefinedMessage + flagNoSuchFlagMessage + flagUnknownFlagMessage + flagUnknownShorthandFlagMessage +) + +// NotExistError is the error returned when trying to access a flag that +// does not exist in the FlagSet. +type NotExistError struct { + name string + specifiedShorthands string + messageType notExistErrorMessageType +} + +// Error implements error. +func (e *NotExistError) Error() string { + switch e.messageType { + case flagNotExistMessage: + return fmt.Sprintf("flag %q does not exist", e.name) + + case flagNotDefinedMessage: + return fmt.Sprintf("flag accessed but not defined: %s", e.name) + + case flagNoSuchFlagMessage: + return fmt.Sprintf("no such flag -%v", e.name) + + case flagUnknownFlagMessage: + return fmt.Sprintf("unknown flag: --%s", e.name) + + case flagUnknownShorthandFlagMessage: + c := rune(e.name[0]) + return fmt.Sprintf("unknown shorthand flag: %q in -%s", c, e.specifiedShorthands) + } + + panic(fmt.Errorf("unknown flagNotExistErrorMessageType: %v", e.messageType)) +} + +// GetSpecifiedName returns the name of the flag (without dashes) as it +// appeared in the parsed arguments. 
+func (e *NotExistError) GetSpecifiedName() string { + return e.name +} + +// GetSpecifiedShortnames returns the group of shorthand arguments +// (without dashes) that the flag appeared within. If the flag was not in a +// shorthand group, this will return an empty string. +func (e *NotExistError) GetSpecifiedShortnames() string { + return e.specifiedShorthands +} + +// ValueRequiredError is the error returned when a flag needs an argument but +// no argument was provided. +type ValueRequiredError struct { + flag *Flag + specifiedName string + specifiedShorthands string +} + +// Error implements error. +func (e *ValueRequiredError) Error() string { + if len(e.specifiedShorthands) > 0 { + c := rune(e.specifiedName[0]) + return fmt.Sprintf("flag needs an argument: %q in -%s", c, e.specifiedShorthands) + } + + return fmt.Sprintf("flag needs an argument: --%s", e.specifiedName) +} + +// GetFlag returns the flag for which the error occurred. +func (e *ValueRequiredError) GetFlag() *Flag { + return e.flag +} + +// GetSpecifiedName returns the name of the flag (without dashes) as it +// appeared in the parsed arguments. +func (e *ValueRequiredError) GetSpecifiedName() string { + return e.specifiedName +} + +// GetSpecifiedShortnames returns the group of shorthand arguments +// (without dashes) that the flag appeared within. If the flag was not in a +// shorthand group, this will return an empty string. +func (e *ValueRequiredError) GetSpecifiedShortnames() string { + return e.specifiedShorthands +} + +// InvalidValueError is the error returned when an invalid value is used +// for a flag. +type InvalidValueError struct { + flag *Flag + value string + cause error +} + +// Error implements error. 
+func (e *InvalidValueError) Error() string { + flag := e.flag + var flagName string + if flag.Shorthand != "" && flag.ShorthandDeprecated == "" { + flagName = fmt.Sprintf("-%s, --%s", flag.Shorthand, flag.Name) + } else { + flagName = fmt.Sprintf("--%s", flag.Name) + } + return fmt.Sprintf("invalid argument %q for %q flag: %v", e.value, flagName, e.cause) +} + +// Unwrap implements errors.Unwrap. +func (e *InvalidValueError) Unwrap() error { + return e.cause +} + +// GetFlag returns the flag for which the error occurred. +func (e *InvalidValueError) GetFlag() *Flag { + return e.flag +} + +// GetValue returns the invalid value that was provided. +func (e *InvalidValueError) GetValue() string { + return e.value +} + +// InvalidSyntaxError is the error returned when a bad flag name is passed on +// the command line. +type InvalidSyntaxError struct { + specifiedFlag string +} + +// Error implements error. +func (e *InvalidSyntaxError) Error() string { + return fmt.Sprintf("bad flag syntax: %s", e.specifiedFlag) +} + +// GetSpecifiedName returns the exact flag (with dashes) as it +// appeared in the parsed arguments. +func (e *InvalidSyntaxError) GetSpecifiedFlag() string { + return e.specifiedFlag +} diff --git a/vendor/github.com/spf13/pflag/flag.go b/vendor/github.com/spf13/pflag/flag.go index 7c058de37..2fd3c5759 100644 --- a/vendor/github.com/spf13/pflag/flag.go +++ b/vendor/github.com/spf13/pflag/flag.go @@ -27,23 +27,32 @@ unaffected. Define flags using flag.String(), Bool(), Int(), etc. This declares an integer flag, -flagname, stored in the pointer ip, with type *int. + var ip = flag.Int("flagname", 1234, "help message for flagname") + If you like, you can bind the flag to a variable using the Var() functions. 
+ var flagvar int func init() { flag.IntVar(&flagvar, "flagname", 1234, "help message for flagname") } + Or you can create custom flags that satisfy the Value interface (with pointer receivers) and couple them to flag parsing by + flag.Var(&flagVal, "name", "help message for flagname") + For such flags, the default value is just the initial value of the variable. After all flags are defined, call + flag.Parse() + to parse the command line into the defined flags. Flags may then be used directly. If you're using the flags themselves, they are all pointers; if you bind to variables, they're values. + fmt.Println("ip has value ", *ip) fmt.Println("flagvar has value ", flagvar) @@ -54,22 +63,26 @@ The arguments are indexed from 0 through flag.NArg()-1. The pflag package also defines some new functions that are not in flag, that give one-letter shorthands for flags. You can use these by appending 'P' to the name of any function that defines a flag. + var ip = flag.IntP("flagname", "f", 1234, "help message") var flagvar bool func init() { flag.BoolVarP(&flagvar, "boolname", "b", true, "help message") } flag.VarP(&flagval, "varname", "v", "help message") + Shorthand letters can be used with single dashes on the command line. Boolean shorthand flags can be combined with other shorthand flags. Command line flag syntax: + --flag // boolean flags only --flag=x Unlike the flag package, a single dash before an option means something different than a double dash. Single dashes signify a series of shorthand letters for flags. All but the last shorthand letter must be boolean flags. 
+ // boolean flags -f -abc @@ -124,12 +137,17 @@ const ( PanicOnError ) -// ParseErrorsWhitelist defines the parsing errors that can be ignored -type ParseErrorsWhitelist struct { +// ParseErrorsAllowlist defines the parsing errors that can be ignored +type ParseErrorsAllowlist struct { // UnknownFlags will ignore unknown flags errors and continue parsing rest of the flags UnknownFlags bool } +// ParseErrorsWhitelist defines the parsing errors that can be ignored. +// +// Deprecated: use [ParseErrorsAllowlist] instead. This type will be removed in a future release. +type ParseErrorsWhitelist = ParseErrorsAllowlist + // NormalizedName is a flag name that has been normalized according to rules // for the FlagSet (e.g. making '-' and '_' equivalent). type NormalizedName string @@ -145,8 +163,13 @@ type FlagSet struct { // help/usage messages. SortFlags bool - // ParseErrorsWhitelist is used to configure a whitelist of errors - ParseErrorsWhitelist ParseErrorsWhitelist + // ParseErrorsAllowlist is used to configure an allowlist of errors + ParseErrorsAllowlist ParseErrorsAllowlist + + // ParseErrorsAllowlist is used to configure an allowlist of errors. + // + // Deprecated: use [FlagSet.ParseErrorsAllowlist] instead. This field will be removed in a future release. 
+ ParseErrorsWhitelist ParseErrorsAllowlist name string parsed bool @@ -381,7 +404,7 @@ func (f *FlagSet) lookup(name NormalizedName) *Flag { func (f *FlagSet) getFlagType(name string, ftype string, convFunc func(sval string) (interface{}, error)) (interface{}, error) { flag := f.Lookup(name) if flag == nil { - err := fmt.Errorf("flag accessed but not defined: %s", name) + err := &NotExistError{name: name, messageType: flagNotDefinedMessage} return nil, err } @@ -411,7 +434,7 @@ func (f *FlagSet) ArgsLenAtDash() int { func (f *FlagSet) MarkDeprecated(name string, usageMessage string) error { flag := f.Lookup(name) if flag == nil { - return fmt.Errorf("flag %q does not exist", name) + return &NotExistError{name: name, messageType: flagNotExistMessage} } if usageMessage == "" { return fmt.Errorf("deprecated message for flag %q must be set", name) @@ -427,7 +450,7 @@ func (f *FlagSet) MarkDeprecated(name string, usageMessage string) error { func (f *FlagSet) MarkShorthandDeprecated(name string, usageMessage string) error { flag := f.Lookup(name) if flag == nil { - return fmt.Errorf("flag %q does not exist", name) + return &NotExistError{name: name, messageType: flagNotExistMessage} } if usageMessage == "" { return fmt.Errorf("deprecated message for flag %q must be set", name) @@ -441,7 +464,7 @@ func (f *FlagSet) MarkShorthandDeprecated(name string, usageMessage string) erro func (f *FlagSet) MarkHidden(name string) error { flag := f.Lookup(name) if flag == nil { - return fmt.Errorf("flag %q does not exist", name) + return &NotExistError{name: name, messageType: flagNotExistMessage} } flag.Hidden = true return nil @@ -464,18 +487,16 @@ func (f *FlagSet) Set(name, value string) error { normalName := f.normalizeFlagName(name) flag, ok := f.formal[normalName] if !ok { - return fmt.Errorf("no such flag -%v", name) + return &NotExistError{name: name, messageType: flagNoSuchFlagMessage} } err := flag.Value.Set(value) if err != nil { - var flagName string - if flag.Shorthand 
!= "" && flag.ShorthandDeprecated == "" { - flagName = fmt.Sprintf("-%s, --%s", flag.Shorthand, flag.Name) - } else { - flagName = fmt.Sprintf("--%s", flag.Name) + return &InvalidValueError{ + flag: flag, + value: value, + cause: err, } - return fmt.Errorf("invalid argument %q for %q flag: %v", value, flagName, err) } if !flag.Changed { @@ -501,7 +522,7 @@ func (f *FlagSet) SetAnnotation(name, key string, values []string) error { normalName := f.normalizeFlagName(name) flag, ok := f.formal[normalName] if !ok { - return fmt.Errorf("no such flag -%v", name) + return &NotExistError{name: name, messageType: flagNoSuchFlagMessage} } if flag.Annotations == nil { flag.Annotations = map[string][]string{} @@ -538,7 +559,7 @@ func (f *FlagSet) PrintDefaults() { func (f *Flag) defaultIsZeroValue() bool { switch f.Value.(type) { case boolFlag: - return f.DefValue == "false" + return f.DefValue == "false" || f.DefValue == "" case *durationValue: // Beginning in Go 1.7, duration zero values are "0s" return f.DefValue == "0" || f.DefValue == "0s" @@ -551,7 +572,7 @@ func (f *Flag) defaultIsZeroValue() bool { case *intSliceValue, *stringSliceValue, *stringArrayValue: return f.DefValue == "[]" default: - switch f.Value.String() { + switch f.DefValue { case "false": return true case "": @@ -588,8 +609,10 @@ func UnquoteUsage(flag *Flag) (name string, usage string) { name = flag.Value.Type() switch name { - case "bool": + case "bool", "boolfunc": name = "" + case "func": + name = "value" case "float64": name = "float" case "int64": @@ -707,7 +730,7 @@ func (f *FlagSet) FlagUsagesWrapped(cols int) string { switch flag.Value.Type() { case "string": line += fmt.Sprintf("[=\"%s\"]", flag.NoOptDefVal) - case "bool": + case "bool", "boolfunc": if flag.NoOptDefVal != "true" { line += fmt.Sprintf("[=%s]", flag.NoOptDefVal) } @@ -911,12 +934,10 @@ func VarP(value Value, name, shorthand, usage string) { CommandLine.VarP(value, name, shorthand, usage) } -// failf prints to standard error a 
formatted error and usage message and +// fail prints an error message and usage message to standard error and // returns the error. -func (f *FlagSet) failf(format string, a ...interface{}) error { - err := fmt.Errorf(format, a...) +func (f *FlagSet) fail(err error) error { if f.errorHandling != ContinueOnError { - fmt.Fprintln(f.Output(), err) f.usage() } return err @@ -934,9 +955,9 @@ func (f *FlagSet) usage() { } } -//--unknown (args will be empty) -//--unknown --next-flag ... (args will be --next-flag ...) -//--unknown arg ... (args will be arg ...) +// --unknown (args will be empty) +// --unknown --next-flag ... (args will be --next-flag ...) +// --unknown arg ... (args will be arg ...) func stripUnknownFlagValue(args []string) []string { if len(args) == 0 { //--unknown @@ -960,7 +981,7 @@ func (f *FlagSet) parseLongArg(s string, args []string, fn parseFunc) (a []strin a = args name := s[2:] if len(name) == 0 || name[0] == '-' || name[0] == '=' { - err = f.failf("bad flag syntax: %s", s) + err = f.fail(&InvalidSyntaxError{specifiedFlag: s}) return } @@ -974,6 +995,8 @@ func (f *FlagSet) parseLongArg(s string, args []string, fn parseFunc) (a []strin f.usage() return a, ErrHelp case f.ParseErrorsWhitelist.UnknownFlags: + fallthrough + case f.ParseErrorsAllowlist.UnknownFlags: // --unknown=unknownval arg ... 
// we do not want to lose arg in this case if len(split) >= 2 { @@ -982,7 +1005,7 @@ func (f *FlagSet) parseLongArg(s string, args []string, fn parseFunc) (a []strin return stripUnknownFlagValue(a), nil default: - err = f.failf("unknown flag: --%s", name) + err = f.fail(&NotExistError{name: name, messageType: flagUnknownFlagMessage}) return } } @@ -1000,13 +1023,16 @@ func (f *FlagSet) parseLongArg(s string, args []string, fn parseFunc) (a []strin a = a[1:] } else { // '--flag' (arg was required) - err = f.failf("flag needs an argument: %s", s) + err = f.fail(&ValueRequiredError{ + flag: flag, + specifiedName: name, + }) return } err = fn(flag, value) if err != nil { - f.failf(err.Error()) + f.fail(err) } return } @@ -1014,7 +1040,7 @@ func (f *FlagSet) parseLongArg(s string, args []string, fn parseFunc) (a []strin func (f *FlagSet) parseSingleShortArg(shorthands string, args []string, fn parseFunc) (outShorts string, outArgs []string, err error) { outArgs = args - if strings.HasPrefix(shorthands, "test.") { + if isGotestShorthandFlag(shorthands) { return } @@ -1029,6 +1055,8 @@ func (f *FlagSet) parseSingleShortArg(shorthands string, args []string, fn parse err = ErrHelp return case f.ParseErrorsWhitelist.UnknownFlags: + fallthrough + case f.ParseErrorsAllowlist.UnknownFlags: // '-f=arg arg ...' 
// we do not want to lose arg in this case if len(shorthands) > 2 && shorthands[1] == '=' { @@ -1039,7 +1067,11 @@ func (f *FlagSet) parseSingleShortArg(shorthands string, args []string, fn parse outArgs = stripUnknownFlagValue(outArgs) return default: - err = f.failf("unknown shorthand flag: %q in -%s", c, shorthands) + err = f.fail(&NotExistError{ + name: string(c), + specifiedShorthands: shorthands, + messageType: flagUnknownShorthandFlagMessage, + }) return } } @@ -1062,7 +1094,11 @@ func (f *FlagSet) parseSingleShortArg(shorthands string, args []string, fn parse outArgs = args[1:] } else { // '-f' (arg was required) - err = f.failf("flag needs an argument: %q in -%s", c, shorthands) + err = f.fail(&ValueRequiredError{ + flag: flag, + specifiedName: string(c), + specifiedShorthands: shorthands, + }) return } @@ -1072,7 +1108,7 @@ func (f *FlagSet) parseSingleShortArg(shorthands string, args []string, fn parse err = fn(flag, value) if err != nil { - f.failf(err.Error()) + f.fail(err) } return } @@ -1135,12 +1171,12 @@ func (f *FlagSet) Parse(arguments []string) error { } f.parsed = true - if len(arguments) < 0 { + f.args = make([]string, 0, len(arguments)) + + if len(arguments) == 0 { return nil } - f.args = make([]string, 0, len(arguments)) - set := func(flag *Flag, value string) error { return f.Set(flag.Name, value) } @@ -1151,7 +1187,10 @@ func (f *FlagSet) Parse(arguments []string) error { case ContinueOnError: return err case ExitOnError: - fmt.Println(err) + if err == ErrHelp { + os.Exit(0) + } + fmt.Fprintln(f.Output(), err) os.Exit(2) case PanicOnError: panic(err) @@ -1177,6 +1216,10 @@ func (f *FlagSet) ParseAll(arguments []string, fn func(flag *Flag, value string) case ContinueOnError: return err case ExitOnError: + if err == ErrHelp { + os.Exit(0) + } + fmt.Fprintln(f.Output(), err) os.Exit(2) case PanicOnError: panic(err) diff --git a/vendor/github.com/spf13/pflag/func.go b/vendor/github.com/spf13/pflag/func.go new file mode 100644 index 
000000000..9f4d88f27 --- /dev/null +++ b/vendor/github.com/spf13/pflag/func.go @@ -0,0 +1,37 @@ +package pflag + +// -- func Value +type funcValue func(string) error + +func (f funcValue) Set(s string) error { return f(s) } + +func (f funcValue) Type() string { return "func" } + +func (f funcValue) String() string { return "" } // same behavior as stdlib 'flag' package + +// Func defines a func flag with specified name, callback function and usage string. +// +// The callback function will be called every time "--{name}={value}" (or equivalent) is +// parsed on the command line, with "{value}" as an argument. +func (f *FlagSet) Func(name string, usage string, fn func(string) error) { + f.FuncP(name, "", usage, fn) +} + +// FuncP is like Func, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) FuncP(name string, shorthand string, usage string, fn func(string) error) { + var val Value = funcValue(fn) + f.VarP(val, name, shorthand, usage) +} + +// Func defines a func flag with specified name, callback function and usage string. +// +// The callback function will be called every time "--{name}={value}" (or equivalent) is +// parsed on the command line, with "{value}" as an argument. +func Func(name string, usage string, fn func(string) error) { + CommandLine.FuncP(name, "", usage, fn) +} + +// FuncP is like Func, but accepts a shorthand letter that can be used after a single dash. 
+func FuncP(name, shorthand string, usage string, fn func(string) error) { + CommandLine.FuncP(name, shorthand, usage, fn) +} diff --git a/vendor/github.com/spf13/pflag/golangflag.go b/vendor/github.com/spf13/pflag/golangflag.go index d3dd72b7f..e62eab538 100644 --- a/vendor/github.com/spf13/pflag/golangflag.go +++ b/vendor/github.com/spf13/pflag/golangflag.go @@ -8,8 +8,18 @@ import ( goflag "flag" "reflect" "strings" + "time" ) +// go test flags prefixes +func isGotestFlag(flag string) bool { + return strings.HasPrefix(flag, "-test.") +} + +func isGotestShorthandFlag(flag string) bool { + return strings.HasPrefix(flag, "test.") +} + // flagValueWrapper implements pflag.Value around a flag.Value. The main // difference here is the addition of the Type method that returns a string // name of the type. As this is generally unknown, we approximate that with @@ -103,3 +113,49 @@ func (f *FlagSet) AddGoFlagSet(newSet *goflag.FlagSet) { } f.addedGoFlagSets = append(f.addedGoFlagSets, newSet) } + +// CopyToGoFlagSet will add all current flags to the given Go flag set. +// Deprecation remarks get copied into the usage description. +// Whenever possible, a flag gets added for which Go flags shows +// a proper type in the help message. 
+func (f *FlagSet) CopyToGoFlagSet(newSet *goflag.FlagSet) { + f.VisitAll(func(flag *Flag) { + usage := flag.Usage + if flag.Deprecated != "" { + usage += " (DEPRECATED: " + flag.Deprecated + ")" + } + + switch value := flag.Value.(type) { + case *stringValue: + newSet.StringVar((*string)(value), flag.Name, flag.DefValue, usage) + case *intValue: + newSet.IntVar((*int)(value), flag.Name, *(*int)(value), usage) + case *int64Value: + newSet.Int64Var((*int64)(value), flag.Name, *(*int64)(value), usage) + case *uintValue: + newSet.UintVar((*uint)(value), flag.Name, *(*uint)(value), usage) + case *uint64Value: + newSet.Uint64Var((*uint64)(value), flag.Name, *(*uint64)(value), usage) + case *durationValue: + newSet.DurationVar((*time.Duration)(value), flag.Name, *(*time.Duration)(value), usage) + case *float64Value: + newSet.Float64Var((*float64)(value), flag.Name, *(*float64)(value), usage) + default: + newSet.Var(flag.Value, flag.Name, usage) + } + }) +} + +// ParseSkippedFlags explicitly Parses go test flags (i.e. the one starting with '-test.') with goflag.Parse(), +// since by default those are skipped by pflag.Parse(). 
+// Typical usage example: `ParseGoTestFlags(os.Args[1:], goflag.CommandLine)` +func ParseSkippedFlags(osArgs []string, goFlagSet *goflag.FlagSet) error { + var skippedFlags []string + for _, f := range osArgs { + if isGotestFlag(f) { + skippedFlags = append(skippedFlags, f) + } + } + return goFlagSet.Parse(skippedFlags) +} + diff --git a/vendor/github.com/spf13/pflag/ipnet_slice.go b/vendor/github.com/spf13/pflag/ipnet_slice.go index 6b541aa87..c6e89da18 100644 --- a/vendor/github.com/spf13/pflag/ipnet_slice.go +++ b/vendor/github.com/spf13/pflag/ipnet_slice.go @@ -73,7 +73,7 @@ func (s *ipNetSliceValue) String() string { func ipNetSliceConv(val string) (interface{}, error) { val = strings.Trim(val, "[]") - // Emtpy string would cause a slice with one (empty) entry + // Empty string would cause a slice with one (empty) entry if len(val) == 0 { return []net.IPNet{}, nil } diff --git a/vendor/github.com/spf13/pflag/string_to_string.go b/vendor/github.com/spf13/pflag/string_to_string.go index 890a01afc..1d1e3bf91 100644 --- a/vendor/github.com/spf13/pflag/string_to_string.go +++ b/vendor/github.com/spf13/pflag/string_to_string.go @@ -4,6 +4,7 @@ import ( "bytes" "encoding/csv" "fmt" + "sort" "strings" ) @@ -62,8 +63,15 @@ func (s *stringToStringValue) Type() string { } func (s *stringToStringValue) String() string { + keys := make([]string, 0, len(*s.value)) + for k := range *s.value { + keys = append(keys, k) + } + sort.Strings(keys) + records := make([]string, 0, len(*s.value)>>1) - for k, v := range *s.value { + for _, k := range keys { + v := (*s.value)[k] records = append(records, k+"="+v) } diff --git a/vendor/github.com/spf13/pflag/text.go b/vendor/github.com/spf13/pflag/text.go new file mode 100644 index 000000000..886d5a3d8 --- /dev/null +++ b/vendor/github.com/spf13/pflag/text.go @@ -0,0 +1,81 @@ +package pflag + +import ( + "encoding" + "fmt" + "reflect" +) + +// following is copied from go 1.23.4 flag.go +type textValue struct{ p encoding.TextUnmarshaler 
} + +func newTextValue(val encoding.TextMarshaler, p encoding.TextUnmarshaler) textValue { + ptrVal := reflect.ValueOf(p) + if ptrVal.Kind() != reflect.Ptr { + panic("variable value type must be a pointer") + } + defVal := reflect.ValueOf(val) + if defVal.Kind() == reflect.Ptr { + defVal = defVal.Elem() + } + if defVal.Type() != ptrVal.Type().Elem() { + panic(fmt.Sprintf("default type does not match variable type: %v != %v", defVal.Type(), ptrVal.Type().Elem())) + } + ptrVal.Elem().Set(defVal) + return textValue{p} +} + +func (v textValue) Set(s string) error { + return v.p.UnmarshalText([]byte(s)) +} + +func (v textValue) Get() interface{} { + return v.p +} + +func (v textValue) String() string { + if m, ok := v.p.(encoding.TextMarshaler); ok { + if b, err := m.MarshalText(); err == nil { + return string(b) + } + } + return "" +} + +//end of copy + +func (v textValue) Type() string { + return reflect.ValueOf(v.p).Type().Name() +} + +// GetText set out, which implements encoding.UnmarshalText, to the value of a flag with given name +func (f *FlagSet) GetText(name string, out encoding.TextUnmarshaler) error { + flag := f.Lookup(name) + if flag == nil { + return fmt.Errorf("flag accessed but not defined: %s", name) + } + if flag.Value.Type() != reflect.TypeOf(out).Name() { + return fmt.Errorf("trying to get %s value of flag of type %s", reflect.TypeOf(out).Name(), flag.Value.Type()) + } + return out.UnmarshalText([]byte(flag.Value.String())) +} + +// TextVar defines a flag with a specified name, default value, and usage string. The argument p must be a pointer to a variable that will hold the value of the flag, and p must implement encoding.TextUnmarshaler. If the flag is used, the flag value will be passed to p's UnmarshalText method. The type of the default value must be the same as the type of p. 
+func (f *FlagSet) TextVar(p encoding.TextUnmarshaler, name string, value encoding.TextMarshaler, usage string) { + f.VarP(newTextValue(value, p), name, "", usage) +} + +// TextVarP is like TextVar, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) TextVarP(p encoding.TextUnmarshaler, name, shorthand string, value encoding.TextMarshaler, usage string) { + f.VarP(newTextValue(value, p), name, shorthand, usage) +} + +// TextVar defines a flag with a specified name, default value, and usage string. The argument p must be a pointer to a variable that will hold the value of the flag, and p must implement encoding.TextUnmarshaler. If the flag is used, the flag value will be passed to p's UnmarshalText method. The type of the default value must be the same as the type of p. +func TextVar(p encoding.TextUnmarshaler, name string, value encoding.TextMarshaler, usage string) { + CommandLine.VarP(newTextValue(value, p), name, "", usage) +} + +// TextVarP is like TextVar, but accepts a shorthand letter that can be used after a single dash. +func TextVarP(p encoding.TextUnmarshaler, name, shorthand string, value encoding.TextMarshaler, usage string) { + CommandLine.VarP(newTextValue(value, p), name, shorthand, usage) +} diff --git a/vendor/github.com/spf13/pflag/time.go b/vendor/github.com/spf13/pflag/time.go new file mode 100644 index 000000000..3dee42479 --- /dev/null +++ b/vendor/github.com/spf13/pflag/time.go @@ -0,0 +1,124 @@ +package pflag + +import ( + "fmt" + "strings" + "time" +) + +// TimeValue adapts time.Time for use as a flag. +type timeValue struct { + *time.Time + formats []string +} + +func newTimeValue(val time.Time, p *time.Time, formats []string) *timeValue { + *p = val + return &timeValue{ + Time: p, + formats: formats, + } +} + +// Set time.Time value from string based on accepted formats. 
+func (d *timeValue) Set(s string) error { + s = strings.TrimSpace(s) + for _, f := range d.formats { + v, err := time.Parse(f, s) + if err != nil { + continue + } + *d.Time = v + return nil + } + + formatsString := "" + for i, f := range d.formats { + if i > 0 { + formatsString += ", " + } + formatsString += fmt.Sprintf("`%s`", f) + } + + return fmt.Errorf("invalid time format `%s` must be one of: %s", s, formatsString) +} + +// Type name for time.Time flags. +func (d *timeValue) Type() string { + return "time" +} + +func (d *timeValue) String() string { + if d.Time.IsZero() { + return "" + } else { + return d.Time.Format(time.RFC3339Nano) + } +} + +// GetTime return the time value of a flag with the given name +func (f *FlagSet) GetTime(name string) (time.Time, error) { + flag := f.Lookup(name) + if flag == nil { + err := fmt.Errorf("flag accessed but not defined: %s", name) + return time.Time{}, err + } + + if flag.Value.Type() != "time" { + err := fmt.Errorf("trying to get %s value of flag of type %s", "time", flag.Value.Type()) + return time.Time{}, err + } + + val, ok := flag.Value.(*timeValue) + if !ok { + return time.Time{}, fmt.Errorf("value %s is not a time", flag.Value) + } + + return *val.Time, nil +} + +// TimeVar defines a time.Time flag with specified name, default value, and usage string. +// The argument p points to a time.Time variable in which to store the value of the flag. +func (f *FlagSet) TimeVar(p *time.Time, name string, value time.Time, formats []string, usage string) { + f.TimeVarP(p, name, "", value, formats, usage) +} + +// TimeVarP is like TimeVar, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) TimeVarP(p *time.Time, name, shorthand string, value time.Time, formats []string, usage string) { + f.VarP(newTimeValue(value, p, formats), name, shorthand, usage) +} + +// TimeVar defines a time.Time flag with specified name, default value, and usage string. 
+// The argument p points to a time.Time variable in which to store the value of the flag. +func TimeVar(p *time.Time, name string, value time.Time, formats []string, usage string) { + CommandLine.TimeVarP(p, name, "", value, formats, usage) +} + +// TimeVarP is like TimeVar, but accepts a shorthand letter that can be used after a single dash. +func TimeVarP(p *time.Time, name, shorthand string, value time.Time, formats []string, usage string) { + CommandLine.VarP(newTimeValue(value, p, formats), name, shorthand, usage) +} + +// Time defines a time.Time flag with specified name, default value, and usage string. +// The return value is the address of a time.Time variable that stores the value of the flag. +func (f *FlagSet) Time(name string, value time.Time, formats []string, usage string) *time.Time { + return f.TimeP(name, "", value, formats, usage) +} + +// TimeP is like Time, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) TimeP(name, shorthand string, value time.Time, formats []string, usage string) *time.Time { + p := new(time.Time) + f.TimeVarP(p, name, shorthand, value, formats, usage) + return p +} + +// Time defines a time.Time flag with specified name, default value, and usage string. +// The return value is the address of a time.Time variable that stores the value of the flag. +func Time(name string, value time.Time, formats []string, usage string) *time.Time { + return CommandLine.TimeP(name, "", value, formats, usage) +} + +// TimeP is like Time, but accepts a shorthand letter that can be used after a single dash. 
+func TimeP(name, shorthand string, value time.Time, formats []string, usage string) *time.Time { + return CommandLine.TimeP(name, shorthand, value, formats, usage) +} diff --git a/vendor/github.com/stretchr/testify/assert/assertion_compare.go b/vendor/github.com/stretchr/testify/assert/assertion_compare.go index 7e19eba09..ffb24e8e3 100644 --- a/vendor/github.com/stretchr/testify/assert/assertion_compare.go +++ b/vendor/github.com/stretchr/testify/assert/assertion_compare.go @@ -390,7 +390,8 @@ func Greater(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface if h, ok := t.(tHelper); ok { h.Helper() } - return compareTwoValues(t, e1, e2, []compareResult{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs...) + failMessage := fmt.Sprintf("\"%v\" is not greater than \"%v\"", e1, e2) + return compareTwoValues(t, e1, e2, []compareResult{compareGreater}, failMessage, msgAndArgs...) } // GreaterOrEqual asserts that the first element is greater than or equal to the second @@ -403,7 +404,8 @@ func GreaterOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...in if h, ok := t.(tHelper); ok { h.Helper() } - return compareTwoValues(t, e1, e2, []compareResult{compareGreater, compareEqual}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs...) + failMessage := fmt.Sprintf("\"%v\" is not greater than or equal to \"%v\"", e1, e2) + return compareTwoValues(t, e1, e2, []compareResult{compareGreater, compareEqual}, failMessage, msgAndArgs...) } // Less asserts that the first element is less than the second @@ -415,7 +417,8 @@ func Less(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) if h, ok := t.(tHelper); ok { h.Helper() } - return compareTwoValues(t, e1, e2, []compareResult{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs...) + failMessage := fmt.Sprintf("\"%v\" is not less than \"%v\"", e1, e2) + return compareTwoValues(t, e1, e2, []compareResult{compareLess}, failMessage, msgAndArgs...) 
} // LessOrEqual asserts that the first element is less than or equal to the second @@ -428,7 +431,8 @@ func LessOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...inter if h, ok := t.(tHelper); ok { h.Helper() } - return compareTwoValues(t, e1, e2, []compareResult{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs...) + failMessage := fmt.Sprintf("\"%v\" is not less than or equal to \"%v\"", e1, e2) + return compareTwoValues(t, e1, e2, []compareResult{compareLess, compareEqual}, failMessage, msgAndArgs...) } // Positive asserts that the specified element is positive @@ -440,7 +444,8 @@ func Positive(t TestingT, e interface{}, msgAndArgs ...interface{}) bool { h.Helper() } zero := reflect.Zero(reflect.TypeOf(e)) - return compareTwoValues(t, e, zero.Interface(), []compareResult{compareGreater}, "\"%v\" is not positive", msgAndArgs...) + failMessage := fmt.Sprintf("\"%v\" is not positive", e) + return compareTwoValues(t, e, zero.Interface(), []compareResult{compareGreater}, failMessage, msgAndArgs...) } // Negative asserts that the specified element is negative @@ -452,7 +457,8 @@ func Negative(t TestingT, e interface{}, msgAndArgs ...interface{}) bool { h.Helper() } zero := reflect.Zero(reflect.TypeOf(e)) - return compareTwoValues(t, e, zero.Interface(), []compareResult{compareLess}, "\"%v\" is not negative", msgAndArgs...) + failMessage := fmt.Sprintf("\"%v\" is not negative", e) + return compareTwoValues(t, e, zero.Interface(), []compareResult{compareLess}, failMessage, msgAndArgs...) } func compareTwoValues(t TestingT, e1 interface{}, e2 interface{}, allowedComparesResults []compareResult, failMessage string, msgAndArgs ...interface{}) bool { @@ -468,11 +474,11 @@ func compareTwoValues(t TestingT, e1 interface{}, e2 interface{}, allowedCompare compareResult, isComparable := compare(e1, e2, e1Kind) if !isComparable { - return Fail(t, fmt.Sprintf("Can not compare type \"%s\"", reflect.TypeOf(e1)), msgAndArgs...) 
+ return Fail(t, fmt.Sprintf(`Can not compare type "%T"`, e1), msgAndArgs...) } if !containsValue(allowedComparesResults, compareResult) { - return Fail(t, fmt.Sprintf(failMessage, e1, e2), msgAndArgs...) + return Fail(t, failMessage, msgAndArgs...) } return true diff --git a/vendor/github.com/stretchr/testify/assert/assertion_format.go b/vendor/github.com/stretchr/testify/assert/assertion_format.go index 190634165..c592f6ad5 100644 --- a/vendor/github.com/stretchr/testify/assert/assertion_format.go +++ b/vendor/github.com/stretchr/testify/assert/assertion_format.go @@ -50,10 +50,19 @@ func ElementsMatchf(t TestingT, listA interface{}, listB interface{}, msg string return ElementsMatch(t, listA, listB, append([]interface{}{msg}, args...)...) } -// Emptyf asserts that the specified object is empty. I.e. nil, "", false, 0 or either -// a slice or a channel with len == 0. +// Emptyf asserts that the given value is "empty". +// +// [Zero values] are "empty". +// +// Arrays are "empty" if every element is the zero value of the type (stricter than "empty"). +// +// Slices, maps and channels with zero length are "empty". +// +// Pointer values are "empty" if the pointer is nil or if the pointed value is "empty". // // assert.Emptyf(t, obj, "error message %s", "formatted") +// +// [Zero values]: https://go.dev/ref/spec#The_zero_value func Emptyf(t TestingT, object interface{}, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -117,10 +126,8 @@ func EqualValuesf(t TestingT, expected interface{}, actual interface{}, msg stri // Errorf asserts that a function returned an error (i.e. not `nil`). 
// -// actualObj, err := SomeFunction() -// if assert.Errorf(t, err, "error message %s", "formatted") { -// assert.Equal(t, expectedErrorf, err) -// } +// actualObj, err := SomeFunction() +// assert.Errorf(t, err, "error message %s", "formatted") func Errorf(t TestingT, err error, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -438,7 +445,19 @@ func IsNonIncreasingf(t TestingT, object interface{}, msg string, args ...interf return IsNonIncreasing(t, object, append([]interface{}{msg}, args...)...) } +// IsNotTypef asserts that the specified objects are not of the same type. +// +// assert.IsNotTypef(t, &NotMyStruct{}, &MyStruct{}, "error message %s", "formatted") +func IsNotTypef(t TestingT, theType interface{}, object interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return IsNotType(t, theType, object, append([]interface{}{msg}, args...)...) +} + // IsTypef asserts that the specified objects are of the same type. +// +// assert.IsTypef(t, &MyStruct{}, &MyStruct{}, "error message %s", "formatted") func IsTypef(t TestingT, expectedType interface{}, object interface{}, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -585,8 +604,7 @@ func NotElementsMatchf(t TestingT, listA interface{}, listB interface{}, msg str return NotElementsMatch(t, listA, listB, append([]interface{}{msg}, args...)...) } -// NotEmptyf asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either -// a slice or a channel with len == 0. +// NotEmptyf asserts that the specified object is NOT [Empty]. // // if assert.NotEmptyf(t, obj, "error message %s", "formatted") { // assert.Equal(t, "two", obj[1]) @@ -693,12 +711,15 @@ func NotSamef(t TestingT, expected interface{}, actual interface{}, msg string, return NotSame(t, expected, actual, append([]interface{}{msg}, args...)...) } -// NotSubsetf asserts that the specified list(array, slice...) 
or map does NOT -// contain all elements given in the specified subset list(array, slice...) or -// map. +// NotSubsetf asserts that the list (array, slice, or map) does NOT contain all +// elements given in the subset (array, slice, or map). +// Map elements are key-value pairs unless compared with an array or slice where +// only the map key is evaluated. // // assert.NotSubsetf(t, [1, 3, 4], [1, 2], "error message %s", "formatted") // assert.NotSubsetf(t, {"x": 1, "y": 2}, {"z": 3}, "error message %s", "formatted") +// assert.NotSubsetf(t, [1, 3, 4], {1: "one", 2: "two"}, "error message %s", "formatted") +// assert.NotSubsetf(t, {"x": 1, "y": 2}, ["z"], "error message %s", "formatted") func NotSubsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -782,11 +803,15 @@ func Samef(t TestingT, expected interface{}, actual interface{}, msg string, arg return Same(t, expected, actual, append([]interface{}{msg}, args...)...) } -// Subsetf asserts that the specified list(array, slice...) or map contains all -// elements given in the specified subset list(array, slice...) or map. +// Subsetf asserts that the list (array, slice, or map) contains all elements +// given in the subset (array, slice, or map). +// Map elements are key-value pairs unless compared with an array or slice where +// only the map key is evaluated. 
// // assert.Subsetf(t, [1, 2, 3], [1, 2], "error message %s", "formatted") // assert.Subsetf(t, {"x": 1, "y": 2}, {"x": 1}, "error message %s", "formatted") +// assert.Subsetf(t, [1, 2, 3], {1: "one", 2: "two"}, "error message %s", "formatted") +// assert.Subsetf(t, {"x": 1, "y": 2}, ["x"], "error message %s", "formatted") func Subsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() diff --git a/vendor/github.com/stretchr/testify/assert/assertion_forward.go b/vendor/github.com/stretchr/testify/assert/assertion_forward.go index 21629087b..58db92845 100644 --- a/vendor/github.com/stretchr/testify/assert/assertion_forward.go +++ b/vendor/github.com/stretchr/testify/assert/assertion_forward.go @@ -92,10 +92,19 @@ func (a *Assertions) ElementsMatchf(listA interface{}, listB interface{}, msg st return ElementsMatchf(a.t, listA, listB, msg, args...) } -// Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or either -// a slice or a channel with len == 0. +// Empty asserts that the given value is "empty". +// +// [Zero values] are "empty". +// +// Arrays are "empty" if every element is the zero value of the type (stricter than "empty"). +// +// Slices, maps and channels with zero length are "empty". +// +// Pointer values are "empty" if the pointer is nil or if the pointed value is "empty". // // a.Empty(obj) +// +// [Zero values]: https://go.dev/ref/spec#The_zero_value func (a *Assertions) Empty(object interface{}, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -103,10 +112,19 @@ func (a *Assertions) Empty(object interface{}, msgAndArgs ...interface{}) bool { return Empty(a.t, object, msgAndArgs...) } -// Emptyf asserts that the specified object is empty. I.e. nil, "", false, 0 or either -// a slice or a channel with len == 0. +// Emptyf asserts that the given value is "empty". +// +// [Zero values] are "empty". 
+// +// Arrays are "empty" if every element is the zero value of the type (stricter than "empty"). +// +// Slices, maps and channels with zero length are "empty". +// +// Pointer values are "empty" if the pointer is nil or if the pointed value is "empty". // // a.Emptyf(obj, "error message %s", "formatted") +// +// [Zero values]: https://go.dev/ref/spec#The_zero_value func (a *Assertions) Emptyf(object interface{}, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -224,10 +242,8 @@ func (a *Assertions) Equalf(expected interface{}, actual interface{}, msg string // Error asserts that a function returned an error (i.e. not `nil`). // -// actualObj, err := SomeFunction() -// if a.Error(err) { -// assert.Equal(t, expectedError, err) -// } +// actualObj, err := SomeFunction() +// a.Error(err) func (a *Assertions) Error(err error, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -297,10 +313,8 @@ func (a *Assertions) ErrorIsf(err error, target error, msg string, args ...inter // Errorf asserts that a function returned an error (i.e. not `nil`). // -// actualObj, err := SomeFunction() -// if a.Errorf(err, "error message %s", "formatted") { -// assert.Equal(t, expectedErrorf, err) -// } +// actualObj, err := SomeFunction() +// a.Errorf(err, "error message %s", "formatted") func (a *Assertions) Errorf(err error, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -868,7 +882,29 @@ func (a *Assertions) IsNonIncreasingf(object interface{}, msg string, args ...in return IsNonIncreasingf(a.t, object, msg, args...) } +// IsNotType asserts that the specified objects are not of the same type. +// +// a.IsNotType(&NotMyStruct{}, &MyStruct{}) +func (a *Assertions) IsNotType(theType interface{}, object interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return IsNotType(a.t, theType, object, msgAndArgs...) 
+} + +// IsNotTypef asserts that the specified objects are not of the same type. +// +// a.IsNotTypef(&NotMyStruct{}, &MyStruct{}, "error message %s", "formatted") +func (a *Assertions) IsNotTypef(theType interface{}, object interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return IsNotTypef(a.t, theType, object, msg, args...) +} + // IsType asserts that the specified objects are of the same type. +// +// a.IsType(&MyStruct{}, &MyStruct{}) func (a *Assertions) IsType(expectedType interface{}, object interface{}, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -877,6 +913,8 @@ func (a *Assertions) IsType(expectedType interface{}, object interface{}, msgAnd } // IsTypef asserts that the specified objects are of the same type. +// +// a.IsTypef(&MyStruct{}, &MyStruct{}, "error message %s", "formatted") func (a *Assertions) IsTypef(expectedType interface{}, object interface{}, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1162,8 +1200,7 @@ func (a *Assertions) NotElementsMatchf(listA interface{}, listB interface{}, msg return NotElementsMatchf(a.t, listA, listB, msg, args...) } -// NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either -// a slice or a channel with len == 0. +// NotEmpty asserts that the specified object is NOT [Empty]. // // if a.NotEmpty(obj) { // assert.Equal(t, "two", obj[1]) @@ -1175,8 +1212,7 @@ func (a *Assertions) NotEmpty(object interface{}, msgAndArgs ...interface{}) boo return NotEmpty(a.t, object, msgAndArgs...) } -// NotEmptyf asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either -// a slice or a channel with len == 0. +// NotEmptyf asserts that the specified object is NOT [Empty]. 
// // if a.NotEmptyf(obj, "error message %s", "formatted") { // assert.Equal(t, "two", obj[1]) @@ -1378,12 +1414,15 @@ func (a *Assertions) NotSamef(expected interface{}, actual interface{}, msg stri return NotSamef(a.t, expected, actual, msg, args...) } -// NotSubset asserts that the specified list(array, slice...) or map does NOT -// contain all elements given in the specified subset list(array, slice...) or -// map. +// NotSubset asserts that the list (array, slice, or map) does NOT contain all +// elements given in the subset (array, slice, or map). +// Map elements are key-value pairs unless compared with an array or slice where +// only the map key is evaluated. // // a.NotSubset([1, 3, 4], [1, 2]) // a.NotSubset({"x": 1, "y": 2}, {"z": 3}) +// a.NotSubset([1, 3, 4], {1: "one", 2: "two"}) +// a.NotSubset({"x": 1, "y": 2}, ["z"]) func (a *Assertions) NotSubset(list interface{}, subset interface{}, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1391,12 +1430,15 @@ func (a *Assertions) NotSubset(list interface{}, subset interface{}, msgAndArgs return NotSubset(a.t, list, subset, msgAndArgs...) } -// NotSubsetf asserts that the specified list(array, slice...) or map does NOT -// contain all elements given in the specified subset list(array, slice...) or -// map. +// NotSubsetf asserts that the list (array, slice, or map) does NOT contain all +// elements given in the subset (array, slice, or map). +// Map elements are key-value pairs unless compared with an array or slice where +// only the map key is evaluated. 
// // a.NotSubsetf([1, 3, 4], [1, 2], "error message %s", "formatted") // a.NotSubsetf({"x": 1, "y": 2}, {"z": 3}, "error message %s", "formatted") +// a.NotSubsetf([1, 3, 4], {1: "one", 2: "two"}, "error message %s", "formatted") +// a.NotSubsetf({"x": 1, "y": 2}, ["z"], "error message %s", "formatted") func (a *Assertions) NotSubsetf(list interface{}, subset interface{}, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1556,11 +1598,15 @@ func (a *Assertions) Samef(expected interface{}, actual interface{}, msg string, return Samef(a.t, expected, actual, msg, args...) } -// Subset asserts that the specified list(array, slice...) or map contains all -// elements given in the specified subset list(array, slice...) or map. +// Subset asserts that the list (array, slice, or map) contains all elements +// given in the subset (array, slice, or map). +// Map elements are key-value pairs unless compared with an array or slice where +// only the map key is evaluated. // // a.Subset([1, 2, 3], [1, 2]) // a.Subset({"x": 1, "y": 2}, {"x": 1}) +// a.Subset([1, 2, 3], {1: "one", 2: "two"}) +// a.Subset({"x": 1, "y": 2}, ["x"]) func (a *Assertions) Subset(list interface{}, subset interface{}, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1568,11 +1614,15 @@ func (a *Assertions) Subset(list interface{}, subset interface{}, msgAndArgs ... return Subset(a.t, list, subset, msgAndArgs...) } -// Subsetf asserts that the specified list(array, slice...) or map contains all -// elements given in the specified subset list(array, slice...) or map. +// Subsetf asserts that the list (array, slice, or map) contains all elements +// given in the subset (array, slice, or map). +// Map elements are key-value pairs unless compared with an array or slice where +// only the map key is evaluated. 
// // a.Subsetf([1, 2, 3], [1, 2], "error message %s", "formatted") // a.Subsetf({"x": 1, "y": 2}, {"x": 1}, "error message %s", "formatted") +// a.Subsetf([1, 2, 3], {1: "one", 2: "two"}, "error message %s", "formatted") +// a.Subsetf({"x": 1, "y": 2}, ["x"], "error message %s", "formatted") func (a *Assertions) Subsetf(list interface{}, subset interface{}, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() diff --git a/vendor/github.com/stretchr/testify/assert/assertion_order.go b/vendor/github.com/stretchr/testify/assert/assertion_order.go index 1d2f71824..2fdf80fdd 100644 --- a/vendor/github.com/stretchr/testify/assert/assertion_order.go +++ b/vendor/github.com/stretchr/testify/assert/assertion_order.go @@ -33,7 +33,7 @@ func isOrdered(t TestingT, object interface{}, allowedComparesResults []compareR compareResult, isComparable := compare(prevValueInterface, valueInterface, firstValueKind) if !isComparable { - return Fail(t, fmt.Sprintf("Can not compare type \"%s\" and \"%s\"", reflect.TypeOf(value), reflect.TypeOf(prevValue)), msgAndArgs...) + return Fail(t, fmt.Sprintf(`Can not compare type "%T" and "%T"`, value, prevValue), msgAndArgs...) } if !containsValue(allowedComparesResults, compareResult) { diff --git a/vendor/github.com/stretchr/testify/assert/assertions.go b/vendor/github.com/stretchr/testify/assert/assertions.go index 4e91332bb..de8de0cb6 100644 --- a/vendor/github.com/stretchr/testify/assert/assertions.go +++ b/vendor/github.com/stretchr/testify/assert/assertions.go @@ -210,59 +210,77 @@ the problem actually occurred in calling code.*/ // of each stack frame leading from the current test to the assert call that // failed. 
func CallerInfo() []string { - var pc uintptr - var ok bool var file string var line int var name string + const stackFrameBufferSize = 10 + pcs := make([]uintptr, stackFrameBufferSize) + callers := []string{} - for i := 0; ; i++ { - pc, file, line, ok = runtime.Caller(i) - if !ok { - // The breaks below failed to terminate the loop, and we ran off the - // end of the call stack. - break - } + offset := 1 - // This is a huge edge case, but it will panic if this is the case, see #180 - if file == "" { - break - } + for { + n := runtime.Callers(offset, pcs) - f := runtime.FuncForPC(pc) - if f == nil { - break - } - name = f.Name() - - // testing.tRunner is the standard library function that calls - // tests. Subtests are called directly by tRunner, without going through - // the Test/Benchmark/Example function that contains the t.Run calls, so - // with subtests we should break when we hit tRunner, without adding it - // to the list of callers. - if name == "testing.tRunner" { + if n == 0 { break } - parts := strings.Split(file, "/") - if len(parts) > 1 { - filename := parts[len(parts)-1] - dir := parts[len(parts)-2] - if (dir != "assert" && dir != "mock" && dir != "require") || filename == "mock_test.go" { - callers = append(callers, fmt.Sprintf("%s:%d", file, line)) + frames := runtime.CallersFrames(pcs[:n]) + + for { + frame, more := frames.Next() + pc = frame.PC + file = frame.File + line = frame.Line + + // This is a huge edge case, but it will panic if this is the case, see #180 + if file == "" { + break } - } - // Drop the package - segments := strings.Split(name, ".") - name = segments[len(segments)-1] - if isTest(name, "Test") || - isTest(name, "Benchmark") || - isTest(name, "Example") { - break + f := runtime.FuncForPC(pc) + if f == nil { + break + } + name = f.Name() + + // testing.tRunner is the standard library function that calls + // tests. 
Subtests are called directly by tRunner, without going through + // the Test/Benchmark/Example function that contains the t.Run calls, so + // with subtests we should break when we hit tRunner, without adding it + // to the list of callers. + if name == "testing.tRunner" { + break + } + + parts := strings.Split(file, "/") + if len(parts) > 1 { + filename := parts[len(parts)-1] + dir := parts[len(parts)-2] + if (dir != "assert" && dir != "mock" && dir != "require") || filename == "mock_test.go" { + callers = append(callers, fmt.Sprintf("%s:%d", file, line)) + } + } + + // Drop the package + dotPos := strings.LastIndexByte(name, '.') + name = name[dotPos+1:] + if isTest(name, "Test") || + isTest(name, "Benchmark") || + isTest(name, "Example") { + break + } + + if !more { + break + } } + + // Next batch + offset += cap(pcs) } return callers @@ -437,17 +455,34 @@ func NotImplements(t TestingT, interfaceObject interface{}, object interface{}, return true } +func isType(expectedType, object interface{}) bool { + return ObjectsAreEqual(reflect.TypeOf(object), reflect.TypeOf(expectedType)) +} + // IsType asserts that the specified objects are of the same type. -func IsType(t TestingT, expectedType interface{}, object interface{}, msgAndArgs ...interface{}) bool { +// +// assert.IsType(t, &MyStruct{}, &MyStruct{}) +func IsType(t TestingT, expectedType, object interface{}, msgAndArgs ...interface{}) bool { + if isType(expectedType, object) { + return true + } if h, ok := t.(tHelper); ok { h.Helper() } + return Fail(t, fmt.Sprintf("Object expected to be of type %T, but was %T", expectedType, object), msgAndArgs...) +} - if !ObjectsAreEqual(reflect.TypeOf(object), reflect.TypeOf(expectedType)) { - return Fail(t, fmt.Sprintf("Object expected to be of type %v, but was %v", reflect.TypeOf(expectedType), reflect.TypeOf(object)), msgAndArgs...) +// IsNotType asserts that the specified objects are not of the same type. 
+// +// assert.IsNotType(t, &NotMyStruct{}, &MyStruct{}) +func IsNotType(t TestingT, theType, object interface{}, msgAndArgs ...interface{}) bool { + if !isType(theType, object) { + return true } - - return true + if h, ok := t.(tHelper); ok { + h.Helper() + } + return Fail(t, fmt.Sprintf("Object type expected to be different than %T", theType), msgAndArgs...) } // Equal asserts that two objects are equal. @@ -475,7 +510,6 @@ func Equal(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) } return true - } // validateEqualArgs checks whether provided arguments can be safely used in the @@ -510,8 +544,9 @@ func Same(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) b if !same { // both are pointers but not the same type & pointing to the same address return Fail(t, fmt.Sprintf("Not same: \n"+ - "expected: %p %#v\n"+ - "actual : %p %#v", expected, expected, actual, actual), msgAndArgs...) + "expected: %p %#[1]v\n"+ + "actual : %p %#[2]v", + expected, actual), msgAndArgs...) } return true @@ -530,14 +565,14 @@ func NotSame(t TestingT, expected, actual interface{}, msgAndArgs ...interface{} same, ok := samePointers(expected, actual) if !ok { - //fails when the arguments are not pointers + // fails when the arguments are not pointers return !(Fail(t, "Both arguments must be pointers", msgAndArgs...)) } if same { return Fail(t, fmt.Sprintf( - "Expected and actual point to the same object: %p %#v", - expected, expected), msgAndArgs...) + "Expected and actual point to the same object: %p %#[1]v", + expected), msgAndArgs...) 
} return true } @@ -549,7 +584,7 @@ func NotSame(t TestingT, expected, actual interface{}, msgAndArgs ...interface{} func samePointers(first, second interface{}) (same bool, ok bool) { firstPtr, secondPtr := reflect.ValueOf(first), reflect.ValueOf(second) if firstPtr.Kind() != reflect.Ptr || secondPtr.Kind() != reflect.Ptr { - return false, false //not both are pointers + return false, false // not both are pointers } firstType, secondType := reflect.TypeOf(first), reflect.TypeOf(second) @@ -610,7 +645,6 @@ func EqualValues(t TestingT, expected, actual interface{}, msgAndArgs ...interfa } return true - } // EqualExportedValues asserts that the types of two objects are equal and their public @@ -665,7 +699,6 @@ func Exactly(t TestingT, expected, actual interface{}, msgAndArgs ...interface{} } return Equal(t, expected, actual, msgAndArgs...) - } // NotNil asserts that the specified object is not nil. @@ -715,37 +748,45 @@ func Nil(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { // isEmpty gets whether the specified object is considered empty or not. func isEmpty(object interface{}) bool { - // get nil case out of the way if object == nil { return true } - objValue := reflect.ValueOf(object) + return isEmptyValue(reflect.ValueOf(object)) +} +// isEmptyValue gets whether the specified reflect.Value is considered empty or not. +func isEmptyValue(objValue reflect.Value) bool { + if objValue.IsZero() { + return true + } + // Special cases of non-zero values that we consider empty switch objValue.Kind() { // collection types are empty when they have no element + // Note: array types are empty when they match their zero-initialized state. 
case reflect.Chan, reflect.Map, reflect.Slice: return objValue.Len() == 0 - // pointers are empty if nil or if the value they point to is empty + // non-nil pointers are empty if the value they point to is empty case reflect.Ptr: - if objValue.IsNil() { - return true - } - deref := objValue.Elem().Interface() - return isEmpty(deref) - // for all other types, compare against the zero value - // array types are empty when they match their zero-initialized state - default: - zero := reflect.Zero(objValue.Type()) - return reflect.DeepEqual(object, zero.Interface()) + return isEmptyValue(objValue.Elem()) } + return false } -// Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or either -// a slice or a channel with len == 0. +// Empty asserts that the given value is "empty". +// +// [Zero values] are "empty". +// +// Arrays are "empty" if every element is the zero value of the type (stricter than "empty"). +// +// Slices, maps and channels with zero length are "empty". +// +// Pointer values are "empty" if the pointer is nil or if the pointed value is "empty". // // assert.Empty(t, obj) +// +// [Zero values]: https://go.dev/ref/spec#The_zero_value func Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { pass := isEmpty(object) if !pass { @@ -756,11 +797,9 @@ func Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { } return pass - } -// NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either -// a slice or a channel with len == 0. +// NotEmpty asserts that the specified object is NOT [Empty]. // // if assert.NotEmpty(t, obj) { // assert.Equal(t, "two", obj[1]) @@ -775,7 +814,6 @@ func NotEmpty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { } return pass - } // getLen tries to get the length of an object. 
@@ -819,7 +857,6 @@ func True(t TestingT, value bool, msgAndArgs ...interface{}) bool { } return true - } // False asserts that the specified value is false. @@ -834,7 +871,6 @@ func False(t TestingT, value bool, msgAndArgs ...interface{}) bool { } return true - } // NotEqual asserts that the specified values are NOT equal. @@ -857,7 +893,6 @@ func NotEqual(t TestingT, expected, actual interface{}, msgAndArgs ...interface{ } return true - } // NotEqualValues asserts that two objects are not equal even when converted to the same type @@ -880,7 +915,6 @@ func NotEqualValues(t TestingT, expected, actual interface{}, msgAndArgs ...inte // return (true, false) if element was not found. // return (true, true) if element was found. func containsElement(list interface{}, element interface{}) (ok, found bool) { - listValue := reflect.ValueOf(list) listType := reflect.TypeOf(list) if listType == nil { @@ -915,7 +949,6 @@ func containsElement(list interface{}, element interface{}) (ok, found bool) { } } return true, false - } // Contains asserts that the specified string, list(array, slice...) or map contains the @@ -938,7 +971,6 @@ func Contains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) bo } return true - } // NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the @@ -961,14 +993,17 @@ func NotContains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) } return true - } -// Subset asserts that the specified list(array, slice...) or map contains all -// elements given in the specified subset list(array, slice...) or map. +// Subset asserts that the list (array, slice, or map) contains all elements +// given in the subset (array, slice, or map). +// Map elements are key-value pairs unless compared with an array or slice where +// only the map key is evaluated. 
// // assert.Subset(t, [1, 2, 3], [1, 2]) // assert.Subset(t, {"x": 1, "y": 2}, {"x": 1}) +// assert.Subset(t, [1, 2, 3], {1: "one", 2: "two"}) +// assert.Subset(t, {"x": 1, "y": 2}, ["x"]) func Subset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok bool) { if h, ok := t.(tHelper); ok { h.Helper() @@ -983,7 +1018,7 @@ func Subset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok } subsetKind := reflect.TypeOf(subset).Kind() - if subsetKind != reflect.Array && subsetKind != reflect.Slice && listKind != reflect.Map { + if subsetKind != reflect.Array && subsetKind != reflect.Slice && subsetKind != reflect.Map { return Fail(t, fmt.Sprintf("%q has an unsupported type %s", subset, subsetKind), msgAndArgs...) } @@ -1007,6 +1042,13 @@ func Subset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok } subsetList := reflect.ValueOf(subset) + if subsetKind == reflect.Map { + keys := make([]interface{}, subsetList.Len()) + for idx, key := range subsetList.MapKeys() { + keys[idx] = key.Interface() + } + subsetList = reflect.ValueOf(keys) + } for i := 0; i < subsetList.Len(); i++ { element := subsetList.Index(i).Interface() ok, found := containsElement(list, element) @@ -1021,12 +1063,15 @@ func Subset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok return true } -// NotSubset asserts that the specified list(array, slice...) or map does NOT -// contain all elements given in the specified subset list(array, slice...) or -// map. +// NotSubset asserts that the list (array, slice, or map) does NOT contain all +// elements given in the subset (array, slice, or map). +// Map elements are key-value pairs unless compared with an array or slice where +// only the map key is evaluated. 
// // assert.NotSubset(t, [1, 3, 4], [1, 2]) // assert.NotSubset(t, {"x": 1, "y": 2}, {"z": 3}) +// assert.NotSubset(t, [1, 3, 4], {1: "one", 2: "two"}) +// assert.NotSubset(t, {"x": 1, "y": 2}, ["z"]) func NotSubset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok bool) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1041,7 +1086,7 @@ func NotSubset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) } subsetKind := reflect.TypeOf(subset).Kind() - if subsetKind != reflect.Array && subsetKind != reflect.Slice && listKind != reflect.Map { + if subsetKind != reflect.Array && subsetKind != reflect.Slice && subsetKind != reflect.Map { return Fail(t, fmt.Sprintf("%q has an unsupported type %s", subset, subsetKind), msgAndArgs...) } @@ -1065,11 +1110,18 @@ func NotSubset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) } subsetList := reflect.ValueOf(subset) + if subsetKind == reflect.Map { + keys := make([]interface{}, subsetList.Len()) + for idx, key := range subsetList.MapKeys() { + keys[idx] = key.Interface() + } + subsetList = reflect.ValueOf(keys) + } for i := 0; i < subsetList.Len(); i++ { element := subsetList.Index(i).Interface() ok, found := containsElement(list, element) if !ok { - return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", list), msgAndArgs...) + return Fail(t, fmt.Sprintf("%q could not be applied builtin len()", list), msgAndArgs...) } if !found { return true @@ -1591,10 +1643,8 @@ func NoError(t TestingT, err error, msgAndArgs ...interface{}) bool { // Error asserts that a function returned an error (i.e. not `nil`). 
// -// actualObj, err := SomeFunction() -// if assert.Error(t, err) { -// assert.Equal(t, expectedError, err) -// } +// actualObj, err := SomeFunction() +// assert.Error(t, err) func Error(t TestingT, err error, msgAndArgs ...interface{}) bool { if err == nil { if h, ok := t.(tHelper); ok { @@ -1667,7 +1717,6 @@ func matchRegexp(rx interface{}, str interface{}) bool { default: return r.MatchString(fmt.Sprint(v)) } - } // Regexp asserts that a specified regexp matches a string. @@ -1703,7 +1752,6 @@ func NotRegexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interf } return !match - } // Zero asserts that i is the zero value for its type. @@ -1814,6 +1862,11 @@ func JSONEq(t TestingT, expected string, actual string, msgAndArgs ...interface{ return Fail(t, fmt.Sprintf("Expected value ('%s') is not valid json.\nJSON parsing error: '%s'", expected, err.Error()), msgAndArgs...) } + // Shortcut if same bytes + if actual == expected { + return true + } + if err := json.Unmarshal([]byte(actual), &actualJSONAsInterface); err != nil { return Fail(t, fmt.Sprintf("Input ('%s') needs to be valid json.\nJSON parsing error: '%s'", actual, err.Error()), msgAndArgs...) } @@ -1832,6 +1885,11 @@ func YAMLEq(t TestingT, expected string, actual string, msgAndArgs ...interface{ return Fail(t, fmt.Sprintf("Expected value ('%s') is not valid yaml.\nYAML parsing error: '%s'", expected, err.Error()), msgAndArgs...) } + // Shortcut if same bytes + if actual == expected { + return true + } + if err := yaml.Unmarshal([]byte(actual), &actualYAMLAsInterface); err != nil { return Fail(t, fmt.Sprintf("Input ('%s') needs to be valid yaml.\nYAML error: '%s'", actual, err.Error()), msgAndArgs...) 
} @@ -1933,6 +1991,7 @@ func Eventually(t TestingT, condition func() bool, waitFor time.Duration, tick t } ch := make(chan bool, 1) + checkCond := func() { ch <- condition() } timer := time.NewTimer(waitFor) defer timer.Stop() @@ -1940,18 +1999,23 @@ func Eventually(t TestingT, condition func() bool, waitFor time.Duration, tick t ticker := time.NewTicker(tick) defer ticker.Stop() - for tick := ticker.C; ; { + var tickC <-chan time.Time + + // Check the condition once first on the initial call. + go checkCond() + + for { select { case <-timer.C: return Fail(t, "Condition never satisfied", msgAndArgs...) - case <-tick: - tick = nil - go func() { ch <- condition() }() + case <-tickC: + tickC = nil + go checkCond() case v := <-ch: if v { return true } - tick = ticker.C + tickC = ticker.C } } } @@ -1964,6 +2028,9 @@ type CollectT struct { errors []error } +// Helper is like [testing.T.Helper] but does nothing. +func (CollectT) Helper() {} + // Errorf collects the error. func (c *CollectT) Errorf(format string, args ...interface{}) { c.errors = append(c.errors, fmt.Errorf(format, args...)) @@ -2021,35 +2088,42 @@ func EventuallyWithT(t TestingT, condition func(collect *CollectT), waitFor time var lastFinishedTickErrs []error ch := make(chan *CollectT, 1) + checkCond := func() { + collect := new(CollectT) + defer func() { + ch <- collect + }() + condition(collect) + } + timer := time.NewTimer(waitFor) defer timer.Stop() ticker := time.NewTicker(tick) defer ticker.Stop() - for tick := ticker.C; ; { + var tickC <-chan time.Time + + // Check the condition once first on the initial call. + go checkCond() + + for { select { case <-timer.C: for _, err := range lastFinishedTickErrs { t.Errorf("%v", err) } return Fail(t, "Condition never satisfied", msgAndArgs...) 
- case <-tick: - tick = nil - go func() { - collect := new(CollectT) - defer func() { - ch <- collect - }() - condition(collect) - }() + case <-tickC: + tickC = nil + go checkCond() case collect := <-ch: if !collect.failed() { return true } // Keep the errors from the last ended condition, so that they can be copied to t if timeout is reached. lastFinishedTickErrs = collect.errors - tick = ticker.C + tickC = ticker.C } } } @@ -2064,6 +2138,7 @@ func Never(t TestingT, condition func() bool, waitFor time.Duration, tick time.D } ch := make(chan bool, 1) + checkCond := func() { ch <- condition() } timer := time.NewTimer(waitFor) defer timer.Stop() @@ -2071,18 +2146,23 @@ func Never(t TestingT, condition func() bool, waitFor time.Duration, tick time.D ticker := time.NewTicker(tick) defer ticker.Stop() - for tick := ticker.C; ; { + var tickC <-chan time.Time + + // Check the condition once first on the initial call. + go checkCond() + + for { select { case <-timer.C: return true - case <-tick: - tick = nil - go func() { ch <- condition() }() + case <-tickC: + tickC = nil + go checkCond() case v := <-ch: if v { return Fail(t, "Condition satisfied", msgAndArgs...) } - tick = ticker.C + tickC = ticker.C } } } @@ -2100,9 +2180,12 @@ func ErrorIs(t TestingT, err, target error, msgAndArgs ...interface{}) bool { var expectedText string if target != nil { expectedText = target.Error() + if err == nil { + return Fail(t, fmt.Sprintf("Expected error with %q in chain but got nil.", expectedText), msgAndArgs...) 
+ } } - chain := buildErrorChainString(err) + chain := buildErrorChainString(err, false) return Fail(t, fmt.Sprintf("Target error should be in err chain:\n"+ "expected: %q\n"+ @@ -2125,7 +2208,7 @@ func NotErrorIs(t TestingT, err, target error, msgAndArgs ...interface{}) bool { expectedText = target.Error() } - chain := buildErrorChainString(err) + chain := buildErrorChainString(err, false) return Fail(t, fmt.Sprintf("Target error should not be in err chain:\n"+ "found: %q\n"+ @@ -2143,11 +2226,17 @@ func ErrorAs(t TestingT, err error, target interface{}, msgAndArgs ...interface{ return true } - chain := buildErrorChainString(err) + expectedType := reflect.TypeOf(target).Elem().String() + if err == nil { + return Fail(t, fmt.Sprintf("An error is expected but got nil.\n"+ + "expected: %s", expectedType), msgAndArgs...) + } + + chain := buildErrorChainString(err, true) return Fail(t, fmt.Sprintf("Should be in error chain:\n"+ - "expected: %q\n"+ - "in chain: %s", target, chain, + "expected: %s\n"+ + "in chain: %s", expectedType, chain, ), msgAndArgs...) } @@ -2161,24 +2250,46 @@ func NotErrorAs(t TestingT, err error, target interface{}, msgAndArgs ...interfa return true } - chain := buildErrorChainString(err) + chain := buildErrorChainString(err, true) return Fail(t, fmt.Sprintf("Target error should not be in err chain:\n"+ - "found: %q\n"+ - "in chain: %s", target, chain, + "found: %s\n"+ + "in chain: %s", reflect.TypeOf(target).Elem().String(), chain, ), msgAndArgs...) } -func buildErrorChainString(err error) string { +func unwrapAll(err error) (errs []error) { + errs = append(errs, err) + switch x := err.(type) { + case interface{ Unwrap() error }: + err = x.Unwrap() + if err == nil { + return + } + errs = append(errs, unwrapAll(err)...) + case interface{ Unwrap() []error }: + for _, err := range x.Unwrap() { + errs = append(errs, unwrapAll(err)...) 
+ } + } + return +} + +func buildErrorChainString(err error, withType bool) string { if err == nil { return "" } - e := errors.Unwrap(err) - chain := fmt.Sprintf("%q", err.Error()) - for e != nil { - chain += fmt.Sprintf("\n\t%q", e.Error()) - e = errors.Unwrap(e) + var chain string + errs := unwrapAll(err) + for i := range errs { + if i != 0 { + chain += "\n\t" + } + chain += fmt.Sprintf("%q", errs[i].Error()) + if withType { + chain += fmt.Sprintf(" (%T)", errs[i]) + } } return chain } diff --git a/vendor/github.com/stretchr/testify/assert/doc.go b/vendor/github.com/stretchr/testify/assert/doc.go index 4953981d3..a0b953aa5 100644 --- a/vendor/github.com/stretchr/testify/assert/doc.go +++ b/vendor/github.com/stretchr/testify/assert/doc.go @@ -1,5 +1,9 @@ // Package assert provides a set of comprehensive testing tools for use with the normal Go testing system. // +// # Note +// +// All functions in this package return a bool value indicating whether the assertion has passed. +// // # Example Usage // // The following is a complete example using assert in a standard test function: diff --git a/vendor/github.com/stretchr/testify/assert/http_assertions.go b/vendor/github.com/stretchr/testify/assert/http_assertions.go index 861ed4b7c..5a6bb75f2 100644 --- a/vendor/github.com/stretchr/testify/assert/http_assertions.go +++ b/vendor/github.com/stretchr/testify/assert/http_assertions.go @@ -138,7 +138,7 @@ func HTTPBodyContains(t TestingT, handler http.HandlerFunc, method, url string, contains := strings.Contains(body, fmt.Sprint(str)) if !contains { - Fail(t, fmt.Sprintf("Expected response body for \"%s\" to contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body), msgAndArgs...) + Fail(t, fmt.Sprintf("Expected response body for %q to contain %q but found %q", url+"?"+values.Encode(), str, body), msgAndArgs...) 
} return contains @@ -158,7 +158,7 @@ func HTTPBodyNotContains(t TestingT, handler http.HandlerFunc, method, url strin contains := strings.Contains(body, fmt.Sprint(str)) if contains { - Fail(t, fmt.Sprintf("Expected response body for \"%s\" to NOT contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body), msgAndArgs...) + Fail(t, fmt.Sprintf("Expected response body for %q to NOT contain %q but found %q", url+"?"+values.Encode(), str, body), msgAndArgs...) } return !contains diff --git a/vendor/github.com/stretchr/testify/assert/yaml/yaml_custom.go b/vendor/github.com/stretchr/testify/assert/yaml/yaml_custom.go index baa0cc7d7..5a74c4f4d 100644 --- a/vendor/github.com/stretchr/testify/assert/yaml/yaml_custom.go +++ b/vendor/github.com/stretchr/testify/assert/yaml/yaml_custom.go @@ -1,5 +1,4 @@ //go:build testify_yaml_custom && !testify_yaml_fail && !testify_yaml_default -// +build testify_yaml_custom,!testify_yaml_fail,!testify_yaml_default // Package yaml is an implementation of YAML functions that calls a pluggable implementation. // diff --git a/vendor/github.com/stretchr/testify/assert/yaml/yaml_default.go b/vendor/github.com/stretchr/testify/assert/yaml/yaml_default.go index b83c6cf64..0bae80e34 100644 --- a/vendor/github.com/stretchr/testify/assert/yaml/yaml_default.go +++ b/vendor/github.com/stretchr/testify/assert/yaml/yaml_default.go @@ -1,5 +1,4 @@ //go:build !testify_yaml_fail && !testify_yaml_custom -// +build !testify_yaml_fail,!testify_yaml_custom // Package yaml is just an indirection to handle YAML deserialization. 
// diff --git a/vendor/github.com/stretchr/testify/assert/yaml/yaml_fail.go b/vendor/github.com/stretchr/testify/assert/yaml/yaml_fail.go index e78f7dfe6..8041803fd 100644 --- a/vendor/github.com/stretchr/testify/assert/yaml/yaml_fail.go +++ b/vendor/github.com/stretchr/testify/assert/yaml/yaml_fail.go @@ -1,5 +1,4 @@ //go:build testify_yaml_fail && !testify_yaml_custom && !testify_yaml_default -// +build testify_yaml_fail,!testify_yaml_custom,!testify_yaml_default // Package yaml is an implementation of YAML functions that always fail. // diff --git a/vendor/github.com/stretchr/testify/require/doc.go b/vendor/github.com/stretchr/testify/require/doc.go index 968434724..c8e3f94a8 100644 --- a/vendor/github.com/stretchr/testify/require/doc.go +++ b/vendor/github.com/stretchr/testify/require/doc.go @@ -23,6 +23,8 @@ // // The `require` package have same global functions as in the `assert` package, // but instead of returning a boolean result they call `t.FailNow()`. +// A consequence of this is that it must be called from the goroutine running +// the test function, not from other goroutines created during the test. // // Every assertion function also takes an optional string message as the final argument, // allowing custom error messages to be appended to the message the assertion method outputs. diff --git a/vendor/github.com/stretchr/testify/require/require.go b/vendor/github.com/stretchr/testify/require/require.go index d8921950d..2d02f9bce 100644 --- a/vendor/github.com/stretchr/testify/require/require.go +++ b/vendor/github.com/stretchr/testify/require/require.go @@ -117,10 +117,19 @@ func ElementsMatchf(t TestingT, listA interface{}, listB interface{}, msg string t.FailNow() } -// Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or either -// a slice or a channel with len == 0. +// Empty asserts that the given value is "empty". +// +// [Zero values] are "empty". 
+// +// Arrays are "empty" if every element is the zero value of the type (stricter than "empty"). +// +// Slices, maps and channels with zero length are "empty". +// +// Pointer values are "empty" if the pointer is nil or if the pointed value is "empty". // // require.Empty(t, obj) +// +// [Zero values]: https://go.dev/ref/spec#The_zero_value func Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -131,10 +140,19 @@ func Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) { t.FailNow() } -// Emptyf asserts that the specified object is empty. I.e. nil, "", false, 0 or either -// a slice or a channel with len == 0. +// Emptyf asserts that the given value is "empty". +// +// [Zero values] are "empty". +// +// Arrays are "empty" if every element is the zero value of the type (stricter than "empty"). +// +// Slices, maps and channels with zero length are "empty". +// +// Pointer values are "empty" if the pointer is nil or if the pointed value is "empty". // // require.Emptyf(t, obj, "error message %s", "formatted") +// +// [Zero values]: https://go.dev/ref/spec#The_zero_value func Emptyf(t TestingT, object interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -279,10 +297,8 @@ func Equalf(t TestingT, expected interface{}, actual interface{}, msg string, ar // Error asserts that a function returned an error (i.e. not `nil`). // -// actualObj, err := SomeFunction() -// if require.Error(t, err) { -// require.Equal(t, expectedError, err) -// } +// actualObj, err := SomeFunction() +// require.Error(t, err) func Error(t TestingT, err error, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -373,10 +389,8 @@ func ErrorIsf(t TestingT, err error, target error, msg string, args ...interface // Errorf asserts that a function returned an error (i.e. not `nil`). 
// -// actualObj, err := SomeFunction() -// if require.Errorf(t, err, "error message %s", "formatted") { -// require.Equal(t, expectedErrorf, err) -// } +// actualObj, err := SomeFunction() +// require.Errorf(t, err, "error message %s", "formatted") func Errorf(t TestingT, err error, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1097,7 +1111,35 @@ func IsNonIncreasingf(t TestingT, object interface{}, msg string, args ...interf t.FailNow() } +// IsNotType asserts that the specified objects are not of the same type. +// +// require.IsNotType(t, &NotMyStruct{}, &MyStruct{}) +func IsNotType(t TestingT, theType interface{}, object interface{}, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.IsNotType(t, theType, object, msgAndArgs...) { + return + } + t.FailNow() +} + +// IsNotTypef asserts that the specified objects are not of the same type. +// +// require.IsNotTypef(t, &NotMyStruct{}, &MyStruct{}, "error message %s", "formatted") +func IsNotTypef(t TestingT, theType interface{}, object interface{}, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.IsNotTypef(t, theType, object, msg, args...) { + return + } + t.FailNow() +} + // IsType asserts that the specified objects are of the same type. +// +// require.IsType(t, &MyStruct{}, &MyStruct{}) func IsType(t TestingT, expectedType interface{}, object interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1109,6 +1151,8 @@ func IsType(t TestingT, expectedType interface{}, object interface{}, msgAndArgs } // IsTypef asserts that the specified objects are of the same type. 
+// +// require.IsTypef(t, &MyStruct{}, &MyStruct{}, "error message %s", "formatted") func IsTypef(t TestingT, expectedType interface{}, object interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1469,8 +1513,7 @@ func NotElementsMatchf(t TestingT, listA interface{}, listB interface{}, msg str t.FailNow() } -// NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either -// a slice or a channel with len == 0. +// NotEmpty asserts that the specified object is NOT [Empty]. // // if require.NotEmpty(t, obj) { // require.Equal(t, "two", obj[1]) @@ -1485,8 +1528,7 @@ func NotEmpty(t TestingT, object interface{}, msgAndArgs ...interface{}) { t.FailNow() } -// NotEmptyf asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either -// a slice or a channel with len == 0. +// NotEmptyf asserts that the specified object is NOT [Empty]. // // if require.NotEmptyf(t, obj, "error message %s", "formatted") { // require.Equal(t, "two", obj[1]) @@ -1745,12 +1787,15 @@ func NotSamef(t TestingT, expected interface{}, actual interface{}, msg string, t.FailNow() } -// NotSubset asserts that the specified list(array, slice...) or map does NOT -// contain all elements given in the specified subset list(array, slice...) or -// map. +// NotSubset asserts that the list (array, slice, or map) does NOT contain all +// elements given in the subset (array, slice, or map). +// Map elements are key-value pairs unless compared with an array or slice where +// only the map key is evaluated. 
// // require.NotSubset(t, [1, 3, 4], [1, 2]) // require.NotSubset(t, {"x": 1, "y": 2}, {"z": 3}) +// require.NotSubset(t, [1, 3, 4], {1: "one", 2: "two"}) +// require.NotSubset(t, {"x": 1, "y": 2}, ["z"]) func NotSubset(t TestingT, list interface{}, subset interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1761,12 +1806,15 @@ func NotSubset(t TestingT, list interface{}, subset interface{}, msgAndArgs ...i t.FailNow() } -// NotSubsetf asserts that the specified list(array, slice...) or map does NOT -// contain all elements given in the specified subset list(array, slice...) or -// map. +// NotSubsetf asserts that the list (array, slice, or map) does NOT contain all +// elements given in the subset (array, slice, or map). +// Map elements are key-value pairs unless compared with an array or slice where +// only the map key is evaluated. // // require.NotSubsetf(t, [1, 3, 4], [1, 2], "error message %s", "formatted") // require.NotSubsetf(t, {"x": 1, "y": 2}, {"z": 3}, "error message %s", "formatted") +// require.NotSubsetf(t, [1, 3, 4], {1: "one", 2: "two"}, "error message %s", "formatted") +// require.NotSubsetf(t, {"x": 1, "y": 2}, ["z"], "error message %s", "formatted") func NotSubsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1971,11 +2019,15 @@ func Samef(t TestingT, expected interface{}, actual interface{}, msg string, arg t.FailNow() } -// Subset asserts that the specified list(array, slice...) or map contains all -// elements given in the specified subset list(array, slice...) or map. +// Subset asserts that the list (array, slice, or map) contains all elements +// given in the subset (array, slice, or map). +// Map elements are key-value pairs unless compared with an array or slice where +// only the map key is evaluated. 
// // require.Subset(t, [1, 2, 3], [1, 2]) // require.Subset(t, {"x": 1, "y": 2}, {"x": 1}) +// require.Subset(t, [1, 2, 3], {1: "one", 2: "two"}) +// require.Subset(t, {"x": 1, "y": 2}, ["x"]) func Subset(t TestingT, list interface{}, subset interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1986,11 +2038,15 @@ func Subset(t TestingT, list interface{}, subset interface{}, msgAndArgs ...inte t.FailNow() } -// Subsetf asserts that the specified list(array, slice...) or map contains all -// elements given in the specified subset list(array, slice...) or map. +// Subsetf asserts that the list (array, slice, or map) contains all elements +// given in the subset (array, slice, or map). +// Map elements are key-value pairs unless compared with an array or slice where +// only the map key is evaluated. // // require.Subsetf(t, [1, 2, 3], [1, 2], "error message %s", "formatted") // require.Subsetf(t, {"x": 1, "y": 2}, {"x": 1}, "error message %s", "formatted") +// require.Subsetf(t, [1, 2, 3], {1: "one", 2: "two"}, "error message %s", "formatted") +// require.Subsetf(t, {"x": 1, "y": 2}, ["x"], "error message %s", "formatted") func Subsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() diff --git a/vendor/github.com/stretchr/testify/require/require_forward.go b/vendor/github.com/stretchr/testify/require/require_forward.go index 1bd87304f..e6f7e9446 100644 --- a/vendor/github.com/stretchr/testify/require/require_forward.go +++ b/vendor/github.com/stretchr/testify/require/require_forward.go @@ -93,10 +93,19 @@ func (a *Assertions) ElementsMatchf(listA interface{}, listB interface{}, msg st ElementsMatchf(a.t, listA, listB, msg, args...) } -// Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or either -// a slice or a channel with len == 0. +// Empty asserts that the given value is "empty". +// +// [Zero values] are "empty". 
+// +// Arrays are "empty" if every element is the zero value of the type (stricter than "empty"). +// +// Slices, maps and channels with zero length are "empty". +// +// Pointer values are "empty" if the pointer is nil or if the pointed value is "empty". // // a.Empty(obj) +// +// [Zero values]: https://go.dev/ref/spec#The_zero_value func (a *Assertions) Empty(object interface{}, msgAndArgs ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -104,10 +113,19 @@ func (a *Assertions) Empty(object interface{}, msgAndArgs ...interface{}) { Empty(a.t, object, msgAndArgs...) } -// Emptyf asserts that the specified object is empty. I.e. nil, "", false, 0 or either -// a slice or a channel with len == 0. +// Emptyf asserts that the given value is "empty". +// +// [Zero values] are "empty". +// +// Arrays are "empty" if every element is the zero value of the type (stricter than "empty"). +// +// Slices, maps and channels with zero length are "empty". +// +// Pointer values are "empty" if the pointer is nil or if the pointed value is "empty". // // a.Emptyf(obj, "error message %s", "formatted") +// +// [Zero values]: https://go.dev/ref/spec#The_zero_value func (a *Assertions) Emptyf(object interface{}, msg string, args ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -225,10 +243,8 @@ func (a *Assertions) Equalf(expected interface{}, actual interface{}, msg string // Error asserts that a function returned an error (i.e. not `nil`). // -// actualObj, err := SomeFunction() -// if a.Error(err) { -// assert.Equal(t, expectedError, err) -// } +// actualObj, err := SomeFunction() +// a.Error(err) func (a *Assertions) Error(err error, msgAndArgs ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -298,10 +314,8 @@ func (a *Assertions) ErrorIsf(err error, target error, msg string, args ...inter // Errorf asserts that a function returned an error (i.e. not `nil`). 
// -// actualObj, err := SomeFunction() -// if a.Errorf(err, "error message %s", "formatted") { -// assert.Equal(t, expectedErrorf, err) -// } +// actualObj, err := SomeFunction() +// a.Errorf(err, "error message %s", "formatted") func (a *Assertions) Errorf(err error, msg string, args ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -869,7 +883,29 @@ func (a *Assertions) IsNonIncreasingf(object interface{}, msg string, args ...in IsNonIncreasingf(a.t, object, msg, args...) } +// IsNotType asserts that the specified objects are not of the same type. +// +// a.IsNotType(&NotMyStruct{}, &MyStruct{}) +func (a *Assertions) IsNotType(theType interface{}, object interface{}, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + IsNotType(a.t, theType, object, msgAndArgs...) +} + +// IsNotTypef asserts that the specified objects are not of the same type. +// +// a.IsNotTypef(&NotMyStruct{}, &MyStruct{}, "error message %s", "formatted") +func (a *Assertions) IsNotTypef(theType interface{}, object interface{}, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + IsNotTypef(a.t, theType, object, msg, args...) +} + // IsType asserts that the specified objects are of the same type. +// +// a.IsType(&MyStruct{}, &MyStruct{}) func (a *Assertions) IsType(expectedType interface{}, object interface{}, msgAndArgs ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -878,6 +914,8 @@ func (a *Assertions) IsType(expectedType interface{}, object interface{}, msgAnd } // IsTypef asserts that the specified objects are of the same type. 
+// +// a.IsTypef(&MyStruct{}, &MyStruct{}, "error message %s", "formatted") func (a *Assertions) IsTypef(expectedType interface{}, object interface{}, msg string, args ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1163,8 +1201,7 @@ func (a *Assertions) NotElementsMatchf(listA interface{}, listB interface{}, msg NotElementsMatchf(a.t, listA, listB, msg, args...) } -// NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either -// a slice or a channel with len == 0. +// NotEmpty asserts that the specified object is NOT [Empty]. // // if a.NotEmpty(obj) { // assert.Equal(t, "two", obj[1]) @@ -1176,8 +1213,7 @@ func (a *Assertions) NotEmpty(object interface{}, msgAndArgs ...interface{}) { NotEmpty(a.t, object, msgAndArgs...) } -// NotEmptyf asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either -// a slice or a channel with len == 0. +// NotEmptyf asserts that the specified object is NOT [Empty]. // // if a.NotEmptyf(obj, "error message %s", "formatted") { // assert.Equal(t, "two", obj[1]) @@ -1379,12 +1415,15 @@ func (a *Assertions) NotSamef(expected interface{}, actual interface{}, msg stri NotSamef(a.t, expected, actual, msg, args...) } -// NotSubset asserts that the specified list(array, slice...) or map does NOT -// contain all elements given in the specified subset list(array, slice...) or -// map. +// NotSubset asserts that the list (array, slice, or map) does NOT contain all +// elements given in the subset (array, slice, or map). +// Map elements are key-value pairs unless compared with an array or slice where +// only the map key is evaluated. 
// // a.NotSubset([1, 3, 4], [1, 2]) // a.NotSubset({"x": 1, "y": 2}, {"z": 3}) +// a.NotSubset([1, 3, 4], {1: "one", 2: "two"}) +// a.NotSubset({"x": 1, "y": 2}, ["z"]) func (a *Assertions) NotSubset(list interface{}, subset interface{}, msgAndArgs ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1392,12 +1431,15 @@ func (a *Assertions) NotSubset(list interface{}, subset interface{}, msgAndArgs NotSubset(a.t, list, subset, msgAndArgs...) } -// NotSubsetf asserts that the specified list(array, slice...) or map does NOT -// contain all elements given in the specified subset list(array, slice...) or -// map. +// NotSubsetf asserts that the list (array, slice, or map) does NOT contain all +// elements given in the subset (array, slice, or map). +// Map elements are key-value pairs unless compared with an array or slice where +// only the map key is evaluated. // // a.NotSubsetf([1, 3, 4], [1, 2], "error message %s", "formatted") // a.NotSubsetf({"x": 1, "y": 2}, {"z": 3}, "error message %s", "formatted") +// a.NotSubsetf([1, 3, 4], {1: "one", 2: "two"}, "error message %s", "formatted") +// a.NotSubsetf({"x": 1, "y": 2}, ["z"], "error message %s", "formatted") func (a *Assertions) NotSubsetf(list interface{}, subset interface{}, msg string, args ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1557,11 +1599,15 @@ func (a *Assertions) Samef(expected interface{}, actual interface{}, msg string, Samef(a.t, expected, actual, msg, args...) } -// Subset asserts that the specified list(array, slice...) or map contains all -// elements given in the specified subset list(array, slice...) or map. +// Subset asserts that the list (array, slice, or map) contains all elements +// given in the subset (array, slice, or map). +// Map elements are key-value pairs unless compared with an array or slice where +// only the map key is evaluated. 
// // a.Subset([1, 2, 3], [1, 2]) // a.Subset({"x": 1, "y": 2}, {"x": 1}) +// a.Subset([1, 2, 3], {1: "one", 2: "two"}) +// a.Subset({"x": 1, "y": 2}, ["x"]) func (a *Assertions) Subset(list interface{}, subset interface{}, msgAndArgs ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1569,11 +1615,15 @@ func (a *Assertions) Subset(list interface{}, subset interface{}, msgAndArgs ... Subset(a.t, list, subset, msgAndArgs...) } -// Subsetf asserts that the specified list(array, slice...) or map contains all -// elements given in the specified subset list(array, slice...) or map. +// Subsetf asserts that the list (array, slice, or map) contains all elements +// given in the subset (array, slice, or map). +// Map elements are key-value pairs unless compared with an array or slice where +// only the map key is evaluated. // // a.Subsetf([1, 2, 3], [1, 2], "error message %s", "formatted") // a.Subsetf({"x": 1, "y": 2}, {"x": 1}, "error message %s", "formatted") +// a.Subsetf([1, 2, 3], {1: "one", 2: "two"}, "error message %s", "formatted") +// a.Subsetf({"x": 1, "y": 2}, ["x"], "error message %s", "formatted") func (a *Assertions) Subsetf(list interface{}, subset interface{}, msg string, args ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() diff --git a/vendor/github.com/urfave/cli/v2/.flake8 b/vendor/github.com/urfave/cli/v2/.flake8 new file mode 100644 index 000000000..6deafc261 --- /dev/null +++ b/vendor/github.com/urfave/cli/v2/.flake8 @@ -0,0 +1,2 @@ +[flake8] +max-line-length = 120 diff --git a/vendor/github.com/urfave/cli/v2/.gitignore b/vendor/github.com/urfave/cli/v2/.gitignore new file mode 100644 index 000000000..1ef91a60b --- /dev/null +++ b/vendor/github.com/urfave/cli/v2/.gitignore @@ -0,0 +1,14 @@ +*.coverprofile +*.exe +*.orig +.*envrc +.envrc +.idea +# goimports is installed here if not available +/.local/ +/site/ +coverage.txt +internal/*/built-example +vendor +/cmd/urfave-cli-genflags/urfave-cli-genflags +*.exe diff --git 
a/vendor/github.com/urfave/cli/v2/.golangci.yaml b/vendor/github.com/urfave/cli/v2/.golangci.yaml new file mode 100644 index 000000000..89b6e8661 --- /dev/null +++ b/vendor/github.com/urfave/cli/v2/.golangci.yaml @@ -0,0 +1,4 @@ +# https://golangci-lint.run/usage/configuration/ +linters: + enable: + - misspell diff --git a/vendor/github.com/urfave/cli/v2/CODE_OF_CONDUCT.md b/vendor/github.com/urfave/cli/v2/CODE_OF_CONDUCT.md new file mode 100644 index 000000000..9fee14807 --- /dev/null +++ b/vendor/github.com/urfave/cli/v2/CODE_OF_CONDUCT.md @@ -0,0 +1,75 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as +contributors and maintainers pledge to making participation in our project and +our community a harassment-free experience for everyone, regardless of age, body +size, disability, ethnicity, gender identity and expression, level of experience, +education, socio-economic status, nationality, personal appearance, race, +religion, or sexual identity and orientation. 
+ +## Our Standards + +Examples of behavior that contributes to creating a positive environment +include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or + advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic + address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable +behavior and are expected to take appropriate and fair corrective action in +response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, or +reject comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct, or to ban temporarily or +permanently any contributor for other behaviors that they deem inappropriate, +threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. Examples of +representing a project or community include using an official project e-mail +address, posting via an official social media account, or acting as an appointed +representative at an online or offline event. Representation of a project may be +further defined and clarified by project maintainers. 
+ +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported by contacting urfave-governance@googlegroups.com, a members-only group +that is world-postable. All complaints will be reviewed and investigated and +will result in a response that is deemed necessary and appropriate to the +circumstances. The project team is obligated to maintain confidentiality with +regard to the reporter of an incident. Further details of specific enforcement +policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good +faith may face temporary or permanent repercussions as determined by other +members of the project's leadership. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, +available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html + +[homepage]: https://www.contributor-covenant.org + diff --git a/vendor/github.com/urfave/cli/v2/LICENSE b/vendor/github.com/urfave/cli/v2/LICENSE new file mode 100644 index 000000000..2c84c78a1 --- /dev/null +++ b/vendor/github.com/urfave/cli/v2/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2022 urfave/cli maintainers + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/urfave/cli/v2/Makefile b/vendor/github.com/urfave/cli/v2/Makefile new file mode 100644 index 000000000..f0d41905e --- /dev/null +++ b/vendor/github.com/urfave/cli/v2/Makefile @@ -0,0 +1,26 @@ +# NOTE: this Makefile is meant to provide a simplified entry point for humans to +# run all of the critical steps to verify one's changes are harmonious in +# nature. Keeping target bodies to one line each and abstaining from make magic +# are very important so that maintainers and contributors can focus their +# attention on files that are primarily Go. 
+ +GO_RUN_BUILD := go run internal/build/build.go + +.PHONY: all +all: generate vet test check-binary-size gfmrun yamlfmt v2diff + +# NOTE: this is a special catch-all rule to run any of the commands +# defined in internal/build/build.go with optional arguments passed +# via GFLAGS (global flags) and FLAGS (command-specific flags), e.g.: +# +# $ make test GFLAGS='--packages cli' +%: + $(GO_RUN_BUILD) $(GFLAGS) $* $(FLAGS) + +.PHONY: docs +docs: + mkdocs build + +.PHONY: serve-docs +serve-docs: + mkdocs serve diff --git a/vendor/github.com/urfave/cli/v2/README.md b/vendor/github.com/urfave/cli/v2/README.md new file mode 100644 index 000000000..9080aee41 --- /dev/null +++ b/vendor/github.com/urfave/cli/v2/README.md @@ -0,0 +1,19 @@ +# cli + +[![Run Tests](https://github.com/urfave/cli/actions/workflows/cli.yml/badge.svg?branch=v2-maint)](https://github.com/urfave/cli/actions/workflows/cli.yml) +[![Go Reference](https://pkg.go.dev/badge/github.com/urfave/cli/v2.svg)](https://pkg.go.dev/github.com/urfave/cli/v2) +[![Go Report Card](https://goreportcard.com/badge/github.com/urfave/cli/v2)](https://goreportcard.com/report/github.com/urfave/cli/v2) +[![codecov](https://codecov.io/gh/urfave/cli/branch/v2-maint/graph/badge.svg?token=t9YGWLh05g)](https://app.codecov.io/gh/urfave/cli/tree/v2-maint) + +cli is a simple, fast, and fun package for building command line apps in Go. The +goal is to enable developers to write fast and distributable command line +applications in an expressive way. + +## Documentation + +More documentation is available in [`./docs`](./docs) or the hosted +documentation site at . 
+ +## License + +See [`LICENSE`](./LICENSE) diff --git a/vendor/github.com/urfave/cli/v2/app.go b/vendor/github.com/urfave/cli/v2/app.go new file mode 100644 index 000000000..af072e769 --- /dev/null +++ b/vendor/github.com/urfave/cli/v2/app.go @@ -0,0 +1,536 @@ +package cli + +import ( + "context" + "flag" + "fmt" + "io" + "os" + "path/filepath" + "sort" + "strings" + "time" +) + +const suggestDidYouMeanTemplate = "Did you mean %q?" + +var ( + changeLogURL = "https://github.com/urfave/cli/blob/main/docs/CHANGELOG.md" + appActionDeprecationURL = fmt.Sprintf("%s#deprecated-cli-app-action-signature", changeLogURL) + contactSysadmin = "This is an error in the application. Please contact the distributor of this application if this is not you." + errInvalidActionType = NewExitError("ERROR invalid Action type. "+ + fmt.Sprintf("Must be `func(*Context`)` or `func(*Context) error). %s", contactSysadmin)+ + fmt.Sprintf("See %s", appActionDeprecationURL), 2) + ignoreFlagPrefix = "test." // this is to ignore test flags when adding flags from other packages + + SuggestFlag SuggestFlagFunc = nil // initialized in suggestions.go unless built with urfave_cli_no_suggest + SuggestCommand SuggestCommandFunc = nil // initialized in suggestions.go unless built with urfave_cli_no_suggest + SuggestDidYouMeanTemplate string = suggestDidYouMeanTemplate +) + +// App is the main structure of a cli application. It is recommended that +// an app be created with the cli.NewApp() function +type App struct { + // The name of the program. Defaults to path.Base(os.Args[0]) + Name string + // Full name of command for help, defaults to Name + HelpName string + // Description of the program. + Usage string + // Text to override the USAGE section of help + UsageText string + // Whether this command supports arguments + Args bool + // Description of the program argument format. 
+ ArgsUsage string + // Version of the program + Version string + // Description of the program + Description string + // DefaultCommand is the (optional) name of a command + // to run if no command names are passed as CLI arguments. + DefaultCommand string + // List of commands to execute + Commands []*Command + // List of flags to parse + Flags []Flag + // Boolean to enable bash completion commands + EnableBashCompletion bool + // Boolean to hide built-in help command and help flag + HideHelp bool + // Boolean to hide built-in help command but keep help flag. + // Ignored if HideHelp is true. + HideHelpCommand bool + // Boolean to hide built-in version flag and the VERSION section of help + HideVersion bool + // categories contains the categorized commands and is populated on app startup + categories CommandCategories + // flagCategories contains the categorized flags and is populated on app startup + flagCategories FlagCategories + // An action to execute when the shell completion flag is set + BashComplete BashCompleteFunc + // An action to execute before any subcommands are run, but after the context is ready + // If a non-nil error is returned, no subcommands are run + Before BeforeFunc + // An action to execute after any subcommands are run, but after the subcommand has finished + // It is run even if Action() panics + After AfterFunc + // The action to execute when no subcommands are specified + Action ActionFunc + // Execute this function if the proper command cannot be found + CommandNotFound CommandNotFoundFunc + // Execute this function if a usage error occurs + OnUsageError OnUsageErrorFunc + // Execute this function when an invalid flag is accessed from the context + InvalidFlagAccessHandler InvalidFlagAccessFunc + // Compilation date + Compiled time.Time + // List of all authors who contributed + Authors []*Author + // Copyright of the binary if any + Copyright string + // Reader reader to write input to (useful for tests) + Reader io.Reader + // 
Writer writer to write output to + Writer io.Writer + // ErrWriter writes error output + ErrWriter io.Writer + // ExitErrHandler processes any error encountered while running an App before + // it is returned to the caller. If no function is provided, HandleExitCoder + // is used as the default behavior. + ExitErrHandler ExitErrHandlerFunc + // Other custom info + Metadata map[string]interface{} + // Carries a function which returns app specific info. + ExtraInfo func() map[string]string + // CustomAppHelpTemplate the text template for app help topic. + // cli.go uses text/template to render templates. You can + // render custom help text by setting this variable. + CustomAppHelpTemplate string + // SliceFlagSeparator is used to customize the separator for SliceFlag, the default is "," + SliceFlagSeparator string + // DisableSliceFlagSeparator is used to disable SliceFlagSeparator, the default is false + DisableSliceFlagSeparator bool + // Boolean to enable short-option handling so user can combine several + // single-character bool arguments into one + // i.e. foobar -o -v -> foobar -ov + UseShortOptionHandling bool + // Enable suggestions for commands and flags + Suggest bool + // Allows global flags set by libraries which use flag.XXXVar(...) directly + // to be parsed through this library + AllowExtFlags bool + // Treat all flags as normal arguments if true + SkipFlagParsing bool + + didSetup bool + separator separatorSpec + + rootCommand *Command +} + +type SuggestFlagFunc func(flags []Flag, provided string, hideHelp bool) string + +type SuggestCommandFunc func(commands []*Command, provided string) string + +// Tries to find out when this binary was compiled. +// Returns the current time if it fails to find it. +func compileTime() time.Time { + info, err := os.Stat(os.Args[0]) + if err != nil { + return time.Now() + } + return info.ModTime() +} + +// NewApp creates a new cli Application with some reasonable defaults for Name, +// Usage, Version and Action. 
+func NewApp() *App { + return &App{ + Name: filepath.Base(os.Args[0]), + Usage: "A new cli application", + UsageText: "", + BashComplete: DefaultAppComplete, + Action: helpCommand.Action, + Compiled: compileTime(), + Reader: os.Stdin, + Writer: os.Stdout, + ErrWriter: os.Stderr, + } +} + +// Setup runs initialization code to ensure all data structures are ready for +// `Run` or inspection prior to `Run`. It is internally called by `Run`, but +// will return early if setup has already happened. +func (a *App) Setup() { + if a.didSetup { + return + } + + a.didSetup = true + + if a.Name == "" { + a.Name = filepath.Base(os.Args[0]) + } + + if a.HelpName == "" { + a.HelpName = a.Name + } + + if a.Usage == "" { + a.Usage = "A new cli application" + } + + if a.Version == "" { + a.HideVersion = true + } + + if a.BashComplete == nil { + a.BashComplete = DefaultAppComplete + } + + if a.Action == nil { + a.Action = helpCommand.Action + } + + if a.Compiled == (time.Time{}) { + a.Compiled = compileTime() + } + + if a.Reader == nil { + a.Reader = os.Stdin + } + + if a.Writer == nil { + a.Writer = os.Stdout + } + + if a.ErrWriter == nil { + a.ErrWriter = os.Stderr + } + + if a.AllowExtFlags { + // add global flags added by other packages + flag.VisitAll(func(f *flag.Flag) { + // skip test flags + if !strings.HasPrefix(f.Name, ignoreFlagPrefix) { + a.Flags = append(a.Flags, &extFlag{f}) + } + }) + } + + if len(a.SliceFlagSeparator) != 0 { + a.separator.customized = true + a.separator.sep = a.SliceFlagSeparator + } + + if a.DisableSliceFlagSeparator { + a.separator.customized = true + a.separator.disabled = true + } + + for _, c := range a.Commands { + cname := c.Name + if c.HelpName != "" { + cname = c.HelpName + } + c.separator = a.separator + c.HelpName = fmt.Sprintf("%s %s", a.HelpName, cname) + c.flagCategories = newFlagCategoriesFromFlags(c.Flags) + } + + if a.Command(helpCommand.Name) == nil && !a.HideHelp { + if !a.HideHelpCommand { + a.appendCommand(helpCommand) + } + + 
if HelpFlag != nil { + a.appendFlag(HelpFlag) + } + } + + if !a.HideVersion { + a.appendFlag(VersionFlag) + } + + a.categories = newCommandCategories() + for _, command := range a.Commands { + a.categories.AddCommand(command.Category, command) + } + sort.Sort(a.categories.(*commandCategories)) + + a.flagCategories = newFlagCategoriesFromFlags(a.Flags) + + if a.Metadata == nil { + a.Metadata = make(map[string]interface{}) + } +} + +func (a *App) newRootCommand() *Command { + return &Command{ + Name: a.Name, + Usage: a.Usage, + UsageText: a.UsageText, + Description: a.Description, + ArgsUsage: a.ArgsUsage, + BashComplete: a.BashComplete, + Before: a.Before, + After: a.After, + Action: a.Action, + OnUsageError: a.OnUsageError, + Subcommands: a.Commands, + Flags: a.Flags, + flagCategories: a.flagCategories, + HideHelp: a.HideHelp, + HideHelpCommand: a.HideHelpCommand, + UseShortOptionHandling: a.UseShortOptionHandling, + HelpName: a.HelpName, + CustomHelpTemplate: a.CustomAppHelpTemplate, + categories: a.categories, + SkipFlagParsing: a.SkipFlagParsing, + isRoot: true, + separator: a.separator, + } +} + +func (a *App) newFlagSet() (*flag.FlagSet, error) { + return flagSet(a.Name, a.Flags, a.separator) +} + +func (a *App) useShortOptionHandling() bool { + return a.UseShortOptionHandling +} + +// Run is the entry point to the cli app. Parses the arguments slice and routes +// to the proper flag/args combination +func (a *App) Run(arguments []string) (err error) { + return a.RunContext(context.Background(), arguments) +} + +// RunContext is like Run except it takes a Context that will be +// passed to its commands and sub-commands. Through this, you can +// propagate timeouts and cancellation requests +func (a *App) RunContext(ctx context.Context, arguments []string) (err error) { + a.Setup() + + // handle the completion flag separately from the flagset since + // completion could be attempted after a flag, but before its value was put + // on the command line. 
this causes the flagset to interpret the completion + // flag name as the value of the flag before it which is undesirable + // note that we can only do this because the shell autocomplete function + // always appends the completion flag at the end of the command + shellComplete, arguments := checkShellCompleteFlag(a, arguments) + + cCtx := NewContext(a, nil, &Context{Context: ctx}) + cCtx.shellComplete = shellComplete + + a.rootCommand = a.newRootCommand() + cCtx.Command = a.rootCommand + + if err := checkDuplicatedCmds(a.rootCommand); err != nil { + return err + } + return a.rootCommand.Run(cCtx, arguments...) +} + +// RunAsSubcommand is for legacy/compatibility purposes only. New code should only +// use App.RunContext. This function is slated to be removed in v3. +func (a *App) RunAsSubcommand(ctx *Context) (err error) { + a.Setup() + + cCtx := NewContext(a, nil, ctx) + cCtx.shellComplete = ctx.shellComplete + + a.rootCommand = a.newRootCommand() + cCtx.Command = a.rootCommand + + return a.rootCommand.Run(cCtx, ctx.Args().Slice()...) +} + +func (a *App) suggestFlagFromError(err error, command string) (string, error) { + flag, parseErr := flagFromError(err) + if parseErr != nil { + return "", err + } + + flags := a.Flags + hideHelp := a.HideHelp + if command != "" { + cmd := a.Command(command) + if cmd == nil { + return "", err + } + flags = cmd.Flags + hideHelp = hideHelp || cmd.HideHelp + } + + if SuggestFlag == nil { + return "", err + } + suggestion := SuggestFlag(flags, flag, hideHelp) + if len(suggestion) == 0 { + return "", err + } + + return fmt.Sprintf(SuggestDidYouMeanTemplate+"\n\n", suggestion), nil +} + +// RunAndExitOnError calls .Run() and exits non-zero if an error was returned +// +// Deprecated: instead you should return an error that fulfills cli.ExitCoder +// to cli.App.Run. 
This will cause the application to exit with the given error +// code in the cli.ExitCoder +func (a *App) RunAndExitOnError() { + if err := a.Run(os.Args); err != nil { + _, _ = fmt.Fprintln(a.ErrWriter, err) + OsExiter(1) + } +} + +// Command returns the named command on App. Returns nil if the command does not exist +func (a *App) Command(name string) *Command { + for _, c := range a.Commands { + if c.HasName(name) { + return c + } + } + + return nil +} + +// VisibleCategories returns a slice of categories and commands that are +// Hidden=false +func (a *App) VisibleCategories() []CommandCategory { + ret := []CommandCategory{} + for _, category := range a.categories.Categories() { + if visible := func() CommandCategory { + if len(category.VisibleCommands()) > 0 { + return category + } + return nil + }(); visible != nil { + ret = append(ret, visible) + } + } + return ret +} + +// VisibleCommands returns a slice of the Commands with Hidden=false +func (a *App) VisibleCommands() []*Command { + var ret []*Command + for _, command := range a.Commands { + if !command.Hidden { + ret = append(ret, command) + } + } + return ret +} + +// VisibleFlagCategories returns a slice containing all the categories with the flags they contain +func (a *App) VisibleFlagCategories() []VisibleFlagCategory { + if a.flagCategories == nil { + return []VisibleFlagCategory{} + } + return a.flagCategories.VisibleCategories() +} + +// VisibleFlags returns a slice of the Flags with Hidden=false +func (a *App) VisibleFlags() []Flag { + return visibleFlags(a.Flags) +} + +func (a *App) appendFlag(fl Flag) { + if !hasFlag(a.Flags, fl) { + a.Flags = append(a.Flags, fl) + } +} + +func (a *App) appendCommand(c *Command) { + if !hasCommand(a.Commands, c) { + a.Commands = append(a.Commands, c) + } +} + +func (a *App) handleExitCoder(cCtx *Context, err error) { + if a.ExitErrHandler != nil { + a.ExitErrHandler(cCtx, err) + } else { + HandleExitCoder(err) + } +} + +func (a *App) 
argsWithDefaultCommand(oldArgs Args) Args { + if a.DefaultCommand != "" { + rawArgs := append([]string{a.DefaultCommand}, oldArgs.Slice()...) + newArgs := args(rawArgs) + + return &newArgs + } + + return oldArgs +} + +func runFlagActions(c *Context, fs []Flag) error { + for _, f := range fs { + isSet := false + for _, name := range f.Names() { + if c.IsSet(name) { + isSet = true + break + } + } + if isSet { + if af, ok := f.(ActionableFlag); ok { + if err := af.RunAction(c); err != nil { + return err + } + } + } + } + return nil +} + +// Author represents someone who has contributed to a cli project. +type Author struct { + Name string // The Authors name + Email string // The Authors email +} + +// String makes Author comply to the Stringer interface, to allow an easy print in the templating process +func (a *Author) String() string { + e := "" + if a.Email != "" { + e = " <" + a.Email + ">" + } + + return fmt.Sprintf("%v%v", a.Name, e) +} + +// HandleAction attempts to figure out which Action signature was used. If +// it's an ActionFunc or a func with the legacy signature for Action, the func +// is run! 
+func HandleAction(action interface{}, cCtx *Context) (err error) { + switch a := action.(type) { + case ActionFunc: + return a(cCtx) + case func(*Context) error: + return a(cCtx) + case func(*Context): // deprecated function signature + a(cCtx) + return nil + } + + return errInvalidActionType +} + +func checkStringSliceIncludes(want string, sSlice []string) bool { + found := false + for _, s := range sSlice { + if want == s { + found = true + break + } + } + + return found +} diff --git a/vendor/github.com/urfave/cli/v2/args.go b/vendor/github.com/urfave/cli/v2/args.go new file mode 100644 index 000000000..bd65c17bd --- /dev/null +++ b/vendor/github.com/urfave/cli/v2/args.go @@ -0,0 +1,54 @@ +package cli + +type Args interface { + // Get returns the nth argument, or else a blank string + Get(n int) string + // First returns the first argument, or else a blank string + First() string + // Tail returns the rest of the arguments (not the first one) + // or else an empty string slice + Tail() []string + // Len returns the length of the wrapped slice + Len() int + // Present checks if there are any arguments present + Present() bool + // Slice returns a copy of the internal slice + Slice() []string +} + +type args []string + +func (a *args) Get(n int) string { + if len(*a) > n { + return (*a)[n] + } + return "" +} + +func (a *args) First() string { + return a.Get(0) +} + +func (a *args) Tail() []string { + if a.Len() >= 2 { + tail := []string((*a)[1:]) + ret := make([]string, len(tail)) + copy(ret, tail) + return ret + } + return []string{} +} + +func (a *args) Len() int { + return len(*a) +} + +func (a *args) Present() bool { + return a.Len() != 0 +} + +func (a *args) Slice() []string { + ret := make([]string, len(*a)) + copy(ret, *a) + return ret +} diff --git a/vendor/github.com/urfave/cli/v2/category.go b/vendor/github.com/urfave/cli/v2/category.go new file mode 100644 index 000000000..0986fffca --- /dev/null +++ b/vendor/github.com/urfave/cli/v2/category.go @@ 
-0,0 +1,186 @@ +package cli + +import "sort" + +// CommandCategories interface allows for category manipulation +type CommandCategories interface { + // AddCommand adds a command to a category, creating a new category if necessary. + AddCommand(category string, command *Command) + // Categories returns a slice of categories sorted by name + Categories() []CommandCategory +} + +type commandCategories []*commandCategory + +func newCommandCategories() CommandCategories { + ret := commandCategories([]*commandCategory{}) + return &ret +} + +func (c *commandCategories) Less(i, j int) bool { + return lexicographicLess((*c)[i].Name(), (*c)[j].Name()) +} + +func (c *commandCategories) Len() int { + return len(*c) +} + +func (c *commandCategories) Swap(i, j int) { + (*c)[i], (*c)[j] = (*c)[j], (*c)[i] +} + +func (c *commandCategories) AddCommand(category string, command *Command) { + for _, commandCategory := range []*commandCategory(*c) { + if commandCategory.name == category { + commandCategory.commands = append(commandCategory.commands, command) + return + } + } + newVal := append(*c, + &commandCategory{name: category, commands: []*Command{command}}) + *c = newVal +} + +func (c *commandCategories) Categories() []CommandCategory { + ret := make([]CommandCategory, len(*c)) + for i, cat := range *c { + ret[i] = cat + } + return ret +} + +// CommandCategory is a category containing commands. 
+type CommandCategory interface { + // Name returns the category name string + Name() string + // VisibleCommands returns a slice of the Commands with Hidden=false + VisibleCommands() []*Command +} + +type commandCategory struct { + name string + commands []*Command +} + +func (c *commandCategory) Name() string { + return c.name +} + +func (c *commandCategory) VisibleCommands() []*Command { + if c.commands == nil { + c.commands = []*Command{} + } + + var ret []*Command + for _, command := range c.commands { + if !command.Hidden { + ret = append(ret, command) + } + } + return ret +} + +// FlagCategories interface allows for category manipulation +type FlagCategories interface { + // AddFlags adds a flag to a category, creating a new category if necessary. + AddFlag(category string, fl Flag) + // VisibleCategories returns a slice of visible flag categories sorted by name + VisibleCategories() []VisibleFlagCategory +} + +type defaultFlagCategories struct { + m map[string]*defaultVisibleFlagCategory +} + +func newFlagCategories() FlagCategories { + return &defaultFlagCategories{ + m: map[string]*defaultVisibleFlagCategory{}, + } +} + +func newFlagCategoriesFromFlags(fs []Flag) FlagCategories { + fc := newFlagCategories() + + var categorized bool + for _, fl := range fs { + if cf, ok := fl.(CategorizableFlag); ok { + if cat := cf.GetCategory(); cat != "" && cf.IsVisible() { + fc.AddFlag(cat, cf) + categorized = true + } + } + } + + if categorized { + for _, fl := range fs { + if cf, ok := fl.(CategorizableFlag); ok { + if cf.GetCategory() == "" && cf.IsVisible() { + fc.AddFlag("", fl) + } + } + } + } + + return fc +} + +func (f *defaultFlagCategories) AddFlag(category string, fl Flag) { + if _, ok := f.m[category]; !ok { + f.m[category] = &defaultVisibleFlagCategory{name: category, m: map[string]Flag{}} + } + + f.m[category].m[fl.String()] = fl +} + +func (f *defaultFlagCategories) VisibleCategories() []VisibleFlagCategory { + catNames := []string{} + for name := range 
f.m { + catNames = append(catNames, name) + } + + sort.Strings(catNames) + + ret := make([]VisibleFlagCategory, len(catNames)) + for i, name := range catNames { + ret[i] = f.m[name] + } + + return ret +} + +// VisibleFlagCategory is a category containing flags. +type VisibleFlagCategory interface { + // Name returns the category name string + Name() string + // Flags returns a slice of VisibleFlag sorted by name + Flags() []VisibleFlag +} + +type defaultVisibleFlagCategory struct { + name string + m map[string]Flag +} + +func (fc *defaultVisibleFlagCategory) Name() string { + return fc.name +} + +func (fc *defaultVisibleFlagCategory) Flags() []VisibleFlag { + vfNames := []string{} + for flName, fl := range fc.m { + if vf, ok := fl.(VisibleFlag); ok { + if vf.IsVisible() { + vfNames = append(vfNames, flName) + } + } + } + + sort.Strings(vfNames) + + ret := make([]VisibleFlag, len(vfNames)) + for i, flName := range vfNames { + ret[i] = fc.m[flName].(VisibleFlag) + } + + return ret +} diff --git a/vendor/github.com/urfave/cli/v2/cli.go b/vendor/github.com/urfave/cli/v2/cli.go new file mode 100644 index 000000000..28ad0582b --- /dev/null +++ b/vendor/github.com/urfave/cli/v2/cli.go @@ -0,0 +1,25 @@ +// Package cli provides a minimal framework for creating and organizing command line +// Go applications. 
cli is designed to be easy to understand and write, the most simple +// cli application can be written as follows: +// +// func main() { +// (&cli.App{}).Run(os.Args) +// } +// +// Of course this application does not do much, so let's make this an actual application: +// +// func main() { +// app := &cli.App{ +// Name: "greet", +// Usage: "say a greeting", +// Action: func(c *cli.Context) error { +// fmt.Println("Greetings") +// return nil +// }, +// } +// +// app.Run(os.Args) +// } +package cli + +//go:generate make -C cmd/urfave-cli-genflags run diff --git a/vendor/github.com/urfave/cli/v2/command.go b/vendor/github.com/urfave/cli/v2/command.go new file mode 100644 index 000000000..472c1ff44 --- /dev/null +++ b/vendor/github.com/urfave/cli/v2/command.go @@ -0,0 +1,421 @@ +package cli + +import ( + "flag" + "fmt" + "reflect" + "sort" + "strings" +) + +// Command is a subcommand for a cli.App. +type Command struct { + // The name of the command + Name string + // A list of aliases for the command + Aliases []string + // A short description of the usage of this command + Usage string + // Custom text to show on USAGE section of help + UsageText string + // A longer explanation of how the command works + Description string + // Whether this command supports arguments + Args bool + // A short description of the arguments of this command + ArgsUsage string + // The category the command is part of + Category string + // The function to call when checking for bash command completions + BashComplete BashCompleteFunc + // An action to execute before any sub-subcommands are run, but after the context is ready + // If a non-nil error is returned, no sub-subcommands are run + Before BeforeFunc + // An action to execute after any subcommands are run, but after the subcommand has finished + // It is run even if Action() panics + After AfterFunc + // The function to call when this command is invoked + Action ActionFunc + // Execute this function if a usage error occurs. 
+ OnUsageError OnUsageErrorFunc + // List of child commands + Subcommands []*Command + // List of flags to parse + Flags []Flag + flagCategories FlagCategories + // Treat all flags as normal arguments if true + SkipFlagParsing bool + // Boolean to hide built-in help command and help flag + HideHelp bool + // Boolean to hide built-in help command but keep help flag + // Ignored if HideHelp is true. + HideHelpCommand bool + // Boolean to hide this command from help or completion + Hidden bool + // Boolean to enable short-option handling so user can combine several + // single-character bool arguments into one + // i.e. foobar -o -v -> foobar -ov + UseShortOptionHandling bool + + // Full name of command for help, defaults to full command name, including parent commands. + HelpName string + commandNamePath []string + + // CustomHelpTemplate the text template for the command help topic. + // cli.go uses text/template to render templates. You can + // render custom help text by setting this variable. + CustomHelpTemplate string + + // categories contains the categorized commands and is populated on app startup + categories CommandCategories + + // if this is a root "special" command + isRoot bool + + separator separatorSpec +} + +type Commands []*Command + +type CommandsByName []*Command + +func (c CommandsByName) Len() int { + return len(c) +} + +func (c CommandsByName) Less(i, j int) bool { + return lexicographicLess(c[i].Name, c[j].Name) +} + +func (c CommandsByName) Swap(i, j int) { + c[i], c[j] = c[j], c[i] +} + +// FullName returns the full name of the command. 
+// For subcommands this ensures that parent commands are part of the command path +func (c *Command) FullName() string { + if c.commandNamePath == nil { + return c.Name + } + return strings.Join(c.commandNamePath, " ") +} + +func (cmd *Command) Command(name string) *Command { + for _, c := range cmd.Subcommands { + if c.HasName(name) { + return c + } + } + + return nil +} + +func (c *Command) setup(ctx *Context) { + if c.Command(helpCommand.Name) == nil && !c.HideHelp { + if !c.HideHelpCommand { + c.Subcommands = append(c.Subcommands, helpCommand) + } + } + + if !c.HideHelp && HelpFlag != nil { + // append help to flags + c.appendFlag(HelpFlag) + } + + if ctx.App.UseShortOptionHandling { + c.UseShortOptionHandling = true + } + + c.categories = newCommandCategories() + for _, command := range c.Subcommands { + c.categories.AddCommand(command.Category, command) + } + sort.Sort(c.categories.(*commandCategories)) + + for _, scmd := range c.Subcommands { + if scmd.HelpName == "" { + scmd.HelpName = fmt.Sprintf("%s %s", c.HelpName, scmd.Name) + } + scmd.separator = c.separator + } + + if c.BashComplete == nil { + c.BashComplete = DefaultCompleteWithFlags(c) + } +} + +func (c *Command) Run(cCtx *Context, arguments ...string) (err error) { + + if !c.isRoot { + c.setup(cCtx) + if err := checkDuplicatedCmds(c); err != nil { + return err + } + } + + a := args(arguments) + set, err := c.parseFlags(&a, cCtx.shellComplete) + cCtx.flagSet = set + + if checkCompletions(cCtx) { + return nil + } + + if err != nil { + if c.OnUsageError != nil { + err = c.OnUsageError(cCtx, err, !c.isRoot) + cCtx.App.handleExitCoder(cCtx, err) + return err + } + _, _ = fmt.Fprintf(cCtx.App.Writer, "%s %s\n\n", "Incorrect Usage:", err.Error()) + if cCtx.App.Suggest { + if suggestion, err := c.suggestFlagFromError(err, ""); err == nil { + fmt.Fprintf(cCtx.App.Writer, "%s", suggestion) + } + } + if !c.HideHelp { + if c.isRoot { + _ = ShowAppHelp(cCtx) + } else { + _ = ShowCommandHelp(cCtx.parentContext, 
c.Name) + } + } + return err + } + + if checkHelp(cCtx) { + return helpCommand.Action(cCtx) + } + + if c.isRoot && !cCtx.App.HideVersion && checkVersion(cCtx) { + ShowVersion(cCtx) + return nil + } + + if c.After != nil && !cCtx.shellComplete { + defer func() { + afterErr := c.After(cCtx) + if afterErr != nil { + cCtx.App.handleExitCoder(cCtx, err) + if err != nil { + err = newMultiError(err, afterErr) + } else { + err = afterErr + } + } + }() + } + + cerr := cCtx.checkRequiredFlags(c.Flags) + if cerr != nil { + _ = helpCommand.Action(cCtx) + return cerr + } + + if c.Before != nil && !cCtx.shellComplete { + beforeErr := c.Before(cCtx) + if beforeErr != nil { + cCtx.App.handleExitCoder(cCtx, beforeErr) + err = beforeErr + return err + } + } + + if err = runFlagActions(cCtx, c.Flags); err != nil { + return err + } + + var cmd *Command + args := cCtx.Args() + if args.Present() { + name := args.First() + cmd = c.Command(name) + if cmd == nil { + hasDefault := cCtx.App.DefaultCommand != "" + isFlagName := checkStringSliceIncludes(name, cCtx.FlagNames()) + + var ( + isDefaultSubcommand = false + defaultHasSubcommands = false + ) + + if hasDefault { + dc := cCtx.App.Command(cCtx.App.DefaultCommand) + defaultHasSubcommands = len(dc.Subcommands) > 0 + for _, dcSub := range dc.Subcommands { + if checkStringSliceIncludes(name, dcSub.Names()) { + isDefaultSubcommand = true + break + } + } + } + + if isFlagName || (hasDefault && (defaultHasSubcommands && isDefaultSubcommand)) { + argsWithDefault := cCtx.App.argsWithDefaultCommand(args) + if !reflect.DeepEqual(args, argsWithDefault) { + cmd = cCtx.App.rootCommand.Command(argsWithDefault.First()) + } + } + } + } else if c.isRoot && cCtx.App.DefaultCommand != "" { + if dc := cCtx.App.Command(cCtx.App.DefaultCommand); dc != c { + cmd = dc + } + } + + if cmd != nil { + newcCtx := NewContext(cCtx.App, nil, cCtx) + newcCtx.Command = cmd + return cmd.Run(newcCtx, cCtx.Args().Slice()...) 
+ } + + if c.Action == nil { + c.Action = helpCommand.Action + } + + err = c.Action(cCtx) + + cCtx.App.handleExitCoder(cCtx, err) + return err +} + +func (c *Command) newFlagSet() (*flag.FlagSet, error) { + return flagSet(c.Name, c.Flags, c.separator) +} + +func (c *Command) useShortOptionHandling() bool { + return c.UseShortOptionHandling +} + +func (c *Command) suggestFlagFromError(err error, command string) (string, error) { + flag, parseErr := flagFromError(err) + if parseErr != nil { + return "", err + } + + flags := c.Flags + hideHelp := c.HideHelp + if command != "" { + cmd := c.Command(command) + if cmd == nil { + return "", err + } + flags = cmd.Flags + hideHelp = hideHelp || cmd.HideHelp + } + + suggestion := SuggestFlag(flags, flag, hideHelp) + if len(suggestion) == 0 { + return "", err + } + + return fmt.Sprintf(SuggestDidYouMeanTemplate, suggestion) + "\n\n", nil +} + +func (c *Command) parseFlags(args Args, shellComplete bool) (*flag.FlagSet, error) { + set, err := c.newFlagSet() + if err != nil { + return nil, err + } + + if c.SkipFlagParsing { + return set, set.Parse(append([]string{"--"}, args.Tail()...)) + } + + err = parseIter(set, c, args.Tail(), shellComplete) + if err != nil { + return nil, err + } + + err = normalizeFlags(c.Flags, set) + if err != nil { + return nil, err + } + + return set, nil +} + +// Names returns the names including short names and aliases. +func (c *Command) Names() []string { + return append([]string{c.Name}, c.Aliases...) 
+} + +// HasName returns true if Command.Name matches given name +func (c *Command) HasName(name string) bool { + for _, n := range c.Names() { + if n == name { + return true + } + } + return false +} + +// VisibleCategories returns a slice of categories and commands that are +// Hidden=false +func (c *Command) VisibleCategories() []CommandCategory { + ret := []CommandCategory{} + for _, category := range c.categories.Categories() { + if visible := func() CommandCategory { + if len(category.VisibleCommands()) > 0 { + return category + } + return nil + }(); visible != nil { + ret = append(ret, visible) + } + } + return ret +} + +// VisibleCommands returns a slice of the Commands with Hidden=false +func (c *Command) VisibleCommands() []*Command { + var ret []*Command + for _, command := range c.Subcommands { + if !command.Hidden { + ret = append(ret, command) + } + } + return ret +} + +// VisibleFlagCategories returns a slice containing all the visible flag categories with the flags they contain +func (c *Command) VisibleFlagCategories() []VisibleFlagCategory { + if c.flagCategories == nil { + c.flagCategories = newFlagCategoriesFromFlags(c.Flags) + } + return c.flagCategories.VisibleCategories() +} + +// VisibleFlags returns a slice of the Flags with Hidden=false +func (c *Command) VisibleFlags() []Flag { + return visibleFlags(c.Flags) +} + +func (c *Command) appendFlag(fl Flag) { + if !hasFlag(c.Flags, fl) { + c.Flags = append(c.Flags, fl) + } +} + +func hasCommand(commands []*Command, command *Command) bool { + for _, existing := range commands { + if command == existing { + return true + } + } + + return false +} + +func checkDuplicatedCmds(parent *Command) error { + seen := make(map[string]struct{}) + for _, c := range parent.Subcommands { + for _, name := range c.Names() { + if _, exists := seen[name]; exists { + return fmt.Errorf("parent command [%s] has duplicated subcommand name or alias: %s", parent.Name, name) + } + seen[name] = struct{}{} + } + } + return 
nil +} diff --git a/vendor/github.com/urfave/cli/v2/context.go b/vendor/github.com/urfave/cli/v2/context.go new file mode 100644 index 000000000..8dd476521 --- /dev/null +++ b/vendor/github.com/urfave/cli/v2/context.go @@ -0,0 +1,272 @@ +package cli + +import ( + "context" + "flag" + "fmt" + "strings" +) + +// Context is a type that is passed through to +// each Handler action in a cli application. Context +// can be used to retrieve context-specific args and +// parsed command-line options. +type Context struct { + context.Context + App *App + Command *Command + shellComplete bool + flagSet *flag.FlagSet + parentContext *Context +} + +// NewContext creates a new context. For use in when invoking an App or Command action. +func NewContext(app *App, set *flag.FlagSet, parentCtx *Context) *Context { + c := &Context{App: app, flagSet: set, parentContext: parentCtx} + if parentCtx != nil { + c.Context = parentCtx.Context + c.shellComplete = parentCtx.shellComplete + if parentCtx.flagSet == nil { + parentCtx.flagSet = &flag.FlagSet{} + } + } + + c.Command = &Command{} + + if c.Context == nil { + c.Context = context.Background() + } + + return c +} + +// NumFlags returns the number of flags set +func (cCtx *Context) NumFlags() int { + return cCtx.flagSet.NFlag() +} + +// Set sets a context flag to a value. 
+func (cCtx *Context) Set(name, value string) error { + if fs := cCtx.lookupFlagSet(name); fs != nil { + return fs.Set(name, value) + } + + return fmt.Errorf("no such flag -%s", name) +} + +// IsSet determines if the flag was actually set +func (cCtx *Context) IsSet(name string) bool { + + if fs := cCtx.lookupFlagSet(name); fs != nil { + isSet := false + fs.Visit(func(f *flag.Flag) { + if f.Name == name { + isSet = true + } + }) + if isSet { + return true + } + + f := cCtx.lookupFlag(name) + if f == nil { + return false + } + + if f.IsSet() { + return true + } + + // now redo flagset search on aliases + aliases := f.Names() + fs.Visit(func(f *flag.Flag) { + for _, alias := range aliases { + if f.Name == alias { + isSet = true + } + } + }) + + if isSet { + return true + } + } + + return false +} + +// LocalFlagNames returns a slice of flag names used in this context. +func (cCtx *Context) LocalFlagNames() []string { + var names []string + cCtx.flagSet.Visit(makeFlagNameVisitor(&names)) + // Check the flags which have been set via env or file + if cCtx.Command != nil && cCtx.Command.Flags != nil { + for _, f := range cCtx.Command.Flags { + if f.IsSet() { + names = append(names, f.Names()...) + } + } + } + + // Sort out the duplicates since flag could be set via multiple + // paths + m := map[string]struct{}{} + var unames []string + for _, name := range names { + if _, ok := m[name]; !ok { + m[name] = struct{}{} + unames = append(unames, name) + } + } + + return unames +} + +// FlagNames returns a slice of flag names used by the this context and all of +// its parent contexts. +func (cCtx *Context) FlagNames() []string { + var names []string + for _, pCtx := range cCtx.Lineage() { + names = append(names, pCtx.LocalFlagNames()...) 
+ } + return names +} + +// Lineage returns *this* context and all of its ancestor contexts in order from +// child to parent +func (cCtx *Context) Lineage() []*Context { + var lineage []*Context + + for cur := cCtx; cur != nil; cur = cur.parentContext { + lineage = append(lineage, cur) + } + + return lineage +} + +// Count returns the num of occurrences of this flag +func (cCtx *Context) Count(name string) int { + if fs := cCtx.lookupFlagSet(name); fs != nil { + if cf, ok := fs.Lookup(name).Value.(Countable); ok { + return cf.Count() + } + } + return 0 +} + +// Value returns the value of the flag corresponding to `name` +func (cCtx *Context) Value(name string) interface{} { + if fs := cCtx.lookupFlagSet(name); fs != nil { + return fs.Lookup(name).Value.(flag.Getter).Get() + } + return nil +} + +// Args returns the command line arguments associated with the context. +func (cCtx *Context) Args() Args { + ret := args(cCtx.flagSet.Args()) + return &ret +} + +// NArg returns the number of the command line arguments. 
+func (cCtx *Context) NArg() int { + return cCtx.Args().Len() +} + +func (cCtx *Context) lookupFlag(name string) Flag { + for _, c := range cCtx.Lineage() { + if c.Command == nil { + continue + } + + for _, f := range c.Command.Flags { + for _, n := range f.Names() { + if n == name { + return f + } + } + } + } + + if cCtx.App != nil { + for _, f := range cCtx.App.Flags { + for _, n := range f.Names() { + if n == name { + return f + } + } + } + } + + return nil +} + +func (cCtx *Context) lookupFlagSet(name string) *flag.FlagSet { + for _, c := range cCtx.Lineage() { + if c.flagSet == nil { + continue + } + if f := c.flagSet.Lookup(name); f != nil { + return c.flagSet + } + } + cCtx.onInvalidFlag(name) + return nil +} + +func (cCtx *Context) checkRequiredFlags(flags []Flag) requiredFlagsErr { + var missingFlags []string + for _, f := range flags { + if rf, ok := f.(RequiredFlag); ok && rf.IsRequired() { + var flagPresent bool + var flagName string + + flagNames := f.Names() + flagName = flagNames[0] + + for _, key := range flagNames { + if cCtx.IsSet(strings.TrimSpace(key)) { + flagPresent = true + } + } + + if !flagPresent && flagName != "" { + missingFlags = append(missingFlags, flagName) + } + } + } + + if len(missingFlags) != 0 { + return &errRequiredFlags{missingFlags: missingFlags} + } + + return nil +} + +func (cCtx *Context) onInvalidFlag(name string) { + for cCtx != nil { + if cCtx.App != nil && cCtx.App.InvalidFlagAccessHandler != nil { + cCtx.App.InvalidFlagAccessHandler(cCtx, name) + break + } + cCtx = cCtx.parentContext + } +} + +func makeFlagNameVisitor(names *[]string) func(*flag.Flag) { + return func(f *flag.Flag) { + nameParts := strings.Split(f.Name, ",") + name := strings.TrimSpace(nameParts[0]) + + for _, part := range nameParts { + part = strings.TrimSpace(part) + if len(part) > len(name) { + name = part + } + } + + if name != "" { + *names = append(*names, name) + } + } +} diff --git a/vendor/github.com/urfave/cli/v2/docs.go 
b/vendor/github.com/urfave/cli/v2/docs.go new file mode 100644 index 000000000..6cd0624ae --- /dev/null +++ b/vendor/github.com/urfave/cli/v2/docs.go @@ -0,0 +1,203 @@ +//go:build !urfave_cli_no_docs +// +build !urfave_cli_no_docs + +package cli + +import ( + "bytes" + "fmt" + "io" + "sort" + "strings" + "text/template" + + "github.com/cpuguy83/go-md2man/v2/md2man" +) + +// ToMarkdown creates a markdown string for the `*App` +// The function errors if either parsing or writing of the string fails. +func (a *App) ToMarkdown() (string, error) { + var w bytes.Buffer + if err := a.writeDocTemplate(&w, 0); err != nil { + return "", err + } + return w.String(), nil +} + +// ToMan creates a man page string with section number for the `*App` +// The function errors if either parsing or writing of the string fails. +func (a *App) ToManWithSection(sectionNumber int) (string, error) { + var w bytes.Buffer + if err := a.writeDocTemplate(&w, sectionNumber); err != nil { + return "", err + } + man := md2man.Render(w.Bytes()) + return string(man), nil +} + +// ToMan creates a man page string for the `*App` +// The function errors if either parsing or writing of the string fails. 
+func (a *App) ToMan() (string, error) { + man, err := a.ToManWithSection(8) + return man, err +} + +type cliTemplate struct { + App *App + SectionNum int + Commands []string + GlobalArgs []string + SynopsisArgs []string +} + +func (a *App) writeDocTemplate(w io.Writer, sectionNum int) error { + const name = "cli" + t, err := template.New(name).Parse(MarkdownDocTemplate) + if err != nil { + return err + } + return t.ExecuteTemplate(w, name, &cliTemplate{ + App: a, + SectionNum: sectionNum, + Commands: prepareCommands(a.Commands, 0), + GlobalArgs: prepareArgsWithValues(a.VisibleFlags()), + SynopsisArgs: prepareArgsSynopsis(a.VisibleFlags()), + }) +} + +func prepareCommands(commands []*Command, level int) []string { + var coms []string + for _, command := range commands { + if command.Hidden { + continue + } + + usageText := prepareUsageText(command) + + usage := prepareUsage(command, usageText) + + prepared := fmt.Sprintf("%s %s\n\n%s%s", + strings.Repeat("#", level+2), + strings.Join(command.Names(), ", "), + usage, + usageText, + ) + + flags := prepareArgsWithValues(command.VisibleFlags()) + if len(flags) > 0 { + prepared += fmt.Sprintf("\n%s", strings.Join(flags, "\n")) + } + + coms = append(coms, prepared) + + // recursively iterate subcommands + if len(command.Subcommands) > 0 { + coms = append( + coms, + prepareCommands(command.Subcommands, level+1)..., + ) + } + } + + return coms +} + +func prepareArgsWithValues(flags []Flag) []string { + return prepareFlags(flags, ", ", "**", "**", `""`, true) +} + +func prepareArgsSynopsis(flags []Flag) []string { + return prepareFlags(flags, "|", "[", "]", "[value]", false) +} + +func prepareFlags( + flags []Flag, + sep, opener, closer, value string, + addDetails bool, +) []string { + args := []string{} + for _, f := range flags { + flag, ok := f.(DocGenerationFlag) + if !ok { + continue + } + modifiedArg := opener + + for _, s := range flag.Names() { + trimmed := strings.TrimSpace(s) + if len(modifiedArg) > len(opener) { 
+ modifiedArg += sep + } + if len(trimmed) > 1 { + modifiedArg += fmt.Sprintf("--%s", trimmed) + } else { + modifiedArg += fmt.Sprintf("-%s", trimmed) + } + } + modifiedArg += closer + if flag.TakesValue() { + modifiedArg += fmt.Sprintf("=%s", value) + } + + if addDetails { + modifiedArg += flagDetails(flag) + } + + args = append(args, modifiedArg+"\n") + + } + sort.Strings(args) + return args +} + +// flagDetails returns a string containing the flags metadata +func flagDetails(flag DocGenerationFlag) string { + description := flag.GetUsage() + if flag.TakesValue() { + defaultText := flag.GetDefaultText() + if defaultText == "" { + defaultText = flag.GetValue() + } + if defaultText != "" { + description += " (default: " + defaultText + ")" + } + } + return ": " + description +} + +func prepareUsageText(command *Command) string { + if command.UsageText == "" { + return "" + } + + // Remove leading and trailing newlines + preparedUsageText := strings.Trim(command.UsageText, "\n") + + var usageText string + if strings.Contains(preparedUsageText, "\n") { + // Format multi-line string as a code block using the 4 space schema to allow for embedded markdown such + // that it will not break the continuous code block. 
+ for _, ln := range strings.Split(preparedUsageText, "\n") { + usageText += fmt.Sprintf(" %s\n", ln) + } + } else { + // Style a single line as a note + usageText = fmt.Sprintf(">%s\n", preparedUsageText) + } + + return usageText +} + +func prepareUsage(command *Command, usageText string) string { + if command.Usage == "" { + return "" + } + + usage := command.Usage + "\n" + // Add a newline to the Usage IFF there is a UsageText + if usageText != "" { + usage += "\n" + } + + return usage +} diff --git a/vendor/github.com/urfave/cli/v2/errors.go b/vendor/github.com/urfave/cli/v2/errors.go new file mode 100644 index 000000000..a818727db --- /dev/null +++ b/vendor/github.com/urfave/cli/v2/errors.go @@ -0,0 +1,178 @@ +package cli + +import ( + "fmt" + "io" + "os" + "strings" +) + +// OsExiter is the function used when the app exits. If not set defaults to os.Exit. +var OsExiter = os.Exit + +// ErrWriter is used to write errors to the user. This can be anything +// implementing the io.Writer interface and defaults to os.Stderr. +var ErrWriter io.Writer = os.Stderr + +// MultiError is an error that wraps multiple errors. +type MultiError interface { + error + Errors() []error +} + +// newMultiError creates a new MultiError. Pass in one or more errors. +func newMultiError(err ...error) MultiError { + ret := multiError(err) + return &ret +} + +type multiError []error + +// Error implements the error interface. 
+func (m *multiError) Error() string { + errs := make([]string, len(*m)) + for i, err := range *m { + errs[i] = err.Error() + } + + return strings.Join(errs, "\n") +} + +// Errors returns a copy of the errors slice +func (m *multiError) Errors() []error { + errs := make([]error, len(*m)) + for _, err := range *m { + errs = append(errs, err) + } + return errs +} + +type requiredFlagsErr interface { + error + getMissingFlags() []string +} + +type errRequiredFlags struct { + missingFlags []string +} + +func (e *errRequiredFlags) Error() string { + numberOfMissingFlags := len(e.missingFlags) + if numberOfMissingFlags == 1 { + return fmt.Sprintf("Required flag %q not set", e.missingFlags[0]) + } + joinedMissingFlags := strings.Join(e.missingFlags, ", ") + return fmt.Sprintf("Required flags %q not set", joinedMissingFlags) +} + +func (e *errRequiredFlags) getMissingFlags() []string { + return e.missingFlags +} + +// ErrorFormatter is the interface that will suitably format the error output +type ErrorFormatter interface { + Format(s fmt.State, verb rune) +} + +// ExitCoder is the interface checked by `App` and `Command` for a custom exit +// code +type ExitCoder interface { + error + ExitCode() int +} + +type exitError struct { + exitCode int + err error +} + +// NewExitError calls Exit to create a new ExitCoder. +// +// Deprecated: This function is a duplicate of Exit and will eventually be removed. +func NewExitError(message interface{}, exitCode int) ExitCoder { + return Exit(message, exitCode) +} + +// Exit wraps a message and exit code into an error, which by default is +// handled with a call to os.Exit during default error handling. +// +// This is the simplest way to trigger a non-zero exit code for an App without +// having to call os.Exit manually. During testing, this behavior can be avoided +// by overriding the ExitErrHandler function on an App or the package-global +// OsExiter function. 
+func Exit(message interface{}, exitCode int) ExitCoder { + var err error + + switch e := message.(type) { + case ErrorFormatter: + err = fmt.Errorf("%+v", message) + case error: + err = e + default: + err = fmt.Errorf("%+v", message) + } + + return &exitError{ + err: err, + exitCode: exitCode, + } +} + +func (ee *exitError) Error() string { + return ee.err.Error() +} + +func (ee *exitError) ExitCode() int { + return ee.exitCode +} + +func (ee *exitError) Unwrap() error { + return ee.err +} + +// HandleExitCoder handles errors implementing ExitCoder by printing their +// message and calling OsExiter with the given exit code. +// +// If the given error instead implements MultiError, each error will be checked +// for the ExitCoder interface, and OsExiter will be called with the last exit +// code found, or exit code 1 if no ExitCoder is found. +// +// This function is the default error-handling behavior for an App. +func HandleExitCoder(err error) { + if err == nil { + return + } + + if exitErr, ok := err.(ExitCoder); ok { + if err.Error() != "" { + if _, ok := exitErr.(ErrorFormatter); ok { + _, _ = fmt.Fprintf(ErrWriter, "%+v\n", err) + } else { + _, _ = fmt.Fprintln(ErrWriter, err) + } + } + OsExiter(exitErr.ExitCode()) + return + } + + if multiErr, ok := err.(MultiError); ok { + code := handleMultiError(multiErr) + OsExiter(code) + return + } +} + +func handleMultiError(multiErr MultiError) int { + code := 1 + for _, merr := range multiErr.Errors() { + if multiErr2, ok := merr.(MultiError); ok { + code = handleMultiError(multiErr2) + } else if merr != nil { + fmt.Fprintln(ErrWriter, merr) + if exitErr, ok := merr.(ExitCoder); ok { + code = exitErr.ExitCode() + } + } + } + return code +} diff --git a/vendor/github.com/urfave/cli/v2/fish.go b/vendor/github.com/urfave/cli/v2/fish.go new file mode 100644 index 000000000..909dfc5a2 --- /dev/null +++ b/vendor/github.com/urfave/cli/v2/fish.go @@ -0,0 +1,196 @@ +package cli + +import ( + "bytes" + "fmt" + "io" + 
"strings" + "text/template" +) + +// ToFishCompletion creates a fish completion string for the `*App` +// The function errors if either parsing or writing of the string fails. +func (a *App) ToFishCompletion() (string, error) { + var w bytes.Buffer + if err := a.writeFishCompletionTemplate(&w); err != nil { + return "", err + } + return w.String(), nil +} + +type fishCompletionTemplate struct { + App *App + Completions []string + AllCommands []string +} + +func (a *App) writeFishCompletionTemplate(w io.Writer) error { + const name = "cli" + t, err := template.New(name).Parse(FishCompletionTemplate) + if err != nil { + return err + } + allCommands := []string{} + + // Add global flags + completions := a.prepareFishFlags(a.VisibleFlags(), allCommands) + + // Add help flag + if !a.HideHelp { + completions = append( + completions, + a.prepareFishFlags([]Flag{HelpFlag}, allCommands)..., + ) + } + + // Add version flag + if !a.HideVersion { + completions = append( + completions, + a.prepareFishFlags([]Flag{VersionFlag}, allCommands)..., + ) + } + + // Add commands and their flags + completions = append( + completions, + a.prepareFishCommands(a.VisibleCommands(), &allCommands, []string{})..., + ) + + return t.ExecuteTemplate(w, name, &fishCompletionTemplate{ + App: a, + Completions: completions, + AllCommands: allCommands, + }) +} + +func (a *App) prepareFishCommands(commands []*Command, allCommands *[]string, previousCommands []string) []string { + completions := []string{} + for _, command := range commands { + if command.Hidden { + continue + } + + var completion strings.Builder + completion.WriteString(fmt.Sprintf( + "complete -r -c %s -n '%s' -a '%s'", + a.Name, + a.fishSubcommandHelper(previousCommands), + strings.Join(command.Names(), " "), + )) + + if command.Usage != "" { + completion.WriteString(fmt.Sprintf(" -d '%s'", + escapeSingleQuotes(command.Usage))) + } + + if !command.HideHelp { + completions = append( + completions, + 
a.prepareFishFlags([]Flag{HelpFlag}, command.Names())..., + ) + } + + *allCommands = append(*allCommands, command.Names()...) + completions = append(completions, completion.String()) + completions = append( + completions, + a.prepareFishFlags(command.VisibleFlags(), command.Names())..., + ) + + // recursively iterate subcommands + if len(command.Subcommands) > 0 { + completions = append( + completions, + a.prepareFishCommands( + command.Subcommands, allCommands, command.Names(), + )..., + ) + } + } + + return completions +} + +func (a *App) prepareFishFlags(flags []Flag, previousCommands []string) []string { + completions := []string{} + for _, f := range flags { + flag, ok := f.(DocGenerationFlag) + if !ok { + continue + } + + completion := &strings.Builder{} + completion.WriteString(fmt.Sprintf( + "complete -c %s -n '%s'", + a.Name, + a.fishSubcommandHelper(previousCommands), + )) + + fishAddFileFlag(f, completion) + + for idx, opt := range flag.Names() { + if idx == 0 { + completion.WriteString(fmt.Sprintf( + " -l %s", strings.TrimSpace(opt), + )) + } else { + completion.WriteString(fmt.Sprintf( + " -s %s", strings.TrimSpace(opt), + )) + + } + } + + if flag.TakesValue() { + completion.WriteString(" -r") + } + + if flag.GetUsage() != "" { + completion.WriteString(fmt.Sprintf(" -d '%s'", + escapeSingleQuotes(flag.GetUsage()))) + } + + completions = append(completions, completion.String()) + } + + return completions +} + +func fishAddFileFlag(flag Flag, completion *strings.Builder) { + switch f := flag.(type) { + case *GenericFlag: + if f.TakesFile { + return + } + case *StringFlag: + if f.TakesFile { + return + } + case *StringSliceFlag: + if f.TakesFile { + return + } + case *PathFlag: + if f.TakesFile { + return + } + } + completion.WriteString(" -f") +} + +func (a *App) fishSubcommandHelper(allCommands []string) string { + fishHelper := fmt.Sprintf("__fish_%s_no_subcommand", a.Name) + if len(allCommands) > 0 { + fishHelper = fmt.Sprintf( + 
"__fish_seen_subcommand_from %s", + strings.Join(allCommands, " "), + ) + } + return fishHelper + +} + +func escapeSingleQuotes(input string) string { + return strings.Replace(input, `'`, `\'`, -1) +} diff --git a/vendor/github.com/urfave/cli/v2/flag-spec.yaml b/vendor/github.com/urfave/cli/v2/flag-spec.yaml new file mode 100644 index 000000000..03d82e701 --- /dev/null +++ b/vendor/github.com/urfave/cli/v2/flag-spec.yaml @@ -0,0 +1,131 @@ +# NOTE: this file is used by the tool defined in +# ./cmd/urfave-cli-genflags/main.go which uses the +# `Spec` type that maps to this file structure. +flag_types: + bool: + struct_fields: + - name: Count + type: int + pointer: true + - name: DisableDefaultText + type: bool + - name: Action + type: "func(*Context, bool) error" + float64: + struct_fields: + - name: Action + type: "func(*Context, float64) error" + Float64Slice: + value_pointer: true + skip_interfaces: + - fmt.Stringer + struct_fields: + - name: separator + type: separatorSpec + - name: Action + type: "func(*Context, []float64) error" + int: + struct_fields: + - name: Base + type: int + - name: Action + type: "func(*Context, int) error" + IntSlice: + value_pointer: true + skip_interfaces: + - fmt.Stringer + struct_fields: + - name: separator + type: separatorSpec + - name: Action + type: "func(*Context, []int) error" + int64: + struct_fields: + - name: Base + type: int + - name: Action + type: "func(*Context, int64) error" + Int64Slice: + value_pointer: true + skip_interfaces: + - fmt.Stringer + struct_fields: + - name: separator + type: separatorSpec + - name: Action + type: "func(*Context, []int64) error" + uint: + struct_fields: + - name: Base + type: int + - name: Action + type: "func(*Context, uint) error" + UintSlice: + value_pointer: true + skip_interfaces: + - fmt.Stringer + struct_fields: + - name: separator + type: separatorSpec + - name: Action + type: "func(*Context, []uint) error" + uint64: + struct_fields: + - name: Base + type: int + - name: Action + 
type: "func(*Context, uint64) error" + Uint64Slice: + value_pointer: true + skip_interfaces: + - fmt.Stringer + struct_fields: + - name: separator + type: separatorSpec + - name: Action + type: "func(*Context, []uint64) error" + string: + struct_fields: + - name: TakesFile + type: bool + - name: Action + type: "func(*Context, string) error" + StringSlice: + value_pointer: true + skip_interfaces: + - fmt.Stringer + struct_fields: + - name: separator + type: separatorSpec + - name: TakesFile + type: bool + - name: Action + type: "func(*Context, []string) error" + - name: KeepSpace + type: bool + time.Duration: + struct_fields: + - name: Action + type: "func(*Context, time.Duration) error" + Timestamp: + value_pointer: true + struct_fields: + - name: Layout + type: string + - name: Timezone + type: "*time.Location" + - name: Action + type: "func(*Context, *time.Time) error" + Generic: + no_destination_pointer: true + struct_fields: + - name: TakesFile + type: bool + - name: Action + type: "func(*Context, interface{}) error" + Path: + struct_fields: + - name: TakesFile + type: bool + - name: Action + type: "func(*Context, Path) error" diff --git a/vendor/github.com/urfave/cli/v2/flag.go b/vendor/github.com/urfave/cli/v2/flag.go new file mode 100644 index 000000000..4d04de3da --- /dev/null +++ b/vendor/github.com/urfave/cli/v2/flag.go @@ -0,0 +1,419 @@ +package cli + +import ( + "errors" + "flag" + "fmt" + "io" + "os" + "regexp" + "runtime" + "strings" + "syscall" + "time" +) + +const defaultPlaceholder = "value" + +const ( + defaultSliceFlagSeparator = "," + disableSliceFlagSeparator = false +) + +var ( + slPfx = fmt.Sprintf("sl:::%d:::", time.Now().UTC().UnixNano()) + + commaWhitespace = regexp.MustCompile("[, ]+.*") +) + +// BashCompletionFlag enables bash-completion for all commands and subcommands +var BashCompletionFlag Flag = &BoolFlag{ + Name: "generate-bash-completion", + Hidden: true, +} + +// VersionFlag prints the version for the application +var VersionFlag 
Flag = &BoolFlag{ + Name: "version", + Aliases: []string{"v"}, + Usage: "print the version", + DisableDefaultText: true, +} + +// HelpFlag prints the help for all commands and subcommands. +// Set to nil to disable the flag. The subcommand +// will still be added unless HideHelp or HideHelpCommand is set to true. +var HelpFlag Flag = &BoolFlag{ + Name: "help", + Aliases: []string{"h"}, + Usage: "show help", + DisableDefaultText: true, +} + +// FlagStringer converts a flag definition to a string. This is used by help +// to display a flag. +var FlagStringer FlagStringFunc = stringifyFlag + +// Serializer is used to circumvent the limitations of flag.FlagSet.Set +type Serializer interface { + Serialize() string +} + +// FlagNamePrefixer converts a full flag name and its placeholder into the help +// message flag prefix. This is used by the default FlagStringer. +var FlagNamePrefixer FlagNamePrefixFunc = prefixedNames + +// FlagEnvHinter annotates flag help message with the environment variable +// details. This is used by the default FlagStringer. +var FlagEnvHinter FlagEnvHintFunc = withEnvHint + +// FlagFileHinter annotates flag help message with the environment variable +// details. This is used by the default FlagStringer. +var FlagFileHinter FlagFileHintFunc = withFileHint + +// FlagsByName is a slice of Flag. +type FlagsByName []Flag + +func (f FlagsByName) Len() int { + return len(f) +} + +func (f FlagsByName) Less(i, j int) bool { + if len(f[j].Names()) == 0 { + return false + } else if len(f[i].Names()) == 0 { + return true + } + return lexicographicLess(f[i].Names()[0], f[j].Names()[0]) +} + +func (f FlagsByName) Swap(i, j int) { + f[i], f[j] = f[j], f[i] +} + +// ActionableFlag is an interface that wraps Flag interface and RunAction operation. +type ActionableFlag interface { + Flag + RunAction(*Context) error +} + +// Flag is a common interface related to parsing flags in cli. 
+// For more advanced flag parsing techniques, it is recommended that +// this interface be implemented. +type Flag interface { + fmt.Stringer + // Apply Flag settings to the given flag set + Apply(*flag.FlagSet) error + Names() []string + IsSet() bool +} + +// RequiredFlag is an interface that allows us to mark flags as required +// it allows flags required flags to be backwards compatible with the Flag interface +type RequiredFlag interface { + Flag + + IsRequired() bool +} + +// DocGenerationFlag is an interface that allows documentation generation for the flag +type DocGenerationFlag interface { + Flag + + // TakesValue returns true if the flag takes a value, otherwise false + TakesValue() bool + + // GetUsage returns the usage string for the flag + GetUsage() string + + // GetValue returns the flags value as string representation and an empty + // string if the flag takes no value at all. + GetValue() string + + // GetDefaultText returns the default text for this flag + GetDefaultText() string + + // GetEnvVars returns the env vars for this flag + GetEnvVars() []string +} + +// DocGenerationSliceFlag extends DocGenerationFlag for slice-based flags. +type DocGenerationSliceFlag interface { + DocGenerationFlag + + // IsSliceFlag returns true for flags that can be given multiple times. + IsSliceFlag() bool +} + +// VisibleFlag is an interface that allows to check if a flag is visible +type VisibleFlag interface { + Flag + + // IsVisible returns true if the flag is not hidden, otherwise false + IsVisible() bool +} + +// CategorizableFlag is an interface that allows us to potentially +// use a flag in a categorized representation. 
+type CategorizableFlag interface { + VisibleFlag + + GetCategory() string +} + +// Countable is an interface to enable detection of flag values which support +// repetitive flags +type Countable interface { + Count() int +} + +func flagSet(name string, flags []Flag, spec separatorSpec) (*flag.FlagSet, error) { + set := flag.NewFlagSet(name, flag.ContinueOnError) + + for _, f := range flags { + if c, ok := f.(customizedSeparator); ok { + c.WithSeparatorSpec(spec) + } + if err := f.Apply(set); err != nil { + return nil, err + } + } + set.SetOutput(io.Discard) + return set, nil +} + +func copyFlag(name string, ff *flag.Flag, set *flag.FlagSet) { + switch ff.Value.(type) { + case Serializer: + _ = set.Set(name, ff.Value.(Serializer).Serialize()) + default: + _ = set.Set(name, ff.Value.String()) + } +} + +func normalizeFlags(flags []Flag, set *flag.FlagSet) error { + visited := make(map[string]bool) + set.Visit(func(f *flag.Flag) { + visited[f.Name] = true + }) + for _, f := range flags { + parts := f.Names() + if len(parts) == 1 { + continue + } + var ff *flag.Flag + for _, name := range parts { + name = strings.Trim(name, " ") + if visited[name] { + if ff != nil { + return errors.New("Cannot use two forms of the same flag: " + name + " " + ff.Name) + } + ff = set.Lookup(name) + } + } + if ff == nil { + continue + } + for _, name := range parts { + name = strings.Trim(name, " ") + if !visited[name] { + copyFlag(name, ff, set) + } + } + } + return nil +} + +func visibleFlags(fl []Flag) []Flag { + var visible []Flag + for _, f := range fl { + if vf, ok := f.(VisibleFlag); ok && vf.IsVisible() { + visible = append(visible, f) + } + } + return visible +} + +func prefixFor(name string) (prefix string) { + if len(name) == 1 { + prefix = "-" + } else { + prefix = "--" + } + + return +} + +// Returns the placeholder, if any, and the unquoted usage string. 
+func unquoteUsage(usage string) (string, string) { + for i := 0; i < len(usage); i++ { + if usage[i] == '`' { + for j := i + 1; j < len(usage); j++ { + if usage[j] == '`' { + name := usage[i+1 : j] + usage = usage[:i] + name + usage[j+1:] + return name, usage + } + } + break + } + } + return "", usage +} + +func prefixedNames(names []string, placeholder string) string { + var prefixed string + for i, name := range names { + if name == "" { + continue + } + + prefixed += prefixFor(name) + name + if placeholder != "" { + prefixed += " " + placeholder + } + if i < len(names)-1 { + prefixed += ", " + } + } + return prefixed +} + +func envFormat(envVars []string, prefix, sep, suffix string) string { + if len(envVars) > 0 { + return fmt.Sprintf(" [%s%s%s]", prefix, strings.Join(envVars, sep), suffix) + } + return "" +} + +func defaultEnvFormat(envVars []string) string { + return envFormat(envVars, "$", ", $", "") +} + +func withEnvHint(envVars []string, str string) string { + envText := "" + if runtime.GOOS != "windows" || os.Getenv("PSHOME") != "" { + envText = defaultEnvFormat(envVars) + } else { + envText = envFormat(envVars, "%", "%, %", "%") + } + return str + envText +} + +func FlagNames(name string, aliases []string) []string { + var ret []string + + for _, part := range append([]string{name}, aliases...) { + // v1 -> v2 migration warning zone: + // Strip off anything after the first found comma or space, which + // *hopefully* makes it a tiny bit more obvious that unexpected behavior is + // caused by using the v1 form of stringly typed "Name". 
+ ret = append(ret, commaWhitespace.ReplaceAllString(part, "")) + } + + return ret +} + +func withFileHint(filePath, str string) string { + fileText := "" + if filePath != "" { + fileText = fmt.Sprintf(" [%s]", filePath) + } + return str + fileText +} + +func formatDefault(format string) string { + return " (default: " + format + ")" +} + +func stringifyFlag(f Flag) string { + // enforce DocGeneration interface on flags to avoid reflection + df, ok := f.(DocGenerationFlag) + if !ok { + return "" + } + + placeholder, usage := unquoteUsage(df.GetUsage()) + needsPlaceholder := df.TakesValue() + + if needsPlaceholder && placeholder == "" { + placeholder = defaultPlaceholder + } + + defaultValueString := "" + + // set default text for all flags except bool flags + // for bool flags display default text if DisableDefaultText is not + // set + if bf, ok := f.(*BoolFlag); !ok || !bf.DisableDefaultText { + if s := df.GetDefaultText(); s != "" { + defaultValueString = fmt.Sprintf(formatDefault("%s"), s) + } + } + + usageWithDefault := strings.TrimSpace(usage + defaultValueString) + + pn := prefixedNames(df.Names(), placeholder) + sliceFlag, ok := f.(DocGenerationSliceFlag) + if ok && sliceFlag.IsSliceFlag() { + pn = pn + " [ " + pn + " ]" + } + + return withEnvHint(df.GetEnvVars(), fmt.Sprintf("%s\t%s", pn, usageWithDefault)) +} + +func hasFlag(flags []Flag, fl Flag) bool { + for _, existing := range flags { + if fl == existing { + return true + } + } + + return false +} + +// Return the first value from a list of environment variables and files +// (which may or may not exist), a description of where the value was found, +// and a boolean which is true if a value was found. 
+func flagFromEnvOrFile(envVars []string, filePath string) (value string, fromWhere string, found bool) { + for _, envVar := range envVars { + envVar = strings.TrimSpace(envVar) + if value, found := syscall.Getenv(envVar); found { + return value, fmt.Sprintf("environment variable %q", envVar), true + } + } + for _, fileVar := range strings.Split(filePath, ",") { + if fileVar != "" { + if data, err := os.ReadFile(fileVar); err == nil { + return string(data), fmt.Sprintf("file %q", filePath), true + } + } + } + return "", "", false +} + +type customizedSeparator interface { + WithSeparatorSpec(separatorSpec) +} + +type separatorSpec struct { + sep string + disabled bool + customized bool +} + +func (s separatorSpec) flagSplitMultiValues(val string) []string { + var ( + disabled bool = s.disabled + sep string = s.sep + ) + if !s.customized { + disabled = disableSliceFlagSeparator + sep = defaultSliceFlagSeparator + } + if disabled { + return []string{val} + } + + return strings.Split(val, sep) +} diff --git a/vendor/github.com/urfave/cli/v2/flag_bool.go b/vendor/github.com/urfave/cli/v2/flag_bool.go new file mode 100644 index 000000000..01862ea76 --- /dev/null +++ b/vendor/github.com/urfave/cli/v2/flag_bool.go @@ -0,0 +1,178 @@ +package cli + +import ( + "errors" + "flag" + "fmt" + "strconv" +) + +// boolValue needs to implement the boolFlag internal interface in flag +// to be able to capture bool fields and values +// +// type boolFlag interface { +// Value +// IsBoolFlag() bool +// } +type boolValue struct { + destination *bool + count *int +} + +func newBoolValue(val bool, p *bool, count *int) *boolValue { + *p = val + return &boolValue{ + destination: p, + count: count, + } +} + +func (b *boolValue) Set(s string) error { + v, err := strconv.ParseBool(s) + if err != nil { + err = errors.New("parse error") + return err + } + *b.destination = v + if b.count != nil { + *b.count = *b.count + 1 + } + return err +} + +func (b *boolValue) Get() interface{} { return 
*b.destination } + +func (b *boolValue) String() string { + if b.destination != nil { + return strconv.FormatBool(*b.destination) + } + return strconv.FormatBool(false) +} + +func (b *boolValue) IsBoolFlag() bool { return true } + +func (b *boolValue) Count() int { + if b.count != nil && *b.count > 0 { + return *b.count + } + return 0 +} + +// TakesValue returns true of the flag takes a value, otherwise false +func (f *BoolFlag) TakesValue() bool { + return false +} + +// GetUsage returns the usage string for the flag +func (f *BoolFlag) GetUsage() string { + return f.Usage +} + +// GetCategory returns the category for the flag +func (f *BoolFlag) GetCategory() string { + return f.Category +} + +// GetValue returns the flags value as string representation and an empty +// string if the flag takes no value at all. +func (f *BoolFlag) GetValue() string { + return "" +} + +// GetDefaultText returns the default text for this flag +func (f *BoolFlag) GetDefaultText() string { + if f.DefaultText != "" { + return f.DefaultText + } + if f.defaultValueSet { + return fmt.Sprintf("%v", f.defaultValue) + } + return fmt.Sprintf("%v", f.Value) +} + +// GetEnvVars returns the env vars for this flag +func (f *BoolFlag) GetEnvVars() []string { + return f.EnvVars +} + +// RunAction executes flag action if set +func (f *BoolFlag) RunAction(c *Context) error { + if f.Action != nil { + return f.Action(c, c.Bool(f.Name)) + } + + return nil +} + +// Apply populates the flag given the flag set and environment +func (f *BoolFlag) Apply(set *flag.FlagSet) error { + // set default value so that environment wont be able to overwrite it + f.defaultValue = f.Value + f.defaultValueSet = true + + if val, source, found := flagFromEnvOrFile(f.EnvVars, f.FilePath); found { + if val != "" { + valBool, err := strconv.ParseBool(val) + + if err != nil { + return fmt.Errorf("could not parse %q as bool value from %s for flag %s: %s", val, source, f.Name, err) + } + + f.Value = valBool + } else { + // 
empty value implies that the env is defined but set to empty string, we have to assume that this is + // what the user wants. If user doesnt want this then the env needs to be deleted or the flag removed from + // file + f.Value = false + } + f.HasBeenSet = true + } + + count := f.Count + dest := f.Destination + + if count == nil { + count = new(int) + } + + // since count will be incremented for each alias as well + // subtract number of aliases from overall count + *count -= len(f.Aliases) + + if dest == nil { + dest = new(bool) + } + + for _, name := range f.Names() { + value := newBoolValue(f.Value, dest, count) + set.Var(value, name, f.Usage) + } + + return nil +} + +// Get returns the flag’s value in the given Context. +func (f *BoolFlag) Get(ctx *Context) bool { + return ctx.Bool(f.Name) +} + +// Bool looks up the value of a local BoolFlag, returns +// false if not found +func (cCtx *Context) Bool(name string) bool { + if fs := cCtx.lookupFlagSet(name); fs != nil { + return lookupBool(name, fs) + } + return false +} + +func lookupBool(name string, set *flag.FlagSet) bool { + f := set.Lookup(name) + if f != nil { + parsed, err := strconv.ParseBool(f.Value.String()) + if err != nil { + return false + } + return parsed + } + return false +} diff --git a/vendor/github.com/urfave/cli/v2/flag_duration.go b/vendor/github.com/urfave/cli/v2/flag_duration.go new file mode 100644 index 000000000..e600cc30a --- /dev/null +++ b/vendor/github.com/urfave/cli/v2/flag_duration.go @@ -0,0 +1,108 @@ +package cli + +import ( + "flag" + "fmt" + "time" +) + +// TakesValue returns true of the flag takes a value, otherwise false +func (f *DurationFlag) TakesValue() bool { + return true +} + +// GetUsage returns the usage string for the flag +func (f *DurationFlag) GetUsage() string { + return f.Usage +} + +// GetCategory returns the category for the flag +func (f *DurationFlag) GetCategory() string { + return f.Category +} + +// GetValue returns the flags value as string 
representation and an empty +// string if the flag takes no value at all. +func (f *DurationFlag) GetValue() string { + return f.Value.String() +} + +// GetDefaultText returns the default text for this flag +func (f *DurationFlag) GetDefaultText() string { + if f.DefaultText != "" { + return f.DefaultText + } + if f.defaultValueSet { + return f.defaultValue.String() + } + return f.Value.String() +} + +// GetEnvVars returns the env vars for this flag +func (f *DurationFlag) GetEnvVars() []string { + return f.EnvVars +} + +// Apply populates the flag given the flag set and environment +func (f *DurationFlag) Apply(set *flag.FlagSet) error { + // set default value so that environment wont be able to overwrite it + f.defaultValue = f.Value + f.defaultValueSet = true + + if val, source, found := flagFromEnvOrFile(f.EnvVars, f.FilePath); found { + if val != "" { + valDuration, err := time.ParseDuration(val) + + if err != nil { + return fmt.Errorf("could not parse %q as duration value from %s for flag %s: %s", val, source, f.Name, err) + } + + f.Value = valDuration + f.HasBeenSet = true + } + } + + for _, name := range f.Names() { + if f.Destination != nil { + set.DurationVar(f.Destination, name, f.Value, f.Usage) + continue + } + set.Duration(name, f.Value, f.Usage) + } + return nil +} + +// Get returns the flag’s value in the given Context. 
+func (f *DurationFlag) Get(ctx *Context) time.Duration { + return ctx.Duration(f.Name) +} + +// RunAction executes flag action if set +func (f *DurationFlag) RunAction(c *Context) error { + if f.Action != nil { + return f.Action(c, c.Duration(f.Name)) + } + + return nil +} + +// Duration looks up the value of a local DurationFlag, returns +// 0 if not found +func (cCtx *Context) Duration(name string) time.Duration { + if fs := cCtx.lookupFlagSet(name); fs != nil { + return lookupDuration(name, fs) + } + return 0 +} + +func lookupDuration(name string, set *flag.FlagSet) time.Duration { + f := set.Lookup(name) + if f != nil { + parsed, err := time.ParseDuration(f.Value.String()) + if err != nil { + return 0 + } + return parsed + } + return 0 +} diff --git a/vendor/github.com/urfave/cli/v2/flag_ext.go b/vendor/github.com/urfave/cli/v2/flag_ext.go new file mode 100644 index 000000000..64da59ea9 --- /dev/null +++ b/vendor/github.com/urfave/cli/v2/flag_ext.go @@ -0,0 +1,48 @@ +package cli + +import "flag" + +type extFlag struct { + f *flag.Flag +} + +func (e *extFlag) Apply(fs *flag.FlagSet) error { + fs.Var(e.f.Value, e.f.Name, e.f.Usage) + return nil +} + +func (e *extFlag) Names() []string { + return []string{e.f.Name} +} + +func (e *extFlag) IsSet() bool { + return false +} + +func (e *extFlag) String() string { + return FlagStringer(e) +} + +func (e *extFlag) IsVisible() bool { + return true +} + +func (e *extFlag) TakesValue() bool { + return false +} + +func (e *extFlag) GetUsage() string { + return e.f.Usage +} + +func (e *extFlag) GetValue() string { + return e.f.Value.String() +} + +func (e *extFlag) GetDefaultText() string { + return e.f.DefValue +} + +func (e *extFlag) GetEnvVars() []string { + return nil +} diff --git a/vendor/github.com/urfave/cli/v2/flag_float64.go b/vendor/github.com/urfave/cli/v2/flag_float64.go new file mode 100644 index 000000000..6a4de5c88 --- /dev/null +++ b/vendor/github.com/urfave/cli/v2/flag_float64.go @@ -0,0 +1,107 @@ +package 
cli + +import ( + "flag" + "fmt" + "strconv" +) + +// TakesValue returns true of the flag takes a value, otherwise false +func (f *Float64Flag) TakesValue() bool { + return true +} + +// GetUsage returns the usage string for the flag +func (f *Float64Flag) GetUsage() string { + return f.Usage +} + +// GetCategory returns the category for the flag +func (f *Float64Flag) GetCategory() string { + return f.Category +} + +// GetValue returns the flags value as string representation and an empty +// string if the flag takes no value at all. +func (f *Float64Flag) GetValue() string { + return fmt.Sprintf("%v", f.Value) +} + +// GetDefaultText returns the default text for this flag +func (f *Float64Flag) GetDefaultText() string { + if f.DefaultText != "" { + return f.DefaultText + } + if f.defaultValueSet { + return fmt.Sprintf("%v", f.defaultValue) + } + return fmt.Sprintf("%v", f.Value) +} + +// GetEnvVars returns the env vars for this flag +func (f *Float64Flag) GetEnvVars() []string { + return f.EnvVars +} + +// Apply populates the flag given the flag set and environment +func (f *Float64Flag) Apply(set *flag.FlagSet) error { + f.defaultValue = f.Value + f.defaultValueSet = true + + if val, source, found := flagFromEnvOrFile(f.EnvVars, f.FilePath); found { + if val != "" { + valFloat, err := strconv.ParseFloat(val, 64) + if err != nil { + return fmt.Errorf("could not parse %q as float64 value from %s for flag %s: %s", val, source, f.Name, err) + } + + f.Value = valFloat + f.HasBeenSet = true + } + } + + for _, name := range f.Names() { + if f.Destination != nil { + set.Float64Var(f.Destination, name, f.Value, f.Usage) + continue + } + set.Float64(name, f.Value, f.Usage) + } + + return nil +} + +// Get returns the flag’s value in the given Context. 
+func (f *Float64Flag) Get(ctx *Context) float64 { + return ctx.Float64(f.Name) +} + +// RunAction executes flag action if set +func (f *Float64Flag) RunAction(c *Context) error { + if f.Action != nil { + return f.Action(c, c.Float64(f.Name)) + } + + return nil +} + +// Float64 looks up the value of a local Float64Flag, returns +// 0 if not found +func (cCtx *Context) Float64(name string) float64 { + if fs := cCtx.lookupFlagSet(name); fs != nil { + return lookupFloat64(name, fs) + } + return 0 +} + +func lookupFloat64(name string, set *flag.FlagSet) float64 { + f := set.Lookup(name) + if f != nil { + parsed, err := strconv.ParseFloat(f.Value.String(), 64) + if err != nil { + return 0 + } + return parsed + } + return 0 +} diff --git a/vendor/github.com/urfave/cli/v2/flag_float64_slice.go b/vendor/github.com/urfave/cli/v2/flag_float64_slice.go new file mode 100644 index 000000000..0bc4612c8 --- /dev/null +++ b/vendor/github.com/urfave/cli/v2/flag_float64_slice.go @@ -0,0 +1,216 @@ +package cli + +import ( + "encoding/json" + "flag" + "fmt" + "strconv" + "strings" +) + +// Float64Slice wraps []float64 to satisfy flag.Value +type Float64Slice struct { + slice []float64 + separator separatorSpec + hasBeenSet bool +} + +// NewFloat64Slice makes a *Float64Slice with default values +func NewFloat64Slice(defaults ...float64) *Float64Slice { + return &Float64Slice{slice: append([]float64{}, defaults...)} +} + +// clone allocate a copy of self object +func (f *Float64Slice) clone() *Float64Slice { + n := &Float64Slice{ + slice: make([]float64, len(f.slice)), + hasBeenSet: f.hasBeenSet, + } + copy(n.slice, f.slice) + return n +} + +func (f *Float64Slice) WithSeparatorSpec(spec separatorSpec) { + f.separator = spec +} + +// Set parses the value into a float64 and appends it to the list of values +func (f *Float64Slice) Set(value string) error { + if !f.hasBeenSet { + f.slice = []float64{} + f.hasBeenSet = true + } + + if strings.HasPrefix(value, slPfx) { + // Deserializing 
assumes overwrite + _ = json.Unmarshal([]byte(strings.Replace(value, slPfx, "", 1)), &f.slice) + f.hasBeenSet = true + return nil + } + + for _, s := range f.separator.flagSplitMultiValues(value) { + tmp, err := strconv.ParseFloat(strings.TrimSpace(s), 64) + if err != nil { + return err + } + + f.slice = append(f.slice, tmp) + } + return nil +} + +// String returns a readable representation of this value (for usage defaults) +func (f *Float64Slice) String() string { + v := f.slice + if v == nil { + // treat nil the same as zero length non-nil + v = make([]float64, 0) + } + return fmt.Sprintf("%#v", v) +} + +// Serialize allows Float64Slice to fulfill Serializer +func (f *Float64Slice) Serialize() string { + jsonBytes, _ := json.Marshal(f.slice) + return fmt.Sprintf("%s%s", slPfx, string(jsonBytes)) +} + +// Value returns the slice of float64s set by this flag +func (f *Float64Slice) Value() []float64 { + return f.slice +} + +// Get returns the slice of float64s set by this flag +func (f *Float64Slice) Get() interface{} { + return *f +} + +// String returns a readable representation of this value +// (for usage defaults) +func (f *Float64SliceFlag) String() string { + return FlagStringer(f) +} + +// TakesValue returns true if the flag takes a value, otherwise false +func (f *Float64SliceFlag) TakesValue() bool { + return true +} + +// GetUsage returns the usage string for the flag +func (f *Float64SliceFlag) GetUsage() string { + return f.Usage +} + +// GetCategory returns the category for the flag +func (f *Float64SliceFlag) GetCategory() string { + return f.Category +} + +// GetValue returns the flags value as string representation and an empty +// string if the flag takes no value at all. 
+func (f *Float64SliceFlag) GetValue() string { + var defaultVals []string + if f.Value != nil && len(f.Value.Value()) > 0 { + for _, i := range f.Value.Value() { + defaultVals = append(defaultVals, strings.TrimRight(strings.TrimRight(fmt.Sprintf("%f", i), "0"), ".")) + } + } + return strings.Join(defaultVals, ", ") +} + +// GetDefaultText returns the default text for this flag +func (f *Float64SliceFlag) GetDefaultText() string { + if f.DefaultText != "" { + return f.DefaultText + } + return f.GetValue() +} + +// GetEnvVars returns the env vars for this flag +func (f *Float64SliceFlag) GetEnvVars() []string { + return f.EnvVars +} + +// IsSliceFlag implements DocGenerationSliceFlag. +func (f *Float64SliceFlag) IsSliceFlag() bool { + return true +} + +// Apply populates the flag given the flag set and environment +func (f *Float64SliceFlag) Apply(set *flag.FlagSet) error { + // apply any default + if f.Destination != nil && f.Value != nil { + f.Destination.slice = make([]float64, len(f.Value.slice)) + copy(f.Destination.slice, f.Value.slice) + } + + // resolve setValue (what we will assign to the set) + var setValue *Float64Slice + switch { + case f.Destination != nil: + setValue = f.Destination + case f.Value != nil: + setValue = f.Value.clone() + default: + setValue = new(Float64Slice) + setValue.WithSeparatorSpec(f.separator) + } + + if val, source, found := flagFromEnvOrFile(f.EnvVars, f.FilePath); found { + if val != "" { + for _, s := range f.separator.flagSplitMultiValues(val) { + if err := setValue.Set(strings.TrimSpace(s)); err != nil { + return fmt.Errorf("could not parse %q as float64 slice value from %s for flag %s: %s", val, source, f.Name, err) + } + } + + // Set this to false so that we reset the slice if we then set values from + // flags that have already been set by the environment. 
+ setValue.hasBeenSet = false + f.HasBeenSet = true + } + } + + for _, name := range f.Names() { + set.Var(setValue, name, f.Usage) + } + + return nil +} + +func (f *Float64SliceFlag) WithSeparatorSpec(spec separatorSpec) { + f.separator = spec +} + +// Get returns the flag’s value in the given Context. +func (f *Float64SliceFlag) Get(ctx *Context) []float64 { + return ctx.Float64Slice(f.Name) +} + +// RunAction executes flag action if set +func (f *Float64SliceFlag) RunAction(c *Context) error { + if f.Action != nil { + return f.Action(c, c.Float64Slice(f.Name)) + } + + return nil +} + +// Float64Slice looks up the value of a local Float64SliceFlag, returns +// nil if not found +func (cCtx *Context) Float64Slice(name string) []float64 { + if fs := cCtx.lookupFlagSet(name); fs != nil { + return lookupFloat64Slice(name, fs) + } + return nil +} + +func lookupFloat64Slice(name string, set *flag.FlagSet) []float64 { + f := set.Lookup(name) + if f != nil { + if slice, ok := unwrapFlagValue(f.Value).(*Float64Slice); ok { + return slice.Value() + } + } + return nil +} diff --git a/vendor/github.com/urfave/cli/v2/flag_generic.go b/vendor/github.com/urfave/cli/v2/flag_generic.go new file mode 100644 index 000000000..7528c934c --- /dev/null +++ b/vendor/github.com/urfave/cli/v2/flag_generic.go @@ -0,0 +1,131 @@ +package cli + +import ( + "flag" + "fmt" +) + +// Generic is a generic parseable type identified by a specific flag +type Generic interface { + Set(value string) error + String() string +} + +type stringGeneric struct { + value string +} + +func (s *stringGeneric) Set(value string) error { + s.value = value + return nil +} + +func (s *stringGeneric) String() string { + return s.value +} + +// TakesValue returns true of the flag takes a value, otherwise false +func (f *GenericFlag) TakesValue() bool { + return true +} + +// GetUsage returns the usage string for the flag +func (f *GenericFlag) GetUsage() string { + return f.Usage +} + +// GetCategory returns the 
category for the flag +func (f *GenericFlag) GetCategory() string { + return f.Category +} + +// GetValue returns the flags value as string representation and an empty +// string if the flag takes no value at all. +func (f *GenericFlag) GetValue() string { + if f.Value != nil { + return f.Value.String() + } + return "" +} + +// GetDefaultText returns the default text for this flag +func (f *GenericFlag) GetDefaultText() string { + if f.DefaultText != "" { + return f.DefaultText + } + val := f.Value + if f.defaultValueSet { + val = f.defaultValue + } + + if val != nil { + return val.String() + } + + return "" +} + +// GetEnvVars returns the env vars for this flag +func (f *GenericFlag) GetEnvVars() []string { + return f.EnvVars +} + +// Apply takes the flagset and calls Set on the generic flag with the value +// provided by the user for parsing by the flag +func (f *GenericFlag) Apply(set *flag.FlagSet) error { + // set default value so that environment wont be able to overwrite it + if f.Value != nil { + f.defaultValue = &stringGeneric{value: f.Value.String()} + f.defaultValueSet = true + } + + if val, source, found := flagFromEnvOrFile(f.EnvVars, f.FilePath); found { + if val != "" { + if err := f.Value.Set(val); err != nil { + return fmt.Errorf("could not parse %q from %s as value for flag %s: %s", val, source, f.Name, err) + } + + f.HasBeenSet = true + } + } + + for _, name := range f.Names() { + if f.Destination != nil { + set.Var(f.Destination, name, f.Usage) + continue + } + set.Var(f.Value, name, f.Usage) + } + + return nil +} + +// Get returns the flag’s value in the given Context. 
+func (f *GenericFlag) Get(ctx *Context) interface{} { + return ctx.Generic(f.Name) +} + +// RunAction executes flag action if set +func (f *GenericFlag) RunAction(c *Context) error { + if f.Action != nil { + return f.Action(c, c.Generic(f.Name)) + } + + return nil +} + +// Generic looks up the value of a local GenericFlag, returns +// nil if not found +func (cCtx *Context) Generic(name string) interface{} { + if fs := cCtx.lookupFlagSet(name); fs != nil { + return lookupGeneric(name, fs) + } + return nil +} + +func lookupGeneric(name string, set *flag.FlagSet) interface{} { + if f := set.Lookup(name); f != nil { + return f.Value + } + return nil +} diff --git a/vendor/github.com/urfave/cli/v2/flag_int.go b/vendor/github.com/urfave/cli/v2/flag_int.go new file mode 100644 index 000000000..750e7ebfc --- /dev/null +++ b/vendor/github.com/urfave/cli/v2/flag_int.go @@ -0,0 +1,109 @@ +package cli + +import ( + "flag" + "fmt" + "strconv" +) + +// TakesValue returns true of the flag takes a value, otherwise false +func (f *IntFlag) TakesValue() bool { + return true +} + +// GetUsage returns the usage string for the flag +func (f *IntFlag) GetUsage() string { + return f.Usage +} + +// GetCategory returns the category for the flag +func (f *IntFlag) GetCategory() string { + return f.Category +} + +// GetValue returns the flags value as string representation and an empty +// string if the flag takes no value at all. 
+func (f *IntFlag) GetValue() string { + return fmt.Sprintf("%d", f.Value) +} + +// GetDefaultText returns the default text for this flag +func (f *IntFlag) GetDefaultText() string { + if f.DefaultText != "" { + return f.DefaultText + } + if f.defaultValueSet { + return fmt.Sprintf("%d", f.defaultValue) + } + return fmt.Sprintf("%d", f.Value) +} + +// GetEnvVars returns the env vars for this flag +func (f *IntFlag) GetEnvVars() []string { + return f.EnvVars +} + +// Apply populates the flag given the flag set and environment +func (f *IntFlag) Apply(set *flag.FlagSet) error { + // set default value so that environment wont be able to overwrite it + f.defaultValue = f.Value + f.defaultValueSet = true + + if val, source, found := flagFromEnvOrFile(f.EnvVars, f.FilePath); found { + if val != "" { + valInt, err := strconv.ParseInt(val, f.Base, 64) + + if err != nil { + return fmt.Errorf("could not parse %q as int value from %s for flag %s: %s", val, source, f.Name, err) + } + + f.Value = int(valInt) + f.HasBeenSet = true + } + } + + for _, name := range f.Names() { + if f.Destination != nil { + set.IntVar(f.Destination, name, f.Value, f.Usage) + continue + } + set.Int(name, f.Value, f.Usage) + } + + return nil +} + +// Get returns the flag’s value in the given Context. 
+func (f *IntFlag) Get(ctx *Context) int { + return ctx.Int(f.Name) +} + +// RunAction executes flag action if set +func (f *IntFlag) RunAction(c *Context) error { + if f.Action != nil { + return f.Action(c, c.Int(f.Name)) + } + + return nil +} + +// Int looks up the value of a local IntFlag, returns +// 0 if not found +func (cCtx *Context) Int(name string) int { + if fs := cCtx.lookupFlagSet(name); fs != nil { + return lookupInt(name, fs) + } + return 0 +} + +func lookupInt(name string, set *flag.FlagSet) int { + f := set.Lookup(name) + if f != nil { + parsed, err := strconv.ParseInt(f.Value.String(), 0, 64) + if err != nil { + return 0 + } + return int(parsed) + } + return 0 +} diff --git a/vendor/github.com/urfave/cli/v2/flag_int64.go b/vendor/github.com/urfave/cli/v2/flag_int64.go new file mode 100644 index 000000000..688c26716 --- /dev/null +++ b/vendor/github.com/urfave/cli/v2/flag_int64.go @@ -0,0 +1,108 @@ +package cli + +import ( + "flag" + "fmt" + "strconv" +) + +// TakesValue returns true of the flag takes a value, otherwise false +func (f *Int64Flag) TakesValue() bool { + return true +} + +// GetUsage returns the usage string for the flag +func (f *Int64Flag) GetUsage() string { + return f.Usage +} + +// GetCategory returns the category for the flag +func (f *Int64Flag) GetCategory() string { + return f.Category +} + +// GetValue returns the flags value as string representation and an empty +// string if the flag takes no value at all. 
+func (f *Int64Flag) GetValue() string { + return fmt.Sprintf("%d", f.Value) +} + +// GetDefaultText returns the default text for this flag +func (f *Int64Flag) GetDefaultText() string { + if f.DefaultText != "" { + return f.DefaultText + } + if f.defaultValueSet { + return fmt.Sprintf("%d", f.defaultValue) + } + return fmt.Sprintf("%d", f.Value) +} + +// GetEnvVars returns the env vars for this flag +func (f *Int64Flag) GetEnvVars() []string { + return f.EnvVars +} + +// Apply populates the flag given the flag set and environment +func (f *Int64Flag) Apply(set *flag.FlagSet) error { + // set default value so that environment wont be able to overwrite it + f.defaultValue = f.Value + f.defaultValueSet = true + + if val, source, found := flagFromEnvOrFile(f.EnvVars, f.FilePath); found { + if val != "" { + valInt, err := strconv.ParseInt(val, f.Base, 64) + + if err != nil { + return fmt.Errorf("could not parse %q as int value from %s for flag %s: %s", val, source, f.Name, err) + } + + f.Value = valInt + f.HasBeenSet = true + } + } + + for _, name := range f.Names() { + if f.Destination != nil { + set.Int64Var(f.Destination, name, f.Value, f.Usage) + continue + } + set.Int64(name, f.Value, f.Usage) + } + return nil +} + +// Get returns the flag’s value in the given Context. 
+func (f *Int64Flag) Get(ctx *Context) int64 { + return ctx.Int64(f.Name) +} + +// RunAction executes flag action if set +func (f *Int64Flag) RunAction(c *Context) error { + if f.Action != nil { + return f.Action(c, c.Int64(f.Name)) + } + + return nil +} + +// Int64 looks up the value of a local Int64Flag, returns +// 0 if not found +func (cCtx *Context) Int64(name string) int64 { + if fs := cCtx.lookupFlagSet(name); fs != nil { + return lookupInt64(name, fs) + } + return 0 +} + +func lookupInt64(name string, set *flag.FlagSet) int64 { + f := set.Lookup(name) + if f != nil { + parsed, err := strconv.ParseInt(f.Value.String(), 0, 64) + if err != nil { + return 0 + } + return parsed + } + return 0 +} diff --git a/vendor/github.com/urfave/cli/v2/flag_int64_slice.go b/vendor/github.com/urfave/cli/v2/flag_int64_slice.go new file mode 100644 index 000000000..d45c2dd44 --- /dev/null +++ b/vendor/github.com/urfave/cli/v2/flag_int64_slice.go @@ -0,0 +1,215 @@ +package cli + +import ( + "encoding/json" + "flag" + "fmt" + "strconv" + "strings" +) + +// Int64Slice wraps []int64 to satisfy flag.Value +type Int64Slice struct { + slice []int64 + separator separatorSpec + hasBeenSet bool +} + +// NewInt64Slice makes an *Int64Slice with default values +func NewInt64Slice(defaults ...int64) *Int64Slice { + return &Int64Slice{slice: append([]int64{}, defaults...)} +} + +// clone allocate a copy of self object +func (i *Int64Slice) clone() *Int64Slice { + n := &Int64Slice{ + slice: make([]int64, len(i.slice)), + hasBeenSet: i.hasBeenSet, + } + copy(n.slice, i.slice) + return n +} + +func (i *Int64Slice) WithSeparatorSpec(spec separatorSpec) { + i.separator = spec +} + +// Set parses the value into an integer and appends it to the list of values +func (i *Int64Slice) Set(value string) error { + if !i.hasBeenSet { + i.slice = []int64{} + i.hasBeenSet = true + } + + if strings.HasPrefix(value, slPfx) { + // Deserializing assumes overwrite + _ = 
json.Unmarshal([]byte(strings.Replace(value, slPfx, "", 1)), &i.slice) + i.hasBeenSet = true + return nil + } + + for _, s := range i.separator.flagSplitMultiValues(value) { + tmp, err := strconv.ParseInt(strings.TrimSpace(s), 0, 64) + if err != nil { + return err + } + + i.slice = append(i.slice, tmp) + } + + return nil +} + +// String returns a readable representation of this value (for usage defaults) +func (i *Int64Slice) String() string { + v := i.slice + if v == nil { + // treat nil the same as zero length non-nil + v = make([]int64, 0) + } + return fmt.Sprintf("%#v", v) +} + +// Serialize allows Int64Slice to fulfill Serializer +func (i *Int64Slice) Serialize() string { + jsonBytes, _ := json.Marshal(i.slice) + return fmt.Sprintf("%s%s", slPfx, string(jsonBytes)) +} + +// Value returns the slice of ints set by this flag +func (i *Int64Slice) Value() []int64 { + return i.slice +} + +// Get returns the slice of ints set by this flag +func (i *Int64Slice) Get() interface{} { + return *i +} + +// String returns a readable representation of this value +// (for usage defaults) +func (f *Int64SliceFlag) String() string { + return FlagStringer(f) +} + +// TakesValue returns true of the flag takes a value, otherwise false +func (f *Int64SliceFlag) TakesValue() bool { + return true +} + +// GetUsage returns the usage string for the flag +func (f *Int64SliceFlag) GetUsage() string { + return f.Usage +} + +// GetCategory returns the category for the flag +func (f *Int64SliceFlag) GetCategory() string { + return f.Category +} + +// GetValue returns the flags value as string representation and an empty +// string if the flag takes no value at all. 
+func (f *Int64SliceFlag) GetValue() string { + var defaultVals []string + if f.Value != nil && len(f.Value.Value()) > 0 { + for _, i := range f.Value.Value() { + defaultVals = append(defaultVals, strconv.FormatInt(i, 10)) + } + } + return strings.Join(defaultVals, ", ") +} + +// GetDefaultText returns the default text for this flag +func (f *Int64SliceFlag) GetDefaultText() string { + if f.DefaultText != "" { + return f.DefaultText + } + return f.GetValue() +} + +// GetEnvVars returns the env vars for this flag +func (f *Int64SliceFlag) GetEnvVars() []string { + return f.EnvVars +} + +// IsSliceFlag implements DocGenerationSliceFlag. +func (f *Int64SliceFlag) IsSliceFlag() bool { + return true +} + +// Apply populates the flag given the flag set and environment +func (f *Int64SliceFlag) Apply(set *flag.FlagSet) error { + // apply any default + if f.Destination != nil && f.Value != nil { + f.Destination.slice = make([]int64, len(f.Value.slice)) + copy(f.Destination.slice, f.Value.slice) + } + + // resolve setValue (what we will assign to the set) + var setValue *Int64Slice + switch { + case f.Destination != nil: + setValue = f.Destination + case f.Value != nil: + setValue = f.Value.clone() + default: + setValue = new(Int64Slice) + setValue.WithSeparatorSpec(f.separator) + } + + if val, source, ok := flagFromEnvOrFile(f.EnvVars, f.FilePath); ok && val != "" { + for _, s := range f.separator.flagSplitMultiValues(val) { + if err := setValue.Set(strings.TrimSpace(s)); err != nil { + return fmt.Errorf("could not parse %q as int64 slice value from %s for flag %s: %s", val, source, f.Name, err) + } + } + + // Set this to false so that we reset the slice if we then set values from + // flags that have already been set by the environment. 
+ setValue.hasBeenSet = false + f.HasBeenSet = true + } + + for _, name := range f.Names() { + set.Var(setValue, name, f.Usage) + } + + return nil +} + +func (f *Int64SliceFlag) WithSeparatorSpec(spec separatorSpec) { + f.separator = spec +} + +// Get returns the flag’s value in the given Context. +func (f *Int64SliceFlag) Get(ctx *Context) []int64 { + return ctx.Int64Slice(f.Name) +} + +// RunAction executes flag action if set +func (f *Int64SliceFlag) RunAction(c *Context) error { + if f.Action != nil { + return f.Action(c, c.Int64Slice(f.Name)) + } + + return nil +} + +// Int64Slice looks up the value of a local Int64SliceFlag, returns +// nil if not found +func (cCtx *Context) Int64Slice(name string) []int64 { + if fs := cCtx.lookupFlagSet(name); fs != nil { + return lookupInt64Slice(name, fs) + } + return nil +} + +func lookupInt64Slice(name string, set *flag.FlagSet) []int64 { + f := set.Lookup(name) + if f != nil { + if slice, ok := unwrapFlagValue(f.Value).(*Int64Slice); ok { + return slice.Value() + } + } + return nil +} diff --git a/vendor/github.com/urfave/cli/v2/flag_int_slice.go b/vendor/github.com/urfave/cli/v2/flag_int_slice.go new file mode 100644 index 000000000..da9c09bc7 --- /dev/null +++ b/vendor/github.com/urfave/cli/v2/flag_int_slice.go @@ -0,0 +1,226 @@ +package cli + +import ( + "encoding/json" + "flag" + "fmt" + "strconv" + "strings" +) + +// IntSlice wraps []int to satisfy flag.Value +type IntSlice struct { + slice []int + separator separatorSpec + hasBeenSet bool +} + +// NewIntSlice makes an *IntSlice with default values +func NewIntSlice(defaults ...int) *IntSlice { + return &IntSlice{slice: append([]int{}, defaults...)} +} + +// clone allocate a copy of self object +func (i *IntSlice) clone() *IntSlice { + n := &IntSlice{ + slice: make([]int, len(i.slice)), + hasBeenSet: i.hasBeenSet, + } + copy(n.slice, i.slice) + return n +} + +// TODO: Consistently have specific Set function for Int64 and Float64 ? 
+// SetInt directly adds an integer to the list of values +func (i *IntSlice) SetInt(value int) { + if !i.hasBeenSet { + i.slice = []int{} + i.hasBeenSet = true + } + + i.slice = append(i.slice, value) +} + +func (i *IntSlice) WithSeparatorSpec(spec separatorSpec) { + i.separator = spec +} + +// Set parses the value into an integer and appends it to the list of values +func (i *IntSlice) Set(value string) error { + if !i.hasBeenSet { + i.slice = []int{} + i.hasBeenSet = true + } + + if strings.HasPrefix(value, slPfx) { + // Deserializing assumes overwrite + _ = json.Unmarshal([]byte(strings.Replace(value, slPfx, "", 1)), &i.slice) + i.hasBeenSet = true + return nil + } + + for _, s := range i.separator.flagSplitMultiValues(value) { + tmp, err := strconv.ParseInt(strings.TrimSpace(s), 0, 64) + if err != nil { + return err + } + + i.slice = append(i.slice, int(tmp)) + } + + return nil +} + +// String returns a readable representation of this value (for usage defaults) +func (i *IntSlice) String() string { + v := i.slice + if v == nil { + // treat nil the same as zero length non-nil + v = make([]int, 0) + } + return fmt.Sprintf("%#v", v) +} + +// Serialize allows IntSlice to fulfill Serializer +func (i *IntSlice) Serialize() string { + jsonBytes, _ := json.Marshal(i.slice) + return fmt.Sprintf("%s%s", slPfx, string(jsonBytes)) +} + +// Value returns the slice of ints set by this flag +func (i *IntSlice) Value() []int { + return i.slice +} + +// Get returns the slice of ints set by this flag +func (i *IntSlice) Get() interface{} { + return *i +} + +// String returns a readable representation of this value +// (for usage defaults) +func (f *IntSliceFlag) String() string { + return FlagStringer(f) +} + +// TakesValue returns true of the flag takes a value, otherwise false +func (f *IntSliceFlag) TakesValue() bool { + return true +} + +// GetUsage returns the usage string for the flag +func (f *IntSliceFlag) GetUsage() string { + return f.Usage +} + +// GetCategory 
returns the category for the flag +func (f *IntSliceFlag) GetCategory() string { + return f.Category +} + +// GetValue returns the flags value as string representation and an empty +// string if the flag takes no value at all. +func (f *IntSliceFlag) GetValue() string { + var defaultVals []string + if f.Value != nil && len(f.Value.Value()) > 0 { + for _, i := range f.Value.Value() { + defaultVals = append(defaultVals, strconv.Itoa(i)) + } + } + return strings.Join(defaultVals, ", ") +} + +// GetDefaultText returns the default text for this flag +func (f *IntSliceFlag) GetDefaultText() string { + if f.DefaultText != "" { + return f.DefaultText + } + return f.GetValue() +} + +// GetEnvVars returns the env vars for this flag +func (f *IntSliceFlag) GetEnvVars() []string { + return f.EnvVars +} + +// IsSliceFlag implements DocGenerationSliceFlag. +func (f *IntSliceFlag) IsSliceFlag() bool { + return true +} + +// Apply populates the flag given the flag set and environment +func (f *IntSliceFlag) Apply(set *flag.FlagSet) error { + // apply any default + if f.Destination != nil && f.Value != nil { + f.Destination.slice = make([]int, len(f.Value.slice)) + copy(f.Destination.slice, f.Value.slice) + } + + // resolve setValue (what we will assign to the set) + var setValue *IntSlice + switch { + case f.Destination != nil: + setValue = f.Destination + case f.Value != nil: + setValue = f.Value.clone() + default: + setValue = new(IntSlice) + setValue.WithSeparatorSpec(f.separator) + } + + if val, source, ok := flagFromEnvOrFile(f.EnvVars, f.FilePath); ok && val != "" { + for _, s := range f.separator.flagSplitMultiValues(val) { + if err := setValue.Set(strings.TrimSpace(s)); err != nil { + return fmt.Errorf("could not parse %q as int slice value from %s for flag %s: %s", val, source, f.Name, err) + } + } + + // Set this to false so that we reset the slice if we then set values from + // flags that have already been set by the environment. 
+ setValue.hasBeenSet = false + f.HasBeenSet = true + } + + for _, name := range f.Names() { + set.Var(setValue, name, f.Usage) + } + + return nil +} + +func (f *IntSliceFlag) WithSeparatorSpec(spec separatorSpec) { + f.separator = spec +} + +// Get returns the flag’s value in the given Context. +func (f *IntSliceFlag) Get(ctx *Context) []int { + return ctx.IntSlice(f.Name) +} + +// RunAction executes flag action if set +func (f *IntSliceFlag) RunAction(c *Context) error { + if f.Action != nil { + return f.Action(c, c.IntSlice(f.Name)) + } + + return nil +} + +// IntSlice looks up the value of a local IntSliceFlag, returns +// nil if not found +func (cCtx *Context) IntSlice(name string) []int { + if fs := cCtx.lookupFlagSet(name); fs != nil { + return lookupIntSlice(name, fs) + } + return nil +} + +func lookupIntSlice(name string, set *flag.FlagSet) []int { + f := set.Lookup(name) + if f != nil { + if slice, ok := unwrapFlagValue(f.Value).(*IntSlice); ok { + return slice.Value() + } + } + return nil +} diff --git a/vendor/github.com/urfave/cli/v2/flag_path.go b/vendor/github.com/urfave/cli/v2/flag_path.go new file mode 100644 index 000000000..76cb35248 --- /dev/null +++ b/vendor/github.com/urfave/cli/v2/flag_path.go @@ -0,0 +1,102 @@ +package cli + +import ( + "flag" + "fmt" +) + +type Path = string + +// TakesValue returns true of the flag takes a value, otherwise false +func (f *PathFlag) TakesValue() bool { + return true +} + +// GetUsage returns the usage string for the flag +func (f *PathFlag) GetUsage() string { + return f.Usage +} + +// GetCategory returns the category for the flag +func (f *PathFlag) GetCategory() string { + return f.Category +} + +// GetValue returns the flags value as string representation and an empty +// string if the flag takes no value at all. 
+func (f *PathFlag) GetValue() string { + return f.Value +} + +// GetDefaultText returns the default text for this flag +func (f *PathFlag) GetDefaultText() string { + if f.DefaultText != "" { + return f.DefaultText + } + val := f.Value + if f.defaultValueSet { + val = f.defaultValue + } + if val == "" { + return val + } + return fmt.Sprintf("%q", val) +} + +// GetEnvVars returns the env vars for this flag +func (f *PathFlag) GetEnvVars() []string { + return f.EnvVars +} + +// Apply populates the flag given the flag set and environment +func (f *PathFlag) Apply(set *flag.FlagSet) error { + // set default value so that environment wont be able to overwrite it + f.defaultValue = f.Value + f.defaultValueSet = true + + if val, _, found := flagFromEnvOrFile(f.EnvVars, f.FilePath); found { + f.Value = val + f.HasBeenSet = true + } + + for _, name := range f.Names() { + if f.Destination != nil { + set.StringVar(f.Destination, name, f.Value, f.Usage) + continue + } + set.String(name, f.Value, f.Usage) + } + + return nil +} + +// Get returns the flag’s value in the given Context. 
+func (f *PathFlag) Get(ctx *Context) string { + return ctx.Path(f.Name) +} + +// RunAction executes flag action if set +func (f *PathFlag) RunAction(c *Context) error { + if f.Action != nil { + return f.Action(c, c.Path(f.Name)) + } + + return nil +} + +// Path looks up the value of a local PathFlag, returns +// "" if not found +func (cCtx *Context) Path(name string) string { + if fs := cCtx.lookupFlagSet(name); fs != nil { + return lookupPath(name, fs) + } + + return "" +} + +func lookupPath(name string, set *flag.FlagSet) string { + if f := set.Lookup(name); f != nil { + return f.Value.String() + } + return "" +} diff --git a/vendor/github.com/urfave/cli/v2/flag_string.go b/vendor/github.com/urfave/cli/v2/flag_string.go new file mode 100644 index 000000000..0f73e0621 --- /dev/null +++ b/vendor/github.com/urfave/cli/v2/flag_string.go @@ -0,0 +1,100 @@ +package cli + +import ( + "flag" + "fmt" +) + +// TakesValue returns true of the flag takes a value, otherwise false +func (f *StringFlag) TakesValue() bool { + return true +} + +// GetUsage returns the usage string for the flag +func (f *StringFlag) GetUsage() string { + return f.Usage +} + +// GetCategory returns the category for the flag +func (f *StringFlag) GetCategory() string { + return f.Category +} + +// GetValue returns the flags value as string representation and an empty +// string if the flag takes no value at all. 
+func (f *StringFlag) GetValue() string { + return f.Value +} + +// GetDefaultText returns the default text for this flag +func (f *StringFlag) GetDefaultText() string { + if f.DefaultText != "" { + return f.DefaultText + } + val := f.Value + if f.defaultValueSet { + val = f.defaultValue + } + + if val == "" { + return val + } + return fmt.Sprintf("%q", val) +} + +// GetEnvVars returns the env vars for this flag +func (f *StringFlag) GetEnvVars() []string { + return f.EnvVars +} + +// Apply populates the flag given the flag set and environment +func (f *StringFlag) Apply(set *flag.FlagSet) error { + // set default value so that environment wont be able to overwrite it + f.defaultValue = f.Value + f.defaultValueSet = true + + if val, _, found := flagFromEnvOrFile(f.EnvVars, f.FilePath); found { + f.Value = val + f.HasBeenSet = true + } + + for _, name := range f.Names() { + if f.Destination != nil { + set.StringVar(f.Destination, name, f.Value, f.Usage) + continue + } + set.String(name, f.Value, f.Usage) + } + + return nil +} + +// Get returns the flag’s value in the given Context. 
+func (f *StringFlag) Get(ctx *Context) string { + return ctx.String(f.Name) +} + +// RunAction executes flag action if set +func (f *StringFlag) RunAction(c *Context) error { + if f.Action != nil { + return f.Action(c, c.String(f.Name)) + } + + return nil +} + +// String looks up the value of a local StringFlag, returns +// "" if not found +func (cCtx *Context) String(name string) string { + if fs := cCtx.lookupFlagSet(name); fs != nil { + return lookupString(name, fs) + } + return "" +} + +func lookupString(name string, set *flag.FlagSet) string { + if f := set.Lookup(name); f != nil { + return f.Value.String() + } + return "" +} diff --git a/vendor/github.com/urfave/cli/v2/flag_string_slice.go b/vendor/github.com/urfave/cli/v2/flag_string_slice.go new file mode 100644 index 000000000..66bdf1afc --- /dev/null +++ b/vendor/github.com/urfave/cli/v2/flag_string_slice.go @@ -0,0 +1,216 @@ +package cli + +import ( + "encoding/json" + "flag" + "fmt" + "strconv" + "strings" +) + +// StringSlice wraps a []string to satisfy flag.Value +type StringSlice struct { + slice []string + separator separatorSpec + hasBeenSet bool + keepSpace bool +} + +// NewStringSlice creates a *StringSlice with default values +func NewStringSlice(defaults ...string) *StringSlice { + return &StringSlice{slice: append([]string{}, defaults...)} +} + +// clone allocate a copy of self object +func (s *StringSlice) clone() *StringSlice { + n := &StringSlice{ + slice: make([]string, len(s.slice)), + hasBeenSet: s.hasBeenSet, + } + copy(n.slice, s.slice) + return n +} + +// Set appends the string value to the list of values +func (s *StringSlice) Set(value string) error { + if !s.hasBeenSet { + s.slice = []string{} + s.hasBeenSet = true + } + + if strings.HasPrefix(value, slPfx) { + // Deserializing assumes overwrite + _ = json.Unmarshal([]byte(strings.Replace(value, slPfx, "", 1)), &s.slice) + s.hasBeenSet = true + return nil + } + + for _, t := range s.separator.flagSplitMultiValues(value) { + if 
!s.keepSpace { + t = strings.TrimSpace(t) + } + s.slice = append(s.slice, t) + } + + return nil +} + +func (s *StringSlice) WithSeparatorSpec(spec separatorSpec) { + s.separator = spec +} + +// String returns a readable representation of this value (for usage defaults) +func (s *StringSlice) String() string { + return fmt.Sprintf("%s", s.slice) +} + +// Serialize allows StringSlice to fulfill Serializer +func (s *StringSlice) Serialize() string { + jsonBytes, _ := json.Marshal(s.slice) + return fmt.Sprintf("%s%s", slPfx, string(jsonBytes)) +} + +// Value returns the slice of strings set by this flag +func (s *StringSlice) Value() []string { + return s.slice +} + +// Get returns the slice of strings set by this flag +func (s *StringSlice) Get() interface{} { + return *s +} + +// String returns a readable representation of this value +// (for usage defaults) +func (f *StringSliceFlag) String() string { + return FlagStringer(f) +} + +// TakesValue returns true of the flag takes a value, otherwise false +func (f *StringSliceFlag) TakesValue() bool { + return true +} + +// GetUsage returns the usage string for the flag +func (f *StringSliceFlag) GetUsage() string { + return f.Usage +} + +// GetCategory returns the category for the flag +func (f *StringSliceFlag) GetCategory() string { + return f.Category +} + +// GetValue returns the flags value as string representation and an empty +// string if the flag takes no value at all. 
+func (f *StringSliceFlag) GetValue() string { + var defaultVals []string + if f.Value != nil && len(f.Value.Value()) > 0 { + for _, s := range f.Value.Value() { + if len(s) > 0 { + defaultVals = append(defaultVals, strconv.Quote(s)) + } + } + } + return strings.Join(defaultVals, ", ") +} + +// GetDefaultText returns the default text for this flag +func (f *StringSliceFlag) GetDefaultText() string { + if f.DefaultText != "" { + return f.DefaultText + } + return f.GetValue() +} + +// GetEnvVars returns the env vars for this flag +func (f *StringSliceFlag) GetEnvVars() []string { + return f.EnvVars +} + +// IsSliceFlag implements DocGenerationSliceFlag. +func (f *StringSliceFlag) IsSliceFlag() bool { + return true +} + +// Apply populates the flag given the flag set and environment +func (f *StringSliceFlag) Apply(set *flag.FlagSet) error { + // apply any default + if f.Destination != nil && f.Value != nil { + f.Destination.slice = make([]string, len(f.Value.slice)) + copy(f.Destination.slice, f.Value.slice) + } + + // resolve setValue (what we will assign to the set) + var setValue *StringSlice + switch { + case f.Destination != nil: + setValue = f.Destination + case f.Value != nil: + setValue = f.Value.clone() + default: + setValue = new(StringSlice) + } + setValue.WithSeparatorSpec(f.separator) + + setValue.keepSpace = f.KeepSpace + + if val, source, found := flagFromEnvOrFile(f.EnvVars, f.FilePath); found { + for _, s := range f.separator.flagSplitMultiValues(val) { + if !f.KeepSpace { + s = strings.TrimSpace(s) + } + if err := setValue.Set(s); err != nil { + return fmt.Errorf("could not parse %q as string value from %s for flag %s: %s", val, source, f.Name, err) + } + } + + // Set this to false so that we reset the slice if we then set values from + // flags that have already been set by the environment. 
+ setValue.hasBeenSet = false + f.HasBeenSet = true + } + + for _, name := range f.Names() { + set.Var(setValue, name, f.Usage) + } + + return nil +} + +func (f *StringSliceFlag) WithSeparatorSpec(spec separatorSpec) { + f.separator = spec +} + +// Get returns the flag’s value in the given Context. +func (f *StringSliceFlag) Get(ctx *Context) []string { + return ctx.StringSlice(f.Name) +} + +// RunAction executes flag action if set +func (f *StringSliceFlag) RunAction(c *Context) error { + if f.Action != nil { + return f.Action(c, c.StringSlice(f.Name)) + } + + return nil +} + +// StringSlice looks up the value of a local StringSliceFlag, returns +// nil if not found +func (cCtx *Context) StringSlice(name string) []string { + if fs := cCtx.lookupFlagSet(name); fs != nil { + return lookupStringSlice(name, fs) + } + return nil +} + +func lookupStringSlice(name string, set *flag.FlagSet) []string { + f := set.Lookup(name) + if f != nil { + if slice, ok := unwrapFlagValue(f.Value).(*StringSlice); ok { + return slice.Value() + } + } + return nil +} diff --git a/vendor/github.com/urfave/cli/v2/flag_timestamp.go b/vendor/github.com/urfave/cli/v2/flag_timestamp.go new file mode 100644 index 000000000..b90123087 --- /dev/null +++ b/vendor/github.com/urfave/cli/v2/flag_timestamp.go @@ -0,0 +1,205 @@ +package cli + +import ( + "flag" + "fmt" + "time" +) + +// Timestamp wrap to satisfy golang's flag interface. 
+type Timestamp struct { + timestamp *time.Time + hasBeenSet bool + layout string + location *time.Location +} + +// Timestamp constructor +func NewTimestamp(timestamp time.Time) *Timestamp { + return &Timestamp{timestamp: ×tamp} +} + +// Set the timestamp value directly +func (t *Timestamp) SetTimestamp(value time.Time) { + if !t.hasBeenSet { + t.timestamp = &value + t.hasBeenSet = true + } +} + +// Set the timestamp string layout for future parsing +func (t *Timestamp) SetLayout(layout string) { + t.layout = layout +} + +// Set perceived timezone of the to-be parsed time string +func (t *Timestamp) SetLocation(loc *time.Location) { + t.location = loc +} + +// Parses the string value to timestamp +func (t *Timestamp) Set(value string) error { + var timestamp time.Time + var err error + + if t.location != nil { + timestamp, err = time.ParseInLocation(t.layout, value, t.location) + } else { + timestamp, err = time.Parse(t.layout, value) + } + + if err != nil { + return err + } + + t.timestamp = ×tamp + t.hasBeenSet = true + return nil +} + +// String returns a readable representation of this value (for usage defaults) +func (t *Timestamp) String() string { + return fmt.Sprintf("%#v", t.timestamp) +} + +// Value returns the timestamp value stored in the flag +func (t *Timestamp) Value() *time.Time { + return t.timestamp +} + +// Get returns the flag structure +func (t *Timestamp) Get() interface{} { + return *t +} + +// clone timestamp +func (t *Timestamp) clone() *Timestamp { + tc := &Timestamp{ + timestamp: nil, + hasBeenSet: t.hasBeenSet, + layout: t.layout, + location: nil, + } + if t.timestamp != nil { + tts := *t.timestamp + tc.timestamp = &tts + } + if t.location != nil { + loc := *t.location + tc.location = &loc + } + return tc +} + +// TakesValue returns true of the flag takes a value, otherwise false +func (f *TimestampFlag) TakesValue() bool { + return true +} + +// GetUsage returns the usage string for the flag +func (f *TimestampFlag) GetUsage() string { 
+ return f.Usage +} + +// GetCategory returns the category for the flag +func (f *TimestampFlag) GetCategory() string { + return f.Category +} + +// GetValue returns the flags value as string representation and an empty +// string if the flag takes no value at all. +func (f *TimestampFlag) GetValue() string { + if f.Value != nil && f.Value.timestamp != nil { + return f.Value.timestamp.String() + } + return "" +} + +// GetDefaultText returns the default text for this flag +func (f *TimestampFlag) GetDefaultText() string { + if f.DefaultText != "" { + return f.DefaultText + } + val := f.Value + if f.defaultValueSet { + val = f.defaultValue + } + + if val != nil && val.timestamp != nil { + return val.timestamp.String() + } + + return "" +} + +// GetEnvVars returns the env vars for this flag +func (f *TimestampFlag) GetEnvVars() []string { + return f.EnvVars +} + +// Apply populates the flag given the flag set and environment +func (f *TimestampFlag) Apply(set *flag.FlagSet) error { + if f.Layout == "" { + return fmt.Errorf("timestamp Layout is required") + } + if f.Value == nil { + f.Value = &Timestamp{} + } + f.Value.SetLayout(f.Layout) + f.Value.SetLocation(f.Timezone) + + f.defaultValue = f.Value.clone() + f.defaultValueSet = true + + if val, source, found := flagFromEnvOrFile(f.EnvVars, f.FilePath); found { + if err := f.Value.Set(val); err != nil { + return fmt.Errorf("could not parse %q as timestamp value from %s for flag %s: %s", val, source, f.Name, err) + } + f.HasBeenSet = true + } + + if f.Destination != nil { + *f.Destination = *f.Value + } + + for _, name := range f.Names() { + if f.Destination != nil { + set.Var(f.Destination, name, f.Usage) + continue + } + + set.Var(f.Value, name, f.Usage) + } + return nil +} + +// Get returns the flag’s value in the given Context. 
+func (f *TimestampFlag) Get(ctx *Context) *time.Time { + return ctx.Timestamp(f.Name) +} + +// RunAction executes flag action if set +func (f *TimestampFlag) RunAction(c *Context) error { + if f.Action != nil { + return f.Action(c, c.Timestamp(f.Name)) + } + + return nil +} + +// Timestamp gets the timestamp from a flag name +func (cCtx *Context) Timestamp(name string) *time.Time { + if fs := cCtx.lookupFlagSet(name); fs != nil { + return lookupTimestamp(name, fs) + } + return nil +} + +// Fetches the timestamp value from the local timestampWrap +func lookupTimestamp(name string, set *flag.FlagSet) *time.Time { + f := set.Lookup(name) + if f != nil { + return (f.Value.(*Timestamp)).Value() + } + return nil +} diff --git a/vendor/github.com/urfave/cli/v2/flag_uint.go b/vendor/github.com/urfave/cli/v2/flag_uint.go new file mode 100644 index 000000000..8d5b85458 --- /dev/null +++ b/vendor/github.com/urfave/cli/v2/flag_uint.go @@ -0,0 +1,108 @@ +package cli + +import ( + "flag" + "fmt" + "strconv" +) + +// TakesValue returns true of the flag takes a value, otherwise false +func (f *UintFlag) TakesValue() bool { + return true +} + +// GetUsage returns the usage string for the flag +func (f *UintFlag) GetUsage() string { + return f.Usage +} + +// GetCategory returns the category for the flag +func (f *UintFlag) GetCategory() string { + return f.Category +} + +// Apply populates the flag given the flag set and environment +func (f *UintFlag) Apply(set *flag.FlagSet) error { + // set default value so that environment wont be able to overwrite it + f.defaultValue = f.Value + f.defaultValueSet = true + + if val, source, found := flagFromEnvOrFile(f.EnvVars, f.FilePath); found { + if val != "" { + valInt, err := strconv.ParseUint(val, f.Base, 64) + if err != nil { + return fmt.Errorf("could not parse %q as uint value from %s for flag %s: %s", val, source, f.Name, err) + } + + f.Value = uint(valInt) + f.HasBeenSet = true + } + } + + for _, name := range f.Names() { + if 
f.Destination != nil { + set.UintVar(f.Destination, name, f.Value, f.Usage) + continue + } + set.Uint(name, f.Value, f.Usage) + } + + return nil +} + +// RunAction executes flag action if set +func (f *UintFlag) RunAction(c *Context) error { + if f.Action != nil { + return f.Action(c, c.Uint(f.Name)) + } + + return nil +} + +// GetValue returns the flags value as string representation and an empty +// string if the flag takes no value at all. +func (f *UintFlag) GetValue() string { + return fmt.Sprintf("%d", f.Value) +} + +// GetDefaultText returns the default text for this flag +func (f *UintFlag) GetDefaultText() string { + if f.DefaultText != "" { + return f.DefaultText + } + if f.defaultValueSet { + return fmt.Sprintf("%d", f.defaultValue) + } + return fmt.Sprintf("%d", f.Value) +} + +// GetEnvVars returns the env vars for this flag +func (f *UintFlag) GetEnvVars() []string { + return f.EnvVars +} + +// Get returns the flag’s value in the given Context. +func (f *UintFlag) Get(ctx *Context) uint { + return ctx.Uint(f.Name) +} + +// Uint looks up the value of a local UintFlag, returns +// 0 if not found +func (cCtx *Context) Uint(name string) uint { + if fs := cCtx.lookupFlagSet(name); fs != nil { + return lookupUint(name, fs) + } + return 0 +} + +func lookupUint(name string, set *flag.FlagSet) uint { + f := set.Lookup(name) + if f != nil { + parsed, err := strconv.ParseUint(f.Value.String(), 0, 64) + if err != nil { + return 0 + } + return uint(parsed) + } + return 0 +} diff --git a/vendor/github.com/urfave/cli/v2/flag_uint64.go b/vendor/github.com/urfave/cli/v2/flag_uint64.go new file mode 100644 index 000000000..c356e533b --- /dev/null +++ b/vendor/github.com/urfave/cli/v2/flag_uint64.go @@ -0,0 +1,108 @@ +package cli + +import ( + "flag" + "fmt" + "strconv" +) + +// TakesValue returns true of the flag takes a value, otherwise false +func (f *Uint64Flag) TakesValue() bool { + return true +} + +// GetUsage returns the usage string for the flag +func (f 
*Uint64Flag) GetUsage() string { + return f.Usage +} + +// GetCategory returns the category for the flag +func (f *Uint64Flag) GetCategory() string { + return f.Category +} + +// Apply populates the flag given the flag set and environment +func (f *Uint64Flag) Apply(set *flag.FlagSet) error { + // set default value so that environment wont be able to overwrite it + f.defaultValue = f.Value + f.defaultValueSet = true + + if val, source, found := flagFromEnvOrFile(f.EnvVars, f.FilePath); found { + if val != "" { + valInt, err := strconv.ParseUint(val, f.Base, 64) + if err != nil { + return fmt.Errorf("could not parse %q as uint64 value from %s for flag %s: %s", val, source, f.Name, err) + } + + f.Value = valInt + f.HasBeenSet = true + } + } + + for _, name := range f.Names() { + if f.Destination != nil { + set.Uint64Var(f.Destination, name, f.Value, f.Usage) + continue + } + set.Uint64(name, f.Value, f.Usage) + } + + return nil +} + +// RunAction executes flag action if set +func (f *Uint64Flag) RunAction(c *Context) error { + if f.Action != nil { + return f.Action(c, c.Uint64(f.Name)) + } + + return nil +} + +// GetValue returns the flags value as string representation and an empty +// string if the flag takes no value at all. +func (f *Uint64Flag) GetValue() string { + return fmt.Sprintf("%d", f.Value) +} + +// GetDefaultText returns the default text for this flag +func (f *Uint64Flag) GetDefaultText() string { + if f.DefaultText != "" { + return f.DefaultText + } + if f.defaultValueSet { + return fmt.Sprintf("%d", f.defaultValue) + } + return fmt.Sprintf("%d", f.Value) +} + +// GetEnvVars returns the env vars for this flag +func (f *Uint64Flag) GetEnvVars() []string { + return f.EnvVars +} + +// Get returns the flag’s value in the given Context. 
+func (f *Uint64Flag) Get(ctx *Context) uint64 { + return ctx.Uint64(f.Name) +} + +// Uint64 looks up the value of a local Uint64Flag, returns +// 0 if not found +func (cCtx *Context) Uint64(name string) uint64 { + if fs := cCtx.lookupFlagSet(name); fs != nil { + return lookupUint64(name, fs) + } + return 0 +} + +func lookupUint64(name string, set *flag.FlagSet) uint64 { + f := set.Lookup(name) + if f != nil { + parsed, err := strconv.ParseUint(f.Value.String(), 0, 64) + if err != nil { + return 0 + } + return parsed + } + return 0 +} diff --git a/vendor/github.com/urfave/cli/v2/flag_uint64_slice.go b/vendor/github.com/urfave/cli/v2/flag_uint64_slice.go new file mode 100644 index 000000000..d34201868 --- /dev/null +++ b/vendor/github.com/urfave/cli/v2/flag_uint64_slice.go @@ -0,0 +1,219 @@ +package cli + +import ( + "encoding/json" + "flag" + "fmt" + "strconv" + "strings" +) + +// Uint64Slice wraps []int64 to satisfy flag.Value +type Uint64Slice struct { + slice []uint64 + separator separatorSpec + hasBeenSet bool +} + +// NewUint64Slice makes an *Uint64Slice with default values +func NewUint64Slice(defaults ...uint64) *Uint64Slice { + return &Uint64Slice{slice: append([]uint64{}, defaults...)} +} + +// clone allocate a copy of self object +func (i *Uint64Slice) clone() *Uint64Slice { + n := &Uint64Slice{ + slice: make([]uint64, len(i.slice)), + hasBeenSet: i.hasBeenSet, + } + copy(n.slice, i.slice) + return n +} + +// Set parses the value into an integer and appends it to the list of values +func (i *Uint64Slice) Set(value string) error { + if !i.hasBeenSet { + i.slice = []uint64{} + i.hasBeenSet = true + } + + if strings.HasPrefix(value, slPfx) { + // Deserializing assumes overwrite + _ = json.Unmarshal([]byte(strings.Replace(value, slPfx, "", 1)), &i.slice) + i.hasBeenSet = true + return nil + } + + for _, s := range i.separator.flagSplitMultiValues(value) { + tmp, err := strconv.ParseUint(strings.TrimSpace(s), 0, 64) + if err != nil { + return err + } + + 
i.slice = append(i.slice, tmp) + } + + return nil +} + +func (i *Uint64Slice) WithSeparatorSpec(spec separatorSpec) { + i.separator = spec +} + +// String returns a readable representation of this value (for usage defaults) +func (i *Uint64Slice) String() string { + v := i.slice + if v == nil { + // treat nil the same as zero length non-nil + v = make([]uint64, 0) + } + str := fmt.Sprintf("%d", v) + str = strings.Replace(str, " ", ", ", -1) + str = strings.Replace(str, "[", "{", -1) + str = strings.Replace(str, "]", "}", -1) + return fmt.Sprintf("[]uint64%s", str) +} + +// Serialize allows Uint64Slice to fulfill Serializer +func (i *Uint64Slice) Serialize() string { + jsonBytes, _ := json.Marshal(i.slice) + return fmt.Sprintf("%s%s", slPfx, string(jsonBytes)) +} + +// Value returns the slice of ints set by this flag +func (i *Uint64Slice) Value() []uint64 { + return i.slice +} + +// Get returns the slice of ints set by this flag +func (i *Uint64Slice) Get() interface{} { + return *i +} + +// String returns a readable representation of this value +// (for usage defaults) +func (f *Uint64SliceFlag) String() string { + return FlagStringer(f) +} + +// TakesValue returns true of the flag takes a value, otherwise false +func (f *Uint64SliceFlag) TakesValue() bool { + return true +} + +// GetUsage returns the usage string for the flag +func (f *Uint64SliceFlag) GetUsage() string { + return f.Usage +} + +// GetCategory returns the category for the flag +func (f *Uint64SliceFlag) GetCategory() string { + return f.Category +} + +// GetValue returns the flags value as string representation and an empty +// string if the flag takes no value at all. 
+func (f *Uint64SliceFlag) GetValue() string { + var defaultVals []string + if f.Value != nil && len(f.Value.Value()) > 0 { + for _, i := range f.Value.Value() { + defaultVals = append(defaultVals, strconv.FormatUint(i, 10)) + } + } + return strings.Join(defaultVals, ", ") +} + +// GetDefaultText returns the default text for this flag +func (f *Uint64SliceFlag) GetDefaultText() string { + if f.DefaultText != "" { + return f.DefaultText + } + return f.GetValue() +} + +// GetEnvVars returns the env vars for this flag +func (f *Uint64SliceFlag) GetEnvVars() []string { + return f.EnvVars +} + +// IsSliceFlag implements DocGenerationSliceFlag. +func (f *Uint64SliceFlag) IsSliceFlag() bool { + return true +} + +// Apply populates the flag given the flag set and environment +func (f *Uint64SliceFlag) Apply(set *flag.FlagSet) error { + // apply any default + if f.Destination != nil && f.Value != nil { + f.Destination.slice = make([]uint64, len(f.Value.slice)) + copy(f.Destination.slice, f.Value.slice) + } + + // resolve setValue (what we will assign to the set) + var setValue *Uint64Slice + switch { + case f.Destination != nil: + setValue = f.Destination + case f.Value != nil: + setValue = f.Value.clone() + default: + setValue = new(Uint64Slice) + setValue.WithSeparatorSpec(f.separator) + } + + if val, source, ok := flagFromEnvOrFile(f.EnvVars, f.FilePath); ok && val != "" { + for _, s := range f.separator.flagSplitMultiValues(val) { + if err := setValue.Set(strings.TrimSpace(s)); err != nil { + return fmt.Errorf("could not parse %q as uint64 slice value from %s for flag %s: %s", val, source, f.Name, err) + } + } + + // Set this to false so that we reset the slice if we then set values from + // flags that have already been set by the environment. 
+ setValue.hasBeenSet = false + f.HasBeenSet = true + } + + for _, name := range f.Names() { + set.Var(setValue, name, f.Usage) + } + + return nil +} + +func (f *Uint64SliceFlag) WithSeparatorSpec(spec separatorSpec) { + f.separator = spec +} + +// Get returns the flag’s value in the given Context. +func (f *Uint64SliceFlag) Get(ctx *Context) []uint64 { + return ctx.Uint64Slice(f.Name) +} + +// RunAction executes flag action if set +func (f *Uint64SliceFlag) RunAction(c *Context) error { + if f.Action != nil { + return f.Action(c, c.Uint64Slice(f.Name)) + } + + return nil +} + +// Uint64Slice looks up the value of a local Uint64SliceFlag, returns +// nil if not found +func (cCtx *Context) Uint64Slice(name string) []uint64 { + if fs := cCtx.lookupFlagSet(name); fs != nil { + return lookupUint64Slice(name, fs) + } + return nil +} + +func lookupUint64Slice(name string, set *flag.FlagSet) []uint64 { + f := set.Lookup(name) + if f != nil { + if slice, ok := unwrapFlagValue(f.Value).(*Uint64Slice); ok { + return slice.Value() + } + } + return nil +} diff --git a/vendor/github.com/urfave/cli/v2/flag_uint_slice.go b/vendor/github.com/urfave/cli/v2/flag_uint_slice.go new file mode 100644 index 000000000..4dc13e126 --- /dev/null +++ b/vendor/github.com/urfave/cli/v2/flag_uint_slice.go @@ -0,0 +1,230 @@ +package cli + +import ( + "encoding/json" + "flag" + "fmt" + "strconv" + "strings" +) + +// UintSlice wraps []int to satisfy flag.Value +type UintSlice struct { + slice []uint + separator separatorSpec + hasBeenSet bool +} + +// NewUintSlice makes an *UintSlice with default values +func NewUintSlice(defaults ...uint) *UintSlice { + return &UintSlice{slice: append([]uint{}, defaults...)} +} + +// clone allocate a copy of self object +func (i *UintSlice) clone() *UintSlice { + n := &UintSlice{ + slice: make([]uint, len(i.slice)), + hasBeenSet: i.hasBeenSet, + } + copy(n.slice, i.slice) + return n +} + +// TODO: Consistently have specific Set function for Int64 and Float64 ? 
+// SetInt directly adds an integer to the list of values +func (i *UintSlice) SetUint(value uint) { + if !i.hasBeenSet { + i.slice = []uint{} + i.hasBeenSet = true + } + + i.slice = append(i.slice, value) +} + +// Set parses the value into an integer and appends it to the list of values +func (i *UintSlice) Set(value string) error { + if !i.hasBeenSet { + i.slice = []uint{} + i.hasBeenSet = true + } + + if strings.HasPrefix(value, slPfx) { + // Deserializing assumes overwrite + _ = json.Unmarshal([]byte(strings.Replace(value, slPfx, "", 1)), &i.slice) + i.hasBeenSet = true + return nil + } + + for _, s := range i.separator.flagSplitMultiValues(value) { + tmp, err := strconv.ParseUint(strings.TrimSpace(s), 0, 32) + if err != nil { + return err + } + + i.slice = append(i.slice, uint(tmp)) + } + + return nil +} + +func (i *UintSlice) WithSeparatorSpec(spec separatorSpec) { + i.separator = spec +} + +// String returns a readable representation of this value (for usage defaults) +func (i *UintSlice) String() string { + v := i.slice + if v == nil { + // treat nil the same as zero length non-nil + v = make([]uint, 0) + } + str := fmt.Sprintf("%d", v) + str = strings.Replace(str, " ", ", ", -1) + str = strings.Replace(str, "[", "{", -1) + str = strings.Replace(str, "]", "}", -1) + return fmt.Sprintf("[]uint%s", str) +} + +// Serialize allows UintSlice to fulfill Serializer +func (i *UintSlice) Serialize() string { + jsonBytes, _ := json.Marshal(i.slice) + return fmt.Sprintf("%s%s", slPfx, string(jsonBytes)) +} + +// Value returns the slice of ints set by this flag +func (i *UintSlice) Value() []uint { + return i.slice +} + +// Get returns the slice of ints set by this flag +func (i *UintSlice) Get() interface{} { + return *i +} + +// String returns a readable representation of this value +// (for usage defaults) +func (f *UintSliceFlag) String() string { + return FlagStringer(f) +} + +// TakesValue returns true of the flag takes a value, otherwise false +func (f 
*UintSliceFlag) TakesValue() bool { + return true +} + +// GetUsage returns the usage string for the flag +func (f *UintSliceFlag) GetUsage() string { + return f.Usage +} + +// GetCategory returns the category for the flag +func (f *UintSliceFlag) GetCategory() string { + return f.Category +} + +// GetValue returns the flags value as string representation and an empty +// string if the flag takes no value at all. +func (f *UintSliceFlag) GetValue() string { + var defaultVals []string + if f.Value != nil && len(f.Value.Value()) > 0 { + for _, i := range f.Value.Value() { + defaultVals = append(defaultVals, strconv.FormatUint(uint64(i), 10)) + } + } + return strings.Join(defaultVals, ", ") +} + +// GetDefaultText returns the default text for this flag +func (f *UintSliceFlag) GetDefaultText() string { + if f.DefaultText != "" { + return f.DefaultText + } + return f.GetValue() +} + +// GetEnvVars returns the env vars for this flag +func (f *UintSliceFlag) GetEnvVars() []string { + return f.EnvVars +} + +// IsSliceFlag implements DocGenerationSliceFlag. 
+func (f *UintSliceFlag) IsSliceFlag() bool { + return true +} + +// Apply populates the flag given the flag set and environment +func (f *UintSliceFlag) Apply(set *flag.FlagSet) error { + // apply any default + if f.Destination != nil && f.Value != nil { + f.Destination.slice = make([]uint, len(f.Value.slice)) + copy(f.Destination.slice, f.Value.slice) + } + + // resolve setValue (what we will assign to the set) + var setValue *UintSlice + switch { + case f.Destination != nil: + setValue = f.Destination + case f.Value != nil: + setValue = f.Value.clone() + default: + setValue = new(UintSlice) + setValue.WithSeparatorSpec(f.separator) + } + + if val, source, ok := flagFromEnvOrFile(f.EnvVars, f.FilePath); ok && val != "" { + for _, s := range f.separator.flagSplitMultiValues(val) { + if err := setValue.Set(strings.TrimSpace(s)); err != nil { + return fmt.Errorf("could not parse %q as uint slice value from %s for flag %s: %s", val, source, f.Name, err) + } + } + + // Set this to false so that we reset the slice if we then set values from + // flags that have already been set by the environment. + setValue.hasBeenSet = false + f.HasBeenSet = true + } + + for _, name := range f.Names() { + set.Var(setValue, name, f.Usage) + } + + return nil +} + +func (f *UintSliceFlag) WithSeparatorSpec(spec separatorSpec) { + f.separator = spec +} + +// Get returns the flag’s value in the given Context. 
+func (f *UintSliceFlag) Get(ctx *Context) []uint { + return ctx.UintSlice(f.Name) +} + +// RunAction executes flag action if set +func (f *UintSliceFlag) RunAction(c *Context) error { + if f.Action != nil { + return f.Action(c, c.UintSlice(f.Name)) + } + + return nil +} + +// UintSlice looks up the value of a local UintSliceFlag, returns +// nil if not found +func (cCtx *Context) UintSlice(name string) []uint { + if fs := cCtx.lookupFlagSet(name); fs != nil { + return lookupUintSlice(name, fs) + } + return nil +} + +func lookupUintSlice(name string, set *flag.FlagSet) []uint { + f := set.Lookup(name) + if f != nil { + if slice, ok := unwrapFlagValue(f.Value).(*UintSlice); ok { + return slice.Value() + } + } + return nil +} diff --git a/vendor/github.com/urfave/cli/v2/funcs.go b/vendor/github.com/urfave/cli/v2/funcs.go new file mode 100644 index 000000000..e77b0d0a1 --- /dev/null +++ b/vendor/github.com/urfave/cli/v2/funcs.go @@ -0,0 +1,47 @@ +package cli + +// BashCompleteFunc is an action to execute when the shell completion flag is set +type BashCompleteFunc func(*Context) + +// BeforeFunc is an action to execute before any subcommands are run, but after +// the context is ready if a non-nil error is returned, no subcommands are run +type BeforeFunc func(*Context) error + +// AfterFunc is an action to execute after any subcommands are run, but after the +// subcommand has finished it is run even if Action() panics +type AfterFunc func(*Context) error + +// ActionFunc is the action to execute when no subcommands are specified +type ActionFunc func(*Context) error + +// CommandNotFoundFunc is executed if the proper command cannot be found +type CommandNotFoundFunc func(*Context, string) + +// OnUsageErrorFunc is executed if a usage error occurs. This is useful for displaying +// customized usage error messages. This function is able to replace the +// original error messages. 
If this function is not set, the "Incorrect usage" +// is displayed and the execution is interrupted. +type OnUsageErrorFunc func(cCtx *Context, err error, isSubcommand bool) error + +// InvalidFlagAccessFunc is executed when an invalid flag is accessed from the context. +type InvalidFlagAccessFunc func(*Context, string) + +// ExitErrHandlerFunc is executed if provided in order to handle exitError values +// returned by Actions and Before/After functions. +type ExitErrHandlerFunc func(cCtx *Context, err error) + +// FlagStringFunc is used by the help generation to display a flag, which is +// expected to be a single line. +type FlagStringFunc func(Flag) string + +// FlagNamePrefixFunc is used by the default FlagStringFunc to create prefix +// text for a flag's full name. +type FlagNamePrefixFunc func(fullName []string, placeholder string) string + +// FlagEnvHintFunc is used by the default FlagStringFunc to annotate flag help +// with the environment variable details. +type FlagEnvHintFunc func(envVars []string, str string) string + +// FlagFileHintFunc is used by the default FlagStringFunc to annotate flag help +// with the file path details. +type FlagFileHintFunc func(filePath, str string) string diff --git a/vendor/github.com/urfave/cli/v2/godoc-current.txt b/vendor/github.com/urfave/cli/v2/godoc-current.txt new file mode 100644 index 000000000..2f3d76e31 --- /dev/null +++ b/vendor/github.com/urfave/cli/v2/godoc-current.txt @@ -0,0 +1,2727 @@ +package cli // import "github.com/urfave/cli/v2" + +Package cli provides a minimal framework for creating and organizing command +line Go applications. 
cli is designed to be easy to understand and write, +the most simple cli application can be written as follows: + + func main() { + (&cli.App{}).Run(os.Args) + } + +Of course this application does not do much, so let's make this an actual +application: + + func main() { + app := &cli.App{ + Name: "greet", + Usage: "say a greeting", + Action: func(c *cli.Context) error { + fmt.Println("Greetings") + return nil + }, + } + + app.Run(os.Args) + } + +VARIABLES + +var ( + SuggestFlag SuggestFlagFunc = nil // initialized in suggestions.go unless built with urfave_cli_no_suggest + SuggestCommand SuggestCommandFunc = nil // initialized in suggestions.go unless built with urfave_cli_no_suggest + SuggestDidYouMeanTemplate string = suggestDidYouMeanTemplate +) +var AppHelpTemplate = `NAME: + {{template "helpNameTemplate" .}} + +USAGE: + {{if .UsageText}}{{wrap .UsageText 3}}{{else}}{{.HelpName}} {{if .VisibleFlags}}[global options]{{end}}{{if .Commands}} command [command options]{{end}}{{if .ArgsUsage}} {{.ArgsUsage}}{{else}}{{if .Args}} [arguments...]{{end}}{{end}}{{end}}{{if .Version}}{{if not .HideVersion}} + +VERSION: + {{.Version}}{{end}}{{end}}{{if .Description}} + +DESCRIPTION: + {{template "descriptionTemplate" .}}{{end}} +{{- if len .Authors}} + +AUTHOR{{template "authorsTemplate" .}}{{end}}{{if .VisibleCommands}} + +COMMANDS:{{template "visibleCommandCategoryTemplate" .}}{{end}}{{if .VisibleFlagCategories}} + +GLOBAL OPTIONS:{{template "visibleFlagCategoryTemplate" .}}{{else if .VisibleFlags}} + +GLOBAL OPTIONS:{{template "visibleFlagTemplate" .}}{{end}}{{if .Copyright}} + +COPYRIGHT: + {{template "copyrightTemplate" .}}{{end}} +` + AppHelpTemplate is the text template for the Default help topic. cli.go + uses text/template to render templates. You can render custom help text by + setting this variable. 
+ +var CommandHelpTemplate = `NAME: + {{template "helpNameTemplate" .}} + +USAGE: + {{template "usageTemplate" .}}{{if .Category}} + +CATEGORY: + {{.Category}}{{end}}{{if .Description}} + +DESCRIPTION: + {{template "descriptionTemplate" .}}{{end}}{{if .VisibleFlagCategories}} + +OPTIONS:{{template "visibleFlagCategoryTemplate" .}}{{else if .VisibleFlags}} + +OPTIONS:{{template "visibleFlagTemplate" .}}{{end}} +` + CommandHelpTemplate is the text template for the command help topic. cli.go + uses text/template to render templates. You can render custom help text by + setting this variable. + +var ErrWriter io.Writer = os.Stderr + ErrWriter is used to write errors to the user. This can be anything + implementing the io.Writer interface and defaults to os.Stderr. + +var FishCompletionTemplate = `# {{ .App.Name }} fish shell completion + +function __fish_{{ .App.Name }}_no_subcommand --description 'Test if there has been any subcommand yet' + for i in (commandline -opc) + if contains -- $i{{ range $v := .AllCommands }} {{ $v }}{{ end }} + return 1 + end + end + return 0 +end + +{{ range $v := .Completions }}{{ $v }} +{{ end }}` +var MarkdownDocTemplate = `{{if gt .SectionNum 0}}% {{ .App.Name }} {{ .SectionNum }} + +{{end}}# NAME + +{{ .App.Name }}{{ if .App.Usage }} - {{ .App.Usage }}{{ end }} + +# SYNOPSIS + +{{ .App.Name }} +{{ if .SynopsisArgs }} +` + "```" + ` +{{ range $v := .SynopsisArgs }}{{ $v }}{{ end }}` + "```" + ` +{{ end }}{{ if .App.Description }} +# DESCRIPTION + +{{ .App.Description }} +{{ end }} +**Usage**: + +` + "```" + `{{ if .App.UsageText }} +{{ .App.UsageText }} +{{ else }} +{{ .App.Name }} [GLOBAL OPTIONS] command [COMMAND OPTIONS] [ARGUMENTS...] +{{ end }}` + "```" + ` +{{ if .GlobalArgs }} +# GLOBAL OPTIONS +{{ range $v := .GlobalArgs }} +{{ $v }}{{ end }} +{{ end }}{{ if .Commands }} +# COMMANDS +{{ range $v := .Commands }} +{{ $v }}{{ end }}{{ end }}` +var OsExiter = os.Exit + OsExiter is the function used when the app exits. 
If not set defaults to + os.Exit. + +var SubcommandHelpTemplate = `NAME: + {{template "helpNameTemplate" .}} + +USAGE: + {{template "usageTemplate" .}}{{if .Category}} + +CATEGORY: + {{.Category}}{{end}}{{if .Description}} + +DESCRIPTION: + {{template "descriptionTemplate" .}}{{end}}{{if .VisibleCommands}} + +COMMANDS:{{template "visibleCommandCategoryTemplate" .}}{{end}}{{if .VisibleFlagCategories}} + +OPTIONS:{{template "visibleFlagCategoryTemplate" .}}{{else if .VisibleFlags}} + +OPTIONS:{{template "visibleFlagTemplate" .}}{{end}} +` + SubcommandHelpTemplate is the text template for the subcommand help topic. + cli.go uses text/template to render templates. You can render custom help + text by setting this variable. + +var VersionPrinter = printVersion + VersionPrinter prints the version for the App + +var HelpPrinter helpPrinter = printHelp + HelpPrinter is a function that writes the help output. If not set + explicitly, this calls HelpPrinterCustom using only the default template + functions. + + If custom logic for printing help is required, this function can be + overridden. If the ExtraInfo field is defined on an App, this function + should not be modified, as HelpPrinterCustom will be used directly in order + to capture the extra information. + +var HelpPrinterCustom helpPrinterCustom = printHelpCustom + HelpPrinterCustom is a function that writes the help output. It is used as + the default implementation of HelpPrinter, and may be called directly if the + ExtraInfo field is set on an App. + + In the default implementation, if the customFuncs argument contains a + "wrapAt" key, which is a function which takes no arguments and returns an + int, this int value will be used to produce a "wrap" function used by the + default template to wrap long lines. 
+ + +FUNCTIONS + +func DefaultAppComplete(cCtx *Context) + DefaultAppComplete prints the list of subcommands as the default app + completion method + +func DefaultCompleteWithFlags(cmd *Command) func(cCtx *Context) +func FlagNames(name string, aliases []string) []string +func HandleAction(action interface{}, cCtx *Context) (err error) + HandleAction attempts to figure out which Action signature was used. + If it's an ActionFunc or a func with the legacy signature for Action, + the func is run! + +func HandleExitCoder(err error) + HandleExitCoder handles errors implementing ExitCoder by printing their + message and calling OsExiter with the given exit code. + + If the given error instead implements MultiError, each error will be checked + for the ExitCoder interface, and OsExiter will be called with the last exit + code found, or exit code 1 if no ExitCoder is found. + + This function is the default error-handling behavior for an App. + +func ShowAppHelp(cCtx *Context) error + ShowAppHelp is an action that displays the help. + +func ShowAppHelpAndExit(c *Context, exitCode int) + ShowAppHelpAndExit - Prints the list of subcommands for the app and exits + with exit code. + +func ShowCommandCompletions(ctx *Context, command string) + ShowCommandCompletions prints the custom completions for a given command + +func ShowCommandHelp(ctx *Context, command string) error + ShowCommandHelp prints help for the given command + +func ShowCommandHelpAndExit(c *Context, command string, code int) + ShowCommandHelpAndExit - exits with code after showing help + +func ShowCompletions(cCtx *Context) + ShowCompletions prints the lists of commands within a given context + +func ShowSubcommandHelp(cCtx *Context) error + ShowSubcommandHelp prints help for the given subcommand + +func ShowSubcommandHelpAndExit(c *Context, exitCode int) + ShowSubcommandHelpAndExit - Prints help for the given subcommand and exits + with exit code. 
+ +func ShowVersion(cCtx *Context) + ShowVersion prints the version number of the App + + +TYPES + +type ActionFunc func(*Context) error + ActionFunc is the action to execute when no subcommands are specified + +type ActionableFlag interface { + Flag + RunAction(*Context) error +} + ActionableFlag is an interface that wraps Flag interface and RunAction + operation. + +type AfterFunc func(*Context) error + AfterFunc is an action to execute after any subcommands are run, but after + the subcommand has finished it is run even if Action() panics + +type App struct { + // The name of the program. Defaults to path.Base(os.Args[0]) + Name string + // Full name of command for help, defaults to Name + HelpName string + // Description of the program. + Usage string + // Text to override the USAGE section of help + UsageText string + // Whether this command supports arguments + Args bool + // Description of the program argument format. + ArgsUsage string + // Version of the program + Version string + // Description of the program + Description string + // DefaultCommand is the (optional) name of a command + // to run if no command names are passed as CLI arguments. + DefaultCommand string + // List of commands to execute + Commands []*Command + // List of flags to parse + Flags []Flag + // Boolean to enable bash completion commands + EnableBashCompletion bool + // Boolean to hide built-in help command and help flag + HideHelp bool + // Boolean to hide built-in help command but keep help flag. + // Ignored if HideHelp is true. 
+ HideHelpCommand bool + // Boolean to hide built-in version flag and the VERSION section of help + HideVersion bool + + // An action to execute when the shell completion flag is set + BashComplete BashCompleteFunc + // An action to execute before any subcommands are run, but after the context is ready + // If a non-nil error is returned, no subcommands are run + Before BeforeFunc + // An action to execute after any subcommands are run, but after the subcommand has finished + // It is run even if Action() panics + After AfterFunc + // The action to execute when no subcommands are specified + Action ActionFunc + // Execute this function if the proper command cannot be found + CommandNotFound CommandNotFoundFunc + // Execute this function if a usage error occurs + OnUsageError OnUsageErrorFunc + // Execute this function when an invalid flag is accessed from the context + InvalidFlagAccessHandler InvalidFlagAccessFunc + // Compilation date + Compiled time.Time + // List of all authors who contributed + Authors []*Author + // Copyright of the binary if any + Copyright string + // Reader reader to write input to (useful for tests) + Reader io.Reader + // Writer writer to write output to + Writer io.Writer + // ErrWriter writes error output + ErrWriter io.Writer + // ExitErrHandler processes any error encountered while running an App before + // it is returned to the caller. If no function is provided, HandleExitCoder + // is used as the default behavior. + ExitErrHandler ExitErrHandlerFunc + // Other custom info + Metadata map[string]interface{} + // Carries a function which returns app specific info. + ExtraInfo func() map[string]string + // CustomAppHelpTemplate the text template for app help topic. + // cli.go uses text/template to render templates. You can + // render custom help text by setting this variable. 
+ CustomAppHelpTemplate string + // SliceFlagSeparator is used to customize the separator for SliceFlag, the default is "," + SliceFlagSeparator string + // DisableSliceFlagSeparator is used to disable SliceFlagSeparator, the default is false + DisableSliceFlagSeparator bool + // Boolean to enable short-option handling so user can combine several + // single-character bool arguments into one + // i.e. foobar -o -v -> foobar -ov + UseShortOptionHandling bool + // Enable suggestions for commands and flags + Suggest bool + // Allows global flags set by libraries which use flag.XXXVar(...) directly + // to be parsed through this library + AllowExtFlags bool + // Treat all flags as normal arguments if true + SkipFlagParsing bool + + // Has unexported fields. +} + App is the main structure of a cli application. It is recommended that an + app be created with the cli.NewApp() function + +func NewApp() *App + NewApp creates a new cli Application with some reasonable defaults for Name, + Usage, Version and Action. + +func (a *App) Command(name string) *Command + Command returns the named command on App. Returns nil if the command does + not exist + +func (a *App) Run(arguments []string) (err error) + Run is the entry point to the cli app. Parses the arguments slice and routes + to the proper flag/args combination + +func (a *App) RunAndExitOnError() + RunAndExitOnError calls .Run() and exits non-zero if an error was returned + + Deprecated: instead you should return an error that fulfills cli.ExitCoder + to cli.App.Run. This will cause the application to exit with the given error + code in the cli.ExitCoder + +func (a *App) RunAsSubcommand(ctx *Context) (err error) + RunAsSubcommand is for legacy/compatibility purposes only. New code should + only use App.RunContext. This function is slated to be removed in v3. 
+ +func (a *App) RunContext(ctx context.Context, arguments []string) (err error) + RunContext is like Run except it takes a Context that will be passed to + its commands and sub-commands. Through this, you can propagate timeouts and + cancellation requests + +func (a *App) Setup() + Setup runs initialization code to ensure all data structures are ready + for `Run` or inspection prior to `Run`. It is internally called by `Run`, + but will return early if setup has already happened. + +func (a *App) ToFishCompletion() (string, error) + ToFishCompletion creates a fish completion string for the `*App` The + function errors if either parsing or writing of the string fails. + +func (a *App) ToMan() (string, error) + ToMan creates a man page string for the `*App` The function errors if either + parsing or writing of the string fails. + +func (a *App) ToManWithSection(sectionNumber int) (string, error) + ToMan creates a man page string with section number for the `*App` The + function errors if either parsing or writing of the string fails. + +func (a *App) ToMarkdown() (string, error) + ToMarkdown creates a markdown string for the `*App` The function errors if + either parsing or writing of the string fails. 
+ +func (a *App) VisibleCategories() []CommandCategory + VisibleCategories returns a slice of categories and commands that are + Hidden=false + +func (a *App) VisibleCommands() []*Command + VisibleCommands returns a slice of the Commands with Hidden=false + +func (a *App) VisibleFlagCategories() []VisibleFlagCategory + VisibleFlagCategories returns a slice containing all the categories with the + flags they contain + +func (a *App) VisibleFlags() []Flag + VisibleFlags returns a slice of the Flags with Hidden=false + +type Args interface { + // Get returns the nth argument, or else a blank string + Get(n int) string + // First returns the first argument, or else a blank string + First() string + // Tail returns the rest of the arguments (not the first one) + // or else an empty string slice + Tail() []string + // Len returns the length of the wrapped slice + Len() int + // Present checks if there are any arguments present + Present() bool + // Slice returns a copy of the internal slice + Slice() []string +} + +type Author struct { + Name string // The Authors name + Email string // The Authors email +} + Author represents someone who has contributed to a cli project. + +func (a *Author) String() string + String makes Author comply to the Stringer interface, to allow an easy print + in the templating process + +type BashCompleteFunc func(*Context) + BashCompleteFunc is an action to execute when the shell completion flag is + set + +type BeforeFunc func(*Context) error + BeforeFunc is an action to execute before any subcommands are run, but after + the context is ready if a non-nil error is returned, no subcommands are run + +type BoolFlag struct { + Name string + + Category string + DefaultText string + FilePath string + Usage string + + Required bool + Hidden bool + HasBeenSet bool + + Value bool + Destination *bool + + Aliases []string + EnvVars []string + + Count *int + + DisableDefaultText bool + + Action func(*Context, bool) error + // Has unexported fields. 
+} + BoolFlag is a flag with type bool + +func (f *BoolFlag) Apply(set *flag.FlagSet) error + Apply populates the flag given the flag set and environment + +func (f *BoolFlag) Get(ctx *Context) bool + Get returns the flag’s value in the given Context. + +func (f *BoolFlag) GetCategory() string + GetCategory returns the category for the flag + +func (f *BoolFlag) GetDefaultText() string + GetDefaultText returns the default text for this flag + +func (f *BoolFlag) GetEnvVars() []string + GetEnvVars returns the env vars for this flag + +func (f *BoolFlag) GetUsage() string + GetUsage returns the usage string for the flag + +func (f *BoolFlag) GetValue() string + GetValue returns the flags value as string representation and an empty + string if the flag takes no value at all. + +func (f *BoolFlag) IsRequired() bool + IsRequired returns whether or not the flag is required + +func (f *BoolFlag) IsSet() bool + IsSet returns whether or not the flag has been set through env or file + +func (f *BoolFlag) IsVisible() bool + IsVisible returns true if the flag is not hidden, otherwise false + +func (f *BoolFlag) Names() []string + Names returns the names of the flag + +func (f *BoolFlag) RunAction(c *Context) error + RunAction executes flag action if set + +func (f *BoolFlag) String() string + String returns a readable representation of this value (for usage defaults) + +func (f *BoolFlag) TakesValue() bool + TakesValue returns true of the flag takes a value, otherwise false + +type CategorizableFlag interface { + VisibleFlag + + GetCategory() string +} + CategorizableFlag is an interface that allows us to potentially use a flag + in a categorized representation. 
+ +type Command struct { + // The name of the command + Name string + // A list of aliases for the command + Aliases []string + // A short description of the usage of this command + Usage string + // Custom text to show on USAGE section of help + UsageText string + // A longer explanation of how the command works + Description string + // Whether this command supports arguments + Args bool + // A short description of the arguments of this command + ArgsUsage string + // The category the command is part of + Category string + // The function to call when checking for bash command completions + BashComplete BashCompleteFunc + // An action to execute before any sub-subcommands are run, but after the context is ready + // If a non-nil error is returned, no sub-subcommands are run + Before BeforeFunc + // An action to execute after any subcommands are run, but after the subcommand has finished + // It is run even if Action() panics + After AfterFunc + // The function to call when this command is invoked + Action ActionFunc + // Execute this function if a usage error occurs. + OnUsageError OnUsageErrorFunc + // List of child commands + Subcommands []*Command + // List of flags to parse + Flags []Flag + + // Treat all flags as normal arguments if true + SkipFlagParsing bool + // Boolean to hide built-in help command and help flag + HideHelp bool + // Boolean to hide built-in help command but keep help flag + // Ignored if HideHelp is true. + HideHelpCommand bool + // Boolean to hide this command from help or completion + Hidden bool + // Boolean to enable short-option handling so user can combine several + // single-character bool arguments into one + // i.e. foobar -o -v -> foobar -ov + UseShortOptionHandling bool + + // Full name of command for help, defaults to full command name, including parent commands. + HelpName string + + // CustomHelpTemplate the text template for the command help topic. + // cli.go uses text/template to render templates. 
You can + // render custom help text by setting this variable. + CustomHelpTemplate string + + // Has unexported fields. +} + Command is a subcommand for a cli.App. + +func (cmd *Command) Command(name string) *Command + +func (c *Command) FullName() string + FullName returns the full name of the command. For subcommands this ensures + that parent commands are part of the command path + +func (c *Command) HasName(name string) bool + HasName returns true if Command.Name matches given name + +func (c *Command) Names() []string + Names returns the names including short names and aliases. + +func (c *Command) Run(cCtx *Context, arguments ...string) (err error) + +func (c *Command) VisibleCategories() []CommandCategory + VisibleCategories returns a slice of categories and commands that are + Hidden=false + +func (c *Command) VisibleCommands() []*Command + VisibleCommands returns a slice of the Commands with Hidden=false + +func (c *Command) VisibleFlagCategories() []VisibleFlagCategory + VisibleFlagCategories returns a slice containing all the visible flag + categories with the flags they contain + +func (c *Command) VisibleFlags() []Flag + VisibleFlags returns a slice of the Flags with Hidden=false + +type CommandCategories interface { + // AddCommand adds a command to a category, creating a new category if necessary. + AddCommand(category string, command *Command) + // Categories returns a slice of categories sorted by name + Categories() []CommandCategory +} + CommandCategories interface allows for category manipulation + +type CommandCategory interface { + // Name returns the category name string + Name() string + // VisibleCommands returns a slice of the Commands with Hidden=false + VisibleCommands() []*Command +} + CommandCategory is a category containing commands. 
+ +type CommandNotFoundFunc func(*Context, string) + CommandNotFoundFunc is executed if the proper command cannot be found + +type Commands []*Command + +type CommandsByName []*Command + +func (c CommandsByName) Len() int + +func (c CommandsByName) Less(i, j int) bool + +func (c CommandsByName) Swap(i, j int) + +type Context struct { + context.Context + App *App + Command *Command + + // Has unexported fields. +} + Context is a type that is passed through to each Handler action in a cli + application. Context can be used to retrieve context-specific args and + parsed command-line options. + +func NewContext(app *App, set *flag.FlagSet, parentCtx *Context) *Context + NewContext creates a new context. For use in when invoking an App or Command + action. + +func (cCtx *Context) Args() Args + Args returns the command line arguments associated with the context. + +func (cCtx *Context) Bool(name string) bool + Bool looks up the value of a local BoolFlag, returns false if not found + +func (cCtx *Context) Count(name string) int + Count returns the num of occurrences of this flag + +func (cCtx *Context) Duration(name string) time.Duration + Duration looks up the value of a local DurationFlag, returns 0 if not found + +func (cCtx *Context) FlagNames() []string + FlagNames returns a slice of flag names used by the this context and all of + its parent contexts. 
+ +func (cCtx *Context) Float64(name string) float64 + Float64 looks up the value of a local Float64Flag, returns 0 if not found + +func (cCtx *Context) Float64Slice(name string) []float64 + Float64Slice looks up the value of a local Float64SliceFlag, returns nil if + not found + +func (cCtx *Context) Generic(name string) interface{} + Generic looks up the value of a local GenericFlag, returns nil if not found + +func (cCtx *Context) Int(name string) int + Int looks up the value of a local IntFlag, returns 0 if not found + +func (cCtx *Context) Int64(name string) int64 + Int64 looks up the value of a local Int64Flag, returns 0 if not found + +func (cCtx *Context) Int64Slice(name string) []int64 + Int64Slice looks up the value of a local Int64SliceFlag, returns nil if not + found + +func (cCtx *Context) IntSlice(name string) []int + IntSlice looks up the value of a local IntSliceFlag, returns nil if not + found + +func (cCtx *Context) IsSet(name string) bool + IsSet determines if the flag was actually set + +func (cCtx *Context) Lineage() []*Context + Lineage returns *this* context and all of its ancestor contexts in order + from child to parent + +func (cCtx *Context) LocalFlagNames() []string + LocalFlagNames returns a slice of flag names used in this context. + +func (cCtx *Context) NArg() int + NArg returns the number of the command line arguments. + +func (cCtx *Context) NumFlags() int + NumFlags returns the number of flags set + +func (cCtx *Context) Path(name string) string + Path looks up the value of a local PathFlag, returns "" if not found + +func (cCtx *Context) Set(name, value string) error + Set sets a context flag to a value. 
+ +func (cCtx *Context) String(name string) string + String looks up the value of a local StringFlag, returns "" if not found + +func (cCtx *Context) StringSlice(name string) []string + StringSlice looks up the value of a local StringSliceFlag, returns nil if + not found + +func (cCtx *Context) Timestamp(name string) *time.Time + Timestamp gets the timestamp from a flag name + +func (cCtx *Context) Uint(name string) uint + Uint looks up the value of a local UintFlag, returns 0 if not found + +func (cCtx *Context) Uint64(name string) uint64 + Uint64 looks up the value of a local Uint64Flag, returns 0 if not found + +func (cCtx *Context) Uint64Slice(name string) []uint64 + Uint64Slice looks up the value of a local Uint64SliceFlag, returns nil if + not found + +func (cCtx *Context) UintSlice(name string) []uint + UintSlice looks up the value of a local UintSliceFlag, returns nil if not + found + +func (cCtx *Context) Value(name string) interface{} + Value returns the value of the flag corresponding to `name` + +type Countable interface { + Count() int +} + Countable is an interface to enable detection of flag values which support + repetitive flags + +type DocGenerationFlag interface { + Flag + + // TakesValue returns true if the flag takes a value, otherwise false + TakesValue() bool + + // GetUsage returns the usage string for the flag + GetUsage() string + + // GetValue returns the flags value as string representation and an empty + // string if the flag takes no value at all. + GetValue() string + + // GetDefaultText returns the default text for this flag + GetDefaultText() string + + // GetEnvVars returns the env vars for this flag + GetEnvVars() []string +} + DocGenerationFlag is an interface that allows documentation generation for + the flag + +type DocGenerationSliceFlag interface { + DocGenerationFlag + + // IsSliceFlag returns true for flags that can be given multiple times. 
+ IsSliceFlag() bool +} + DocGenerationSliceFlag extends DocGenerationFlag for slice-based flags. + +type DurationFlag struct { + Name string + + Category string + DefaultText string + FilePath string + Usage string + + Required bool + Hidden bool + HasBeenSet bool + + Value time.Duration + Destination *time.Duration + + Aliases []string + EnvVars []string + + Action func(*Context, time.Duration) error + // Has unexported fields. +} + DurationFlag is a flag with type time.Duration + +func (f *DurationFlag) Apply(set *flag.FlagSet) error + Apply populates the flag given the flag set and environment + +func (f *DurationFlag) Get(ctx *Context) time.Duration + Get returns the flag’s value in the given Context. + +func (f *DurationFlag) GetCategory() string + GetCategory returns the category for the flag + +func (f *DurationFlag) GetDefaultText() string + GetDefaultText returns the default text for this flag + +func (f *DurationFlag) GetEnvVars() []string + GetEnvVars returns the env vars for this flag + +func (f *DurationFlag) GetUsage() string + GetUsage returns the usage string for the flag + +func (f *DurationFlag) GetValue() string + GetValue returns the flags value as string representation and an empty + string if the flag takes no value at all. 
+ +func (f *DurationFlag) IsRequired() bool + IsRequired returns whether or not the flag is required + +func (f *DurationFlag) IsSet() bool + IsSet returns whether or not the flag has been set through env or file + +func (f *DurationFlag) IsVisible() bool + IsVisible returns true if the flag is not hidden, otherwise false + +func (f *DurationFlag) Names() []string + Names returns the names of the flag + +func (f *DurationFlag) RunAction(c *Context) error + RunAction executes flag action if set + +func (f *DurationFlag) String() string + String returns a readable representation of this value (for usage defaults) + +func (f *DurationFlag) TakesValue() bool + TakesValue returns true of the flag takes a value, otherwise false + +type ErrorFormatter interface { + Format(s fmt.State, verb rune) +} + ErrorFormatter is the interface that will suitably format the error output + +type ExitCoder interface { + error + ExitCode() int +} + ExitCoder is the interface checked by `App` and `Command` for a custom exit + code + +func Exit(message interface{}, exitCode int) ExitCoder + Exit wraps a message and exit code into an error, which by default is + handled with a call to os.Exit during default error handling. + + This is the simplest way to trigger a non-zero exit code for an App + without having to call os.Exit manually. During testing, this behavior + can be avoided by overriding the ExitErrHandler function on an App or the + package-global OsExiter function. + +func NewExitError(message interface{}, exitCode int) ExitCoder + NewExitError calls Exit to create a new ExitCoder. + + Deprecated: This function is a duplicate of Exit and will eventually be + removed. + +type ExitErrHandlerFunc func(cCtx *Context, err error) + ExitErrHandlerFunc is executed if provided in order to handle exitError + values returned by Actions and Before/After functions. 
+ +type Flag interface { + fmt.Stringer + // Apply Flag settings to the given flag set + Apply(*flag.FlagSet) error + Names() []string + IsSet() bool +} + Flag is a common interface related to parsing flags in cli. For more + advanced flag parsing techniques, it is recommended that this interface be + implemented. + +var BashCompletionFlag Flag = &BoolFlag{ + Name: "generate-bash-completion", + Hidden: true, +} + BashCompletionFlag enables bash-completion for all commands and subcommands + +var HelpFlag Flag = &BoolFlag{ + Name: "help", + Aliases: []string{"h"}, + Usage: "show help", + DisableDefaultText: true, +} + HelpFlag prints the help for all commands and subcommands. Set to nil to + disable the flag. The subcommand will still be added unless HideHelp or + HideHelpCommand is set to true. + +var VersionFlag Flag = &BoolFlag{ + Name: "version", + Aliases: []string{"v"}, + Usage: "print the version", + DisableDefaultText: true, +} + VersionFlag prints the version for the application + +type FlagCategories interface { + // AddFlags adds a flag to a category, creating a new category if necessary. + AddFlag(category string, fl Flag) + // VisibleCategories returns a slice of visible flag categories sorted by name + VisibleCategories() []VisibleFlagCategory +} + FlagCategories interface allows for category manipulation + +type FlagEnvHintFunc func(envVars []string, str string) string + FlagEnvHintFunc is used by the default FlagStringFunc to annotate flag help + with the environment variable details. + +var FlagEnvHinter FlagEnvHintFunc = withEnvHint + FlagEnvHinter annotates flag help message with the environment variable + details. This is used by the default FlagStringer. + +type FlagFileHintFunc func(filePath, str string) string + FlagFileHintFunc is used by the default FlagStringFunc to annotate flag help + with the file path details. 
+ +var FlagFileHinter FlagFileHintFunc = withFileHint + FlagFileHinter annotates flag help message with the environment variable + details. This is used by the default FlagStringer. + +type FlagNamePrefixFunc func(fullName []string, placeholder string) string + FlagNamePrefixFunc is used by the default FlagStringFunc to create prefix + text for a flag's full name. + +var FlagNamePrefixer FlagNamePrefixFunc = prefixedNames + FlagNamePrefixer converts a full flag name and its placeholder into the help + message flag prefix. This is used by the default FlagStringer. + +type FlagStringFunc func(Flag) string + FlagStringFunc is used by the help generation to display a flag, which is + expected to be a single line. + +var FlagStringer FlagStringFunc = stringifyFlag + FlagStringer converts a flag definition to a string. This is used by help to + display a flag. + +type FlagsByName []Flag + FlagsByName is a slice of Flag. + +func (f FlagsByName) Len() int + +func (f FlagsByName) Less(i, j int) bool + +func (f FlagsByName) Swap(i, j int) + +type Float64Flag struct { + Name string + + Category string + DefaultText string + FilePath string + Usage string + + Required bool + Hidden bool + HasBeenSet bool + + Value float64 + Destination *float64 + + Aliases []string + EnvVars []string + + Action func(*Context, float64) error + // Has unexported fields. +} + Float64Flag is a flag with type float64 + +func (f *Float64Flag) Apply(set *flag.FlagSet) error + Apply populates the flag given the flag set and environment + +func (f *Float64Flag) Get(ctx *Context) float64 + Get returns the flag’s value in the given Context. 
+ +func (f *Float64Flag) GetCategory() string + GetCategory returns the category for the flag + +func (f *Float64Flag) GetDefaultText() string + GetDefaultText returns the default text for this flag + +func (f *Float64Flag) GetEnvVars() []string + GetEnvVars returns the env vars for this flag + +func (f *Float64Flag) GetUsage() string + GetUsage returns the usage string for the flag + +func (f *Float64Flag) GetValue() string + GetValue returns the flags value as string representation and an empty + string if the flag takes no value at all. + +func (f *Float64Flag) IsRequired() bool + IsRequired returns whether or not the flag is required + +func (f *Float64Flag) IsSet() bool + IsSet returns whether or not the flag has been set through env or file + +func (f *Float64Flag) IsVisible() bool + IsVisible returns true if the flag is not hidden, otherwise false + +func (f *Float64Flag) Names() []string + Names returns the names of the flag + +func (f *Float64Flag) RunAction(c *Context) error + RunAction executes flag action if set + +func (f *Float64Flag) String() string + String returns a readable representation of this value (for usage defaults) + +func (f *Float64Flag) TakesValue() bool + TakesValue returns true of the flag takes a value, otherwise false + +type Float64Slice struct { + // Has unexported fields. 
+} + Float64Slice wraps []float64 to satisfy flag.Value + +func NewFloat64Slice(defaults ...float64) *Float64Slice + NewFloat64Slice makes a *Float64Slice with default values + +func (f *Float64Slice) Get() interface{} + Get returns the slice of float64s set by this flag + +func (f *Float64Slice) Serialize() string + Serialize allows Float64Slice to fulfill Serializer + +func (f *Float64Slice) Set(value string) error + Set parses the value into a float64 and appends it to the list of values + +func (f *Float64Slice) String() string + String returns a readable representation of this value (for usage defaults) + +func (f *Float64Slice) Value() []float64 + Value returns the slice of float64s set by this flag + +func (f *Float64Slice) WithSeparatorSpec(spec separatorSpec) + +type Float64SliceFlag struct { + Name string + + Category string + DefaultText string + FilePath string + Usage string + + Required bool + Hidden bool + HasBeenSet bool + + Value *Float64Slice + Destination *Float64Slice + + Aliases []string + EnvVars []string + + Action func(*Context, []float64) error + // Has unexported fields. +} + Float64SliceFlag is a flag with type *Float64Slice + +func (f *Float64SliceFlag) Apply(set *flag.FlagSet) error + Apply populates the flag given the flag set and environment + +func (f *Float64SliceFlag) Get(ctx *Context) []float64 + Get returns the flag’s value in the given Context. 
+ +func (f *Float64SliceFlag) GetCategory() string + GetCategory returns the category for the flag + +func (f *Float64SliceFlag) GetDefaultText() string + GetDefaultText returns the default text for this flag + +func (f *Float64SliceFlag) GetDestination() []float64 + +func (f *Float64SliceFlag) GetEnvVars() []string + GetEnvVars returns the env vars for this flag + +func (f *Float64SliceFlag) GetUsage() string + GetUsage returns the usage string for the flag + +func (f *Float64SliceFlag) GetValue() string + GetValue returns the flags value as string representation and an empty + string if the flag takes no value at all. + +func (f *Float64SliceFlag) IsRequired() bool + IsRequired returns whether or not the flag is required + +func (f *Float64SliceFlag) IsSet() bool + IsSet returns whether or not the flag has been set through env or file + +func (f *Float64SliceFlag) IsSliceFlag() bool + IsSliceFlag implements DocGenerationSliceFlag. + +func (f *Float64SliceFlag) IsVisible() bool + IsVisible returns true if the flag is not hidden, otherwise false + +func (f *Float64SliceFlag) Names() []string + Names returns the names of the flag + +func (f *Float64SliceFlag) RunAction(c *Context) error + RunAction executes flag action if set + +func (f *Float64SliceFlag) SetDestination(slice []float64) + +func (f *Float64SliceFlag) SetValue(slice []float64) + +func (f *Float64SliceFlag) String() string + String returns a readable representation of this value (for usage defaults) + +func (f *Float64SliceFlag) TakesValue() bool + TakesValue returns true if the flag takes a value, otherwise false + +func (f *Float64SliceFlag) WithSeparatorSpec(spec separatorSpec) + +type Generic interface { + Set(value string) error + String() string +} + Generic is a generic parseable type identified by a specific flag + +type GenericFlag struct { + Name string + + Category string + DefaultText string + FilePath string + Usage string + + Required bool + Hidden bool + HasBeenSet bool + + Value Generic 
+ Destination Generic + + Aliases []string + EnvVars []string + + TakesFile bool + + Action func(*Context, interface{}) error + // Has unexported fields. +} + GenericFlag is a flag with type Generic + +func (f *GenericFlag) Apply(set *flag.FlagSet) error + Apply takes the flagset and calls Set on the generic flag with the value + provided by the user for parsing by the flag + +func (f *GenericFlag) Get(ctx *Context) interface{} + Get returns the flag’s value in the given Context. + +func (f *GenericFlag) GetCategory() string + GetCategory returns the category for the flag + +func (f *GenericFlag) GetDefaultText() string + GetDefaultText returns the default text for this flag + +func (f *GenericFlag) GetEnvVars() []string + GetEnvVars returns the env vars for this flag + +func (f *GenericFlag) GetUsage() string + GetUsage returns the usage string for the flag + +func (f *GenericFlag) GetValue() string + GetValue returns the flags value as string representation and an empty + string if the flag takes no value at all. 
+ +func (f *GenericFlag) IsRequired() bool + IsRequired returns whether or not the flag is required + +func (f *GenericFlag) IsSet() bool + IsSet returns whether or not the flag has been set through env or file + +func (f *GenericFlag) IsVisible() bool + IsVisible returns true if the flag is not hidden, otherwise false + +func (f *GenericFlag) Names() []string + Names returns the names of the flag + +func (f *GenericFlag) RunAction(c *Context) error + RunAction executes flag action if set + +func (f *GenericFlag) String() string + String returns a readable representation of this value (for usage defaults) + +func (f *GenericFlag) TakesValue() bool + TakesValue returns true of the flag takes a value, otherwise false + +type Int64Flag struct { + Name string + + Category string + DefaultText string + FilePath string + Usage string + + Required bool + Hidden bool + HasBeenSet bool + + Value int64 + Destination *int64 + + Aliases []string + EnvVars []string + + Base int + + Action func(*Context, int64) error + // Has unexported fields. +} + Int64Flag is a flag with type int64 + +func (f *Int64Flag) Apply(set *flag.FlagSet) error + Apply populates the flag given the flag set and environment + +func (f *Int64Flag) Get(ctx *Context) int64 + Get returns the flag’s value in the given Context. + +func (f *Int64Flag) GetCategory() string + GetCategory returns the category for the flag + +func (f *Int64Flag) GetDefaultText() string + GetDefaultText returns the default text for this flag + +func (f *Int64Flag) GetEnvVars() []string + GetEnvVars returns the env vars for this flag + +func (f *Int64Flag) GetUsage() string + GetUsage returns the usage string for the flag + +func (f *Int64Flag) GetValue() string + GetValue returns the flags value as string representation and an empty + string if the flag takes no value at all. 
+ +func (f *Int64Flag) IsRequired() bool + IsRequired returns whether or not the flag is required + +func (f *Int64Flag) IsSet() bool + IsSet returns whether or not the flag has been set through env or file + +func (f *Int64Flag) IsVisible() bool + IsVisible returns true if the flag is not hidden, otherwise false + +func (f *Int64Flag) Names() []string + Names returns the names of the flag + +func (f *Int64Flag) RunAction(c *Context) error + RunAction executes flag action if set + +func (f *Int64Flag) String() string + String returns a readable representation of this value (for usage defaults) + +func (f *Int64Flag) TakesValue() bool + TakesValue returns true of the flag takes a value, otherwise false + +type Int64Slice struct { + // Has unexported fields. +} + Int64Slice wraps []int64 to satisfy flag.Value + +func NewInt64Slice(defaults ...int64) *Int64Slice + NewInt64Slice makes an *Int64Slice with default values + +func (i *Int64Slice) Get() interface{} + Get returns the slice of ints set by this flag + +func (i *Int64Slice) Serialize() string + Serialize allows Int64Slice to fulfill Serializer + +func (i *Int64Slice) Set(value string) error + Set parses the value into an integer and appends it to the list of values + +func (i *Int64Slice) String() string + String returns a readable representation of this value (for usage defaults) + +func (i *Int64Slice) Value() []int64 + Value returns the slice of ints set by this flag + +func (i *Int64Slice) WithSeparatorSpec(spec separatorSpec) + +type Int64SliceFlag struct { + Name string + + Category string + DefaultText string + FilePath string + Usage string + + Required bool + Hidden bool + HasBeenSet bool + + Value *Int64Slice + Destination *Int64Slice + + Aliases []string + EnvVars []string + + Action func(*Context, []int64) error + // Has unexported fields. 
+} + Int64SliceFlag is a flag with type *Int64Slice + +func (f *Int64SliceFlag) Apply(set *flag.FlagSet) error + Apply populates the flag given the flag set and environment + +func (f *Int64SliceFlag) Get(ctx *Context) []int64 + Get returns the flag’s value in the given Context. + +func (f *Int64SliceFlag) GetCategory() string + GetCategory returns the category for the flag + +func (f *Int64SliceFlag) GetDefaultText() string + GetDefaultText returns the default text for this flag + +func (f *Int64SliceFlag) GetDestination() []int64 + +func (f *Int64SliceFlag) GetEnvVars() []string + GetEnvVars returns the env vars for this flag + +func (f *Int64SliceFlag) GetUsage() string + GetUsage returns the usage string for the flag + +func (f *Int64SliceFlag) GetValue() string + GetValue returns the flags value as string representation and an empty + string if the flag takes no value at all. + +func (f *Int64SliceFlag) IsRequired() bool + IsRequired returns whether or not the flag is required + +func (f *Int64SliceFlag) IsSet() bool + IsSet returns whether or not the flag has been set through env or file + +func (f *Int64SliceFlag) IsSliceFlag() bool + IsSliceFlag implements DocGenerationSliceFlag. 
+ +func (f *Int64SliceFlag) IsVisible() bool + IsVisible returns true if the flag is not hidden, otherwise false + +func (f *Int64SliceFlag) Names() []string + Names returns the names of the flag + +func (f *Int64SliceFlag) RunAction(c *Context) error + RunAction executes flag action if set + +func (f *Int64SliceFlag) SetDestination(slice []int64) + +func (f *Int64SliceFlag) SetValue(slice []int64) + +func (f *Int64SliceFlag) String() string + String returns a readable representation of this value (for usage defaults) + +func (f *Int64SliceFlag) TakesValue() bool + TakesValue returns true of the flag takes a value, otherwise false + +func (f *Int64SliceFlag) WithSeparatorSpec(spec separatorSpec) + +type IntFlag struct { + Name string + + Category string + DefaultText string + FilePath string + Usage string + + Required bool + Hidden bool + HasBeenSet bool + + Value int + Destination *int + + Aliases []string + EnvVars []string + + Base int + + Action func(*Context, int) error + // Has unexported fields. +} + IntFlag is a flag with type int + +func (f *IntFlag) Apply(set *flag.FlagSet) error + Apply populates the flag given the flag set and environment + +func (f *IntFlag) Get(ctx *Context) int + Get returns the flag’s value in the given Context. + +func (f *IntFlag) GetCategory() string + GetCategory returns the category for the flag + +func (f *IntFlag) GetDefaultText() string + GetDefaultText returns the default text for this flag + +func (f *IntFlag) GetEnvVars() []string + GetEnvVars returns the env vars for this flag + +func (f *IntFlag) GetUsage() string + GetUsage returns the usage string for the flag + +func (f *IntFlag) GetValue() string + GetValue returns the flags value as string representation and an empty + string if the flag takes no value at all. 
+ +func (f *IntFlag) IsRequired() bool + IsRequired returns whether or not the flag is required + +func (f *IntFlag) IsSet() bool + IsSet returns whether or not the flag has been set through env or file + +func (f *IntFlag) IsVisible() bool + IsVisible returns true if the flag is not hidden, otherwise false + +func (f *IntFlag) Names() []string + Names returns the names of the flag + +func (f *IntFlag) RunAction(c *Context) error + RunAction executes flag action if set + +func (f *IntFlag) String() string + String returns a readable representation of this value (for usage defaults) + +func (f *IntFlag) TakesValue() bool + TakesValue returns true of the flag takes a value, otherwise false + +type IntSlice struct { + // Has unexported fields. +} + IntSlice wraps []int to satisfy flag.Value + +func NewIntSlice(defaults ...int) *IntSlice + NewIntSlice makes an *IntSlice with default values + +func (i *IntSlice) Get() interface{} + Get returns the slice of ints set by this flag + +func (i *IntSlice) Serialize() string + Serialize allows IntSlice to fulfill Serializer + +func (i *IntSlice) Set(value string) error + Set parses the value into an integer and appends it to the list of values + +func (i *IntSlice) SetInt(value int) + TODO: Consistently have specific Set function for Int64 and Float64 ? SetInt + directly adds an integer to the list of values + +func (i *IntSlice) String() string + String returns a readable representation of this value (for usage defaults) + +func (i *IntSlice) Value() []int + Value returns the slice of ints set by this flag + +func (i *IntSlice) WithSeparatorSpec(spec separatorSpec) + +type IntSliceFlag struct { + Name string + + Category string + DefaultText string + FilePath string + Usage string + + Required bool + Hidden bool + HasBeenSet bool + + Value *IntSlice + Destination *IntSlice + + Aliases []string + EnvVars []string + + Action func(*Context, []int) error + // Has unexported fields. 
+} + IntSliceFlag is a flag with type *IntSlice + +func (f *IntSliceFlag) Apply(set *flag.FlagSet) error + Apply populates the flag given the flag set and environment + +func (f *IntSliceFlag) Get(ctx *Context) []int + Get returns the flag’s value in the given Context. + +func (f *IntSliceFlag) GetCategory() string + GetCategory returns the category for the flag + +func (f *IntSliceFlag) GetDefaultText() string + GetDefaultText returns the default text for this flag + +func (f *IntSliceFlag) GetDestination() []int + +func (f *IntSliceFlag) GetEnvVars() []string + GetEnvVars returns the env vars for this flag + +func (f *IntSliceFlag) GetUsage() string + GetUsage returns the usage string for the flag + +func (f *IntSliceFlag) GetValue() string + GetValue returns the flags value as string representation and an empty + string if the flag takes no value at all. + +func (f *IntSliceFlag) IsRequired() bool + IsRequired returns whether or not the flag is required + +func (f *IntSliceFlag) IsSet() bool + IsSet returns whether or not the flag has been set through env or file + +func (f *IntSliceFlag) IsSliceFlag() bool + IsSliceFlag implements DocGenerationSliceFlag. 
+ +func (f *IntSliceFlag) IsVisible() bool + IsVisible returns true if the flag is not hidden, otherwise false + +func (f *IntSliceFlag) Names() []string + Names returns the names of the flag + +func (f *IntSliceFlag) RunAction(c *Context) error + RunAction executes flag action if set + +func (f *IntSliceFlag) SetDestination(slice []int) + +func (f *IntSliceFlag) SetValue(slice []int) + +func (f *IntSliceFlag) String() string + String returns a readable representation of this value (for usage defaults) + +func (f *IntSliceFlag) TakesValue() bool + TakesValue returns true of the flag takes a value, otherwise false + +func (f *IntSliceFlag) WithSeparatorSpec(spec separatorSpec) + +type InvalidFlagAccessFunc func(*Context, string) + InvalidFlagAccessFunc is executed when an invalid flag is accessed from the + context. + +type MultiError interface { + error + Errors() []error +} + MultiError is an error that wraps multiple errors. + +type MultiFloat64Flag = SliceFlag[*Float64SliceFlag, []float64, float64] + MultiFloat64Flag extends Float64SliceFlag with support for using slices + directly, as Value and/or Destination. See also SliceFlag. + +type MultiInt64Flag = SliceFlag[*Int64SliceFlag, []int64, int64] + MultiInt64Flag extends Int64SliceFlag with support for using slices + directly, as Value and/or Destination. See also SliceFlag. + +type MultiIntFlag = SliceFlag[*IntSliceFlag, []int, int] + MultiIntFlag extends IntSliceFlag with support for using slices directly, + as Value and/or Destination. See also SliceFlag. + +type MultiStringFlag = SliceFlag[*StringSliceFlag, []string, string] + MultiStringFlag extends StringSliceFlag with support for using slices + directly, as Value and/or Destination. See also SliceFlag. + +type OnUsageErrorFunc func(cCtx *Context, err error, isSubcommand bool) error + OnUsageErrorFunc is executed if a usage error occurs. This is useful for + displaying customized usage error messages. 
This function is able to replace + the original error messages. If this function is not set, the "Incorrect + usage" is displayed and the execution is interrupted. + +type Path = string + +type PathFlag struct { + Name string + + Category string + DefaultText string + FilePath string + Usage string + + Required bool + Hidden bool + HasBeenSet bool + + Value Path + Destination *Path + + Aliases []string + EnvVars []string + + TakesFile bool + + Action func(*Context, Path) error + // Has unexported fields. +} + PathFlag is a flag with type Path + +func (f *PathFlag) Apply(set *flag.FlagSet) error + Apply populates the flag given the flag set and environment + +func (f *PathFlag) Get(ctx *Context) string + Get returns the flag’s value in the given Context. + +func (f *PathFlag) GetCategory() string + GetCategory returns the category for the flag + +func (f *PathFlag) GetDefaultText() string + GetDefaultText returns the default text for this flag + +func (f *PathFlag) GetEnvVars() []string + GetEnvVars returns the env vars for this flag + +func (f *PathFlag) GetUsage() string + GetUsage returns the usage string for the flag + +func (f *PathFlag) GetValue() string + GetValue returns the flags value as string representation and an empty + string if the flag takes no value at all. 
+ +func (f *PathFlag) IsRequired() bool + IsRequired returns whether or not the flag is required + +func (f *PathFlag) IsSet() bool + IsSet returns whether or not the flag has been set through env or file + +func (f *PathFlag) IsVisible() bool + IsVisible returns true if the flag is not hidden, otherwise false + +func (f *PathFlag) Names() []string + Names returns the names of the flag + +func (f *PathFlag) RunAction(c *Context) error + RunAction executes flag action if set + +func (f *PathFlag) String() string + String returns a readable representation of this value (for usage defaults) + +func (f *PathFlag) TakesValue() bool + TakesValue returns true of the flag takes a value, otherwise false + +type RequiredFlag interface { + Flag + + IsRequired() bool +} + RequiredFlag is an interface that allows us to mark flags as required + it allows flags required flags to be backwards compatible with the Flag + interface + +type Serializer interface { + Serialize() string +} + Serializer is used to circumvent the limitations of flag.FlagSet.Set + +type SliceFlag[T SliceFlagTarget[E], S ~[]E, E any] struct { + Target T + Value S + Destination *S +} + SliceFlag extends implementations like StringSliceFlag and IntSliceFlag + with support for using slices directly, as Value and/or Destination. + See also SliceFlagTarget, MultiStringFlag, MultiFloat64Flag, MultiInt64Flag, + MultiIntFlag. 
+ +func (x *SliceFlag[T, S, E]) Apply(set *flag.FlagSet) error + +func (x *SliceFlag[T, S, E]) GetCategory() string + +func (x *SliceFlag[T, S, E]) GetDefaultText() string + +func (x *SliceFlag[T, S, E]) GetDestination() S + +func (x *SliceFlag[T, S, E]) GetEnvVars() []string + +func (x *SliceFlag[T, S, E]) GetUsage() string + +func (x *SliceFlag[T, S, E]) GetValue() string + +func (x *SliceFlag[T, S, E]) IsRequired() bool + +func (x *SliceFlag[T, S, E]) IsSet() bool + +func (x *SliceFlag[T, S, E]) IsVisible() bool + +func (x *SliceFlag[T, S, E]) Names() []string + +func (x *SliceFlag[T, S, E]) SetDestination(slice S) + +func (x *SliceFlag[T, S, E]) SetValue(slice S) + +func (x *SliceFlag[T, S, E]) String() string + +func (x *SliceFlag[T, S, E]) TakesValue() bool + +type SliceFlagTarget[E any] interface { + Flag + RequiredFlag + DocGenerationFlag + VisibleFlag + CategorizableFlag + + // SetValue should propagate the given slice to the target, ideally as a new value. + // Note that a nil slice should nil/clear any existing value (modelled as ~[]E). + SetValue(slice []E) + // SetDestination should propagate the given slice to the target, ideally as a new value. + // Note that a nil slice should nil/clear any existing value (modelled as ~*[]E). + SetDestination(slice []E) + // GetDestination should return the current value referenced by any destination, or nil if nil/unset. + GetDestination() []E +} + SliceFlagTarget models a target implementation for use with SliceFlag. The + three methods, SetValue, SetDestination, and GetDestination, are necessary + to propagate Value and Destination, where Value is propagated inwards + (initially), and Destination is propagated outwards (on every update). 
+ +type StringFlag struct { + Name string + + Category string + DefaultText string + FilePath string + Usage string + + Required bool + Hidden bool + HasBeenSet bool + + Value string + Destination *string + + Aliases []string + EnvVars []string + + TakesFile bool + + Action func(*Context, string) error + // Has unexported fields. +} + StringFlag is a flag with type string + +func (f *StringFlag) Apply(set *flag.FlagSet) error + Apply populates the flag given the flag set and environment + +func (f *StringFlag) Get(ctx *Context) string + Get returns the flag’s value in the given Context. + +func (f *StringFlag) GetCategory() string + GetCategory returns the category for the flag + +func (f *StringFlag) GetDefaultText() string + GetDefaultText returns the default text for this flag + +func (f *StringFlag) GetEnvVars() []string + GetEnvVars returns the env vars for this flag + +func (f *StringFlag) GetUsage() string + GetUsage returns the usage string for the flag + +func (f *StringFlag) GetValue() string + GetValue returns the flags value as string representation and an empty + string if the flag takes no value at all. + +func (f *StringFlag) IsRequired() bool + IsRequired returns whether or not the flag is required + +func (f *StringFlag) IsSet() bool + IsSet returns whether or not the flag has been set through env or file + +func (f *StringFlag) IsVisible() bool + IsVisible returns true if the flag is not hidden, otherwise false + +func (f *StringFlag) Names() []string + Names returns the names of the flag + +func (f *StringFlag) RunAction(c *Context) error + RunAction executes flag action if set + +func (f *StringFlag) String() string + String returns a readable representation of this value (for usage defaults) + +func (f *StringFlag) TakesValue() bool + TakesValue returns true of the flag takes a value, otherwise false + +type StringSlice struct { + // Has unexported fields. 
+} + StringSlice wraps a []string to satisfy flag.Value + +func NewStringSlice(defaults ...string) *StringSlice + NewStringSlice creates a *StringSlice with default values + +func (s *StringSlice) Get() interface{} + Get returns the slice of strings set by this flag + +func (s *StringSlice) Serialize() string + Serialize allows StringSlice to fulfill Serializer + +func (s *StringSlice) Set(value string) error + Set appends the string value to the list of values + +func (s *StringSlice) String() string + String returns a readable representation of this value (for usage defaults) + +func (s *StringSlice) Value() []string + Value returns the slice of strings set by this flag + +func (s *StringSlice) WithSeparatorSpec(spec separatorSpec) + +type StringSliceFlag struct { + Name string + + Category string + DefaultText string + FilePath string + Usage string + + Required bool + Hidden bool + HasBeenSet bool + + Value *StringSlice + Destination *StringSlice + + Aliases []string + EnvVars []string + + TakesFile bool + + Action func(*Context, []string) error + + KeepSpace bool + // Has unexported fields. +} + StringSliceFlag is a flag with type *StringSlice + +func (f *StringSliceFlag) Apply(set *flag.FlagSet) error + Apply populates the flag given the flag set and environment + +func (f *StringSliceFlag) Get(ctx *Context) []string + Get returns the flag’s value in the given Context. 
+ +func (f *StringSliceFlag) GetCategory() string + GetCategory returns the category for the flag + +func (f *StringSliceFlag) GetDefaultText() string + GetDefaultText returns the default text for this flag + +func (f *StringSliceFlag) GetDestination() []string + +func (f *StringSliceFlag) GetEnvVars() []string + GetEnvVars returns the env vars for this flag + +func (f *StringSliceFlag) GetUsage() string + GetUsage returns the usage string for the flag + +func (f *StringSliceFlag) GetValue() string + GetValue returns the flags value as string representation and an empty + string if the flag takes no value at all. + +func (f *StringSliceFlag) IsRequired() bool + IsRequired returns whether or not the flag is required + +func (f *StringSliceFlag) IsSet() bool + IsSet returns whether or not the flag has been set through env or file + +func (f *StringSliceFlag) IsSliceFlag() bool + IsSliceFlag implements DocGenerationSliceFlag. + +func (f *StringSliceFlag) IsVisible() bool + IsVisible returns true if the flag is not hidden, otherwise false + +func (f *StringSliceFlag) Names() []string + Names returns the names of the flag + +func (f *StringSliceFlag) RunAction(c *Context) error + RunAction executes flag action if set + +func (f *StringSliceFlag) SetDestination(slice []string) + +func (f *StringSliceFlag) SetValue(slice []string) + +func (f *StringSliceFlag) String() string + String returns a readable representation of this value (for usage defaults) + +func (f *StringSliceFlag) TakesValue() bool + TakesValue returns true of the flag takes a value, otherwise false + +func (f *StringSliceFlag) WithSeparatorSpec(spec separatorSpec) + +type SuggestCommandFunc func(commands []*Command, provided string) string + +type SuggestFlagFunc func(flags []Flag, provided string, hideHelp bool) string + +type Timestamp struct { + // Has unexported fields. +} + Timestamp wrap to satisfy golang's flag interface. 
+ +func NewTimestamp(timestamp time.Time) *Timestamp + Timestamp constructor + +func (t *Timestamp) Get() interface{} + Get returns the flag structure + +func (t *Timestamp) Set(value string) error + Parses the string value to timestamp + +func (t *Timestamp) SetLayout(layout string) + Set the timestamp string layout for future parsing + +func (t *Timestamp) SetLocation(loc *time.Location) + Set perceived timezone of the to-be parsed time string + +func (t *Timestamp) SetTimestamp(value time.Time) + Set the timestamp value directly + +func (t *Timestamp) String() string + String returns a readable representation of this value (for usage defaults) + +func (t *Timestamp) Value() *time.Time + Value returns the timestamp value stored in the flag + +type TimestampFlag struct { + Name string + + Category string + DefaultText string + FilePath string + Usage string + + Required bool + Hidden bool + HasBeenSet bool + + Value *Timestamp + Destination *Timestamp + + Aliases []string + EnvVars []string + + Layout string + + Timezone *time.Location + + Action func(*Context, *time.Time) error + // Has unexported fields. +} + TimestampFlag is a flag with type *Timestamp + +func (f *TimestampFlag) Apply(set *flag.FlagSet) error + Apply populates the flag given the flag set and environment + +func (f *TimestampFlag) Get(ctx *Context) *time.Time + Get returns the flag’s value in the given Context. + +func (f *TimestampFlag) GetCategory() string + GetCategory returns the category for the flag + +func (f *TimestampFlag) GetDefaultText() string + GetDefaultText returns the default text for this flag + +func (f *TimestampFlag) GetEnvVars() []string + GetEnvVars returns the env vars for this flag + +func (f *TimestampFlag) GetUsage() string + GetUsage returns the usage string for the flag + +func (f *TimestampFlag) GetValue() string + GetValue returns the flags value as string representation and an empty + string if the flag takes no value at all. 
+ +func (f *TimestampFlag) IsRequired() bool + IsRequired returns whether or not the flag is required + +func (f *TimestampFlag) IsSet() bool + IsSet returns whether or not the flag has been set through env or file + +func (f *TimestampFlag) IsVisible() bool + IsVisible returns true if the flag is not hidden, otherwise false + +func (f *TimestampFlag) Names() []string + Names returns the names of the flag + +func (f *TimestampFlag) RunAction(c *Context) error + RunAction executes flag action if set + +func (f *TimestampFlag) String() string + String returns a readable representation of this value (for usage defaults) + +func (f *TimestampFlag) TakesValue() bool + TakesValue returns true of the flag takes a value, otherwise false + +type Uint64Flag struct { + Name string + + Category string + DefaultText string + FilePath string + Usage string + + Required bool + Hidden bool + HasBeenSet bool + + Value uint64 + Destination *uint64 + + Aliases []string + EnvVars []string + + Base int + + Action func(*Context, uint64) error + // Has unexported fields. +} + Uint64Flag is a flag with type uint64 + +func (f *Uint64Flag) Apply(set *flag.FlagSet) error + Apply populates the flag given the flag set and environment + +func (f *Uint64Flag) Get(ctx *Context) uint64 + Get returns the flag’s value in the given Context. + +func (f *Uint64Flag) GetCategory() string + GetCategory returns the category for the flag + +func (f *Uint64Flag) GetDefaultText() string + GetDefaultText returns the default text for this flag + +func (f *Uint64Flag) GetEnvVars() []string + GetEnvVars returns the env vars for this flag + +func (f *Uint64Flag) GetUsage() string + GetUsage returns the usage string for the flag + +func (f *Uint64Flag) GetValue() string + GetValue returns the flags value as string representation and an empty + string if the flag takes no value at all. 
+ +func (f *Uint64Flag) IsRequired() bool + IsRequired returns whether or not the flag is required + +func (f *Uint64Flag) IsSet() bool + IsSet returns whether or not the flag has been set through env or file + +func (f *Uint64Flag) IsVisible() bool + IsVisible returns true if the flag is not hidden, otherwise false + +func (f *Uint64Flag) Names() []string + Names returns the names of the flag + +func (f *Uint64Flag) RunAction(c *Context) error + RunAction executes flag action if set + +func (f *Uint64Flag) String() string + String returns a readable representation of this value (for usage defaults) + +func (f *Uint64Flag) TakesValue() bool + TakesValue returns true of the flag takes a value, otherwise false + +type Uint64Slice struct { + // Has unexported fields. +} + Uint64Slice wraps []int64 to satisfy flag.Value + +func NewUint64Slice(defaults ...uint64) *Uint64Slice + NewUint64Slice makes an *Uint64Slice with default values + +func (i *Uint64Slice) Get() interface{} + Get returns the slice of ints set by this flag + +func (i *Uint64Slice) Serialize() string + Serialize allows Uint64Slice to fulfill Serializer + +func (i *Uint64Slice) Set(value string) error + Set parses the value into an integer and appends it to the list of values + +func (i *Uint64Slice) String() string + String returns a readable representation of this value (for usage defaults) + +func (i *Uint64Slice) Value() []uint64 + Value returns the slice of ints set by this flag + +func (i *Uint64Slice) WithSeparatorSpec(spec separatorSpec) + +type Uint64SliceFlag struct { + Name string + + Category string + DefaultText string + FilePath string + Usage string + + Required bool + Hidden bool + HasBeenSet bool + + Value *Uint64Slice + Destination *Uint64Slice + + Aliases []string + EnvVars []string + + Action func(*Context, []uint64) error + // Has unexported fields. 
+} + Uint64SliceFlag is a flag with type *Uint64Slice + +func (f *Uint64SliceFlag) Apply(set *flag.FlagSet) error + Apply populates the flag given the flag set and environment + +func (f *Uint64SliceFlag) Get(ctx *Context) []uint64 + Get returns the flag’s value in the given Context. + +func (f *Uint64SliceFlag) GetCategory() string + GetCategory returns the category for the flag + +func (f *Uint64SliceFlag) GetDefaultText() string + GetDefaultText returns the default text for this flag + +func (f *Uint64SliceFlag) GetEnvVars() []string + GetEnvVars returns the env vars for this flag + +func (f *Uint64SliceFlag) GetUsage() string + GetUsage returns the usage string for the flag + +func (f *Uint64SliceFlag) GetValue() string + GetValue returns the flags value as string representation and an empty + string if the flag takes no value at all. + +func (f *Uint64SliceFlag) IsRequired() bool + IsRequired returns whether or not the flag is required + +func (f *Uint64SliceFlag) IsSet() bool + IsSet returns whether or not the flag has been set through env or file + +func (f *Uint64SliceFlag) IsSliceFlag() bool + IsSliceFlag implements DocGenerationSliceFlag. 
+ +func (f *Uint64SliceFlag) IsVisible() bool + IsVisible returns true if the flag is not hidden, otherwise false + +func (f *Uint64SliceFlag) Names() []string + Names returns the names of the flag + +func (f *Uint64SliceFlag) RunAction(c *Context) error + RunAction executes flag action if set + +func (f *Uint64SliceFlag) String() string + String returns a readable representation of this value (for usage defaults) + +func (f *Uint64SliceFlag) TakesValue() bool + TakesValue returns true of the flag takes a value, otherwise false + +func (f *Uint64SliceFlag) WithSeparatorSpec(spec separatorSpec) + +type UintFlag struct { + Name string + + Category string + DefaultText string + FilePath string + Usage string + + Required bool + Hidden bool + HasBeenSet bool + + Value uint + Destination *uint + + Aliases []string + EnvVars []string + + Base int + + Action func(*Context, uint) error + // Has unexported fields. +} + UintFlag is a flag with type uint + +func (f *UintFlag) Apply(set *flag.FlagSet) error + Apply populates the flag given the flag set and environment + +func (f *UintFlag) Get(ctx *Context) uint + Get returns the flag’s value in the given Context. + +func (f *UintFlag) GetCategory() string + GetCategory returns the category for the flag + +func (f *UintFlag) GetDefaultText() string + GetDefaultText returns the default text for this flag + +func (f *UintFlag) GetEnvVars() []string + GetEnvVars returns the env vars for this flag + +func (f *UintFlag) GetUsage() string + GetUsage returns the usage string for the flag + +func (f *UintFlag) GetValue() string + GetValue returns the flags value as string representation and an empty + string if the flag takes no value at all. 
+ +func (f *UintFlag) IsRequired() bool + IsRequired returns whether or not the flag is required + +func (f *UintFlag) IsSet() bool + IsSet returns whether or not the flag has been set through env or file + +func (f *UintFlag) IsVisible() bool + IsVisible returns true if the flag is not hidden, otherwise false + +func (f *UintFlag) Names() []string + Names returns the names of the flag + +func (f *UintFlag) RunAction(c *Context) error + RunAction executes flag action if set + +func (f *UintFlag) String() string + String returns a readable representation of this value (for usage defaults) + +func (f *UintFlag) TakesValue() bool + TakesValue returns true of the flag takes a value, otherwise false + +type UintSlice struct { + // Has unexported fields. +} + UintSlice wraps []int to satisfy flag.Value + +func NewUintSlice(defaults ...uint) *UintSlice + NewUintSlice makes an *UintSlice with default values + +func (i *UintSlice) Get() interface{} + Get returns the slice of ints set by this flag + +func (i *UintSlice) Serialize() string + Serialize allows UintSlice to fulfill Serializer + +func (i *UintSlice) Set(value string) error + Set parses the value into an integer and appends it to the list of values + +func (i *UintSlice) SetUint(value uint) + TODO: Consistently have specific Set function for Int64 and Float64 ? SetInt + directly adds an integer to the list of values + +func (i *UintSlice) String() string + String returns a readable representation of this value (for usage defaults) + +func (i *UintSlice) Value() []uint + Value returns the slice of ints set by this flag + +func (i *UintSlice) WithSeparatorSpec(spec separatorSpec) + +type UintSliceFlag struct { + Name string + + Category string + DefaultText string + FilePath string + Usage string + + Required bool + Hidden bool + HasBeenSet bool + + Value *UintSlice + Destination *UintSlice + + Aliases []string + EnvVars []string + + Action func(*Context, []uint) error + // Has unexported fields. 
+} + UintSliceFlag is a flag with type *UintSlice + +func (f *UintSliceFlag) Apply(set *flag.FlagSet) error + Apply populates the flag given the flag set and environment + +func (f *UintSliceFlag) Get(ctx *Context) []uint + Get returns the flag’s value in the given Context. + +func (f *UintSliceFlag) GetCategory() string + GetCategory returns the category for the flag + +func (f *UintSliceFlag) GetDefaultText() string + GetDefaultText returns the default text for this flag + +func (f *UintSliceFlag) GetEnvVars() []string + GetEnvVars returns the env vars for this flag + +func (f *UintSliceFlag) GetUsage() string + GetUsage returns the usage string for the flag + +func (f *UintSliceFlag) GetValue() string + GetValue returns the flags value as string representation and an empty + string if the flag takes no value at all. + +func (f *UintSliceFlag) IsRequired() bool + IsRequired returns whether or not the flag is required + +func (f *UintSliceFlag) IsSet() bool + IsSet returns whether or not the flag has been set through env or file + +func (f *UintSliceFlag) IsSliceFlag() bool + IsSliceFlag implements DocGenerationSliceFlag. 
+ +func (f *UintSliceFlag) IsVisible() bool + IsVisible returns true if the flag is not hidden, otherwise false + +func (f *UintSliceFlag) Names() []string + Names returns the names of the flag + +func (f *UintSliceFlag) RunAction(c *Context) error + RunAction executes flag action if set + +func (f *UintSliceFlag) String() string + String returns a readable representation of this value (for usage defaults) + +func (f *UintSliceFlag) TakesValue() bool + TakesValue returns true of the flag takes a value, otherwise false + +func (f *UintSliceFlag) WithSeparatorSpec(spec separatorSpec) + +type VisibleFlag interface { + Flag + + // IsVisible returns true if the flag is not hidden, otherwise false + IsVisible() bool +} + VisibleFlag is an interface that allows to check if a flag is visible + +type VisibleFlagCategory interface { + // Name returns the category name string + Name() string + // Flags returns a slice of VisibleFlag sorted by name + Flags() []VisibleFlag +} + VisibleFlagCategory is a category containing flags. + +package altsrc // import "github.com/urfave/cli/v2/altsrc" + + +FUNCTIONS + +func ApplyInputSourceValues(cCtx *cli.Context, inputSourceContext InputSourceContext, flags []cli.Flag) error + ApplyInputSourceValues iterates over all provided flags and executes + ApplyInputSourceValue on flags implementing the FlagInputSourceExtension + interface to initialize these flags to an alternate input source. + +func InitInputSource(flags []cli.Flag, createInputSource func() (InputSourceContext, error)) cli.BeforeFunc + InitInputSource is used to to setup an InputSourceContext on a cli.Command + Before method. It will create a new input source based on the func provided. 
+ If there is no error it will then apply the new input source to any flags + that are supported by the input source + +func InitInputSourceWithContext(flags []cli.Flag, createInputSource func(cCtx *cli.Context) (InputSourceContext, error)) cli.BeforeFunc + InitInputSourceWithContext is used to to setup an InputSourceContext on + a cli.Command Before method. It will create a new input source based on + the func provided with potentially using existing cli.Context values to + initialize itself. If there is no error it will then apply the new input + source to any flags that are supported by the input source + +func NewJSONSourceFromFlagFunc(flag string) func(c *cli.Context) (InputSourceContext, error) + NewJSONSourceFromFlagFunc returns a func that takes a cli.Context and + returns an InputSourceContext suitable for retrieving config variables from + a file containing JSON data with the file name defined by the given flag. + +func NewTomlSourceFromFlagFunc(flagFileName string) func(cCtx *cli.Context) (InputSourceContext, error) + NewTomlSourceFromFlagFunc creates a new TOML InputSourceContext from a + provided flag name and source context. + +func NewYamlSourceFromFlagFunc(flagFileName string) func(cCtx *cli.Context) (InputSourceContext, error) + NewYamlSourceFromFlagFunc creates a new Yaml InputSourceContext from a + provided flag name and source context. + + +TYPES + +type BoolFlag struct { + *cli.BoolFlag + // Has unexported fields. 
+} + BoolFlag is the flag type that wraps cli.BoolFlag to allow for other values + to be specified + +func NewBoolFlag(fl *cli.BoolFlag) *BoolFlag + NewBoolFlag creates a new BoolFlag + +func (f *BoolFlag) Apply(set *flag.FlagSet) error + Apply saves the flagSet for later usage calls, then calls the wrapped + BoolFlag.Apply + +func (f *BoolFlag) ApplyInputSourceValue(cCtx *cli.Context, isc InputSourceContext) error + ApplyInputSourceValue applies a Bool value to the flagSet if required + +type DurationFlag struct { + *cli.DurationFlag + // Has unexported fields. +} + DurationFlag is the flag type that wraps cli.DurationFlag to allow for other + values to be specified + +func NewDurationFlag(fl *cli.DurationFlag) *DurationFlag + NewDurationFlag creates a new DurationFlag + +func (f *DurationFlag) Apply(set *flag.FlagSet) error + Apply saves the flagSet for later usage calls, then calls the wrapped + DurationFlag.Apply + +func (f *DurationFlag) ApplyInputSourceValue(cCtx *cli.Context, isc InputSourceContext) error + ApplyInputSourceValue applies a Duration value to the flagSet if required + +type FlagInputSourceExtension interface { + cli.Flag + ApplyInputSourceValue(cCtx *cli.Context, isc InputSourceContext) error +} + FlagInputSourceExtension is an extension interface of cli.Flag that allows a + value to be set on the existing parsed flags. + +type Float64Flag struct { + *cli.Float64Flag + // Has unexported fields. 
+} + Float64Flag is the flag type that wraps cli.Float64Flag to allow for other + values to be specified + +func NewFloat64Flag(fl *cli.Float64Flag) *Float64Flag + NewFloat64Flag creates a new Float64Flag + +func (f *Float64Flag) Apply(set *flag.FlagSet) error + Apply saves the flagSet for later usage calls, then calls the wrapped + Float64Flag.Apply + +func (f *Float64Flag) ApplyInputSourceValue(cCtx *cli.Context, isc InputSourceContext) error + ApplyInputSourceValue applies a Float64 value to the flagSet if required + +type Float64SliceFlag struct { + *cli.Float64SliceFlag + // Has unexported fields. +} + Float64SliceFlag is the flag type that wraps cli.Float64SliceFlag to allow + for other values to be specified + +func NewFloat64SliceFlag(fl *cli.Float64SliceFlag) *Float64SliceFlag + NewFloat64SliceFlag creates a new Float64SliceFlag + +func (f *Float64SliceFlag) Apply(set *flag.FlagSet) error + Apply saves the flagSet for later usage calls, then calls the wrapped + Float64SliceFlag.Apply + +func (f *Float64SliceFlag) ApplyInputSourceValue(cCtx *cli.Context, isc InputSourceContext) error + ApplyInputSourceValue applies a Float64Slice value if required + +type GenericFlag struct { + *cli.GenericFlag + // Has unexported fields. 
+} + GenericFlag is the flag type that wraps cli.GenericFlag to allow for other + values to be specified + +func NewGenericFlag(fl *cli.GenericFlag) *GenericFlag + NewGenericFlag creates a new GenericFlag + +func (f *GenericFlag) Apply(set *flag.FlagSet) error + Apply saves the flagSet for later usage calls, then calls the wrapped + GenericFlag.Apply + +func (f *GenericFlag) ApplyInputSourceValue(cCtx *cli.Context, isc InputSourceContext) error + ApplyInputSourceValue applies a generic value to the flagSet if required + +type InputSourceContext interface { + Source() string + + Int(name string) (int, error) + Int64(name string) (int64, error) + Uint(name string) (uint, error) + Uint64(name string) (uint64, error) + Duration(name string) (time.Duration, error) + Float64(name string) (float64, error) + String(name string) (string, error) + StringSlice(name string) ([]string, error) + IntSlice(name string) ([]int, error) + Int64Slice(name string) ([]int64, error) + Float64Slice(name string) ([]float64, error) + Generic(name string) (cli.Generic, error) + Bool(name string) (bool, error) + + // Has unexported methods. +} + InputSourceContext is an interface used to allow other input sources to be + implemented as needed. + + Source returns an identifier for the input source. In case of file source it + should return path to the file. + +func NewJSONSource(data []byte) (InputSourceContext, error) + NewJSONSource returns an InputSourceContext suitable for retrieving config + variables from raw JSON data. + +func NewJSONSourceFromFile(f string) (InputSourceContext, error) + NewJSONSourceFromFile returns an InputSourceContext suitable for retrieving + config variables from a file (or url) containing JSON data. + +func NewJSONSourceFromReader(r io.Reader) (InputSourceContext, error) + NewJSONSourceFromReader returns an InputSourceContext suitable for + retrieving config variables from an io.Reader that returns JSON data. 
+ +func NewTomlSourceFromFile(file string) (InputSourceContext, error) + NewTomlSourceFromFile creates a new TOML InputSourceContext from a filepath. + +func NewYamlSourceFromFile(file string) (InputSourceContext, error) + NewYamlSourceFromFile creates a new Yaml InputSourceContext from a filepath. + +type Int64Flag struct { + *cli.Int64Flag + // Has unexported fields. +} + Int64Flag is the flag type that wraps cli.Int64Flag to allow for other + values to be specified + +func NewInt64Flag(fl *cli.Int64Flag) *Int64Flag + NewInt64Flag creates a new Int64Flag + +func (f *Int64Flag) Apply(set *flag.FlagSet) error + Apply saves the flagSet for later usage calls, then calls the wrapped + Int64Flag.Apply + +func (f *Int64Flag) ApplyInputSourceValue(cCtx *cli.Context, isc InputSourceContext) error + +type Int64SliceFlag struct { + *cli.Int64SliceFlag + // Has unexported fields. +} + Int64SliceFlag is the flag type that wraps cli.Int64SliceFlag to allow for + other values to be specified + +func NewInt64SliceFlag(fl *cli.Int64SliceFlag) *Int64SliceFlag + NewInt64SliceFlag creates a new Int64SliceFlag + +func (f *Int64SliceFlag) Apply(set *flag.FlagSet) error + Apply saves the flagSet for later usage calls, then calls the wrapped + Int64SliceFlag.Apply + +func (f *Int64SliceFlag) ApplyInputSourceValue(cCtx *cli.Context, isc InputSourceContext) error + ApplyInputSourceValue applies a Int64Slice value if required + +type IntFlag struct { + *cli.IntFlag + // Has unexported fields. 
+} + IntFlag is the flag type that wraps cli.IntFlag to allow for other values to + be specified + +func NewIntFlag(fl *cli.IntFlag) *IntFlag + NewIntFlag creates a new IntFlag + +func (f *IntFlag) Apply(set *flag.FlagSet) error + Apply saves the flagSet for later usage calls, then calls the wrapped + IntFlag.Apply + +func (f *IntFlag) ApplyInputSourceValue(cCtx *cli.Context, isc InputSourceContext) error + ApplyInputSourceValue applies a int value to the flagSet if required + +type IntSliceFlag struct { + *cli.IntSliceFlag + // Has unexported fields. +} + IntSliceFlag is the flag type that wraps cli.IntSliceFlag to allow for other + values to be specified + +func NewIntSliceFlag(fl *cli.IntSliceFlag) *IntSliceFlag + NewIntSliceFlag creates a new IntSliceFlag + +func (f *IntSliceFlag) Apply(set *flag.FlagSet) error + Apply saves the flagSet for later usage calls, then calls the wrapped + IntSliceFlag.Apply + +func (f *IntSliceFlag) ApplyInputSourceValue(cCtx *cli.Context, isc InputSourceContext) error + ApplyInputSourceValue applies a IntSlice value if required + +type MapInputSource struct { + // Has unexported fields. +} + MapInputSource implements InputSourceContext to return data from the map + that is loaded. + +func NewMapInputSource(file string, valueMap map[interface{}]interface{}) *MapInputSource + NewMapInputSource creates a new MapInputSource for implementing custom input + sources. 
+func (fsm *MapInputSource) Bool(name string) (bool, error) + Bool returns an bool from the map otherwise returns false + +func (fsm *MapInputSource) Duration(name string) (time.Duration, error) + Duration returns a duration from the map if it exists otherwise returns 0 + +func (fsm *MapInputSource) Float64(name string) (float64, error) + Float64 returns an float64 from the map if it exists otherwise returns 0 + +func (fsm *MapInputSource) Float64Slice(name string) ([]float64, error) + Float64Slice returns an []float64 from the map if it exists otherwise + returns nil + +func (fsm *MapInputSource) Generic(name string) (cli.Generic, error) + Generic returns an cli.Generic from the map if it exists otherwise returns + nil + +func (fsm *MapInputSource) Int(name string) (int, error) + Int returns an int from the map if it exists otherwise returns 0 + +func (fsm *MapInputSource) Int64(name string) (int64, error) + Int64 returns an int64 from the map if it exists otherwise returns 0 + +func (fsm *MapInputSource) Int64Slice(name string) ([]int64, error) + Int64Slice returns an []int64 from the map if it exists otherwise returns + nil + +func (fsm *MapInputSource) IntSlice(name string) ([]int, error) + IntSlice returns an []int from the map if it exists otherwise returns nil + +func (fsm *MapInputSource) Source() string + Source returns the path of the source file + +func (fsm *MapInputSource) String(name string) (string, error) + String returns a string from the map if it exists otherwise returns an empty + string + +func (fsm *MapInputSource) StringSlice(name string) ([]string, error) + StringSlice returns an []string from the map if it exists otherwise returns + nil + +func (fsm *MapInputSource) Uint(name string) (uint, error) + Uint returns an uint from the map if it exists otherwise returns 0 + +func (fsm *MapInputSource) Uint64(name string) (uint64, error) + UInt64 returns an uint64 from the map if it exists otherwise returns 0 + +type PathFlag struct { + 
*cli.PathFlag + // Has unexported fields. +} + PathFlag is the flag type that wraps cli.PathFlag to allow for other values + to be specified + +func NewPathFlag(fl *cli.PathFlag) *PathFlag + NewPathFlag creates a new PathFlag + +func (f *PathFlag) Apply(set *flag.FlagSet) error + Apply saves the flagSet for later usage calls, then calls the wrapped + PathFlag.Apply + +func (f *PathFlag) ApplyInputSourceValue(cCtx *cli.Context, isc InputSourceContext) error + ApplyInputSourceValue applies a Path value to the flagSet if required + +type StringFlag struct { + *cli.StringFlag + // Has unexported fields. +} + StringFlag is the flag type that wraps cli.StringFlag to allow for other + values to be specified + +func NewStringFlag(fl *cli.StringFlag) *StringFlag + NewStringFlag creates a new StringFlag + +func (f *StringFlag) Apply(set *flag.FlagSet) error + Apply saves the flagSet for later usage calls, then calls the wrapped + StringFlag.Apply + +func (f *StringFlag) ApplyInputSourceValue(cCtx *cli.Context, isc InputSourceContext) error + ApplyInputSourceValue applies a String value to the flagSet if required + +type StringSliceFlag struct { + *cli.StringSliceFlag + // Has unexported fields. +} + StringSliceFlag is the flag type that wraps cli.StringSliceFlag to allow for + other values to be specified + +func NewStringSliceFlag(fl *cli.StringSliceFlag) *StringSliceFlag + NewStringSliceFlag creates a new StringSliceFlag + +func (f *StringSliceFlag) Apply(set *flag.FlagSet) error + Apply saves the flagSet for later usage calls, then calls the wrapped + StringSliceFlag.Apply + +func (f *StringSliceFlag) ApplyInputSourceValue(cCtx *cli.Context, isc InputSourceContext) error + ApplyInputSourceValue applies a StringSlice value to the flagSet if required + +type Uint64Flag struct { + *cli.Uint64Flag + // Has unexported fields. 
+} + Uint64Flag is the flag type that wraps cli.Uint64Flag to allow for other + values to be specified + +func NewUint64Flag(fl *cli.Uint64Flag) *Uint64Flag + NewUint64Flag creates a new Uint64Flag + +func (f *Uint64Flag) Apply(set *flag.FlagSet) error + Apply saves the flagSet for later usage calls, then calls the wrapped + Uint64Flag.Apply + +func (f *Uint64Flag) ApplyInputSourceValue(cCtx *cli.Context, isc InputSourceContext) error + +type UintFlag struct { + *cli.UintFlag + // Has unexported fields. +} + UintFlag is the flag type that wraps cli.UintFlag to allow for other values + to be specified + +func NewUintFlag(fl *cli.UintFlag) *UintFlag + NewUintFlag creates a new UintFlag + +func (f *UintFlag) Apply(set *flag.FlagSet) error + Apply saves the flagSet for later usage calls, then calls the wrapped + UintFlag.Apply + +func (f *UintFlag) ApplyInputSourceValue(cCtx *cli.Context, isc InputSourceContext) error + diff --git a/vendor/github.com/urfave/cli/v2/help.go b/vendor/github.com/urfave/cli/v2/help.go new file mode 100644 index 000000000..d27e8ce38 --- /dev/null +++ b/vendor/github.com/urfave/cli/v2/help.go @@ -0,0 +1,569 @@ +package cli + +import ( + "fmt" + "io" + "os" + "strings" + "text/tabwriter" + "text/template" + "unicode/utf8" +) + +const ( + helpName = "help" + helpAlias = "h" +) + +// this instance is to avoid recursion in the ShowCommandHelp which can +// add a help command again +var helpCommandDontUse = &Command{ + Name: helpName, + Aliases: []string{helpAlias}, + Usage: "Shows a list of commands or help for one command", + ArgsUsage: "[command]", +} + +var helpCommand = &Command{ + Name: helpName, + Aliases: []string{helpAlias}, + Usage: "Shows a list of commands or help for one command", + ArgsUsage: "[command]", + Action: func(cCtx *Context) error { + args := cCtx.Args() + argsPresent := args.First() != "" + firstArg := args.First() + + // This action can be triggered by a "default" action of a command + // or via cmd.Run when cmd == 
helpCmd. So we have following possibilities + // + // 1 $ app + // 2 $ app help + // 3 $ app foo + // 4 $ app help foo + // 5 $ app foo help + // 6 $ app foo -h (with no other sub-commands nor flags defined) + + // Case 4. when executing a help command set the context to parent + // to allow resolution of subsequent args. This will transform + // $ app help foo + // to + // $ app foo + // which will then be handled as case 3 + if cCtx.Command.Name == helpName || cCtx.Command.Name == helpAlias { + cCtx = cCtx.parentContext + } + + // Case 4. $ app help foo + // foo is the command for which help needs to be shown + if argsPresent { + return ShowCommandHelp(cCtx, firstArg) + } + + // Case 1 & 2 + // Special case when running help on main app itself as opposed to individual + // commands/subcommands + if cCtx.parentContext.App == nil { + _ = ShowAppHelp(cCtx) + return nil + } + + // Case 3, 5 + if (len(cCtx.Command.Subcommands) == 1 && !cCtx.Command.HideHelp && !cCtx.Command.HideHelpCommand) || + (len(cCtx.Command.Subcommands) == 0 && cCtx.Command.HideHelp) { + templ := cCtx.Command.CustomHelpTemplate + if templ == "" { + templ = CommandHelpTemplate + } + HelpPrinter(cCtx.App.Writer, templ, cCtx.Command) + return nil + } + + // Case 6, handling incorporated in the callee itself + return ShowSubcommandHelp(cCtx) + }, +} + +// Prints help for the App or Command +type helpPrinter func(w io.Writer, templ string, data interface{}) + +// Prints help for the App or Command with custom template function. +type helpPrinterCustom func(w io.Writer, templ string, data interface{}, customFunc map[string]interface{}) + +// HelpPrinter is a function that writes the help output. If not set explicitly, +// this calls HelpPrinterCustom using only the default template functions. +// +// If custom logic for printing help is required, this function can be +// overridden. 
If the ExtraInfo field is defined on an App, this function +// should not be modified, as HelpPrinterCustom will be used directly in order +// to capture the extra information. +var HelpPrinter helpPrinter = printHelp + +// HelpPrinterCustom is a function that writes the help output. It is used as +// the default implementation of HelpPrinter, and may be called directly if +// the ExtraInfo field is set on an App. +// +// In the default implementation, if the customFuncs argument contains a +// "wrapAt" key, which is a function which takes no arguments and returns +// an int, this int value will be used to produce a "wrap" function used +// by the default template to wrap long lines. +var HelpPrinterCustom helpPrinterCustom = printHelpCustom + +// VersionPrinter prints the version for the App +var VersionPrinter = printVersion + +// ShowAppHelpAndExit - Prints the list of subcommands for the app and exits with exit code. +func ShowAppHelpAndExit(c *Context, exitCode int) { + _ = ShowAppHelp(c) + os.Exit(exitCode) +} + +// ShowAppHelp is an action that displays the help. 
+func ShowAppHelp(cCtx *Context) error { + tpl := cCtx.App.CustomAppHelpTemplate + if tpl == "" { + tpl = AppHelpTemplate + } + + if cCtx.App.ExtraInfo == nil { + HelpPrinter(cCtx.App.Writer, tpl, cCtx.App) + return nil + } + + customAppData := func() map[string]interface{} { + return map[string]interface{}{ + "ExtraInfo": cCtx.App.ExtraInfo, + } + } + HelpPrinterCustom(cCtx.App.Writer, tpl, cCtx.App, customAppData()) + + return nil +} + +// DefaultAppComplete prints the list of subcommands as the default app completion method +func DefaultAppComplete(cCtx *Context) { + DefaultCompleteWithFlags(nil)(cCtx) +} + +func printCommandSuggestions(commands []*Command, writer io.Writer) { + for _, command := range commands { + if command.Hidden { + continue + } + if strings.HasSuffix(os.Getenv("0"), "zsh") { + for _, name := range command.Names() { + _, _ = fmt.Fprintf(writer, "%s:%s\n", name, command.Usage) + } + } else { + for _, name := range command.Names() { + _, _ = fmt.Fprintf(writer, "%s\n", name) + } + } + } +} + +func cliArgContains(flagName string) bool { + for _, name := range strings.Split(flagName, ",") { + name = strings.TrimSpace(name) + count := utf8.RuneCountInString(name) + if count > 2 { + count = 2 + } + flag := fmt.Sprintf("%s%s", strings.Repeat("-", count), name) + for _, a := range os.Args { + if a == flag { + return true + } + } + } + return false +} + +func printFlagSuggestions(lastArg string, flags []Flag, writer io.Writer) { + cur := strings.TrimPrefix(lastArg, "-") + cur = strings.TrimPrefix(cur, "-") + for _, flag := range flags { + if bflag, ok := flag.(*BoolFlag); ok && bflag.Hidden { + continue + } + for _, name := range flag.Names() { + name = strings.TrimSpace(name) + // this will get total count utf8 letters in flag name + count := utf8.RuneCountInString(name) + if count > 2 { + count = 2 // reuse this count to generate single - or -- in flag completion + } + // if flag name has more than one utf8 letter and last argument in cli has -- 
prefix then + // skip flag completion for short flags example -v or -x + if strings.HasPrefix(lastArg, "--") && count == 1 { + continue + } + // match if last argument matches this flag and it is not repeated + if strings.HasPrefix(name, cur) && cur != name && !cliArgContains(name) { + flagCompletion := fmt.Sprintf("%s%s", strings.Repeat("-", count), name) + _, _ = fmt.Fprintln(writer, flagCompletion) + } + } + } +} + +func DefaultCompleteWithFlags(cmd *Command) func(cCtx *Context) { + return func(cCtx *Context) { + var lastArg string + + // TODO: This shouldnt depend on os.Args rather it should + // depend on root arguments passed to App + if len(os.Args) > 2 { + lastArg = os.Args[len(os.Args)-2] + } + + if lastArg != "" { + if strings.HasPrefix(lastArg, "-") { + if cmd != nil { + printFlagSuggestions(lastArg, cmd.Flags, cCtx.App.Writer) + + return + } + + printFlagSuggestions(lastArg, cCtx.App.Flags, cCtx.App.Writer) + + return + } + } + + if cmd != nil { + printCommandSuggestions(cmd.Subcommands, cCtx.App.Writer) + return + } + + printCommandSuggestions(cCtx.Command.Subcommands, cCtx.App.Writer) + } +} + +// ShowCommandHelpAndExit - exits with code after showing help +func ShowCommandHelpAndExit(c *Context, command string, code int) { + _ = ShowCommandHelp(c, command) + os.Exit(code) +} + +// ShowCommandHelp prints help for the given command +func ShowCommandHelp(ctx *Context, command string) error { + commands := ctx.App.Commands + if ctx.Command.Subcommands != nil { + commands = ctx.Command.Subcommands + } + for _, c := range commands { + if c.HasName(command) { + if !ctx.App.HideHelpCommand && !c.HasName(helpName) && len(c.Subcommands) != 0 && c.Command(helpName) == nil { + c.Subcommands = append(c.Subcommands, helpCommandDontUse) + } + if !ctx.App.HideHelp && HelpFlag != nil { + c.appendFlag(HelpFlag) + } + templ := c.CustomHelpTemplate + if templ == "" { + if len(c.Subcommands) == 0 { + templ = CommandHelpTemplate + } else { + templ = SubcommandHelpTemplate 
+ } + } + + HelpPrinter(ctx.App.Writer, templ, c) + + return nil + } + } + + if ctx.App.CommandNotFound == nil { + errMsg := fmt.Sprintf("No help topic for '%v'", command) + if ctx.App.Suggest && SuggestCommand != nil { + if suggestion := SuggestCommand(ctx.Command.Subcommands, command); suggestion != "" { + errMsg += ". " + suggestion + } + } + return Exit(errMsg, 3) + } + + ctx.App.CommandNotFound(ctx, command) + return nil +} + +// ShowSubcommandHelpAndExit - Prints help for the given subcommand and exits with exit code. +func ShowSubcommandHelpAndExit(c *Context, exitCode int) { + _ = ShowSubcommandHelp(c) + os.Exit(exitCode) +} + +// ShowSubcommandHelp prints help for the given subcommand +func ShowSubcommandHelp(cCtx *Context) error { + if cCtx == nil { + return nil + } + // use custom template when provided (fixes #1703) + templ := SubcommandHelpTemplate + if cCtx.Command != nil && cCtx.Command.CustomHelpTemplate != "" { + templ = cCtx.Command.CustomHelpTemplate + } + HelpPrinter(cCtx.App.Writer, templ, cCtx.Command) + return nil +} + +// ShowVersion prints the version number of the App +func ShowVersion(cCtx *Context) { + VersionPrinter(cCtx) +} + +func printVersion(cCtx *Context) { + _, _ = fmt.Fprintf(cCtx.App.Writer, "%v version %v\n", cCtx.App.Name, cCtx.App.Version) +} + +// ShowCompletions prints the lists of commands within a given context +func ShowCompletions(cCtx *Context) { + c := cCtx.Command + if c != nil && c.BashComplete != nil { + c.BashComplete(cCtx) + } +} + +// ShowCommandCompletions prints the custom completions for a given command +func ShowCommandCompletions(ctx *Context, command string) { + c := ctx.Command.Command(command) + if c != nil { + if c.BashComplete != nil { + c.BashComplete(ctx) + } else { + DefaultCompleteWithFlags(c)(ctx) + } + } +} + +// printHelpCustom is the default implementation of HelpPrinterCustom. 
+// +// The customFuncs map will be combined with a default template.FuncMap to +// allow using arbitrary functions in template rendering. +func printHelpCustom(out io.Writer, templ string, data interface{}, customFuncs map[string]interface{}) { + const maxLineLength = 10000 + + funcMap := template.FuncMap{ + "join": strings.Join, + "subtract": subtract, + "indent": indent, + "nindent": nindent, + "trim": strings.TrimSpace, + "wrap": func(input string, offset int) string { return wrap(input, offset, maxLineLength) }, + "offset": offset, + "offsetCommands": offsetCommands, + } + + if customFuncs["wrapAt"] != nil { + if wa, ok := customFuncs["wrapAt"]; ok { + if waf, ok := wa.(func() int); ok { + wrapAt := waf() + customFuncs["wrap"] = func(input string, offset int) string { + return wrap(input, offset, wrapAt) + } + } + } + } + + for key, value := range customFuncs { + funcMap[key] = value + } + + w := tabwriter.NewWriter(out, 1, 8, 2, ' ', 0) + t := template.Must(template.New("help").Funcs(funcMap).Parse(templ)) + templates := map[string]string{ + "helpNameTemplate": helpNameTemplate, + "usageTemplate": usageTemplate, + "descriptionTemplate": descriptionTemplate, + "visibleCommandTemplate": visibleCommandTemplate, + "copyrightTemplate": copyrightTemplate, + "versionTemplate": versionTemplate, + "visibleFlagCategoryTemplate": visibleFlagCategoryTemplate, + "visibleFlagTemplate": visibleFlagTemplate, + "visibleGlobalFlagCategoryTemplate": strings.Replace(visibleFlagCategoryTemplate, "OPTIONS", "GLOBAL OPTIONS", -1), + "authorsTemplate": authorsTemplate, + "visibleCommandCategoryTemplate": visibleCommandCategoryTemplate, + } + for name, value := range templates { + if _, err := t.New(name).Parse(value); err != nil { + if os.Getenv("CLI_TEMPLATE_ERROR_DEBUG") != "" { + _, _ = fmt.Fprintf(ErrWriter, "CLI TEMPLATE ERROR: %#v\n", err) + } + } + } + + err := t.Execute(w, data) + if err != nil { + // If the writer is closed, t.Execute will fail, and there's nothing + // we 
can do to recover. + if os.Getenv("CLI_TEMPLATE_ERROR_DEBUG") != "" { + _, _ = fmt.Fprintf(ErrWriter, "CLI TEMPLATE ERROR: %#v\n", err) + } + return + } + _ = w.Flush() +} + +func printHelp(out io.Writer, templ string, data interface{}) { + HelpPrinterCustom(out, templ, data, nil) +} + +func checkVersion(cCtx *Context) bool { + found := false + for _, name := range VersionFlag.Names() { + if cCtx.Bool(name) { + found = true + } + } + return found +} + +func checkHelp(cCtx *Context) bool { + if HelpFlag == nil { + return false + } + found := false + for _, name := range HelpFlag.Names() { + if cCtx.Bool(name) { + found = true + break + } + } + + return found +} + +func checkShellCompleteFlag(a *App, arguments []string) (bool, []string) { + if !a.EnableBashCompletion { + return false, arguments + } + + pos := len(arguments) - 1 + lastArg := arguments[pos] + + if lastArg != "--generate-bash-completion" { + return false, arguments + } + + for _, arg := range arguments { + // If arguments include "--", shell completion is disabled + // because after "--" only positional arguments are accepted. 
+ // https://unix.stackexchange.com/a/11382 + if arg == "--" { + return false, arguments + } + } + + return true, arguments[:pos] +} + +func checkCompletions(cCtx *Context) bool { + if !cCtx.shellComplete { + return false + } + + if args := cCtx.Args(); args.Present() { + name := args.First() + if cmd := cCtx.Command.Command(name); cmd != nil { + // let the command handle the completion + return false + } + } + + ShowCompletions(cCtx) + return true +} + +func subtract(a, b int) int { + return a - b +} + +func indent(spaces int, v string) string { + pad := strings.Repeat(" ", spaces) + return pad + strings.Replace(v, "\n", "\n"+pad, -1) +} + +func nindent(spaces int, v string) string { + return "\n" + indent(spaces, v) +} + +func wrap(input string, offset int, wrapAt int) string { + var ss []string + + lines := strings.Split(input, "\n") + + padding := strings.Repeat(" ", offset) + + for i, line := range lines { + if line == "" { + ss = append(ss, line) + } else { + wrapped := wrapLine(line, offset, wrapAt, padding) + if i == 0 { + ss = append(ss, wrapped) + } else { + ss = append(ss, padding+wrapped) + } + + } + } + + return strings.Join(ss, "\n") +} + +func wrapLine(input string, offset int, wrapAt int, padding string) string { + if wrapAt <= offset || len(input) <= wrapAt-offset { + return input + } + + lineWidth := wrapAt - offset + words := strings.Fields(input) + if len(words) == 0 { + return input + } + + wrapped := words[0] + spaceLeft := lineWidth - len(wrapped) + for _, word := range words[1:] { + if len(word)+1 > spaceLeft { + wrapped += "\n" + padding + word + spaceLeft = lineWidth - len(word) + } else { + wrapped += " " + word + spaceLeft -= 1 + len(word) + } + } + + return wrapped +} + +func offset(input string, fixed int) int { + return len(input) + fixed +} + +// this function tries to find the max width of the names column +// so say we have the following rows for help +// +// foo1, foo2, foo3 some string here +// bar1, b2 some other string here +// 
+// We want to offset the 2nd row usage by some amount so that everything +// is aligned +// +// foo1, foo2, foo3 some string here +// bar1, b2 some other string here +// +// to find that offset we find the length of all the rows and use the max +// to calculate the offset +func offsetCommands(cmds []*Command, fixed int) int { + var max int = 0 + for _, cmd := range cmds { + s := strings.Join(cmd.Names(), ", ") + if len(s) > max { + max = len(s) + } + } + return max + fixed +} diff --git a/vendor/github.com/urfave/cli/v2/mkdocs-reqs.txt b/vendor/github.com/urfave/cli/v2/mkdocs-reqs.txt new file mode 100644 index 000000000..482ad0622 --- /dev/null +++ b/vendor/github.com/urfave/cli/v2/mkdocs-reqs.txt @@ -0,0 +1,5 @@ +mkdocs-git-revision-date-localized-plugin~=1.0 +mkdocs-material-extensions~=1.0 +mkdocs-material~=8.2 +mkdocs~=1.3 +pygments~=2.12 diff --git a/vendor/github.com/urfave/cli/v2/mkdocs.yml b/vendor/github.com/urfave/cli/v2/mkdocs.yml new file mode 100644 index 000000000..f7bfd419e --- /dev/null +++ b/vendor/github.com/urfave/cli/v2/mkdocs.yml @@ -0,0 +1,107 @@ +# NOTE: the mkdocs dependencies will need to be installed out of +# band until this whole thing gets more automated: +# +# pip install -r mkdocs-reqs.txt +# + +site_name: urfave/cli +site_url: https://cli.urfave.org/ +repo_url: https://github.com/urfave/cli +edit_uri: edit/main/docs/ +nav: + - Home: + - Welcome: index.md + - Contributing: CONTRIBUTING.md + - Code of Conduct: CODE_OF_CONDUCT.md + - Releasing: RELEASING.md + - Security: SECURITY.md + - Migrate v1 to v2: migrate-v1-to-v2.md + - v2 Manual: + - Getting Started: v2/getting-started.md + - Migrating From Older Releases: v2/migrating-from-older-releases.md + - Examples: + - Greet: v2/examples/greet.md + - Arguments: v2/examples/arguments.md + - Flags: v2/examples/flags.md + - Subcommands: v2/examples/subcommands.md + - Subcommands Categories: v2/examples/subcommands-categories.md + - Exit Codes: v2/examples/exit-codes.md + - Combining Short 
Options: v2/examples/combining-short-options.md + - Bash Completions: v2/examples/bash-completions.md + - Generated Help Text: v2/examples/generated-help-text.md + - Version Flag: v2/examples/version-flag.md + - Timestamp Flag: v2/examples/timestamp-flag.md + - Suggestions: v2/examples/suggestions.md + - Full API Example: v2/examples/full-api-example.md + - v1 Manual: + - Getting Started: v1/getting-started.md + - Migrating to v2: v1/migrating-to-v2.md + - Examples: + - Greet: v1/examples/greet.md + - Arguments: v1/examples/arguments.md + - Flags: v1/examples/flags.md + - Subcommands: v1/examples/subcommands.md + - Subcommands (Categories): v1/examples/subcommands-categories.md + - Exit Codes: v1/examples/exit-codes.md + - Combining Short Options: v1/examples/combining-short-options.md + - Bash Completions: v1/examples/bash-completions.md + - Generated Help Text: v1/examples/generated-help-text.md + - Version Flag: v1/examples/version-flag.md + +theme: + name: material + palette: + - media: "(prefers-color-scheme: light)" + scheme: default + toggle: + icon: material/brightness-4 + name: dark mode + - media: "(prefers-color-scheme: dark)" + scheme: slate + toggle: + icon: material/brightness-7 + name: light mode + features: + - content.code.annotate + - navigation.top + - navigation.instant + - navigation.expand + - navigation.sections + - navigation.tabs + - navigation.tabs.sticky +plugins: + - git-revision-date-localized + - search + - tags +# NOTE: this is the recommended configuration from +# https://squidfunk.github.io/mkdocs-material/setup/extensions/#recommended-configuration +markdown_extensions: + - abbr + - admonition + - attr_list + - def_list + - footnotes + - meta + - md_in_html + - toc: + permalink: true + - pymdownx.arithmatex: + generic: true + - pymdownx.betterem: + smart_enable: all + - pymdownx.caret + - pymdownx.details + - pymdownx.emoji: + emoji_index: !!python/name:materialx.emoji.twemoji + emoji_generator: !!python/name:materialx.emoji.to_svg 
+ - pymdownx.highlight + - pymdownx.inlinehilite + - pymdownx.keys + - pymdownx.mark + - pymdownx.smartsymbols + - pymdownx.superfences + - pymdownx.tabbed: + alternate_style: true + - pymdownx.tasklist: + custom_checkbox: true + - pymdownx.tilde diff --git a/vendor/github.com/urfave/cli/v2/parse.go b/vendor/github.com/urfave/cli/v2/parse.go new file mode 100644 index 000000000..d79f15a18 --- /dev/null +++ b/vendor/github.com/urfave/cli/v2/parse.go @@ -0,0 +1,102 @@ +package cli + +import ( + "flag" + "strings" +) + +type iterativeParser interface { + newFlagSet() (*flag.FlagSet, error) + useShortOptionHandling() bool +} + +// To enable short-option handling (e.g., "-it" vs "-i -t") we have to +// iteratively catch parsing errors. This way we achieve LR parsing without +// transforming any arguments. Otherwise, there is no way we can discriminate +// combined short options from common arguments that should be left untouched. +// Pass `shellComplete` to continue parsing options on failure during shell +// completion when, the user-supplied options may be incomplete. 
+func parseIter(set *flag.FlagSet, ip iterativeParser, args []string, shellComplete bool) error { + for { + err := set.Parse(args) + if !ip.useShortOptionHandling() || err == nil { + if shellComplete { + return nil + } + return err + } + + trimmed, trimErr := flagFromError(err) + if trimErr != nil { + return err + } + + // regenerate the initial args with the split short opts + argsWereSplit := false + for i, arg := range args { + // skip args that are not part of the error message + if name := strings.TrimLeft(arg, "-"); name != trimmed { + continue + } + + // if we can't split, the error was accurate + shortOpts := splitShortOptions(set, arg) + if len(shortOpts) == 1 { + return err + } + + // swap current argument with the split version + // do not include args that parsed correctly so far as it would + // trigger Value.Set() on those args and would result in + // duplicates for slice type flags + args = append(shortOpts, args[i+1:]...) + argsWereSplit = true + break + } + + // This should be an impossible to reach code path, but in case the arg + // splitting failed to happen, this will prevent infinite loops + if !argsWereSplit { + return err + } + } +} + +const providedButNotDefinedErrMsg = "flag provided but not defined: -" + +// flagFromError tries to parse a provided flag from an error message. 
If the +// parsing fails, it returns the input error and an empty string +func flagFromError(err error) (string, error) { + errStr := err.Error() + trimmed := strings.TrimPrefix(errStr, providedButNotDefinedErrMsg) + if errStr == trimmed { + return "", err + } + return trimmed, nil +} + +func splitShortOptions(set *flag.FlagSet, arg string) []string { + shortFlagsExist := func(s string) bool { + for _, c := range s[1:] { + if f := set.Lookup(string(c)); f == nil { + return false + } + } + return true + } + + if !isSplittable(arg) || !shortFlagsExist(arg) { + return []string{arg} + } + + separated := make([]string, 0, len(arg)-1) + for _, flagChar := range arg[1:] { + separated = append(separated, "-"+string(flagChar)) + } + + return separated +} + +func isSplittable(flagArg string) bool { + return strings.HasPrefix(flagArg, "-") && !strings.HasPrefix(flagArg, "--") && len(flagArg) > 2 +} diff --git a/vendor/github.com/urfave/cli/v2/sliceflag.go b/vendor/github.com/urfave/cli/v2/sliceflag.go new file mode 100644 index 000000000..b2ca59041 --- /dev/null +++ b/vendor/github.com/urfave/cli/v2/sliceflag.go @@ -0,0 +1,290 @@ +package cli + +import ( + "flag" + "reflect" +) + +type ( + // SliceFlag extends implementations like StringSliceFlag and IntSliceFlag with support for using slices directly, + // as Value and/or Destination. + // See also SliceFlagTarget, MultiStringFlag, MultiFloat64Flag, MultiInt64Flag, MultiIntFlag. + SliceFlag[T SliceFlagTarget[E], S ~[]E, E any] struct { + Target T + Value S + Destination *S + } + + // SliceFlagTarget models a target implementation for use with SliceFlag. + // The three methods, SetValue, SetDestination, and GetDestination, are necessary to propagate Value and + // Destination, where Value is propagated inwards (initially), and Destination is propagated outwards (on every + // update). 
+ SliceFlagTarget[E any] interface { + Flag + RequiredFlag + DocGenerationFlag + VisibleFlag + CategorizableFlag + + // SetValue should propagate the given slice to the target, ideally as a new value. + // Note that a nil slice should nil/clear any existing value (modelled as ~[]E). + SetValue(slice []E) + // SetDestination should propagate the given slice to the target, ideally as a new value. + // Note that a nil slice should nil/clear any existing value (modelled as ~*[]E). + SetDestination(slice []E) + // GetDestination should return the current value referenced by any destination, or nil if nil/unset. + GetDestination() []E + } + + // MultiStringFlag extends StringSliceFlag with support for using slices directly, as Value and/or Destination. + // See also SliceFlag. + MultiStringFlag = SliceFlag[*StringSliceFlag, []string, string] + + // MultiFloat64Flag extends Float64SliceFlag with support for using slices directly, as Value and/or Destination. + // See also SliceFlag. + MultiFloat64Flag = SliceFlag[*Float64SliceFlag, []float64, float64] + + // MultiInt64Flag extends Int64SliceFlag with support for using slices directly, as Value and/or Destination. + // See also SliceFlag. + MultiInt64Flag = SliceFlag[*Int64SliceFlag, []int64, int64] + + // MultiIntFlag extends IntSliceFlag with support for using slices directly, as Value and/or Destination. + // See also SliceFlag. 
+ MultiIntFlag = SliceFlag[*IntSliceFlag, []int, int] + + flagValueHook struct { + value Generic + hook func() + } +) + +var ( + // compile time assertions + + _ SliceFlagTarget[string] = (*StringSliceFlag)(nil) + _ SliceFlagTarget[string] = (*SliceFlag[*StringSliceFlag, []string, string])(nil) + _ SliceFlagTarget[string] = (*MultiStringFlag)(nil) + _ SliceFlagTarget[float64] = (*MultiFloat64Flag)(nil) + _ SliceFlagTarget[int64] = (*MultiInt64Flag)(nil) + _ SliceFlagTarget[int] = (*MultiIntFlag)(nil) + + _ Generic = (*flagValueHook)(nil) + _ Serializer = (*flagValueHook)(nil) +) + +func (x *SliceFlag[T, S, E]) Apply(set *flag.FlagSet) error { + x.Target.SetValue(x.convertSlice(x.Value)) + + destination := x.Destination + if destination == nil { + x.Target.SetDestination(nil) + + return x.Target.Apply(set) + } + + x.Target.SetDestination(x.convertSlice(*destination)) + + return applyFlagValueHook(set, x.Target.Apply, func() { + *destination = x.Target.GetDestination() + }) +} + +func (x *SliceFlag[T, S, E]) convertSlice(slice S) []E { + result := make([]E, len(slice)) + copy(result, slice) + return result +} + +func (x *SliceFlag[T, S, E]) SetValue(slice S) { + x.Value = slice +} + +func (x *SliceFlag[T, S, E]) SetDestination(slice S) { + if slice != nil { + x.Destination = &slice + } else { + x.Destination = nil + } +} + +func (x *SliceFlag[T, S, E]) GetDestination() S { + if destination := x.Destination; destination != nil { + return *destination + } + return nil +} + +func (x *SliceFlag[T, S, E]) String() string { return x.Target.String() } +func (x *SliceFlag[T, S, E]) Names() []string { return x.Target.Names() } +func (x *SliceFlag[T, S, E]) IsSet() bool { return x.Target.IsSet() } +func (x *SliceFlag[T, S, E]) IsRequired() bool { return x.Target.IsRequired() } +func (x *SliceFlag[T, S, E]) TakesValue() bool { return x.Target.TakesValue() } +func (x *SliceFlag[T, S, E]) GetUsage() string { return x.Target.GetUsage() } +func (x *SliceFlag[T, S, E]) GetValue() 
string { return x.Target.GetValue() } +func (x *SliceFlag[T, S, E]) GetDefaultText() string { return x.Target.GetDefaultText() } +func (x *SliceFlag[T, S, E]) GetEnvVars() []string { return x.Target.GetEnvVars() } +func (x *SliceFlag[T, S, E]) IsVisible() bool { return x.Target.IsVisible() } +func (x *SliceFlag[T, S, E]) GetCategory() string { return x.Target.GetCategory() } + +func (x *flagValueHook) Set(value string) error { + if err := x.value.Set(value); err != nil { + return err + } + x.hook() + return nil +} + +func (x *flagValueHook) String() string { + // note: this is necessary due to the way Go's flag package handles defaults + isZeroValue := func(f flag.Value, v string) bool { + /* + https://cs.opensource.google/go/go/+/refs/tags/go1.18.3:src/flag/flag.go;drc=2580d0e08d5e9f979b943758d3c49877fb2324cb;l=453 + + Copyright (c) 2009 The Go Authors. All rights reserved. + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following disclaimer + in the documentation and/or other materials provided with the + distribution. + * Neither the name of Google Inc. nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + // Build a zero value of the flag's Value type, and see if the + // result of calling its String method equals the value passed in. + // This works unless the Value type is itself an interface type. + typ := reflect.TypeOf(f) + var z reflect.Value + if typ.Kind() == reflect.Pointer { + z = reflect.New(typ.Elem()) + } else { + z = reflect.Zero(typ) + } + return v == z.Interface().(flag.Value).String() + } + if x.value != nil { + // only return non-empty if not the same string as returned by the zero value + if s := x.value.String(); !isZeroValue(x.value, s) { + return s + } + } + return `` +} + +func (x *flagValueHook) Serialize() string { + if value, ok := x.value.(Serializer); ok { + return value.Serialize() + } + return x.String() +} + +// applyFlagValueHook wraps calls apply then wraps flags to call a hook function on update and after initial apply. +func applyFlagValueHook(set *flag.FlagSet, apply func(set *flag.FlagSet) error, hook func()) error { + if apply == nil || set == nil || hook == nil { + panic(`invalid input`) + } + var tmp flag.FlagSet + if err := apply(&tmp); err != nil { + return err + } + tmp.VisitAll(func(f *flag.Flag) { set.Var(&flagValueHook{value: f.Value, hook: hook}, f.Name, f.Usage) }) + hook() + return nil +} + +// newSliceFlagValue is for implementing SliceFlagTarget.SetValue and SliceFlagTarget.SetDestination. +// It's e.g. as part of StringSliceFlag.SetValue, using the factory NewStringSlice. 
+func newSliceFlagValue[R any, S ~[]E, E any](factory func(defaults ...E) *R, defaults S) *R { + if defaults == nil { + return nil + } + return factory(defaults...) +} + +// unwrapFlagValue strips any/all *flagValueHook wrappers. +func unwrapFlagValue(v flag.Value) flag.Value { + for { + h, ok := v.(*flagValueHook) + if !ok { + return v + } + v = h.value + } +} + +// NOTE: the methods below are in this file to make use of the build constraint + +func (f *Float64SliceFlag) SetValue(slice []float64) { + f.Value = newSliceFlagValue(NewFloat64Slice, slice) +} + +func (f *Float64SliceFlag) SetDestination(slice []float64) { + f.Destination = newSliceFlagValue(NewFloat64Slice, slice) +} + +func (f *Float64SliceFlag) GetDestination() []float64 { + if destination := f.Destination; destination != nil { + return destination.Value() + } + return nil +} + +func (f *Int64SliceFlag) SetValue(slice []int64) { + f.Value = newSliceFlagValue(NewInt64Slice, slice) +} + +func (f *Int64SliceFlag) SetDestination(slice []int64) { + f.Destination = newSliceFlagValue(NewInt64Slice, slice) +} + +func (f *Int64SliceFlag) GetDestination() []int64 { + if destination := f.Destination; destination != nil { + return destination.Value() + } + return nil +} + +func (f *IntSliceFlag) SetValue(slice []int) { + f.Value = newSliceFlagValue(NewIntSlice, slice) +} + +func (f *IntSliceFlag) SetDestination(slice []int) { + f.Destination = newSliceFlagValue(NewIntSlice, slice) +} + +func (f *IntSliceFlag) GetDestination() []int { + if destination := f.Destination; destination != nil { + return destination.Value() + } + return nil +} + +func (f *StringSliceFlag) SetValue(slice []string) { + f.Value = newSliceFlagValue(NewStringSlice, slice) +} + +func (f *StringSliceFlag) SetDestination(slice []string) { + f.Destination = newSliceFlagValue(NewStringSlice, slice) +} + +func (f *StringSliceFlag) GetDestination() []string { + if destination := f.Destination; destination != nil { + return destination.Value() + } 
+ return nil +} diff --git a/vendor/github.com/urfave/cli/v2/sort.go b/vendor/github.com/urfave/cli/v2/sort.go new file mode 100644 index 000000000..23d1c2f77 --- /dev/null +++ b/vendor/github.com/urfave/cli/v2/sort.go @@ -0,0 +1,29 @@ +package cli + +import "unicode" + +// lexicographicLess compares strings alphabetically considering case. +func lexicographicLess(i, j string) bool { + iRunes := []rune(i) + jRunes := []rune(j) + + lenShared := len(iRunes) + if lenShared > len(jRunes) { + lenShared = len(jRunes) + } + + for index := 0; index < lenShared; index++ { + ir := iRunes[index] + jr := jRunes[index] + + if lir, ljr := unicode.ToLower(ir), unicode.ToLower(jr); lir != ljr { + return lir < ljr + } + + if ir != jr { + return ir < jr + } + } + + return i < j +} diff --git a/vendor/github.com/urfave/cli/v2/suggestions.go b/vendor/github.com/urfave/cli/v2/suggestions.go new file mode 100644 index 000000000..9d2b7a81e --- /dev/null +++ b/vendor/github.com/urfave/cli/v2/suggestions.go @@ -0,0 +1,68 @@ +//go:build !urfave_cli_no_suggest +// +build !urfave_cli_no_suggest + +package cli + +import ( + "fmt" + + "github.com/xrash/smetrics" +) + +func init() { + SuggestFlag = suggestFlag + SuggestCommand = suggestCommand +} + +func jaroWinkler(a, b string) float64 { + // magic values are from https://github.com/xrash/smetrics/blob/039620a656736e6ad994090895784a7af15e0b80/jaro-winkler.go#L8 + const ( + boostThreshold = 0.7 + prefixSize = 4 + ) + return smetrics.JaroWinkler(a, b, boostThreshold, prefixSize) +} + +func suggestFlag(flags []Flag, provided string, hideHelp bool) string { + distance := 0.0 + suggestion := "" + + for _, flag := range flags { + flagNames := flag.Names() + if !hideHelp && HelpFlag != nil { + flagNames = append(flagNames, HelpFlag.Names()...) 
+ } + for _, name := range flagNames { + newDistance := jaroWinkler(name, provided) + if newDistance > distance { + distance = newDistance + suggestion = name + } + } + } + + if len(suggestion) == 1 { + suggestion = "-" + suggestion + } else if len(suggestion) > 1 { + suggestion = "--" + suggestion + } + + return suggestion +} + +// suggestCommand takes a list of commands and a provided string to suggest a +// command name +func suggestCommand(commands []*Command, provided string) (suggestion string) { + distance := 0.0 + for _, command := range commands { + for _, name := range append(command.Names(), helpName, helpAlias) { + newDistance := jaroWinkler(name, provided) + if newDistance > distance { + distance = newDistance + suggestion = name + } + } + } + + return fmt.Sprintf(SuggestDidYouMeanTemplate, suggestion) +} diff --git a/vendor/github.com/urfave/cli/v2/template.go b/vendor/github.com/urfave/cli/v2/template.go new file mode 100644 index 000000000..ccb22f1d5 --- /dev/null +++ b/vendor/github.com/urfave/cli/v2/template.go @@ -0,0 +1,146 @@ +package cli + +var helpNameTemplate = `{{$v := offset .HelpName 6}}{{wrap .HelpName 3}}{{if .Usage}} - {{wrap .Usage $v}}{{end}}` +var usageTemplate = `{{if .UsageText}}{{wrap .UsageText 3}}{{else}}{{.HelpName}}{{if .VisibleFlags}} [command options]{{end}}{{if .ArgsUsage}} {{.ArgsUsage}}{{else}}{{if .Args}} [arguments...]{{end}}{{end}}{{end}}` +var descriptionTemplate = `{{wrap .Description 3}}` +var authorsTemplate = `{{with $length := len .Authors}}{{if ne 1 $length}}S{{end}}{{end}}: + {{range $index, $author := .Authors}}{{if $index}} + {{end}}{{$author}}{{end}}` +var visibleCommandTemplate = `{{ $cv := offsetCommands .VisibleCommands 5}}{{range .VisibleCommands}} + {{$s := join .Names ", "}}{{$s}}{{ $sp := subtract $cv (offset $s 3) }}{{ indent $sp ""}}{{wrap .Usage $cv}}{{end}}` +var visibleCommandCategoryTemplate = `{{range .VisibleCategories}}{{if .Name}} + {{.Name}}:{{range .VisibleCommands}} + {{join .Names ", 
"}}{{"\t"}}{{.Usage}}{{end}}{{else}}{{template "visibleCommandTemplate" .}}{{end}}{{end}}` +var visibleFlagCategoryTemplate = `{{range .VisibleFlagCategories}} + {{if .Name}}{{.Name}} + + {{end}}{{$flglen := len .Flags}}{{range $i, $e := .Flags}}{{if eq (subtract $flglen $i) 1}}{{$e}} +{{else}}{{$e}} + {{end}}{{end}}{{end}}` + +var visibleFlagTemplate = `{{range $i, $e := .VisibleFlags}} + {{wrap $e.String 6}}{{end}}` + +var versionTemplate = `{{if .Version}}{{if not .HideVersion}} + +VERSION: + {{.Version}}{{end}}{{end}}` + +var copyrightTemplate = `{{wrap .Copyright 3}}` + +// AppHelpTemplate is the text template for the Default help topic. +// cli.go uses text/template to render templates. You can +// render custom help text by setting this variable. +var AppHelpTemplate = `NAME: + {{template "helpNameTemplate" .}} + +USAGE: + {{if .UsageText}}{{wrap .UsageText 3}}{{else}}{{.HelpName}} {{if .VisibleFlags}}[global options]{{end}}{{if .Commands}} command [command options]{{end}}{{if .ArgsUsage}} {{.ArgsUsage}}{{else}}{{if .Args}} [arguments...]{{end}}{{end}}{{end}}{{if .Version}}{{if not .HideVersion}} + +VERSION: + {{.Version}}{{end}}{{end}}{{if .Description}} + +DESCRIPTION: + {{template "descriptionTemplate" .}}{{end}} +{{- if len .Authors}} + +AUTHOR{{template "authorsTemplate" .}}{{end}}{{if .VisibleCommands}} + +COMMANDS:{{template "visibleCommandCategoryTemplate" .}}{{end}}{{if .VisibleFlagCategories}} + +GLOBAL OPTIONS:{{template "visibleFlagCategoryTemplate" .}}{{else if .VisibleFlags}} + +GLOBAL OPTIONS:{{template "visibleFlagTemplate" .}}{{end}}{{if .Copyright}} + +COPYRIGHT: + {{template "copyrightTemplate" .}}{{end}} +` + +// CommandHelpTemplate is the text template for the command help topic. +// cli.go uses text/template to render templates. You can +// render custom help text by setting this variable. 
+var CommandHelpTemplate = `NAME: + {{template "helpNameTemplate" .}} + +USAGE: + {{template "usageTemplate" .}}{{if .Category}} + +CATEGORY: + {{.Category}}{{end}}{{if .Description}} + +DESCRIPTION: + {{template "descriptionTemplate" .}}{{end}}{{if .VisibleFlagCategories}} + +OPTIONS:{{template "visibleFlagCategoryTemplate" .}}{{else if .VisibleFlags}} + +OPTIONS:{{template "visibleFlagTemplate" .}}{{end}} +` + +// SubcommandHelpTemplate is the text template for the subcommand help topic. +// cli.go uses text/template to render templates. You can +// render custom help text by setting this variable. +var SubcommandHelpTemplate = `NAME: + {{template "helpNameTemplate" .}} + +USAGE: + {{template "usageTemplate" .}}{{if .Category}} + +CATEGORY: + {{.Category}}{{end}}{{if .Description}} + +DESCRIPTION: + {{template "descriptionTemplate" .}}{{end}}{{if .VisibleCommands}} + +COMMANDS:{{template "visibleCommandCategoryTemplate" .}}{{end}}{{if .VisibleFlagCategories}} + +OPTIONS:{{template "visibleFlagCategoryTemplate" .}}{{else if .VisibleFlags}} + +OPTIONS:{{template "visibleFlagTemplate" .}}{{end}} +` + +var MarkdownDocTemplate = `{{if gt .SectionNum 0}}% {{ .App.Name }} {{ .SectionNum }} + +{{end}}# NAME + +{{ .App.Name }}{{ if .App.Usage }} - {{ .App.Usage }}{{ end }} + +# SYNOPSIS + +{{ .App.Name }} +{{ if .SynopsisArgs }} +` + "```" + ` +{{ range $v := .SynopsisArgs }}{{ $v }}{{ end }}` + "```" + ` +{{ end }}{{ if .App.Description }} +# DESCRIPTION + +{{ .App.Description }} +{{ end }} +**Usage**: + +` + "```" + `{{ if .App.UsageText }} +{{ .App.UsageText }} +{{ else }} +{{ .App.Name }} [GLOBAL OPTIONS] command [COMMAND OPTIONS] [ARGUMENTS...] 
+{{ end }}` + "```" + ` +{{ if .GlobalArgs }} +# GLOBAL OPTIONS +{{ range $v := .GlobalArgs }} +{{ $v }}{{ end }} +{{ end }}{{ if .Commands }} +# COMMANDS +{{ range $v := .Commands }} +{{ $v }}{{ end }}{{ end }}` + +var FishCompletionTemplate = `# {{ .App.Name }} fish shell completion + +function __fish_{{ .App.Name }}_no_subcommand --description 'Test if there has been any subcommand yet' + for i in (commandline -opc) + if contains -- $i{{ range $v := .AllCommands }} {{ $v }}{{ end }} + return 1 + end + end + return 0 +end + +{{ range $v := .Completions }}{{ $v }} +{{ end }}` diff --git a/vendor/github.com/urfave/cli/v2/zz_generated.flags.go b/vendor/github.com/urfave/cli/v2/zz_generated.flags.go new file mode 100644 index 000000000..f2fc8c88b --- /dev/null +++ b/vendor/github.com/urfave/cli/v2/zz_generated.flags.go @@ -0,0 +1,865 @@ +// WARNING: this file is generated. DO NOT EDIT + +package cli + +import "time" + +// Float64SliceFlag is a flag with type *Float64Slice +type Float64SliceFlag struct { + Name string + + Category string + DefaultText string + FilePath string + Usage string + + Required bool + Hidden bool + HasBeenSet bool + + Value *Float64Slice + Destination *Float64Slice + + Aliases []string + EnvVars []string + + defaultValue *Float64Slice + defaultValueSet bool + + separator separatorSpec + + Action func(*Context, []float64) error +} + +// IsSet returns whether or not the flag has been set through env or file +func (f *Float64SliceFlag) IsSet() bool { + return f.HasBeenSet +} + +// Names returns the names of the flag +func (f *Float64SliceFlag) Names() []string { + return FlagNames(f.Name, f.Aliases) +} + +// IsRequired returns whether or not the flag is required +func (f *Float64SliceFlag) IsRequired() bool { + return f.Required +} + +// IsVisible returns true if the flag is not hidden, otherwise false +func (f *Float64SliceFlag) IsVisible() bool { + return !f.Hidden +} + +// GenericFlag is a flag with type Generic +type GenericFlag struct { + 
Name string + + Category string + DefaultText string + FilePath string + Usage string + + Required bool + Hidden bool + HasBeenSet bool + + Value Generic + Destination Generic + + Aliases []string + EnvVars []string + + defaultValue Generic + defaultValueSet bool + + TakesFile bool + + Action func(*Context, interface{}) error +} + +// String returns a readable representation of this value (for usage defaults) +func (f *GenericFlag) String() string { + return FlagStringer(f) +} + +// IsSet returns whether or not the flag has been set through env or file +func (f *GenericFlag) IsSet() bool { + return f.HasBeenSet +} + +// Names returns the names of the flag +func (f *GenericFlag) Names() []string { + return FlagNames(f.Name, f.Aliases) +} + +// IsRequired returns whether or not the flag is required +func (f *GenericFlag) IsRequired() bool { + return f.Required +} + +// IsVisible returns true if the flag is not hidden, otherwise false +func (f *GenericFlag) IsVisible() bool { + return !f.Hidden +} + +// Int64SliceFlag is a flag with type *Int64Slice +type Int64SliceFlag struct { + Name string + + Category string + DefaultText string + FilePath string + Usage string + + Required bool + Hidden bool + HasBeenSet bool + + Value *Int64Slice + Destination *Int64Slice + + Aliases []string + EnvVars []string + + defaultValue *Int64Slice + defaultValueSet bool + + separator separatorSpec + + Action func(*Context, []int64) error +} + +// IsSet returns whether or not the flag has been set through env or file +func (f *Int64SliceFlag) IsSet() bool { + return f.HasBeenSet +} + +// Names returns the names of the flag +func (f *Int64SliceFlag) Names() []string { + return FlagNames(f.Name, f.Aliases) +} + +// IsRequired returns whether or not the flag is required +func (f *Int64SliceFlag) IsRequired() bool { + return f.Required +} + +// IsVisible returns true if the flag is not hidden, otherwise false +func (f *Int64SliceFlag) IsVisible() bool { + return !f.Hidden +} + +// 
IntSliceFlag is a flag with type *IntSlice +type IntSliceFlag struct { + Name string + + Category string + DefaultText string + FilePath string + Usage string + + Required bool + Hidden bool + HasBeenSet bool + + Value *IntSlice + Destination *IntSlice + + Aliases []string + EnvVars []string + + defaultValue *IntSlice + defaultValueSet bool + + separator separatorSpec + + Action func(*Context, []int) error +} + +// IsSet returns whether or not the flag has been set through env or file +func (f *IntSliceFlag) IsSet() bool { + return f.HasBeenSet +} + +// Names returns the names of the flag +func (f *IntSliceFlag) Names() []string { + return FlagNames(f.Name, f.Aliases) +} + +// IsRequired returns whether or not the flag is required +func (f *IntSliceFlag) IsRequired() bool { + return f.Required +} + +// IsVisible returns true if the flag is not hidden, otherwise false +func (f *IntSliceFlag) IsVisible() bool { + return !f.Hidden +} + +// PathFlag is a flag with type Path +type PathFlag struct { + Name string + + Category string + DefaultText string + FilePath string + Usage string + + Required bool + Hidden bool + HasBeenSet bool + + Value Path + Destination *Path + + Aliases []string + EnvVars []string + + defaultValue Path + defaultValueSet bool + + TakesFile bool + + Action func(*Context, Path) error +} + +// String returns a readable representation of this value (for usage defaults) +func (f *PathFlag) String() string { + return FlagStringer(f) +} + +// IsSet returns whether or not the flag has been set through env or file +func (f *PathFlag) IsSet() bool { + return f.HasBeenSet +} + +// Names returns the names of the flag +func (f *PathFlag) Names() []string { + return FlagNames(f.Name, f.Aliases) +} + +// IsRequired returns whether or not the flag is required +func (f *PathFlag) IsRequired() bool { + return f.Required +} + +// IsVisible returns true if the flag is not hidden, otherwise false +func (f *PathFlag) IsVisible() bool { + return !f.Hidden +} + +// 
StringSliceFlag is a flag with type *StringSlice +type StringSliceFlag struct { + Name string + + Category string + DefaultText string + FilePath string + Usage string + + Required bool + Hidden bool + HasBeenSet bool + + Value *StringSlice + Destination *StringSlice + + Aliases []string + EnvVars []string + + defaultValue *StringSlice + defaultValueSet bool + + separator separatorSpec + + TakesFile bool + + Action func(*Context, []string) error + + KeepSpace bool +} + +// IsSet returns whether or not the flag has been set through env or file +func (f *StringSliceFlag) IsSet() bool { + return f.HasBeenSet +} + +// Names returns the names of the flag +func (f *StringSliceFlag) Names() []string { + return FlagNames(f.Name, f.Aliases) +} + +// IsRequired returns whether or not the flag is required +func (f *StringSliceFlag) IsRequired() bool { + return f.Required +} + +// IsVisible returns true if the flag is not hidden, otherwise false +func (f *StringSliceFlag) IsVisible() bool { + return !f.Hidden +} + +// TimestampFlag is a flag with type *Timestamp +type TimestampFlag struct { + Name string + + Category string + DefaultText string + FilePath string + Usage string + + Required bool + Hidden bool + HasBeenSet bool + + Value *Timestamp + Destination *Timestamp + + Aliases []string + EnvVars []string + + defaultValue *Timestamp + defaultValueSet bool + + Layout string + + Timezone *time.Location + + Action func(*Context, *time.Time) error +} + +// String returns a readable representation of this value (for usage defaults) +func (f *TimestampFlag) String() string { + return FlagStringer(f) +} + +// IsSet returns whether or not the flag has been set through env or file +func (f *TimestampFlag) IsSet() bool { + return f.HasBeenSet +} + +// Names returns the names of the flag +func (f *TimestampFlag) Names() []string { + return FlagNames(f.Name, f.Aliases) +} + +// IsRequired returns whether or not the flag is required +func (f *TimestampFlag) IsRequired() bool { + 
return f.Required +} + +// IsVisible returns true if the flag is not hidden, otherwise false +func (f *TimestampFlag) IsVisible() bool { + return !f.Hidden +} + +// Uint64SliceFlag is a flag with type *Uint64Slice +type Uint64SliceFlag struct { + Name string + + Category string + DefaultText string + FilePath string + Usage string + + Required bool + Hidden bool + HasBeenSet bool + + Value *Uint64Slice + Destination *Uint64Slice + + Aliases []string + EnvVars []string + + defaultValue *Uint64Slice + defaultValueSet bool + + separator separatorSpec + + Action func(*Context, []uint64) error +} + +// IsSet returns whether or not the flag has been set through env or file +func (f *Uint64SliceFlag) IsSet() bool { + return f.HasBeenSet +} + +// Names returns the names of the flag +func (f *Uint64SliceFlag) Names() []string { + return FlagNames(f.Name, f.Aliases) +} + +// IsRequired returns whether or not the flag is required +func (f *Uint64SliceFlag) IsRequired() bool { + return f.Required +} + +// IsVisible returns true if the flag is not hidden, otherwise false +func (f *Uint64SliceFlag) IsVisible() bool { + return !f.Hidden +} + +// UintSliceFlag is a flag with type *UintSlice +type UintSliceFlag struct { + Name string + + Category string + DefaultText string + FilePath string + Usage string + + Required bool + Hidden bool + HasBeenSet bool + + Value *UintSlice + Destination *UintSlice + + Aliases []string + EnvVars []string + + defaultValue *UintSlice + defaultValueSet bool + + separator separatorSpec + + Action func(*Context, []uint) error +} + +// IsSet returns whether or not the flag has been set through env or file +func (f *UintSliceFlag) IsSet() bool { + return f.HasBeenSet +} + +// Names returns the names of the flag +func (f *UintSliceFlag) Names() []string { + return FlagNames(f.Name, f.Aliases) +} + +// IsRequired returns whether or not the flag is required +func (f *UintSliceFlag) IsRequired() bool { + return f.Required +} + +// IsVisible returns true if 
the flag is not hidden, otherwise false +func (f *UintSliceFlag) IsVisible() bool { + return !f.Hidden +} + +// BoolFlag is a flag with type bool +type BoolFlag struct { + Name string + + Category string + DefaultText string + FilePath string + Usage string + + Required bool + Hidden bool + HasBeenSet bool + + Value bool + Destination *bool + + Aliases []string + EnvVars []string + + defaultValue bool + defaultValueSet bool + + Count *int + + DisableDefaultText bool + + Action func(*Context, bool) error +} + +// String returns a readable representation of this value (for usage defaults) +func (f *BoolFlag) String() string { + return FlagStringer(f) +} + +// IsSet returns whether or not the flag has been set through env or file +func (f *BoolFlag) IsSet() bool { + return f.HasBeenSet +} + +// Names returns the names of the flag +func (f *BoolFlag) Names() []string { + return FlagNames(f.Name, f.Aliases) +} + +// IsRequired returns whether or not the flag is required +func (f *BoolFlag) IsRequired() bool { + return f.Required +} + +// IsVisible returns true if the flag is not hidden, otherwise false +func (f *BoolFlag) IsVisible() bool { + return !f.Hidden +} + +// Float64Flag is a flag with type float64 +type Float64Flag struct { + Name string + + Category string + DefaultText string + FilePath string + Usage string + + Required bool + Hidden bool + HasBeenSet bool + + Value float64 + Destination *float64 + + Aliases []string + EnvVars []string + + defaultValue float64 + defaultValueSet bool + + Action func(*Context, float64) error +} + +// String returns a readable representation of this value (for usage defaults) +func (f *Float64Flag) String() string { + return FlagStringer(f) +} + +// IsSet returns whether or not the flag has been set through env or file +func (f *Float64Flag) IsSet() bool { + return f.HasBeenSet +} + +// Names returns the names of the flag +func (f *Float64Flag) Names() []string { + return FlagNames(f.Name, f.Aliases) +} + +// IsRequired 
returns whether or not the flag is required +func (f *Float64Flag) IsRequired() bool { + return f.Required +} + +// IsVisible returns true if the flag is not hidden, otherwise false +func (f *Float64Flag) IsVisible() bool { + return !f.Hidden +} + +// IntFlag is a flag with type int +type IntFlag struct { + Name string + + Category string + DefaultText string + FilePath string + Usage string + + Required bool + Hidden bool + HasBeenSet bool + + Value int + Destination *int + + Aliases []string + EnvVars []string + + defaultValue int + defaultValueSet bool + + Base int + + Action func(*Context, int) error +} + +// String returns a readable representation of this value (for usage defaults) +func (f *IntFlag) String() string { + return FlagStringer(f) +} + +// IsSet returns whether or not the flag has been set through env or file +func (f *IntFlag) IsSet() bool { + return f.HasBeenSet +} + +// Names returns the names of the flag +func (f *IntFlag) Names() []string { + return FlagNames(f.Name, f.Aliases) +} + +// IsRequired returns whether or not the flag is required +func (f *IntFlag) IsRequired() bool { + return f.Required +} + +// IsVisible returns true if the flag is not hidden, otherwise false +func (f *IntFlag) IsVisible() bool { + return !f.Hidden +} + +// Int64Flag is a flag with type int64 +type Int64Flag struct { + Name string + + Category string + DefaultText string + FilePath string + Usage string + + Required bool + Hidden bool + HasBeenSet bool + + Value int64 + Destination *int64 + + Aliases []string + EnvVars []string + + defaultValue int64 + defaultValueSet bool + + Base int + + Action func(*Context, int64) error +} + +// String returns a readable representation of this value (for usage defaults) +func (f *Int64Flag) String() string { + return FlagStringer(f) +} + +// IsSet returns whether or not the flag has been set through env or file +func (f *Int64Flag) IsSet() bool { + return f.HasBeenSet +} + +// Names returns the names of the flag +func (f 
*Int64Flag) Names() []string { + return FlagNames(f.Name, f.Aliases) +} + +// IsRequired returns whether or not the flag is required +func (f *Int64Flag) IsRequired() bool { + return f.Required +} + +// IsVisible returns true if the flag is not hidden, otherwise false +func (f *Int64Flag) IsVisible() bool { + return !f.Hidden +} + +// StringFlag is a flag with type string +type StringFlag struct { + Name string + + Category string + DefaultText string + FilePath string + Usage string + + Required bool + Hidden bool + HasBeenSet bool + + Value string + Destination *string + + Aliases []string + EnvVars []string + + defaultValue string + defaultValueSet bool + + TakesFile bool + + Action func(*Context, string) error +} + +// String returns a readable representation of this value (for usage defaults) +func (f *StringFlag) String() string { + return FlagStringer(f) +} + +// IsSet returns whether or not the flag has been set through env or file +func (f *StringFlag) IsSet() bool { + return f.HasBeenSet +} + +// Names returns the names of the flag +func (f *StringFlag) Names() []string { + return FlagNames(f.Name, f.Aliases) +} + +// IsRequired returns whether or not the flag is required +func (f *StringFlag) IsRequired() bool { + return f.Required +} + +// IsVisible returns true if the flag is not hidden, otherwise false +func (f *StringFlag) IsVisible() bool { + return !f.Hidden +} + +// DurationFlag is a flag with type time.Duration +type DurationFlag struct { + Name string + + Category string + DefaultText string + FilePath string + Usage string + + Required bool + Hidden bool + HasBeenSet bool + + Value time.Duration + Destination *time.Duration + + Aliases []string + EnvVars []string + + defaultValue time.Duration + defaultValueSet bool + + Action func(*Context, time.Duration) error +} + +// String returns a readable representation of this value (for usage defaults) +func (f *DurationFlag) String() string { + return FlagStringer(f) +} + +// IsSet returns whether or 
not the flag has been set through env or file +func (f *DurationFlag) IsSet() bool { + return f.HasBeenSet +} + +// Names returns the names of the flag +func (f *DurationFlag) Names() []string { + return FlagNames(f.Name, f.Aliases) +} + +// IsRequired returns whether or not the flag is required +func (f *DurationFlag) IsRequired() bool { + return f.Required +} + +// IsVisible returns true if the flag is not hidden, otherwise false +func (f *DurationFlag) IsVisible() bool { + return !f.Hidden +} + +// UintFlag is a flag with type uint +type UintFlag struct { + Name string + + Category string + DefaultText string + FilePath string + Usage string + + Required bool + Hidden bool + HasBeenSet bool + + Value uint + Destination *uint + + Aliases []string + EnvVars []string + + defaultValue uint + defaultValueSet bool + + Base int + + Action func(*Context, uint) error +} + +// String returns a readable representation of this value (for usage defaults) +func (f *UintFlag) String() string { + return FlagStringer(f) +} + +// IsSet returns whether or not the flag has been set through env or file +func (f *UintFlag) IsSet() bool { + return f.HasBeenSet +} + +// Names returns the names of the flag +func (f *UintFlag) Names() []string { + return FlagNames(f.Name, f.Aliases) +} + +// IsRequired returns whether or not the flag is required +func (f *UintFlag) IsRequired() bool { + return f.Required +} + +// IsVisible returns true if the flag is not hidden, otherwise false +func (f *UintFlag) IsVisible() bool { + return !f.Hidden +} + +// Uint64Flag is a flag with type uint64 +type Uint64Flag struct { + Name string + + Category string + DefaultText string + FilePath string + Usage string + + Required bool + Hidden bool + HasBeenSet bool + + Value uint64 + Destination *uint64 + + Aliases []string + EnvVars []string + + defaultValue uint64 + defaultValueSet bool + + Base int + + Action func(*Context, uint64) error +} + +// String returns a readable representation of this value (for 
usage defaults) +func (f *Uint64Flag) String() string { + return FlagStringer(f) +} + +// IsSet returns whether or not the flag has been set through env or file +func (f *Uint64Flag) IsSet() bool { + return f.HasBeenSet +} + +// Names returns the names of the flag +func (f *Uint64Flag) Names() []string { + return FlagNames(f.Name, f.Aliases) +} + +// IsRequired returns whether or not the flag is required +func (f *Uint64Flag) IsRequired() bool { + return f.Required +} + +// IsVisible returns true if the flag is not hidden, otherwise false +func (f *Uint64Flag) IsVisible() bool { + return !f.Hidden +} + +// vim:ro diff --git a/vendor/github.com/vishvananda/netlink/addr_linux.go b/vendor/github.com/vishvananda/netlink/addr_linux.go index 218ab2379..9e312043b 100644 --- a/vendor/github.com/vishvananda/netlink/addr_linux.go +++ b/vendor/github.com/vishvananda/netlink/addr_linux.go @@ -1,9 +1,9 @@ package netlink import ( + "errors" "fmt" "net" - "strings" "syscall" "github.com/vishvananda/netlink/nl" @@ -17,6 +17,7 @@ import ( // // If `addr` is an IPv4 address and the broadcast address is not given, it // will be automatically computed based on the IP mask if /30 or larger. +// If `net.IPv4zero` is given as the broadcast address, broadcast is disabled. func AddrAdd(link Link, addr *Addr) error { return pkgHandle.AddrAdd(link, addr) } @@ -27,6 +28,7 @@ func AddrAdd(link Link, addr *Addr) error { // // If `addr` is an IPv4 address and the broadcast address is not given, it // will be automatically computed based on the IP mask if /30 or larger. +// If `net.IPv4zero` is given as the broadcast address, broadcast is disabled. 
func (h *Handle) AddrAdd(link Link, addr *Addr) error { req := h.newNetlinkRequest(unix.RTM_NEWADDR, unix.NLM_F_CREATE|unix.NLM_F_EXCL|unix.NLM_F_ACK) return h.addrHandle(link, addr, req) @@ -38,6 +40,7 @@ func (h *Handle) AddrAdd(link Link, addr *Addr) error { // // If `addr` is an IPv4 address and the broadcast address is not given, it // will be automatically computed based on the IP mask if /30 or larger. +// If `net.IPv4zero` is given as the broadcast address, broadcast is disabled. func AddrReplace(link Link, addr *Addr) error { return pkgHandle.AddrReplace(link, addr) } @@ -48,6 +51,7 @@ func AddrReplace(link Link, addr *Addr) error { // // If `addr` is an IPv4 address and the broadcast address is not given, it // will be automatically computed based on the IP mask if /30 or larger. +// If `net.IPv4zero` is given as the broadcast address, broadcast is disabled. func (h *Handle) AddrReplace(link Link, addr *Addr) error { req := h.newNetlinkRequest(unix.RTM_NEWADDR, unix.NLM_F_CREATE|unix.NLM_F_REPLACE|unix.NLM_F_ACK) return h.addrHandle(link, addr, req) @@ -56,18 +60,13 @@ func (h *Handle) AddrReplace(link Link, addr *Addr) error { // AddrDel will delete an IP address from a link device. // // Equivalent to: `ip addr del $addr dev $link` -// -// If `addr` is an IPv4 address and the broadcast address is not given, it -// will be automatically computed based on the IP mask if /30 or larger. func AddrDel(link Link, addr *Addr) error { return pkgHandle.AddrDel(link, addr) } // AddrDel will delete an IP address from a link device. -// Equivalent to: `ip addr del $addr dev $link` // -// If `addr` is an IPv4 address and the broadcast address is not given, it -// will be automatically computed based on the IP mask if /30 or larger. 
+// Equivalent to: `ip addr del $addr dev $link` func (h *Handle) AddrDel(link Link, addr *Addr) error { req := h.newNetlinkRequest(unix.RTM_DELADDR, unix.NLM_F_ACK) return h.addrHandle(link, addr, req) @@ -81,9 +80,6 @@ func (h *Handle) addrHandle(link Link, addr *Addr, req *nl.NetlinkRequest) error msg.Index = uint32(addr.LinkIndex) } else { base := link.Attrs() - if addr.Label != "" && !strings.HasPrefix(addr.Label, base.Name) { - return fmt.Errorf("label must begin with interface name") - } h.ensureIndex(base) msg.Index = uint32(base.Index) } @@ -141,6 +137,10 @@ func (h *Handle) addrHandle(link Link, addr *Addr, req *nl.NetlinkRequest) error addr.Broadcast = calcBroadcast } + if net.IPv4zero.Equal(addr.Broadcast) { + addr.Broadcast = nil + } + if addr.Broadcast != nil { req.AddData(nl.NewRtAttr(unix.IFA_BROADCAST, addr.Broadcast)) } @@ -169,6 +169,9 @@ func (h *Handle) addrHandle(link Link, addr *Addr, req *nl.NetlinkRequest) error // AddrList gets a list of IP addresses in the system. // Equivalent to: `ip addr show`. // The list can be filtered by link and ip family. +// +// If the returned error is [ErrDumpInterrupted], results may be inconsistent +// or incomplete. func AddrList(link Link, family int) ([]Addr, error) { return pkgHandle.AddrList(link, family) } @@ -176,14 +179,17 @@ func AddrList(link Link, family int) ([]Addr, error) { // AddrList gets a list of IP addresses in the system. // Equivalent to: `ip addr show`. // The list can be filtered by link and ip family. +// +// If the returned error is [ErrDumpInterrupted], results may be inconsistent +// or incomplete. 
func (h *Handle) AddrList(link Link, family int) ([]Addr, error) { req := h.newNetlinkRequest(unix.RTM_GETADDR, unix.NLM_F_DUMP) msg := nl.NewIfAddrmsg(family) req.AddData(msg) - msgs, err := req.Execute(unix.NETLINK_ROUTE, unix.RTM_NEWADDR) - if err != nil { - return nil, err + msgs, executeErr := req.Execute(unix.NETLINK_ROUTE, unix.RTM_NEWADDR) + if executeErr != nil && !errors.Is(executeErr, ErrDumpInterrupted) { + return nil, executeErr } indexFilter := 0 @@ -212,7 +218,7 @@ func (h *Handle) AddrList(link Link, family int) ([]Addr, error) { res = append(res, addr) } - return res, nil + return res, executeErr } func parseAddr(m []byte) (addr Addr, family int, err error) { diff --git a/vendor/github.com/vishvananda/netlink/bridge_linux.go b/vendor/github.com/vishvananda/netlink/bridge_linux.go index 6c340b0ce..fa5766b80 100644 --- a/vendor/github.com/vishvananda/netlink/bridge_linux.go +++ b/vendor/github.com/vishvananda/netlink/bridge_linux.go @@ -1,6 +1,7 @@ package netlink import ( + "errors" "fmt" "github.com/vishvananda/netlink/nl" @@ -9,21 +10,27 @@ import ( // BridgeVlanList gets a map of device id to bridge vlan infos. // Equivalent to: `bridge vlan show` +// +// If the returned error is [ErrDumpInterrupted], results may be inconsistent +// or incomplete. func BridgeVlanList() (map[int32][]*nl.BridgeVlanInfo, error) { return pkgHandle.BridgeVlanList() } // BridgeVlanList gets a map of device id to bridge vlan infos. // Equivalent to: `bridge vlan show` +// +// If the returned error is [ErrDumpInterrupted], results may be inconsistent +// or incomplete. 
func (h *Handle) BridgeVlanList() (map[int32][]*nl.BridgeVlanInfo, error) { req := h.newNetlinkRequest(unix.RTM_GETLINK, unix.NLM_F_DUMP) msg := nl.NewIfInfomsg(unix.AF_BRIDGE) req.AddData(msg) req.AddData(nl.NewRtAttr(unix.IFLA_EXT_MASK, nl.Uint32Attr(uint32(nl.RTEXT_FILTER_BRVLAN)))) - msgs, err := req.Execute(unix.NETLINK_ROUTE, unix.RTM_NEWLINK) - if err != nil { - return nil, err + msgs, executeErr := req.Execute(unix.NETLINK_ROUTE, unix.RTM_NEWLINK) + if executeErr != nil && !errors.Is(executeErr, ErrDumpInterrupted) { + return nil, executeErr } ret := make(map[int32][]*nl.BridgeVlanInfo) for _, m := range msgs { @@ -51,7 +58,7 @@ func (h *Handle) BridgeVlanList() (map[int32][]*nl.BridgeVlanInfo, error) { } } } - return ret, nil + return ret, executeErr } // BridgeVlanAdd adds a new vlan filter entry diff --git a/vendor/github.com/vishvananda/netlink/chain_linux.go b/vendor/github.com/vishvananda/netlink/chain_linux.go index d9f441613..5008e7101 100644 --- a/vendor/github.com/vishvananda/netlink/chain_linux.go +++ b/vendor/github.com/vishvananda/netlink/chain_linux.go @@ -1,6 +1,8 @@ package netlink import ( + "errors" + "github.com/vishvananda/netlink/nl" "golang.org/x/sys/unix" ) @@ -56,6 +58,9 @@ func (h *Handle) chainModify(cmd, flags int, link Link, chain Chain) error { // ChainList gets a list of chains in the system. // Equivalent to: `tc chain list`. // The list can be filtered by link. +// +// If the returned error is [ErrDumpInterrupted], results may be inconsistent +// or incomplete. func ChainList(link Link, parent uint32) ([]Chain, error) { return pkgHandle.ChainList(link, parent) } @@ -63,6 +68,9 @@ func ChainList(link Link, parent uint32) ([]Chain, error) { // ChainList gets a list of chains in the system. // Equivalent to: `tc chain list`. // The list can be filtered by link. +// +// If the returned error is [ErrDumpInterrupted], results may be inconsistent +// or incomplete. 
func (h *Handle) ChainList(link Link, parent uint32) ([]Chain, error) { req := h.newNetlinkRequest(unix.RTM_GETCHAIN, unix.NLM_F_DUMP) index := int32(0) @@ -78,9 +86,9 @@ func (h *Handle) ChainList(link Link, parent uint32) ([]Chain, error) { } req.AddData(msg) - msgs, err := req.Execute(unix.NETLINK_ROUTE, unix.RTM_NEWCHAIN) - if err != nil { - return nil, err + msgs, executeErr := req.Execute(unix.NETLINK_ROUTE, unix.RTM_NEWCHAIN) + if executeErr != nil && !errors.Is(executeErr, ErrDumpInterrupted) { + return nil, executeErr } var res []Chain @@ -108,5 +116,5 @@ func (h *Handle) ChainList(link Link, parent uint32) ([]Chain, error) { res = append(res, chain) } - return res, nil + return res, executeErr } diff --git a/vendor/github.com/vishvananda/netlink/class_linux.go b/vendor/github.com/vishvananda/netlink/class_linux.go index a82eb09de..08fb16c2b 100644 --- a/vendor/github.com/vishvananda/netlink/class_linux.go +++ b/vendor/github.com/vishvananda/netlink/class_linux.go @@ -201,14 +201,20 @@ func classPayload(req *nl.NetlinkRequest, class Class) error { // ClassList gets a list of classes in the system. // Equivalent to: `tc class show`. +// // Generally returns nothing if link and parent are not specified. +// If the returned error is [ErrDumpInterrupted], results may be inconsistent +// or incomplete. func ClassList(link Link, parent uint32) ([]Class, error) { return pkgHandle.ClassList(link, parent) } // ClassList gets a list of classes in the system. // Equivalent to: `tc class show`. +// // Generally returns nothing if link and parent are not specified. +// If the returned error is [ErrDumpInterrupted], results may be inconsistent +// or incomplete. 
func (h *Handle) ClassList(link Link, parent uint32) ([]Class, error) { req := h.newNetlinkRequest(unix.RTM_GETTCLASS, unix.NLM_F_DUMP) msg := &nl.TcMsg{ @@ -222,9 +228,9 @@ func (h *Handle) ClassList(link Link, parent uint32) ([]Class, error) { } req.AddData(msg) - msgs, err := req.Execute(unix.NETLINK_ROUTE, unix.RTM_NEWTCLASS) - if err != nil { - return nil, err + msgs, executeErr := req.Execute(unix.NETLINK_ROUTE, unix.RTM_NEWTCLASS) + if executeErr != nil && !errors.Is(executeErr, ErrDumpInterrupted) { + return nil, executeErr } var res []Class @@ -295,7 +301,7 @@ func (h *Handle) ClassList(link Link, parent uint32) ([]Class, error) { res = append(res, class) } - return res, nil + return res, executeErr } func parseHtbClassData(class Class, data []syscall.NetlinkRouteAttr) (bool, error) { diff --git a/vendor/github.com/vishvananda/netlink/conntrack_linux.go b/vendor/github.com/vishvananda/netlink/conntrack_linux.go index ba022453b..b3d354d75 100644 --- a/vendor/github.com/vishvananda/netlink/conntrack_linux.go +++ b/vendor/github.com/vishvananda/netlink/conntrack_linux.go @@ -5,6 +5,7 @@ import ( "encoding/binary" "errors" "fmt" + "io/fs" "net" "time" @@ -44,6 +45,9 @@ type InetFamily uint8 // ConntrackTableList returns the flow list of a table of a specific family // conntrack -L [table] [options] List conntrack or expectation table +// +// If the returned error is [ErrDumpInterrupted], results may be inconsistent +// or incomplete. func ConntrackTableList(table ConntrackTableType, family InetFamily) ([]*ConntrackFlow, error) { return pkgHandle.ConntrackTableList(table, family) } @@ -70,7 +74,7 @@ func ConntrackUpdate(table ConntrackTableType, family InetFamily, flow *Conntrac // ConntrackDeleteFilter deletes entries on the specified table on the base of the filter // conntrack -D [table] parameters Delete conntrack or expectation // -// Deprecated: use [ConntrackDeleteFilter] instead. +// Deprecated: use [ConntrackDeleteFilters] instead. 
func ConntrackDeleteFilter(table ConntrackTableType, family InetFamily, filter CustomConntrackFilter) (uint, error) { return pkgHandle.ConntrackDeleteFilters(table, family, filter) } @@ -83,10 +87,13 @@ func ConntrackDeleteFilters(table ConntrackTableType, family InetFamily, filters // ConntrackTableList returns the flow list of a table of a specific family using the netlink handle passed // conntrack -L [table] [options] List conntrack or expectation table +// +// If the returned error is [ErrDumpInterrupted], results may be inconsistent +// or incomplete. func (h *Handle) ConntrackTableList(table ConntrackTableType, family InetFamily) ([]*ConntrackFlow, error) { - res, err := h.dumpConntrackTable(table, family) - if err != nil { - return nil, err + res, executeErr := h.dumpConntrackTable(table, family) + if executeErr != nil && !errors.Is(executeErr, ErrDumpInterrupted) { + return nil, executeErr } // Deserialize all the flows @@ -95,7 +102,7 @@ func (h *Handle) ConntrackTableList(table ConntrackTableType, family InetFamily) result = append(result, parseRawData(dataRaw)) } - return result, nil + return result, executeErr } // ConntrackTableFlush flushes all the flows of a specified table using the netlink handle passed @@ -152,11 +159,18 @@ func (h *Handle) ConntrackDeleteFilter(table ConntrackTableType, family InetFami // ConntrackDeleteFilters deletes entries on the specified table matching any of the specified filters using the netlink handle passed // conntrack -D [table] parameters Delete conntrack or expectation func (h *Handle) ConntrackDeleteFilters(table ConntrackTableType, family InetFamily, filters ...CustomConntrackFilter) (uint, error) { + var finalErr error res, err := h.dumpConntrackTable(table, family) if err != nil { - return 0, err + if !errors.Is(err, ErrDumpInterrupted) { + return 0, err + } + // This allows us to at least do a best effort to try to clean the + // entries matching the filter. 
+ finalErr = err } + var totalFilterErrors int var matched uint for _, dataRaw := range res { flow := parseRawData(dataRaw) @@ -165,15 +179,20 @@ func (h *Handle) ConntrackDeleteFilters(table ConntrackTableType, family InetFam req2 := h.newConntrackRequest(table, family, nl.IPCTNL_MSG_CT_DELETE, unix.NLM_F_ACK) // skip the first 4 byte that are the netfilter header, the newConntrackRequest is adding it already req2.AddRawData(dataRaw[4:]) - req2.Execute(unix.NETLINK_NETFILTER, 0) - matched++ - // flow is already deleted, no need to match on other filters and continue to the next flow. - break + if _, err = req2.Execute(unix.NETLINK_NETFILTER, 0); err == nil || errors.Is(err, fs.ErrNotExist) { + matched++ + // flow is already deleted, no need to match on other filters and continue to the next flow. + break + } else { + totalFilterErrors++ + } } } } - - return matched, nil + if totalFilterErrors > 0 { + finalErr = errors.Join(finalErr, fmt.Errorf("failed to delete %d conntrack flows with %d filters", totalFilterErrors, len(filters))) + } + return matched, finalErr } func (h *Handle) newConntrackRequest(table ConntrackTableType, family InetFamily, operation, flags int) *nl.NetlinkRequest { diff --git a/vendor/github.com/vishvananda/netlink/conntrack_unspecified.go b/vendor/github.com/vishvananda/netlink/conntrack_unspecified.go index 0bfdf422d..0049048dc 100644 --- a/vendor/github.com/vishvananda/netlink/conntrack_unspecified.go +++ b/vendor/github.com/vishvananda/netlink/conntrack_unspecified.go @@ -33,7 +33,7 @@ func ConntrackTableFlush(table ConntrackTableType) error { // ConntrackDeleteFilter deletes entries on the specified table on the base of the filter // conntrack -D [table] parameters Delete conntrack or expectation // -// Deprecated: use [ConntrackDeleteFilter] instead. +// Deprecated: use [ConntrackDeleteFilters] instead. 
func ConntrackDeleteFilter(table ConntrackTableType, family InetFamily, filter *ConntrackFilter) (uint, error) { return 0, ErrNotImplemented } diff --git a/vendor/github.com/vishvananda/netlink/devlink_linux.go b/vendor/github.com/vishvananda/netlink/devlink_linux.go index d98801dbb..45d8ee4b6 100644 --- a/vendor/github.com/vishvananda/netlink/devlink_linux.go +++ b/vendor/github.com/vishvananda/netlink/devlink_linux.go @@ -1,6 +1,7 @@ package netlink import ( + "errors" "fmt" "net" "strings" @@ -466,6 +467,8 @@ func (h *Handle) getEswitchAttrs(family *GenlFamily, dev *DevlinkDevice) { // DevLinkGetDeviceList provides a pointer to devlink devices and nil error, // otherwise returns an error code. +// If the returned error is [ErrDumpInterrupted], results may be inconsistent +// or incomplete. func (h *Handle) DevLinkGetDeviceList() ([]*DevlinkDevice, error) { f, err := h.GenlFamilyGet(nl.GENL_DEVLINK_NAME) if err != nil { @@ -478,9 +481,9 @@ func (h *Handle) DevLinkGetDeviceList() ([]*DevlinkDevice, error) { req := h.newNetlinkRequest(int(f.ID), unix.NLM_F_REQUEST|unix.NLM_F_ACK|unix.NLM_F_DUMP) req.AddData(msg) - msgs, err := req.Execute(unix.NETLINK_GENERIC, 0) - if err != nil { - return nil, err + msgs, executeErr := req.Execute(unix.NETLINK_GENERIC, 0) + if executeErr != nil && !errors.Is(executeErr, ErrDumpInterrupted) { + return nil, executeErr } devices, err := parseDevLinkDeviceList(msgs) if err != nil { @@ -489,11 +492,14 @@ func (h *Handle) DevLinkGetDeviceList() ([]*DevlinkDevice, error) { for _, d := range devices { h.getEswitchAttrs(f, d) } - return devices, nil + return devices, executeErr } // DevLinkGetDeviceList provides a pointer to devlink devices and nil error, // otherwise returns an error code. +// +// If the returned error is [ErrDumpInterrupted], results may be inconsistent +// or incomplete. 
func DevLinkGetDeviceList() ([]*DevlinkDevice, error) { return pkgHandle.DevLinkGetDeviceList() } @@ -646,6 +652,8 @@ func parseDevLinkAllPortList(msgs [][]byte) ([]*DevlinkPort, error) { // DevLinkGetPortList provides a pointer to devlink ports and nil error, // otherwise returns an error code. +// If the returned error is [ErrDumpInterrupted], results may be inconsistent +// or incomplete. func (h *Handle) DevLinkGetAllPortList() ([]*DevlinkPort, error) { f, err := h.GenlFamilyGet(nl.GENL_DEVLINK_NAME) if err != nil { @@ -658,19 +666,21 @@ func (h *Handle) DevLinkGetAllPortList() ([]*DevlinkPort, error) { req := h.newNetlinkRequest(int(f.ID), unix.NLM_F_REQUEST|unix.NLM_F_ACK|unix.NLM_F_DUMP) req.AddData(msg) - msgs, err := req.Execute(unix.NETLINK_GENERIC, 0) - if err != nil { - return nil, err + msgs, executeErr := req.Execute(unix.NETLINK_GENERIC, 0) + if executeErr != nil && !errors.Is(executeErr, ErrDumpInterrupted) { + return nil, executeErr } ports, err := parseDevLinkAllPortList(msgs) if err != nil { return nil, err } - return ports, nil + return ports, executeErr } // DevLinkGetPortList provides a pointer to devlink ports and nil error, // otherwise returns an error code. +// If the returned error is [ErrDumpInterrupted], results may be inconsistent +// or incomplete. func DevLinkGetAllPortList() ([]*DevlinkPort, error) { return pkgHandle.DevLinkGetAllPortList() } @@ -738,15 +748,18 @@ func (h *Handle) DevlinkGetDeviceResources(bus string, device string) (*DevlinkR // DevlinkGetDeviceParams returns parameters for devlink device // Equivalent to: `devlink dev param show /` +// +// If the returned error is [ErrDumpInterrupted], results may be inconsistent +// or incomplete. 
func (h *Handle) DevlinkGetDeviceParams(bus string, device string) ([]*DevlinkParam, error) { _, req, err := h.createCmdReq(nl.DEVLINK_CMD_PARAM_GET, bus, device) if err != nil { return nil, err } req.Flags |= unix.NLM_F_DUMP - respmsg, err := req.Execute(unix.NETLINK_GENERIC, 0) - if err != nil { - return nil, err + respmsg, executeErr := req.Execute(unix.NETLINK_GENERIC, 0) + if executeErr != nil && !errors.Is(executeErr, ErrDumpInterrupted) { + return nil, executeErr } var params []*DevlinkParam for _, m := range respmsg { @@ -761,11 +774,14 @@ func (h *Handle) DevlinkGetDeviceParams(bus string, device string) ([]*DevlinkPa params = append(params, p) } - return params, nil + return params, executeErr } // DevlinkGetDeviceParams returns parameters for devlink device // Equivalent to: `devlink dev param show /` +// +// If the returned error is [ErrDumpInterrupted], results may be inconsistent +// or incomplete. func DevlinkGetDeviceParams(bus string, device string) ([]*DevlinkParam, error) { return pkgHandle.DevlinkGetDeviceParams(bus, device) } diff --git a/vendor/github.com/vishvananda/netlink/filter.go b/vendor/github.com/vishvananda/netlink/filter.go index 84e1ca7a4..fbb3b6a57 100644 --- a/vendor/github.com/vishvananda/netlink/filter.go +++ b/vendor/github.com/vishvananda/netlink/filter.go @@ -231,6 +231,35 @@ func NewCsumAction() *CsumAction { } } +type VlanAct int8 + +type VlanAction struct { + ActionAttrs + Action VlanAct + VlanID uint16 +} + +const ( + TCA_VLAN_ACT_POP VlanAct = 1 + TCA_VLAN_ACT_PUSH VlanAct = 2 +) + +func (action *VlanAction) Type() string { + return "vlan" +} + +func (action *VlanAction) Attrs() *ActionAttrs { + return &action.ActionAttrs +} + +func NewVlanAction() *VlanAction { + return &VlanAction{ + ActionAttrs: ActionAttrs{ + Action: TC_ACT_PIPE, + }, + } +} + type MirredAct uint8 func (a MirredAct) String() string { @@ -369,6 +398,29 @@ func NewPoliceAction() *PoliceAction { } } +type SampleAction struct { + ActionAttrs + Group 
uint32 + Rate uint32 + TruncSize uint32 +} + +func (action *SampleAction) Type() string { + return "sample" +} + +func (action *SampleAction) Attrs() *ActionAttrs { + return &action.ActionAttrs +} + +func NewSampleAction() *SampleAction { + return &SampleAction{ + ActionAttrs: ActionAttrs{ + Action: TC_ACT_PIPE, + }, + } +} + // MatchAll filters match all packets type MatchAll struct { FilterAttrs diff --git a/vendor/github.com/vishvananda/netlink/filter_linux.go b/vendor/github.com/vishvananda/netlink/filter_linux.go index 87cd18f8e..231b57341 100644 --- a/vendor/github.com/vishvananda/netlink/filter_linux.go +++ b/vendor/github.com/vishvananda/netlink/filter_linux.go @@ -65,6 +65,9 @@ type Flower struct { EncSrcIPMask net.IPMask EncDestPort uint16 EncKeyId uint32 + SrcMac net.HardwareAddr + DestMac net.HardwareAddr + VlanId uint16 SkipHw bool SkipSw bool IPProto *nl.IPProto @@ -135,6 +138,15 @@ func (filter *Flower) encode(parent *nl.RtAttr) error { if filter.EncKeyId != 0 { parent.AddRtAttr(nl.TCA_FLOWER_KEY_ENC_KEY_ID, htonl(filter.EncKeyId)) } + if filter.SrcMac != nil { + parent.AddRtAttr(nl.TCA_FLOWER_KEY_ETH_SRC, filter.SrcMac) + } + if filter.DestMac != nil { + parent.AddRtAttr(nl.TCA_FLOWER_KEY_ETH_DST, filter.DestMac) + } + if filter.VlanId != 0 { + parent.AddRtAttr(nl.TCA_FLOWER_KEY_VLAN_ID, nl.Uint16Attr(filter.VlanId)) + } if filter.IPProto != nil { ipproto := *filter.IPProto parent.AddRtAttr(nl.TCA_FLOWER_KEY_IP_PROTO, ipproto.Serialize()) @@ -201,6 +213,13 @@ func (filter *Flower) decode(data []syscall.NetlinkRouteAttr) error { filter.EncDestPort = ntohs(datum.Value) case nl.TCA_FLOWER_KEY_ENC_KEY_ID: filter.EncKeyId = ntohl(datum.Value) + case nl.TCA_FLOWER_KEY_ETH_SRC: + filter.SrcMac = datum.Value + case nl.TCA_FLOWER_KEY_ETH_DST: + filter.DestMac = datum.Value + case nl.TCA_FLOWER_KEY_VLAN_ID: + filter.VlanId = native.Uint16(datum.Value[0:2]) + filter.EthType = unix.ETH_P_8021Q case nl.TCA_FLOWER_KEY_IP_PROTO: val := new(nl.IPProto) *val = 
nl.IPProto(datum.Value[0]) @@ -405,14 +424,20 @@ func (h *Handle) filterModify(filter Filter, proto, flags int) error { // FilterList gets a list of filters in the system. // Equivalent to: `tc filter show`. +// // Generally returns nothing if link and parent are not specified. +// If the returned error is [ErrDumpInterrupted], results may be inconsistent +// or incomplete. func FilterList(link Link, parent uint32) ([]Filter, error) { return pkgHandle.FilterList(link, parent) } // FilterList gets a list of filters in the system. // Equivalent to: `tc filter show`. +// // Generally returns nothing if link and parent are not specified. +// If the returned error is [ErrDumpInterrupted], results may be inconsistent +// or incomplete. func (h *Handle) FilterList(link Link, parent uint32) ([]Filter, error) { req := h.newNetlinkRequest(unix.RTM_GETTFILTER, unix.NLM_F_DUMP) msg := &nl.TcMsg{ @@ -426,9 +451,9 @@ func (h *Handle) FilterList(link Link, parent uint32) ([]Filter, error) { } req.AddData(msg) - msgs, err := req.Execute(unix.NETLINK_ROUTE, unix.RTM_NEWTFILTER) - if err != nil { - return nil, err + msgs, executeErr := req.Execute(unix.NETLINK_ROUTE, unix.RTM_NEWTFILTER) + if executeErr != nil && !errors.Is(executeErr, ErrDumpInterrupted) { + return nil, executeErr } var res []Filter @@ -516,7 +541,7 @@ func (h *Handle) FilterList(link Link, parent uint32) ([]Filter, error) { } } - return res, nil + return res, executeErr } func toTcGen(attrs *ActionAttrs, tcgen *nl.TcGen) { @@ -616,6 +641,22 @@ func EncodeActions(attr *nl.RtAttr, actions []Action) error { } toTcGen(action.Attrs(), &mirred.TcGen) aopts.AddRtAttr(nl.TCA_MIRRED_PARMS, mirred.Serialize()) + case *VlanAction: + table := attr.AddRtAttr(tabIndex, nil) + tabIndex++ + table.AddRtAttr(nl.TCA_ACT_KIND, nl.ZeroTerminated("vlan")) + aopts := table.AddRtAttr(nl.TCA_ACT_OPTIONS, nil) + vlan := nl.TcVlan{ + Action: int32(action.Action), + } + toTcGen(action.Attrs(), &vlan.TcGen) + 
aopts.AddRtAttr(nl.TCA_VLAN_PARMS, vlan.Serialize()) + if action.Action == TCA_VLAN_ACT_PUSH && action.VlanID == 0 { + return fmt.Errorf("vlan id is required for push action") + } + if action.VlanID != 0 { + aopts.AddRtAttr(nl.TCA_VLAN_PUSH_VLAN_ID, nl.Uint16Attr(action.VlanID)) + } case *TunnelKeyAction: table := attr.AddRtAttr(tabIndex, nil) tabIndex++ @@ -699,6 +740,17 @@ func EncodeActions(attr *nl.RtAttr, actions []Action) error { aopts.AddRtAttr(nl.TCA_ACT_BPF_PARMS, gen.Serialize()) aopts.AddRtAttr(nl.TCA_ACT_BPF_FD, nl.Uint32Attr(uint32(action.Fd))) aopts.AddRtAttr(nl.TCA_ACT_BPF_NAME, nl.ZeroTerminated(action.Name)) + case *SampleAction: + table := attr.AddRtAttr(tabIndex, nil) + tabIndex++ + table.AddRtAttr(nl.TCA_ACT_KIND, nl.ZeroTerminated("sample")) + aopts := table.AddRtAttr(nl.TCA_ACT_OPTIONS, nil) + gen := nl.TcGen{} + toTcGen(action.Attrs(), &gen) + aopts.AddRtAttr(nl.TCA_ACT_SAMPLE_PARMS, gen.Serialize()) + aopts.AddRtAttr(nl.TCA_ACT_SAMPLE_RATE, nl.Uint32Attr(action.Rate)) + aopts.AddRtAttr(nl.TCA_ACT_SAMPLE_PSAMPLE_GROUP, nl.Uint32Attr(action.Group)) + aopts.AddRtAttr(nl.TCA_ACT_SAMPLE_TRUNC_SIZE, nl.Uint32Attr(action.TruncSize)) case *GenericAction: table := attr.AddRtAttr(tabIndex, nil) tabIndex++ @@ -711,6 +763,7 @@ func EncodeActions(attr *nl.RtAttr, actions []Action) error { table := attr.AddRtAttr(tabIndex, nil) tabIndex++ pedit := nl.TcPedit{} + toTcGen(action.Attrs(), &pedit.Sel.TcGen) if action.SrcMacAddr != nil { pedit.SetEthSrc(action.SrcMacAddr) } @@ -784,8 +837,12 @@ func parseActions(tables []syscall.NetlinkRouteAttr) ([]Action, error) { action = &ConnmarkAction{} case "csum": action = &CsumAction{} + case "sample": + action = &SampleAction{} case "gact": action = &GenericAction{} + case "vlan": + action = &VlanAction{} case "tunnel_key": action = &TunnelKeyAction{} case "skbedit": @@ -816,7 +873,17 @@ func parseActions(tables []syscall.NetlinkRouteAttr) ([]Action, error) { tcTs := nl.DeserializeTcf(adatum.Value) actionTimestamp = 
toTimeStamp(tcTs) } - + case "vlan": + switch adatum.Attr.Type { + case nl.TCA_VLAN_PARMS: + vlan := *nl.DeserializeTcVlan(adatum.Value) + action.(*VlanAction).ActionAttrs = ActionAttrs{} + toAttrs(&vlan.TcGen, action.Attrs()) + action.(*VlanAction).Action = VlanAct(vlan.Action) + case nl.TCA_VLAN_PUSH_VLAN_ID: + vlanId := native.Uint16(adatum.Value[0:2]) + action.(*VlanAction).VlanID = vlanId + } case "tunnel_key": switch adatum.Attr.Type { case nl.TCA_TUNNEL_KEY_PARMS: @@ -896,6 +963,18 @@ func parseActions(tables []syscall.NetlinkRouteAttr) ([]Action, error) { tcTs := nl.DeserializeTcf(adatum.Value) actionTimestamp = toTimeStamp(tcTs) } + case "sample": + switch adatum.Attr.Type { + case nl.TCA_ACT_SAMPLE_PARMS: + gen := *nl.DeserializeTcGen(adatum.Value) + toAttrs(&gen, action.Attrs()) + case nl.TCA_ACT_SAMPLE_RATE: + action.(*SampleAction).Rate = native.Uint32(adatum.Value[0:4]) + case nl.TCA_ACT_SAMPLE_PSAMPLE_GROUP: + action.(*SampleAction).Group = native.Uint32(adatum.Value[0:4]) + case nl.TCA_ACT_SAMPLE_TRUNC_SIZE: + action.(*SampleAction).TruncSize = native.Uint32(adatum.Value[0:4]) + } case "gact": switch adatum.Attr.Type { case nl.TCA_GACT_PARMS: @@ -920,9 +999,11 @@ func parseActions(tables []syscall.NetlinkRouteAttr) ([]Action, error) { actionnStatistic = (*ActionStatistic)(s) } } - action.Attrs().Statistics = actionnStatistic - action.Attrs().Timestamp = actionTimestamp - actions = append(actions, action) + if action != nil { + action.Attrs().Statistics = actionnStatistic + action.Attrs().Timestamp = actionTimestamp + actions = append(actions, action) + } } return actions, nil } diff --git a/vendor/github.com/vishvananda/netlink/fou.go b/vendor/github.com/vishvananda/netlink/fou.go index 71e73c37a..ea9f6cf67 100644 --- a/vendor/github.com/vishvananda/netlink/fou.go +++ b/vendor/github.com/vishvananda/netlink/fou.go @@ -1,16 +1,7 @@ package netlink import ( - "errors" -) - -var ( - // ErrAttrHeaderTruncated is returned when a netlink attribute's 
header is - // truncated. - ErrAttrHeaderTruncated = errors.New("attribute header truncated") - // ErrAttrBodyTruncated is returned when a netlink attribute's body is - // truncated. - ErrAttrBodyTruncated = errors.New("attribute body truncated") + "net" ) type Fou struct { @@ -18,4 +9,8 @@ type Fou struct { Port int Protocol int EncapType int + Local net.IP + Peer net.IP + PeerPort int + IfIndex int } diff --git a/vendor/github.com/vishvananda/netlink/fou_linux.go b/vendor/github.com/vishvananda/netlink/fou_linux.go index ed55b2b79..7645a5a5c 100644 --- a/vendor/github.com/vishvananda/netlink/fou_linux.go +++ b/vendor/github.com/vishvananda/netlink/fou_linux.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package netlink @@ -5,6 +6,8 @@ package netlink import ( "encoding/binary" "errors" + "log" + "net" "github.com/vishvananda/netlink/nl" "golang.org/x/sys/unix" @@ -29,6 +32,12 @@ const ( FOU_ATTR_IPPROTO FOU_ATTR_TYPE FOU_ATTR_REMCSUM_NOPARTIAL + FOU_ATTR_LOCAL_V4 + FOU_ATTR_LOCAL_V6 + FOU_ATTR_PEER_V4 + FOU_ATTR_PEER_V6 + FOU_ATTR_PEER_PORT + FOU_ATTR_IFINDEX FOU_ATTR_MAX = FOU_ATTR_REMCSUM_NOPARTIAL ) @@ -128,10 +137,14 @@ func (h *Handle) FouDel(f Fou) error { return nil } +// If the returned error is [ErrDumpInterrupted], results may be inconsistent +// or incomplete. func FouList(fam int) ([]Fou, error) { return pkgHandle.FouList(fam) } +// If the returned error is [ErrDumpInterrupted], results may be inconsistent +// or incomplete. 
func (h *Handle) FouList(fam int) ([]Fou, error) { fam_id, err := FouFamilyId() if err != nil { @@ -150,9 +163,9 @@ func (h *Handle) FouList(fam int) ([]Fou, error) { req.AddRawData(raw) - msgs, err := req.Execute(unix.NETLINK_GENERIC, 0) - if err != nil { - return nil, err + msgs, executeErr := req.Execute(unix.NETLINK_GENERIC, 0) + if executeErr != nil && !errors.Is(executeErr, ErrDumpInterrupted) { + return nil, executeErr } fous := make([]Fou, 0, len(msgs)) @@ -165,45 +178,32 @@ func (h *Handle) FouList(fam int) ([]Fou, error) { fous = append(fous, f) } - return fous, nil + return fous, executeErr } func deserializeFouMsg(msg []byte) (Fou, error) { - // we'll skip to byte 4 to first attribute - msg = msg[3:] - var shift int fou := Fou{} - for { - // attribute header is at least 16 bits - if len(msg) < 4 { - return fou, ErrAttrHeaderTruncated - } - - lgt := int(binary.BigEndian.Uint16(msg[0:2])) - if len(msg) < lgt+4 { - return fou, ErrAttrBodyTruncated - } - attr := binary.BigEndian.Uint16(msg[2:4]) - - shift = lgt + 3 - switch attr { + for attr := range nl.ParseAttributes(msg[4:]) { + switch attr.Type { case FOU_ATTR_AF: - fou.Family = int(msg[5]) + fou.Family = int(attr.Value[0]) case FOU_ATTR_PORT: - fou.Port = int(binary.BigEndian.Uint16(msg[5:7])) - // port is 2 bytes - shift = lgt + 2 + fou.Port = int(networkOrder.Uint16(attr.Value)) case FOU_ATTR_IPPROTO: - fou.Protocol = int(msg[5]) + fou.Protocol = int(attr.Value[0]) case FOU_ATTR_TYPE: - fou.EncapType = int(msg[5]) - } - - msg = msg[shift:] - - if len(msg) < 4 { - break + fou.EncapType = int(attr.Value[0]) + case FOU_ATTR_LOCAL_V4, FOU_ATTR_LOCAL_V6: + fou.Local = net.IP(attr.Value) + case FOU_ATTR_PEER_V4, FOU_ATTR_PEER_V6: + fou.Peer = net.IP(attr.Value) + case FOU_ATTR_PEER_PORT: + fou.PeerPort = int(networkOrder.Uint16(attr.Value)) + case FOU_ATTR_IFINDEX: + fou.IfIndex = int(native.Uint16(attr.Value)) + default: + log.Printf("unknown fou attribute from kernel: %+v %v", attr, attr.Type&nl.NLA_TYPE_MASK) 
} } diff --git a/vendor/github.com/vishvananda/netlink/fou_unspecified.go b/vendor/github.com/vishvananda/netlink/fou_unspecified.go index 3a8365bfe..7e550151a 100644 --- a/vendor/github.com/vishvananda/netlink/fou_unspecified.go +++ b/vendor/github.com/vishvananda/netlink/fou_unspecified.go @@ -1,3 +1,4 @@ +//go:build !linux // +build !linux package netlink diff --git a/vendor/github.com/vishvananda/netlink/genetlink_linux.go b/vendor/github.com/vishvananda/netlink/genetlink_linux.go index 772e5834a..7bdaad97b 100644 --- a/vendor/github.com/vishvananda/netlink/genetlink_linux.go +++ b/vendor/github.com/vishvananda/netlink/genetlink_linux.go @@ -1,6 +1,7 @@ package netlink import ( + "errors" "fmt" "syscall" @@ -126,6 +127,8 @@ func parseFamilies(msgs [][]byte) ([]*GenlFamily, error) { return families, nil } +// If the returned error is [ErrDumpInterrupted], results may be inconsistent +// or incomplete. func (h *Handle) GenlFamilyList() ([]*GenlFamily, error) { msg := &nl.Genlmsg{ Command: nl.GENL_CTRL_CMD_GETFAMILY, @@ -133,13 +136,19 @@ func (h *Handle) GenlFamilyList() ([]*GenlFamily, error) { } req := h.newNetlinkRequest(nl.GENL_ID_CTRL, unix.NLM_F_DUMP) req.AddData(msg) - msgs, err := req.Execute(unix.NETLINK_GENERIC, 0) + msgs, executeErr := req.Execute(unix.NETLINK_GENERIC, 0) + if executeErr != nil && !errors.Is(executeErr, ErrDumpInterrupted) { + return nil, executeErr + } + families, err := parseFamilies(msgs) if err != nil { return nil, err } - return parseFamilies(msgs) + return families, executeErr } +// If the returned error is [ErrDumpInterrupted], results may be inconsistent +// or incomplete. 
func GenlFamilyList() ([]*GenlFamily, error) { return pkgHandle.GenlFamilyList() } diff --git a/vendor/github.com/vishvananda/netlink/gtp_linux.go b/vendor/github.com/vishvananda/netlink/gtp_linux.go index f5e160ba5..377dcae5c 100644 --- a/vendor/github.com/vishvananda/netlink/gtp_linux.go +++ b/vendor/github.com/vishvananda/netlink/gtp_linux.go @@ -1,6 +1,7 @@ package netlink import ( + "errors" "fmt" "net" "strings" @@ -74,6 +75,8 @@ func parsePDP(msgs [][]byte) ([]*PDP, error) { return pdps, nil } +// If the returned error is [ErrDumpInterrupted], results may be inconsistent +// or incomplete. func (h *Handle) GTPPDPList() ([]*PDP, error) { f, err := h.GenlFamilyGet(nl.GENL_GTP_NAME) if err != nil { @@ -85,13 +88,19 @@ } req := h.newNetlinkRequest(int(f.ID), unix.NLM_F_DUMP) req.AddData(msg) - msgs, err := req.Execute(unix.NETLINK_GENERIC, 0) + msgs, executeErr := req.Execute(unix.NETLINK_GENERIC, 0) + if executeErr != nil && !errors.Is(executeErr, ErrDumpInterrupted) { + return nil, executeErr + } + pdps, err := parsePDP(msgs) if err != nil { return nil, err } - return parsePDP(msgs) + return pdps, executeErr } +// If the returned error is [ErrDumpInterrupted], results may be inconsistent +// or incomplete. 
func GTPPDPList() ([]*PDP, error) { return pkgHandle.GTPPDPList() } diff --git a/vendor/github.com/vishvananda/netlink/handle_unspecified.go b/vendor/github.com/vishvananda/netlink/handle_unspecified.go index 3fe03642e..185e67151 100644 --- a/vendor/github.com/vishvananda/netlink/handle_unspecified.go +++ b/vendor/github.com/vishvananda/netlink/handle_unspecified.go @@ -1,3 +1,4 @@ +//go:build !linux // +build !linux package netlink @@ -183,6 +184,10 @@ func (h *Handle) LinkSetGROIPv4MaxSize(link Link, maxSize int) error { return ErrNotImplemented } +func (h *Handle) LinkSetIP6AddrGenMode(link Link, mode int) error { + return ErrNotImplemented +} + func (h *Handle) setProtinfoAttr(link Link, mode bool, attr int) error { return ErrNotImplemented } diff --git a/vendor/github.com/vishvananda/netlink/ioctl_linux.go b/vendor/github.com/vishvananda/netlink/ioctl_linux.go index 4d33db5da..f8da92e21 100644 --- a/vendor/github.com/vishvananda/netlink/ioctl_linux.go +++ b/vendor/github.com/vishvananda/netlink/ioctl_linux.go @@ -86,5 +86,5 @@ func newIocltStringSetReq(linkName string) (*Ifreq, *ethtoolSset) { // getSocketUDP returns file descriptor to new UDP socket // It is used for communication with ioctl interface. 
func getSocketUDP() (int, error) { - return syscall.Socket(unix.AF_INET, unix.SOCK_DGRAM, 0) + return syscall.Socket(unix.AF_INET, unix.SOCK_DGRAM|unix.SOCK_CLOEXEC, 0) } diff --git a/vendor/github.com/vishvananda/netlink/link.go b/vendor/github.com/vishvananda/netlink/link.go index f820cdb67..ef0f6c995 100644 --- a/vendor/github.com/vishvananda/netlink/link.go +++ b/vendor/github.com/vishvananda/netlink/link.go @@ -56,6 +56,8 @@ type LinkAttrs struct { Vfs []VfInfo // virtual functions available on link Group uint32 PermHWAddr net.HardwareAddr + ParentDev string + ParentDevBus string Slave LinkSlave } @@ -346,13 +348,14 @@ type TuntapFlag uint16 // Tuntap links created via /dev/tun/tap, but can be destroyed via netlink type Tuntap struct { LinkAttrs - Mode TuntapMode - Flags TuntapFlag - NonPersist bool - Queues int - Fds []*os.File - Owner uint32 - Group uint32 + Mode TuntapMode + Flags TuntapFlag + NonPersist bool + Queues int + DisabledQueues int + Fds []*os.File + Owner uint32 + Group uint32 } func (tuntap *Tuntap) Attrs() *LinkAttrs { @@ -377,6 +380,13 @@ const ( NETKIT_POLICY_BLACKHOLE NetkitPolicy = 2 ) +type NetkitScrub int + +const ( + NETKIT_SCRUB_NONE NetkitScrub = 0 + NETKIT_SCRUB_DEFAULT NetkitScrub = 1 +) + func (n *Netkit) IsPrimary() bool { return n.isPrimary } @@ -391,6 +401,9 @@ type Netkit struct { Mode NetkitMode Policy NetkitPolicy PeerPolicy NetkitPolicy + Scrub NetkitScrub + PeerScrub NetkitScrub + supportsScrub bool isPrimary bool peerLinkAttrs LinkAttrs } @@ -403,6 +416,10 @@ func (n *Netkit) Type() string { return "netkit" } +func (n *Netkit) SupportsScrub() bool { + return n.supportsScrub +} + // Veth devices must specify PeerName on create type Veth struct { LinkAttrs @@ -761,19 +778,19 @@ const ( ) var bondXmitHashPolicyToString = map[BondXmitHashPolicy]string{ - BOND_XMIT_HASH_POLICY_LAYER2: "layer2", - BOND_XMIT_HASH_POLICY_LAYER3_4: "layer3+4", - BOND_XMIT_HASH_POLICY_LAYER2_3: "layer2+3", - BOND_XMIT_HASH_POLICY_ENCAP2_3: 
"encap2+3", - BOND_XMIT_HASH_POLICY_ENCAP3_4: "encap3+4", + BOND_XMIT_HASH_POLICY_LAYER2: "layer2", + BOND_XMIT_HASH_POLICY_LAYER3_4: "layer3+4", + BOND_XMIT_HASH_POLICY_LAYER2_3: "layer2+3", + BOND_XMIT_HASH_POLICY_ENCAP2_3: "encap2+3", + BOND_XMIT_HASH_POLICY_ENCAP3_4: "encap3+4", BOND_XMIT_HASH_POLICY_VLAN_SRCMAC: "vlan+srcmac", } var StringToBondXmitHashPolicyMap = map[string]BondXmitHashPolicy{ - "layer2": BOND_XMIT_HASH_POLICY_LAYER2, - "layer3+4": BOND_XMIT_HASH_POLICY_LAYER3_4, - "layer2+3": BOND_XMIT_HASH_POLICY_LAYER2_3, - "encap2+3": BOND_XMIT_HASH_POLICY_ENCAP2_3, - "encap3+4": BOND_XMIT_HASH_POLICY_ENCAP3_4, + "layer2": BOND_XMIT_HASH_POLICY_LAYER2, + "layer3+4": BOND_XMIT_HASH_POLICY_LAYER3_4, + "layer2+3": BOND_XMIT_HASH_POLICY_LAYER2_3, + "encap2+3": BOND_XMIT_HASH_POLICY_ENCAP2_3, + "encap3+4": BOND_XMIT_HASH_POLICY_ENCAP3_4, "vlan+srcmac": BOND_XMIT_HASH_POLICY_VLAN_SRCMAC, } @@ -1042,6 +1059,8 @@ type Geneve struct { FlowBased bool InnerProtoInherit bool Df GeneveDf + PortLow int + PortHigh int } func (geneve *Geneve) Attrs() *LinkAttrs { diff --git a/vendor/github.com/vishvananda/netlink/link_linux.go b/vendor/github.com/vishvananda/netlink/link_linux.go index d713612a9..c9ff980c6 100644 --- a/vendor/github.com/vishvananda/netlink/link_linux.go +++ b/vendor/github.com/vishvananda/netlink/link_linux.go @@ -3,6 +3,7 @@ package netlink import ( "bytes" "encoding/binary" + "errors" "fmt" "io/ioutil" "net" @@ -1807,20 +1808,20 @@ func (h *Handle) LinkDel(link Link) error { } func (h *Handle) linkByNameDump(name string) (Link, error) { - links, err := h.LinkList() - if err != nil { - return nil, err + links, executeErr := h.LinkList() + if executeErr != nil && !errors.Is(executeErr, ErrDumpInterrupted) { + return nil, executeErr } for _, link := range links { if link.Attrs().Name == name { - return link, nil + return link, executeErr } // support finding interfaces also via altnames for _, altName := range link.Attrs().AltNames { if altName == name { 
- return link, nil + return link, executeErr } } } @@ -1828,25 +1829,33 @@ func (h *Handle) linkByNameDump(name string) (Link, error) { } func (h *Handle) linkByAliasDump(alias string) (Link, error) { - links, err := h.LinkList() - if err != nil { - return nil, err + links, executeErr := h.LinkList() + if executeErr != nil && !errors.Is(executeErr, ErrDumpInterrupted) { + return nil, executeErr } for _, link := range links { if link.Attrs().Alias == alias { - return link, nil + return link, executeErr } } return nil, LinkNotFoundError{fmt.Errorf("Link alias %s not found", alias)} } // LinkByName finds a link by name and returns a pointer to the object. +// +// If the kernel doesn't support IFLA_IFNAME, this method will fall back to +// filtering a dump of all link names. In this case, if the returned error is +// [ErrDumpInterrupted] the result may be missing or outdated. func LinkByName(name string) (Link, error) { return pkgHandle.LinkByName(name) } // LinkByName finds a link by name and returns a pointer to the object. +// +// If the kernel doesn't support IFLA_IFNAME, this method will fall back to +// filtering a dump of all link names. In this case, if the returned error is +// [ErrDumpInterrupted] the result may be missing or outdated. func (h *Handle) LinkByName(name string) (Link, error) { if h.lookupByDump { return h.linkByNameDump(name) @@ -1879,12 +1888,20 @@ func (h *Handle) LinkByName(name string) (Link, error) { // LinkByAlias finds a link by its alias and returns a pointer to the object. // If there are multiple links with the alias it returns the first one +// +// If the kernel doesn't support IFLA_IFALIAS, this method will fall back to +// filtering a dump of all link names. In this case, if the returned error is +// [ErrDumpInterrupted] the result may be missing or outdated. func LinkByAlias(alias string) (Link, error) { return pkgHandle.LinkByAlias(alias) } // LinkByAlias finds a link by its alias and returns a pointer to the object. 
// If there are multiple links with the alias it returns the first one +// +// If the kernel doesn't support IFLA_IFALIAS, this method will fall back to +// filtering a dump of all link names. In this case, if the returned error is +// [ErrDumpInterrupted] the result may be missing or outdated. func (h *Handle) LinkByAlias(alias string) (Link, error) { if h.lookupByDump { return h.linkByAliasDump(alias) @@ -2246,6 +2263,10 @@ func LinkDeserialize(hdr *unix.NlMsghdr, m []byte) (Link, error) { break } } + case unix.IFLA_PARENT_DEV_NAME: + base.ParentDev = string(attr.Value[:len(attr.Value)-1]) + case unix.IFLA_PARENT_DEV_BUS_NAME: + base.ParentDevBus = string(attr.Value[:len(attr.Value)-1]) } } @@ -2321,6 +2342,9 @@ func LinkList() ([]Link, error) { // LinkList gets a list of link devices. // Equivalent to: `ip link show` +// +// If the returned error is [ErrDumpInterrupted], results may be inconsistent +// or incomplete. func (h *Handle) LinkList() ([]Link, error) { // NOTE(vish): This duplicates functionality in net/iface_linux.go, but we need // to get the message ourselves to parse link type. @@ -2331,9 +2355,9 @@ func (h *Handle) LinkList() ([]Link, error) { attr := nl.NewRtAttr(unix.IFLA_EXT_MASK, nl.Uint32Attr(nl.RTEXT_FILTER_VF)) req.AddData(attr) - msgs, err := req.Execute(unix.NETLINK_ROUTE, unix.RTM_NEWLINK) - if err != nil { - return nil, err + msgs, executeErr := req.Execute(unix.NETLINK_ROUTE, unix.RTM_NEWLINK) + if executeErr != nil && !errors.Is(executeErr, ErrDumpInterrupted) { + return nil, executeErr } var res []Link @@ -2345,7 +2369,7 @@ func (h *Handle) LinkList() ([]Link, error) { res = append(res, link) } - return res, nil + return res, executeErr } // LinkUpdate is used to pass information back from LinkSubscribe() @@ -2381,6 +2405,10 @@ type LinkSubscribeOptions struct { // LinkSubscribeWithOptions work like LinkSubscribe but enable to // provide additional options to modify the behavior. 
Currently, the // namespace can be provided as well as an error callback. +// +// When options.ListExisting is true, options.ErrorCallback may be +// called with [ErrDumpInterrupted] to indicate that results from +// the initial dump of links may be inconsistent or incomplete. func LinkSubscribeWithOptions(ch chan<- LinkUpdate, done <-chan struct{}, options LinkSubscribeOptions) error { if options.Namespace == nil { none := netns.None() @@ -2440,6 +2468,9 @@ func linkSubscribeAt(newNs, curNs netns.NsHandle, ch chan<- LinkUpdate, done <-c continue } for _, m := range msgs { + if m.Header.Flags&unix.NLM_F_DUMP_INTR != 0 && cberr != nil { + cberr(ErrDumpInterrupted) + } if m.Header.Type == unix.NLMSG_DONE { continue } @@ -2639,9 +2670,38 @@ func (h *Handle) LinkSetGroup(link Link, group int) error { return err } +// LinkSetIP6AddrGenMode sets the IPv6 address generation mode of the link device. +// Equivalent to: `ip link set $link addrgenmode $mode` +func LinkSetIP6AddrGenMode(link Link, mode int) error { + return pkgHandle.LinkSetIP6AddrGenMode(link, mode) +} + +// LinkSetIP6AddrGenMode sets the IPv6 address generation mode of the link device. 
+// Equivalent to: `ip link set $link addrgenmode $mode` +func (h *Handle) LinkSetIP6AddrGenMode(link Link, mode int) error { + base := link.Attrs() + h.ensureIndex(base) + req := h.newNetlinkRequest(unix.RTM_SETLINK, unix.NLM_F_ACK) + + msg := nl.NewIfInfomsg(unix.AF_UNSPEC) + msg.Index = int32(base.Index) + req.AddData(msg) + + b := make([]byte, 1) + b[0] = uint8(mode) + + data := nl.NewRtAttr(unix.IFLA_INET6_ADDR_GEN_MODE, b) + af := nl.NewRtAttr(unix.AF_INET6, data.Serialize()) + spec := nl.NewRtAttr(unix.IFLA_AF_SPEC, af.Serialize()) + req.AddData(spec) + + _, err := req.Execute(unix.NETLINK_ROUTE, 0) + return err +} + func addNetkitAttrs(nk *Netkit, linkInfo *nl.RtAttr, flag int) error { - if nk.peerLinkAttrs.HardwareAddr != nil || nk.HardwareAddr != nil { - return fmt.Errorf("netkit doesn't support setting Ethernet") + if nk.Mode != NETKIT_MODE_L2 && (nk.LinkAttrs.HardwareAddr != nil || nk.peerLinkAttrs.HardwareAddr != nil) { + return fmt.Errorf("netkit only allows setting Ethernet in L2 mode") } data := linkInfo.AddRtAttr(nl.IFLA_INFO_DATA, nil) @@ -2649,6 +2709,8 @@ func addNetkitAttrs(nk *Netkit, linkInfo *nl.RtAttr, flag int) error { data.AddRtAttr(nl.IFLA_NETKIT_MODE, nl.Uint32Attr(uint32(nk.Mode))) data.AddRtAttr(nl.IFLA_NETKIT_POLICY, nl.Uint32Attr(uint32(nk.Policy))) data.AddRtAttr(nl.IFLA_NETKIT_PEER_POLICY, nl.Uint32Attr(uint32(nk.PeerPolicy))) + data.AddRtAttr(nl.IFLA_NETKIT_SCRUB, nl.Uint32Attr(uint32(nk.Scrub))) + data.AddRtAttr(nl.IFLA_NETKIT_PEER_SCRUB, nl.Uint32Attr(uint32(nk.PeerScrub))) if (flag & unix.NLM_F_EXCL) == 0 { // Modifying peer link attributes will not take effect @@ -2691,6 +2753,9 @@ func addNetkitAttrs(nk *Netkit, linkInfo *nl.RtAttr, flag int) error { peer.AddRtAttr(unix.IFLA_NET_NS_FD, nl.Uint32Attr(uint32(ns))) } } + if nk.peerLinkAttrs.HardwareAddr != nil { + peer.AddRtAttr(unix.IFLA_ADDRESS, []byte(nk.peerLinkAttrs.HardwareAddr)) + } return nil } @@ -2709,6 +2774,12 @@ func parseNetkitData(link Link, data 
[]syscall.NetlinkRouteAttr) { netkit.Policy = NetkitPolicy(native.Uint32(datum.Value[0:4])) case nl.IFLA_NETKIT_PEER_POLICY: netkit.PeerPolicy = NetkitPolicy(native.Uint32(datum.Value[0:4])) + case nl.IFLA_NETKIT_SCRUB: + netkit.supportsScrub = true + netkit.Scrub = NetkitScrub(native.Uint32(datum.Value[0:4])) + case nl.IFLA_NETKIT_PEER_SCRUB: + netkit.supportsScrub = true + netkit.PeerScrub = NetkitScrub(native.Uint32(datum.Value[0:4])) } } } @@ -2782,7 +2853,7 @@ func parseVxlanData(link Link, data []syscall.NetlinkRouteAttr) { case nl.IFLA_VXLAN_PORT_RANGE: buf := bytes.NewBuffer(datum.Value[0:4]) var pr vxlanPortRange - if binary.Read(buf, binary.BigEndian, &pr) != nil { + if binary.Read(buf, binary.BigEndian, &pr) == nil { vxlan.PortLow = int(pr.Lo) vxlan.PortHigh = int(pr.Hi) } @@ -3006,7 +3077,6 @@ func parseMacvlanData(link Link, data []syscall.NetlinkRouteAttr) { } } -// copied from pkg/net_linux.go func linkFlags(rawFlags uint32) net.Flags { var f net.Flags if rawFlags&unix.IFF_UP != 0 { @@ -3024,9 +3094,16 @@ func linkFlags(rawFlags uint32) net.Flags { if rawFlags&unix.IFF_MULTICAST != 0 { f |= net.FlagMulticast } + if rawFlags&unix.IFF_RUNNING != 0 { + f |= net.FlagRunning + } return f } +type genevePortRange struct { + Lo, Hi uint16 +} + func addGeneveAttrs(geneve *Geneve, linkInfo *nl.RtAttr) { data := linkInfo.AddRtAttr(nl.IFLA_INFO_DATA, nil) @@ -3063,6 +3140,15 @@ func addGeneveAttrs(geneve *Geneve, linkInfo *nl.RtAttr) { data.AddRtAttr(nl.IFLA_GENEVE_TOS, nl.Uint8Attr(geneve.Tos)) } + if geneve.PortLow > 0 || geneve.PortHigh > 0 { + pr := genevePortRange{uint16(geneve.PortLow), uint16(geneve.PortHigh)} + + buf := new(bytes.Buffer) + binary.Write(buf, binary.BigEndian, &pr) + + data.AddRtAttr(nl.IFLA_GENEVE_PORT_RANGE, buf.Bytes()) + } + data.AddRtAttr(nl.IFLA_GENEVE_DF, nl.Uint8Attr(uint8(geneve.Df))) } @@ -3084,6 +3170,13 @@ func parseGeneveData(link Link, data []syscall.NetlinkRouteAttr) { geneve.FlowBased = true case 
nl.IFLA_GENEVE_INNER_PROTO_INHERIT: geneve.InnerProtoInherit = true + case nl.IFLA_GENEVE_PORT_RANGE: + buf := bytes.NewBuffer(datum.Value[0:4]) + var pr genevePortRange + if binary.Read(buf, binary.BigEndian, &pr) == nil { + geneve.PortLow = int(pr.Lo) + geneve.PortHigh = int(pr.Hi) + } } } } @@ -3859,11 +3952,27 @@ func parseTuntapData(link Link, data []syscall.NetlinkRouteAttr) { tuntap.Group = native.Uint32(datum.Value) case nl.IFLA_TUN_TYPE: tuntap.Mode = TuntapMode(uint8(datum.Value[0])) + case nl.IFLA_TUN_PI: + if datum.Value[0] == 0 { + tuntap.Flags |= TUNTAP_NO_PI + } + case nl.IFLA_TUN_VNET_HDR: + if datum.Value[0] == 1 { + tuntap.Flags |= TUNTAP_VNET_HDR + } case nl.IFLA_TUN_PERSIST: tuntap.NonPersist = false if uint8(datum.Value[0]) == 0 { tuntap.NonPersist = true } + case nl.IFLA_TUN_MULTI_QUEUE: + if datum.Value[0] == 1 { + tuntap.Flags |= TUNTAP_MULTI_QUEUE + } + case nl.IFLA_TUN_NUM_QUEUES: + tuntap.Queues = int(native.Uint32(datum.Value)) + case nl.IFLA_TUN_NUM_DISABLED_QUEUES: + tuntap.DisabledQueues = int(native.Uint32(datum.Value)) } } } diff --git a/vendor/github.com/vishvananda/netlink/link_tuntap_linux.go b/vendor/github.com/vishvananda/netlink/link_tuntap_linux.go index 310bd33d8..1a5da82c2 100644 --- a/vendor/github.com/vishvananda/netlink/link_tuntap_linux.go +++ b/vendor/github.com/vishvananda/netlink/link_tuntap_linux.go @@ -1,5 +1,14 @@ package netlink +import ( + "fmt" + "os" + "strings" + "syscall" + + "golang.org/x/sys/unix" +) + // ideally golang.org/x/sys/unix would define IfReq but it only has // IFNAMSIZ, hence this minimalistic implementation const ( @@ -7,8 +16,136 @@ const ( IFNAMSIZ = 16 ) +const TUN = "/dev/net/tun" + type ifReq struct { Name [IFNAMSIZ]byte Flags uint16 pad [SizeOfIfReq - IFNAMSIZ - 2]byte } + +// AddQueues opens and attaches multiple queue file descriptors to an existing +// TUN/TAP interface in multi-queue mode. 
+// +// It performs TUNSETIFF ioctl on each opened file descriptor with the current +// tuntap configuration. Each resulting fd is set to non-blocking mode and +// returned as *os.File. +// +// If the interface was created with a name pattern (e.g. "tap%d"), +// the first successful TUNSETIFF call will return the resolved name, +// which is saved back into tuntap.Name. +// +// This method assumes that the interface already exists and is in multi-queue mode. +// The returned FDs are also appended to tuntap.Fds and tuntap.Queues is updated. +// +// It is the caller's responsibility to close the FDs when they are no longer needed. +func (tuntap *Tuntap) AddQueues(count int) ([]*os.File, error) { + if tuntap.Mode < unix.IFF_TUN || tuntap.Mode > unix.IFF_TAP { + return nil, fmt.Errorf("Tuntap.Mode %v unknown", tuntap.Mode) + } + if tuntap.Flags&TUNTAP_MULTI_QUEUE == 0 { + return nil, fmt.Errorf("TUNTAP_MULTI_QUEUE not set") + } + if count < 1 { + return nil, fmt.Errorf("count must be >= 1") + } + + req, err := unix.NewIfreq(tuntap.Name) + if err != nil { + return nil, err + } + req.SetUint16(uint16(tuntap.Mode) | uint16(tuntap.Flags)) + + var fds []*os.File + for i := 0; i < count; i++ { + localReq := req + fd, err := unix.Open(TUN, os.O_RDWR|syscall.O_CLOEXEC, 0) + if err != nil { + cleanupFds(fds) + return nil, err + } + + err = unix.IoctlIfreq(fd, unix.TUNSETIFF, req) + if err != nil { + // close the new fd + unix.Close(fd) + // and the already opened ones + cleanupFds(fds) + return nil, fmt.Errorf("tuntap IOCTL TUNSETIFF failed [%d]: %w", i, err) + } + + // Set the tun device to non-blocking before use. The below comment + // taken from: + // + // https://github.com/mistsys/tuntap/commit/161418c25003bbee77d085a34af64d189df62bea + // + // Note there is a complication because in go, if a device node is + // opened, go sets it to use nonblocking I/O. However a /dev/net/tun + // doesn't work with epoll until after the TUNSETIFF ioctl has been + // done. 
So we open the unix fd directly, do the ioctl, then put the + // fd in nonblocking mode, an then finally wrap it in a os.File, + // which will see the nonblocking mode and add the fd to the + // pollable set, so later on when we Read() from it blocked the + // calling thread in the kernel. + // + // See + // https://github.com/golang/go/issues/30426 + // which got exposed in go 1.13 by the fix to + // https://github.com/golang/go/issues/30624 + err = unix.SetNonblock(fd, true) + if err != nil { + cleanupFds(fds) + return nil, fmt.Errorf("tuntap set to non-blocking failed [%d]: %w", i, err) + } + + // create the file from the file descriptor and store it + file := os.NewFile(uintptr(fd), TUN) + fds = append(fds, file) + + // 1) we only care for the name of the first tap in the multi queue set + // 2) if the original name was empty, the localReq has now the actual name + // + // In addition: + // This ensures that the link name is always identical to what the kernel returns. + // Not only in case of an empty name, but also when using name templates. + // e.g. when the provided name is "tap%d", the kernel replaces %d with the next available number. + if i == 0 { + tuntap.Name = strings.Trim(localReq.Name(), "\x00") + } + } + + tuntap.Fds = append(tuntap.Fds, fds...) + tuntap.Queues = len(tuntap.Fds) + return fds, nil +} + +// RemoveQueues closes the given TAP queue file descriptors and removes them +// from the tuntap.Fds list. +// +// This is a logical counterpart to AddQueues and allows releasing specific queues +// (e.g., to simulate queue failure or perform partial detach). +// +// The method updates tuntap.Queues to reflect the number of remaining active queues. +// +// It is safe to call with a subset of tuntap.Fds, but the caller must ensure +// that the passed *os.File descriptors belong to this interface. 
+func (tuntap *Tuntap) RemoveQueues(fds ...*os.File) error { + toClose := make(map[uintptr]struct{}, len(fds)) + for _, fd := range fds { + toClose[fd.Fd()] = struct{}{} + } + + var newFds []*os.File + for _, fd := range tuntap.Fds { + if _, shouldClose := toClose[fd.Fd()]; shouldClose { + if err := fd.Close(); err != nil { + return fmt.Errorf("failed to close queue fd %d: %w", fd.Fd(), err) + } + tuntap.Queues-- + } else { + newFds = append(newFds, fd) + } + } + tuntap.Fds = newFds + return nil +} diff --git a/vendor/github.com/vishvananda/netlink/neigh.go b/vendor/github.com/vishvananda/netlink/neigh.go index 32d722e88..a96e5846e 100644 --- a/vendor/github.com/vishvananda/netlink/neigh.go +++ b/vendor/github.com/vishvananda/netlink/neigh.go @@ -19,6 +19,14 @@ type Neigh struct { Vlan int VNI int MasterIndex int + + // These values are expressed as "clock ticks ago". To + // convert these clock ticks to seconds divide by sysconf(_SC_CLK_TCK). + // When _SC_CLK_TCK is 100, for example, the ndm_* times are expressed + // in centiseconds. + Confirmed uint32 // The last time ARP/ND succeeded OR higher layer confirmation was received + Used uint32 // The last time ARP/ND took place for this neighbor + Updated uint32 // The time when the current NUD state was entered } // String returns $ip/$hwaddr $label diff --git a/vendor/github.com/vishvananda/netlink/neigh_linux.go b/vendor/github.com/vishvananda/netlink/neigh_linux.go index 2d93044a6..f4dd83532 100644 --- a/vendor/github.com/vishvananda/netlink/neigh_linux.go +++ b/vendor/github.com/vishvananda/netlink/neigh_linux.go @@ -1,6 +1,7 @@ package netlink import ( + "errors" "fmt" "net" "syscall" @@ -206,6 +207,9 @@ func neighHandle(neigh *Neigh, req *nl.NetlinkRequest) error { // NeighList returns a list of IP-MAC mappings in the system (ARP table). // Equivalent to: `ip neighbor show`. // The list can be filtered by link and ip family. 
+// +// If the returned error is [ErrDumpInterrupted], results may be inconsistent +// or incomplete. func NeighList(linkIndex, family int) ([]Neigh, error) { return pkgHandle.NeighList(linkIndex, family) } @@ -213,6 +217,9 @@ func NeighList(linkIndex, family int) ([]Neigh, error) { // NeighProxyList returns a list of neighbor proxies in the system. // Equivalent to: `ip neighbor show proxy`. // The list can be filtered by link and ip family. +// +// If the returned error is [ErrDumpInterrupted], results may be inconsistent +// or incomplete. func NeighProxyList(linkIndex, family int) ([]Neigh, error) { return pkgHandle.NeighProxyList(linkIndex, family) } @@ -220,6 +227,9 @@ func NeighProxyList(linkIndex, family int) ([]Neigh, error) { // NeighList returns a list of IP-MAC mappings in the system (ARP table). // Equivalent to: `ip neighbor show`. // The list can be filtered by link and ip family. +// +// If the returned error is [ErrDumpInterrupted], results may be inconsistent +// or incomplete. func (h *Handle) NeighList(linkIndex, family int) ([]Neigh, error) { return h.NeighListExecute(Ndmsg{ Family: uint8(family), @@ -230,6 +240,9 @@ func (h *Handle) NeighList(linkIndex, family int) ([]Neigh, error) { // NeighProxyList returns a list of neighbor proxies in the system. // Equivalent to: `ip neighbor show proxy`. // The list can be filtered by link, ip family. +// +// If the returned error is [ErrDumpInterrupted], results may be inconsistent +// or incomplete. func (h *Handle) NeighProxyList(linkIndex, family int) ([]Neigh, error) { return h.NeighListExecute(Ndmsg{ Family: uint8(family), @@ -239,18 +252,24 @@ func (h *Handle) NeighProxyList(linkIndex, family int) ([]Neigh, error) { } // NeighListExecute returns a list of neighbour entries filtered by link, ip family, flag and state. +// +// If the returned error is [ErrDumpInterrupted], results may be inconsistent +// or incomplete. 
func NeighListExecute(msg Ndmsg) ([]Neigh, error) { return pkgHandle.NeighListExecute(msg) } // NeighListExecute returns a list of neighbour entries filtered by link, ip family, flag and state. +// +// If the returned error is [ErrDumpInterrupted], results may be inconsistent +// or incomplete. func (h *Handle) NeighListExecute(msg Ndmsg) ([]Neigh, error) { req := h.newNetlinkRequest(unix.RTM_GETNEIGH, unix.NLM_F_DUMP) req.AddData(&msg) - msgs, err := req.Execute(unix.NETLINK_ROUTE, unix.RTM_NEWNEIGH) - if err != nil { - return nil, err + msgs, executeErr := req.Execute(unix.NETLINK_ROUTE, unix.RTM_NEWNEIGH) + if executeErr != nil && !errors.Is(executeErr, ErrDumpInterrupted) { + return nil, executeErr } var res []Neigh @@ -281,7 +300,7 @@ func (h *Handle) NeighListExecute(msg Ndmsg) ([]Neigh, error) { res = append(res, *neigh) } - return res, nil + return res, executeErr } func NeighDeserialize(m []byte) (*Neigh, error) { @@ -330,6 +349,10 @@ func NeighDeserialize(m []byte) (*Neigh, error) { neigh.VNI = int(native.Uint32(attr.Value[0:4])) case NDA_MASTER: neigh.MasterIndex = int(native.Uint32(attr.Value[0:4])) + case NDA_CACHEINFO: + neigh.Confirmed = native.Uint32(attr.Value[0:4]) + neigh.Used = native.Uint32(attr.Value[4:8]) + neigh.Updated = native.Uint32(attr.Value[8:12]) } } @@ -364,6 +387,10 @@ type NeighSubscribeOptions struct { // NeighSubscribeWithOptions work like NeighSubscribe but enable to // provide additional options to modify the behavior. Currently, the // namespace can be provided as well as an error callback. +// +// When options.ListExisting is true, options.ErrorCallback may be +// called with [ErrDumpInterrupted] to indicate that results from +// the initial dump of links may be inconsistent or incomplete. 
func NeighSubscribeWithOptions(ch chan<- NeighUpdate, done <-chan struct{}, options NeighSubscribeOptions) error { if options.Namespace == nil { none := netns.None() @@ -428,6 +455,9 @@ func neighSubscribeAt(newNs, curNs netns.NsHandle, ch chan<- NeighUpdate, done < continue } for _, m := range msgs { + if m.Header.Flags&unix.NLM_F_DUMP_INTR != 0 && cberr != nil { + cberr(ErrDumpInterrupted) + } if m.Header.Type == unix.NLMSG_DONE { if listExisting { // This will be called after handling AF_UNSPEC diff --git a/vendor/github.com/vishvananda/netlink/netlink_linux.go b/vendor/github.com/vishvananda/netlink/netlink_linux.go index a20d293d8..7416e3051 100644 --- a/vendor/github.com/vishvananda/netlink/netlink_linux.go +++ b/vendor/github.com/vishvananda/netlink/netlink_linux.go @@ -9,3 +9,6 @@ const ( FAMILY_V6 = nl.FAMILY_V6 FAMILY_MPLS = nl.FAMILY_MPLS ) + +// ErrDumpInterrupted is an alias for [nl.ErrDumpInterrupted]. +var ErrDumpInterrupted = nl.ErrDumpInterrupted diff --git a/vendor/github.com/vishvananda/netlink/netlink_unspecified.go b/vendor/github.com/vishvananda/netlink/netlink_unspecified.go index da12c42a5..9961e158a 100644 --- a/vendor/github.com/vishvananda/netlink/netlink_unspecified.go +++ b/vendor/github.com/vishvananda/netlink/netlink_unspecified.go @@ -1,3 +1,4 @@ +//go:build !linux // +build !linux package netlink @@ -144,6 +145,10 @@ func LinkSetGROIPv4MaxSize(link Link, maxSize int) error { return ErrNotImplemented } +func LinkSetIP6AddrGenMode(link Link, mode int) error { + return ErrNotImplemented +} + func LinkAdd(link Link) error { return ErrNotImplemented } diff --git a/vendor/github.com/vishvananda/netlink/nl/link_linux.go b/vendor/github.com/vishvananda/netlink/nl/link_linux.go index 0b5be470c..2925e8a22 100644 --- a/vendor/github.com/vishvananda/netlink/nl/link_linux.go +++ b/vendor/github.com/vishvananda/netlink/nl/link_linux.go @@ -38,6 +38,8 @@ const ( IFLA_NETKIT_POLICY IFLA_NETKIT_PEER_POLICY IFLA_NETKIT_MODE + IFLA_NETKIT_SCRUB + 
IFLA_NETKIT_PEER_SCRUB IFLA_NETKIT_MAX = IFLA_NETKIT_MODE ) @@ -232,6 +234,7 @@ const ( IFLA_GENEVE_TTL_INHERIT IFLA_GENEVE_DF IFLA_GENEVE_INNER_PROTO_INHERIT + IFLA_GENEVE_PORT_RANGE IFLA_GENEVE_MAX = IFLA_GENEVE_INNER_PROTO_INHERIT ) @@ -816,3 +819,10 @@ const ( IFLA_BAREUDP_MULTIPROTO_MODE IFLA_BAREUDP_MAX = IFLA_BAREUDP_MULTIPROTO_MODE ) + +const ( + IN6_ADDR_GEN_MODE_EUI64 = iota + IN6_ADDR_GEN_MODE_NONE + IN6_ADDR_GEN_MODE_STABLE_PRIVACY + IN6_ADDR_GEN_MODE_RANDOM +) diff --git a/vendor/github.com/vishvananda/netlink/nl/nl_linux.go b/vendor/github.com/vishvananda/netlink/nl/nl_linux.go index 6cecc4517..f2dc7abb8 100644 --- a/vendor/github.com/vishvananda/netlink/nl/nl_linux.go +++ b/vendor/github.com/vishvananda/netlink/nl/nl_linux.go @@ -4,6 +4,7 @@ package nl import ( "bytes" "encoding/binary" + "errors" "fmt" "net" "os" @@ -11,6 +12,7 @@ import ( "sync" "sync/atomic" "syscall" + "time" "unsafe" "github.com/vishvananda/netns" @@ -43,6 +45,26 @@ var SocketTimeoutTv = unix.Timeval{Sec: 60, Usec: 0} // ErrorMessageReporting is the default error message reporting configuration for the new netlink sockets var EnableErrorMessageReporting bool = false +// ErrDumpInterrupted is an instance of errDumpInterrupted, used to report that +// a netlink function has set the NLM_F_DUMP_INTR flag in a response message, +// indicating that the results may be incomplete or inconsistent. +var ErrDumpInterrupted = errDumpInterrupted{} + +// errDumpInterrupted is an error type, used to report that NLM_F_DUMP_INTR was +// set in a netlink response. +type errDumpInterrupted struct{} + +func (errDumpInterrupted) Error() string { + return "results may be incomplete or inconsistent" +} + +// Before errDumpInterrupted was introduced, EINTR was returned when a netlink +// response had NLM_F_DUMP_INTR. Retain backward compatibility with code that +// may be checking for EINTR using Is. 
+func (e errDumpInterrupted) Is(target error) bool { + return target == unix.EINTR +} + // GetIPFamily returns the family type of a net.IP. func GetIPFamily(ip net.IP) int { if len(ip) <= net.IPv4len { @@ -492,22 +514,26 @@ func (req *NetlinkRequest) AddRawData(data []byte) { // Execute the request against the given sockType. // Returns a list of netlink messages in serialized format, optionally filtered // by resType. +// If the returned error is [ErrDumpInterrupted], results may be inconsistent +// or incomplete. func (req *NetlinkRequest) Execute(sockType int, resType uint16) ([][]byte, error) { var res [][]byte err := req.ExecuteIter(sockType, resType, func(msg []byte) bool { res = append(res, msg) return true }) - if err != nil { + if err != nil && !errors.Is(err, ErrDumpInterrupted) { return nil, err } - return res, nil + return res, err } // ExecuteIter executes the request against the given sockType. // Calls the provided callback func once for each netlink message. // If the callback returns false, it is not called again, but // the remaining messages are consumed/discarded. +// If the returned error is [ErrDumpInterrupted], results may be inconsistent +// or incomplete. 
// // Thread safety: ExecuteIter holds a lock on the socket until // it finishes iteration so the callback must not call back into @@ -559,6 +585,8 @@ func (req *NetlinkRequest) ExecuteIter(sockType int, resType uint16, f func(msg return err } + dumpIntr := false + done: for { msgs, from, err := s.Receive() @@ -580,7 +608,7 @@ done: } if m.Header.Flags&unix.NLM_F_DUMP_INTR != 0 { - return syscall.Errno(unix.EINTR) + dumpIntr = true } if m.Header.Type == unix.NLMSG_DONE || m.Header.Type == unix.NLMSG_ERROR { @@ -634,6 +662,9 @@ done: } } } + if dumpIntr { + return ErrDumpInterrupted + } return nil } @@ -656,9 +687,11 @@ func NewNetlinkRequest(proto, flags int) *NetlinkRequest { } type NetlinkSocket struct { - fd int32 - file *os.File - lsa unix.SockaddrNetlink + fd int32 + file *os.File + lsa unix.SockaddrNetlink + sendTimeout int64 // Access using atomic.Load/StoreInt64 + receiveTimeout int64 // Access using atomic.Load/StoreInt64 sync.Mutex } @@ -756,7 +789,7 @@ func executeInNetns(newNs, curNs netns.NsHandle) (func(), error) { // Returns the netlink socket on which Receive() method can be called // to retrieve the messages from the kernel. 
func Subscribe(protocol int, groups ...uint) (*NetlinkSocket, error) { - fd, err := unix.Socket(unix.AF_NETLINK, unix.SOCK_RAW, protocol) + fd, err := unix.Socket(unix.AF_NETLINK, unix.SOCK_RAW|unix.SOCK_CLOEXEC, protocol) if err != nil { return nil, err } @@ -802,8 +835,44 @@ func (s *NetlinkSocket) GetFd() int { return int(s.fd) } +func (s *NetlinkSocket) GetTimeouts() (send, receive time.Duration) { + return time.Duration(atomic.LoadInt64(&s.sendTimeout)), + time.Duration(atomic.LoadInt64(&s.receiveTimeout)) +} + func (s *NetlinkSocket) Send(request *NetlinkRequest) error { - return unix.Sendto(int(s.fd), request.Serialize(), 0, &s.lsa) + rawConn, err := s.file.SyscallConn() + if err != nil { + return err + } + var ( + deadline time.Time + innerErr error + ) + sendTimeout := atomic.LoadInt64(&s.sendTimeout) + if sendTimeout != 0 { + deadline = time.Now().Add(time.Duration(sendTimeout)) + } + if err := s.file.SetWriteDeadline(deadline); err != nil { + return err + } + serializedReq := request.Serialize() + err = rawConn.Write(func(fd uintptr) (done bool) { + innerErr = unix.Sendto(int(s.fd), serializedReq, 0, &s.lsa) + return innerErr != unix.EWOULDBLOCK + }) + if innerErr != nil { + return innerErr + } + if err != nil { + // The timeout was previously implemented using SO_SNDTIMEO on a blocking + // socket. So, continue to return EAGAIN when the timeout is reached. 
+ if errors.Is(err, os.ErrDeadlineExceeded) { + return unix.EAGAIN + } + return err + } + return nil } func (s *NetlinkSocket) Receive() ([]syscall.NetlinkMessage, *unix.SockaddrNetlink, error) { @@ -812,20 +881,33 @@ func (s *NetlinkSocket) Receive() ([]syscall.NetlinkMessage, *unix.SockaddrNetli return nil, nil, err } var ( + deadline time.Time fromAddr *unix.SockaddrNetlink rb [RECEIVE_BUFFER_SIZE]byte nr int from unix.Sockaddr innerErr error ) + receiveTimeout := atomic.LoadInt64(&s.receiveTimeout) + if receiveTimeout != 0 { + deadline = time.Now().Add(time.Duration(receiveTimeout)) + } + if err := s.file.SetReadDeadline(deadline); err != nil { + return nil, nil, err + } err = rawConn.Read(func(fd uintptr) (done bool) { nr, from, innerErr = unix.Recvfrom(int(fd), rb[:], 0) return innerErr != unix.EWOULDBLOCK }) if innerErr != nil { - err = innerErr + return nil, nil, innerErr } if err != nil { + // The timeout was previously implemented using SO_RCVTIMEO on a blocking + // socket. So, continue to return EAGAIN when the timeout is reached. 
+ if errors.Is(err, os.ErrDeadlineExceeded) { + return nil, nil, unix.EAGAIN + } return nil, nil, err } fromAddr, ok := from.(*unix.SockaddrNetlink) @@ -847,16 +929,14 @@ func (s *NetlinkSocket) Receive() ([]syscall.NetlinkMessage, *unix.SockaddrNetli // SetSendTimeout allows to set a send timeout on the socket func (s *NetlinkSocket) SetSendTimeout(timeout *unix.Timeval) error { - // Set a send timeout of SOCKET_SEND_TIMEOUT, this will allow the Send to periodically unblock and avoid that a routine - // remains stuck on a send on a closed fd - return unix.SetsockoptTimeval(int(s.fd), unix.SOL_SOCKET, unix.SO_SNDTIMEO, timeout) + atomic.StoreInt64(&s.sendTimeout, timeout.Nano()) + return nil } // SetReceiveTimeout allows to set a receive timeout on the socket func (s *NetlinkSocket) SetReceiveTimeout(timeout *unix.Timeval) error { - // Set a read timeout of SOCKET_READ_TIMEOUT, this will allow the Read to periodically unblock and avoid that a routine - // remains stuck on a recvmsg on a closed fd - return unix.SetsockoptTimeval(int(s.fd), unix.SOL_SOCKET, unix.SO_RCVTIMEO, timeout) + atomic.StoreInt64(&s.receiveTimeout, timeout.Nano()) + return nil } // SetReceiveBufferSize allows to set a receive buffer size on the socket diff --git a/vendor/github.com/vishvananda/netlink/nl/parse_attr_linux.go b/vendor/github.com/vishvananda/netlink/nl/parse_attr_linux.go index 7f49125cf..8ee0428db 100644 --- a/vendor/github.com/vishvananda/netlink/nl/parse_attr_linux.go +++ b/vendor/github.com/vishvananda/netlink/nl/parse_attr_linux.go @@ -17,7 +17,7 @@ func ParseAttributes(data []byte) <-chan Attribute { go func() { i := 0 - for i+4 < len(data) { + for i+4 <= len(data) { length := int(native.Uint16(data[i : i+2])) attrType := native.Uint16(data[i+2 : i+4]) diff --git a/vendor/github.com/vishvananda/netlink/nl/seg6local_linux.go b/vendor/github.com/vishvananda/netlink/nl/seg6local_linux.go index 8172b8471..b92991de7 100644 --- 
a/vendor/github.com/vishvananda/netlink/nl/seg6local_linux.go +++ b/vendor/github.com/vishvananda/netlink/nl/seg6local_linux.go @@ -13,6 +13,7 @@ const ( SEG6_LOCAL_IIF SEG6_LOCAL_OIF SEG6_LOCAL_BPF + SEG6_LOCAL_VRFTABLE __SEG6_LOCAL_MAX ) const ( diff --git a/vendor/github.com/vishvananda/netlink/nl/tc_linux.go b/vendor/github.com/vishvananda/netlink/nl/tc_linux.go index 0720729a9..50f4a6d09 100644 --- a/vendor/github.com/vishvananda/netlink/nl/tc_linux.go +++ b/vendor/github.com/vishvananda/netlink/nl/tc_linux.go @@ -77,6 +77,17 @@ const ( TCA_ACT_MAX ) +const ( + TCA_ACT_SAMPLE_UNSPEC = iota + TCA_ACT_SAMPLE_TM + TCA_ACT_SAMPLE_PARMS + TCA_ACT_SAMPLE_RATE + TCA_ACT_SAMPLE_TRUNC_SIZE + TCA_ACT_SAMPLE_PSAMPLE_GROUP + TCA_ACT_SAMPLE_PAD + TCA_ACT_SAMPLE_MAX +) + const ( TCA_PRIO_UNSPEC = iota TCA_PRIO_MQ @@ -115,6 +126,7 @@ const ( SizeofTcConnmark = SizeofTcGen + 0x04 SizeofTcCsum = SizeofTcGen + 0x04 SizeofTcMirred = SizeofTcGen + 0x08 + SizeofTcVlan = SizeofTcGen + 0x04 SizeofTcTunnelKey = SizeofTcGen + 0x04 SizeofTcSkbEdit = SizeofTcGen SizeofTcPolice = 2*SizeofTcRateSpec + 0x20 @@ -816,6 +828,41 @@ func (x *TcMirred) Serialize() []byte { return (*(*[SizeofTcMirred]byte)(unsafe.Pointer(x)))[:] } +const ( + TCA_VLAN_UNSPEC = iota + TCA_VLAN_TM + TCA_VLAN_PARMS + TCA_VLAN_PUSH_VLAN_ID + TCA_VLAN_PUSH_VLAN_PROTOCOL + TCA_VLAN_PAD + TCA_VLAN_PUSH_VLAN_PRIORITY + TCA_VLAN_PUSH_ETH_DST + TCA_VLAN_PUSH_ETH_SRC + TCA_VLAN_MAX +) + +//struct tc_vlan { +// tc_gen; +// int v_action; +//}; + +type TcVlan struct { + TcGen + Action int32 +} + +func (msg *TcVlan) Len() int { + return SizeofTcVlan +} + +func DeserializeTcVlan(b []byte) *TcVlan { + return (*TcVlan)(unsafe.Pointer(&b[0:SizeofTcVlan][0])) +} + +func (x *TcVlan) Serialize() []byte { + return (*(*[SizeofTcVlan]byte)(unsafe.Pointer(x)))[:] +} + const ( TCA_TUNNEL_KEY_UNSPEC = iota TCA_TUNNEL_KEY_TM @@ -1239,8 +1286,8 @@ const ( ) // /* TCA_PEDIT_KEY_EX_HDR_TYPE_NETWROK is a special case for legacy users. 
It -// * means no specific header type - offset is relative to the network layer -// */ +// - means no specific header type - offset is relative to the network layer +// */ type PeditHeaderType uint16 const ( diff --git a/vendor/github.com/vishvananda/netlink/nl/xfrm_linux.go b/vendor/github.com/vishvananda/netlink/nl/xfrm_linux.go index cdb318ba5..6cfd8f9e0 100644 --- a/vendor/github.com/vishvananda/netlink/nl/xfrm_linux.go +++ b/vendor/github.com/vishvananda/netlink/nl/xfrm_linux.go @@ -78,10 +78,14 @@ const ( XFRMA_PROTO /* __u8 */ XFRMA_ADDRESS_FILTER /* struct xfrm_address_filter */ XFRMA_PAD - XFRMA_OFFLOAD_DEV /* struct xfrm_state_offload */ - XFRMA_SET_MARK /* __u32 */ - XFRMA_SET_MARK_MASK /* __u32 */ - XFRMA_IF_ID /* __u32 */ + XFRMA_OFFLOAD_DEV /* struct xfrm_state_offload */ + XFRMA_SET_MARK /* __u32 */ + XFRMA_SET_MARK_MASK /* __u32 */ + XFRMA_IF_ID /* __u32 */ + XFRMA_MTIMER_THRESH /* __u32 in seconds for input SA */ + XFRMA_SA_DIR /* __u8 */ + XFRMA_NAT_KEEPALIVE_INTERVAL /* __u32 in seconds for NAT keepalive */ + XFRMA_SA_PCPU /* __u32 */ XFRMA_MAX = iota - 1 ) diff --git a/vendor/github.com/vishvananda/netlink/protinfo_linux.go b/vendor/github.com/vishvananda/netlink/protinfo_linux.go index 1ba25d3cd..aa51e3b47 100644 --- a/vendor/github.com/vishvananda/netlink/protinfo_linux.go +++ b/vendor/github.com/vishvananda/netlink/protinfo_linux.go @@ -1,6 +1,7 @@ package netlink import ( + "errors" "fmt" "syscall" @@ -8,10 +9,14 @@ import ( "golang.org/x/sys/unix" ) +// If the returned error is [ErrDumpInterrupted], results may be inconsistent +// or incomplete. func LinkGetProtinfo(link Link) (Protinfo, error) { return pkgHandle.LinkGetProtinfo(link) } +// If the returned error is [ErrDumpInterrupted], results may be inconsistent +// or incomplete. 
func (h *Handle) LinkGetProtinfo(link Link) (Protinfo, error) { base := link.Attrs() h.ensureIndex(base) @@ -19,9 +24,9 @@ func (h *Handle) LinkGetProtinfo(link Link) (Protinfo, error) { req := h.newNetlinkRequest(unix.RTM_GETLINK, unix.NLM_F_DUMP) msg := nl.NewIfInfomsg(unix.AF_BRIDGE) req.AddData(msg) - msgs, err := req.Execute(unix.NETLINK_ROUTE, 0) - if err != nil { - return pi, err + msgs, executeErr := req.Execute(unix.NETLINK_ROUTE, 0) + if executeErr != nil && !errors.Is(executeErr, ErrDumpInterrupted) { + return pi, executeErr } for _, m := range msgs { @@ -43,7 +48,7 @@ func (h *Handle) LinkGetProtinfo(link Link) (Protinfo, error) { } pi = parseProtinfo(infos) - return pi, nil + return pi, executeErr } } return pi, fmt.Errorf("Device with index %d not found", base.Index) diff --git a/vendor/github.com/vishvananda/netlink/qdisc_linux.go b/vendor/github.com/vishvananda/netlink/qdisc_linux.go index e732ae3bd..22cf0e582 100644 --- a/vendor/github.com/vishvananda/netlink/qdisc_linux.go +++ b/vendor/github.com/vishvananda/netlink/qdisc_linux.go @@ -1,6 +1,7 @@ package netlink import ( + "errors" "fmt" "io/ioutil" "strconv" @@ -338,6 +339,9 @@ func qdiscPayload(req *nl.NetlinkRequest, qdisc Qdisc) error { // QdiscList gets a list of qdiscs in the system. // Equivalent to: `tc qdisc show`. // The list can be filtered by link. +// +// If the returned error is [ErrDumpInterrupted], results may be inconsistent +// or incomplete. func QdiscList(link Link) ([]Qdisc, error) { return pkgHandle.QdiscList(link) } @@ -345,6 +349,9 @@ func QdiscList(link Link) ([]Qdisc, error) { // QdiscList gets a list of qdiscs in the system. // Equivalent to: `tc qdisc show`. // The list can be filtered by link. +// +// If the returned error is [ErrDumpInterrupted], results may be inconsistent +// or incomplete. 
func (h *Handle) QdiscList(link Link) ([]Qdisc, error) { req := h.newNetlinkRequest(unix.RTM_GETQDISC, unix.NLM_F_DUMP) index := int32(0) @@ -359,9 +366,9 @@ func (h *Handle) QdiscList(link Link) ([]Qdisc, error) { } req.AddData(msg) - msgs, err := req.Execute(unix.NETLINK_ROUTE, unix.RTM_NEWQDISC) - if err != nil { - return nil, err + msgs, executeErr := req.Execute(unix.NETLINK_ROUTE, unix.RTM_NEWQDISC) + if executeErr != nil && !errors.Is(executeErr, ErrDumpInterrupted) { + return nil, executeErr } var res []Qdisc @@ -497,7 +504,7 @@ func (h *Handle) QdiscList(link Link) ([]Qdisc, error) { res = append(res, qdisc) } - return res, nil + return res, executeErr } func parsePfifoFastData(qdisc Qdisc, value []byte) error { diff --git a/vendor/github.com/vishvananda/netlink/rdma_link_linux.go b/vendor/github.com/vishvananda/netlink/rdma_link_linux.go index 036399db6..9bb750732 100644 --- a/vendor/github.com/vishvananda/netlink/rdma_link_linux.go +++ b/vendor/github.com/vishvananda/netlink/rdma_link_linux.go @@ -3,6 +3,7 @@ package netlink import ( "bytes" "encoding/binary" + "errors" "fmt" "net" @@ -85,19 +86,25 @@ func execRdmaSetLink(req *nl.NetlinkRequest) error { // RdmaLinkList gets a list of RDMA link devices. // Equivalent to: `rdma dev show` +// +// If the returned error is [ErrDumpInterrupted], results may be inconsistent +// or incomplete. func RdmaLinkList() ([]*RdmaLink, error) { return pkgHandle.RdmaLinkList() } // RdmaLinkList gets a list of RDMA link devices. // Equivalent to: `rdma dev show` +// +// If the returned error is [ErrDumpInterrupted], results may be inconsistent +// or incomplete. 
func (h *Handle) RdmaLinkList() ([]*RdmaLink, error) { proto := getProtoField(nl.RDMA_NL_NLDEV, nl.RDMA_NLDEV_CMD_GET) req := h.newNetlinkRequest(proto, unix.NLM_F_ACK|unix.NLM_F_DUMP) - msgs, err := req.Execute(unix.NETLINK_RDMA, 0) - if err != nil { - return nil, err + msgs, executeErr := req.Execute(unix.NETLINK_RDMA, 0) + if executeErr != nil && !errors.Is(executeErr, ErrDumpInterrupted) { + return nil, executeErr } var res []*RdmaLink @@ -109,17 +116,23 @@ func (h *Handle) RdmaLinkList() ([]*RdmaLink, error) { res = append(res, link) } - return res, nil + return res, executeErr } // RdmaLinkByName finds a link by name and returns a pointer to the object if // found and nil error, otherwise returns error code. +// +// If the returned error is [ErrDumpInterrupted], the result may be missing or +// outdated and the caller should retry. func RdmaLinkByName(name string) (*RdmaLink, error) { return pkgHandle.RdmaLinkByName(name) } // RdmaLinkByName finds a link by name and returns a pointer to the object if // found and nil error, otherwise returns error code. +// +// If the returned error is [ErrDumpInterrupted], the result may be missing or +// outdated and the caller should retry. func (h *Handle) RdmaLinkByName(name string) (*RdmaLink, error) { links, err := h.RdmaLinkList() if err != nil { @@ -288,6 +301,8 @@ func RdmaLinkDel(name string) error { } // RdmaLinkDel deletes an rdma link. +// +// If the returned error is [ErrDumpInterrupted], the caller should retry. func (h *Handle) RdmaLinkDel(name string) error { link, err := h.RdmaLinkByName(name) if err != nil { @@ -307,6 +322,7 @@ func (h *Handle) RdmaLinkDel(name string) error { // RdmaLinkAdd adds an rdma link for the specified type to the network device. // Similar to: rdma link add NAME type TYPE netdev NETDEV +// // NAME - specifies the new name of the rdma link to add // TYPE - specifies which rdma type to use. 
Link types: // rxe - Soft RoCE driver diff --git a/vendor/github.com/vishvananda/netlink/route.go b/vendor/github.com/vishvananda/netlink/route.go index 1b4555d5c..47a57c24c 100644 --- a/vendor/github.com/vishvananda/netlink/route.go +++ b/vendor/github.com/vishvananda/netlink/route.go @@ -45,7 +45,7 @@ type Encap interface { Equal(Encap) bool } -//Protocol describe what was the originator of the route +// Protocol describe what was the originator of the route type RouteProtocol int // Route represents a netlink route. @@ -70,6 +70,7 @@ type Route struct { Via Destination Realm int MTU int + MTULock bool Window int Rtt int RttVar int @@ -81,6 +82,7 @@ type Route struct { InitCwnd int Features int RtoMin int + RtoMinLock bool InitRwnd int QuickACK int Congctl string diff --git a/vendor/github.com/vishvananda/netlink/route_linux.go b/vendor/github.com/vishvananda/netlink/route_linux.go index 0cd4f8363..9f06673a4 100644 --- a/vendor/github.com/vishvananda/netlink/route_linux.go +++ b/vendor/github.com/vishvananda/netlink/route_linux.go @@ -3,6 +3,7 @@ package netlink import ( "bytes" "encoding/binary" + "errors" "fmt" "net" "strconv" @@ -269,6 +270,7 @@ type SEG6LocalEncap struct { Action int Segments []net.IP // from SRH in seg6_local_lwt Table int // table id for End.T and End.DT6 + VrfTable int // vrftable id for END.DT4 and END.DT6 InAddr net.IP In6Addr net.IP Iif int @@ -304,6 +306,9 @@ func (e *SEG6LocalEncap) Decode(buf []byte) error { case nl.SEG6_LOCAL_TABLE: e.Table = int(native.Uint32(attr.Value[0:4])) e.Flags[nl.SEG6_LOCAL_TABLE] = true + case nl.SEG6_LOCAL_VRFTABLE: + e.VrfTable = int(native.Uint32(attr.Value[0:4])) + e.Flags[nl.SEG6_LOCAL_VRFTABLE] = true case nl.SEG6_LOCAL_NH4: e.InAddr = net.IP(attr.Value[0:4]) e.Flags[nl.SEG6_LOCAL_NH4] = true @@ -360,6 +365,15 @@ func (e *SEG6LocalEncap) Encode() ([]byte, error) { native.PutUint32(attr[4:], uint32(e.Table)) res = append(res, attr...) 
} + + if e.Flags[nl.SEG6_LOCAL_VRFTABLE] { + attr := make([]byte, 8) + native.PutUint16(attr, 8) + native.PutUint16(attr[2:], nl.SEG6_LOCAL_VRFTABLE) + native.PutUint32(attr[4:], uint32(e.VrfTable)) + res = append(res, attr...) + } + if e.Flags[nl.SEG6_LOCAL_NH4] { attr := make([]byte, 4) native.PutUint16(attr, 8) @@ -412,6 +426,11 @@ func (e *SEG6LocalEncap) String() string { if e.Flags[nl.SEG6_LOCAL_TABLE] { strs = append(strs, fmt.Sprintf("table %d", e.Table)) } + + if e.Flags[nl.SEG6_LOCAL_VRFTABLE] { + strs = append(strs, fmt.Sprintf("vrftable %d", e.VrfTable)) + } + if e.Flags[nl.SEG6_LOCAL_NH4] { strs = append(strs, fmt.Sprintf("nh4 %s", e.InAddr)) } @@ -476,7 +495,7 @@ func (e *SEG6LocalEncap) Equal(x Encap) bool { if !e.InAddr.Equal(o.InAddr) || !e.In6Addr.Equal(o.In6Addr) { return false } - if e.Action != o.Action || e.Table != o.Table || e.Iif != o.Iif || e.Oif != o.Oif || e.bpf != o.bpf { + if e.Action != o.Action || e.Table != o.Table || e.Iif != o.Iif || e.Oif != o.Oif || e.bpf != o.bpf || e.VrfTable != o.VrfTable { return false } return true @@ -1071,6 +1090,10 @@ func (h *Handle) prepareRouteReq(route *Route, req *nl.NetlinkRequest, msg *nl.R if route.MTU > 0 { b := nl.Uint32Attr(uint32(route.MTU)) metrics = append(metrics, nl.NewRtAttr(unix.RTAX_MTU, b)) + if route.MTULock { + b := nl.Uint32Attr(uint32(1 << unix.RTAX_MTU)) + metrics = append(metrics, nl.NewRtAttr(unix.RTAX_LOCK, b)) + } } if route.Window > 0 { b := nl.Uint32Attr(uint32(route.Window)) @@ -1115,6 +1138,10 @@ func (h *Handle) prepareRouteReq(route *Route, req *nl.NetlinkRequest, msg *nl.R if route.RtoMin > 0 { b := nl.Uint32Attr(uint32(route.RtoMin)) metrics = append(metrics, nl.NewRtAttr(unix.RTAX_RTO_MIN, b)) + if route.RtoMinLock { + b := nl.Uint32Attr(uint32(1 << unix.RTAX_RTO_MIN)) + metrics = append(metrics, nl.NewRtAttr(unix.RTAX_LOCK, b)) + } } if route.InitRwnd > 0 { b := nl.Uint32Attr(uint32(route.InitRwnd)) @@ -1163,6 +1190,9 @@ func (h *Handle) prepareRouteReq(route 
*Route, req *nl.NetlinkRequest, msg *nl.R // RouteList gets a list of routes in the system. // Equivalent to: `ip route show`. // The list can be filtered by link and ip family. +// +// If the returned error is [ErrDumpInterrupted], results may be inconsistent +// or incomplete. func RouteList(link Link, family int) ([]Route, error) { return pkgHandle.RouteList(link, family) } @@ -1170,6 +1200,9 @@ func RouteList(link Link, family int) ([]Route, error) { // RouteList gets a list of routes in the system. // Equivalent to: `ip route show`. // The list can be filtered by link and ip family. +// +// If the returned error is [ErrDumpInterrupted], results may be inconsistent +// or incomplete. func (h *Handle) RouteList(link Link, family int) ([]Route, error) { routeFilter := &Route{} if link != nil { @@ -1188,6 +1221,9 @@ func RouteListFiltered(family int, filter *Route, filterMask uint64) ([]Route, e // RouteListFiltered gets a list of routes in the system filtered with specified rules. // All rules must be defined in RouteFilter struct +// +// If the returned error is [ErrDumpInterrupted], results may be inconsistent +// or incomplete. func (h *Handle) RouteListFiltered(family int, filter *Route, filterMask uint64) ([]Route, error) { var res []Route err := h.RouteListFilteredIter(family, filter, filterMask, func(route Route) (cont bool) { @@ -1202,17 +1238,22 @@ func (h *Handle) RouteListFiltered(family int, filter *Route, filterMask uint64) // RouteListFilteredIter passes each route that matches the filter to the given iterator func. Iteration continues // until all routes are loaded or the func returns false. +// +// If the returned error is [ErrDumpInterrupted], results may be inconsistent +// or incomplete. 
func RouteListFilteredIter(family int, filter *Route, filterMask uint64, f func(Route) (cont bool)) error { return pkgHandle.RouteListFilteredIter(family, filter, filterMask, f) } +// If the returned error is [ErrDumpInterrupted], results may be inconsistent +// or incomplete. func (h *Handle) RouteListFilteredIter(family int, filter *Route, filterMask uint64, f func(Route) (cont bool)) error { req := h.newNetlinkRequest(unix.RTM_GETROUTE, unix.NLM_F_DUMP) rtmsg := &nl.RtMsg{} rtmsg.Family = uint8(family) var parseErr error - err := h.routeHandleIter(filter, req, rtmsg, func(m []byte) bool { + executeErr := h.routeHandleIter(filter, req, rtmsg, func(m []byte) bool { msg := nl.DeserializeRtMsg(m) if family != FAMILY_ALL && msg.Family != uint8(family) { // Ignore routes not matching requested family @@ -1270,13 +1311,13 @@ func (h *Handle) RouteListFilteredIter(family int, filter *Route, filterMask uin } return f(route) }) - if err != nil { - return err + if executeErr != nil && !errors.Is(executeErr, ErrDumpInterrupted) { + return executeErr } if parseErr != nil { return parseErr } - return nil + return executeErr } // deserializeRoute decodes a binary netlink message into a Route struct @@ -1425,6 +1466,9 @@ func deserializeRoute(m []byte) (Route, error) { switch metric.Attr.Type { case unix.RTAX_MTU: route.MTU = int(native.Uint32(metric.Value[0:4])) + case unix.RTAX_LOCK: + route.MTULock = native.Uint32(metric.Value[0:4]) == uint32(1< 0 { link, err := h.LinkByName(options.Oif) if err != nil { return nil, err } + oifIndex = uint32(link.Attrs().Index) + } else if options.OifIndex > 0 { + oifIndex = uint32(options.OifIndex) + } + if oifIndex > 0 { b := make([]byte, 4) - native.PutUint32(b, uint32(link.Attrs().Index)) + native.PutUint32(b, oifIndex) req.AddData(nl.NewRtAttr(unix.RTA_OIF, b)) } @@ -1684,6 +1735,10 @@ type RouteSubscribeOptions struct { // RouteSubscribeWithOptions work like RouteSubscribe but enable to // provide additional options to modify the 
behavior. Currently, the // namespace can be provided as well as an error callback. +// +// When options.ListExisting is true, options.ErrorCallback may be +// called with [ErrDumpInterrupted] to indicate that results from +// the initial dump of links may be inconsistent or incomplete. func RouteSubscribeWithOptions(ch chan<- RouteUpdate, done <-chan struct{}, options RouteSubscribeOptions) error { if options.Namespace == nil { none := netns.None() @@ -1743,6 +1798,9 @@ func routeSubscribeAt(newNs, curNs netns.NsHandle, ch chan<- RouteUpdate, done < continue } for _, m := range msgs { + if m.Header.Flags&unix.NLM_F_DUMP_INTR != 0 && cberr != nil { + cberr(ErrDumpInterrupted) + } if m.Header.Type == unix.NLMSG_DONE { continue } diff --git a/vendor/github.com/vishvananda/netlink/rule_linux.go b/vendor/github.com/vishvananda/netlink/rule_linux.go index ddff99cfa..dba99147b 100644 --- a/vendor/github.com/vishvananda/netlink/rule_linux.go +++ b/vendor/github.com/vishvananda/netlink/rule_linux.go @@ -2,6 +2,7 @@ package netlink import ( "bytes" + "errors" "fmt" "net" @@ -183,12 +184,18 @@ func ruleHandle(rule *Rule, req *nl.NetlinkRequest) error { // RuleList lists rules in the system. // Equivalent to: ip rule list +// +// If the returned error is [ErrDumpInterrupted], results may be inconsistent +// or incomplete. func RuleList(family int) ([]Rule, error) { return pkgHandle.RuleList(family) } // RuleList lists rules in the system. // Equivalent to: ip rule list +// +// If the returned error is [ErrDumpInterrupted], results may be inconsistent +// or incomplete. func (h *Handle) RuleList(family int) ([]Rule, error) { return h.RuleListFiltered(family, nil, 0) } @@ -196,20 +203,26 @@ func (h *Handle) RuleList(family int) ([]Rule, error) { // RuleListFiltered gets a list of rules in the system filtered by the // specified rule template `filter`. 
// Equivalent to: ip rule list +// +// If the returned error is [ErrDumpInterrupted], results may be inconsistent +// or incomplete. func RuleListFiltered(family int, filter *Rule, filterMask uint64) ([]Rule, error) { return pkgHandle.RuleListFiltered(family, filter, filterMask) } // RuleListFiltered lists rules in the system. // Equivalent to: ip rule list +// +// If the returned error is [ErrDumpInterrupted], results may be inconsistent +// or incomplete. func (h *Handle) RuleListFiltered(family int, filter *Rule, filterMask uint64) ([]Rule, error) { req := h.newNetlinkRequest(unix.RTM_GETRULE, unix.NLM_F_DUMP|unix.NLM_F_REQUEST) msg := nl.NewIfInfomsg(family) req.AddData(msg) - msgs, err := req.Execute(unix.NETLINK_ROUTE, unix.RTM_NEWRULE) - if err != nil { - return nil, err + msgs, executeErr := req.Execute(unix.NETLINK_ROUTE, unix.RTM_NEWRULE) + if executeErr != nil && !errors.Is(executeErr, ErrDumpInterrupted) { + return nil, executeErr } var res = make([]Rule, 0) @@ -306,7 +319,7 @@ func (h *Handle) RuleListFiltered(family int, filter *Rule, filterMask uint64) ( res = append(res, *rule) } - return res, nil + return res, executeErr } func (pr *RulePortRange) toRtAttrData() []byte { diff --git a/vendor/github.com/vishvananda/netlink/socket_linux.go b/vendor/github.com/vishvananda/netlink/socket_linux.go index 4eb4aeafb..ebda532a8 100644 --- a/vendor/github.com/vishvananda/netlink/socket_linux.go +++ b/vendor/github.com/vishvananda/netlink/socket_linux.go @@ -157,6 +157,9 @@ func (u *UnixSocket) deserialize(b []byte) error { } // SocketGet returns the Socket identified by its local and remote addresses. +// +// If the returned error is [ErrDumpInterrupted], the search for a result may +// be incomplete and the caller should retry. 
func (h *Handle) SocketGet(local, remote net.Addr) (*Socket, error) { var protocol uint8 var localIP, remoteIP net.IP @@ -232,6 +235,9 @@ func (h *Handle) SocketGet(local, remote net.Addr) (*Socket, error) { } // SocketGet returns the Socket identified by its local and remote addresses. +// +// If the returned error is [ErrDumpInterrupted], the search for a result may +// be incomplete and the caller should retry. func SocketGet(local, remote net.Addr) (*Socket, error) { return pkgHandle.SocketGet(local, remote) } @@ -283,6 +289,9 @@ func SocketDestroy(local, remote net.Addr) error { } // SocketDiagTCPInfo requests INET_DIAG_INFO for TCP protocol for specified family type and return with extension TCP info. +// +// If the returned error is [ErrDumpInterrupted], results may be inconsistent +// or incomplete. func (h *Handle) SocketDiagTCPInfo(family uint8) ([]*InetDiagTCPInfoResp, error) { // Construct the request req := h.newNetlinkRequest(nl.SOCK_DIAG_BY_FAMILY, unix.NLM_F_DUMP) @@ -295,9 +304,9 @@ func (h *Handle) SocketDiagTCPInfo(family uint8) ([]*InetDiagTCPInfoResp, error) // Do the query and parse the result var result []*InetDiagTCPInfoResp - var err error - err = req.ExecuteIter(unix.NETLINK_INET_DIAG, nl.SOCK_DIAG_BY_FAMILY, func(msg []byte) bool { + executeErr := req.ExecuteIter(unix.NETLINK_INET_DIAG, nl.SOCK_DIAG_BY_FAMILY, func(msg []byte) bool { sockInfo := &Socket{} + var err error if err = sockInfo.deserialize(msg); err != nil { return false } @@ -315,18 +324,24 @@ func (h *Handle) SocketDiagTCPInfo(family uint8) ([]*InetDiagTCPInfoResp, error) return true }) - if err != nil { - return nil, err + if executeErr != nil && !errors.Is(executeErr, ErrDumpInterrupted) { + return nil, executeErr } - return result, nil + return result, executeErr } // SocketDiagTCPInfo requests INET_DIAG_INFO for TCP protocol for specified family type and return with extension TCP info. 
+// +// If the returned error is [ErrDumpInterrupted], results may be inconsistent +// or incomplete. func SocketDiagTCPInfo(family uint8) ([]*InetDiagTCPInfoResp, error) { return pkgHandle.SocketDiagTCPInfo(family) } // SocketDiagTCP requests INET_DIAG_INFO for TCP protocol for specified family type and return related socket. +// +// If the returned error is [ErrDumpInterrupted], results may be inconsistent +// or incomplete. func (h *Handle) SocketDiagTCP(family uint8) ([]*Socket, error) { // Construct the request req := h.newNetlinkRequest(nl.SOCK_DIAG_BY_FAMILY, unix.NLM_F_DUMP) @@ -339,27 +354,32 @@ func (h *Handle) SocketDiagTCP(family uint8) ([]*Socket, error) { // Do the query and parse the result var result []*Socket - var err error - err = req.ExecuteIter(unix.NETLINK_INET_DIAG, nl.SOCK_DIAG_BY_FAMILY, func(msg []byte) bool { + executeErr := req.ExecuteIter(unix.NETLINK_INET_DIAG, nl.SOCK_DIAG_BY_FAMILY, func(msg []byte) bool { sockInfo := &Socket{} - if err = sockInfo.deserialize(msg); err != nil { + if err := sockInfo.deserialize(msg); err != nil { return false } result = append(result, sockInfo) return true }) - if err != nil { - return nil, err + if executeErr != nil && !errors.Is(executeErr, ErrDumpInterrupted) { + return nil, executeErr } - return result, nil + return result, executeErr } // SocketDiagTCP requests INET_DIAG_INFO for TCP protocol for specified family type and return related socket. +// +// If the returned error is [ErrDumpInterrupted], results may be inconsistent +// or incomplete. func SocketDiagTCP(family uint8) ([]*Socket, error) { return pkgHandle.SocketDiagTCP(family) } // SocketDiagUDPInfo requests INET_DIAG_INFO for UDP protocol for specified family type and return with extension info. +// +// If the returned error is [ErrDumpInterrupted], results may be inconsistent +// or incomplete. 
func (h *Handle) SocketDiagUDPInfo(family uint8) ([]*InetDiagUDPInfoResp, error) { // Construct the request var extensions uint8 @@ -377,14 +397,14 @@ func (h *Handle) SocketDiagUDPInfo(family uint8) ([]*InetDiagUDPInfoResp, error) // Do the query and parse the result var result []*InetDiagUDPInfoResp - var err error - err = req.ExecuteIter(unix.NETLINK_INET_DIAG, nl.SOCK_DIAG_BY_FAMILY, func(msg []byte) bool { + executeErr := req.ExecuteIter(unix.NETLINK_INET_DIAG, nl.SOCK_DIAG_BY_FAMILY, func(msg []byte) bool { sockInfo := &Socket{} - if err = sockInfo.deserialize(msg); err != nil { + if err := sockInfo.deserialize(msg); err != nil { return false } var attrs []syscall.NetlinkRouteAttr + var err error if attrs, err = nl.ParseRouteAttr(msg[sizeofSocket:]); err != nil { return false } @@ -397,18 +417,24 @@ func (h *Handle) SocketDiagUDPInfo(family uint8) ([]*InetDiagUDPInfoResp, error) result = append(result, res) return true }) - if err != nil { - return nil, err + if executeErr != nil && !errors.Is(executeErr, ErrDumpInterrupted) { + return nil, executeErr } - return result, nil + return result, executeErr } // SocketDiagUDPInfo requests INET_DIAG_INFO for UDP protocol for specified family type and return with extension info. +// +// If the returned error is [ErrDumpInterrupted], results may be inconsistent +// or incomplete. func SocketDiagUDPInfo(family uint8) ([]*InetDiagUDPInfoResp, error) { return pkgHandle.SocketDiagUDPInfo(family) } // SocketDiagUDP requests INET_DIAG_INFO for UDP protocol for specified family type and return related socket. +// +// If the returned error is [ErrDumpInterrupted], results may be inconsistent +// or incomplete. 
func (h *Handle) SocketDiagUDP(family uint8) ([]*Socket, error) { // Construct the request req := h.newNetlinkRequest(nl.SOCK_DIAG_BY_FAMILY, unix.NLM_F_DUMP) @@ -421,27 +447,32 @@ func (h *Handle) SocketDiagUDP(family uint8) ([]*Socket, error) { // Do the query and parse the result var result []*Socket - var err error - err = req.ExecuteIter(unix.NETLINK_INET_DIAG, nl.SOCK_DIAG_BY_FAMILY, func(msg []byte) bool { + executeErr := req.ExecuteIter(unix.NETLINK_INET_DIAG, nl.SOCK_DIAG_BY_FAMILY, func(msg []byte) bool { sockInfo := &Socket{} - if err = sockInfo.deserialize(msg); err != nil { + if err := sockInfo.deserialize(msg); err != nil { return false } result = append(result, sockInfo) return true }) - if err != nil { - return nil, err + if executeErr != nil && !errors.Is(executeErr, ErrDumpInterrupted) { + return nil, executeErr } - return result, nil + return result, executeErr } // SocketDiagUDP requests INET_DIAG_INFO for UDP protocol for specified family type and return related socket. +// +// If the returned error is [ErrDumpInterrupted], results may be inconsistent +// or incomplete. func SocketDiagUDP(family uint8) ([]*Socket, error) { return pkgHandle.SocketDiagUDP(family) } // UnixSocketDiagInfo requests UNIX_DIAG_INFO for unix sockets and return with extension info. +// +// If the returned error is [ErrDumpInterrupted], results may be inconsistent +// or incomplete. 
func (h *Handle) UnixSocketDiagInfo() ([]*UnixDiagInfoResp, error) { // Construct the request var extensions uint8 @@ -456,10 +487,9 @@ func (h *Handle) UnixSocketDiagInfo() ([]*UnixDiagInfoResp, error) { }) var result []*UnixDiagInfoResp - var err error - err = req.ExecuteIter(unix.NETLINK_INET_DIAG, nl.SOCK_DIAG_BY_FAMILY, func(msg []byte) bool { + executeErr := req.ExecuteIter(unix.NETLINK_INET_DIAG, nl.SOCK_DIAG_BY_FAMILY, func(msg []byte) bool { sockInfo := &UnixSocket{} - if err = sockInfo.deserialize(msg); err != nil { + if err := sockInfo.deserialize(msg); err != nil { return false } @@ -469,7 +499,8 @@ func (h *Handle) UnixSocketDiagInfo() ([]*UnixDiagInfoResp, error) { } var attrs []syscall.NetlinkRouteAttr - if attrs, err = nl.ParseRouteAttr(msg[sizeofSocket:]); err != nil { + var err error + if attrs, err = nl.ParseRouteAttr(msg[sizeofUnixSocket:]); err != nil { return false } @@ -480,18 +511,24 @@ func (h *Handle) UnixSocketDiagInfo() ([]*UnixDiagInfoResp, error) { result = append(result, res) return true }) - if err != nil { - return nil, err + if executeErr != nil && !errors.Is(executeErr, ErrDumpInterrupted) { + return nil, executeErr } - return result, nil + return result, executeErr } // UnixSocketDiagInfo requests UNIX_DIAG_INFO for unix sockets and return with extension info. +// +// If the returned error is [ErrDumpInterrupted], results may be inconsistent +// or incomplete. func UnixSocketDiagInfo() ([]*UnixDiagInfoResp, error) { return pkgHandle.UnixSocketDiagInfo() } // UnixSocketDiag requests UNIX_DIAG_INFO for unix sockets. +// +// If the returned error is [ErrDumpInterrupted], results may be inconsistent +// or incomplete. 
func (h *Handle) UnixSocketDiag() ([]*UnixSocket, error) { // Construct the request req := h.newNetlinkRequest(nl.SOCK_DIAG_BY_FAMILY, unix.NLM_F_DUMP) @@ -501,10 +538,9 @@ func (h *Handle) UnixSocketDiag() ([]*UnixSocket, error) { }) var result []*UnixSocket - var err error - err = req.ExecuteIter(unix.NETLINK_INET_DIAG, nl.SOCK_DIAG_BY_FAMILY, func(msg []byte) bool { + executeErr := req.ExecuteIter(unix.NETLINK_INET_DIAG, nl.SOCK_DIAG_BY_FAMILY, func(msg []byte) bool { sockInfo := &UnixSocket{} - if err = sockInfo.deserialize(msg); err != nil { + if err := sockInfo.deserialize(msg); err != nil { return false } @@ -514,13 +550,16 @@ func (h *Handle) UnixSocketDiag() ([]*UnixSocket, error) { } return true }) - if err != nil { - return nil, err + if executeErr != nil && !errors.Is(executeErr, ErrDumpInterrupted) { + return nil, executeErr } - return result, nil + return result, executeErr } // UnixSocketDiag requests UNIX_DIAG_INFO for unix sockets. +// +// If the returned error is [ErrDumpInterrupted], results may be inconsistent +// or incomplete. func UnixSocketDiag() ([]*UnixSocket, error) { return pkgHandle.UnixSocketDiag() } diff --git a/vendor/github.com/vishvananda/netlink/socket_xdp_linux.go b/vendor/github.com/vishvananda/netlink/socket_xdp_linux.go index 20c82f9c7..c1dd00a86 100644 --- a/vendor/github.com/vishvananda/netlink/socket_xdp_linux.go +++ b/vendor/github.com/vishvananda/netlink/socket_xdp_linux.go @@ -52,8 +52,10 @@ func (s *XDPSocket) deserialize(b []byte) error { return nil } -// XDPSocketGet returns the XDP socket identified by its inode number and/or +// SocketXDPGetInfo returns the XDP socket identified by its inode number and/or // socket cookie. Specify the cookie as SOCK_ANY_COOKIE if +// +// If the returned error is [ErrDumpInterrupted], the caller should retry. func SocketXDPGetInfo(ino uint32, cookie uint64) (*XDPDiagInfoResp, error) { // We have a problem here: dumping AF_XDP sockets currently does not support // filtering. 
We thus need to dump all XSKs and then only filter afterwards @@ -85,6 +87,9 @@ func SocketXDPGetInfo(ino uint32, cookie uint64) (*XDPDiagInfoResp, error) { } // SocketDiagXDP requests XDP_DIAG_INFO for XDP family sockets. +// +// If the returned error is [ErrDumpInterrupted], results may be inconsistent +// or incomplete. func SocketDiagXDP() ([]*XDPDiagInfoResp, error) { var result []*XDPDiagInfoResp err := socketDiagXDPExecutor(func(m syscall.NetlinkMessage) error { @@ -105,10 +110,10 @@ func SocketDiagXDP() ([]*XDPDiagInfoResp, error) { result = append(result, res) return nil }) - if err != nil { + if err != nil && !errors.Is(err, ErrDumpInterrupted) { return nil, err } - return result, nil + return result, err } // socketDiagXDPExecutor requests XDP_DIAG_INFO for XDP family sockets. @@ -128,6 +133,7 @@ func socketDiagXDPExecutor(receiver func(syscall.NetlinkMessage) error) error { return err } + dumpIntr := false loop: for { msgs, from, err := s.Receive() @@ -142,6 +148,9 @@ loop: } for _, m := range msgs { + if m.Header.Flags&unix.NLM_F_DUMP_INTR != 0 { + dumpIntr = true + } switch m.Header.Type { case unix.NLMSG_DONE: break loop @@ -154,6 +163,9 @@ loop: } } } + if dumpIntr { + return ErrDumpInterrupted + } return nil } diff --git a/vendor/github.com/vishvananda/netlink/vdpa_linux.go b/vendor/github.com/vishvananda/netlink/vdpa_linux.go index 7c15986d0..c14877a29 100644 --- a/vendor/github.com/vishvananda/netlink/vdpa_linux.go +++ b/vendor/github.com/vishvananda/netlink/vdpa_linux.go @@ -1,6 +1,7 @@ package netlink import ( + "errors" "fmt" "net" "syscall" @@ -118,6 +119,9 @@ func VDPADelDev(name string) error { // VDPAGetDevList returns list of VDPA devices // Equivalent to: `vdpa dev show` +// +// If the returned error is [ErrDumpInterrupted], results may be inconsistent +// or incomplete. 
func VDPAGetDevList() ([]*VDPADev, error) { return pkgHandle.VDPAGetDevList() } @@ -130,6 +134,9 @@ func VDPAGetDevByName(name string) (*VDPADev, error) { // VDPAGetDevConfigList returns list of VDPA devices configurations // Equivalent to: `vdpa dev config show` +// +// If the returned error is [ErrDumpInterrupted], results may be inconsistent +// or incomplete. func VDPAGetDevConfigList() ([]*VDPADevConfig, error) { return pkgHandle.VDPAGetDevConfigList() } @@ -148,6 +155,9 @@ func VDPAGetDevVStats(name string, queueIndex uint32) (*VDPADevVStats, error) { // VDPAGetMGMTDevList returns list of mgmt devices // Equivalent to: `vdpa mgmtdev show` +// +// If the returned error is [ErrDumpInterrupted], results may be inconsistent +// or incomplete. func VDPAGetMGMTDevList() ([]*VDPAMGMTDev, error) { return pkgHandle.VDPAGetMGMTDevList() } @@ -261,9 +271,9 @@ func (h *Handle) vdpaRequest(command uint8, extraFlags int, attrs []*nl.RtAttr) req.AddData(a) } - resp, err := req.Execute(unix.NETLINK_GENERIC, 0) - if err != nil { - return nil, err + resp, executeErr := req.Execute(unix.NETLINK_GENERIC, 0) + if executeErr != nil && !errors.Is(executeErr, ErrDumpInterrupted) { + return nil, executeErr } messages := make([]vdpaNetlinkMessage, 0, len(resp)) for _, m := range resp { @@ -273,10 +283,13 @@ func (h *Handle) vdpaRequest(command uint8, extraFlags int, attrs []*nl.RtAttr) } messages = append(messages, attrs) } - return messages, nil + return messages, executeErr } // dump all devices if dev is nil +// +// If dev is nil and the returned error is [ErrDumpInterrupted], results may be inconsistent +// or incomplete. 
func (h *Handle) vdpaDevGet(dev *string) ([]*VDPADev, error) { var extraFlags int var attrs []*nl.RtAttr @@ -285,9 +298,9 @@ func (h *Handle) vdpaDevGet(dev *string) ([]*VDPADev, error) { } else { extraFlags = extraFlags | unix.NLM_F_DUMP } - messages, err := h.vdpaRequest(nl.VDPA_CMD_DEV_GET, extraFlags, attrs) - if err != nil { - return nil, err + messages, executeErr := h.vdpaRequest(nl.VDPA_CMD_DEV_GET, extraFlags, attrs) + if executeErr != nil && !errors.Is(executeErr, ErrDumpInterrupted) { + return nil, executeErr } devs := make([]*VDPADev, 0, len(messages)) for _, m := range messages { @@ -295,10 +308,13 @@ func (h *Handle) vdpaDevGet(dev *string) ([]*VDPADev, error) { d.parseAttributes(m) devs = append(devs, d) } - return devs, nil + return devs, executeErr } // dump all devices if dev is nil +// +// If dev is nil, and the returned error is [ErrDumpInterrupted], results may be inconsistent +// or incomplete. func (h *Handle) vdpaDevConfigGet(dev *string) ([]*VDPADevConfig, error) { var extraFlags int var attrs []*nl.RtAttr @@ -307,9 +323,9 @@ func (h *Handle) vdpaDevConfigGet(dev *string) ([]*VDPADevConfig, error) { } else { extraFlags = extraFlags | unix.NLM_F_DUMP } - messages, err := h.vdpaRequest(nl.VDPA_CMD_DEV_CONFIG_GET, extraFlags, attrs) - if err != nil { - return nil, err + messages, executeErr := h.vdpaRequest(nl.VDPA_CMD_DEV_CONFIG_GET, extraFlags, attrs) + if executeErr != nil && !errors.Is(executeErr, ErrDumpInterrupted) { + return nil, executeErr } cfgs := make([]*VDPADevConfig, 0, len(messages)) for _, m := range messages { @@ -317,10 +333,13 @@ func (h *Handle) vdpaDevConfigGet(dev *string) ([]*VDPADevConfig, error) { cfg.parseAttributes(m) cfgs = append(cfgs, cfg) } - return cfgs, nil + return cfgs, executeErr } // dump all devices if dev is nil +// +// If dev is nil and the returned error is [ErrDumpInterrupted], results may be inconsistent +// or incomplete. 
func (h *Handle) vdpaMGMTDevGet(bus, dev *string) ([]*VDPAMGMTDev, error) { var extraFlags int var attrs []*nl.RtAttr @@ -336,9 +355,9 @@ func (h *Handle) vdpaMGMTDevGet(bus, dev *string) ([]*VDPAMGMTDev, error) { } else { extraFlags = extraFlags | unix.NLM_F_DUMP } - messages, err := h.vdpaRequest(nl.VDPA_CMD_MGMTDEV_GET, extraFlags, attrs) - if err != nil { - return nil, err + messages, executeErr := h.vdpaRequest(nl.VDPA_CMD_MGMTDEV_GET, extraFlags, attrs) + if executeErr != nil && !errors.Is(executeErr, ErrDumpInterrupted) { + return nil, executeErr } cfgs := make([]*VDPAMGMTDev, 0, len(messages)) for _, m := range messages { @@ -346,7 +365,7 @@ func (h *Handle) vdpaMGMTDevGet(bus, dev *string) ([]*VDPAMGMTDev, error) { cfg.parseAttributes(m) cfgs = append(cfgs, cfg) } - return cfgs, nil + return cfgs, executeErr } // VDPANewDev adds new VDPA device @@ -385,6 +404,9 @@ func (h *Handle) VDPADelDev(name string) error { // VDPAGetDevList returns list of VDPA devices // Equivalent to: `vdpa dev show` +// +// If the returned error is [ErrDumpInterrupted], results may be inconsistent +// or incomplete. func (h *Handle) VDPAGetDevList() ([]*VDPADev, error) { return h.vdpaDevGet(nil) } @@ -404,6 +426,9 @@ func (h *Handle) VDPAGetDevByName(name string) (*VDPADev, error) { // VDPAGetDevConfigList returns list of VDPA devices configurations // Equivalent to: `vdpa dev config show` +// +// If the returned error is [ErrDumpInterrupted], results may be inconsistent +// or incomplete. func (h *Handle) VDPAGetDevConfigList() ([]*VDPADevConfig, error) { return h.vdpaDevConfigGet(nil) } @@ -441,6 +466,9 @@ func (h *Handle) VDPAGetDevVStats(name string, queueIndex uint32) (*VDPADevVStat // VDPAGetMGMTDevList returns list of mgmt devices // Equivalent to: `vdpa mgmtdev show` +// +// If the returned error is [ErrDumpInterrupted], results may be inconsistent +// or incomplete. 
func (h *Handle) VDPAGetMGMTDevList() ([]*VDPAMGMTDev, error) { return h.vdpaMGMTDevGet(nil, nil) } diff --git a/vendor/github.com/vishvananda/netlink/xfrm_linux.go b/vendor/github.com/vishvananda/netlink/xfrm_linux.go index dd38ed8e0..b603e4c15 100644 --- a/vendor/github.com/vishvananda/netlink/xfrm_linux.go +++ b/vendor/github.com/vishvananda/netlink/xfrm_linux.go @@ -48,6 +48,14 @@ const ( XFRM_MODE_MAX ) +// SADir is an enum representing an ipsec template direction. +type SADir uint8 + +const ( + XFRM_SA_DIR_IN SADir = iota + 1 + XFRM_SA_DIR_OUT +) + func (m Mode) String() string { switch m { case XFRM_MODE_TRANSPORT: diff --git a/vendor/github.com/vishvananda/netlink/xfrm_policy_linux.go b/vendor/github.com/vishvananda/netlink/xfrm_policy_linux.go index d526739ce..bf143a1b1 100644 --- a/vendor/github.com/vishvananda/netlink/xfrm_policy_linux.go +++ b/vendor/github.com/vishvananda/netlink/xfrm_policy_linux.go @@ -1,6 +1,7 @@ package netlink import ( + "errors" "fmt" "net" @@ -215,6 +216,9 @@ func (h *Handle) XfrmPolicyDel(policy *XfrmPolicy) error { // XfrmPolicyList gets a list of xfrm policies in the system. // Equivalent to: `ip xfrm policy show`. // The list can be filtered by ip family. +// +// If the returned error is [ErrDumpInterrupted], results may be inconsistent +// or incomplete. func XfrmPolicyList(family int) ([]XfrmPolicy, error) { return pkgHandle.XfrmPolicyList(family) } @@ -222,15 +226,18 @@ func XfrmPolicyList(family int) ([]XfrmPolicy, error) { // XfrmPolicyList gets a list of xfrm policies in the system. // Equivalent to: `ip xfrm policy show`. // The list can be filtered by ip family. +// +// If the returned error is [ErrDumpInterrupted], results may be inconsistent +// or incomplete. 
func (h *Handle) XfrmPolicyList(family int) ([]XfrmPolicy, error) { req := h.newNetlinkRequest(nl.XFRM_MSG_GETPOLICY, unix.NLM_F_DUMP) msg := nl.NewIfInfomsg(family) req.AddData(msg) - msgs, err := req.Execute(unix.NETLINK_XFRM, nl.XFRM_MSG_NEWPOLICY) - if err != nil { - return nil, err + msgs, executeErr := req.Execute(unix.NETLINK_XFRM, nl.XFRM_MSG_NEWPOLICY) + if executeErr != nil && !errors.Is(executeErr, ErrDumpInterrupted) { + return nil, executeErr } var res []XfrmPolicy @@ -243,7 +250,7 @@ func (h *Handle) XfrmPolicyList(family int) ([]XfrmPolicy, error) { return nil, err } } - return res, nil + return res, executeErr } // XfrmPolicyGet gets a the policy described by the index or selector, if found. diff --git a/vendor/github.com/vishvananda/netlink/xfrm_state_linux.go b/vendor/github.com/vishvananda/netlink/xfrm_state_linux.go index 554f2498c..092ffe97b 100644 --- a/vendor/github.com/vishvananda/netlink/xfrm_state_linux.go +++ b/vendor/github.com/vishvananda/netlink/xfrm_state_linux.go @@ -1,6 +1,7 @@ package netlink import ( + "errors" "fmt" "net" "time" @@ -112,7 +113,9 @@ type XfrmState struct { Statistics XfrmStateStats Mark *XfrmMark OutputMark *XfrmMark + SADir SADir Ifid int + Pcpunum *uint32 Auth *XfrmStateAlgo Crypt *XfrmStateAlgo Aead *XfrmStateAlgo @@ -125,8 +128,8 @@ type XfrmState struct { } func (sa XfrmState) String() string { - return fmt.Sprintf("Dst: %v, Src: %v, Proto: %s, Mode: %s, SPI: 0x%x, ReqID: 0x%x, ReplayWindow: %d, Mark: %v, OutputMark: %v, Ifid: %d, Auth: %v, Crypt: %v, Aead: %v, Encap: %v, ESN: %t, DontEncapDSCP: %t, OSeqMayWrap: %t, Replay: %v", - sa.Dst, sa.Src, sa.Proto, sa.Mode, sa.Spi, sa.Reqid, sa.ReplayWindow, sa.Mark, sa.OutputMark, sa.Ifid, sa.Auth, sa.Crypt, sa.Aead, sa.Encap, sa.ESN, sa.DontEncapDSCP, sa.OSeqMayWrap, sa.Replay) + return fmt.Sprintf("Dst: %v, Src: %v, Proto: %s, Mode: %s, SPI: 0x%x, ReqID: 0x%x, ReplayWindow: %d, Mark: %v, OutputMark: %v, SADir: %d, Ifid: %d, Pcpunum: %d, Auth: %v, Crypt: %v, Aead: 
%v, Encap: %v, ESN: %t, DontEncapDSCP: %t, OSeqMayWrap: %t, Replay: %v", + sa.Dst, sa.Src, sa.Proto, sa.Mode, sa.Spi, sa.Reqid, sa.ReplayWindow, sa.Mark, sa.OutputMark, sa.SADir, sa.Ifid, *sa.Pcpunum, sa.Auth, sa.Crypt, sa.Aead, sa.Encap, sa.ESN, sa.DontEncapDSCP, sa.OSeqMayWrap, sa.Replay) } func (sa XfrmState) Print(stats bool) string { if !stats { @@ -332,11 +335,21 @@ func (h *Handle) xfrmStateAddOrUpdate(state *XfrmState, nlProto int) error { req.AddData(out) } + if state.SADir != 0 { + saDir := nl.NewRtAttr(nl.XFRMA_SA_DIR, nl.Uint8Attr(uint8(state.SADir))) + req.AddData(saDir) + } + if state.Ifid != 0 { ifId := nl.NewRtAttr(nl.XFRMA_IF_ID, nl.Uint32Attr(uint32(state.Ifid))) req.AddData(ifId) } + if state.Pcpunum != nil { + pcpuNum := nl.NewRtAttr(nl.XFRMA_SA_PCPU, nl.Uint32Attr(uint32(*state.Pcpunum))) + req.AddData(pcpuNum) + } + _, err := req.Execute(unix.NETLINK_XFRM, 0) return err } @@ -382,6 +395,9 @@ func (h *Handle) XfrmStateDel(state *XfrmState) error { // XfrmStateList gets a list of xfrm states in the system. // Equivalent to: `ip [-4|-6] xfrm state show`. // The list can be filtered by ip family. +// +// If the returned error is [ErrDumpInterrupted], results may be inconsistent +// or incomplete. func XfrmStateList(family int) ([]XfrmState, error) { return pkgHandle.XfrmStateList(family) } @@ -389,12 +405,15 @@ func XfrmStateList(family int) ([]XfrmState, error) { // XfrmStateList gets a list of xfrm states in the system. // Equivalent to: `ip xfrm state show`. // The list can be filtered by ip family. +// +// If the returned error is [ErrDumpInterrupted], results may be inconsistent +// or incomplete. 
func (h *Handle) XfrmStateList(family int) ([]XfrmState, error) { req := h.newNetlinkRequest(nl.XFRM_MSG_GETSA, unix.NLM_F_DUMP) - msgs, err := req.Execute(unix.NETLINK_XFRM, nl.XFRM_MSG_NEWSA) - if err != nil { - return nil, err + msgs, executeErr := req.Execute(unix.NETLINK_XFRM, nl.XFRM_MSG_NEWSA) + if executeErr != nil && !errors.Is(executeErr, ErrDumpInterrupted) { + return nil, executeErr } var res []XfrmState @@ -407,7 +426,7 @@ func (h *Handle) XfrmStateList(family int) ([]XfrmState, error) { return nil, err } } - return res, nil + return res, executeErr } // XfrmStateGet gets the xfrm state described by the ID, if found. @@ -452,6 +471,11 @@ func (h *Handle) xfrmStateGetOrDelete(state *XfrmState, nlProto int) (*XfrmState req.AddData(ifId) } + if state.Pcpunum != nil { + pcpuNum := nl.NewRtAttr(nl.XFRMA_SA_PCPU, nl.Uint32Attr(uint32(*state.Pcpunum))) + req.AddData(pcpuNum) + } + resType := nl.XFRM_MSG_NEWSA if nlProto == nl.XFRM_MSG_DELSA { resType = 0 @@ -574,8 +598,13 @@ func parseXfrmState(m []byte, family int) (*XfrmState, error) { if state.OutputMark.Mask == 0xffffffff { state.OutputMark.Mask = 0 } + case nl.XFRMA_SA_DIR: + state.SADir = SADir(attr.Value[0]) case nl.XFRMA_IF_ID: state.Ifid = int(native.Uint32(attr.Value)) + case nl.XFRMA_SA_PCPU: + pcpuNum := native.Uint32(attr.Value) + state.Pcpunum = &pcpuNum case nl.XFRMA_REPLAY_VAL: if state.Replay == nil { state.Replay = new(XfrmReplayState) diff --git a/vendor/github.com/xrash/smetrics/.travis.yml b/vendor/github.com/xrash/smetrics/.travis.yml new file mode 100644 index 000000000..d1cd67ff9 --- /dev/null +++ b/vendor/github.com/xrash/smetrics/.travis.yml @@ -0,0 +1,9 @@ +language: go +go: + - 1.11 + - 1.12 + - 1.13 + - 1.14.x + - master +script: + - cd tests && make diff --git a/vendor/github.com/xrash/smetrics/LICENSE b/vendor/github.com/xrash/smetrics/LICENSE new file mode 100644 index 000000000..80445682f --- /dev/null +++ b/vendor/github.com/xrash/smetrics/LICENSE @@ -0,0 +1,21 @@ +Copyright 
(C) 2016 Felipe da Cunha Gonçalves +All Rights Reserved. + +MIT LICENSE + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/xrash/smetrics/README.md b/vendor/github.com/xrash/smetrics/README.md new file mode 100644 index 000000000..5e0c1a463 --- /dev/null +++ b/vendor/github.com/xrash/smetrics/README.md @@ -0,0 +1,49 @@ +[![Build Status](https://travis-ci.org/xrash/smetrics.svg?branch=master)](http://travis-ci.org/xrash/smetrics) + +# smetrics + +`smetrics` is "string metrics". + +Package smetrics provides a bunch of algorithms for calculating the distance between strings. + +There are implementations for calculating the popular Levenshtein distance (aka Edit Distance or Wagner-Fischer), as well as the Jaro distance, the Jaro-Winkler distance, and more. 
+ +# How to import + +```go +import "github.com/xrash/smetrics" +``` + +# Documentation + +Go to [https://pkg.go.dev/github.com/xrash/smetrics](https://pkg.go.dev/github.com/xrash/smetrics) for complete documentation. + +# Example + +```go +package main + +import ( + "github.com/xrash/smetrics" +) + +func main() { + smetrics.WagnerFischer("POTATO", "POTATTO", 1, 1, 2) + smetrics.WagnerFischer("MOUSE", "HOUSE", 2, 2, 4) + + smetrics.Ukkonen("POTATO", "POTATTO", 1, 1, 2) + smetrics.Ukkonen("MOUSE", "HOUSE", 2, 2, 4) + + smetrics.Jaro("AL", "AL") + smetrics.Jaro("MARTHA", "MARHTA") + + smetrics.JaroWinkler("AL", "AL", 0.7, 4) + smetrics.JaroWinkler("MARTHA", "MARHTA", 0.7, 4) + + smetrics.Soundex("Euler") + smetrics.Soundex("Ellery") + + smetrics.Hamming("aaa", "aaa") + smetrics.Hamming("aaa", "aab") +} +``` diff --git a/vendor/github.com/xrash/smetrics/doc.go b/vendor/github.com/xrash/smetrics/doc.go new file mode 100644 index 000000000..21bc986c9 --- /dev/null +++ b/vendor/github.com/xrash/smetrics/doc.go @@ -0,0 +1,19 @@ +/* +Package smetrics provides a bunch of algorithms for calculating +the distance between strings. + +There are implementations for calculating the popular Levenshtein +distance (aka Edit Distance or Wagner-Fischer), as well as the Jaro +distance, the Jaro-Winkler distance, and more. + +For the Levenshtein distance, you can use the functions WagnerFischer() +and Ukkonen(). Read the documentation on these functions. + +For the Jaro and Jaro-Winkler algorithms, check the functions +Jaro() and JaroWinkler(). Read the documentation on these functions. + +For the Soundex algorithm, check the function Soundex(). + +For the Hamming distance algorithm, check the function Hamming(). 
+*/ +package smetrics diff --git a/vendor/github.com/xrash/smetrics/hamming.go b/vendor/github.com/xrash/smetrics/hamming.go new file mode 100644 index 000000000..505d3e5da --- /dev/null +++ b/vendor/github.com/xrash/smetrics/hamming.go @@ -0,0 +1,25 @@ +package smetrics + +import ( + "fmt" +) + +// The Hamming distance is the minimum number of substitutions required to change string A into string B. Both strings must have the same size. If the strings have different sizes, the function returns an error. +func Hamming(a, b string) (int, error) { + al := len(a) + bl := len(b) + + if al != bl { + return -1, fmt.Errorf("strings are not equal (len(a)=%d, len(b)=%d)", al, bl) + } + + var difference = 0 + + for i := range a { + if a[i] != b[i] { + difference = difference + 1 + } + } + + return difference, nil +} diff --git a/vendor/github.com/xrash/smetrics/jaro-winkler.go b/vendor/github.com/xrash/smetrics/jaro-winkler.go new file mode 100644 index 000000000..abdb28883 --- /dev/null +++ b/vendor/github.com/xrash/smetrics/jaro-winkler.go @@ -0,0 +1,28 @@ +package smetrics + +import ( + "math" +) + +// The Jaro-Winkler distance. The result is 1 for equal strings, and 0 for completely different strings. It is commonly used on Record Linkage stuff, thus it tries to be accurate for common typos when writing real names such as person names and street names. +// Jaro-Winkler is a modification of the Jaro algorithm. It works by first running Jaro, then boosting the score of exact matches at the beginning of the strings. Because of that, it introduces two more parameters: the boostThreshold and the prefixSize. These are commonly set to 0.7 and 4, respectively. 
+func JaroWinkler(a, b string, boostThreshold float64, prefixSize int) float64 { + j := Jaro(a, b) + + if j <= boostThreshold { + return j + } + + prefixSize = int(math.Min(float64(len(a)), math.Min(float64(prefixSize), float64(len(b))))) + + var prefixMatch float64 + for i := 0; i < prefixSize; i++ { + if a[i] == b[i] { + prefixMatch++ + } else { + break + } + } + + return j + 0.1*prefixMatch*(1.0-j) +} diff --git a/vendor/github.com/xrash/smetrics/jaro.go b/vendor/github.com/xrash/smetrics/jaro.go new file mode 100644 index 000000000..75f924e11 --- /dev/null +++ b/vendor/github.com/xrash/smetrics/jaro.go @@ -0,0 +1,86 @@ +package smetrics + +import ( + "math" +) + +// The Jaro distance. The result is 1 for equal strings, and 0 for completely different strings. +func Jaro(a, b string) float64 { + // If both strings are zero-length, they are completely equal, + // therefore return 1. + if len(a) == 0 && len(b) == 0 { + return 1 + } + + // If one string is zero-length, strings are completely different, + // therefore return 0. + if len(a) == 0 || len(b) == 0 { + return 0 + } + + // Define the necessary variables for the algorithm. + la := float64(len(a)) + lb := float64(len(b)) + matchRange := int(math.Max(0, math.Floor(math.Max(la, lb)/2.0)-1)) + matchesA := make([]bool, len(a)) + matchesB := make([]bool, len(b)) + var matches float64 = 0 + + // Step 1: Matches + // Loop through each character of the first string, + // looking for a matching character in the second string. + for i := 0; i < len(a); i++ { + start := int(math.Max(0, float64(i-matchRange))) + end := int(math.Min(lb-1, float64(i+matchRange))) + + for j := start; j <= end; j++ { + if matchesB[j] { + continue + } + + if a[i] == b[j] { + matchesA[i] = true + matchesB[j] = true + matches++ + break + } + } + } + + // If there are no matches, strings are completely different, + // therefore return 0. 
+ if matches == 0 { + return 0 + } + + // Step 2: Transpositions + // Loop through the matches' arrays, looking for + // unaligned matches. Count the number of unaligned matches. + unaligned := 0 + j := 0 + for i := 0; i < len(a); i++ { + if !matchesA[i] { + continue + } + + for !matchesB[j] { + j++ + } + + if a[i] != b[j] { + unaligned++ + } + + j++ + } + + // The number of unaligned matches divided by two, is the number of _transpositions_. + transpositions := math.Floor(float64(unaligned / 2)) + + // Jaro distance is the average between these three numbers: + // 1. matches / length of string A + // 2. matches / length of string B + // 3. (matches - transpositions/matches) + // So, all that divided by three is the final result. + return ((matches / la) + (matches / lb) + ((matches - transpositions) / matches)) / 3.0 +} diff --git a/vendor/github.com/xrash/smetrics/soundex.go b/vendor/github.com/xrash/smetrics/soundex.go new file mode 100644 index 000000000..18c3aef72 --- /dev/null +++ b/vendor/github.com/xrash/smetrics/soundex.go @@ -0,0 +1,63 @@ +package smetrics + +import ( + "strings" +) + +// The Soundex encoding. It is a phonetic algorithm that considers how the words sound in English. Soundex maps a string to a 4-byte code consisting of the first letter of the original string and three numbers. Strings that sound similar should map to the same code. 
+func Soundex(s string) string { + b := strings.Builder{} + b.Grow(4) + + p := s[0] + if p <= 'z' && p >= 'a' { + p -= 32 // convert to uppercase + } + b.WriteByte(p) + + n := 0 + for i := 1; i < len(s); i++ { + c := s[i] + + if c <= 'z' && c >= 'a' { + c -= 32 // convert to uppercase + } else if c < 'A' || c > 'Z' { + continue + } + + if c == p { + continue + } + + p = c + + switch c { + case 'B', 'P', 'F', 'V': + c = '1' + case 'C', 'S', 'K', 'G', 'J', 'Q', 'X', 'Z': + c = '2' + case 'D', 'T': + c = '3' + case 'L': + c = '4' + case 'M', 'N': + c = '5' + case 'R': + c = '6' + default: + continue + } + + b.WriteByte(c) + n++ + if n == 3 { + break + } + } + + for i := n; i < 3; i++ { + b.WriteByte('0') + } + + return b.String() +} diff --git a/vendor/github.com/xrash/smetrics/ukkonen.go b/vendor/github.com/xrash/smetrics/ukkonen.go new file mode 100644 index 000000000..3c5579cd9 --- /dev/null +++ b/vendor/github.com/xrash/smetrics/ukkonen.go @@ -0,0 +1,94 @@ +package smetrics + +import ( + "math" +) + +// The Ukkonen algorithm for calculating the Levenshtein distance. The algorithm is described in http://www.cs.helsinki.fi/u/ukkonen/InfCont85.PDF, or in docs/InfCont85.PDF. It runs on O(t . min(m, n)) where t is the actual distance between strings a and b. It needs O(min(t, m, n)) space. This function might be preferred over WagnerFischer() for *very* similar strings. But test it out yourself. +// The first two parameters are the two strings to be compared. The last three parameters are the insertion cost, the deletion cost and the substitution cost. These are normally defined as 1, 1 and 2 respectively. 
+func Ukkonen(a, b string, icost, dcost, scost int) int { + var lowerCost int + + if icost < dcost && icost < scost { + lowerCost = icost + } else if dcost < scost { + lowerCost = dcost + } else { + lowerCost = scost + } + + infinite := math.MaxInt32 / 2 + + var r []int + var k, kprime, p, t int + var ins, del, sub int + + if len(a) > len(b) { + t = (len(a) - len(b) + 1) * lowerCost + } else { + t = (len(b) - len(a) + 1) * lowerCost + } + + for { + if (t / lowerCost) < (len(b) - len(a)) { + continue + } + + // This is the right damn thing since the original Ukkonen + // paper minimizes the expression result only, but the uncommented version + // doesn't need to deal with floats so it's faster. + // p = int(math.Floor(0.5*((float64(t)/float64(lowerCost)) - float64(len(b) - len(a))))) + p = ((t / lowerCost) - (len(b) - len(a))) / 2 + + k = -p + kprime = k + + rowlength := (len(b) - len(a)) + (2 * p) + + r = make([]int, rowlength+2) + + for i := 0; i < rowlength+2; i++ { + r[i] = infinite + } + + for i := 0; i <= len(a); i++ { + for j := 0; j <= rowlength; j++ { + if i == j+k && i == 0 { + r[j] = 0 + } else { + if j-1 < 0 { + ins = infinite + } else { + ins = r[j-1] + icost + } + + del = r[j+1] + dcost + sub = r[j] + scost + + if i-1 < 0 || i-1 >= len(a) || j+k-1 >= len(b) || j+k-1 < 0 { + sub = infinite + } else if a[i-1] == b[j+k-1] { + sub = r[j] + } + + if ins < del && ins < sub { + r[j] = ins + } else if del < sub { + r[j] = del + } else { + r[j] = sub + } + } + } + k++ + } + + if r[(len(b)-len(a))+(2*p)+kprime] <= t { + break + } else { + t *= 2 + } + } + + return r[(len(b)-len(a))+(2*p)+kprime] +} diff --git a/vendor/github.com/xrash/smetrics/wagner-fischer.go b/vendor/github.com/xrash/smetrics/wagner-fischer.go new file mode 100644 index 000000000..9883aea04 --- /dev/null +++ b/vendor/github.com/xrash/smetrics/wagner-fischer.go @@ -0,0 +1,48 @@ +package smetrics + +// The Wagner-Fischer algorithm for calculating the Levenshtein distance. 
+// The first two parameters are the two strings to be compared. The last three parameters are the insertion cost, the deletion cost and the substitution cost. These are normally defined as 1, 1 and 2 respectively. +func WagnerFischer(a, b string, icost, dcost, scost int) int { + + // Allocate both rows. + row1 := make([]int, len(b)+1) + row2 := make([]int, len(b)+1) + var tmp []int + + // Initialize the first row. + for i := 1; i <= len(b); i++ { + row1[i] = i * icost + } + + // For each row... + for i := 1; i <= len(a); i++ { + row2[0] = i * dcost + + // For each column... + for j := 1; j <= len(b); j++ { + if a[i-1] == b[j-1] { + row2[j] = row1[j-1] + } else { + ins := row2[j-1] + icost + del := row1[j] + dcost + sub := row1[j-1] + scost + + if ins < del && ins < sub { + row2[j] = ins + } else if del < sub { + row2[j] = del + } else { + row2[j] = sub + } + } + } + + // Swap the rows at the end of each row. + tmp = row1 + row1 = row2 + row2 = tmp + } + + // Because we swapped the rows, the final result is in row1 instead of row2. + return row1[len(row1)-1] +} diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/id.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/id.go index e854d7e84..2950fdb42 100644 --- a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/id.go +++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/id.go @@ -82,7 +82,7 @@ func marshalJSON(id []byte) ([]byte, error) { } // unmarshalJSON inflates trace id from hex string, possibly enclosed in quotes. 
-func unmarshalJSON(dst []byte, src []byte) error { +func unmarshalJSON(dst, src []byte) error { if l := len(src); l >= 2 && src[0] == '"' && src[l-1] == '"' { src = src[1 : l-1] } diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/number.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/number.go index 29e629d66..5bb3b16c7 100644 --- a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/number.go +++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/number.go @@ -41,7 +41,7 @@ func (i *protoInt64) UnmarshalJSON(data []byte) error { // strings or integers. type protoUint64 uint64 -// Int64 returns the protoUint64 as a uint64. +// Uint64 returns the protoUint64 as a uint64. func (i *protoUint64) Uint64() uint64 { return uint64(*i) } // UnmarshalJSON decodes both strings and integers. diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/span.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/span.go index a13a6b733..67f80b6aa 100644 --- a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/span.go +++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/span.go @@ -10,6 +10,7 @@ import ( "errors" "fmt" "io" + "math" "time" ) @@ -151,8 +152,8 @@ func (s Span) MarshalJSON() ([]byte, error) { }{ Alias: Alias(s), ParentSpanID: parentSpanId, - StartTime: uint64(startT), - EndTime: uint64(endT), + StartTime: uint64(startT), // nolint:gosec // >0 checked above. + EndTime: uint64(endT), // nolint:gosec // >0 checked above. }) } @@ -201,11 +202,13 @@ func (s *Span) UnmarshalJSON(data []byte) error { case "startTimeUnixNano", "start_time_unix_nano": var val protoUint64 err = decoder.Decode(&val) - s.StartTime = time.Unix(0, int64(val.Uint64())) + v := int64(min(val.Uint64(), math.MaxInt64)) //nolint:gosec // Overflow checked. 
+ s.StartTime = time.Unix(0, v) case "endTimeUnixNano", "end_time_unix_nano": var val protoUint64 err = decoder.Decode(&val) - s.EndTime = time.Unix(0, int64(val.Uint64())) + v := int64(min(val.Uint64(), math.MaxInt64)) //nolint:gosec // Overflow checked. + s.EndTime = time.Unix(0, v) case "attributes": err = decoder.Decode(&s.Attrs) case "droppedAttributesCount", "dropped_attributes_count": @@ -248,13 +251,20 @@ func (s *Span) UnmarshalJSON(data []byte) error { type SpanFlags int32 const ( + // SpanFlagsTraceFlagsMask is a mask for trace-flags. + // // Bits 0-7 are used for trace flags. SpanFlagsTraceFlagsMask SpanFlags = 255 - // Bits 8 and 9 are used to indicate that the parent span or link span is remote. - // Bit 8 (`HAS_IS_REMOTE`) indicates whether the value is known. - // Bit 9 (`IS_REMOTE`) indicates whether the span or link is remote. + // SpanFlagsContextHasIsRemoteMask is a mask for HAS_IS_REMOTE status. + // + // Bits 8 and 9 are used to indicate that the parent span or link span is + // remote. Bit 8 (`HAS_IS_REMOTE`) indicates whether the value is known. SpanFlagsContextHasIsRemoteMask SpanFlags = 256 - // SpanFlagsContextHasIsRemoteMask indicates the Span is remote. + // SpanFlagsContextIsRemoteMask is a mask for IS_REMOTE status. + // + // Bits 8 and 9 are used to indicate that the parent span or link span is + // remote. Bit 9 (`IS_REMOTE`) indicates whether the span or link is + // remote. SpanFlagsContextIsRemoteMask SpanFlags = 512 ) @@ -263,26 +273,30 @@ const ( type SpanKind int32 const ( - // Indicates that the span represents an internal operation within an application, - // as opposed to an operation happening at the boundaries. Default value. + // SpanKindInternal indicates that the span represents an internal + // operation within an application, as opposed to an operation happening at + // the boundaries. 
SpanKindInternal SpanKind = 1 - // Indicates that the span covers server-side handling of an RPC or other - // remote network request. + // SpanKindServer indicates that the span covers server-side handling of an + // RPC or other remote network request. SpanKindServer SpanKind = 2 - // Indicates that the span describes a request to some remote service. + // SpanKindClient indicates that the span describes a request to some + // remote service. SpanKindClient SpanKind = 3 - // Indicates that the span describes a producer sending a message to a broker. - // Unlike CLIENT and SERVER, there is often no direct critical path latency relationship - // between producer and consumer spans. A PRODUCER span ends when the message was accepted - // by the broker while the logical processing of the message might span a much longer time. + // SpanKindProducer indicates that the span describes a producer sending a + // message to a broker. Unlike SpanKindClient and SpanKindServer, there is + // often no direct critical path latency relationship between producer and + // consumer spans. A SpanKindProducer span ends when the message was + // accepted by the broker while the logical processing of the message might + // span a much longer time. SpanKindProducer SpanKind = 4 - // Indicates that the span describes consumer receiving a message from a broker. - // Like the PRODUCER kind, there is often no direct critical path latency relationship - // between producer and consumer spans. + // SpanKindConsumer indicates that the span describes a consumer receiving + // a message from a broker. Like SpanKindProducer, there is often no direct + // critical path latency relationship between producer and consumer spans. SpanKindConsumer SpanKind = 5 ) -// Event is a time-stamped annotation of the span, consisting of user-supplied +// SpanEvent is a time-stamped annotation of the span, consisting of user-supplied // text description and key-value pairs. 
type SpanEvent struct { // time_unix_nano is the time the event occurred. @@ -312,7 +326,7 @@ func (e SpanEvent) MarshalJSON() ([]byte, error) { Time uint64 `json:"timeUnixNano,omitempty"` }{ Alias: Alias(e), - Time: uint64(t), + Time: uint64(t), //nolint:gosec // >0 checked above }) } @@ -347,7 +361,8 @@ func (se *SpanEvent) UnmarshalJSON(data []byte) error { case "timeUnixNano", "time_unix_nano": var val protoUint64 err = decoder.Decode(&val) - se.Time = time.Unix(0, int64(val.Uint64())) + v := int64(min(val.Uint64(), math.MaxInt64)) //nolint:gosec // Overflow checked. + se.Time = time.Unix(0, v) case "name": err = decoder.Decode(&se.Name) case "attributes": @@ -365,10 +380,11 @@ func (se *SpanEvent) UnmarshalJSON(data []byte) error { return nil } -// A pointer from the current span to another span in the same trace or in a -// different trace. For example, this can be used in batching operations, -// where a single batch handler processes multiple requests from different -// traces or when the handler receives a request from a different project. +// SpanLink is a reference from the current span to another span in the same +// trace or in a different trace. For example, this can be used in batching +// operations, where a single batch handler processes multiple requests from +// different traces or when the handler receives a request from a different +// project. type SpanLink struct { // A unique identifier of a trace that this linked span is part of. The ID is a // 16-byte array. diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/status.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/status.go index 1217776ea..a2802764f 100644 --- a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/status.go +++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/status.go @@ -3,17 +3,19 @@ package telemetry +// StatusCode is the status of a Span. 
+// // For the semantics of status codes see // https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/api.md#set-status type StatusCode int32 const ( - // The default status. + // StatusCodeUnset is the default status. StatusCodeUnset StatusCode = 0 - // The Span has been validated by an Application developer or Operator to - // have completed successfully. + // StatusCodeOK is used when the Span has been validated by an Application + // developer or Operator to have completed successfully. StatusCodeOK StatusCode = 1 - // The Span contains an error. + // StatusCodeError is used when the Span contains an error. StatusCodeError StatusCode = 2 ) diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/traces.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/traces.go index 69a348f0f..44197b808 100644 --- a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/traces.go +++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/traces.go @@ -71,7 +71,7 @@ func (td *Traces) UnmarshalJSON(data []byte) error { return nil } -// A collection of ScopeSpans from a Resource. +// ResourceSpans is a collection of ScopeSpans from a Resource. type ResourceSpans struct { // The resource for the spans in this message. // If this field is not set then no resource info is known. @@ -128,7 +128,7 @@ func (rs *ResourceSpans) UnmarshalJSON(data []byte) error { return nil } -// A collection of Spans produced by an InstrumentationScope. +// ScopeSpans is a collection of Spans produced by an InstrumentationScope. type ScopeSpans struct { // The instrumentation scope information for the spans in this message. 
// Semantically when InstrumentationScope isn't set, it is equivalent with diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/value.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/value.go index 0dd01b063..022768bb5 100644 --- a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/value.go +++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/value.go @@ -1,8 +1,6 @@ // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 -//go:generate stringer -type=ValueKind -trimprefix=ValueKind - package telemetry import ( @@ -23,7 +21,7 @@ import ( // A zero value is valid and represents an empty value. type Value struct { // Ensure forward compatibility by explicitly making this not comparable. - noCmp [0]func() //nolint: unused // This is indeed used. + noCmp [0]func() //nolint:unused // This is indeed used. // num holds the value for Int64, Float64, and Bool. It holds the length // for String, Bytes, Slice, Map. @@ -92,7 +90,7 @@ func IntValue(v int) Value { return Int64Value(int64(v)) } // Int64Value returns a [Value] for an int64. func Int64Value(v int64) Value { - return Value{num: uint64(v), any: ValueKindInt64} + return Value{num: uint64(v), any: ValueKindInt64} //nolint:gosec // Raw value conv. } // Float64Value returns a [Value] for a float64. @@ -164,7 +162,7 @@ func (v Value) AsInt64() int64 { // this will return garbage. func (v Value) asInt64() int64 { // Assumes v.num was a valid int64 (overflow not checked). - return int64(v.num) // nolint: gosec + return int64(v.num) //nolint:gosec // Bounded. } // AsBool returns the value held by v as a bool. @@ -309,13 +307,13 @@ func (v Value) String() string { return v.asString() case ValueKindInt64: // Assumes v.num was a valid int64 (overflow not checked). - return strconv.FormatInt(int64(v.num), 10) // nolint: gosec + return strconv.FormatInt(int64(v.num), 10) //nolint:gosec // Bounded. 
case ValueKindFloat64: return strconv.FormatFloat(v.asFloat64(), 'g', -1, 64) case ValueKindBool: return strconv.FormatBool(v.asBool()) case ValueKindBytes: - return fmt.Sprint(v.asBytes()) + return string(v.asBytes()) case ValueKindMap: return fmt.Sprint(v.asMap()) case ValueKindSlice: @@ -343,7 +341,7 @@ func (v *Value) MarshalJSON() ([]byte, error) { case ValueKindInt64: return json.Marshal(struct { Value string `json:"intValue"` - }{strconv.FormatInt(int64(v.num), 10)}) + }{strconv.FormatInt(int64(v.num), 10)}) //nolint:gosec // Raw value conv. case ValueKindFloat64: return json.Marshal(struct { Value float64 `json:"doubleValue"` diff --git a/vendor/go.opentelemetry.io/auto/sdk/span.go b/vendor/go.opentelemetry.io/auto/sdk/span.go index 6ebea12a9..815d271ff 100644 --- a/vendor/go.opentelemetry.io/auto/sdk/span.go +++ b/vendor/go.opentelemetry.io/auto/sdk/span.go @@ -6,6 +6,7 @@ package sdk import ( "encoding/json" "fmt" + "math" "reflect" "runtime" "strings" @@ -16,7 +17,7 @@ import ( "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" - semconv "go.opentelemetry.io/otel/semconv/v1.26.0" + semconv "go.opentelemetry.io/otel/semconv/v1.37.0" "go.opentelemetry.io/otel/trace" "go.opentelemetry.io/otel/trace/noop" @@ -85,7 +86,12 @@ func (s *span) SetAttributes(attrs ...attribute.KeyValue) { limit := maxSpan.Attrs if limit == 0 { // No attributes allowed. - s.span.DroppedAttrs += uint32(len(attrs)) + n := int64(len(attrs)) + if n > 0 { + s.span.DroppedAttrs += uint32( //nolint:gosec // Bounds checked. + min(n, math.MaxUint32), + ) + } return } @@ -121,8 +127,13 @@ func (s *span) SetAttributes(attrs ...attribute.KeyValue) { // convCappedAttrs converts up to limit attrs into a []telemetry.Attr. The // number of dropped attributes is also returned. 
func convCappedAttrs(limit int, attrs []attribute.KeyValue) ([]telemetry.Attr, uint32) { + n := len(attrs) if limit == 0 { - return nil, uint32(len(attrs)) + var out uint32 + if n > 0 { + out = uint32(min(int64(n), math.MaxUint32)) //nolint:gosec // Bounds checked. + } + return nil, out } if limit < 0 { @@ -130,8 +141,12 @@ func convCappedAttrs(limit int, attrs []attribute.KeyValue) ([]telemetry.Attr, u return convAttrs(attrs), 0 } - limit = min(len(attrs), limit) - return convAttrs(attrs[:limit]), uint32(len(attrs) - limit) + if n < 0 { + n = 0 + } + + limit = min(n, limit) + return convAttrs(attrs[:limit]), uint32(n - limit) //nolint:gosec // Bounds checked. } func convAttrs(attrs []attribute.KeyValue) []telemetry.Attr { diff --git a/vendor/go.opentelemetry.io/auto/sdk/tracer.go b/vendor/go.opentelemetry.io/auto/sdk/tracer.go index cbcfabde3..e09acf022 100644 --- a/vendor/go.opentelemetry.io/auto/sdk/tracer.go +++ b/vendor/go.opentelemetry.io/auto/sdk/tracer.go @@ -5,6 +5,7 @@ package sdk import ( "context" + "math" "time" "go.opentelemetry.io/otel/trace" @@ -21,15 +22,20 @@ type tracer struct { var _ trace.Tracer = tracer{} -func (t tracer) Start(ctx context.Context, name string, opts ...trace.SpanStartOption) (context.Context, trace.Span) { - var psc trace.SpanContext +func (t tracer) Start( + ctx context.Context, + name string, + opts ...trace.SpanStartOption, +) (context.Context, trace.Span) { + var psc, sc trace.SpanContext sampled := true span := new(span) // Ask eBPF for sampling decision and span context info. - t.start(ctx, span, &psc, &sampled, &span.spanContext) + t.start(ctx, span, &psc, &sampled, &sc) span.sampled.Store(sampled) + span.spanContext = sc ctx = trace.ContextWithSpan(ctx, span) @@ -58,7 +64,13 @@ func (t *tracer) start( // start is used for testing. 
var start = func(context.Context, *span, *trace.SpanContext, *bool, *trace.SpanContext) {} -func (t tracer) traces(name string, cfg trace.SpanConfig, sc, psc trace.SpanContext) (*telemetry.Traces, *telemetry.Span) { +var intToUint32Bound = min(math.MaxInt, math.MaxUint32) + +func (t tracer) traces( + name string, + cfg trace.SpanConfig, + sc, psc trace.SpanContext, +) (*telemetry.Traces, *telemetry.Span) { span := &telemetry.Span{ TraceID: telemetry.TraceID(sc.TraceID()), SpanID: telemetry.SpanID(sc.SpanID()), @@ -73,11 +85,16 @@ func (t tracer) traces(name string, cfg trace.SpanConfig, sc, psc trace.SpanCont links := cfg.Links() if limit := maxSpan.Links; limit == 0 { - span.DroppedLinks = uint32(len(links)) + n := len(links) + if n > 0 { + bounded := max(min(n, intToUint32Bound), 0) + span.DroppedLinks = uint32(bounded) //nolint:gosec // Bounds checked. + } } else { if limit > 0 { n := max(len(links)-limit, 0) - span.DroppedLinks = uint32(n) + bounded := min(n, intToUint32Bound) + span.DroppedLinks = uint32(bounded) //nolint:gosec // Bounds checked. links = links[n:] } span.Links = convLinks(links) diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go index 6aae83bfd..b25641c55 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go @@ -18,7 +18,7 @@ var DefaultClient = &http.Client{Transport: NewTransport(http.DefaultTransport)} // Get is a convenient replacement for http.Get that adds a span around the request. 
func Get(ctx context.Context, targetURL string) (resp *http.Response, err error) { - req, err := http.NewRequestWithContext(ctx, "GET", targetURL, nil) + req, err := http.NewRequestWithContext(ctx, http.MethodGet, targetURL, nil) if err != nil { return nil, err } @@ -27,7 +27,7 @@ func Get(ctx context.Context, targetURL string) (resp *http.Response, err error) // Head is a convenient replacement for http.Head that adds a span around the request. func Head(ctx context.Context, targetURL string) (resp *http.Response, err error) { - req, err := http.NewRequestWithContext(ctx, "HEAD", targetURL, nil) + req, err := http.NewRequestWithContext(ctx, http.MethodHead, targetURL, nil) if err != nil { return nil, err } @@ -36,7 +36,7 @@ func Head(ctx context.Context, targetURL string) (resp *http.Response, err error // Post is a convenient replacement for http.Post that adds a span around the request. func Post(ctx context.Context, targetURL, contentType string, body io.Reader) (resp *http.Response, err error) { - req, err := http.NewRequestWithContext(ctx, "POST", targetURL, body) + req, err := http.NewRequestWithContext(ctx, http.MethodPost, targetURL, body) if err != nil { return nil, err } diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go index 5d6e6156b..a83a02627 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go @@ -18,13 +18,6 @@ const ( WriteErrorKey = attribute.Key("http.write_error") // if an error occurred while writing a reply, the string of the error (io.EOF is not recorded) ) -// Client HTTP metrics. 
-const ( - clientRequestSize = "http.client.request.size" // Outgoing request bytes total - clientResponseSize = "http.client.response.size" // Outgoing response bytes total - clientDuration = "http.client.duration" // Outgoing end to end duration, milliseconds -) - // Filter is a predicate used to determine whether a given http.request should // be traced. A Filter must return true if the request should be traced. type Filter func(*http.Request) bool diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go index a01bfafbe..6bd50d4c9 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go @@ -176,6 +176,10 @@ func WithMessageEvents(events ...event) Option { // WithSpanNameFormatter takes a function that will be called on every // request and the returned string will become the Span Name. +// +// When using [http.ServeMux] (or any middleware that sets the Pattern of [http.Request]), +// the span name formatter will run twice. Once when the span is created, and +// second time after the middleware, so the pattern can be used. 
func WithSpanNameFormatter(f func(operation string, r *http.Request) string) Option { return optionFunc(func(c *config) { c.SpanNameFormatter = f diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go index 33580a35b..937f9b4e7 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go @@ -12,6 +12,7 @@ import ( "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request" "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv" "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/propagation" "go.opentelemetry.io/otel/trace" ) @@ -21,15 +22,16 @@ type middleware struct { operation string server string - tracer trace.Tracer - propagators propagation.TextMapPropagator - spanStartOptions []trace.SpanStartOption - readEvent bool - writeEvent bool - filters []Filter - spanNameFormatter func(string, *http.Request) string - publicEndpoint bool - publicEndpointFn func(*http.Request) bool + tracer trace.Tracer + propagators propagation.TextMapPropagator + spanStartOptions []trace.SpanStartOption + readEvent bool + writeEvent bool + filters []Filter + spanNameFormatter func(string, *http.Request) string + publicEndpoint bool + publicEndpointFn func(*http.Request) bool + metricAttributesFn func(*http.Request) []attribute.KeyValue semconv semconv.HTTPServer } @@ -79,12 +81,7 @@ func (h *middleware) configure(c *config) { h.publicEndpointFn = c.PublicEndpointFn h.server = c.ServerName h.semconv = semconv.NewHTTPServer(c.Meter) -} - -func handleErr(err error) { - if err != nil { - otel.Handle(err) - } + h.metricAttributesFn = c.MetricAttributesFn } // serveHTTP sets up tracing and calls the given next http.Handler with the span @@ -101,7 +98,7 @@ func (h 
*middleware) serveHTTP(w http.ResponseWriter, r *http.Request, next http ctx := h.propagators.Extract(r.Context(), propagation.HeaderCarrier(r.Header)) opts := []trace.SpanStartOption{ - trace.WithAttributes(h.semconv.RequestTraceAttrs(h.server, r)...), + trace.WithAttributes(h.semconv.RequestTraceAttrs(h.server, r, semconv.RequestTraceAttrsOpts{})...), } opts = append(opts, h.spanStartOptions...) @@ -123,6 +120,11 @@ func (h *middleware) serveHTTP(w http.ResponseWriter, r *http.Request, next http } } + if startTime := StartTimeFromContext(ctx); !startTime.IsZero() { + opts = append(opts, trace.WithTimestamp(startTime)) + requestStartTime = startTime + } + ctx, span := tracer.Start(ctx, h.spanNameFormatter(h.operation, r), opts...) defer span.End() @@ -174,7 +176,12 @@ func (h *middleware) serveHTTP(w http.ResponseWriter, r *http.Request, next http ctx = ContextWithLabeler(ctx, labeler) } - next.ServeHTTP(w, r.WithContext(ctx)) + r = r.WithContext(ctx) + next.ServeHTTP(w, r) + + if r.Pattern != "" { + span.SetName(h.spanNameFormatter(h.operation, r)) + } statusCode := rww.StatusCode() bytesWritten := rww.BytesWritten() @@ -190,17 +197,31 @@ func (h *middleware) serveHTTP(w http.ResponseWriter, r *http.Request, next http // Use floating point division here for higher precision (instead of Millisecond method). 
elapsedTime := float64(time.Since(requestStartTime)) / float64(time.Millisecond) - h.semconv.RecordMetrics(ctx, semconv.MetricData{ - ServerName: h.server, + metricAttributes := semconv.MetricAttributes{ Req: r, StatusCode: statusCode, - AdditionalAttributes: labeler.Get(), - RequestSize: bw.BytesRead(), - ResponseSize: bytesWritten, - ElapsedTime: elapsedTime, + AdditionalAttributes: append(labeler.Get(), h.metricAttributesFromRequest(r)...), + } + + h.semconv.RecordMetrics(ctx, semconv.ServerMetricData{ + ServerName: h.server, + ResponseSize: bytesWritten, + MetricAttributes: metricAttributes, + MetricData: semconv.MetricData{ + RequestSize: bw.BytesRead(), + ElapsedTime: elapsedTime, + }, }) } +func (h *middleware) metricAttributesFromRequest(r *http.Request) []attribute.KeyValue { + var attributeForRequest []attribute.KeyValue + if h.metricAttributesFn != nil { + attributeForRequest = h.metricAttributesFn(r) + } + return attributeForRequest +} + // WithRouteTag annotates spans and metrics with the provided route name // with HTTP route attribute. func WithRouteTag(route string, h http.Handler) http.Handler { diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/body_wrapper.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/body_wrapper.go index a945f5566..d032aa841 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/body_wrapper.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/body_wrapper.go @@ -1,6 +1,11 @@ +// Code generated by gotmpl. DO NOT MODIFY. +// source: internal/shared/request/body_wrapper.go.tmpl + // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 +// Package request provides types and functionality to handle HTTP request +// handling. 
package request // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request" import ( @@ -53,7 +58,7 @@ func (w *BodyWrapper) updateReadData(n int64, err error) { } } -// Closes closes the io.ReadCloser. +// Close closes the io.ReadCloser. func (w *BodyWrapper) Close() error { return w.ReadCloser.Close() } diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/gen.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/gen.go new file mode 100644 index 000000000..9e00dd2fc --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/gen.go @@ -0,0 +1,10 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package request // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request" + +// Generate request package: +//go:generate gotmpl --body=../../../../../../internal/shared/request/body_wrapper.go.tmpl "--data={}" --out=body_wrapper.go +//go:generate gotmpl --body=../../../../../../internal/shared/request/body_wrapper_test.go.tmpl "--data={}" --out=body_wrapper_test.go +//go:generate gotmpl --body=../../../../../../internal/shared/request/resp_writer_wrapper.go.tmpl "--data={}" --out=resp_writer_wrapper.go +//go:generate gotmpl --body=../../../../../../internal/shared/request/resp_writer_wrapper_test.go.tmpl "--data={}" --out=resp_writer_wrapper_test.go diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/resp_writer_wrapper.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/resp_writer_wrapper.go index aea171fb2..ca2e4c14c 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/resp_writer_wrapper.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/resp_writer_wrapper.go @@ -1,3 
+1,6 @@ +// Code generated by gotmpl. DO NOT MODIFY. +// source: internal/shared/request/resp_writer_wrapper.go.tmpl + // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 @@ -44,7 +47,9 @@ func (w *RespWriterWrapper) Write(p []byte) (int, error) { w.mu.Lock() defer w.mu.Unlock() - w.writeHeader(http.StatusOK) + if !w.wroteHeader { + w.writeHeader(http.StatusOK) + } n, err := w.ResponseWriter.Write(p) n1 := int64(n) @@ -80,7 +85,12 @@ func (w *RespWriterWrapper) writeHeader(statusCode int) { // Flush implements [http.Flusher]. func (w *RespWriterWrapper) Flush() { - w.WriteHeader(http.StatusOK) + w.mu.Lock() + defer w.mu.Unlock() + + if !w.wroteHeader { + w.writeHeader(http.StatusOK) + } if f, ok := w.ResponseWriter.(http.Flusher); ok { f.Flush() @@ -95,7 +105,7 @@ func (w *RespWriterWrapper) BytesWritten() int64 { return w.written } -// BytesWritten returns the HTTP status code that was sent. +// StatusCode returns the HTTP status code that was sent. func (w *RespWriterWrapper) StatusCode() int { w.mu.RLock() defer w.mu.RUnlock() diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go index 9cae4cab8..7cb9693d9 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go @@ -1,3 +1,6 @@ +// Code generated by gotmpl. DO NOT MODIFY. +// source: internal/shared/semconv/env.go.tmpl + // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 @@ -9,12 +12,17 @@ import ( "net/http" "os" "strings" + "sync" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" "go.opentelemetry.io/otel/metric" ) +// OTelSemConvStabilityOptIn is an environment variable. 
+// That can be set to "http/dup" to keep getting the old HTTP semantic conventions. +const OTelSemConvStabilityOptIn = "OTEL_SEMCONV_STABILITY_OPT_IN" + type ResponseTelemetry struct { StatusCode int ReadBytes int64 @@ -30,6 +38,11 @@ type HTTPServer struct { requestBytesCounter metric.Int64Counter responseBytesCounter metric.Int64Counter serverLatencyMeasure metric.Float64Histogram + + // New metrics + requestBodySizeHistogram metric.Int64Histogram + responseBodySizeHistogram metric.Int64Histogram + requestDurationHistogram metric.Float64Histogram } // RequestTraceAttrs returns trace attributes for an HTTP request received by a @@ -48,26 +61,40 @@ type HTTPServer struct { // // If the primary server name is not known, server should be an empty string. // The req Host will be used to determine the server instead. -func (s HTTPServer) RequestTraceAttrs(server string, req *http.Request) []attribute.KeyValue { +func (s HTTPServer) RequestTraceAttrs(server string, req *http.Request, opts RequestTraceAttrsOpts) []attribute.KeyValue { + attrs := CurrentHTTPServer{}.RequestTraceAttrs(server, req, opts) if s.duplicate { - return append(oldHTTPServer{}.RequestTraceAttrs(server, req), newHTTPServer{}.RequestTraceAttrs(server, req)...) + return OldHTTPServer{}.RequestTraceAttrs(server, req, attrs) + } + return attrs +} + +func (s HTTPServer) NetworkTransportAttr(network string) []attribute.KeyValue { + if s.duplicate { + return []attribute.KeyValue{ + OldHTTPServer{}.NetworkTransportAttr(network), + CurrentHTTPServer{}.NetworkTransportAttr(network), + } + } + return []attribute.KeyValue{ + CurrentHTTPServer{}.NetworkTransportAttr(network), } - return oldHTTPServer{}.RequestTraceAttrs(server, req) } // ResponseTraceAttrs returns trace attributes for telemetry from an HTTP response. // // If any of the fields in the ResponseTelemetry are not set the attribute will be omitted. 
func (s HTTPServer) ResponseTraceAttrs(resp ResponseTelemetry) []attribute.KeyValue { + attrs := CurrentHTTPServer{}.ResponseTraceAttrs(resp) if s.duplicate { - return append(oldHTTPServer{}.ResponseTraceAttrs(resp), newHTTPServer{}.ResponseTraceAttrs(resp)...) + return OldHTTPServer{}.ResponseTraceAttrs(resp, attrs) } - return oldHTTPServer{}.ResponseTraceAttrs(resp) + return attrs } // Route returns the attribute for the route. func (s HTTPServer) Route(route string) attribute.KeyValue { - return oldHTTPServer{}.Route(route) + return CurrentHTTPServer{}.Route(route) } // Status returns a span status code and message for an HTTP status code @@ -83,67 +110,134 @@ func (s HTTPServer) Status(code int) (codes.Code, string) { return codes.Unset, "" } -type MetricData struct { - ServerName string +type ServerMetricData struct { + ServerName string + ResponseSize int64 + + MetricData + MetricAttributes +} + +type MetricAttributes struct { Req *http.Request StatusCode int AdditionalAttributes []attribute.KeyValue +} - RequestSize int64 - ResponseSize int64 - ElapsedTime float64 +type MetricData struct { + RequestSize int64 + + // The request duration, in milliseconds + ElapsedTime float64 } -func (s HTTPServer) RecordMetrics(ctx context.Context, md MetricData) { - if s.requestBytesCounter == nil || s.responseBytesCounter == nil || s.serverLatencyMeasure == nil { - // This will happen if an HTTPServer{} is used insted of NewHTTPServer. 
- return +var ( + metricAddOptionPool = &sync.Pool{ + New: func() interface{} { + return &[]metric.AddOption{} + }, + } + + metricRecordOptionPool = &sync.Pool{ + New: func() interface{} { + return &[]metric.RecordOption{} + }, + } +) + +func (s HTTPServer) RecordMetrics(ctx context.Context, md ServerMetricData) { + if s.requestDurationHistogram != nil && s.requestBodySizeHistogram != nil && s.responseBodySizeHistogram != nil { + attributes := CurrentHTTPServer{}.MetricAttributes(md.ServerName, md.Req, md.StatusCode, md.AdditionalAttributes) + o := metric.WithAttributeSet(attribute.NewSet(attributes...)) + recordOpts := metricRecordOptionPool.Get().(*[]metric.RecordOption) + *recordOpts = append(*recordOpts, o) + s.requestBodySizeHistogram.Record(ctx, md.RequestSize, *recordOpts...) + s.responseBodySizeHistogram.Record(ctx, md.ResponseSize, *recordOpts...) + s.requestDurationHistogram.Record(ctx, md.ElapsedTime/1000.0, o) + *recordOpts = (*recordOpts)[:0] + metricRecordOptionPool.Put(recordOpts) } - attributes := oldHTTPServer{}.MetricAttributes(md.ServerName, md.Req, md.StatusCode, md.AdditionalAttributes) - o := metric.WithAttributeSet(attribute.NewSet(attributes...)) - addOpts := []metric.AddOption{o} // Allocate vararg slice once. - s.requestBytesCounter.Add(ctx, md.RequestSize, addOpts...) - s.responseBytesCounter.Add(ctx, md.ResponseSize, addOpts...) - s.serverLatencyMeasure.Record(ctx, md.ElapsedTime, o) + if s.duplicate && s.requestBytesCounter != nil && s.responseBytesCounter != nil && s.serverLatencyMeasure != nil { + attributes := OldHTTPServer{}.MetricAttributes(md.ServerName, md.Req, md.StatusCode, md.AdditionalAttributes) + o := metric.WithAttributeSet(attribute.NewSet(attributes...)) + addOpts := metricAddOptionPool.Get().(*[]metric.AddOption) + *addOpts = append(*addOpts, o) + s.requestBytesCounter.Add(ctx, md.RequestSize, *addOpts...) + s.responseBytesCounter.Add(ctx, md.ResponseSize, *addOpts...) 
+ s.serverLatencyMeasure.Record(ctx, md.ElapsedTime, o) + *addOpts = (*addOpts)[:0] + metricAddOptionPool.Put(addOpts) + } +} - // TODO: Duplicate Metrics +// hasOptIn returns true if the comma-separated version string contains the +// exact optIn value. +func hasOptIn(version, optIn string) bool { + for _, v := range strings.Split(version, ",") { + if strings.TrimSpace(v) == optIn { + return true + } + } + return false } func NewHTTPServer(meter metric.Meter) HTTPServer { - env := strings.ToLower(os.Getenv("OTEL_SEMCONV_STABILITY_OPT_IN")) - duplicate := env == "http/dup" + env := strings.ToLower(os.Getenv(OTelSemConvStabilityOptIn)) + duplicate := hasOptIn(env, "http/dup") server := HTTPServer{ duplicate: duplicate, } - server.requestBytesCounter, server.responseBytesCounter, server.serverLatencyMeasure = oldHTTPServer{}.createMeasures(meter) + server.requestBodySizeHistogram, server.responseBodySizeHistogram, server.requestDurationHistogram = CurrentHTTPServer{}.createMeasures(meter) + if duplicate { + server.requestBytesCounter, server.responseBytesCounter, server.serverLatencyMeasure = OldHTTPServer{}.createMeasures(meter) + } return server } type HTTPClient struct { duplicate bool + + // old metrics + requestBytesCounter metric.Int64Counter + responseBytesCounter metric.Int64Counter + latencyMeasure metric.Float64Histogram + + // new metrics + requestBodySize metric.Int64Histogram + requestDuration metric.Float64Histogram } -func NewHTTPClient() HTTPClient { - env := strings.ToLower(os.Getenv("OTEL_SEMCONV_STABILITY_OPT_IN")) - return HTTPClient{duplicate: env == "http/dup"} +func NewHTTPClient(meter metric.Meter) HTTPClient { + env := strings.ToLower(os.Getenv(OTelSemConvStabilityOptIn)) + duplicate := hasOptIn(env, "http/dup") + client := HTTPClient{ + duplicate: duplicate, + } + client.requestBodySize, client.requestDuration = CurrentHTTPClient{}.createMeasures(meter) + if duplicate { + client.requestBytesCounter, client.responseBytesCounter, 
client.latencyMeasure = OldHTTPClient{}.createMeasures(meter) + } + + return client } // RequestTraceAttrs returns attributes for an HTTP request made by a client. func (c HTTPClient) RequestTraceAttrs(req *http.Request) []attribute.KeyValue { + attrs := CurrentHTTPClient{}.RequestTraceAttrs(req) if c.duplicate { - return append(oldHTTPClient{}.RequestTraceAttrs(req), newHTTPClient{}.RequestTraceAttrs(req)...) + return OldHTTPClient{}.RequestTraceAttrs(req, attrs) } - return oldHTTPClient{}.RequestTraceAttrs(req) + return attrs } // ResponseTraceAttrs returns metric attributes for an HTTP request made by a client. func (c HTTPClient) ResponseTraceAttrs(resp *http.Response) []attribute.KeyValue { + attrs := CurrentHTTPClient{}.ResponseTraceAttrs(resp) if c.duplicate { - return append(oldHTTPClient{}.ResponseTraceAttrs(resp), newHTTPClient{}.ResponseTraceAttrs(resp)...) + return OldHTTPClient{}.ResponseTraceAttrs(resp, attrs) } - - return oldHTTPClient{}.ResponseTraceAttrs(resp) + return attrs } func (c HTTPClient) Status(code int) (codes.Code, string) { @@ -157,9 +251,73 @@ func (c HTTPClient) Status(code int) (codes.Code, string) { } func (c HTTPClient) ErrorType(err error) attribute.KeyValue { + return CurrentHTTPClient{}.ErrorType(err) +} + +type MetricOpts struct { + measurement metric.MeasurementOption + addOptions metric.AddOption +} + +func (o MetricOpts) MeasurementOption() metric.MeasurementOption { + return o.measurement +} + +func (o MetricOpts) AddOptions() metric.AddOption { + return o.addOptions +} + +func (c HTTPClient) MetricOptions(ma MetricAttributes) map[string]MetricOpts { + opts := map[string]MetricOpts{} + + attributes := CurrentHTTPClient{}.MetricAttributes(ma.Req, ma.StatusCode, ma.AdditionalAttributes) + set := metric.WithAttributeSet(attribute.NewSet(attributes...)) + opts["new"] = MetricOpts{ + measurement: set, + addOptions: set, + } + if c.duplicate { - return newHTTPClient{}.ErrorType(err) + attributes := 
OldHTTPClient{}.MetricAttributes(ma.Req, ma.StatusCode, ma.AdditionalAttributes) + set := metric.WithAttributeSet(attribute.NewSet(attributes...)) + opts["old"] = MetricOpts{ + measurement: set, + addOptions: set, + } + } + + return opts +} + +func (s HTTPClient) RecordMetrics(ctx context.Context, md MetricData, opts map[string]MetricOpts) { + if s.requestBodySize == nil || s.requestDuration == nil { + // This will happen if an HTTPClient{} is used instead of NewHTTPClient(). + return + } + + s.requestBodySize.Record(ctx, md.RequestSize, opts["new"].MeasurementOption()) + s.requestDuration.Record(ctx, md.ElapsedTime/1000, opts["new"].MeasurementOption()) + + if s.duplicate { + s.requestBytesCounter.Add(ctx, md.RequestSize, opts["old"].AddOptions()) + s.latencyMeasure.Record(ctx, md.ElapsedTime, opts["old"].MeasurementOption()) + } +} + +func (s HTTPClient) RecordResponseSize(ctx context.Context, responseData int64, opts map[string]MetricOpts) { + if s.responseBytesCounter == nil { + // This will happen if an HTTPClient{} is used instead of NewHTTPClient(). 
+ return + } + + s.responseBytesCounter.Add(ctx, responseData, opts["old"].AddOptions()) +} + +func (s HTTPClient) TraceAttributes(host string) []attribute.KeyValue { + attrs := CurrentHTTPClient{}.TraceAttributes(host) + if s.duplicate { + return OldHTTPClient{}.TraceAttributes(host, attrs) } - return attribute.KeyValue{} + return attrs } diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/gen.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/gen.go new file mode 100644 index 000000000..f2cf8a152 --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/gen.go @@ -0,0 +1,14 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package semconv // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv" + +// Generate semconv package: +//go:generate gotmpl --body=../../../../../../internal/shared/semconv/bench_test.go.tmpl "--data={ \"pkg\": \"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp\" }" --out=bench_test.go +//go:generate gotmpl --body=../../../../../../internal/shared/semconv/env.go.tmpl "--data={ \"pkg\": \"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp\" }" --out=env.go +//go:generate gotmpl --body=../../../../../../internal/shared/semconv/env_test.go.tmpl "--data={ \"pkg\": \"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp\" }" --out=env_test.go +//go:generate gotmpl --body=../../../../../../internal/shared/semconv/httpconv.go.tmpl "--data={ \"pkg\": \"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp\" }" --out=httpconv.go +//go:generate gotmpl --body=../../../../../../internal/shared/semconv/httpconv_test.go.tmpl "--data={ \"pkg\": \"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp\" }" --out=httpconv_test.go +//go:generate gotmpl 
--body=../../../../../../internal/shared/semconv/util.go.tmpl "--data={ \"pkg\": \"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp\" }" --out=util.go +//go:generate gotmpl --body=../../../../../../internal/shared/semconv/util_test.go.tmpl "--data={ \"pkg\": \"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp\" }" --out=util_test.go +//go:generate gotmpl --body=../../../../../../internal/shared/semconv/v1.20.0.go.tmpl "--data={ \"pkg\": \"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp\" }" --out=v1.20.0.go diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/httpconv.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/httpconv.go index 745b8c67b..53976b0d5 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/httpconv.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/httpconv.go @@ -1,22 +1,35 @@ +// Code generated by gotmpl. DO NOT MODIFY. +// source: internal/shared/semconv/httpconv.go.tmpl + // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 +// Package semconv provides OpenTelemetry semantic convention types and +// functionality. package semconv // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv" import ( "fmt" "net/http" "reflect" + "slices" "strconv" "strings" "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/metric/noop" semconvNew "go.opentelemetry.io/otel/semconv/v1.26.0" ) -type newHTTPServer struct{} +type RequestTraceAttrsOpts struct { + // If set, this is used as value for the "http.client_ip" attribute. 
+ HTTPClientIP string +} + +type CurrentHTTPServer struct{} -// TraceRequest returns trace attributes for an HTTP request received by a +// RequestTraceAttrs returns trace attributes for an HTTP request received by a // server. // // The server must be the primary server name if it is known. For example this @@ -32,18 +45,18 @@ type newHTTPServer struct{} // // If the primary server name is not known, server should be an empty string. // The req Host will be used to determine the server instead. -func (n newHTTPServer) RequestTraceAttrs(server string, req *http.Request) []attribute.KeyValue { +func (n CurrentHTTPServer) RequestTraceAttrs(server string, req *http.Request, opts RequestTraceAttrsOpts) []attribute.KeyValue { count := 3 // ServerAddress, Method, Scheme var host string var p int if server == "" { - host, p = splitHostPort(req.Host) + host, p = SplitHostPort(req.Host) } else { // Prioritize the primary server name. - host, p = splitHostPort(server) + host, p = SplitHostPort(server) if p < 0 { - _, p = splitHostPort(req.Host) + _, p = SplitHostPort(req.Host) } } @@ -59,7 +72,8 @@ func (n newHTTPServer) RequestTraceAttrs(server string, req *http.Request) []att scheme := n.scheme(req.TLS != nil) - if peer, peerPort := splitHostPort(req.RemoteAddr); peer != "" { + peer, peerPort := SplitHostPort(req.RemoteAddr) + if peer != "" { // The Go HTTP server sets RemoteAddr to "IP:port", this will not be a // file-path that would be interpreted with a sock family. count++ @@ -73,7 +87,17 @@ func (n newHTTPServer) RequestTraceAttrs(server string, req *http.Request) []att count++ } - clientIP := serverClientIP(req.Header.Get("X-Forwarded-For")) + // For client IP, use, in order: + // 1. The value passed in the options + // 2. The value in the X-Forwarded-For header + // 3. 
The peer address + clientIP := opts.HTTPClientIP + if clientIP == "" { + clientIP = serverClientIP(req.Header.Get("X-Forwarded-For")) + if clientIP == "" { + clientIP = peer + } + } if clientIP != "" { count++ } @@ -90,6 +114,11 @@ func (n newHTTPServer) RequestTraceAttrs(server string, req *http.Request) []att count++ } + route := httpRoute(req.Pattern) + if route != "" { + count++ + } + attrs := make([]attribute.KeyValue, 0, count) attrs = append(attrs, semconvNew.ServerAddress(host), @@ -104,7 +133,7 @@ func (n newHTTPServer) RequestTraceAttrs(server string, req *http.Request) []att attrs = append(attrs, methodOriginal) } - if peer, peerPort := splitHostPort(req.RemoteAddr); peer != "" { + if peer, peerPort := SplitHostPort(req.RemoteAddr); peer != "" { // The Go HTTP server sets RemoteAddr to "IP:port", this will not be a // file-path that would be interpreted with a sock family. attrs = append(attrs, semconvNew.NetworkPeerAddress(peer)) @@ -113,7 +142,7 @@ func (n newHTTPServer) RequestTraceAttrs(server string, req *http.Request) []att } } - if useragent := req.UserAgent(); useragent != "" { + if useragent != "" { attrs = append(attrs, semconvNew.UserAgentOriginal(useragent)) } @@ -132,10 +161,27 @@ func (n newHTTPServer) RequestTraceAttrs(server string, req *http.Request) []att attrs = append(attrs, semconvNew.NetworkProtocolVersion(protoVersion)) } + if route != "" { + attrs = append(attrs, n.Route(route)) + } + return attrs } -func (n newHTTPServer) method(method string) (attribute.KeyValue, attribute.KeyValue) { +func (n CurrentHTTPServer) NetworkTransportAttr(network string) attribute.KeyValue { + switch network { + case "tcp", "tcp4", "tcp6": + return semconvNew.NetworkTransportTCP + case "udp", "udp4", "udp6": + return semconvNew.NetworkTransportUDP + case "unix", "unixgram", "unixpacket": + return semconvNew.NetworkTransportUnix + default: + return semconvNew.NetworkTransportPipe + } +} + +func (n CurrentHTTPServer) method(method string) 
(attribute.KeyValue, attribute.KeyValue) { if method == "" { return semconvNew.HTTPRequestMethodGet, attribute.KeyValue{} } @@ -150,17 +196,19 @@ func (n newHTTPServer) method(method string) (attribute.KeyValue, attribute.KeyV return semconvNew.HTTPRequestMethodGet, orig } -func (n newHTTPServer) scheme(https bool) attribute.KeyValue { // nolint:revive +func (n CurrentHTTPServer) scheme(https bool) attribute.KeyValue { // nolint:revive if https { return semconvNew.URLScheme("https") } return semconvNew.URLScheme("http") } -// TraceResponse returns trace attributes for telemetry from an HTTP response. +// ResponseTraceAttrs returns trace attributes for telemetry from an HTTP +// response. // -// If any of the fields in the ResponseTelemetry are not set the attribute will be omitted. -func (n newHTTPServer) ResponseTraceAttrs(resp ResponseTelemetry) []attribute.KeyValue { +// If any of the fields in the ResponseTelemetry are not set the attribute will +// be omitted. +func (n CurrentHTTPServer) ResponseTraceAttrs(resp ResponseTelemetry) []attribute.KeyValue { var count int if resp.ReadBytes > 0 { @@ -195,14 +243,95 @@ func (n newHTTPServer) ResponseTraceAttrs(resp ResponseTelemetry) []attribute.Ke } // Route returns the attribute for the route. 
-func (n newHTTPServer) Route(route string) attribute.KeyValue { +func (n CurrentHTTPServer) Route(route string) attribute.KeyValue { return semconvNew.HTTPRoute(route) } -type newHTTPClient struct{} +func (n CurrentHTTPServer) createMeasures(meter metric.Meter) (metric.Int64Histogram, metric.Int64Histogram, metric.Float64Histogram) { + if meter == nil { + return noop.Int64Histogram{}, noop.Int64Histogram{}, noop.Float64Histogram{} + } + + var err error + requestBodySizeHistogram, err := meter.Int64Histogram( + semconvNew.HTTPServerRequestBodySizeName, + metric.WithUnit(semconvNew.HTTPServerRequestBodySizeUnit), + metric.WithDescription(semconvNew.HTTPServerRequestBodySizeDescription), + ) + handleErr(err) + + responseBodySizeHistogram, err := meter.Int64Histogram( + semconvNew.HTTPServerResponseBodySizeName, + metric.WithUnit(semconvNew.HTTPServerResponseBodySizeUnit), + metric.WithDescription(semconvNew.HTTPServerResponseBodySizeDescription), + ) + handleErr(err) + requestDurationHistogram, err := meter.Float64Histogram( + semconvNew.HTTPServerRequestDurationName, + metric.WithUnit(semconvNew.HTTPServerRequestDurationUnit), + metric.WithDescription(semconvNew.HTTPServerRequestDurationDescription), + metric.WithExplicitBucketBoundaries(0.005, 0.01, 0.025, 0.05, 0.075, 0.1, 0.25, 0.5, 0.75, 1, 2.5, 5, 7.5, 10), + ) + handleErr(err) + + return requestBodySizeHistogram, responseBodySizeHistogram, requestDurationHistogram +} + +func (n CurrentHTTPServer) MetricAttributes(server string, req *http.Request, statusCode int, additionalAttributes []attribute.KeyValue) []attribute.KeyValue { + num := len(additionalAttributes) + 3 + var host string + var p int + if server == "" { + host, p = SplitHostPort(req.Host) + } else { + // Prioritize the primary server name. 
+ host, p = SplitHostPort(server) + if p < 0 { + _, p = SplitHostPort(req.Host) + } + } + hostPort := requiredHTTPPort(req.TLS != nil, p) + if hostPort > 0 { + num++ + } + protoName, protoVersion := netProtocol(req.Proto) + if protoName != "" { + num++ + } + if protoVersion != "" { + num++ + } + + if statusCode > 0 { + num++ + } + + attributes := slices.Grow(additionalAttributes, num) + attributes = append(attributes, + semconvNew.HTTPRequestMethodKey.String(standardizeHTTPMethod(req.Method)), + n.scheme(req.TLS != nil), + semconvNew.ServerAddress(host)) + + if hostPort > 0 { + attributes = append(attributes, semconvNew.ServerPort(hostPort)) + } + if protoName != "" { + attributes = append(attributes, semconvNew.NetworkProtocolName(protoName)) + } + if protoVersion != "" { + attributes = append(attributes, semconvNew.NetworkProtocolVersion(protoVersion)) + } + + if statusCode > 0 { + attributes = append(attributes, semconvNew.HTTPResponseStatusCode(statusCode)) + } + return attributes +} + +type CurrentHTTPClient struct{} // RequestTraceAttrs returns trace attributes for an HTTP request made by a client. -func (n newHTTPClient) RequestTraceAttrs(req *http.Request) []attribute.KeyValue { +func (n CurrentHTTPClient) RequestTraceAttrs(req *http.Request) []attribute.KeyValue { /* below attributes are returned: - http.request.method @@ -222,7 +351,7 @@ func (n newHTTPClient) RequestTraceAttrs(req *http.Request) []attribute.KeyValue var requestHost string var requestPort int for _, hostport := range []string{urlHost, req.Header.Get("Host")} { - requestHost, requestPort = splitHostPort(hostport) + requestHost, requestPort = SplitHostPort(hostport) if requestHost != "" || requestPort > 0 { break } @@ -284,7 +413,7 @@ func (n newHTTPClient) RequestTraceAttrs(req *http.Request) []attribute.KeyValue } // ResponseTraceAttrs returns trace attributes for an HTTP response made by a client. 
-func (n newHTTPClient) ResponseTraceAttrs(resp *http.Response) []attribute.KeyValue { +func (n CurrentHTTPClient) ResponseTraceAttrs(resp *http.Response) []attribute.KeyValue { /* below attributes are returned: - http.response.status_code @@ -311,7 +440,7 @@ func (n newHTTPClient) ResponseTraceAttrs(resp *http.Response) []attribute.KeyVa return attrs } -func (n newHTTPClient) ErrorType(err error) attribute.KeyValue { +func (n CurrentHTTPClient) ErrorType(err error) attribute.KeyValue { t := reflect.TypeOf(err) var value string if t.PkgPath() == "" && t.Name() == "" { @@ -328,7 +457,7 @@ func (n newHTTPClient) ErrorType(err error) attribute.KeyValue { return semconvNew.ErrorTypeKey.String(value) } -func (n newHTTPClient) method(method string) (attribute.KeyValue, attribute.KeyValue) { +func (n CurrentHTTPClient) method(method string) (attribute.KeyValue, attribute.KeyValue) { if method == "" { return semconvNew.HTTPRequestMethodGet, attribute.KeyValue{} } @@ -343,6 +472,102 @@ func (n newHTTPClient) method(method string) (attribute.KeyValue, attribute.KeyV return semconvNew.HTTPRequestMethodGet, orig } +func (n CurrentHTTPClient) createMeasures(meter metric.Meter) (metric.Int64Histogram, metric.Float64Histogram) { + if meter == nil { + return noop.Int64Histogram{}, noop.Float64Histogram{} + } + + var err error + requestBodySize, err := meter.Int64Histogram( + semconvNew.HTTPClientRequestBodySizeName, + metric.WithUnit(semconvNew.HTTPClientRequestBodySizeUnit), + metric.WithDescription(semconvNew.HTTPClientRequestBodySizeDescription), + ) + handleErr(err) + + requestDuration, err := meter.Float64Histogram( + semconvNew.HTTPClientRequestDurationName, + metric.WithUnit(semconvNew.HTTPClientRequestDurationUnit), + metric.WithDescription(semconvNew.HTTPClientRequestDurationDescription), + metric.WithExplicitBucketBoundaries(0.005, 0.01, 0.025, 0.05, 0.075, 0.1, 0.25, 0.5, 0.75, 1, 2.5, 5, 7.5, 10), + ) + handleErr(err) + + return requestBodySize, requestDuration +} + 
+func (n CurrentHTTPClient) MetricAttributes(req *http.Request, statusCode int, additionalAttributes []attribute.KeyValue) []attribute.KeyValue { + num := len(additionalAttributes) + 2 + var h string + if req.URL != nil { + h = req.URL.Host + } + var requestHost string + var requestPort int + for _, hostport := range []string{h, req.Header.Get("Host")} { + requestHost, requestPort = SplitHostPort(hostport) + if requestHost != "" || requestPort > 0 { + break + } + } + + port := requiredHTTPPort(req.URL != nil && req.URL.Scheme == "https", requestPort) + if port > 0 { + num++ + } + + protoName, protoVersion := netProtocol(req.Proto) + if protoName != "" { + num++ + } + if protoVersion != "" { + num++ + } + + if statusCode > 0 { + num++ + } + + attributes := slices.Grow(additionalAttributes, num) + attributes = append(attributes, + semconvNew.HTTPRequestMethodKey.String(standardizeHTTPMethod(req.Method)), + semconvNew.ServerAddress(requestHost), + n.scheme(req), + ) + + if port > 0 { + attributes = append(attributes, semconvNew.ServerPort(port)) + } + if protoName != "" { + attributes = append(attributes, semconvNew.NetworkProtocolName(protoName)) + } + if protoVersion != "" { + attributes = append(attributes, semconvNew.NetworkProtocolVersion(protoVersion)) + } + + if statusCode > 0 { + attributes = append(attributes, semconvNew.HTTPResponseStatusCode(statusCode)) + } + return attributes +} + +// TraceAttributes returns attributes for httptrace. 
+func (n CurrentHTTPClient) TraceAttributes(host string) []attribute.KeyValue { + return []attribute.KeyValue{ + semconvNew.ServerAddress(host), + } +} + +func (n CurrentHTTPClient) scheme(req *http.Request) attribute.KeyValue { + if req.URL != nil && req.URL.Scheme != "" { + return semconvNew.URLScheme(req.URL.Scheme) + } + if req.TLS != nil { + return semconvNew.URLScheme("https") + } + return semconvNew.URLScheme("http") +} + func isErrorStatusCode(code int) bool { return code >= 400 || code < 100 } diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/util.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/util.go index e6e14924f..bc1f7751d 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/util.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/util.go @@ -1,3 +1,6 @@ +// Code generated by gotmpl. DO NOT MODIFY. +// source: internal/shared/semconv/util.go.tmpl + // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 @@ -14,28 +17,28 @@ import ( semconvNew "go.opentelemetry.io/otel/semconv/v1.26.0" ) -// splitHostPort splits a network address hostport of the form "host", +// SplitHostPort splits a network address hostport of the form "host", // "host%zone", "[host]", "[host%zone], "host:port", "host%zone:port", // "[host]:port", "[host%zone]:port", or ":port" into host or host%zone and // port. // // An empty host is returned if it is not provided or unparsable. A negative // port is returned if it is not provided or unparsable. -func splitHostPort(hostport string) (host string, port int) { +func SplitHostPort(hostport string) (host string, port int) { port = -1 if strings.HasPrefix(hostport, "[") { - addrEnd := strings.LastIndex(hostport, "]") + addrEnd := strings.LastIndexByte(hostport, ']') if addrEnd < 0 { // Invalid hostport. 
return } - if i := strings.LastIndex(hostport[addrEnd:], ":"); i < 0 { + if i := strings.LastIndexByte(hostport[addrEnd:], ':'); i < 0 { host = hostport[1:addrEnd] return } } else { - if i := strings.LastIndex(hostport, ":"); i < 0 { + if i := strings.LastIndexByte(hostport, ':'); i < 0 { host = hostport return } @@ -67,15 +70,31 @@ func requiredHTTPPort(https bool, port int) int { // nolint:revive } func serverClientIP(xForwardedFor string) string { - if idx := strings.Index(xForwardedFor, ","); idx >= 0 { + if idx := strings.IndexByte(xForwardedFor, ','); idx >= 0 { xForwardedFor = xForwardedFor[:idx] } return xForwardedFor } +func httpRoute(pattern string) string { + if idx := strings.IndexByte(pattern, '/'); idx >= 0 { + return pattern[idx:] + } + return "" +} + func netProtocol(proto string) (name string, version string) { name, version, _ = strings.Cut(proto, "/") - name = strings.ToLower(name) + switch name { + case "HTTP": + name = "http" + case "QUIC": + name = "quic" + case "SPDY": + name = "spdy" + default: + name = strings.ToLower(name) + } return name, version } @@ -96,3 +115,13 @@ func handleErr(err error) { otel.Handle(err) } } + +func standardizeHTTPMethod(method string) string { + method = strings.ToUpper(method) + switch method { + case http.MethodConnect, http.MethodDelete, http.MethodGet, http.MethodHead, http.MethodOptions, http.MethodPatch, http.MethodPost, http.MethodPut, http.MethodTrace: + default: + method = "_OTHER" + } + return method +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.20.0.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.20.0.go index c999b05e6..ba7fccf1e 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.20.0.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.20.0.go @@ -1,3 +1,6 @@ +// Code generated by gotmpl. DO NOT MODIFY. 
+// source: internal/shared/semconv/v120.0.go.tmpl + // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 @@ -8,7 +11,6 @@ import ( "io" "net/http" "slices" - "strings" "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil" "go.opentelemetry.io/otel/attribute" @@ -17,7 +19,7 @@ import ( semconv "go.opentelemetry.io/otel/semconv/v1.20.0" ) -type oldHTTPServer struct{} +type OldHTTPServer struct{} // RequestTraceAttrs returns trace attributes for an HTTP request received by a // server. @@ -35,16 +37,18 @@ type oldHTTPServer struct{} // // If the primary server name is not known, server should be an empty string. // The req Host will be used to determine the server instead. -func (o oldHTTPServer) RequestTraceAttrs(server string, req *http.Request) []attribute.KeyValue { - return semconvutil.HTTPServerRequest(server, req) +func (o OldHTTPServer) RequestTraceAttrs(server string, req *http.Request, attrs []attribute.KeyValue) []attribute.KeyValue { + return semconvutil.HTTPServerRequest(server, req, semconvutil.HTTPServerRequestOptions{}, attrs) +} + +func (o OldHTTPServer) NetworkTransportAttr(network string) attribute.KeyValue { + return semconvutil.NetTransport(network) } // ResponseTraceAttrs returns trace attributes for telemetry from an HTTP response. // // If any of the fields in the ResponseTelemetry are not set the attribute will be omitted. -func (o oldHTTPServer) ResponseTraceAttrs(resp ResponseTelemetry) []attribute.KeyValue { - attributes := []attribute.KeyValue{} - +func (o OldHTTPServer) ResponseTraceAttrs(resp ResponseTelemetry, attributes []attribute.KeyValue) []attribute.KeyValue { if resp.ReadBytes > 0 { attributes = append(attributes, semconv.HTTPRequestContentLength(int(resp.ReadBytes))) } @@ -67,7 +71,7 @@ func (o oldHTTPServer) ResponseTraceAttrs(resp ResponseTelemetry) []attribute.Ke } // Route returns the attribute for the route. 
-func (o oldHTTPServer) Route(route string) attribute.KeyValue { +func (o OldHTTPServer) Route(route string) attribute.KeyValue { return semconv.HTTPRoute(route) } @@ -84,7 +88,7 @@ const ( serverDuration = "http.server.duration" // Incoming end to end duration, milliseconds ) -func (h oldHTTPServer) createMeasures(meter metric.Meter) (metric.Int64Counter, metric.Int64Counter, metric.Float64Histogram) { +func (h OldHTTPServer) createMeasures(meter metric.Meter) (metric.Int64Counter, metric.Int64Counter, metric.Float64Histogram) { if meter == nil { return noop.Int64Counter{}, noop.Int64Counter{}, noop.Float64Histogram{} } @@ -113,17 +117,17 @@ func (h oldHTTPServer) createMeasures(meter metric.Meter) (metric.Int64Counter, return requestBytesCounter, responseBytesCounter, serverLatencyMeasure } -func (o oldHTTPServer) MetricAttributes(server string, req *http.Request, statusCode int, additionalAttributes []attribute.KeyValue) []attribute.KeyValue { +func (o OldHTTPServer) MetricAttributes(server string, req *http.Request, statusCode int, additionalAttributes []attribute.KeyValue) []attribute.KeyValue { n := len(additionalAttributes) + 3 var host string var p int if server == "" { - host, p = splitHostPort(req.Host) + host, p = SplitHostPort(req.Host) } else { // Prioritize the primary server name. 
- host, p = splitHostPort(server) + host, p = SplitHostPort(server) if p < 0 { - _, p = splitHostPort(req.Host) + _, p = SplitHostPort(req.Host) } } hostPort := requiredHTTPPort(req.TLS != nil, p) @@ -144,7 +148,7 @@ func (o oldHTTPServer) MetricAttributes(server string, req *http.Request, status attributes := slices.Grow(additionalAttributes, n) attributes = append(attributes, - o.methodMetric(req.Method), + semconv.HTTPMethod(standardizeHTTPMethod(req.Method)), o.scheme(req.TLS != nil), semconv.NetHostName(host)) @@ -164,29 +168,106 @@ func (o oldHTTPServer) MetricAttributes(server string, req *http.Request, status return attributes } -func (o oldHTTPServer) methodMetric(method string) attribute.KeyValue { - method = strings.ToUpper(method) - switch method { - case http.MethodConnect, http.MethodDelete, http.MethodGet, http.MethodHead, http.MethodOptions, http.MethodPatch, http.MethodPost, http.MethodPut, http.MethodTrace: - default: - method = "_OTHER" - } - return semconv.HTTPMethod(method) -} - -func (o oldHTTPServer) scheme(https bool) attribute.KeyValue { // nolint:revive +func (o OldHTTPServer) scheme(https bool) attribute.KeyValue { // nolint:revive if https { return semconv.HTTPSchemeHTTPS } return semconv.HTTPSchemeHTTP } -type oldHTTPClient struct{} +type OldHTTPClient struct{} + +func (o OldHTTPClient) RequestTraceAttrs(req *http.Request, attrs []attribute.KeyValue) []attribute.KeyValue { + return semconvutil.HTTPClientRequest(req, attrs) +} + +func (o OldHTTPClient) ResponseTraceAttrs(resp *http.Response, attrs []attribute.KeyValue) []attribute.KeyValue { + return semconvutil.HTTPClientResponse(resp, attrs) +} + +func (o OldHTTPClient) MetricAttributes(req *http.Request, statusCode int, additionalAttributes []attribute.KeyValue) []attribute.KeyValue { + /* The following semantic conventions are returned if present: + http.method string + http.status_code int + net.peer.name string + net.peer.port int + */ + + n := 2 // method, peer name. 
+ var h string + if req.URL != nil { + h = req.URL.Host + } + var requestHost string + var requestPort int + for _, hostport := range []string{h, req.Header.Get("Host")} { + requestHost, requestPort = SplitHostPort(hostport) + if requestHost != "" || requestPort > 0 { + break + } + } + + port := requiredHTTPPort(req.URL != nil && req.URL.Scheme == "https", requestPort) + if port > 0 { + n++ + } + + if statusCode > 0 { + n++ + } + + attributes := slices.Grow(additionalAttributes, n) + attributes = append(attributes, + semconv.HTTPMethod(standardizeHTTPMethod(req.Method)), + semconv.NetPeerName(requestHost), + ) + + if port > 0 { + attributes = append(attributes, semconv.NetPeerPort(port)) + } + + if statusCode > 0 { + attributes = append(attributes, semconv.HTTPStatusCode(statusCode)) + } + return attributes +} + +// Client HTTP metrics. +const ( + clientRequestSize = "http.client.request.size" // Incoming request bytes total + clientResponseSize = "http.client.response.size" // Incoming response bytes total + clientDuration = "http.client.duration" // Incoming end to end duration, milliseconds +) + +func (o OldHTTPClient) createMeasures(meter metric.Meter) (metric.Int64Counter, metric.Int64Counter, metric.Float64Histogram) { + if meter == nil { + return noop.Int64Counter{}, noop.Int64Counter{}, noop.Float64Histogram{} + } + requestBytesCounter, err := meter.Int64Counter( + clientRequestSize, + metric.WithUnit("By"), + metric.WithDescription("Measures the size of HTTP request messages."), + ) + handleErr(err) + + responseBytesCounter, err := meter.Int64Counter( + clientResponseSize, + metric.WithUnit("By"), + metric.WithDescription("Measures the size of HTTP response messages."), + ) + handleErr(err) + + latencyMeasure, err := meter.Float64Histogram( + clientDuration, + metric.WithUnit("ms"), + metric.WithDescription("Measures the duration of outbound HTTP requests."), + ) + handleErr(err) -func (o oldHTTPClient) RequestTraceAttrs(req *http.Request) 
[]attribute.KeyValue { - return semconvutil.HTTPClientRequest(req) + return requestBytesCounter, responseBytesCounter, latencyMeasure } -func (o oldHTTPClient) ResponseTraceAttrs(resp *http.Response) []attribute.KeyValue { - return semconvutil.HTTPClientResponse(resp) +// TraceAttributes returns attributes for httptrace. +func (c OldHTTPClient) TraceAttributes(host string, attrs []attribute.KeyValue) []attribute.KeyValue { + return append(attrs, semconv.NetHostName(host)) } diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/httpconv.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/httpconv.go index a73bb06e9..b99735479 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/httpconv.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/httpconv.go @@ -1,14 +1,16 @@ -// Code created by gotmpl. DO NOT MODIFY. +// Code generated by gotmpl. DO NOT MODIFY. // source: internal/shared/semconvutil/httpconv.go.tmpl // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 +// Package semconvutil provides OpenTelemetry semantic convention utilities. package semconvutil // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil" import ( "fmt" "net/http" + "slices" "strings" "go.opentelemetry.io/otel/attribute" @@ -16,6 +18,11 @@ import ( semconv "go.opentelemetry.io/otel/semconv/v1.20.0" ) +type HTTPServerRequestOptions struct { + // If set, this is used as value for the "http.client_ip" attribute. + HTTPClientIP string +} + // HTTPClientResponse returns trace attributes for an HTTP response received by a // client from a server. It will return the following attributes if the related // values are defined in resp: "http.status.code", @@ -26,9 +33,9 @@ import ( // attributes. 
If a complete set of attributes can be generated using the // request contained in resp. For example: // -// append(HTTPClientResponse(resp), ClientRequest(resp.Request)...) -func HTTPClientResponse(resp *http.Response) []attribute.KeyValue { - return hc.ClientResponse(resp) +// HTTPClientResponse(resp, ClientRequest(resp.Request))) +func HTTPClientResponse(resp *http.Response, attrs []attribute.KeyValue) []attribute.KeyValue { + return hc.ClientResponse(resp, attrs) } // HTTPClientRequest returns trace attributes for an HTTP request made by a client. @@ -36,8 +43,8 @@ func HTTPClientResponse(resp *http.Response) []attribute.KeyValue { // "net.peer.name". The following attributes are returned if the related values // are defined in req: "net.peer.port", "user_agent.original", // "http.request_content_length". -func HTTPClientRequest(req *http.Request) []attribute.KeyValue { - return hc.ClientRequest(req) +func HTTPClientRequest(req *http.Request, attrs []attribute.KeyValue) []attribute.KeyValue { + return hc.ClientRequest(req, attrs) } // HTTPClientRequestMetrics returns metric attributes for an HTTP request made by a client. @@ -75,8 +82,8 @@ func HTTPClientStatus(code int) (codes.Code, string) { // "http.target", "net.host.name". The following attributes are returned if // they related values are defined in req: "net.host.port", "net.sock.peer.addr", // "net.sock.peer.port", "user_agent.original", "http.client_ip". -func HTTPServerRequest(server string, req *http.Request) []attribute.KeyValue { - return hc.ServerRequest(server, req) +func HTTPServerRequest(server string, req *http.Request, opts HTTPServerRequestOptions, attrs []attribute.KeyValue) []attribute.KeyValue { + return hc.ServerRequest(server, req, opts, attrs) } // HTTPServerRequestMetrics returns metric attributes for an HTTP request received by a @@ -153,8 +160,8 @@ var hc = &httpConv{ // attributes. If a complete set of attributes can be generated using the // request contained in resp. 
For example: // -// append(ClientResponse(resp), ClientRequest(resp.Request)...) -func (c *httpConv) ClientResponse(resp *http.Response) []attribute.KeyValue { +// ClientResponse(resp, ClientRequest(resp.Request)) +func (c *httpConv) ClientResponse(resp *http.Response, attrs []attribute.KeyValue) []attribute.KeyValue { /* The following semantic conventions are returned if present: http.status_code int http.response_content_length int @@ -166,8 +173,11 @@ func (c *httpConv) ClientResponse(resp *http.Response) []attribute.KeyValue { if resp.ContentLength > 0 { n++ } + if n == 0 { + return attrs + } - attrs := make([]attribute.KeyValue, 0, n) + attrs = slices.Grow(attrs, n) if resp.StatusCode > 0 { attrs = append(attrs, c.HTTPStatusCodeKey.Int(resp.StatusCode)) } @@ -182,7 +192,7 @@ func (c *httpConv) ClientResponse(resp *http.Response) []attribute.KeyValue { // "net.peer.name". The following attributes are returned if the related values // are defined in req: "net.peer.port", "user_agent.original", // "http.request_content_length", "user_agent.original". -func (c *httpConv) ClientRequest(req *http.Request) []attribute.KeyValue { +func (c *httpConv) ClientRequest(req *http.Request, attrs []attribute.KeyValue) []attribute.KeyValue { /* The following semantic conventions are returned if present: http.method string user_agent.original string @@ -221,8 +231,7 @@ func (c *httpConv) ClientRequest(req *http.Request) []attribute.KeyValue { n++ } - attrs := make([]attribute.KeyValue, 0, n) - + attrs = slices.Grow(attrs, n) attrs = append(attrs, c.method(req.Method)) var u string @@ -305,7 +314,7 @@ func (c *httpConv) ClientRequestMetrics(req *http.Request) []attribute.KeyValue // related values are defined in req: "net.host.port", "net.sock.peer.addr", // "net.sock.peer.port", "user_agent.original", "http.client_ip", // "net.protocol.name", "net.protocol.version". 
-func (c *httpConv) ServerRequest(server string, req *http.Request) []attribute.KeyValue { +func (c *httpConv) ServerRequest(server string, req *http.Request, opts HTTPServerRequestOptions, attrs []attribute.KeyValue) []attribute.KeyValue { /* The following semantic conventions are returned if present: http.method string http.scheme string @@ -358,7 +367,17 @@ func (c *httpConv) ServerRequest(server string, req *http.Request) []attribute.K n++ } - clientIP := serverClientIP(req.Header.Get("X-Forwarded-For")) + // For client IP, use, in order: + // 1. The value passed in the options + // 2. The value in the X-Forwarded-For header + // 3. The peer address + clientIP := opts.HTTPClientIP + if clientIP == "" { + clientIP = serverClientIP(req.Header.Get("X-Forwarded-For")) + if clientIP == "" { + clientIP = peer + } + } if clientIP != "" { n++ } @@ -378,7 +397,7 @@ func (c *httpConv) ServerRequest(server string, req *http.Request) []attribute.K n++ } - attrs := make([]attribute.KeyValue, 0, n) + attrs = slices.Grow(attrs, n) attrs = append(attrs, c.method(req.Method)) attrs = append(attrs, c.scheme(req.TLS != nil)) diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go index b80a1db61..df97255e4 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go @@ -1,4 +1,4 @@ -// Code created by gotmpl. DO NOT MODIFY. +// Code generated by gotmpl. DO NOT MODIFY. 
// source: internal/shared/semconvutil/netconv.go.tmpl // Copyright The OpenTelemetry Authors @@ -200,6 +200,15 @@ func splitHostPort(hostport string) (host string, port int) { func netProtocol(proto string) (name string, version string) { name, version, _ = strings.Cut(proto, "/") - name = strings.ToLower(name) + switch name { + case "HTTP": + name = "http" + case "QUIC": + name = "quic" + case "SPDY": + name = "spdy" + default: + name = strings.ToLower(name) + } return name, version } diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/labeler.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/labeler.go index ea504e396..d62ce44b0 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/labeler.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/labeler.go @@ -35,14 +35,14 @@ func (l *Labeler) Get() []attribute.KeyValue { type labelerContextKeyType int -const lablelerContextKey labelerContextKeyType = 0 +const labelerContextKey labelerContextKeyType = 0 // ContextWithLabeler returns a new context with the provided Labeler instance. // Attributes added to the specified labeler will be injected into metrics // emitted by the instrumentation. Only one labeller can be injected into the // context. Injecting it multiple times will override the previous calls. func ContextWithLabeler(parent context.Context, l *Labeler) context.Context { - return context.WithValue(parent, lablelerContextKey, l) + return context.WithValue(parent, labelerContextKey, l) } // LabelerFromContext retrieves a Labeler instance from the provided context if @@ -50,7 +50,7 @@ func ContextWithLabeler(parent context.Context, l *Labeler) context.Context { // Labeler is returned and the second return value is false. In this case it is // safe to use the Labeler but any attributes added to it will not be used. 
func LabelerFromContext(ctx context.Context) (*Labeler, bool) { - l, ok := ctx.Value(lablelerContextKey).(*Labeler) + l, ok := ctx.Value(labelerContextKey).(*Labeler) if !ok { l = &Labeler{} } diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/start_time_context.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/start_time_context.go new file mode 100644 index 000000000..9476ef01b --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/start_time_context.go @@ -0,0 +1,29 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" + +import ( + "context" + "time" +) + +type startTimeContextKeyType int + +const startTimeContextKey startTimeContextKeyType = 0 + +// ContextWithStartTime returns a new context with the provided start time. The +// start time will be used for metrics and traces emitted by the +// instrumentation. Only one labeller can be injected into the context. +// Injecting it multiple times will override the previous calls. +func ContextWithStartTime(parent context.Context, start time.Time) context.Context { + return context.WithValue(parent, startTimeContextKey, start) +} + +// StartTimeFromContext retrieves a time.Time from the provided context if one +// is available. If no start time was found in the provided context, a new, +// zero start time is returned and the second return value is false. 
+func StartTimeFromContext(ctx context.Context) time.Time { + t, _ := ctx.Value(startTimeContextKey).(time.Time) + return t +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go index b4119d343..44b86ad86 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go @@ -13,11 +13,9 @@ import ( "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request" "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv" - "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil" "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" - "go.opentelemetry.io/otel/metric" "go.opentelemetry.io/otel/propagation" "go.opentelemetry.io/otel/trace" @@ -29,7 +27,6 @@ type Transport struct { rt http.RoundTripper tracer trace.Tracer - meter metric.Meter propagators propagation.TextMapPropagator spanStartOptions []trace.SpanStartOption filters []Filter @@ -37,10 +34,7 @@ type Transport struct { clientTrace func(context.Context) *httptrace.ClientTrace metricAttributesFn func(*http.Request) []attribute.KeyValue - semconv semconv.HTTPClient - requestBytesCounter metric.Int64Counter - responseBytesCounter metric.Int64Counter - latencyMeasure metric.Float64Histogram + semconv semconv.HTTPClient } var _ http.RoundTripper = &Transport{} @@ -57,8 +51,7 @@ func NewTransport(base http.RoundTripper, opts ...Option) *Transport { } t := Transport{ - rt: base, - semconv: semconv.NewHTTPClient(), + rt: base, } defaultOpts := []Option{ @@ -68,46 +61,21 @@ func NewTransport(base http.RoundTripper, opts ...Option) *Transport { c := newConfig(append(defaultOpts, opts...)...) 
t.applyConfig(c) - t.createMeasures() return &t } func (t *Transport) applyConfig(c *config) { t.tracer = c.Tracer - t.meter = c.Meter t.propagators = c.Propagators t.spanStartOptions = c.SpanStartOptions t.filters = c.Filters t.spanNameFormatter = c.SpanNameFormatter t.clientTrace = c.ClientTrace + t.semconv = semconv.NewHTTPClient(c.Meter) t.metricAttributesFn = c.MetricAttributesFn } -func (t *Transport) createMeasures() { - var err error - t.requestBytesCounter, err = t.meter.Int64Counter( - clientRequestSize, - metric.WithUnit("By"), - metric.WithDescription("Measures the size of HTTP request messages."), - ) - handleErr(err) - - t.responseBytesCounter, err = t.meter.Int64Counter( - clientResponseSize, - metric.WithUnit("By"), - metric.WithDescription("Measures the size of HTTP response messages."), - ) - handleErr(err) - - t.latencyMeasure, err = t.meter.Float64Histogram( - clientDuration, - metric.WithUnit("ms"), - metric.WithDescription("Measures the duration of outbound HTTP requests."), - ) - handleErr(err) -} - func defaultTransportFormatter(_ string, r *http.Request) string { return "HTTP " + r.Method } @@ -177,16 +145,15 @@ func (t *Transport) RoundTrip(r *http.Request) (*http.Response, error) { } // metrics - metricAttrs := append(append(labeler.Get(), semconvutil.HTTPClientRequestMetrics(r)...), t.metricAttributesFromRequest(r)...) 
- if res.StatusCode > 0 { - metricAttrs = append(metricAttrs, semconv.HTTPStatusCode(res.StatusCode)) - } - o := metric.WithAttributeSet(attribute.NewSet(metricAttrs...)) + metricOpts := t.semconv.MetricOptions(semconv.MetricAttributes{ + Req: r, + StatusCode: res.StatusCode, + AdditionalAttributes: append(labeler.Get(), t.metricAttributesFromRequest(r)...), + }) - t.requestBytesCounter.Add(ctx, bw.BytesRead(), o) // For handling response bytes we leverage a callback when the client reads the http response readRecordFunc := func(n int64) { - t.responseBytesCounter.Add(ctx, n, o) + t.semconv.RecordResponseSize(ctx, n, metricOpts) } // traces @@ -198,9 +165,12 @@ func (t *Transport) RoundTrip(r *http.Request) (*http.Response, error) { // Use floating point division here for higher precision (instead of Millisecond method). elapsedTime := float64(time.Since(requestStartTime)) / float64(time.Millisecond) - t.latencyMeasure.Record(ctx, elapsedTime, o) + t.semconv.RecordMetrics(ctx, semconv.MetricData{ + RequestSize: bw.BytesRead(), + ElapsedTime: elapsedTime, + }, metricOpts) - return res, err + return res, nil } func (t *Transport) metricAttributesFromRequest(r *http.Request) []attribute.KeyValue { diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go index 502c1bdaf..6be4c1fde 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go @@ -5,13 +5,6 @@ package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http // Version is the current release version of the otelhttp instrumentation. func Version() string { - return "0.54.0" + return "0.61.0" // This string is updated by the pre_release.sh script during release } - -// SemVersion is the semantic version to be supplied to tracer/meter creation. 
-// -// Deprecated: Use [Version] instead. -func SemVersion() string { - return Version() -} diff --git a/vendor/go.opentelemetry.io/otel/.clomonitor.yml b/vendor/go.opentelemetry.io/otel/.clomonitor.yml new file mode 100644 index 000000000..128d61a22 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/.clomonitor.yml @@ -0,0 +1,3 @@ +exemptions: + - check: artifacthub_badge + reason: "Artifact Hub doesn't support Go packages" diff --git a/vendor/go.opentelemetry.io/otel/.codespellignore b/vendor/go.opentelemetry.io/otel/.codespellignore index 6bf3abc41..2b53a25e1 100644 --- a/vendor/go.opentelemetry.io/otel/.codespellignore +++ b/vendor/go.opentelemetry.io/otel/.codespellignore @@ -7,3 +7,4 @@ ans nam valu thirdparty +addOpt diff --git a/vendor/go.opentelemetry.io/otel/.gitignore b/vendor/go.opentelemetry.io/otel/.gitignore index ae8577ef3..749e8e881 100644 --- a/vendor/go.opentelemetry.io/otel/.gitignore +++ b/vendor/go.opentelemetry.io/otel/.gitignore @@ -1,6 +1,7 @@ .DS_Store Thumbs.db +.cache/ .tools/ venv/ .idea/ diff --git a/vendor/go.opentelemetry.io/otel/.golangci.yml b/vendor/go.opentelemetry.io/otel/.golangci.yml index ce3f40b60..b01762ffc 100644 --- a/vendor/go.opentelemetry.io/otel/.golangci.yml +++ b/vendor/go.opentelemetry.io/otel/.golangci.yml @@ -1,325 +1,260 @@ -# See https://github.com/golangci/golangci-lint#config-file +version: "2" run: - issues-exit-code: 1 #Default - tests: true #Default - + issues-exit-code: 1 + tests: true linters: - # Disable everything by default so upgrades to not include new "default - # enabled" linters. - disable-all: true - # Specifically enable linters we want to use. 
+ default: none enable: - asasalint - bodyclose - depguard - errcheck - errorlint + - gocritic - godot - - gofumpt - - goimports - gosec - - gosimple - govet - ineffassign - misspell - perfsprint - revive - staticcheck - - tenv - testifylint - - typecheck - unconvert - - unused - unparam + - unused - usestdlibvars - + - usetesting + settings: + depguard: + rules: + auto/sdk: + files: + - '!internal/global/trace.go' + - ~internal/global/trace_test.go + deny: + - pkg: go.opentelemetry.io/auto/sdk + desc: Do not use SDK from automatic instrumentation. + non-tests: + files: + - '!$test' + - '!**/*test/*.go' + - '!**/internal/matchers/*.go' + deny: + - pkg: testing + - pkg: github.com/stretchr/testify + - pkg: crypto/md5 + - pkg: crypto/sha1 + - pkg: crypto/**/pkix + otel-internal: + files: + - '**/sdk/*.go' + - '**/sdk/**/*.go' + - '**/exporters/*.go' + - '**/exporters/**/*.go' + - '**/schema/*.go' + - '**/schema/**/*.go' + - '**/metric/*.go' + - '**/metric/**/*.go' + - '**/bridge/*.go' + - '**/bridge/**/*.go' + - '**/trace/*.go' + - '**/trace/**/*.go' + - '**/log/*.go' + - '**/log/**/*.go' + deny: + - pkg: go.opentelemetry.io/otel/internal$ + desc: Do not use cross-module internal packages. + - pkg: go.opentelemetry.io/otel/internal/internaltest + desc: Do not use cross-module internal packages. + otlp-internal: + files: + - '!**/exporters/otlp/internal/**/*.go' + deny: + - pkg: go.opentelemetry.io/otel/exporters/otlp/internal + desc: Do not use cross-module internal packages. + otlpmetric-internal: + files: + - '!**/exporters/otlp/otlpmetric/internal/*.go' + - '!**/exporters/otlp/otlpmetric/internal/**/*.go' + deny: + - pkg: go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal + desc: Do not use cross-module internal packages. 
+ otlptrace-internal: + files: + - '!**/exporters/otlp/otlptrace/*.go' + - '!**/exporters/otlp/otlptrace/internal/**.go' + deny: + - pkg: go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal + desc: Do not use cross-module internal packages. + gocritic: + disabled-checks: + - appendAssign + - commentedOutCode + - dupArg + - hugeParam + - importShadow + - preferDecodeRune + - rangeValCopy + - unnamedResult + - whyNoLint + enable-all: true + godot: + exclude: + # Exclude links. + - '^ *\[[^]]+\]:' + # Exclude sentence fragments for lists. + - ^[ ]*[-•] + # Exclude sentences prefixing a list. + - :$ + misspell: + locale: US + ignore-rules: + - cancelled + perfsprint: + int-conversion: true + err-error: true + errorf: true + sprintf1: true + strconcat: true + revive: + confidence: 0.01 + rules: + - name: blank-imports + - name: bool-literal-in-expr + - name: constant-logical-expr + - name: context-as-argument + arguments: + - allowTypesBefore: '*testing.T' + disabled: true + - name: context-keys-type + - name: deep-exit + - name: defer + arguments: + - - call-chain + - loop + - name: dot-imports + - name: duplicated-imports + - name: early-return + arguments: + - preserveScope + - name: empty-block + - name: empty-lines + - name: error-naming + - name: error-return + - name: error-strings + - name: errorf + - name: exported + arguments: + - sayRepetitiveInsteadOfStutters + - name: flag-parameter + - name: identical-branches + - name: if-return + - name: import-shadowing + - name: increment-decrement + - name: indent-error-flow + arguments: + - preserveScope + - name: package-comments + - name: range + - name: range-val-in-closure + - name: range-val-address + - name: redefines-builtin-id + - name: string-format + arguments: + - - panic + - /^[^\n]*$/ + - must not contain line breaks + - name: struct-tag + - name: superfluous-else + arguments: + - preserveScope + - name: time-equal + - name: unconditional-recursion + - name: unexported-return + - name: 
unhandled-error + arguments: + - fmt.Fprint + - fmt.Fprintf + - fmt.Fprintln + - fmt.Print + - fmt.Printf + - fmt.Println + - name: unused-parameter + - name: unused-receiver + - name: unnecessary-stmt + - name: use-any + - name: useless-break + - name: var-declaration + - name: var-naming + arguments: + - ["ID"] # AllowList + - ["Otel", "Aws", "Gcp"] # DenyList + - name: waitgroup-by-value + testifylint: + enable-all: true + disable: + - float-compare + - go-require + - require-error + exclusions: + generated: lax + presets: + - common-false-positives + - legacy + - std-error-handling + rules: + - linters: + - revive + path: schema/v.*/types/.* + text: avoid meaningless package names + # TODO: Having appropriate comments for exported objects helps development, + # even for objects in internal packages. Appropriate comments for all + # exported objects should be added and this exclusion removed. + - linters: + - revive + path: .*internal/.* + text: exported (method|function|type|const) (.+) should have comment or be unexported + # Yes, they are, but it's okay in a test. + - linters: + - revive + path: _test\.go + text: exported func.*returns unexported type.*which can be annoying to use + # Example test functions should be treated like main. + - linters: + - revive + path: example.*_test\.go + text: calls to (.+) only in main[(][)] or init[(][)] functions + # It's okay to not run gosec and perfsprint in a test. + - linters: + - gosec + - perfsprint + path: _test\.go + # Ignoring gosec G404: Use of weak random number generator (math/rand instead of crypto/rand) + # as we commonly use it in tests and examples. + - linters: + - gosec + text: 'G404:' + # Ignoring gosec G402: TLS MinVersion too low + # as the https://pkg.go.dev/crypto/tls#Config handles MinVersion default well. + - linters: + - gosec + text: 'G402: TLS MinVersion too low.' issues: - # Maximum issues count per one linter. - # Set to 0 to disable. 
- # Default: 50 - # Setting to unlimited so the linter only is run once to debug all issues. max-issues-per-linter: 0 - # Maximum count of issues with the same text. - # Set to 0 to disable. - # Default: 3 - # Setting to unlimited so the linter only is run once to debug all issues. max-same-issues: 0 - # Excluding configuration per-path, per-linter, per-text and per-source. - exclude-rules: - # TODO: Having appropriate comments for exported objects helps development, - # even for objects in internal packages. Appropriate comments for all - # exported objects should be added and this exclusion removed. - - path: '.*internal/.*' - text: "exported (method|function|type|const) (.+) should have comment or be unexported" - linters: - - revive - # Yes, they are, but it's okay in a test. - - path: _test\.go - text: "exported func.*returns unexported type.*which can be annoying to use" - linters: - - revive - # Example test functions should be treated like main. - - path: example.*_test\.go - text: "calls to (.+) only in main[(][)] or init[(][)] functions" - linters: - - revive - # It's okay to not run gosec and perfsprint in a test. - - path: _test\.go - linters: - - gosec - - perfsprint - # Ignoring gosec G404: Use of weak random number generator (math/rand instead of crypto/rand) - # as we commonly use it in tests and examples. - - text: "G404:" - linters: - - gosec - # Ignoring gosec G402: TLS MinVersion too low - # as the https://pkg.go.dev/crypto/tls#Config handles MinVersion default well. - - text: "G402: TLS MinVersion too low." - linters: - - gosec - include: - # revive exported should have comment or be unexported. - - EXC0012 - # revive package comment should be of the form ... 
- - EXC0013 - -linters-settings: - depguard: - rules: - non-tests: - files: - - "!$test" - - "!**/*test/*.go" - - "!**/internal/matchers/*.go" - deny: - - pkg: "testing" - - pkg: "github.com/stretchr/testify" - - pkg: "crypto/md5" - - pkg: "crypto/sha1" - - pkg: "crypto/**/pkix" - auto/sdk: - files: - - "!internal/global/trace.go" - - "~internal/global/trace_test.go" - deny: - - pkg: "go.opentelemetry.io/auto/sdk" - desc: Do not use SDK from automatic instrumentation. - otlp-internal: - files: - - "!**/exporters/otlp/internal/**/*.go" - deny: - - pkg: "go.opentelemetry.io/otel/exporters/otlp/internal" - desc: Do not use cross-module internal packages. - otlptrace-internal: - files: - - "!**/exporters/otlp/otlptrace/*.go" - - "!**/exporters/otlp/otlptrace/internal/**.go" - deny: - - pkg: "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal" - desc: Do not use cross-module internal packages. - otlpmetric-internal: - files: - - "!**/exporters/otlp/otlpmetric/internal/*.go" - - "!**/exporters/otlp/otlpmetric/internal/**/*.go" - deny: - - pkg: "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal" - desc: Do not use cross-module internal packages. - otel-internal: - files: - - "**/sdk/*.go" - - "**/sdk/**/*.go" - - "**/exporters/*.go" - - "**/exporters/**/*.go" - - "**/schema/*.go" - - "**/schema/**/*.go" - - "**/metric/*.go" - - "**/metric/**/*.go" - - "**/bridge/*.go" - - "**/bridge/**/*.go" - - "**/trace/*.go" - - "**/trace/**/*.go" - - "**/log/*.go" - - "**/log/**/*.go" - deny: - - pkg: "go.opentelemetry.io/otel/internal$" - desc: Do not use cross-module internal packages. - - pkg: "go.opentelemetry.io/otel/internal/attribute" - desc: Do not use cross-module internal packages. - - pkg: "go.opentelemetry.io/otel/internal/internaltest" - desc: Do not use cross-module internal packages. - - pkg: "go.opentelemetry.io/otel/internal/matchers" - desc: Do not use cross-module internal packages. - godot: - exclude: - # Exclude links. 
- - '^ *\[[^]]+\]:' - # Exclude sentence fragments for lists. - - '^[ ]*[-•]' - # Exclude sentences prefixing a list. - - ':$' - goimports: - local-prefixes: go.opentelemetry.io - misspell: - locale: US - ignore-words: - - cancelled - perfsprint: - err-error: true - errorf: true - int-conversion: true - sprintf1: true - strconcat: true - revive: - # Sets the default failure confidence. - # This means that linting errors with less than 0.8 confidence will be ignored. - # Default: 0.8 - confidence: 0.01 - rules: - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#blank-imports - - name: blank-imports - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#bool-literal-in-expr - - name: bool-literal-in-expr - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#constant-logical-expr - - name: constant-logical-expr - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#context-as-argument - # TODO (#3372) re-enable linter when it is compatible. 
https://github.com/golangci/golangci-lint/issues/3280 - - name: context-as-argument - disabled: true - arguments: - allowTypesBefore: "*testing.T" - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#context-keys-type - - name: context-keys-type - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#deep-exit - - name: deep-exit - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#defer - - name: defer - disabled: false - arguments: - - ["call-chain", "loop"] - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#dot-imports - - name: dot-imports - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#duplicated-imports - - name: duplicated-imports - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#early-return - - name: early-return - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#empty-block - - name: empty-block - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#empty-lines - - name: empty-lines - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#error-naming - - name: error-naming - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#error-return - - name: error-return - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#error-strings - - name: error-strings - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#errorf - - name: errorf - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#exported - - name: exported - disabled: false - arguments: - - "sayRepetitiveInsteadOfStutters" - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#flag-parameter - - name: flag-parameter - disabled: false - # 
https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#identical-branches - - name: identical-branches - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#if-return - - name: if-return - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#increment-decrement - - name: increment-decrement - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#indent-error-flow - - name: indent-error-flow - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#import-shadowing - - name: import-shadowing - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#package-comments - - name: package-comments - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#range - - name: range - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#range-val-in-closure - - name: range-val-in-closure - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#range-val-address - - name: range-val-address - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#redefines-builtin-id - - name: redefines-builtin-id - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#string-format - - name: string-format - disabled: false - arguments: - - - panic - - '/^[^\n]*$/' - - must not contain line breaks - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#struct-tag - - name: struct-tag - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#superfluous-else - - name: superfluous-else - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#time-equal - - name: time-equal - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#var-naming - - 
name: var-naming - disabled: false - arguments: - - ["ID"] # AllowList - - ["Otel", "Aws", "Gcp"] # DenyList - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#var-declaration - - name: var-declaration - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unconditional-recursion - - name: unconditional-recursion - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unexported-return - - name: unexported-return - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unhandled-error - - name: unhandled-error - disabled: false - arguments: - - "fmt.Fprint" - - "fmt.Fprintf" - - "fmt.Fprintln" - - "fmt.Print" - - "fmt.Printf" - - "fmt.Println" - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unnecessary-stmt - - name: unnecessary-stmt - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#useless-break - - name: useless-break - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#waitgroup-by-value - - name: waitgroup-by-value - disabled: false - testifylint: - enable-all: true - disable: - - float-compare - - go-require - - require-error +formatters: + enable: + - gofumpt + - goimports + - golines + settings: + gofumpt: + extra-rules: true + goimports: + local-prefixes: + - go.opentelemetry.io/otel + golines: + max-len: 120 + exclusions: + generated: lax diff --git a/vendor/go.opentelemetry.io/otel/.lycheeignore b/vendor/go.opentelemetry.io/otel/.lycheeignore index 40d62fa2e..532850588 100644 --- a/vendor/go.opentelemetry.io/otel/.lycheeignore +++ b/vendor/go.opentelemetry.io/otel/.lycheeignore @@ -2,5 +2,8 @@ http://localhost http://jaeger-collector https://github.com/open-telemetry/opentelemetry-go/milestone/ https://github.com/open-telemetry/opentelemetry-go/projects +# Weaver model URL for semantic-conventions repository. 
+https?:\/\/github\.com\/open-telemetry\/semantic-conventions\/archive\/refs\/tags\/[^.]+\.zip\[[^]]+] file:///home/runner/work/opentelemetry-go/opentelemetry-go/libraries file:///home/runner/work/opentelemetry-go/opentelemetry-go/manual +http://4.3.2.1:78/user/123 \ No newline at end of file diff --git a/vendor/go.opentelemetry.io/otel/CHANGELOG.md b/vendor/go.opentelemetry.io/otel/CHANGELOG.md index 599d59cd1..f3abcfdc2 100644 --- a/vendor/go.opentelemetry.io/otel/CHANGELOG.md +++ b/vendor/go.opentelemetry.io/otel/CHANGELOG.md @@ -11,6 +11,239 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm +## [1.38.0/0.60.0/0.14.0/0.0.13] 2025-08-29 + +This release is the last to support [Go 1.23]. +The next release will require at least [Go 1.24]. + +### Added + +- Add native histogram exemplar support in `go.opentelemetry.io/otel/exporters/prometheus`. (#6772) +- Add template attribute functions to the `go.opentelmetry.io/otel/semconv/v1.34.0` package. (#6939) + - `ContainerLabel` + - `DBOperationParameter` + - `DBSystemParameter` + - `HTTPRequestHeader` + - `HTTPResponseHeader` + - `K8SCronJobAnnotation` + - `K8SCronJobLabel` + - `K8SDaemonSetAnnotation` + - `K8SDaemonSetLabel` + - `K8SDeploymentAnnotation` + - `K8SDeploymentLabel` + - `K8SJobAnnotation` + - `K8SJobLabel` + - `K8SNamespaceAnnotation` + - `K8SNamespaceLabel` + - `K8SNodeAnnotation` + - `K8SNodeLabel` + - `K8SPodAnnotation` + - `K8SPodLabel` + - `K8SReplicaSetAnnotation` + - `K8SReplicaSetLabel` + - `K8SStatefulSetAnnotation` + - `K8SStatefulSetLabel` + - `ProcessEnvironmentVariable` + - `RPCConnectRPCRequestMetadata` + - `RPCConnectRPCResponseMetadata` + - `RPCGRPCRequestMetadata` + - `RPCGRPCResponseMetadata` +- Add `ErrorType` attribute helper function to the `go.opentelmetry.io/otel/semconv/v1.34.0` package. (#6962) +- Add `WithAllowKeyDuplication` in `go.opentelemetry.io/otel/sdk/log` which can be used to disable deduplication for log records. 
(#6968) +- Add `WithCardinalityLimit` option to configure the cardinality limit in `go.opentelemetry.io/otel/sdk/metric`. (#6996, #7065, #7081, #7164, #7165, #7179) +- Add `Clone` method to `Record` in `go.opentelemetry.io/otel/log` that returns a copy of the record with no shared state. (#7001) +- Add experimental self-observability span and batch span processor metrics in `go.opentelemetry.io/otel/sdk/trace`. + Check the `go.opentelemetry.io/otel/sdk/trace/internal/x` package documentation for more information. (#7027, #6393, #7209) +- The `go.opentelemetry.io/otel/semconv/v1.36.0` package. + The package contains semantic conventions from the `v1.36.0` version of the OpenTelemetry Semantic Conventions. + See the [migration documentation](./semconv/v1.36.0/MIGRATION.md) for information on how to upgrade from `go.opentelemetry.io/otel/semconv/v1.34.0.`(#7032, #7041) +- Add support for configuring Prometheus name translation using `WithTranslationStrategy` option in `go.opentelemetry.io/otel/exporters/prometheus`. The current default translation strategy when UTF-8 mode is enabled is `NoUTF8EscapingWithSuffixes`, but a future release will change the default strategy to `UnderscoreEscapingWithSuffixes` for compliance with the specification. (#7111) +- Add experimental self-observability log metrics in `go.opentelemetry.io/otel/sdk/log`. + Check the `go.opentelemetry.io/otel/sdk/log/internal/x` package documentation for more information. (#7121) +- Add experimental self-observability trace exporter metrics in `go.opentelemetry.io/otel/exporters/stdout/stdouttrace`. + Check the `go.opentelemetry.io/otel/exporters/stdout/stdouttrace/internal/x` package documentation for more information. (#7133) +- Support testing of [Go 1.25]. (#7187) +- The `go.opentelemetry.io/otel/semconv/v1.37.0` package. + The package contains semantic conventions from the `v1.37.0` version of the OpenTelemetry Semantic Conventions. 
+ See the [migration documentation](./semconv/v1.37.0/MIGRATION.md) for information on how to upgrade from `go.opentelemetry.io/otel/semconv/v1.36.0.`(#7254) + +### Changed + +- Optimize `TraceIDFromHex` and `SpanIDFromHex` in `go.opentelemetry.io/otel/sdk/trace`. (#6791) +- Change `AssertEqual` in `go.opentelemetry.io/otel/log/logtest` to accept `TestingT` in order to support benchmarks and fuzz tests. (#6908) +- Change `DefaultExemplarReservoirProviderSelector` in `go.opentelemetry.io/otel/sdk/metric` to use `runtime.GOMAXPROCS(0)` instead of `runtime.NumCPU()` for the `FixedSizeReservoirProvider` default size. (#7094) + +### Fixed + +- `SetBody` method of `Record` in `go.opentelemetry.io/otel/sdk/log` now deduplicates key-value collections (`log.Value` of `log.KindMap` from `go.opentelemetry.io/otel/log`). (#7002) +- Fix `go.opentelemetry.io/otel/exporters/prometheus` to not append a suffix if it's already present in metric name. (#7088) +- Fix the `go.opentelemetry.io/otel/exporters/stdout/stdouttrace` self-observability component type and name. (#7195) +- Fix partial export count metric in `go.opentelemetry.io/otel/exporters/stdout/stdouttrace`. (#7199) + +### Deprecated + +- Deprecate `WithoutUnits` and `WithoutCounterSuffixes` options, preferring `WithTranslationStrategy` instead. (#7111) +- Deprecate support for `OTEL_GO_X_CARDINALITY_LIMIT` environment variable in `go.opentelemetry.io/otel/sdk/metric`. Use `WithCardinalityLimit` option instead. (#7166) + +## [0.59.1] 2025-07-21 + +### Changed + +- Retract `v0.59.0` release of `go.opentelemetry.io/otel/exporters/prometheus` module which appends incorrect unit suffixes. (#7046) +- Change `go.opentelemetry.io/otel/exporters/prometheus` to no longer deduplicate suffixes when UTF8 is enabled. + It is recommended to disable unit and counter suffixes in the exporter, and manually add suffixes if you rely on the existing behavior. 
(#7044) + +### Fixed + +- Fix `go.opentelemetry.io/otel/exporters/prometheus` to properly handle unit suffixes when the unit is in brackets. + E.g. `{spans}`. (#7044) + +## [1.37.0/0.59.0/0.13.0] 2025-06-25 + +### Added + +- The `go.opentelemetry.io/otel/semconv/v1.33.0` package. + The package contains semantic conventions from the `v1.33.0` version of the OpenTelemetry Semantic Conventions. + See the [migration documentation](./semconv/v1.33.0/MIGRATION.md) for information on how to upgrade from `go.opentelemetry.io/otel/semconv/v1.32.0.`(#6799) +- The `go.opentelemetry.io/otel/semconv/v1.34.0` package. + The package contains semantic conventions from the `v1.34.0` version of the OpenTelemetry Semantic Conventions. (#6812) +- Add metric's schema URL as `otel_scope_schema_url` label in `go.opentelemetry.io/otel/exporters/prometheus`. (#5947) +- Add metric's scope attributes as `otel_scope_[attribute]` labels in `go.opentelemetry.io/otel/exporters/prometheus`. (#5947) +- Add `EventName` to `EnabledParameters` in `go.opentelemetry.io/otel/log`. (#6825) +- Add `EventName` to `EnabledParameters` in `go.opentelemetry.io/otel/sdk/log`. (#6825) +- Changed handling of `go.opentelemetry.io/otel/exporters/prometheus` metric renaming to add unit suffixes when it doesn't match one of the pre-defined values in the unit suffix map. (#6839) + +### Changed + +- The semantic conventions have been upgraded from `v1.26.0` to `v1.34.0` in `go.opentelemetry.io/otel/bridge/opentracing`. (#6827) +- The semantic conventions have been upgraded from `v1.26.0` to `v1.34.0` in `go.opentelemetry.io/otel/exporters/zipkin`. (#6829) +- The semantic conventions have been upgraded from `v1.26.0` to `v1.34.0` in `go.opentelemetry.io/otel/metric`. (#6832) +- The semantic conventions have been upgraded from `v1.26.0` to `v1.34.0` in `go.opentelemetry.io/otel/sdk/resource`. (#6834) +- The semantic conventions have been upgraded from `v1.26.0` to `v1.34.0` in `go.opentelemetry.io/otel/sdk/trace`. 
(#6835) +- The semantic conventions have been upgraded from `v1.26.0` to `v1.34.0` in `go.opentelemetry.io/otel/trace`. (#6836) +- `Record.Resource` now returns `*resource.Resource` instead of `resource.Resource` in `go.opentelemetry.io/otel/sdk/log`. (#6864) +- Retry now shows error cause for context timeout in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`, `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`, `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc`, `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`, `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`, `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#6898) + +### Fixed + +- Stop stripping trailing slashes from configured endpoint URL in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`. (#6710) +- Stop stripping trailing slashes from configured endpoint URL in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#6710) +- Stop stripping trailing slashes from configured endpoint URL in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`. (#6710) +- Stop stripping trailing slashes from configured endpoint URL in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#6710) +- Validate exponential histogram scale range for Prometheus compatibility in `go.opentelemetry.io/otel/exporters/prometheus`. (#6822) +- Context cancellation during metric pipeline produce does not corrupt data in `go.opentelemetry.io/otel/sdk/metric`. (#6914) + +### Removed + +- `go.opentelemetry.io/otel/exporters/prometheus` no longer exports `otel_scope_info` metric. (#6770) + +## [0.12.2] 2025-05-22 + +### Fixed + +- Retract `v0.12.0` release of `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc` module that contains invalid dependencies. 
(#6804) +- Retract `v0.12.0` release of `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp` module that contains invalid dependencies. (#6804) +- Retract `v0.12.0` release of `go.opentelemetry.io/otel/exporters/stdout/stdoutlog` module that contains invalid dependencies. (#6804) + +## [0.12.1] 2025-05-21 + +### Fixes + +- Use the proper dependency version of `go.opentelemetry.io/otel/sdk/log/logtest` in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc`. (#6800) +- Use the proper dependency version of `go.opentelemetry.io/otel/sdk/log/logtest` in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#6800) +- Use the proper dependency version of `go.opentelemetry.io/otel/sdk/log/logtest` in `go.opentelemetry.io/otel/exporters/stdout/stdoutlog`. (#6800) + +## [1.36.0/0.58.0/0.12.0] 2025-05-20 + +### Added + +- Add exponential histogram support in `go.opentelemetry.io/otel/exporters/prometheus`. (#6421) +- The `go.opentelemetry.io/otel/semconv/v1.31.0` package. + The package contains semantic conventions from the `v1.31.0` version of the OpenTelemetry Semantic Conventions. + See the [migration documentation](./semconv/v1.31.0/MIGRATION.md) for information on how to upgrade from `go.opentelemetry.io/otel/semconv/v1.30.0`. (#6479) +- Add `Recording`, `Scope`, and `Record` types in `go.opentelemetry.io/otel/log/logtest`. (#6507) +- Add `WithHTTPClient` option to configure the `http.Client` used by `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#6751) +- Add `WithHTTPClient` option to configure the `http.Client` used by `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#6752) +- Add `WithHTTPClient` option to configure the `http.Client` used by `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#6688) +- Add `ValuesGetter` in `go.opentelemetry.io/otel/propagation`, a `TextMapCarrier` that supports retrieving multiple values for a single key. 
(#5973) +- Add `Values` method to `HeaderCarrier` to implement the new `ValuesGetter` interface in `go.opentelemetry.io/otel/propagation`. (#5973) +- Update `Baggage` in `go.opentelemetry.io/otel/propagation` to retrieve multiple values for a key when the carrier implements `ValuesGetter`. (#5973) +- Add `AssertEqual` function in `go.opentelemetry.io/otel/log/logtest`. (#6662) +- The `go.opentelemetry.io/otel/semconv/v1.32.0` package. + The package contains semantic conventions from the `v1.32.0` version of the OpenTelemetry Semantic Conventions. + See the [migration documentation](./semconv/v1.32.0/MIGRATION.md) for information on how to upgrade from `go.opentelemetry.io/otel/semconv/v1.31.0`(#6782) +- Add `Transform` option in `go.opentelemetry.io/otel/log/logtest`. (#6794) +- Add `Desc` option in `go.opentelemetry.io/otel/log/logtest`. (#6796) + +### Removed + +- Drop support for [Go 1.22]. (#6381, #6418) +- Remove `Resource` field from `EnabledParameters` in `go.opentelemetry.io/otel/sdk/log`. (#6494) +- Remove `RecordFactory` type from `go.opentelemetry.io/otel/log/logtest`. (#6492) +- Remove `ScopeRecords`, `EmittedRecord`, and `RecordFactory` types from `go.opentelemetry.io/otel/log/logtest`. (#6507) +- Remove `AssertRecordEqual` function in `go.opentelemetry.io/otel/log/logtest`, use `AssertEqual` instead. (#6662) + +### Changed + +- ⚠️ Update `github.com/prometheus/client_golang` to `v1.21.1`, which changes the `NameValidationScheme` to `UTF8Validation`. + This allows metrics names to keep original delimiters (e.g. `.`), rather than replacing with underscores. + This can be reverted by setting `github.com/prometheus/common/model.NameValidationScheme` to `LegacyValidation` in `github.com/prometheus/common/model`. (#6433) +- Initialize map with `len(keys)` in `NewAllowKeysFilter` and `NewDenyKeysFilter` to avoid unnecessary allocations in `go.opentelemetry.io/otel/attribute`. (#6455) +- `go.opentelemetry.io/otel/log/logtest` is now a separate Go module. 
(#6465) +- `go.opentelemetry.io/otel/sdk/log/logtest` is now a separate Go module. (#6466) +- `Recorder` in `go.opentelemetry.io/otel/log/logtest` no longer separately stores records emitted by loggers with the same instrumentation scope. (#6507) +- Improve performance of `BatchProcessor` in `go.opentelemetry.io/otel/sdk/log` by not exporting when exporter cannot accept more. (#6569, #6641) + +### Deprecated + +- Deprecate support for `model.LegacyValidation` for `go.opentelemetry.io/otel/exporters/prometheus`. (#6449) + +### Fixes + +- Stop percent encoding header environment variables in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc` and `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#6392) +- Ensure the `noopSpan.tracerProvider` method is not inlined in `go.opentelemetry.io/otel/trace` so the `go.opentelemetry.io/auto` instrumentation can instrument non-recording spans. (#6456) +- Use a `sync.Pool` instead of allocating `metricdata.ResourceMetrics` in `go.opentelemetry.io/otel/exporters/prometheus`. (#6472) + +## [1.35.0/0.57.0/0.11.0] 2025-03-05 + +This release is the last to support [Go 1.22]. +The next release will require at least [Go 1.23]. + +### Added + +- Add `ValueFromAttribute` and `KeyValueFromAttribute` in `go.opentelemetry.io/otel/log`. (#6180) +- Add `EventName` and `SetEventName` to `Record` in `go.opentelemetry.io/otel/log`. (#6187) +- Add `EventName` to `RecordFactory` in `go.opentelemetry.io/otel/log/logtest`. (#6187) +- `AssertRecordEqual` in `go.opentelemetry.io/otel/log/logtest` checks `Record.EventName`. (#6187) +- Add `EventName` and `SetEventName` to `Record` in `go.opentelemetry.io/otel/sdk/log`. (#6193) +- Add `EventName` to `RecordFactory` in `go.opentelemetry.io/otel/sdk/log/logtest`. (#6193) +- Emit `Record.EventName` field in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc`. (#6211) +- Emit `Record.EventName` field in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. 
(#6211) +- Emit `Record.EventName` field in `go.opentelemetry.io/otel/exporters/stdout/stdoutlog` (#6210) +- The `go.opentelemetry.io/otel/semconv/v1.28.0` package. + The package contains semantic conventions from the `v1.28.0` version of the OpenTelemetry Semantic Conventions. + See the [migration documentation](./semconv/v1.28.0/MIGRATION.md) for information on how to upgrade from `go.opentelemetry.io/otel/semconv/v1.27.0`(#6236) +- The `go.opentelemetry.io/otel/semconv/v1.30.0` package. + The package contains semantic conventions from the `v1.30.0` version of the OpenTelemetry Semantic Conventions. + See the [migration documentation](./semconv/v1.30.0/MIGRATION.md) for information on how to upgrade from `go.opentelemetry.io/otel/semconv/v1.28.0`(#6240) +- Document the pitfalls of using `Resource` as a comparable type. + `Resource.Equal` and `Resource.Equivalent` should be used instead. (#6272) +- Support [Go 1.24]. (#6304) +- Add `FilterProcessor` and `EnabledParameters` in `go.opentelemetry.io/otel/sdk/log`. + It replaces `go.opentelemetry.io/otel/sdk/log/internal/x.FilterProcessor`. + Compared to previous version it additionally gives the possibility to filter by resource and instrumentation scope. (#6317) + +### Changed + +- Update `github.com/prometheus/common` to `v0.62.0`, which changes the `NameValidationScheme` to `NoEscaping`. + This allows metrics names to keep original delimiters (e.g. `.`), rather than replacing with underscores. + This is controlled by the `Content-Type` header, or can be reverted by setting `NameValidationScheme` to `LegacyValidation` in `github.com/prometheus/common/model`. (#6198) + +### Fixes + +- Eliminate goroutine leak for the processor returned by `NewSimpleSpanProcessor` in `go.opentelemetry.io/otel/sdk/trace` when `Shutdown` is called and the passed `ctx` is canceled and `SpanExporter.Shutdown` has not returned. 
(#6368) +- Eliminate goroutine leak for the processor returned by `NewBatchSpanProcessor` in `go.opentelemetry.io/otel/sdk/trace` when `ForceFlush` is called and the passed `ctx` is canceled and `SpanExporter.Export` has not returned. (#6369) + ## [1.34.0/0.56.0/0.10.0] 2025-01-17 ### Changed @@ -3197,7 +3430,13 @@ It contains api and sdk for trace and meter. - CircleCI build CI manifest files. - CODEOWNERS file to track owners of this project. -[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.34.0...HEAD +[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.38.0...HEAD +[1.38.0/0.60.0/0.14.0/0.0.13]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.38.0 +[1.37.0/0.59.0/0.13.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.37.0 +[0.12.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/log/v0.12.2 +[0.12.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/log/v0.12.1 +[1.36.0/0.58.0/0.12.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.36.0 +[1.35.0/0.57.0/0.11.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.35.0 [1.34.0/0.56.0/0.10.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.34.0 [1.33.0/0.55.0/0.9.0/0.0.12]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.33.0 [1.32.0/0.54.0/0.8.0/0.0.11]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.32.0 @@ -3288,6 +3527,8 @@ It contains api and sdk for trace and meter. 
+[Go 1.25]: https://go.dev/doc/go1.25 +[Go 1.24]: https://go.dev/doc/go1.24 [Go 1.23]: https://go.dev/doc/go1.23 [Go 1.22]: https://go.dev/doc/go1.22 [Go 1.21]: https://go.dev/doc/go1.21 diff --git a/vendor/go.opentelemetry.io/otel/CODEOWNERS b/vendor/go.opentelemetry.io/otel/CODEOWNERS index 945a07d2b..26a03aed1 100644 --- a/vendor/go.opentelemetry.io/otel/CODEOWNERS +++ b/vendor/go.opentelemetry.io/otel/CODEOWNERS @@ -12,6 +12,6 @@ # https://help.github.com/en/articles/about-code-owners # -* @MrAlias @XSAM @dashpole @pellared @dmathieu +* @MrAlias @XSAM @dashpole @pellared @dmathieu @flc1125 CODEOWNERS @MrAlias @pellared @dashpole @XSAM @dmathieu diff --git a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md index 22a2e9dbd..0b3ae855c 100644 --- a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md +++ b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md @@ -109,10 +109,9 @@ A PR is considered **ready to merge** when: This is not enforced through automation, but needs to be validated by the maintainer merging. - * The qualified approvals need to be from [Approver]s/[Maintainer]s - affiliated with different companies. Two qualified approvals from - [Approver]s or [Maintainer]s affiliated with the same company counts as a - single qualified approval. + * At least one of the qualified approvals need to be from an + [Approver]/[Maintainer] affiliated with a different company than the author + of the PR. * PRs introducing changes that have already been discussed and consensus reached only need one qualified approval. The discussion and resolution needs to be linked to the PR. @@ -181,6 +180,47 @@ patterns in the spec. For a deeper discussion, see [this](https://github.com/open-telemetry/opentelemetry-specification/issues/165). +## Tests + +Each functionality should be covered by tests. + +Performance-critical functionality should also be covered by benchmarks. 
+ +- Pull requests adding a performance-critical functionality +should have `go test -bench` output in their description. +- Pull requests changing a performance-critical functionality +should have [`benchstat`](https://pkg.go.dev/golang.org/x/perf/cmd/benchstat) +output in their description. + +## Dependencies + +This project uses [Go Modules] for dependency management. All modules will use +`go.mod` to explicitly list all direct and indirect dependencies, ensuring a +clear dependency graph. The `go.sum` file for each module will be committed to +the repository and used to verify the integrity of downloaded modules, +preventing malicious tampering. + +This project uses automated dependency update tools (i.e. dependabot, +renovatebot) to manage updates to dependencies. This ensures that dependencies +are kept up-to-date with the latest security patches and features and are +reviewed before being merged. If you would like to propose a change to a +dependency it should be done through a pull request that updates the `go.mod` +file and includes a description of the change. + +See the [versioning and compatibility](./VERSIONING.md) policy for more details +about dependency compatibility. + +[Go Modules]: https://pkg.go.dev/cmd/go#hdr-Modules__module_versions__and_more + +### Environment Dependencies + +This project does not partition dependencies based on the environment (i.e. +`development`, `staging`, `production`). + +Only the dependencies explicitly included in the released modules have be +tested and verified to work with the released code. No other guarantee is made +about the compatibility of other dependencies. + ## Documentation Each (non-internal, non-test) package must be documented using @@ -222,6 +262,10 @@ For a non-comprehensive but foundational overview of these best practices the [Effective Go](https://golang.org/doc/effective_go.html) documentation is an excellent starting place. 
+We also recommend following the +[Go Code Review Comments](https://go.dev/wiki/CodeReviewComments) +that collects common comments made during reviews of Go code. + As a convenience for developers building this project the `make precommit` will format, lint, validate, and in some cases fix the changes you plan to submit. This check will need to pass for your changes to be able to be @@ -575,6 +619,10 @@ See also: ### Testing +We allow using [`testify`](https://github.com/stretchr/testify) even though +it is seen as non-idiomatic according to +the [Go Test Comments](https://go.dev/wiki/TestComments#assert-libraries) page. + The tests should never leak goroutines. Use the term `ConcurrentSafe` in the test name when it aims to verify the @@ -629,19 +677,28 @@ should be canceled. ## Approvers and Maintainers -### Triagers +### Maintainers -- [Cheng-Zhen Yang](https://github.com/scorpionknifes), Independent +- [Damien Mathieu](https://github.com/dmathieu), Elastic ([GPG](https://keys.openpgp.org/search?q=5A126B972A81A6CE443E5E1B408B8E44F0873832)) +- [David Ashpole](https://github.com/dashpole), Google ([GPG](https://keys.openpgp.org/search?q=C0D1BDDCAAEAE573673085F176327DA4D864DC70)) +- [Robert Pająk](https://github.com/pellared), Splunk ([GPG](https://keys.openpgp.org/search?q=CDAD3A60476A3DE599AA5092E5F7C35A4DBE90C2)) +- [Sam Xie](https://github.com/XSAM), Splunk ([GPG](https://keys.openpgp.org/search?q=AEA033782371ABB18EE39188B8044925D6FEEBEA)) +- [Tyler Yahn](https://github.com/MrAlias), Splunk ([GPG](https://keys.openpgp.org/search?q=0x46B0F3E1A8B1BA5A)) + +For more information about the maintainer role, see the [community repository](https://github.com/open-telemetry/community/blob/main/guides/contributor/membership.md#maintainer). 
### Approvers -### Maintainers +- [Flc](https://github.com/flc1125), Independent -- [Damien Mathieu](https://github.com/dmathieu), Elastic -- [David Ashpole](https://github.com/dashpole), Google -- [Robert Pająk](https://github.com/pellared), Splunk -- [Sam Xie](https://github.com/XSAM), Cisco/AppDynamics -- [Tyler Yahn](https://github.com/MrAlias), Splunk +For more information about the approver role, see the [community repository](https://github.com/open-telemetry/community/blob/main/guides/contributor/membership.md#approver). + +### Triagers + +- [Alex Kats](https://github.com/akats7), Capital One +- [Cheng-Zhen Yang](https://github.com/scorpionknifes), Independent + +For more information about the triager role, see the [community repository](https://github.com/open-telemetry/community/blob/main/guides/contributor/membership.md#triager). ### Emeritus @@ -653,6 +710,8 @@ should be canceled. - [Josh MacDonald](https://github.com/jmacd) - [Liz Fong-Jones](https://github.com/lizthegrey) +For more information about the emeritus role, see the [community repository](https://github.com/open-telemetry/community/blob/main/guides/contributor/membership.md#emeritus-maintainerapprovertriager). + ### Become an Approver or a Maintainer See the [community membership document in OpenTelemetry community diff --git a/vendor/go.opentelemetry.io/otel/LICENSE b/vendor/go.opentelemetry.io/otel/LICENSE index 261eeb9e9..f1aee0f11 100644 --- a/vendor/go.opentelemetry.io/otel/LICENSE +++ b/vendor/go.opentelemetry.io/otel/LICENSE @@ -199,3 +199,33 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +-------------------------------------------------------------------------------- + +Copyright 2009 The Go Authors. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google LLC nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. \ No newline at end of file diff --git a/vendor/go.opentelemetry.io/otel/Makefile b/vendor/go.opentelemetry.io/otel/Makefile index a7f6d8cc6..bc0f1f92d 100644 --- a/vendor/go.opentelemetry.io/otel/Makefile +++ b/vendor/go.opentelemetry.io/otel/Makefile @@ -11,6 +11,10 @@ ALL_COVERAGE_MOD_DIRS := $(shell find . -type f -name 'go.mod' -exec dirname {} GO = go TIMEOUT = 60 +# User to run as in docker images. 
+DOCKER_USER=$(shell id -u):$(shell id -g) +DEPENDENCIES_DOCKERFILE=./dependencies.Dockerfile + .DEFAULT_GOAL := precommit .PHONY: precommit ci @@ -30,17 +34,17 @@ $(TOOLS)/%: $(TOOLS_MOD_DIR)/go.mod | $(TOOLS) MULTIMOD = $(TOOLS)/multimod $(TOOLS)/multimod: PACKAGE=go.opentelemetry.io/build-tools/multimod -SEMCONVGEN = $(TOOLS)/semconvgen -$(TOOLS)/semconvgen: PACKAGE=go.opentelemetry.io/build-tools/semconvgen - CROSSLINK = $(TOOLS)/crosslink $(TOOLS)/crosslink: PACKAGE=go.opentelemetry.io/build-tools/crosslink SEMCONVKIT = $(TOOLS)/semconvkit $(TOOLS)/semconvkit: PACKAGE=go.opentelemetry.io/otel/$(TOOLS_MOD_DIR)/semconvkit +VERIFYREADMES = $(TOOLS)/verifyreadmes +$(TOOLS)/verifyreadmes: PACKAGE=go.opentelemetry.io/otel/$(TOOLS_MOD_DIR)/verifyreadmes + GOLANGCI_LINT = $(TOOLS)/golangci-lint -$(TOOLS)/golangci-lint: PACKAGE=github.com/golangci/golangci-lint/cmd/golangci-lint +$(TOOLS)/golangci-lint: PACKAGE=github.com/golangci/golangci-lint/v2/cmd/golangci-lint MISSPELL = $(TOOLS)/misspell $(TOOLS)/misspell: PACKAGE=github.com/client9/misspell/cmd/misspell @@ -64,7 +68,7 @@ GOVULNCHECK = $(TOOLS)/govulncheck $(TOOLS)/govulncheck: PACKAGE=golang.org/x/vuln/cmd/govulncheck .PHONY: tools -tools: $(CROSSLINK) $(GOLANGCI_LINT) $(MISSPELL) $(GOCOVMERGE) $(STRINGER) $(PORTO) $(SEMCONVGEN) $(MULTIMOD) $(SEMCONVKIT) $(GOTMPL) $(GORELEASE) +tools: $(CROSSLINK) $(GOLANGCI_LINT) $(MISSPELL) $(GOCOVMERGE) $(STRINGER) $(PORTO) $(VERIFYREADMES) $(MULTIMOD) $(SEMCONVKIT) $(GOTMPL) $(GORELEASE) # Virtualized python tools via docker @@ -81,20 +85,20 @@ PIP := $(PYTOOLS)/pip WORKDIR := /workdir # The python image to use for the virtual environment. -PYTHONIMAGE := python:3.11.3-slim-bullseye +PYTHONIMAGE := $(shell awk '$$4=="python" {print $$2}' $(DEPENDENCIES_DOCKERFILE)) # Run the python image with the current directory mounted. 
-DOCKERPY := docker run --rm -v "$(CURDIR):$(WORKDIR)" -w $(WORKDIR) $(PYTHONIMAGE) +DOCKERPY := docker run --rm -u $(DOCKER_USER) -v "$(CURDIR):$(WORKDIR)" -w $(WORKDIR) $(PYTHONIMAGE) # Create a virtual environment for Python tools. $(PYTOOLS): # The `--upgrade` flag is needed to ensure that the virtual environment is # created with the latest pip version. - @$(DOCKERPY) bash -c "python3 -m venv $(VENVDIR) && $(PIP) install --upgrade pip" + @$(DOCKERPY) bash -c "python3 -m venv $(VENVDIR) && $(PIP) install --upgrade --cache-dir=$(WORKDIR)/.cache/pip pip" # Install python packages into the virtual environment. $(PYTOOLS)/%: $(PYTOOLS) - @$(DOCKERPY) $(PIP) install -r requirements.txt + @$(DOCKERPY) $(PIP) install --cache-dir=$(WORKDIR)/.cache/pip -r requirements.txt CODESPELL = $(PYTOOLS)/codespell $(CODESPELL): PACKAGE=codespell @@ -119,7 +123,7 @@ vanity-import-fix: $(PORTO) # Generate go.work file for local development. .PHONY: go-work go-work: $(CROSSLINK) - $(CROSSLINK) work --root=$(shell pwd) + $(CROSSLINK) work --root=$(shell pwd) --go=1.22.7 # Build @@ -209,11 +213,8 @@ go-mod-tidy/%: crosslink && cd $(DIR) \ && $(GO) mod tidy -compat=1.21 -.PHONY: lint-modules -lint-modules: go-mod-tidy - .PHONY: lint -lint: misspell lint-modules golangci-lint govulncheck +lint: misspell go-mod-tidy golangci-lint govulncheck .PHONY: vanity-import-check vanity-import-check: $(PORTO) @@ -265,14 +266,31 @@ check-clean-work-tree: exit 1; \ fi +# The weaver docker image to use for semconv-generate. 
+WEAVER_IMAGE := $(shell awk '$$4=="weaver" {print $$2}' $(DEPENDENCIES_DOCKERFILE)) + SEMCONVPKG ?= "semconv/" .PHONY: semconv-generate -semconv-generate: $(SEMCONVGEN) $(SEMCONVKIT) +semconv-generate: $(SEMCONVKIT) [ "$(TAG)" ] || ( echo "TAG unset: missing opentelemetry semantic-conventions tag"; exit 1 ) - [ "$(OTEL_SEMCONV_REPO)" ] || ( echo "OTEL_SEMCONV_REPO unset: missing path to opentelemetry semantic-conventions repo"; exit 1 ) - $(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." --only=attribute_group -p conventionType=trace -f attribute_group.go -z "$(SEMCONVPKG)/capitalizations.txt" -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)" - $(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." --only=metric -f metric.go -t "$(SEMCONVPKG)/metric_template.j2" -s "$(TAG)" - $(SEMCONVKIT) -output "$(SEMCONVPKG)/$(TAG)" -tag "$(TAG)" + # Ensure the target directory for source code is available. + mkdir -p $(PWD)/$(SEMCONVPKG)/${TAG} + # Note: We mount a home directory for downloading/storing the semconv repository. + # Weaver will automatically clean the cache when finished, but the directories will remain. 
+ mkdir -p ~/.weaver + docker run --rm \ + -u $(DOCKER_USER) \ + --env HOME=/tmp/weaver \ + --mount 'type=bind,source=$(PWD)/semconv/templates,target=/home/weaver/templates,readonly' \ + --mount 'type=bind,source=$(PWD)/semconv/${TAG},target=/home/weaver/target' \ + --mount 'type=bind,source=$(HOME)/.weaver,target=/tmp/weaver/.weaver' \ + $(WEAVER_IMAGE) registry generate \ + --registry=https://github.com/open-telemetry/semantic-conventions/archive/refs/tags/$(TAG).zip[model] \ + --templates=/home/weaver/templates \ + --param tag=$(TAG) \ + go \ + /home/weaver/target + $(SEMCONVKIT) -semconv "$(SEMCONVPKG)" -tag "$(TAG)" .PHONY: gorelease gorelease: $(OTEL_GO_MOD_DIRS:%=gorelease/%) @@ -298,10 +316,11 @@ add-tags: verify-mods @[ "${MODSET}" ] || ( echo ">> env var MODSET is not set"; exit 1 ) $(MULTIMOD) tag -m ${MODSET} -c ${COMMIT} +MARKDOWNIMAGE := $(shell awk '$$4=="markdown" {print $$2}' $(DEPENDENCIES_DOCKERFILE)) .PHONY: lint-markdown lint-markdown: - docker run -v "$(CURDIR):$(WORKDIR)" avtodev/markdown-lint:v1 -c $(WORKDIR)/.markdownlint.yaml $(WORKDIR)/**/*.md + docker run --rm -u $(DOCKER_USER) -v "$(CURDIR):$(WORKDIR)" $(MARKDOWNIMAGE) -c $(WORKDIR)/.markdownlint.yaml $(WORKDIR)/**/*.md .PHONY: verify-readmes -verify-readmes: - ./verify_readmes.sh +verify-readmes: $(VERIFYREADMES) + $(VERIFYREADMES) diff --git a/vendor/go.opentelemetry.io/otel/README.md b/vendor/go.opentelemetry.io/otel/README.md index d9a192076..6b7ab5f21 100644 --- a/vendor/go.opentelemetry.io/otel/README.md +++ b/vendor/go.opentelemetry.io/otel/README.md @@ -4,6 +4,10 @@ [![codecov.io](https://codecov.io/gh/open-telemetry/opentelemetry-go/coverage.svg?branch=main)](https://app.codecov.io/gh/open-telemetry/opentelemetry-go?branch=main) [![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel)](https://pkg.go.dev/go.opentelemetry.io/otel) [![Go Report Card](https://goreportcard.com/badge/go.opentelemetry.io/otel)](https://goreportcard.com/report/go.opentelemetry.io/otel) 
+[![OpenSSF Scorecard](https://api.scorecard.dev/projects/github.com/open-telemetry/opentelemetry-go/badge)](https://scorecard.dev/viewer/?uri=github.com/open-telemetry/opentelemetry-go) +[![OpenSSF Best Practices](https://www.bestpractices.dev/projects/9996/badge)](https://www.bestpractices.dev/projects/9996) +[![Fuzzing Status](https://oss-fuzz-build-logs.storage.googleapis.com/badges/opentelemetry-go.svg)](https://issues.oss-fuzz.com/issues?q=project:opentelemetry-go) +[![FOSSA Status](https://app.fossa.com/api/projects/custom%2B162%2Fgithub.com%2Fopen-telemetry%2Fopentelemetry-go.svg?type=shield&issueType=license)](https://app.fossa.com/projects/custom%2B162%2Fgithub.com%2Fopen-telemetry%2Fopentelemetry-go?ref=badge_shield&issueType=license) [![Slack](https://img.shields.io/badge/slack-@cncf/otel--go-brightgreen.svg?logo=slack)](https://cloud-native.slack.com/archives/C01NPAXACKT) OpenTelemetry-Go is the [Go](https://golang.org/) implementation of [OpenTelemetry](https://opentelemetry.io/). @@ -49,20 +53,27 @@ Currently, this project supports the following environments. 
| OS | Go Version | Architecture | |----------|------------|--------------| +| Ubuntu | 1.25 | amd64 | +| Ubuntu | 1.24 | amd64 | | Ubuntu | 1.23 | amd64 | -| Ubuntu | 1.22 | amd64 | +| Ubuntu | 1.25 | 386 | +| Ubuntu | 1.24 | 386 | | Ubuntu | 1.23 | 386 | -| Ubuntu | 1.22 | 386 | -| Linux | 1.23 | arm64 | -| Linux | 1.22 | arm64 | +| Ubuntu | 1.25 | arm64 | +| Ubuntu | 1.24 | arm64 | +| Ubuntu | 1.23 | arm64 | +| macOS 13 | 1.25 | amd64 | +| macOS 13 | 1.24 | amd64 | | macOS 13 | 1.23 | amd64 | -| macOS 13 | 1.22 | amd64 | +| macOS | 1.25 | arm64 | +| macOS | 1.24 | arm64 | | macOS | 1.23 | arm64 | -| macOS | 1.22 | arm64 | +| Windows | 1.25 | amd64 | +| Windows | 1.24 | amd64 | | Windows | 1.23 | amd64 | -| Windows | 1.22 | amd64 | +| Windows | 1.25 | 386 | +| Windows | 1.24 | 386 | | Windows | 1.23 | 386 | -| Windows | 1.22 | 386 | While this project should work for other systems, no compatibility guarantees are made for those systems currently. diff --git a/vendor/go.opentelemetry.io/otel/RELEASING.md b/vendor/go.opentelemetry.io/otel/RELEASING.md index 4ebef4f9d..1ddcdef03 100644 --- a/vendor/go.opentelemetry.io/otel/RELEASING.md +++ b/vendor/go.opentelemetry.io/otel/RELEASING.md @@ -1,21 +1,22 @@ # Release Process +## Create a `Version Release` issue + +Create a `Version Release` issue to track the release process. + ## Semantic Convention Generation New versions of the [OpenTelemetry Semantic Conventions] mean new versions of the `semconv` package need to be generated. The `semconv-generate` make target is used for this. -1. Checkout a local copy of the [OpenTelemetry Semantic Conventions] to the desired release tag. -2. Pull the latest `otel/semconvgen` image: `docker pull otel/semconvgen:latest` -3. Run the `make semconv-generate ...` target from this repository. +1. Set the `TAG` environment variable to the semantic convention tag you want to generate. +2. Run the `make semconv-generate ...` target from this repository. 
For example, ```sh -export TAG="v1.21.0" # Change to the release version you are generating. -export OTEL_SEMCONV_REPO="/absolute/path/to/opentelemetry/semantic-conventions" -docker pull otel/semconvgen:latest -make semconv-generate # Uses the exported TAG and OTEL_SEMCONV_REPO. +export TAG="v1.30.0" # Change to the release version you are generating. +make semconv-generate # Uses the exported TAG. ``` This should create a new sub-package of [`semconv`](./semconv). @@ -111,6 +112,29 @@ It is critical you make sure the version you push upstream is correct. Finally create a Release for the new `` on GitHub. The release body should include all the release notes from the Changelog for this release. +### Sign the Release Artifact + +To ensure we comply with CNCF best practices, we need to sign the release artifact. +The tarball attached to the GitHub release needs to be signed with your GPG key. + +Follow [these steps] to sign the release artifact and upload it to GitHub. +You can use [this script] to verify the contents of the tarball before signing it. + +Be sure to use the correct GPG key when signing the release artifact. 
+ +```terminal +gpg --local-user --armor --detach-sign opentelemetry-go-.tar.gz +``` + +You can verify the signature with: + +```terminal +gpg --verify opentelemetry-go-.tar.gz.asc opentelemetry-go-.tar.gz +``` + +[these steps]: https://wiki.debian.org/Creating%20signed%20GitHub%20releases +[this script]: https://github.com/MrAlias/attest-sh + ## Post-Release ### Contrib Repository @@ -126,6 +150,16 @@ Importantly, bump any package versions referenced to be the latest one you just [Go instrumentation documentation]: https://opentelemetry.io/docs/languages/go/ [content/en/docs/languages/go]: https://github.com/open-telemetry/opentelemetry.io/tree/main/content/en/docs/languages/go +### Close the milestone + +Once a release is made, ensure all issues that were fixed and PRs that were merged as part of this release are added to the corresponding milestone. +This helps track what changes were included in each release. + +- To find issues that haven't been included in a milestone, use this [GitHub search query](https://github.com/open-telemetry/opentelemetry-go/issues?q=is%3Aissue%20no%3Amilestone%20is%3Aclosed%20sort%3Aupdated-desc%20reason%3Acompleted%20-label%3AStale%20linked%3Apr) +- To find merged PRs that haven't been included in a milestone, use this [GitHub search query](https://github.com/open-telemetry/opentelemetry-go/pulls?q=is%3Apr+no%3Amilestone+is%3Amerged). + +Once all related issues and PRs have been added to the milestone, close the milestone. 
+ ### Demo Repository Bump the dependencies in the following Go services: @@ -133,3 +167,7 @@ Bump the dependencies in the following Go services: - [`accounting`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/accounting) - [`checkoutservice`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/checkout) - [`productcatalogservice`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/product-catalog) + +### Close the `Version Release` issue + +Once the todo list in the `Version Release` issue is complete, close the issue. diff --git a/vendor/go.opentelemetry.io/otel/SECURITY-INSIGHTS.yml b/vendor/go.opentelemetry.io/otel/SECURITY-INSIGHTS.yml new file mode 100644 index 000000000..8041fc62e --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/SECURITY-INSIGHTS.yml @@ -0,0 +1,203 @@ +header: + schema-version: "1.0.0" + expiration-date: "2026-08-04T00:00:00.000Z" + last-updated: "2025-08-04" + last-reviewed: "2025-08-04" + commit-hash: 69e81088ad40f45a0764597326722dea8f3f00a8 + project-url: https://github.com/open-telemetry/opentelemetry-go + project-release: "v1.37.0" + changelog: https://github.com/open-telemetry/opentelemetry-go/blob/69e81088ad40f45a0764597326722dea8f3f00a8/CHANGELOG.md + license: https://github.com/open-telemetry/opentelemetry-go/blob/69e81088ad40f45a0764597326722dea8f3f00a8/LICENSE + +project-lifecycle: + status: active + bug-fixes-only: false + core-maintainers: + - https://github.com/dmathieu + - https://github.com/dashpole + - https://github.com/pellared + - https://github.com/XSAM + - https://github.com/MrAlias + release-process: | + See https://github.com/open-telemetry/opentelemetry-go/blob/69e81088ad40f45a0764597326722dea8f3f00a8/RELEASING.md + +contribution-policy: + accepts-pull-requests: true + accepts-automated-pull-requests: true + automated-tools-list: + - automated-tool: dependabot + action: allowed + comment: Automated dependency updates are accepted. 
+ - automated-tool: renovatebot + action: allowed + comment: Automated dependency updates are accepted. + - automated-tool: opentelemetrybot + action: allowed + comment: Automated OpenTelemetry actions are accepted. + contributing-policy: https://github.com/open-telemetry/opentelemetry-go/blob/69e81088ad40f45a0764597326722dea8f3f00a8/CONTRIBUTING.md + code-of-conduct: https://github.com/open-telemetry/.github/blob/ffa15f76b65ec7bcc41f6a0b277edbb74f832206/CODE_OF_CONDUCT.md + +documentation: + - https://pkg.go.dev/go.opentelemetry.io/otel + - https://opentelemetry.io/docs/instrumentation/go/ + +distribution-points: + - pkg:golang/go.opentelemetry.io/otel + - pkg:golang/go.opentelemetry.io/otel/bridge/opencensus + - pkg:golang/go.opentelemetry.io/otel/bridge/opencensus/test + - pkg:golang/go.opentelemetry.io/otel/bridge/opentracing + - pkg:golang/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc + - pkg:golang/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp + - pkg:golang/go.opentelemetry.io/otel/exporters/otlp/otlptrace + - pkg:golang/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc + - pkg:golang/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp + - pkg:golang/go.opentelemetry.io/otel/exporters/stdout/stdoutmetric + - pkg:golang/go.opentelemetry.io/otel/exporters/stdout/stdouttrace + - pkg:golang/go.opentelemetry.io/otel/exporters/zipkin + - pkg:golang/go.opentelemetry.io/otel/metric + - pkg:golang/go.opentelemetry.io/otel/sdk + - pkg:golang/go.opentelemetry.io/otel/sdk/metric + - pkg:golang/go.opentelemetry.io/otel/trace + - pkg:golang/go.opentelemetry.io/otel/exporters/prometheus + - pkg:golang/go.opentelemetry.io/otel/log + - pkg:golang/go.opentelemetry.io/otel/log/logtest + - pkg:golang/go.opentelemetry.io/otel/sdk/log + - pkg:golang/go.opentelemetry.io/otel/sdk/log/logtest + - pkg:golang/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc + - 
pkg:golang/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp + - pkg:golang/go.opentelemetry.io/otel/exporters/stdout/stdoutlog + - pkg:golang/go.opentelemetry.io/otel/schema + +security-artifacts: + threat-model: + threat-model-created: false + comment: | + No formal threat model created yet. + self-assessment: + self-assessment-created: false + comment: | + No formal self-assessment yet. + +security-testing: + - tool-type: sca + tool-name: Dependabot + tool-version: latest + tool-url: https://github.com/dependabot + tool-rulesets: + - built-in + integration: + ad-hoc: false + ci: true + before-release: true + comment: | + Automated dependency updates. + - tool-type: sast + tool-name: golangci-lint + tool-version: latest + tool-url: https://github.com/golangci/golangci-lint + tool-rulesets: + - built-in + integration: + ad-hoc: false + ci: true + before-release: true + comment: | + Static analysis in CI. + - tool-type: fuzzing + tool-name: OSS-Fuzz + tool-version: latest + tool-url: https://github.com/google/oss-fuzz + tool-rulesets: + - default + integration: + ad-hoc: false + ci: false + before-release: false + comment: | + OpenTelemetry Go is integrated with OSS-Fuzz for continuous fuzz testing. See https://github.com/google/oss-fuzz/tree/f0f9b221190c6063a773bea606d192ebfc3d00cf/projects/opentelemetry-go for more details. + - tool-type: sast + tool-name: CodeQL + tool-version: latest + tool-url: https://github.com/github/codeql + tool-rulesets: + - default + integration: + ad-hoc: false + ci: true + before-release: true + comment: | + CodeQL static analysis is run in CI for all commits and pull requests to detect security vulnerabilities in the Go source code. See https://github.com/open-telemetry/opentelemetry-go/blob/d5b5b059849720144a03ca5c87561bfbdb940119/.github/workflows/codeql-analysis.yml for workflow details. 
+ - tool-type: sca + tool-name: govulncheck + tool-version: latest + tool-url: https://pkg.go.dev/golang.org/x/vuln/cmd/govulncheck + tool-rulesets: + - default + integration: + ad-hoc: false + ci: true + before-release: true + comment: | + govulncheck is run in CI to detect known vulnerabilities in Go modules and code paths. See https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/.github/workflows/ci.yml for workflow configuration. + +security-assessments: + - auditor-name: 7ASecurity + auditor-url: https://7asecurity.com + auditor-report: https://7asecurity.com/reports/pentest-report-opentelemetry.pdf + report-year: 2023 + comment: | + This independent penetration test by 7ASecurity covered OpenTelemetry repositories including opentelemetry-go. The assessment focused on codebase review, threat modeling, and vulnerability identification. See the report for details of findings and recommendations applicable to opentelemetry-go. No critical vulnerabilities were found for this repository. + +security-contacts: + - type: email + value: cncf-opentelemetry-security@lists.cncf.io + primary: true + - type: website + value: https://github.com/open-telemetry/opentelemetry-go/security/policy + primary: false + +vulnerability-reporting: + accepts-vulnerability-reports: true + email-contact: cncf-opentelemetry-security@lists.cncf.io + security-policy: https://github.com/open-telemetry/opentelemetry-go/security/policy + comment: | + Security issues should be reported via email or GitHub security policy page. 
+ +dependencies: + third-party-packages: true + dependencies-lists: + - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/go.mod + - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/bridge/opencensus/go.mod + - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/bridge/opencensus/test/go.mod + - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/bridge/opentracing/go.mod + - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/exporters/otlp/otlplog/otlploggrpc/go.mod + - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/exporters/otlp/otlplog/otlploghttp/go.mod + - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/exporters/otlp/otlpmetric/otlpmetricgrpc/go.mod + - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/exporters/otlp/otlpmetric/otlpmetrichttp/go.mod + - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/exporters/otlp/otlptrace/go.mod + - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/exporters/otlp/otlptrace/otlptracegrpc/go.mod + - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/exporters/otlp/otlptrace/otlptracehttp/go.mod + - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/exporters/prometheus/go.mod + - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/exporters/stdout/stdoutlog/go.mod + - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/exporters/stdout/stdoutmetric/go.mod + - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/exporters/stdout/stdouttrace/go.mod + - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/exporters/zipkin/go.mod + - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/internal/tools/go.mod + - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/log/go.mod + - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/log/logtest/go.mod + - 
https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/metric/go.mod + - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/schema/go.mod + - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/sdk/go.mod + - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/sdk/log/go.mod + - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/sdk/log/logtest/go.mod + - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/sdk/metric/go.mod + - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/trace/go.mod + - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/trace/internal/telemetry/test/go.mod + dependencies-lifecycle: + policy-url: https://github.com/open-telemetry/opentelemetry-go/blob/69e81088ad40f45a0764597326722dea8f3f00a8/CONTRIBUTING.md + comment: | + Dependency lifecycle managed via go.mod and renovatebot. + env-dependencies-policy: + policy-url: https://github.com/open-telemetry/opentelemetry-go/blob/69e81088ad40f45a0764597326722dea8f3f00a8/CONTRIBUTING.md + comment: | + See contributing policy for environment usage. 
diff --git a/vendor/go.opentelemetry.io/otel/attribute/encoder.go b/vendor/go.opentelemetry.io/otel/attribute/encoder.go index 318e42fca..6333d34b3 100644 --- a/vendor/go.opentelemetry.io/otel/attribute/encoder.go +++ b/vendor/go.opentelemetry.io/otel/attribute/encoder.go @@ -78,7 +78,7 @@ func DefaultEncoder() Encoder { defaultEncoderOnce.Do(func() { defaultEncoderInstance = &defaultAttrEncoder{ pool: sync.Pool{ - New: func() interface{} { + New: func() any { return &bytes.Buffer{} }, }, @@ -96,11 +96,11 @@ func (d *defaultAttrEncoder) Encode(iter Iterator) string { for iter.Next() { i, keyValue := iter.IndexedAttribute() if i > 0 { - _, _ = buf.WriteRune(',') + _ = buf.WriteByte(',') } copyAndEscape(buf, string(keyValue.Key)) - _, _ = buf.WriteRune('=') + _ = buf.WriteByte('=') if keyValue.Value.Type() == STRING { copyAndEscape(buf, keyValue.Value.AsString()) @@ -122,14 +122,14 @@ func copyAndEscape(buf *bytes.Buffer, val string) { for _, ch := range val { switch ch { case '=', ',', escapeChar: - _, _ = buf.WriteRune(escapeChar) + _ = buf.WriteByte(escapeChar) } _, _ = buf.WriteRune(ch) } } -// Valid returns true if this encoder ID was allocated by -// `NewEncoderID`. Invalid encoder IDs will not be cached. +// Valid reports whether this encoder ID was allocated by +// [NewEncoderID]. Invalid encoder IDs will not be cached. func (id EncoderID) Valid() bool { return id.value != 0 } diff --git a/vendor/go.opentelemetry.io/otel/attribute/filter.go b/vendor/go.opentelemetry.io/otel/attribute/filter.go index be9cd922d..624ebbe38 100644 --- a/vendor/go.opentelemetry.io/otel/attribute/filter.go +++ b/vendor/go.opentelemetry.io/otel/attribute/filter.go @@ -15,11 +15,11 @@ type Filter func(KeyValue) bool // // If keys is empty a deny-all filter is returned. 
func NewAllowKeysFilter(keys ...Key) Filter { - if len(keys) <= 0 { - return func(kv KeyValue) bool { return false } + if len(keys) == 0 { + return func(KeyValue) bool { return false } } - allowed := make(map[Key]struct{}) + allowed := make(map[Key]struct{}, len(keys)) for _, k := range keys { allowed[k] = struct{}{} } @@ -34,11 +34,11 @@ func NewAllowKeysFilter(keys ...Key) Filter { // // If keys is empty an allow-all filter is returned. func NewDenyKeysFilter(keys ...Key) Filter { - if len(keys) <= 0 { - return func(kv KeyValue) bool { return true } + if len(keys) == 0 { + return func(KeyValue) bool { return true } } - forbid := make(map[Key]struct{}) + forbid := make(map[Key]struct{}, len(keys)) for _, k := range keys { forbid[k] = struct{}{} } diff --git a/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go b/vendor/go.opentelemetry.io/otel/attribute/internal/attribute.go similarity index 84% rename from vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go rename to vendor/go.opentelemetry.io/otel/attribute/internal/attribute.go index 691d96c75..087550430 100644 --- a/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go +++ b/vendor/go.opentelemetry.io/otel/attribute/internal/attribute.go @@ -5,14 +5,14 @@ Package attribute provide several helper functions for some commonly used logic of processing attributes. */ -package attribute // import "go.opentelemetry.io/otel/internal/attribute" +package attribute // import "go.opentelemetry.io/otel/attribute/internal" import ( "reflect" ) // BoolSliceValue converts a bool slice into an array with same elements as slice. -func BoolSliceValue(v []bool) interface{} { +func BoolSliceValue(v []bool) any { var zero bool cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))).Elem() reflect.Copy(cp, reflect.ValueOf(v)) @@ -20,7 +20,7 @@ func BoolSliceValue(v []bool) interface{} { } // Int64SliceValue converts an int64 slice into an array with same elements as slice. 
-func Int64SliceValue(v []int64) interface{} { +func Int64SliceValue(v []int64) any { var zero int64 cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))).Elem() reflect.Copy(cp, reflect.ValueOf(v)) @@ -28,7 +28,7 @@ func Int64SliceValue(v []int64) interface{} { } // Float64SliceValue converts a float64 slice into an array with same elements as slice. -func Float64SliceValue(v []float64) interface{} { +func Float64SliceValue(v []float64) any { var zero float64 cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))).Elem() reflect.Copy(cp, reflect.ValueOf(v)) @@ -36,7 +36,7 @@ func Float64SliceValue(v []float64) interface{} { } // StringSliceValue converts a string slice into an array with same elements as slice. -func StringSliceValue(v []string) interface{} { +func StringSliceValue(v []string) any { var zero string cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))).Elem() reflect.Copy(cp, reflect.ValueOf(v)) @@ -44,7 +44,7 @@ func StringSliceValue(v []string) interface{} { } // AsBoolSlice converts a bool array into a slice into with same elements as array. -func AsBoolSlice(v interface{}) []bool { +func AsBoolSlice(v any) []bool { rv := reflect.ValueOf(v) if rv.Type().Kind() != reflect.Array { return nil @@ -57,7 +57,7 @@ func AsBoolSlice(v interface{}) []bool { } // AsInt64Slice converts an int64 array into a slice into with same elements as array. -func AsInt64Slice(v interface{}) []int64 { +func AsInt64Slice(v any) []int64 { rv := reflect.ValueOf(v) if rv.Type().Kind() != reflect.Array { return nil @@ -70,7 +70,7 @@ func AsInt64Slice(v interface{}) []int64 { } // AsFloat64Slice converts a float64 array into a slice into with same elements as array. 
-func AsFloat64Slice(v interface{}) []float64 { +func AsFloat64Slice(v any) []float64 { rv := reflect.ValueOf(v) if rv.Type().Kind() != reflect.Array { return nil @@ -83,7 +83,7 @@ func AsFloat64Slice(v interface{}) []float64 { } // AsStringSlice converts a string array into a slice into with same elements as array. -func AsStringSlice(v interface{}) []string { +func AsStringSlice(v any) []string { rv := reflect.ValueOf(v) if rv.Type().Kind() != reflect.Array { return nil diff --git a/vendor/go.opentelemetry.io/otel/attribute/iterator.go b/vendor/go.opentelemetry.io/otel/attribute/iterator.go index f2ba89ce4..8df6249f0 100644 --- a/vendor/go.opentelemetry.io/otel/attribute/iterator.go +++ b/vendor/go.opentelemetry.io/otel/attribute/iterator.go @@ -25,8 +25,8 @@ type oneIterator struct { attr KeyValue } -// Next moves the iterator to the next position. Returns false if there are no -// more attributes. +// Next moves the iterator to the next position. +// Next reports whether there are more attributes. func (i *Iterator) Next() bool { i.idx++ return i.idx < i.Len() @@ -106,7 +106,8 @@ func (oi *oneIterator) advance() { } } -// Next returns true if there is another attribute available. +// Next moves the iterator to the next position. +// Next reports whether there is another attribute available. func (m *MergeIterator) Next() bool { if m.one.done && m.two.done { return false diff --git a/vendor/go.opentelemetry.io/otel/attribute/key.go b/vendor/go.opentelemetry.io/otel/attribute/key.go index d9a22c650..80a9e5643 100644 --- a/vendor/go.opentelemetry.io/otel/attribute/key.go +++ b/vendor/go.opentelemetry.io/otel/attribute/key.go @@ -117,7 +117,7 @@ func (k Key) StringSlice(v []string) KeyValue { } } -// Defined returns true for non-empty keys. +// Defined reports whether the key is not empty. 
func (k Key) Defined() bool { return len(k) != 0 } diff --git a/vendor/go.opentelemetry.io/otel/attribute/kv.go b/vendor/go.opentelemetry.io/otel/attribute/kv.go index 3028f9a40..8c6928ca7 100644 --- a/vendor/go.opentelemetry.io/otel/attribute/kv.go +++ b/vendor/go.opentelemetry.io/otel/attribute/kv.go @@ -13,7 +13,7 @@ type KeyValue struct { Value Value } -// Valid returns if kv is a valid OpenTelemetry attribute. +// Valid reports whether kv is a valid OpenTelemetry attribute. func (kv KeyValue) Valid() bool { return kv.Key.Defined() && kv.Value.Type() != INVALID } diff --git a/vendor/go.opentelemetry.io/otel/attribute/rawhelpers.go b/vendor/go.opentelemetry.io/otel/attribute/rawhelpers.go new file mode 100644 index 000000000..5791c6e7a --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/attribute/rawhelpers.go @@ -0,0 +1,37 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package attribute // import "go.opentelemetry.io/otel/attribute" + +import ( + "math" +) + +func boolToRaw(b bool) uint64 { // nolint:revive // b is not a control flag. + if b { + return 1 + } + return 0 +} + +func rawToBool(r uint64) bool { + return r != 0 +} + +func int64ToRaw(i int64) uint64 { + // Assumes original was a valid int64 (overflow not checked). + return uint64(i) // nolint: gosec +} + +func rawToInt64(r uint64) int64 { + // Assumes original was a valid int64 (overflow not checked). + return int64(r) // nolint: gosec +} + +func float64ToRaw(f float64) uint64 { + return math.Float64bits(f) +} + +func rawToFloat64(r uint64) float64 { + return math.Float64frombits(r) +} diff --git a/vendor/go.opentelemetry.io/otel/attribute/set.go b/vendor/go.opentelemetry.io/otel/attribute/set.go index 6cbefcead..64735d382 100644 --- a/vendor/go.opentelemetry.io/otel/attribute/set.go +++ b/vendor/go.opentelemetry.io/otel/attribute/set.go @@ -31,11 +31,11 @@ type ( // Distinct is a unique identifier of a Set. 
// - // Distinct is designed to be ensures equivalence stability: comparisons - // will return the save value across versions. For this reason, Distinct - // should always be used as a map key instead of a Set. + // Distinct is designed to ensure equivalence stability: comparisons will + // return the same value across versions. For this reason, Distinct should + // always be used as a map key instead of a Set. Distinct struct { - iface interface{} + iface any } // Sortable implements sort.Interface, used for sorting KeyValue. @@ -70,7 +70,7 @@ func (d Distinct) reflectValue() reflect.Value { return reflect.ValueOf(d.iface) } -// Valid returns true if this value refers to a valid Set. +// Valid reports whether this value refers to a valid Set. func (d Distinct) Valid() bool { return d.iface != nil } @@ -120,7 +120,7 @@ func (l *Set) Value(k Key) (Value, bool) { return Value{}, false } -// HasValue tests whether a key is defined in this set. +// HasValue reports whether a key is defined in this set. func (l *Set) HasValue(k Key) bool { if l == nil { return false @@ -155,7 +155,7 @@ func (l *Set) Equivalent() Distinct { return l.equivalent } -// Equals returns true if the argument set is equivalent to this set. +// Equals reports whether the argument set is equivalent to this set. func (l *Set) Equals(o *Set) bool { return l.Equivalent() == o.Equivalent() } @@ -344,7 +344,7 @@ func computeDistinct(kvs []KeyValue) Distinct { // computeDistinctFixed computes a Distinct for small slices. It returns nil // if the input is too large for this code path. -func computeDistinctFixed(kvs []KeyValue) interface{} { +func computeDistinctFixed(kvs []KeyValue) any { switch len(kvs) { case 1: return [1]KeyValue(kvs) @@ -373,7 +373,7 @@ func computeDistinctFixed(kvs []KeyValue) interface{} { // computeDistinctReflect computes a Distinct using reflection, works for any // size input. 
-func computeDistinctReflect(kvs []KeyValue) interface{} { +func computeDistinctReflect(kvs []KeyValue) any { at := reflect.New(reflect.ArrayOf(len(kvs), keyValueType)).Elem() for i, keyValue := range kvs { *(at.Index(i).Addr().Interface().(*KeyValue)) = keyValue @@ -387,7 +387,7 @@ func (l *Set) MarshalJSON() ([]byte, error) { } // MarshalLog is the marshaling function used by the logging system to represent this Set. -func (l Set) MarshalLog() interface{} { +func (l Set) MarshalLog() any { kvs := make(map[string]string) for _, kv := range l.ToSlice() { kvs[string(kv.Key)] = kv.Value.Emit() diff --git a/vendor/go.opentelemetry.io/otel/attribute/value.go b/vendor/go.opentelemetry.io/otel/attribute/value.go index 9ea0ecbbd..653c33a86 100644 --- a/vendor/go.opentelemetry.io/otel/attribute/value.go +++ b/vendor/go.opentelemetry.io/otel/attribute/value.go @@ -9,8 +9,7 @@ import ( "reflect" "strconv" - "go.opentelemetry.io/otel/internal" - "go.opentelemetry.io/otel/internal/attribute" + attribute "go.opentelemetry.io/otel/attribute/internal" ) //go:generate stringer -type=Type @@ -23,7 +22,7 @@ type Value struct { vtype Type numeric uint64 stringly string - slice interface{} + slice any } const ( @@ -51,7 +50,7 @@ const ( func BoolValue(v bool) Value { return Value{ vtype: BOOL, - numeric: internal.BoolToRaw(v), + numeric: boolToRaw(v), } } @@ -82,7 +81,7 @@ func IntSliceValue(v []int) Value { func Int64Value(v int64) Value { return Value{ vtype: INT64, - numeric: internal.Int64ToRaw(v), + numeric: int64ToRaw(v), } } @@ -95,7 +94,7 @@ func Int64SliceValue(v []int64) Value { func Float64Value(v float64) Value { return Value{ vtype: FLOAT64, - numeric: internal.Float64ToRaw(v), + numeric: float64ToRaw(v), } } @@ -125,7 +124,7 @@ func (v Value) Type() Type { // AsBool returns the bool value. Make sure that the Value's type is // BOOL. 
func (v Value) AsBool() bool { - return internal.RawToBool(v.numeric) + return rawToBool(v.numeric) } // AsBoolSlice returns the []bool value. Make sure that the Value's type is @@ -144,7 +143,7 @@ func (v Value) asBoolSlice() []bool { // AsInt64 returns the int64 value. Make sure that the Value's type is // INT64. func (v Value) AsInt64() int64 { - return internal.RawToInt64(v.numeric) + return rawToInt64(v.numeric) } // AsInt64Slice returns the []int64 value. Make sure that the Value's type is @@ -163,7 +162,7 @@ func (v Value) asInt64Slice() []int64 { // AsFloat64 returns the float64 value. Make sure that the Value's // type is FLOAT64. func (v Value) AsFloat64() float64 { - return internal.RawToFloat64(v.numeric) + return rawToFloat64(v.numeric) } // AsFloat64Slice returns the []float64 value. Make sure that the Value's type is @@ -200,8 +199,8 @@ func (v Value) asStringSlice() []string { type unknownValueType struct{} -// AsInterface returns Value's data as interface{}. -func (v Value) AsInterface() interface{} { +// AsInterface returns Value's data as any. +func (v Value) AsInterface() any { switch v.Type() { case BOOL: return v.AsBool() @@ -263,7 +262,7 @@ func (v Value) Emit() string { func (v Value) MarshalJSON() ([]byte, error) { var jsonVal struct { Type string - Value interface{} + Value any } jsonVal.Type = v.Type().String() jsonVal.Value = v.AsInterface() diff --git a/vendor/go.opentelemetry.io/otel/baggage/baggage.go b/vendor/go.opentelemetry.io/otel/baggage/baggage.go index 0e1fe2422..f83a448ec 100644 --- a/vendor/go.opentelemetry.io/otel/baggage/baggage.go +++ b/vendor/go.opentelemetry.io/otel/baggage/baggage.go @@ -812,7 +812,7 @@ var safeKeyCharset = [utf8.RuneSelf]bool{ // validateBaggageName checks if the string is a valid OpenTelemetry Baggage name. // Baggage name is a valid, non-empty UTF-8 string. 
func validateBaggageName(s string) bool { - if len(s) == 0 { + if s == "" { return false } @@ -828,7 +828,7 @@ func validateBaggageValue(s string) bool { // validateKey checks if the string is a valid W3C Baggage key. func validateKey(s string) bool { - if len(s) == 0 { + if s == "" { return false } diff --git a/vendor/go.opentelemetry.io/otel/codes/codes.go b/vendor/go.opentelemetry.io/otel/codes/codes.go index 49a35b122..d48847ed8 100644 --- a/vendor/go.opentelemetry.io/otel/codes/codes.go +++ b/vendor/go.opentelemetry.io/otel/codes/codes.go @@ -67,7 +67,7 @@ func (c *Code) UnmarshalJSON(b []byte) error { return errors.New("nil receiver passed to UnmarshalJSON") } - var x interface{} + var x any if err := json.Unmarshal(b, &x); err != nil { return err } @@ -102,5 +102,5 @@ func (c *Code) MarshalJSON() ([]byte, error) { if !ok { return nil, fmt.Errorf("invalid code: %d", *c) } - return []byte(fmt.Sprintf("%q", str)), nil + return fmt.Appendf(nil, "%q", str), nil } diff --git a/vendor/go.opentelemetry.io/otel/dependencies.Dockerfile b/vendor/go.opentelemetry.io/otel/dependencies.Dockerfile new file mode 100644 index 000000000..a311fbb48 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/dependencies.Dockerfile @@ -0,0 +1,4 @@ +# This is a renovate-friendly source of Docker images. 
+FROM python:3.13.6-slim-bullseye@sha256:e98b521460ee75bca92175c16247bdf7275637a8faaeb2bcfa19d879ae5c4b9a AS python +FROM otel/weaver:v0.17.1@sha256:32523b5e44fb44418786347e9f7dde187d8797adb6d57a2ee99c245346c3cdfe AS weaver +FROM avtodev/markdown-lint:v1@sha256:6aeedc2f49138ce7a1cd0adffc1b1c0321b841dc2102408967d9301c031949ee AS markdown diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/attribute.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/attribute.go index 4571a5ca3..ca4544f0d 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/attribute.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/attribute.go @@ -1,6 +1,8 @@ // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 +// Package tracetransform provides conversion functionality for the otlptrace +// exporters. package tracetransform // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform" import ( diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go index 2171bee3c..8236c995a 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go @@ -223,7 +223,7 @@ func (c *client) exportContext(parent context.Context) (context.Context, context ) if c.exportTimeout > 0 { - ctx, cancel = context.WithTimeout(parent, c.exportTimeout) + ctx, cancel = context.WithTimeoutCause(parent, c.exportTimeout, errors.New("exporter export timeout")) } else { ctx, cancel = context.WithCancel(parent) } @@ -294,7 +294,7 @@ func (c *client) MarshalLog() interface{} { Type string Endpoint string }{ - Type: "otlphttpgrpc", + Type: "otlptracegrpc", Endpoint: c.endpoint, } } diff --git 
a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig/envconfig.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig/envconfig.go index 4abf48d1f..6eacdf311 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig/envconfig.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig/envconfig.go @@ -1,9 +1,11 @@ -// Code created by gotmpl. DO NOT MODIFY. +// Code generated by gotmpl. DO NOT MODIFY. // source: internal/shared/otlp/envconfig/envconfig.go.tmpl // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 +// Package envconfig provides functionality to parse configuration from +// environment variables. package envconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig" import ( diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/gen.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/gen.go index 97cd6c54f..b6e6b10fb 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/gen.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/gen.go @@ -1,6 +1,7 @@ // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 +// Package internal provides internal functionally for the otlptracegrpc package. 
package internal // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal" //go:generate gotmpl --body=../../../../../internal/shared/otlp/partialsuccess.go.tmpl "--data={}" --out=partialsuccess.go diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/envconfig.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/envconfig.go index 7bb189a94..1d840be20 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/envconfig.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/envconfig.go @@ -1,4 +1,4 @@ -// Code created by gotmpl. DO NOT MODIFY. +// Code generated by gotmpl. DO NOT MODIFY. // source: internal/shared/otlp/otlptrace/otlpconfig/envconfig.go.tmpl // Copyright The OpenTelemetry Authors @@ -77,8 +77,16 @@ func getOptionsFromEnv() []GenericOption { }), envconfig.WithCertPool("CERTIFICATE", func(p *x509.CertPool) { tlsConf.RootCAs = p }), envconfig.WithCertPool("TRACES_CERTIFICATE", func(p *x509.CertPool) { tlsConf.RootCAs = p }), - envconfig.WithClientCert("CLIENT_CERTIFICATE", "CLIENT_KEY", func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} }), - envconfig.WithClientCert("TRACES_CLIENT_CERTIFICATE", "TRACES_CLIENT_KEY", func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} }), + envconfig.WithClientCert( + "CLIENT_CERTIFICATE", + "CLIENT_KEY", + func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} }, + ), + envconfig.WithClientCert( + "TRACES_CLIENT_CERTIFICATE", + "TRACES_CLIENT_KEY", + func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} }, + ), withTLSConfig(tlsConf, func(c *tls.Config) { opts = append(opts, WithTLSClientConfig(c)) }), envconfig.WithBool("INSECURE", func(b bool) { opts = append(opts, withInsecure(b)) }), envconfig.WithBool("TRACES_INSECURE", func(b bool) { opts 
= append(opts, withInsecure(b)) }), diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/options.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/options.go index 0a317d926..4f47117a5 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/options.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/options.go @@ -1,9 +1,10 @@ -// Code created by gotmpl. DO NOT MODIFY. +// Code generated by gotmpl. DO NOT MODIFY. // source: internal/shared/otlp/otlptrace/otlpconfig/options.go.tmpl // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 +// Package otlpconfig provides configuration for the otlptrace exporters. package otlpconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig" import ( @@ -52,7 +53,9 @@ type ( // gRPC configurations GRPCCredentials credentials.TransportCredentials - Proxy HTTPTransportProxyFunc + // HTTP configurations + Proxy HTTPTransportProxyFunc + HTTPClient *http.Client } Config struct { @@ -89,12 +92,11 @@ func NewHTTPConfig(opts ...HTTPOption) Config { return cfg } -// cleanPath returns a path with all spaces trimmed and all redundancies -// removed. If urlPath is empty or cleaning it results in an empty string, +// cleanPath returns a path with all spaces trimmed. If urlPath is empty, // defaultPath is returned instead. func cleanPath(urlPath string, defaultPath string) string { - tmp := path.Clean(strings.TrimSpace(urlPath)) - if tmp == "." { + tmp := strings.TrimSpace(urlPath) + if tmp == "" || tmp == "." 
{ return defaultPath } if !path.IsAbs(tmp) { @@ -349,3 +351,10 @@ func WithProxy(pf HTTPTransportProxyFunc) GenericOption { return cfg }) } + +func WithHTTPClient(c *http.Client) GenericOption { + return newGenericOption(func(cfg Config) Config { + cfg.Traces.HTTPClient = c + return cfg + }) +} diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/optiontypes.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/optiontypes.go index 3d4f699d4..918490387 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/optiontypes.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/optiontypes.go @@ -1,4 +1,4 @@ -// Code created by gotmpl. DO NOT MODIFY. +// Code generated by gotmpl. DO NOT MODIFY. // source: internal/shared/otlp/otlptrace/otlpconfig/optiontypes.go.tmpl // Copyright The OpenTelemetry Authors diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/tls.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/tls.go index 38b97a013..ba6e41183 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/tls.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/tls.go @@ -1,4 +1,4 @@ -// Code created by gotmpl. DO NOT MODIFY. +// Code generated by gotmpl. DO NOT MODIFY. 
// source: internal/shared/otlp/otlptrace/otlpconfig/tls.go.tmpl // Copyright The OpenTelemetry Authors diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/partialsuccess.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/partialsuccess.go index a12ea4c48..1c4659423 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/partialsuccess.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/partialsuccess.go @@ -1,4 +1,4 @@ -// Code created by gotmpl. DO NOT MODIFY. +// Code generated by gotmpl. DO NOT MODIFY. // source: internal/shared/otlp/partialsuccess.go // Copyright The OpenTelemetry Authors diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry/retry.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry/retry.go index 1c5450ab6..259a898ae 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry/retry.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry/retry.go @@ -1,4 +1,4 @@ -// Code created by gotmpl. DO NOT MODIFY. +// Code generated by gotmpl. DO NOT MODIFY. // source: internal/shared/otlp/retry/retry.go.tmpl // Copyright The OpenTelemetry Authors @@ -14,7 +14,7 @@ import ( "fmt" "time" - "github.com/cenkalti/backoff/v4" + "github.com/cenkalti/backoff/v5" ) // DefaultConfig are the recommended defaults to use. 
@@ -77,12 +77,12 @@ func (c Config) RequestFunc(evaluate EvaluateFunc) RequestFunc { RandomizationFactor: backoff.DefaultRandomizationFactor, Multiplier: backoff.DefaultMultiplier, MaxInterval: c.MaxInterval, - MaxElapsedTime: c.MaxElapsedTime, - Stop: backoff.Stop, - Clock: backoff.SystemClock, } b.Reset() + maxElapsedTime := c.MaxElapsedTime + startTime := time.Now() + for { err := fn(ctx) if err == nil { @@ -94,21 +94,17 @@ func (c Config) RequestFunc(evaluate EvaluateFunc) RequestFunc { return err } - bOff := b.NextBackOff() - if bOff == backoff.Stop { + if maxElapsedTime != 0 && time.Since(startTime) > maxElapsedTime { return fmt.Errorf("max retry time elapsed: %w", err) } // Wait for the greater of the backoff or throttle delay. - var delay time.Duration - if bOff > throttle { - delay = bOff - } else { - elapsed := b.GetElapsedTime() - if b.MaxElapsedTime != 0 && elapsed+throttle > b.MaxElapsedTime { - return fmt.Errorf("max retry time would elapse: %w", err) - } - delay = throttle + bOff := b.NextBackOff() + delay := max(throttle, bOff) + + elapsed := time.Since(startTime) + if maxElapsedTime != 0 && elapsed+throttle > maxElapsedTime { + return fmt.Errorf("max retry time would elapse: %w", err) } if ctxErr := waitFunc(ctx, delay); ctxErr != nil { @@ -136,7 +132,7 @@ func wait(ctx context.Context, delay time.Duration) error { select { case <-timer.C: default: - return ctx.Err() + return context.Cause(ctx) } case <-timer.C: } diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/options.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/options.go index 00ab1f20c..2da229870 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/options.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/options.go @@ -199,8 +199,9 @@ func WithTimeout(duration time.Duration) Option { // explicitly returns a backoff time in the response. 
That time will take // precedence over these settings. // -// These settings do not define any network retry strategy. That is entirely -// handled by the gRPC ClientConn. +// These settings define the retry strategy implemented by the exporter. +// These settings do not define any network retry strategy. +// That is handled by the gRPC ClientConn. // // If unset, the default retry policy will be used. It will retry the export // 5 seconds after receiving a retryable error and increase exponentially diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go index f156ee667..ed2ddce71 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go @@ -5,5 +5,5 @@ package otlptrace // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace" // Version is the current release version of the OpenTelemetry OTLP trace exporter in use. func Version() string { - return "1.34.0" + return "1.37.0" } diff --git a/vendor/go.opentelemetry.io/otel/get_main_pkgs.sh b/vendor/go.opentelemetry.io/otel/get_main_pkgs.sh deleted file mode 100644 index 93e80ea30..000000000 --- a/vendor/go.opentelemetry.io/otel/get_main_pkgs.sh +++ /dev/null @@ -1,30 +0,0 @@ -#!/usr/bin/env bash - -# Copyright The OpenTelemetry Authors -# SPDX-License-Identifier: Apache-2.0 - -set -euo pipefail - -top_dir='.' 
-if [[ $# -gt 0 ]]; then - top_dir="${1}" -fi - -p=$(pwd) -mod_dirs=() - -# Note `mapfile` does not exist in older bash versions: -# https://stackoverflow.com/questions/41475261/need-alternative-to-readarray-mapfile-for-script-on-older-version-of-bash - -while IFS= read -r line; do - mod_dirs+=("$line") -done < <(find "${top_dir}" -type f -name 'go.mod' -exec dirname {} \; | sort) - -for mod_dir in "${mod_dirs[@]}"; do - cd "${mod_dir}" - - while IFS= read -r line; do - echo ".${line#${p}}" - done < <(go list --find -f '{{.Name}}|{{.Dir}}' ./... | grep '^main|' | cut -f 2- -d '|') - cd "${p}" -done diff --git a/vendor/go.opentelemetry.io/otel/internal/gen.go b/vendor/go.opentelemetry.io/otel/internal/gen.go deleted file mode 100644 index 4259f0320..000000000 --- a/vendor/go.opentelemetry.io/otel/internal/gen.go +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package internal // import "go.opentelemetry.io/otel/internal" - -//go:generate gotmpl --body=./shared/matchers/expectation.go.tmpl "--data={}" --out=matchers/expectation.go -//go:generate gotmpl --body=./shared/matchers/expecter.go.tmpl "--data={}" --out=matchers/expecter.go -//go:generate gotmpl --body=./shared/matchers/temporal_matcher.go.tmpl "--data={}" --out=matchers/temporal_matcher.go - -//go:generate gotmpl --body=./shared/internaltest/alignment.go.tmpl "--data={}" --out=internaltest/alignment.go -//go:generate gotmpl --body=./shared/internaltest/env.go.tmpl "--data={}" --out=internaltest/env.go -//go:generate gotmpl --body=./shared/internaltest/env_test.go.tmpl "--data={}" --out=internaltest/env_test.go -//go:generate gotmpl --body=./shared/internaltest/errors.go.tmpl "--data={}" --out=internaltest/errors.go -//go:generate gotmpl --body=./shared/internaltest/harness.go.tmpl "--data={\"matchersImportPath\": \"go.opentelemetry.io/otel/internal/matchers\"}" --out=internaltest/harness.go -//go:generate gotmpl 
--body=./shared/internaltest/text_map_carrier.go.tmpl "--data={}" --out=internaltest/text_map_carrier.go -//go:generate gotmpl --body=./shared/internaltest/text_map_carrier_test.go.tmpl "--data={}" --out=internaltest/text_map_carrier_test.go -//go:generate gotmpl --body=./shared/internaltest/text_map_propagator.go.tmpl "--data={}" --out=internaltest/text_map_propagator.go -//go:generate gotmpl --body=./shared/internaltest/text_map_propagator_test.go.tmpl "--data={}" --out=internaltest/text_map_propagator_test.go diff --git a/vendor/go.opentelemetry.io/otel/internal/global/handler.go b/vendor/go.opentelemetry.io/otel/internal/global/handler.go index c657ff8e7..2e47b2964 100644 --- a/vendor/go.opentelemetry.io/otel/internal/global/handler.go +++ b/vendor/go.opentelemetry.io/otel/internal/global/handler.go @@ -1,6 +1,7 @@ // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 +// Package global provides the OpenTelemetry global API. package global // import "go.opentelemetry.io/otel/internal/global" import ( diff --git a/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go b/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go index adbca7d34..86d7f4ba0 100644 --- a/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go +++ b/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go @@ -41,22 +41,22 @@ func GetLogger() logr.Logger { // Info prints messages about the general state of the API or SDK. // This should usually be less than 5 messages a minute. -func Info(msg string, keysAndValues ...interface{}) { +func Info(msg string, keysAndValues ...any) { GetLogger().V(4).Info(msg, keysAndValues...) } // Error prints messages about exceptional states of the API or SDK. -func Error(err error, msg string, keysAndValues ...interface{}) { +func Error(err error, msg string, keysAndValues ...any) { GetLogger().Error(err, msg, keysAndValues...) 
} // Debug prints messages about all internal changes in the API or SDK. -func Debug(msg string, keysAndValues ...interface{}) { +func Debug(msg string, keysAndValues ...any) { GetLogger().V(8).Info(msg, keysAndValues...) } // Warn prints messages about warnings in the API or SDK. // Not an error but is likely more important than an informational event. -func Warn(msg string, keysAndValues ...interface{}) { +func Warn(msg string, keysAndValues ...any) { GetLogger().V(1).Info(msg, keysAndValues...) } diff --git a/vendor/go.opentelemetry.io/otel/internal/global/meter.go b/vendor/go.opentelemetry.io/otel/internal/global/meter.go index a6acd8dca..adb37b5b0 100644 --- a/vendor/go.opentelemetry.io/otel/internal/global/meter.go +++ b/vendor/go.opentelemetry.io/otel/internal/global/meter.go @@ -169,7 +169,10 @@ func (m *meter) Int64Counter(name string, options ...metric.Int64CounterOption) return i, nil } -func (m *meter) Int64UpDownCounter(name string, options ...metric.Int64UpDownCounterOption) (metric.Int64UpDownCounter, error) { +func (m *meter) Int64UpDownCounter( + name string, + options ...metric.Int64UpDownCounterOption, +) (metric.Int64UpDownCounter, error) { m.mtx.Lock() defer m.mtx.Unlock() @@ -238,7 +241,10 @@ func (m *meter) Int64Gauge(name string, options ...metric.Int64GaugeOption) (met return i, nil } -func (m *meter) Int64ObservableCounter(name string, options ...metric.Int64ObservableCounterOption) (metric.Int64ObservableCounter, error) { +func (m *meter) Int64ObservableCounter( + name string, + options ...metric.Int64ObservableCounterOption, +) (metric.Int64ObservableCounter, error) { m.mtx.Lock() defer m.mtx.Unlock() @@ -261,7 +267,10 @@ func (m *meter) Int64ObservableCounter(name string, options ...metric.Int64Obser return i, nil } -func (m *meter) Int64ObservableUpDownCounter(name string, options ...metric.Int64ObservableUpDownCounterOption) (metric.Int64ObservableUpDownCounter, error) { +func (m *meter) Int64ObservableUpDownCounter( + name string, + 
options ...metric.Int64ObservableUpDownCounterOption, +) (metric.Int64ObservableUpDownCounter, error) { m.mtx.Lock() defer m.mtx.Unlock() @@ -284,7 +293,10 @@ func (m *meter) Int64ObservableUpDownCounter(name string, options ...metric.Int6 return i, nil } -func (m *meter) Int64ObservableGauge(name string, options ...metric.Int64ObservableGaugeOption) (metric.Int64ObservableGauge, error) { +func (m *meter) Int64ObservableGauge( + name string, + options ...metric.Int64ObservableGaugeOption, +) (metric.Int64ObservableGauge, error) { m.mtx.Lock() defer m.mtx.Unlock() @@ -330,7 +342,10 @@ func (m *meter) Float64Counter(name string, options ...metric.Float64CounterOpti return i, nil } -func (m *meter) Float64UpDownCounter(name string, options ...metric.Float64UpDownCounterOption) (metric.Float64UpDownCounter, error) { +func (m *meter) Float64UpDownCounter( + name string, + options ...metric.Float64UpDownCounterOption, +) (metric.Float64UpDownCounter, error) { m.mtx.Lock() defer m.mtx.Unlock() @@ -353,7 +368,10 @@ func (m *meter) Float64UpDownCounter(name string, options ...metric.Float64UpDow return i, nil } -func (m *meter) Float64Histogram(name string, options ...metric.Float64HistogramOption) (metric.Float64Histogram, error) { +func (m *meter) Float64Histogram( + name string, + options ...metric.Float64HistogramOption, +) (metric.Float64Histogram, error) { m.mtx.Lock() defer m.mtx.Unlock() @@ -399,7 +417,10 @@ func (m *meter) Float64Gauge(name string, options ...metric.Float64GaugeOption) return i, nil } -func (m *meter) Float64ObservableCounter(name string, options ...metric.Float64ObservableCounterOption) (metric.Float64ObservableCounter, error) { +func (m *meter) Float64ObservableCounter( + name string, + options ...metric.Float64ObservableCounterOption, +) (metric.Float64ObservableCounter, error) { m.mtx.Lock() defer m.mtx.Unlock() @@ -422,7 +443,10 @@ func (m *meter) Float64ObservableCounter(name string, options ...metric.Float64O return i, nil } -func (m *meter) 
Float64ObservableUpDownCounter(name string, options ...metric.Float64ObservableUpDownCounterOption) (metric.Float64ObservableUpDownCounter, error) { +func (m *meter) Float64ObservableUpDownCounter( + name string, + options ...metric.Float64ObservableUpDownCounterOption, +) (metric.Float64ObservableUpDownCounter, error) { m.mtx.Lock() defer m.mtx.Unlock() @@ -445,7 +469,10 @@ func (m *meter) Float64ObservableUpDownCounter(name string, options ...metric.Fl return i, nil } -func (m *meter) Float64ObservableGauge(name string, options ...metric.Float64ObservableGaugeOption) (metric.Float64ObservableGauge, error) { +func (m *meter) Float64ObservableGauge( + name string, + options ...metric.Float64ObservableGaugeOption, +) (metric.Float64ObservableGauge, error) { m.mtx.Lock() defer m.mtx.Unlock() diff --git a/vendor/go.opentelemetry.io/otel/internal/global/trace.go b/vendor/go.opentelemetry.io/otel/internal/global/trace.go index 8982aa0dc..bf5cf3119 100644 --- a/vendor/go.opentelemetry.io/otel/internal/global/trace.go +++ b/vendor/go.opentelemetry.io/otel/internal/global/trace.go @@ -26,6 +26,7 @@ import ( "sync/atomic" "go.opentelemetry.io/auto/sdk" + "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" "go.opentelemetry.io/otel/trace" @@ -158,7 +159,18 @@ func (t *tracer) Start(ctx context.Context, name string, opts ...trace.SpanStart // a nonRecordingSpan by default. var autoInstEnabled = new(bool) -func (t *tracer) newSpan(ctx context.Context, autoSpan *bool, name string, opts []trace.SpanStartOption) (context.Context, trace.Span) { +// newSpan is called by tracer.Start so auto-instrumentation can attach an eBPF +// uprobe to this code. +// +// "noinline" pragma prevents the method from ever being inlined. +// +//go:noinline +func (t *tracer) newSpan( + ctx context.Context, + autoSpan *bool, + name string, + opts []trace.SpanStartOption, +) (context.Context, trace.Span) { // autoInstEnabled is passed to newSpan via the autoSpan parameter. 
This is // so the auto-instrumentation can define a uprobe for (*t).newSpan and be // provided with the address of the bool autoInstEnabled points to. It diff --git a/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go b/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go deleted file mode 100644 index b2fe3e41d..000000000 --- a/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go +++ /dev/null @@ -1,48 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package internal // import "go.opentelemetry.io/otel/internal" - -import ( - "math" - "unsafe" -) - -func BoolToRaw(b bool) uint64 { // nolint:revive // b is not a control flag. - if b { - return 1 - } - return 0 -} - -func RawToBool(r uint64) bool { - return r != 0 -} - -func Int64ToRaw(i int64) uint64 { - // Assumes original was a valid int64 (overflow not checked). - return uint64(i) // nolint: gosec -} - -func RawToInt64(r uint64) int64 { - // Assumes original was a valid int64 (overflow not checked). - return int64(r) // nolint: gosec -} - -func Float64ToRaw(f float64) uint64 { - return math.Float64bits(f) -} - -func RawToFloat64(r uint64) float64 { - return math.Float64frombits(r) -} - -func RawPtrToFloat64Ptr(r *uint64) *float64 { - // Assumes original was a valid *float64 (overflow not checked). - return (*float64)(unsafe.Pointer(r)) // nolint: gosec -} - -func RawPtrToInt64Ptr(r *uint64) *int64 { - // Assumes original was a valid *int64 (overflow not checked). - return (*int64)(unsafe.Pointer(r)) // nolint: gosec -} diff --git a/vendor/go.opentelemetry.io/otel/metric/LICENSE b/vendor/go.opentelemetry.io/otel/metric/LICENSE index 261eeb9e9..f1aee0f11 100644 --- a/vendor/go.opentelemetry.io/otel/metric/LICENSE +++ b/vendor/go.opentelemetry.io/otel/metric/LICENSE @@ -199,3 +199,33 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+ +-------------------------------------------------------------------------------- + +Copyright 2009 The Go Authors. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google LLC nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
\ No newline at end of file diff --git a/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go b/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go index f8435d8f2..b7fc973a6 100644 --- a/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go +++ b/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go @@ -106,7 +106,9 @@ type Float64ObservableUpDownCounterConfig struct { // NewFloat64ObservableUpDownCounterConfig returns a new // [Float64ObservableUpDownCounterConfig] with all opts applied. -func NewFloat64ObservableUpDownCounterConfig(opts ...Float64ObservableUpDownCounterOption) Float64ObservableUpDownCounterConfig { +func NewFloat64ObservableUpDownCounterConfig( + opts ...Float64ObservableUpDownCounterOption, +) Float64ObservableUpDownCounterConfig { var config Float64ObservableUpDownCounterConfig for _, o := range opts { config = o.applyFloat64ObservableUpDownCounter(config) @@ -239,12 +241,16 @@ type float64CallbackOpt struct { cback Float64Callback } -func (o float64CallbackOpt) applyFloat64ObservableCounter(cfg Float64ObservableCounterConfig) Float64ObservableCounterConfig { +func (o float64CallbackOpt) applyFloat64ObservableCounter( + cfg Float64ObservableCounterConfig, +) Float64ObservableCounterConfig { cfg.callbacks = append(cfg.callbacks, o.cback) return cfg } -func (o float64CallbackOpt) applyFloat64ObservableUpDownCounter(cfg Float64ObservableUpDownCounterConfig) Float64ObservableUpDownCounterConfig { +func (o float64CallbackOpt) applyFloat64ObservableUpDownCounter( + cfg Float64ObservableUpDownCounterConfig, +) Float64ObservableUpDownCounterConfig { cfg.callbacks = append(cfg.callbacks, o.cback) return cfg } diff --git a/vendor/go.opentelemetry.io/otel/metric/asyncint64.go b/vendor/go.opentelemetry.io/otel/metric/asyncint64.go index e079aaef1..4404b71a2 100644 --- a/vendor/go.opentelemetry.io/otel/metric/asyncint64.go +++ b/vendor/go.opentelemetry.io/otel/metric/asyncint64.go @@ -105,7 +105,9 @@ type Int64ObservableUpDownCounterConfig struct { 
// NewInt64ObservableUpDownCounterConfig returns a new // [Int64ObservableUpDownCounterConfig] with all opts applied. -func NewInt64ObservableUpDownCounterConfig(opts ...Int64ObservableUpDownCounterOption) Int64ObservableUpDownCounterConfig { +func NewInt64ObservableUpDownCounterConfig( + opts ...Int64ObservableUpDownCounterOption, +) Int64ObservableUpDownCounterConfig { var config Int64ObservableUpDownCounterConfig for _, o := range opts { config = o.applyInt64ObservableUpDownCounter(config) @@ -242,7 +244,9 @@ func (o int64CallbackOpt) applyInt64ObservableCounter(cfg Int64ObservableCounter return cfg } -func (o int64CallbackOpt) applyInt64ObservableUpDownCounter(cfg Int64ObservableUpDownCounterConfig) Int64ObservableUpDownCounterConfig { +func (o int64CallbackOpt) applyInt64ObservableUpDownCounter( + cfg Int64ObservableUpDownCounterConfig, +) Int64ObservableUpDownCounterConfig { cfg.callbacks = append(cfg.callbacks, o.cback) return cfg } diff --git a/vendor/go.opentelemetry.io/otel/metric/instrument.go b/vendor/go.opentelemetry.io/otel/metric/instrument.go index a535782e1..9f48d5f11 100644 --- a/vendor/go.opentelemetry.io/otel/metric/instrument.go +++ b/vendor/go.opentelemetry.io/otel/metric/instrument.go @@ -63,7 +63,9 @@ func (o descOpt) applyFloat64ObservableCounter(c Float64ObservableCounterConfig) return c } -func (o descOpt) applyFloat64ObservableUpDownCounter(c Float64ObservableUpDownCounterConfig) Float64ObservableUpDownCounterConfig { +func (o descOpt) applyFloat64ObservableUpDownCounter( + c Float64ObservableUpDownCounterConfig, +) Float64ObservableUpDownCounterConfig { c.description = string(o) return c } @@ -98,7 +100,9 @@ func (o descOpt) applyInt64ObservableCounter(c Int64ObservableCounterConfig) Int return c } -func (o descOpt) applyInt64ObservableUpDownCounter(c Int64ObservableUpDownCounterConfig) Int64ObservableUpDownCounterConfig { +func (o descOpt) applyInt64ObservableUpDownCounter( + c Int64ObservableUpDownCounterConfig, +) 
Int64ObservableUpDownCounterConfig { c.description = string(o) return c } @@ -138,7 +142,9 @@ func (o unitOpt) applyFloat64ObservableCounter(c Float64ObservableCounterConfig) return c } -func (o unitOpt) applyFloat64ObservableUpDownCounter(c Float64ObservableUpDownCounterConfig) Float64ObservableUpDownCounterConfig { +func (o unitOpt) applyFloat64ObservableUpDownCounter( + c Float64ObservableUpDownCounterConfig, +) Float64ObservableUpDownCounterConfig { c.unit = string(o) return c } @@ -173,7 +179,9 @@ func (o unitOpt) applyInt64ObservableCounter(c Int64ObservableCounterConfig) Int return c } -func (o unitOpt) applyInt64ObservableUpDownCounter(c Int64ObservableUpDownCounterConfig) Int64ObservableUpDownCounterConfig { +func (o unitOpt) applyInt64ObservableUpDownCounter( + c Int64ObservableUpDownCounterConfig, +) Int64ObservableUpDownCounterConfig { c.unit = string(o) return c } diff --git a/vendor/go.opentelemetry.io/otel/metric/meter.go b/vendor/go.opentelemetry.io/otel/metric/meter.go index 14e08c24a..fdd2a7011 100644 --- a/vendor/go.opentelemetry.io/otel/metric/meter.go +++ b/vendor/go.opentelemetry.io/otel/metric/meter.go @@ -110,7 +110,10 @@ type Meter interface { // The name needs to conform to the OpenTelemetry instrument name syntax. // See the Instrument Name section of the package documentation for more // information. - Int64ObservableUpDownCounter(name string, options ...Int64ObservableUpDownCounterOption) (Int64ObservableUpDownCounter, error) + Int64ObservableUpDownCounter( + name string, + options ...Int64ObservableUpDownCounterOption, + ) (Int64ObservableUpDownCounter, error) // Int64ObservableGauge returns a new Int64ObservableGauge instrument // identified by name and configured with options. The instrument is used @@ -194,7 +197,10 @@ type Meter interface { // The name needs to conform to the OpenTelemetry instrument name syntax. // See the Instrument Name section of the package documentation for more // information. 
- Float64ObservableUpDownCounter(name string, options ...Float64ObservableUpDownCounterOption) (Float64ObservableUpDownCounter, error) + Float64ObservableUpDownCounter( + name string, + options ...Float64ObservableUpDownCounterOption, + ) (Float64ObservableUpDownCounter, error) // Float64ObservableGauge returns a new Float64ObservableGauge instrument // identified by name and configured with options. The instrument is used diff --git a/vendor/go.opentelemetry.io/otel/metric/noop/noop.go b/vendor/go.opentelemetry.io/otel/metric/noop/noop.go index ca6fcbdc0..9afb69e58 100644 --- a/vendor/go.opentelemetry.io/otel/metric/noop/noop.go +++ b/vendor/go.opentelemetry.io/otel/metric/noop/noop.go @@ -86,13 +86,19 @@ func (Meter) Int64Gauge(string, ...metric.Int64GaugeOption) (metric.Int64Gauge, // Int64ObservableCounter returns an ObservableCounter used to record int64 // measurements that produces no telemetry. -func (Meter) Int64ObservableCounter(string, ...metric.Int64ObservableCounterOption) (metric.Int64ObservableCounter, error) { +func (Meter) Int64ObservableCounter( + string, + ...metric.Int64ObservableCounterOption, +) (metric.Int64ObservableCounter, error) { return Int64ObservableCounter{}, nil } // Int64ObservableUpDownCounter returns an ObservableUpDownCounter used to // record int64 measurements that produces no telemetry. -func (Meter) Int64ObservableUpDownCounter(string, ...metric.Int64ObservableUpDownCounterOption) (metric.Int64ObservableUpDownCounter, error) { +func (Meter) Int64ObservableUpDownCounter( + string, + ...metric.Int64ObservableUpDownCounterOption, +) (metric.Int64ObservableUpDownCounter, error) { return Int64ObservableUpDownCounter{}, nil } @@ -128,19 +134,28 @@ func (Meter) Float64Gauge(string, ...metric.Float64GaugeOption) (metric.Float64G // Float64ObservableCounter returns an ObservableCounter used to record int64 // measurements that produces no telemetry. 
-func (Meter) Float64ObservableCounter(string, ...metric.Float64ObservableCounterOption) (metric.Float64ObservableCounter, error) { +func (Meter) Float64ObservableCounter( + string, + ...metric.Float64ObservableCounterOption, +) (metric.Float64ObservableCounter, error) { return Float64ObservableCounter{}, nil } // Float64ObservableUpDownCounter returns an ObservableUpDownCounter used to // record int64 measurements that produces no telemetry. -func (Meter) Float64ObservableUpDownCounter(string, ...metric.Float64ObservableUpDownCounterOption) (metric.Float64ObservableUpDownCounter, error) { +func (Meter) Float64ObservableUpDownCounter( + string, + ...metric.Float64ObservableUpDownCounterOption, +) (metric.Float64ObservableUpDownCounter, error) { return Float64ObservableUpDownCounter{}, nil } // Float64ObservableGauge returns an ObservableGauge used to record int64 // measurements that produces no telemetry. -func (Meter) Float64ObservableGauge(string, ...metric.Float64ObservableGaugeOption) (metric.Float64ObservableGauge, error) { +func (Meter) Float64ObservableGauge( + string, + ...metric.Float64ObservableGaugeOption, +) (metric.Float64ObservableGauge, error) { return Float64ObservableGauge{}, nil } diff --git a/vendor/go.opentelemetry.io/otel/propagation/baggage.go b/vendor/go.opentelemetry.io/otel/propagation/baggage.go index 552263ba7..051882602 100644 --- a/vendor/go.opentelemetry.io/otel/propagation/baggage.go +++ b/vendor/go.opentelemetry.io/otel/propagation/baggage.go @@ -20,7 +20,7 @@ type Baggage struct{} var _ TextMapPropagator = Baggage{} // Inject sets baggage key-values from ctx into the carrier. 
-func (b Baggage) Inject(ctx context.Context, carrier TextMapCarrier) { +func (Baggage) Inject(ctx context.Context, carrier TextMapCarrier) { bStr := baggage.FromContext(ctx).String() if bStr != "" { carrier.Set(baggageHeader, bStr) @@ -28,7 +28,21 @@ func (b Baggage) Inject(ctx context.Context, carrier TextMapCarrier) { } // Extract returns a copy of parent with the baggage from the carrier added. -func (b Baggage) Extract(parent context.Context, carrier TextMapCarrier) context.Context { +// If carrier implements [ValuesGetter] (e.g. [HeaderCarrier]), Values is invoked +// for multiple values extraction. Otherwise, Get is called. +func (Baggage) Extract(parent context.Context, carrier TextMapCarrier) context.Context { + if multiCarrier, ok := carrier.(ValuesGetter); ok { + return extractMultiBaggage(parent, multiCarrier) + } + return extractSingleBaggage(parent, carrier) +} + +// Fields returns the keys who's values are set with Inject. +func (Baggage) Fields() []string { + return []string{baggageHeader} +} + +func extractSingleBaggage(parent context.Context, carrier TextMapCarrier) context.Context { bStr := carrier.Get(baggageHeader) if bStr == "" { return parent @@ -41,7 +55,23 @@ func (b Baggage) Extract(parent context.Context, carrier TextMapCarrier) context return baggage.ContextWithBaggage(parent, bag) } -// Fields returns the keys who's values are set with Inject. -func (b Baggage) Fields() []string { - return []string{baggageHeader} +func extractMultiBaggage(parent context.Context, carrier ValuesGetter) context.Context { + bVals := carrier.Values(baggageHeader) + if len(bVals) == 0 { + return parent + } + var members []baggage.Member + for _, bStr := range bVals { + currBag, err := baggage.Parse(bStr) + if err != nil { + continue + } + members = append(members, currBag.Members()...) + } + + b, err := baggage.New(members...) 
+ if err != nil || b.Len() == 0 { + return parent + } + return baggage.ContextWithBaggage(parent, b) } diff --git a/vendor/go.opentelemetry.io/otel/propagation/propagation.go b/vendor/go.opentelemetry.io/otel/propagation/propagation.go index 8c8286aab..0a32c59aa 100644 --- a/vendor/go.opentelemetry.io/otel/propagation/propagation.go +++ b/vendor/go.opentelemetry.io/otel/propagation/propagation.go @@ -9,6 +9,7 @@ import ( ) // TextMapCarrier is the storage medium used by a TextMapPropagator. +// See ValuesGetter for how a TextMapCarrier can get multiple values for a key. type TextMapCarrier interface { // DO NOT CHANGE: any modification will not be backwards compatible and // must never be done outside of a new major release. @@ -19,7 +20,7 @@ type TextMapCarrier interface { // must never be done outside of a new major release. // Set stores the key-value pair. - Set(key string, value string) + Set(key, value string) // DO NOT CHANGE: any modification will not be backwards compatible and // must never be done outside of a new major release. @@ -29,6 +30,18 @@ type TextMapCarrier interface { // must never be done outside of a new major release. } +// ValuesGetter can return multiple values for a single key, +// with contrast to TextMapCarrier.Get which returns a single value. +type ValuesGetter interface { + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. + + // Values returns all values associated with the passed key. + Values(key string) []string + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. +} + // MapCarrier is a TextMapCarrier that uses a map held in memory as a storage // medium for propagated key-value pairs. type MapCarrier map[string]string @@ -55,16 +68,27 @@ func (c MapCarrier) Keys() []string { return keys } -// HeaderCarrier adapts http.Header to satisfy the TextMapCarrier interface. 
+// HeaderCarrier adapts http.Header to satisfy the TextMapCarrier and ValuesGetter interfaces. type HeaderCarrier http.Header -// Get returns the value associated with the passed key. +// Compile time check that HeaderCarrier implements ValuesGetter. +var _ TextMapCarrier = HeaderCarrier{} + +// Compile time check that HeaderCarrier implements TextMapCarrier. +var _ ValuesGetter = HeaderCarrier{} + +// Get returns the first value associated with the passed key. func (hc HeaderCarrier) Get(key string) string { return http.Header(hc).Get(key) } +// Values returns all values associated with the passed key. +func (hc HeaderCarrier) Values(key string) []string { + return http.Header(hc).Values(key) +} + // Set stores the key-value pair. -func (hc HeaderCarrier) Set(key string, value string) { +func (hc HeaderCarrier) Set(key, value string) { http.Header(hc).Set(key, value) } @@ -89,6 +113,8 @@ type TextMapPropagator interface { // must never be done outside of a new major release. // Extract reads cross-cutting concerns from the carrier into a Context. + // Implementations may check if the carrier implements ValuesGetter, + // to support extraction of multiple values per key. Extract(ctx context.Context, carrier TextMapCarrier) context.Context // DO NOT CHANGE: any modification will not be backwards compatible and // must never be done outside of a new major release. diff --git a/vendor/go.opentelemetry.io/otel/propagation/trace_context.go b/vendor/go.opentelemetry.io/otel/propagation/trace_context.go index 6870e316d..6692d2665 100644 --- a/vendor/go.opentelemetry.io/otel/propagation/trace_context.go +++ b/vendor/go.opentelemetry.io/otel/propagation/trace_context.go @@ -36,7 +36,7 @@ var ( ) // Inject injects the trace context from ctx into carrier. 
-func (tc TraceContext) Inject(ctx context.Context, carrier TextMapCarrier) { +func (TraceContext) Inject(ctx context.Context, carrier TextMapCarrier) { sc := trace.SpanContextFromContext(ctx) if !sc.IsValid() { return @@ -77,7 +77,7 @@ func (tc TraceContext) Extract(ctx context.Context, carrier TextMapCarrier) cont return trace.ContextWithRemoteSpanContext(ctx, sc) } -func (tc TraceContext) extract(carrier TextMapCarrier) trace.SpanContext { +func (TraceContext) extract(carrier TextMapCarrier) trace.SpanContext { h := carrier.Get(traceparentHeader) if h == "" { return trace.SpanContext{} @@ -151,6 +151,6 @@ func extractPart(dst []byte, h *string, n int) bool { } // Fields returns the keys who's values are set with Inject. -func (tc TraceContext) Fields() []string { +func (TraceContext) Fields() []string { return []string{traceparentHeader, tracestateHeader} } diff --git a/vendor/go.opentelemetry.io/otel/renovate.json b/vendor/go.opentelemetry.io/otel/renovate.json index 4f80c898a..fa5acf2d3 100644 --- a/vendor/go.opentelemetry.io/otel/renovate.json +++ b/vendor/go.opentelemetry.io/otel/renovate.json @@ -1,7 +1,8 @@ { "$schema": "https://docs.renovatebot.com/renovate-schema.json", "extends": [ - "config:recommended" + "config:best-practices", + "helpers:pinGitHubActionDigestsToSemver" ], "ignorePaths": [], "labels": ["Skip Changelog", "dependencies"], @@ -14,6 +15,10 @@ "matchDepTypes": ["indirect"], "enabled": true }, + { + "matchPackageNames": ["go.opentelemetry.io/build-tools/**"], + "groupName": "build-tools" + }, { "matchPackageNames": ["google.golang.org/genproto/googleapis/**"], "groupName": "googleapis" @@ -21,6 +26,10 @@ { "matchPackageNames": ["golang.org/x/**"], "groupName": "golang.org/x" + }, + { + "matchPackageNames": ["go.opentelemetry.io/otel/sdk/log/logtest"], + "enabled": false } ] } diff --git a/vendor/go.opentelemetry.io/otel/requirements.txt b/vendor/go.opentelemetry.io/otel/requirements.txt index ab09daf9d..1bb55fb1c 100644 --- 
a/vendor/go.opentelemetry.io/otel/requirements.txt +++ b/vendor/go.opentelemetry.io/otel/requirements.txt @@ -1 +1 @@ -codespell==2.3.0 +codespell==2.4.1 diff --git a/vendor/go.opentelemetry.io/otel/sdk/LICENSE b/vendor/go.opentelemetry.io/otel/sdk/LICENSE index 261eeb9e9..f1aee0f11 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/LICENSE +++ b/vendor/go.opentelemetry.io/otel/sdk/LICENSE @@ -199,3 +199,33 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +-------------------------------------------------------------------------------- + +Copyright 2009 The Go Authors. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google LLC nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. \ No newline at end of file diff --git a/vendor/go.opentelemetry.io/otel/sdk/internal/env/env.go b/vendor/go.opentelemetry.io/otel/sdk/internal/env/env.go index 07923ed8d..e3309231d 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/internal/env/env.go +++ b/vendor/go.opentelemetry.io/otel/sdk/internal/env/env.go @@ -1,6 +1,8 @@ // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 +// Package env provides types and functionality for environment variable support +// in the OpenTelemetry SDK. package env // import "go.opentelemetry.io/otel/sdk/internal/env" import ( diff --git a/vendor/go.opentelemetry.io/otel/sdk/internal/x/x.go b/vendor/go.opentelemetry.io/otel/sdk/internal/x/x.go index 68d296cbe..1be472e91 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/internal/x/x.go +++ b/vendor/go.opentelemetry.io/otel/sdk/internal/x/x.go @@ -19,7 +19,7 @@ import ( // to the case-insensitive string value of "true" (i.e. "True" and "TRUE" // will also enable this). var Resource = newFeature("RESOURCE", func(v string) (string, bool) { - if strings.ToLower(v) == "true" { + if strings.EqualFold(v, "true") { return v, true } return "", false @@ -59,7 +59,7 @@ func (f Feature[T]) Lookup() (v T, ok bool) { return f.parse(vRaw) } -// Enabled returns if the feature is enabled. +// Enabled reports whether the feature is enabled. 
func (f Feature[T]) Enabled() bool { _, ok := f.Lookup() return ok diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go b/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go index cf3c88e15..3f20eb7a5 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go @@ -13,7 +13,7 @@ import ( "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/sdk" - semconv "go.opentelemetry.io/otel/semconv/v1.26.0" + semconv "go.opentelemetry.io/otel/semconv/v1.37.0" ) type ( @@ -72,7 +72,7 @@ func StringDetector(schemaURL string, k attribute.Key, f func() (string, error)) // Detect returns a *Resource that describes the string as a value // corresponding to attribute.Key as well as the specific schemaURL. -func (sd stringDetector) Detect(ctx context.Context) (*Resource, error) { +func (sd stringDetector) Detect(context.Context) (*Resource, error) { value, err := sd.F() if err != nil { return nil, fmt.Errorf("%s: %w", string(sd.K), err) diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/container.go b/vendor/go.opentelemetry.io/otel/sdk/resource/container.go index 5ecd859a5..bbe142d20 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/container.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/container.go @@ -11,7 +11,7 @@ import ( "os" "regexp" - semconv "go.opentelemetry.io/otel/semconv/v1.26.0" + semconv "go.opentelemetry.io/otel/semconv/v1.37.0" ) type containerIDProvider func() (string, error) @@ -27,7 +27,7 @@ const cgroupPath = "/proc/self/cgroup" // Detect returns a *Resource that describes the id of the container. // If no container id found, an empty resource will be returned. 
-func (cgroupContainerIDDetector) Detect(ctx context.Context) (*Resource, error) { +func (cgroupContainerIDDetector) Detect(context.Context) (*Resource, error) { containerID, err := containerID() if err != nil { return nil, err diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/env.go b/vendor/go.opentelemetry.io/otel/sdk/resource/env.go index 813f05624..4a1b017ee 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/env.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/env.go @@ -12,7 +12,7 @@ import ( "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" - semconv "go.opentelemetry.io/otel/semconv/v1.26.0" + semconv "go.opentelemetry.io/otel/semconv/v1.37.0" ) const ( diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id.go index 2d0f65498..5fed33d4f 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id.go @@ -8,7 +8,7 @@ import ( "errors" "strings" - semconv "go.opentelemetry.io/otel/semconv/v1.26.0" + semconv "go.opentelemetry.io/otel/semconv/v1.37.0" ) type hostIDProvider func() (string, error) @@ -96,7 +96,7 @@ func (r *hostIDReaderLinux) read() (string, error) { type hostIDDetector struct{} // Detect returns a *Resource containing the platform specific host id. 
-func (hostIDDetector) Detect(ctx context.Context) (*Resource, error) { +func (hostIDDetector) Detect(context.Context) (*Resource, error) { hostID, err := hostID() if err != nil { return nil, err diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/os.go b/vendor/go.opentelemetry.io/otel/sdk/resource/os.go index 8a48ab4fa..51da76e80 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/os.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/os.go @@ -8,7 +8,7 @@ import ( "strings" "go.opentelemetry.io/otel/attribute" - semconv "go.opentelemetry.io/otel/semconv/v1.26.0" + semconv "go.opentelemetry.io/otel/semconv/v1.37.0" ) type osDescriptionProvider func() (string, error) @@ -32,7 +32,7 @@ type ( // Detect returns a *Resource that describes the operating system type the // service is running on. -func (osTypeDetector) Detect(ctx context.Context) (*Resource, error) { +func (osTypeDetector) Detect(context.Context) (*Resource, error) { osType := runtimeOS() osTypeAttribute := mapRuntimeOSToSemconvOSType(osType) @@ -45,7 +45,7 @@ func (osTypeDetector) Detect(ctx context.Context) (*Resource, error) { // Detect returns a *Resource that describes the operating system the // service is running on. 
-func (osDescriptionDetector) Detect(ctx context.Context) (*Resource, error) { +func (osDescriptionDetector) Detect(context.Context) (*Resource, error) { description, err := osDescription() if err != nil { return nil, err diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_darwin.go b/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_darwin.go index ce455dc54..3d703c5d9 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_darwin.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_darwin.go @@ -5,6 +5,7 @@ package resource // import "go.opentelemetry.io/otel/sdk/resource" import ( "encoding/xml" + "errors" "fmt" "io" "os" @@ -63,7 +64,7 @@ func parsePlistFile(file io.Reader) (map[string]string, error) { } if len(v.Dict.Key) != len(v.Dict.String) { - return nil, fmt.Errorf("the number of and elements doesn't match") + return nil, errors.New("the number of and elements doesn't match") } properties := make(map[string]string, len(v.Dict.Key)) diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_unix.go b/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_unix.go index f537e5ca5..7252af79f 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_unix.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_unix.go @@ -63,12 +63,12 @@ func parseOSReleaseFile(file io.Reader) map[string]string { return values } -// skip returns true if the line is blank or starts with a '#' character, and +// skip reports whether the line is blank or starts with a '#' character, and // therefore should be skipped from processing. 
func skip(line string) bool { line = strings.TrimSpace(line) - return len(line) == 0 || strings.HasPrefix(line, "#") + return line == "" || strings.HasPrefix(line, "#") } // parse attempts to split the provided line on the first '=' character, and then @@ -76,7 +76,7 @@ func skip(line string) bool { func parse(line string) (string, string, bool) { k, v, found := strings.Cut(line, "=") - if !found || len(k) == 0 { + if !found || k == "" { return "", "", false } diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/process.go b/vendor/go.opentelemetry.io/otel/sdk/resource/process.go index 085fe68fd..138e57721 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/process.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/process.go @@ -11,7 +11,7 @@ import ( "path/filepath" "runtime" - semconv "go.opentelemetry.io/otel/semconv/v1.26.0" + semconv "go.opentelemetry.io/otel/semconv/v1.37.0" ) type ( @@ -112,19 +112,19 @@ type ( // Detect returns a *Resource that describes the process identifier (PID) of the // executing process. -func (processPIDDetector) Detect(ctx context.Context) (*Resource, error) { +func (processPIDDetector) Detect(context.Context) (*Resource, error) { return NewWithAttributes(semconv.SchemaURL, semconv.ProcessPID(pid())), nil } // Detect returns a *Resource that describes the name of the process executable. -func (processExecutableNameDetector) Detect(ctx context.Context) (*Resource, error) { +func (processExecutableNameDetector) Detect(context.Context) (*Resource, error) { executableName := filepath.Base(commandArgs()[0]) return NewWithAttributes(semconv.SchemaURL, semconv.ProcessExecutableName(executableName)), nil } // Detect returns a *Resource that describes the full path of the process executable. 
-func (processExecutablePathDetector) Detect(ctx context.Context) (*Resource, error) { +func (processExecutablePathDetector) Detect(context.Context) (*Resource, error) { executablePath, err := executablePath() if err != nil { return nil, err @@ -135,13 +135,13 @@ func (processExecutablePathDetector) Detect(ctx context.Context) (*Resource, err // Detect returns a *Resource that describes all the command arguments as received // by the process. -func (processCommandArgsDetector) Detect(ctx context.Context) (*Resource, error) { +func (processCommandArgsDetector) Detect(context.Context) (*Resource, error) { return NewWithAttributes(semconv.SchemaURL, semconv.ProcessCommandArgs(commandArgs()...)), nil } // Detect returns a *Resource that describes the username of the user that owns the // process. -func (processOwnerDetector) Detect(ctx context.Context) (*Resource, error) { +func (processOwnerDetector) Detect(context.Context) (*Resource, error) { owner, err := owner() if err != nil { return nil, err @@ -152,17 +152,17 @@ func (processOwnerDetector) Detect(ctx context.Context) (*Resource, error) { // Detect returns a *Resource that describes the name of the compiler used to compile // this process image. -func (processRuntimeNameDetector) Detect(ctx context.Context) (*Resource, error) { +func (processRuntimeNameDetector) Detect(context.Context) (*Resource, error) { return NewWithAttributes(semconv.SchemaURL, semconv.ProcessRuntimeName(runtimeName())), nil } // Detect returns a *Resource that describes the version of the runtime of this process. -func (processRuntimeVersionDetector) Detect(ctx context.Context) (*Resource, error) { +func (processRuntimeVersionDetector) Detect(context.Context) (*Resource, error) { return NewWithAttributes(semconv.SchemaURL, semconv.ProcessRuntimeVersion(runtimeVersion())), nil } // Detect returns a *Resource that describes the runtime of this process. 
-func (processRuntimeDescriptionDetector) Detect(ctx context.Context) (*Resource, error) { +func (processRuntimeDescriptionDetector) Detect(context.Context) (*Resource, error) { runtimeDescription := fmt.Sprintf( "go version %s %s/%s", runtimeVersion(), runtimeOS(), runtimeArch()) diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/resource.go b/vendor/go.opentelemetry.io/otel/sdk/resource/resource.go index ad4b50df4..28e1e4f7e 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/resource.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/resource.go @@ -21,11 +21,22 @@ import ( // Resources should be passed and stored as pointers // (`*resource.Resource`). The `nil` value is equivalent to an empty // Resource. +// +// Note that the Go == operator compares not just the resource attributes but +// also all other internals of the Resource type. Therefore, Resource values +// should not be used as map or database keys. In general, the [Resource.Equal] +// method should be used instead of direct comparison with ==, since that +// method ensures the correct comparison of resource attributes, and the +// [attribute.Distinct] returned from [Resource.Equivalent] should be used for +// map and database keys instead. type Resource struct { attrs attribute.Set schemaURL string } +// Compile-time check that the Resource remains comparable. +var _ map[Resource]struct{} = nil + var ( defaultResource *Resource defaultResourceOnce sync.Once @@ -101,7 +112,7 @@ func (r *Resource) String() string { } // MarshalLog is the marshaling function used by the logging system to represent this Resource. -func (r *Resource) MarshalLog() interface{} { +func (r *Resource) MarshalLog() any { return struct { Attributes attribute.Set SchemaURL string @@ -137,15 +148,19 @@ func (r *Resource) Iter() attribute.Iterator { return r.attrs.Iter() } -// Equal returns true when a Resource is equivalent to this Resource. 
-func (r *Resource) Equal(eq *Resource) bool { +// Equal reports whether r and o represent the same resource. Two resources can +// be equal even if they have different schema URLs. +// +// See the documentation on the [Resource] type for the pitfalls of using == +// with Resource values; most code should use Equal instead. +func (r *Resource) Equal(o *Resource) bool { if r == nil { r = Empty() } - if eq == nil { - eq = Empty() + if o == nil { + o = Empty() } - return r.Equivalent() == eq.Equivalent() + return r.Equivalent() == o.Equivalent() } // Merge creates a new [Resource] by merging a and b. diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go b/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go index ccc97e1b6..9bc3e525d 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go @@ -5,24 +5,36 @@ package trace // import "go.opentelemetry.io/otel/sdk/trace" import ( "context" + "errors" + "fmt" "sync" "sync/atomic" "time" "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/internal/global" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/sdk" "go.opentelemetry.io/otel/sdk/internal/env" + "go.opentelemetry.io/otel/sdk/trace/internal/x" + semconv "go.opentelemetry.io/otel/semconv/v1.37.0" + "go.opentelemetry.io/otel/semconv/v1.37.0/otelconv" "go.opentelemetry.io/otel/trace" ) // Defaults for BatchSpanProcessorOptions. const ( - DefaultMaxQueueSize = 2048 - DefaultScheduleDelay = 5000 + DefaultMaxQueueSize = 2048 + // DefaultScheduleDelay is the delay interval between two consecutive exports, in milliseconds. + DefaultScheduleDelay = 5000 + // DefaultExportTimeout is the duration after which an export is cancelled, in milliseconds. 
DefaultExportTimeout = 30000 DefaultMaxExportBatchSize = 512 ) +var queueFull = otelconv.ErrorTypeAttr("queue_full") + // BatchSpanProcessorOption configures a BatchSpanProcessor. type BatchSpanProcessorOption func(o *BatchSpanProcessorOptions) @@ -66,6 +78,11 @@ type batchSpanProcessor struct { queue chan ReadOnlySpan dropped uint32 + selfObservabilityEnabled bool + callbackRegistration metric.Registration + spansProcessedCounter otelconv.SDKProcessorSpanProcessed + componentNameAttr attribute.KeyValue + batch []ReadOnlySpan batchMutex sync.Mutex timer *time.Timer @@ -86,11 +103,7 @@ func NewBatchSpanProcessor(exporter SpanExporter, options ...BatchSpanProcessorO maxExportBatchSize := env.BatchSpanProcessorMaxExportBatchSize(DefaultMaxExportBatchSize) if maxExportBatchSize > maxQueueSize { - if DefaultMaxExportBatchSize > maxQueueSize { - maxExportBatchSize = maxQueueSize - } else { - maxExportBatchSize = DefaultMaxExportBatchSize - } + maxExportBatchSize = min(DefaultMaxExportBatchSize, maxQueueSize) } o := BatchSpanProcessorOptions{ @@ -111,6 +124,21 @@ func NewBatchSpanProcessor(exporter SpanExporter, options ...BatchSpanProcessorO stopCh: make(chan struct{}), } + if x.SelfObservability.Enabled() { + bsp.selfObservabilityEnabled = true + bsp.componentNameAttr = componentName() + + var err error + bsp.spansProcessedCounter, bsp.callbackRegistration, err = newBSPObs( + bsp.componentNameAttr, + func() int64 { return int64(len(bsp.queue)) }, + int64(bsp.o.MaxQueueSize), + ) + if err != nil { + otel.Handle(err) + } + } + bsp.stopWait.Add(1) go func() { defer bsp.stopWait.Done() @@ -121,8 +149,61 @@ func NewBatchSpanProcessor(exporter SpanExporter, options ...BatchSpanProcessorO return bsp } +var processorIDCounter atomic.Int64 + +// nextProcessorID returns an identifier for this batch span processor, +// starting with 0 and incrementing by 1 each time it is called. 
+func nextProcessorID() int64 { + return processorIDCounter.Add(1) - 1 +} + +func componentName() attribute.KeyValue { + id := nextProcessorID() + name := fmt.Sprintf("%s/%d", otelconv.ComponentTypeBatchingSpanProcessor, id) + return semconv.OTelComponentName(name) +} + +// newBSPObs creates and returns a new set of metrics instruments and a +// registration for a BatchSpanProcessor. It is the caller's responsibility +// to unregister the registration when it is no longer needed. +func newBSPObs( + cmpnt attribute.KeyValue, + qLen func() int64, + qMax int64, +) (otelconv.SDKProcessorSpanProcessed, metric.Registration, error) { + meter := otel.GetMeterProvider().Meter( + selfObsScopeName, + metric.WithInstrumentationVersion(sdk.Version()), + metric.WithSchemaURL(semconv.SchemaURL), + ) + + qCap, err := otelconv.NewSDKProcessorSpanQueueCapacity(meter) + + qSize, e := otelconv.NewSDKProcessorSpanQueueSize(meter) + err = errors.Join(err, e) + + spansProcessed, e := otelconv.NewSDKProcessorSpanProcessed(meter) + err = errors.Join(err, e) + + cmpntT := semconv.OTelComponentTypeBatchingSpanProcessor + attrs := metric.WithAttributes(cmpnt, cmpntT) + + reg, e := meter.RegisterCallback( + func(_ context.Context, o metric.Observer) error { + o.ObserveInt64(qSize.Inst(), qLen(), attrs) + o.ObserveInt64(qCap.Inst(), qMax, attrs) + return nil + }, + qSize.Inst(), + qCap.Inst(), + ) + err = errors.Join(err, e) + + return spansProcessed, reg, err +} + // OnStart method does nothing. -func (bsp *batchSpanProcessor) OnStart(parent context.Context, s ReadWriteSpan) {} +func (*batchSpanProcessor) OnStart(context.Context, ReadWriteSpan) {} // OnEnd method enqueues a ReadOnlySpan for later processing. 
func (bsp *batchSpanProcessor) OnEnd(s ReadOnlySpan) { @@ -161,6 +242,9 @@ func (bsp *batchSpanProcessor) Shutdown(ctx context.Context) error { case <-ctx.Done(): err = ctx.Err() } + if bsp.selfObservabilityEnabled { + err = errors.Join(err, bsp.callbackRegistration.Unregister()) + } }) return err } @@ -170,7 +254,7 @@ type forceFlushSpan struct { flushed chan struct{} } -func (f forceFlushSpan) SpanContext() trace.SpanContext { +func (forceFlushSpan) SpanContext() trace.SpanContext { return trace.NewSpanContext(trace.SpanContextConfig{TraceFlags: trace.FlagsSampled}) } @@ -201,10 +285,9 @@ func (bsp *batchSpanProcessor) ForceFlush(ctx context.Context) error { } } - wait := make(chan error) + wait := make(chan error, 1) go func() { wait <- bsp.exportSpans(ctx) - close(wait) }() // Wait until the export is finished or the context is cancelled/timed out select { @@ -268,12 +351,17 @@ func (bsp *batchSpanProcessor) exportSpans(ctx context.Context) error { if bsp.o.ExportTimeout > 0 { var cancel context.CancelFunc - ctx, cancel = context.WithTimeout(ctx, bsp.o.ExportTimeout) + ctx, cancel = context.WithTimeoutCause(ctx, bsp.o.ExportTimeout, errors.New("processor export timeout")) defer cancel() } if l := len(bsp.batch); l > 0 { global.Debug("exporting spans", "count", len(bsp.batch), "total_dropped", atomic.LoadUint32(&bsp.dropped)) + if bsp.selfObservabilityEnabled { + bsp.spansProcessedCounter.Add(ctx, int64(l), + bsp.componentNameAttr, + bsp.spansProcessedCounter.AttrComponentType(otelconv.ComponentTypeBatchingSpanProcessor)) + } err := bsp.e.ExportSpans(ctx, bsp.batch) // A new batch is always created after exporting, even if the batch failed to be exported. 
@@ -382,11 +470,17 @@ func (bsp *batchSpanProcessor) enqueueBlockOnQueueFull(ctx context.Context, sd R case bsp.queue <- sd: return true case <-ctx.Done(): + if bsp.selfObservabilityEnabled { + bsp.spansProcessedCounter.Add(ctx, 1, + bsp.componentNameAttr, + bsp.spansProcessedCounter.AttrComponentType(otelconv.ComponentTypeBatchingSpanProcessor), + bsp.spansProcessedCounter.AttrErrorType(queueFull)) + } return false } } -func (bsp *batchSpanProcessor) enqueueDrop(_ context.Context, sd ReadOnlySpan) bool { +func (bsp *batchSpanProcessor) enqueueDrop(ctx context.Context, sd ReadOnlySpan) bool { if !sd.SpanContext().IsSampled() { return false } @@ -396,12 +490,18 @@ func (bsp *batchSpanProcessor) enqueueDrop(_ context.Context, sd ReadOnlySpan) b return true default: atomic.AddUint32(&bsp.dropped, 1) + if bsp.selfObservabilityEnabled { + bsp.spansProcessedCounter.Add(ctx, 1, + bsp.componentNameAttr, + bsp.spansProcessedCounter.AttrComponentType(otelconv.ComponentTypeBatchingSpanProcessor), + bsp.spansProcessedCounter.AttrErrorType(queueFull)) + } } return false } // MarshalLog is the marshaling function used by the logging system to represent this Span Processor. -func (bsp *batchSpanProcessor) MarshalLog() interface{} { +func (bsp *batchSpanProcessor) MarshalLog() any { return struct { Type string SpanExporter SpanExporter diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/doc.go b/vendor/go.opentelemetry.io/otel/sdk/trace/doc.go index 1f60524e3..e58e7f6ed 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/doc.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/doc.go @@ -6,5 +6,8 @@ Package trace contains support for OpenTelemetry distributed tracing. The following assumes a basic familiarity with OpenTelemetry concepts. See https://opentelemetry.io. + +See [go.opentelemetry.io/otel/sdk/trace/internal/x] for information about +the experimental features. 
*/ package trace // import "go.opentelemetry.io/otel/sdk/trace" diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/id_generator.go b/vendor/go.opentelemetry.io/otel/sdk/trace/id_generator.go index 925bcf993..3649322a6 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/id_generator.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/id_generator.go @@ -5,10 +5,8 @@ package trace // import "go.opentelemetry.io/otel/sdk/trace" import ( "context" - crand "crypto/rand" "encoding/binary" - "math/rand" - "sync" + "math/rand/v2" "go.opentelemetry.io/otel/trace" ) @@ -29,20 +27,15 @@ type IDGenerator interface { // must never be done outside of a new major release. } -type randomIDGenerator struct { - sync.Mutex - randSource *rand.Rand -} +type randomIDGenerator struct{} var _ IDGenerator = &randomIDGenerator{} // NewSpanID returns a non-zero span ID from a randomly-chosen sequence. -func (gen *randomIDGenerator) NewSpanID(ctx context.Context, traceID trace.TraceID) trace.SpanID { - gen.Lock() - defer gen.Unlock() +func (*randomIDGenerator) NewSpanID(context.Context, trace.TraceID) trace.SpanID { sid := trace.SpanID{} for { - _, _ = gen.randSource.Read(sid[:]) + binary.NativeEndian.PutUint64(sid[:], rand.Uint64()) if sid.IsValid() { break } @@ -52,19 +45,18 @@ func (gen *randomIDGenerator) NewSpanID(ctx context.Context, traceID trace.Trace // NewIDs returns a non-zero trace ID and a non-zero span ID from a // randomly-chosen sequence. 
-func (gen *randomIDGenerator) NewIDs(ctx context.Context) (trace.TraceID, trace.SpanID) { - gen.Lock() - defer gen.Unlock() +func (*randomIDGenerator) NewIDs(context.Context) (trace.TraceID, trace.SpanID) { tid := trace.TraceID{} sid := trace.SpanID{} for { - _, _ = gen.randSource.Read(tid[:]) + binary.NativeEndian.PutUint64(tid[:8], rand.Uint64()) + binary.NativeEndian.PutUint64(tid[8:], rand.Uint64()) if tid.IsValid() { break } } for { - _, _ = gen.randSource.Read(sid[:]) + binary.NativeEndian.PutUint64(sid[:], rand.Uint64()) if sid.IsValid() { break } @@ -73,9 +65,5 @@ func (gen *randomIDGenerator) NewIDs(ctx context.Context) (trace.TraceID, trace. } func defaultIDGenerator() IDGenerator { - gen := &randomIDGenerator{} - var rngSeed int64 - _ = binary.Read(crand.Reader, binary.LittleEndian, &rngSeed) - gen.randSource = rand.New(rand.NewSource(rngSeed)) - return gen + return &randomIDGenerator{} } diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/internal/x/README.md b/vendor/go.opentelemetry.io/otel/sdk/trace/internal/x/README.md new file mode 100644 index 000000000..feec16fa6 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/internal/x/README.md @@ -0,0 +1,35 @@ +# Experimental Features + +The Trace SDK contains features that have not yet stabilized in the OpenTelemetry specification. +These features are added to the OpenTelemetry Go Trace SDK prior to stabilization in the specification so that users can start experimenting with them and provide feedback. + +These features may change in backwards incompatible ways as feedback is applied. +See the [Compatibility and Stability](#compatibility-and-stability) section for more information. + +## Features + +- [Self-Observability](#self-observability) + +### Self-Observability + +The SDK provides a self-observability feature that allows you to monitor the SDK itself. + +To opt-in, set the environment variable `OTEL_GO_X_SELF_OBSERVABILITY` to `true`. 
+ +When enabled, the SDK will create the following metrics using the global `MeterProvider`: + +- `otel.sdk.span.live` +- `otel.sdk.span.started` + +Please see the [Semantic conventions for OpenTelemetry SDK metrics] documentation for more details on these metrics. + +[Semantic conventions for OpenTelemetry SDK metrics]: https://github.com/open-telemetry/semantic-conventions/blob/v1.36.0/docs/otel/sdk-metrics.md + +## Compatibility and Stability + +Experimental features do not fall within the scope of the OpenTelemetry Go versioning and stability [policy](../../../../VERSIONING.md). +These features may be removed or modified in successive version releases, including patch versions. + +When an experimental feature is promoted to a stable feature, a migration path will be included in the changelog entry of the release. +There is no guarantee that any environment variable feature flags that enabled the experimental feature will be supported by the stable version. +If they are supported, they may be accompanied with a deprecation notice stating a timeline for the removal of that support. diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/internal/x/x.go b/vendor/go.opentelemetry.io/otel/sdk/trace/internal/x/x.go new file mode 100644 index 000000000..2fcbbcc66 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/internal/x/x.go @@ -0,0 +1,63 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package x documents experimental features for [go.opentelemetry.io/otel/sdk/trace]. +package x // import "go.opentelemetry.io/otel/sdk/trace/internal/x" + +import ( + "os" + "strings" +) + +// SelfObservability is an experimental feature flag that determines if SDK +// self-observability metrics are enabled. +// +// To enable this feature set the OTEL_GO_X_SELF_OBSERVABILITY environment variable +// to the case-insensitive string value of "true" (i.e. "True" and "TRUE" +// will also enable this). 
+var SelfObservability = newFeature("SELF_OBSERVABILITY", func(v string) (string, bool) { + if strings.EqualFold(v, "true") { + return v, true + } + return "", false +}) + +// Feature is an experimental feature control flag. It provides a uniform way +// to interact with these feature flags and parse their values. +type Feature[T any] struct { + key string + parse func(v string) (T, bool) +} + +func newFeature[T any](suffix string, parse func(string) (T, bool)) Feature[T] { + const envKeyRoot = "OTEL_GO_X_" + return Feature[T]{ + key: envKeyRoot + suffix, + parse: parse, + } +} + +// Key returns the environment variable key that needs to be set to enable the +// feature. +func (f Feature[T]) Key() string { return f.key } + +// Lookup returns the user configured value for the feature and true if the +// user has enabled the feature. Otherwise, if the feature is not enabled, a +// zero-value and false are returned. +func (f Feature[T]) Lookup() (v T, ok bool) { + // https://github.com/open-telemetry/opentelemetry-specification/blob/62effed618589a0bec416a87e559c0a9d96289bb/specification/configuration/sdk-environment-variables.md#parsing-empty-value + // + // > The SDK MUST interpret an empty value of an environment variable the + // > same way as when the variable is unset. + vRaw := os.Getenv(f.key) + if vRaw == "" { + return v, ok + } + return f.parse(vRaw) +} + +// Enabled reports whether the feature is enabled. 
+func (f Feature[T]) Enabled() bool { + _, ok := f.Lookup() + return ok +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go b/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go index 185aa7c08..37ce2ac87 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go @@ -5,14 +5,20 @@ package trace // import "go.opentelemetry.io/otel/sdk/trace" import ( "context" + "errors" "fmt" "sync" "sync/atomic" "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/internal/global" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/sdk" "go.opentelemetry.io/otel/sdk/instrumentation" "go.opentelemetry.io/otel/sdk/resource" + "go.opentelemetry.io/otel/sdk/trace/internal/x" + semconv "go.opentelemetry.io/otel/semconv/v1.37.0" + "go.opentelemetry.io/otel/semconv/v1.37.0/otelconv" "go.opentelemetry.io/otel/trace" "go.opentelemetry.io/otel/trace/embedded" "go.opentelemetry.io/otel/trace/noop" @@ -20,6 +26,7 @@ import ( const ( defaultTracerName = "go.opentelemetry.io/otel/sdk/tracer" + selfObsScopeName = "go.opentelemetry.io/otel/sdk/trace" ) // tracerProviderConfig. @@ -45,7 +52,7 @@ type tracerProviderConfig struct { } // MarshalLog is the marshaling function used by the logging system to represent this Provider. 
-func (cfg tracerProviderConfig) MarshalLog() interface{} { +func (cfg tracerProviderConfig) MarshalLog() any { return struct { SpanProcessors []SpanProcessor SamplerType string @@ -156,8 +163,18 @@ func (p *TracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.T t, ok := p.namedTracer[is] if !ok { t = &tracer{ - provider: p, - instrumentationScope: is, + provider: p, + instrumentationScope: is, + selfObservabilityEnabled: x.SelfObservability.Enabled(), + } + if t.selfObservabilityEnabled { + var err error + t.spanLiveMetric, t.spanStartedMetric, err = newInst() + if err != nil { + msg := "failed to create self-observability metrics for tracer: %w" + err := fmt.Errorf(msg, err) + otel.Handle(err) + } } p.namedTracer[is] = t } @@ -169,11 +186,38 @@ func (p *TracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.T // slowing down all tracing consumers. // - Logging code may be instrumented with tracing and deadlock because it could try // acquiring the same non-reentrant mutex. - global.Info("Tracer created", "name", name, "version", is.Version, "schemaURL", is.SchemaURL, "attributes", is.Attributes) + global.Info( + "Tracer created", + "name", + name, + "version", + is.Version, + "schemaURL", + is.SchemaURL, + "attributes", + is.Attributes, + ) } return t } +func newInst() (otelconv.SDKSpanLive, otelconv.SDKSpanStarted, error) { + m := otel.GetMeterProvider().Meter( + selfObsScopeName, + metric.WithInstrumentationVersion(sdk.Version()), + metric.WithSchemaURL(semconv.SchemaURL), + ) + + var err error + spanLiveMetric, e := otelconv.NewSDKSpanLive(m) + err = errors.Join(err, e) + + spanStartedMetric, e := otelconv.NewSDKSpanStarted(m) + err = errors.Join(err, e) + + return spanLiveMetric, spanStartedMetric, err +} + // RegisterSpanProcessor adds the given SpanProcessor to the list of SpanProcessors. func (p *TracerProvider) RegisterSpanProcessor(sp SpanProcessor) { // This check prevents calls during a shutdown. 
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/sampling.go b/vendor/go.opentelemetry.io/otel/sdk/trace/sampling.go index ebb6df6c9..689663d48 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/sampling.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/sampling.go @@ -47,12 +47,12 @@ const ( // Drop will not record the span and all attributes/events will be dropped. Drop SamplingDecision = iota - // Record indicates the span's `IsRecording() == true`, but `Sampled` flag - // *must not* be set. + // RecordOnly indicates the span's IsRecording method returns true, but trace.FlagsSampled flag + // must not be set. RecordOnly - // RecordAndSample has span's `IsRecording() == true` and `Sampled` flag - // *must* be set. + // RecordAndSample indicates the span's IsRecording method returns true and trace.FlagsSampled flag + // must be set. RecordAndSample ) @@ -110,14 +110,14 @@ func TraceIDRatioBased(fraction float64) Sampler { type alwaysOnSampler struct{} -func (as alwaysOnSampler) ShouldSample(p SamplingParameters) SamplingResult { +func (alwaysOnSampler) ShouldSample(p SamplingParameters) SamplingResult { return SamplingResult{ Decision: RecordAndSample, Tracestate: trace.SpanContextFromContext(p.ParentContext).TraceState(), } } -func (as alwaysOnSampler) Description() string { +func (alwaysOnSampler) Description() string { return "AlwaysOnSampler" } @@ -131,14 +131,14 @@ func AlwaysSample() Sampler { type alwaysOffSampler struct{} -func (as alwaysOffSampler) ShouldSample(p SamplingParameters) SamplingResult { +func (alwaysOffSampler) ShouldSample(p SamplingParameters) SamplingResult { return SamplingResult{ Decision: Drop, Tracestate: trace.SpanContextFromContext(p.ParentContext).TraceState(), } } -func (as alwaysOffSampler) Description() string { +func (alwaysOffSampler) Description() string { return "AlwaysOffSampler" } diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/simple_span_processor.go 
b/vendor/go.opentelemetry.io/otel/sdk/trace/simple_span_processor.go index 554111bb4..411d9ccdd 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/simple_span_processor.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/simple_span_processor.go @@ -39,7 +39,7 @@ func NewSimpleSpanProcessor(exporter SpanExporter) SpanProcessor { } // OnStart does nothing. -func (ssp *simpleSpanProcessor) OnStart(context.Context, ReadWriteSpan) {} +func (*simpleSpanProcessor) OnStart(context.Context, ReadWriteSpan) {} // OnEnd immediately exports a ReadOnlySpan. func (ssp *simpleSpanProcessor) OnEnd(s ReadOnlySpan) { @@ -58,7 +58,7 @@ func (ssp *simpleSpanProcessor) Shutdown(ctx context.Context) error { var err error ssp.stopOnce.Do(func() { stopFunc := func(exp SpanExporter) (<-chan error, func()) { - done := make(chan error) + done := make(chan error, 1) return done, func() { done <- exp.Shutdown(ctx) } } @@ -104,13 +104,13 @@ func (ssp *simpleSpanProcessor) Shutdown(ctx context.Context) error { } // ForceFlush does nothing as there is no data to flush. -func (ssp *simpleSpanProcessor) ForceFlush(context.Context) error { +func (*simpleSpanProcessor) ForceFlush(context.Context) error { return nil } // MarshalLog is the marshaling function used by the logging system to represent // this Span Processor. -func (ssp *simpleSpanProcessor) MarshalLog() interface{} { +func (ssp *simpleSpanProcessor) MarshalLog() any { return struct { Type string Exporter SpanExporter diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/snapshot.go b/vendor/go.opentelemetry.io/otel/sdk/trace/snapshot.go index d511d0f27..63aa33780 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/snapshot.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/snapshot.go @@ -35,7 +35,7 @@ type snapshot struct { var _ ReadOnlySpan = snapshot{} -func (s snapshot) private() {} +func (snapshot) private() {} // Name returns the name of the span. 
func (s snapshot) Name() string { diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/span.go b/vendor/go.opentelemetry.io/otel/sdk/trace/span.go index 8f4fc3850..b376051fb 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/span.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/span.go @@ -20,7 +20,7 @@ import ( "go.opentelemetry.io/otel/internal/global" "go.opentelemetry.io/otel/sdk/instrumentation" "go.opentelemetry.io/otel/sdk/resource" - semconv "go.opentelemetry.io/otel/semconv/v1.26.0" + semconv "go.opentelemetry.io/otel/semconv/v1.37.0" "go.opentelemetry.io/otel/trace" "go.opentelemetry.io/otel/trace/embedded" ) @@ -61,6 +61,7 @@ type ReadOnlySpan interface { InstrumentationScope() instrumentation.Scope // InstrumentationLibrary returns information about the instrumentation // library that created the span. + // // Deprecated: please use InstrumentationScope instead. InstrumentationLibrary() instrumentation.Library //nolint:staticcheck // This method needs to be define for backwards compatibility // Resource returns information about the entity that produced the span. @@ -165,7 +166,7 @@ func (s *recordingSpan) SpanContext() trace.SpanContext { return s.spanContext } -// IsRecording returns if this span is being recorded. If this span has ended +// IsRecording reports whether this span is being recorded. If this span has ended // this will return false. func (s *recordingSpan) IsRecording() bool { if s == nil { @@ -177,7 +178,7 @@ func (s *recordingSpan) IsRecording() bool { return s.isRecording() } -// isRecording returns if this span is being recorded. If this span has ended +// isRecording reports whether this span is being recorded. If this span has ended // this will return false. // // This method assumes s.mu.Lock is held by the caller. 
@@ -495,6 +496,16 @@ func (s *recordingSpan) End(options ...trace.SpanEndOption) { } s.mu.Unlock() + if s.tracer.selfObservabilityEnabled { + defer func() { + // Add the span to the context to ensure the metric is recorded + // with the correct span context. + ctx := trace.ContextWithSpan(context.Background(), s) + set := spanLiveSet(s.spanContext.IsSampled()) + s.tracer.spanLiveMetric.AddSet(ctx, -1, set) + }() + } + sps := s.tracer.provider.getSpanProcessors() if len(sps) == 0 { return @@ -545,7 +556,7 @@ func (s *recordingSpan) RecordError(err error, opts ...trace.EventOption) { s.addEvent(semconv.ExceptionEventName, opts...) } -func typeStr(i interface{}) string { +func typeStr(i any) string { t := reflect.TypeOf(i) if t.PkgPath() == "" && t.Name() == "" { // Likely a builtin type. diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/tracer.go b/vendor/go.opentelemetry.io/otel/sdk/trace/tracer.go index 43419d3b5..e965c4cce 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/tracer.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/tracer.go @@ -7,7 +7,9 @@ import ( "context" "time" + "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/sdk/instrumentation" + "go.opentelemetry.io/otel/semconv/v1.37.0/otelconv" "go.opentelemetry.io/otel/trace" "go.opentelemetry.io/otel/trace/embedded" ) @@ -17,6 +19,10 @@ type tracer struct { provider *TracerProvider instrumentationScope instrumentation.Scope + + selfObservabilityEnabled bool + spanLiveMetric otelconv.SDKSpanLive + spanStartedMetric otelconv.SDKSpanStarted } var _ trace.Tracer = &tracer{} @@ -26,7 +32,11 @@ var _ trace.Tracer = &tracer{} // The Span is created with the provided name and as a child of any existing // span context found in the passed context. The created Span will be // configured appropriately by any SpanOption passed. 
-func (tr *tracer) Start(ctx context.Context, name string, options ...trace.SpanStartOption) (context.Context, trace.Span) { +func (tr *tracer) Start( + ctx context.Context, + name string, + options ...trace.SpanStartOption, +) (context.Context, trace.Span) { config := trace.NewSpanStartConfig(options...) if ctx == nil { @@ -42,17 +52,25 @@ func (tr *tracer) Start(ctx context.Context, name string, options ...trace.SpanS } s := tr.newSpan(ctx, name, &config) + newCtx := trace.ContextWithSpan(ctx, s) + if tr.selfObservabilityEnabled { + psc := trace.SpanContextFromContext(ctx) + set := spanStartedSet(psc, s) + tr.spanStartedMetric.AddSet(newCtx, 1, set) + } + if rw, ok := s.(ReadWriteSpan); ok && s.IsRecording() { sps := tr.provider.getSpanProcessors() for _, sp := range sps { + // Use original context. sp.sp.OnStart(ctx, rw) } } if rtt, ok := s.(runtimeTracer); ok { - ctx = rtt.runtimeTrace(ctx) + newCtx = rtt.runtimeTrace(newCtx) } - return trace.ContextWithSpan(ctx, s), s + return newCtx, s } type runtimeTracer interface { @@ -108,11 +126,17 @@ func (tr *tracer) newSpan(ctx context.Context, name string, config *trace.SpanCo if !isRecording(samplingResult) { return tr.newNonRecordingSpan(sc) } - return tr.newRecordingSpan(psc, sc, name, samplingResult, config) + return tr.newRecordingSpan(ctx, psc, sc, name, samplingResult, config) } // newRecordingSpan returns a new configured recordingSpan. -func (tr *tracer) newRecordingSpan(psc, sc trace.SpanContext, name string, sr SamplingResult, config *trace.SpanConfig) *recordingSpan { +func (tr *tracer) newRecordingSpan( + ctx context.Context, + psc, sc trace.SpanContext, + name string, + sr SamplingResult, + config *trace.SpanConfig, +) *recordingSpan { startTime := config.Timestamp() if startTime.IsZero() { startTime = time.Now() @@ -144,6 +168,14 @@ func (tr *tracer) newRecordingSpan(psc, sc trace.SpanContext, name string, sr Sa s.SetAttributes(sr.Attributes...) s.SetAttributes(config.Attributes()...) 
+ if tr.selfObservabilityEnabled { + // Propagate any existing values from the context with the new span to + // the measurement context. + ctx = trace.ContextWithSpan(ctx, s) + set := spanLiveSet(s.spanContext.IsSampled()) + tr.spanLiveMetric.AddSet(ctx, 1, set) + } + return s } @@ -151,3 +183,112 @@ func (tr *tracer) newRecordingSpan(psc, sc trace.SpanContext, name string, sr Sa func (tr *tracer) newNonRecordingSpan(sc trace.SpanContext) nonRecordingSpan { return nonRecordingSpan{tracer: tr, sc: sc} } + +type parentState int + +const ( + parentStateNoParent parentState = iota + parentStateLocalParent + parentStateRemoteParent +) + +type samplingState int + +const ( + samplingStateDrop samplingState = iota + samplingStateRecordOnly + samplingStateRecordAndSample +) + +type spanStartedSetKey struct { + parent parentState + sampling samplingState +} + +var spanStartedSetCache = map[spanStartedSetKey]attribute.Set{ + {parentStateNoParent, samplingStateDrop}: attribute.NewSet( + otelconv.SDKSpanStarted{}.AttrSpanParentOrigin(otelconv.SpanParentOriginNone), + otelconv.SDKSpanStarted{}.AttrSpanSamplingResult(otelconv.SpanSamplingResultDrop), + ), + {parentStateLocalParent, samplingStateDrop}: attribute.NewSet( + otelconv.SDKSpanStarted{}.AttrSpanParentOrigin(otelconv.SpanParentOriginLocal), + otelconv.SDKSpanStarted{}.AttrSpanSamplingResult(otelconv.SpanSamplingResultDrop), + ), + {parentStateRemoteParent, samplingStateDrop}: attribute.NewSet( + otelconv.SDKSpanStarted{}.AttrSpanParentOrigin(otelconv.SpanParentOriginRemote), + otelconv.SDKSpanStarted{}.AttrSpanSamplingResult(otelconv.SpanSamplingResultDrop), + ), + + {parentStateNoParent, samplingStateRecordOnly}: attribute.NewSet( + otelconv.SDKSpanStarted{}.AttrSpanParentOrigin(otelconv.SpanParentOriginNone), + otelconv.SDKSpanStarted{}.AttrSpanSamplingResult(otelconv.SpanSamplingResultRecordOnly), + ), + {parentStateLocalParent, samplingStateRecordOnly}: attribute.NewSet( + 
otelconv.SDKSpanStarted{}.AttrSpanParentOrigin(otelconv.SpanParentOriginLocal), + otelconv.SDKSpanStarted{}.AttrSpanSamplingResult(otelconv.SpanSamplingResultRecordOnly), + ), + {parentStateRemoteParent, samplingStateRecordOnly}: attribute.NewSet( + otelconv.SDKSpanStarted{}.AttrSpanParentOrigin(otelconv.SpanParentOriginRemote), + otelconv.SDKSpanStarted{}.AttrSpanSamplingResult(otelconv.SpanSamplingResultRecordOnly), + ), + + {parentStateNoParent, samplingStateRecordAndSample}: attribute.NewSet( + otelconv.SDKSpanStarted{}.AttrSpanParentOrigin(otelconv.SpanParentOriginNone), + otelconv.SDKSpanStarted{}.AttrSpanSamplingResult(otelconv.SpanSamplingResultRecordAndSample), + ), + {parentStateLocalParent, samplingStateRecordAndSample}: attribute.NewSet( + otelconv.SDKSpanStarted{}.AttrSpanParentOrigin(otelconv.SpanParentOriginLocal), + otelconv.SDKSpanStarted{}.AttrSpanSamplingResult(otelconv.SpanSamplingResultRecordAndSample), + ), + {parentStateRemoteParent, samplingStateRecordAndSample}: attribute.NewSet( + otelconv.SDKSpanStarted{}.AttrSpanParentOrigin(otelconv.SpanParentOriginRemote), + otelconv.SDKSpanStarted{}.AttrSpanSamplingResult(otelconv.SpanSamplingResultRecordAndSample), + ), +} + +func spanStartedSet(psc trace.SpanContext, span trace.Span) attribute.Set { + key := spanStartedSetKey{ + parent: parentStateNoParent, + sampling: samplingStateDrop, + } + + if psc.IsValid() { + if psc.IsRemote() { + key.parent = parentStateRemoteParent + } else { + key.parent = parentStateLocalParent + } + } + + if span.IsRecording() { + if span.SpanContext().IsSampled() { + key.sampling = samplingStateRecordAndSample + } else { + key.sampling = samplingStateRecordOnly + } + } + + return spanStartedSetCache[key] +} + +type spanLiveSetKey struct { + sampled bool +} + +var spanLiveSetCache = map[spanLiveSetKey]attribute.Set{ + {true}: attribute.NewSet( + otelconv.SDKSpanLive{}.AttrSpanSamplingResult( + otelconv.SpanSamplingResultRecordAndSample, + ), + ), + {false}: 
attribute.NewSet( + otelconv.SDKSpanLive{}.AttrSpanSamplingResult( + otelconv.SpanSamplingResultRecordOnly, + ), + ), +} + +func spanLiveSet(sampled bool) attribute.Set { + key := spanLiveSetKey{sampled: sampled} + return spanLiveSetCache[key] +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/version.go b/vendor/go.opentelemetry.io/otel/sdk/trace/version.go deleted file mode 100644 index b84dd2c5e..000000000 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/version.go +++ /dev/null @@ -1,9 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package trace // import "go.opentelemetry.io/otel/sdk/trace" - -// version is the current release version of the metric SDK in use. -func version() string { - return "1.16.0-rc.1" -} diff --git a/vendor/go.opentelemetry.io/otel/sdk/version.go b/vendor/go.opentelemetry.io/otel/sdk/version.go index 6b4038510..7f97cc31e 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/version.go +++ b/vendor/go.opentelemetry.io/otel/sdk/version.go @@ -1,9 +1,10 @@ // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 +// Package sdk provides the OpenTelemetry default SDK for Go. package sdk // import "go.opentelemetry.io/otel/sdk" // Version is the current release version of the OpenTelemetry SDK in use. func Version() string { - return "1.34.0" + return "1.38.0" } diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/MIGRATION.md b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/MIGRATION.md new file mode 100644 index 000000000..248054789 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/MIGRATION.md @@ -0,0 +1,41 @@ + +# Migration from v1.36.0 to v1.37.0 + +The `go.opentelemetry.io/otel/semconv/v1.37.0` package should be a drop-in replacement for `go.opentelemetry.io/otel/semconv/v1.36.0` with the following exceptions. + +## Removed + +The following declarations have been removed. 
+Refer to the [OpenTelemetry Semantic Conventions documentation] for deprecation instructions. + +If the type is not listed in the documentation as deprecated, it has been removed in this version due to lack of applicability or use. +If you use any of these non-deprecated declarations in your Go application, please [open an issue] describing your use-case. + +- `ContainerRuntime` +- `ContainerRuntimeKey` +- `GenAIOpenAIRequestServiceTierAuto` +- `GenAIOpenAIRequestServiceTierDefault` +- `GenAIOpenAIRequestServiceTierKey` +- `GenAIOpenAIResponseServiceTier` +- `GenAIOpenAIResponseServiceTierKey` +- `GenAIOpenAIResponseSystemFingerprint` +- `GenAIOpenAIResponseSystemFingerprintKey` +- `GenAISystemAWSBedrock` +- `GenAISystemAnthropic` +- `GenAISystemAzureAIInference` +- `GenAISystemAzureAIOpenAI` +- `GenAISystemCohere` +- `GenAISystemDeepseek` +- `GenAISystemGCPGemini` +- `GenAISystemGCPGenAI` +- `GenAISystemGCPVertexAI` +- `GenAISystemGroq` +- `GenAISystemIBMWatsonxAI` +- `GenAISystemKey` +- `GenAISystemMistralAI` +- `GenAISystemOpenAI` +- `GenAISystemPerplexity` +- `GenAISystemXai` + +[OpenTelemetry Semantic Conventions documentation]: https://github.com/open-telemetry/semantic-conventions +[open an issue]: https://github.com/open-telemetry/opentelemetry-go/issues/new?template=Blank+issue diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/README.md b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/README.md new file mode 100644 index 000000000..d795247f3 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/README.md @@ -0,0 +1,3 @@ +# Semconv v1.37.0 + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/semconv/v1.37.0)](https://pkg.go.dev/go.opentelemetry.io/otel/semconv/v1.37.0) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/attribute_group.go b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/attribute_group.go new file mode 100644 index 000000000..b6b27498f --- /dev/null +++ 
b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/attribute_group.go @@ -0,0 +1,15193 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated from semantic convention specification. DO NOT EDIT. + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.37.0" + +import "go.opentelemetry.io/otel/attribute" + +// Namespace: android +const ( + // AndroidAppStateKey is the attribute Key conforming to the "android.app.state" + // semantic conventions. It represents the this attribute represents the state + // of the application. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "created" + // Note: The Android lifecycle states are defined in + // [Activity lifecycle callbacks], and from which the `OS identifiers` are + // derived. + // + // [Activity lifecycle callbacks]: https://developer.android.com/guide/components/activities/activity-lifecycle#lc + AndroidAppStateKey = attribute.Key("android.app.state") + + // AndroidOSAPILevelKey is the attribute Key conforming to the + // "android.os.api_level" semantic conventions. It represents the uniquely + // identifies the framework API revision offered by a version (`os.version`) of + // the android operating system. More information can be found in the + // [Android API levels documentation]. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "33", "32" + // + // [Android API levels documentation]: https://developer.android.com/guide/topics/manifest/uses-sdk-element#ApiLevels + AndroidOSAPILevelKey = attribute.Key("android.os.api_level") +) + +// AndroidOSAPILevel returns an attribute KeyValue conforming to the +// "android.os.api_level" semantic conventions. It represents the uniquely +// identifies the framework API revision offered by a version (`os.version`) of +// the android operating system. 
More information can be found in the +// [Android API levels documentation]. +// +// [Android API levels documentation]: https://developer.android.com/guide/topics/manifest/uses-sdk-element#ApiLevels +func AndroidOSAPILevel(val string) attribute.KeyValue { + return AndroidOSAPILevelKey.String(val) +} + +// Enum values for android.app.state +var ( + // Any time before Activity.onResume() or, if the app has no Activity, + // Context.startService() has been called in the app for the first time. + // + // Stability: development + AndroidAppStateCreated = AndroidAppStateKey.String("created") + // Any time after Activity.onPause() or, if the app has no Activity, + // Context.stopService() has been called when the app was in the foreground + // state. + // + // Stability: development + AndroidAppStateBackground = AndroidAppStateKey.String("background") + // Any time after Activity.onResume() or, if the app has no Activity, + // Context.startService() has been called when the app was in either the created + // or background states. + // + // Stability: development + AndroidAppStateForeground = AndroidAppStateKey.String("foreground") +) + +// Namespace: app +const ( + // AppBuildIDKey is the attribute Key conforming to the "app.build_id" semantic + // conventions. It represents the unique identifier for a particular build or + // compilation of the application. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "6cff0a7e-cefc-4668-96f5-1273d8b334d0", + // "9f2b833506aa6973a92fde9733e6271f", "my-app-1.0.0-code-123" + AppBuildIDKey = attribute.Key("app.build_id") + + // AppInstallationIDKey is the attribute Key conforming to the + // "app.installation.id" semantic conventions. It represents a unique identifier + // representing the installation of an application on a specific device. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "2ab2916d-a51f-4ac8-80ee-45ac31a28092" + // Note: Its value SHOULD persist across launches of the same application + // installation, including through application upgrades. + // It SHOULD change if the application is uninstalled or if all applications of + // the vendor are uninstalled. + // Additionally, users might be able to reset this value (e.g. by clearing + // application data). + // If an app is installed multiple times on the same device (e.g. in different + // accounts on Android), each `app.installation.id` SHOULD have a different + // value. + // If multiple OpenTelemetry SDKs are used within the same application, they + // SHOULD use the same value for `app.installation.id`. + // Hardware IDs (e.g. serial number, IMEI, MAC address) MUST NOT be used as the + // `app.installation.id`. + // + // For iOS, this value SHOULD be equal to the [vendor identifier]. + // + // For Android, examples of `app.installation.id` implementations include: + // + // - [Firebase Installation ID]. + // - A globally unique UUID which is persisted across sessions in your + // application. + // - [App set ID]. + // - [`Settings.getString(Settings.Secure.ANDROID_ID)`]. + // + // More information about Android identifier best practices can be found in the + // [Android user data IDs guide]. 
+ // + // [vendor identifier]: https://developer.apple.com/documentation/uikit/uidevice/identifierforvendor + // [Firebase Installation ID]: https://firebase.google.com/docs/projects/manage-installations + // [App set ID]: https://developer.android.com/identity/app-set-id + // [`Settings.getString(Settings.Secure.ANDROID_ID)`]: https://developer.android.com/reference/android/provider/Settings.Secure#ANDROID_ID + // [Android user data IDs guide]: https://developer.android.com/training/articles/user-data-ids + AppInstallationIDKey = attribute.Key("app.installation.id") + + // AppJankFrameCountKey is the attribute Key conforming to the + // "app.jank.frame_count" semantic conventions. It represents a number of frame + // renders that experienced jank. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 9, 42 + // Note: Depending on platform limitations, the value provided MAY be + // approximation. + AppJankFrameCountKey = attribute.Key("app.jank.frame_count") + + // AppJankPeriodKey is the attribute Key conforming to the "app.jank.period" + // semantic conventions. It represents the time period, in seconds, for which + // this jank is being reported. + // + // Type: double + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 1.0, 5.0, 10.24 + AppJankPeriodKey = attribute.Key("app.jank.period") + + // AppJankThresholdKey is the attribute Key conforming to the + // "app.jank.threshold" semantic conventions. It represents the minimum + // rendering threshold for this jank, in seconds. + // + // Type: double + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 0.016, 0.7, 1.024 + AppJankThresholdKey = attribute.Key("app.jank.threshold") + + // AppScreenCoordinateXKey is the attribute Key conforming to the + // "app.screen.coordinate.x" semantic conventions. It represents the x + // (horizontal) coordinate of a screen coordinate, in screen pixels. 
+ // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 0, 131 + AppScreenCoordinateXKey = attribute.Key("app.screen.coordinate.x") + + // AppScreenCoordinateYKey is the attribute Key conforming to the + // "app.screen.coordinate.y" semantic conventions. It represents the y + // (vertical) component of a screen coordinate, in screen pixels. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 12, 99 + AppScreenCoordinateYKey = attribute.Key("app.screen.coordinate.y") + + // AppWidgetIDKey is the attribute Key conforming to the "app.widget.id" + // semantic conventions. It represents an identifier that uniquely + // differentiates this widget from other widgets in the same application. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "f9bc787d-ff05-48ad-90e1-fca1d46130b3", "submit_order_1829" + // Note: A widget is an application component, typically an on-screen visual GUI + // element. + AppWidgetIDKey = attribute.Key("app.widget.id") + + // AppWidgetNameKey is the attribute Key conforming to the "app.widget.name" + // semantic conventions. It represents the name of an application widget. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "submit", "attack", "Clear Cart" + // Note: A widget is an application component, typically an on-screen visual GUI + // element. + AppWidgetNameKey = attribute.Key("app.widget.name") +) + +// AppBuildID returns an attribute KeyValue conforming to the "app.build_id" +// semantic conventions. It represents the unique identifier for a particular +// build or compilation of the application. +func AppBuildID(val string) attribute.KeyValue { + return AppBuildIDKey.String(val) +} + +// AppInstallationID returns an attribute KeyValue conforming to the +// "app.installation.id" semantic conventions. 
It represents a unique identifier +// representing the installation of an application on a specific device. +func AppInstallationID(val string) attribute.KeyValue { + return AppInstallationIDKey.String(val) +} + +// AppJankFrameCount returns an attribute KeyValue conforming to the +// "app.jank.frame_count" semantic conventions. It represents a number of frame +// renders that experienced jank. +func AppJankFrameCount(val int) attribute.KeyValue { + return AppJankFrameCountKey.Int(val) +} + +// AppJankPeriod returns an attribute KeyValue conforming to the +// "app.jank.period" semantic conventions. It represents the time period, in +// seconds, for which this jank is being reported. +func AppJankPeriod(val float64) attribute.KeyValue { + return AppJankPeriodKey.Float64(val) +} + +// AppJankThreshold returns an attribute KeyValue conforming to the +// "app.jank.threshold" semantic conventions. It represents the minimum rendering +// threshold for this jank, in seconds. +func AppJankThreshold(val float64) attribute.KeyValue { + return AppJankThresholdKey.Float64(val) +} + +// AppScreenCoordinateX returns an attribute KeyValue conforming to the +// "app.screen.coordinate.x" semantic conventions. It represents the x +// (horizontal) coordinate of a screen coordinate, in screen pixels. +func AppScreenCoordinateX(val int) attribute.KeyValue { + return AppScreenCoordinateXKey.Int(val) +} + +// AppScreenCoordinateY returns an attribute KeyValue conforming to the +// "app.screen.coordinate.y" semantic conventions. It represents the y (vertical) +// component of a screen coordinate, in screen pixels. +func AppScreenCoordinateY(val int) attribute.KeyValue { + return AppScreenCoordinateYKey.Int(val) +} + +// AppWidgetID returns an attribute KeyValue conforming to the "app.widget.id" +// semantic conventions. It represents an identifier that uniquely differentiates +// this widget from other widgets in the same application. 
+func AppWidgetID(val string) attribute.KeyValue { + return AppWidgetIDKey.String(val) +} + +// AppWidgetName returns an attribute KeyValue conforming to the +// "app.widget.name" semantic conventions. It represents the name of an +// application widget. +func AppWidgetName(val string) attribute.KeyValue { + return AppWidgetNameKey.String(val) +} + +// Namespace: artifact +const ( + // ArtifactAttestationFilenameKey is the attribute Key conforming to the + // "artifact.attestation.filename" semantic conventions. It represents the + // provenance filename of the built attestation which directly relates to the + // build artifact filename. This filename SHOULD accompany the artifact at + // publish time. See the [SLSA Relationship] specification for more information. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "golang-binary-amd64-v0.1.0.attestation", + // "docker-image-amd64-v0.1.0.intoto.json1", "release-1.tar.gz.attestation", + // "file-name-package.tar.gz.intoto.json1" + // + // [SLSA Relationship]: https://slsa.dev/spec/v1.0/distributing-provenance#relationship-between-artifacts-and-attestations + ArtifactAttestationFilenameKey = attribute.Key("artifact.attestation.filename") + + // ArtifactAttestationHashKey is the attribute Key conforming to the + // "artifact.attestation.hash" semantic conventions. It represents the full + // [hash value (see glossary)], of the built attestation. Some envelopes in the + // [software attestation space] also refer to this as the **digest**. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "1b31dfcd5b7f9267bf2ff47651df1cfb9147b9e4df1f335accf65b4cda498408" + // + // [hash value (see glossary)]: https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-5.pdf + // [software attestation space]: https://github.com/in-toto/attestation/tree/main/spec + ArtifactAttestationHashKey = attribute.Key("artifact.attestation.hash") + + // ArtifactAttestationIDKey is the attribute Key conforming to the + // "artifact.attestation.id" semantic conventions. It represents the id of the + // build [software attestation]. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "123" + // + // [software attestation]: https://slsa.dev/attestation-model + ArtifactAttestationIDKey = attribute.Key("artifact.attestation.id") + + // ArtifactFilenameKey is the attribute Key conforming to the + // "artifact.filename" semantic conventions. It represents the human readable + // file name of the artifact, typically generated during build and release + // processes. Often includes the package name and version in the file name. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "golang-binary-amd64-v0.1.0", "docker-image-amd64-v0.1.0", + // "release-1.tar.gz", "file-name-package.tar.gz" + // Note: This file name can also act as the [Package Name] + // in cases where the package ecosystem maps accordingly. + // Additionally, the artifact [can be published] + // for others, but that is not a guarantee. + // + // [Package Name]: https://slsa.dev/spec/v1.0/terminology#package-model + // [can be published]: https://slsa.dev/spec/v1.0/terminology#software-supply-chain + ArtifactFilenameKey = attribute.Key("artifact.filename") + + // ArtifactHashKey is the attribute Key conforming to the "artifact.hash" + // semantic conventions. 
It represents the full [hash value (see glossary)], + // often found in checksum.txt on a release of the artifact and used to verify + // package integrity. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "9ff4c52759e2c4ac70b7d517bc7fcdc1cda631ca0045271ddd1b192544f8a3e9" + // Note: The specific algorithm used to create the cryptographic hash value is + // not defined. In situations where an artifact has multiple + // cryptographic hashes, it is up to the implementer to choose which + // hash value to set here; this should be the most secure hash algorithm + // that is suitable for the situation and consistent with the + // corresponding attestation. The implementer can then provide the other + // hash values through an additional set of attribute extensions as they + // deem necessary. + // + // [hash value (see glossary)]: https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-5.pdf + ArtifactHashKey = attribute.Key("artifact.hash") + + // ArtifactPurlKey is the attribute Key conforming to the "artifact.purl" + // semantic conventions. It represents the [Package URL] of the + // [package artifact] provides a standard way to identify and locate the + // packaged artifact. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "pkg:github/package-url/purl-spec@1209109710924", + // "pkg:npm/foo@12.12.3" + // + // [Package URL]: https://github.com/package-url/purl-spec + // [package artifact]: https://slsa.dev/spec/v1.0/terminology#package-model + ArtifactPurlKey = attribute.Key("artifact.purl") + + // ArtifactVersionKey is the attribute Key conforming to the "artifact.version" + // semantic conventions. It represents the version of the artifact. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "v0.1.0", "1.2.1", "122691-build" + ArtifactVersionKey = attribute.Key("artifact.version") +) + +// ArtifactAttestationFilename returns an attribute KeyValue conforming to the +// "artifact.attestation.filename" semantic conventions. It represents the +// provenance filename of the built attestation which directly relates to the +// build artifact filename. This filename SHOULD accompany the artifact at +// publish time. See the [SLSA Relationship] specification for more information. +// +// [SLSA Relationship]: https://slsa.dev/spec/v1.0/distributing-provenance#relationship-between-artifacts-and-attestations +func ArtifactAttestationFilename(val string) attribute.KeyValue { + return ArtifactAttestationFilenameKey.String(val) +} + +// ArtifactAttestationHash returns an attribute KeyValue conforming to the +// "artifact.attestation.hash" semantic conventions. It represents the full +// [hash value (see glossary)], of the built attestation. Some envelopes in the +// [software attestation space] also refer to this as the **digest**. +// +// [hash value (see glossary)]: https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-5.pdf +// [software attestation space]: https://github.com/in-toto/attestation/tree/main/spec +func ArtifactAttestationHash(val string) attribute.KeyValue { + return ArtifactAttestationHashKey.String(val) +} + +// ArtifactAttestationID returns an attribute KeyValue conforming to the +// "artifact.attestation.id" semantic conventions. It represents the id of the +// build [software attestation]. +// +// [software attestation]: https://slsa.dev/attestation-model +func ArtifactAttestationID(val string) attribute.KeyValue { + return ArtifactAttestationIDKey.String(val) +} + +// ArtifactFilename returns an attribute KeyValue conforming to the +// "artifact.filename" semantic conventions. 
It represents the human readable +// file name of the artifact, typically generated during build and release +// processes. Often includes the package name and version in the file name. +func ArtifactFilename(val string) attribute.KeyValue { + return ArtifactFilenameKey.String(val) +} + +// ArtifactHash returns an attribute KeyValue conforming to the "artifact.hash" +// semantic conventions. It represents the full [hash value (see glossary)], +// often found in checksum.txt on a release of the artifact and used to verify +// package integrity. +// +// [hash value (see glossary)]: https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-5.pdf +func ArtifactHash(val string) attribute.KeyValue { + return ArtifactHashKey.String(val) +} + +// ArtifactPurl returns an attribute KeyValue conforming to the "artifact.purl" +// semantic conventions. It represents the [Package URL] of the +// [package artifact] provides a standard way to identify and locate the packaged +// artifact. +// +// [Package URL]: https://github.com/package-url/purl-spec +// [package artifact]: https://slsa.dev/spec/v1.0/terminology#package-model +func ArtifactPurl(val string) attribute.KeyValue { + return ArtifactPurlKey.String(val) +} + +// ArtifactVersion returns an attribute KeyValue conforming to the +// "artifact.version" semantic conventions. It represents the version of the +// artifact. +func ArtifactVersion(val string) attribute.KeyValue { + return ArtifactVersionKey.String(val) +} + +// Namespace: aws +const ( + // AWSBedrockGuardrailIDKey is the attribute Key conforming to the + // "aws.bedrock.guardrail.id" semantic conventions. It represents the unique + // identifier of the AWS Bedrock Guardrail. A [guardrail] helps safeguard and + // prevent unwanted behavior from model responses or user messages. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "sgi5gkybzqak" + // + // [guardrail]: https://docs.aws.amazon.com/bedrock/latest/userguide/guardrails.html + AWSBedrockGuardrailIDKey = attribute.Key("aws.bedrock.guardrail.id") + + // AWSBedrockKnowledgeBaseIDKey is the attribute Key conforming to the + // "aws.bedrock.knowledge_base.id" semantic conventions. It represents the + // unique identifier of the AWS Bedrock Knowledge base. A [knowledge base] is a + // bank of information that can be queried by models to generate more relevant + // responses and augment prompts. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "XFWUPB9PAW" + // + // [knowledge base]: https://docs.aws.amazon.com/bedrock/latest/userguide/knowledge-base.html + AWSBedrockKnowledgeBaseIDKey = attribute.Key("aws.bedrock.knowledge_base.id") + + // AWSDynamoDBAttributeDefinitionsKey is the attribute Key conforming to the + // "aws.dynamodb.attribute_definitions" semantic conventions. It represents the + // JSON-serialized value of each item in the `AttributeDefinitions` request + // field. + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "{ "AttributeName": "string", "AttributeType": "string" }" + AWSDynamoDBAttributeDefinitionsKey = attribute.Key("aws.dynamodb.attribute_definitions") + + // AWSDynamoDBAttributesToGetKey is the attribute Key conforming to the + // "aws.dynamodb.attributes_to_get" semantic conventions. It represents the + // value of the `AttributesToGet` request parameter. + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "lives", "id" + AWSDynamoDBAttributesToGetKey = attribute.Key("aws.dynamodb.attributes_to_get") + + // AWSDynamoDBConsistentReadKey is the attribute Key conforming to the + // "aws.dynamodb.consistent_read" semantic conventions. 
It represents the value + // of the `ConsistentRead` request parameter. + // + // Type: boolean + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + AWSDynamoDBConsistentReadKey = attribute.Key("aws.dynamodb.consistent_read") + + // AWSDynamoDBConsumedCapacityKey is the attribute Key conforming to the + // "aws.dynamodb.consumed_capacity" semantic conventions. It represents the + // JSON-serialized value of each item in the `ConsumedCapacity` response field. + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "{ "CapacityUnits": number, "GlobalSecondaryIndexes": { "string" : + // { "CapacityUnits": number, "ReadCapacityUnits": number, "WriteCapacityUnits": + // number } }, "LocalSecondaryIndexes": { "string" : { "CapacityUnits": number, + // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }, + // "ReadCapacityUnits": number, "Table": { "CapacityUnits": number, + // "ReadCapacityUnits": number, "WriteCapacityUnits": number }, "TableName": + // "string", "WriteCapacityUnits": number }" + AWSDynamoDBConsumedCapacityKey = attribute.Key("aws.dynamodb.consumed_capacity") + + // AWSDynamoDBCountKey is the attribute Key conforming to the + // "aws.dynamodb.count" semantic conventions. It represents the value of the + // `Count` response parameter. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 10 + AWSDynamoDBCountKey = attribute.Key("aws.dynamodb.count") + + // AWSDynamoDBExclusiveStartTableKey is the attribute Key conforming to the + // "aws.dynamodb.exclusive_start_table" semantic conventions. It represents the + // value of the `ExclusiveStartTableName` request parameter. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Users", "CatsTable" + AWSDynamoDBExclusiveStartTableKey = attribute.Key("aws.dynamodb.exclusive_start_table") + + // AWSDynamoDBGlobalSecondaryIndexUpdatesKey is the attribute Key conforming to + // the "aws.dynamodb.global_secondary_index_updates" semantic conventions. It + // represents the JSON-serialized value of each item in the + // `GlobalSecondaryIndexUpdates` request field. + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "{ "Create": { "IndexName": "string", "KeySchema": [ { + // "AttributeName": "string", "KeyType": "string" } ], "Projection": { + // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" }, + // "ProvisionedThroughput": { "ReadCapacityUnits": number, "WriteCapacityUnits": + // number } }" + AWSDynamoDBGlobalSecondaryIndexUpdatesKey = attribute.Key("aws.dynamodb.global_secondary_index_updates") + + // AWSDynamoDBGlobalSecondaryIndexesKey is the attribute Key conforming to the + // "aws.dynamodb.global_secondary_indexes" semantic conventions. It represents + // the JSON-serialized value of each item of the `GlobalSecondaryIndexes` + // request field. + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "{ "IndexName": "string", "KeySchema": [ { "AttributeName": + // "string", "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ + // "string" ], "ProjectionType": "string" }, "ProvisionedThroughput": { + // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }" + AWSDynamoDBGlobalSecondaryIndexesKey = attribute.Key("aws.dynamodb.global_secondary_indexes") + + // AWSDynamoDBIndexNameKey is the attribute Key conforming to the + // "aws.dynamodb.index_name" semantic conventions. It represents the value of + // the `IndexName` request parameter. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "name_to_group" + AWSDynamoDBIndexNameKey = attribute.Key("aws.dynamodb.index_name") + + // AWSDynamoDBItemCollectionMetricsKey is the attribute Key conforming to the + // "aws.dynamodb.item_collection_metrics" semantic conventions. It represents + // the JSON-serialized value of the `ItemCollectionMetrics` response field. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "{ "string" : [ { "ItemCollectionKey": { "string" : { "B": blob, + // "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": { "string" : + // "AttributeValue" }, "N": "string", "NS": [ "string" ], "NULL": boolean, "S": + // "string", "SS": [ "string" ] } }, "SizeEstimateRangeGB": [ number ] } ] }" + AWSDynamoDBItemCollectionMetricsKey = attribute.Key("aws.dynamodb.item_collection_metrics") + + // AWSDynamoDBLimitKey is the attribute Key conforming to the + // "aws.dynamodb.limit" semantic conventions. It represents the value of the + // `Limit` request parameter. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 10 + AWSDynamoDBLimitKey = attribute.Key("aws.dynamodb.limit") + + // AWSDynamoDBLocalSecondaryIndexesKey is the attribute Key conforming to the + // "aws.dynamodb.local_secondary_indexes" semantic conventions. It represents + // the JSON-serialized value of each item of the `LocalSecondaryIndexes` request + // field. 
+ // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "{ "IndexArn": "string", "IndexName": "string", "IndexSizeBytes": + // number, "ItemCount": number, "KeySchema": [ { "AttributeName": "string", + // "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ "string" ], + // "ProjectionType": "string" } }" + AWSDynamoDBLocalSecondaryIndexesKey = attribute.Key("aws.dynamodb.local_secondary_indexes") + + // AWSDynamoDBProjectionKey is the attribute Key conforming to the + // "aws.dynamodb.projection" semantic conventions. It represents the value of + // the `ProjectionExpression` request parameter. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Title", "Title, Price, Color", "Title, Description, RelatedItems, + // ProductReviews" + AWSDynamoDBProjectionKey = attribute.Key("aws.dynamodb.projection") + + // AWSDynamoDBProvisionedReadCapacityKey is the attribute Key conforming to the + // "aws.dynamodb.provisioned_read_capacity" semantic conventions. It represents + // the value of the `ProvisionedThroughput.ReadCapacityUnits` request parameter. + // + // Type: double + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 1.0, 2.0 + AWSDynamoDBProvisionedReadCapacityKey = attribute.Key("aws.dynamodb.provisioned_read_capacity") + + // AWSDynamoDBProvisionedWriteCapacityKey is the attribute Key conforming to the + // "aws.dynamodb.provisioned_write_capacity" semantic conventions. It represents + // the value of the `ProvisionedThroughput.WriteCapacityUnits` request + // parameter. + // + // Type: double + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 1.0, 2.0 + AWSDynamoDBProvisionedWriteCapacityKey = attribute.Key("aws.dynamodb.provisioned_write_capacity") + + // AWSDynamoDBScanForwardKey is the attribute Key conforming to the + // "aws.dynamodb.scan_forward" semantic conventions. 
It represents the value of + // the `ScanIndexForward` request parameter. + // + // Type: boolean + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + AWSDynamoDBScanForwardKey = attribute.Key("aws.dynamodb.scan_forward") + + // AWSDynamoDBScannedCountKey is the attribute Key conforming to the + // "aws.dynamodb.scanned_count" semantic conventions. It represents the value of + // the `ScannedCount` response parameter. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 50 + AWSDynamoDBScannedCountKey = attribute.Key("aws.dynamodb.scanned_count") + + // AWSDynamoDBSegmentKey is the attribute Key conforming to the + // "aws.dynamodb.segment" semantic conventions. It represents the value of the + // `Segment` request parameter. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 10 + AWSDynamoDBSegmentKey = attribute.Key("aws.dynamodb.segment") + + // AWSDynamoDBSelectKey is the attribute Key conforming to the + // "aws.dynamodb.select" semantic conventions. It represents the value of the + // `Select` request parameter. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "ALL_ATTRIBUTES", "COUNT" + AWSDynamoDBSelectKey = attribute.Key("aws.dynamodb.select") + + // AWSDynamoDBTableCountKey is the attribute Key conforming to the + // "aws.dynamodb.table_count" semantic conventions. It represents the number of + // items in the `TableNames` response parameter. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 20 + AWSDynamoDBTableCountKey = attribute.Key("aws.dynamodb.table_count") + + // AWSDynamoDBTableNamesKey is the attribute Key conforming to the + // "aws.dynamodb.table_names" semantic conventions. It represents the keys in + // the `RequestItems` object field. 
+ // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Users", "Cats" + AWSDynamoDBTableNamesKey = attribute.Key("aws.dynamodb.table_names") + + // AWSDynamoDBTotalSegmentsKey is the attribute Key conforming to the + // "aws.dynamodb.total_segments" semantic conventions. It represents the value + // of the `TotalSegments` request parameter. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 100 + AWSDynamoDBTotalSegmentsKey = attribute.Key("aws.dynamodb.total_segments") + + // AWSECSClusterARNKey is the attribute Key conforming to the + // "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an + // [ECS cluster]. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster" + // + // [ECS cluster]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html + AWSECSClusterARNKey = attribute.Key("aws.ecs.cluster.arn") + + // AWSECSContainerARNKey is the attribute Key conforming to the + // "aws.ecs.container.arn" semantic conventions. It represents the Amazon + // Resource Name (ARN) of an [ECS container instance]. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // "arn:aws:ecs:us-west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9" + // + // [ECS container instance]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html + AWSECSContainerARNKey = attribute.Key("aws.ecs.container.arn") + + // AWSECSLaunchtypeKey is the attribute Key conforming to the + // "aws.ecs.launchtype" semantic conventions. It represents the [launch type] + // for an ECS task. 
+ // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // + // [launch type]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html + AWSECSLaunchtypeKey = attribute.Key("aws.ecs.launchtype") + + // AWSECSTaskARNKey is the attribute Key conforming to the "aws.ecs.task.arn" + // semantic conventions. It represents the ARN of a running [ECS task]. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // "arn:aws:ecs:us-west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b", + // "arn:aws:ecs:us-west-1:123456789123:task/my-cluster/task-id/23ebb8ac-c18f-46c6-8bbe-d55d0e37cfbd" + // + // [ECS task]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-account-settings.html#ecs-resource-ids + AWSECSTaskARNKey = attribute.Key("aws.ecs.task.arn") + + // AWSECSTaskFamilyKey is the attribute Key conforming to the + // "aws.ecs.task.family" semantic conventions. It represents the family name of + // the [ECS task definition] used to create the ECS task. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "opentelemetry-family" + // + // [ECS task definition]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html + AWSECSTaskFamilyKey = attribute.Key("aws.ecs.task.family") + + // AWSECSTaskIDKey is the attribute Key conforming to the "aws.ecs.task.id" + // semantic conventions. It represents the ID of a running ECS task. The ID MUST + // be extracted from `task.arn`. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "10838bed-421f-43ef-870a-f43feacbbb5b", + // "23ebb8ac-c18f-46c6-8bbe-d55d0e37cfbd" + AWSECSTaskIDKey = attribute.Key("aws.ecs.task.id") + + // AWSECSTaskRevisionKey is the attribute Key conforming to the + // "aws.ecs.task.revision" semantic conventions. 
It represents the revision for + // the task definition used to create the ECS task. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "8", "26" + AWSECSTaskRevisionKey = attribute.Key("aws.ecs.task.revision") + + // AWSEKSClusterARNKey is the attribute Key conforming to the + // "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an EKS + // cluster. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster" + AWSEKSClusterARNKey = attribute.Key("aws.eks.cluster.arn") + + // AWSExtendedRequestIDKey is the attribute Key conforming to the + // "aws.extended_request_id" semantic conventions. It represents the AWS + // extended request ID as returned in the response header `x-amz-id-2`. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // "wzHcyEWfmOGDIE5QOhTAqFDoDWP3y8IUvpNINCwL9N4TEHbUw0/gZJ+VZTmCNCWR7fezEN3eCiQ=" + AWSExtendedRequestIDKey = attribute.Key("aws.extended_request_id") + + // AWSKinesisStreamNameKey is the attribute Key conforming to the + // "aws.kinesis.stream_name" semantic conventions. It represents the name of the + // AWS Kinesis [stream] the request refers to. Corresponds to the + // `--stream-name` parameter of the Kinesis [describe-stream] operation. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "some-stream-name" + // + // [stream]: https://docs.aws.amazon.com/streams/latest/dev/introduction.html + // [describe-stream]: https://docs.aws.amazon.com/cli/latest/reference/kinesis/describe-stream.html + AWSKinesisStreamNameKey = attribute.Key("aws.kinesis.stream_name") + + // AWSLambdaInvokedARNKey is the attribute Key conforming to the + // "aws.lambda.invoked_arn" semantic conventions. 
It represents the full invoked + // ARN as provided on the `Context` passed to the function ( + // `Lambda-Runtime-Invoked-Function-Arn` header on the + // `/runtime/invocation/next` applicable). + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "arn:aws:lambda:us-east-1:123456:function:myfunction:myalias" + // Note: This may be different from `cloud.resource_id` if an alias is involved. + AWSLambdaInvokedARNKey = attribute.Key("aws.lambda.invoked_arn") + + // AWSLambdaResourceMappingIDKey is the attribute Key conforming to the + // "aws.lambda.resource_mapping.id" semantic conventions. It represents the UUID + // of the [AWS Lambda EventSource Mapping]. An event source is mapped to a lambda + // function. Its contents are read by Lambda and used to trigger a function. + // This isn't available in the lambda execution context or the lambda runtime + // environment. This is going to be populated by the AWS SDK for each language + // when that UUID is present. Some of these operations are + // Create/Delete/Get/List/Update EventSourceMapping. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "587ad24b-03b9-4413-8202-bbd56b36e5b7" + // + // [AWS Lambda EventSource Mapping]: https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-lambda-eventsourcemapping.html + AWSLambdaResourceMappingIDKey = attribute.Key("aws.lambda.resource_mapping.id") + + // AWSLogGroupARNsKey is the attribute Key conforming to the + // "aws.log.group.arns" semantic conventions. It represents the Amazon Resource + // Name(s) (ARN) of the AWS log group(s). + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*" + // Note: See the [log group ARN format documentation]. 
+ // + // [log group ARN format documentation]: https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format + AWSLogGroupARNsKey = attribute.Key("aws.log.group.arns") + + // AWSLogGroupNamesKey is the attribute Key conforming to the + // "aws.log.group.names" semantic conventions. It represents the name(s) of the + // AWS log group(s) an application is writing to. + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "/aws/lambda/my-function", "opentelemetry-service" + // Note: Multiple log groups must be supported for cases like multi-container + // applications, where a single application has sidecar containers, and each + // write to their own log group. + AWSLogGroupNamesKey = attribute.Key("aws.log.group.names") + + // AWSLogStreamARNsKey is the attribute Key conforming to the + // "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of the + // AWS log stream(s). + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // "arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log-stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b" + // Note: See the [log stream ARN format documentation]. One log group can + // contain several log streams, so these ARNs necessarily identify both a log + // group and a log stream. + // + // [log stream ARN format documentation]: https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format + AWSLogStreamARNsKey = attribute.Key("aws.log.stream.arns") + + // AWSLogStreamNamesKey is the attribute Key conforming to the + // "aws.log.stream.names" semantic conventions. It represents the name(s) of the + // AWS log stream(s) an application is writing to. 
+ // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "logs/main/10838bed-421f-43ef-870a-f43feacbbb5b" + AWSLogStreamNamesKey = attribute.Key("aws.log.stream.names") + + // AWSRequestIDKey is the attribute Key conforming to the "aws.request_id" + // semantic conventions. It represents the AWS request ID as returned in the + // response headers `x-amzn-requestid`, `x-amzn-request-id` or + // `x-amz-request-id`. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "79b9da39-b7ae-508a-a6bc-864b2829c622", "C9ER4AJX75574TDJ" + AWSRequestIDKey = attribute.Key("aws.request_id") + + // AWSS3BucketKey is the attribute Key conforming to the "aws.s3.bucket" + // semantic conventions. It represents the S3 bucket name the request refers to. + // Corresponds to the `--bucket` parameter of the [S3 API] operations. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "some-bucket-name" + // Note: The `bucket` attribute is applicable to all S3 operations that + // reference a bucket, i.e. that require the bucket name as a mandatory + // parameter. + // This applies to almost all S3 operations except `list-buckets`. + // + // [S3 API]: https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html + AWSS3BucketKey = attribute.Key("aws.s3.bucket") + + // AWSS3CopySourceKey is the attribute Key conforming to the + // "aws.s3.copy_source" semantic conventions. It represents the source object + // (in the form `bucket`/`key`) for the copy operation. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "someFile.yml" + // Note: The `copy_source` attribute applies to S3 copy operations and + // corresponds to the `--copy-source` parameter + // of the [copy-object operation within the S3 API]. 
+ // This applies in particular to the following operations: + // + // - [copy-object] + // - [upload-part-copy] + // + // + // [copy-object operation within the S3 API]: https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html + // [copy-object]: https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html + // [upload-part-copy]: https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html + AWSS3CopySourceKey = attribute.Key("aws.s3.copy_source") + + // AWSS3DeleteKey is the attribute Key conforming to the "aws.s3.delete" + // semantic conventions. It represents the delete request container that + // specifies the objects to be deleted. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // "Objects=[{Key=string,VersionId=string},{Key=string,VersionId=string}],Quiet=boolean" + // Note: The `delete` attribute is only applicable to the [delete-object] + // operation. + // The `delete` attribute corresponds to the `--delete` parameter of the + // [delete-objects operation within the S3 API]. + // + // [delete-object]: https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html + // [delete-objects operation within the S3 API]: https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-objects.html + AWSS3DeleteKey = attribute.Key("aws.s3.delete") + + // AWSS3KeyKey is the attribute Key conforming to the "aws.s3.key" semantic + // conventions. It represents the S3 object key the request refers to. + // Corresponds to the `--key` parameter of the [S3 API] operations. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "someFile.yml" + // Note: The `key` attribute is applicable to all object-related S3 operations, + // i.e. that require the object key as a mandatory parameter. 
+ // This applies in particular to the following operations: + // + // - [copy-object] + // - [delete-object] + // - [get-object] + // - [head-object] + // - [put-object] + // - [restore-object] + // - [select-object-content] + // - [abort-multipart-upload] + // - [complete-multipart-upload] + // - [create-multipart-upload] + // - [list-parts] + // - [upload-part] + // - [upload-part-copy] + // + // + // [S3 API]: https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html + // [copy-object]: https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html + // [delete-object]: https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html + // [get-object]: https://docs.aws.amazon.com/cli/latest/reference/s3api/get-object.html + // [head-object]: https://docs.aws.amazon.com/cli/latest/reference/s3api/head-object.html + // [put-object]: https://docs.aws.amazon.com/cli/latest/reference/s3api/put-object.html + // [restore-object]: https://docs.aws.amazon.com/cli/latest/reference/s3api/restore-object.html + // [select-object-content]: https://docs.aws.amazon.com/cli/latest/reference/s3api/select-object-content.html + // [abort-multipart-upload]: https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html + // [complete-multipart-upload]: https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html + // [create-multipart-upload]: https://docs.aws.amazon.com/cli/latest/reference/s3api/create-multipart-upload.html + // [list-parts]: https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html + // [upload-part]: https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html + // [upload-part-copy]: https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html + AWSS3KeyKey = attribute.Key("aws.s3.key") + + // AWSS3PartNumberKey is the attribute Key conforming to the + // "aws.s3.part_number" semantic conventions. 
It represents the part number of + // the part being uploaded in a multipart-upload operation. This is a positive + // integer between 1 and 10,000. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 3456 + // Note: The `part_number` attribute is only applicable to the [upload-part] + // and [upload-part-copy] operations. + // The `part_number` attribute corresponds to the `--part-number` parameter of + // the + // [upload-part operation within the S3 API]. + // + // [upload-part]: https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html + // [upload-part-copy]: https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html + // [upload-part operation within the S3 API]: https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html + AWSS3PartNumberKey = attribute.Key("aws.s3.part_number") + + // AWSS3UploadIDKey is the attribute Key conforming to the "aws.s3.upload_id" + // semantic conventions. It represents the upload ID that identifies the + // multipart upload. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "dfRtDYWFbkRONycy.Yxwh66Yjlx.cph0gtNBtJ" + // Note: The `upload_id` attribute applies to S3 multipart-upload operations and + // corresponds to the `--upload-id` parameter + // of the [S3 API] multipart operations. 
+ // This applies in particular to the following operations: + // + // - [abort-multipart-upload] + // - [complete-multipart-upload] + // - [list-parts] + // - [upload-part] + // - [upload-part-copy] + // + // + // [S3 API]: https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html + // [abort-multipart-upload]: https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html + // [complete-multipart-upload]: https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html + // [list-parts]: https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html + // [upload-part]: https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html + // [upload-part-copy]: https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html + AWSS3UploadIDKey = attribute.Key("aws.s3.upload_id") + + // AWSSecretsmanagerSecretARNKey is the attribute Key conforming to the + // "aws.secretsmanager.secret.arn" semantic conventions. It represents the ARN + // of the Secret stored in the Secrets Manager. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // "arn:aws:secretsmanager:us-east-1:123456789012:secret:SecretName-6RandomCharacters" + AWSSecretsmanagerSecretARNKey = attribute.Key("aws.secretsmanager.secret.arn") + + // AWSSNSTopicARNKey is the attribute Key conforming to the "aws.sns.topic.arn" + // semantic conventions. It represents the ARN of the AWS SNS Topic. An Amazon + // SNS [topic] is a logical access point that acts as a communication channel. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "arn:aws:sns:us-east-1:123456789012:mystack-mytopic-NZJ5JSMVGFIE" + // + // [topic]: https://docs.aws.amazon.com/sns/latest/dg/sns-create-topic.html + AWSSNSTopicARNKey = attribute.Key("aws.sns.topic.arn") + + // AWSSQSQueueURLKey is the attribute Key conforming to the "aws.sqs.queue.url" + // semantic conventions. It represents the URL of the AWS SQS Queue. It's a + // unique identifier for a queue in Amazon Simple Queue Service (SQS) and is + // used to access the queue and perform actions on it. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "https://sqs.us-east-1.amazonaws.com/123456789012/MyQueue" + AWSSQSQueueURLKey = attribute.Key("aws.sqs.queue.url") + + // AWSStepFunctionsActivityARNKey is the attribute Key conforming to the + // "aws.step_functions.activity.arn" semantic conventions. It represents the ARN + // of the AWS Step Functions Activity. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "arn:aws:states:us-east-1:123456789012:activity:get-greeting" + AWSStepFunctionsActivityARNKey = attribute.Key("aws.step_functions.activity.arn") + + // AWSStepFunctionsStateMachineARNKey is the attribute Key conforming to the + // "aws.step_functions.state_machine.arn" semantic conventions. It represents + // the ARN of the AWS Step Functions State Machine. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // "arn:aws:states:us-east-1:123456789012:stateMachine:myStateMachine:1" + AWSStepFunctionsStateMachineARNKey = attribute.Key("aws.step_functions.state_machine.arn") +) + +// AWSBedrockGuardrailID returns an attribute KeyValue conforming to the +// "aws.bedrock.guardrail.id" semantic conventions. It represents the unique +// identifier of the AWS Bedrock Guardrail. 
A [guardrail] helps safeguard and +// prevent unwanted behavior from model responses or user messages. +// +// [guardrail]: https://docs.aws.amazon.com/bedrock/latest/userguide/guardrails.html +func AWSBedrockGuardrailID(val string) attribute.KeyValue { + return AWSBedrockGuardrailIDKey.String(val) +} + +// AWSBedrockKnowledgeBaseID returns an attribute KeyValue conforming to the +// "aws.bedrock.knowledge_base.id" semantic conventions. It represents the unique +// identifier of the AWS Bedrock Knowledge base. A [knowledge base] is a bank of +// information that can be queried by models to generate more relevant responses +// and augment prompts. +// +// [knowledge base]: https://docs.aws.amazon.com/bedrock/latest/userguide/knowledge-base.html +func AWSBedrockKnowledgeBaseID(val string) attribute.KeyValue { + return AWSBedrockKnowledgeBaseIDKey.String(val) +} + +// AWSDynamoDBAttributeDefinitions returns an attribute KeyValue conforming to +// the "aws.dynamodb.attribute_definitions" semantic conventions. It represents +// the JSON-serialized value of each item in the `AttributeDefinitions` request +// field. +func AWSDynamoDBAttributeDefinitions(val ...string) attribute.KeyValue { + return AWSDynamoDBAttributeDefinitionsKey.StringSlice(val) +} + +// AWSDynamoDBAttributesToGet returns an attribute KeyValue conforming to the +// "aws.dynamodb.attributes_to_get" semantic conventions. It represents the value +// of the `AttributesToGet` request parameter. +func AWSDynamoDBAttributesToGet(val ...string) attribute.KeyValue { + return AWSDynamoDBAttributesToGetKey.StringSlice(val) +} + +// AWSDynamoDBConsistentRead returns an attribute KeyValue conforming to the +// "aws.dynamodb.consistent_read" semantic conventions. It represents the value +// of the `ConsistentRead` request parameter. 
+func AWSDynamoDBConsistentRead(val bool) attribute.KeyValue { + return AWSDynamoDBConsistentReadKey.Bool(val) +} + +// AWSDynamoDBConsumedCapacity returns an attribute KeyValue conforming to the +// "aws.dynamodb.consumed_capacity" semantic conventions. It represents the +// JSON-serialized value of each item in the `ConsumedCapacity` response field. +func AWSDynamoDBConsumedCapacity(val ...string) attribute.KeyValue { + return AWSDynamoDBConsumedCapacityKey.StringSlice(val) +} + +// AWSDynamoDBCount returns an attribute KeyValue conforming to the +// "aws.dynamodb.count" semantic conventions. It represents the value of the +// `Count` response parameter. +func AWSDynamoDBCount(val int) attribute.KeyValue { + return AWSDynamoDBCountKey.Int(val) +} + +// AWSDynamoDBExclusiveStartTable returns an attribute KeyValue conforming to the +// "aws.dynamodb.exclusive_start_table" semantic conventions. It represents the +// value of the `ExclusiveStartTableName` request parameter. +func AWSDynamoDBExclusiveStartTable(val string) attribute.KeyValue { + return AWSDynamoDBExclusiveStartTableKey.String(val) +} + +// AWSDynamoDBGlobalSecondaryIndexUpdates returns an attribute KeyValue +// conforming to the "aws.dynamodb.global_secondary_index_updates" semantic +// conventions. It represents the JSON-serialized value of each item in the +// `GlobalSecondaryIndexUpdates` request field. +func AWSDynamoDBGlobalSecondaryIndexUpdates(val ...string) attribute.KeyValue { + return AWSDynamoDBGlobalSecondaryIndexUpdatesKey.StringSlice(val) +} + +// AWSDynamoDBGlobalSecondaryIndexes returns an attribute KeyValue conforming to +// the "aws.dynamodb.global_secondary_indexes" semantic conventions. It +// represents the JSON-serialized value of each item of the +// `GlobalSecondaryIndexes` request field. 
+func AWSDynamoDBGlobalSecondaryIndexes(val ...string) attribute.KeyValue { + return AWSDynamoDBGlobalSecondaryIndexesKey.StringSlice(val) +} + +// AWSDynamoDBIndexName returns an attribute KeyValue conforming to the +// "aws.dynamodb.index_name" semantic conventions. It represents the value of the +// `IndexName` request parameter. +func AWSDynamoDBIndexName(val string) attribute.KeyValue { + return AWSDynamoDBIndexNameKey.String(val) +} + +// AWSDynamoDBItemCollectionMetrics returns an attribute KeyValue conforming to +// the "aws.dynamodb.item_collection_metrics" semantic conventions. It represents +// the JSON-serialized value of the `ItemCollectionMetrics` response field. +func AWSDynamoDBItemCollectionMetrics(val string) attribute.KeyValue { + return AWSDynamoDBItemCollectionMetricsKey.String(val) +} + +// AWSDynamoDBLimit returns an attribute KeyValue conforming to the +// "aws.dynamodb.limit" semantic conventions. It represents the value of the +// `Limit` request parameter. +func AWSDynamoDBLimit(val int) attribute.KeyValue { + return AWSDynamoDBLimitKey.Int(val) +} + +// AWSDynamoDBLocalSecondaryIndexes returns an attribute KeyValue conforming to +// the "aws.dynamodb.local_secondary_indexes" semantic conventions. It represents +// the JSON-serialized value of each item of the `LocalSecondaryIndexes` request +// field. +func AWSDynamoDBLocalSecondaryIndexes(val ...string) attribute.KeyValue { + return AWSDynamoDBLocalSecondaryIndexesKey.StringSlice(val) +} + +// AWSDynamoDBProjection returns an attribute KeyValue conforming to the +// "aws.dynamodb.projection" semantic conventions. It represents the value of the +// `ProjectionExpression` request parameter. +func AWSDynamoDBProjection(val string) attribute.KeyValue { + return AWSDynamoDBProjectionKey.String(val) +} + +// AWSDynamoDBProvisionedReadCapacity returns an attribute KeyValue conforming to +// the "aws.dynamodb.provisioned_read_capacity" semantic conventions. 
It +// represents the value of the `ProvisionedThroughput.ReadCapacityUnits` request +// parameter. +func AWSDynamoDBProvisionedReadCapacity(val float64) attribute.KeyValue { + return AWSDynamoDBProvisionedReadCapacityKey.Float64(val) +} + +// AWSDynamoDBProvisionedWriteCapacity returns an attribute KeyValue conforming +// to the "aws.dynamodb.provisioned_write_capacity" semantic conventions. It +// represents the value of the `ProvisionedThroughput.WriteCapacityUnits` request +// parameter. +func AWSDynamoDBProvisionedWriteCapacity(val float64) attribute.KeyValue { + return AWSDynamoDBProvisionedWriteCapacityKey.Float64(val) +} + +// AWSDynamoDBScanForward returns an attribute KeyValue conforming to the +// "aws.dynamodb.scan_forward" semantic conventions. It represents the value of +// the `ScanIndexForward` request parameter. +func AWSDynamoDBScanForward(val bool) attribute.KeyValue { + return AWSDynamoDBScanForwardKey.Bool(val) +} + +// AWSDynamoDBScannedCount returns an attribute KeyValue conforming to the +// "aws.dynamodb.scanned_count" semantic conventions. It represents the value of +// the `ScannedCount` response parameter. +func AWSDynamoDBScannedCount(val int) attribute.KeyValue { + return AWSDynamoDBScannedCountKey.Int(val) +} + +// AWSDynamoDBSegment returns an attribute KeyValue conforming to the +// "aws.dynamodb.segment" semantic conventions. It represents the value of the +// `Segment` request parameter. +func AWSDynamoDBSegment(val int) attribute.KeyValue { + return AWSDynamoDBSegmentKey.Int(val) +} + +// AWSDynamoDBSelect returns an attribute KeyValue conforming to the +// "aws.dynamodb.select" semantic conventions. It represents the value of the +// `Select` request parameter. +func AWSDynamoDBSelect(val string) attribute.KeyValue { + return AWSDynamoDBSelectKey.String(val) +} + +// AWSDynamoDBTableCount returns an attribute KeyValue conforming to the +// "aws.dynamodb.table_count" semantic conventions. 
It represents the number of +// items in the `TableNames` response parameter. +func AWSDynamoDBTableCount(val int) attribute.KeyValue { + return AWSDynamoDBTableCountKey.Int(val) +} + +// AWSDynamoDBTableNames returns an attribute KeyValue conforming to the +// "aws.dynamodb.table_names" semantic conventions. It represents the keys in the +// `RequestItems` object field. +func AWSDynamoDBTableNames(val ...string) attribute.KeyValue { + return AWSDynamoDBTableNamesKey.StringSlice(val) +} + +// AWSDynamoDBTotalSegments returns an attribute KeyValue conforming to the +// "aws.dynamodb.total_segments" semantic conventions. It represents the value of +// the `TotalSegments` request parameter. +func AWSDynamoDBTotalSegments(val int) attribute.KeyValue { + return AWSDynamoDBTotalSegmentsKey.Int(val) +} + +// AWSECSClusterARN returns an attribute KeyValue conforming to the +// "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an +// [ECS cluster]. +// +// [ECS cluster]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html +func AWSECSClusterARN(val string) attribute.KeyValue { + return AWSECSClusterARNKey.String(val) +} + +// AWSECSContainerARN returns an attribute KeyValue conforming to the +// "aws.ecs.container.arn" semantic conventions. It represents the Amazon +// Resource Name (ARN) of an [ECS container instance]. +// +// [ECS container instance]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html +func AWSECSContainerARN(val string) attribute.KeyValue { + return AWSECSContainerARNKey.String(val) +} + +// AWSECSTaskARN returns an attribute KeyValue conforming to the +// "aws.ecs.task.arn" semantic conventions. It represents the ARN of a running +// [ECS task]. 
+// +// [ECS task]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-account-settings.html#ecs-resource-ids +func AWSECSTaskARN(val string) attribute.KeyValue { + return AWSECSTaskARNKey.String(val) +} + +// AWSECSTaskFamily returns an attribute KeyValue conforming to the +// "aws.ecs.task.family" semantic conventions. It represents the family name of +// the [ECS task definition] used to create the ECS task. +// +// [ECS task definition]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html +func AWSECSTaskFamily(val string) attribute.KeyValue { + return AWSECSTaskFamilyKey.String(val) +} + +// AWSECSTaskID returns an attribute KeyValue conforming to the "aws.ecs.task.id" +// semantic conventions. It represents the ID of a running ECS task. The ID MUST +// be extracted from `task.arn`. +func AWSECSTaskID(val string) attribute.KeyValue { + return AWSECSTaskIDKey.String(val) +} + +// AWSECSTaskRevision returns an attribute KeyValue conforming to the +// "aws.ecs.task.revision" semantic conventions. It represents the revision for +// the task definition used to create the ECS task. +func AWSECSTaskRevision(val string) attribute.KeyValue { + return AWSECSTaskRevisionKey.String(val) +} + +// AWSEKSClusterARN returns an attribute KeyValue conforming to the +// "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an EKS +// cluster. +func AWSEKSClusterARN(val string) attribute.KeyValue { + return AWSEKSClusterARNKey.String(val) +} + +// AWSExtendedRequestID returns an attribute KeyValue conforming to the +// "aws.extended_request_id" semantic conventions. It represents the AWS extended +// request ID as returned in the response header `x-amz-id-2`. +func AWSExtendedRequestID(val string) attribute.KeyValue { + return AWSExtendedRequestIDKey.String(val) +} + +// AWSKinesisStreamName returns an attribute KeyValue conforming to the +// "aws.kinesis.stream_name" semantic conventions. 
It represents the name of the +// AWS Kinesis [stream] the request refers to. Corresponds to the `--stream-name` +// parameter of the Kinesis [describe-stream] operation. +// +// [stream]: https://docs.aws.amazon.com/streams/latest/dev/introduction.html +// [describe-stream]: https://docs.aws.amazon.com/cli/latest/reference/kinesis/describe-stream.html +func AWSKinesisStreamName(val string) attribute.KeyValue { + return AWSKinesisStreamNameKey.String(val) +} + +// AWSLambdaInvokedARN returns an attribute KeyValue conforming to the +// "aws.lambda.invoked_arn" semantic conventions. It represents the full invoked +// ARN as provided on the `Context` passed to the function ( +// `Lambda-Runtime-Invoked-Function-Arn` header on the `/runtime/invocation/next` +// applicable). +func AWSLambdaInvokedARN(val string) attribute.KeyValue { + return AWSLambdaInvokedARNKey.String(val) +} + +// AWSLambdaResourceMappingID returns an attribute KeyValue conforming to the +// "aws.lambda.resource_mapping.id" semantic conventions. It represents the UUID +// of the [AWS Lambda EvenSource Mapping]. An event source is mapped to a lambda +// function. It's contents are read by Lambda and used to trigger a function. +// This isn't available in the lambda execution context or the lambda runtime +// environtment. This is going to be populated by the AWS SDK for each language +// when that UUID is present. Some of these operations are +// Create/Delete/Get/List/Update EventSourceMapping. +// +// [AWS Lambda EvenSource Mapping]: https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-lambda-eventsourcemapping.html +func AWSLambdaResourceMappingID(val string) attribute.KeyValue { + return AWSLambdaResourceMappingIDKey.String(val) +} + +// AWSLogGroupARNs returns an attribute KeyValue conforming to the +// "aws.log.group.arns" semantic conventions. It represents the Amazon Resource +// Name(s) (ARN) of the AWS log group(s). 
+func AWSLogGroupARNs(val ...string) attribute.KeyValue { + return AWSLogGroupARNsKey.StringSlice(val) +} + +// AWSLogGroupNames returns an attribute KeyValue conforming to the +// "aws.log.group.names" semantic conventions. It represents the name(s) of the +// AWS log group(s) an application is writing to. +func AWSLogGroupNames(val ...string) attribute.KeyValue { + return AWSLogGroupNamesKey.StringSlice(val) +} + +// AWSLogStreamARNs returns an attribute KeyValue conforming to the +// "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of the +// AWS log stream(s). +func AWSLogStreamARNs(val ...string) attribute.KeyValue { + return AWSLogStreamARNsKey.StringSlice(val) +} + +// AWSLogStreamNames returns an attribute KeyValue conforming to the +// "aws.log.stream.names" semantic conventions. It represents the name(s) of the +// AWS log stream(s) an application is writing to. +func AWSLogStreamNames(val ...string) attribute.KeyValue { + return AWSLogStreamNamesKey.StringSlice(val) +} + +// AWSRequestID returns an attribute KeyValue conforming to the "aws.request_id" +// semantic conventions. It represents the AWS request ID as returned in the +// response headers `x-amzn-requestid`, `x-amzn-request-id` or `x-amz-request-id` +// . +func AWSRequestID(val string) attribute.KeyValue { + return AWSRequestIDKey.String(val) +} + +// AWSS3Bucket returns an attribute KeyValue conforming to the "aws.s3.bucket" +// semantic conventions. It represents the S3 bucket name the request refers to. +// Corresponds to the `--bucket` parameter of the [S3 API] operations. +// +// [S3 API]: https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html +func AWSS3Bucket(val string) attribute.KeyValue { + return AWSS3BucketKey.String(val) +} + +// AWSS3CopySource returns an attribute KeyValue conforming to the +// "aws.s3.copy_source" semantic conventions. It represents the source object (in +// the form `bucket`/`key`) for the copy operation. 
+func AWSS3CopySource(val string) attribute.KeyValue { + return AWSS3CopySourceKey.String(val) +} + +// AWSS3Delete returns an attribute KeyValue conforming to the "aws.s3.delete" +// semantic conventions. It represents the delete request container that +// specifies the objects to be deleted. +func AWSS3Delete(val string) attribute.KeyValue { + return AWSS3DeleteKey.String(val) +} + +// AWSS3Key returns an attribute KeyValue conforming to the "aws.s3.key" semantic +// conventions. It represents the S3 object key the request refers to. +// Corresponds to the `--key` parameter of the [S3 API] operations. +// +// [S3 API]: https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html +func AWSS3Key(val string) attribute.KeyValue { + return AWSS3KeyKey.String(val) +} + +// AWSS3PartNumber returns an attribute KeyValue conforming to the +// "aws.s3.part_number" semantic conventions. It represents the part number of +// the part being uploaded in a multipart-upload operation. This is a positive +// integer between 1 and 10,000. +func AWSS3PartNumber(val int) attribute.KeyValue { + return AWSS3PartNumberKey.Int(val) +} + +// AWSS3UploadID returns an attribute KeyValue conforming to the +// "aws.s3.upload_id" semantic conventions. It represents the upload ID that +// identifies the multipart upload. +func AWSS3UploadID(val string) attribute.KeyValue { + return AWSS3UploadIDKey.String(val) +} + +// AWSSecretsmanagerSecretARN returns an attribute KeyValue conforming to the +// "aws.secretsmanager.secret.arn" semantic conventions. It represents the ARN of +// the Secret stored in the Secrets Mangger. +func AWSSecretsmanagerSecretARN(val string) attribute.KeyValue { + return AWSSecretsmanagerSecretARNKey.String(val) +} + +// AWSSNSTopicARN returns an attribute KeyValue conforming to the +// "aws.sns.topic.arn" semantic conventions. It represents the ARN of the AWS SNS +// Topic. An Amazon SNS [topic] is a logical access point that acts as a +// communication channel. 
+// +// [topic]: https://docs.aws.amazon.com/sns/latest/dg/sns-create-topic.html +func AWSSNSTopicARN(val string) attribute.KeyValue { + return AWSSNSTopicARNKey.String(val) +} + +// AWSSQSQueueURL returns an attribute KeyValue conforming to the +// "aws.sqs.queue.url" semantic conventions. It represents the URL of the AWS SQS +// Queue. It's a unique identifier for a queue in Amazon Simple Queue Service +// (SQS) and is used to access the queue and perform actions on it. +func AWSSQSQueueURL(val string) attribute.KeyValue { + return AWSSQSQueueURLKey.String(val) +} + +// AWSStepFunctionsActivityARN returns an attribute KeyValue conforming to the +// "aws.step_functions.activity.arn" semantic conventions. It represents the ARN +// of the AWS Step Functions Activity. +func AWSStepFunctionsActivityARN(val string) attribute.KeyValue { + return AWSStepFunctionsActivityARNKey.String(val) +} + +// AWSStepFunctionsStateMachineARN returns an attribute KeyValue conforming to +// the "aws.step_functions.state_machine.arn" semantic conventions. It represents +// the ARN of the AWS Step Functions State Machine. +func AWSStepFunctionsStateMachineARN(val string) attribute.KeyValue { + return AWSStepFunctionsStateMachineARNKey.String(val) +} + +// Enum values for aws.ecs.launchtype +var ( + // Amazon EC2 + // Stability: development + AWSECSLaunchtypeEC2 = AWSECSLaunchtypeKey.String("ec2") + // Amazon Fargate + // Stability: development + AWSECSLaunchtypeFargate = AWSECSLaunchtypeKey.String("fargate") +) + +// Namespace: azure +const ( + // AzureClientIDKey is the attribute Key conforming to the "azure.client.id" + // semantic conventions. It represents the unique identifier of the client + // instance. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "3ba4827d-4422-483f-b59f-85b74211c11d", "storage-client-1" + AzureClientIDKey = attribute.Key("azure.client.id") + + // AzureCosmosDBConnectionModeKey is the attribute Key conforming to the + // "azure.cosmosdb.connection.mode" semantic conventions. It represents the + // cosmos client connection mode. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + AzureCosmosDBConnectionModeKey = attribute.Key("azure.cosmosdb.connection.mode") + + // AzureCosmosDBConsistencyLevelKey is the attribute Key conforming to the + // "azure.cosmosdb.consistency.level" semantic conventions. It represents the + // account or request [consistency level]. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Eventual", "ConsistentPrefix", "BoundedStaleness", "Strong", + // "Session" + // + // [consistency level]: https://learn.microsoft.com/azure/cosmos-db/consistency-levels + AzureCosmosDBConsistencyLevelKey = attribute.Key("azure.cosmosdb.consistency.level") + + // AzureCosmosDBOperationContactedRegionsKey is the attribute Key conforming to + // the "azure.cosmosdb.operation.contacted_regions" semantic conventions. It + // represents the list of regions contacted during operation in the order that + // they were contacted. If there is more than one region listed, it indicates + // that the operation was performed on multiple regions i.e. cross-regional + // call. 
+ // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "North Central US", "Australia East", "Australia Southeast" + // Note: Region name matches the format of `displayName` in [Azure Location API] + // + // [Azure Location API]: https://learn.microsoft.com/rest/api/subscription/subscriptions/list-locations?view=rest-subscription-2021-10-01&tabs=HTTP#location + AzureCosmosDBOperationContactedRegionsKey = attribute.Key("azure.cosmosdb.operation.contacted_regions") + + // AzureCosmosDBOperationRequestChargeKey is the attribute Key conforming to the + // "azure.cosmosdb.operation.request_charge" semantic conventions. It represents + // the number of request units consumed by the operation. + // + // Type: double + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 46.18, 1.0 + AzureCosmosDBOperationRequestChargeKey = attribute.Key("azure.cosmosdb.operation.request_charge") + + // AzureCosmosDBRequestBodySizeKey is the attribute Key conforming to the + // "azure.cosmosdb.request.body.size" semantic conventions. It represents the + // request payload size in bytes. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + AzureCosmosDBRequestBodySizeKey = attribute.Key("azure.cosmosdb.request.body.size") + + // AzureCosmosDBResponseSubStatusCodeKey is the attribute Key conforming to the + // "azure.cosmosdb.response.sub_status_code" semantic conventions. It represents + // the cosmos DB sub status code. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 1000, 1002 + AzureCosmosDBResponseSubStatusCodeKey = attribute.Key("azure.cosmosdb.response.sub_status_code") + + // AzureResourceProviderNamespaceKey is the attribute Key conforming to the + // "azure.resource_provider.namespace" semantic conventions. It represents the + // [Azure Resource Provider Namespace] as recognized by the client. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Microsoft.Storage", "Microsoft.KeyVault", "Microsoft.ServiceBus" + // + // [Azure Resource Provider Namespace]: https://learn.microsoft.com/azure/azure-resource-manager/management/azure-services-resource-providers + AzureResourceProviderNamespaceKey = attribute.Key("azure.resource_provider.namespace") + + // AzureServiceRequestIDKey is the attribute Key conforming to the + // "azure.service.request.id" semantic conventions. It represents the unique + // identifier of the service request. It's generated by the Azure service and + // returned with the response. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "00000000-0000-0000-0000-000000000000" + AzureServiceRequestIDKey = attribute.Key("azure.service.request.id") +) + +// AzureClientID returns an attribute KeyValue conforming to the +// "azure.client.id" semantic conventions. It represents the unique identifier of +// the client instance. +func AzureClientID(val string) attribute.KeyValue { + return AzureClientIDKey.String(val) +} + +// AzureCosmosDBOperationContactedRegions returns an attribute KeyValue +// conforming to the "azure.cosmosdb.operation.contacted_regions" semantic +// conventions. It represents the list of regions contacted during operation in +// the order that they were contacted. If there is more than one region listed, +// it indicates that the operation was performed on multiple regions i.e. +// cross-regional call. +func AzureCosmosDBOperationContactedRegions(val ...string) attribute.KeyValue { + return AzureCosmosDBOperationContactedRegionsKey.StringSlice(val) +} + +// AzureCosmosDBOperationRequestCharge returns an attribute KeyValue conforming +// to the "azure.cosmosdb.operation.request_charge" semantic conventions. It +// represents the number of request units consumed by the operation. 
+func AzureCosmosDBOperationRequestCharge(val float64) attribute.KeyValue { + return AzureCosmosDBOperationRequestChargeKey.Float64(val) +} + +// AzureCosmosDBRequestBodySize returns an attribute KeyValue conforming to the +// "azure.cosmosdb.request.body.size" semantic conventions. It represents the +// request payload size in bytes. +func AzureCosmosDBRequestBodySize(val int) attribute.KeyValue { + return AzureCosmosDBRequestBodySizeKey.Int(val) +} + +// AzureCosmosDBResponseSubStatusCode returns an attribute KeyValue conforming to +// the "azure.cosmosdb.response.sub_status_code" semantic conventions. It +// represents the cosmos DB sub status code. +func AzureCosmosDBResponseSubStatusCode(val int) attribute.KeyValue { + return AzureCosmosDBResponseSubStatusCodeKey.Int(val) +} + +// AzureResourceProviderNamespace returns an attribute KeyValue conforming to the +// "azure.resource_provider.namespace" semantic conventions. It represents the +// [Azure Resource Provider Namespace] as recognized by the client. +// +// [Azure Resource Provider Namespace]: https://learn.microsoft.com/azure/azure-resource-manager/management/azure-services-resource-providers +func AzureResourceProviderNamespace(val string) attribute.KeyValue { + return AzureResourceProviderNamespaceKey.String(val) +} + +// AzureServiceRequestID returns an attribute KeyValue conforming to the +// "azure.service.request.id" semantic conventions. It represents the unique +// identifier of the service request. It's generated by the Azure service and +// returned with the response. +func AzureServiceRequestID(val string) attribute.KeyValue { + return AzureServiceRequestIDKey.String(val) +} + +// Enum values for azure.cosmosdb.connection.mode +var ( + // Gateway (HTTP) connection. + // Stability: development + AzureCosmosDBConnectionModeGateway = AzureCosmosDBConnectionModeKey.String("gateway") + // Direct connection. 
+ // Stability: development + AzureCosmosDBConnectionModeDirect = AzureCosmosDBConnectionModeKey.String("direct") +) + +// Enum values for azure.cosmosdb.consistency.level +var ( + // Strong + // Stability: development + AzureCosmosDBConsistencyLevelStrong = AzureCosmosDBConsistencyLevelKey.String("Strong") + // Bounded Staleness + // Stability: development + AzureCosmosDBConsistencyLevelBoundedStaleness = AzureCosmosDBConsistencyLevelKey.String("BoundedStaleness") + // Session + // Stability: development + AzureCosmosDBConsistencyLevelSession = AzureCosmosDBConsistencyLevelKey.String("Session") + // Eventual + // Stability: development + AzureCosmosDBConsistencyLevelEventual = AzureCosmosDBConsistencyLevelKey.String("Eventual") + // Consistent Prefix + // Stability: development + AzureCosmosDBConsistencyLevelConsistentPrefix = AzureCosmosDBConsistencyLevelKey.String("ConsistentPrefix") +) + +// Namespace: browser +const ( + // BrowserBrandsKey is the attribute Key conforming to the "browser.brands" + // semantic conventions. It represents the array of brand name and version + // separated by a space. + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: " Not A;Brand 99", "Chromium 99", "Chrome 99" + // Note: This value is intended to be taken from the [UA client hints API] ( + // `navigator.userAgentData.brands`). + // + // [UA client hints API]: https://wicg.github.io/ua-client-hints/#interface + BrowserBrandsKey = attribute.Key("browser.brands") + + // BrowserLanguageKey is the attribute Key conforming to the "browser.language" + // semantic conventions. It represents the preferred language of the user using + // the browser. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "en", "en-US", "fr", "fr-FR" + // Note: This value is intended to be taken from the Navigator API + // `navigator.language`. 
+ BrowserLanguageKey = attribute.Key("browser.language") + + // BrowserMobileKey is the attribute Key conforming to the "browser.mobile" + // semantic conventions. It represents a boolean that is true if the browser is + // running on a mobile device. + // + // Type: boolean + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // Note: This value is intended to be taken from the [UA client hints API] ( + // `navigator.userAgentData.mobile`). If unavailable, this attribute SHOULD be + // left unset. + // + // [UA client hints API]: https://wicg.github.io/ua-client-hints/#interface + BrowserMobileKey = attribute.Key("browser.mobile") + + // BrowserPlatformKey is the attribute Key conforming to the "browser.platform" + // semantic conventions. It represents the platform on which the browser is + // running. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Windows", "macOS", "Android" + // Note: This value is intended to be taken from the [UA client hints API] ( + // `navigator.userAgentData.platform`). If unavailable, the legacy + // `navigator.platform` API SHOULD NOT be used instead and this attribute SHOULD + // be left unset in order for the values to be consistent. + // The list of possible values is defined in the + // [W3C User-Agent Client Hints specification]. Note that some (but not all) of + // these values can overlap with values in the + // [`os.type` and `os.name` attributes]. However, for consistency, the values in + // the `browser.platform` attribute should capture the exact value that the user + // agent provides. 
+ // + // [UA client hints API]: https://wicg.github.io/ua-client-hints/#interface + // [W3C User-Agent Client Hints specification]: https://wicg.github.io/ua-client-hints/#sec-ch-ua-platform + // [`os.type` and `os.name` attributes]: ./os.md + BrowserPlatformKey = attribute.Key("browser.platform") +) + +// BrowserBrands returns an attribute KeyValue conforming to the "browser.brands" +// semantic conventions. It represents the array of brand name and version +// separated by a space. +func BrowserBrands(val ...string) attribute.KeyValue { + return BrowserBrandsKey.StringSlice(val) +} + +// BrowserLanguage returns an attribute KeyValue conforming to the +// "browser.language" semantic conventions. It represents the preferred language +// of the user using the browser. +func BrowserLanguage(val string) attribute.KeyValue { + return BrowserLanguageKey.String(val) +} + +// BrowserMobile returns an attribute KeyValue conforming to the "browser.mobile" +// semantic conventions. It represents a boolean that is true if the browser is +// running on a mobile device. +func BrowserMobile(val bool) attribute.KeyValue { + return BrowserMobileKey.Bool(val) +} + +// BrowserPlatform returns an attribute KeyValue conforming to the +// "browser.platform" semantic conventions. It represents the platform on which +// the browser is running. +func BrowserPlatform(val string) attribute.KeyValue { + return BrowserPlatformKey.String(val) +} + +// Namespace: cassandra +const ( + // CassandraConsistencyLevelKey is the attribute Key conforming to the + // "cassandra.consistency.level" semantic conventions. It represents the + // consistency level of the query. Based on consistency values from [CQL]. 
+ // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // + // [CQL]: https://docs.datastax.com/en/cassandra-oss/3.0/cassandra/dml/dmlConfigConsistency.html + CassandraConsistencyLevelKey = attribute.Key("cassandra.consistency.level") + + // CassandraCoordinatorDCKey is the attribute Key conforming to the + // "cassandra.coordinator.dc" semantic conventions. It represents the data + // center of the coordinating node for a query. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: us-west-2 + CassandraCoordinatorDCKey = attribute.Key("cassandra.coordinator.dc") + + // CassandraCoordinatorIDKey is the attribute Key conforming to the + // "cassandra.coordinator.id" semantic conventions. It represents the ID of the + // coordinating node for a query. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: be13faa2-8574-4d71-926d-27f16cf8a7af + CassandraCoordinatorIDKey = attribute.Key("cassandra.coordinator.id") + + // CassandraPageSizeKey is the attribute Key conforming to the + // "cassandra.page.size" semantic conventions. It represents the fetch size used + // for paging, i.e. how many rows will be returned at once. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 5000 + CassandraPageSizeKey = attribute.Key("cassandra.page.size") + + // CassandraQueryIdempotentKey is the attribute Key conforming to the + // "cassandra.query.idempotent" semantic conventions. It represents the whether + // or not the query is idempotent. + // + // Type: boolean + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + CassandraQueryIdempotentKey = attribute.Key("cassandra.query.idempotent") + + // CassandraSpeculativeExecutionCountKey is the attribute Key conforming to the + // "cassandra.speculative_execution.count" semantic conventions. 
It represents + // the number of times a query was speculatively executed. Not set or `0` if the + // query was not executed speculatively. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 0, 2 + CassandraSpeculativeExecutionCountKey = attribute.Key("cassandra.speculative_execution.count") +) + +// CassandraCoordinatorDC returns an attribute KeyValue conforming to the +// "cassandra.coordinator.dc" semantic conventions. It represents the data center +// of the coordinating node for a query. +func CassandraCoordinatorDC(val string) attribute.KeyValue { + return CassandraCoordinatorDCKey.String(val) +} + +// CassandraCoordinatorID returns an attribute KeyValue conforming to the +// "cassandra.coordinator.id" semantic conventions. It represents the ID of the +// coordinating node for a query. +func CassandraCoordinatorID(val string) attribute.KeyValue { + return CassandraCoordinatorIDKey.String(val) +} + +// CassandraPageSize returns an attribute KeyValue conforming to the +// "cassandra.page.size" semantic conventions. It represents the fetch size used +// for paging, i.e. how many rows will be returned at once. +func CassandraPageSize(val int) attribute.KeyValue { + return CassandraPageSizeKey.Int(val) +} + +// CassandraQueryIdempotent returns an attribute KeyValue conforming to the +// "cassandra.query.idempotent" semantic conventions. It represents the whether +// or not the query is idempotent. +func CassandraQueryIdempotent(val bool) attribute.KeyValue { + return CassandraQueryIdempotentKey.Bool(val) +} + +// CassandraSpeculativeExecutionCount returns an attribute KeyValue conforming to +// the "cassandra.speculative_execution.count" semantic conventions. It +// represents the number of times a query was speculatively executed. Not set or +// `0` if the query was not executed speculatively. 
+func CassandraSpeculativeExecutionCount(val int) attribute.KeyValue { + return CassandraSpeculativeExecutionCountKey.Int(val) +} + +// Enum values for cassandra.consistency.level +var ( + // All + // Stability: development + CassandraConsistencyLevelAll = CassandraConsistencyLevelKey.String("all") + // Each Quorum + // Stability: development + CassandraConsistencyLevelEachQuorum = CassandraConsistencyLevelKey.String("each_quorum") + // Quorum + // Stability: development + CassandraConsistencyLevelQuorum = CassandraConsistencyLevelKey.String("quorum") + // Local Quorum + // Stability: development + CassandraConsistencyLevelLocalQuorum = CassandraConsistencyLevelKey.String("local_quorum") + // One + // Stability: development + CassandraConsistencyLevelOne = CassandraConsistencyLevelKey.String("one") + // Two + // Stability: development + CassandraConsistencyLevelTwo = CassandraConsistencyLevelKey.String("two") + // Three + // Stability: development + CassandraConsistencyLevelThree = CassandraConsistencyLevelKey.String("three") + // Local One + // Stability: development + CassandraConsistencyLevelLocalOne = CassandraConsistencyLevelKey.String("local_one") + // Any + // Stability: development + CassandraConsistencyLevelAny = CassandraConsistencyLevelKey.String("any") + // Serial + // Stability: development + CassandraConsistencyLevelSerial = CassandraConsistencyLevelKey.String("serial") + // Local Serial + // Stability: development + CassandraConsistencyLevelLocalSerial = CassandraConsistencyLevelKey.String("local_serial") +) + +// Namespace: cicd +const ( + // CICDPipelineActionNameKey is the attribute Key conforming to the + // "cicd.pipeline.action.name" semantic conventions. It represents the kind of + // action a pipeline run is performing. 
+ // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "BUILD", "RUN", "SYNC" + CICDPipelineActionNameKey = attribute.Key("cicd.pipeline.action.name") + + // CICDPipelineNameKey is the attribute Key conforming to the + // "cicd.pipeline.name" semantic conventions. It represents the human readable + // name of the pipeline within a CI/CD system. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Build and Test", "Lint", "Deploy Go Project", + // "deploy_to_environment" + CICDPipelineNameKey = attribute.Key("cicd.pipeline.name") + + // CICDPipelineResultKey is the attribute Key conforming to the + // "cicd.pipeline.result" semantic conventions. It represents the result of a + // pipeline run. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "success", "failure", "timeout", "skipped" + CICDPipelineResultKey = attribute.Key("cicd.pipeline.result") + + // CICDPipelineRunIDKey is the attribute Key conforming to the + // "cicd.pipeline.run.id" semantic conventions. It represents the unique + // identifier of a pipeline run within a CI/CD system. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "120912" + CICDPipelineRunIDKey = attribute.Key("cicd.pipeline.run.id") + + // CICDPipelineRunStateKey is the attribute Key conforming to the + // "cicd.pipeline.run.state" semantic conventions. It represents the pipeline + // run goes through these states during its lifecycle. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "pending", "executing", "finalizing" + CICDPipelineRunStateKey = attribute.Key("cicd.pipeline.run.state") + + // CICDPipelineRunURLFullKey is the attribute Key conforming to the + // "cicd.pipeline.run.url.full" semantic conventions. 
It represents the [URL] of + // the pipeline run, providing the complete address in order to locate and + // identify the pipeline run. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // "https://github.com/open-telemetry/semantic-conventions/actions/runs/9753949763?pr=1075" + // + // [URL]: https://wikipedia.org/wiki/URL + CICDPipelineRunURLFullKey = attribute.Key("cicd.pipeline.run.url.full") + + // CICDPipelineTaskNameKey is the attribute Key conforming to the + // "cicd.pipeline.task.name" semantic conventions. It represents the human + // readable name of a task within a pipeline. Task here most closely aligns with + // a [computing process] in a pipeline. Other terms for tasks include commands, + // steps, and procedures. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Run GoLang Linter", "Go Build", "go-test", "deploy_binary" + // + // [computing process]: https://wikipedia.org/wiki/Pipeline_(computing) + CICDPipelineTaskNameKey = attribute.Key("cicd.pipeline.task.name") + + // CICDPipelineTaskRunIDKey is the attribute Key conforming to the + // "cicd.pipeline.task.run.id" semantic conventions. It represents the unique + // identifier of a task run within a pipeline. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "12097" + CICDPipelineTaskRunIDKey = attribute.Key("cicd.pipeline.task.run.id") + + // CICDPipelineTaskRunResultKey is the attribute Key conforming to the + // "cicd.pipeline.task.run.result" semantic conventions. It represents the + // result of a task run. 
+ // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "success", "failure", "timeout", "skipped" + CICDPipelineTaskRunResultKey = attribute.Key("cicd.pipeline.task.run.result") + + // CICDPipelineTaskRunURLFullKey is the attribute Key conforming to the + // "cicd.pipeline.task.run.url.full" semantic conventions. It represents the + // [URL] of the pipeline task run, providing the complete address in order to + // locate and identify the pipeline task run. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // "https://github.com/open-telemetry/semantic-conventions/actions/runs/9753949763/job/26920038674?pr=1075" + // + // [URL]: https://wikipedia.org/wiki/URL + CICDPipelineTaskRunURLFullKey = attribute.Key("cicd.pipeline.task.run.url.full") + + // CICDPipelineTaskTypeKey is the attribute Key conforming to the + // "cicd.pipeline.task.type" semantic conventions. It represents the type of the + // task within a pipeline. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "build", "test", "deploy" + CICDPipelineTaskTypeKey = attribute.Key("cicd.pipeline.task.type") + + // CICDSystemComponentKey is the attribute Key conforming to the + // "cicd.system.component" semantic conventions. It represents the name of a + // component of the CICD system. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "controller", "scheduler", "agent" + CICDSystemComponentKey = attribute.Key("cicd.system.component") + + // CICDWorkerIDKey is the attribute Key conforming to the "cicd.worker.id" + // semantic conventions. It represents the unique identifier of a worker within + // a CICD system. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "abc123", "10.0.1.2", "controller" + CICDWorkerIDKey = attribute.Key("cicd.worker.id") + + // CICDWorkerNameKey is the attribute Key conforming to the "cicd.worker.name" + // semantic conventions. It represents the name of a worker within a CICD + // system. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "agent-abc", "controller", "Ubuntu LTS" + CICDWorkerNameKey = attribute.Key("cicd.worker.name") + + // CICDWorkerStateKey is the attribute Key conforming to the "cicd.worker.state" + // semantic conventions. It represents the state of a CICD worker / agent. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "idle", "busy", "down" + CICDWorkerStateKey = attribute.Key("cicd.worker.state") + + // CICDWorkerURLFullKey is the attribute Key conforming to the + // "cicd.worker.url.full" semantic conventions. It represents the [URL] of the + // worker, providing the complete address in order to locate and identify the + // worker. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "https://cicd.example.org/worker/abc123" + // + // [URL]: https://wikipedia.org/wiki/URL + CICDWorkerURLFullKey = attribute.Key("cicd.worker.url.full") +) + +// CICDPipelineName returns an attribute KeyValue conforming to the +// "cicd.pipeline.name" semantic conventions. It represents the human readable +// name of the pipeline within a CI/CD system. +func CICDPipelineName(val string) attribute.KeyValue { + return CICDPipelineNameKey.String(val) +} + +// CICDPipelineRunID returns an attribute KeyValue conforming to the +// "cicd.pipeline.run.id" semantic conventions. It represents the unique +// identifier of a pipeline run within a CI/CD system. 
+func CICDPipelineRunID(val string) attribute.KeyValue { + return CICDPipelineRunIDKey.String(val) +} + +// CICDPipelineRunURLFull returns an attribute KeyValue conforming to the +// "cicd.pipeline.run.url.full" semantic conventions. It represents the [URL] of +// the pipeline run, providing the complete address in order to locate and +// identify the pipeline run. +// +// [URL]: https://wikipedia.org/wiki/URL +func CICDPipelineRunURLFull(val string) attribute.KeyValue { + return CICDPipelineRunURLFullKey.String(val) +} + +// CICDPipelineTaskName returns an attribute KeyValue conforming to the +// "cicd.pipeline.task.name" semantic conventions. It represents the human +// readable name of a task within a pipeline. Task here most closely aligns with +// a [computing process] in a pipeline. Other terms for tasks include commands, +// steps, and procedures. +// +// [computing process]: https://wikipedia.org/wiki/Pipeline_(computing) +func CICDPipelineTaskName(val string) attribute.KeyValue { + return CICDPipelineTaskNameKey.String(val) +} + +// CICDPipelineTaskRunID returns an attribute KeyValue conforming to the +// "cicd.pipeline.task.run.id" semantic conventions. It represents the unique +// identifier of a task run within a pipeline. +func CICDPipelineTaskRunID(val string) attribute.KeyValue { + return CICDPipelineTaskRunIDKey.String(val) +} + +// CICDPipelineTaskRunURLFull returns an attribute KeyValue conforming to the +// "cicd.pipeline.task.run.url.full" semantic conventions. It represents the +// [URL] of the pipeline task run, providing the complete address in order to +// locate and identify the pipeline task run. +// +// [URL]: https://wikipedia.org/wiki/URL +func CICDPipelineTaskRunURLFull(val string) attribute.KeyValue { + return CICDPipelineTaskRunURLFullKey.String(val) +} + +// CICDSystemComponent returns an attribute KeyValue conforming to the +// "cicd.system.component" semantic conventions. 
It represents the name of a +// component of the CICD system. +func CICDSystemComponent(val string) attribute.KeyValue { + return CICDSystemComponentKey.String(val) +} + +// CICDWorkerID returns an attribute KeyValue conforming to the "cicd.worker.id" +// semantic conventions. It represents the unique identifier of a worker within a +// CICD system. +func CICDWorkerID(val string) attribute.KeyValue { + return CICDWorkerIDKey.String(val) +} + +// CICDWorkerName returns an attribute KeyValue conforming to the +// "cicd.worker.name" semantic conventions. It represents the name of a worker +// within a CICD system. +func CICDWorkerName(val string) attribute.KeyValue { + return CICDWorkerNameKey.String(val) +} + +// CICDWorkerURLFull returns an attribute KeyValue conforming to the +// "cicd.worker.url.full" semantic conventions. It represents the [URL] of the +// worker, providing the complete address in order to locate and identify the +// worker. +// +// [URL]: https://wikipedia.org/wiki/URL +func CICDWorkerURLFull(val string) attribute.KeyValue { + return CICDWorkerURLFullKey.String(val) +} + +// Enum values for cicd.pipeline.action.name +var ( + // The pipeline run is executing a build. + // Stability: development + CICDPipelineActionNameBuild = CICDPipelineActionNameKey.String("BUILD") + // The pipeline run is executing. + // Stability: development + CICDPipelineActionNameRun = CICDPipelineActionNameKey.String("RUN") + // The pipeline run is executing a sync. + // Stability: development + CICDPipelineActionNameSync = CICDPipelineActionNameKey.String("SYNC") +) + +// Enum values for cicd.pipeline.result +var ( + // The pipeline run finished successfully. + // Stability: development + CICDPipelineResultSuccess = CICDPipelineResultKey.String("success") + // The pipeline run did not finish successfully, eg. due to a compile error or a + // failing test. Such failures are usually detected by non-zero exit codes of + // the tools executed in the pipeline run. 
+ // Stability: development + CICDPipelineResultFailure = CICDPipelineResultKey.String("failure") + // The pipeline run failed due to an error in the CICD system, eg. due to the + // worker being killed. + // Stability: development + CICDPipelineResultError = CICDPipelineResultKey.String("error") + // A timeout caused the pipeline run to be interrupted. + // Stability: development + CICDPipelineResultTimeout = CICDPipelineResultKey.String("timeout") + // The pipeline run was cancelled, eg. by a user manually cancelling the + // pipeline run. + // Stability: development + CICDPipelineResultCancellation = CICDPipelineResultKey.String("cancellation") + // The pipeline run was skipped, eg. due to a precondition not being met. + // Stability: development + CICDPipelineResultSkip = CICDPipelineResultKey.String("skip") +) + +// Enum values for cicd.pipeline.run.state +var ( + // The run pending state spans from the event triggering the pipeline run until + // the execution of the run starts (eg. time spent in a queue, provisioning + // agents, creating run resources). + // + // Stability: development + CICDPipelineRunStatePending = CICDPipelineRunStateKey.String("pending") + // The executing state spans the execution of any run tasks (eg. build, test). + // Stability: development + CICDPipelineRunStateExecuting = CICDPipelineRunStateKey.String("executing") + // The finalizing state spans from when the run has finished executing (eg. + // cleanup of run resources). + // Stability: development + CICDPipelineRunStateFinalizing = CICDPipelineRunStateKey.String("finalizing") +) + +// Enum values for cicd.pipeline.task.run.result +var ( + // The task run finished successfully. + // Stability: development + CICDPipelineTaskRunResultSuccess = CICDPipelineTaskRunResultKey.String("success") + // The task run did not finish successfully, eg. due to a compile error or a + // failing test. 
Such failures are usually detected by non-zero exit codes of + // the tools executed in the task run. + // Stability: development + CICDPipelineTaskRunResultFailure = CICDPipelineTaskRunResultKey.String("failure") + // The task run failed due to an error in the CICD system, eg. due to the worker + // being killed. + // Stability: development + CICDPipelineTaskRunResultError = CICDPipelineTaskRunResultKey.String("error") + // A timeout caused the task run to be interrupted. + // Stability: development + CICDPipelineTaskRunResultTimeout = CICDPipelineTaskRunResultKey.String("timeout") + // The task run was cancelled, eg. by a user manually cancelling the task run. + // Stability: development + CICDPipelineTaskRunResultCancellation = CICDPipelineTaskRunResultKey.String("cancellation") + // The task run was skipped, eg. due to a precondition not being met. + // Stability: development + CICDPipelineTaskRunResultSkip = CICDPipelineTaskRunResultKey.String("skip") +) + +// Enum values for cicd.pipeline.task.type +var ( + // build + // Stability: development + CICDPipelineTaskTypeBuild = CICDPipelineTaskTypeKey.String("build") + // test + // Stability: development + CICDPipelineTaskTypeTest = CICDPipelineTaskTypeKey.String("test") + // deploy + // Stability: development + CICDPipelineTaskTypeDeploy = CICDPipelineTaskTypeKey.String("deploy") +) + +// Enum values for cicd.worker.state +var ( + // The worker is not performing work for the CICD system. It is available to the + // CICD system to perform work on (online / idle). + // Stability: development + CICDWorkerStateAvailable = CICDWorkerStateKey.String("available") + // The worker is performing work for the CICD system. + // Stability: development + CICDWorkerStateBusy = CICDWorkerStateKey.String("busy") + // The worker is not available to the CICD system (disconnected / down). 
+ // Stability: development + CICDWorkerStateOffline = CICDWorkerStateKey.String("offline") +) + +// Namespace: client +const ( + // ClientAddressKey is the attribute Key conforming to the "client.address" + // semantic conventions. It represents the client address - domain name if + // available without reverse DNS lookup; otherwise, IP address or Unix domain + // socket name. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "client.example.com", "10.1.2.80", "/tmp/my.sock" + // Note: When observed from the server side, and when communicating through an + // intermediary, `client.address` SHOULD represent the client address behind any + // intermediaries, for example proxies, if it's available. + ClientAddressKey = attribute.Key("client.address") + + // ClientPortKey is the attribute Key conforming to the "client.port" semantic + // conventions. It represents the client port number. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: 65123 + // Note: When observed from the server side, and when communicating through an + // intermediary, `client.port` SHOULD represent the client port behind any + // intermediaries, for example proxies, if it's available. + ClientPortKey = attribute.Key("client.port") +) + +// ClientAddress returns an attribute KeyValue conforming to the "client.address" +// semantic conventions. It represents the client address - domain name if +// available without reverse DNS lookup; otherwise, IP address or Unix domain +// socket name. +func ClientAddress(val string) attribute.KeyValue { + return ClientAddressKey.String(val) +} + +// ClientPort returns an attribute KeyValue conforming to the "client.port" +// semantic conventions. It represents the client port number. 
+func ClientPort(val int) attribute.KeyValue { + return ClientPortKey.Int(val) +} + +// Namespace: cloud +const ( + // CloudAccountIDKey is the attribute Key conforming to the "cloud.account.id" + // semantic conventions. It represents the cloud account ID the resource is + // assigned to. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "111111111111", "opentelemetry" + CloudAccountIDKey = attribute.Key("cloud.account.id") + + // CloudAvailabilityZoneKey is the attribute Key conforming to the + // "cloud.availability_zone" semantic conventions. It represents the cloud + // regions often have multiple, isolated locations known as zones to increase + // availability. Availability zone represents the zone where the resource is + // running. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "us-east-1c" + // Note: Availability zones are called "zones" on Alibaba Cloud and Google + // Cloud. + CloudAvailabilityZoneKey = attribute.Key("cloud.availability_zone") + + // CloudPlatformKey is the attribute Key conforming to the "cloud.platform" + // semantic conventions. It represents the cloud platform in use. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // Note: The prefix of the service SHOULD match the one specified in + // `cloud.provider`. + CloudPlatformKey = attribute.Key("cloud.platform") + + // CloudProviderKey is the attribute Key conforming to the "cloud.provider" + // semantic conventions. It represents the name of the cloud provider. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + CloudProviderKey = attribute.Key("cloud.provider") + + // CloudRegionKey is the attribute Key conforming to the "cloud.region" semantic + // conventions. It represents the geographical region within a cloud provider. 
+ // When associated with a resource, this attribute specifies the region where + // the resource operates. When calling services or APIs deployed on a cloud, + // this attribute identifies the region where the called destination is + // deployed. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "us-central1", "us-east-1" + // Note: Refer to your provider's docs to see the available regions, for example + // [Alibaba Cloud regions], [AWS regions], [Azure regions], + // [Google Cloud regions], or [Tencent Cloud regions]. + // + // [Alibaba Cloud regions]: https://www.alibabacloud.com/help/doc-detail/40654.htm + // [AWS regions]: https://aws.amazon.com/about-aws/global-infrastructure/regions_az/ + // [Azure regions]: https://azure.microsoft.com/global-infrastructure/geographies/ + // [Google Cloud regions]: https://cloud.google.com/about/locations + // [Tencent Cloud regions]: https://www.tencentcloud.com/document/product/213/6091 + CloudRegionKey = attribute.Key("cloud.region") + + // CloudResourceIDKey is the attribute Key conforming to the "cloud.resource_id" + // semantic conventions. It represents the cloud provider-specific native + // identifier of the monitored cloud resource (e.g. an [ARN] on AWS, a + // [fully qualified resource ID] on Azure, a [full resource name] on GCP). + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "arn:aws:lambda:REGION:ACCOUNT_ID:function:my-function", + // "//run.googleapis.com/projects/PROJECT_ID/locations/LOCATION_ID/services/SERVICE_ID", + // "/subscriptions//resourceGroups/ + // /providers/Microsoft.Web/sites//functions/" + // Note: On some cloud providers, it may not be possible to determine the full + // ID at startup, + // so it may be necessary to set `cloud.resource_id` as a span attribute + // instead. + // + // The exact value to use for `cloud.resource_id` depends on the cloud provider. 
+ // The following well-known definitions MUST be used if you set this attribute + // and they apply: + // + // - **AWS Lambda:** The function [ARN]. + // Take care not to use the "invoked ARN" directly but replace any + // [alias suffix] + // with the resolved function version, as the same runtime instance may be + // invocable with + // multiple different aliases. + // - **GCP:** The [URI of the resource] + // - **Azure:** The [Fully Qualified Resource ID] of the invoked function, + // *not* the function app, having the form + // + // `/subscriptions//resourceGroups//providers/Microsoft.Web/sites//functions/` + // . + // This means that a span attribute MUST be used, as an Azure function app + // can host multiple functions that would usually share + // a TracerProvider. + // + // + // [ARN]: https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html + // [fully qualified resource ID]: https://learn.microsoft.com/rest/api/resources/resources/get-by-id + // [full resource name]: https://google.aip.dev/122#full-resource-names + // [ARN]: https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html + // [alias suffix]: https://docs.aws.amazon.com/lambda/latest/dg/configuration-aliases.html + // [URI of the resource]: https://cloud.google.com/iam/docs/full-resource-names + // [Fully Qualified Resource ID]: https://learn.microsoft.com/rest/api/resources/resources/get-by-id + CloudResourceIDKey = attribute.Key("cloud.resource_id") +) + +// CloudAccountID returns an attribute KeyValue conforming to the +// "cloud.account.id" semantic conventions. It represents the cloud account ID +// the resource is assigned to. +func CloudAccountID(val string) attribute.KeyValue { + return CloudAccountIDKey.String(val) +} + +// CloudAvailabilityZone returns an attribute KeyValue conforming to the +// "cloud.availability_zone" semantic conventions. 
It represents the cloud +// regions often have multiple, isolated locations known as zones to increase +// availability. Availability zone represents the zone where the resource is +// running. +func CloudAvailabilityZone(val string) attribute.KeyValue { + return CloudAvailabilityZoneKey.String(val) +} + +// CloudRegion returns an attribute KeyValue conforming to the "cloud.region" +// semantic conventions. It represents the geographical region within a cloud +// provider. When associated with a resource, this attribute specifies the region +// where the resource operates. When calling services or APIs deployed on a +// cloud, this attribute identifies the region where the called destination is +// deployed. +func CloudRegion(val string) attribute.KeyValue { + return CloudRegionKey.String(val) +} + +// CloudResourceID returns an attribute KeyValue conforming to the +// "cloud.resource_id" semantic conventions. It represents the cloud +// provider-specific native identifier of the monitored cloud resource (e.g. an +// [ARN] on AWS, a [fully qualified resource ID] on Azure, a [full resource name] +// on GCP). 
+// +// [ARN]: https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html +// [fully qualified resource ID]: https://learn.microsoft.com/rest/api/resources/resources/get-by-id +// [full resource name]: https://google.aip.dev/122#full-resource-names +func CloudResourceID(val string) attribute.KeyValue { + return CloudResourceIDKey.String(val) +} + +// Enum values for cloud.platform +var ( + // Alibaba Cloud Elastic Compute Service + // Stability: development + CloudPlatformAlibabaCloudECS = CloudPlatformKey.String("alibaba_cloud_ecs") + // Alibaba Cloud Function Compute + // Stability: development + CloudPlatformAlibabaCloudFC = CloudPlatformKey.String("alibaba_cloud_fc") + // Red Hat OpenShift on Alibaba Cloud + // Stability: development + CloudPlatformAlibabaCloudOpenShift = CloudPlatformKey.String("alibaba_cloud_openshift") + // AWS Elastic Compute Cloud + // Stability: development + CloudPlatformAWSEC2 = CloudPlatformKey.String("aws_ec2") + // AWS Elastic Container Service + // Stability: development + CloudPlatformAWSECS = CloudPlatformKey.String("aws_ecs") + // AWS Elastic Kubernetes Service + // Stability: development + CloudPlatformAWSEKS = CloudPlatformKey.String("aws_eks") + // AWS Lambda + // Stability: development + CloudPlatformAWSLambda = CloudPlatformKey.String("aws_lambda") + // AWS Elastic Beanstalk + // Stability: development + CloudPlatformAWSElasticBeanstalk = CloudPlatformKey.String("aws_elastic_beanstalk") + // AWS App Runner + // Stability: development + CloudPlatformAWSAppRunner = CloudPlatformKey.String("aws_app_runner") + // Red Hat OpenShift on AWS (ROSA) + // Stability: development + CloudPlatformAWSOpenShift = CloudPlatformKey.String("aws_openshift") + // Azure Virtual Machines + // Stability: development + CloudPlatformAzureVM = CloudPlatformKey.String("azure.vm") + // Azure Container Apps + // Stability: development + CloudPlatformAzureContainerApps = CloudPlatformKey.String("azure.container_apps") + // Azure Container 
Instances + // Stability: development + CloudPlatformAzureContainerInstances = CloudPlatformKey.String("azure.container_instances") + // Azure Kubernetes Service + // Stability: development + CloudPlatformAzureAKS = CloudPlatformKey.String("azure.aks") + // Azure Functions + // Stability: development + CloudPlatformAzureFunctions = CloudPlatformKey.String("azure.functions") + // Azure App Service + // Stability: development + CloudPlatformAzureAppService = CloudPlatformKey.String("azure.app_service") + // Azure Red Hat OpenShift + // Stability: development + CloudPlatformAzureOpenShift = CloudPlatformKey.String("azure.openshift") + // Google Bare Metal Solution (BMS) + // Stability: development + CloudPlatformGCPBareMetalSolution = CloudPlatformKey.String("gcp_bare_metal_solution") + // Google Cloud Compute Engine (GCE) + // Stability: development + CloudPlatformGCPComputeEngine = CloudPlatformKey.String("gcp_compute_engine") + // Google Cloud Run + // Stability: development + CloudPlatformGCPCloudRun = CloudPlatformKey.String("gcp_cloud_run") + // Google Cloud Kubernetes Engine (GKE) + // Stability: development + CloudPlatformGCPKubernetesEngine = CloudPlatformKey.String("gcp_kubernetes_engine") + // Google Cloud Functions (GCF) + // Stability: development + CloudPlatformGCPCloudFunctions = CloudPlatformKey.String("gcp_cloud_functions") + // Google Cloud App Engine (GAE) + // Stability: development + CloudPlatformGCPAppEngine = CloudPlatformKey.String("gcp_app_engine") + // Red Hat OpenShift on Google Cloud + // Stability: development + CloudPlatformGCPOpenShift = CloudPlatformKey.String("gcp_openshift") + // Red Hat OpenShift on IBM Cloud + // Stability: development + CloudPlatformIBMCloudOpenShift = CloudPlatformKey.String("ibm_cloud_openshift") + // Compute on Oracle Cloud Infrastructure (OCI) + // Stability: development + CloudPlatformOracleCloudCompute = CloudPlatformKey.String("oracle_cloud_compute") + // Kubernetes Engine (OKE) on Oracle Cloud 
Infrastructure (OCI) + // Stability: development + CloudPlatformOracleCloudOKE = CloudPlatformKey.String("oracle_cloud_oke") + // Tencent Cloud Cloud Virtual Machine (CVM) + // Stability: development + CloudPlatformTencentCloudCVM = CloudPlatformKey.String("tencent_cloud_cvm") + // Tencent Cloud Elastic Kubernetes Service (EKS) + // Stability: development + CloudPlatformTencentCloudEKS = CloudPlatformKey.String("tencent_cloud_eks") + // Tencent Cloud Serverless Cloud Function (SCF) + // Stability: development + CloudPlatformTencentCloudSCF = CloudPlatformKey.String("tencent_cloud_scf") +) + +// Enum values for cloud.provider +var ( + // Alibaba Cloud + // Stability: development + CloudProviderAlibabaCloud = CloudProviderKey.String("alibaba_cloud") + // Amazon Web Services + // Stability: development + CloudProviderAWS = CloudProviderKey.String("aws") + // Microsoft Azure + // Stability: development + CloudProviderAzure = CloudProviderKey.String("azure") + // Google Cloud Platform + // Stability: development + CloudProviderGCP = CloudProviderKey.String("gcp") + // Heroku Platform as a Service + // Stability: development + CloudProviderHeroku = CloudProviderKey.String("heroku") + // IBM Cloud + // Stability: development + CloudProviderIBMCloud = CloudProviderKey.String("ibm_cloud") + // Oracle Cloud Infrastructure (OCI) + // Stability: development + CloudProviderOracleCloud = CloudProviderKey.String("oracle_cloud") + // Tencent Cloud + // Stability: development + CloudProviderTencentCloud = CloudProviderKey.String("tencent_cloud") +) + +// Namespace: cloudevents +const ( + // CloudEventsEventIDKey is the attribute Key conforming to the + // "cloudevents.event_id" semantic conventions. It represents the [event_id] + // uniquely identifies the event. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "123e4567-e89b-12d3-a456-426614174000", "0001" + // + // [event_id]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id + CloudEventsEventIDKey = attribute.Key("cloudevents.event_id") + + // CloudEventsEventSourceKey is the attribute Key conforming to the + // "cloudevents.event_source" semantic conventions. It represents the [source] + // identifies the context in which an event happened. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "https://github.com/cloudevents", "/cloudevents/spec/pull/123", + // "my-service" + // + // [source]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1 + CloudEventsEventSourceKey = attribute.Key("cloudevents.event_source") + + // CloudEventsEventSpecVersionKey is the attribute Key conforming to the + // "cloudevents.event_spec_version" semantic conventions. It represents the + // [version of the CloudEvents specification] which the event uses. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 1.0 + // + // [version of the CloudEvents specification]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion + CloudEventsEventSpecVersionKey = attribute.Key("cloudevents.event_spec_version") + + // CloudEventsEventSubjectKey is the attribute Key conforming to the + // "cloudevents.event_subject" semantic conventions. It represents the [subject] + // of the event in the context of the event producer (identified by source). 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: mynewfile.jpg + // + // [subject]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject + CloudEventsEventSubjectKey = attribute.Key("cloudevents.event_subject") + + // CloudEventsEventTypeKey is the attribute Key conforming to the + // "cloudevents.event_type" semantic conventions. It represents the [event_type] + // contains a value describing the type of event related to the originating + // occurrence. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "com.github.pull_request.opened", "com.example.object.deleted.v2" + // + // [event_type]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type + CloudEventsEventTypeKey = attribute.Key("cloudevents.event_type") +) + +// CloudEventsEventID returns an attribute KeyValue conforming to the +// "cloudevents.event_id" semantic conventions. It represents the [event_id] +// uniquely identifies the event. +// +// [event_id]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id +func CloudEventsEventID(val string) attribute.KeyValue { + return CloudEventsEventIDKey.String(val) +} + +// CloudEventsEventSource returns an attribute KeyValue conforming to the +// "cloudevents.event_source" semantic conventions. It represents the [source] +// identifies the context in which an event happened. +// +// [source]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1 +func CloudEventsEventSource(val string) attribute.KeyValue { + return CloudEventsEventSourceKey.String(val) +} + +// CloudEventsEventSpecVersion returns an attribute KeyValue conforming to the +// "cloudevents.event_spec_version" semantic conventions. It represents the +// [version of the CloudEvents specification] which the event uses. 
+// +// [version of the CloudEvents specification]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion +func CloudEventsEventSpecVersion(val string) attribute.KeyValue { + return CloudEventsEventSpecVersionKey.String(val) +} + +// CloudEventsEventSubject returns an attribute KeyValue conforming to the +// "cloudevents.event_subject" semantic conventions. It represents the [subject] +// of the event in the context of the event producer (identified by source). +// +// [subject]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject +func CloudEventsEventSubject(val string) attribute.KeyValue { + return CloudEventsEventSubjectKey.String(val) +} + +// CloudEventsEventType returns an attribute KeyValue conforming to the +// "cloudevents.event_type" semantic conventions. It represents the [event_type] +// contains a value describing the type of event related to the originating +// occurrence. +// +// [event_type]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type +func CloudEventsEventType(val string) attribute.KeyValue { + return CloudEventsEventTypeKey.String(val) +} + +// Namespace: cloudfoundry +const ( + // CloudFoundryAppIDKey is the attribute Key conforming to the + // "cloudfoundry.app.id" semantic conventions. It represents the guid of the + // application. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "218fc5a9-a5f1-4b54-aa05-46717d0ab26d" + // Note: Application instrumentation should use the value from environment + // variable `VCAP_APPLICATION.application_id`. This is the same value as + // reported by `cf app --guid`. + CloudFoundryAppIDKey = attribute.Key("cloudfoundry.app.id") + + // CloudFoundryAppInstanceIDKey is the attribute Key conforming to the + // "cloudfoundry.app.instance.id" semantic conventions. It represents the index + // of the application instance. 0 when just one instance is active. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "0", "1" + // Note: CloudFoundry defines the `instance_id` in the [Loggregator v2 envelope] + // . + // It is used for logs and metrics emitted by CloudFoundry. It is + // supposed to contain the application instance index for applications + // deployed on the runtime. + // + // Application instrumentation should use the value from environment + // variable `CF_INSTANCE_INDEX`. + // + // [Loggregator v2 envelope]: https://github.com/cloudfoundry/loggregator-api#v2-envelope + CloudFoundryAppInstanceIDKey = attribute.Key("cloudfoundry.app.instance.id") + + // CloudFoundryAppNameKey is the attribute Key conforming to the + // "cloudfoundry.app.name" semantic conventions. It represents the name of the + // application. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "my-app-name" + // Note: Application instrumentation should use the value from environment + // variable `VCAP_APPLICATION.application_name`. This is the same value + // as reported by `cf apps`. + CloudFoundryAppNameKey = attribute.Key("cloudfoundry.app.name") + + // CloudFoundryOrgIDKey is the attribute Key conforming to the + // "cloudfoundry.org.id" semantic conventions. It represents the guid of the + // CloudFoundry org the application is running in. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "218fc5a9-a5f1-4b54-aa05-46717d0ab26d" + // Note: Application instrumentation should use the value from environment + // variable `VCAP_APPLICATION.org_id`. This is the same value as + // reported by `cf org --guid`. + CloudFoundryOrgIDKey = attribute.Key("cloudfoundry.org.id") + + // CloudFoundryOrgNameKey is the attribute Key conforming to the + // "cloudfoundry.org.name" semantic conventions. It represents the name of the + // CloudFoundry organization the app is running in. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "my-org-name" + // Note: Application instrumentation should use the value from environment + // variable `VCAP_APPLICATION.org_name`. This is the same value as + // reported by `cf orgs`. + CloudFoundryOrgNameKey = attribute.Key("cloudfoundry.org.name") + + // CloudFoundryProcessIDKey is the attribute Key conforming to the + // "cloudfoundry.process.id" semantic conventions. It represents the UID + // identifying the process. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "218fc5a9-a5f1-4b54-aa05-46717d0ab26d" + // Note: Application instrumentation should use the value from environment + // variable `VCAP_APPLICATION.process_id`. It is supposed to be equal to + // `VCAP_APPLICATION.app_id` for applications deployed to the runtime. + // For system components, this could be the actual PID. + CloudFoundryProcessIDKey = attribute.Key("cloudfoundry.process.id") + + // CloudFoundryProcessTypeKey is the attribute Key conforming to the + // "cloudfoundry.process.type" semantic conventions. It represents the type of + // process. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "web" + // Note: CloudFoundry applications can consist of multiple jobs. Usually the + // main process will be of type `web`. There can be additional background + // tasks or side-cars with different process types. + CloudFoundryProcessTypeKey = attribute.Key("cloudfoundry.process.type") + + // CloudFoundrySpaceIDKey is the attribute Key conforming to the + // "cloudfoundry.space.id" semantic conventions. It represents the guid of the + // CloudFoundry space the application is running in. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "218fc5a9-a5f1-4b54-aa05-46717d0ab26d" + // Note: Application instrumentation should use the value from environment + // variable `VCAP_APPLICATION.space_id`. This is the same value as + // reported by `cf space --guid`. + CloudFoundrySpaceIDKey = attribute.Key("cloudfoundry.space.id") + + // CloudFoundrySpaceNameKey is the attribute Key conforming to the + // "cloudfoundry.space.name" semantic conventions. It represents the name of the + // CloudFoundry space the application is running in. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "my-space-name" + // Note: Application instrumentation should use the value from environment + // variable `VCAP_APPLICATION.space_name`. This is the same value as + // reported by `cf spaces`. + CloudFoundrySpaceNameKey = attribute.Key("cloudfoundry.space.name") + + // CloudFoundrySystemIDKey is the attribute Key conforming to the + // "cloudfoundry.system.id" semantic conventions. It represents a guid or + // another name describing the event source. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "cf/gorouter" + // Note: CloudFoundry defines the `source_id` in the [Loggregator v2 envelope]. + // It is used for logs and metrics emitted by CloudFoundry. It is + // supposed to contain the component name, e.g. "gorouter", for + // CloudFoundry components. + // + // When system components are instrumented, values from the + // [Bosh spec] + // should be used. The `system.id` should be set to + // `spec.deployment/spec.name`. 
+ // + // [Loggregator v2 envelope]: https://github.com/cloudfoundry/loggregator-api#v2-envelope + // [Bosh spec]: https://bosh.io/docs/jobs/#properties-spec + CloudFoundrySystemIDKey = attribute.Key("cloudfoundry.system.id") + + // CloudFoundrySystemInstanceIDKey is the attribute Key conforming to the + // "cloudfoundry.system.instance.id" semantic conventions. It represents a guid + // describing the concrete instance of the event source. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "218fc5a9-a5f1-4b54-aa05-46717d0ab26d" + // Note: CloudFoundry defines the `instance_id` in the [Loggregator v2 envelope] + // . + // It is used for logs and metrics emitted by CloudFoundry. It is + // supposed to contain the vm id for CloudFoundry components. + // + // When system components are instrumented, values from the + // [Bosh spec] + // should be used. The `system.instance.id` should be set to `spec.id`. + // + // [Loggregator v2 envelope]: https://github.com/cloudfoundry/loggregator-api#v2-envelope + // [Bosh spec]: https://bosh.io/docs/jobs/#properties-spec + CloudFoundrySystemInstanceIDKey = attribute.Key("cloudfoundry.system.instance.id") +) + +// CloudFoundryAppID returns an attribute KeyValue conforming to the +// "cloudfoundry.app.id" semantic conventions. It represents the guid of the +// application. +func CloudFoundryAppID(val string) attribute.KeyValue { + return CloudFoundryAppIDKey.String(val) +} + +// CloudFoundryAppInstanceID returns an attribute KeyValue conforming to the +// "cloudfoundry.app.instance.id" semantic conventions. It represents the index +// of the application instance. 0 when just one instance is active. +func CloudFoundryAppInstanceID(val string) attribute.KeyValue { + return CloudFoundryAppInstanceIDKey.String(val) +} + +// CloudFoundryAppName returns an attribute KeyValue conforming to the +// "cloudfoundry.app.name" semantic conventions. 
It represents the name of the +// application. +func CloudFoundryAppName(val string) attribute.KeyValue { + return CloudFoundryAppNameKey.String(val) +} + +// CloudFoundryOrgID returns an attribute KeyValue conforming to the +// "cloudfoundry.org.id" semantic conventions. It represents the guid of the +// CloudFoundry org the application is running in. +func CloudFoundryOrgID(val string) attribute.KeyValue { + return CloudFoundryOrgIDKey.String(val) +} + +// CloudFoundryOrgName returns an attribute KeyValue conforming to the +// "cloudfoundry.org.name" semantic conventions. It represents the name of the +// CloudFoundry organization the app is running in. +func CloudFoundryOrgName(val string) attribute.KeyValue { + return CloudFoundryOrgNameKey.String(val) +} + +// CloudFoundryProcessID returns an attribute KeyValue conforming to the +// "cloudfoundry.process.id" semantic conventions. It represents the UID +// identifying the process. +func CloudFoundryProcessID(val string) attribute.KeyValue { + return CloudFoundryProcessIDKey.String(val) +} + +// CloudFoundryProcessType returns an attribute KeyValue conforming to the +// "cloudfoundry.process.type" semantic conventions. It represents the type of +// process. +func CloudFoundryProcessType(val string) attribute.KeyValue { + return CloudFoundryProcessTypeKey.String(val) +} + +// CloudFoundrySpaceID returns an attribute KeyValue conforming to the +// "cloudfoundry.space.id" semantic conventions. It represents the guid of the +// CloudFoundry space the application is running in. +func CloudFoundrySpaceID(val string) attribute.KeyValue { + return CloudFoundrySpaceIDKey.String(val) +} + +// CloudFoundrySpaceName returns an attribute KeyValue conforming to the +// "cloudfoundry.space.name" semantic conventions. It represents the name of the +// CloudFoundry space the application is running in. 
+func CloudFoundrySpaceName(val string) attribute.KeyValue { + return CloudFoundrySpaceNameKey.String(val) +} + +// CloudFoundrySystemID returns an attribute KeyValue conforming to the +// "cloudfoundry.system.id" semantic conventions. It represents a guid or another +// name describing the event source. +func CloudFoundrySystemID(val string) attribute.KeyValue { + return CloudFoundrySystemIDKey.String(val) +} + +// CloudFoundrySystemInstanceID returns an attribute KeyValue conforming to the +// "cloudfoundry.system.instance.id" semantic conventions. It represents a guid +// describing the concrete instance of the event source. +func CloudFoundrySystemInstanceID(val string) attribute.KeyValue { + return CloudFoundrySystemInstanceIDKey.String(val) +} + +// Namespace: code +const ( + // CodeColumnNumberKey is the attribute Key conforming to the + // "code.column.number" semantic conventions. It represents the column number in + // `code.file.path` best representing the operation. It SHOULD point within the + // code unit named in `code.function.name`. This attribute MUST NOT be used on + // the Profile signal since the data is already captured in 'message Line'. This + // constraint is imposed to prevent redundancy and maintain data integrity. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Stable + CodeColumnNumberKey = attribute.Key("code.column.number") + + // CodeFilePathKey is the attribute Key conforming to the "code.file.path" + // semantic conventions. It represents the source code file name that identifies + // the code unit as uniquely as possible (preferably an absolute file path). + // This attribute MUST NOT be used on the Profile signal since the data is + // already captured in 'message Function'. This constraint is imposed to prevent + // redundancy and maintain data integrity. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: /usr/local/MyApplication/content_root/app/index.php + CodeFilePathKey = attribute.Key("code.file.path") + + // CodeFunctionNameKey is the attribute Key conforming to the + // "code.function.name" semantic conventions. It represents the method or + // function fully-qualified name without arguments. The value should fit the + // natural representation of the language runtime, which is also likely the same + // used within `code.stacktrace` attribute value. This attribute MUST NOT be + // used on the Profile signal since the data is already captured in 'message + // Function'. This constraint is imposed to prevent redundancy and maintain data + // integrity. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "com.example.MyHttpService.serveRequest", + // "GuzzleHttp\Client::transfer", "fopen" + // Note: Values and format depends on each language runtime, thus it is + // impossible to provide an exhaustive list of examples. + // The values are usually the same (or prefixes of) the ones found in native + // stack trace representation stored in + // `code.stacktrace` without information on arguments. + // + // Examples: + // + // - Java method: `com.example.MyHttpService.serveRequest` + // - Java anonymous class method: `com.mycompany.Main$1.myMethod` + // - Java lambda method: + // `com.mycompany.Main$$Lambda/0x0000748ae4149c00.myMethod` + // - PHP function: `GuzzleHttp\Client::transfer` + // - Go function: `github.com/my/repo/pkg.foo.func5` + // - Elixir: `OpenTelemetry.Ctx.new` + // - Erlang: `opentelemetry_ctx:new` + // - Rust: `playground::my_module::my_cool_func` + // - C function: `fopen` + CodeFunctionNameKey = attribute.Key("code.function.name") + + // CodeLineNumberKey is the attribute Key conforming to the "code.line.number" + // semantic conventions. 
It represents the line number in `code.file.path` best + // representing the operation. It SHOULD point within the code unit named in + // `code.function.name`. This attribute MUST NOT be used on the Profile signal + // since the data is already captured in 'message Line'. This constraint is + // imposed to prevent redundancy and maintain data integrity. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Stable + CodeLineNumberKey = attribute.Key("code.line.number") + + // CodeStacktraceKey is the attribute Key conforming to the "code.stacktrace" + // semantic conventions. It represents a stacktrace as a string in the natural + // representation for the language runtime. The representation is identical to + // [`exception.stacktrace`]. This attribute MUST NOT be used on the Profile + // signal since the data is already captured in 'message Location'. This + // constraint is imposed to prevent redundancy and maintain data integrity. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: at com.example.GenerateTrace.methodB(GenerateTrace.java:13)\n at + // com.example.GenerateTrace.methodA(GenerateTrace.java:9)\n at + // com.example.GenerateTrace.main(GenerateTrace.java:5) + // + // [`exception.stacktrace`]: /docs/exceptions/exceptions-spans.md#stacktrace-representation + CodeStacktraceKey = attribute.Key("code.stacktrace") +) + +// CodeColumnNumber returns an attribute KeyValue conforming to the +// "code.column.number" semantic conventions. It represents the column number in +// `code.file.path` best representing the operation. It SHOULD point within the +// code unit named in `code.function.name`. This attribute MUST NOT be used on +// the Profile signal since the data is already captured in 'message Line'. This +// constraint is imposed to prevent redundancy and maintain data integrity. 
+func CodeColumnNumber(val int) attribute.KeyValue { + return CodeColumnNumberKey.Int(val) +} + +// CodeFilePath returns an attribute KeyValue conforming to the "code.file.path" +// semantic conventions. It represents the source code file name that identifies +// the code unit as uniquely as possible (preferably an absolute file path). This +// attribute MUST NOT be used on the Profile signal since the data is already +// captured in 'message Function'. This constraint is imposed to prevent +// redundancy and maintain data integrity. +func CodeFilePath(val string) attribute.KeyValue { + return CodeFilePathKey.String(val) +} + +// CodeFunctionName returns an attribute KeyValue conforming to the +// "code.function.name" semantic conventions. It represents the method or +// function fully-qualified name without arguments. The value should fit the +// natural representation of the language runtime, which is also likely the same +// used within `code.stacktrace` attribute value. This attribute MUST NOT be used +// on the Profile signal since the data is already captured in 'message +// Function'. This constraint is imposed to prevent redundancy and maintain data +// integrity. +func CodeFunctionName(val string) attribute.KeyValue { + return CodeFunctionNameKey.String(val) +} + +// CodeLineNumber returns an attribute KeyValue conforming to the +// "code.line.number" semantic conventions. It represents the line number in +// `code.file.path` best representing the operation. It SHOULD point within the +// code unit named in `code.function.name`. This attribute MUST NOT be used on +// the Profile signal since the data is already captured in 'message Line'. This +// constraint is imposed to prevent redundancy and maintain data integrity. +func CodeLineNumber(val int) attribute.KeyValue { + return CodeLineNumberKey.Int(val) +} + +// CodeStacktrace returns an attribute KeyValue conforming to the +// "code.stacktrace" semantic conventions. 
It represents a stacktrace as a string +// in the natural representation for the language runtime. The representation is +// identical to [`exception.stacktrace`]. This attribute MUST NOT be used on the +// Profile signal since the data is already captured in 'message Location'. This +// constraint is imposed to prevent redundancy and maintain data integrity. +// +// [`exception.stacktrace`]: /docs/exceptions/exceptions-spans.md#stacktrace-representation +func CodeStacktrace(val string) attribute.KeyValue { + return CodeStacktraceKey.String(val) +} + +// Namespace: container +const ( + // ContainerCommandKey is the attribute Key conforming to the + // "container.command" semantic conventions. It represents the command used to + // run the container (i.e. the command name). + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "otelcontribcol" + // Note: If using embedded credentials or sensitive data, it is recommended to + // remove them to prevent potential leakage. + ContainerCommandKey = attribute.Key("container.command") + + // ContainerCommandArgsKey is the attribute Key conforming to the + // "container.command_args" semantic conventions. It represents the all the + // command arguments (including the command/executable itself) run by the + // container. + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "otelcontribcol", "--config", "config.yaml" + ContainerCommandArgsKey = attribute.Key("container.command_args") + + // ContainerCommandLineKey is the attribute Key conforming to the + // "container.command_line" semantic conventions. It represents the full command + // run by the container as a single string representing the full command. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "otelcontribcol --config config.yaml" + ContainerCommandLineKey = attribute.Key("container.command_line") + + // ContainerCSIPluginNameKey is the attribute Key conforming to the + // "container.csi.plugin.name" semantic conventions. It represents the name of + // the CSI ([Container Storage Interface]) plugin used by the volume. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "pd.csi.storage.gke.io" + // Note: This can sometimes be referred to as a "driver" in CSI implementations. + // This should represent the `name` field of the GetPluginInfo RPC. + // + // [Container Storage Interface]: https://github.com/container-storage-interface/spec + ContainerCSIPluginNameKey = attribute.Key("container.csi.plugin.name") + + // ContainerCSIVolumeIDKey is the attribute Key conforming to the + // "container.csi.volume.id" semantic conventions. It represents the unique + // volume ID returned by the CSI ([Container Storage Interface]) plugin. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "projects/my-gcp-project/zones/my-gcp-zone/disks/my-gcp-disk" + // Note: This can sometimes be referred to as a "volume handle" in CSI + // implementations. This should represent the `Volume.volume_id` field in CSI + // spec. + // + // [Container Storage Interface]: https://github.com/container-storage-interface/spec + ContainerCSIVolumeIDKey = attribute.Key("container.csi.volume.id") + + // ContainerIDKey is the attribute Key conforming to the "container.id" semantic + // conventions. It represents the container ID. Usually a UUID, as for example + // used to [identify Docker containers]. The UUID might be abbreviated. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "a3bf90e006b2" + // + // [identify Docker containers]: https://docs.docker.com/engine/containers/run/#container-identification + ContainerIDKey = attribute.Key("container.id") + + // ContainerImageIDKey is the attribute Key conforming to the + // "container.image.id" semantic conventions. It represents the runtime specific + // image identifier. Usually a hash algorithm followed by a UUID. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // "sha256:19c92d0a00d1b66d897bceaa7319bee0dd38a10a851c60bcec9474aa3f01e50f" + // Note: Docker defines a sha256 of the image id; `container.image.id` + // corresponds to the `Image` field from the Docker container inspect [API] + // endpoint. + // K8s defines a link to the container registry repository with digest + // `"imageID": "registry.azurecr.io /namespace/service/dockerfile@sha256:bdeabd40c3a8a492eaf9e8e44d0ebbb84bac7ee25ac0cf8a7159d25f62555625"` + // . + // The ID is assigned by the container runtime and can vary in different + // environments. Consider using `oci.manifest.digest` if it is important to + // identify the same image in different environments/runtimes. + // + // [API]: https://docs.docker.com/engine/api/v1.43/#tag/Container/operation/ContainerInspect + ContainerImageIDKey = attribute.Key("container.image.id") + + // ContainerImageNameKey is the attribute Key conforming to the + // "container.image.name" semantic conventions. It represents the name of the + // image the container was built on. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "gcr.io/opentelemetry/operator" + ContainerImageNameKey = attribute.Key("container.image.name") + + // ContainerImageRepoDigestsKey is the attribute Key conforming to the + // "container.image.repo_digests" semantic conventions. 
It represents the repo + digests of the container image as provided by the container runtime. + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // "example@sha256:afcc7f1ac1b49db317a7196c902e61c6c3c4607d63599ee1a82d702d249a0ccb", + // "internal.registry.example.com:5000/example@sha256:b69959407d21e8a062e0416bf13405bb2b71ed7a84dde4158ebafacfa06f5578" + // Note: [Docker] and [CRI] report those under the `RepoDigests` field. + // + // [Docker]: https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect + // [CRI]: https://github.com/kubernetes/cri-api/blob/c75ef5b473bbe2d0a4fc92f82235efd665ea8e9f/pkg/apis/runtime/v1/api.proto#L1237-L1238 + ContainerImageRepoDigestsKey = attribute.Key("container.image.repo_digests") + + // ContainerImageTagsKey is the attribute Key conforming to the + // "container.image.tags" semantic conventions. It represents the container + // image tags. An example can be found in [Docker Image Inspect]. Should be only + // the `<tag>` section of the full name for example from + // `registry.example.com/my-org/my-image:<tag>`. + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "v1.27.1", "3.5.7-0" + // + // [Docker Image Inspect]: https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect + ContainerImageTagsKey = attribute.Key("container.image.tags") + + // ContainerNameKey is the attribute Key conforming to the "container.name" + // semantic conventions. It represents the container name used by container + // runtime. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "opentelemetry-autoconf" + ContainerNameKey = attribute.Key("container.name") + + // ContainerRuntimeDescriptionKey is the attribute Key conforming to the + // "container.runtime.description" semantic conventions. 
It represents a + // description about the runtime which could include, for example details about + // the CRI/API version being used or other customisations. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "docker://19.3.1 - CRI: 1.22.0" + ContainerRuntimeDescriptionKey = attribute.Key("container.runtime.description") + + // ContainerRuntimeNameKey is the attribute Key conforming to the + // "container.runtime.name" semantic conventions. It represents the container + // runtime managing this container. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "docker", "containerd", "rkt" + ContainerRuntimeNameKey = attribute.Key("container.runtime.name") + + // ContainerRuntimeVersionKey is the attribute Key conforming to the + // "container.runtime.version" semantic conventions. It represents the version + // of the runtime of this process, as returned by the runtime without + // modification. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 1.0.0 + ContainerRuntimeVersionKey = attribute.Key("container.runtime.version") +) + +// ContainerCommand returns an attribute KeyValue conforming to the +// "container.command" semantic conventions. It represents the command used to +// run the container (i.e. the command name). +func ContainerCommand(val string) attribute.KeyValue { + return ContainerCommandKey.String(val) +} + +// ContainerCommandArgs returns an attribute KeyValue conforming to the +// "container.command_args" semantic conventions. It represents the all the +// command arguments (including the command/executable itself) run by the +// container. +func ContainerCommandArgs(val ...string) attribute.KeyValue { + return ContainerCommandArgsKey.StringSlice(val) +} + +// ContainerCommandLine returns an attribute KeyValue conforming to the +// "container.command_line" semantic conventions. 
It represents the full command +// run by the container as a single string representing the full command. +func ContainerCommandLine(val string) attribute.KeyValue { + return ContainerCommandLineKey.String(val) +} + +// ContainerCSIPluginName returns an attribute KeyValue conforming to the +// "container.csi.plugin.name" semantic conventions. It represents the name of +// the CSI ([Container Storage Interface]) plugin used by the volume. +// +// [Container Storage Interface]: https://github.com/container-storage-interface/spec +func ContainerCSIPluginName(val string) attribute.KeyValue { + return ContainerCSIPluginNameKey.String(val) +} + +// ContainerCSIVolumeID returns an attribute KeyValue conforming to the +// "container.csi.volume.id" semantic conventions. It represents the unique +// volume ID returned by the CSI ([Container Storage Interface]) plugin. +// +// [Container Storage Interface]: https://github.com/container-storage-interface/spec +func ContainerCSIVolumeID(val string) attribute.KeyValue { + return ContainerCSIVolumeIDKey.String(val) +} + +// ContainerID returns an attribute KeyValue conforming to the "container.id" +// semantic conventions. It represents the container ID. Usually a UUID, as for +// example used to [identify Docker containers]. The UUID might be abbreviated. +// +// [identify Docker containers]: https://docs.docker.com/engine/containers/run/#container-identification +func ContainerID(val string) attribute.KeyValue { + return ContainerIDKey.String(val) +} + +// ContainerImageID returns an attribute KeyValue conforming to the +// "container.image.id" semantic conventions. It represents the runtime specific +// image identifier. Usually a hash algorithm followed by a UUID. +func ContainerImageID(val string) attribute.KeyValue { + return ContainerImageIDKey.String(val) +} + +// ContainerImageName returns an attribute KeyValue conforming to the +// "container.image.name" semantic conventions. 
It represents the name of the +// image the container was built on. +func ContainerImageName(val string) attribute.KeyValue { + return ContainerImageNameKey.String(val) +} + +// ContainerImageRepoDigests returns an attribute KeyValue conforming to the +// "container.image.repo_digests" semantic conventions. It represents the repo +// digests of the container image as provided by the container runtime. +func ContainerImageRepoDigests(val ...string) attribute.KeyValue { + return ContainerImageRepoDigestsKey.StringSlice(val) +} + +// ContainerImageTags returns an attribute KeyValue conforming to the +// "container.image.tags" semantic conventions. It represents the container image +// tags. An example can be found in [Docker Image Inspect]. Should be only the +// `<tag>` section of the full name for example from +// `registry.example.com/my-org/my-image:<tag>`. +// +// [Docker Image Inspect]: https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect +func ContainerImageTags(val ...string) attribute.KeyValue { + return ContainerImageTagsKey.StringSlice(val) +} + +// ContainerLabel returns an attribute KeyValue conforming to the +// "container.label" semantic conventions. It represents the container labels, +// `<key>` being the label name, the value being the label value. +func ContainerLabel(key string, val string) attribute.KeyValue { + return attribute.String("container.label."+key, val) +} + +// ContainerName returns an attribute KeyValue conforming to the "container.name" +// semantic conventions. It represents the container name used by container +// runtime. +func ContainerName(val string) attribute.KeyValue { + return ContainerNameKey.String(val) +} + +// ContainerRuntimeDescription returns an attribute KeyValue conforming to the +// "container.runtime.description" semantic conventions. It represents a +// description about the runtime which could include, for example details about +// the CRI/API version being used or other customisations. 
+func ContainerRuntimeDescription(val string) attribute.KeyValue { + return ContainerRuntimeDescriptionKey.String(val) +} + +// ContainerRuntimeName returns an attribute KeyValue conforming to the +// "container.runtime.name" semantic conventions. It represents the container +// runtime managing this container. +func ContainerRuntimeName(val string) attribute.KeyValue { + return ContainerRuntimeNameKey.String(val) +} + +// ContainerRuntimeVersion returns an attribute KeyValue conforming to the +// "container.runtime.version" semantic conventions. It represents the version of +// the runtime of this process, as returned by the runtime without modification. +func ContainerRuntimeVersion(val string) attribute.KeyValue { + return ContainerRuntimeVersionKey.String(val) +} + +// Namespace: cpu +const ( + // CPULogicalNumberKey is the attribute Key conforming to the + // "cpu.logical_number" semantic conventions. It represents the logical CPU + // number [0..n-1]. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 1 + CPULogicalNumberKey = attribute.Key("cpu.logical_number") + + // CPUModeKey is the attribute Key conforming to the "cpu.mode" semantic + // conventions. It represents the mode of the CPU. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "user", "system" + CPUModeKey = attribute.Key("cpu.mode") +) + +// CPULogicalNumber returns an attribute KeyValue conforming to the +// "cpu.logical_number" semantic conventions. It represents the logical CPU +// number [0..n-1]. 
+func CPULogicalNumber(val int) attribute.KeyValue { + return CPULogicalNumberKey.Int(val) +} + +// Enum values for cpu.mode +var ( + // User + // Stability: development + CPUModeUser = CPUModeKey.String("user") + // System + // Stability: development + CPUModeSystem = CPUModeKey.String("system") + // Nice + // Stability: development + CPUModeNice = CPUModeKey.String("nice") + // Idle + // Stability: development + CPUModeIdle = CPUModeKey.String("idle") + // IO Wait + // Stability: development + CPUModeIOWait = CPUModeKey.String("iowait") + // Interrupt + // Stability: development + CPUModeInterrupt = CPUModeKey.String("interrupt") + // Steal + // Stability: development + CPUModeSteal = CPUModeKey.String("steal") + // Kernel + // Stability: development + CPUModeKernel = CPUModeKey.String("kernel") +) + +// Namespace: db +const ( + // DBClientConnectionPoolNameKey is the attribute Key conforming to the + // "db.client.connection.pool.name" semantic conventions. It represents the name + // of the connection pool; unique within the instrumented application. In case + // the connection pool implementation doesn't provide a name, instrumentation + // SHOULD use a combination of parameters that would make the name unique, for + // example, combining attributes `server.address`, `server.port`, and + // `db.namespace`, formatted as `server.address:server.port/db.namespace`. + // Instrumentations that generate connection pool name following different + // patterns SHOULD document it. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "myDataSource" + DBClientConnectionPoolNameKey = attribute.Key("db.client.connection.pool.name") + + // DBClientConnectionStateKey is the attribute Key conforming to the + // "db.client.connection.state" semantic conventions. It represents the state of + // a connection in the pool. 
+ // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "idle" + DBClientConnectionStateKey = attribute.Key("db.client.connection.state") + + // DBCollectionNameKey is the attribute Key conforming to the + // "db.collection.name" semantic conventions. It represents the name of a + // collection (table, container) within the database. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "public.users", "customers" + // Note: It is RECOMMENDED to capture the value as provided by the application + // without attempting to do any case normalization. + // + // The collection name SHOULD NOT be extracted from `db.query.text`, + // when the database system supports query text with multiple collections + // in non-batch operations. + // + // For batch operations, if the individual operations are known to have the same + // collection name then that collection name SHOULD be used. + DBCollectionNameKey = attribute.Key("db.collection.name") + + // DBNamespaceKey is the attribute Key conforming to the "db.namespace" semantic + // conventions. It represents the name of the database, fully qualified within + // the server address and port. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "customers", "test.users" + // Note: If a database system has multiple namespace components, they SHOULD be + // concatenated from the most general to the most specific namespace component, + // using `|` as a separator between the components. Any missing components (and + // their associated separators) SHOULD be omitted. + // Semantic conventions for individual database systems SHOULD document what + // `db.namespace` means in the context of that system. + // It is RECOMMENDED to capture the value as provided by the application without + // attempting to do any case normalization. 
+ DBNamespaceKey = attribute.Key("db.namespace") + + // DBOperationBatchSizeKey is the attribute Key conforming to the + // "db.operation.batch.size" semantic conventions. It represents the number of + // queries included in a batch operation. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: 2, 3, 4 + // Note: Operations are only considered batches when they contain two or more + // operations, and so `db.operation.batch.size` SHOULD never be `1`. + DBOperationBatchSizeKey = attribute.Key("db.operation.batch.size") + + // DBOperationNameKey is the attribute Key conforming to the "db.operation.name" + // semantic conventions. It represents the name of the operation or command + // being executed. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "findAndModify", "HMSET", "SELECT" + // Note: It is RECOMMENDED to capture the value as provided by the application + // without attempting to do any case normalization. + // + // The operation name SHOULD NOT be extracted from `db.query.text`, + // when the database system supports query text with multiple operations + // in non-batch operations. + // + // If spaces can occur in the operation name, multiple consecutive spaces + // SHOULD be normalized to a single space. + // + // For batch operations, if the individual operations are known to have the same + // operation name + // then that operation name SHOULD be used prepended by `BATCH `, + // otherwise `db.operation.name` SHOULD be `BATCH` or some other database + // system specific term if more applicable. + DBOperationNameKey = attribute.Key("db.operation.name") + + // DBQuerySummaryKey is the attribute Key conforming to the "db.query.summary" + // semantic conventions. It represents the low cardinality summary of a database + // query. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "SELECT wuser_table", "INSERT shipping_details SELECT orders", "get + // user by id" + // Note: The query summary describes a class of database queries and is useful + // as a grouping key, especially when analyzing telemetry for database + // calls involving complex queries. + // + // Summary may be available to the instrumentation through + // instrumentation hooks or other means. If it is not available, + // instrumentations + // that support query parsing SHOULD generate a summary following + // [Generating query summary] + // section. + // + // [Generating query summary]: /docs/database/database-spans.md#generating-a-summary-of-the-query + DBQuerySummaryKey = attribute.Key("db.query.summary") + + // DBQueryTextKey is the attribute Key conforming to the "db.query.text" + // semantic conventions. It represents the database query being executed. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "SELECT * FROM wuser_table where username = ?", "SET mykey ?" + // Note: For sanitization see [Sanitization of `db.query.text`]. + // For batch operations, if the individual operations are known to have the same + // query text then that query text SHOULD be used, otherwise all of the + // individual query texts SHOULD be concatenated with separator `; ` or some + // other database system specific separator if more applicable. + // Parameterized query text SHOULD NOT be sanitized. Even though parameterized + // query text can potentially have sensitive data, by using a parameterized + // query the user is giving a strong signal that any sensitive data will be + // passed as parameter values, and the benefit to observability of capturing the + // static part of the query text by default outweighs the risk. 
+ // + // [Sanitization of `db.query.text`]: /docs/database/database-spans.md#sanitization-of-dbquerytext + DBQueryTextKey = attribute.Key("db.query.text") + + // DBResponseReturnedRowsKey is the attribute Key conforming to the + // "db.response.returned_rows" semantic conventions. It represents the number of + // rows returned by the operation. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 10, 30, 1000 + DBResponseReturnedRowsKey = attribute.Key("db.response.returned_rows") + + // DBResponseStatusCodeKey is the attribute Key conforming to the + // "db.response.status_code" semantic conventions. It represents the database + // response status code. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "102", "ORA-17002", "08P01", "404" + // Note: The status code returned by the database. Usually it represents an + // error code, but may also represent partial success, warning, or differentiate + // between various types of successful outcomes. + // Semantic conventions for individual database systems SHOULD document what + // `db.response.status_code` means in the context of that system. + DBResponseStatusCodeKey = attribute.Key("db.response.status_code") + + // DBStoredProcedureNameKey is the attribute Key conforming to the + // "db.stored_procedure.name" semantic conventions. It represents the name of a + // stored procedure within the database. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "GetCustomer" + // Note: It is RECOMMENDED to capture the value as provided by the application + // without attempting to do any case normalization. + // + // For batch operations, if the individual operations are known to have the same + // stored procedure name then that stored procedure name SHOULD be used. 
+ DBStoredProcedureNameKey = attribute.Key("db.stored_procedure.name") + + // DBSystemNameKey is the attribute Key conforming to the "db.system.name" + // semantic conventions. It represents the database management system (DBMS) + // product as identified by the client instrumentation. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: + // Note: The actual DBMS may differ from the one identified by the client. For + // example, when using PostgreSQL client libraries to connect to a CockroachDB, + // the `db.system.name` is set to `postgresql` based on the instrumentation's + // best knowledge. + DBSystemNameKey = attribute.Key("db.system.name") +) + +// DBClientConnectionPoolName returns an attribute KeyValue conforming to the +// "db.client.connection.pool.name" semantic conventions. It represents the name +// of the connection pool; unique within the instrumented application. In case +// the connection pool implementation doesn't provide a name, instrumentation +// SHOULD use a combination of parameters that would make the name unique, for +// example, combining attributes `server.address`, `server.port`, and +// `db.namespace`, formatted as `server.address:server.port/db.namespace`. +// Instrumentations that generate connection pool name following different +// patterns SHOULD document it. +func DBClientConnectionPoolName(val string) attribute.KeyValue { + return DBClientConnectionPoolNameKey.String(val) +} + +// DBCollectionName returns an attribute KeyValue conforming to the +// "db.collection.name" semantic conventions. It represents the name of a +// collection (table, container) within the database. +func DBCollectionName(val string) attribute.KeyValue { + return DBCollectionNameKey.String(val) +} + +// DBNamespace returns an attribute KeyValue conforming to the "db.namespace" +// semantic conventions. It represents the name of the database, fully qualified +// within the server address and port. 
+func DBNamespace(val string) attribute.KeyValue { + return DBNamespaceKey.String(val) +} + +// DBOperationBatchSize returns an attribute KeyValue conforming to the +// "db.operation.batch.size" semantic conventions. It represents the number of +// queries included in a batch operation. +func DBOperationBatchSize(val int) attribute.KeyValue { + return DBOperationBatchSizeKey.Int(val) +} + +// DBOperationName returns an attribute KeyValue conforming to the +// "db.operation.name" semantic conventions. It represents the name of the +// operation or command being executed. +func DBOperationName(val string) attribute.KeyValue { + return DBOperationNameKey.String(val) +} + +// DBOperationParameter returns an attribute KeyValue conforming to the +// "db.operation.parameter" semantic conventions. It represents a database +// operation parameter, with `<key>` being the parameter name, and the attribute +// value being a string representation of the parameter value. +func DBOperationParameter(key string, val string) attribute.KeyValue { + return attribute.String("db.operation.parameter."+key, val) +} + +// DBQueryParameter returns an attribute KeyValue conforming to the +// "db.query.parameter" semantic conventions. It represents a database query +// parameter, with `<key>` being the parameter name, and the attribute value +// being a string representation of the parameter value. +func DBQueryParameter(key string, val string) attribute.KeyValue { + return attribute.String("db.query.parameter."+key, val) +} + +// DBQuerySummary returns an attribute KeyValue conforming to the +// "db.query.summary" semantic conventions. It represents the low cardinality +// summary of a database query. +func DBQuerySummary(val string) attribute.KeyValue { + return DBQuerySummaryKey.String(val) +} + +// DBQueryText returns an attribute KeyValue conforming to the "db.query.text" +// semantic conventions. It represents the database query being executed. 
+func DBQueryText(val string) attribute.KeyValue { + return DBQueryTextKey.String(val) +} + +// DBResponseReturnedRows returns an attribute KeyValue conforming to the +// "db.response.returned_rows" semantic conventions. It represents the number of +// rows returned by the operation. +func DBResponseReturnedRows(val int) attribute.KeyValue { + return DBResponseReturnedRowsKey.Int(val) +} + +// DBResponseStatusCode returns an attribute KeyValue conforming to the +// "db.response.status_code" semantic conventions. It represents the database +// response status code. +func DBResponseStatusCode(val string) attribute.KeyValue { + return DBResponseStatusCodeKey.String(val) +} + +// DBStoredProcedureName returns an attribute KeyValue conforming to the +// "db.stored_procedure.name" semantic conventions. It represents the name of a +// stored procedure within the database. +func DBStoredProcedureName(val string) attribute.KeyValue { + return DBStoredProcedureNameKey.String(val) +} + +// Enum values for db.client.connection.state +var ( + // idle + // Stability: development + DBClientConnectionStateIdle = DBClientConnectionStateKey.String("idle") + // used + // Stability: development + DBClientConnectionStateUsed = DBClientConnectionStateKey.String("used") +) + +// Enum values for db.system.name +var ( + // Some other SQL database. Fallback only. 
+ // Stability: development + DBSystemNameOtherSQL = DBSystemNameKey.String("other_sql") + // [Adabas (Adaptable Database System)] + // Stability: development + // + // [Adabas (Adaptable Database System)]: https://documentation.softwareag.com/?pf=adabas + DBSystemNameSoftwareagAdabas = DBSystemNameKey.String("softwareag.adabas") + // [Actian Ingres] + // Stability: development + // + // [Actian Ingres]: https://www.actian.com/databases/ingres/ + DBSystemNameActianIngres = DBSystemNameKey.String("actian.ingres") + // [Amazon DynamoDB] + // Stability: development + // + // [Amazon DynamoDB]: https://aws.amazon.com/pm/dynamodb/ + DBSystemNameAWSDynamoDB = DBSystemNameKey.String("aws.dynamodb") + // [Amazon Redshift] + // Stability: development + // + // [Amazon Redshift]: https://aws.amazon.com/redshift/ + DBSystemNameAWSRedshift = DBSystemNameKey.String("aws.redshift") + // [Azure Cosmos DB] + // Stability: development + // + // [Azure Cosmos DB]: https://learn.microsoft.com/azure/cosmos-db + DBSystemNameAzureCosmosDB = DBSystemNameKey.String("azure.cosmosdb") + // [InterSystems Caché] + // Stability: development + // + // [InterSystems Caché]: https://www.intersystems.com/products/cache/ + DBSystemNameIntersystemsCache = DBSystemNameKey.String("intersystems.cache") + // [Apache Cassandra] + // Stability: development + // + // [Apache Cassandra]: https://cassandra.apache.org/ + DBSystemNameCassandra = DBSystemNameKey.String("cassandra") + // [ClickHouse] + // Stability: development + // + // [ClickHouse]: https://clickhouse.com/ + DBSystemNameClickHouse = DBSystemNameKey.String("clickhouse") + // [CockroachDB] + // Stability: development + // + // [CockroachDB]: https://www.cockroachlabs.com/ + DBSystemNameCockroachDB = DBSystemNameKey.String("cockroachdb") + // [Couchbase] + // Stability: development + // + // [Couchbase]: https://www.couchbase.com/ + DBSystemNameCouchbase = DBSystemNameKey.String("couchbase") + // [Apache CouchDB] + // Stability: development + // 
+ // [Apache CouchDB]: https://couchdb.apache.org/ + DBSystemNameCouchDB = DBSystemNameKey.String("couchdb") + // [Apache Derby] + // Stability: development + // + // [Apache Derby]: https://db.apache.org/derby/ + DBSystemNameDerby = DBSystemNameKey.String("derby") + // [Elasticsearch] + // Stability: development + // + // [Elasticsearch]: https://www.elastic.co/elasticsearch + DBSystemNameElasticsearch = DBSystemNameKey.String("elasticsearch") + // [Firebird] + // Stability: development + // + // [Firebird]: https://www.firebirdsql.org/ + DBSystemNameFirebirdSQL = DBSystemNameKey.String("firebirdsql") + // [Google Cloud Spanner] + // Stability: development + // + // [Google Cloud Spanner]: https://cloud.google.com/spanner + DBSystemNameGCPSpanner = DBSystemNameKey.String("gcp.spanner") + // [Apache Geode] + // Stability: development + // + // [Apache Geode]: https://geode.apache.org/ + DBSystemNameGeode = DBSystemNameKey.String("geode") + // [H2 Database] + // Stability: development + // + // [H2 Database]: https://h2database.com/ + DBSystemNameH2database = DBSystemNameKey.String("h2database") + // [Apache HBase] + // Stability: development + // + // [Apache HBase]: https://hbase.apache.org/ + DBSystemNameHBase = DBSystemNameKey.String("hbase") + // [Apache Hive] + // Stability: development + // + // [Apache Hive]: https://hive.apache.org/ + DBSystemNameHive = DBSystemNameKey.String("hive") + // [HyperSQL Database] + // Stability: development + // + // [HyperSQL Database]: https://hsqldb.org/ + DBSystemNameHSQLDB = DBSystemNameKey.String("hsqldb") + // [IBM Db2] + // Stability: development + // + // [IBM Db2]: https://www.ibm.com/db2 + DBSystemNameIBMDB2 = DBSystemNameKey.String("ibm.db2") + // [IBM Informix] + // Stability: development + // + // [IBM Informix]: https://www.ibm.com/products/informix + DBSystemNameIBMInformix = DBSystemNameKey.String("ibm.informix") + // [IBM Netezza] + // Stability: development + // + // [IBM Netezza]: 
https://www.ibm.com/products/netezza + DBSystemNameIBMNetezza = DBSystemNameKey.String("ibm.netezza") + // [InfluxDB] + // Stability: development + // + // [InfluxDB]: https://www.influxdata.com/ + DBSystemNameInfluxDB = DBSystemNameKey.String("influxdb") + // [Instant] + // Stability: development + // + // [Instant]: https://www.instantdb.com/ + DBSystemNameInstantDB = DBSystemNameKey.String("instantdb") + // [MariaDB] + // Stability: stable + // + // [MariaDB]: https://mariadb.org/ + DBSystemNameMariaDB = DBSystemNameKey.String("mariadb") + // [Memcached] + // Stability: development + // + // [Memcached]: https://memcached.org/ + DBSystemNameMemcached = DBSystemNameKey.String("memcached") + // [MongoDB] + // Stability: development + // + // [MongoDB]: https://www.mongodb.com/ + DBSystemNameMongoDB = DBSystemNameKey.String("mongodb") + // [Microsoft SQL Server] + // Stability: stable + // + // [Microsoft SQL Server]: https://www.microsoft.com/sql-server + DBSystemNameMicrosoftSQLServer = DBSystemNameKey.String("microsoft.sql_server") + // [MySQL] + // Stability: stable + // + // [MySQL]: https://www.mysql.com/ + DBSystemNameMySQL = DBSystemNameKey.String("mysql") + // [Neo4j] + // Stability: development + // + // [Neo4j]: https://neo4j.com/ + DBSystemNameNeo4j = DBSystemNameKey.String("neo4j") + // [OpenSearch] + // Stability: development + // + // [OpenSearch]: https://opensearch.org/ + DBSystemNameOpenSearch = DBSystemNameKey.String("opensearch") + // [Oracle Database] + // Stability: development + // + // [Oracle Database]: https://www.oracle.com/database/ + DBSystemNameOracleDB = DBSystemNameKey.String("oracle.db") + // [PostgreSQL] + // Stability: stable + // + // [PostgreSQL]: https://www.postgresql.org/ + DBSystemNamePostgreSQL = DBSystemNameKey.String("postgresql") + // [Redis] + // Stability: development + // + // [Redis]: https://redis.io/ + DBSystemNameRedis = DBSystemNameKey.String("redis") + // [SAP HANA] + // Stability: development + // + // [SAP 
HANA]: https://www.sap.com/products/technology-platform/hana/what-is-sap-hana.html + DBSystemNameSAPHANA = DBSystemNameKey.String("sap.hana") + // [SAP MaxDB] + // Stability: development + // + // [SAP MaxDB]: https://maxdb.sap.com/ + DBSystemNameSAPMaxDB = DBSystemNameKey.String("sap.maxdb") + // [SQLite] + // Stability: development + // + // [SQLite]: https://www.sqlite.org/ + DBSystemNameSQLite = DBSystemNameKey.String("sqlite") + // [Teradata] + // Stability: development + // + // [Teradata]: https://www.teradata.com/ + DBSystemNameTeradata = DBSystemNameKey.String("teradata") + // [Trino] + // Stability: development + // + // [Trino]: https://trino.io/ + DBSystemNameTrino = DBSystemNameKey.String("trino") +) + +// Namespace: deployment +const ( + // DeploymentEnvironmentNameKey is the attribute Key conforming to the + // "deployment.environment.name" semantic conventions. It represents the name of + // the [deployment environment] (aka deployment tier). + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "staging", "production" + // Note: `deployment.environment.name` does not affect the uniqueness + // constraints defined through + // the `service.namespace`, `service.name` and `service.instance.id` resource + // attributes. + // This implies that resources carrying the following attribute combinations + // MUST be + // considered to be identifying the same service: + // + // - `service.name=frontend`, `deployment.environment.name=production` + // - `service.name=frontend`, `deployment.environment.name=staging`. + // + // + // [deployment environment]: https://wikipedia.org/wiki/Deployment_environment + DeploymentEnvironmentNameKey = attribute.Key("deployment.environment.name") + + // DeploymentIDKey is the attribute Key conforming to the "deployment.id" + // semantic conventions. It represents the id of the deployment. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "1208" + DeploymentIDKey = attribute.Key("deployment.id") + + // DeploymentNameKey is the attribute Key conforming to the "deployment.name" + // semantic conventions. It represents the name of the deployment. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "deploy my app", "deploy-frontend" + DeploymentNameKey = attribute.Key("deployment.name") + + // DeploymentStatusKey is the attribute Key conforming to the + // "deployment.status" semantic conventions. It represents the status of the + // deployment. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + DeploymentStatusKey = attribute.Key("deployment.status") +) + +// DeploymentEnvironmentName returns an attribute KeyValue conforming to the +// "deployment.environment.name" semantic conventions. It represents the name of +// the [deployment environment] (aka deployment tier). +// +// [deployment environment]: https://wikipedia.org/wiki/Deployment_environment +func DeploymentEnvironmentName(val string) attribute.KeyValue { + return DeploymentEnvironmentNameKey.String(val) +} + +// DeploymentID returns an attribute KeyValue conforming to the "deployment.id" +// semantic conventions. It represents the id of the deployment. +func DeploymentID(val string) attribute.KeyValue { + return DeploymentIDKey.String(val) +} + +// DeploymentName returns an attribute KeyValue conforming to the +// "deployment.name" semantic conventions. It represents the name of the +// deployment. 
+func DeploymentName(val string) attribute.KeyValue { + return DeploymentNameKey.String(val) +} + +// Enum values for deployment.status +var ( + // failed + // Stability: development + DeploymentStatusFailed = DeploymentStatusKey.String("failed") + // succeeded + // Stability: development + DeploymentStatusSucceeded = DeploymentStatusKey.String("succeeded") +) + +// Namespace: destination +const ( + // DestinationAddressKey is the attribute Key conforming to the + // "destination.address" semantic conventions. It represents the destination + // address - domain name if available without reverse DNS lookup; otherwise, IP + // address or Unix domain socket name. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "destination.example.com", "10.1.2.80", "/tmp/my.sock" + // Note: When observed from the source side, and when communicating through an + // intermediary, `destination.address` SHOULD represent the destination address + // behind any intermediaries, for example proxies, if it's available. + DestinationAddressKey = attribute.Key("destination.address") + + // DestinationPortKey is the attribute Key conforming to the "destination.port" + // semantic conventions. It represents the destination port number. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 3389, 2888 + DestinationPortKey = attribute.Key("destination.port") +) + +// DestinationAddress returns an attribute KeyValue conforming to the +// "destination.address" semantic conventions. It represents the destination +// address - domain name if available without reverse DNS lookup; otherwise, IP +// address or Unix domain socket name. +func DestinationAddress(val string) attribute.KeyValue { + return DestinationAddressKey.String(val) +} + +// DestinationPort returns an attribute KeyValue conforming to the +// "destination.port" semantic conventions. It represents the destination port +// number. 
+func DestinationPort(val int) attribute.KeyValue { + return DestinationPortKey.Int(val) +} + +// Namespace: device +const ( + // DeviceIDKey is the attribute Key conforming to the "device.id" semantic + // conventions. It represents a unique identifier representing the device. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "123456789012345", "01:23:45:67:89:AB" + // Note: Its value SHOULD be identical for all apps on a device and it SHOULD + // NOT change if an app is uninstalled and re-installed. + // However, it might be resettable by the user for all apps on a device. + // Hardware IDs (e.g. vendor-specific serial number, IMEI or MAC address) MAY be + // used as values. + // + // More information about Android identifier best practices can be found in the + // [Android user data IDs guide]. + // + // > [!WARNING]> This attribute may contain sensitive (PII) information. Caution + // > should be taken when storing personal data or anything which can identify a + // > user. GDPR and data protection laws may apply, + // > ensure you do your own due diligence.> Due to these reasons, this + // > identifier is not recommended for consumer applications and will likely + // > result in rejection from both Google Play and App Store. + // > However, it may be appropriate for specific enterprise scenarios, such as + // > kiosk devices or enterprise-managed devices, with appropriate compliance + // > clearance. + // > Any instrumentation providing this identifier MUST implement it as an + // > opt-in feature.> See [`app.installation.id`]> for a more + // > privacy-preserving alternative. 
+ // + // [Android user data IDs guide]: https://developer.android.com/training/articles/user-data-ids + // [`app.installation.id`]: /docs/registry/attributes/app.md#app-installation-id + DeviceIDKey = attribute.Key("device.id") + + // DeviceManufacturerKey is the attribute Key conforming to the + // "device.manufacturer" semantic conventions. It represents the name of the + // device manufacturer. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Apple", "Samsung" + // Note: The Android OS provides this field via [Build]. iOS apps SHOULD + // hardcode the value `Apple`. + // + // [Build]: https://developer.android.com/reference/android/os/Build#MANUFACTURER + DeviceManufacturerKey = attribute.Key("device.manufacturer") + + // DeviceModelIdentifierKey is the attribute Key conforming to the + // "device.model.identifier" semantic conventions. It represents the model + // identifier for the device. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "iPhone3,4", "SM-G920F" + // Note: It's recommended this value represents a machine-readable version of + // the model identifier rather than the market or consumer-friendly name of the + // device. + DeviceModelIdentifierKey = attribute.Key("device.model.identifier") + + // DeviceModelNameKey is the attribute Key conforming to the "device.model.name" + // semantic conventions. It represents the marketing name for the device model. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "iPhone 6s Plus", "Samsung Galaxy S6" + // Note: It's recommended this value represents a human-readable version of the + // device model rather than a machine-readable alternative. + DeviceModelNameKey = attribute.Key("device.model.name") +) + +// DeviceID returns an attribute KeyValue conforming to the "device.id" semantic +// conventions. 
It represents a unique identifier representing the device. +func DeviceID(val string) attribute.KeyValue { + return DeviceIDKey.String(val) +} + +// DeviceManufacturer returns an attribute KeyValue conforming to the +// "device.manufacturer" semantic conventions. It represents the name of the +// device manufacturer. +func DeviceManufacturer(val string) attribute.KeyValue { + return DeviceManufacturerKey.String(val) +} + +// DeviceModelIdentifier returns an attribute KeyValue conforming to the +// "device.model.identifier" semantic conventions. It represents the model +// identifier for the device. +func DeviceModelIdentifier(val string) attribute.KeyValue { + return DeviceModelIdentifierKey.String(val) +} + +// DeviceModelName returns an attribute KeyValue conforming to the +// "device.model.name" semantic conventions. It represents the marketing name for +// the device model. +func DeviceModelName(val string) attribute.KeyValue { + return DeviceModelNameKey.String(val) +} + +// Namespace: disk +const ( + // DiskIODirectionKey is the attribute Key conforming to the "disk.io.direction" + // semantic conventions. It represents the disk IO operation direction. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "read" + DiskIODirectionKey = attribute.Key("disk.io.direction") +) + +// Enum values for disk.io.direction +var ( + // read + // Stability: development + DiskIODirectionRead = DiskIODirectionKey.String("read") + // write + // Stability: development + DiskIODirectionWrite = DiskIODirectionKey.String("write") +) + +// Namespace: dns +const ( + // DNSAnswersKey is the attribute Key conforming to the "dns.answers" semantic + // conventions. It represents the list of IPv4 or IPv6 addresses resolved during + // DNS lookup. 
+ // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "10.0.0.1", "2001:0db8:85a3:0000:0000:8a2e:0370:7334" + DNSAnswersKey = attribute.Key("dns.answers") + + // DNSQuestionNameKey is the attribute Key conforming to the "dns.question.name" + // semantic conventions. It represents the name being queried. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "www.example.com", "opentelemetry.io" + // Note: If the name field contains non-printable characters (below 32 or above + // 126), those characters should be represented as escaped base 10 integers + // (\DDD). Back slashes and quotes should be escaped. Tabs, carriage returns, + // and line feeds should be converted to \t, \r, and \n respectively. + DNSQuestionNameKey = attribute.Key("dns.question.name") +) + +// DNSAnswers returns an attribute KeyValue conforming to the "dns.answers" +// semantic conventions. It represents the list of IPv4 or IPv6 addresses +// resolved during DNS lookup. +func DNSAnswers(val ...string) attribute.KeyValue { + return DNSAnswersKey.StringSlice(val) +} + +// DNSQuestionName returns an attribute KeyValue conforming to the +// "dns.question.name" semantic conventions. It represents the name being +// queried. +func DNSQuestionName(val string) attribute.KeyValue { + return DNSQuestionNameKey.String(val) +} + +// Namespace: elasticsearch +const ( + // ElasticsearchNodeNameKey is the attribute Key conforming to the + // "elasticsearch.node.name" semantic conventions. It represents the represents + // the human-readable identifier of the node/instance to which a request was + // routed. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "instance-0000000001" + ElasticsearchNodeNameKey = attribute.Key("elasticsearch.node.name") +) + +// ElasticsearchNodeName returns an attribute KeyValue conforming to the +// "elasticsearch.node.name" semantic conventions. It represents the represents +// the human-readable identifier of the node/instance to which a request was +// routed. +func ElasticsearchNodeName(val string) attribute.KeyValue { + return ElasticsearchNodeNameKey.String(val) +} + +// Namespace: enduser +const ( + // EnduserIDKey is the attribute Key conforming to the "enduser.id" semantic + // conventions. It represents the unique identifier of an end user in the + // system. It maybe a username, email address, or other identifier. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "username" + // Note: Unique identifier of an end user in the system. + // + // > [!Warning] + // > This field contains sensitive (PII) information. + EnduserIDKey = attribute.Key("enduser.id") + + // EnduserPseudoIDKey is the attribute Key conforming to the "enduser.pseudo.id" + // semantic conventions. It represents the pseudonymous identifier of an end + // user. This identifier should be a random value that is not directly linked or + // associated with the end user's actual identity. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "QdH5CAWJgqVT4rOr0qtumf" + // Note: Pseudonymous identifier of an end user. + // + // > [!Warning] + // > This field contains sensitive (linkable PII) information. + EnduserPseudoIDKey = attribute.Key("enduser.pseudo.id") +) + +// EnduserID returns an attribute KeyValue conforming to the "enduser.id" +// semantic conventions. It represents the unique identifier of an end user in +// the system. It maybe a username, email address, or other identifier. 
+func EnduserID(val string) attribute.KeyValue { + return EnduserIDKey.String(val) +} + +// EnduserPseudoID returns an attribute KeyValue conforming to the +// "enduser.pseudo.id" semantic conventions. It represents the pseudonymous +// identifier of an end user. This identifier should be a random value that is +// not directly linked or associated with the end user's actual identity. +func EnduserPseudoID(val string) attribute.KeyValue { + return EnduserPseudoIDKey.String(val) +} + +// Namespace: error +const ( + // ErrorMessageKey is the attribute Key conforming to the "error.message" + // semantic conventions. It represents a message providing more detail about an + // error in human-readable form. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Unexpected input type: string", "The user has exceeded their + // storage quota" + // Note: `error.message` should provide additional context and detail about an + // error. + // It is NOT RECOMMENDED to duplicate the value of `error.type` in + // `error.message`. + // It is also NOT RECOMMENDED to duplicate the value of `exception.message` in + // `error.message`. + // + // `error.message` is NOT RECOMMENDED for metrics or spans due to its unbounded + // cardinality and overlap with span status. + ErrorMessageKey = attribute.Key("error.message") + + // ErrorTypeKey is the attribute Key conforming to the "error.type" semantic + // conventions. It represents the describes a class of error the operation ended + // with. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "timeout", "java.net.UnknownHostException", + // "server_certificate_invalid", "500" + // Note: The `error.type` SHOULD be predictable, and SHOULD have low + // cardinality. + // + // When `error.type` is set to a type (e.g., an exception type), its + // canonical class name identifying the type within the artifact SHOULD be used. 
+ // + // Instrumentations SHOULD document the list of errors they report. + // + // The cardinality of `error.type` within one instrumentation library SHOULD be + // low. + // Telemetry consumers that aggregate data from multiple instrumentation + // libraries and applications + // should be prepared for `error.type` to have high cardinality at query time + // when no + // additional filters are applied. + // + // If the operation has completed successfully, instrumentations SHOULD NOT set + // `error.type`. + // + // If a specific domain defines its own set of error identifiers (such as HTTP + // or gRPC status codes), + // it's RECOMMENDED to: + // + // - Use a domain-specific attribute + // - Set `error.type` to capture all errors, regardless of whether they are + // defined within the domain-specific set or not. + ErrorTypeKey = attribute.Key("error.type") +) + +// ErrorMessage returns an attribute KeyValue conforming to the "error.message" +// semantic conventions. It represents a message providing more detail about an +// error in human-readable form. +func ErrorMessage(val string) attribute.KeyValue { + return ErrorMessageKey.String(val) +} + +// Enum values for error.type +var ( + // A fallback error value to be used when the instrumentation doesn't define a + // custom value. + // + // Stability: stable + ErrorTypeOther = ErrorTypeKey.String("_OTHER") +) + +// Namespace: exception +const ( + // ExceptionMessageKey is the attribute Key conforming to the + // "exception.message" semantic conventions. It represents the exception + // message. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "Division by zero", "Can't convert 'int' object to str implicitly" + ExceptionMessageKey = attribute.Key("exception.message") + + // ExceptionStacktraceKey is the attribute Key conforming to the + // "exception.stacktrace" semantic conventions. 
It represents a stacktrace as a + // string in the natural representation for the language runtime. The + // representation is to be determined and documented by each language SIG. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: Exception in thread "main" java.lang.RuntimeException: Test + // exception\n at com.example.GenerateTrace.methodB(GenerateTrace.java:13)\n at + // com.example.GenerateTrace.methodA(GenerateTrace.java:9)\n at + // com.example.GenerateTrace.main(GenerateTrace.java:5) + ExceptionStacktraceKey = attribute.Key("exception.stacktrace") + + // ExceptionTypeKey is the attribute Key conforming to the "exception.type" + // semantic conventions. It represents the type of the exception (its + // fully-qualified class name, if applicable). The dynamic type of the exception + // should be preferred over the static type in languages that support it. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "java.net.ConnectException", "OSError" + ExceptionTypeKey = attribute.Key("exception.type") +) + +// ExceptionMessage returns an attribute KeyValue conforming to the +// "exception.message" semantic conventions. It represents the exception message. +func ExceptionMessage(val string) attribute.KeyValue { + return ExceptionMessageKey.String(val) +} + +// ExceptionStacktrace returns an attribute KeyValue conforming to the +// "exception.stacktrace" semantic conventions. It represents a stacktrace as a +// string in the natural representation for the language runtime. The +// representation is to be determined and documented by each language SIG. +func ExceptionStacktrace(val string) attribute.KeyValue { + return ExceptionStacktraceKey.String(val) +} + +// ExceptionType returns an attribute KeyValue conforming to the "exception.type" +// semantic conventions. It represents the type of the exception (its +// fully-qualified class name, if applicable). 
The dynamic type of the exception +// should be preferred over the static type in languages that support it. +func ExceptionType(val string) attribute.KeyValue { + return ExceptionTypeKey.String(val) +} + +// Namespace: faas +const ( + // FaaSColdstartKey is the attribute Key conforming to the "faas.coldstart" + // semantic conventions. It represents a boolean that is true if the serverless + // function is executed for the first time (aka cold-start). + // + // Type: boolean + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + FaaSColdstartKey = attribute.Key("faas.coldstart") + + // FaaSCronKey is the attribute Key conforming to the "faas.cron" semantic + // conventions. It represents a string containing the schedule period as + // [Cron Expression]. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 0/5 * * * ? * + // + // [Cron Expression]: https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm + FaaSCronKey = attribute.Key("faas.cron") + + // FaaSDocumentCollectionKey is the attribute Key conforming to the + // "faas.document.collection" semantic conventions. It represents the name of + // the source on which the triggering operation was performed. For example, in + // Cloud Storage or S3 corresponds to the bucket name, and in Cosmos DB to the + // database name. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "myBucketName", "myDbName" + FaaSDocumentCollectionKey = attribute.Key("faas.document.collection") + + // FaaSDocumentNameKey is the attribute Key conforming to the + // "faas.document.name" semantic conventions. It represents the document + // name/table subjected to the operation. For example, in Cloud Storage or S3 is + // the name of the file, and in Cosmos DB the table name. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "myFile.txt", "myTableName" + FaaSDocumentNameKey = attribute.Key("faas.document.name") + + // FaaSDocumentOperationKey is the attribute Key conforming to the + // "faas.document.operation" semantic conventions. It represents the describes + // the type of the operation that was performed on the data. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + FaaSDocumentOperationKey = attribute.Key("faas.document.operation") + + // FaaSDocumentTimeKey is the attribute Key conforming to the + // "faas.document.time" semantic conventions. It represents a string containing + // the time when the data was accessed in the [ISO 8601] format expressed in + // [UTC]. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 2020-01-23T13:47:06Z + // + // [ISO 8601]: https://www.iso.org/iso-8601-date-and-time-format.html + // [UTC]: https://www.w3.org/TR/NOTE-datetime + FaaSDocumentTimeKey = attribute.Key("faas.document.time") + + // FaaSInstanceKey is the attribute Key conforming to the "faas.instance" + // semantic conventions. It represents the execution environment ID as a string, + // that will be potentially reused for other invocations to the same + // function/function version. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de" + // Note: - **AWS Lambda:** Use the (full) log stream name. + FaaSInstanceKey = attribute.Key("faas.instance") + + // FaaSInvocationIDKey is the attribute Key conforming to the + // "faas.invocation_id" semantic conventions. It represents the invocation ID of + // the current function invocation. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: af9d5aa4-a685-4c5f-a22b-444f80b3cc28 + FaaSInvocationIDKey = attribute.Key("faas.invocation_id") + + // FaaSInvokedNameKey is the attribute Key conforming to the "faas.invoked_name" + // semantic conventions. It represents the name of the invoked function. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: my-function + // Note: SHOULD be equal to the `faas.name` resource attribute of the invoked + // function. + FaaSInvokedNameKey = attribute.Key("faas.invoked_name") + + // FaaSInvokedProviderKey is the attribute Key conforming to the + // "faas.invoked_provider" semantic conventions. It represents the cloud + // provider of the invoked function. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // Note: SHOULD be equal to the `cloud.provider` resource attribute of the + // invoked function. + FaaSInvokedProviderKey = attribute.Key("faas.invoked_provider") + + // FaaSInvokedRegionKey is the attribute Key conforming to the + // "faas.invoked_region" semantic conventions. It represents the cloud region of + // the invoked function. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: eu-central-1 + // Note: SHOULD be equal to the `cloud.region` resource attribute of the invoked + // function. + FaaSInvokedRegionKey = attribute.Key("faas.invoked_region") + + // FaaSMaxMemoryKey is the attribute Key conforming to the "faas.max_memory" + // semantic conventions. It represents the amount of memory available to the + // serverless function converted to Bytes. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Note: It's recommended to set this attribute since e.g. too little memory can + // easily stop a Java AWS Lambda function from working correctly. 
On AWS Lambda, + // the environment variable `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` provides this + // information (which must be multiplied by 1,048,576). + FaaSMaxMemoryKey = attribute.Key("faas.max_memory") + + // FaaSNameKey is the attribute Key conforming to the "faas.name" semantic + // conventions. It represents the name of the single function that this runtime + // instance executes. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "my-function", "myazurefunctionapp/some-function-name" + // Note: This is the name of the function as configured/deployed on the FaaS + // platform and is usually different from the name of the callback + // function (which may be stored in the + // [`code.namespace`/`code.function.name`] + // span attributes). + // + // For some cloud providers, the above definition is ambiguous. The following + // definition of function name MUST be used for this attribute + // (and consequently the span name) for the listed cloud providers/products: + // + // - **Azure:** The full name `/`, i.e., function app name + // followed by a forward slash followed by the function name (this form + // can also be seen in the resource JSON for the function). + // This means that a span attribute MUST be used, as an Azure function + // app can host multiple functions that would usually share + // a TracerProvider (see also the `cloud.resource_id` attribute). + // + // + // [`code.namespace`/`code.function.name`]: /docs/general/attributes.md#source-code-attributes + FaaSNameKey = attribute.Key("faas.name") + + // FaaSTimeKey is the attribute Key conforming to the "faas.time" semantic + // conventions. It represents a string containing the function invocation time + // in the [ISO 8601] format expressed in [UTC]. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 2020-01-23T13:47:06Z + // + // [ISO 8601]: https://www.iso.org/iso-8601-date-and-time-format.html + // [UTC]: https://www.w3.org/TR/NOTE-datetime + FaaSTimeKey = attribute.Key("faas.time") + + // FaaSTriggerKey is the attribute Key conforming to the "faas.trigger" semantic + // conventions. It represents the type of the trigger which caused this function + // invocation. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + FaaSTriggerKey = attribute.Key("faas.trigger") + + // FaaSVersionKey is the attribute Key conforming to the "faas.version" semantic + // conventions. It represents the immutable version of the function being + // executed. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "26", "pinkfroid-00002" + // Note: Depending on the cloud provider and platform, use: + // + // - **AWS Lambda:** The [function version] + // (an integer represented as a decimal string). + // - **Google Cloud Run (Services):** The [revision] + // (i.e., the function name plus the revision suffix). + // - **Google Cloud Functions:** The value of the + // [`K_REVISION` environment variable]. + // - **Azure Functions:** Not applicable. Do not set this attribute. + // + // + // [function version]: https://docs.aws.amazon.com/lambda/latest/dg/configuration-versions.html + // [revision]: https://cloud.google.com/run/docs/managing/revisions + // [`K_REVISION` environment variable]: https://cloud.google.com/functions/docs/env-var#runtime_environment_variables_set_automatically + FaaSVersionKey = attribute.Key("faas.version") +) + +// FaaSColdstart returns an attribute KeyValue conforming to the "faas.coldstart" +// semantic conventions. It represents a boolean that is true if the serverless +// function is executed for the first time (aka cold-start). 
+func FaaSColdstart(val bool) attribute.KeyValue { + return FaaSColdstartKey.Bool(val) +} + +// FaaSCron returns an attribute KeyValue conforming to the "faas.cron" semantic +// conventions. It represents a string containing the schedule period as +// [Cron Expression]. +// +// [Cron Expression]: https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm +func FaaSCron(val string) attribute.KeyValue { + return FaaSCronKey.String(val) +} + +// FaaSDocumentCollection returns an attribute KeyValue conforming to the +// "faas.document.collection" semantic conventions. It represents the name of the +// source on which the triggering operation was performed. For example, in Cloud +// Storage or S3 corresponds to the bucket name, and in Cosmos DB to the database +// name. +func FaaSDocumentCollection(val string) attribute.KeyValue { + return FaaSDocumentCollectionKey.String(val) +} + +// FaaSDocumentName returns an attribute KeyValue conforming to the +// "faas.document.name" semantic conventions. It represents the document +// name/table subjected to the operation. For example, in Cloud Storage or S3 is +// the name of the file, and in Cosmos DB the table name. +func FaaSDocumentName(val string) attribute.KeyValue { + return FaaSDocumentNameKey.String(val) +} + +// FaaSDocumentTime returns an attribute KeyValue conforming to the +// "faas.document.time" semantic conventions. It represents a string containing +// the time when the data was accessed in the [ISO 8601] format expressed in +// [UTC]. +// +// [ISO 8601]: https://www.iso.org/iso-8601-date-and-time-format.html +// [UTC]: https://www.w3.org/TR/NOTE-datetime +func FaaSDocumentTime(val string) attribute.KeyValue { + return FaaSDocumentTimeKey.String(val) +} + +// FaaSInstance returns an attribute KeyValue conforming to the "faas.instance" +// semantic conventions. 
It represents the execution environment ID as a string, +// that will be potentially reused for other invocations to the same +// function/function version. +func FaaSInstance(val string) attribute.KeyValue { + return FaaSInstanceKey.String(val) +} + +// FaaSInvocationID returns an attribute KeyValue conforming to the +// "faas.invocation_id" semantic conventions. It represents the invocation ID of +// the current function invocation. +func FaaSInvocationID(val string) attribute.KeyValue { + return FaaSInvocationIDKey.String(val) +} + +// FaaSInvokedName returns an attribute KeyValue conforming to the +// "faas.invoked_name" semantic conventions. It represents the name of the +// invoked function. +func FaaSInvokedName(val string) attribute.KeyValue { + return FaaSInvokedNameKey.String(val) +} + +// FaaSInvokedRegion returns an attribute KeyValue conforming to the +// "faas.invoked_region" semantic conventions. It represents the cloud region of +// the invoked function. +func FaaSInvokedRegion(val string) attribute.KeyValue { + return FaaSInvokedRegionKey.String(val) +} + +// FaaSMaxMemory returns an attribute KeyValue conforming to the +// "faas.max_memory" semantic conventions. It represents the amount of memory +// available to the serverless function converted to Bytes. +func FaaSMaxMemory(val int) attribute.KeyValue { + return FaaSMaxMemoryKey.Int(val) +} + +// FaaSName returns an attribute KeyValue conforming to the "faas.name" semantic +// conventions. It represents the name of the single function that this runtime +// instance executes. +func FaaSName(val string) attribute.KeyValue { + return FaaSNameKey.String(val) +} + +// FaaSTime returns an attribute KeyValue conforming to the "faas.time" semantic +// conventions. It represents a string containing the function invocation time in +// the [ISO 8601] format expressed in [UTC]. 
+// +// [ISO 8601]: https://www.iso.org/iso-8601-date-and-time-format.html +// [UTC]: https://www.w3.org/TR/NOTE-datetime +func FaaSTime(val string) attribute.KeyValue { + return FaaSTimeKey.String(val) +} + +// FaaSVersion returns an attribute KeyValue conforming to the "faas.version" +// semantic conventions. It represents the immutable version of the function +// being executed. +func FaaSVersion(val string) attribute.KeyValue { + return FaaSVersionKey.String(val) +} + +// Enum values for faas.document.operation +var ( + // When a new object is created. + // Stability: development + FaaSDocumentOperationInsert = FaaSDocumentOperationKey.String("insert") + // When an object is modified. + // Stability: development + FaaSDocumentOperationEdit = FaaSDocumentOperationKey.String("edit") + // When an object is deleted. + // Stability: development + FaaSDocumentOperationDelete = FaaSDocumentOperationKey.String("delete") +) + +// Enum values for faas.invoked_provider +var ( + // Alibaba Cloud + // Stability: development + FaaSInvokedProviderAlibabaCloud = FaaSInvokedProviderKey.String("alibaba_cloud") + // Amazon Web Services + // Stability: development + FaaSInvokedProviderAWS = FaaSInvokedProviderKey.String("aws") + // Microsoft Azure + // Stability: development + FaaSInvokedProviderAzure = FaaSInvokedProviderKey.String("azure") + // Google Cloud Platform + // Stability: development + FaaSInvokedProviderGCP = FaaSInvokedProviderKey.String("gcp") + // Tencent Cloud + // Stability: development + FaaSInvokedProviderTencentCloud = FaaSInvokedProviderKey.String("tencent_cloud") +) + +// Enum values for faas.trigger +var ( + // A response to some data source operation such as a database or filesystem + // read/write + // Stability: development + FaaSTriggerDatasource = FaaSTriggerKey.String("datasource") + // To provide an answer to an inbound HTTP request + // Stability: development + FaaSTriggerHTTP = FaaSTriggerKey.String("http") + // A function is set to be executed 
when messages are sent to a messaging system + // Stability: development + FaaSTriggerPubSub = FaaSTriggerKey.String("pubsub") + // A function is scheduled to be executed regularly + // Stability: development + FaaSTriggerTimer = FaaSTriggerKey.String("timer") + // If none of the others apply + // Stability: development + FaaSTriggerOther = FaaSTriggerKey.String("other") +) + +// Namespace: feature_flag +const ( + // FeatureFlagContextIDKey is the attribute Key conforming to the + // "feature_flag.context.id" semantic conventions. It represents the unique + // identifier for the flag evaluation context. For example, the targeting key. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Release_Candidate + // + // Examples: "5157782b-2203-4c80-a857-dbbd5e7761db" + FeatureFlagContextIDKey = attribute.Key("feature_flag.context.id") + + // FeatureFlagKeyKey is the attribute Key conforming to the "feature_flag.key" + // semantic conventions. It represents the lookup key of the feature flag. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Release_Candidate + // + // Examples: "logo-color" + FeatureFlagKeyKey = attribute.Key("feature_flag.key") + + // FeatureFlagProviderNameKey is the attribute Key conforming to the + // "feature_flag.provider.name" semantic conventions. It represents the + // identifies the feature flag provider. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Release_Candidate + // + // Examples: "Flag Manager" + FeatureFlagProviderNameKey = attribute.Key("feature_flag.provider.name") + + // FeatureFlagResultReasonKey is the attribute Key conforming to the + // "feature_flag.result.reason" semantic conventions. It represents the reason + // code which shows how a feature flag value was determined. 
+ // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Release_Candidate + // + // Examples: "static", "targeting_match", "error", "default" + FeatureFlagResultReasonKey = attribute.Key("feature_flag.result.reason") + + // FeatureFlagResultValueKey is the attribute Key conforming to the + // "feature_flag.result.value" semantic conventions. It represents the evaluated + // value of the feature flag. + // + // Type: any + // RequirementLevel: Recommended + // Stability: Release_Candidate + // + // Examples: "#ff0000", true, 3 + // Note: With some feature flag providers, feature flag results can be quite + // large or contain private or sensitive details. + // Because of this, `feature_flag.result.variant` is often the preferred + // attribute if it is available. + // + // It may be desirable to redact or otherwise limit the size and scope of + // `feature_flag.result.value` if possible. + // Because the evaluated flag value is unstructured and may be any type, it is + // left to the instrumentation author to determine how best to achieve this. + FeatureFlagResultValueKey = attribute.Key("feature_flag.result.value") + + // FeatureFlagResultVariantKey is the attribute Key conforming to the + // "feature_flag.result.variant" semantic conventions. It represents a semantic + // identifier for an evaluated flag value. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Release_Candidate + // + // Examples: "red", "true", "on" + // Note: A semantic identifier, commonly referred to as a variant, provides a + // means + // for referring to a value without including the value itself. This can + // provide additional context for understanding the meaning behind a value. + // For example, the variant `red` maybe be used for the value `#c05543`. + FeatureFlagResultVariantKey = attribute.Key("feature_flag.result.variant") + + // FeatureFlagSetIDKey is the attribute Key conforming to the + // "feature_flag.set.id" semantic conventions. 
It represents the identifier of + // the [flag set] to which the feature flag belongs. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Release_Candidate + // + // Examples: "proj-1", "ab98sgs", "service1/dev" + // + // [flag set]: https://openfeature.dev/specification/glossary/#flag-set + FeatureFlagSetIDKey = attribute.Key("feature_flag.set.id") + + // FeatureFlagVersionKey is the attribute Key conforming to the + // "feature_flag.version" semantic conventions. It represents the version of the + // ruleset used during the evaluation. This may be any stable value which + // uniquely identifies the ruleset. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Release_Candidate + // + // Examples: "1", "01ABCDEF" + FeatureFlagVersionKey = attribute.Key("feature_flag.version") +) + +// FeatureFlagContextID returns an attribute KeyValue conforming to the +// "feature_flag.context.id" semantic conventions. It represents the unique +// identifier for the flag evaluation context. For example, the targeting key. +func FeatureFlagContextID(val string) attribute.KeyValue { + return FeatureFlagContextIDKey.String(val) +} + +// FeatureFlagKey returns an attribute KeyValue conforming to the +// "feature_flag.key" semantic conventions. It represents the lookup key of the +// feature flag. +func FeatureFlagKey(val string) attribute.KeyValue { + return FeatureFlagKeyKey.String(val) +} + +// FeatureFlagProviderName returns an attribute KeyValue conforming to the +// "feature_flag.provider.name" semantic conventions. It represents the +// identifies the feature flag provider. +func FeatureFlagProviderName(val string) attribute.KeyValue { + return FeatureFlagProviderNameKey.String(val) +} + +// FeatureFlagResultVariant returns an attribute KeyValue conforming to the +// "feature_flag.result.variant" semantic conventions. It represents a semantic +// identifier for an evaluated flag value. 
+func FeatureFlagResultVariant(val string) attribute.KeyValue { + return FeatureFlagResultVariantKey.String(val) +} + +// FeatureFlagSetID returns an attribute KeyValue conforming to the +// "feature_flag.set.id" semantic conventions. It represents the identifier of +// the [flag set] to which the feature flag belongs. +// +// [flag set]: https://openfeature.dev/specification/glossary/#flag-set +func FeatureFlagSetID(val string) attribute.KeyValue { + return FeatureFlagSetIDKey.String(val) +} + +// FeatureFlagVersion returns an attribute KeyValue conforming to the +// "feature_flag.version" semantic conventions. It represents the version of the +// ruleset used during the evaluation. This may be any stable value which +// uniquely identifies the ruleset. +func FeatureFlagVersion(val string) attribute.KeyValue { + return FeatureFlagVersionKey.String(val) +} + +// Enum values for feature_flag.result.reason +var ( + // The resolved value is static (no dynamic evaluation). + // Stability: release_candidate + FeatureFlagResultReasonStatic = FeatureFlagResultReasonKey.String("static") + // The resolved value fell back to a pre-configured value (no dynamic evaluation + // occurred or dynamic evaluation yielded no result). + // Stability: release_candidate + FeatureFlagResultReasonDefault = FeatureFlagResultReasonKey.String("default") + // The resolved value was the result of a dynamic evaluation, such as a rule or + // specific user-targeting. + // Stability: release_candidate + FeatureFlagResultReasonTargetingMatch = FeatureFlagResultReasonKey.String("targeting_match") + // The resolved value was the result of pseudorandom assignment. + // Stability: release_candidate + FeatureFlagResultReasonSplit = FeatureFlagResultReasonKey.String("split") + // The resolved value was retrieved from cache. 
+ // Stability: release_candidate + FeatureFlagResultReasonCached = FeatureFlagResultReasonKey.String("cached") + // The resolved value was the result of the flag being disabled in the + // management system. + // Stability: release_candidate + FeatureFlagResultReasonDisabled = FeatureFlagResultReasonKey.String("disabled") + // The reason for the resolved value could not be determined. + // Stability: release_candidate + FeatureFlagResultReasonUnknown = FeatureFlagResultReasonKey.String("unknown") + // The resolved value is non-authoritative or possibly out of date + // Stability: release_candidate + FeatureFlagResultReasonStale = FeatureFlagResultReasonKey.String("stale") + // The resolved value was the result of an error. + // Stability: release_candidate + FeatureFlagResultReasonError = FeatureFlagResultReasonKey.String("error") +) + +// Namespace: file +const ( + // FileAccessedKey is the attribute Key conforming to the "file.accessed" + // semantic conventions. It represents the time when the file was last accessed, + // in ISO 8601 format. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "2021-01-01T12:00:00Z" + // Note: This attribute might not be supported by some file systems — NFS, + // FAT32, in embedded OS, etc. + FileAccessedKey = attribute.Key("file.accessed") + + // FileAttributesKey is the attribute Key conforming to the "file.attributes" + // semantic conventions. It represents the array of file attributes. + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "readonly", "hidden" + // Note: Attributes names depend on the OS or file system. Here’s a + // non-exhaustive list of values expected for this attribute: `archive`, + // `compressed`, `directory`, `encrypted`, `execute`, `hidden`, `immutable`, + // `journaled`, `read`, `readonly`, `symbolic link`, `system`, `temporary`, + // `write`. 
+ FileAttributesKey = attribute.Key("file.attributes") + + // FileChangedKey is the attribute Key conforming to the "file.changed" semantic + // conventions. It represents the time when the file attributes or metadata was + // last changed, in ISO 8601 format. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "2021-01-01T12:00:00Z" + // Note: `file.changed` captures the time when any of the file's properties or + // attributes (including the content) are changed, while `file.modified` + // captures the timestamp when the file content is modified. + FileChangedKey = attribute.Key("file.changed") + + // FileCreatedKey is the attribute Key conforming to the "file.created" semantic + // conventions. It represents the time when the file was created, in ISO 8601 + // format. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "2021-01-01T12:00:00Z" + // Note: This attribute might not be supported by some file systems — NFS, + // FAT32, in embedded OS, etc. + FileCreatedKey = attribute.Key("file.created") + + // FileDirectoryKey is the attribute Key conforming to the "file.directory" + // semantic conventions. It represents the directory where the file is located. + // It should include the drive letter, when appropriate. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "/home/user", "C:\Program Files\MyApp" + FileDirectoryKey = attribute.Key("file.directory") + + // FileExtensionKey is the attribute Key conforming to the "file.extension" + // semantic conventions. It represents the file extension, excluding the leading + // dot. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "png", "gz" + // Note: When the file name has multiple extensions (example.tar.gz), only the + // last one should be captured ("gz", not "tar.gz"). 
+ FileExtensionKey = attribute.Key("file.extension") + + // FileForkNameKey is the attribute Key conforming to the "file.fork_name" + // semantic conventions. It represents the name of the fork. A fork is + // additional data associated with a filesystem object. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Zone.Identifier" + // Note: On Linux, a resource fork is used to store additional data with a + // filesystem object. A file always has at least one fork for the data portion, + // and additional forks may exist. + // On NTFS, this is analogous to an Alternate Data Stream (ADS), and the default + // data stream for a file is just called $DATA. Zone.Identifier is commonly used + // by Windows to track contents downloaded from the Internet. An ADS is + // typically of the form: C:\path\to\filename.extension:some_fork_name, and + // some_fork_name is the value that should populate `fork_name`. + // `filename.extension` should populate `file.name`, and `extension` should + // populate `file.extension`. The full path, `file.path`, will include the fork + // name. + FileForkNameKey = attribute.Key("file.fork_name") + + // FileGroupIDKey is the attribute Key conforming to the "file.group.id" + // semantic conventions. It represents the primary Group ID (GID) of the file. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "1000" + FileGroupIDKey = attribute.Key("file.group.id") + + // FileGroupNameKey is the attribute Key conforming to the "file.group.name" + // semantic conventions. It represents the primary group name of the file. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "users" + FileGroupNameKey = attribute.Key("file.group.name") + + // FileInodeKey is the attribute Key conforming to the "file.inode" semantic + // conventions. 
It represents the inode representing the file in the filesystem. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "256383" + FileInodeKey = attribute.Key("file.inode") + + // FileModeKey is the attribute Key conforming to the "file.mode" semantic + // conventions. It represents the mode of the file in octal representation. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "0640" + FileModeKey = attribute.Key("file.mode") + + // FileModifiedKey is the attribute Key conforming to the "file.modified" + // semantic conventions. It represents the time when the file content was last + // modified, in ISO 8601 format. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "2021-01-01T12:00:00Z" + FileModifiedKey = attribute.Key("file.modified") + + // FileNameKey is the attribute Key conforming to the "file.name" semantic + // conventions. It represents the name of the file including the extension, + // without the directory. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "example.png" + FileNameKey = attribute.Key("file.name") + + // FileOwnerIDKey is the attribute Key conforming to the "file.owner.id" + // semantic conventions. It represents the user ID (UID) or security identifier + // (SID) of the file owner. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "1000" + FileOwnerIDKey = attribute.Key("file.owner.id") + + // FileOwnerNameKey is the attribute Key conforming to the "file.owner.name" + // semantic conventions. It represents the username of the file owner. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "root" + FileOwnerNameKey = attribute.Key("file.owner.name") + + // FilePathKey is the attribute Key conforming to the "file.path" semantic + // conventions. It represents the full path to the file, including the file + // name. It should include the drive letter, when appropriate. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "/home/alice/example.png", "C:\Program Files\MyApp\myapp.exe" + FilePathKey = attribute.Key("file.path") + + // FileSizeKey is the attribute Key conforming to the "file.size" semantic + // conventions. It represents the file size in bytes. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + FileSizeKey = attribute.Key("file.size") + + // FileSymbolicLinkTargetPathKey is the attribute Key conforming to the + // "file.symbolic_link.target_path" semantic conventions. It represents the path + // to the target of a symbolic link. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "/usr/bin/python3" + // Note: This attribute is only applicable to symbolic links. + FileSymbolicLinkTargetPathKey = attribute.Key("file.symbolic_link.target_path") +) + +// FileAccessed returns an attribute KeyValue conforming to the "file.accessed" +// semantic conventions. It represents the time when the file was last accessed, +// in ISO 8601 format. +func FileAccessed(val string) attribute.KeyValue { + return FileAccessedKey.String(val) +} + +// FileAttributes returns an attribute KeyValue conforming to the +// "file.attributes" semantic conventions. It represents the array of file +// attributes. 
+func FileAttributes(val ...string) attribute.KeyValue { + return FileAttributesKey.StringSlice(val) +} + +// FileChanged returns an attribute KeyValue conforming to the "file.changed" +// semantic conventions. It represents the time when the file attributes or +// metadata was last changed, in ISO 8601 format. +func FileChanged(val string) attribute.KeyValue { + return FileChangedKey.String(val) +} + +// FileCreated returns an attribute KeyValue conforming to the "file.created" +// semantic conventions. It represents the time when the file was created, in ISO +// 8601 format. +func FileCreated(val string) attribute.KeyValue { + return FileCreatedKey.String(val) +} + +// FileDirectory returns an attribute KeyValue conforming to the "file.directory" +// semantic conventions. It represents the directory where the file is located. +// It should include the drive letter, when appropriate. +func FileDirectory(val string) attribute.KeyValue { + return FileDirectoryKey.String(val) +} + +// FileExtension returns an attribute KeyValue conforming to the "file.extension" +// semantic conventions. It represents the file extension, excluding the leading +// dot. +func FileExtension(val string) attribute.KeyValue { + return FileExtensionKey.String(val) +} + +// FileForkName returns an attribute KeyValue conforming to the "file.fork_name" +// semantic conventions. It represents the name of the fork. A fork is additional +// data associated with a filesystem object. +func FileForkName(val string) attribute.KeyValue { + return FileForkNameKey.String(val) +} + +// FileGroupID returns an attribute KeyValue conforming to the "file.group.id" +// semantic conventions. It represents the primary Group ID (GID) of the file. +func FileGroupID(val string) attribute.KeyValue { + return FileGroupIDKey.String(val) +} + +// FileGroupName returns an attribute KeyValue conforming to the +// "file.group.name" semantic conventions. It represents the primary group name +// of the file. 
+func FileGroupName(val string) attribute.KeyValue { + return FileGroupNameKey.String(val) +} + +// FileInode returns an attribute KeyValue conforming to the "file.inode" +// semantic conventions. It represents the inode representing the file in the +// filesystem. +func FileInode(val string) attribute.KeyValue { + return FileInodeKey.String(val) +} + +// FileMode returns an attribute KeyValue conforming to the "file.mode" semantic +// conventions. It represents the mode of the file in octal representation. +func FileMode(val string) attribute.KeyValue { + return FileModeKey.String(val) +} + +// FileModified returns an attribute KeyValue conforming to the "file.modified" +// semantic conventions. It represents the time when the file content was last +// modified, in ISO 8601 format. +func FileModified(val string) attribute.KeyValue { + return FileModifiedKey.String(val) +} + +// FileName returns an attribute KeyValue conforming to the "file.name" semantic +// conventions. It represents the name of the file including the extension, +// without the directory. +func FileName(val string) attribute.KeyValue { + return FileNameKey.String(val) +} + +// FileOwnerID returns an attribute KeyValue conforming to the "file.owner.id" +// semantic conventions. It represents the user ID (UID) or security identifier +// (SID) of the file owner. +func FileOwnerID(val string) attribute.KeyValue { + return FileOwnerIDKey.String(val) +} + +// FileOwnerName returns an attribute KeyValue conforming to the +// "file.owner.name" semantic conventions. It represents the username of the file +// owner. +func FileOwnerName(val string) attribute.KeyValue { + return FileOwnerNameKey.String(val) +} + +// FilePath returns an attribute KeyValue conforming to the "file.path" semantic +// conventions. It represents the full path to the file, including the file name. +// It should include the drive letter, when appropriate. 
+func FilePath(val string) attribute.KeyValue { + return FilePathKey.String(val) +} + +// FileSize returns an attribute KeyValue conforming to the "file.size" semantic +// conventions. It represents the file size in bytes. +func FileSize(val int) attribute.KeyValue { + return FileSizeKey.Int(val) +} + +// FileSymbolicLinkTargetPath returns an attribute KeyValue conforming to the +// "file.symbolic_link.target_path" semantic conventions. It represents the path +// to the target of a symbolic link. +func FileSymbolicLinkTargetPath(val string) attribute.KeyValue { + return FileSymbolicLinkTargetPathKey.String(val) +} + +// Namespace: gcp +const ( + // GCPAppHubApplicationContainerKey is the attribute Key conforming to the + // "gcp.apphub.application.container" semantic conventions. It represents the + // container within GCP where the AppHub application is defined. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "projects/my-container-project" + GCPAppHubApplicationContainerKey = attribute.Key("gcp.apphub.application.container") + + // GCPAppHubApplicationIDKey is the attribute Key conforming to the + // "gcp.apphub.application.id" semantic conventions. It represents the name of + // the application as configured in AppHub. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "my-application" + GCPAppHubApplicationIDKey = attribute.Key("gcp.apphub.application.id") + + // GCPAppHubApplicationLocationKey is the attribute Key conforming to the + // "gcp.apphub.application.location" semantic conventions. It represents the GCP + // zone or region where the application is defined. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "us-central1" + GCPAppHubApplicationLocationKey = attribute.Key("gcp.apphub.application.location") + + // GCPAppHubServiceCriticalityTypeKey is the attribute Key conforming to the + // "gcp.apphub.service.criticality_type" semantic conventions. It represents the + // criticality of a service indicates its importance to the business. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // Note: [See AppHub type enum] + // + // [See AppHub type enum]: https://cloud.google.com/app-hub/docs/reference/rest/v1/Attributes#type + GCPAppHubServiceCriticalityTypeKey = attribute.Key("gcp.apphub.service.criticality_type") + + // GCPAppHubServiceEnvironmentTypeKey is the attribute Key conforming to the + // "gcp.apphub.service.environment_type" semantic conventions. It represents the + // environment of a service is the stage of a software lifecycle. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // Note: [See AppHub environment type] + // + // [See AppHub environment type]: https://cloud.google.com/app-hub/docs/reference/rest/v1/Attributes#type_1 + GCPAppHubServiceEnvironmentTypeKey = attribute.Key("gcp.apphub.service.environment_type") + + // GCPAppHubServiceIDKey is the attribute Key conforming to the + // "gcp.apphub.service.id" semantic conventions. It represents the name of the + // service as configured in AppHub. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "my-service" + GCPAppHubServiceIDKey = attribute.Key("gcp.apphub.service.id") + + // GCPAppHubWorkloadCriticalityTypeKey is the attribute Key conforming to the + // "gcp.apphub.workload.criticality_type" semantic conventions. It represents + // the criticality of a workload indicates its importance to the business. 
+ // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // Note: [See AppHub type enum] + // + // [See AppHub type enum]: https://cloud.google.com/app-hub/docs/reference/rest/v1/Attributes#type + GCPAppHubWorkloadCriticalityTypeKey = attribute.Key("gcp.apphub.workload.criticality_type") + + // GCPAppHubWorkloadEnvironmentTypeKey is the attribute Key conforming to the + // "gcp.apphub.workload.environment_type" semantic conventions. It represents + // the environment of a workload is the stage of a software lifecycle. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // Note: [See AppHub environment type] + // + // [See AppHub environment type]: https://cloud.google.com/app-hub/docs/reference/rest/v1/Attributes#type_1 + GCPAppHubWorkloadEnvironmentTypeKey = attribute.Key("gcp.apphub.workload.environment_type") + + // GCPAppHubWorkloadIDKey is the attribute Key conforming to the + // "gcp.apphub.workload.id" semantic conventions. It represents the name of the + // workload as configured in AppHub. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "my-workload" + GCPAppHubWorkloadIDKey = attribute.Key("gcp.apphub.workload.id") + + // GCPClientServiceKey is the attribute Key conforming to the + // "gcp.client.service" semantic conventions. It represents the identifies the + // Google Cloud service for which the official client library is intended. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "appengine", "run", "firestore", "alloydb", "spanner" + // Note: Intended to be a stable identifier for Google Cloud client libraries + // that is uniform across implementation languages. The value should be derived + // from the canonical service domain for the service; for example, + // 'foo.googleapis.com' should result in a value of 'foo'. 
+ GCPClientServiceKey = attribute.Key("gcp.client.service") + + // GCPCloudRunJobExecutionKey is the attribute Key conforming to the + // "gcp.cloud_run.job.execution" semantic conventions. It represents the name of + // the Cloud Run [execution] being run for the Job, as set by the + // [`CLOUD_RUN_EXECUTION`] environment variable. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "job-name-xxxx", "sample-job-mdw84" + // + // [execution]: https://cloud.google.com/run/docs/managing/job-executions + // [`CLOUD_RUN_EXECUTION`]: https://cloud.google.com/run/docs/container-contract#jobs-env-vars + GCPCloudRunJobExecutionKey = attribute.Key("gcp.cloud_run.job.execution") + + // GCPCloudRunJobTaskIndexKey is the attribute Key conforming to the + // "gcp.cloud_run.job.task_index" semantic conventions. It represents the index + // for a task within an execution as provided by the [`CLOUD_RUN_TASK_INDEX`] + // environment variable. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 0, 1 + // + // [`CLOUD_RUN_TASK_INDEX`]: https://cloud.google.com/run/docs/container-contract#jobs-env-vars + GCPCloudRunJobTaskIndexKey = attribute.Key("gcp.cloud_run.job.task_index") + + // GCPGCEInstanceHostnameKey is the attribute Key conforming to the + // "gcp.gce.instance.hostname" semantic conventions. It represents the hostname + // of a GCE instance. This is the full value of the default or [custom hostname] + // . + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "my-host1234.example.com", + // "sample-vm.us-west1-b.c.my-project.internal" + // + // [custom hostname]: https://cloud.google.com/compute/docs/instances/custom-hostname-vm + GCPGCEInstanceHostnameKey = attribute.Key("gcp.gce.instance.hostname") + + // GCPGCEInstanceNameKey is the attribute Key conforming to the + // "gcp.gce.instance.name" semantic conventions. 
It represents the instance name + // of a GCE instance. This is the value provided by `host.name`, the visible + // name of the instance in the Cloud Console UI, and the prefix for the default + // hostname of the instance as defined by the [default internal DNS name]. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "instance-1", "my-vm-name" + // + // [default internal DNS name]: https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names + GCPGCEInstanceNameKey = attribute.Key("gcp.gce.instance.name") +) + +// GCPAppHubApplicationContainer returns an attribute KeyValue conforming to the +// "gcp.apphub.application.container" semantic conventions. It represents the +// container within GCP where the AppHub application is defined. +func GCPAppHubApplicationContainer(val string) attribute.KeyValue { + return GCPAppHubApplicationContainerKey.String(val) +} + +// GCPAppHubApplicationID returns an attribute KeyValue conforming to the +// "gcp.apphub.application.id" semantic conventions. It represents the name of +// the application as configured in AppHub. +func GCPAppHubApplicationID(val string) attribute.KeyValue { + return GCPAppHubApplicationIDKey.String(val) +} + +// GCPAppHubApplicationLocation returns an attribute KeyValue conforming to the +// "gcp.apphub.application.location" semantic conventions. It represents the GCP +// zone or region where the application is defined. +func GCPAppHubApplicationLocation(val string) attribute.KeyValue { + return GCPAppHubApplicationLocationKey.String(val) +} + +// GCPAppHubServiceID returns an attribute KeyValue conforming to the +// "gcp.apphub.service.id" semantic conventions. It represents the name of the +// service as configured in AppHub. 
+func GCPAppHubServiceID(val string) attribute.KeyValue { + return GCPAppHubServiceIDKey.String(val) +} + +// GCPAppHubWorkloadID returns an attribute KeyValue conforming to the +// "gcp.apphub.workload.id" semantic conventions. It represents the name of the +// workload as configured in AppHub. +func GCPAppHubWorkloadID(val string) attribute.KeyValue { + return GCPAppHubWorkloadIDKey.String(val) +} + +// GCPClientService returns an attribute KeyValue conforming to the +// "gcp.client.service" semantic conventions. It represents the identifies the +// Google Cloud service for which the official client library is intended. +func GCPClientService(val string) attribute.KeyValue { + return GCPClientServiceKey.String(val) +} + +// GCPCloudRunJobExecution returns an attribute KeyValue conforming to the +// "gcp.cloud_run.job.execution" semantic conventions. It represents the name of +// the Cloud Run [execution] being run for the Job, as set by the +// [`CLOUD_RUN_EXECUTION`] environment variable. +// +// [execution]: https://cloud.google.com/run/docs/managing/job-executions +// [`CLOUD_RUN_EXECUTION`]: https://cloud.google.com/run/docs/container-contract#jobs-env-vars +func GCPCloudRunJobExecution(val string) attribute.KeyValue { + return GCPCloudRunJobExecutionKey.String(val) +} + +// GCPCloudRunJobTaskIndex returns an attribute KeyValue conforming to the +// "gcp.cloud_run.job.task_index" semantic conventions. It represents the index +// for a task within an execution as provided by the [`CLOUD_RUN_TASK_INDEX`] +// environment variable. +// +// [`CLOUD_RUN_TASK_INDEX`]: https://cloud.google.com/run/docs/container-contract#jobs-env-vars +func GCPCloudRunJobTaskIndex(val int) attribute.KeyValue { + return GCPCloudRunJobTaskIndexKey.Int(val) +} + +// GCPGCEInstanceHostname returns an attribute KeyValue conforming to the +// "gcp.gce.instance.hostname" semantic conventions. It represents the hostname +// of a GCE instance. 
This is the full value of the default or [custom hostname] +// . +// +// [custom hostname]: https://cloud.google.com/compute/docs/instances/custom-hostname-vm +func GCPGCEInstanceHostname(val string) attribute.KeyValue { + return GCPGCEInstanceHostnameKey.String(val) +} + +// GCPGCEInstanceName returns an attribute KeyValue conforming to the +// "gcp.gce.instance.name" semantic conventions. It represents the instance name +// of a GCE instance. This is the value provided by `host.name`, the visible name +// of the instance in the Cloud Console UI, and the prefix for the default +// hostname of the instance as defined by the [default internal DNS name]. +// +// [default internal DNS name]: https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names +func GCPGCEInstanceName(val string) attribute.KeyValue { + return GCPGCEInstanceNameKey.String(val) +} + +// Enum values for gcp.apphub.service.criticality_type +var ( + // Mission critical service. + // Stability: development + GCPAppHubServiceCriticalityTypeMissionCritical = GCPAppHubServiceCriticalityTypeKey.String("MISSION_CRITICAL") + // High impact. + // Stability: development + GCPAppHubServiceCriticalityTypeHigh = GCPAppHubServiceCriticalityTypeKey.String("HIGH") + // Medium impact. + // Stability: development + GCPAppHubServiceCriticalityTypeMedium = GCPAppHubServiceCriticalityTypeKey.String("MEDIUM") + // Low impact. + // Stability: development + GCPAppHubServiceCriticalityTypeLow = GCPAppHubServiceCriticalityTypeKey.String("LOW") +) + +// Enum values for gcp.apphub.service.environment_type +var ( + // Production environment. + // Stability: development + GCPAppHubServiceEnvironmentTypeProduction = GCPAppHubServiceEnvironmentTypeKey.String("PRODUCTION") + // Staging environment. + // Stability: development + GCPAppHubServiceEnvironmentTypeStaging = GCPAppHubServiceEnvironmentTypeKey.String("STAGING") + // Test environment. 
+ // Stability: development + GCPAppHubServiceEnvironmentTypeTest = GCPAppHubServiceEnvironmentTypeKey.String("TEST") + // Development environment. + // Stability: development + GCPAppHubServiceEnvironmentTypeDevelopment = GCPAppHubServiceEnvironmentTypeKey.String("DEVELOPMENT") +) + +// Enum values for gcp.apphub.workload.criticality_type +var ( + // Mission critical service. + // Stability: development + GCPAppHubWorkloadCriticalityTypeMissionCritical = GCPAppHubWorkloadCriticalityTypeKey.String("MISSION_CRITICAL") + // High impact. + // Stability: development + GCPAppHubWorkloadCriticalityTypeHigh = GCPAppHubWorkloadCriticalityTypeKey.String("HIGH") + // Medium impact. + // Stability: development + GCPAppHubWorkloadCriticalityTypeMedium = GCPAppHubWorkloadCriticalityTypeKey.String("MEDIUM") + // Low impact. + // Stability: development + GCPAppHubWorkloadCriticalityTypeLow = GCPAppHubWorkloadCriticalityTypeKey.String("LOW") +) + +// Enum values for gcp.apphub.workload.environment_type +var ( + // Production environment. + // Stability: development + GCPAppHubWorkloadEnvironmentTypeProduction = GCPAppHubWorkloadEnvironmentTypeKey.String("PRODUCTION") + // Staging environment. + // Stability: development + GCPAppHubWorkloadEnvironmentTypeStaging = GCPAppHubWorkloadEnvironmentTypeKey.String("STAGING") + // Test environment. + // Stability: development + GCPAppHubWorkloadEnvironmentTypeTest = GCPAppHubWorkloadEnvironmentTypeKey.String("TEST") + // Development environment. + // Stability: development + GCPAppHubWorkloadEnvironmentTypeDevelopment = GCPAppHubWorkloadEnvironmentTypeKey.String("DEVELOPMENT") +) + +// Namespace: gen_ai +const ( + // GenAIAgentDescriptionKey is the attribute Key conforming to the + // "gen_ai.agent.description" semantic conventions. It represents the free-form + // description of the GenAI agent provided by the application. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Helps with math problems", "Generates fiction stories" + GenAIAgentDescriptionKey = attribute.Key("gen_ai.agent.description") + + // GenAIAgentIDKey is the attribute Key conforming to the "gen_ai.agent.id" + // semantic conventions. It represents the unique identifier of the GenAI agent. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "asst_5j66UpCpwteGg4YSxUnt7lPY" + GenAIAgentIDKey = attribute.Key("gen_ai.agent.id") + + // GenAIAgentNameKey is the attribute Key conforming to the "gen_ai.agent.name" + // semantic conventions. It represents the human-readable name of the GenAI + // agent provided by the application. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Math Tutor", "Fiction Writer" + GenAIAgentNameKey = attribute.Key("gen_ai.agent.name") + + // GenAIConversationIDKey is the attribute Key conforming to the + // "gen_ai.conversation.id" semantic conventions. It represents the unique + // identifier for a conversation (session, thread), used to store and correlate + // messages within this conversation. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "conv_5j66UpCpwteGg4YSxUnt7lPY" + GenAIConversationIDKey = attribute.Key("gen_ai.conversation.id") + + // GenAIDataSourceIDKey is the attribute Key conforming to the + // "gen_ai.data_source.id" semantic conventions. It represents the data source + // identifier. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "H7STPQYOND" + // Note: Data sources are used by AI agents and RAG applications to store + // grounding data. 
A data source may be an external database, object store, + // document collection, website, or any other storage system used by the GenAI + // agent or application. The `gen_ai.data_source.id` SHOULD match the identifier + // used by the GenAI system rather than a name specific to the external storage, + // such as a database or object store. Semantic conventions referencing + // `gen_ai.data_source.id` MAY also leverage additional attributes, such as + // `db.*`, to further identify and describe the data source. + GenAIDataSourceIDKey = attribute.Key("gen_ai.data_source.id") + + // GenAIInputMessagesKey is the attribute Key conforming to the + // "gen_ai.input.messages" semantic conventions. It represents the chat history + // provided to the model as an input. + // + // Type: any + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "[\n {\n "role": "user",\n "parts": [\n {\n "type": "text",\n + // "content": "Weather in Paris?"\n }\n ]\n },\n {\n "role": "assistant",\n + // "parts": [\n {\n "type": "tool_call",\n "id": + // "call_VSPygqKTWdrhaFErNvMV18Yl",\n "name": "get_weather",\n "arguments": {\n + // "location": "Paris"\n }\n }\n ]\n },\n {\n "role": "tool",\n "parts": [\n {\n + // "type": "tool_call_response",\n "id": " call_VSPygqKTWdrhaFErNvMV18Yl",\n + // "result": "rainy, 57°F"\n }\n ]\n }\n]\n" + // Note: Instrumentations MUST follow [Input messages JSON schema]. + // When the attribute is recorded on events, it MUST be recorded in structured + // form. When recorded on spans, it MAY be recorded as a JSON string if + // structured + // format is not supported and SHOULD be recorded in structured form otherwise. + // + // Messages MUST be provided in the order they were sent to the model. + // Instrumentations MAY provide a way for users to filter or truncate + // input messages. + // + // > [!Warning] + // > This attribute is likely to contain sensitive information including + // > user/PII data. 
+ // + // See [Recording content on attributes] + // section for more details. + // + // [Input messages JSON schema]: /docs/gen-ai/gen-ai-input-messages.json + // [Recording content on attributes]: /docs/gen-ai/gen-ai-spans.md#recording-content-on-attributes + GenAIInputMessagesKey = attribute.Key("gen_ai.input.messages") + + // GenAIOperationNameKey is the attribute Key conforming to the + // "gen_ai.operation.name" semantic conventions. It represents the name of the + // operation being performed. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // Note: If one of the predefined values applies, but specific system uses a + // different name it's RECOMMENDED to document it in the semantic conventions + // for specific GenAI system and use system-specific name in the + // instrumentation. If a different name is not documented, instrumentation + // libraries SHOULD use applicable predefined value. + GenAIOperationNameKey = attribute.Key("gen_ai.operation.name") + + // GenAIOutputMessagesKey is the attribute Key conforming to the + // "gen_ai.output.messages" semantic conventions. It represents the messages + // returned by the model where each message represents a specific model response + // (choice, candidate). + // + // Type: any + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "[\n {\n "role": "assistant",\n "parts": [\n {\n "type": "text",\n + // "content": "The weather in Paris is currently rainy with a temperature of + // 57°F."\n }\n ],\n "finish_reason": "stop"\n }\n]\n" + // Note: Instrumentations MUST follow [Output messages JSON schema] + // + // Each message represents a single output choice/candidate generated by + // the model. Each message corresponds to exactly one generation + // (choice/candidate) and vice versa - one choice cannot be split across + // multiple messages or one message cannot contain parts from multiple choices. 
+ // + // When the attribute is recorded on events, it MUST be recorded in structured + // form. When recorded on spans, it MAY be recorded as a JSON string if + // structured + // format is not supported and SHOULD be recorded in structured form otherwise. + // + // Instrumentations MAY provide a way for users to filter or truncate + // output messages. + // + // > [!Warning] + // > This attribute is likely to contain sensitive information including + // > user/PII data. + // + // See [Recording content on attributes] + // section for more details. + // + // [Output messages JSON schema]: /docs/gen-ai/gen-ai-output-messages.json + // [Recording content on attributes]: /docs/gen-ai/gen-ai-spans.md#recording-content-on-attributes + GenAIOutputMessagesKey = attribute.Key("gen_ai.output.messages") + + // GenAIOutputTypeKey is the attribute Key conforming to the + // "gen_ai.output.type" semantic conventions. It represents the represents the + // content type requested by the client. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // Note: This attribute SHOULD be used when the client requests output of a + // specific type. The model may return zero or more outputs of this type. + // This attribute specifies the output modality and not the actual output + // format. For example, if an image is requested, the actual output could be a + // URL pointing to an image file. + // Additional output format details may be recorded in the future in the + // `gen_ai.output.{type}.*` attributes. + GenAIOutputTypeKey = attribute.Key("gen_ai.output.type") + + // GenAIProviderNameKey is the attribute Key conforming to the + // "gen_ai.provider.name" semantic conventions. It represents the Generative AI + // provider as identified by the client or server instrumentation. 
+ // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // Note: The attribute SHOULD be set based on the instrumentation's best + // knowledge and may differ from the actual model provider. + // + // Multiple providers, including Azure OpenAI, Gemini, and AI hosting platforms + // are accessible using the OpenAI REST API and corresponding client libraries, + // but may proxy or host models from different providers. + // + // The `gen_ai.request.model`, `gen_ai.response.model`, and `server.address` + // attributes may help identify the actual system in use. + // + // The `gen_ai.provider.name` attribute acts as a discriminator that + // identifies the GenAI telemetry format flavor specific to that provider + // within GenAI semantic conventions. + // It SHOULD be set consistently with provider-specific attributes and signals. + // For example, GenAI spans, metrics, and events related to AWS Bedrock + // should have the `gen_ai.provider.name` set to `aws.bedrock` and include + // applicable `aws.bedrock.*` attributes and are not expected to include + // `openai.*` attributes. + GenAIProviderNameKey = attribute.Key("gen_ai.provider.name") + + // GenAIRequestChoiceCountKey is the attribute Key conforming to the + // "gen_ai.request.choice.count" semantic conventions. It represents the target + // number of candidate completions to return. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 3 + GenAIRequestChoiceCountKey = attribute.Key("gen_ai.request.choice.count") + + // GenAIRequestEncodingFormatsKey is the attribute Key conforming to the + // "gen_ai.request.encoding_formats" semantic conventions. It represents the + // encoding formats requested in an embeddings operation, if specified. 
+ // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "base64"], ["float", "binary" + // Note: In some GenAI systems the encoding formats are called embedding types. + // Also, some GenAI systems only accept a single format per request. + GenAIRequestEncodingFormatsKey = attribute.Key("gen_ai.request.encoding_formats") + + // GenAIRequestFrequencyPenaltyKey is the attribute Key conforming to the + // "gen_ai.request.frequency_penalty" semantic conventions. It represents the + // frequency penalty setting for the GenAI request. + // + // Type: double + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 0.1 + GenAIRequestFrequencyPenaltyKey = attribute.Key("gen_ai.request.frequency_penalty") + + // GenAIRequestMaxTokensKey is the attribute Key conforming to the + // "gen_ai.request.max_tokens" semantic conventions. It represents the maximum + // number of tokens the model generates for a request. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 100 + GenAIRequestMaxTokensKey = attribute.Key("gen_ai.request.max_tokens") + + // GenAIRequestModelKey is the attribute Key conforming to the + // "gen_ai.request.model" semantic conventions. It represents the name of the + // GenAI model a request is being made to. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: gpt-4 + GenAIRequestModelKey = attribute.Key("gen_ai.request.model") + + // GenAIRequestPresencePenaltyKey is the attribute Key conforming to the + // "gen_ai.request.presence_penalty" semantic conventions. It represents the + // presence penalty setting for the GenAI request. 
+ // + // Type: double + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 0.1 + GenAIRequestPresencePenaltyKey = attribute.Key("gen_ai.request.presence_penalty") + + // GenAIRequestSeedKey is the attribute Key conforming to the + // "gen_ai.request.seed" semantic conventions. It represents the requests with + // same seed value more likely to return same result. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 100 + GenAIRequestSeedKey = attribute.Key("gen_ai.request.seed") + + // GenAIRequestStopSequencesKey is the attribute Key conforming to the + // "gen_ai.request.stop_sequences" semantic conventions. It represents the list + // of sequences that the model will use to stop generating further tokens. + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "forest", "lived" + GenAIRequestStopSequencesKey = attribute.Key("gen_ai.request.stop_sequences") + + // GenAIRequestTemperatureKey is the attribute Key conforming to the + // "gen_ai.request.temperature" semantic conventions. It represents the + // temperature setting for the GenAI request. + // + // Type: double + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 0.0 + GenAIRequestTemperatureKey = attribute.Key("gen_ai.request.temperature") + + // GenAIRequestTopKKey is the attribute Key conforming to the + // "gen_ai.request.top_k" semantic conventions. It represents the top_k sampling + // setting for the GenAI request. + // + // Type: double + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 1.0 + GenAIRequestTopKKey = attribute.Key("gen_ai.request.top_k") + + // GenAIRequestTopPKey is the attribute Key conforming to the + // "gen_ai.request.top_p" semantic conventions. It represents the top_p sampling + // setting for the GenAI request. 
+ // + // Type: double + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 1.0 + GenAIRequestTopPKey = attribute.Key("gen_ai.request.top_p") + + // GenAIResponseFinishReasonsKey is the attribute Key conforming to the + // "gen_ai.response.finish_reasons" semantic conventions. It represents the + // array of reasons the model stopped generating tokens, corresponding to each + // generation received. + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "stop"], ["stop", "length" + GenAIResponseFinishReasonsKey = attribute.Key("gen_ai.response.finish_reasons") + + // GenAIResponseIDKey is the attribute Key conforming to the + // "gen_ai.response.id" semantic conventions. It represents the unique + // identifier for the completion. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "chatcmpl-123" + GenAIResponseIDKey = attribute.Key("gen_ai.response.id") + + // GenAIResponseModelKey is the attribute Key conforming to the + // "gen_ai.response.model" semantic conventions. It represents the name of the + // model that generated the response. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "gpt-4-0613" + GenAIResponseModelKey = attribute.Key("gen_ai.response.model") + + // GenAISystemInstructionsKey is the attribute Key conforming to the + // "gen_ai.system_instructions" semantic conventions. It represents the system + // message or instructions provided to the GenAI model separately from the chat + // history. 
+ // + // Type: any + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "[\n {\n "type": "text",\n "content": "You are an Agent that greet + // users, always use greetings tool to respond"\n }\n]\n", "[\n {\n "type": + // "text",\n "content": "You are a language translator."\n },\n {\n "type": + // "text",\n "content": "Your mission is to translate text in English to + // French."\n }\n]\n" + // Note: This attribute SHOULD be used when the corresponding provider or API + // allows to provide system instructions or messages separately from the + // chat history. + // + // Instructions that are part of the chat history SHOULD be recorded in + // `gen_ai.input.messages` attribute instead. + // + // Instrumentations MUST follow [System instructions JSON schema]. + // + // When recorded on spans, it MAY be recorded as a JSON string if structured + // format is not supported and SHOULD be recorded in structured form otherwise. + // + // Instrumentations MAY provide a way for users to filter or truncate + // system instructions. + // + // > [!Warning] + // > This attribute may contain sensitive information. + // + // See [Recording content on attributes] + // section for more details. + // + // [System instructions JSON schema]: /docs/gen-ai/gen-ai-system-instructions.json + // [Recording content on attributes]: /docs/gen-ai/gen-ai-spans.md#recording-content-on-attributes + GenAISystemInstructionsKey = attribute.Key("gen_ai.system_instructions") + + // GenAITokenTypeKey is the attribute Key conforming to the "gen_ai.token.type" + // semantic conventions. It represents the type of token being counted. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "input", "output" + GenAITokenTypeKey = attribute.Key("gen_ai.token.type") + + // GenAIToolCallIDKey is the attribute Key conforming to the + // "gen_ai.tool.call.id" semantic conventions. It represents the tool call + // identifier. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "call_mszuSIzqtI65i1wAUOE8w5H4" + GenAIToolCallIDKey = attribute.Key("gen_ai.tool.call.id") + + // GenAIToolDescriptionKey is the attribute Key conforming to the + // "gen_ai.tool.description" semantic conventions. It represents the tool + // description. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Multiply two numbers" + GenAIToolDescriptionKey = attribute.Key("gen_ai.tool.description") + + // GenAIToolNameKey is the attribute Key conforming to the "gen_ai.tool.name" + // semantic conventions. It represents the name of the tool utilized by the + // agent. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Flights" + GenAIToolNameKey = attribute.Key("gen_ai.tool.name") + + // GenAIToolTypeKey is the attribute Key conforming to the "gen_ai.tool.type" + // semantic conventions. It represents the type of the tool utilized by the + // agent. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "function", "extension", "datastore" + // Note: Extension: A tool executed on the agent-side to directly call external + // APIs, bridging the gap between the agent and real-world systems. + // Agent-side operations involve actions that are performed by the agent on the + // server or within the agent's controlled environment. + // Function: A tool executed on the client-side, where the agent generates + // parameters for a predefined function, and the client executes the logic. + // Client-side operations are actions taken on the user's end or within the + // client application. + // Datastore: A tool used by the agent to access and query structured or + // unstructured external data for retrieval-augmented tasks or knowledge + // updates. 
+ GenAIToolTypeKey = attribute.Key("gen_ai.tool.type") + + // GenAIUsageInputTokensKey is the attribute Key conforming to the + // "gen_ai.usage.input_tokens" semantic conventions. It represents the number of + // tokens used in the GenAI input (prompt). + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 100 + GenAIUsageInputTokensKey = attribute.Key("gen_ai.usage.input_tokens") + + // GenAIUsageOutputTokensKey is the attribute Key conforming to the + // "gen_ai.usage.output_tokens" semantic conventions. It represents the number + // of tokens used in the GenAI response (completion). + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 180 + GenAIUsageOutputTokensKey = attribute.Key("gen_ai.usage.output_tokens") +) + +// GenAIAgentDescription returns an attribute KeyValue conforming to the +// "gen_ai.agent.description" semantic conventions. It represents the free-form +// description of the GenAI agent provided by the application. +func GenAIAgentDescription(val string) attribute.KeyValue { + return GenAIAgentDescriptionKey.String(val) +} + +// GenAIAgentID returns an attribute KeyValue conforming to the "gen_ai.agent.id" +// semantic conventions. It represents the unique identifier of the GenAI agent. +func GenAIAgentID(val string) attribute.KeyValue { + return GenAIAgentIDKey.String(val) +} + +// GenAIAgentName returns an attribute KeyValue conforming to the +// "gen_ai.agent.name" semantic conventions. It represents the human-readable +// name of the GenAI agent provided by the application. +func GenAIAgentName(val string) attribute.KeyValue { + return GenAIAgentNameKey.String(val) +} + +// GenAIConversationID returns an attribute KeyValue conforming to the +// "gen_ai.conversation.id" semantic conventions. It represents the unique +// identifier for a conversation (session, thread), used to store and correlate +// messages within this conversation. 
+func GenAIConversationID(val string) attribute.KeyValue { + return GenAIConversationIDKey.String(val) +} + +// GenAIDataSourceID returns an attribute KeyValue conforming to the +// "gen_ai.data_source.id" semantic conventions. It represents the data source +// identifier. +func GenAIDataSourceID(val string) attribute.KeyValue { + return GenAIDataSourceIDKey.String(val) +} + +// GenAIRequestChoiceCount returns an attribute KeyValue conforming to the +// "gen_ai.request.choice.count" semantic conventions. It represents the target +// number of candidate completions to return. +func GenAIRequestChoiceCount(val int) attribute.KeyValue { + return GenAIRequestChoiceCountKey.Int(val) +} + +// GenAIRequestEncodingFormats returns an attribute KeyValue conforming to the +// "gen_ai.request.encoding_formats" semantic conventions. It represents the +// encoding formats requested in an embeddings operation, if specified. +func GenAIRequestEncodingFormats(val ...string) attribute.KeyValue { + return GenAIRequestEncodingFormatsKey.StringSlice(val) +} + +// GenAIRequestFrequencyPenalty returns an attribute KeyValue conforming to the +// "gen_ai.request.frequency_penalty" semantic conventions. It represents the +// frequency penalty setting for the GenAI request. +func GenAIRequestFrequencyPenalty(val float64) attribute.KeyValue { + return GenAIRequestFrequencyPenaltyKey.Float64(val) +} + +// GenAIRequestMaxTokens returns an attribute KeyValue conforming to the +// "gen_ai.request.max_tokens" semantic conventions. It represents the maximum +// number of tokens the model generates for a request. +func GenAIRequestMaxTokens(val int) attribute.KeyValue { + return GenAIRequestMaxTokensKey.Int(val) +} + +// GenAIRequestModel returns an attribute KeyValue conforming to the +// "gen_ai.request.model" semantic conventions. It represents the name of the +// GenAI model a request is being made to. 
+func GenAIRequestModel(val string) attribute.KeyValue { + return GenAIRequestModelKey.String(val) +} + +// GenAIRequestPresencePenalty returns an attribute KeyValue conforming to the +// "gen_ai.request.presence_penalty" semantic conventions. It represents the +// presence penalty setting for the GenAI request. +func GenAIRequestPresencePenalty(val float64) attribute.KeyValue { + return GenAIRequestPresencePenaltyKey.Float64(val) +} + +// GenAIRequestSeed returns an attribute KeyValue conforming to the +// "gen_ai.request.seed" semantic conventions. It represents the requests with +// same seed value more likely to return same result. +func GenAIRequestSeed(val int) attribute.KeyValue { + return GenAIRequestSeedKey.Int(val) +} + +// GenAIRequestStopSequences returns an attribute KeyValue conforming to the +// "gen_ai.request.stop_sequences" semantic conventions. It represents the list +// of sequences that the model will use to stop generating further tokens. +func GenAIRequestStopSequences(val ...string) attribute.KeyValue { + return GenAIRequestStopSequencesKey.StringSlice(val) +} + +// GenAIRequestTemperature returns an attribute KeyValue conforming to the +// "gen_ai.request.temperature" semantic conventions. It represents the +// temperature setting for the GenAI request. +func GenAIRequestTemperature(val float64) attribute.KeyValue { + return GenAIRequestTemperatureKey.Float64(val) +} + +// GenAIRequestTopK returns an attribute KeyValue conforming to the +// "gen_ai.request.top_k" semantic conventions. It represents the top_k sampling +// setting for the GenAI request. +func GenAIRequestTopK(val float64) attribute.KeyValue { + return GenAIRequestTopKKey.Float64(val) +} + +// GenAIRequestTopP returns an attribute KeyValue conforming to the +// "gen_ai.request.top_p" semantic conventions. It represents the top_p sampling +// setting for the GenAI request. 
+func GenAIRequestTopP(val float64) attribute.KeyValue { + return GenAIRequestTopPKey.Float64(val) +} + +// GenAIResponseFinishReasons returns an attribute KeyValue conforming to the +// "gen_ai.response.finish_reasons" semantic conventions. It represents the array +// of reasons the model stopped generating tokens, corresponding to each +// generation received. +func GenAIResponseFinishReasons(val ...string) attribute.KeyValue { + return GenAIResponseFinishReasonsKey.StringSlice(val) +} + +// GenAIResponseID returns an attribute KeyValue conforming to the +// "gen_ai.response.id" semantic conventions. It represents the unique identifier +// for the completion. +func GenAIResponseID(val string) attribute.KeyValue { + return GenAIResponseIDKey.String(val) +} + +// GenAIResponseModel returns an attribute KeyValue conforming to the +// "gen_ai.response.model" semantic conventions. It represents the name of the +// model that generated the response. +func GenAIResponseModel(val string) attribute.KeyValue { + return GenAIResponseModelKey.String(val) +} + +// GenAIToolCallID returns an attribute KeyValue conforming to the +// "gen_ai.tool.call.id" semantic conventions. It represents the tool call +// identifier. +func GenAIToolCallID(val string) attribute.KeyValue { + return GenAIToolCallIDKey.String(val) +} + +// GenAIToolDescription returns an attribute KeyValue conforming to the +// "gen_ai.tool.description" semantic conventions. It represents the tool +// description. +func GenAIToolDescription(val string) attribute.KeyValue { + return GenAIToolDescriptionKey.String(val) +} + +// GenAIToolName returns an attribute KeyValue conforming to the +// "gen_ai.tool.name" semantic conventions. It represents the name of the tool +// utilized by the agent. +func GenAIToolName(val string) attribute.KeyValue { + return GenAIToolNameKey.String(val) +} + +// GenAIToolType returns an attribute KeyValue conforming to the +// "gen_ai.tool.type" semantic conventions. 
It represents the type of the tool +// utilized by the agent. +func GenAIToolType(val string) attribute.KeyValue { + return GenAIToolTypeKey.String(val) +} + +// GenAIUsageInputTokens returns an attribute KeyValue conforming to the +// "gen_ai.usage.input_tokens" semantic conventions. It represents the number of +// tokens used in the GenAI input (prompt). +func GenAIUsageInputTokens(val int) attribute.KeyValue { + return GenAIUsageInputTokensKey.Int(val) +} + +// GenAIUsageOutputTokens returns an attribute KeyValue conforming to the +// "gen_ai.usage.output_tokens" semantic conventions. It represents the number of +// tokens used in the GenAI response (completion). +func GenAIUsageOutputTokens(val int) attribute.KeyValue { + return GenAIUsageOutputTokensKey.Int(val) +} + +// Enum values for gen_ai.operation.name +var ( + // Chat completion operation such as [OpenAI Chat API] + // Stability: development + // + // [OpenAI Chat API]: https://platform.openai.com/docs/api-reference/chat + GenAIOperationNameChat = GenAIOperationNameKey.String("chat") + // Multimodal content generation operation such as [Gemini Generate Content] + // Stability: development + // + // [Gemini Generate Content]: https://ai.google.dev/api/generate-content + GenAIOperationNameGenerateContent = GenAIOperationNameKey.String("generate_content") + // Text completions operation such as [OpenAI Completions API (Legacy)] + // Stability: development + // + // [OpenAI Completions API (Legacy)]: https://platform.openai.com/docs/api-reference/completions + GenAIOperationNameTextCompletion = GenAIOperationNameKey.String("text_completion") + // Embeddings operation such as [OpenAI Create embeddings API] + // Stability: development + // + // [OpenAI Create embeddings API]: https://platform.openai.com/docs/api-reference/embeddings/create + GenAIOperationNameEmbeddings = GenAIOperationNameKey.String("embeddings") + // Create GenAI agent + // Stability: development + GenAIOperationNameCreateAgent = 
GenAIOperationNameKey.String("create_agent") + // Invoke GenAI agent + // Stability: development + GenAIOperationNameInvokeAgent = GenAIOperationNameKey.String("invoke_agent") + // Execute a tool + // Stability: development + GenAIOperationNameExecuteTool = GenAIOperationNameKey.String("execute_tool") +) + +// Enum values for gen_ai.output.type +var ( + // Plain text + // Stability: development + GenAIOutputTypeText = GenAIOutputTypeKey.String("text") + // JSON object with known or unknown schema + // Stability: development + GenAIOutputTypeJSON = GenAIOutputTypeKey.String("json") + // Image + // Stability: development + GenAIOutputTypeImage = GenAIOutputTypeKey.String("image") + // Speech + // Stability: development + GenAIOutputTypeSpeech = GenAIOutputTypeKey.String("speech") +) + +// Enum values for gen_ai.provider.name +var ( + // [OpenAI] + // Stability: development + // + // [OpenAI]: https://openai.com/ + GenAIProviderNameOpenAI = GenAIProviderNameKey.String("openai") + // Any Google generative AI endpoint + // Stability: development + GenAIProviderNameGCPGenAI = GenAIProviderNameKey.String("gcp.gen_ai") + // [Vertex AI] + // Stability: development + // + // [Vertex AI]: https://cloud.google.com/vertex-ai + GenAIProviderNameGCPVertexAI = GenAIProviderNameKey.String("gcp.vertex_ai") + // [Gemini] + // Stability: development + // + // [Gemini]: https://cloud.google.com/products/gemini + GenAIProviderNameGCPGemini = GenAIProviderNameKey.String("gcp.gemini") + // [Anthropic] + // Stability: development + // + // [Anthropic]: https://www.anthropic.com/ + GenAIProviderNameAnthropic = GenAIProviderNameKey.String("anthropic") + // [Cohere] + // Stability: development + // + // [Cohere]: https://cohere.com/ + GenAIProviderNameCohere = GenAIProviderNameKey.String("cohere") + // Azure AI Inference + // Stability: development + GenAIProviderNameAzureAIInference = GenAIProviderNameKey.String("azure.ai.inference") + // [Azure OpenAI] + // Stability: development + // + // 
[Azure OpenAI]: https://azure.microsoft.com/products/ai-services/openai-service/ + GenAIProviderNameAzureAIOpenAI = GenAIProviderNameKey.String("azure.ai.openai") + // [IBM Watsonx AI] + // Stability: development + // + // [IBM Watsonx AI]: https://www.ibm.com/products/watsonx-ai + GenAIProviderNameIBMWatsonxAI = GenAIProviderNameKey.String("ibm.watsonx.ai") + // [AWS Bedrock] + // Stability: development + // + // [AWS Bedrock]: https://aws.amazon.com/bedrock + GenAIProviderNameAWSBedrock = GenAIProviderNameKey.String("aws.bedrock") + // [Perplexity] + // Stability: development + // + // [Perplexity]: https://www.perplexity.ai/ + GenAIProviderNamePerplexity = GenAIProviderNameKey.String("perplexity") + // [xAI] + // Stability: development + // + // [xAI]: https://x.ai/ + GenAIProviderNameXAI = GenAIProviderNameKey.String("x_ai") + // [DeepSeek] + // Stability: development + // + // [DeepSeek]: https://www.deepseek.com/ + GenAIProviderNameDeepseek = GenAIProviderNameKey.String("deepseek") + // [Groq] + // Stability: development + // + // [Groq]: https://groq.com/ + GenAIProviderNameGroq = GenAIProviderNameKey.String("groq") + // [Mistral AI] + // Stability: development + // + // [Mistral AI]: https://mistral.ai/ + GenAIProviderNameMistralAI = GenAIProviderNameKey.String("mistral_ai") +) + +// Enum values for gen_ai.token.type +var ( + // Input tokens (prompt, input, etc.) + // Stability: development + GenAITokenTypeInput = GenAITokenTypeKey.String("input") + // Output tokens (completion, response, etc.) + // Stability: development + GenAITokenTypeOutput = GenAITokenTypeKey.String("output") +) + +// Namespace: geo +const ( + // GeoContinentCodeKey is the attribute Key conforming to the + // "geo.continent.code" semantic conventions. It represents the two-letter code + // representing continent’s name. 
+ // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + GeoContinentCodeKey = attribute.Key("geo.continent.code") + + // GeoCountryISOCodeKey is the attribute Key conforming to the + // "geo.country.iso_code" semantic conventions. It represents the two-letter ISO + // Country Code ([ISO 3166-1 alpha2]). + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "CA" + // + // [ISO 3166-1 alpha2]: https://wikipedia.org/wiki/ISO_3166-1#Codes + GeoCountryISOCodeKey = attribute.Key("geo.country.iso_code") + + // GeoLocalityNameKey is the attribute Key conforming to the "geo.locality.name" + // semantic conventions. It represents the locality name. Represents the name of + // a city, town, village, or similar populated place. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Montreal", "Berlin" + GeoLocalityNameKey = attribute.Key("geo.locality.name") + + // GeoLocationLatKey is the attribute Key conforming to the "geo.location.lat" + // semantic conventions. It represents the latitude of the geo location in + // [WGS84]. + // + // Type: double + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 45.505918 + // + // [WGS84]: https://wikipedia.org/wiki/World_Geodetic_System#WGS84 + GeoLocationLatKey = attribute.Key("geo.location.lat") + + // GeoLocationLonKey is the attribute Key conforming to the "geo.location.lon" + // semantic conventions. It represents the longitude of the geo location in + // [WGS84]. + // + // Type: double + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: -73.61483 + // + // [WGS84]: https://wikipedia.org/wiki/World_Geodetic_System#WGS84 + GeoLocationLonKey = attribute.Key("geo.location.lon") + + // GeoPostalCodeKey is the attribute Key conforming to the "geo.postal_code" + // semantic conventions. 
It represents the postal code associated with the + // location. Values appropriate for this field may also be known as a postcode + // or ZIP code and will vary widely from country to country. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "94040" + GeoPostalCodeKey = attribute.Key("geo.postal_code") + + // GeoRegionISOCodeKey is the attribute Key conforming to the + // "geo.region.iso_code" semantic conventions. It represents the region ISO code + // ([ISO 3166-2]). + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "CA-QC" + // + // [ISO 3166-2]: https://wikipedia.org/wiki/ISO_3166-2 + GeoRegionISOCodeKey = attribute.Key("geo.region.iso_code") +) + +// GeoCountryISOCode returns an attribute KeyValue conforming to the +// "geo.country.iso_code" semantic conventions. It represents the two-letter ISO +// Country Code ([ISO 3166-1 alpha2]). +// +// [ISO 3166-1 alpha2]: https://wikipedia.org/wiki/ISO_3166-1#Codes +func GeoCountryISOCode(val string) attribute.KeyValue { + return GeoCountryISOCodeKey.String(val) +} + +// GeoLocalityName returns an attribute KeyValue conforming to the +// "geo.locality.name" semantic conventions. It represents the locality name. +// Represents the name of a city, town, village, or similar populated place. +func GeoLocalityName(val string) attribute.KeyValue { + return GeoLocalityNameKey.String(val) +} + +// GeoLocationLat returns an attribute KeyValue conforming to the +// "geo.location.lat" semantic conventions. It represents the latitude of the geo +// location in [WGS84]. +// +// [WGS84]: https://wikipedia.org/wiki/World_Geodetic_System#WGS84 +func GeoLocationLat(val float64) attribute.KeyValue { + return GeoLocationLatKey.Float64(val) +} + +// GeoLocationLon returns an attribute KeyValue conforming to the +// "geo.location.lon" semantic conventions. It represents the longitude of the +// geo location in [WGS84]. 
+// +// [WGS84]: https://wikipedia.org/wiki/World_Geodetic_System#WGS84 +func GeoLocationLon(val float64) attribute.KeyValue { + return GeoLocationLonKey.Float64(val) +} + +// GeoPostalCode returns an attribute KeyValue conforming to the +// "geo.postal_code" semantic conventions. It represents the postal code +// associated with the location. Values appropriate for this field may also be +// known as a postcode or ZIP code and will vary widely from country to country. +func GeoPostalCode(val string) attribute.KeyValue { + return GeoPostalCodeKey.String(val) +} + +// GeoRegionISOCode returns an attribute KeyValue conforming to the +// "geo.region.iso_code" semantic conventions. It represents the region ISO code +// ([ISO 3166-2]). +// +// [ISO 3166-2]: https://wikipedia.org/wiki/ISO_3166-2 +func GeoRegionISOCode(val string) attribute.KeyValue { + return GeoRegionISOCodeKey.String(val) +} + +// Enum values for geo.continent.code +var ( + // Africa + // Stability: development + GeoContinentCodeAf = GeoContinentCodeKey.String("AF") + // Antarctica + // Stability: development + GeoContinentCodeAn = GeoContinentCodeKey.String("AN") + // Asia + // Stability: development + GeoContinentCodeAs = GeoContinentCodeKey.String("AS") + // Europe + // Stability: development + GeoContinentCodeEu = GeoContinentCodeKey.String("EU") + // North America + // Stability: development + GeoContinentCodeNa = GeoContinentCodeKey.String("NA") + // Oceania + // Stability: development + GeoContinentCodeOc = GeoContinentCodeKey.String("OC") + // South America + // Stability: development + GeoContinentCodeSa = GeoContinentCodeKey.String("SA") +) + +// Namespace: go +const ( + // GoMemoryTypeKey is the attribute Key conforming to the "go.memory.type" + // semantic conventions. It represents the type of memory. 
+ // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "other", "stack" + GoMemoryTypeKey = attribute.Key("go.memory.type") +) + +// Enum values for go.memory.type +var ( + // Memory allocated from the heap that is reserved for stack space, whether or + // not it is currently in-use. + // Stability: development + GoMemoryTypeStack = GoMemoryTypeKey.String("stack") + // Memory used by the Go runtime, excluding other categories of memory usage + // described in this enumeration. + // Stability: development + GoMemoryTypeOther = GoMemoryTypeKey.String("other") +) + +// Namespace: graphql +const ( + // GraphQLDocumentKey is the attribute Key conforming to the "graphql.document" + // semantic conventions. It represents the GraphQL document being executed. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: query findBookById { bookById(id: ?) { name } } + // Note: The value may be sanitized to exclude sensitive information. + GraphQLDocumentKey = attribute.Key("graphql.document") + + // GraphQLOperationNameKey is the attribute Key conforming to the + // "graphql.operation.name" semantic conventions. It represents the name of the + // operation being executed. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: findBookById + GraphQLOperationNameKey = attribute.Key("graphql.operation.name") + + // GraphQLOperationTypeKey is the attribute Key conforming to the + // "graphql.operation.type" semantic conventions. It represents the type of the + // operation being executed. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "query", "mutation", "subscription" + GraphQLOperationTypeKey = attribute.Key("graphql.operation.type") +) + +// GraphQLDocument returns an attribute KeyValue conforming to the +// "graphql.document" semantic conventions. 
It represents the GraphQL document +// being executed. +func GraphQLDocument(val string) attribute.KeyValue { + return GraphQLDocumentKey.String(val) +} + +// GraphQLOperationName returns an attribute KeyValue conforming to the +// "graphql.operation.name" semantic conventions. It represents the name of the +// operation being executed. +func GraphQLOperationName(val string) attribute.KeyValue { + return GraphQLOperationNameKey.String(val) +} + +// Enum values for graphql.operation.type +var ( + // GraphQL query + // Stability: development + GraphQLOperationTypeQuery = GraphQLOperationTypeKey.String("query") + // GraphQL mutation + // Stability: development + GraphQLOperationTypeMutation = GraphQLOperationTypeKey.String("mutation") + // GraphQL subscription + // Stability: development + GraphQLOperationTypeSubscription = GraphQLOperationTypeKey.String("subscription") +) + +// Namespace: heroku +const ( + // HerokuAppIDKey is the attribute Key conforming to the "heroku.app.id" + // semantic conventions. It represents the unique identifier for the + // application. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "2daa2797-e42b-4624-9322-ec3f968df4da" + HerokuAppIDKey = attribute.Key("heroku.app.id") + + // HerokuReleaseCommitKey is the attribute Key conforming to the + // "heroku.release.commit" semantic conventions. It represents the commit hash + // for the current release. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "e6134959463efd8966b20e75b913cafe3f5ec" + HerokuReleaseCommitKey = attribute.Key("heroku.release.commit") + + // HerokuReleaseCreationTimestampKey is the attribute Key conforming to the + // "heroku.release.creation_timestamp" semantic conventions. It represents the + // time and date the release was created. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "2022-10-23T18:00:42Z" + HerokuReleaseCreationTimestampKey = attribute.Key("heroku.release.creation_timestamp") +) + +// HerokuAppID returns an attribute KeyValue conforming to the "heroku.app.id" +// semantic conventions. It represents the unique identifier for the application. +func HerokuAppID(val string) attribute.KeyValue { + return HerokuAppIDKey.String(val) +} + +// HerokuReleaseCommit returns an attribute KeyValue conforming to the +// "heroku.release.commit" semantic conventions. It represents the commit hash +// for the current release. +func HerokuReleaseCommit(val string) attribute.KeyValue { + return HerokuReleaseCommitKey.String(val) +} + +// HerokuReleaseCreationTimestamp returns an attribute KeyValue conforming to the +// "heroku.release.creation_timestamp" semantic conventions. It represents the +// time and date the release was created. +func HerokuReleaseCreationTimestamp(val string) attribute.KeyValue { + return HerokuReleaseCreationTimestampKey.String(val) +} + +// Namespace: host +const ( + // HostArchKey is the attribute Key conforming to the "host.arch" semantic + // conventions. It represents the CPU architecture the host system is running + // on. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + HostArchKey = attribute.Key("host.arch") + + // HostCPUCacheL2SizeKey is the attribute Key conforming to the + // "host.cpu.cache.l2.size" semantic conventions. It represents the amount of + // level 2 memory cache available to the processor (in Bytes). + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 12288000 + HostCPUCacheL2SizeKey = attribute.Key("host.cpu.cache.l2.size") + + // HostCPUFamilyKey is the attribute Key conforming to the "host.cpu.family" + // semantic conventions. It represents the family or generation of the CPU. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "6", "PA-RISC 1.1e" + HostCPUFamilyKey = attribute.Key("host.cpu.family") + + // HostCPUModelIDKey is the attribute Key conforming to the "host.cpu.model.id" + // semantic conventions. It represents the model identifier. It provides more + // granular information about the CPU, distinguishing it from other CPUs within + // the same family. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "6", "9000/778/B180L" + HostCPUModelIDKey = attribute.Key("host.cpu.model.id") + + // HostCPUModelNameKey is the attribute Key conforming to the + // "host.cpu.model.name" semantic conventions. It represents the model + // designation of the processor. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "11th Gen Intel(R) Core(TM) i7-1185G7 @ 3.00GHz" + HostCPUModelNameKey = attribute.Key("host.cpu.model.name") + + // HostCPUSteppingKey is the attribute Key conforming to the "host.cpu.stepping" + // semantic conventions. It represents the stepping or core revisions. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "1", "r1p1" + HostCPUSteppingKey = attribute.Key("host.cpu.stepping") + + // HostCPUVendorIDKey is the attribute Key conforming to the + // "host.cpu.vendor.id" semantic conventions. It represents the processor + // manufacturer identifier. A maximum 12-character string. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "GenuineIntel" + // Note: [CPUID] command returns the vendor ID string in EBX, EDX and ECX + // registers. Writing these to memory in this order results in a 12-character + // string. 
+ // + // [CPUID]: https://wiki.osdev.org/CPUID + HostCPUVendorIDKey = attribute.Key("host.cpu.vendor.id") + + // HostIDKey is the attribute Key conforming to the "host.id" semantic + // conventions. It represents the unique host ID. For Cloud, this must be the + // instance_id assigned by the cloud provider. For non-containerized systems, + // this should be the `machine-id`. See the table below for the sources to use + // to determine the `machine-id` based on operating system. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "fdbf79e8af94cb7f9e8df36789187052" + HostIDKey = attribute.Key("host.id") + + // HostImageIDKey is the attribute Key conforming to the "host.image.id" + // semantic conventions. It represents the VM image ID or host OS image ID. For + // Cloud, this value is from the provider. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "ami-07b06b442921831e5" + HostImageIDKey = attribute.Key("host.image.id") + + // HostImageNameKey is the attribute Key conforming to the "host.image.name" + // semantic conventions. It represents the name of the VM image or OS install + // the host was instantiated from. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "infra-ami-eks-worker-node-7d4ec78312", "CentOS-8-x86_64-1905" + HostImageNameKey = attribute.Key("host.image.name") + + // HostImageVersionKey is the attribute Key conforming to the + // "host.image.version" semantic conventions. It represents the version string + // of the VM image or host OS as defined in [Version Attributes]. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "0.1" + // + // [Version Attributes]: /docs/resource/README.md#version-attributes + HostImageVersionKey = attribute.Key("host.image.version") + + // HostIPKey is the attribute Key conforming to the "host.ip" semantic + // conventions. It represents the available IP addresses of the host, excluding + // loopback interfaces. + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "192.168.1.140", "fe80::abc2:4a28:737a:609e" + // Note: IPv4 Addresses MUST be specified in dotted-quad notation. IPv6 + // addresses MUST be specified in the [RFC 5952] format. + // + // [RFC 5952]: https://www.rfc-editor.org/rfc/rfc5952.html + HostIPKey = attribute.Key("host.ip") + + // HostMacKey is the attribute Key conforming to the "host.mac" semantic + // conventions. It represents the available MAC addresses of the host, excluding + // loopback interfaces. + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "AC-DE-48-23-45-67", "AC-DE-48-23-45-67-01-9F" + // Note: MAC Addresses MUST be represented in [IEEE RA hexadecimal form]: as + // hyphen-separated octets in uppercase hexadecimal form from most to least + // significant. + // + // [IEEE RA hexadecimal form]: https://standards.ieee.org/wp-content/uploads/import/documents/tutorials/eui.pdf + HostMacKey = attribute.Key("host.mac") + + // HostNameKey is the attribute Key conforming to the "host.name" semantic + // conventions. It represents the name of the host. On Unix systems, it may + // contain what the hostname command returns, or the fully qualified hostname, + // or another name specified by the user. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "opentelemetry-test" + HostNameKey = attribute.Key("host.name") + + // HostTypeKey is the attribute Key conforming to the "host.type" semantic + // conventions. It represents the type of host. For Cloud, this must be the + // machine type. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "n1-standard-1" + HostTypeKey = attribute.Key("host.type") +) + +// HostCPUCacheL2Size returns an attribute KeyValue conforming to the +// "host.cpu.cache.l2.size" semantic conventions. It represents the amount of +// level 2 memory cache available to the processor (in Bytes). +func HostCPUCacheL2Size(val int) attribute.KeyValue { + return HostCPUCacheL2SizeKey.Int(val) +} + +// HostCPUFamily returns an attribute KeyValue conforming to the +// "host.cpu.family" semantic conventions. It represents the family or generation +// of the CPU. +func HostCPUFamily(val string) attribute.KeyValue { + return HostCPUFamilyKey.String(val) +} + +// HostCPUModelID returns an attribute KeyValue conforming to the +// "host.cpu.model.id" semantic conventions. It represents the model identifier. +// It provides more granular information about the CPU, distinguishing it from +// other CPUs within the same family. +func HostCPUModelID(val string) attribute.KeyValue { + return HostCPUModelIDKey.String(val) +} + +// HostCPUModelName returns an attribute KeyValue conforming to the +// "host.cpu.model.name" semantic conventions. It represents the model +// designation of the processor. +func HostCPUModelName(val string) attribute.KeyValue { + return HostCPUModelNameKey.String(val) +} + +// HostCPUStepping returns an attribute KeyValue conforming to the +// "host.cpu.stepping" semantic conventions. It represents the stepping or core +// revisions. 
+func HostCPUStepping(val string) attribute.KeyValue { + return HostCPUSteppingKey.String(val) +} + +// HostCPUVendorID returns an attribute KeyValue conforming to the +// "host.cpu.vendor.id" semantic conventions. It represents the processor +// manufacturer identifier. A maximum 12-character string. +func HostCPUVendorID(val string) attribute.KeyValue { + return HostCPUVendorIDKey.String(val) +} + +// HostID returns an attribute KeyValue conforming to the "host.id" semantic +// conventions. It represents the unique host ID. For Cloud, this must be the +// instance_id assigned by the cloud provider. For non-containerized systems, +// this should be the `machine-id`. See the table below for the sources to use to +// determine the `machine-id` based on operating system. +func HostID(val string) attribute.KeyValue { + return HostIDKey.String(val) +} + +// HostImageID returns an attribute KeyValue conforming to the "host.image.id" +// semantic conventions. It represents the VM image ID or host OS image ID. For +// Cloud, this value is from the provider. +func HostImageID(val string) attribute.KeyValue { + return HostImageIDKey.String(val) +} + +// HostImageName returns an attribute KeyValue conforming to the +// "host.image.name" semantic conventions. It represents the name of the VM image +// or OS install the host was instantiated from. +func HostImageName(val string) attribute.KeyValue { + return HostImageNameKey.String(val) +} + +// HostImageVersion returns an attribute KeyValue conforming to the +// "host.image.version" semantic conventions. It represents the version string of +// the VM image or host OS as defined in [Version Attributes]. +// +// [Version Attributes]: /docs/resource/README.md#version-attributes +func HostImageVersion(val string) attribute.KeyValue { + return HostImageVersionKey.String(val) +} + +// HostIP returns an attribute KeyValue conforming to the "host.ip" semantic +// conventions. 
It represents the available IP addresses of the host, excluding +// loopback interfaces. +func HostIP(val ...string) attribute.KeyValue { + return HostIPKey.StringSlice(val) +} + +// HostMac returns an attribute KeyValue conforming to the "host.mac" semantic +// conventions. It represents the available MAC addresses of the host, excluding +// loopback interfaces. +func HostMac(val ...string) attribute.KeyValue { + return HostMacKey.StringSlice(val) +} + +// HostName returns an attribute KeyValue conforming to the "host.name" semantic +// conventions. It represents the name of the host. On Unix systems, it may +// contain what the hostname command returns, or the fully qualified hostname, or +// another name specified by the user. +func HostName(val string) attribute.KeyValue { + return HostNameKey.String(val) +} + +// HostType returns an attribute KeyValue conforming to the "host.type" semantic +// conventions. It represents the type of host. For Cloud, this must be the +// machine type. +func HostType(val string) attribute.KeyValue { + return HostTypeKey.String(val) +} + +// Enum values for host.arch +var ( + // AMD64 + // Stability: development + HostArchAMD64 = HostArchKey.String("amd64") + // ARM32 + // Stability: development + HostArchARM32 = HostArchKey.String("arm32") + // ARM64 + // Stability: development + HostArchARM64 = HostArchKey.String("arm64") + // Itanium + // Stability: development + HostArchIA64 = HostArchKey.String("ia64") + // 32-bit PowerPC + // Stability: development + HostArchPPC32 = HostArchKey.String("ppc32") + // 64-bit PowerPC + // Stability: development + HostArchPPC64 = HostArchKey.String("ppc64") + // IBM z/Architecture + // Stability: development + HostArchS390x = HostArchKey.String("s390x") + // 32-bit x86 + // Stability: development + HostArchX86 = HostArchKey.String("x86") +) + +// Namespace: http +const ( + // HTTPConnectionStateKey is the attribute Key conforming to the + // "http.connection.state" semantic conventions. 
It represents the state of the + // HTTP connection in the HTTP connection pool. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "active", "idle" + HTTPConnectionStateKey = attribute.Key("http.connection.state") + + // HTTPRequestBodySizeKey is the attribute Key conforming to the + // "http.request.body.size" semantic conventions. It represents the size of the + // request payload body in bytes. This is the number of bytes transferred + // excluding headers and is often, but not always, present as the + // [Content-Length] header. For requests using transport encoding, this should + // be the compressed size. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // [Content-Length]: https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length + HTTPRequestBodySizeKey = attribute.Key("http.request.body.size") + + // HTTPRequestMethodKey is the attribute Key conforming to the + // "http.request.method" semantic conventions. It represents the HTTP request + // method. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "GET", "POST", "HEAD" + // Note: HTTP request method value SHOULD be "known" to the instrumentation. + // By default, this convention defines "known" methods as the ones listed in + // [RFC9110] + // and the PATCH method defined in [RFC5789]. + // + // If the HTTP request method is not known to instrumentation, it MUST set the + // `http.request.method` attribute to `_OTHER`. + // + // If the HTTP instrumentation could end up converting valid HTTP request + // methods to `_OTHER`, then it MUST provide a way to override + // the list of known HTTP methods. 
If this override is done via environment + // variable, then the environment variable MUST be named + // OTEL_INSTRUMENTATION_HTTP_KNOWN_METHODS and support a comma-separated list of + // case-sensitive known HTTP methods + // (this list MUST be a full override of the default known method, it is not a + // list of known methods in addition to the defaults). + // + // HTTP method names are case-sensitive and `http.request.method` attribute + // value MUST match a known HTTP method name exactly. + // Instrumentations for specific web frameworks that consider HTTP methods to be + // case insensitive, SHOULD populate a canonical equivalent. + // Tracing instrumentations that do so, MUST also set + // `http.request.method_original` to the original value. + // + // [RFC9110]: https://www.rfc-editor.org/rfc/rfc9110.html#name-methods + // [RFC5789]: https://www.rfc-editor.org/rfc/rfc5789.html + HTTPRequestMethodKey = attribute.Key("http.request.method") + + // HTTPRequestMethodOriginalKey is the attribute Key conforming to the + // "http.request.method_original" semantic conventions. It represents the + // original HTTP method sent by the client in the request line. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "GeT", "ACL", "foo" + HTTPRequestMethodOriginalKey = attribute.Key("http.request.method_original") + + // HTTPRequestResendCountKey is the attribute Key conforming to the + // "http.request.resend_count" semantic conventions. It represents the ordinal + // number of request resending attempt (for any reason, including redirects). + // + // Type: int + // RequirementLevel: Recommended + // Stability: Stable + // + // Note: The resend count SHOULD be updated each time an HTTP request gets + // resent by the client, regardless of what was the cause of the resending (e.g. + // redirection, authorization failure, 503 Server Unavailable, network issues, + // or any other). 
+ HTTPRequestResendCountKey = attribute.Key("http.request.resend_count") + + // HTTPRequestSizeKey is the attribute Key conforming to the "http.request.size" + // semantic conventions. It represents the total size of the request in bytes. + // This should be the total number of bytes sent over the wire, including the + // request line (HTTP/1.1), framing (HTTP/2 and HTTP/3), headers, and request + // body if any. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + HTTPRequestSizeKey = attribute.Key("http.request.size") + + // HTTPResponseBodySizeKey is the attribute Key conforming to the + // "http.response.body.size" semantic conventions. It represents the size of the + // response payload body in bytes. This is the number of bytes transferred + // excluding headers and is often, but not always, present as the + // [Content-Length] header. For requests using transport encoding, this should + // be the compressed size. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // [Content-Length]: https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length + HTTPResponseBodySizeKey = attribute.Key("http.response.body.size") + + // HTTPResponseSizeKey is the attribute Key conforming to the + // "http.response.size" semantic conventions. It represents the total size of + // the response in bytes. This should be the total number of bytes sent over the + // wire, including the status line (HTTP/1.1), framing (HTTP/2 and HTTP/3), + // headers, and response body and trailers if any. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + HTTPResponseSizeKey = attribute.Key("http.response.size") + + // HTTPResponseStatusCodeKey is the attribute Key conforming to the + // "http.response.status_code" semantic conventions. It represents the + // [HTTP response status code]. 
+ // + // Type: int + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: 200 + // + // [HTTP response status code]: https://tools.ietf.org/html/rfc7231#section-6 + HTTPResponseStatusCodeKey = attribute.Key("http.response.status_code") + + // HTTPRouteKey is the attribute Key conforming to the "http.route" semantic + // conventions. It represents the matched route, that is, the path template in + // the format used by the respective server framework. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "/users/:userID?", "{controller}/{action}/{id?}" + // Note: MUST NOT be populated when this is not supported by the HTTP server + // framework as the route attribute should have low-cardinality and the URI path + // can NOT substitute it. + // SHOULD include the [application root] if there is one. + // + // [application root]: /docs/http/http-spans.md#http-server-definitions + HTTPRouteKey = attribute.Key("http.route") +) + +// HTTPRequestBodySize returns an attribute KeyValue conforming to the +// "http.request.body.size" semantic conventions. It represents the size of the +// request payload body in bytes. This is the number of bytes transferred +// excluding headers and is often, but not always, present as the +// [Content-Length] header. For requests using transport encoding, this should be +// the compressed size. +// +// [Content-Length]: https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length +func HTTPRequestBodySize(val int) attribute.KeyValue { + return HTTPRequestBodySizeKey.Int(val) +} + +// HTTPRequestHeader returns an attribute KeyValue conforming to the +// "http.request.header" semantic conventions. It represents the HTTP request +// headers, `<key>` being the normalized HTTP Header name (lowercase), the value +// being the header values. 
+func HTTPRequestHeader(key string, val ...string) attribute.KeyValue { + return attribute.StringSlice("http.request.header."+key, val) +} + +// HTTPRequestMethodOriginal returns an attribute KeyValue conforming to the +// "http.request.method_original" semantic conventions. It represents the +// original HTTP method sent by the client in the request line. +func HTTPRequestMethodOriginal(val string) attribute.KeyValue { + return HTTPRequestMethodOriginalKey.String(val) +} + +// HTTPRequestResendCount returns an attribute KeyValue conforming to the +// "http.request.resend_count" semantic conventions. It represents the ordinal +// number of request resending attempt (for any reason, including redirects). +func HTTPRequestResendCount(val int) attribute.KeyValue { + return HTTPRequestResendCountKey.Int(val) +} + +// HTTPRequestSize returns an attribute KeyValue conforming to the +// "http.request.size" semantic conventions. It represents the total size of the +// request in bytes. This should be the total number of bytes sent over the wire, +// including the request line (HTTP/1.1), framing (HTTP/2 and HTTP/3), headers, +// and request body if any. +func HTTPRequestSize(val int) attribute.KeyValue { + return HTTPRequestSizeKey.Int(val) +} + +// HTTPResponseBodySize returns an attribute KeyValue conforming to the +// "http.response.body.size" semantic conventions. It represents the size of the +// response payload body in bytes. This is the number of bytes transferred +// excluding headers and is often, but not always, present as the +// [Content-Length] header. For requests using transport encoding, this should be +// the compressed size. +// +// [Content-Length]: https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length +func HTTPResponseBodySize(val int) attribute.KeyValue { + return HTTPResponseBodySizeKey.Int(val) +} + +// HTTPResponseHeader returns an attribute KeyValue conforming to the +// "http.response.header" semantic conventions. 
It represents the HTTP response +// headers, `<key>` being the normalized HTTP Header name (lowercase), the value +// being the header values. +func HTTPResponseHeader(key string, val ...string) attribute.KeyValue { + return attribute.StringSlice("http.response.header."+key, val) +} + +// HTTPResponseSize returns an attribute KeyValue conforming to the +// "http.response.size" semantic conventions. It represents the total size of the +// response in bytes. This should be the total number of bytes sent over the +// wire, including the status line (HTTP/1.1), framing (HTTP/2 and HTTP/3), +// headers, and response body and trailers if any. +func HTTPResponseSize(val int) attribute.KeyValue { + return HTTPResponseSizeKey.Int(val) +} + +// HTTPResponseStatusCode returns an attribute KeyValue conforming to the +// "http.response.status_code" semantic conventions. It represents the +// [HTTP response status code]. +// +// [HTTP response status code]: https://tools.ietf.org/html/rfc7231#section-6 +func HTTPResponseStatusCode(val int) attribute.KeyValue { + return HTTPResponseStatusCodeKey.Int(val) +} + +// HTTPRoute returns an attribute KeyValue conforming to the "http.route" +// semantic conventions. It represents the matched route, that is, the path +// template in the format used by the respective server framework. +func HTTPRoute(val string) attribute.KeyValue { + return HTTPRouteKey.String(val) +} + +// Enum values for http.connection.state +var ( + // active state. + // Stability: development + HTTPConnectionStateActive = HTTPConnectionStateKey.String("active") + // idle state. + // Stability: development + HTTPConnectionStateIdle = HTTPConnectionStateKey.String("idle") +) + +// Enum values for http.request.method +var ( + // CONNECT method. + // Stability: stable + HTTPRequestMethodConnect = HTTPRequestMethodKey.String("CONNECT") + // DELETE method. + // Stability: stable + HTTPRequestMethodDelete = HTTPRequestMethodKey.String("DELETE") + // GET method. 
+ // Stability: stable + HTTPRequestMethodGet = HTTPRequestMethodKey.String("GET") + // HEAD method. + // Stability: stable + HTTPRequestMethodHead = HTTPRequestMethodKey.String("HEAD") + // OPTIONS method. + // Stability: stable + HTTPRequestMethodOptions = HTTPRequestMethodKey.String("OPTIONS") + // PATCH method. + // Stability: stable + HTTPRequestMethodPatch = HTTPRequestMethodKey.String("PATCH") + // POST method. + // Stability: stable + HTTPRequestMethodPost = HTTPRequestMethodKey.String("POST") + // PUT method. + // Stability: stable + HTTPRequestMethodPut = HTTPRequestMethodKey.String("PUT") + // TRACE method. + // Stability: stable + HTTPRequestMethodTrace = HTTPRequestMethodKey.String("TRACE") + // Any HTTP method that the instrumentation has no prior knowledge of. + // Stability: stable + HTTPRequestMethodOther = HTTPRequestMethodKey.String("_OTHER") +) + +// Namespace: hw +const ( + // HwBatteryCapacityKey is the attribute Key conforming to the + // "hw.battery.capacity" semantic conventions. It represents the design capacity + // in Watts-hours or Amper-hours. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "9.3Ah", "50Wh" + HwBatteryCapacityKey = attribute.Key("hw.battery.capacity") + + // HwBatteryChemistryKey is the attribute Key conforming to the + // "hw.battery.chemistry" semantic conventions. It represents the battery + // [chemistry], e.g. Lithium-Ion, Nickel-Cadmium, etc. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Li-ion", "NiMH" + // + // [chemistry]: https://schemas.dmtf.org/wbem/cim-html/2.31.0/CIM_Battery.html + HwBatteryChemistryKey = attribute.Key("hw.battery.chemistry") + + // HwBatteryStateKey is the attribute Key conforming to the "hw.battery.state" + // semantic conventions. It represents the current state of the battery. 
+ // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + HwBatteryStateKey = attribute.Key("hw.battery.state") + + // HwBiosVersionKey is the attribute Key conforming to the "hw.bios_version" + // semantic conventions. It represents the BIOS version of the hardware + // component. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "1.2.3" + HwBiosVersionKey = attribute.Key("hw.bios_version") + + // HwDriverVersionKey is the attribute Key conforming to the "hw.driver_version" + // semantic conventions. It represents the driver version for the hardware + // component. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "10.2.1-3" + HwDriverVersionKey = attribute.Key("hw.driver_version") + + // HwEnclosureTypeKey is the attribute Key conforming to the "hw.enclosure.type" + // semantic conventions. It represents the type of the enclosure (useful for + // modular systems). + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Computer", "Storage", "Switch" + HwEnclosureTypeKey = attribute.Key("hw.enclosure.type") + + // HwFirmwareVersionKey is the attribute Key conforming to the + // "hw.firmware_version" semantic conventions. It represents the firmware + // version of the hardware component. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "2.0.1" + HwFirmwareVersionKey = attribute.Key("hw.firmware_version") + + // HwGpuTaskKey is the attribute Key conforming to the "hw.gpu.task" semantic + // conventions. It represents the type of task the GPU is performing. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + HwGpuTaskKey = attribute.Key("hw.gpu.task") + + // HwIDKey is the attribute Key conforming to the "hw.id" semantic conventions. 
+ // It represents an identifier for the hardware component, unique within the + // monitored host. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "win32battery_battery_testsysa33_1" + HwIDKey = attribute.Key("hw.id") + + // HwLimitTypeKey is the attribute Key conforming to the "hw.limit_type" + // semantic conventions. It represents the type of limit for hardware + // components. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + HwLimitTypeKey = attribute.Key("hw.limit_type") + + // HwLogicalDiskRaidLevelKey is the attribute Key conforming to the + // "hw.logical_disk.raid_level" semantic conventions. It represents the RAID + // Level of the logical disk. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "RAID0+1", "RAID5", "RAID10" + HwLogicalDiskRaidLevelKey = attribute.Key("hw.logical_disk.raid_level") + + // HwLogicalDiskStateKey is the attribute Key conforming to the + // "hw.logical_disk.state" semantic conventions. It represents the state of the + // logical disk space usage. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + HwLogicalDiskStateKey = attribute.Key("hw.logical_disk.state") + + // HwMemoryTypeKey is the attribute Key conforming to the "hw.memory.type" + // semantic conventions. It represents the type of the memory module. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "DDR4", "DDR5", "LPDDR5" + HwMemoryTypeKey = attribute.Key("hw.memory.type") + + // HwModelKey is the attribute Key conforming to the "hw.model" semantic + // conventions. It represents the descriptive model name of the hardware + // component. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "PERC H740P", "Intel(R) Core(TM) i7-10700K", "Dell XPS 15 Battery" + HwModelKey = attribute.Key("hw.model") + + // HwNameKey is the attribute Key conforming to the "hw.name" semantic + // conventions. It represents an easily-recognizable name for the hardware + // component. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "eth0" + HwNameKey = attribute.Key("hw.name") + + // HwNetworkLogicalAddressesKey is the attribute Key conforming to the + // "hw.network.logical_addresses" semantic conventions. It represents the + // logical addresses of the adapter (e.g. IP address, or WWPN). + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "172.16.8.21", "57.11.193.42" + HwNetworkLogicalAddressesKey = attribute.Key("hw.network.logical_addresses") + + // HwNetworkPhysicalAddressKey is the attribute Key conforming to the + // "hw.network.physical_address" semantic conventions. It represents the + // physical address of the adapter (e.g. MAC address, or WWNN). + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "00-90-F5-E9-7B-36" + HwNetworkPhysicalAddressKey = attribute.Key("hw.network.physical_address") + + // HwParentKey is the attribute Key conforming to the "hw.parent" semantic + // conventions. It represents the unique identifier of the parent component + // (typically the `hw.id` attribute of the enclosure, or disk controller). + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "dellStorage_perc_0" + HwParentKey = attribute.Key("hw.parent") + + // HwPhysicalDiskSmartAttributeKey is the attribute Key conforming to the + // "hw.physical_disk.smart_attribute" semantic conventions. It represents the + // [S.M.A.R.T.] 
(Self-Monitoring, Analysis, and Reporting Technology) attribute + // of the physical disk. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Spin Retry Count", "Seek Error Rate", "Raw Read Error Rate" + // + // [S.M.A.R.T.]: https://wikipedia.org/wiki/S.M.A.R.T. + HwPhysicalDiskSmartAttributeKey = attribute.Key("hw.physical_disk.smart_attribute") + + // HwPhysicalDiskStateKey is the attribute Key conforming to the + // "hw.physical_disk.state" semantic conventions. It represents the state of the + // physical disk endurance utilization. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + HwPhysicalDiskStateKey = attribute.Key("hw.physical_disk.state") + + // HwPhysicalDiskTypeKey is the attribute Key conforming to the + // "hw.physical_disk.type" semantic conventions. It represents the type of the + // physical disk. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "HDD", "SSD", "10K" + HwPhysicalDiskTypeKey = attribute.Key("hw.physical_disk.type") + + // HwSensorLocationKey is the attribute Key conforming to the + // "hw.sensor_location" semantic conventions. It represents the location of the + // sensor. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "cpu0", "ps1", "INLET", "CPU0_DIE", "AMBIENT", "MOTHERBOARD", "PS0 + // V3_3", "MAIN_12V", "CPU_VCORE" + HwSensorLocationKey = attribute.Key("hw.sensor_location") + + // HwSerialNumberKey is the attribute Key conforming to the "hw.serial_number" + // semantic conventions. It represents the serial number of the hardware + // component. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "CNFCP0123456789" + HwSerialNumberKey = attribute.Key("hw.serial_number") + + // HwStateKey is the attribute Key conforming to the "hw.state" semantic + // conventions. It represents the current state of the component. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + HwStateKey = attribute.Key("hw.state") + + // HwTapeDriveOperationTypeKey is the attribute Key conforming to the + // "hw.tape_drive.operation_type" semantic conventions. It represents the type + // of tape drive operation. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + HwTapeDriveOperationTypeKey = attribute.Key("hw.tape_drive.operation_type") + + // HwTypeKey is the attribute Key conforming to the "hw.type" semantic + // conventions. It represents the type of the component. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // Note: Describes the category of the hardware component for which `hw.state` + // is being reported. For example, `hw.type=temperature` along with + // `hw.state=degraded` would indicate that the temperature of the hardware + // component has been reported as `degraded`. + HwTypeKey = attribute.Key("hw.type") + + // HwVendorKey is the attribute Key conforming to the "hw.vendor" semantic + // conventions. It represents the vendor name of the hardware component. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Dell", "HP", "Intel", "AMD", "LSI", "Lenovo" + HwVendorKey = attribute.Key("hw.vendor") +) + +// HwBatteryCapacity returns an attribute KeyValue conforming to the +// "hw.battery.capacity" semantic conventions. It represents the design capacity +// in Watts-hours or Amper-hours. 
+func HwBatteryCapacity(val string) attribute.KeyValue { + return HwBatteryCapacityKey.String(val) +} + +// HwBatteryChemistry returns an attribute KeyValue conforming to the +// "hw.battery.chemistry" semantic conventions. It represents the battery +// [chemistry], e.g. Lithium-Ion, Nickel-Cadmium, etc. +// +// [chemistry]: https://schemas.dmtf.org/wbem/cim-html/2.31.0/CIM_Battery.html +func HwBatteryChemistry(val string) attribute.KeyValue { + return HwBatteryChemistryKey.String(val) +} + +// HwBiosVersion returns an attribute KeyValue conforming to the +// "hw.bios_version" semantic conventions. It represents the BIOS version of the +// hardware component. +func HwBiosVersion(val string) attribute.KeyValue { + return HwBiosVersionKey.String(val) +} + +// HwDriverVersion returns an attribute KeyValue conforming to the +// "hw.driver_version" semantic conventions. It represents the driver version for +// the hardware component. +func HwDriverVersion(val string) attribute.KeyValue { + return HwDriverVersionKey.String(val) +} + +// HwEnclosureType returns an attribute KeyValue conforming to the +// "hw.enclosure.type" semantic conventions. It represents the type of the +// enclosure (useful for modular systems). +func HwEnclosureType(val string) attribute.KeyValue { + return HwEnclosureTypeKey.String(val) +} + +// HwFirmwareVersion returns an attribute KeyValue conforming to the +// "hw.firmware_version" semantic conventions. It represents the firmware version +// of the hardware component. +func HwFirmwareVersion(val string) attribute.KeyValue { + return HwFirmwareVersionKey.String(val) +} + +// HwID returns an attribute KeyValue conforming to the "hw.id" semantic +// conventions. It represents an identifier for the hardware component, unique +// within the monitored host. 
+func HwID(val string) attribute.KeyValue { + return HwIDKey.String(val) +} + +// HwLogicalDiskRaidLevel returns an attribute KeyValue conforming to the +// "hw.logical_disk.raid_level" semantic conventions. It represents the RAID +// Level of the logical disk. +func HwLogicalDiskRaidLevel(val string) attribute.KeyValue { + return HwLogicalDiskRaidLevelKey.String(val) +} + +// HwMemoryType returns an attribute KeyValue conforming to the "hw.memory.type" +// semantic conventions. It represents the type of the memory module. +func HwMemoryType(val string) attribute.KeyValue { + return HwMemoryTypeKey.String(val) +} + +// HwModel returns an attribute KeyValue conforming to the "hw.model" semantic +// conventions. It represents the descriptive model name of the hardware +// component. +func HwModel(val string) attribute.KeyValue { + return HwModelKey.String(val) +} + +// HwName returns an attribute KeyValue conforming to the "hw.name" semantic +// conventions. It represents an easily-recognizable name for the hardware +// component. +func HwName(val string) attribute.KeyValue { + return HwNameKey.String(val) +} + +// HwNetworkLogicalAddresses returns an attribute KeyValue conforming to the +// "hw.network.logical_addresses" semantic conventions. It represents the logical +// addresses of the adapter (e.g. IP address, or WWPN). +func HwNetworkLogicalAddresses(val ...string) attribute.KeyValue { + return HwNetworkLogicalAddressesKey.StringSlice(val) +} + +// HwNetworkPhysicalAddress returns an attribute KeyValue conforming to the +// "hw.network.physical_address" semantic conventions. It represents the physical +// address of the adapter (e.g. MAC address, or WWNN). +func HwNetworkPhysicalAddress(val string) attribute.KeyValue { + return HwNetworkPhysicalAddressKey.String(val) +} + +// HwParent returns an attribute KeyValue conforming to the "hw.parent" semantic +// conventions. 
It represents the unique identifier of the parent component +// (typically the `hw.id` attribute of the enclosure, or disk controller). +func HwParent(val string) attribute.KeyValue { + return HwParentKey.String(val) +} + +// HwPhysicalDiskSmartAttribute returns an attribute KeyValue conforming to the +// "hw.physical_disk.smart_attribute" semantic conventions. It represents the +// [S.M.A.R.T.] (Self-Monitoring, Analysis, and Reporting Technology) attribute +// of the physical disk. +// +// [S.M.A.R.T.]: https://wikipedia.org/wiki/S.M.A.R.T. +func HwPhysicalDiskSmartAttribute(val string) attribute.KeyValue { + return HwPhysicalDiskSmartAttributeKey.String(val) +} + +// HwPhysicalDiskType returns an attribute KeyValue conforming to the +// "hw.physical_disk.type" semantic conventions. It represents the type of the +// physical disk. +func HwPhysicalDiskType(val string) attribute.KeyValue { + return HwPhysicalDiskTypeKey.String(val) +} + +// HwSensorLocation returns an attribute KeyValue conforming to the +// "hw.sensor_location" semantic conventions. It represents the location of the +// sensor. +func HwSensorLocation(val string) attribute.KeyValue { + return HwSensorLocationKey.String(val) +} + +// HwSerialNumber returns an attribute KeyValue conforming to the +// "hw.serial_number" semantic conventions. It represents the serial number of +// the hardware component. +func HwSerialNumber(val string) attribute.KeyValue { + return HwSerialNumberKey.String(val) +} + +// HwVendor returns an attribute KeyValue conforming to the "hw.vendor" semantic +// conventions. It represents the vendor name of the hardware component. 
+func HwVendor(val string) attribute.KeyValue { + return HwVendorKey.String(val) +} + +// Enum values for hw.battery.state +var ( + // Charging + // Stability: development + HwBatteryStateCharging = HwBatteryStateKey.String("charging") + // Discharging + // Stability: development + HwBatteryStateDischarging = HwBatteryStateKey.String("discharging") +) + +// Enum values for hw.gpu.task +var ( + // Decoder + // Stability: development + HwGpuTaskDecoder = HwGpuTaskKey.String("decoder") + // Encoder + // Stability: development + HwGpuTaskEncoder = HwGpuTaskKey.String("encoder") + // General + // Stability: development + HwGpuTaskGeneral = HwGpuTaskKey.String("general") +) + +// Enum values for hw.limit_type +var ( + // Critical + // Stability: development + HwLimitTypeCritical = HwLimitTypeKey.String("critical") + // Degraded + // Stability: development + HwLimitTypeDegraded = HwLimitTypeKey.String("degraded") + // High Critical + // Stability: development + HwLimitTypeHighCritical = HwLimitTypeKey.String("high.critical") + // High Degraded + // Stability: development + HwLimitTypeHighDegraded = HwLimitTypeKey.String("high.degraded") + // Low Critical + // Stability: development + HwLimitTypeLowCritical = HwLimitTypeKey.String("low.critical") + // Low Degraded + // Stability: development + HwLimitTypeLowDegraded = HwLimitTypeKey.String("low.degraded") + // Maximum + // Stability: development + HwLimitTypeMax = HwLimitTypeKey.String("max") + // Throttled + // Stability: development + HwLimitTypeThrottled = HwLimitTypeKey.String("throttled") + // Turbo + // Stability: development + HwLimitTypeTurbo = HwLimitTypeKey.String("turbo") +) + +// Enum values for hw.logical_disk.state +var ( + // Used + // Stability: development + HwLogicalDiskStateUsed = HwLogicalDiskStateKey.String("used") + // Free + // Stability: development + HwLogicalDiskStateFree = HwLogicalDiskStateKey.String("free") +) + +// Enum values for hw.physical_disk.state +var ( + // Remaining + // Stability: 
development + HwPhysicalDiskStateRemaining = HwPhysicalDiskStateKey.String("remaining") +) + +// Enum values for hw.state +var ( + // Degraded + // Stability: development + HwStateDegraded = HwStateKey.String("degraded") + // Failed + // Stability: development + HwStateFailed = HwStateKey.String("failed") + // Needs Cleaning + // Stability: development + HwStateNeedsCleaning = HwStateKey.String("needs_cleaning") + // OK + // Stability: development + HwStateOk = HwStateKey.String("ok") + // Predicted Failure + // Stability: development + HwStatePredictedFailure = HwStateKey.String("predicted_failure") +) + +// Enum values for hw.tape_drive.operation_type +var ( + // Mount + // Stability: development + HwTapeDriveOperationTypeMount = HwTapeDriveOperationTypeKey.String("mount") + // Unmount + // Stability: development + HwTapeDriveOperationTypeUnmount = HwTapeDriveOperationTypeKey.String("unmount") + // Clean + // Stability: development + HwTapeDriveOperationTypeClean = HwTapeDriveOperationTypeKey.String("clean") +) + +// Enum values for hw.type +var ( + // Battery + // Stability: development + HwTypeBattery = HwTypeKey.String("battery") + // CPU + // Stability: development + HwTypeCPU = HwTypeKey.String("cpu") + // Disk controller + // Stability: development + HwTypeDiskController = HwTypeKey.String("disk_controller") + // Enclosure + // Stability: development + HwTypeEnclosure = HwTypeKey.String("enclosure") + // Fan + // Stability: development + HwTypeFan = HwTypeKey.String("fan") + // GPU + // Stability: development + HwTypeGpu = HwTypeKey.String("gpu") + // Logical disk + // Stability: development + HwTypeLogicalDisk = HwTypeKey.String("logical_disk") + // Memory + // Stability: development + HwTypeMemory = HwTypeKey.String("memory") + // Network + // Stability: development + HwTypeNetwork = HwTypeKey.String("network") + // Physical disk + // Stability: development + HwTypePhysicalDisk = HwTypeKey.String("physical_disk") + // Power supply + // Stability: 
development + HwTypePowerSupply = HwTypeKey.String("power_supply") + // Tape drive + // Stability: development + HwTypeTapeDrive = HwTypeKey.String("tape_drive") + // Temperature + // Stability: development + HwTypeTemperature = HwTypeKey.String("temperature") + // Voltage + // Stability: development + HwTypeVoltage = HwTypeKey.String("voltage") +) + +// Namespace: ios +const ( + // IOSAppStateKey is the attribute Key conforming to the "ios.app.state" + // semantic conventions. It represents the this attribute represents the state + // of the application. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // Note: The iOS lifecycle states are defined in the + // [UIApplicationDelegate documentation], and from which the `OS terminology` + // column values are derived. + // + // [UIApplicationDelegate documentation]: https://developer.apple.com/documentation/uikit/uiapplicationdelegate + IOSAppStateKey = attribute.Key("ios.app.state") +) + +// Enum values for ios.app.state +var ( + // The app has become `active`. Associated with UIKit notification + // `applicationDidBecomeActive`. + // + // Stability: development + IOSAppStateActive = IOSAppStateKey.String("active") + // The app is now `inactive`. Associated with UIKit notification + // `applicationWillResignActive`. + // + // Stability: development + IOSAppStateInactive = IOSAppStateKey.String("inactive") + // The app is now in the background. This value is associated with UIKit + // notification `applicationDidEnterBackground`. + // + // Stability: development + IOSAppStateBackground = IOSAppStateKey.String("background") + // The app is now in the foreground. This value is associated with UIKit + // notification `applicationWillEnterForeground`. + // + // Stability: development + IOSAppStateForeground = IOSAppStateKey.String("foreground") + // The app is about to terminate. Associated with UIKit notification + // `applicationWillTerminate`. 
+ // + // Stability: development + IOSAppStateTerminate = IOSAppStateKey.String("terminate") +) + +// Namespace: k8s +const ( + // K8SClusterNameKey is the attribute Key conforming to the "k8s.cluster.name" + // semantic conventions. It represents the name of the cluster. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "opentelemetry-cluster" + K8SClusterNameKey = attribute.Key("k8s.cluster.name") + + // K8SClusterUIDKey is the attribute Key conforming to the "k8s.cluster.uid" + // semantic conventions. It represents a pseudo-ID for the cluster, set to the + // UID of the `kube-system` namespace. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "218fc5a9-a5f1-4b54-aa05-46717d0ab26d" + // Note: K8s doesn't have support for obtaining a cluster ID. If this is ever + // added, we will recommend collecting the `k8s.cluster.uid` through the + // official APIs. In the meantime, we are able to use the `uid` of the + // `kube-system` namespace as a proxy for cluster ID. Read on for the + // rationale. + // + // Every object created in a K8s cluster is assigned a distinct UID. The + // `kube-system` namespace is used by Kubernetes itself and will exist + // for the lifetime of the cluster. Using the `uid` of the `kube-system` + // namespace is a reasonable proxy for the K8s ClusterID as it will only + // change if the cluster is rebuilt. Furthermore, Kubernetes UIDs are + // UUIDs as standardized by + // [ISO/IEC 9834-8 and ITU-T X.667]. + // Which states: + // + // > If generated according to one of the mechanisms defined in Rec. + // > ITU-T X.667 | ISO/IEC 9834-8, a UUID is either guaranteed to be + // > different from all other UUIDs generated before 3603 A.D., or is + // > extremely likely to be different (depending on the mechanism chosen). + // + // Therefore, UIDs between clusters should be extremely unlikely to + // conflict. 
+ // + // [ISO/IEC 9834-8 and ITU-T X.667]: https://www.itu.int/ITU-T/studygroups/com17/oid.html + K8SClusterUIDKey = attribute.Key("k8s.cluster.uid") + + // K8SContainerNameKey is the attribute Key conforming to the + // "k8s.container.name" semantic conventions. It represents the name of the + // Container from Pod specification, must be unique within a Pod. Container + // runtime usually uses different globally unique name (`container.name`). + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "redis" + K8SContainerNameKey = attribute.Key("k8s.container.name") + + // K8SContainerRestartCountKey is the attribute Key conforming to the + // "k8s.container.restart_count" semantic conventions. It represents the number + // of times the container was restarted. This attribute can be used to identify + // a particular container (running or stopped) within a container spec. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + K8SContainerRestartCountKey = attribute.Key("k8s.container.restart_count") + + // K8SContainerStatusLastTerminatedReasonKey is the attribute Key conforming to + // the "k8s.container.status.last_terminated_reason" semantic conventions. It + // represents the last terminated reason of the Container. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Evicted", "Error" + K8SContainerStatusLastTerminatedReasonKey = attribute.Key("k8s.container.status.last_terminated_reason") + + // K8SContainerStatusReasonKey is the attribute Key conforming to the + // "k8s.container.status.reason" semantic conventions. It represents the reason + // for the container state. Corresponds to the `reason` field of the: + // [K8s ContainerStateWaiting] or [K8s ContainerStateTerminated]. 
+ // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "ContainerCreating", "CrashLoopBackOff", + // "CreateContainerConfigError", "ErrImagePull", "ImagePullBackOff", + // "OOMKilled", "Completed", "Error", "ContainerCannotRun" + // + // [K8s ContainerStateWaiting]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#containerstatewaiting-v1-core + // [K8s ContainerStateTerminated]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#containerstateterminated-v1-core + K8SContainerStatusReasonKey = attribute.Key("k8s.container.status.reason") + + // K8SContainerStatusStateKey is the attribute Key conforming to the + // "k8s.container.status.state" semantic conventions. It represents the state of + // the container. [K8s ContainerState]. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "terminated", "running", "waiting" + // + // [K8s ContainerState]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#containerstate-v1-core + K8SContainerStatusStateKey = attribute.Key("k8s.container.status.state") + + // K8SCronJobNameKey is the attribute Key conforming to the "k8s.cronjob.name" + // semantic conventions. It represents the name of the CronJob. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "opentelemetry" + K8SCronJobNameKey = attribute.Key("k8s.cronjob.name") + + // K8SCronJobUIDKey is the attribute Key conforming to the "k8s.cronjob.uid" + // semantic conventions. It represents the UID of the CronJob. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff" + K8SCronJobUIDKey = attribute.Key("k8s.cronjob.uid") + + // K8SDaemonSetNameKey is the attribute Key conforming to the + // "k8s.daemonset.name" semantic conventions. It represents the name of the + // DaemonSet. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "opentelemetry" + K8SDaemonSetNameKey = attribute.Key("k8s.daemonset.name") + + // K8SDaemonSetUIDKey is the attribute Key conforming to the "k8s.daemonset.uid" + // semantic conventions. It represents the UID of the DaemonSet. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff" + K8SDaemonSetUIDKey = attribute.Key("k8s.daemonset.uid") + + // K8SDeploymentNameKey is the attribute Key conforming to the + // "k8s.deployment.name" semantic conventions. It represents the name of the + // Deployment. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "opentelemetry" + K8SDeploymentNameKey = attribute.Key("k8s.deployment.name") + + // K8SDeploymentUIDKey is the attribute Key conforming to the + // "k8s.deployment.uid" semantic conventions. It represents the UID of the + // Deployment. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff" + K8SDeploymentUIDKey = attribute.Key("k8s.deployment.uid") + + // K8SHPAMetricTypeKey is the attribute Key conforming to the + // "k8s.hpa.metric.type" semantic conventions. It represents the type of metric + // source for the horizontal pod autoscaler. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Resource", "ContainerResource" + // Note: This attribute reflects the `type` field of spec.metrics[] in the HPA. + K8SHPAMetricTypeKey = attribute.Key("k8s.hpa.metric.type") + + // K8SHPANameKey is the attribute Key conforming to the "k8s.hpa.name" semantic + // conventions. It represents the name of the horizontal pod autoscaler. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "opentelemetry" + K8SHPANameKey = attribute.Key("k8s.hpa.name") + + // K8SHPAScaletargetrefAPIVersionKey is the attribute Key conforming to the + // "k8s.hpa.scaletargetref.api_version" semantic conventions. It represents the + // API version of the target resource to scale for the HorizontalPodAutoscaler. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "apps/v1", "autoscaling/v2" + // Note: This maps to the `apiVersion` field in the `scaleTargetRef` of the HPA + // spec. + K8SHPAScaletargetrefAPIVersionKey = attribute.Key("k8s.hpa.scaletargetref.api_version") + + // K8SHPAScaletargetrefKindKey is the attribute Key conforming to the + // "k8s.hpa.scaletargetref.kind" semantic conventions. It represents the kind of + // the target resource to scale for the HorizontalPodAutoscaler. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Deployment", "StatefulSet" + // Note: This maps to the `kind` field in the `scaleTargetRef` of the HPA spec. + K8SHPAScaletargetrefKindKey = attribute.Key("k8s.hpa.scaletargetref.kind") + + // K8SHPAScaletargetrefNameKey is the attribute Key conforming to the + // "k8s.hpa.scaletargetref.name" semantic conventions. It represents the name of + // the target resource to scale for the HorizontalPodAutoscaler. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "my-deployment", "my-statefulset" + // Note: This maps to the `name` field in the `scaleTargetRef` of the HPA spec. + K8SHPAScaletargetrefNameKey = attribute.Key("k8s.hpa.scaletargetref.name") + + // K8SHPAUIDKey is the attribute Key conforming to the "k8s.hpa.uid" semantic + // conventions. It represents the UID of the horizontal pod autoscaler. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff" + K8SHPAUIDKey = attribute.Key("k8s.hpa.uid") + + // K8SHugepageSizeKey is the attribute Key conforming to the "k8s.hugepage.size" + // semantic conventions. It represents the size (identifier) of the K8s huge + // page. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "2Mi" + K8SHugepageSizeKey = attribute.Key("k8s.hugepage.size") + + // K8SJobNameKey is the attribute Key conforming to the "k8s.job.name" semantic + // conventions. It represents the name of the Job. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "opentelemetry" + K8SJobNameKey = attribute.Key("k8s.job.name") + + // K8SJobUIDKey is the attribute Key conforming to the "k8s.job.uid" semantic + // conventions. It represents the UID of the Job. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff" + K8SJobUIDKey = attribute.Key("k8s.job.uid") + + // K8SNamespaceNameKey is the attribute Key conforming to the + // "k8s.namespace.name" semantic conventions. It represents the name of the + // namespace that the pod is running in. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "default" + K8SNamespaceNameKey = attribute.Key("k8s.namespace.name") + + // K8SNamespacePhaseKey is the attribute Key conforming to the + // "k8s.namespace.phase" semantic conventions. It represents the phase of the + // K8s namespace. 
+ // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "active", "terminating" + // Note: This attribute aligns with the `phase` field of the + // [K8s NamespaceStatus] + // + // [K8s NamespaceStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#namespacestatus-v1-core + K8SNamespacePhaseKey = attribute.Key("k8s.namespace.phase") + + // K8SNodeConditionStatusKey is the attribute Key conforming to the + // "k8s.node.condition.status" semantic conventions. It represents the status of + // the condition, one of True, False, Unknown. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "true", "false", "unknown" + // Note: This attribute aligns with the `status` field of the + // [NodeCondition] + // + // [NodeCondition]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#nodecondition-v1-core + K8SNodeConditionStatusKey = attribute.Key("k8s.node.condition.status") + + // K8SNodeConditionTypeKey is the attribute Key conforming to the + // "k8s.node.condition.type" semantic conventions. It represents the condition + // type of a K8s Node. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Ready", "DiskPressure" + // Note: K8s Node conditions as described + // by [K8s documentation]. + // + // This attribute aligns with the `type` field of the + // [NodeCondition] + // + // The set of possible values is not limited to those listed here. Managed + // Kubernetes environments, + // or custom controllers MAY introduce additional node condition types. + // When this occurs, the exact value as reported by the Kubernetes API SHOULD be + // used. 
+ // + // [K8s documentation]: https://v1-32.docs.kubernetes.io/docs/reference/node/node-status/#condition + // [NodeCondition]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#nodecondition-v1-core + K8SNodeConditionTypeKey = attribute.Key("k8s.node.condition.type") + + // K8SNodeNameKey is the attribute Key conforming to the "k8s.node.name" + // semantic conventions. It represents the name of the Node. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "node-1" + K8SNodeNameKey = attribute.Key("k8s.node.name") + + // K8SNodeUIDKey is the attribute Key conforming to the "k8s.node.uid" semantic + // conventions. It represents the UID of the Node. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2" + K8SNodeUIDKey = attribute.Key("k8s.node.uid") + + // K8SPodNameKey is the attribute Key conforming to the "k8s.pod.name" semantic + // conventions. It represents the name of the Pod. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "opentelemetry-pod-autoconf" + K8SPodNameKey = attribute.Key("k8s.pod.name") + + // K8SPodUIDKey is the attribute Key conforming to the "k8s.pod.uid" semantic + // conventions. It represents the UID of the Pod. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff" + K8SPodUIDKey = attribute.Key("k8s.pod.uid") + + // K8SReplicaSetNameKey is the attribute Key conforming to the + // "k8s.replicaset.name" semantic conventions. It represents the name of the + // ReplicaSet. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "opentelemetry" + K8SReplicaSetNameKey = attribute.Key("k8s.replicaset.name") + + // K8SReplicaSetUIDKey is the attribute Key conforming to the + // "k8s.replicaset.uid" semantic conventions. It represents the UID of the + // ReplicaSet. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff" + K8SReplicaSetUIDKey = attribute.Key("k8s.replicaset.uid") + + // K8SReplicationControllerNameKey is the attribute Key conforming to the + // "k8s.replicationcontroller.name" semantic conventions. It represents the name + // of the replication controller. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "opentelemetry" + K8SReplicationControllerNameKey = attribute.Key("k8s.replicationcontroller.name") + + // K8SReplicationControllerUIDKey is the attribute Key conforming to the + // "k8s.replicationcontroller.uid" semantic conventions. It represents the UID + // of the replication controller. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff" + K8SReplicationControllerUIDKey = attribute.Key("k8s.replicationcontroller.uid") + + // K8SResourceQuotaNameKey is the attribute Key conforming to the + // "k8s.resourcequota.name" semantic conventions. It represents the name of the + // resource quota. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "opentelemetry" + K8SResourceQuotaNameKey = attribute.Key("k8s.resourcequota.name") + + // K8SResourceQuotaResourceNameKey is the attribute Key conforming to the + // "k8s.resourcequota.resource_name" semantic conventions. It represents the + // name of the K8s resource a resource quota defines. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "count/replicationcontrollers" + // Note: The value for this attribute can be either the full + // `count/[.]` string (e.g., count/deployments.apps, + // count/pods), or, for certain core Kubernetes resources, just the resource + // name (e.g., pods, services, configmaps). Both forms are supported by + // Kubernetes for object count quotas. See + // [Kubernetes Resource Quotas documentation] for more details. + // + // [Kubernetes Resource Quotas documentation]: https://kubernetes.io/docs/concepts/policy/resource-quotas/#object-count-quota + K8SResourceQuotaResourceNameKey = attribute.Key("k8s.resourcequota.resource_name") + + // K8SResourceQuotaUIDKey is the attribute Key conforming to the + // "k8s.resourcequota.uid" semantic conventions. It represents the UID of the + // resource quota. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff" + K8SResourceQuotaUIDKey = attribute.Key("k8s.resourcequota.uid") + + // K8SStatefulSetNameKey is the attribute Key conforming to the + // "k8s.statefulset.name" semantic conventions. It represents the name of the + // StatefulSet. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "opentelemetry" + K8SStatefulSetNameKey = attribute.Key("k8s.statefulset.name") + + // K8SStatefulSetUIDKey is the attribute Key conforming to the + // "k8s.statefulset.uid" semantic conventions. It represents the UID of the + // StatefulSet. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff" + K8SStatefulSetUIDKey = attribute.Key("k8s.statefulset.uid") + + // K8SStorageclassNameKey is the attribute Key conforming to the + // "k8s.storageclass.name" semantic conventions. 
It represents the name of K8s + // [StorageClass] object. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "gold.storageclass.storage.k8s.io" + // + // [StorageClass]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#storageclass-v1-storage-k8s-io + K8SStorageclassNameKey = attribute.Key("k8s.storageclass.name") + + // K8SVolumeNameKey is the attribute Key conforming to the "k8s.volume.name" + // semantic conventions. It represents the name of the K8s volume. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "volume0" + K8SVolumeNameKey = attribute.Key("k8s.volume.name") + + // K8SVolumeTypeKey is the attribute Key conforming to the "k8s.volume.type" + // semantic conventions. It represents the type of the K8s volume. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "emptyDir", "persistentVolumeClaim" + K8SVolumeTypeKey = attribute.Key("k8s.volume.type") +) + +// K8SClusterName returns an attribute KeyValue conforming to the +// "k8s.cluster.name" semantic conventions. It represents the name of the +// cluster. +func K8SClusterName(val string) attribute.KeyValue { + return K8SClusterNameKey.String(val) +} + +// K8SClusterUID returns an attribute KeyValue conforming to the +// "k8s.cluster.uid" semantic conventions. It represents a pseudo-ID for the +// cluster, set to the UID of the `kube-system` namespace. +func K8SClusterUID(val string) attribute.KeyValue { + return K8SClusterUIDKey.String(val) +} + +// K8SContainerName returns an attribute KeyValue conforming to the +// "k8s.container.name" semantic conventions. It represents the name of the +// Container from Pod specification, must be unique within a Pod. Container +// runtime usually uses different globally unique name (`container.name`). 
+func K8SContainerName(val string) attribute.KeyValue { + return K8SContainerNameKey.String(val) +} + +// K8SContainerRestartCount returns an attribute KeyValue conforming to the +// "k8s.container.restart_count" semantic conventions. It represents the number +// of times the container was restarted. This attribute can be used to identify a +// particular container (running or stopped) within a container spec. +func K8SContainerRestartCount(val int) attribute.KeyValue { + return K8SContainerRestartCountKey.Int(val) +} + +// K8SContainerStatusLastTerminatedReason returns an attribute KeyValue +// conforming to the "k8s.container.status.last_terminated_reason" semantic +// conventions. It represents the last terminated reason of the Container. +func K8SContainerStatusLastTerminatedReason(val string) attribute.KeyValue { + return K8SContainerStatusLastTerminatedReasonKey.String(val) +} + +// K8SCronJobAnnotation returns an attribute KeyValue conforming to the +// "k8s.cronjob.annotation" semantic conventions. It represents the cronjob +// annotation placed on the CronJob, the `` being the annotation name, the +// value being the annotation value. +func K8SCronJobAnnotation(key string, val string) attribute.KeyValue { + return attribute.String("k8s.cronjob.annotation."+key, val) +} + +// K8SCronJobLabel returns an attribute KeyValue conforming to the +// "k8s.cronjob.label" semantic conventions. It represents the label placed on +// the CronJob, the `` being the label name, the value being the label +// value. +func K8SCronJobLabel(key string, val string) attribute.KeyValue { + return attribute.String("k8s.cronjob.label."+key, val) +} + +// K8SCronJobName returns an attribute KeyValue conforming to the +// "k8s.cronjob.name" semantic conventions. It represents the name of the +// CronJob. 
+func K8SCronJobName(val string) attribute.KeyValue { + return K8SCronJobNameKey.String(val) +} + +// K8SCronJobUID returns an attribute KeyValue conforming to the +// "k8s.cronjob.uid" semantic conventions. It represents the UID of the CronJob. +func K8SCronJobUID(val string) attribute.KeyValue { + return K8SCronJobUIDKey.String(val) +} + +// K8SDaemonSetAnnotation returns an attribute KeyValue conforming to the +// "k8s.daemonset.annotation" semantic conventions. It represents the annotation +// placed on the DaemonSet, the `` being the annotation name, the value +// being the annotation value, even if the value is empty. +func K8SDaemonSetAnnotation(key string, val string) attribute.KeyValue { + return attribute.String("k8s.daemonset.annotation."+key, val) +} + +// K8SDaemonSetLabel returns an attribute KeyValue conforming to the +// "k8s.daemonset.label" semantic conventions. It represents the label placed on +// the DaemonSet, the `` being the label name, the value being the label +// value, even if the value is empty. +func K8SDaemonSetLabel(key string, val string) attribute.KeyValue { + return attribute.String("k8s.daemonset.label."+key, val) +} + +// K8SDaemonSetName returns an attribute KeyValue conforming to the +// "k8s.daemonset.name" semantic conventions. It represents the name of the +// DaemonSet. +func K8SDaemonSetName(val string) attribute.KeyValue { + return K8SDaemonSetNameKey.String(val) +} + +// K8SDaemonSetUID returns an attribute KeyValue conforming to the +// "k8s.daemonset.uid" semantic conventions. It represents the UID of the +// DaemonSet. +func K8SDaemonSetUID(val string) attribute.KeyValue { + return K8SDaemonSetUIDKey.String(val) +} + +// K8SDeploymentAnnotation returns an attribute KeyValue conforming to the +// "k8s.deployment.annotation" semantic conventions. It represents the annotation +// placed on the Deployment, the `` being the annotation name, the value +// being the annotation value, even if the value is empty. 
+func K8SDeploymentAnnotation(key string, val string) attribute.KeyValue { + return attribute.String("k8s.deployment.annotation."+key, val) +} + +// K8SDeploymentLabel returns an attribute KeyValue conforming to the +// "k8s.deployment.label" semantic conventions. It represents the label placed on +// the Deployment, the `` being the label name, the value being the label +// value, even if the value is empty. +func K8SDeploymentLabel(key string, val string) attribute.KeyValue { + return attribute.String("k8s.deployment.label."+key, val) +} + +// K8SDeploymentName returns an attribute KeyValue conforming to the +// "k8s.deployment.name" semantic conventions. It represents the name of the +// Deployment. +func K8SDeploymentName(val string) attribute.KeyValue { + return K8SDeploymentNameKey.String(val) +} + +// K8SDeploymentUID returns an attribute KeyValue conforming to the +// "k8s.deployment.uid" semantic conventions. It represents the UID of the +// Deployment. +func K8SDeploymentUID(val string) attribute.KeyValue { + return K8SDeploymentUIDKey.String(val) +} + +// K8SHPAMetricType returns an attribute KeyValue conforming to the +// "k8s.hpa.metric.type" semantic conventions. It represents the type of metric +// source for the horizontal pod autoscaler. +func K8SHPAMetricType(val string) attribute.KeyValue { + return K8SHPAMetricTypeKey.String(val) +} + +// K8SHPAName returns an attribute KeyValue conforming to the "k8s.hpa.name" +// semantic conventions. It represents the name of the horizontal pod autoscaler. +func K8SHPAName(val string) attribute.KeyValue { + return K8SHPANameKey.String(val) +} + +// K8SHPAScaletargetrefAPIVersion returns an attribute KeyValue conforming to the +// "k8s.hpa.scaletargetref.api_version" semantic conventions. It represents the +// API version of the target resource to scale for the HorizontalPodAutoscaler. 
+func K8SHPAScaletargetrefAPIVersion(val string) attribute.KeyValue { + return K8SHPAScaletargetrefAPIVersionKey.String(val) +} + +// K8SHPAScaletargetrefKind returns an attribute KeyValue conforming to the +// "k8s.hpa.scaletargetref.kind" semantic conventions. It represents the kind of +// the target resource to scale for the HorizontalPodAutoscaler. +func K8SHPAScaletargetrefKind(val string) attribute.KeyValue { + return K8SHPAScaletargetrefKindKey.String(val) +} + +// K8SHPAScaletargetrefName returns an attribute KeyValue conforming to the +// "k8s.hpa.scaletargetref.name" semantic conventions. It represents the name of +// the target resource to scale for the HorizontalPodAutoscaler. +func K8SHPAScaletargetrefName(val string) attribute.KeyValue { + return K8SHPAScaletargetrefNameKey.String(val) +} + +// K8SHPAUID returns an attribute KeyValue conforming to the "k8s.hpa.uid" +// semantic conventions. It represents the UID of the horizontal pod autoscaler. +func K8SHPAUID(val string) attribute.KeyValue { + return K8SHPAUIDKey.String(val) +} + +// K8SHugepageSize returns an attribute KeyValue conforming to the +// "k8s.hugepage.size" semantic conventions. It represents the size (identifier) +// of the K8s huge page. +func K8SHugepageSize(val string) attribute.KeyValue { + return K8SHugepageSizeKey.String(val) +} + +// K8SJobAnnotation returns an attribute KeyValue conforming to the +// "k8s.job.annotation" semantic conventions. It represents the annotation placed +// on the Job, the `` being the annotation name, the value being the +// annotation value, even if the value is empty. +func K8SJobAnnotation(key string, val string) attribute.KeyValue { + return attribute.String("k8s.job.annotation."+key, val) +} + +// K8SJobLabel returns an attribute KeyValue conforming to the "k8s.job.label" +// semantic conventions. It represents the label placed on the Job, the `` +// being the label name, the value being the label value, even if the value is +// empty. 
+func K8SJobLabel(key string, val string) attribute.KeyValue { + return attribute.String("k8s.job.label."+key, val) +} + +// K8SJobName returns an attribute KeyValue conforming to the "k8s.job.name" +// semantic conventions. It represents the name of the Job. +func K8SJobName(val string) attribute.KeyValue { + return K8SJobNameKey.String(val) +} + +// K8SJobUID returns an attribute KeyValue conforming to the "k8s.job.uid" +// semantic conventions. It represents the UID of the Job. +func K8SJobUID(val string) attribute.KeyValue { + return K8SJobUIDKey.String(val) +} + +// K8SNamespaceAnnotation returns an attribute KeyValue conforming to the +// "k8s.namespace.annotation" semantic conventions. It represents the annotation +// placed on the Namespace, the `` being the annotation name, the value +// being the annotation value, even if the value is empty. +func K8SNamespaceAnnotation(key string, val string) attribute.KeyValue { + return attribute.String("k8s.namespace.annotation."+key, val) +} + +// K8SNamespaceLabel returns an attribute KeyValue conforming to the +// "k8s.namespace.label" semantic conventions. It represents the label placed on +// the Namespace, the `` being the label name, the value being the label +// value, even if the value is empty. +func K8SNamespaceLabel(key string, val string) attribute.KeyValue { + return attribute.String("k8s.namespace.label."+key, val) +} + +// K8SNamespaceName returns an attribute KeyValue conforming to the +// "k8s.namespace.name" semantic conventions. It represents the name of the +// namespace that the pod is running in. +func K8SNamespaceName(val string) attribute.KeyValue { + return K8SNamespaceNameKey.String(val) +} + +// K8SNodeAnnotation returns an attribute KeyValue conforming to the +// "k8s.node.annotation" semantic conventions. It represents the annotation +// placed on the Node, the `` being the annotation name, the value being the +// annotation value, even if the value is empty. 
+func K8SNodeAnnotation(key string, val string) attribute.KeyValue { + return attribute.String("k8s.node.annotation."+key, val) +} + +// K8SNodeLabel returns an attribute KeyValue conforming to the "k8s.node.label" +// semantic conventions. It represents the label placed on the Node, the `` +// being the label name, the value being the label value, even if the value is +// empty. +func K8SNodeLabel(key string, val string) attribute.KeyValue { + return attribute.String("k8s.node.label."+key, val) +} + +// K8SNodeName returns an attribute KeyValue conforming to the "k8s.node.name" +// semantic conventions. It represents the name of the Node. +func K8SNodeName(val string) attribute.KeyValue { + return K8SNodeNameKey.String(val) +} + +// K8SNodeUID returns an attribute KeyValue conforming to the "k8s.node.uid" +// semantic conventions. It represents the UID of the Node. +func K8SNodeUID(val string) attribute.KeyValue { + return K8SNodeUIDKey.String(val) +} + +// K8SPodAnnotation returns an attribute KeyValue conforming to the +// "k8s.pod.annotation" semantic conventions. It represents the annotation placed +// on the Pod, the `` being the annotation name, the value being the +// annotation value. +func K8SPodAnnotation(key string, val string) attribute.KeyValue { + return attribute.String("k8s.pod.annotation."+key, val) +} + +// K8SPodLabel returns an attribute KeyValue conforming to the "k8s.pod.label" +// semantic conventions. It represents the label placed on the Pod, the `` +// being the label name, the value being the label value. +func K8SPodLabel(key string, val string) attribute.KeyValue { + return attribute.String("k8s.pod.label."+key, val) +} + +// K8SPodName returns an attribute KeyValue conforming to the "k8s.pod.name" +// semantic conventions. It represents the name of the Pod. 
+func K8SPodName(val string) attribute.KeyValue { + return K8SPodNameKey.String(val) +} + +// K8SPodUID returns an attribute KeyValue conforming to the "k8s.pod.uid" +// semantic conventions. It represents the UID of the Pod. +func K8SPodUID(val string) attribute.KeyValue { + return K8SPodUIDKey.String(val) +} + +// K8SReplicaSetAnnotation returns an attribute KeyValue conforming to the +// "k8s.replicaset.annotation" semantic conventions. It represents the annotation +// placed on the ReplicaSet, the `` being the annotation name, the value +// being the annotation value, even if the value is empty. +func K8SReplicaSetAnnotation(key string, val string) attribute.KeyValue { + return attribute.String("k8s.replicaset.annotation."+key, val) +} + +// K8SReplicaSetLabel returns an attribute KeyValue conforming to the +// "k8s.replicaset.label" semantic conventions. It represents the label placed on +// the ReplicaSet, the `` being the label name, the value being the label +// value, even if the value is empty. +func K8SReplicaSetLabel(key string, val string) attribute.KeyValue { + return attribute.String("k8s.replicaset.label."+key, val) +} + +// K8SReplicaSetName returns an attribute KeyValue conforming to the +// "k8s.replicaset.name" semantic conventions. It represents the name of the +// ReplicaSet. +func K8SReplicaSetName(val string) attribute.KeyValue { + return K8SReplicaSetNameKey.String(val) +} + +// K8SReplicaSetUID returns an attribute KeyValue conforming to the +// "k8s.replicaset.uid" semantic conventions. It represents the UID of the +// ReplicaSet. +func K8SReplicaSetUID(val string) attribute.KeyValue { + return K8SReplicaSetUIDKey.String(val) +} + +// K8SReplicationControllerName returns an attribute KeyValue conforming to the +// "k8s.replicationcontroller.name" semantic conventions. It represents the name +// of the replication controller. 
+func K8SReplicationControllerName(val string) attribute.KeyValue { + return K8SReplicationControllerNameKey.String(val) +} + +// K8SReplicationControllerUID returns an attribute KeyValue conforming to the +// "k8s.replicationcontroller.uid" semantic conventions. It represents the UID of +// the replication controller. +func K8SReplicationControllerUID(val string) attribute.KeyValue { + return K8SReplicationControllerUIDKey.String(val) +} + +// K8SResourceQuotaName returns an attribute KeyValue conforming to the +// "k8s.resourcequota.name" semantic conventions. It represents the name of the +// resource quota. +func K8SResourceQuotaName(val string) attribute.KeyValue { + return K8SResourceQuotaNameKey.String(val) +} + +// K8SResourceQuotaResourceName returns an attribute KeyValue conforming to the +// "k8s.resourcequota.resource_name" semantic conventions. It represents the name +// of the K8s resource a resource quota defines. +func K8SResourceQuotaResourceName(val string) attribute.KeyValue { + return K8SResourceQuotaResourceNameKey.String(val) +} + +// K8SResourceQuotaUID returns an attribute KeyValue conforming to the +// "k8s.resourcequota.uid" semantic conventions. It represents the UID of the +// resource quota. +func K8SResourceQuotaUID(val string) attribute.KeyValue { + return K8SResourceQuotaUIDKey.String(val) +} + +// K8SStatefulSetAnnotation returns an attribute KeyValue conforming to the +// "k8s.statefulset.annotation" semantic conventions. It represents the +// annotation placed on the StatefulSet, the `` being the annotation name, +// the value being the annotation value, even if the value is empty. +func K8SStatefulSetAnnotation(key string, val string) attribute.KeyValue { + return attribute.String("k8s.statefulset.annotation."+key, val) +} + +// K8SStatefulSetLabel returns an attribute KeyValue conforming to the +// "k8s.statefulset.label" semantic conventions. 
It represents the label placed +// on the StatefulSet, the `` being the label name, the value being the +// label value, even if the value is empty. +func K8SStatefulSetLabel(key string, val string) attribute.KeyValue { + return attribute.String("k8s.statefulset.label."+key, val) +} + +// K8SStatefulSetName returns an attribute KeyValue conforming to the +// "k8s.statefulset.name" semantic conventions. It represents the name of the +// StatefulSet. +func K8SStatefulSetName(val string) attribute.KeyValue { + return K8SStatefulSetNameKey.String(val) +} + +// K8SStatefulSetUID returns an attribute KeyValue conforming to the +// "k8s.statefulset.uid" semantic conventions. It represents the UID of the +// StatefulSet. +func K8SStatefulSetUID(val string) attribute.KeyValue { + return K8SStatefulSetUIDKey.String(val) +} + +// K8SStorageclassName returns an attribute KeyValue conforming to the +// "k8s.storageclass.name" semantic conventions. It represents the name of K8s +// [StorageClass] object. +// +// [StorageClass]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#storageclass-v1-storage-k8s-io +func K8SStorageclassName(val string) attribute.KeyValue { + return K8SStorageclassNameKey.String(val) +} + +// K8SVolumeName returns an attribute KeyValue conforming to the +// "k8s.volume.name" semantic conventions. It represents the name of the K8s +// volume. +func K8SVolumeName(val string) attribute.KeyValue { + return K8SVolumeNameKey.String(val) +} + +// Enum values for k8s.container.status.reason +var ( + // The container is being created. + // Stability: development + K8SContainerStatusReasonContainerCreating = K8SContainerStatusReasonKey.String("ContainerCreating") + // The container is in a crash loop back off state. + // Stability: development + K8SContainerStatusReasonCrashLoopBackOff = K8SContainerStatusReasonKey.String("CrashLoopBackOff") + // There was an error creating the container configuration. 
+ // Stability: development + K8SContainerStatusReasonCreateContainerConfigError = K8SContainerStatusReasonKey.String("CreateContainerConfigError") + // There was an error pulling the container image. + // Stability: development + K8SContainerStatusReasonErrImagePull = K8SContainerStatusReasonKey.String("ErrImagePull") + // The container image pull is in back off state. + // Stability: development + K8SContainerStatusReasonImagePullBackOff = K8SContainerStatusReasonKey.String("ImagePullBackOff") + // The container was killed due to out of memory. + // Stability: development + K8SContainerStatusReasonOomKilled = K8SContainerStatusReasonKey.String("OOMKilled") + // The container has completed execution. + // Stability: development + K8SContainerStatusReasonCompleted = K8SContainerStatusReasonKey.String("Completed") + // There was an error with the container. + // Stability: development + K8SContainerStatusReasonError = K8SContainerStatusReasonKey.String("Error") + // The container cannot run. + // Stability: development + K8SContainerStatusReasonContainerCannotRun = K8SContainerStatusReasonKey.String("ContainerCannotRun") +) + +// Enum values for k8s.container.status.state +var ( + // The container has terminated. + // Stability: development + K8SContainerStatusStateTerminated = K8SContainerStatusStateKey.String("terminated") + // The container is running. + // Stability: development + K8SContainerStatusStateRunning = K8SContainerStatusStateKey.String("running") + // The container is waiting. 
+ // Stability: development + K8SContainerStatusStateWaiting = K8SContainerStatusStateKey.String("waiting") +) + +// Enum values for k8s.namespace.phase +var ( + // Active namespace phase as described by [K8s API] + // Stability: development + // + // [K8s API]: https://pkg.go.dev/k8s.io/api@v0.31.3/core/v1#NamespacePhase + K8SNamespacePhaseActive = K8SNamespacePhaseKey.String("active") + // Terminating namespace phase as described by [K8s API] + // Stability: development + // + // [K8s API]: https://pkg.go.dev/k8s.io/api@v0.31.3/core/v1#NamespacePhase + K8SNamespacePhaseTerminating = K8SNamespacePhaseKey.String("terminating") +) + +// Enum values for k8s.node.condition.status +var ( + // condition_true + // Stability: development + K8SNodeConditionStatusConditionTrue = K8SNodeConditionStatusKey.String("true") + // condition_false + // Stability: development + K8SNodeConditionStatusConditionFalse = K8SNodeConditionStatusKey.String("false") + // condition_unknown + // Stability: development + K8SNodeConditionStatusConditionUnknown = K8SNodeConditionStatusKey.String("unknown") +) + +// Enum values for k8s.node.condition.type +var ( + // The node is healthy and ready to accept pods + // Stability: development + K8SNodeConditionTypeReady = K8SNodeConditionTypeKey.String("Ready") + // Pressure exists on the disk size—that is, if the disk capacity is low + // Stability: development + K8SNodeConditionTypeDiskPressure = K8SNodeConditionTypeKey.String("DiskPressure") + // Pressure exists on the node memory—that is, if the node memory is low + // Stability: development + K8SNodeConditionTypeMemoryPressure = K8SNodeConditionTypeKey.String("MemoryPressure") + // Pressure exists on the processes—that is, if there are too many processes + // on the node + // Stability: development + K8SNodeConditionTypePIDPressure = K8SNodeConditionTypeKey.String("PIDPressure") + // The network for the node is not correctly configured + // Stability: development + 
K8SNodeConditionTypeNetworkUnavailable = K8SNodeConditionTypeKey.String("NetworkUnavailable") +) + +// Enum values for k8s.volume.type +var ( + // A [persistentVolumeClaim] volume + // Stability: development + // + // [persistentVolumeClaim]: https://v1-30.docs.kubernetes.io/docs/concepts/storage/volumes/#persistentvolumeclaim + K8SVolumeTypePersistentVolumeClaim = K8SVolumeTypeKey.String("persistentVolumeClaim") + // A [configMap] volume + // Stability: development + // + // [configMap]: https://v1-30.docs.kubernetes.io/docs/concepts/storage/volumes/#configmap + K8SVolumeTypeConfigMap = K8SVolumeTypeKey.String("configMap") + // A [downwardAPI] volume + // Stability: development + // + // [downwardAPI]: https://v1-30.docs.kubernetes.io/docs/concepts/storage/volumes/#downwardapi + K8SVolumeTypeDownwardAPI = K8SVolumeTypeKey.String("downwardAPI") + // An [emptyDir] volume + // Stability: development + // + // [emptyDir]: https://v1-30.docs.kubernetes.io/docs/concepts/storage/volumes/#emptydir + K8SVolumeTypeEmptyDir = K8SVolumeTypeKey.String("emptyDir") + // A [secret] volume + // Stability: development + // + // [secret]: https://v1-30.docs.kubernetes.io/docs/concepts/storage/volumes/#secret + K8SVolumeTypeSecret = K8SVolumeTypeKey.String("secret") + // A [local] volume + // Stability: development + // + // [local]: https://v1-30.docs.kubernetes.io/docs/concepts/storage/volumes/#local + K8SVolumeTypeLocal = K8SVolumeTypeKey.String("local") +) + +// Namespace: linux +const ( + // LinuxMemorySlabStateKey is the attribute Key conforming to the + // "linux.memory.slab.state" semantic conventions. It represents the Linux Slab + // memory state. 
+ // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "reclaimable", "unreclaimable" + LinuxMemorySlabStateKey = attribute.Key("linux.memory.slab.state") +) + +// Enum values for linux.memory.slab.state +var ( + // reclaimable + // Stability: development + LinuxMemorySlabStateReclaimable = LinuxMemorySlabStateKey.String("reclaimable") + // unreclaimable + // Stability: development + LinuxMemorySlabStateUnreclaimable = LinuxMemorySlabStateKey.String("unreclaimable") +) + +// Namespace: log +const ( + // LogFileNameKey is the attribute Key conforming to the "log.file.name" + // semantic conventions. It represents the basename of the file. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "audit.log" + LogFileNameKey = attribute.Key("log.file.name") + + // LogFileNameResolvedKey is the attribute Key conforming to the + // "log.file.name_resolved" semantic conventions. It represents the basename of + // the file, with symlinks resolved. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "uuid.log" + LogFileNameResolvedKey = attribute.Key("log.file.name_resolved") + + // LogFilePathKey is the attribute Key conforming to the "log.file.path" + // semantic conventions. It represents the full path to the file. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "/var/log/mysql/audit.log" + LogFilePathKey = attribute.Key("log.file.path") + + // LogFilePathResolvedKey is the attribute Key conforming to the + // "log.file.path_resolved" semantic conventions. It represents the full path to + // the file, with symlinks resolved. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "/var/lib/docker/uuid.log" + LogFilePathResolvedKey = attribute.Key("log.file.path_resolved") + + // LogIostreamKey is the attribute Key conforming to the "log.iostream" semantic + // conventions. It represents the stream associated with the log. See below for + // a list of well-known values. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + LogIostreamKey = attribute.Key("log.iostream") + + // LogRecordOriginalKey is the attribute Key conforming to the + // "log.record.original" semantic conventions. It represents the complete + // original Log Record. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "77 <86>1 2015-08-06T21:58:59.694Z 192.168.2.133 inactive - - - + // Something happened", "[INFO] 8/3/24 12:34:56 Something happened" + // Note: This value MAY be added when processing a Log Record which was + // originally transmitted as a string or equivalent data type AND the Body field + // of the Log Record does not contain the same value. (e.g. a syslog or a log + // record read from a file.) + LogRecordOriginalKey = attribute.Key("log.record.original") + + // LogRecordUIDKey is the attribute Key conforming to the "log.record.uid" + // semantic conventions. It represents a unique identifier for the Log Record. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "01ARZ3NDEKTSV4RRFFQ69G5FAV" + // Note: If an id is provided, other log records with the same id will be + // considered duplicates and can be removed safely. This means, that two + // distinguishable log records MUST have different values. + // The id MAY be an + // [Universally Unique Lexicographically Sortable Identifier (ULID)], but other + // identifiers (e.g. UUID) may be used as needed. 
+ // + // [Universally Unique Lexicographically Sortable Identifier (ULID)]: https://github.com/ulid/spec + LogRecordUIDKey = attribute.Key("log.record.uid") +) + +// LogFileName returns an attribute KeyValue conforming to the "log.file.name" +// semantic conventions. It represents the basename of the file. +func LogFileName(val string) attribute.KeyValue { + return LogFileNameKey.String(val) +} + +// LogFileNameResolved returns an attribute KeyValue conforming to the +// "log.file.name_resolved" semantic conventions. It represents the basename of +// the file, with symlinks resolved. +func LogFileNameResolved(val string) attribute.KeyValue { + return LogFileNameResolvedKey.String(val) +} + +// LogFilePath returns an attribute KeyValue conforming to the "log.file.path" +// semantic conventions. It represents the full path to the file. +func LogFilePath(val string) attribute.KeyValue { + return LogFilePathKey.String(val) +} + +// LogFilePathResolved returns an attribute KeyValue conforming to the +// "log.file.path_resolved" semantic conventions. It represents the full path to +// the file, with symlinks resolved. +func LogFilePathResolved(val string) attribute.KeyValue { + return LogFilePathResolvedKey.String(val) +} + +// LogRecordOriginal returns an attribute KeyValue conforming to the +// "log.record.original" semantic conventions. It represents the complete +// original Log Record. +func LogRecordOriginal(val string) attribute.KeyValue { + return LogRecordOriginalKey.String(val) +} + +// LogRecordUID returns an attribute KeyValue conforming to the "log.record.uid" +// semantic conventions. It represents a unique identifier for the Log Record. 
+func LogRecordUID(val string) attribute.KeyValue { + return LogRecordUIDKey.String(val) +} + +// Enum values for log.iostream +var ( + // Logs from stdout stream + // Stability: development + LogIostreamStdout = LogIostreamKey.String("stdout") + // Events from stderr stream + // Stability: development + LogIostreamStderr = LogIostreamKey.String("stderr") +) + +// Namespace: mainframe +const ( + // MainframeLparNameKey is the attribute Key conforming to the + // "mainframe.lpar.name" semantic conventions. It represents the name of the + // logical partition that hosts a systems with a mainframe operating system. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "LPAR01" + MainframeLparNameKey = attribute.Key("mainframe.lpar.name") +) + +// MainframeLparName returns an attribute KeyValue conforming to the +// "mainframe.lpar.name" semantic conventions. It represents the name of the +// logical partition that hosts a systems with a mainframe operating system. +func MainframeLparName(val string) attribute.KeyValue { + return MainframeLparNameKey.String(val) +} + +// Namespace: messaging +const ( + // MessagingBatchMessageCountKey is the attribute Key conforming to the + // "messaging.batch.message_count" semantic conventions. It represents the + // number of messages sent, received, or processed in the scope of the batching + // operation. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 0, 1, 2 + // Note: Instrumentations SHOULD NOT set `messaging.batch.message_count` on + // spans that operate with a single message. When a messaging client library + // supports both batch and single-message API for the same operation, + // instrumentations SHOULD use `messaging.batch.message_count` for batching APIs + // and SHOULD NOT use it for single-message APIs. 
+ MessagingBatchMessageCountKey = attribute.Key("messaging.batch.message_count") + + // MessagingClientIDKey is the attribute Key conforming to the + // "messaging.client.id" semantic conventions. It represents a unique identifier + // for the client that consumes or produces a message. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "client-5", "myhost@8742@s8083jm" + MessagingClientIDKey = attribute.Key("messaging.client.id") + + // MessagingConsumerGroupNameKey is the attribute Key conforming to the + // "messaging.consumer.group.name" semantic conventions. It represents the name + // of the consumer group with which a consumer is associated. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "my-group", "indexer" + // Note: Semantic conventions for individual messaging systems SHOULD document + // whether `messaging.consumer.group.name` is applicable and what it means in + // the context of that system. + MessagingConsumerGroupNameKey = attribute.Key("messaging.consumer.group.name") + + // MessagingDestinationAnonymousKey is the attribute Key conforming to the + // "messaging.destination.anonymous" semantic conventions. It represents a + // boolean that is true if the message destination is anonymous (could be + // unnamed or have auto-generated name). + // + // Type: boolean + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + MessagingDestinationAnonymousKey = attribute.Key("messaging.destination.anonymous") + + // MessagingDestinationNameKey is the attribute Key conforming to the + // "messaging.destination.name" semantic conventions. It represents the message + // destination name. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "MyQueue", "MyTopic" + // Note: Destination name SHOULD uniquely identify a specific queue, topic or + // other entity within the broker. If + // the broker doesn't have such notion, the destination name SHOULD uniquely + // identify the broker. + MessagingDestinationNameKey = attribute.Key("messaging.destination.name") + + // MessagingDestinationPartitionIDKey is the attribute Key conforming to the + // "messaging.destination.partition.id" semantic conventions. It represents the + // identifier of the partition messages are sent to or received from, unique + // within the `messaging.destination.name`. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 1 + MessagingDestinationPartitionIDKey = attribute.Key("messaging.destination.partition.id") + + // MessagingDestinationSubscriptionNameKey is the attribute Key conforming to + // the "messaging.destination.subscription.name" semantic conventions. It + // represents the name of the destination subscription from which a message is + // consumed. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "subscription-a" + // Note: Semantic conventions for individual messaging systems SHOULD document + // whether `messaging.destination.subscription.name` is applicable and what it + // means in the context of that system. + MessagingDestinationSubscriptionNameKey = attribute.Key("messaging.destination.subscription.name") + + // MessagingDestinationTemplateKey is the attribute Key conforming to the + // "messaging.destination.template" semantic conventions. It represents the low + // cardinality representation of the messaging destination name. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "/customers/{customerId}" + // Note: Destination names could be constructed from templates. An example would + // be a destination name involving a user name or product id. Although the + // destination name in this case is of high cardinality, the underlying template + // is of low cardinality and can be effectively used for grouping and + // aggregation. + MessagingDestinationTemplateKey = attribute.Key("messaging.destination.template") + + // MessagingDestinationTemporaryKey is the attribute Key conforming to the + // "messaging.destination.temporary" semantic conventions. It represents a + // boolean that is true if the message destination is temporary and might not + // exist anymore after messages are processed. + // + // Type: boolean + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + MessagingDestinationTemporaryKey = attribute.Key("messaging.destination.temporary") + + // MessagingEventHubsMessageEnqueuedTimeKey is the attribute Key conforming to + // the "messaging.eventhubs.message.enqueued_time" semantic conventions. It + // represents the UTC epoch seconds at which the message has been accepted and + // stored in the entity. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + MessagingEventHubsMessageEnqueuedTimeKey = attribute.Key("messaging.eventhubs.message.enqueued_time") + + // MessagingGCPPubSubMessageAckDeadlineKey is the attribute Key conforming to + // the "messaging.gcp_pubsub.message.ack_deadline" semantic conventions. It + // represents the ack deadline in seconds set for the modify ack deadline + // request. 
+ // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + MessagingGCPPubSubMessageAckDeadlineKey = attribute.Key("messaging.gcp_pubsub.message.ack_deadline") + + // MessagingGCPPubSubMessageAckIDKey is the attribute Key conforming to the + // "messaging.gcp_pubsub.message.ack_id" semantic conventions. It represents the + // ack id for a given message. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: ack_id + MessagingGCPPubSubMessageAckIDKey = attribute.Key("messaging.gcp_pubsub.message.ack_id") + + // MessagingGCPPubSubMessageDeliveryAttemptKey is the attribute Key conforming + // to the "messaging.gcp_pubsub.message.delivery_attempt" semantic conventions. + // It represents the delivery attempt for a given message. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + MessagingGCPPubSubMessageDeliveryAttemptKey = attribute.Key("messaging.gcp_pubsub.message.delivery_attempt") + + // MessagingGCPPubSubMessageOrderingKeyKey is the attribute Key conforming to + // the "messaging.gcp_pubsub.message.ordering_key" semantic conventions. It + // represents the ordering key for a given message. If the attribute is not + // present, the message does not have an ordering key. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: ordering_key + MessagingGCPPubSubMessageOrderingKeyKey = attribute.Key("messaging.gcp_pubsub.message.ordering_key") + + // MessagingKafkaMessageKeyKey is the attribute Key conforming to the + // "messaging.kafka.message.key" semantic conventions. It represents the message + // keys in Kafka are used for grouping alike messages to ensure they're + // processed on the same partition. They differ from `messaging.message.id` in + // that they're not unique. If the key is `null`, the attribute MUST NOT be set. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: myKey + // Note: If the key type is not string, it's string representation has to be + // supplied for the attribute. If the key has no unambiguous, canonical string + // form, don't include its value. + MessagingKafkaMessageKeyKey = attribute.Key("messaging.kafka.message.key") + + // MessagingKafkaMessageTombstoneKey is the attribute Key conforming to the + // "messaging.kafka.message.tombstone" semantic conventions. It represents a + // boolean that is true if the message is a tombstone. + // + // Type: boolean + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + MessagingKafkaMessageTombstoneKey = attribute.Key("messaging.kafka.message.tombstone") + + // MessagingKafkaOffsetKey is the attribute Key conforming to the + // "messaging.kafka.offset" semantic conventions. It represents the offset of a + // record in the corresponding Kafka partition. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + MessagingKafkaOffsetKey = attribute.Key("messaging.kafka.offset") + + // MessagingMessageBodySizeKey is the attribute Key conforming to the + // "messaging.message.body.size" semantic conventions. It represents the size of + // the message body in bytes. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Note: This can refer to both the compressed or uncompressed body size. If + // both sizes are known, the uncompressed + // body size should be used. + MessagingMessageBodySizeKey = attribute.Key("messaging.message.body.size") + + // MessagingMessageConversationIDKey is the attribute Key conforming to the + // "messaging.message.conversation_id" semantic conventions. It represents the + // conversation ID identifying the conversation to which the message belongs, + // represented as a string. Sometimes called "Correlation ID". 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: MyConversationId + MessagingMessageConversationIDKey = attribute.Key("messaging.message.conversation_id") + + // MessagingMessageEnvelopeSizeKey is the attribute Key conforming to the + // "messaging.message.envelope.size" semantic conventions. It represents the + // size of the message body and metadata in bytes. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Note: This can refer to both the compressed or uncompressed size. If both + // sizes are known, the uncompressed + // size should be used. + MessagingMessageEnvelopeSizeKey = attribute.Key("messaging.message.envelope.size") + + // MessagingMessageIDKey is the attribute Key conforming to the + // "messaging.message.id" semantic conventions. It represents a value used by + // the messaging system as an identifier for the message, represented as a + // string. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 452a7c7c7c7048c2f887f61572b18fc2 + MessagingMessageIDKey = attribute.Key("messaging.message.id") + + // MessagingOperationNameKey is the attribute Key conforming to the + // "messaging.operation.name" semantic conventions. It represents the + // system-specific name of the messaging operation. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "ack", "nack", "send" + MessagingOperationNameKey = attribute.Key("messaging.operation.name") + + // MessagingOperationTypeKey is the attribute Key conforming to the + // "messaging.operation.type" semantic conventions. It represents a string + // identifying the type of the messaging operation. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // Note: If a custom value is used, it MUST be of low cardinality. 
+ MessagingOperationTypeKey = attribute.Key("messaging.operation.type") + + // MessagingRabbitMQDestinationRoutingKeyKey is the attribute Key conforming to + // the "messaging.rabbitmq.destination.routing_key" semantic conventions. It + // represents the rabbitMQ message routing key. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: myKey + MessagingRabbitMQDestinationRoutingKeyKey = attribute.Key("messaging.rabbitmq.destination.routing_key") + + // MessagingRabbitMQMessageDeliveryTagKey is the attribute Key conforming to the + // "messaging.rabbitmq.message.delivery_tag" semantic conventions. It represents + // the rabbitMQ message delivery tag. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + MessagingRabbitMQMessageDeliveryTagKey = attribute.Key("messaging.rabbitmq.message.delivery_tag") + + // MessagingRocketMQConsumptionModelKey is the attribute Key conforming to the + // "messaging.rocketmq.consumption_model" semantic conventions. It represents + // the model of message consumption. This only applies to consumer spans. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + MessagingRocketMQConsumptionModelKey = attribute.Key("messaging.rocketmq.consumption_model") + + // MessagingRocketMQMessageDelayTimeLevelKey is the attribute Key conforming to + // the "messaging.rocketmq.message.delay_time_level" semantic conventions. It + // represents the delay time level for delay message, which determines the + // message delay time. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + MessagingRocketMQMessageDelayTimeLevelKey = attribute.Key("messaging.rocketmq.message.delay_time_level") + + // MessagingRocketMQMessageDeliveryTimestampKey is the attribute Key conforming + // to the "messaging.rocketmq.message.delivery_timestamp" semantic conventions. 
+ // It represents the timestamp in milliseconds that the delay message is + // expected to be delivered to consumer. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + MessagingRocketMQMessageDeliveryTimestampKey = attribute.Key("messaging.rocketmq.message.delivery_timestamp") + + // MessagingRocketMQMessageGroupKey is the attribute Key conforming to the + // "messaging.rocketmq.message.group" semantic conventions. It represents the it + // is essential for FIFO message. Messages that belong to the same message group + // are always processed one by one within the same consumer group. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: myMessageGroup + MessagingRocketMQMessageGroupKey = attribute.Key("messaging.rocketmq.message.group") + + // MessagingRocketMQMessageKeysKey is the attribute Key conforming to the + // "messaging.rocketmq.message.keys" semantic conventions. It represents the + // key(s) of message, another way to mark message besides message id. + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "keyA", "keyB" + MessagingRocketMQMessageKeysKey = attribute.Key("messaging.rocketmq.message.keys") + + // MessagingRocketMQMessageTagKey is the attribute Key conforming to the + // "messaging.rocketmq.message.tag" semantic conventions. It represents the + // secondary classifier of message besides topic. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: tagA + MessagingRocketMQMessageTagKey = attribute.Key("messaging.rocketmq.message.tag") + + // MessagingRocketMQMessageTypeKey is the attribute Key conforming to the + // "messaging.rocketmq.message.type" semantic conventions. It represents the + // type of message. 
+ // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + MessagingRocketMQMessageTypeKey = attribute.Key("messaging.rocketmq.message.type") + + // MessagingRocketMQNamespaceKey is the attribute Key conforming to the + // "messaging.rocketmq.namespace" semantic conventions. It represents the + // namespace of RocketMQ resources, resources in different namespaces are + // individual. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: myNamespace + MessagingRocketMQNamespaceKey = attribute.Key("messaging.rocketmq.namespace") + + // MessagingServiceBusDispositionStatusKey is the attribute Key conforming to + // the "messaging.servicebus.disposition_status" semantic conventions. It + // represents the describes the [settlement type]. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // + // [settlement type]: https://learn.microsoft.com/azure/service-bus-messaging/message-transfers-locks-settlement#peeklock + MessagingServiceBusDispositionStatusKey = attribute.Key("messaging.servicebus.disposition_status") + + // MessagingServiceBusMessageDeliveryCountKey is the attribute Key conforming to + // the "messaging.servicebus.message.delivery_count" semantic conventions. It + // represents the number of deliveries that have been attempted for this + // message. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + MessagingServiceBusMessageDeliveryCountKey = attribute.Key("messaging.servicebus.message.delivery_count") + + // MessagingServiceBusMessageEnqueuedTimeKey is the attribute Key conforming to + // the "messaging.servicebus.message.enqueued_time" semantic conventions. It + // represents the UTC epoch seconds at which the message has been accepted and + // stored in the entity. 
+ // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + MessagingServiceBusMessageEnqueuedTimeKey = attribute.Key("messaging.servicebus.message.enqueued_time") + + // MessagingSystemKey is the attribute Key conforming to the "messaging.system" + // semantic conventions. It represents the messaging system as identified by the + // client instrumentation. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // Note: The actual messaging system may differ from the one known by the + // client. For example, when using Kafka client libraries to communicate with + // Azure Event Hubs, the `messaging.system` is set to `kafka` based on the + // instrumentation's best knowledge. + MessagingSystemKey = attribute.Key("messaging.system") +) + +// MessagingBatchMessageCount returns an attribute KeyValue conforming to the +// "messaging.batch.message_count" semantic conventions. It represents the number +// of messages sent, received, or processed in the scope of the batching +// operation. +func MessagingBatchMessageCount(val int) attribute.KeyValue { + return MessagingBatchMessageCountKey.Int(val) +} + +// MessagingClientID returns an attribute KeyValue conforming to the +// "messaging.client.id" semantic conventions. It represents a unique identifier +// for the client that consumes or produces a message. +func MessagingClientID(val string) attribute.KeyValue { + return MessagingClientIDKey.String(val) +} + +// MessagingConsumerGroupName returns an attribute KeyValue conforming to the +// "messaging.consumer.group.name" semantic conventions. It represents the name +// of the consumer group with which a consumer is associated. +func MessagingConsumerGroupName(val string) attribute.KeyValue { + return MessagingConsumerGroupNameKey.String(val) +} + +// MessagingDestinationAnonymous returns an attribute KeyValue conforming to the +// "messaging.destination.anonymous" semantic conventions. 
It represents a +// boolean that is true if the message destination is anonymous (could be unnamed +// or have auto-generated name). +func MessagingDestinationAnonymous(val bool) attribute.KeyValue { + return MessagingDestinationAnonymousKey.Bool(val) +} + +// MessagingDestinationName returns an attribute KeyValue conforming to the +// "messaging.destination.name" semantic conventions. It represents the message +// destination name. +func MessagingDestinationName(val string) attribute.KeyValue { + return MessagingDestinationNameKey.String(val) +} + +// MessagingDestinationPartitionID returns an attribute KeyValue conforming to +// the "messaging.destination.partition.id" semantic conventions. It represents +// the identifier of the partition messages are sent to or received from, unique +// within the `messaging.destination.name`. +func MessagingDestinationPartitionID(val string) attribute.KeyValue { + return MessagingDestinationPartitionIDKey.String(val) +} + +// MessagingDestinationSubscriptionName returns an attribute KeyValue conforming +// to the "messaging.destination.subscription.name" semantic conventions. It +// represents the name of the destination subscription from which a message is +// consumed. +func MessagingDestinationSubscriptionName(val string) attribute.KeyValue { + return MessagingDestinationSubscriptionNameKey.String(val) +} + +// MessagingDestinationTemplate returns an attribute KeyValue conforming to the +// "messaging.destination.template" semantic conventions. It represents the low +// cardinality representation of the messaging destination name. +func MessagingDestinationTemplate(val string) attribute.KeyValue { + return MessagingDestinationTemplateKey.String(val) +} + +// MessagingDestinationTemporary returns an attribute KeyValue conforming to the +// "messaging.destination.temporary" semantic conventions. 
It represents a +// boolean that is true if the message destination is temporary and might not +// exist anymore after messages are processed. +func MessagingDestinationTemporary(val bool) attribute.KeyValue { + return MessagingDestinationTemporaryKey.Bool(val) +} + +// MessagingEventHubsMessageEnqueuedTime returns an attribute KeyValue conforming +// to the "messaging.eventhubs.message.enqueued_time" semantic conventions. It +// represents the UTC epoch seconds at which the message has been accepted and +// stored in the entity. +func MessagingEventHubsMessageEnqueuedTime(val int) attribute.KeyValue { + return MessagingEventHubsMessageEnqueuedTimeKey.Int(val) +} + +// MessagingGCPPubSubMessageAckDeadline returns an attribute KeyValue conforming +// to the "messaging.gcp_pubsub.message.ack_deadline" semantic conventions. It +// represents the ack deadline in seconds set for the modify ack deadline +// request. +func MessagingGCPPubSubMessageAckDeadline(val int) attribute.KeyValue { + return MessagingGCPPubSubMessageAckDeadlineKey.Int(val) +} + +// MessagingGCPPubSubMessageAckID returns an attribute KeyValue conforming to the +// "messaging.gcp_pubsub.message.ack_id" semantic conventions. It represents the +// ack id for a given message. +func MessagingGCPPubSubMessageAckID(val string) attribute.KeyValue { + return MessagingGCPPubSubMessageAckIDKey.String(val) +} + +// MessagingGCPPubSubMessageDeliveryAttempt returns an attribute KeyValue +// conforming to the "messaging.gcp_pubsub.message.delivery_attempt" semantic +// conventions. It represents the delivery attempt for a given message. +func MessagingGCPPubSubMessageDeliveryAttempt(val int) attribute.KeyValue { + return MessagingGCPPubSubMessageDeliveryAttemptKey.Int(val) +} + +// MessagingGCPPubSubMessageOrderingKey returns an attribute KeyValue conforming +// to the "messaging.gcp_pubsub.message.ordering_key" semantic conventions. It +// represents the ordering key for a given message. 
If the attribute is not +// present, the message does not have an ordering key. +func MessagingGCPPubSubMessageOrderingKey(val string) attribute.KeyValue { + return MessagingGCPPubSubMessageOrderingKeyKey.String(val) +} + +// MessagingKafkaMessageKey returns an attribute KeyValue conforming to the +// "messaging.kafka.message.key" semantic conventions. It represents the message +// keys in Kafka are used for grouping alike messages to ensure they're processed +// on the same partition. They differ from `messaging.message.id` in that they're +// not unique. If the key is `null`, the attribute MUST NOT be set. +func MessagingKafkaMessageKey(val string) attribute.KeyValue { + return MessagingKafkaMessageKeyKey.String(val) +} + +// MessagingKafkaMessageTombstone returns an attribute KeyValue conforming to the +// "messaging.kafka.message.tombstone" semantic conventions. It represents a +// boolean that is true if the message is a tombstone. +func MessagingKafkaMessageTombstone(val bool) attribute.KeyValue { + return MessagingKafkaMessageTombstoneKey.Bool(val) +} + +// MessagingKafkaOffset returns an attribute KeyValue conforming to the +// "messaging.kafka.offset" semantic conventions. It represents the offset of a +// record in the corresponding Kafka partition. +func MessagingKafkaOffset(val int) attribute.KeyValue { + return MessagingKafkaOffsetKey.Int(val) +} + +// MessagingMessageBodySize returns an attribute KeyValue conforming to the +// "messaging.message.body.size" semantic conventions. It represents the size of +// the message body in bytes. +func MessagingMessageBodySize(val int) attribute.KeyValue { + return MessagingMessageBodySizeKey.Int(val) +} + +// MessagingMessageConversationID returns an attribute KeyValue conforming to the +// "messaging.message.conversation_id" semantic conventions. It represents the +// conversation ID identifying the conversation to which the message belongs, +// represented as a string. Sometimes called "Correlation ID". 
+func MessagingMessageConversationID(val string) attribute.KeyValue { + return MessagingMessageConversationIDKey.String(val) +} + +// MessagingMessageEnvelopeSize returns an attribute KeyValue conforming to the +// "messaging.message.envelope.size" semantic conventions. It represents the size +// of the message body and metadata in bytes. +func MessagingMessageEnvelopeSize(val int) attribute.KeyValue { + return MessagingMessageEnvelopeSizeKey.Int(val) +} + +// MessagingMessageID returns an attribute KeyValue conforming to the +// "messaging.message.id" semantic conventions. It represents a value used by the +// messaging system as an identifier for the message, represented as a string. +func MessagingMessageID(val string) attribute.KeyValue { + return MessagingMessageIDKey.String(val) +} + +// MessagingOperationName returns an attribute KeyValue conforming to the +// "messaging.operation.name" semantic conventions. It represents the +// system-specific name of the messaging operation. +func MessagingOperationName(val string) attribute.KeyValue { + return MessagingOperationNameKey.String(val) +} + +// MessagingRabbitMQDestinationRoutingKey returns an attribute KeyValue +// conforming to the "messaging.rabbitmq.destination.routing_key" semantic +// conventions. It represents the rabbitMQ message routing key. +func MessagingRabbitMQDestinationRoutingKey(val string) attribute.KeyValue { + return MessagingRabbitMQDestinationRoutingKeyKey.String(val) +} + +// MessagingRabbitMQMessageDeliveryTag returns an attribute KeyValue conforming +// to the "messaging.rabbitmq.message.delivery_tag" semantic conventions. It +// represents the rabbitMQ message delivery tag. +func MessagingRabbitMQMessageDeliveryTag(val int) attribute.KeyValue { + return MessagingRabbitMQMessageDeliveryTagKey.Int(val) +} + +// MessagingRocketMQMessageDelayTimeLevel returns an attribute KeyValue +// conforming to the "messaging.rocketmq.message.delay_time_level" semantic +// conventions. 
It represents the delay time level for delay message, which +// determines the message delay time. +func MessagingRocketMQMessageDelayTimeLevel(val int) attribute.KeyValue { + return MessagingRocketMQMessageDelayTimeLevelKey.Int(val) +} + +// MessagingRocketMQMessageDeliveryTimestamp returns an attribute KeyValue +// conforming to the "messaging.rocketmq.message.delivery_timestamp" semantic +// conventions. It represents the timestamp in milliseconds that the delay +// message is expected to be delivered to consumer. +func MessagingRocketMQMessageDeliveryTimestamp(val int) attribute.KeyValue { + return MessagingRocketMQMessageDeliveryTimestampKey.Int(val) +} + +// MessagingRocketMQMessageGroup returns an attribute KeyValue conforming to the +// "messaging.rocketmq.message.group" semantic conventions. It represents the it +// is essential for FIFO message. Messages that belong to the same message group +// are always processed one by one within the same consumer group. +func MessagingRocketMQMessageGroup(val string) attribute.KeyValue { + return MessagingRocketMQMessageGroupKey.String(val) +} + +// MessagingRocketMQMessageKeys returns an attribute KeyValue conforming to the +// "messaging.rocketmq.message.keys" semantic conventions. It represents the +// key(s) of message, another way to mark message besides message id. +func MessagingRocketMQMessageKeys(val ...string) attribute.KeyValue { + return MessagingRocketMQMessageKeysKey.StringSlice(val) +} + +// MessagingRocketMQMessageTag returns an attribute KeyValue conforming to the +// "messaging.rocketmq.message.tag" semantic conventions. It represents the +// secondary classifier of message besides topic. +func MessagingRocketMQMessageTag(val string) attribute.KeyValue { + return MessagingRocketMQMessageTagKey.String(val) +} + +// MessagingRocketMQNamespace returns an attribute KeyValue conforming to the +// "messaging.rocketmq.namespace" semantic conventions. 
It represents the +// namespace of RocketMQ resources, resources in different namespaces are +// individual. +func MessagingRocketMQNamespace(val string) attribute.KeyValue { + return MessagingRocketMQNamespaceKey.String(val) +} + +// MessagingServiceBusMessageDeliveryCount returns an attribute KeyValue +// conforming to the "messaging.servicebus.message.delivery_count" semantic +// conventions. It represents the number of deliveries that have been attempted +// for this message. +func MessagingServiceBusMessageDeliveryCount(val int) attribute.KeyValue { + return MessagingServiceBusMessageDeliveryCountKey.Int(val) +} + +// MessagingServiceBusMessageEnqueuedTime returns an attribute KeyValue +// conforming to the "messaging.servicebus.message.enqueued_time" semantic +// conventions. It represents the UTC epoch seconds at which the message has been +// accepted and stored in the entity. +func MessagingServiceBusMessageEnqueuedTime(val int) attribute.KeyValue { + return MessagingServiceBusMessageEnqueuedTimeKey.Int(val) +} + +// Enum values for messaging.operation.type +var ( + // A message is created. "Create" spans always refer to a single message and are + // used to provide a unique creation context for messages in batch sending + // scenarios. + // + // Stability: development + MessagingOperationTypeCreate = MessagingOperationTypeKey.String("create") + // One or more messages are provided for sending to an intermediary. If a single + // message is sent, the context of the "Send" span can be used as the creation + // context and no "Create" span needs to be created. + // + // Stability: development + MessagingOperationTypeSend = MessagingOperationTypeKey.String("send") + // One or more messages are requested by a consumer. This operation refers to + // pull-based scenarios, where consumers explicitly call methods of messaging + // SDKs to receive messages. 
+ // + // Stability: development + MessagingOperationTypeReceive = MessagingOperationTypeKey.String("receive") + // One or more messages are processed by a consumer. + // + // Stability: development + MessagingOperationTypeProcess = MessagingOperationTypeKey.String("process") + // One or more messages are settled. + // + // Stability: development + MessagingOperationTypeSettle = MessagingOperationTypeKey.String("settle") +) + +// Enum values for messaging.rocketmq.consumption_model +var ( + // Clustering consumption model + // Stability: development + MessagingRocketMQConsumptionModelClustering = MessagingRocketMQConsumptionModelKey.String("clustering") + // Broadcasting consumption model + // Stability: development + MessagingRocketMQConsumptionModelBroadcasting = MessagingRocketMQConsumptionModelKey.String("broadcasting") +) + +// Enum values for messaging.rocketmq.message.type +var ( + // Normal message + // Stability: development + MessagingRocketMQMessageTypeNormal = MessagingRocketMQMessageTypeKey.String("normal") + // FIFO message + // Stability: development + MessagingRocketMQMessageTypeFifo = MessagingRocketMQMessageTypeKey.String("fifo") + // Delay message + // Stability: development + MessagingRocketMQMessageTypeDelay = MessagingRocketMQMessageTypeKey.String("delay") + // Transaction message + // Stability: development + MessagingRocketMQMessageTypeTransaction = MessagingRocketMQMessageTypeKey.String("transaction") +) + +// Enum values for messaging.servicebus.disposition_status +var ( + // Message is completed + // Stability: development + MessagingServiceBusDispositionStatusComplete = MessagingServiceBusDispositionStatusKey.String("complete") + // Message is abandoned + // Stability: development + MessagingServiceBusDispositionStatusAbandon = MessagingServiceBusDispositionStatusKey.String("abandon") + // Message is sent to dead letter queue + // Stability: development + MessagingServiceBusDispositionStatusDeadLetter = 
MessagingServiceBusDispositionStatusKey.String("dead_letter") + // Message is deferred + // Stability: development + MessagingServiceBusDispositionStatusDefer = MessagingServiceBusDispositionStatusKey.String("defer") +) + +// Enum values for messaging.system +var ( + // Apache ActiveMQ + // Stability: development + MessagingSystemActiveMQ = MessagingSystemKey.String("activemq") + // Amazon Simple Notification Service (SNS) + // Stability: development + MessagingSystemAWSSNS = MessagingSystemKey.String("aws.sns") + // Amazon Simple Queue Service (SQS) + // Stability: development + MessagingSystemAWSSQS = MessagingSystemKey.String("aws_sqs") + // Azure Event Grid + // Stability: development + MessagingSystemEventGrid = MessagingSystemKey.String("eventgrid") + // Azure Event Hubs + // Stability: development + MessagingSystemEventHubs = MessagingSystemKey.String("eventhubs") + // Azure Service Bus + // Stability: development + MessagingSystemServiceBus = MessagingSystemKey.String("servicebus") + // Google Cloud Pub/Sub + // Stability: development + MessagingSystemGCPPubSub = MessagingSystemKey.String("gcp_pubsub") + // Java Message Service + // Stability: development + MessagingSystemJMS = MessagingSystemKey.String("jms") + // Apache Kafka + // Stability: development + MessagingSystemKafka = MessagingSystemKey.String("kafka") + // RabbitMQ + // Stability: development + MessagingSystemRabbitMQ = MessagingSystemKey.String("rabbitmq") + // Apache RocketMQ + // Stability: development + MessagingSystemRocketMQ = MessagingSystemKey.String("rocketmq") + // Apache Pulsar + // Stability: development + MessagingSystemPulsar = MessagingSystemKey.String("pulsar") +) + +// Namespace: network +const ( + // NetworkCarrierICCKey is the attribute Key conforming to the + // "network.carrier.icc" semantic conventions. It represents the ISO 3166-1 + // alpha-2 2-character country code associated with the mobile carrier network. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: DE + NetworkCarrierICCKey = attribute.Key("network.carrier.icc") + + // NetworkCarrierMCCKey is the attribute Key conforming to the + // "network.carrier.mcc" semantic conventions. It represents the mobile carrier + // country code. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 310 + NetworkCarrierMCCKey = attribute.Key("network.carrier.mcc") + + // NetworkCarrierMNCKey is the attribute Key conforming to the + // "network.carrier.mnc" semantic conventions. It represents the mobile carrier + // network code. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 001 + NetworkCarrierMNCKey = attribute.Key("network.carrier.mnc") + + // NetworkCarrierNameKey is the attribute Key conforming to the + // "network.carrier.name" semantic conventions. It represents the name of the + // mobile carrier. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: sprint + NetworkCarrierNameKey = attribute.Key("network.carrier.name") + + // NetworkConnectionStateKey is the attribute Key conforming to the + // "network.connection.state" semantic conventions. It represents the state of + // network connection. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "close_wait" + // Note: Connection states are defined as part of the [rfc9293] + // + // [rfc9293]: https://datatracker.ietf.org/doc/html/rfc9293#section-3.3.2 + NetworkConnectionStateKey = attribute.Key("network.connection.state") + + // NetworkConnectionSubtypeKey is the attribute Key conforming to the + // "network.connection.subtype" semantic conventions. It represents the this + // describes more details regarding the connection.type. 
It may be the type of + // cell technology connection, but it could be used for describing details about + // a wifi connection. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: LTE + NetworkConnectionSubtypeKey = attribute.Key("network.connection.subtype") + + // NetworkConnectionTypeKey is the attribute Key conforming to the + // "network.connection.type" semantic conventions. It represents the internet + // connection type. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: wifi + NetworkConnectionTypeKey = attribute.Key("network.connection.type") + + // NetworkInterfaceNameKey is the attribute Key conforming to the + // "network.interface.name" semantic conventions. It represents the network + // interface name. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "lo", "eth0" + NetworkInterfaceNameKey = attribute.Key("network.interface.name") + + // NetworkIODirectionKey is the attribute Key conforming to the + // "network.io.direction" semantic conventions. It represents the network IO + // operation direction. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "transmit" + NetworkIODirectionKey = attribute.Key("network.io.direction") + + // NetworkLocalAddressKey is the attribute Key conforming to the + // "network.local.address" semantic conventions. It represents the local address + // of the network connection - IP address or Unix domain socket name. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "10.1.2.80", "/tmp/my.sock" + NetworkLocalAddressKey = attribute.Key("network.local.address") + + // NetworkLocalPortKey is the attribute Key conforming to the + // "network.local.port" semantic conventions. It represents the local port + // number of the network connection. 
+ // + // Type: int + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: 65123 + NetworkLocalPortKey = attribute.Key("network.local.port") + + // NetworkPeerAddressKey is the attribute Key conforming to the + // "network.peer.address" semantic conventions. It represents the peer address + // of the network connection - IP address or Unix domain socket name. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "10.1.2.80", "/tmp/my.sock" + NetworkPeerAddressKey = attribute.Key("network.peer.address") + + // NetworkPeerPortKey is the attribute Key conforming to the "network.peer.port" + // semantic conventions. It represents the peer port number of the network + // connection. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: 65123 + NetworkPeerPortKey = attribute.Key("network.peer.port") + + // NetworkProtocolNameKey is the attribute Key conforming to the + // "network.protocol.name" semantic conventions. It represents the + // [OSI application layer] or non-OSI equivalent. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "amqp", "http", "mqtt" + // Note: The value SHOULD be normalized to lowercase. + // + // [OSI application layer]: https://wikipedia.org/wiki/Application_layer + NetworkProtocolNameKey = attribute.Key("network.protocol.name") + + // NetworkProtocolVersionKey is the attribute Key conforming to the + // "network.protocol.version" semantic conventions. It represents the actual + // version of the protocol used for network communication. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "1.1", "2" + // Note: If protocol version is subject to negotiation (for example using [ALPN] + // ), this attribute SHOULD be set to the negotiated version. If the actual + // protocol version is not known, this attribute SHOULD NOT be set. 
+ // + // [ALPN]: https://www.rfc-editor.org/rfc/rfc7301.html + NetworkProtocolVersionKey = attribute.Key("network.protocol.version") + + // NetworkTransportKey is the attribute Key conforming to the + // "network.transport" semantic conventions. It represents the + // [OSI transport layer] or [inter-process communication method]. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "tcp", "udp" + // Note: The value SHOULD be normalized to lowercase. + // + // Consider always setting the transport when setting a port number, since + // a port number is ambiguous without knowing the transport. For example + // different processes could be listening on TCP port 12345 and UDP port 12345. + // + // [OSI transport layer]: https://wikipedia.org/wiki/Transport_layer + // [inter-process communication method]: https://wikipedia.org/wiki/Inter-process_communication + NetworkTransportKey = attribute.Key("network.transport") + + // NetworkTypeKey is the attribute Key conforming to the "network.type" semantic + // conventions. It represents the [OSI network layer] or non-OSI equivalent. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "ipv4", "ipv6" + // Note: The value SHOULD be normalized to lowercase. + // + // [OSI network layer]: https://wikipedia.org/wiki/Network_layer + NetworkTypeKey = attribute.Key("network.type") +) + +// NetworkCarrierICC returns an attribute KeyValue conforming to the +// "network.carrier.icc" semantic conventions. It represents the ISO 3166-1 +// alpha-2 2-character country code associated with the mobile carrier network. +func NetworkCarrierICC(val string) attribute.KeyValue { + return NetworkCarrierICCKey.String(val) +} + +// NetworkCarrierMCC returns an attribute KeyValue conforming to the +// "network.carrier.mcc" semantic conventions. It represents the mobile carrier +// country code. 
+func NetworkCarrierMCC(val string) attribute.KeyValue { + return NetworkCarrierMCCKey.String(val) +} + +// NetworkCarrierMNC returns an attribute KeyValue conforming to the +// "network.carrier.mnc" semantic conventions. It represents the mobile carrier +// network code. +func NetworkCarrierMNC(val string) attribute.KeyValue { + return NetworkCarrierMNCKey.String(val) +} + +// NetworkCarrierName returns an attribute KeyValue conforming to the +// "network.carrier.name" semantic conventions. It represents the name of the +// mobile carrier. +func NetworkCarrierName(val string) attribute.KeyValue { + return NetworkCarrierNameKey.String(val) +} + +// NetworkInterfaceName returns an attribute KeyValue conforming to the +// "network.interface.name" semantic conventions. It represents the network +// interface name. +func NetworkInterfaceName(val string) attribute.KeyValue { + return NetworkInterfaceNameKey.String(val) +} + +// NetworkLocalAddress returns an attribute KeyValue conforming to the +// "network.local.address" semantic conventions. It represents the local address +// of the network connection - IP address or Unix domain socket name. +func NetworkLocalAddress(val string) attribute.KeyValue { + return NetworkLocalAddressKey.String(val) +} + +// NetworkLocalPort returns an attribute KeyValue conforming to the +// "network.local.port" semantic conventions. It represents the local port number +// of the network connection. +func NetworkLocalPort(val int) attribute.KeyValue { + return NetworkLocalPortKey.Int(val) +} + +// NetworkPeerAddress returns an attribute KeyValue conforming to the +// "network.peer.address" semantic conventions. It represents the peer address of +// the network connection - IP address or Unix domain socket name. +func NetworkPeerAddress(val string) attribute.KeyValue { + return NetworkPeerAddressKey.String(val) +} + +// NetworkPeerPort returns an attribute KeyValue conforming to the +// "network.peer.port" semantic conventions. 
It represents the peer port number +// of the network connection. +func NetworkPeerPort(val int) attribute.KeyValue { + return NetworkPeerPortKey.Int(val) +} + +// NetworkProtocolName returns an attribute KeyValue conforming to the +// "network.protocol.name" semantic conventions. It represents the +// [OSI application layer] or non-OSI equivalent. +// +// [OSI application layer]: https://wikipedia.org/wiki/Application_layer +func NetworkProtocolName(val string) attribute.KeyValue { + return NetworkProtocolNameKey.String(val) +} + +// NetworkProtocolVersion returns an attribute KeyValue conforming to the +// "network.protocol.version" semantic conventions. It represents the actual +// version of the protocol used for network communication. +func NetworkProtocolVersion(val string) attribute.KeyValue { + return NetworkProtocolVersionKey.String(val) +} + +// Enum values for network.connection.state +var ( + // closed + // Stability: development + NetworkConnectionStateClosed = NetworkConnectionStateKey.String("closed") + // close_wait + // Stability: development + NetworkConnectionStateCloseWait = NetworkConnectionStateKey.String("close_wait") + // closing + // Stability: development + NetworkConnectionStateClosing = NetworkConnectionStateKey.String("closing") + // established + // Stability: development + NetworkConnectionStateEstablished = NetworkConnectionStateKey.String("established") + // fin_wait_1 + // Stability: development + NetworkConnectionStateFinWait1 = NetworkConnectionStateKey.String("fin_wait_1") + // fin_wait_2 + // Stability: development + NetworkConnectionStateFinWait2 = NetworkConnectionStateKey.String("fin_wait_2") + // last_ack + // Stability: development + NetworkConnectionStateLastAck = NetworkConnectionStateKey.String("last_ack") + // listen + // Stability: development + NetworkConnectionStateListen = NetworkConnectionStateKey.String("listen") + // syn_received + // Stability: development + NetworkConnectionStateSynReceived = 
NetworkConnectionStateKey.String("syn_received") + // syn_sent + // Stability: development + NetworkConnectionStateSynSent = NetworkConnectionStateKey.String("syn_sent") + // time_wait + // Stability: development + NetworkConnectionStateTimeWait = NetworkConnectionStateKey.String("time_wait") +) + +// Enum values for network.connection.subtype +var ( + // GPRS + // Stability: development + NetworkConnectionSubtypeGprs = NetworkConnectionSubtypeKey.String("gprs") + // EDGE + // Stability: development + NetworkConnectionSubtypeEdge = NetworkConnectionSubtypeKey.String("edge") + // UMTS + // Stability: development + NetworkConnectionSubtypeUmts = NetworkConnectionSubtypeKey.String("umts") + // CDMA + // Stability: development + NetworkConnectionSubtypeCdma = NetworkConnectionSubtypeKey.String("cdma") + // EVDO Rel. 0 + // Stability: development + NetworkConnectionSubtypeEvdo0 = NetworkConnectionSubtypeKey.String("evdo_0") + // EVDO Rev. A + // Stability: development + NetworkConnectionSubtypeEvdoA = NetworkConnectionSubtypeKey.String("evdo_a") + // CDMA2000 1XRTT + // Stability: development + NetworkConnectionSubtypeCdma20001xrtt = NetworkConnectionSubtypeKey.String("cdma2000_1xrtt") + // HSDPA + // Stability: development + NetworkConnectionSubtypeHsdpa = NetworkConnectionSubtypeKey.String("hsdpa") + // HSUPA + // Stability: development + NetworkConnectionSubtypeHsupa = NetworkConnectionSubtypeKey.String("hsupa") + // HSPA + // Stability: development + NetworkConnectionSubtypeHspa = NetworkConnectionSubtypeKey.String("hspa") + // IDEN + // Stability: development + NetworkConnectionSubtypeIden = NetworkConnectionSubtypeKey.String("iden") + // EVDO Rev. 
B + // Stability: development + NetworkConnectionSubtypeEvdoB = NetworkConnectionSubtypeKey.String("evdo_b") + // LTE + // Stability: development + NetworkConnectionSubtypeLte = NetworkConnectionSubtypeKey.String("lte") + // EHRPD + // Stability: development + NetworkConnectionSubtypeEhrpd = NetworkConnectionSubtypeKey.String("ehrpd") + // HSPAP + // Stability: development + NetworkConnectionSubtypeHspap = NetworkConnectionSubtypeKey.String("hspap") + // GSM + // Stability: development + NetworkConnectionSubtypeGsm = NetworkConnectionSubtypeKey.String("gsm") + // TD-SCDMA + // Stability: development + NetworkConnectionSubtypeTdScdma = NetworkConnectionSubtypeKey.String("td_scdma") + // IWLAN + // Stability: development + NetworkConnectionSubtypeIwlan = NetworkConnectionSubtypeKey.String("iwlan") + // 5G NR (New Radio) + // Stability: development + NetworkConnectionSubtypeNr = NetworkConnectionSubtypeKey.String("nr") + // 5G NRNSA (New Radio Non-Standalone) + // Stability: development + NetworkConnectionSubtypeNrnsa = NetworkConnectionSubtypeKey.String("nrnsa") + // LTE CA + // Stability: development + NetworkConnectionSubtypeLteCa = NetworkConnectionSubtypeKey.String("lte_ca") +) + +// Enum values for network.connection.type +var ( + // wifi + // Stability: development + NetworkConnectionTypeWifi = NetworkConnectionTypeKey.String("wifi") + // wired + // Stability: development + NetworkConnectionTypeWired = NetworkConnectionTypeKey.String("wired") + // cell + // Stability: development + NetworkConnectionTypeCell = NetworkConnectionTypeKey.String("cell") + // unavailable + // Stability: development + NetworkConnectionTypeUnavailable = NetworkConnectionTypeKey.String("unavailable") + // unknown + // Stability: development + NetworkConnectionTypeUnknown = NetworkConnectionTypeKey.String("unknown") +) + +// Enum values for network.io.direction +var ( + // transmit + // Stability: development + NetworkIODirectionTransmit = NetworkIODirectionKey.String("transmit") + // 
receive + // Stability: development + NetworkIODirectionReceive = NetworkIODirectionKey.String("receive") +) + +// Enum values for network.transport +var ( + // TCP + // Stability: stable + NetworkTransportTCP = NetworkTransportKey.String("tcp") + // UDP + // Stability: stable + NetworkTransportUDP = NetworkTransportKey.String("udp") + // Named or anonymous pipe. + // Stability: stable + NetworkTransportPipe = NetworkTransportKey.String("pipe") + // Unix domain socket + // Stability: stable + NetworkTransportUnix = NetworkTransportKey.String("unix") + // QUIC + // Stability: stable + NetworkTransportQUIC = NetworkTransportKey.String("quic") +) + +// Enum values for network.type +var ( + // IPv4 + // Stability: stable + NetworkTypeIPv4 = NetworkTypeKey.String("ipv4") + // IPv6 + // Stability: stable + NetworkTypeIPv6 = NetworkTypeKey.String("ipv6") +) + +// Namespace: oci +const ( + // OCIManifestDigestKey is the attribute Key conforming to the + // "oci.manifest.digest" semantic conventions. It represents the digest of the + // OCI image manifest. For container images specifically is the digest by which + // the container image is known. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // "sha256:e4ca62c0d62f3e886e684806dfe9d4e0cda60d54986898173c1083856cfda0f4" + // Note: Follows [OCI Image Manifest Specification], and specifically the + // [Digest property]. + // An example can be found in [Example Image Manifest]. 
+ // + // [OCI Image Manifest Specification]: https://github.com/opencontainers/image-spec/blob/main/manifest.md + // [Digest property]: https://github.com/opencontainers/image-spec/blob/main/descriptor.md#digests + // [Example Image Manifest]: https://github.com/opencontainers/image-spec/blob/main/manifest.md#example-image-manifest + OCIManifestDigestKey = attribute.Key("oci.manifest.digest") +) + +// OCIManifestDigest returns an attribute KeyValue conforming to the +// "oci.manifest.digest" semantic conventions. It represents the digest of the +// OCI image manifest. For container images specifically is the digest by which +// the container image is known. +func OCIManifestDigest(val string) attribute.KeyValue { + return OCIManifestDigestKey.String(val) +} + +// Namespace: openai +const ( + // OpenAIRequestServiceTierKey is the attribute Key conforming to the + // "openai.request.service_tier" semantic conventions. It represents the service + // tier requested. May be a specific tier, default, or auto. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "auto", "default" + OpenAIRequestServiceTierKey = attribute.Key("openai.request.service_tier") + + // OpenAIResponseServiceTierKey is the attribute Key conforming to the + // "openai.response.service_tier" semantic conventions. It represents the + // service tier used for the response. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "scale", "default" + OpenAIResponseServiceTierKey = attribute.Key("openai.response.service_tier") + + // OpenAIResponseSystemFingerprintKey is the attribute Key conforming to the + // "openai.response.system_fingerprint" semantic conventions. It represents a + // fingerprint to track any eventual change in the Generative AI environment. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "fp_44709d6fcb" + OpenAIResponseSystemFingerprintKey = attribute.Key("openai.response.system_fingerprint") +) + +// OpenAIResponseServiceTier returns an attribute KeyValue conforming to the +// "openai.response.service_tier" semantic conventions. It represents the service +// tier used for the response. +func OpenAIResponseServiceTier(val string) attribute.KeyValue { + return OpenAIResponseServiceTierKey.String(val) +} + +// OpenAIResponseSystemFingerprint returns an attribute KeyValue conforming to +// the "openai.response.system_fingerprint" semantic conventions. It represents a +// fingerprint to track any eventual change in the Generative AI environment. +func OpenAIResponseSystemFingerprint(val string) attribute.KeyValue { + return OpenAIResponseSystemFingerprintKey.String(val) +} + +// Enum values for openai.request.service_tier +var ( + // The system will utilize scale tier credits until they are exhausted. + // Stability: development + OpenAIRequestServiceTierAuto = OpenAIRequestServiceTierKey.String("auto") + // The system will utilize the default scale tier. + // Stability: development + OpenAIRequestServiceTierDefault = OpenAIRequestServiceTierKey.String("default") +) + +// Namespace: opentracing +const ( + // OpenTracingRefTypeKey is the attribute Key conforming to the + // "opentracing.ref_type" semantic conventions. It represents the parent-child + // Reference type. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // Note: The causal relationship between a child Span and a parent Span. 
+ OpenTracingRefTypeKey = attribute.Key("opentracing.ref_type") +) + +// Enum values for opentracing.ref_type +var ( + // The parent Span depends on the child Span in some capacity + // Stability: development + OpenTracingRefTypeChildOf = OpenTracingRefTypeKey.String("child_of") + // The parent Span doesn't depend in any way on the result of the child Span + // Stability: development + OpenTracingRefTypeFollowsFrom = OpenTracingRefTypeKey.String("follows_from") +) + +// Namespace: os +const ( + // OSBuildIDKey is the attribute Key conforming to the "os.build_id" semantic + // conventions. It represents the unique identifier for a particular build or + // compilation of the operating system. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "TQ3C.230805.001.B2", "20E247", "22621" + OSBuildIDKey = attribute.Key("os.build_id") + + // OSDescriptionKey is the attribute Key conforming to the "os.description" + // semantic conventions. It represents the human readable (not intended to be + // parsed) OS version information, like e.g. reported by `ver` or + // `lsb_release -a` commands. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Microsoft Windows [Version 10.0.18363.778]", "Ubuntu 18.04.1 LTS" + OSDescriptionKey = attribute.Key("os.description") + + // OSNameKey is the attribute Key conforming to the "os.name" semantic + // conventions. It represents the human readable operating system name. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "iOS", "Android", "Ubuntu" + OSNameKey = attribute.Key("os.name") + + // OSTypeKey is the attribute Key conforming to the "os.type" semantic + // conventions. It represents the operating system type. 
+ // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + OSTypeKey = attribute.Key("os.type") + + // OSVersionKey is the attribute Key conforming to the "os.version" semantic + // conventions. It represents the version string of the operating system as + // defined in [Version Attributes]. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "14.2.1", "18.04.1" + // + // [Version Attributes]: /docs/resource/README.md#version-attributes + OSVersionKey = attribute.Key("os.version") +) + +// OSBuildID returns an attribute KeyValue conforming to the "os.build_id" +// semantic conventions. It represents the unique identifier for a particular +// build or compilation of the operating system. +func OSBuildID(val string) attribute.KeyValue { + return OSBuildIDKey.String(val) +} + +// OSDescription returns an attribute KeyValue conforming to the "os.description" +// semantic conventions. It represents the human readable (not intended to be +// parsed) OS version information, like e.g. reported by `ver` or +// `lsb_release -a` commands. +func OSDescription(val string) attribute.KeyValue { + return OSDescriptionKey.String(val) +} + +// OSName returns an attribute KeyValue conforming to the "os.name" semantic +// conventions. It represents the human readable operating system name. +func OSName(val string) attribute.KeyValue { + return OSNameKey.String(val) +} + +// OSVersion returns an attribute KeyValue conforming to the "os.version" +// semantic conventions. It represents the version string of the operating system +// as defined in [Version Attributes]. 
+// +// [Version Attributes]: /docs/resource/README.md#version-attributes +func OSVersion(val string) attribute.KeyValue { + return OSVersionKey.String(val) +} + +// Enum values for os.type +var ( + // Microsoft Windows + // Stability: development + OSTypeWindows = OSTypeKey.String("windows") + // Linux + // Stability: development + OSTypeLinux = OSTypeKey.String("linux") + // Apple Darwin + // Stability: development + OSTypeDarwin = OSTypeKey.String("darwin") + // FreeBSD + // Stability: development + OSTypeFreeBSD = OSTypeKey.String("freebsd") + // NetBSD + // Stability: development + OSTypeNetBSD = OSTypeKey.String("netbsd") + // OpenBSD + // Stability: development + OSTypeOpenBSD = OSTypeKey.String("openbsd") + // DragonFly BSD + // Stability: development + OSTypeDragonflyBSD = OSTypeKey.String("dragonflybsd") + // HP-UX (Hewlett Packard Unix) + // Stability: development + OSTypeHPUX = OSTypeKey.String("hpux") + // AIX (Advanced Interactive eXecutive) + // Stability: development + OSTypeAIX = OSTypeKey.String("aix") + // SunOS, Oracle Solaris + // Stability: development + OSTypeSolaris = OSTypeKey.String("solaris") + // IBM z/OS + // Stability: development + OSTypeZOS = OSTypeKey.String("zos") +) + +// Namespace: otel +const ( + // OTelComponentNameKey is the attribute Key conforming to the + // "otel.component.name" semantic conventions. It represents a name uniquely + // identifying the instance of the OpenTelemetry component within its containing + // SDK instance. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "otlp_grpc_span_exporter/0", "custom-name" + // Note: Implementations SHOULD ensure a low cardinality for this attribute, + // even across application or SDK restarts. + // E.g. implementations MUST NOT use UUIDs as values for this attribute. + // + // Implementations MAY achieve these goals by following a + // `/` pattern, e.g. + // `batching_span_processor/0`. 
+ // Hereby `otel.component.type` refers to the corresponding attribute value of + // the component. + // + // The value of `instance-counter` MAY be automatically assigned by the + // component and uniqueness within the enclosing SDK instance MUST be + // guaranteed. + // For example, `` MAY be implemented by using a monotonically + // increasing counter (starting with `0`), which is incremented every time an + // instance of the given component type is started. + // + // With this implementation, for example the first Batching Span Processor would + // have `batching_span_processor/0` + // as `otel.component.name`, the second one `batching_span_processor/1` and so + // on. + // These values will therefore be reused in the case of an application restart. + OTelComponentNameKey = attribute.Key("otel.component.name") + + // OTelComponentTypeKey is the attribute Key conforming to the + // "otel.component.type" semantic conventions. It represents a name identifying + // the type of the OpenTelemetry component. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "batching_span_processor", "com.example.MySpanExporter" + // Note: If none of the standardized values apply, implementations SHOULD use + // the language-defined name of the type. + // E.g. for Java the fully qualified classname SHOULD be used in this case. + OTelComponentTypeKey = attribute.Key("otel.component.type") + + // OTelScopeNameKey is the attribute Key conforming to the "otel.scope.name" + // semantic conventions. It represents the name of the instrumentation scope - ( + // `InstrumentationScope.Name` in OTLP). + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "io.opentelemetry.contrib.mongodb" + OTelScopeNameKey = attribute.Key("otel.scope.name") + + // OTelScopeSchemaURLKey is the attribute Key conforming to the + // "otel.scope.schema_url" semantic conventions. 
It represents the schema URL of + // the instrumentation scope. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "https://opentelemetry.io/schemas/1.31.0" + OTelScopeSchemaURLKey = attribute.Key("otel.scope.schema_url") + + // OTelScopeVersionKey is the attribute Key conforming to the + // "otel.scope.version" semantic conventions. It represents the version of the + // instrumentation scope - (`InstrumentationScope.Version` in OTLP). + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "1.0.0" + OTelScopeVersionKey = attribute.Key("otel.scope.version") + + // OTelSpanParentOriginKey is the attribute Key conforming to the + // "otel.span.parent.origin" semantic conventions. It represents the determines + // whether the span has a parent span, and if so, + // [whether it is a remote parent]. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // + // [whether it is a remote parent]: https://opentelemetry.io/docs/specs/otel/trace/api/#isremote + OTelSpanParentOriginKey = attribute.Key("otel.span.parent.origin") + + // OTelSpanSamplingResultKey is the attribute Key conforming to the + // "otel.span.sampling_result" semantic conventions. It represents the result + // value of the sampler for this span. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + OTelSpanSamplingResultKey = attribute.Key("otel.span.sampling_result") + + // OTelStatusCodeKey is the attribute Key conforming to the "otel.status_code" + // semantic conventions. It represents the name of the code, either "OK" or + // "ERROR". MUST NOT be set if the status code is UNSET. 
+ // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: + OTelStatusCodeKey = attribute.Key("otel.status_code") + + // OTelStatusDescriptionKey is the attribute Key conforming to the + // "otel.status_description" semantic conventions. It represents the description + // of the Status if it has a value, otherwise not set. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "resource not found" + OTelStatusDescriptionKey = attribute.Key("otel.status_description") +) + +// OTelComponentName returns an attribute KeyValue conforming to the +// "otel.component.name" semantic conventions. It represents a name uniquely +// identifying the instance of the OpenTelemetry component within its containing +// SDK instance. +func OTelComponentName(val string) attribute.KeyValue { + return OTelComponentNameKey.String(val) +} + +// OTelScopeName returns an attribute KeyValue conforming to the +// "otel.scope.name" semantic conventions. It represents the name of the +// instrumentation scope - (`InstrumentationScope.Name` in OTLP). +func OTelScopeName(val string) attribute.KeyValue { + return OTelScopeNameKey.String(val) +} + +// OTelScopeSchemaURL returns an attribute KeyValue conforming to the +// "otel.scope.schema_url" semantic conventions. It represents the schema URL of +// the instrumentation scope. +func OTelScopeSchemaURL(val string) attribute.KeyValue { + return OTelScopeSchemaURLKey.String(val) +} + +// OTelScopeVersion returns an attribute KeyValue conforming to the +// "otel.scope.version" semantic conventions. It represents the version of the +// instrumentation scope - (`InstrumentationScope.Version` in OTLP). +func OTelScopeVersion(val string) attribute.KeyValue { + return OTelScopeVersionKey.String(val) +} + +// OTelStatusDescription returns an attribute KeyValue conforming to the +// "otel.status_description" semantic conventions. 
It represents the description +// of the Status if it has a value, otherwise not set. +func OTelStatusDescription(val string) attribute.KeyValue { + return OTelStatusDescriptionKey.String(val) +} + +// Enum values for otel.component.type +var ( + // The builtin SDK batching span processor + // + // Stability: development + OTelComponentTypeBatchingSpanProcessor = OTelComponentTypeKey.String("batching_span_processor") + // The builtin SDK simple span processor + // + // Stability: development + OTelComponentTypeSimpleSpanProcessor = OTelComponentTypeKey.String("simple_span_processor") + // The builtin SDK batching log record processor + // + // Stability: development + OTelComponentTypeBatchingLogProcessor = OTelComponentTypeKey.String("batching_log_processor") + // The builtin SDK simple log record processor + // + // Stability: development + OTelComponentTypeSimpleLogProcessor = OTelComponentTypeKey.String("simple_log_processor") + // OTLP span exporter over gRPC with protobuf serialization + // + // Stability: development + OTelComponentTypeOtlpGRPCSpanExporter = OTelComponentTypeKey.String("otlp_grpc_span_exporter") + // OTLP span exporter over HTTP with protobuf serialization + // + // Stability: development + OTelComponentTypeOtlpHTTPSpanExporter = OTelComponentTypeKey.String("otlp_http_span_exporter") + // OTLP span exporter over HTTP with JSON serialization + // + // Stability: development + OTelComponentTypeOtlpHTTPJSONSpanExporter = OTelComponentTypeKey.String("otlp_http_json_span_exporter") + // Zipkin span exporter over HTTP + // + // Stability: development + OTelComponentTypeZipkinHTTPSpanExporter = OTelComponentTypeKey.String("zipkin_http_span_exporter") + // OTLP log record exporter over gRPC with protobuf serialization + // + // Stability: development + OTelComponentTypeOtlpGRPCLogExporter = OTelComponentTypeKey.String("otlp_grpc_log_exporter") + // OTLP log record exporter over HTTP with protobuf serialization + // + // Stability: development + 
OTelComponentTypeOtlpHTTPLogExporter = OTelComponentTypeKey.String("otlp_http_log_exporter") + // OTLP log record exporter over HTTP with JSON serialization + // + // Stability: development + OTelComponentTypeOtlpHTTPJSONLogExporter = OTelComponentTypeKey.String("otlp_http_json_log_exporter") + // The builtin SDK periodically exporting metric reader + // + // Stability: development + OTelComponentTypePeriodicMetricReader = OTelComponentTypeKey.String("periodic_metric_reader") + // OTLP metric exporter over gRPC with protobuf serialization + // + // Stability: development + OTelComponentTypeOtlpGRPCMetricExporter = OTelComponentTypeKey.String("otlp_grpc_metric_exporter") + // OTLP metric exporter over HTTP with protobuf serialization + // + // Stability: development + OTelComponentTypeOtlpHTTPMetricExporter = OTelComponentTypeKey.String("otlp_http_metric_exporter") + // OTLP metric exporter over HTTP with JSON serialization + // + // Stability: development + OTelComponentTypeOtlpHTTPJSONMetricExporter = OTelComponentTypeKey.String("otlp_http_json_metric_exporter") + // Prometheus metric exporter over HTTP with the default text-based format + // + // Stability: development + OTelComponentTypePrometheusHTTPTextMetricExporter = OTelComponentTypeKey.String("prometheus_http_text_metric_exporter") +) + +// Enum values for otel.span.parent.origin +var ( + // The span does not have a parent, it is a root span + // Stability: development + OTelSpanParentOriginNone = OTelSpanParentOriginKey.String("none") + // The span has a parent and the parent's span context [isRemote()] is false + // Stability: development + // + // [isRemote()]: https://opentelemetry.io/docs/specs/otel/trace/api/#isremote + OTelSpanParentOriginLocal = OTelSpanParentOriginKey.String("local") + // The span has a parent and the parent's span context [isRemote()] is true + // Stability: development + // + // [isRemote()]: https://opentelemetry.io/docs/specs/otel/trace/api/#isremote + 
OTelSpanParentOriginRemote = OTelSpanParentOriginKey.String("remote") +) + +// Enum values for otel.span.sampling_result +var ( + // The span is not sampled and not recording + // Stability: development + OTelSpanSamplingResultDrop = OTelSpanSamplingResultKey.String("DROP") + // The span is not sampled, but recording + // Stability: development + OTelSpanSamplingResultRecordOnly = OTelSpanSamplingResultKey.String("RECORD_ONLY") + // The span is sampled and recording + // Stability: development + OTelSpanSamplingResultRecordAndSample = OTelSpanSamplingResultKey.String("RECORD_AND_SAMPLE") +) + +// Enum values for otel.status_code +var ( + // The operation has been validated by an Application developer or Operator to + // have completed successfully. + // Stability: stable + OTelStatusCodeOk = OTelStatusCodeKey.String("OK") + // The operation contains an error. + // Stability: stable + OTelStatusCodeError = OTelStatusCodeKey.String("ERROR") +) + +// Namespace: peer +const ( + // PeerServiceKey is the attribute Key conforming to the "peer.service" semantic + // conventions. It represents the [`service.name`] of the remote service. SHOULD + // be equal to the actual `service.name` resource attribute of the remote + // service if any. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: AuthTokenCache + // + // [`service.name`]: /docs/resource/README.md#service + PeerServiceKey = attribute.Key("peer.service") +) + +// PeerService returns an attribute KeyValue conforming to the "peer.service" +// semantic conventions. It represents the [`service.name`] of the remote +// service. SHOULD be equal to the actual `service.name` resource attribute of +// the remote service if any. 
+// +// [`service.name`]: /docs/resource/README.md#service +func PeerService(val string) attribute.KeyValue { + return PeerServiceKey.String(val) +} + +// Namespace: process +const ( + // ProcessArgsCountKey is the attribute Key conforming to the + // "process.args_count" semantic conventions. It represents the length of the + // process.command_args array. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 4 + // Note: This field can be useful for querying or performing bucket analysis on + // how many arguments were provided to start a process. More arguments may be an + // indication of suspicious activity. + ProcessArgsCountKey = attribute.Key("process.args_count") + + // ProcessCommandKey is the attribute Key conforming to the "process.command" + // semantic conventions. It represents the command used to launch the process + // (i.e. the command name). On Linux based systems, can be set to the zeroth + // string in `proc/[pid]/cmdline`. On Windows, can be set to the first parameter + // extracted from `GetCommandLineW`. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "cmd/otelcol" + ProcessCommandKey = attribute.Key("process.command") + + // ProcessCommandArgsKey is the attribute Key conforming to the + // "process.command_args" semantic conventions. It represents the all the + // command arguments (including the command/executable itself) as received by + // the process. On Linux-based systems (and some other Unixoid systems + // supporting procfs), can be set according to the list of null-delimited + // strings extracted from `proc/[pid]/cmdline`. For libc-based executables, this + // would be the full argv vector passed to `main`. SHOULD NOT be collected by + // default unless there is sanitization that excludes sensitive data. 
+ // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "cmd/otecol", "--config=config.yaml" + ProcessCommandArgsKey = attribute.Key("process.command_args") + + // ProcessCommandLineKey is the attribute Key conforming to the + // "process.command_line" semantic conventions. It represents the full command + // used to launch the process as a single string representing the full command. + // On Windows, can be set to the result of `GetCommandLineW`. Do not set this if + // you have to assemble it just for monitoring; use `process.command_args` + // instead. SHOULD NOT be collected by default unless there is sanitization that + // excludes sensitive data. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "C:\cmd\otecol --config="my directory\config.yaml"" + ProcessCommandLineKey = attribute.Key("process.command_line") + + // ProcessContextSwitchTypeKey is the attribute Key conforming to the + // "process.context_switch_type" semantic conventions. It represents the + // specifies whether the context switches for this data point were voluntary or + // involuntary. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + ProcessContextSwitchTypeKey = attribute.Key("process.context_switch_type") + + // ProcessCreationTimeKey is the attribute Key conforming to the + // "process.creation.time" semantic conventions. It represents the date and time + // the process was created, in ISO 8601 format. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "2023-11-21T09:25:34.853Z" + ProcessCreationTimeKey = attribute.Key("process.creation.time") + + // ProcessExecutableBuildIDGNUKey is the attribute Key conforming to the + // "process.executable.build_id.gnu" semantic conventions. 
It represents the GNU + // build ID as found in the `.note.gnu.build-id` ELF section (hex string). + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "c89b11207f6479603b0d49bf291c092c2b719293" + ProcessExecutableBuildIDGNUKey = attribute.Key("process.executable.build_id.gnu") + + // ProcessExecutableBuildIDGoKey is the attribute Key conforming to the + // "process.executable.build_id.go" semantic conventions. It represents the Go + // build ID as retrieved by `go tool buildid `. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // "foh3mEXu7BLZjsN9pOwG/kATcXlYVCDEFouRMQed_/WwRFB1hPo9LBkekthSPG/x8hMC8emW2cCjXD0_1aY" + ProcessExecutableBuildIDGoKey = attribute.Key("process.executable.build_id.go") + + // ProcessExecutableBuildIDHtlhashKey is the attribute Key conforming to the + // "process.executable.build_id.htlhash" semantic conventions. It represents the + // profiling specific build ID for executables. See the OTel specification for + // Profiles for more information. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "600DCAFE4A110000F2BF38C493F5FB92" + ProcessExecutableBuildIDHtlhashKey = attribute.Key("process.executable.build_id.htlhash") + + // ProcessExecutableNameKey is the attribute Key conforming to the + // "process.executable.name" semantic conventions. It represents the name of the + // process executable. On Linux based systems, this SHOULD be set to the base + // name of the target of `/proc/[pid]/exe`. On Windows, this SHOULD be set to + // the base name of `GetProcessImageFileNameW`. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "otelcol" + ProcessExecutableNameKey = attribute.Key("process.executable.name") + + // ProcessExecutablePathKey is the attribute Key conforming to the + // "process.executable.path" semantic conventions. It represents the full path + // to the process executable. On Linux based systems, can be set to the target + // of `proc/[pid]/exe`. On Windows, can be set to the result of + // `GetProcessImageFileNameW`. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "/usr/bin/cmd/otelcol" + ProcessExecutablePathKey = attribute.Key("process.executable.path") + + // ProcessExitCodeKey is the attribute Key conforming to the "process.exit.code" + // semantic conventions. It represents the exit code of the process. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 127 + ProcessExitCodeKey = attribute.Key("process.exit.code") + + // ProcessExitTimeKey is the attribute Key conforming to the "process.exit.time" + // semantic conventions. It represents the date and time the process exited, in + // ISO 8601 format. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "2023-11-21T09:26:12.315Z" + ProcessExitTimeKey = attribute.Key("process.exit.time") + + // ProcessGroupLeaderPIDKey is the attribute Key conforming to the + // "process.group_leader.pid" semantic conventions. It represents the PID of the + // process's group leader. This is also the process group ID (PGID) of the + // process. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 23 + ProcessGroupLeaderPIDKey = attribute.Key("process.group_leader.pid") + + // ProcessInteractiveKey is the attribute Key conforming to the + // "process.interactive" semantic conventions. 
It represents the whether the + // process is connected to an interactive shell. + // + // Type: boolean + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + ProcessInteractiveKey = attribute.Key("process.interactive") + + // ProcessLinuxCgroupKey is the attribute Key conforming to the + // "process.linux.cgroup" semantic conventions. It represents the control group + // associated with the process. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "1:name=systemd:/user.slice/user-1000.slice/session-3.scope", + // "0::/user.slice/user-1000.slice/user@1000.service/tmux-spawn-0267755b-4639-4a27-90ed-f19f88e53748.scope" + // Note: Control groups (cgroups) are a kernel feature used to organize and + // manage process resources. This attribute provides the path(s) to the + // cgroup(s) associated with the process, which should match the contents of the + // [/proc/[PID]/cgroup] file. + // + // [/proc/[PID]/cgroup]: https://man7.org/linux/man-pages/man7/cgroups.7.html + ProcessLinuxCgroupKey = attribute.Key("process.linux.cgroup") + + // ProcessOwnerKey is the attribute Key conforming to the "process.owner" + // semantic conventions. It represents the username of the user that owns the + // process. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "root" + ProcessOwnerKey = attribute.Key("process.owner") + + // ProcessPagingFaultTypeKey is the attribute Key conforming to the + // "process.paging.fault_type" semantic conventions. It represents the type of + // page fault for this data point. Type `major` is for major/hard page faults, + // and `minor` is for minor/soft page faults. 
+ // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + ProcessPagingFaultTypeKey = attribute.Key("process.paging.fault_type") + + // ProcessParentPIDKey is the attribute Key conforming to the + // "process.parent_pid" semantic conventions. It represents the parent Process + // identifier (PPID). + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 111 + ProcessParentPIDKey = attribute.Key("process.parent_pid") + + // ProcessPIDKey is the attribute Key conforming to the "process.pid" semantic + // conventions. It represents the process identifier (PID). + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 1234 + ProcessPIDKey = attribute.Key("process.pid") + + // ProcessRealUserIDKey is the attribute Key conforming to the + // "process.real_user.id" semantic conventions. It represents the real user ID + // (RUID) of the process. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 1000 + ProcessRealUserIDKey = attribute.Key("process.real_user.id") + + // ProcessRealUserNameKey is the attribute Key conforming to the + // "process.real_user.name" semantic conventions. It represents the username of + // the real user of the process. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "operator" + ProcessRealUserNameKey = attribute.Key("process.real_user.name") + + // ProcessRuntimeDescriptionKey is the attribute Key conforming to the + // "process.runtime.description" semantic conventions. It represents an + // additional description about the runtime of the process, for example a + // specific vendor customization of the runtime environment. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0 + ProcessRuntimeDescriptionKey = attribute.Key("process.runtime.description") + + // ProcessRuntimeNameKey is the attribute Key conforming to the + // "process.runtime.name" semantic conventions. It represents the name of the + // runtime of this process. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "OpenJDK Runtime Environment" + ProcessRuntimeNameKey = attribute.Key("process.runtime.name") + + // ProcessRuntimeVersionKey is the attribute Key conforming to the + // "process.runtime.version" semantic conventions. It represents the version of + // the runtime of this process, as returned by the runtime without modification. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 14.0.2 + ProcessRuntimeVersionKey = attribute.Key("process.runtime.version") + + // ProcessSavedUserIDKey is the attribute Key conforming to the + // "process.saved_user.id" semantic conventions. It represents the saved user ID + // (SUID) of the process. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 1002 + ProcessSavedUserIDKey = attribute.Key("process.saved_user.id") + + // ProcessSavedUserNameKey is the attribute Key conforming to the + // "process.saved_user.name" semantic conventions. It represents the username of + // the saved user. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "operator" + ProcessSavedUserNameKey = attribute.Key("process.saved_user.name") + + // ProcessSessionLeaderPIDKey is the attribute Key conforming to the + // "process.session_leader.pid" semantic conventions. It represents the PID of + // the process's session leader. This is also the session ID (SID) of the + // process. 
+ // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 14 + ProcessSessionLeaderPIDKey = attribute.Key("process.session_leader.pid") + + // ProcessTitleKey is the attribute Key conforming to the "process.title" + // semantic conventions. It represents the process title (proctitle). + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "cat /etc/hostname", "xfce4-session", "bash" + // Note: In many Unix-like systems, process title (proctitle), is the string + // that represents the name or command line of a running process, displayed by + // system monitoring tools like ps, top, and htop. + ProcessTitleKey = attribute.Key("process.title") + + // ProcessUserIDKey is the attribute Key conforming to the "process.user.id" + // semantic conventions. It represents the effective user ID (EUID) of the + // process. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 1001 + ProcessUserIDKey = attribute.Key("process.user.id") + + // ProcessUserNameKey is the attribute Key conforming to the "process.user.name" + // semantic conventions. It represents the username of the effective user of the + // process. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "root" + ProcessUserNameKey = attribute.Key("process.user.name") + + // ProcessVpidKey is the attribute Key conforming to the "process.vpid" semantic + // conventions. It represents the virtual process identifier. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 12 + // Note: The process ID within a PID namespace. This is not necessarily unique + // across all processes on the host but it is unique within the process + // namespace that the process exists within. 
+ ProcessVpidKey = attribute.Key("process.vpid") + + // ProcessWorkingDirectoryKey is the attribute Key conforming to the + // "process.working_directory" semantic conventions. It represents the working + // directory of the process. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "/root" + ProcessWorkingDirectoryKey = attribute.Key("process.working_directory") +) + +// ProcessArgsCount returns an attribute KeyValue conforming to the +// "process.args_count" semantic conventions. It represents the length of the +// process.command_args array. +func ProcessArgsCount(val int) attribute.KeyValue { + return ProcessArgsCountKey.Int(val) +} + +// ProcessCommand returns an attribute KeyValue conforming to the +// "process.command" semantic conventions. It represents the command used to +// launch the process (i.e. the command name). On Linux based systems, can be set +// to the zeroth string in `proc/[pid]/cmdline`. On Windows, can be set to the +// first parameter extracted from `GetCommandLineW`. +func ProcessCommand(val string) attribute.KeyValue { + return ProcessCommandKey.String(val) +} + +// ProcessCommandArgs returns an attribute KeyValue conforming to the +// "process.command_args" semantic conventions. It represents the all the command +// arguments (including the command/executable itself) as received by the +// process. On Linux-based systems (and some other Unixoid systems supporting +// procfs), can be set according to the list of null-delimited strings extracted +// from `proc/[pid]/cmdline`. For libc-based executables, this would be the full +// argv vector passed to `main`. SHOULD NOT be collected by default unless there +// is sanitization that excludes sensitive data. 
+func ProcessCommandArgs(val ...string) attribute.KeyValue { + return ProcessCommandArgsKey.StringSlice(val) +} + +// ProcessCommandLine returns an attribute KeyValue conforming to the +// "process.command_line" semantic conventions. It represents the full command +// used to launch the process as a single string representing the full command. +// On Windows, can be set to the result of `GetCommandLineW`. Do not set this if +// you have to assemble it just for monitoring; use `process.command_args` +// instead. SHOULD NOT be collected by default unless there is sanitization that +// excludes sensitive data. +func ProcessCommandLine(val string) attribute.KeyValue { + return ProcessCommandLineKey.String(val) +} + +// ProcessCreationTime returns an attribute KeyValue conforming to the +// "process.creation.time" semantic conventions. It represents the date and time +// the process was created, in ISO 8601 format. +func ProcessCreationTime(val string) attribute.KeyValue { + return ProcessCreationTimeKey.String(val) +} + +// ProcessEnvironmentVariable returns an attribute KeyValue conforming to the +// "process.environment_variable" semantic conventions. It represents the process +// environment variables, `` being the environment variable name, the value +// being the environment variable value. +func ProcessEnvironmentVariable(key string, val string) attribute.KeyValue { + return attribute.String("process.environment_variable."+key, val) +} + +// ProcessExecutableBuildIDGNU returns an attribute KeyValue conforming to the +// "process.executable.build_id.gnu" semantic conventions. It represents the GNU +// build ID as found in the `.note.gnu.build-id` ELF section (hex string). +func ProcessExecutableBuildIDGNU(val string) attribute.KeyValue { + return ProcessExecutableBuildIDGNUKey.String(val) +} + +// ProcessExecutableBuildIDGo returns an attribute KeyValue conforming to the +// "process.executable.build_id.go" semantic conventions. 
It represents the Go +// build ID as retrieved by `go tool buildid `. +func ProcessExecutableBuildIDGo(val string) attribute.KeyValue { + return ProcessExecutableBuildIDGoKey.String(val) +} + +// ProcessExecutableBuildIDHtlhash returns an attribute KeyValue conforming to +// the "process.executable.build_id.htlhash" semantic conventions. It represents +// the profiling specific build ID for executables. See the OTel specification +// for Profiles for more information. +func ProcessExecutableBuildIDHtlhash(val string) attribute.KeyValue { + return ProcessExecutableBuildIDHtlhashKey.String(val) +} + +// ProcessExecutableName returns an attribute KeyValue conforming to the +// "process.executable.name" semantic conventions. It represents the name of the +// process executable. On Linux based systems, this SHOULD be set to the base +// name of the target of `/proc/[pid]/exe`. On Windows, this SHOULD be set to the +// base name of `GetProcessImageFileNameW`. +func ProcessExecutableName(val string) attribute.KeyValue { + return ProcessExecutableNameKey.String(val) +} + +// ProcessExecutablePath returns an attribute KeyValue conforming to the +// "process.executable.path" semantic conventions. It represents the full path to +// the process executable. On Linux based systems, can be set to the target of +// `proc/[pid]/exe`. On Windows, can be set to the result of +// `GetProcessImageFileNameW`. +func ProcessExecutablePath(val string) attribute.KeyValue { + return ProcessExecutablePathKey.String(val) +} + +// ProcessExitCode returns an attribute KeyValue conforming to the +// "process.exit.code" semantic conventions. It represents the exit code of the +// process. +func ProcessExitCode(val int) attribute.KeyValue { + return ProcessExitCodeKey.Int(val) +} + +// ProcessExitTime returns an attribute KeyValue conforming to the +// "process.exit.time" semantic conventions. It represents the date and time the +// process exited, in ISO 8601 format. 
+func ProcessExitTime(val string) attribute.KeyValue { + return ProcessExitTimeKey.String(val) +} + +// ProcessGroupLeaderPID returns an attribute KeyValue conforming to the +// "process.group_leader.pid" semantic conventions. It represents the PID of the +// process's group leader. This is also the process group ID (PGID) of the +// process. +func ProcessGroupLeaderPID(val int) attribute.KeyValue { + return ProcessGroupLeaderPIDKey.Int(val) +} + +// ProcessInteractive returns an attribute KeyValue conforming to the +// "process.interactive" semantic conventions. It represents the whether the +// process is connected to an interactive shell. +func ProcessInteractive(val bool) attribute.KeyValue { + return ProcessInteractiveKey.Bool(val) +} + +// ProcessLinuxCgroup returns an attribute KeyValue conforming to the +// "process.linux.cgroup" semantic conventions. It represents the control group +// associated with the process. +func ProcessLinuxCgroup(val string) attribute.KeyValue { + return ProcessLinuxCgroupKey.String(val) +} + +// ProcessOwner returns an attribute KeyValue conforming to the "process.owner" +// semantic conventions. It represents the username of the user that owns the +// process. +func ProcessOwner(val string) attribute.KeyValue { + return ProcessOwnerKey.String(val) +} + +// ProcessParentPID returns an attribute KeyValue conforming to the +// "process.parent_pid" semantic conventions. It represents the parent Process +// identifier (PPID). +func ProcessParentPID(val int) attribute.KeyValue { + return ProcessParentPIDKey.Int(val) +} + +// ProcessPID returns an attribute KeyValue conforming to the "process.pid" +// semantic conventions. It represents the process identifier (PID). +func ProcessPID(val int) attribute.KeyValue { + return ProcessPIDKey.Int(val) +} + +// ProcessRealUserID returns an attribute KeyValue conforming to the +// "process.real_user.id" semantic conventions. It represents the real user ID +// (RUID) of the process. 
+func ProcessRealUserID(val int) attribute.KeyValue { + return ProcessRealUserIDKey.Int(val) +} + +// ProcessRealUserName returns an attribute KeyValue conforming to the +// "process.real_user.name" semantic conventions. It represents the username of +// the real user of the process. +func ProcessRealUserName(val string) attribute.KeyValue { + return ProcessRealUserNameKey.String(val) +} + +// ProcessRuntimeDescription returns an attribute KeyValue conforming to the +// "process.runtime.description" semantic conventions. It represents an +// additional description about the runtime of the process, for example a +// specific vendor customization of the runtime environment. +func ProcessRuntimeDescription(val string) attribute.KeyValue { + return ProcessRuntimeDescriptionKey.String(val) +} + +// ProcessRuntimeName returns an attribute KeyValue conforming to the +// "process.runtime.name" semantic conventions. It represents the name of the +// runtime of this process. +func ProcessRuntimeName(val string) attribute.KeyValue { + return ProcessRuntimeNameKey.String(val) +} + +// ProcessRuntimeVersion returns an attribute KeyValue conforming to the +// "process.runtime.version" semantic conventions. It represents the version of +// the runtime of this process, as returned by the runtime without modification. +func ProcessRuntimeVersion(val string) attribute.KeyValue { + return ProcessRuntimeVersionKey.String(val) +} + +// ProcessSavedUserID returns an attribute KeyValue conforming to the +// "process.saved_user.id" semantic conventions. It represents the saved user ID +// (SUID) of the process. +func ProcessSavedUserID(val int) attribute.KeyValue { + return ProcessSavedUserIDKey.Int(val) +} + +// ProcessSavedUserName returns an attribute KeyValue conforming to the +// "process.saved_user.name" semantic conventions. It represents the username of +// the saved user. 
+func ProcessSavedUserName(val string) attribute.KeyValue { + return ProcessSavedUserNameKey.String(val) +} + +// ProcessSessionLeaderPID returns an attribute KeyValue conforming to the +// "process.session_leader.pid" semantic conventions. It represents the PID of +// the process's session leader. This is also the session ID (SID) of the +// process. +func ProcessSessionLeaderPID(val int) attribute.KeyValue { + return ProcessSessionLeaderPIDKey.Int(val) +} + +// ProcessTitle returns an attribute KeyValue conforming to the "process.title" +// semantic conventions. It represents the process title (proctitle). +func ProcessTitle(val string) attribute.KeyValue { + return ProcessTitleKey.String(val) +} + +// ProcessUserID returns an attribute KeyValue conforming to the +// "process.user.id" semantic conventions. It represents the effective user ID +// (EUID) of the process. +func ProcessUserID(val int) attribute.KeyValue { + return ProcessUserIDKey.Int(val) +} + +// ProcessUserName returns an attribute KeyValue conforming to the +// "process.user.name" semantic conventions. It represents the username of the +// effective user of the process. +func ProcessUserName(val string) attribute.KeyValue { + return ProcessUserNameKey.String(val) +} + +// ProcessVpid returns an attribute KeyValue conforming to the "process.vpid" +// semantic conventions. It represents the virtual process identifier. +func ProcessVpid(val int) attribute.KeyValue { + return ProcessVpidKey.Int(val) +} + +// ProcessWorkingDirectory returns an attribute KeyValue conforming to the +// "process.working_directory" semantic conventions. It represents the working +// directory of the process. 
+func ProcessWorkingDirectory(val string) attribute.KeyValue { + return ProcessWorkingDirectoryKey.String(val) +} + +// Enum values for process.context_switch_type +var ( + // voluntary + // Stability: development + ProcessContextSwitchTypeVoluntary = ProcessContextSwitchTypeKey.String("voluntary") + // involuntary + // Stability: development + ProcessContextSwitchTypeInvoluntary = ProcessContextSwitchTypeKey.String("involuntary") +) + +// Enum values for process.paging.fault_type +var ( + // major + // Stability: development + ProcessPagingFaultTypeMajor = ProcessPagingFaultTypeKey.String("major") + // minor + // Stability: development + ProcessPagingFaultTypeMinor = ProcessPagingFaultTypeKey.String("minor") +) + +// Namespace: profile +const ( + // ProfileFrameTypeKey is the attribute Key conforming to the + // "profile.frame.type" semantic conventions. It represents the describes the + // interpreter or compiler of a single frame. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "cpython" + ProfileFrameTypeKey = attribute.Key("profile.frame.type") +) + +// Enum values for profile.frame.type +var ( + // [.NET] + // + // Stability: development + // + // [.NET]: https://wikipedia.org/wiki/.NET + ProfileFrameTypeDotnet = ProfileFrameTypeKey.String("dotnet") + // [JVM] + // + // Stability: development + // + // [JVM]: https://wikipedia.org/wiki/Java_virtual_machine + ProfileFrameTypeJVM = ProfileFrameTypeKey.String("jvm") + // [Kernel] + // + // Stability: development + // + // [Kernel]: https://wikipedia.org/wiki/Kernel_(operating_system) + ProfileFrameTypeKernel = ProfileFrameTypeKey.String("kernel") + // Can be one of but not limited to [C], [C++], [Go] or [Rust]. If possible, a + // more precise value MUST be used. 
+ // + // Stability: development + // + // [C]: https://wikipedia.org/wiki/C_(programming_language) + // [C++]: https://wikipedia.org/wiki/C%2B%2B + // [Go]: https://wikipedia.org/wiki/Go_(programming_language) + // [Rust]: https://wikipedia.org/wiki/Rust_(programming_language) + ProfileFrameTypeNative = ProfileFrameTypeKey.String("native") + // [Perl] + // + // Stability: development + // + // [Perl]: https://wikipedia.org/wiki/Perl + ProfileFrameTypePerl = ProfileFrameTypeKey.String("perl") + // [PHP] + // + // Stability: development + // + // [PHP]: https://wikipedia.org/wiki/PHP + ProfileFrameTypePHP = ProfileFrameTypeKey.String("php") + // [Python] + // + // Stability: development + // + // [Python]: https://wikipedia.org/wiki/Python_(programming_language) + ProfileFrameTypeCpython = ProfileFrameTypeKey.String("cpython") + // [Ruby] + // + // Stability: development + // + // [Ruby]: https://wikipedia.org/wiki/Ruby_(programming_language) + ProfileFrameTypeRuby = ProfileFrameTypeKey.String("ruby") + // [V8JS] + // + // Stability: development + // + // [V8JS]: https://wikipedia.org/wiki/V8_(JavaScript_engine) + ProfileFrameTypeV8JS = ProfileFrameTypeKey.String("v8js") + // [Erlang] + // + // Stability: development + // + // [Erlang]: https://en.wikipedia.org/wiki/BEAM_(Erlang_virtual_machine) + ProfileFrameTypeBeam = ProfileFrameTypeKey.String("beam") + // [Go], + // + // Stability: development + // + // [Go]: https://wikipedia.org/wiki/Go_(programming_language) + ProfileFrameTypeGo = ProfileFrameTypeKey.String("go") + // [Rust] + // + // Stability: development + // + // [Rust]: https://wikipedia.org/wiki/Rust_(programming_language) + ProfileFrameTypeRust = ProfileFrameTypeKey.String("rust") +) + +// Namespace: rpc +const ( + // RPCConnectRPCErrorCodeKey is the attribute Key conforming to the + // "rpc.connect_rpc.error_code" semantic conventions. It represents the + // [error codes] of the Connect request. Error codes are always string values. 
+ // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // + // [error codes]: https://connectrpc.com//docs/protocol/#error-codes + RPCConnectRPCErrorCodeKey = attribute.Key("rpc.connect_rpc.error_code") + + // RPCGRPCStatusCodeKey is the attribute Key conforming to the + // "rpc.grpc.status_code" semantic conventions. It represents the + // [numeric status code] of the gRPC request. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // + // [numeric status code]: https://github.com/grpc/grpc/blob/v1.33.2/doc/statuscodes.md + RPCGRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code") + + // RPCJSONRPCErrorCodeKey is the attribute Key conforming to the + // "rpc.jsonrpc.error_code" semantic conventions. It represents the `error.code` + // property of response if it is an error response. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: -32700, 100 + RPCJSONRPCErrorCodeKey = attribute.Key("rpc.jsonrpc.error_code") + + // RPCJSONRPCErrorMessageKey is the attribute Key conforming to the + // "rpc.jsonrpc.error_message" semantic conventions. It represents the + // `error.message` property of response if it is an error response. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Parse error", "User already exists" + RPCJSONRPCErrorMessageKey = attribute.Key("rpc.jsonrpc.error_message") + + // RPCJSONRPCRequestIDKey is the attribute Key conforming to the + // "rpc.jsonrpc.request_id" semantic conventions. It represents the `id` + // property of request or response. Since protocol allows id to be int, string, + // `null` or missing (for notifications), value is expected to be cast to string + // for simplicity. Use empty string in case of `null` value. Omit entirely if + // this is a notification. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "10", "request-7", "" + RPCJSONRPCRequestIDKey = attribute.Key("rpc.jsonrpc.request_id") + + // RPCJSONRPCVersionKey is the attribute Key conforming to the + // "rpc.jsonrpc.version" semantic conventions. It represents the protocol + // version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0 + // doesn't specify this, the value can be omitted. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "2.0", "1.0" + RPCJSONRPCVersionKey = attribute.Key("rpc.jsonrpc.version") + + // RPCMessageCompressedSizeKey is the attribute Key conforming to the + // "rpc.message.compressed_size" semantic conventions. It represents the + // compressed size of the message in bytes. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + RPCMessageCompressedSizeKey = attribute.Key("rpc.message.compressed_size") + + // RPCMessageIDKey is the attribute Key conforming to the "rpc.message.id" + // semantic conventions. It MUST be calculated as two different counters + // starting from `1` one for sent messages and one for received message.. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // Note: This way we guarantee that the values will be consistent between + // different implementations. + RPCMessageIDKey = attribute.Key("rpc.message.id") + + // RPCMessageTypeKey is the attribute Key conforming to the "rpc.message.type" + // semantic conventions. It represents the whether this is a received or sent + // message. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + RPCMessageTypeKey = attribute.Key("rpc.message.type") + + // RPCMessageUncompressedSizeKey is the attribute Key conforming to the + // "rpc.message.uncompressed_size" semantic conventions. 
It represents the + // uncompressed size of the message in bytes. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + RPCMessageUncompressedSizeKey = attribute.Key("rpc.message.uncompressed_size") + + // RPCMethodKey is the attribute Key conforming to the "rpc.method" semantic + // conventions. It represents the name of the (logical) method being called, + // must be equal to the $method part in the span name. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: exampleMethod + // Note: This is the logical name of the method from the RPC interface + // perspective, which can be different from the name of any implementing + // method/function. The `code.function.name` attribute may be used to store the + // latter (e.g., method actually executing the call on the server side, RPC + // client stub method on the client side). + RPCMethodKey = attribute.Key("rpc.method") + + // RPCServiceKey is the attribute Key conforming to the "rpc.service" semantic + // conventions. It represents the full (logical) name of the service being + // called, including its package name, if applicable. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: myservice.EchoService + // Note: This is the logical name of the service from the RPC interface + // perspective, which can be different from the name of any implementing class. + // The `code.namespace` attribute may be used to store the latter (despite the + // attribute name, it may include a class name; e.g., class with method actually + // executing the call on the server side, RPC client stub class on the client + // side). + RPCServiceKey = attribute.Key("rpc.service") + + // RPCSystemKey is the attribute Key conforming to the "rpc.system" semantic + // conventions. It represents a string identifying the remoting system. See + // below for a list of well-known identifiers. 
+ // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + RPCSystemKey = attribute.Key("rpc.system") +) + +// RPCConnectRPCRequestMetadata returns an attribute KeyValue conforming to the +// "rpc.connect_rpc.request.metadata" semantic conventions. It represents the +// connect request metadata, `` being the normalized Connect Metadata key +// (lowercase), the value being the metadata values. +func RPCConnectRPCRequestMetadata(key string, val ...string) attribute.KeyValue { + return attribute.StringSlice("rpc.connect_rpc.request.metadata."+key, val) +} + +// RPCConnectRPCResponseMetadata returns an attribute KeyValue conforming to the +// "rpc.connect_rpc.response.metadata" semantic conventions. It represents the +// connect response metadata, `` being the normalized Connect Metadata key +// (lowercase), the value being the metadata values. +func RPCConnectRPCResponseMetadata(key string, val ...string) attribute.KeyValue { + return attribute.StringSlice("rpc.connect_rpc.response.metadata."+key, val) +} + +// RPCGRPCRequestMetadata returns an attribute KeyValue conforming to the +// "rpc.grpc.request.metadata" semantic conventions. It represents the gRPC +// request metadata, `` being the normalized gRPC Metadata key (lowercase), +// the value being the metadata values. +func RPCGRPCRequestMetadata(key string, val ...string) attribute.KeyValue { + return attribute.StringSlice("rpc.grpc.request.metadata."+key, val) +} + +// RPCGRPCResponseMetadata returns an attribute KeyValue conforming to the +// "rpc.grpc.response.metadata" semantic conventions. It represents the gRPC +// response metadata, `` being the normalized gRPC Metadata key (lowercase), +// the value being the metadata values. 
+func RPCGRPCResponseMetadata(key string, val ...string) attribute.KeyValue { + return attribute.StringSlice("rpc.grpc.response.metadata."+key, val) +} + +// RPCJSONRPCErrorCode returns an attribute KeyValue conforming to the +// "rpc.jsonrpc.error_code" semantic conventions. It represents the `error.code` +// property of response if it is an error response. +func RPCJSONRPCErrorCode(val int) attribute.KeyValue { + return RPCJSONRPCErrorCodeKey.Int(val) +} + +// RPCJSONRPCErrorMessage returns an attribute KeyValue conforming to the +// "rpc.jsonrpc.error_message" semantic conventions. It represents the +// `error.message` property of response if it is an error response. +func RPCJSONRPCErrorMessage(val string) attribute.KeyValue { + return RPCJSONRPCErrorMessageKey.String(val) +} + +// RPCJSONRPCRequestID returns an attribute KeyValue conforming to the +// "rpc.jsonrpc.request_id" semantic conventions. It represents the `id` property +// of request or response. Since protocol allows id to be int, string, `null` or +// missing (for notifications), value is expected to be cast to string for +// simplicity. Use empty string in case of `null` value. Omit entirely if this is +// a notification. +func RPCJSONRPCRequestID(val string) attribute.KeyValue { + return RPCJSONRPCRequestIDKey.String(val) +} + +// RPCJSONRPCVersion returns an attribute KeyValue conforming to the +// "rpc.jsonrpc.version" semantic conventions. It represents the protocol version +// as in `jsonrpc` property of request/response. Since JSON-RPC 1.0 doesn't +// specify this, the value can be omitted. +func RPCJSONRPCVersion(val string) attribute.KeyValue { + return RPCJSONRPCVersionKey.String(val) +} + +// RPCMessageCompressedSize returns an attribute KeyValue conforming to the +// "rpc.message.compressed_size" semantic conventions. It represents the +// compressed size of the message in bytes. 
+func RPCMessageCompressedSize(val int) attribute.KeyValue { + return RPCMessageCompressedSizeKey.Int(val) +} + +// RPCMessageID returns an attribute KeyValue conforming to the "rpc.message.id" +// semantic conventions. It MUST be calculated as two different counters starting +// from `1` one for sent messages and one for received message.. +func RPCMessageID(val int) attribute.KeyValue { + return RPCMessageIDKey.Int(val) +} + +// RPCMessageUncompressedSize returns an attribute KeyValue conforming to the +// "rpc.message.uncompressed_size" semantic conventions. It represents the +// uncompressed size of the message in bytes. +func RPCMessageUncompressedSize(val int) attribute.KeyValue { + return RPCMessageUncompressedSizeKey.Int(val) +} + +// RPCMethod returns an attribute KeyValue conforming to the "rpc.method" +// semantic conventions. It represents the name of the (logical) method being +// called, must be equal to the $method part in the span name. +func RPCMethod(val string) attribute.KeyValue { + return RPCMethodKey.String(val) +} + +// RPCService returns an attribute KeyValue conforming to the "rpc.service" +// semantic conventions. It represents the full (logical) name of the service +// being called, including its package name, if applicable. 
+func RPCService(val string) attribute.KeyValue { + return RPCServiceKey.String(val) +} + +// Enum values for rpc.connect_rpc.error_code +var ( + // cancelled + // Stability: development + RPCConnectRPCErrorCodeCancelled = RPCConnectRPCErrorCodeKey.String("cancelled") + // unknown + // Stability: development + RPCConnectRPCErrorCodeUnknown = RPCConnectRPCErrorCodeKey.String("unknown") + // invalid_argument + // Stability: development + RPCConnectRPCErrorCodeInvalidArgument = RPCConnectRPCErrorCodeKey.String("invalid_argument") + // deadline_exceeded + // Stability: development + RPCConnectRPCErrorCodeDeadlineExceeded = RPCConnectRPCErrorCodeKey.String("deadline_exceeded") + // not_found + // Stability: development + RPCConnectRPCErrorCodeNotFound = RPCConnectRPCErrorCodeKey.String("not_found") + // already_exists + // Stability: development + RPCConnectRPCErrorCodeAlreadyExists = RPCConnectRPCErrorCodeKey.String("already_exists") + // permission_denied + // Stability: development + RPCConnectRPCErrorCodePermissionDenied = RPCConnectRPCErrorCodeKey.String("permission_denied") + // resource_exhausted + // Stability: development + RPCConnectRPCErrorCodeResourceExhausted = RPCConnectRPCErrorCodeKey.String("resource_exhausted") + // failed_precondition + // Stability: development + RPCConnectRPCErrorCodeFailedPrecondition = RPCConnectRPCErrorCodeKey.String("failed_precondition") + // aborted + // Stability: development + RPCConnectRPCErrorCodeAborted = RPCConnectRPCErrorCodeKey.String("aborted") + // out_of_range + // Stability: development + RPCConnectRPCErrorCodeOutOfRange = RPCConnectRPCErrorCodeKey.String("out_of_range") + // unimplemented + // Stability: development + RPCConnectRPCErrorCodeUnimplemented = RPCConnectRPCErrorCodeKey.String("unimplemented") + // internal + // Stability: development + RPCConnectRPCErrorCodeInternal = RPCConnectRPCErrorCodeKey.String("internal") + // unavailable + // Stability: development + RPCConnectRPCErrorCodeUnavailable = 
RPCConnectRPCErrorCodeKey.String("unavailable") + // data_loss + // Stability: development + RPCConnectRPCErrorCodeDataLoss = RPCConnectRPCErrorCodeKey.String("data_loss") + // unauthenticated + // Stability: development + RPCConnectRPCErrorCodeUnauthenticated = RPCConnectRPCErrorCodeKey.String("unauthenticated") +) + +// Enum values for rpc.grpc.status_code +var ( + // OK + // Stability: development + RPCGRPCStatusCodeOk = RPCGRPCStatusCodeKey.Int(0) + // CANCELLED + // Stability: development + RPCGRPCStatusCodeCancelled = RPCGRPCStatusCodeKey.Int(1) + // UNKNOWN + // Stability: development + RPCGRPCStatusCodeUnknown = RPCGRPCStatusCodeKey.Int(2) + // INVALID_ARGUMENT + // Stability: development + RPCGRPCStatusCodeInvalidArgument = RPCGRPCStatusCodeKey.Int(3) + // DEADLINE_EXCEEDED + // Stability: development + RPCGRPCStatusCodeDeadlineExceeded = RPCGRPCStatusCodeKey.Int(4) + // NOT_FOUND + // Stability: development + RPCGRPCStatusCodeNotFound = RPCGRPCStatusCodeKey.Int(5) + // ALREADY_EXISTS + // Stability: development + RPCGRPCStatusCodeAlreadyExists = RPCGRPCStatusCodeKey.Int(6) + // PERMISSION_DENIED + // Stability: development + RPCGRPCStatusCodePermissionDenied = RPCGRPCStatusCodeKey.Int(7) + // RESOURCE_EXHAUSTED + // Stability: development + RPCGRPCStatusCodeResourceExhausted = RPCGRPCStatusCodeKey.Int(8) + // FAILED_PRECONDITION + // Stability: development + RPCGRPCStatusCodeFailedPrecondition = RPCGRPCStatusCodeKey.Int(9) + // ABORTED + // Stability: development + RPCGRPCStatusCodeAborted = RPCGRPCStatusCodeKey.Int(10) + // OUT_OF_RANGE + // Stability: development + RPCGRPCStatusCodeOutOfRange = RPCGRPCStatusCodeKey.Int(11) + // UNIMPLEMENTED + // Stability: development + RPCGRPCStatusCodeUnimplemented = RPCGRPCStatusCodeKey.Int(12) + // INTERNAL + // Stability: development + RPCGRPCStatusCodeInternal = RPCGRPCStatusCodeKey.Int(13) + // UNAVAILABLE + // Stability: development + RPCGRPCStatusCodeUnavailable = RPCGRPCStatusCodeKey.Int(14) + // DATA_LOSS + 
// Stability: development + RPCGRPCStatusCodeDataLoss = RPCGRPCStatusCodeKey.Int(15) + // UNAUTHENTICATED + // Stability: development + RPCGRPCStatusCodeUnauthenticated = RPCGRPCStatusCodeKey.Int(16) +) + +// Enum values for rpc.message.type +var ( + // sent + // Stability: development + RPCMessageTypeSent = RPCMessageTypeKey.String("SENT") + // received + // Stability: development + RPCMessageTypeReceived = RPCMessageTypeKey.String("RECEIVED") +) + +// Enum values for rpc.system +var ( + // gRPC + // Stability: development + RPCSystemGRPC = RPCSystemKey.String("grpc") + // Java RMI + // Stability: development + RPCSystemJavaRmi = RPCSystemKey.String("java_rmi") + // .NET WCF + // Stability: development + RPCSystemDotnetWcf = RPCSystemKey.String("dotnet_wcf") + // Apache Dubbo + // Stability: development + RPCSystemApacheDubbo = RPCSystemKey.String("apache_dubbo") + // Connect RPC + // Stability: development + RPCSystemConnectRPC = RPCSystemKey.String("connect_rpc") +) + +// Namespace: security_rule +const ( + // SecurityRuleCategoryKey is the attribute Key conforming to the + // "security_rule.category" semantic conventions. It represents a categorization + // value keyword used by the entity using the rule for detection of this event. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Attempted Information Leak" + SecurityRuleCategoryKey = attribute.Key("security_rule.category") + + // SecurityRuleDescriptionKey is the attribute Key conforming to the + // "security_rule.description" semantic conventions. It represents the + // description of the rule generating the event. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Block requests to public DNS over HTTPS / TLS protocols" + SecurityRuleDescriptionKey = attribute.Key("security_rule.description") + + // SecurityRuleLicenseKey is the attribute Key conforming to the + // "security_rule.license" semantic conventions. It represents the name of the + // license under which the rule used to generate this event is made available. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Apache 2.0" + SecurityRuleLicenseKey = attribute.Key("security_rule.license") + + // SecurityRuleNameKey is the attribute Key conforming to the + // "security_rule.name" semantic conventions. It represents the name of the rule + // or signature generating the event. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "BLOCK_DNS_over_TLS" + SecurityRuleNameKey = attribute.Key("security_rule.name") + + // SecurityRuleReferenceKey is the attribute Key conforming to the + // "security_rule.reference" semantic conventions. It represents the reference + // URL to additional information about the rule used to generate this event. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "https://en.wikipedia.org/wiki/DNS_over_TLS" + // Note: The URL can point to the vendor’s documentation about the rule. If + // that’s not available, it can also be a link to a more general page + // describing this type of alert. + SecurityRuleReferenceKey = attribute.Key("security_rule.reference") + + // SecurityRuleRulesetNameKey is the attribute Key conforming to the + // "security_rule.ruleset.name" semantic conventions. It represents the name of + // the ruleset, policy, group, or parent category in which the rule used to + // generate this event is a member. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Standard_Protocol_Filters" + SecurityRuleRulesetNameKey = attribute.Key("security_rule.ruleset.name") + + // SecurityRuleUUIDKey is the attribute Key conforming to the + // "security_rule.uuid" semantic conventions. It represents a rule ID that is + // unique within the scope of a set or group of agents, observers, or other + // entities using the rule for detection of this event. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "550e8400-e29b-41d4-a716-446655440000", "1100110011" + SecurityRuleUUIDKey = attribute.Key("security_rule.uuid") + + // SecurityRuleVersionKey is the attribute Key conforming to the + // "security_rule.version" semantic conventions. It represents the version / + // revision of the rule being used for analysis. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "1.0.0" + SecurityRuleVersionKey = attribute.Key("security_rule.version") +) + +// SecurityRuleCategory returns an attribute KeyValue conforming to the +// "security_rule.category" semantic conventions. It represents a categorization +// value keyword used by the entity using the rule for detection of this event. +func SecurityRuleCategory(val string) attribute.KeyValue { + return SecurityRuleCategoryKey.String(val) +} + +// SecurityRuleDescription returns an attribute KeyValue conforming to the +// "security_rule.description" semantic conventions. It represents the +// description of the rule generating the event. +func SecurityRuleDescription(val string) attribute.KeyValue { + return SecurityRuleDescriptionKey.String(val) +} + +// SecurityRuleLicense returns an attribute KeyValue conforming to the +// "security_rule.license" semantic conventions. It represents the name of the +// license under which the rule used to generate this event is made available. 
+func SecurityRuleLicense(val string) attribute.KeyValue { + return SecurityRuleLicenseKey.String(val) +} + +// SecurityRuleName returns an attribute KeyValue conforming to the +// "security_rule.name" semantic conventions. It represents the name of the rule +// or signature generating the event. +func SecurityRuleName(val string) attribute.KeyValue { + return SecurityRuleNameKey.String(val) +} + +// SecurityRuleReference returns an attribute KeyValue conforming to the +// "security_rule.reference" semantic conventions. It represents the reference +// URL to additional information about the rule used to generate this event. +func SecurityRuleReference(val string) attribute.KeyValue { + return SecurityRuleReferenceKey.String(val) +} + +// SecurityRuleRulesetName returns an attribute KeyValue conforming to the +// "security_rule.ruleset.name" semantic conventions. It represents the name of +// the ruleset, policy, group, or parent category in which the rule used to +// generate this event is a member. +func SecurityRuleRulesetName(val string) attribute.KeyValue { + return SecurityRuleRulesetNameKey.String(val) +} + +// SecurityRuleUUID returns an attribute KeyValue conforming to the +// "security_rule.uuid" semantic conventions. It represents a rule ID that is +// unique within the scope of a set or group of agents, observers, or other +// entities using the rule for detection of this event. +func SecurityRuleUUID(val string) attribute.KeyValue { + return SecurityRuleUUIDKey.String(val) +} + +// SecurityRuleVersion returns an attribute KeyValue conforming to the +// "security_rule.version" semantic conventions. It represents the version / +// revision of the rule being used for analysis. +func SecurityRuleVersion(val string) attribute.KeyValue { + return SecurityRuleVersionKey.String(val) +} + +// Namespace: server +const ( + // ServerAddressKey is the attribute Key conforming to the "server.address" + // semantic conventions. 
It represents the server domain name if available + // without reverse DNS lookup; otherwise, IP address or Unix domain socket name. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "example.com", "10.1.2.80", "/tmp/my.sock" + // Note: When observed from the client side, and when communicating through an + // intermediary, `server.address` SHOULD represent the server address behind any + // intermediaries, for example proxies, if it's available. + ServerAddressKey = attribute.Key("server.address") + + // ServerPortKey is the attribute Key conforming to the "server.port" semantic + // conventions. It represents the server port number. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: 80, 8080, 443 + // Note: When observed from the client side, and when communicating through an + // intermediary, `server.port` SHOULD represent the server port behind any + // intermediaries, for example proxies, if it's available. + ServerPortKey = attribute.Key("server.port") +) + +// ServerAddress returns an attribute KeyValue conforming to the "server.address" +// semantic conventions. It represents the server domain name if available +// without reverse DNS lookup; otherwise, IP address or Unix domain socket name. +func ServerAddress(val string) attribute.KeyValue { + return ServerAddressKey.String(val) +} + +// ServerPort returns an attribute KeyValue conforming to the "server.port" +// semantic conventions. It represents the server port number. +func ServerPort(val int) attribute.KeyValue { + return ServerPortKey.Int(val) +} + +// Namespace: service +const ( + // ServiceInstanceIDKey is the attribute Key conforming to the + // "service.instance.id" semantic conventions. It represents the string ID of + // the service instance. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "627cc493-f310-47de-96bd-71410b7dec09" + // Note: MUST be unique for each instance of the same + // `service.namespace,service.name` pair (in other words + // `service.namespace,service.name,service.instance.id` triplet MUST be globally + // unique). The ID helps to + // distinguish instances of the same service that exist at the same time (e.g. + // instances of a horizontally scaled + // service). + // + // Implementations, such as SDKs, are recommended to generate a random Version 1 + // or Version 4 [RFC + // 4122] UUID, but are free to use an inherent unique ID as + // the source of + // this value if stability is desirable. In that case, the ID SHOULD be used as + // source of a UUID Version 5 and + // SHOULD use the following UUID as the namespace: + // `4d63009a-8d0f-11ee-aad7-4c796ed8e320`. + // + // UUIDs are typically recommended, as only an opaque value for the purposes of + // identifying a service instance is + // needed. Similar to what can be seen in the man page for the + // [`/etc/machine-id`] file, the underlying + // data, such as pod name and namespace should be treated as confidential, being + // the user's choice to expose it + // or not via another resource attribute. + // + // For applications running behind an application server (like unicorn), we do + // not recommend using one identifier + // for all processes participating in the application. Instead, it's recommended + // each division (e.g. a worker + // thread in unicorn) to have its own instance.id. + // + // It's not recommended for a Collector to set `service.instance.id` if it can't + // unambiguously determine the + // service instance that is generating that telemetry. For instance, creating an + // UUID based on `pod.name` will + // likely be wrong, as the Collector might not know from which container within + // that pod the telemetry originated. 
+ // However, Collectors can set the `service.instance.id` if they can + // unambiguously determine the service instance + // for that telemetry. This is typically the case for scraping receivers, as + // they know the target address and + // port. + // + // [RFC + // 4122]: https://www.ietf.org/rfc/rfc4122.txt + // [`/etc/machine-id`]: https://www.freedesktop.org/software/systemd/man/latest/machine-id.html + ServiceInstanceIDKey = attribute.Key("service.instance.id") + + // ServiceNameKey is the attribute Key conforming to the "service.name" semantic + // conventions. It represents the logical name of the service. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "shoppingcart" + // Note: MUST be the same for all instances of horizontally scaled services. If + // the value was not specified, SDKs MUST fallback to `unknown_service:` + // concatenated with [`process.executable.name`], e.g. `unknown_service:bash`. + // If `process.executable.name` is not available, the value MUST be set to + // `unknown_service`. + // + // [`process.executable.name`]: process.md + ServiceNameKey = attribute.Key("service.name") + + // ServiceNamespaceKey is the attribute Key conforming to the + // "service.namespace" semantic conventions. It represents a namespace for + // `service.name`. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Shop" + // Note: A string value having a meaning that helps to distinguish a group of + // services, for example the team name that owns a group of services. + // `service.name` is expected to be unique within the same namespace. If + // `service.namespace` is not specified in the Resource then `service.name` is + // expected to be unique for all services that have no explicit namespace + // defined (so the empty/unspecified namespace is simply one more valid + // namespace). 
Zero-length namespace string is assumed equal to unspecified + // namespace. + ServiceNamespaceKey = attribute.Key("service.namespace") + + // ServiceVersionKey is the attribute Key conforming to the "service.version" + // semantic conventions. It represents the version string of the service API or + // implementation. The format is not defined by these conventions. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "2.0.0", "a01dbef8a" + ServiceVersionKey = attribute.Key("service.version") +) + +// ServiceInstanceID returns an attribute KeyValue conforming to the +// "service.instance.id" semantic conventions. It represents the string ID of the +// service instance. +func ServiceInstanceID(val string) attribute.KeyValue { + return ServiceInstanceIDKey.String(val) +} + +// ServiceName returns an attribute KeyValue conforming to the "service.name" +// semantic conventions. It represents the logical name of the service. +func ServiceName(val string) attribute.KeyValue { + return ServiceNameKey.String(val) +} + +// ServiceNamespace returns an attribute KeyValue conforming to the +// "service.namespace" semantic conventions. It represents a namespace for +// `service.name`. +func ServiceNamespace(val string) attribute.KeyValue { + return ServiceNamespaceKey.String(val) +} + +// ServiceVersion returns an attribute KeyValue conforming to the +// "service.version" semantic conventions. It represents the version string of +// the service API or implementation. The format is not defined by these +// conventions. +func ServiceVersion(val string) attribute.KeyValue { + return ServiceVersionKey.String(val) +} + +// Namespace: session +const ( + // SessionIDKey is the attribute Key conforming to the "session.id" semantic + // conventions. It represents a unique id to identify a session. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 00112233-4455-6677-8899-aabbccddeeff + SessionIDKey = attribute.Key("session.id") + + // SessionPreviousIDKey is the attribute Key conforming to the + // "session.previous_id" semantic conventions. It represents the previous + // `session.id` for this user, when known. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 00112233-4455-6677-8899-aabbccddeeff + SessionPreviousIDKey = attribute.Key("session.previous_id") +) + +// SessionID returns an attribute KeyValue conforming to the "session.id" +// semantic conventions. It represents a unique id to identify a session. +func SessionID(val string) attribute.KeyValue { + return SessionIDKey.String(val) +} + +// SessionPreviousID returns an attribute KeyValue conforming to the +// "session.previous_id" semantic conventions. It represents the previous +// `session.id` for this user, when known. +func SessionPreviousID(val string) attribute.KeyValue { + return SessionPreviousIDKey.String(val) +} + +// Namespace: signalr +const ( + // SignalRConnectionStatusKey is the attribute Key conforming to the + // "signalr.connection.status" semantic conventions. It represents the signalR + // HTTP connection closure status. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "app_shutdown", "timeout" + SignalRConnectionStatusKey = attribute.Key("signalr.connection.status") + + // SignalRTransportKey is the attribute Key conforming to the + // "signalr.transport" semantic conventions. It represents the + // [SignalR transport type]. 
+ // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "web_sockets", "long_polling" + // + // [SignalR transport type]: https://github.com/dotnet/aspnetcore/blob/main/src/SignalR/docs/specs/TransportProtocols.md + SignalRTransportKey = attribute.Key("signalr.transport") +) + +// Enum values for signalr.connection.status +var ( + // The connection was closed normally. + // Stability: stable + SignalRConnectionStatusNormalClosure = SignalRConnectionStatusKey.String("normal_closure") + // The connection was closed due to a timeout. + // Stability: stable + SignalRConnectionStatusTimeout = SignalRConnectionStatusKey.String("timeout") + // The connection was closed because the app is shutting down. + // Stability: stable + SignalRConnectionStatusAppShutdown = SignalRConnectionStatusKey.String("app_shutdown") +) + +// Enum values for signalr.transport +var ( + // ServerSentEvents protocol + // Stability: stable + SignalRTransportServerSentEvents = SignalRTransportKey.String("server_sent_events") + // LongPolling protocol + // Stability: stable + SignalRTransportLongPolling = SignalRTransportKey.String("long_polling") + // WebSockets protocol + // Stability: stable + SignalRTransportWebSockets = SignalRTransportKey.String("web_sockets") +) + +// Namespace: source +const ( + // SourceAddressKey is the attribute Key conforming to the "source.address" + // semantic conventions. It represents the source address - domain name if + // available without reverse DNS lookup; otherwise, IP address or Unix domain + // socket name. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "source.example.com", "10.1.2.80", "/tmp/my.sock" + // Note: When observed from the destination side, and when communicating through + // an intermediary, `source.address` SHOULD represent the source address behind + // any intermediaries, for example proxies, if it's available. 
+ SourceAddressKey = attribute.Key("source.address") + + // SourcePortKey is the attribute Key conforming to the "source.port" semantic + // conventions. It represents the source port number. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 3389, 2888 + SourcePortKey = attribute.Key("source.port") +) + +// SourceAddress returns an attribute KeyValue conforming to the "source.address" +// semantic conventions. It represents the source address - domain name if +// available without reverse DNS lookup; otherwise, IP address or Unix domain +// socket name. +func SourceAddress(val string) attribute.KeyValue { + return SourceAddressKey.String(val) +} + +// SourcePort returns an attribute KeyValue conforming to the "source.port" +// semantic conventions. It represents the source port number. +func SourcePort(val int) attribute.KeyValue { + return SourcePortKey.Int(val) +} + +// Namespace: system +const ( + // SystemCPULogicalNumberKey is the attribute Key conforming to the + // "system.cpu.logical_number" semantic conventions. It represents the + // deprecated, use `cpu.logical_number` instead. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 1 + SystemCPULogicalNumberKey = attribute.Key("system.cpu.logical_number") + + // SystemDeviceKey is the attribute Key conforming to the "system.device" + // semantic conventions. It represents the device identifier. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "(identifier)" + SystemDeviceKey = attribute.Key("system.device") + + // SystemFilesystemModeKey is the attribute Key conforming to the + // "system.filesystem.mode" semantic conventions. It represents the filesystem + // mode. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "rw, ro" + SystemFilesystemModeKey = attribute.Key("system.filesystem.mode") + + // SystemFilesystemMountpointKey is the attribute Key conforming to the + // "system.filesystem.mountpoint" semantic conventions. It represents the + // filesystem mount path. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "/mnt/data" + SystemFilesystemMountpointKey = attribute.Key("system.filesystem.mountpoint") + + // SystemFilesystemStateKey is the attribute Key conforming to the + // "system.filesystem.state" semantic conventions. It represents the filesystem + // state. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "used" + SystemFilesystemStateKey = attribute.Key("system.filesystem.state") + + // SystemFilesystemTypeKey is the attribute Key conforming to the + // "system.filesystem.type" semantic conventions. It represents the filesystem + // type. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "ext4" + SystemFilesystemTypeKey = attribute.Key("system.filesystem.type") + + // SystemMemoryStateKey is the attribute Key conforming to the + // "system.memory.state" semantic conventions. It represents the memory state. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "free", "cached" + SystemMemoryStateKey = attribute.Key("system.memory.state") + + // SystemPagingDirectionKey is the attribute Key conforming to the + // "system.paging.direction" semantic conventions. It represents the paging + // access direction. 
+ // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "in" + SystemPagingDirectionKey = attribute.Key("system.paging.direction") + + // SystemPagingStateKey is the attribute Key conforming to the + // "system.paging.state" semantic conventions. It represents the memory paging + // state. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "free" + SystemPagingStateKey = attribute.Key("system.paging.state") + + // SystemPagingTypeKey is the attribute Key conforming to the + // "system.paging.type" semantic conventions. It represents the memory paging + // type. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "minor" + SystemPagingTypeKey = attribute.Key("system.paging.type") + + // SystemProcessStatusKey is the attribute Key conforming to the + // "system.process.status" semantic conventions. It represents the process + // state, e.g., [Linux Process State Codes]. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "running" + // + // [Linux Process State Codes]: https://man7.org/linux/man-pages/man1/ps.1.html#PROCESS_STATE_CODES + SystemProcessStatusKey = attribute.Key("system.process.status") +) + +// SystemCPULogicalNumber returns an attribute KeyValue conforming to the +// "system.cpu.logical_number" semantic conventions. It represents the +// deprecated, use `cpu.logical_number` instead. +func SystemCPULogicalNumber(val int) attribute.KeyValue { + return SystemCPULogicalNumberKey.Int(val) +} + +// SystemDevice returns an attribute KeyValue conforming to the "system.device" +// semantic conventions. It represents the device identifier. +func SystemDevice(val string) attribute.KeyValue { + return SystemDeviceKey.String(val) +} + +// SystemFilesystemMode returns an attribute KeyValue conforming to the +// "system.filesystem.mode" semantic conventions. 
It represents the filesystem +// mode. +func SystemFilesystemMode(val string) attribute.KeyValue { + return SystemFilesystemModeKey.String(val) +} + +// SystemFilesystemMountpoint returns an attribute KeyValue conforming to the +// "system.filesystem.mountpoint" semantic conventions. It represents the +// filesystem mount path. +func SystemFilesystemMountpoint(val string) attribute.KeyValue { + return SystemFilesystemMountpointKey.String(val) +} + +// Enum values for system.filesystem.state +var ( + // used + // Stability: development + SystemFilesystemStateUsed = SystemFilesystemStateKey.String("used") + // free + // Stability: development + SystemFilesystemStateFree = SystemFilesystemStateKey.String("free") + // reserved + // Stability: development + SystemFilesystemStateReserved = SystemFilesystemStateKey.String("reserved") +) + +// Enum values for system.filesystem.type +var ( + // fat32 + // Stability: development + SystemFilesystemTypeFat32 = SystemFilesystemTypeKey.String("fat32") + // exfat + // Stability: development + SystemFilesystemTypeExfat = SystemFilesystemTypeKey.String("exfat") + // ntfs + // Stability: development + SystemFilesystemTypeNtfs = SystemFilesystemTypeKey.String("ntfs") + // refs + // Stability: development + SystemFilesystemTypeRefs = SystemFilesystemTypeKey.String("refs") + // hfsplus + // Stability: development + SystemFilesystemTypeHfsplus = SystemFilesystemTypeKey.String("hfsplus") + // ext4 + // Stability: development + SystemFilesystemTypeExt4 = SystemFilesystemTypeKey.String("ext4") +) + +// Enum values for system.memory.state +var ( + // Actual used virtual memory in bytes. 
+ // Stability: development + SystemMemoryStateUsed = SystemMemoryStateKey.String("used") + // free + // Stability: development + SystemMemoryStateFree = SystemMemoryStateKey.String("free") + // buffers + // Stability: development + SystemMemoryStateBuffers = SystemMemoryStateKey.String("buffers") + // cached + // Stability: development + SystemMemoryStateCached = SystemMemoryStateKey.String("cached") +) + +// Enum values for system.paging.direction +var ( + // in + // Stability: development + SystemPagingDirectionIn = SystemPagingDirectionKey.String("in") + // out + // Stability: development + SystemPagingDirectionOut = SystemPagingDirectionKey.String("out") +) + +// Enum values for system.paging.state +var ( + // used + // Stability: development + SystemPagingStateUsed = SystemPagingStateKey.String("used") + // free + // Stability: development + SystemPagingStateFree = SystemPagingStateKey.String("free") +) + +// Enum values for system.paging.type +var ( + // major + // Stability: development + SystemPagingTypeMajor = SystemPagingTypeKey.String("major") + // minor + // Stability: development + SystemPagingTypeMinor = SystemPagingTypeKey.String("minor") +) + +// Enum values for system.process.status +var ( + // running + // Stability: development + SystemProcessStatusRunning = SystemProcessStatusKey.String("running") + // sleeping + // Stability: development + SystemProcessStatusSleeping = SystemProcessStatusKey.String("sleeping") + // stopped + // Stability: development + SystemProcessStatusStopped = SystemProcessStatusKey.String("stopped") + // defunct + // Stability: development + SystemProcessStatusDefunct = SystemProcessStatusKey.String("defunct") +) + +// Namespace: telemetry +const ( + // TelemetryDistroNameKey is the attribute Key conforming to the + // "telemetry.distro.name" semantic conventions. It represents the name of the + // auto instrumentation agent or distribution, if used. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "parts-unlimited-java" + // Note: Official auto instrumentation agents and distributions SHOULD set the + // `telemetry.distro.name` attribute to + // a string starting with `opentelemetry-`, e.g. + // `opentelemetry-java-instrumentation`. + TelemetryDistroNameKey = attribute.Key("telemetry.distro.name") + + // TelemetryDistroVersionKey is the attribute Key conforming to the + // "telemetry.distro.version" semantic conventions. It represents the version + // string of the auto instrumentation agent or distribution, if used. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "1.2.3" + TelemetryDistroVersionKey = attribute.Key("telemetry.distro.version") + + // TelemetrySDKLanguageKey is the attribute Key conforming to the + // "telemetry.sdk.language" semantic conventions. It represents the language of + // the telemetry SDK. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: + TelemetrySDKLanguageKey = attribute.Key("telemetry.sdk.language") + + // TelemetrySDKNameKey is the attribute Key conforming to the + // "telemetry.sdk.name" semantic conventions. It represents the name of the + // telemetry SDK as defined above. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "opentelemetry" + // Note: The OpenTelemetry SDK MUST set the `telemetry.sdk.name` attribute to + // `opentelemetry`. + // If another SDK, like a fork or a vendor-provided implementation, is used, + // this SDK MUST set the + // `telemetry.sdk.name` attribute to the fully-qualified class or module name of + // this SDK's main entry point + // or another suitable identifier depending on the language. + // The identifier `opentelemetry` is reserved and MUST NOT be used in this case. 
+ // All custom identifiers SHOULD be stable across different versions of an + // implementation. + TelemetrySDKNameKey = attribute.Key("telemetry.sdk.name") + + // TelemetrySDKVersionKey is the attribute Key conforming to the + // "telemetry.sdk.version" semantic conventions. It represents the version + // string of the telemetry SDK. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "1.2.3" + TelemetrySDKVersionKey = attribute.Key("telemetry.sdk.version") +) + +// TelemetryDistroName returns an attribute KeyValue conforming to the +// "telemetry.distro.name" semantic conventions. It represents the name of the +// auto instrumentation agent or distribution, if used. +func TelemetryDistroName(val string) attribute.KeyValue { + return TelemetryDistroNameKey.String(val) +} + +// TelemetryDistroVersion returns an attribute KeyValue conforming to the +// "telemetry.distro.version" semantic conventions. It represents the version +// string of the auto instrumentation agent or distribution, if used. +func TelemetryDistroVersion(val string) attribute.KeyValue { + return TelemetryDistroVersionKey.String(val) +} + +// TelemetrySDKName returns an attribute KeyValue conforming to the +// "telemetry.sdk.name" semantic conventions. It represents the name of the +// telemetry SDK as defined above. +func TelemetrySDKName(val string) attribute.KeyValue { + return TelemetrySDKNameKey.String(val) +} + +// TelemetrySDKVersion returns an attribute KeyValue conforming to the +// "telemetry.sdk.version" semantic conventions. It represents the version string +// of the telemetry SDK. 
+func TelemetrySDKVersion(val string) attribute.KeyValue { + return TelemetrySDKVersionKey.String(val) +} + +// Enum values for telemetry.sdk.language +var ( + // cpp + // Stability: stable + TelemetrySDKLanguageCPP = TelemetrySDKLanguageKey.String("cpp") + // dotnet + // Stability: stable + TelemetrySDKLanguageDotnet = TelemetrySDKLanguageKey.String("dotnet") + // erlang + // Stability: stable + TelemetrySDKLanguageErlang = TelemetrySDKLanguageKey.String("erlang") + // go + // Stability: stable + TelemetrySDKLanguageGo = TelemetrySDKLanguageKey.String("go") + // java + // Stability: stable + TelemetrySDKLanguageJava = TelemetrySDKLanguageKey.String("java") + // nodejs + // Stability: stable + TelemetrySDKLanguageNodejs = TelemetrySDKLanguageKey.String("nodejs") + // php + // Stability: stable + TelemetrySDKLanguagePHP = TelemetrySDKLanguageKey.String("php") + // python + // Stability: stable + TelemetrySDKLanguagePython = TelemetrySDKLanguageKey.String("python") + // ruby + // Stability: stable + TelemetrySDKLanguageRuby = TelemetrySDKLanguageKey.String("ruby") + // rust + // Stability: stable + TelemetrySDKLanguageRust = TelemetrySDKLanguageKey.String("rust") + // swift + // Stability: stable + TelemetrySDKLanguageSwift = TelemetrySDKLanguageKey.String("swift") + // webjs + // Stability: stable + TelemetrySDKLanguageWebJS = TelemetrySDKLanguageKey.String("webjs") +) + +// Namespace: test +const ( + // TestCaseNameKey is the attribute Key conforming to the "test.case.name" + // semantic conventions. It represents the fully qualified human readable name + // of the [test case]. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "org.example.TestCase1.test1", "example/tests/TestCase1.test1", + // "ExampleTestCase1_test1" + // + // [test case]: https://wikipedia.org/wiki/Test_case + TestCaseNameKey = attribute.Key("test.case.name") + + // TestCaseResultStatusKey is the attribute Key conforming to the + // "test.case.result.status" semantic conventions. It represents the status of + // the actual test case result from test execution. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "pass", "fail" + TestCaseResultStatusKey = attribute.Key("test.case.result.status") + + // TestSuiteNameKey is the attribute Key conforming to the "test.suite.name" + // semantic conventions. It represents the human readable name of a [test suite] + // . + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "TestSuite1" + // + // [test suite]: https://wikipedia.org/wiki/Test_suite + TestSuiteNameKey = attribute.Key("test.suite.name") + + // TestSuiteRunStatusKey is the attribute Key conforming to the + // "test.suite.run.status" semantic conventions. It represents the status of the + // test suite run. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "success", "failure", "skipped", "aborted", "timed_out", + // "in_progress" + TestSuiteRunStatusKey = attribute.Key("test.suite.run.status") +) + +// TestCaseName returns an attribute KeyValue conforming to the "test.case.name" +// semantic conventions. It represents the fully qualified human readable name of +// the [test case]. +// +// [test case]: https://wikipedia.org/wiki/Test_case +func TestCaseName(val string) attribute.KeyValue { + return TestCaseNameKey.String(val) +} + +// TestSuiteName returns an attribute KeyValue conforming to the +// "test.suite.name" semantic conventions. 
It represents the human readable name +// of a [test suite]. +// +// [test suite]: https://wikipedia.org/wiki/Test_suite +func TestSuiteName(val string) attribute.KeyValue { + return TestSuiteNameKey.String(val) +} + +// Enum values for test.case.result.status +var ( + // pass + // Stability: development + TestCaseResultStatusPass = TestCaseResultStatusKey.String("pass") + // fail + // Stability: development + TestCaseResultStatusFail = TestCaseResultStatusKey.String("fail") +) + +// Enum values for test.suite.run.status +var ( + // success + // Stability: development + TestSuiteRunStatusSuccess = TestSuiteRunStatusKey.String("success") + // failure + // Stability: development + TestSuiteRunStatusFailure = TestSuiteRunStatusKey.String("failure") + // skipped + // Stability: development + TestSuiteRunStatusSkipped = TestSuiteRunStatusKey.String("skipped") + // aborted + // Stability: development + TestSuiteRunStatusAborted = TestSuiteRunStatusKey.String("aborted") + // timed_out + // Stability: development + TestSuiteRunStatusTimedOut = TestSuiteRunStatusKey.String("timed_out") + // in_progress + // Stability: development + TestSuiteRunStatusInProgress = TestSuiteRunStatusKey.String("in_progress") +) + +// Namespace: thread +const ( + // ThreadIDKey is the attribute Key conforming to the "thread.id" semantic + // conventions. It represents the current "managed" thread ID (as opposed to OS + // thread ID). + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + ThreadIDKey = attribute.Key("thread.id") + + // ThreadNameKey is the attribute Key conforming to the "thread.name" semantic + // conventions. It represents the current thread name. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: main + ThreadNameKey = attribute.Key("thread.name") +) + +// ThreadID returns an attribute KeyValue conforming to the "thread.id" semantic +// conventions. 
It represents the current "managed" thread ID (as opposed to OS +// thread ID). +func ThreadID(val int) attribute.KeyValue { + return ThreadIDKey.Int(val) +} + +// ThreadName returns an attribute KeyValue conforming to the "thread.name" +// semantic conventions. It represents the current thread name. +func ThreadName(val string) attribute.KeyValue { + return ThreadNameKey.String(val) +} + +// Namespace: tls +const ( + // TLSCipherKey is the attribute Key conforming to the "tls.cipher" semantic + // conventions. It represents the string indicating the [cipher] used during the + // current connection. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "TLS_RSA_WITH_3DES_EDE_CBC_SHA", + // "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256" + // Note: The values allowed for `tls.cipher` MUST be one of the `Descriptions` + // of the [registered TLS Cipher Suits]. + // + // [cipher]: https://datatracker.ietf.org/doc/html/rfc5246#appendix-A.5 + // [registered TLS Cipher Suits]: https://www.iana.org/assignments/tls-parameters/tls-parameters.xhtml#table-tls-parameters-4 + TLSCipherKey = attribute.Key("tls.cipher") + + // TLSClientCertificateKey is the attribute Key conforming to the + // "tls.client.certificate" semantic conventions. It represents the PEM-encoded + // stand-alone certificate offered by the client. This is usually + // mutually-exclusive of `client.certificate_chain` since this value also exists + // in that list. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "MII..." + TLSClientCertificateKey = attribute.Key("tls.client.certificate") + + // TLSClientCertificateChainKey is the attribute Key conforming to the + // "tls.client.certificate_chain" semantic conventions. It represents the array + // of PEM-encoded certificates that make up the certificate chain offered by the + // client. 
This is usually mutually-exclusive of `client.certificate` since that + // value should be the first certificate in the chain. + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "MII...", "MI..." + TLSClientCertificateChainKey = attribute.Key("tls.client.certificate_chain") + + // TLSClientHashMd5Key is the attribute Key conforming to the + // "tls.client.hash.md5" semantic conventions. It represents the certificate + // fingerprint using the MD5 digest of DER-encoded version of certificate + // offered by the client. For consistency with other hash values, this value + // should be formatted as an uppercase hash. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "0F76C7F2C55BFD7D8E8B8F4BFBF0C9EC" + TLSClientHashMd5Key = attribute.Key("tls.client.hash.md5") + + // TLSClientHashSha1Key is the attribute Key conforming to the + // "tls.client.hash.sha1" semantic conventions. It represents the certificate + // fingerprint using the SHA1 digest of DER-encoded version of certificate + // offered by the client. For consistency with other hash values, this value + // should be formatted as an uppercase hash. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "9E393D93138888D288266C2D915214D1D1CCEB2A" + TLSClientHashSha1Key = attribute.Key("tls.client.hash.sha1") + + // TLSClientHashSha256Key is the attribute Key conforming to the + // "tls.client.hash.sha256" semantic conventions. It represents the certificate + // fingerprint using the SHA256 digest of DER-encoded version of certificate + // offered by the client. For consistency with other hash values, this value + // should be formatted as an uppercase hash. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "0687F666A054EF17A08E2F2162EAB4CBC0D265E1D7875BE74BF3C712CA92DAF0" + TLSClientHashSha256Key = attribute.Key("tls.client.hash.sha256") + + // TLSClientIssuerKey is the attribute Key conforming to the "tls.client.issuer" + // semantic conventions. It represents the distinguished name of [subject] of + // the issuer of the x.509 certificate presented by the client. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "CN=Example Root CA, OU=Infrastructure Team, DC=example, DC=com" + // + // [subject]: https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6 + TLSClientIssuerKey = attribute.Key("tls.client.issuer") + + // TLSClientJa3Key is the attribute Key conforming to the "tls.client.ja3" + // semantic conventions. It represents a hash that identifies clients based on + // how they perform an SSL/TLS handshake. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "d4e5b18d6b55c71272893221c96ba240" + TLSClientJa3Key = attribute.Key("tls.client.ja3") + + // TLSClientNotAfterKey is the attribute Key conforming to the + // "tls.client.not_after" semantic conventions. It represents the date/Time + // indicating when client certificate is no longer considered valid. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "2021-01-01T00:00:00.000Z" + TLSClientNotAfterKey = attribute.Key("tls.client.not_after") + + // TLSClientNotBeforeKey is the attribute Key conforming to the + // "tls.client.not_before" semantic conventions. It represents the date/Time + // indicating when client certificate is first considered valid. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "1970-01-01T00:00:00.000Z" + TLSClientNotBeforeKey = attribute.Key("tls.client.not_before") + + // TLSClientSubjectKey is the attribute Key conforming to the + // "tls.client.subject" semantic conventions. It represents the distinguished + // name of subject of the x.509 certificate presented by the client. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "CN=myclient, OU=Documentation Team, DC=example, DC=com" + TLSClientSubjectKey = attribute.Key("tls.client.subject") + + // TLSClientSupportedCiphersKey is the attribute Key conforming to the + // "tls.client.supported_ciphers" semantic conventions. It represents the array + // of ciphers offered by the client during the client hello. + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", + // "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384" + TLSClientSupportedCiphersKey = attribute.Key("tls.client.supported_ciphers") + + // TLSCurveKey is the attribute Key conforming to the "tls.curve" semantic + // conventions. It represents the string indicating the curve used for the given + // cipher, when applicable. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "secp256r1" + TLSCurveKey = attribute.Key("tls.curve") + + // TLSEstablishedKey is the attribute Key conforming to the "tls.established" + // semantic conventions. It represents the boolean flag indicating if the TLS + // negotiation was successful and transitioned to an encrypted tunnel. + // + // Type: boolean + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: true + TLSEstablishedKey = attribute.Key("tls.established") + + // TLSNextProtocolKey is the attribute Key conforming to the "tls.next_protocol" + // semantic conventions. 
It represents the string indicating the protocol being + // tunneled. Per the values in the [IANA registry], this string should be lower + // case. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "http/1.1" + // + // [IANA registry]: https://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml#alpn-protocol-ids + TLSNextProtocolKey = attribute.Key("tls.next_protocol") + + // TLSProtocolNameKey is the attribute Key conforming to the "tls.protocol.name" + // semantic conventions. It represents the normalized lowercase protocol name + // parsed from original string of the negotiated [SSL/TLS protocol version]. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // + // [SSL/TLS protocol version]: https://docs.openssl.org/1.1.1/man3/SSL_get_version/#return-values + TLSProtocolNameKey = attribute.Key("tls.protocol.name") + + // TLSProtocolVersionKey is the attribute Key conforming to the + // "tls.protocol.version" semantic conventions. It represents the numeric part + // of the version parsed from the original string of the negotiated + // [SSL/TLS protocol version]. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "1.2", "3" + // + // [SSL/TLS protocol version]: https://docs.openssl.org/1.1.1/man3/SSL_get_version/#return-values + TLSProtocolVersionKey = attribute.Key("tls.protocol.version") + + // TLSResumedKey is the attribute Key conforming to the "tls.resumed" semantic + // conventions. It represents the boolean flag indicating if this TLS connection + // was resumed from an existing TLS negotiation. 
+ // + // Type: boolean + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: true + TLSResumedKey = attribute.Key("tls.resumed") + + // TLSServerCertificateKey is the attribute Key conforming to the + // "tls.server.certificate" semantic conventions. It represents the PEM-encoded + // stand-alone certificate offered by the server. This is usually + // mutually-exclusive of `server.certificate_chain` since this value also exists + // in that list. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "MII..." + TLSServerCertificateKey = attribute.Key("tls.server.certificate") + + // TLSServerCertificateChainKey is the attribute Key conforming to the + // "tls.server.certificate_chain" semantic conventions. It represents the array + // of PEM-encoded certificates that make up the certificate chain offered by the + // server. This is usually mutually-exclusive of `server.certificate` since that + // value should be the first certificate in the chain. + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "MII...", "MI..." + TLSServerCertificateChainKey = attribute.Key("tls.server.certificate_chain") + + // TLSServerHashMd5Key is the attribute Key conforming to the + // "tls.server.hash.md5" semantic conventions. It represents the certificate + // fingerprint using the MD5 digest of DER-encoded version of certificate + // offered by the server. For consistency with other hash values, this value + // should be formatted as an uppercase hash. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "0F76C7F2C55BFD7D8E8B8F4BFBF0C9EC" + TLSServerHashMd5Key = attribute.Key("tls.server.hash.md5") + + // TLSServerHashSha1Key is the attribute Key conforming to the + // "tls.server.hash.sha1" semantic conventions. 
It represents the certificate + // fingerprint using the SHA1 digest of DER-encoded version of certificate + // offered by the server. For consistency with other hash values, this value + // should be formatted as an uppercase hash. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "9E393D93138888D288266C2D915214D1D1CCEB2A" + TLSServerHashSha1Key = attribute.Key("tls.server.hash.sha1") + + // TLSServerHashSha256Key is the attribute Key conforming to the + // "tls.server.hash.sha256" semantic conventions. It represents the certificate + // fingerprint using the SHA256 digest of DER-encoded version of certificate + // offered by the server. For consistency with other hash values, this value + // should be formatted as an uppercase hash. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "0687F666A054EF17A08E2F2162EAB4CBC0D265E1D7875BE74BF3C712CA92DAF0" + TLSServerHashSha256Key = attribute.Key("tls.server.hash.sha256") + + // TLSServerIssuerKey is the attribute Key conforming to the "tls.server.issuer" + // semantic conventions. It represents the distinguished name of [subject] of + // the issuer of the x.509 certificate presented by the client. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "CN=Example Root CA, OU=Infrastructure Team, DC=example, DC=com" + // + // [subject]: https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6 + TLSServerIssuerKey = attribute.Key("tls.server.issuer") + + // TLSServerJa3sKey is the attribute Key conforming to the "tls.server.ja3s" + // semantic conventions. It represents a hash that identifies servers based on + // how they perform an SSL/TLS handshake. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "d4e5b18d6b55c71272893221c96ba240" + TLSServerJa3sKey = attribute.Key("tls.server.ja3s") + + // TLSServerNotAfterKey is the attribute Key conforming to the + // "tls.server.not_after" semantic conventions. It represents the date/Time + // indicating when server certificate is no longer considered valid. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "2021-01-01T00:00:00.000Z" + TLSServerNotAfterKey = attribute.Key("tls.server.not_after") + + // TLSServerNotBeforeKey is the attribute Key conforming to the + // "tls.server.not_before" semantic conventions. It represents the date/Time + // indicating when server certificate is first considered valid. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "1970-01-01T00:00:00.000Z" + TLSServerNotBeforeKey = attribute.Key("tls.server.not_before") + + // TLSServerSubjectKey is the attribute Key conforming to the + // "tls.server.subject" semantic conventions. It represents the distinguished + // name of subject of the x.509 certificate presented by the server. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "CN=myserver, OU=Documentation Team, DC=example, DC=com" + TLSServerSubjectKey = attribute.Key("tls.server.subject") +) + +// TLSCipher returns an attribute KeyValue conforming to the "tls.cipher" +// semantic conventions. It represents the string indicating the [cipher] used +// during the current connection. +// +// [cipher]: https://datatracker.ietf.org/doc/html/rfc5246#appendix-A.5 +func TLSCipher(val string) attribute.KeyValue { + return TLSCipherKey.String(val) +} + +// TLSClientCertificate returns an attribute KeyValue conforming to the +// "tls.client.certificate" semantic conventions. 
It represents the PEM-encoded +// stand-alone certificate offered by the client. This is usually +// mutually-exclusive of `client.certificate_chain` since this value also exists +// in that list. +func TLSClientCertificate(val string) attribute.KeyValue { + return TLSClientCertificateKey.String(val) +} + +// TLSClientCertificateChain returns an attribute KeyValue conforming to the +// "tls.client.certificate_chain" semantic conventions. It represents the array +// of PEM-encoded certificates that make up the certificate chain offered by the +// client. This is usually mutually-exclusive of `client.certificate` since that +// value should be the first certificate in the chain. +func TLSClientCertificateChain(val ...string) attribute.KeyValue { + return TLSClientCertificateChainKey.StringSlice(val) +} + +// TLSClientHashMd5 returns an attribute KeyValue conforming to the +// "tls.client.hash.md5" semantic conventions. It represents the certificate +// fingerprint using the MD5 digest of DER-encoded version of certificate offered +// by the client. For consistency with other hash values, this value should be +// formatted as an uppercase hash. +func TLSClientHashMd5(val string) attribute.KeyValue { + return TLSClientHashMd5Key.String(val) +} + +// TLSClientHashSha1 returns an attribute KeyValue conforming to the +// "tls.client.hash.sha1" semantic conventions. It represents the certificate +// fingerprint using the SHA1 digest of DER-encoded version of certificate +// offered by the client. For consistency with other hash values, this value +// should be formatted as an uppercase hash. +func TLSClientHashSha1(val string) attribute.KeyValue { + return TLSClientHashSha1Key.String(val) +} + +// TLSClientHashSha256 returns an attribute KeyValue conforming to the +// "tls.client.hash.sha256" semantic conventions. It represents the certificate +// fingerprint using the SHA256 digest of DER-encoded version of certificate +// offered by the client. 
For consistency with other hash values, this value +// should be formatted as an uppercase hash. +func TLSClientHashSha256(val string) attribute.KeyValue { + return TLSClientHashSha256Key.String(val) +} + +// TLSClientIssuer returns an attribute KeyValue conforming to the +// "tls.client.issuer" semantic conventions. It represents the distinguished name +// of [subject] of the issuer of the x.509 certificate presented by the client. +// +// [subject]: https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6 +func TLSClientIssuer(val string) attribute.KeyValue { + return TLSClientIssuerKey.String(val) +} + +// TLSClientJa3 returns an attribute KeyValue conforming to the "tls.client.ja3" +// semantic conventions. It represents a hash that identifies clients based on +// how they perform an SSL/TLS handshake. +func TLSClientJa3(val string) attribute.KeyValue { + return TLSClientJa3Key.String(val) +} + +// TLSClientNotAfter returns an attribute KeyValue conforming to the +// "tls.client.not_after" semantic conventions. It represents the date/Time +// indicating when client certificate is no longer considered valid. +func TLSClientNotAfter(val string) attribute.KeyValue { + return TLSClientNotAfterKey.String(val) +} + +// TLSClientNotBefore returns an attribute KeyValue conforming to the +// "tls.client.not_before" semantic conventions. It represents the date/Time +// indicating when client certificate is first considered valid. +func TLSClientNotBefore(val string) attribute.KeyValue { + return TLSClientNotBeforeKey.String(val) +} + +// TLSClientSubject returns an attribute KeyValue conforming to the +// "tls.client.subject" semantic conventions. It represents the distinguished +// name of subject of the x.509 certificate presented by the client. 
+func TLSClientSubject(val string) attribute.KeyValue { + return TLSClientSubjectKey.String(val) +} + +// TLSClientSupportedCiphers returns an attribute KeyValue conforming to the +// "tls.client.supported_ciphers" semantic conventions. It represents the array +// of ciphers offered by the client during the client hello. +func TLSClientSupportedCiphers(val ...string) attribute.KeyValue { + return TLSClientSupportedCiphersKey.StringSlice(val) +} + +// TLSCurve returns an attribute KeyValue conforming to the "tls.curve" semantic +// conventions. It represents the string indicating the curve used for the given +// cipher, when applicable. +func TLSCurve(val string) attribute.KeyValue { + return TLSCurveKey.String(val) +} + +// TLSEstablished returns an attribute KeyValue conforming to the +// "tls.established" semantic conventions. It represents the boolean flag +// indicating if the TLS negotiation was successful and transitioned to an +// encrypted tunnel. +func TLSEstablished(val bool) attribute.KeyValue { + return TLSEstablishedKey.Bool(val) +} + +// TLSNextProtocol returns an attribute KeyValue conforming to the +// "tls.next_protocol" semantic conventions. It represents the string indicating +// the protocol being tunneled. Per the values in the [IANA registry], this +// string should be lower case. +// +// [IANA registry]: https://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml#alpn-protocol-ids +func TLSNextProtocol(val string) attribute.KeyValue { + return TLSNextProtocolKey.String(val) +} + +// TLSProtocolVersion returns an attribute KeyValue conforming to the +// "tls.protocol.version" semantic conventions. It represents the numeric part of +// the version parsed from the original string of the negotiated +// [SSL/TLS protocol version]. 
+// +// [SSL/TLS protocol version]: https://docs.openssl.org/1.1.1/man3/SSL_get_version/#return-values +func TLSProtocolVersion(val string) attribute.KeyValue { + return TLSProtocolVersionKey.String(val) +} + +// TLSResumed returns an attribute KeyValue conforming to the "tls.resumed" +// semantic conventions. It represents the boolean flag indicating if this TLS +// connection was resumed from an existing TLS negotiation. +func TLSResumed(val bool) attribute.KeyValue { + return TLSResumedKey.Bool(val) +} + +// TLSServerCertificate returns an attribute KeyValue conforming to the +// "tls.server.certificate" semantic conventions. It represents the PEM-encoded +// stand-alone certificate offered by the server. This is usually +// mutually-exclusive of `server.certificate_chain` since this value also exists +// in that list. +func TLSServerCertificate(val string) attribute.KeyValue { + return TLSServerCertificateKey.String(val) +} + +// TLSServerCertificateChain returns an attribute KeyValue conforming to the +// "tls.server.certificate_chain" semantic conventions. It represents the array +// of PEM-encoded certificates that make up the certificate chain offered by the +// server. This is usually mutually-exclusive of `server.certificate` since that +// value should be the first certificate in the chain. +func TLSServerCertificateChain(val ...string) attribute.KeyValue { + return TLSServerCertificateChainKey.StringSlice(val) +} + +// TLSServerHashMd5 returns an attribute KeyValue conforming to the +// "tls.server.hash.md5" semantic conventions. It represents the certificate +// fingerprint using the MD5 digest of DER-encoded version of certificate offered +// by the server. For consistency with other hash values, this value should be +// formatted as an uppercase hash. 
+func TLSServerHashMd5(val string) attribute.KeyValue { + return TLSServerHashMd5Key.String(val) +} + +// TLSServerHashSha1 returns an attribute KeyValue conforming to the +// "tls.server.hash.sha1" semantic conventions. It represents the certificate +// fingerprint using the SHA1 digest of DER-encoded version of certificate +// offered by the server. For consistency with other hash values, this value +// should be formatted as an uppercase hash. +func TLSServerHashSha1(val string) attribute.KeyValue { + return TLSServerHashSha1Key.String(val) +} + +// TLSServerHashSha256 returns an attribute KeyValue conforming to the +// "tls.server.hash.sha256" semantic conventions. It represents the certificate +// fingerprint using the SHA256 digest of DER-encoded version of certificate +// offered by the server. For consistency with other hash values, this value +// should be formatted as an uppercase hash. +func TLSServerHashSha256(val string) attribute.KeyValue { + return TLSServerHashSha256Key.String(val) +} + +// TLSServerIssuer returns an attribute KeyValue conforming to the +// "tls.server.issuer" semantic conventions. It represents the distinguished name +// of [subject] of the issuer of the x.509 certificate presented by the client. +// +// [subject]: https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6 +func TLSServerIssuer(val string) attribute.KeyValue { + return TLSServerIssuerKey.String(val) +} + +// TLSServerJa3s returns an attribute KeyValue conforming to the +// "tls.server.ja3s" semantic conventions. It represents a hash that identifies +// servers based on how they perform an SSL/TLS handshake. +func TLSServerJa3s(val string) attribute.KeyValue { + return TLSServerJa3sKey.String(val) +} + +// TLSServerNotAfter returns an attribute KeyValue conforming to the +// "tls.server.not_after" semantic conventions. It represents the date/Time +// indicating when server certificate is no longer considered valid. 
+func TLSServerNotAfter(val string) attribute.KeyValue { + return TLSServerNotAfterKey.String(val) +} + +// TLSServerNotBefore returns an attribute KeyValue conforming to the +// "tls.server.not_before" semantic conventions. It represents the date/Time +// indicating when server certificate is first considered valid. +func TLSServerNotBefore(val string) attribute.KeyValue { + return TLSServerNotBeforeKey.String(val) +} + +// TLSServerSubject returns an attribute KeyValue conforming to the +// "tls.server.subject" semantic conventions. It represents the distinguished +// name of subject of the x.509 certificate presented by the server. +func TLSServerSubject(val string) attribute.KeyValue { + return TLSServerSubjectKey.String(val) +} + +// Enum values for tls.protocol.name +var ( + // ssl + // Stability: development + TLSProtocolNameSsl = TLSProtocolNameKey.String("ssl") + // tls + // Stability: development + TLSProtocolNameTLS = TLSProtocolNameKey.String("tls") +) + +// Namespace: url +const ( + // URLDomainKey is the attribute Key conforming to the "url.domain" semantic + // conventions. It represents the domain extracted from the `url.full`, such as + // "opentelemetry.io". + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "www.foo.bar", "opentelemetry.io", "3.12.167.2", + // "[1080:0:0:0:8:800:200C:417A]" + // Note: In some cases a URL may refer to an IP and/or port directly, without a + // domain name. In this case, the IP address would go to the domain field. If + // the URL contains a [literal IPv6 address] enclosed by `[` and `]`, the `[` + // and `]` characters should also be captured in the domain field. + // + // [literal IPv6 address]: https://www.rfc-editor.org/rfc/rfc2732#section-2 + URLDomainKey = attribute.Key("url.domain") + + // URLExtensionKey is the attribute Key conforming to the "url.extension" + // semantic conventions. 
It represents the file extension extracted from the + // `url.full`, excluding the leading dot. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "png", "gz" + // Note: The file extension is only set if it exists, as not every url has a + // file extension. When the file name has multiple extensions `example.tar.gz`, + // only the last one should be captured `gz`, not `tar.gz`. + URLExtensionKey = attribute.Key("url.extension") + + // URLFragmentKey is the attribute Key conforming to the "url.fragment" semantic + // conventions. It represents the [URI fragment] component. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "SemConv" + // + // [URI fragment]: https://www.rfc-editor.org/rfc/rfc3986#section-3.5 + URLFragmentKey = attribute.Key("url.fragment") + + // URLFullKey is the attribute Key conforming to the "url.full" semantic + // conventions. It represents the absolute URL describing a network resource + // according to [RFC3986]. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "https://www.foo.bar/search?q=OpenTelemetry#SemConv", "//localhost" + // Note: For network calls, URL usually has + // `scheme://host[:port][path][?query][#fragment]` format, where the fragment + // is not transmitted over HTTP, but if it is known, it SHOULD be included + // nevertheless. + // + // `url.full` MUST NOT contain credentials passed via URL in form of + // `https://username:password@www.example.com/`. + // In such case username and password SHOULD be redacted and attribute's value + // SHOULD be `https://REDACTED:REDACTED@www.example.com/`. + // + // `url.full` SHOULD capture the absolute URL when it is available (or can be + // reconstructed). + // + // Sensitive content provided in `url.full` SHOULD be scrubbed when + // instrumentations can identify it. 
+ // + // + // Query string values for the following keys SHOULD be redacted by default and + // replaced by the + // value `REDACTED`: + // + // - [`AWSAccessKeyId`] + // - [`Signature`] + // - [`sig`] + // - [`X-Goog-Signature`] + // + // This list is subject to change over time. + // + // When a query string value is redacted, the query string key SHOULD still be + // preserved, e.g. + // `https://www.example.com/path?color=blue&sig=REDACTED`. + // + // [RFC3986]: https://www.rfc-editor.org/rfc/rfc3986 + // [`AWSAccessKeyId`]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/RESTAuthentication.html#RESTAuthenticationQueryStringAuth + // [`Signature`]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/RESTAuthentication.html#RESTAuthenticationQueryStringAuth + // [`sig`]: https://learn.microsoft.com/azure/storage/common/storage-sas-overview#sas-token + // [`X-Goog-Signature`]: https://cloud.google.com/storage/docs/access-control/signed-urls + URLFullKey = attribute.Key("url.full") + + // URLOriginalKey is the attribute Key conforming to the "url.original" semantic + // conventions. It represents the unmodified original URL as seen in the event + // source. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "https://www.foo.bar/search?q=OpenTelemetry#SemConv", + // "search?q=OpenTelemetry" + // Note: In network monitoring, the observed URL may be a full URL, whereas in + // access logs, the URL is often just represented as a path. This field is meant + // to represent the URL as it was observed, complete or not. + // `url.original` might contain credentials passed via URL in form of + // `https://username:password@www.example.com/`. In such case password and + // username SHOULD NOT be redacted and attribute's value SHOULD remain the same. + URLOriginalKey = attribute.Key("url.original") + + // URLPathKey is the attribute Key conforming to the "url.path" semantic + // conventions. 
It represents the [URI path] component. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "/search" + // Note: Sensitive content provided in `url.path` SHOULD be scrubbed when + // instrumentations can identify it. + // + // [URI path]: https://www.rfc-editor.org/rfc/rfc3986#section-3.3 + URLPathKey = attribute.Key("url.path") + + // URLPortKey is the attribute Key conforming to the "url.port" semantic + // conventions. It represents the port extracted from the `url.full`. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 443 + URLPortKey = attribute.Key("url.port") + + // URLQueryKey is the attribute Key conforming to the "url.query" semantic + // conventions. It represents the [URI query] component. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "q=OpenTelemetry" + // Note: Sensitive content provided in `url.query` SHOULD be scrubbed when + // instrumentations can identify it. + // + // + // Query string values for the following keys SHOULD be redacted by default and + // replaced by the value `REDACTED`: + // + // - [`AWSAccessKeyId`] + // - [`Signature`] + // - [`sig`] + // - [`X-Goog-Signature`] + // + // This list is subject to change over time. + // + // When a query string value is redacted, the query string key SHOULD still be + // preserved, e.g. + // `q=OpenTelemetry&sig=REDACTED`. 
+ // + // [URI query]: https://www.rfc-editor.org/rfc/rfc3986#section-3.4 + // [`AWSAccessKeyId`]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/RESTAuthentication.html#RESTAuthenticationQueryStringAuth + // [`Signature`]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/RESTAuthentication.html#RESTAuthenticationQueryStringAuth + // [`sig`]: https://learn.microsoft.com/azure/storage/common/storage-sas-overview#sas-token + // [`X-Goog-Signature`]: https://cloud.google.com/storage/docs/access-control/signed-urls + URLQueryKey = attribute.Key("url.query") + + // URLRegisteredDomainKey is the attribute Key conforming to the + // "url.registered_domain" semantic conventions. It represents the highest + // registered url domain, stripped of the subdomain. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "example.com", "foo.co.uk" + // Note: This value can be determined precisely with the [public suffix list]. + // For example, the registered domain for `foo.example.com` is `example.com`. + // Trying to approximate this by simply taking the last two labels will not work + // well for TLDs such as `co.uk`. + // + // [public suffix list]: https://publicsuffix.org/ + URLRegisteredDomainKey = attribute.Key("url.registered_domain") + + // URLSchemeKey is the attribute Key conforming to the "url.scheme" semantic + // conventions. It represents the [URI scheme] component identifying the used + // protocol. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "https", "ftp", "telnet" + // + // [URI scheme]: https://www.rfc-editor.org/rfc/rfc3986#section-3.1 + URLSchemeKey = attribute.Key("url.scheme") + + // URLSubdomainKey is the attribute Key conforming to the "url.subdomain" + // semantic conventions. It represents the subdomain portion of a fully + // qualified domain name includes all of the names except the host name under + // the registered_domain. 
In a partially qualified domain, or if the + // qualification level of the full name cannot be determined, subdomain contains + // all of the names below the registered domain. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "east", "sub2.sub1" + // Note: The subdomain portion of `www.east.mydomain.co.uk` is `east`. If the + // domain has multiple levels of subdomain, such as `sub2.sub1.example.com`, the + // subdomain field should contain `sub2.sub1`, with no trailing period. + URLSubdomainKey = attribute.Key("url.subdomain") + + // URLTemplateKey is the attribute Key conforming to the "url.template" semantic + // conventions. It represents the low-cardinality template of an + // [absolute path reference]. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "/users/{id}", "/users/:id", "/users?id={id}" + // + // [absolute path reference]: https://www.rfc-editor.org/rfc/rfc3986#section-4.2 + URLTemplateKey = attribute.Key("url.template") + + // URLTopLevelDomainKey is the attribute Key conforming to the + // "url.top_level_domain" semantic conventions. It represents the effective top + // level domain (eTLD), also known as the domain suffix, is the last part of the + // domain name. For example, the top level domain for example.com is `com`. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "com", "co.uk" + // Note: This value can be determined precisely with the [public suffix list]. + // + // [public suffix list]: https://publicsuffix.org/ + URLTopLevelDomainKey = attribute.Key("url.top_level_domain") +) + +// URLDomain returns an attribute KeyValue conforming to the "url.domain" +// semantic conventions. It represents the domain extracted from the `url.full`, +// such as "opentelemetry.io". 
+func URLDomain(val string) attribute.KeyValue { + return URLDomainKey.String(val) +} + +// URLExtension returns an attribute KeyValue conforming to the "url.extension" +// semantic conventions. It represents the file extension extracted from the +// `url.full`, excluding the leading dot. +func URLExtension(val string) attribute.KeyValue { + return URLExtensionKey.String(val) +} + +// URLFragment returns an attribute KeyValue conforming to the "url.fragment" +// semantic conventions. It represents the [URI fragment] component. +// +// [URI fragment]: https://www.rfc-editor.org/rfc/rfc3986#section-3.5 +func URLFragment(val string) attribute.KeyValue { + return URLFragmentKey.String(val) +} + +// URLFull returns an attribute KeyValue conforming to the "url.full" semantic +// conventions. It represents the absolute URL describing a network resource +// according to [RFC3986]. +// +// [RFC3986]: https://www.rfc-editor.org/rfc/rfc3986 +func URLFull(val string) attribute.KeyValue { + return URLFullKey.String(val) +} + +// URLOriginal returns an attribute KeyValue conforming to the "url.original" +// semantic conventions. It represents the unmodified original URL as seen in the +// event source. +func URLOriginal(val string) attribute.KeyValue { + return URLOriginalKey.String(val) +} + +// URLPath returns an attribute KeyValue conforming to the "url.path" semantic +// conventions. It represents the [URI path] component. +// +// [URI path]: https://www.rfc-editor.org/rfc/rfc3986#section-3.3 +func URLPath(val string) attribute.KeyValue { + return URLPathKey.String(val) +} + +// URLPort returns an attribute KeyValue conforming to the "url.port" semantic +// conventions. It represents the port extracted from the `url.full`. +func URLPort(val int) attribute.KeyValue { + return URLPortKey.Int(val) +} + +// URLQuery returns an attribute KeyValue conforming to the "url.query" semantic +// conventions. It represents the [URI query] component. 
+// +// [URI query]: https://www.rfc-editor.org/rfc/rfc3986#section-3.4 +func URLQuery(val string) attribute.KeyValue { + return URLQueryKey.String(val) +} + +// URLRegisteredDomain returns an attribute KeyValue conforming to the +// "url.registered_domain" semantic conventions. It represents the highest +// registered url domain, stripped of the subdomain. +func URLRegisteredDomain(val string) attribute.KeyValue { + return URLRegisteredDomainKey.String(val) +} + +// URLScheme returns an attribute KeyValue conforming to the "url.scheme" +// semantic conventions. It represents the [URI scheme] component identifying the +// used protocol. +// +// [URI scheme]: https://www.rfc-editor.org/rfc/rfc3986#section-3.1 +func URLScheme(val string) attribute.KeyValue { + return URLSchemeKey.String(val) +} + +// URLSubdomain returns an attribute KeyValue conforming to the "url.subdomain" +// semantic conventions. It represents the subdomain portion of a fully qualified +// domain name includes all of the names except the host name under the +// registered_domain. In a partially qualified domain, or if the qualification +// level of the full name cannot be determined, subdomain contains all of the +// names below the registered domain. +func URLSubdomain(val string) attribute.KeyValue { + return URLSubdomainKey.String(val) +} + +// URLTemplate returns an attribute KeyValue conforming to the "url.template" +// semantic conventions. It represents the low-cardinality template of an +// [absolute path reference]. +// +// [absolute path reference]: https://www.rfc-editor.org/rfc/rfc3986#section-4.2 +func URLTemplate(val string) attribute.KeyValue { + return URLTemplateKey.String(val) +} + +// URLTopLevelDomain returns an attribute KeyValue conforming to the +// "url.top_level_domain" semantic conventions. It represents the effective top +// level domain (eTLD), also known as the domain suffix, is the last part of the +// domain name. 
For example, the top level domain for example.com is `com`. +func URLTopLevelDomain(val string) attribute.KeyValue { + return URLTopLevelDomainKey.String(val) +} + +// Namespace: user +const ( + // UserEmailKey is the attribute Key conforming to the "user.email" semantic + // conventions. It represents the user email address. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "a.einstein@example.com" + UserEmailKey = attribute.Key("user.email") + + // UserFullNameKey is the attribute Key conforming to the "user.full_name" + // semantic conventions. It represents the user's full name. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Albert Einstein" + UserFullNameKey = attribute.Key("user.full_name") + + // UserHashKey is the attribute Key conforming to the "user.hash" semantic + // conventions. It represents the unique user hash to correlate information for + // a user in anonymized form. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "364fc68eaf4c8acec74a4e52d7d1feaa" + // Note: Useful if `user.id` or `user.name` contain confidential information and + // cannot be used. + UserHashKey = attribute.Key("user.hash") + + // UserIDKey is the attribute Key conforming to the "user.id" semantic + // conventions. It represents the unique identifier of the user. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "S-1-5-21-202424912787-2692429404-2351956786-1000" + UserIDKey = attribute.Key("user.id") + + // UserNameKey is the attribute Key conforming to the "user.name" semantic + // conventions. It represents the short name or login/username of the user. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "a.einstein" + UserNameKey = attribute.Key("user.name") + + // UserRolesKey is the attribute Key conforming to the "user.roles" semantic + // conventions. It represents the array of user roles at the time of the event. + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "admin", "reporting_user" + UserRolesKey = attribute.Key("user.roles") +) + +// UserEmail returns an attribute KeyValue conforming to the "user.email" +// semantic conventions. It represents the user email address. +func UserEmail(val string) attribute.KeyValue { + return UserEmailKey.String(val) +} + +// UserFullName returns an attribute KeyValue conforming to the "user.full_name" +// semantic conventions. It represents the user's full name. +func UserFullName(val string) attribute.KeyValue { + return UserFullNameKey.String(val) +} + +// UserHash returns an attribute KeyValue conforming to the "user.hash" semantic +// conventions. It represents the unique user hash to correlate information for a +// user in anonymized form. +func UserHash(val string) attribute.KeyValue { + return UserHashKey.String(val) +} + +// UserID returns an attribute KeyValue conforming to the "user.id" semantic +// conventions. It represents the unique identifier of the user. +func UserID(val string) attribute.KeyValue { + return UserIDKey.String(val) +} + +// UserName returns an attribute KeyValue conforming to the "user.name" semantic +// conventions. It represents the short name or login/username of the user. +func UserName(val string) attribute.KeyValue { + return UserNameKey.String(val) +} + +// UserRoles returns an attribute KeyValue conforming to the "user.roles" +// semantic conventions. It represents the array of user roles at the time of the +// event. 
+func UserRoles(val ...string) attribute.KeyValue { + return UserRolesKey.StringSlice(val) +} + +// Namespace: user_agent +const ( + // UserAgentNameKey is the attribute Key conforming to the "user_agent.name" + // semantic conventions. It represents the name of the user-agent extracted from + // original. Usually refers to the browser's name. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Safari", "YourApp" + // Note: [Example] of extracting browser's name from original string. In the + // case of using a user-agent for non-browser products, such as microservices + // with multiple names/versions inside the `user_agent.original`, the most + // significant name SHOULD be selected. In such a scenario it should align with + // `user_agent.version` + // + // [Example]: https://www.whatsmyua.info + UserAgentNameKey = attribute.Key("user_agent.name") + + // UserAgentOriginalKey is the attribute Key conforming to the + // "user_agent.original" semantic conventions. It represents the value of the + // [HTTP User-Agent] header sent by the client. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "CERN-LineMode/2.15 libwww/2.17b3", "Mozilla/5.0 (iPhone; CPU + // iPhone OS 14_7_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) + // Version/14.1.2 Mobile/15E148 Safari/604.1", "YourApp/1.0.0 + // grpc-java-okhttp/1.27.2" + // + // [HTTP User-Agent]: https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent + UserAgentOriginalKey = attribute.Key("user_agent.original") + + // UserAgentOSNameKey is the attribute Key conforming to the + // "user_agent.os.name" semantic conventions. It represents the human readable + // operating system name. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "iOS", "Android", "Ubuntu" + // Note: For mapping user agent strings to OS names, libraries such as + // [ua-parser] can be utilized. + // + // [ua-parser]: https://github.com/ua-parser + UserAgentOSNameKey = attribute.Key("user_agent.os.name") + + // UserAgentOSVersionKey is the attribute Key conforming to the + // "user_agent.os.version" semantic conventions. It represents the version + // string of the operating system as defined in [Version Attributes]. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "14.2.1", "18.04.1" + // Note: For mapping user agent strings to OS versions, libraries such as + // [ua-parser] can be utilized. + // + // [Version Attributes]: /docs/resource/README.md#version-attributes + // [ua-parser]: https://github.com/ua-parser + UserAgentOSVersionKey = attribute.Key("user_agent.os.version") + + // UserAgentSyntheticTypeKey is the attribute Key conforming to the + // "user_agent.synthetic.type" semantic conventions. It represents the specifies + // the category of synthetic traffic, such as tests or bots. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // Note: This attribute MAY be derived from the contents of the + // `user_agent.original` attribute. Components that populate the attribute are + // responsible for determining what they consider to be synthetic bot or test + // traffic. This attribute can either be set for self-identification purposes, + // or on telemetry detected to be generated as a result of a synthetic request. + // This attribute is useful for distinguishing between genuine client traffic + // and synthetic traffic generated by bots or tests. 
+ UserAgentSyntheticTypeKey = attribute.Key("user_agent.synthetic.type") + + // UserAgentVersionKey is the attribute Key conforming to the + // "user_agent.version" semantic conventions. It represents the version of the + // user-agent extracted from original. Usually refers to the browser's version. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "14.1.2", "1.0.0" + // Note: [Example] of extracting browser's version from original string. In the + // case of using a user-agent for non-browser products, such as microservices + // with multiple names/versions inside the `user_agent.original`, the most + // significant version SHOULD be selected. In such a scenario it should align + // with `user_agent.name` + // + // [Example]: https://www.whatsmyua.info + UserAgentVersionKey = attribute.Key("user_agent.version") +) + +// UserAgentName returns an attribute KeyValue conforming to the +// "user_agent.name" semantic conventions. It represents the name of the +// user-agent extracted from original. Usually refers to the browser's name. +func UserAgentName(val string) attribute.KeyValue { + return UserAgentNameKey.String(val) +} + +// UserAgentOriginal returns an attribute KeyValue conforming to the +// "user_agent.original" semantic conventions. It represents the value of the +// [HTTP User-Agent] header sent by the client. +// +// [HTTP User-Agent]: https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent +func UserAgentOriginal(val string) attribute.KeyValue { + return UserAgentOriginalKey.String(val) +} + +// UserAgentOSName returns an attribute KeyValue conforming to the +// "user_agent.os.name" semantic conventions. It represents the human readable +// operating system name. +func UserAgentOSName(val string) attribute.KeyValue { + return UserAgentOSNameKey.String(val) +} + +// UserAgentOSVersion returns an attribute KeyValue conforming to the +// "user_agent.os.version" semantic conventions. 
It represents the version string +// of the operating system as defined in [Version Attributes]. +// +// [Version Attributes]: /docs/resource/README.md#version-attributes +func UserAgentOSVersion(val string) attribute.KeyValue { + return UserAgentOSVersionKey.String(val) +} + +// UserAgentVersion returns an attribute KeyValue conforming to the +// "user_agent.version" semantic conventions. It represents the version of the +// user-agent extracted from original. Usually refers to the browser's version. +func UserAgentVersion(val string) attribute.KeyValue { + return UserAgentVersionKey.String(val) +} + +// Enum values for user_agent.synthetic.type +var ( + // Bot source. + // Stability: development + UserAgentSyntheticTypeBot = UserAgentSyntheticTypeKey.String("bot") + // Synthetic test source. + // Stability: development + UserAgentSyntheticTypeTest = UserAgentSyntheticTypeKey.String("test") +) + +// Namespace: vcs +const ( + // VCSChangeIDKey is the attribute Key conforming to the "vcs.change.id" + // semantic conventions. It represents the ID of the change (pull request/merge + // request/changelist) if applicable. This is usually a unique (within + // repository) identifier generated by the VCS system. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "123" + VCSChangeIDKey = attribute.Key("vcs.change.id") + + // VCSChangeStateKey is the attribute Key conforming to the "vcs.change.state" + // semantic conventions. It represents the state of the change (pull + // request/merge request/changelist). + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "open", "closed", "merged" + VCSChangeStateKey = attribute.Key("vcs.change.state") + + // VCSChangeTitleKey is the attribute Key conforming to the "vcs.change.title" + // semantic conventions. It represents the human readable title of the change + // (pull request/merge request/changelist). 
This title is often a brief summary + // of the change and may get merged in to a ref as the commit summary. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Fixes broken thing", "feat: add my new feature", "[chore] update + // dependency" + VCSChangeTitleKey = attribute.Key("vcs.change.title") + + // VCSLineChangeTypeKey is the attribute Key conforming to the + // "vcs.line_change.type" semantic conventions. It represents the type of line + // change being measured on a branch or change. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "added", "removed" + VCSLineChangeTypeKey = attribute.Key("vcs.line_change.type") + + // VCSOwnerNameKey is the attribute Key conforming to the "vcs.owner.name" + // semantic conventions. It represents the group owner within the version + // control system. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "my-org", "myteam", "business-unit" + VCSOwnerNameKey = attribute.Key("vcs.owner.name") + + // VCSProviderNameKey is the attribute Key conforming to the "vcs.provider.name" + // semantic conventions. It represents the name of the version control system + // provider. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "github", "gitlab", "gitea", "bitbucket" + VCSProviderNameKey = attribute.Key("vcs.provider.name") + + // VCSRefBaseNameKey is the attribute Key conforming to the "vcs.ref.base.name" + // semantic conventions. It represents the name of the [reference] such as + // **branch** or **tag** in the repository. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "my-feature-branch", "tag-1-test" + // Note: `base` refers to the starting point of a change. 
For example, `main` + // would be the base reference of type branch if you've created a new + // reference of type branch from it and created new commits. + // + // [reference]: https://git-scm.com/docs/gitglossary#def_ref + VCSRefBaseNameKey = attribute.Key("vcs.ref.base.name") + + // VCSRefBaseRevisionKey is the attribute Key conforming to the + // "vcs.ref.base.revision" semantic conventions. It represents the revision, + // literally [revised version], The revision most often refers to a commit + // object in Git, or a revision number in SVN. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "9d59409acf479dfa0df1aa568182e43e43df8bbe28d60fcf2bc52e30068802cc", + // "main", "123", "HEAD" + // Note: `base` refers to the starting point of a change. For example, `main` + // would be the base reference of type branch if you've created a new + // reference of type branch from it and created new commits. The + // revision can be a full [hash value (see + // glossary)], + // of the recorded change to a ref within a repository pointing to a + // commit [commit] object. It does + // not necessarily have to be a hash; it can simply define a [revision + // number] + // which is an integer that is monotonically increasing. In cases where + // it is identical to the `ref.base.name`, it SHOULD still be included. + // It is up to the implementer to decide which value to set as the + // revision based on the VCS system and situational context. 
+ // + // [revised version]: https://www.merriam-webster.com/dictionary/revision + // [hash value (see + // glossary)]: https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-5.pdf + // [commit]: https://git-scm.com/docs/git-commit + // [revision + // number]: https://svnbook.red-bean.com/en/1.7/svn.tour.revs.specifiers.html + VCSRefBaseRevisionKey = attribute.Key("vcs.ref.base.revision") + + // VCSRefBaseTypeKey is the attribute Key conforming to the "vcs.ref.base.type" + // semantic conventions. It represents the type of the [reference] in the + // repository. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "branch", "tag" + // Note: `base` refers to the starting point of a change. For example, `main` + // would be the base reference of type branch if you've created a new + // reference of type branch from it and created new commits. + // + // [reference]: https://git-scm.com/docs/gitglossary#def_ref + VCSRefBaseTypeKey = attribute.Key("vcs.ref.base.type") + + // VCSRefHeadNameKey is the attribute Key conforming to the "vcs.ref.head.name" + // semantic conventions. It represents the name of the [reference] such as + // **branch** or **tag** in the repository. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "my-feature-branch", "tag-1-test" + // Note: `head` refers to where you are right now; the current reference at a + // given time. + // + // [reference]: https://git-scm.com/docs/gitglossary#def_ref + VCSRefHeadNameKey = attribute.Key("vcs.ref.head.name") + + // VCSRefHeadRevisionKey is the attribute Key conforming to the + // "vcs.ref.head.revision" semantic conventions. It represents the revision, + // literally [revised version], The revision most often refers to a commit + // object in Git, or a revision number in SVN. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "9d59409acf479dfa0df1aa568182e43e43df8bbe28d60fcf2bc52e30068802cc", + // "main", "123", "HEAD" + // Note: `head` refers to where you are right now; the current reference at a + // given time.The revision can be a full [hash value (see + // glossary)], + // of the recorded change to a ref within a repository pointing to a + // commit [commit] object. It does + // not necessarily have to be a hash; it can simply define a [revision + // number] + // which is an integer that is monotonically increasing. In cases where + // it is identical to the `ref.head.name`, it SHOULD still be included. + // It is up to the implementer to decide which value to set as the + // revision based on the VCS system and situational context. + // + // [revised version]: https://www.merriam-webster.com/dictionary/revision + // [hash value (see + // glossary)]: https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-5.pdf + // [commit]: https://git-scm.com/docs/git-commit + // [revision + // number]: https://svnbook.red-bean.com/en/1.7/svn.tour.revs.specifiers.html + VCSRefHeadRevisionKey = attribute.Key("vcs.ref.head.revision") + + // VCSRefHeadTypeKey is the attribute Key conforming to the "vcs.ref.head.type" + // semantic conventions. It represents the type of the [reference] in the + // repository. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "branch", "tag" + // Note: `head` refers to where you are right now; the current reference at a + // given time. + // + // [reference]: https://git-scm.com/docs/gitglossary#def_ref + VCSRefHeadTypeKey = attribute.Key("vcs.ref.head.type") + + // VCSRefTypeKey is the attribute Key conforming to the "vcs.ref.type" semantic + // conventions. It represents the type of the [reference] in the repository. 
+ // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "branch", "tag" + // + // [reference]: https://git-scm.com/docs/gitglossary#def_ref + VCSRefTypeKey = attribute.Key("vcs.ref.type") + + // VCSRepositoryNameKey is the attribute Key conforming to the + // "vcs.repository.name" semantic conventions. It represents the human readable + // name of the repository. It SHOULD NOT include any additional identifier like + // Group/SubGroup in GitLab or organization in GitHub. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "semantic-conventions", "my-cool-repo" + // Note: Due to it only being the name, it can clash with forks of the same + // repository if collecting telemetry across multiple orgs or groups in + // the same backends. + VCSRepositoryNameKey = attribute.Key("vcs.repository.name") + + // VCSRepositoryURLFullKey is the attribute Key conforming to the + // "vcs.repository.url.full" semantic conventions. It represents the + // [canonical URL] of the repository providing the complete HTTP(S) address in + // order to locate and identify the repository through a browser. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // "https://github.com/opentelemetry/open-telemetry-collector-contrib", + // "https://gitlab.com/my-org/my-project/my-projects-project/repo" + // Note: In Git Version Control Systems, the canonical URL SHOULD NOT include + // the `.git` extension. + // + // [canonical URL]: https://support.google.com/webmasters/answer/10347851?hl=en#:~:text=A%20canonical%20URL%20is%20the,Google%20chooses%20one%20as%20canonical. + VCSRepositoryURLFullKey = attribute.Key("vcs.repository.url.full") + + // VCSRevisionDeltaDirectionKey is the attribute Key conforming to the + // "vcs.revision_delta.direction" semantic conventions. It represents the type + // of revision comparison. 
+ // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "ahead", "behind" + VCSRevisionDeltaDirectionKey = attribute.Key("vcs.revision_delta.direction") +) + +// VCSChangeID returns an attribute KeyValue conforming to the "vcs.change.id" +// semantic conventions. It represents the ID of the change (pull request/merge +// request/changelist) if applicable. This is usually a unique (within +// repository) identifier generated by the VCS system. +func VCSChangeID(val string) attribute.KeyValue { + return VCSChangeIDKey.String(val) +} + +// VCSChangeTitle returns an attribute KeyValue conforming to the +// "vcs.change.title" semantic conventions. It represents the human readable +// title of the change (pull request/merge request/changelist). This title is +// often a brief summary of the change and may get merged in to a ref as the +// commit summary. +func VCSChangeTitle(val string) attribute.KeyValue { + return VCSChangeTitleKey.String(val) +} + +// VCSOwnerName returns an attribute KeyValue conforming to the "vcs.owner.name" +// semantic conventions. It represents the group owner within the version control +// system. +func VCSOwnerName(val string) attribute.KeyValue { + return VCSOwnerNameKey.String(val) +} + +// VCSRefBaseName returns an attribute KeyValue conforming to the +// "vcs.ref.base.name" semantic conventions. It represents the name of the +// [reference] such as **branch** or **tag** in the repository. +// +// [reference]: https://git-scm.com/docs/gitglossary#def_ref +func VCSRefBaseName(val string) attribute.KeyValue { + return VCSRefBaseNameKey.String(val) +} + +// VCSRefBaseRevision returns an attribute KeyValue conforming to the +// "vcs.ref.base.revision" semantic conventions. It represents the revision, +// literally [revised version], The revision most often refers to a commit object +// in Git, or a revision number in SVN. 
+// +// [revised version]: https://www.merriam-webster.com/dictionary/revision +func VCSRefBaseRevision(val string) attribute.KeyValue { + return VCSRefBaseRevisionKey.String(val) +} + +// VCSRefHeadName returns an attribute KeyValue conforming to the +// "vcs.ref.head.name" semantic conventions. It represents the name of the +// [reference] such as **branch** or **tag** in the repository. +// +// [reference]: https://git-scm.com/docs/gitglossary#def_ref +func VCSRefHeadName(val string) attribute.KeyValue { + return VCSRefHeadNameKey.String(val) +} + +// VCSRefHeadRevision returns an attribute KeyValue conforming to the +// "vcs.ref.head.revision" semantic conventions. It represents the revision, +// literally [revised version], The revision most often refers to a commit object +// in Git, or a revision number in SVN. +// +// [revised version]: https://www.merriam-webster.com/dictionary/revision +func VCSRefHeadRevision(val string) attribute.KeyValue { + return VCSRefHeadRevisionKey.String(val) +} + +// VCSRepositoryName returns an attribute KeyValue conforming to the +// "vcs.repository.name" semantic conventions. It represents the human readable +// name of the repository. It SHOULD NOT include any additional identifier like +// Group/SubGroup in GitLab or organization in GitHub. +func VCSRepositoryName(val string) attribute.KeyValue { + return VCSRepositoryNameKey.String(val) +} + +// VCSRepositoryURLFull returns an attribute KeyValue conforming to the +// "vcs.repository.url.full" semantic conventions. It represents the +// [canonical URL] of the repository providing the complete HTTP(S) address in +// order to locate and identify the repository through a browser. +// +// [canonical URL]: https://support.google.com/webmasters/answer/10347851?hl=en#:~:text=A%20canonical%20URL%20is%20the,Google%20chooses%20one%20as%20canonical. 
+func VCSRepositoryURLFull(val string) attribute.KeyValue { + return VCSRepositoryURLFullKey.String(val) +} + +// Enum values for vcs.change.state +var ( + // Open means the change is currently active and under review. It hasn't been + // merged into the target branch yet, and it's still possible to make changes or + // add comments. + // Stability: development + VCSChangeStateOpen = VCSChangeStateKey.String("open") + // WIP (work-in-progress, draft) means the change is still in progress and not + // yet ready for a full review. It might still undergo significant changes. + // Stability: development + VCSChangeStateWip = VCSChangeStateKey.String("wip") + // Closed means the merge request has been closed without merging. This can + // happen for various reasons, such as the changes being deemed unnecessary, the + // issue being resolved in another way, or the author deciding to withdraw the + // request. + // Stability: development + VCSChangeStateClosed = VCSChangeStateKey.String("closed") + // Merged indicates that the change has been successfully integrated into the + // target codebase. + // Stability: development + VCSChangeStateMerged = VCSChangeStateKey.String("merged") +) + +// Enum values for vcs.line_change.type +var ( + // How many lines were added. + // Stability: development + VCSLineChangeTypeAdded = VCSLineChangeTypeKey.String("added") + // How many lines were removed. 
+ // Stability: development + VCSLineChangeTypeRemoved = VCSLineChangeTypeKey.String("removed") +) + +// Enum values for vcs.provider.name +var ( + // [GitHub] + // Stability: development + // + // [GitHub]: https://github.com + VCSProviderNameGithub = VCSProviderNameKey.String("github") + // [GitLab] + // Stability: development + // + // [GitLab]: https://gitlab.com + VCSProviderNameGitlab = VCSProviderNameKey.String("gitlab") + // [Gitea] + // Stability: development + // + // [Gitea]: https://gitea.io + VCSProviderNameGitea = VCSProviderNameKey.String("gitea") + // [Bitbucket] + // Stability: development + // + // [Bitbucket]: https://bitbucket.org + VCSProviderNameBitbucket = VCSProviderNameKey.String("bitbucket") +) + +// Enum values for vcs.ref.base.type +var ( + // [branch] + // Stability: development + // + // [branch]: https://git-scm.com/docs/gitglossary#Documentation/gitglossary.txt-aiddefbranchabranch + VCSRefBaseTypeBranch = VCSRefBaseTypeKey.String("branch") + // [tag] + // Stability: development + // + // [tag]: https://git-scm.com/docs/gitglossary#Documentation/gitglossary.txt-aiddeftagatag + VCSRefBaseTypeTag = VCSRefBaseTypeKey.String("tag") +) + +// Enum values for vcs.ref.head.type +var ( + // [branch] + // Stability: development + // + // [branch]: https://git-scm.com/docs/gitglossary#Documentation/gitglossary.txt-aiddefbranchabranch + VCSRefHeadTypeBranch = VCSRefHeadTypeKey.String("branch") + // [tag] + // Stability: development + // + // [tag]: https://git-scm.com/docs/gitglossary#Documentation/gitglossary.txt-aiddeftagatag + VCSRefHeadTypeTag = VCSRefHeadTypeKey.String("tag") +) + +// Enum values for vcs.ref.type +var ( + // [branch] + // Stability: development + // + // [branch]: https://git-scm.com/docs/gitglossary#Documentation/gitglossary.txt-aiddefbranchabranch + VCSRefTypeBranch = VCSRefTypeKey.String("branch") + // [tag] + // Stability: development + // + // [tag]: 
https://git-scm.com/docs/gitglossary#Documentation/gitglossary.txt-aiddeftagatag + VCSRefTypeTag = VCSRefTypeKey.String("tag") +) + +// Enum values for vcs.revision_delta.direction +var ( + // How many revisions the change is behind the target ref. + // Stability: development + VCSRevisionDeltaDirectionBehind = VCSRevisionDeltaDirectionKey.String("behind") + // How many revisions the change is ahead of the target ref. + // Stability: development + VCSRevisionDeltaDirectionAhead = VCSRevisionDeltaDirectionKey.String("ahead") +) + +// Namespace: webengine +const ( + // WebEngineDescriptionKey is the attribute Key conforming to the + // "webengine.description" semantic conventions. It represents the additional + // description of the web engine (e.g. detailed version and edition + // information). + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) - + // 2.2.2.Final" + WebEngineDescriptionKey = attribute.Key("webengine.description") + + // WebEngineNameKey is the attribute Key conforming to the "webengine.name" + // semantic conventions. It represents the name of the web engine. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "WildFly" + WebEngineNameKey = attribute.Key("webengine.name") + + // WebEngineVersionKey is the attribute Key conforming to the + // "webengine.version" semantic conventions. It represents the version of the + // web engine. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "21.0.0" + WebEngineVersionKey = attribute.Key("webengine.version") +) + +// WebEngineDescription returns an attribute KeyValue conforming to the +// "webengine.description" semantic conventions. It represents the additional +// description of the web engine (e.g. detailed version and edition information). 
+func WebEngineDescription(val string) attribute.KeyValue { + return WebEngineDescriptionKey.String(val) +} + +// WebEngineName returns an attribute KeyValue conforming to the "webengine.name" +// semantic conventions. It represents the name of the web engine. +func WebEngineName(val string) attribute.KeyValue { + return WebEngineNameKey.String(val) +} + +// WebEngineVersion returns an attribute KeyValue conforming to the +// "webengine.version" semantic conventions. It represents the version of the web +// engine. +func WebEngineVersion(val string) attribute.KeyValue { + return WebEngineVersionKey.String(val) +} + +// Namespace: zos +const ( + // ZOSSmfIDKey is the attribute Key conforming to the "zos.smf.id" semantic + // conventions. It represents the System Management Facility (SMF) Identifier + // uniquely identifies a z/OS system within a SYSPLEX or mainframe environment + // and is used for system and performance analysis. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "SYS1" + ZOSSmfIDKey = attribute.Key("zos.smf.id") + + // ZOSSysplexNameKey is the attribute Key conforming to the "zos.sysplex.name" + // semantic conventions. It represents the name of the SYSPLEX to which the z/OS + // system belongs to. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "SYSPLEX1" + ZOSSysplexNameKey = attribute.Key("zos.sysplex.name") +) + +// ZOSSmfID returns an attribute KeyValue conforming to the "zos.smf.id" semantic +// conventions. It represents the System Management Facility (SMF) Identifier +// uniquely identifies a z/OS system within a SYSPLEX or mainframe environment +// and is used for system and performance analysis. +func ZOSSmfID(val string) attribute.KeyValue { + return ZOSSmfIDKey.String(val) +} + +// ZOSSysplexName returns an attribute KeyValue conforming to the +// "zos.sysplex.name" semantic conventions. 
It represents the name of the SYSPLEX +// to which the z/OS system belongs to. +func ZOSSysplexName(val string) attribute.KeyValue { + return ZOSSysplexNameKey.String(val) +} \ No newline at end of file diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/doc.go b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/doc.go new file mode 100644 index 000000000..111010321 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/doc.go @@ -0,0 +1,9 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package semconv implements OpenTelemetry semantic conventions. +// +// OpenTelemetry semantic conventions are agreed standardized naming +// patterns for OpenTelemetry things. This package represents the v1.37.0 +// version of the OpenTelemetry semantic conventions. +package semconv // import "go.opentelemetry.io/otel/semconv/v1.37.0" diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/error_type.go b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/error_type.go new file mode 100644 index 000000000..666bded4b --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/error_type.go @@ -0,0 +1,31 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.37.0" + +import ( + "fmt" + "reflect" + + "go.opentelemetry.io/otel/attribute" +) + +// ErrorType returns an [attribute.KeyValue] identifying the error type of err. +func ErrorType(err error) attribute.KeyValue { + if err == nil { + return ErrorTypeOther + } + t := reflect.TypeOf(err) + var value string + if t.PkgPath() == "" && t.Name() == "" { + // Likely a builtin type. 
+ value = t.String() + } else { + value = fmt.Sprintf("%s.%s", t.PkgPath(), t.Name()) + } + + if value == "" { + return ErrorTypeOther + } + return ErrorTypeKey.String(value) +} diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/exception.go b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/exception.go new file mode 100644 index 000000000..e67469a4f --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/exception.go @@ -0,0 +1,9 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.37.0" + +const ( + // ExceptionEventName is the name of the Span event representing an exception. + ExceptionEventName = "exception" +) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/otelconv/metric.go b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/otelconv/metric.go new file mode 100644 index 000000000..a78eafd1f --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/otelconv/metric.go @@ -0,0 +1,2126 @@ +// Code generated from semantic convention specification. DO NOT EDIT. + +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package otelconv provides types and functionality for OpenTelemetry semantic +// conventions in the "otel" namespace. +package otelconv + +import ( + "context" + "sync" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/metric/noop" +) + +var ( + addOptPool = &sync.Pool{New: func() any { return &[]metric.AddOption{} }} + recOptPool = &sync.Pool{New: func() any { return &[]metric.RecordOption{} }} +) + +// ErrorTypeAttr is an attribute conforming to the error.type semantic +// conventions. It represents the describes a class of error the operation ended +// with. +type ErrorTypeAttr string + +var ( + // ErrorTypeOther is a fallback error value to be used when the instrumentation + // doesn't define a custom value. 
+ ErrorTypeOther ErrorTypeAttr = "_OTHER" +) + +// ComponentTypeAttr is an attribute conforming to the otel.component.type +// semantic conventions. It represents a name identifying the type of the +// OpenTelemetry component. +type ComponentTypeAttr string + +var ( + // ComponentTypeBatchingSpanProcessor is the builtin SDK batching span + // processor. + ComponentTypeBatchingSpanProcessor ComponentTypeAttr = "batching_span_processor" + // ComponentTypeSimpleSpanProcessor is the builtin SDK simple span processor. + ComponentTypeSimpleSpanProcessor ComponentTypeAttr = "simple_span_processor" + // ComponentTypeBatchingLogProcessor is the builtin SDK batching log record + // processor. + ComponentTypeBatchingLogProcessor ComponentTypeAttr = "batching_log_processor" + // ComponentTypeSimpleLogProcessor is the builtin SDK simple log record + // processor. + ComponentTypeSimpleLogProcessor ComponentTypeAttr = "simple_log_processor" + // ComponentTypeOtlpGRPCSpanExporter is the OTLP span exporter over gRPC with + // protobuf serialization. + ComponentTypeOtlpGRPCSpanExporter ComponentTypeAttr = "otlp_grpc_span_exporter" + // ComponentTypeOtlpHTTPSpanExporter is the OTLP span exporter over HTTP with + // protobuf serialization. + ComponentTypeOtlpHTTPSpanExporter ComponentTypeAttr = "otlp_http_span_exporter" + // ComponentTypeOtlpHTTPJSONSpanExporter is the OTLP span exporter over HTTP + // with JSON serialization. + ComponentTypeOtlpHTTPJSONSpanExporter ComponentTypeAttr = "otlp_http_json_span_exporter" + // ComponentTypeZipkinHTTPSpanExporter is the zipkin span exporter over HTTP. + ComponentTypeZipkinHTTPSpanExporter ComponentTypeAttr = "zipkin_http_span_exporter" + // ComponentTypeOtlpGRPCLogExporter is the OTLP log record exporter over gRPC + // with protobuf serialization. + ComponentTypeOtlpGRPCLogExporter ComponentTypeAttr = "otlp_grpc_log_exporter" + // ComponentTypeOtlpHTTPLogExporter is the OTLP log record exporter over HTTP + // with protobuf serialization. 
+ ComponentTypeOtlpHTTPLogExporter ComponentTypeAttr = "otlp_http_log_exporter" + // ComponentTypeOtlpHTTPJSONLogExporter is the OTLP log record exporter over + // HTTP with JSON serialization. + ComponentTypeOtlpHTTPJSONLogExporter ComponentTypeAttr = "otlp_http_json_log_exporter" + // ComponentTypePeriodicMetricReader is the builtin SDK periodically exporting + // metric reader. + ComponentTypePeriodicMetricReader ComponentTypeAttr = "periodic_metric_reader" + // ComponentTypeOtlpGRPCMetricExporter is the OTLP metric exporter over gRPC + // with protobuf serialization. + ComponentTypeOtlpGRPCMetricExporter ComponentTypeAttr = "otlp_grpc_metric_exporter" + // ComponentTypeOtlpHTTPMetricExporter is the OTLP metric exporter over HTTP + // with protobuf serialization. + ComponentTypeOtlpHTTPMetricExporter ComponentTypeAttr = "otlp_http_metric_exporter" + // ComponentTypeOtlpHTTPJSONMetricExporter is the OTLP metric exporter over HTTP + // with JSON serialization. + ComponentTypeOtlpHTTPJSONMetricExporter ComponentTypeAttr = "otlp_http_json_metric_exporter" + // ComponentTypePrometheusHTTPTextMetricExporter is the prometheus metric + // exporter over HTTP with the default text-based format. + ComponentTypePrometheusHTTPTextMetricExporter ComponentTypeAttr = "prometheus_http_text_metric_exporter" +) + +// SpanParentOriginAttr is an attribute conforming to the otel.span.parent.origin +// semantic conventions. It represents the determines whether the span has a +// parent span, and if so, [whether it is a remote parent]. +// +// [whether it is a remote parent]: https://opentelemetry.io/docs/specs/otel/trace/api/#isremote +type SpanParentOriginAttr string + +var ( + // SpanParentOriginNone is the span does not have a parent, it is a root span. + SpanParentOriginNone SpanParentOriginAttr = "none" + // SpanParentOriginLocal is the span has a parent and the parent's span context + // [isRemote()] is false. 
+ // + // [isRemote()]: https://opentelemetry.io/docs/specs/otel/trace/api/#isremote + SpanParentOriginLocal SpanParentOriginAttr = "local" + // SpanParentOriginRemote is the span has a parent and the parent's span context + // [isRemote()] is true. + // + // [isRemote()]: https://opentelemetry.io/docs/specs/otel/trace/api/#isremote + SpanParentOriginRemote SpanParentOriginAttr = "remote" +) + +// SpanSamplingResultAttr is an attribute conforming to the +// otel.span.sampling_result semantic conventions. It represents the result value +// of the sampler for this span. +type SpanSamplingResultAttr string + +var ( + // SpanSamplingResultDrop is the span is not sampled and not recording. + SpanSamplingResultDrop SpanSamplingResultAttr = "DROP" + // SpanSamplingResultRecordOnly is the span is not sampled, but recording. + SpanSamplingResultRecordOnly SpanSamplingResultAttr = "RECORD_ONLY" + // SpanSamplingResultRecordAndSample is the span is sampled and recording. + SpanSamplingResultRecordAndSample SpanSamplingResultAttr = "RECORD_AND_SAMPLE" +) + +// RPCGRPCStatusCodeAttr is an attribute conforming to the rpc.grpc.status_code +// semantic conventions. It represents the gRPC status code of the last gRPC +// requests performed in scope of this export call. +type RPCGRPCStatusCodeAttr int64 + +var ( + // RPCGRPCStatusCodeOk is the OK. + RPCGRPCStatusCodeOk RPCGRPCStatusCodeAttr = 0 + // RPCGRPCStatusCodeCancelled is the CANCELLED. + RPCGRPCStatusCodeCancelled RPCGRPCStatusCodeAttr = 1 + // RPCGRPCStatusCodeUnknown is the UNKNOWN. + RPCGRPCStatusCodeUnknown RPCGRPCStatusCodeAttr = 2 + // RPCGRPCStatusCodeInvalidArgument is the INVALID_ARGUMENT. + RPCGRPCStatusCodeInvalidArgument RPCGRPCStatusCodeAttr = 3 + // RPCGRPCStatusCodeDeadlineExceeded is the DEADLINE_EXCEEDED. + RPCGRPCStatusCodeDeadlineExceeded RPCGRPCStatusCodeAttr = 4 + // RPCGRPCStatusCodeNotFound is the NOT_FOUND. 
+ RPCGRPCStatusCodeNotFound RPCGRPCStatusCodeAttr = 5 + // RPCGRPCStatusCodeAlreadyExists is the ALREADY_EXISTS. + RPCGRPCStatusCodeAlreadyExists RPCGRPCStatusCodeAttr = 6 + // RPCGRPCStatusCodePermissionDenied is the PERMISSION_DENIED. + RPCGRPCStatusCodePermissionDenied RPCGRPCStatusCodeAttr = 7 + // RPCGRPCStatusCodeResourceExhausted is the RESOURCE_EXHAUSTED. + RPCGRPCStatusCodeResourceExhausted RPCGRPCStatusCodeAttr = 8 + // RPCGRPCStatusCodeFailedPrecondition is the FAILED_PRECONDITION. + RPCGRPCStatusCodeFailedPrecondition RPCGRPCStatusCodeAttr = 9 + // RPCGRPCStatusCodeAborted is the ABORTED. + RPCGRPCStatusCodeAborted RPCGRPCStatusCodeAttr = 10 + // RPCGRPCStatusCodeOutOfRange is the OUT_OF_RANGE. + RPCGRPCStatusCodeOutOfRange RPCGRPCStatusCodeAttr = 11 + // RPCGRPCStatusCodeUnimplemented is the UNIMPLEMENTED. + RPCGRPCStatusCodeUnimplemented RPCGRPCStatusCodeAttr = 12 + // RPCGRPCStatusCodeInternal is the INTERNAL. + RPCGRPCStatusCodeInternal RPCGRPCStatusCodeAttr = 13 + // RPCGRPCStatusCodeUnavailable is the UNAVAILABLE. + RPCGRPCStatusCodeUnavailable RPCGRPCStatusCodeAttr = 14 + // RPCGRPCStatusCodeDataLoss is the DATA_LOSS. + RPCGRPCStatusCodeDataLoss RPCGRPCStatusCodeAttr = 15 + // RPCGRPCStatusCodeUnauthenticated is the UNAUTHENTICATED. + RPCGRPCStatusCodeUnauthenticated RPCGRPCStatusCodeAttr = 16 +) + +// SDKExporterLogExported is an instrument used to record metric values +// conforming to the "otel.sdk.exporter.log.exported" semantic conventions. It +// represents the number of log records for which the export has finished, either +// successful or failed. +type SDKExporterLogExported struct { + metric.Int64Counter +} + +// NewSDKExporterLogExported returns a new SDKExporterLogExported instrument. +func NewSDKExporterLogExported( + m metric.Meter, + opt ...metric.Int64CounterOption, +) (SDKExporterLogExported, error) { + // Check if the meter is nil. 
+ if m == nil { + return SDKExporterLogExported{noop.Int64Counter{}}, nil + } + + i, err := m.Int64Counter( + "otel.sdk.exporter.log.exported", + append([]metric.Int64CounterOption{ + metric.WithDescription("The number of log records for which the export has finished, either successful or failed."), + metric.WithUnit("{log_record}"), + }, opt...)..., + ) + if err != nil { + return SDKExporterLogExported{noop.Int64Counter{}}, err + } + return SDKExporterLogExported{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m SDKExporterLogExported) Inst() metric.Int64Counter { + return m.Int64Counter +} + +// Name returns the semantic convention name of the instrument. +func (SDKExporterLogExported) Name() string { + return "otel.sdk.exporter.log.exported" +} + +// Unit returns the semantic convention unit of the instrument +func (SDKExporterLogExported) Unit() string { + return "{log_record}" +} + +// Description returns the semantic convention description of the instrument +func (SDKExporterLogExported) Description() string { + return "The number of log records for which the export has finished, either successful or failed." +} + +// Add adds incr to the existing count for attrs. +// +// All additional attrs passed are included in the recorded value. +// +// For successful exports, `error.type` MUST NOT be set. For failed exports, +// `error.type` MUST contain the failure cause. +// For exporters with partial success semantics (e.g. OTLP with +// `rejected_log_records`), rejected log records MUST count as failed and only +// non-rejected log records count as success. +// If no rejection reason is available, `rejected` SHOULD be used as value for +// `error.type`. 
+func (m SDKExporterLogExported) Add( + ctx context.Context, + incr int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// For successful exports, `error.type` MUST NOT be set. For failed exports, +// `error.type` MUST contain the failure cause. +// For exporters with partial success semantics (e.g. OTLP with +// `rejected_log_records`), rejected log records MUST count as failed and only +// non-rejected log records count as success. +// If no rejection reason is available, `rejected` SHOULD be used as value for +// `error.type`. +func (m SDKExporterLogExported) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AttrErrorType returns an optional attribute for the "error.type" semantic +// convention. It represents the describes a class of error the operation ended +// with. +func (SDKExporterLogExported) AttrErrorType(val ErrorTypeAttr) attribute.KeyValue { + return attribute.String("error.type", string(val)) +} + +// AttrComponentName returns an optional attribute for the "otel.component.name" +// semantic convention. It represents a name uniquely identifying the instance of +// the OpenTelemetry component within its containing SDK instance. 
+func (SDKExporterLogExported) AttrComponentName(val string) attribute.KeyValue { + return attribute.String("otel.component.name", val) +} + +// AttrComponentType returns an optional attribute for the "otel.component.type" +// semantic convention. It represents a name identifying the type of the +// OpenTelemetry component. +func (SDKExporterLogExported) AttrComponentType(val ComponentTypeAttr) attribute.KeyValue { + return attribute.String("otel.component.type", string(val)) +} + +// AttrServerAddress returns an optional attribute for the "server.address" +// semantic convention. It represents the server domain name if available without +// reverse DNS lookup; otherwise, IP address or Unix domain socket name. +func (SDKExporterLogExported) AttrServerAddress(val string) attribute.KeyValue { + return attribute.String("server.address", val) +} + +// AttrServerPort returns an optional attribute for the "server.port" semantic +// convention. It represents the server port number. +func (SDKExporterLogExported) AttrServerPort(val int) attribute.KeyValue { + return attribute.Int("server.port", val) +} + +// SDKExporterLogInflight is an instrument used to record metric values +// conforming to the "otel.sdk.exporter.log.inflight" semantic conventions. It +// represents the number of log records which were passed to the exporter, but +// that have not been exported yet (neither successful, nor failed). +type SDKExporterLogInflight struct { + metric.Int64UpDownCounter +} + +// NewSDKExporterLogInflight returns a new SDKExporterLogInflight instrument. +func NewSDKExporterLogInflight( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (SDKExporterLogInflight, error) { + // Check if the meter is nil. 
+ if m == nil { + return SDKExporterLogInflight{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "otel.sdk.exporter.log.inflight", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("The number of log records which were passed to the exporter, but that have not been exported yet (neither successful, nor failed)."), + metric.WithUnit("{log_record}"), + }, opt...)..., + ) + if err != nil { + return SDKExporterLogInflight{noop.Int64UpDownCounter{}}, err + } + return SDKExporterLogInflight{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m SDKExporterLogInflight) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (SDKExporterLogInflight) Name() string { + return "otel.sdk.exporter.log.inflight" +} + +// Unit returns the semantic convention unit of the instrument +func (SDKExporterLogInflight) Unit() string { + return "{log_record}" +} + +// Description returns the semantic convention description of the instrument +func (SDKExporterLogInflight) Description() string { + return "The number of log records which were passed to the exporter, but that have not been exported yet (neither successful, nor failed)." +} + +// Add adds incr to the existing count for attrs. +// +// All additional attrs passed are included in the recorded value. +// +// For successful exports, `error.type` MUST NOT be set. For failed exports, +// `error.type` MUST contain the failure cause. +func (m SDKExporterLogInflight) Add( + ctx context.Context, + incr int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. 
+// +// For successful exports, `error.type` MUST NOT be set. For failed exports, +// `error.type` MUST contain the failure cause. +func (m SDKExporterLogInflight) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AttrComponentName returns an optional attribute for the "otel.component.name" +// semantic convention. It represents a name uniquely identifying the instance of +// the OpenTelemetry component within its containing SDK instance. +func (SDKExporterLogInflight) AttrComponentName(val string) attribute.KeyValue { + return attribute.String("otel.component.name", val) +} + +// AttrComponentType returns an optional attribute for the "otel.component.type" +// semantic convention. It represents a name identifying the type of the +// OpenTelemetry component. +func (SDKExporterLogInflight) AttrComponentType(val ComponentTypeAttr) attribute.KeyValue { + return attribute.String("otel.component.type", string(val)) +} + +// AttrServerAddress returns an optional attribute for the "server.address" +// semantic convention. It represents the server domain name if available without +// reverse DNS lookup; otherwise, IP address or Unix domain socket name. +func (SDKExporterLogInflight) AttrServerAddress(val string) attribute.KeyValue { + return attribute.String("server.address", val) +} + +// AttrServerPort returns an optional attribute for the "server.port" semantic +// convention. It represents the server port number. 
+func (SDKExporterLogInflight) AttrServerPort(val int) attribute.KeyValue { + return attribute.Int("server.port", val) +} + +// SDKExporterMetricDataPointExported is an instrument used to record metric +// values conforming to the "otel.sdk.exporter.metric_data_point.exported" +// semantic conventions. It represents the number of metric data points for which +// the export has finished, either successful or failed. +type SDKExporterMetricDataPointExported struct { + metric.Int64Counter +} + +// NewSDKExporterMetricDataPointExported returns a new +// SDKExporterMetricDataPointExported instrument. +func NewSDKExporterMetricDataPointExported( + m metric.Meter, + opt ...metric.Int64CounterOption, +) (SDKExporterMetricDataPointExported, error) { + // Check if the meter is nil. + if m == nil { + return SDKExporterMetricDataPointExported{noop.Int64Counter{}}, nil + } + + i, err := m.Int64Counter( + "otel.sdk.exporter.metric_data_point.exported", + append([]metric.Int64CounterOption{ + metric.WithDescription("The number of metric data points for which the export has finished, either successful or failed."), + metric.WithUnit("{data_point}"), + }, opt...)..., + ) + if err != nil { + return SDKExporterMetricDataPointExported{noop.Int64Counter{}}, err + } + return SDKExporterMetricDataPointExported{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m SDKExporterMetricDataPointExported) Inst() metric.Int64Counter { + return m.Int64Counter +} + +// Name returns the semantic convention name of the instrument. 
+func (SDKExporterMetricDataPointExported) Name() string { + return "otel.sdk.exporter.metric_data_point.exported" +} + +// Unit returns the semantic convention unit of the instrument +func (SDKExporterMetricDataPointExported) Unit() string { + return "{data_point}" +} + +// Description returns the semantic convention description of the instrument +func (SDKExporterMetricDataPointExported) Description() string { + return "The number of metric data points for which the export has finished, either successful or failed." +} + +// Add adds incr to the existing count for attrs. +// +// All additional attrs passed are included in the recorded value. +// +// For successful exports, `error.type` MUST NOT be set. For failed exports, +// `error.type` MUST contain the failure cause. +// For exporters with partial success semantics (e.g. OTLP with +// `rejected_data_points`), rejected data points MUST count as failed and only +// non-rejected data points count as success. +// If no rejection reason is available, `rejected` SHOULD be used as value for +// `error.type`. +func (m SDKExporterMetricDataPointExported) Add( + ctx context.Context, + incr int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// For successful exports, `error.type` MUST NOT be set. For failed exports, +// `error.type` MUST contain the failure cause. +// For exporters with partial success semantics (e.g. OTLP with +// `rejected_data_points`), rejected data points MUST count as failed and only +// non-rejected data points count as success. +// If no rejection reason is available, `rejected` SHOULD be used as value for +// `error.type`. 
+func (m SDKExporterMetricDataPointExported) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AttrErrorType returns an optional attribute for the "error.type" semantic +// convention. It represents the describes a class of error the operation ended +// with. +func (SDKExporterMetricDataPointExported) AttrErrorType(val ErrorTypeAttr) attribute.KeyValue { + return attribute.String("error.type", string(val)) +} + +// AttrComponentName returns an optional attribute for the "otel.component.name" +// semantic convention. It represents a name uniquely identifying the instance of +// the OpenTelemetry component within its containing SDK instance. +func (SDKExporterMetricDataPointExported) AttrComponentName(val string) attribute.KeyValue { + return attribute.String("otel.component.name", val) +} + +// AttrComponentType returns an optional attribute for the "otel.component.type" +// semantic convention. It represents a name identifying the type of the +// OpenTelemetry component. +func (SDKExporterMetricDataPointExported) AttrComponentType(val ComponentTypeAttr) attribute.KeyValue { + return attribute.String("otel.component.type", string(val)) +} + +// AttrServerAddress returns an optional attribute for the "server.address" +// semantic convention. It represents the server domain name if available without +// reverse DNS lookup; otherwise, IP address or Unix domain socket name. +func (SDKExporterMetricDataPointExported) AttrServerAddress(val string) attribute.KeyValue { + return attribute.String("server.address", val) +} + +// AttrServerPort returns an optional attribute for the "server.port" semantic +// convention. It represents the server port number. 
+func (SDKExporterMetricDataPointExported) AttrServerPort(val int) attribute.KeyValue { + return attribute.Int("server.port", val) +} + +// SDKExporterMetricDataPointInflight is an instrument used to record metric +// values conforming to the "otel.sdk.exporter.metric_data_point.inflight" +// semantic conventions. It represents the number of metric data points which +// were passed to the exporter, but that have not been exported yet (neither +// successful, nor failed). +type SDKExporterMetricDataPointInflight struct { + metric.Int64UpDownCounter +} + +// NewSDKExporterMetricDataPointInflight returns a new +// SDKExporterMetricDataPointInflight instrument. +func NewSDKExporterMetricDataPointInflight( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (SDKExporterMetricDataPointInflight, error) { + // Check if the meter is nil. + if m == nil { + return SDKExporterMetricDataPointInflight{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "otel.sdk.exporter.metric_data_point.inflight", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("The number of metric data points which were passed to the exporter, but that have not been exported yet (neither successful, nor failed)."), + metric.WithUnit("{data_point}"), + }, opt...)..., + ) + if err != nil { + return SDKExporterMetricDataPointInflight{noop.Int64UpDownCounter{}}, err + } + return SDKExporterMetricDataPointInflight{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m SDKExporterMetricDataPointInflight) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. 
+func (SDKExporterMetricDataPointInflight) Name() string { + return "otel.sdk.exporter.metric_data_point.inflight" +} + +// Unit returns the semantic convention unit of the instrument +func (SDKExporterMetricDataPointInflight) Unit() string { + return "{data_point}" +} + +// Description returns the semantic convention description of the instrument +func (SDKExporterMetricDataPointInflight) Description() string { + return "The number of metric data points which were passed to the exporter, but that have not been exported yet (neither successful, nor failed)." +} + +// Add adds incr to the existing count for attrs. +// +// All additional attrs passed are included in the recorded value. +// +// For successful exports, `error.type` MUST NOT be set. For failed exports, +// `error.type` MUST contain the failure cause. +func (m SDKExporterMetricDataPointInflight) Add( + ctx context.Context, + incr int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// For successful exports, `error.type` MUST NOT be set. For failed exports, +// `error.type` MUST contain the failure cause. +func (m SDKExporterMetricDataPointInflight) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AttrComponentName returns an optional attribute for the "otel.component.name" +// semantic convention. 
It represents a name uniquely identifying the instance of +// the OpenTelemetry component within its containing SDK instance. +func (SDKExporterMetricDataPointInflight) AttrComponentName(val string) attribute.KeyValue { + return attribute.String("otel.component.name", val) +} + +// AttrComponentType returns an optional attribute for the "otel.component.type" +// semantic convention. It represents a name identifying the type of the +// OpenTelemetry component. +func (SDKExporterMetricDataPointInflight) AttrComponentType(val ComponentTypeAttr) attribute.KeyValue { + return attribute.String("otel.component.type", string(val)) +} + +// AttrServerAddress returns an optional attribute for the "server.address" +// semantic convention. It represents the server domain name if available without +// reverse DNS lookup; otherwise, IP address or Unix domain socket name. +func (SDKExporterMetricDataPointInflight) AttrServerAddress(val string) attribute.KeyValue { + return attribute.String("server.address", val) +} + +// AttrServerPort returns an optional attribute for the "server.port" semantic +// convention. It represents the server port number. +func (SDKExporterMetricDataPointInflight) AttrServerPort(val int) attribute.KeyValue { + return attribute.Int("server.port", val) +} + +// SDKExporterOperationDuration is an instrument used to record metric values +// conforming to the "otel.sdk.exporter.operation.duration" semantic conventions. +// It represents the duration of exporting a batch of telemetry records. +type SDKExporterOperationDuration struct { + metric.Float64Histogram +} + +// NewSDKExporterOperationDuration returns a new SDKExporterOperationDuration +// instrument. +func NewSDKExporterOperationDuration( + m metric.Meter, + opt ...metric.Float64HistogramOption, +) (SDKExporterOperationDuration, error) { + // Check if the meter is nil. 
+ if m == nil { + return SDKExporterOperationDuration{noop.Float64Histogram{}}, nil + } + + i, err := m.Float64Histogram( + "otel.sdk.exporter.operation.duration", + append([]metric.Float64HistogramOption{ + metric.WithDescription("The duration of exporting a batch of telemetry records."), + metric.WithUnit("s"), + }, opt...)..., + ) + if err != nil { + return SDKExporterOperationDuration{noop.Float64Histogram{}}, err + } + return SDKExporterOperationDuration{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m SDKExporterOperationDuration) Inst() metric.Float64Histogram { + return m.Float64Histogram +} + +// Name returns the semantic convention name of the instrument. +func (SDKExporterOperationDuration) Name() string { + return "otel.sdk.exporter.operation.duration" +} + +// Unit returns the semantic convention unit of the instrument +func (SDKExporterOperationDuration) Unit() string { + return "s" +} + +// Description returns the semantic convention description of the instrument +func (SDKExporterOperationDuration) Description() string { + return "The duration of exporting a batch of telemetry records." +} + +// Record records val to the current distribution for attrs. +// +// All additional attrs passed are included in the recorded value. +// +// This metric defines successful operations using the full success definitions +// for [http] +// and [grpc]. Anything else is defined as an unsuccessful operation. For +// successful +// operations, `error.type` MUST NOT be set. For unsuccessful export operations, +// `error.type` MUST contain a relevant failure cause. 
+//
+// [http]: https://github.com/open-telemetry/opentelemetry-proto/blob/v1.5.0/docs/specification.md#full-success-1
+// [grpc]: https://github.com/open-telemetry/opentelemetry-proto/blob/v1.5.0/docs/specification.md#full-success
+func (m SDKExporterOperationDuration) Record(
+	ctx context.Context,
+	val float64,
+	attrs ...attribute.KeyValue,
+) {
+	if len(attrs) == 0 {
+		m.Float64Histogram.Record(ctx, val)
+		return
+	}
+
+	o := recOptPool.Get().(*[]metric.RecordOption)
+	defer func() {
+		*o = (*o)[:0]
+		recOptPool.Put(o)
+	}()
+
+	*o = append(
+		*o,
+		metric.WithAttributes(
+			attrs...,
+		),
+	)
+
+	m.Float64Histogram.Record(ctx, val, *o...)
+}
+
+// RecordSet records val to the current distribution for set.
+//
+// This metric defines successful operations using the full success definitions
+// for [http]
+// and [grpc]. Anything else is defined as an unsuccessful operation. For
+// successful
+// operations, `error.type` MUST NOT be set. For unsuccessful export operations,
+// `error.type` MUST contain a relevant failure cause.
+//
+// [http]: https://github.com/open-telemetry/opentelemetry-proto/blob/v1.5.0/docs/specification.md#full-success-1
+// [grpc]: https://github.com/open-telemetry/opentelemetry-proto/blob/v1.5.0/docs/specification.md#full-success
+func (m SDKExporterOperationDuration) RecordSet(ctx context.Context, val float64, set attribute.Set) {
+	if set.Len() == 0 {
+		m.Float64Histogram.Record(ctx, val)
+		// Fast path must not fall through: without this return the value
+		// was recorded a second time below, double-counting the sample.
+		return
+	}
+
+	o := recOptPool.Get().(*[]metric.RecordOption)
+	defer func() {
+		*o = (*o)[:0]
+		recOptPool.Put(o)
+	}()
+
+	*o = append(*o, metric.WithAttributeSet(set))
+	m.Float64Histogram.Record(ctx, val, *o...)
+}
+
+// AttrErrorType returns an optional attribute for the "error.type" semantic
+// convention. It represents the describes a class of error the operation ended
+// with.
+func (SDKExporterOperationDuration) AttrErrorType(val ErrorTypeAttr) attribute.KeyValue { + return attribute.String("error.type", string(val)) +} + +// AttrHTTPResponseStatusCode returns an optional attribute for the +// "http.response.status_code" semantic convention. It represents the HTTP status +// code of the last HTTP request performed in scope of this export call. +func (SDKExporterOperationDuration) AttrHTTPResponseStatusCode(val int) attribute.KeyValue { + return attribute.Int("http.response.status_code", val) +} + +// AttrComponentName returns an optional attribute for the "otel.component.name" +// semantic convention. It represents a name uniquely identifying the instance of +// the OpenTelemetry component within its containing SDK instance. +func (SDKExporterOperationDuration) AttrComponentName(val string) attribute.KeyValue { + return attribute.String("otel.component.name", val) +} + +// AttrComponentType returns an optional attribute for the "otel.component.type" +// semantic convention. It represents a name identifying the type of the +// OpenTelemetry component. +func (SDKExporterOperationDuration) AttrComponentType(val ComponentTypeAttr) attribute.KeyValue { + return attribute.String("otel.component.type", string(val)) +} + +// AttrRPCGRPCStatusCode returns an optional attribute for the +// "rpc.grpc.status_code" semantic convention. It represents the gRPC status code +// of the last gRPC requests performed in scope of this export call. +func (SDKExporterOperationDuration) AttrRPCGRPCStatusCode(val RPCGRPCStatusCodeAttr) attribute.KeyValue { + return attribute.Int64("rpc.grpc.status_code", int64(val)) +} + +// AttrServerAddress returns an optional attribute for the "server.address" +// semantic convention. It represents the server domain name if available without +// reverse DNS lookup; otherwise, IP address or Unix domain socket name. 
+func (SDKExporterOperationDuration) AttrServerAddress(val string) attribute.KeyValue { + return attribute.String("server.address", val) +} + +// AttrServerPort returns an optional attribute for the "server.port" semantic +// convention. It represents the server port number. +func (SDKExporterOperationDuration) AttrServerPort(val int) attribute.KeyValue { + return attribute.Int("server.port", val) +} + +// SDKExporterSpanExported is an instrument used to record metric values +// conforming to the "otel.sdk.exporter.span.exported" semantic conventions. It +// represents the number of spans for which the export has finished, either +// successful or failed. +type SDKExporterSpanExported struct { + metric.Int64Counter +} + +// NewSDKExporterSpanExported returns a new SDKExporterSpanExported instrument. +func NewSDKExporterSpanExported( + m metric.Meter, + opt ...metric.Int64CounterOption, +) (SDKExporterSpanExported, error) { + // Check if the meter is nil. + if m == nil { + return SDKExporterSpanExported{noop.Int64Counter{}}, nil + } + + i, err := m.Int64Counter( + "otel.sdk.exporter.span.exported", + append([]metric.Int64CounterOption{ + metric.WithDescription("The number of spans for which the export has finished, either successful or failed."), + metric.WithUnit("{span}"), + }, opt...)..., + ) + if err != nil { + return SDKExporterSpanExported{noop.Int64Counter{}}, err + } + return SDKExporterSpanExported{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m SDKExporterSpanExported) Inst() metric.Int64Counter { + return m.Int64Counter +} + +// Name returns the semantic convention name of the instrument. 
+func (SDKExporterSpanExported) Name() string { + return "otel.sdk.exporter.span.exported" +} + +// Unit returns the semantic convention unit of the instrument +func (SDKExporterSpanExported) Unit() string { + return "{span}" +} + +// Description returns the semantic convention description of the instrument +func (SDKExporterSpanExported) Description() string { + return "The number of spans for which the export has finished, either successful or failed." +} + +// Add adds incr to the existing count for attrs. +// +// All additional attrs passed are included in the recorded value. +// +// For successful exports, `error.type` MUST NOT be set. For failed exports, +// `error.type` MUST contain the failure cause. +// For exporters with partial success semantics (e.g. OTLP with `rejected_spans` +// ), rejected spans MUST count as failed and only non-rejected spans count as +// success. +// If no rejection reason is available, `rejected` SHOULD be used as value for +// `error.type`. +func (m SDKExporterSpanExported) Add( + ctx context.Context, + incr int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// For successful exports, `error.type` MUST NOT be set. For failed exports, +// `error.type` MUST contain the failure cause. +// For exporters with partial success semantics (e.g. OTLP with `rejected_spans` +// ), rejected spans MUST count as failed and only non-rejected spans count as +// success. +// If no rejection reason is available, `rejected` SHOULD be used as value for +// `error.type`. 
+func (m SDKExporterSpanExported) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AttrErrorType returns an optional attribute for the "error.type" semantic +// convention. It represents the describes a class of error the operation ended +// with. +func (SDKExporterSpanExported) AttrErrorType(val ErrorTypeAttr) attribute.KeyValue { + return attribute.String("error.type", string(val)) +} + +// AttrComponentName returns an optional attribute for the "otel.component.name" +// semantic convention. It represents a name uniquely identifying the instance of +// the OpenTelemetry component within its containing SDK instance. +func (SDKExporterSpanExported) AttrComponentName(val string) attribute.KeyValue { + return attribute.String("otel.component.name", val) +} + +// AttrComponentType returns an optional attribute for the "otel.component.type" +// semantic convention. It represents a name identifying the type of the +// OpenTelemetry component. +func (SDKExporterSpanExported) AttrComponentType(val ComponentTypeAttr) attribute.KeyValue { + return attribute.String("otel.component.type", string(val)) +} + +// AttrServerAddress returns an optional attribute for the "server.address" +// semantic convention. It represents the server domain name if available without +// reverse DNS lookup; otherwise, IP address or Unix domain socket name. +func (SDKExporterSpanExported) AttrServerAddress(val string) attribute.KeyValue { + return attribute.String("server.address", val) +} + +// AttrServerPort returns an optional attribute for the "server.port" semantic +// convention. It represents the server port number. 
+func (SDKExporterSpanExported) AttrServerPort(val int) attribute.KeyValue { + return attribute.Int("server.port", val) +} + +// SDKExporterSpanInflight is an instrument used to record metric values +// conforming to the "otel.sdk.exporter.span.inflight" semantic conventions. It +// represents the number of spans which were passed to the exporter, but that +// have not been exported yet (neither successful, nor failed). +type SDKExporterSpanInflight struct { + metric.Int64UpDownCounter +} + +// NewSDKExporterSpanInflight returns a new SDKExporterSpanInflight instrument. +func NewSDKExporterSpanInflight( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (SDKExporterSpanInflight, error) { + // Check if the meter is nil. + if m == nil { + return SDKExporterSpanInflight{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "otel.sdk.exporter.span.inflight", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("The number of spans which were passed to the exporter, but that have not been exported yet (neither successful, nor failed)."), + metric.WithUnit("{span}"), + }, opt...)..., + ) + if err != nil { + return SDKExporterSpanInflight{noop.Int64UpDownCounter{}}, err + } + return SDKExporterSpanInflight{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m SDKExporterSpanInflight) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (SDKExporterSpanInflight) Name() string { + return "otel.sdk.exporter.span.inflight" +} + +// Unit returns the semantic convention unit of the instrument +func (SDKExporterSpanInflight) Unit() string { + return "{span}" +} + +// Description returns the semantic convention description of the instrument +func (SDKExporterSpanInflight) Description() string { + return "The number of spans which were passed to the exporter, but that have not been exported yet (neither successful, nor failed)." 
+} + +// Add adds incr to the existing count for attrs. +// +// All additional attrs passed are included in the recorded value. +// +// For successful exports, `error.type` MUST NOT be set. For failed exports, +// `error.type` MUST contain the failure cause. +func (m SDKExporterSpanInflight) Add( + ctx context.Context, + incr int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// For successful exports, `error.type` MUST NOT be set. For failed exports, +// `error.type` MUST contain the failure cause. +func (m SDKExporterSpanInflight) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AttrComponentName returns an optional attribute for the "otel.component.name" +// semantic convention. It represents a name uniquely identifying the instance of +// the OpenTelemetry component within its containing SDK instance. +func (SDKExporterSpanInflight) AttrComponentName(val string) attribute.KeyValue { + return attribute.String("otel.component.name", val) +} + +// AttrComponentType returns an optional attribute for the "otel.component.type" +// semantic convention. It represents a name identifying the type of the +// OpenTelemetry component. 
+func (SDKExporterSpanInflight) AttrComponentType(val ComponentTypeAttr) attribute.KeyValue { + return attribute.String("otel.component.type", string(val)) +} + +// AttrServerAddress returns an optional attribute for the "server.address" +// semantic convention. It represents the server domain name if available without +// reverse DNS lookup; otherwise, IP address or Unix domain socket name. +func (SDKExporterSpanInflight) AttrServerAddress(val string) attribute.KeyValue { + return attribute.String("server.address", val) +} + +// AttrServerPort returns an optional attribute for the "server.port" semantic +// convention. It represents the server port number. +func (SDKExporterSpanInflight) AttrServerPort(val int) attribute.KeyValue { + return attribute.Int("server.port", val) +} + +// SDKLogCreated is an instrument used to record metric values conforming to the +// "otel.sdk.log.created" semantic conventions. It represents the number of logs +// submitted to enabled SDK Loggers. +type SDKLogCreated struct { + metric.Int64Counter +} + +// NewSDKLogCreated returns a new SDKLogCreated instrument. +func NewSDKLogCreated( + m metric.Meter, + opt ...metric.Int64CounterOption, +) (SDKLogCreated, error) { + // Check if the meter is nil. + if m == nil { + return SDKLogCreated{noop.Int64Counter{}}, nil + } + + i, err := m.Int64Counter( + "otel.sdk.log.created", + append([]metric.Int64CounterOption{ + metric.WithDescription("The number of logs submitted to enabled SDK Loggers."), + metric.WithUnit("{log_record}"), + }, opt...)..., + ) + if err != nil { + return SDKLogCreated{noop.Int64Counter{}}, err + } + return SDKLogCreated{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m SDKLogCreated) Inst() metric.Int64Counter { + return m.Int64Counter +} + +// Name returns the semantic convention name of the instrument. 
+func (SDKLogCreated) Name() string { + return "otel.sdk.log.created" +} + +// Unit returns the semantic convention unit of the instrument +func (SDKLogCreated) Unit() string { + return "{log_record}" +} + +// Description returns the semantic convention description of the instrument +func (SDKLogCreated) Description() string { + return "The number of logs submitted to enabled SDK Loggers." +} + +// Add adds incr to the existing count for attrs. +func (m SDKLogCreated) Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +func (m SDKLogCreated) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Counter.Add(ctx, incr, *o...) +} + +// SDKMetricReaderCollectionDuration is an instrument used to record metric +// values conforming to the "otel.sdk.metric_reader.collection.duration" semantic +// conventions. It represents the duration of the collect operation of the metric +// reader. +type SDKMetricReaderCollectionDuration struct { + metric.Float64Histogram +} + +// NewSDKMetricReaderCollectionDuration returns a new +// SDKMetricReaderCollectionDuration instrument. +func NewSDKMetricReaderCollectionDuration( + m metric.Meter, + opt ...metric.Float64HistogramOption, +) (SDKMetricReaderCollectionDuration, error) { + // Check if the meter is nil. 
+ if m == nil { + return SDKMetricReaderCollectionDuration{noop.Float64Histogram{}}, nil + } + + i, err := m.Float64Histogram( + "otel.sdk.metric_reader.collection.duration", + append([]metric.Float64HistogramOption{ + metric.WithDescription("The duration of the collect operation of the metric reader."), + metric.WithUnit("s"), + }, opt...)..., + ) + if err != nil { + return SDKMetricReaderCollectionDuration{noop.Float64Histogram{}}, err + } + return SDKMetricReaderCollectionDuration{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m SDKMetricReaderCollectionDuration) Inst() metric.Float64Histogram { + return m.Float64Histogram +} + +// Name returns the semantic convention name of the instrument. +func (SDKMetricReaderCollectionDuration) Name() string { + return "otel.sdk.metric_reader.collection.duration" +} + +// Unit returns the semantic convention unit of the instrument +func (SDKMetricReaderCollectionDuration) Unit() string { + return "s" +} + +// Description returns the semantic convention description of the instrument +func (SDKMetricReaderCollectionDuration) Description() string { + return "The duration of the collect operation of the metric reader." +} + +// Record records val to the current distribution for attrs. +// +// All additional attrs passed are included in the recorded value. +// +// For successful collections, `error.type` MUST NOT be set. For failed +// collections, `error.type` SHOULD contain the failure cause. +// It can happen that metrics collection is successful for some MetricProducers, +// while others fail. In that case `error.type` SHOULD be set to any of the +// failure causes. 
+func (m SDKMetricReaderCollectionDuration) Record(
+	ctx context.Context,
+	val float64,
+	attrs ...attribute.KeyValue,
+) {
+	if len(attrs) == 0 {
+		m.Float64Histogram.Record(ctx, val)
+		return
+	}
+
+	o := recOptPool.Get().(*[]metric.RecordOption)
+	defer func() {
+		*o = (*o)[:0]
+		recOptPool.Put(o)
+	}()
+
+	*o = append(
+		*o,
+		metric.WithAttributes(
+			attrs...,
+		),
+	)
+
+	m.Float64Histogram.Record(ctx, val, *o...)
+}
+
+// RecordSet records val to the current distribution for set.
+//
+// For successful collections, `error.type` MUST NOT be set. For failed
+// collections, `error.type` SHOULD contain the failure cause.
+// It can happen that metrics collection is successful for some MetricProducers,
+// while others fail. In that case `error.type` SHOULD be set to any of the
+// failure causes.
+func (m SDKMetricReaderCollectionDuration) RecordSet(ctx context.Context, val float64, set attribute.Set) {
+	if set.Len() == 0 {
+		m.Float64Histogram.Record(ctx, val)
+		// Fast path must not fall through: without this return the value
+		// was recorded a second time below, double-counting the sample.
+		return
+	}
+
+	o := recOptPool.Get().(*[]metric.RecordOption)
+	defer func() {
+		*o = (*o)[:0]
+		recOptPool.Put(o)
+	}()
+
+	*o = append(*o, metric.WithAttributeSet(set))
+	m.Float64Histogram.Record(ctx, val, *o...)
+}
+
+// AttrErrorType returns an optional attribute for the "error.type" semantic
+// convention. It represents the describes a class of error the operation ended
+// with.
+func (SDKMetricReaderCollectionDuration) AttrErrorType(val ErrorTypeAttr) attribute.KeyValue {
+	return attribute.String("error.type", string(val))
+}
+
+// AttrComponentName returns an optional attribute for the "otel.component.name"
+// semantic convention. It represents a name uniquely identifying the instance of
+// the OpenTelemetry component within its containing SDK instance.
+func (SDKMetricReaderCollectionDuration) AttrComponentName(val string) attribute.KeyValue { + return attribute.String("otel.component.name", val) +} + +// AttrComponentType returns an optional attribute for the "otel.component.type" +// semantic convention. It represents a name identifying the type of the +// OpenTelemetry component. +func (SDKMetricReaderCollectionDuration) AttrComponentType(val ComponentTypeAttr) attribute.KeyValue { + return attribute.String("otel.component.type", string(val)) +} + +// SDKProcessorLogProcessed is an instrument used to record metric values +// conforming to the "otel.sdk.processor.log.processed" semantic conventions. It +// represents the number of log records for which the processing has finished, +// either successful or failed. +type SDKProcessorLogProcessed struct { + metric.Int64Counter +} + +// NewSDKProcessorLogProcessed returns a new SDKProcessorLogProcessed instrument. +func NewSDKProcessorLogProcessed( + m metric.Meter, + opt ...metric.Int64CounterOption, +) (SDKProcessorLogProcessed, error) { + // Check if the meter is nil. + if m == nil { + return SDKProcessorLogProcessed{noop.Int64Counter{}}, nil + } + + i, err := m.Int64Counter( + "otel.sdk.processor.log.processed", + append([]metric.Int64CounterOption{ + metric.WithDescription("The number of log records for which the processing has finished, either successful or failed."), + metric.WithUnit("{log_record}"), + }, opt...)..., + ) + if err != nil { + return SDKProcessorLogProcessed{noop.Int64Counter{}}, err + } + return SDKProcessorLogProcessed{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m SDKProcessorLogProcessed) Inst() metric.Int64Counter { + return m.Int64Counter +} + +// Name returns the semantic convention name of the instrument. 
+func (SDKProcessorLogProcessed) Name() string { + return "otel.sdk.processor.log.processed" +} + +// Unit returns the semantic convention unit of the instrument +func (SDKProcessorLogProcessed) Unit() string { + return "{log_record}" +} + +// Description returns the semantic convention description of the instrument +func (SDKProcessorLogProcessed) Description() string { + return "The number of log records for which the processing has finished, either successful or failed." +} + +// Add adds incr to the existing count for attrs. +// +// All additional attrs passed are included in the recorded value. +// +// For successful processing, `error.type` MUST NOT be set. For failed +// processing, `error.type` MUST contain the failure cause. +// For the SDK Simple and Batching Log Record Processor a log record is +// considered to be processed already when it has been submitted to the exporter, +// not when the corresponding export call has finished. +func (m SDKProcessorLogProcessed) Add( + ctx context.Context, + incr int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// For successful processing, `error.type` MUST NOT be set. For failed +// processing, `error.type` MUST contain the failure cause. +// For the SDK Simple and Batching Log Record Processor a log record is +// considered to be processed already when it has been submitted to the exporter, +// not when the corresponding export call has finished. 
+func (m SDKProcessorLogProcessed) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AttrErrorType returns an optional attribute for the "error.type" semantic +// convention. It represents a low-cardinality description of the failure reason. +// SDK Batching Log Record Processors MUST use `queue_full` for log records +// dropped due to a full queue. +func (SDKProcessorLogProcessed) AttrErrorType(val ErrorTypeAttr) attribute.KeyValue { + return attribute.String("error.type", string(val)) +} + +// AttrComponentName returns an optional attribute for the "otel.component.name" +// semantic convention. It represents a name uniquely identifying the instance of +// the OpenTelemetry component within its containing SDK instance. +func (SDKProcessorLogProcessed) AttrComponentName(val string) attribute.KeyValue { + return attribute.String("otel.component.name", val) +} + +// AttrComponentType returns an optional attribute for the "otel.component.type" +// semantic convention. It represents a name identifying the type of the +// OpenTelemetry component. +func (SDKProcessorLogProcessed) AttrComponentType(val ComponentTypeAttr) attribute.KeyValue { + return attribute.String("otel.component.type", string(val)) +} + +// SDKProcessorLogQueueCapacity is an instrument used to record metric values +// conforming to the "otel.sdk.processor.log.queue.capacity" semantic +// conventions. It represents the maximum number of log records the queue of a +// given instance of an SDK Log Record processor can hold. +type SDKProcessorLogQueueCapacity struct { + metric.Int64ObservableUpDownCounter +} + +// NewSDKProcessorLogQueueCapacity returns a new SDKProcessorLogQueueCapacity +// instrument. 
+func NewSDKProcessorLogQueueCapacity( + m metric.Meter, + opt ...metric.Int64ObservableUpDownCounterOption, +) (SDKProcessorLogQueueCapacity, error) { + // Check if the meter is nil. + if m == nil { + return SDKProcessorLogQueueCapacity{noop.Int64ObservableUpDownCounter{}}, nil + } + + i, err := m.Int64ObservableUpDownCounter( + "otel.sdk.processor.log.queue.capacity", + append([]metric.Int64ObservableUpDownCounterOption{ + metric.WithDescription("The maximum number of log records the queue of a given instance of an SDK Log Record processor can hold."), + metric.WithUnit("{log_record}"), + }, opt...)..., + ) + if err != nil { + return SDKProcessorLogQueueCapacity{noop.Int64ObservableUpDownCounter{}}, err + } + return SDKProcessorLogQueueCapacity{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m SDKProcessorLogQueueCapacity) Inst() metric.Int64ObservableUpDownCounter { + return m.Int64ObservableUpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (SDKProcessorLogQueueCapacity) Name() string { + return "otel.sdk.processor.log.queue.capacity" +} + +// Unit returns the semantic convention unit of the instrument +func (SDKProcessorLogQueueCapacity) Unit() string { + return "{log_record}" +} + +// Description returns the semantic convention description of the instrument +func (SDKProcessorLogQueueCapacity) Description() string { + return "The maximum number of log records the queue of a given instance of an SDK Log Record processor can hold." +} + +// AttrComponentName returns an optional attribute for the "otel.component.name" +// semantic convention. It represents a name uniquely identifying the instance of +// the OpenTelemetry component within its containing SDK instance. 
+func (SDKProcessorLogQueueCapacity) AttrComponentName(val string) attribute.KeyValue { + return attribute.String("otel.component.name", val) +} + +// AttrComponentType returns an optional attribute for the "otel.component.type" +// semantic convention. It represents a name identifying the type of the +// OpenTelemetry component. +func (SDKProcessorLogQueueCapacity) AttrComponentType(val ComponentTypeAttr) attribute.KeyValue { + return attribute.String("otel.component.type", string(val)) +} + +// SDKProcessorLogQueueSize is an instrument used to record metric values +// conforming to the "otel.sdk.processor.log.queue.size" semantic conventions. It +// represents the number of log records in the queue of a given instance of an +// SDK log processor. +type SDKProcessorLogQueueSize struct { + metric.Int64ObservableUpDownCounter +} + +// NewSDKProcessorLogQueueSize returns a new SDKProcessorLogQueueSize instrument. +func NewSDKProcessorLogQueueSize( + m metric.Meter, + opt ...metric.Int64ObservableUpDownCounterOption, +) (SDKProcessorLogQueueSize, error) { + // Check if the meter is nil. + if m == nil { + return SDKProcessorLogQueueSize{noop.Int64ObservableUpDownCounter{}}, nil + } + + i, err := m.Int64ObservableUpDownCounter( + "otel.sdk.processor.log.queue.size", + append([]metric.Int64ObservableUpDownCounterOption{ + metric.WithDescription("The number of log records in the queue of a given instance of an SDK log processor."), + metric.WithUnit("{log_record}"), + }, opt...)..., + ) + if err != nil { + return SDKProcessorLogQueueSize{noop.Int64ObservableUpDownCounter{}}, err + } + return SDKProcessorLogQueueSize{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m SDKProcessorLogQueueSize) Inst() metric.Int64ObservableUpDownCounter { + return m.Int64ObservableUpDownCounter +} + +// Name returns the semantic convention name of the instrument. 
+func (SDKProcessorLogQueueSize) Name() string { + return "otel.sdk.processor.log.queue.size" +} + +// Unit returns the semantic convention unit of the instrument +func (SDKProcessorLogQueueSize) Unit() string { + return "{log_record}" +} + +// Description returns the semantic convention description of the instrument +func (SDKProcessorLogQueueSize) Description() string { + return "The number of log records in the queue of a given instance of an SDK log processor." +} + +// AttrComponentName returns an optional attribute for the "otel.component.name" +// semantic convention. It represents a name uniquely identifying the instance of +// the OpenTelemetry component within its containing SDK instance. +func (SDKProcessorLogQueueSize) AttrComponentName(val string) attribute.KeyValue { + return attribute.String("otel.component.name", val) +} + +// AttrComponentType returns an optional attribute for the "otel.component.type" +// semantic convention. It represents a name identifying the type of the +// OpenTelemetry component. +func (SDKProcessorLogQueueSize) AttrComponentType(val ComponentTypeAttr) attribute.KeyValue { + return attribute.String("otel.component.type", string(val)) +} + +// SDKProcessorSpanProcessed is an instrument used to record metric values +// conforming to the "otel.sdk.processor.span.processed" semantic conventions. It +// represents the number of spans for which the processing has finished, either +// successful or failed. +type SDKProcessorSpanProcessed struct { + metric.Int64Counter +} + +// NewSDKProcessorSpanProcessed returns a new SDKProcessorSpanProcessed +// instrument. +func NewSDKProcessorSpanProcessed( + m metric.Meter, + opt ...metric.Int64CounterOption, +) (SDKProcessorSpanProcessed, error) { + // Check if the meter is nil. 
+ if m == nil { + return SDKProcessorSpanProcessed{noop.Int64Counter{}}, nil + } + + i, err := m.Int64Counter( + "otel.sdk.processor.span.processed", + append([]metric.Int64CounterOption{ + metric.WithDescription("The number of spans for which the processing has finished, either successful or failed."), + metric.WithUnit("{span}"), + }, opt...)..., + ) + if err != nil { + return SDKProcessorSpanProcessed{noop.Int64Counter{}}, err + } + return SDKProcessorSpanProcessed{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m SDKProcessorSpanProcessed) Inst() metric.Int64Counter { + return m.Int64Counter +} + +// Name returns the semantic convention name of the instrument. +func (SDKProcessorSpanProcessed) Name() string { + return "otel.sdk.processor.span.processed" +} + +// Unit returns the semantic convention unit of the instrument +func (SDKProcessorSpanProcessed) Unit() string { + return "{span}" +} + +// Description returns the semantic convention description of the instrument +func (SDKProcessorSpanProcessed) Description() string { + return "The number of spans for which the processing has finished, either successful or failed." +} + +// Add adds incr to the existing count for attrs. +// +// All additional attrs passed are included in the recorded value. +// +// For successful processing, `error.type` MUST NOT be set. For failed +// processing, `error.type` MUST contain the failure cause. +// For the SDK Simple and Batching Span Processor a span is considered to be +// processed already when it has been submitted to the exporter, not when the +// corresponding export call has finished. 
+func (m SDKProcessorSpanProcessed) Add( + ctx context.Context, + incr int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// For successful processing, `error.type` MUST NOT be set. For failed +// processing, `error.type` MUST contain the failure cause. +// For the SDK Simple and Batching Span Processor a span is considered to be +// processed already when it has been submitted to the exporter, not when the +// corresponding export call has finished. +func (m SDKProcessorSpanProcessed) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AttrErrorType returns an optional attribute for the "error.type" semantic +// convention. It represents a low-cardinality description of the failure reason. +// SDK Batching Span Processors MUST use `queue_full` for spans dropped due to a +// full queue. +func (SDKProcessorSpanProcessed) AttrErrorType(val ErrorTypeAttr) attribute.KeyValue { + return attribute.String("error.type", string(val)) +} + +// AttrComponentName returns an optional attribute for the "otel.component.name" +// semantic convention. It represents a name uniquely identifying the instance of +// the OpenTelemetry component within its containing SDK instance. 
+func (SDKProcessorSpanProcessed) AttrComponentName(val string) attribute.KeyValue { + return attribute.String("otel.component.name", val) +} + +// AttrComponentType returns an optional attribute for the "otel.component.type" +// semantic convention. It represents a name identifying the type of the +// OpenTelemetry component. +func (SDKProcessorSpanProcessed) AttrComponentType(val ComponentTypeAttr) attribute.KeyValue { + return attribute.String("otel.component.type", string(val)) +} + +// SDKProcessorSpanQueueCapacity is an instrument used to record metric values +// conforming to the "otel.sdk.processor.span.queue.capacity" semantic +// conventions. It represents the maximum number of spans the queue of a given +// instance of an SDK span processor can hold. +type SDKProcessorSpanQueueCapacity struct { + metric.Int64ObservableUpDownCounter +} + +// NewSDKProcessorSpanQueueCapacity returns a new SDKProcessorSpanQueueCapacity +// instrument. +func NewSDKProcessorSpanQueueCapacity( + m metric.Meter, + opt ...metric.Int64ObservableUpDownCounterOption, +) (SDKProcessorSpanQueueCapacity, error) { + // Check if the meter is nil. + if m == nil { + return SDKProcessorSpanQueueCapacity{noop.Int64ObservableUpDownCounter{}}, nil + } + + i, err := m.Int64ObservableUpDownCounter( + "otel.sdk.processor.span.queue.capacity", + append([]metric.Int64ObservableUpDownCounterOption{ + metric.WithDescription("The maximum number of spans the queue of a given instance of an SDK span processor can hold."), + metric.WithUnit("{span}"), + }, opt...)..., + ) + if err != nil { + return SDKProcessorSpanQueueCapacity{noop.Int64ObservableUpDownCounter{}}, err + } + return SDKProcessorSpanQueueCapacity{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m SDKProcessorSpanQueueCapacity) Inst() metric.Int64ObservableUpDownCounter { + return m.Int64ObservableUpDownCounter +} + +// Name returns the semantic convention name of the instrument. 
+func (SDKProcessorSpanQueueCapacity) Name() string { + return "otel.sdk.processor.span.queue.capacity" +} + +// Unit returns the semantic convention unit of the instrument +func (SDKProcessorSpanQueueCapacity) Unit() string { + return "{span}" +} + +// Description returns the semantic convention description of the instrument +func (SDKProcessorSpanQueueCapacity) Description() string { + return "The maximum number of spans the queue of a given instance of an SDK span processor can hold." +} + +// AttrComponentName returns an optional attribute for the "otel.component.name" +// semantic convention. It represents a name uniquely identifying the instance of +// the OpenTelemetry component within its containing SDK instance. +func (SDKProcessorSpanQueueCapacity) AttrComponentName(val string) attribute.KeyValue { + return attribute.String("otel.component.name", val) +} + +// AttrComponentType returns an optional attribute for the "otel.component.type" +// semantic convention. It represents a name identifying the type of the +// OpenTelemetry component. +func (SDKProcessorSpanQueueCapacity) AttrComponentType(val ComponentTypeAttr) attribute.KeyValue { + return attribute.String("otel.component.type", string(val)) +} + +// SDKProcessorSpanQueueSize is an instrument used to record metric values +// conforming to the "otel.sdk.processor.span.queue.size" semantic conventions. +// It represents the number of spans in the queue of a given instance of an SDK +// span processor. +type SDKProcessorSpanQueueSize struct { + metric.Int64ObservableUpDownCounter +} + +// NewSDKProcessorSpanQueueSize returns a new SDKProcessorSpanQueueSize +// instrument. +func NewSDKProcessorSpanQueueSize( + m metric.Meter, + opt ...metric.Int64ObservableUpDownCounterOption, +) (SDKProcessorSpanQueueSize, error) { + // Check if the meter is nil. 
+ if m == nil { + return SDKProcessorSpanQueueSize{noop.Int64ObservableUpDownCounter{}}, nil + } + + i, err := m.Int64ObservableUpDownCounter( + "otel.sdk.processor.span.queue.size", + append([]metric.Int64ObservableUpDownCounterOption{ + metric.WithDescription("The number of spans in the queue of a given instance of an SDK span processor."), + metric.WithUnit("{span}"), + }, opt...)..., + ) + if err != nil { + return SDKProcessorSpanQueueSize{noop.Int64ObservableUpDownCounter{}}, err + } + return SDKProcessorSpanQueueSize{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m SDKProcessorSpanQueueSize) Inst() metric.Int64ObservableUpDownCounter { + return m.Int64ObservableUpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (SDKProcessorSpanQueueSize) Name() string { + return "otel.sdk.processor.span.queue.size" +} + +// Unit returns the semantic convention unit of the instrument +func (SDKProcessorSpanQueueSize) Unit() string { + return "{span}" +} + +// Description returns the semantic convention description of the instrument +func (SDKProcessorSpanQueueSize) Description() string { + return "The number of spans in the queue of a given instance of an SDK span processor." +} + +// AttrComponentName returns an optional attribute for the "otel.component.name" +// semantic convention. It represents a name uniquely identifying the instance of +// the OpenTelemetry component within its containing SDK instance. +func (SDKProcessorSpanQueueSize) AttrComponentName(val string) attribute.KeyValue { + return attribute.String("otel.component.name", val) +} + +// AttrComponentType returns an optional attribute for the "otel.component.type" +// semantic convention. It represents a name identifying the type of the +// OpenTelemetry component. 
+func (SDKProcessorSpanQueueSize) AttrComponentType(val ComponentTypeAttr) attribute.KeyValue { + return attribute.String("otel.component.type", string(val)) +} + +// SDKSpanLive is an instrument used to record metric values conforming to the +// "otel.sdk.span.live" semantic conventions. It represents the number of created +// spans with `recording=true` for which the end operation has not been called +// yet. +type SDKSpanLive struct { + metric.Int64UpDownCounter +} + +// NewSDKSpanLive returns a new SDKSpanLive instrument. +func NewSDKSpanLive( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (SDKSpanLive, error) { + // Check if the meter is nil. + if m == nil { + return SDKSpanLive{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "otel.sdk.span.live", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("The number of created spans with `recording=true` for which the end operation has not been called yet."), + metric.WithUnit("{span}"), + }, opt...)..., + ) + if err != nil { + return SDKSpanLive{noop.Int64UpDownCounter{}}, err + } + return SDKSpanLive{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m SDKSpanLive) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (SDKSpanLive) Name() string { + return "otel.sdk.span.live" +} + +// Unit returns the semantic convention unit of the instrument +func (SDKSpanLive) Unit() string { + return "{span}" +} + +// Description returns the semantic convention description of the instrument +func (SDKSpanLive) Description() string { + return "The number of created spans with `recording=true` for which the end operation has not been called yet." +} + +// Add adds incr to the existing count for attrs. +// +// All additional attrs passed are included in the recorded value. 
+func (m SDKSpanLive) Add( + ctx context.Context, + incr int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +func (m SDKSpanLive) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AttrSpanSamplingResult returns an optional attribute for the +// "otel.span.sampling_result" semantic convention. It represents the result +// value of the sampler for this span. +func (SDKSpanLive) AttrSpanSamplingResult(val SpanSamplingResultAttr) attribute.KeyValue { + return attribute.String("otel.span.sampling_result", string(val)) +} + +// SDKSpanStarted is an instrument used to record metric values conforming to the +// "otel.sdk.span.started" semantic conventions. It represents the number of +// created spans. +type SDKSpanStarted struct { + metric.Int64Counter +} + +// NewSDKSpanStarted returns a new SDKSpanStarted instrument. +func NewSDKSpanStarted( + m metric.Meter, + opt ...metric.Int64CounterOption, +) (SDKSpanStarted, error) { + // Check if the meter is nil. 
+ if m == nil { + return SDKSpanStarted{noop.Int64Counter{}}, nil + } + + i, err := m.Int64Counter( + "otel.sdk.span.started", + append([]metric.Int64CounterOption{ + metric.WithDescription("The number of created spans."), + metric.WithUnit("{span}"), + }, opt...)..., + ) + if err != nil { + return SDKSpanStarted{noop.Int64Counter{}}, err + } + return SDKSpanStarted{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m SDKSpanStarted) Inst() metric.Int64Counter { + return m.Int64Counter +} + +// Name returns the semantic convention name of the instrument. +func (SDKSpanStarted) Name() string { + return "otel.sdk.span.started" +} + +// Unit returns the semantic convention unit of the instrument +func (SDKSpanStarted) Unit() string { + return "{span}" +} + +// Description returns the semantic convention description of the instrument +func (SDKSpanStarted) Description() string { + return "The number of created spans." +} + +// Add adds incr to the existing count for attrs. +// +// All additional attrs passed are included in the recorded value. +// +// Implementations MUST record this metric for all spans, even for non-recording +// ones. +func (m SDKSpanStarted) Add( + ctx context.Context, + incr int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// Implementations MUST record this metric for all spans, even for non-recording +// ones. 
+func (m SDKSpanStarted) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AttrSpanParentOrigin returns an optional attribute for the +// "otel.span.parent.origin" semantic convention. It represents the determines +// whether the span has a parent span, and if so, [whether it is a remote parent] +// . +// +// [whether it is a remote parent]: https://opentelemetry.io/docs/specs/otel/trace/api/#isremote +func (SDKSpanStarted) AttrSpanParentOrigin(val SpanParentOriginAttr) attribute.KeyValue { + return attribute.String("otel.span.parent.origin", string(val)) +} + +// AttrSpanSamplingResult returns an optional attribute for the +// "otel.span.sampling_result" semantic convention. It represents the result +// value of the sampler for this span. +func (SDKSpanStarted) AttrSpanSamplingResult(val SpanSamplingResultAttr) attribute.KeyValue { + return attribute.String("otel.span.sampling_result", string(val)) +} \ No newline at end of file diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/schema.go b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/schema.go new file mode 100644 index 000000000..f8a0b7044 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/schema.go @@ -0,0 +1,9 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.37.0" + +// SchemaURL is the schema URL that matches the version of the semantic conventions +// that this package defines. 
Semconv packages starting from v1.4.0 must declare +// non-empty schema URL in the form https://opentelemetry.io/schemas/ +const SchemaURL = "https://opentelemetry.io/schemas/1.37.0" diff --git a/vendor/go.opentelemetry.io/otel/trace/LICENSE b/vendor/go.opentelemetry.io/otel/trace/LICENSE index 261eeb9e9..f1aee0f11 100644 --- a/vendor/go.opentelemetry.io/otel/trace/LICENSE +++ b/vendor/go.opentelemetry.io/otel/trace/LICENSE @@ -199,3 +199,33 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +-------------------------------------------------------------------------------- + +Copyright 2009 The Go Authors. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google LLC nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. \ No newline at end of file diff --git a/vendor/go.opentelemetry.io/otel/trace/auto.go b/vendor/go.opentelemetry.io/otel/trace/auto.go new file mode 100644 index 000000000..8763936a8 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/auto.go @@ -0,0 +1,662 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package trace // import "go.opentelemetry.io/otel/trace" + +import ( + "context" + "encoding/json" + "fmt" + "math" + "os" + "reflect" + "runtime" + "strconv" + "strings" + "sync" + "sync/atomic" + "time" + "unicode/utf8" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" + semconv "go.opentelemetry.io/otel/semconv/v1.37.0" + "go.opentelemetry.io/otel/trace/embedded" + "go.opentelemetry.io/otel/trace/internal/telemetry" +) + +// newAutoTracerProvider returns an auto-instrumentable [trace.TracerProvider]. +// If an [go.opentelemetry.io/auto.Instrumentation] is configured to instrument +// the process using the returned TracerProvider, all of the telemetry it +// produces will be processed and handled by that Instrumentation. By default, +// if no Instrumentation instruments the TracerProvider it will not generate +// any trace telemetry. 
+func newAutoTracerProvider() TracerProvider { return tracerProviderInstance } + +var tracerProviderInstance = new(autoTracerProvider) + +type autoTracerProvider struct{ embedded.TracerProvider } + +var _ TracerProvider = autoTracerProvider{} + +func (autoTracerProvider) Tracer(name string, opts ...TracerOption) Tracer { + cfg := NewTracerConfig(opts...) + return autoTracer{ + name: name, + version: cfg.InstrumentationVersion(), + schemaURL: cfg.SchemaURL(), + } +} + +type autoTracer struct { + embedded.Tracer + + name, schemaURL, version string +} + +var _ Tracer = autoTracer{} + +func (t autoTracer) Start(ctx context.Context, name string, opts ...SpanStartOption) (context.Context, Span) { + var psc, sc SpanContext + sampled := true + span := new(autoSpan) + + // Ask eBPF for sampling decision and span context info. + t.start(ctx, span, &psc, &sampled, &sc) + + span.sampled.Store(sampled) + span.spanContext = sc + + ctx = ContextWithSpan(ctx, span) + + if sampled { + // Only build traces if sampled. + cfg := NewSpanStartConfig(opts...) + span.traces, span.span = t.traces(name, cfg, span.spanContext, psc) + } + + return ctx, span +} + +// Expected to be implemented in eBPF. +// +//go:noinline +func (*autoTracer) start( + ctx context.Context, + spanPtr *autoSpan, + psc *SpanContext, + sampled *bool, + sc *SpanContext, +) { + start(ctx, spanPtr, psc, sampled, sc) +} + +// start is used for testing. 
+var start = func(context.Context, *autoSpan, *SpanContext, *bool, *SpanContext) {} + +func (t autoTracer) traces(name string, cfg SpanConfig, sc, psc SpanContext) (*telemetry.Traces, *telemetry.Span) { + span := &telemetry.Span{ + TraceID: telemetry.TraceID(sc.TraceID()), + SpanID: telemetry.SpanID(sc.SpanID()), + Flags: uint32(sc.TraceFlags()), + TraceState: sc.TraceState().String(), + ParentSpanID: telemetry.SpanID(psc.SpanID()), + Name: name, + Kind: spanKind(cfg.SpanKind()), + } + + span.Attrs, span.DroppedAttrs = convCappedAttrs(maxSpan.Attrs, cfg.Attributes()) + + links := cfg.Links() + if limit := maxSpan.Links; limit == 0 { + n := int64(len(links)) + if n > 0 { + span.DroppedLinks = uint32(min(n, math.MaxUint32)) // nolint: gosec // Bounds checked. + } + } else { + if limit > 0 { + n := int64(max(len(links)-limit, 0)) + span.DroppedLinks = uint32(min(n, math.MaxUint32)) // nolint: gosec // Bounds checked. + links = links[n:] + } + span.Links = convLinks(links) + } + + if t := cfg.Timestamp(); !t.IsZero() { + span.StartTime = cfg.Timestamp() + } else { + span.StartTime = time.Now() + } + + return &telemetry.Traces{ + ResourceSpans: []*telemetry.ResourceSpans{ + { + ScopeSpans: []*telemetry.ScopeSpans{ + { + Scope: &telemetry.Scope{ + Name: t.name, + Version: t.version, + }, + Spans: []*telemetry.Span{span}, + SchemaURL: t.schemaURL, + }, + }, + }, + }, + }, span +} + +func spanKind(kind SpanKind) telemetry.SpanKind { + switch kind { + case SpanKindInternal: + return telemetry.SpanKindInternal + case SpanKindServer: + return telemetry.SpanKindServer + case SpanKindClient: + return telemetry.SpanKindClient + case SpanKindProducer: + return telemetry.SpanKindProducer + case SpanKindConsumer: + return telemetry.SpanKindConsumer + } + return telemetry.SpanKind(0) // undefined. 
+} + +type autoSpan struct { + embedded.Span + + spanContext SpanContext + sampled atomic.Bool + + mu sync.Mutex + traces *telemetry.Traces + span *telemetry.Span +} + +func (s *autoSpan) SpanContext() SpanContext { + if s == nil { + return SpanContext{} + } + // s.spanContext is immutable, do not acquire lock s.mu. + return s.spanContext +} + +func (s *autoSpan) IsRecording() bool { + if s == nil { + return false + } + + return s.sampled.Load() +} + +func (s *autoSpan) SetStatus(c codes.Code, msg string) { + if s == nil || !s.sampled.Load() { + return + } + + s.mu.Lock() + defer s.mu.Unlock() + + if s.span.Status == nil { + s.span.Status = new(telemetry.Status) + } + + s.span.Status.Message = msg + + switch c { + case codes.Unset: + s.span.Status.Code = telemetry.StatusCodeUnset + case codes.Error: + s.span.Status.Code = telemetry.StatusCodeError + case codes.Ok: + s.span.Status.Code = telemetry.StatusCodeOK + } +} + +func (s *autoSpan) SetAttributes(attrs ...attribute.KeyValue) { + if s == nil || !s.sampled.Load() { + return + } + + s.mu.Lock() + defer s.mu.Unlock() + + limit := maxSpan.Attrs + if limit == 0 { + // No attributes allowed. + n := int64(len(attrs)) + if n > 0 { + s.span.DroppedAttrs += uint32(min(n, math.MaxUint32)) // nolint: gosec // Bounds checked. + } + return + } + + m := make(map[string]int) + for i, a := range s.span.Attrs { + m[a.Key] = i + } + + for _, a := range attrs { + val := convAttrValue(a.Value) + if val.Empty() { + s.span.DroppedAttrs++ + continue + } + + if idx, ok := m[string(a.Key)]; ok { + s.span.Attrs[idx] = telemetry.Attr{ + Key: string(a.Key), + Value: val, + } + } else if limit < 0 || len(s.span.Attrs) < limit { + s.span.Attrs = append(s.span.Attrs, telemetry.Attr{ + Key: string(a.Key), + Value: val, + }) + m[string(a.Key)] = len(s.span.Attrs) - 1 + } else { + s.span.DroppedAttrs++ + } + } +} + +// convCappedAttrs converts up to limit attrs into a []telemetry.Attr. The +// number of dropped attributes is also returned. 
+func convCappedAttrs(limit int, attrs []attribute.KeyValue) ([]telemetry.Attr, uint32) { + n := len(attrs) + if limit == 0 { + var out uint32 + if n > 0 { + out = uint32(min(int64(n), math.MaxUint32)) // nolint: gosec // Bounds checked. + } + return nil, out + } + + if limit < 0 { + // Unlimited. + return convAttrs(attrs), 0 + } + + if n < 0 { + n = 0 + } + + limit = min(n, limit) + return convAttrs(attrs[:limit]), uint32(n - limit) // nolint: gosec // Bounds checked. +} + +func convAttrs(attrs []attribute.KeyValue) []telemetry.Attr { + if len(attrs) == 0 { + // Avoid allocations if not necessary. + return nil + } + + out := make([]telemetry.Attr, 0, len(attrs)) + for _, attr := range attrs { + key := string(attr.Key) + val := convAttrValue(attr.Value) + if val.Empty() { + continue + } + out = append(out, telemetry.Attr{Key: key, Value: val}) + } + return out +} + +func convAttrValue(value attribute.Value) telemetry.Value { + switch value.Type() { + case attribute.BOOL: + return telemetry.BoolValue(value.AsBool()) + case attribute.INT64: + return telemetry.Int64Value(value.AsInt64()) + case attribute.FLOAT64: + return telemetry.Float64Value(value.AsFloat64()) + case attribute.STRING: + v := truncate(maxSpan.AttrValueLen, value.AsString()) + return telemetry.StringValue(v) + case attribute.BOOLSLICE: + slice := value.AsBoolSlice() + out := make([]telemetry.Value, 0, len(slice)) + for _, v := range slice { + out = append(out, telemetry.BoolValue(v)) + } + return telemetry.SliceValue(out...) + case attribute.INT64SLICE: + slice := value.AsInt64Slice() + out := make([]telemetry.Value, 0, len(slice)) + for _, v := range slice { + out = append(out, telemetry.Int64Value(v)) + } + return telemetry.SliceValue(out...) + case attribute.FLOAT64SLICE: + slice := value.AsFloat64Slice() + out := make([]telemetry.Value, 0, len(slice)) + for _, v := range slice { + out = append(out, telemetry.Float64Value(v)) + } + return telemetry.SliceValue(out...) 
+ case attribute.STRINGSLICE: + slice := value.AsStringSlice() + out := make([]telemetry.Value, 0, len(slice)) + for _, v := range slice { + v = truncate(maxSpan.AttrValueLen, v) + out = append(out, telemetry.StringValue(v)) + } + return telemetry.SliceValue(out...) + } + return telemetry.Value{} +} + +// truncate returns a truncated version of s such that it contains less than +// the limit number of characters. Truncation is applied by returning the limit +// number of valid characters contained in s. +// +// If limit is negative, it returns the original string. +// +// UTF-8 is supported. When truncating, all invalid characters are dropped +// before applying truncation. +// +// If s already contains less than the limit number of bytes, it is returned +// unchanged. No invalid characters are removed. +func truncate(limit int, s string) string { + // This prioritize performance in the following order based on the most + // common expected use-cases. + // + // - Short values less than the default limit (128). + // - Strings with valid encodings that exceed the limit. + // - No limit. + // - Strings with invalid encodings that exceed the limit. + if limit < 0 || len(s) <= limit { + return s + } + + // Optimistically, assume all valid UTF-8. + var b strings.Builder + count := 0 + for i, c := range s { + if c != utf8.RuneError { + count++ + if count > limit { + return s[:i] + } + continue + } + + _, size := utf8.DecodeRuneInString(s[i:]) + if size == 1 { + // Invalid encoding. + b.Grow(len(s) - 1) + _, _ = b.WriteString(s[:i]) + s = s[i:] + break + } + } + + // Fast-path, no invalid input. + if b.Cap() == 0 { + return s + } + + // Truncate while validating UTF-8. + for i := 0; i < len(s) && count < limit; { + c := s[i] + if c < utf8.RuneSelf { + // Optimization for single byte runes (common case). 
+ _ = b.WriteByte(c) + i++ + count++ + continue + } + + _, size := utf8.DecodeRuneInString(s[i:]) + if size == 1 { + // We checked for all 1-byte runes above, this is a RuneError. + i++ + continue + } + + _, _ = b.WriteString(s[i : i+size]) + i += size + count++ + } + + return b.String() +} + +func (s *autoSpan) End(opts ...SpanEndOption) { + if s == nil || !s.sampled.Swap(false) { + return + } + + // s.end exists so the lock (s.mu) is not held while s.ended is called. + s.ended(s.end(opts)) +} + +func (s *autoSpan) end(opts []SpanEndOption) []byte { + s.mu.Lock() + defer s.mu.Unlock() + + cfg := NewSpanEndConfig(opts...) + if t := cfg.Timestamp(); !t.IsZero() { + s.span.EndTime = cfg.Timestamp() + } else { + s.span.EndTime = time.Now() + } + + b, _ := json.Marshal(s.traces) // TODO: do not ignore this error. + return b +} + +// Expected to be implemented in eBPF. +// +//go:noinline +func (*autoSpan) ended(buf []byte) { ended(buf) } + +// ended is used for testing. +var ended = func([]byte) {} + +func (s *autoSpan) RecordError(err error, opts ...EventOption) { + if s == nil || err == nil || !s.sampled.Load() { + return + } + + cfg := NewEventConfig(opts...) + + attrs := cfg.Attributes() + attrs = append(attrs, + semconv.ExceptionType(typeStr(err)), + semconv.ExceptionMessage(err.Error()), + ) + if cfg.StackTrace() { + buf := make([]byte, 2048) + n := runtime.Stack(buf, false) + attrs = append(attrs, semconv.ExceptionStacktrace(string(buf[0:n]))) + } + + s.mu.Lock() + defer s.mu.Unlock() + + s.addEvent(semconv.ExceptionEventName, cfg.Timestamp(), attrs) +} + +func typeStr(i any) string { + t := reflect.TypeOf(i) + if t.PkgPath() == "" && t.Name() == "" { + // Likely a builtin type. + return t.String() + } + return fmt.Sprintf("%s.%s", t.PkgPath(), t.Name()) +} + +func (s *autoSpan) AddEvent(name string, opts ...EventOption) { + if s == nil || !s.sampled.Load() { + return + } + + cfg := NewEventConfig(opts...) 
+ + s.mu.Lock() + defer s.mu.Unlock() + + s.addEvent(name, cfg.Timestamp(), cfg.Attributes()) +} + +// addEvent adds an event with name and attrs at tStamp to the span. The span +// lock (s.mu) needs to be held by the caller. +func (s *autoSpan) addEvent(name string, tStamp time.Time, attrs []attribute.KeyValue) { + limit := maxSpan.Events + + if limit == 0 { + s.span.DroppedEvents++ + return + } + + if limit > 0 && len(s.span.Events) == limit { + // Drop head while avoiding allocation of more capacity. + copy(s.span.Events[:limit-1], s.span.Events[1:]) + s.span.Events = s.span.Events[:limit-1] + s.span.DroppedEvents++ + } + + e := &telemetry.SpanEvent{Time: tStamp, Name: name} + e.Attrs, e.DroppedAttrs = convCappedAttrs(maxSpan.EventAttrs, attrs) + + s.span.Events = append(s.span.Events, e) +} + +func (s *autoSpan) AddLink(link Link) { + if s == nil || !s.sampled.Load() { + return + } + + l := maxSpan.Links + + s.mu.Lock() + defer s.mu.Unlock() + + if l == 0 { + s.span.DroppedLinks++ + return + } + + if l > 0 && len(s.span.Links) == l { + // Drop head while avoiding allocation of more capacity. 
+ copy(s.span.Links[:l-1], s.span.Links[1:]) + s.span.Links = s.span.Links[:l-1] + s.span.DroppedLinks++ + } + + s.span.Links = append(s.span.Links, convLink(link)) +} + +func convLinks(links []Link) []*telemetry.SpanLink { + out := make([]*telemetry.SpanLink, 0, len(links)) + for _, link := range links { + out = append(out, convLink(link)) + } + return out +} + +func convLink(link Link) *telemetry.SpanLink { + l := &telemetry.SpanLink{ + TraceID: telemetry.TraceID(link.SpanContext.TraceID()), + SpanID: telemetry.SpanID(link.SpanContext.SpanID()), + TraceState: link.SpanContext.TraceState().String(), + Flags: uint32(link.SpanContext.TraceFlags()), + } + l.Attrs, l.DroppedAttrs = convCappedAttrs(maxSpan.LinkAttrs, link.Attributes) + + return l +} + +func (s *autoSpan) SetName(name string) { + if s == nil || !s.sampled.Load() { + return + } + + s.mu.Lock() + defer s.mu.Unlock() + + s.span.Name = name +} + +func (*autoSpan) TracerProvider() TracerProvider { return newAutoTracerProvider() } + +// maxSpan are the span limits resolved during startup. +var maxSpan = newSpanLimits() + +type spanLimits struct { + // Attrs is the number of allowed attributes for a span. + // + // This is resolved from the environment variable value for the + // OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT key if it exists. Otherwise, the + // environment variable value for OTEL_ATTRIBUTE_COUNT_LIMIT, or 128 if + // that is not set, is used. + Attrs int + // AttrValueLen is the maximum attribute value length allowed for a span. + // + // This is resolved from the environment variable value for the + // OTEL_SPAN_ATTRIBUTE_VALUE_LENGTH_LIMIT key if it exists. Otherwise, the + // environment variable value for OTEL_ATTRIBUTE_VALUE_LENGTH_LIMIT, or -1 + // if that is not set, is used. + AttrValueLen int + // Events is the number of allowed events for a span. + // + // This is resolved from the environment variable value for the + // OTEL_SPAN_EVENT_COUNT_LIMIT key, or 128 is used if that is not set. 
+ Events int + // EventAttrs is the number of allowed attributes for a span event. + // + // The is resolved from the environment variable value for the + // OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT key, or 128 is used if that is not set. + EventAttrs int + // Links is the number of allowed Links for a span. + // + // This is resolved from the environment variable value for the + // OTEL_SPAN_LINK_COUNT_LIMIT, or 128 is used if that is not set. + Links int + // LinkAttrs is the number of allowed attributes for a span link. + // + // This is resolved from the environment variable value for the + // OTEL_LINK_ATTRIBUTE_COUNT_LIMIT, or 128 is used if that is not set. + LinkAttrs int +} + +func newSpanLimits() spanLimits { + return spanLimits{ + Attrs: firstEnv( + 128, + "OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT", + "OTEL_ATTRIBUTE_COUNT_LIMIT", + ), + AttrValueLen: firstEnv( + -1, // Unlimited. + "OTEL_SPAN_ATTRIBUTE_VALUE_LENGTH_LIMIT", + "OTEL_ATTRIBUTE_VALUE_LENGTH_LIMIT", + ), + Events: firstEnv(128, "OTEL_SPAN_EVENT_COUNT_LIMIT"), + EventAttrs: firstEnv(128, "OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT"), + Links: firstEnv(128, "OTEL_SPAN_LINK_COUNT_LIMIT"), + LinkAttrs: firstEnv(128, "OTEL_LINK_ATTRIBUTE_COUNT_LIMIT"), + } +} + +// firstEnv returns the parsed integer value of the first matching environment +// variable from keys. The defaultVal is returned if the value is not an +// integer or no match is found. +func firstEnv(defaultVal int, keys ...string) int { + for _, key := range keys { + strV := os.Getenv(key) + if strV == "" { + continue + } + + v, err := strconv.Atoi(strV) + if err == nil { + return v + } + // Ignore invalid environment variable. 
+	}
+
+	return defaultVal
+}
diff --git a/vendor/go.opentelemetry.io/otel/trace/config.go b/vendor/go.opentelemetry.io/otel/trace/config.go
index 9c0b720a4..aea11a2b5 100644
--- a/vendor/go.opentelemetry.io/otel/trace/config.go
+++ b/vendor/go.opentelemetry.io/otel/trace/config.go
@@ -73,7 +73,7 @@ func (cfg *SpanConfig) Timestamp() time.Time {
 	return cfg.timestamp
 }
 
-// StackTrace checks whether stack trace capturing is enabled.
+// StackTrace reports whether stack trace capturing is enabled.
 func (cfg *SpanConfig) StackTrace() bool {
 	return cfg.stackTrace
 }
@@ -154,7 +154,7 @@ func (cfg *EventConfig) Timestamp() time.Time {
 	return cfg.timestamp
 }
 
-// StackTrace checks whether stack trace capturing is enabled.
+// StackTrace reports whether stack trace capturing is enabled.
 func (cfg *EventConfig) StackTrace() bool {
 	return cfg.stackTrace
 }
diff --git a/vendor/go.opentelemetry.io/otel/trace/hex.go b/vendor/go.opentelemetry.io/otel/trace/hex.go
new file mode 100644
index 000000000..1cbef1d4b
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/trace/hex.go
@@ -0,0 +1,38 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package trace // import "go.opentelemetry.io/otel/trace"
+
+const (
+	// hexLU is a hex lookup table of the 16 lowercase hex digits.
+	// The character values of the string are indexed at the equivalent
+	// hexadecimal value they represent. This table efficiently encodes byte data
+	// into a string representation of hexadecimal.
+	hexLU = "0123456789abcdef"
+
+	// hexRev is a reverse hex lookup table for lowercase hex digits.
+	// The table efficiently decodes a hexadecimal string into bytes.
+	// Valid hexadecimal characters are indexed at their respective values. All
+	// other invalid ASCII characters are represented with '\xff'.
+	//
+	// The '\xff' character is used as invalid because no valid character has
+	// the upper 4 bits set.
Meaning, an efficient validation can be performed + // over multiple character parsing by checking these bits remain zero. + hexRev = "" + + "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" + + "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" + + "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" + + "\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\xff\xff\xff\xff\xff\xff" + + "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" + + "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" + + "\xff\x0a\x0b\x0c\x0d\x0e\x0f\xff\xff\xff\xff\xff\xff\xff\xff\xff" + + "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" + + "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" + + "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" + + "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" + + "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" + + "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" + + "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" + + "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" + + "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" +) diff --git a/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/attr.go b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/attr.go new file mode 100644 index 000000000..ff0f6eac6 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/attr.go @@ -0,0 +1,58 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package telemetry // import "go.opentelemetry.io/otel/trace/internal/telemetry" + +// Attr is a key-value pair. +type Attr struct { + Key string `json:"key,omitempty"` + Value Value `json:"value,omitempty"` +} + +// String returns an Attr for a string value. 
+func String(key, value string) Attr {
+	return Attr{key, StringValue(value)}
+}
+
+// Int64 returns an Attr for an int64 value.
+func Int64(key string, value int64) Attr {
+	return Attr{key, Int64Value(value)}
+}
+
+// Int returns an Attr for an int value.
+func Int(key string, value int) Attr {
+	return Int64(key, int64(value))
+}
+
+// Float64 returns an Attr for a float64 value.
+func Float64(key string, value float64) Attr {
+	return Attr{key, Float64Value(value)}
+}
+
+// Bool returns an Attr for a bool value.
+func Bool(key string, value bool) Attr {
+	return Attr{key, BoolValue(value)}
+}
+
+// Bytes returns an Attr for a []byte value.
+// The passed slice must not be changed after it is passed.
+func Bytes(key string, value []byte) Attr {
+	return Attr{key, BytesValue(value)}
+}
+
+// Slice returns an Attr for a []Value value.
+// The passed slice must not be changed after it is passed.
+func Slice(key string, value ...Value) Attr {
+	return Attr{key, SliceValue(value...)}
+}
+
+// Map returns an Attr for a map value.
+// The passed slice must not be changed after it is passed.
+func Map(key string, value ...Attr) Attr {
+	return Attr{key, MapValue(value...)}
+}
+
+// Equal reports whether a is equal to b.
+func (a Attr) Equal(b Attr) bool {
+	return a.Key == b.Key && a.Value.Equal(b.Value)
+}
diff --git a/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/doc.go b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/doc.go
new file mode 100644
index 000000000..5debe90bb
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/doc.go
@@ -0,0 +1,8 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+/*
+Package telemetry provides lightweight representations of OpenTelemetry
+telemetry that is compatible with the OTLP JSON protobuf encoding.
+*/ +package telemetry // import "go.opentelemetry.io/otel/trace/internal/telemetry" diff --git a/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/id.go b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/id.go new file mode 100644 index 000000000..bea56f2e7 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/id.go @@ -0,0 +1,103 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package telemetry // import "go.opentelemetry.io/otel/trace/internal/telemetry" + +import ( + "encoding/hex" + "errors" + "fmt" +) + +const ( + traceIDSize = 16 + spanIDSize = 8 +) + +// TraceID is a custom data type that is used for all trace IDs. +type TraceID [traceIDSize]byte + +// String returns the hex string representation form of a TraceID. +func (tid TraceID) String() string { + return hex.EncodeToString(tid[:]) +} + +// IsEmpty reports whether the TraceID contains only zero bytes. +func (tid TraceID) IsEmpty() bool { + return tid == [traceIDSize]byte{} +} + +// MarshalJSON converts the trace ID into a hex string enclosed in quotes. +func (tid TraceID) MarshalJSON() ([]byte, error) { + if tid.IsEmpty() { + return []byte(`""`), nil + } + return marshalJSON(tid[:]) +} + +// UnmarshalJSON inflates the trace ID from hex string, possibly enclosed in +// quotes. +func (tid *TraceID) UnmarshalJSON(data []byte) error { + *tid = [traceIDSize]byte{} + return unmarshalJSON(tid[:], data) +} + +// SpanID is a custom data type that is used for all span IDs. +type SpanID [spanIDSize]byte + +// String returns the hex string representation form of a SpanID. +func (sid SpanID) String() string { + return hex.EncodeToString(sid[:]) +} + +// IsEmpty reports whether the SpanID contains only zero bytes. +func (sid SpanID) IsEmpty() bool { + return sid == [spanIDSize]byte{} +} + +// MarshalJSON converts span ID into a hex string enclosed in quotes. 
+func (sid SpanID) MarshalJSON() ([]byte, error) { + if sid.IsEmpty() { + return []byte(`""`), nil + } + return marshalJSON(sid[:]) +} + +// UnmarshalJSON decodes span ID from hex string, possibly enclosed in quotes. +func (sid *SpanID) UnmarshalJSON(data []byte) error { + *sid = [spanIDSize]byte{} + return unmarshalJSON(sid[:], data) +} + +// marshalJSON converts id into a hex string enclosed in quotes. +func marshalJSON(id []byte) ([]byte, error) { + // Plus 2 quote chars at the start and end. + hexLen := hex.EncodedLen(len(id)) + 2 + + b := make([]byte, hexLen) + hex.Encode(b[1:hexLen-1], id) + b[0], b[hexLen-1] = '"', '"' + + return b, nil +} + +// unmarshalJSON inflates trace id from hex string, possibly enclosed in quotes. +func unmarshalJSON(dst, src []byte) error { + if l := len(src); l >= 2 && src[0] == '"' && src[l-1] == '"' { + src = src[1 : l-1] + } + nLen := len(src) + if nLen == 0 { + return nil + } + + if len(dst) != hex.DecodedLen(nLen) { + return errors.New("invalid length for ID") + } + + _, err := hex.Decode(dst, src) + if err != nil { + return fmt.Errorf("cannot unmarshal ID from string '%s': %w", string(src), err) + } + return nil +} diff --git a/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/number.go b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/number.go new file mode 100644 index 000000000..f5e3a8cec --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/number.go @@ -0,0 +1,67 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package telemetry // import "go.opentelemetry.io/otel/trace/internal/telemetry" + +import ( + "encoding/json" + "strconv" +) + +// protoInt64 represents the protobuf encoding of integers which can be either +// strings or integers. +type protoInt64 int64 + +// Int64 returns the protoInt64 as an int64. +func (i *protoInt64) Int64() int64 { return int64(*i) } + +// UnmarshalJSON decodes both strings and integers. 
+func (i *protoInt64) UnmarshalJSON(data []byte) error {
+	if data[0] == '"' {
+		var str string
+		if err := json.Unmarshal(data, &str); err != nil {
+			return err
+		}
+		parsedInt, err := strconv.ParseInt(str, 10, 64)
+		if err != nil {
+			return err
+		}
+		*i = protoInt64(parsedInt)
+	} else {
+		var parsedInt int64
+		if err := json.Unmarshal(data, &parsedInt); err != nil {
+			return err
+		}
+		*i = protoInt64(parsedInt)
+	}
+	return nil
+}
+
+// protoUint64 represents the protobuf encoding of integers which can be either
+// strings or integers.
+type protoUint64 uint64
+
+// Uint64 returns the protoUint64 as a uint64.
+func (i *protoUint64) Uint64() uint64 { return uint64(*i) }
+
+// UnmarshalJSON decodes both strings and integers.
+func (i *protoUint64) UnmarshalJSON(data []byte) error {
+	if data[0] == '"' {
+		var str string
+		if err := json.Unmarshal(data, &str); err != nil {
+			return err
+		}
+		parsedUint, err := strconv.ParseUint(str, 10, 64)
+		if err != nil {
+			return err
+		}
+		*i = protoUint64(parsedUint)
+	} else {
+		var parsedUint uint64
+		if err := json.Unmarshal(data, &parsedUint); err != nil {
+			return err
+		}
+		*i = protoUint64(parsedUint)
+	}
+	return nil
+}
diff --git a/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/resource.go b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/resource.go
new file mode 100644
index 000000000..1798a702d
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/resource.go
@@ -0,0 +1,66 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package telemetry // import "go.opentelemetry.io/otel/trace/internal/telemetry"
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+)
+
+// Resource information.
+type Resource struct {
+	// Attrs are the set of attributes that describe the resource. Attribute
+	// keys MUST be unique (it is not allowed to have more than one attribute
+	// with the same key).
+ Attrs []Attr `json:"attributes,omitempty"` + // DroppedAttrs is the number of dropped attributes. If the value + // is 0, then no attributes were dropped. + DroppedAttrs uint32 `json:"droppedAttributesCount,omitempty"` +} + +// UnmarshalJSON decodes the OTLP formatted JSON contained in data into r. +func (r *Resource) UnmarshalJSON(data []byte) error { + decoder := json.NewDecoder(bytes.NewReader(data)) + + t, err := decoder.Token() + if err != nil { + return err + } + if t != json.Delim('{') { + return errors.New("invalid Resource type") + } + + for decoder.More() { + keyIface, err := decoder.Token() + if err != nil { + if errors.Is(err, io.EOF) { + // Empty. + return nil + } + return err + } + + key, ok := keyIface.(string) + if !ok { + return fmt.Errorf("invalid Resource field: %#v", keyIface) + } + + switch key { + case "attributes": + err = decoder.Decode(&r.Attrs) + case "droppedAttributesCount", "dropped_attributes_count": + err = decoder.Decode(&r.DroppedAttrs) + default: + // Skip unknown. + } + + if err != nil { + return err + } + } + return nil +} diff --git a/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/scope.go b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/scope.go new file mode 100644 index 000000000..c2b4c635b --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/scope.go @@ -0,0 +1,67 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package telemetry // import "go.opentelemetry.io/otel/trace/internal/telemetry" + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// Scope is the identifying values of the instrumentation scope. +type Scope struct { + Name string `json:"name,omitempty"` + Version string `json:"version,omitempty"` + Attrs []Attr `json:"attributes,omitempty"` + DroppedAttrs uint32 `json:"droppedAttributesCount,omitempty"` +} + +// UnmarshalJSON decodes the OTLP formatted JSON contained in data into r. 
+func (s *Scope) UnmarshalJSON(data []byte) error { + decoder := json.NewDecoder(bytes.NewReader(data)) + + t, err := decoder.Token() + if err != nil { + return err + } + if t != json.Delim('{') { + return errors.New("invalid Scope type") + } + + for decoder.More() { + keyIface, err := decoder.Token() + if err != nil { + if errors.Is(err, io.EOF) { + // Empty. + return nil + } + return err + } + + key, ok := keyIface.(string) + if !ok { + return fmt.Errorf("invalid Scope field: %#v", keyIface) + } + + switch key { + case "name": + err = decoder.Decode(&s.Name) + case "version": + err = decoder.Decode(&s.Version) + case "attributes": + err = decoder.Decode(&s.Attrs) + case "droppedAttributesCount", "dropped_attributes_count": + err = decoder.Decode(&s.DroppedAttrs) + default: + // Skip unknown. + } + + if err != nil { + return err + } + } + return nil +} diff --git a/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/span.go b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/span.go new file mode 100644 index 000000000..e7ca62c66 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/span.go @@ -0,0 +1,472 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package telemetry // import "go.opentelemetry.io/otel/trace/internal/telemetry" + +import ( + "bytes" + "encoding/hex" + "encoding/json" + "errors" + "fmt" + "io" + "math" + "time" +) + +// A Span represents a single operation performed by a single component of the +// system. +type Span struct { + // A unique identifier for a trace. All spans from the same trace share + // the same `trace_id`. The ID is a 16-byte array. An ID with all zeroes OR + // of length other than 16 bytes is considered invalid (empty string in OTLP/JSON + // is zero-length and thus is also invalid). + // + // This field is required. + TraceID TraceID `json:"traceId,omitempty"` + // A unique identifier for a span within a trace, assigned when the span + // is created. 
The ID is an 8-byte array. An ID with all zeroes OR of length + // other than 8 bytes is considered invalid (empty string in OTLP/JSON + // is zero-length and thus is also invalid). + // + // This field is required. + SpanID SpanID `json:"spanId,omitempty"` + // trace_state conveys information about request position in multiple distributed tracing graphs. + // It is a trace_state in w3c-trace-context format: https://www.w3.org/TR/trace-context/#tracestate-header + // See also https://github.com/w3c/distributed-tracing for more details about this field. + TraceState string `json:"traceState,omitempty"` + // The `span_id` of this span's parent span. If this is a root span, then this + // field must be empty. The ID is an 8-byte array. + ParentSpanID SpanID `json:"parentSpanId,omitempty"` + // Flags, a bit field. + // + // Bits 0-7 (8 least significant bits) are the trace flags as defined in W3C Trace + // Context specification. To read the 8-bit W3C trace flag, use + // `flags & SPAN_FLAGS_TRACE_FLAGS_MASK`. + // + // See https://www.w3.org/TR/trace-context-2/#trace-flags for the flag definitions. + // + // Bits 8 and 9 represent the 3 states of whether a span's parent + // is remote. The states are (unknown, is not remote, is remote). + // To read whether the value is known, use `(flags & SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK) != 0`. + // To read whether the span is remote, use `(flags & SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK) != 0`. + // + // When creating span messages, if the message is logically forwarded from another source + // with an equivalent flags fields (i.e., usually another OTLP span message), the field SHOULD + // be copied as-is. If creating from a source that does not have an equivalent flags field + // (such as a runtime representation of an OpenTelemetry span), the high 22 bits MUST + // be set to zero. + // Readers MUST NOT assume that bits 10-31 (22 most significant bits) will be zero. + // + // [Optional]. 
+ Flags uint32 `json:"flags,omitempty"` + // A description of the span's operation. + // + // For example, the name can be a qualified method name or a file name + // and a line number where the operation is called. A best practice is to use + // the same display name at the same call point in an application. + // This makes it easier to correlate spans in different traces. + // + // This field is semantically required to be set to non-empty string. + // Empty value is equivalent to an unknown span name. + // + // This field is required. + Name string `json:"name"` + // Distinguishes between spans generated in a particular context. For example, + // two spans with the same name may be distinguished using `CLIENT` (caller) + // and `SERVER` (callee) to identify queueing latency associated with the span. + Kind SpanKind `json:"kind,omitempty"` + // start_time_unix_nano is the start time of the span. On the client side, this is the time + // kept by the local machine where the span execution starts. On the server side, this + // is the time when the server's application handler starts running. + // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970. + // + // This field is semantically required and it is expected that end_time >= start_time. + StartTime time.Time `json:"startTimeUnixNano,omitempty"` + // end_time_unix_nano is the end time of the span. On the client side, this is the time + // kept by the local machine where the span execution ends. On the server side, this + // is the time when the server application handler stops running. + // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970. + // + // This field is semantically required and it is expected that end_time >= start_time. + EndTime time.Time `json:"endTimeUnixNano,omitempty"` + // attributes is a collection of key/value pairs. Note, global attributes + // like server name can be set using the resource API. 
Examples of attributes: + // + // "/http/user_agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36" + // "/http/server_latency": 300 + // "example.com/myattribute": true + // "example.com/score": 10.239 + // + // The OpenTelemetry API specification further restricts the allowed value types: + // https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/common/README.md#attribute + // Attribute keys MUST be unique (it is not allowed to have more than one + // attribute with the same key). + Attrs []Attr `json:"attributes,omitempty"` + // dropped_attributes_count is the number of attributes that were discarded. Attributes + // can be discarded because their keys are too long or because there are too many + // attributes. If this value is 0, then no attributes were dropped. + DroppedAttrs uint32 `json:"droppedAttributesCount,omitempty"` + // events is a collection of Event items. + Events []*SpanEvent `json:"events,omitempty"` + // dropped_events_count is the number of dropped events. If the value is 0, then no + // events were dropped. + DroppedEvents uint32 `json:"droppedEventsCount,omitempty"` + // links is a collection of Links, which are references from this span to a span + // in the same or different trace. + Links []*SpanLink `json:"links,omitempty"` + // dropped_links_count is the number of dropped links after the maximum size was + // enforced. If this value is 0, then no links were dropped. + DroppedLinks uint32 `json:"droppedLinksCount,omitempty"` + // An optional final status for this span. Semantically when Status isn't set, it means + // span's status code is unset, i.e. assume STATUS_CODE_UNSET (code = 0). + Status *Status `json:"status,omitempty"` +} + +// MarshalJSON encodes s into OTLP formatted JSON. 
+func (s Span) MarshalJSON() ([]byte, error) { + startT := s.StartTime.UnixNano() + if s.StartTime.IsZero() || startT < 0 { + startT = 0 + } + + endT := s.EndTime.UnixNano() + if s.EndTime.IsZero() || endT < 0 { + endT = 0 + } + + // Override non-empty default SpanID marshal and omitempty. + var parentSpanId string + if !s.ParentSpanID.IsEmpty() { + b := make([]byte, hex.EncodedLen(spanIDSize)) + hex.Encode(b, s.ParentSpanID[:]) + parentSpanId = string(b) + } + + type Alias Span + return json.Marshal(struct { + Alias + ParentSpanID string `json:"parentSpanId,omitempty"` + StartTime uint64 `json:"startTimeUnixNano,omitempty"` + EndTime uint64 `json:"endTimeUnixNano,omitempty"` + }{ + Alias: Alias(s), + ParentSpanID: parentSpanId, + StartTime: uint64(startT), // nolint:gosec // >0 checked above. + EndTime: uint64(endT), // nolint:gosec // >0 checked above. + }) +} + +// UnmarshalJSON decodes the OTLP formatted JSON contained in data into s. +func (s *Span) UnmarshalJSON(data []byte) error { + decoder := json.NewDecoder(bytes.NewReader(data)) + + t, err := decoder.Token() + if err != nil { + return err + } + if t != json.Delim('{') { + return errors.New("invalid Span type") + } + + for decoder.More() { + keyIface, err := decoder.Token() + if err != nil { + if errors.Is(err, io.EOF) { + // Empty. 
+ return nil + } + return err + } + + key, ok := keyIface.(string) + if !ok { + return fmt.Errorf("invalid Span field: %#v", keyIface) + } + + switch key { + case "traceId", "trace_id": + err = decoder.Decode(&s.TraceID) + case "spanId", "span_id": + err = decoder.Decode(&s.SpanID) + case "traceState", "trace_state": + err = decoder.Decode(&s.TraceState) + case "parentSpanId", "parent_span_id": + err = decoder.Decode(&s.ParentSpanID) + case "flags": + err = decoder.Decode(&s.Flags) + case "name": + err = decoder.Decode(&s.Name) + case "kind": + err = decoder.Decode(&s.Kind) + case "startTimeUnixNano", "start_time_unix_nano": + var val protoUint64 + err = decoder.Decode(&val) + v := int64(min(val.Uint64(), math.MaxInt64)) // nolint: gosec // Overflow checked. + s.StartTime = time.Unix(0, v) + case "endTimeUnixNano", "end_time_unix_nano": + var val protoUint64 + err = decoder.Decode(&val) + v := int64(min(val.Uint64(), math.MaxInt64)) // nolint: gosec // Overflow checked. + s.EndTime = time.Unix(0, v) + case "attributes": + err = decoder.Decode(&s.Attrs) + case "droppedAttributesCount", "dropped_attributes_count": + err = decoder.Decode(&s.DroppedAttrs) + case "events": + err = decoder.Decode(&s.Events) + case "droppedEventsCount", "dropped_events_count": + err = decoder.Decode(&s.DroppedEvents) + case "links": + err = decoder.Decode(&s.Links) + case "droppedLinksCount", "dropped_links_count": + err = decoder.Decode(&s.DroppedLinks) + case "status": + err = decoder.Decode(&s.Status) + default: + // Skip unknown. + } + + if err != nil { + return err + } + } + return nil +} + +// SpanFlags represents constants used to interpret the +// Span.flags field, which is protobuf 'fixed32' type and is to +// be used as bit-fields. Each non-zero value defined in this enum is +// a bit-mask. 
To extract the bit-field, for example, use an +// expression like: +// +// (span.flags & SPAN_FLAGS_TRACE_FLAGS_MASK) +// +// See https://www.w3.org/TR/trace-context-2/#trace-flags for the flag definitions. +// +// Note that Span flags were introduced in version 1.1 of the +// OpenTelemetry protocol. Older Span producers do not set this +// field, consequently consumers should not rely on the absence of a +// particular flag bit to indicate the presence of a particular feature. +type SpanFlags int32 + +const ( + // SpanFlagsTraceFlagsMask is a mask for trace-flags. + // + // Bits 0-7 are used for trace flags. + SpanFlagsTraceFlagsMask SpanFlags = 255 + // SpanFlagsContextHasIsRemoteMask is a mask for HAS_IS_REMOTE status. + // + // Bits 8 and 9 are used to indicate that the parent span or link span is + // remote. Bit 8 (`HAS_IS_REMOTE`) indicates whether the value is known. + SpanFlagsContextHasIsRemoteMask SpanFlags = 256 + // SpanFlagsContextIsRemoteMask is a mask for IS_REMOTE status. + // + // Bits 8 and 9 are used to indicate that the parent span or link span is + // remote. Bit 9 (`IS_REMOTE`) indicates whether the span or link is + // remote. + SpanFlagsContextIsRemoteMask SpanFlags = 512 +) + +// SpanKind is the type of span. Can be used to specify additional relationships between spans +// in addition to a parent/child relationship. +type SpanKind int32 + +const ( + // SpanKindInternal indicates that the span represents an internal + // operation within an application, as opposed to an operation happening at + // the boundaries. + SpanKindInternal SpanKind = 1 + // SpanKindServer indicates that the span covers server-side handling of an + // RPC or other remote network request. + SpanKindServer SpanKind = 2 + // SpanKindClient indicates that the span describes a request to some + // remote service. + SpanKindClient SpanKind = 3 + // SpanKindProducer indicates that the span describes a producer sending a + // message to a broker. 
Unlike SpanKindClient and SpanKindServer, there is + // often no direct critical path latency relationship between producer and + // consumer spans. A SpanKindProducer span ends when the message was + // accepted by the broker while the logical processing of the message might + // span a much longer time. + SpanKindProducer SpanKind = 4 + // SpanKindConsumer indicates that the span describes a consumer receiving + // a message from a broker. Like SpanKindProducer, there is often no direct + // critical path latency relationship between producer and consumer spans. + SpanKindConsumer SpanKind = 5 +) + +// SpanEvent is a time-stamped annotation of the span, consisting of +// user-supplied text description and key-value pairs. +type SpanEvent struct { + // time_unix_nano is the time the event occurred. + Time time.Time `json:"timeUnixNano,omitempty"` + // name of the event. + // This field is semantically required to be set to non-empty string. + Name string `json:"name,omitempty"` + // attributes is a collection of attribute key/value pairs on the event. + // Attribute keys MUST be unique (it is not allowed to have more than one + // attribute with the same key). + Attrs []Attr `json:"attributes,omitempty"` + // dropped_attributes_count is the number of dropped attributes. If the value is 0, + // then no attributes were dropped. + DroppedAttrs uint32 `json:"droppedAttributesCount,omitempty"` +} + +// MarshalJSON encodes e into OTLP formatted JSON. +func (e SpanEvent) MarshalJSON() ([]byte, error) { + t := e.Time.UnixNano() + if e.Time.IsZero() || t < 0 { + t = 0 + } + + type Alias SpanEvent + return json.Marshal(struct { + Alias + Time uint64 `json:"timeUnixNano,omitempty"` + }{ + Alias: Alias(e), + Time: uint64(t), // nolint: gosec // >0 checked above + }) +} + +// UnmarshalJSON decodes the OTLP formatted JSON contained in data into se. 
+func (se *SpanEvent) UnmarshalJSON(data []byte) error { + decoder := json.NewDecoder(bytes.NewReader(data)) + + t, err := decoder.Token() + if err != nil { + return err + } + if t != json.Delim('{') { + return errors.New("invalid SpanEvent type") + } + + for decoder.More() { + keyIface, err := decoder.Token() + if err != nil { + if errors.Is(err, io.EOF) { + // Empty. + return nil + } + return err + } + + key, ok := keyIface.(string) + if !ok { + return fmt.Errorf("invalid SpanEvent field: %#v", keyIface) + } + + switch key { + case "timeUnixNano", "time_unix_nano": + var val protoUint64 + err = decoder.Decode(&val) + v := int64(min(val.Uint64(), math.MaxInt64)) // nolint: gosec // Overflow checked. + se.Time = time.Unix(0, v) + case "name": + err = decoder.Decode(&se.Name) + case "attributes": + err = decoder.Decode(&se.Attrs) + case "droppedAttributesCount", "dropped_attributes_count": + err = decoder.Decode(&se.DroppedAttrs) + default: + // Skip unknown. + } + + if err != nil { + return err + } + } + return nil +} + +// SpanLink is a reference from the current span to another span in the same +// trace or in a different trace. For example, this can be used in batching +// operations, where a single batch handler processes multiple requests from +// different traces or when the handler receives a request from a different +// project. +type SpanLink struct { + // A unique identifier of a trace that this linked span is part of. The ID is a + // 16-byte array. + TraceID TraceID `json:"traceId,omitempty"` + // A unique identifier for the linked span. The ID is an 8-byte array. + SpanID SpanID `json:"spanId,omitempty"` + // The trace_state associated with the link. + TraceState string `json:"traceState,omitempty"` + // attributes is a collection of attribute key/value pairs on the link. + // Attribute keys MUST be unique (it is not allowed to have more than one + // attribute with the same key). 
+ Attrs []Attr `json:"attributes,omitempty"` + // dropped_attributes_count is the number of dropped attributes. If the value is 0, + // then no attributes were dropped. + DroppedAttrs uint32 `json:"droppedAttributesCount,omitempty"` + // Flags, a bit field. + // + // Bits 0-7 (8 least significant bits) are the trace flags as defined in W3C Trace + // Context specification. To read the 8-bit W3C trace flag, use + // `flags & SPAN_FLAGS_TRACE_FLAGS_MASK`. + // + // See https://www.w3.org/TR/trace-context-2/#trace-flags for the flag definitions. + // + // Bits 8 and 9 represent the 3 states of whether the link is remote. + // The states are (unknown, is not remote, is remote). + // To read whether the value is known, use `(flags & SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK) != 0`. + // To read whether the link is remote, use `(flags & SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK) != 0`. + // + // Readers MUST NOT assume that bits 10-31 (22 most significant bits) will be zero. + // When creating new spans, bits 10-31 (most-significant 22-bits) MUST be zero. + // + // [Optional]. + Flags uint32 `json:"flags,omitempty"` +} + +// UnmarshalJSON decodes the OTLP formatted JSON contained in data into sl. +func (sl *SpanLink) UnmarshalJSON(data []byte) error { + decoder := json.NewDecoder(bytes.NewReader(data)) + + t, err := decoder.Token() + if err != nil { + return err + } + if t != json.Delim('{') { + return errors.New("invalid SpanLink type") + } + + for decoder.More() { + keyIface, err := decoder.Token() + if err != nil { + if errors.Is(err, io.EOF) { + // Empty. 
+ return nil + } + return err + } + + key, ok := keyIface.(string) + if !ok { + return fmt.Errorf("invalid SpanLink field: %#v", keyIface) + } + + switch key { + case "traceId", "trace_id": + err = decoder.Decode(&sl.TraceID) + case "spanId", "span_id": + err = decoder.Decode(&sl.SpanID) + case "traceState", "trace_state": + err = decoder.Decode(&sl.TraceState) + case "attributes": + err = decoder.Decode(&sl.Attrs) + case "droppedAttributesCount", "dropped_attributes_count": + err = decoder.Decode(&sl.DroppedAttrs) + case "flags": + err = decoder.Decode(&sl.Flags) + default: + // Skip unknown. + } + + if err != nil { + return err + } + } + return nil +} diff --git a/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/status.go b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/status.go new file mode 100644 index 000000000..1039bf40c --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/status.go @@ -0,0 +1,42 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package telemetry // import "go.opentelemetry.io/otel/trace/internal/telemetry" + +// StatusCode is the status of a Span. +// +// For the semantics of status codes see +// https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/api.md#set-status +type StatusCode int32 + +const ( + // StatusCodeUnset is the default status. + StatusCodeUnset StatusCode = 0 + // StatusCodeOK is used when the Span has been validated by an Application + // developer or Operator to have completed successfully. + StatusCodeOK StatusCode = 1 + // StatusCodeError is used when the Span contains an error. 
+ StatusCodeError StatusCode = 2 +) + +var statusCodeStrings = []string{ + "Unset", + "OK", + "Error", +} + +func (s StatusCode) String() string { + if s >= 0 && int(s) < len(statusCodeStrings) { + return statusCodeStrings[s] + } + return "" +} + +// Status defines a logical error model that is suitable for different +// programming environments, including REST APIs and RPC APIs. +type Status struct { + // A developer-facing human readable error message. + Message string `json:"message,omitempty"` + // The status code. + Code StatusCode `json:"code,omitempty"` +} diff --git a/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/traces.go b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/traces.go new file mode 100644 index 000000000..e5f10767c --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/traces.go @@ -0,0 +1,189 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package telemetry // import "go.opentelemetry.io/otel/trace/internal/telemetry" + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// Traces represents the traces data that can be stored in a persistent storage, +// OR can be embedded by other protocols that transfer OTLP traces data but do +// not implement the OTLP protocol. +// +// The main difference between this message and collector protocol is that +// in this message there will not be any "control" or "metadata" specific to +// OTLP protocol. +// +// When new fields are added into this message, the OTLP request MUST be updated +// as well. +type Traces struct { + // An array of ResourceSpans. + // For data coming from a single resource this array will typically contain + // one element. Intermediary nodes that receive data from multiple origins + // typically batch the data before forwarding further and in that case this + // array will contain multiple elements. 
+ ResourceSpans []*ResourceSpans `json:"resourceSpans,omitempty"` +} + +// UnmarshalJSON decodes the OTLP formatted JSON contained in data into td. +func (td *Traces) UnmarshalJSON(data []byte) error { + decoder := json.NewDecoder(bytes.NewReader(data)) + + t, err := decoder.Token() + if err != nil { + return err + } + if t != json.Delim('{') { + return errors.New("invalid TracesData type") + } + + for decoder.More() { + keyIface, err := decoder.Token() + if err != nil { + if errors.Is(err, io.EOF) { + // Empty. + return nil + } + return err + } + + key, ok := keyIface.(string) + if !ok { + return fmt.Errorf("invalid TracesData field: %#v", keyIface) + } + + switch key { + case "resourceSpans", "resource_spans": + err = decoder.Decode(&td.ResourceSpans) + default: + // Skip unknown. + } + + if err != nil { + return err + } + } + return nil +} + +// ResourceSpans is a collection of ScopeSpans from a Resource. +type ResourceSpans struct { + // The resource for the spans in this message. + // If this field is not set then no resource info is known. + Resource Resource `json:"resource"` + // A list of ScopeSpans that originate from a resource. + ScopeSpans []*ScopeSpans `json:"scopeSpans,omitempty"` + // This schema_url applies to the data in the "resource" field. It does not apply + // to the data in the "scope_spans" field which have their own schema_url field. + SchemaURL string `json:"schemaUrl,omitempty"` +} + +// UnmarshalJSON decodes the OTLP formatted JSON contained in data into rs. +func (rs *ResourceSpans) UnmarshalJSON(data []byte) error { + decoder := json.NewDecoder(bytes.NewReader(data)) + + t, err := decoder.Token() + if err != nil { + return err + } + if t != json.Delim('{') { + return errors.New("invalid ResourceSpans type") + } + + for decoder.More() { + keyIface, err := decoder.Token() + if err != nil { + if errors.Is(err, io.EOF) { + // Empty. 
+ return nil + } + return err + } + + key, ok := keyIface.(string) + if !ok { + return fmt.Errorf("invalid ResourceSpans field: %#v", keyIface) + } + + switch key { + case "resource": + err = decoder.Decode(&rs.Resource) + case "scopeSpans", "scope_spans": + err = decoder.Decode(&rs.ScopeSpans) + case "schemaUrl", "schema_url": + err = decoder.Decode(&rs.SchemaURL) + default: + // Skip unknown. + } + + if err != nil { + return err + } + } + return nil +} + +// ScopeSpans is a collection of Spans produced by an InstrumentationScope. +type ScopeSpans struct { + // The instrumentation scope information for the spans in this message. + // Semantically when InstrumentationScope isn't set, it is equivalent with + // an empty instrumentation scope name (unknown). + Scope *Scope `json:"scope"` + // A list of Spans that originate from an instrumentation scope. + Spans []*Span `json:"spans,omitempty"` + // The Schema URL, if known. This is the identifier of the Schema that the span data + // is recorded in. To learn more about Schema URL see + // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url + // This schema_url applies to all spans and span events in the "spans" field. + SchemaURL string `json:"schemaUrl,omitempty"` +} + +// UnmarshalJSON decodes the OTLP formatted JSON contained in data into ss. +func (ss *ScopeSpans) UnmarshalJSON(data []byte) error { + decoder := json.NewDecoder(bytes.NewReader(data)) + + t, err := decoder.Token() + if err != nil { + return err + } + if t != json.Delim('{') { + return errors.New("invalid ScopeSpans type") + } + + for decoder.More() { + keyIface, err := decoder.Token() + if err != nil { + if errors.Is(err, io.EOF) { + // Empty. 
+ return nil + } + return err + } + + key, ok := keyIface.(string) + if !ok { + return fmt.Errorf("invalid ScopeSpans field: %#v", keyIface) + } + + switch key { + case "scope": + err = decoder.Decode(&ss.Scope) + case "spans": + err = decoder.Decode(&ss.Spans) + case "schemaUrl", "schema_url": + err = decoder.Decode(&ss.SchemaURL) + default: + // Skip unknown. + } + + if err != nil { + return err + } + } + return nil +} diff --git a/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/value.go b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/value.go new file mode 100644 index 000000000..cb7927b81 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/value.go @@ -0,0 +1,453 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package telemetry // import "go.opentelemetry.io/otel/trace/internal/telemetry" + +import ( + "bytes" + "cmp" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "io" + "math" + "slices" + "strconv" + "unsafe" +) + +// A Value represents a structured value. +// A zero value is valid and represents an empty value. +type Value struct { + // Ensure forward compatibility by explicitly making this not comparable. + noCmp [0]func() //nolint: unused // This is indeed used. + + // num holds the value for Int64, Float64, and Bool. It holds the length + // for String, Bytes, Slice, Map. + num uint64 + // any holds either the KindBool, KindInt64, KindFloat64, stringptr, + // bytesptr, sliceptr, or mapptr. If KindBool, KindInt64, or KindFloat64 + // then the value of Value is in num as described above. Otherwise, it + // contains the value wrapped in the appropriate type. + any any +} + +type ( + // sliceptr represents a value in Value.any for KindString Values. + stringptr *byte + // bytesptr represents a value in Value.any for KindBytes Values. + bytesptr *byte + // sliceptr represents a value in Value.any for KindSlice Values. 
+ sliceptr *Value + // mapptr represents a value in Value.any for KindMap Values. + mapptr *Attr +) + +// ValueKind is the kind of a [Value]. +type ValueKind int + +// ValueKind values. +const ( + ValueKindEmpty ValueKind = iota + ValueKindBool + ValueKindFloat64 + ValueKindInt64 + ValueKindString + ValueKindBytes + ValueKindSlice + ValueKindMap +) + +var valueKindStrings = []string{ + "Empty", + "Bool", + "Float64", + "Int64", + "String", + "Bytes", + "Slice", + "Map", +} + +func (k ValueKind) String() string { + if k >= 0 && int(k) < len(valueKindStrings) { + return valueKindStrings[k] + } + return "" +} + +// StringValue returns a new [Value] for a string. +func StringValue(v string) Value { + return Value{ + num: uint64(len(v)), + any: stringptr(unsafe.StringData(v)), + } +} + +// IntValue returns a [Value] for an int. +func IntValue(v int) Value { return Int64Value(int64(v)) } + +// Int64Value returns a [Value] for an int64. +func Int64Value(v int64) Value { + return Value{ + num: uint64(v), // nolint: gosec // Store raw bytes. + any: ValueKindInt64, + } +} + +// Float64Value returns a [Value] for a float64. +func Float64Value(v float64) Value { + return Value{num: math.Float64bits(v), any: ValueKindFloat64} +} + +// BoolValue returns a [Value] for a bool. +func BoolValue(v bool) Value { //nolint:revive // Not a control flag. + var n uint64 + if v { + n = 1 + } + return Value{num: n, any: ValueKindBool} +} + +// BytesValue returns a [Value] for a byte slice. The passed slice must not be +// changed after it is passed. +func BytesValue(v []byte) Value { + return Value{ + num: uint64(len(v)), + any: bytesptr(unsafe.SliceData(v)), + } +} + +// SliceValue returns a [Value] for a slice of [Value]. The passed slice must +// not be changed after it is passed. +func SliceValue(vs ...Value) Value { + return Value{ + num: uint64(len(vs)), + any: sliceptr(unsafe.SliceData(vs)), + } +} + +// MapValue returns a new [Value] for a slice of key-value pairs. 
The passed +// slice must not be changed after it is passed. +func MapValue(kvs ...Attr) Value { + return Value{ + num: uint64(len(kvs)), + any: mapptr(unsafe.SliceData(kvs)), + } +} + +// AsString returns the value held by v as a string. +func (v Value) AsString() string { + if sp, ok := v.any.(stringptr); ok { + return unsafe.String(sp, v.num) + } + // TODO: error handle + return "" +} + +// asString returns the value held by v as a string. It will panic if the Value +// is not KindString. +func (v Value) asString() string { + return unsafe.String(v.any.(stringptr), v.num) +} + +// AsInt64 returns the value held by v as an int64. +func (v Value) AsInt64() int64 { + if v.Kind() != ValueKindInt64 { + // TODO: error handle + return 0 + } + return v.asInt64() +} + +// asInt64 returns the value held by v as an int64. If v is not of KindInt64, +// this will return garbage. +func (v Value) asInt64() int64 { + // Assumes v.num was a valid int64 (overflow not checked). + return int64(v.num) // nolint: gosec +} + +// AsBool returns the value held by v as a bool. +func (v Value) AsBool() bool { + if v.Kind() != ValueKindBool { + // TODO: error handle + return false + } + return v.asBool() +} + +// asBool returns the value held by v as a bool. If v is not of KindBool, this +// will return garbage. +func (v Value) asBool() bool { return v.num == 1 } + +// AsFloat64 returns the value held by v as a float64. +func (v Value) AsFloat64() float64 { + if v.Kind() != ValueKindFloat64 { + // TODO: error handle + return 0 + } + return v.asFloat64() +} + +// asFloat64 returns the value held by v as a float64. If v is not of +// KindFloat64, this will return garbage. +func (v Value) asFloat64() float64 { return math.Float64frombits(v.num) } + +// AsBytes returns the value held by v as a []byte. 
+func (v Value) AsBytes() []byte { + if sp, ok := v.any.(bytesptr); ok { + return unsafe.Slice((*byte)(sp), v.num) + } + // TODO: error handle + return nil +} + +// asBytes returns the value held by v as a []byte. It will panic if the Value +// is not KindBytes. +func (v Value) asBytes() []byte { + return unsafe.Slice((*byte)(v.any.(bytesptr)), v.num) +} + +// AsSlice returns the value held by v as a []Value. +func (v Value) AsSlice() []Value { + if sp, ok := v.any.(sliceptr); ok { + return unsafe.Slice((*Value)(sp), v.num) + } + // TODO: error handle + return nil +} + +// asSlice returns the value held by v as a []Value. It will panic if the Value +// is not KindSlice. +func (v Value) asSlice() []Value { + return unsafe.Slice((*Value)(v.any.(sliceptr)), v.num) +} + +// AsMap returns the value held by v as a []Attr. +func (v Value) AsMap() []Attr { + if sp, ok := v.any.(mapptr); ok { + return unsafe.Slice((*Attr)(sp), v.num) + } + // TODO: error handle + return nil +} + +// asMap returns the value held by v as a []Attr. It will panic if the +// Value is not KindMap. +func (v Value) asMap() []Attr { + return unsafe.Slice((*Attr)(v.any.(mapptr)), v.num) +} + +// Kind returns the Kind of v. +func (v Value) Kind() ValueKind { + switch x := v.any.(type) { + case ValueKind: + return x + case stringptr: + return ValueKindString + case bytesptr: + return ValueKindBytes + case sliceptr: + return ValueKindSlice + case mapptr: + return ValueKindMap + default: + return ValueKindEmpty + } +} + +// Empty reports whether v does not hold any value. +func (v Value) Empty() bool { return v.Kind() == ValueKindEmpty } + +// Equal reports whether v is equal to w. 
+func (v Value) Equal(w Value) bool { + k1 := v.Kind() + k2 := w.Kind() + if k1 != k2 { + return false + } + switch k1 { + case ValueKindInt64, ValueKindBool: + return v.num == w.num + case ValueKindString: + return v.asString() == w.asString() + case ValueKindFloat64: + return v.asFloat64() == w.asFloat64() + case ValueKindSlice: + return slices.EqualFunc(v.asSlice(), w.asSlice(), Value.Equal) + case ValueKindMap: + sv := sortMap(v.asMap()) + sw := sortMap(w.asMap()) + return slices.EqualFunc(sv, sw, Attr.Equal) + case ValueKindBytes: + return bytes.Equal(v.asBytes(), w.asBytes()) + case ValueKindEmpty: + return true + default: + // TODO: error handle + return false + } +} + +func sortMap(m []Attr) []Attr { + sm := make([]Attr, len(m)) + copy(sm, m) + slices.SortFunc(sm, func(a, b Attr) int { + return cmp.Compare(a.Key, b.Key) + }) + + return sm +} + +// String returns Value's value as a string, formatted like [fmt.Sprint]. +// +// The returned string is meant for debugging; +// the string representation is not stable. +func (v Value) String() string { + switch v.Kind() { + case ValueKindString: + return v.asString() + case ValueKindInt64: + // Assumes v.num was a valid int64 (overflow not checked). + return strconv.FormatInt(int64(v.num), 10) // nolint: gosec + case ValueKindFloat64: + return strconv.FormatFloat(v.asFloat64(), 'g', -1, 64) + case ValueKindBool: + return strconv.FormatBool(v.asBool()) + case ValueKindBytes: + return string(v.asBytes()) + case ValueKindMap: + return fmt.Sprint(v.asMap()) + case ValueKindSlice: + return fmt.Sprint(v.asSlice()) + case ValueKindEmpty: + return "" + default: + // Try to handle this as gracefully as possible. + // + // Don't panic here. The goal here is to have developers find this + // first if a slog.Kind is is not handled. It is + // preferable to have user's open issue asking why their attributes + // have a "unhandled: " prefix than say that their code is panicking. 
+ return fmt.Sprintf("", v.Kind()) + } +} + +// MarshalJSON encodes v into OTLP formatted JSON. +func (v *Value) MarshalJSON() ([]byte, error) { + switch v.Kind() { + case ValueKindString: + return json.Marshal(struct { + Value string `json:"stringValue"` + }{v.asString()}) + case ValueKindInt64: + return json.Marshal(struct { + Value string `json:"intValue"` + }{strconv.FormatInt(int64(v.num), 10)}) // nolint: gosec // From raw bytes. + case ValueKindFloat64: + return json.Marshal(struct { + Value float64 `json:"doubleValue"` + }{v.asFloat64()}) + case ValueKindBool: + return json.Marshal(struct { + Value bool `json:"boolValue"` + }{v.asBool()}) + case ValueKindBytes: + return json.Marshal(struct { + Value []byte `json:"bytesValue"` + }{v.asBytes()}) + case ValueKindMap: + return json.Marshal(struct { + Value struct { + Values []Attr `json:"values"` + } `json:"kvlistValue"` + }{struct { + Values []Attr `json:"values"` + }{v.asMap()}}) + case ValueKindSlice: + return json.Marshal(struct { + Value struct { + Values []Value `json:"values"` + } `json:"arrayValue"` + }{struct { + Values []Value `json:"values"` + }{v.asSlice()}}) + case ValueKindEmpty: + return nil, nil + default: + return nil, fmt.Errorf("unknown Value kind: %s", v.Kind().String()) + } +} + +// UnmarshalJSON decodes the OTLP formatted JSON contained in data into v. +func (v *Value) UnmarshalJSON(data []byte) error { + decoder := json.NewDecoder(bytes.NewReader(data)) + + t, err := decoder.Token() + if err != nil { + return err + } + if t != json.Delim('{') { + return errors.New("invalid Value type") + } + + for decoder.More() { + keyIface, err := decoder.Token() + if err != nil { + if errors.Is(err, io.EOF) { + // Empty. 
+ return nil + } + return err + } + + key, ok := keyIface.(string) + if !ok { + return fmt.Errorf("invalid Value key: %#v", keyIface) + } + + switch key { + case "stringValue", "string_value": + var val string + err = decoder.Decode(&val) + *v = StringValue(val) + case "boolValue", "bool_value": + var val bool + err = decoder.Decode(&val) + *v = BoolValue(val) + case "intValue", "int_value": + var val protoInt64 + err = decoder.Decode(&val) + *v = Int64Value(val.Int64()) + case "doubleValue", "double_value": + var val float64 + err = decoder.Decode(&val) + *v = Float64Value(val) + case "bytesValue", "bytes_value": + var val64 string + if err := decoder.Decode(&val64); err != nil { + return err + } + var val []byte + val, err = base64.StdEncoding.DecodeString(val64) + *v = BytesValue(val) + case "arrayValue", "array_value": + var val struct{ Values []Value } + err = decoder.Decode(&val) + *v = SliceValue(val.Values...) + case "kvlistValue", "kvlist_value": + var val struct{ Values []Attr } + err = decoder.Decode(&val) + *v = MapValue(val.Values...) + default: + // Skip unknown. + continue + } + // Use first valid. Ignore the rest. + return err + } + + // Only unknown fields. Return nil without unmarshaling any value. + return nil +} diff --git a/vendor/go.opentelemetry.io/otel/trace/noop.go b/vendor/go.opentelemetry.io/otel/trace/noop.go index ca20e9997..400fab123 100644 --- a/vendor/go.opentelemetry.io/otel/trace/noop.go +++ b/vendor/go.opentelemetry.io/otel/trace/noop.go @@ -26,7 +26,7 @@ type noopTracerProvider struct{ embedded.TracerProvider } var _ TracerProvider = noopTracerProvider{} // Tracer returns noop implementation of Tracer. -func (p noopTracerProvider) Tracer(string, ...TracerOption) Tracer { +func (noopTracerProvider) Tracer(string, ...TracerOption) Tracer { return noopTracer{} } @@ -37,7 +37,7 @@ var _ Tracer = noopTracer{} // Start carries forward a non-recording Span, if one is present in the context, otherwise it // creates a no-op Span. 
-func (t noopTracer) Start(ctx context.Context, name string, _ ...SpanStartOption) (context.Context, Span) { +func (noopTracer) Start(ctx context.Context, _ string, _ ...SpanStartOption) (context.Context, Span) { span := SpanFromContext(ctx) if _, ok := span.(nonRecordingSpan); !ok { // span is likely already a noopSpan, but let's be sure @@ -82,4 +82,24 @@ func (noopSpan) AddLink(Link) {} func (noopSpan) SetName(string) {} // TracerProvider returns a no-op TracerProvider. -func (noopSpan) TracerProvider() TracerProvider { return noopTracerProvider{} } +func (s noopSpan) TracerProvider() TracerProvider { + return s.tracerProvider(autoInstEnabled) +} + +// autoInstEnabled defines if the auto-instrumentation SDK is enabled. +// +// The auto-instrumentation is expected to overwrite this value to true when it +// attaches to the process. +var autoInstEnabled = new(bool) + +// tracerProvider return a noopTracerProvider if autoEnabled is false, +// otherwise it will return a TracerProvider from the sdk package used in +// auto-instrumentation. +// +//go:noinline +func (noopSpan) tracerProvider(autoEnabled *bool) TracerProvider { + if *autoEnabled { + return newAutoTracerProvider() + } + return noopTracerProvider{} +} diff --git a/vendor/go.opentelemetry.io/otel/trace/noop/noop.go b/vendor/go.opentelemetry.io/otel/trace/noop/noop.go index 64a4f1b36..689d220df 100644 --- a/vendor/go.opentelemetry.io/otel/trace/noop/noop.go +++ b/vendor/go.opentelemetry.io/otel/trace/noop/noop.go @@ -51,7 +51,7 @@ type Tracer struct{ embedded.Tracer } // If ctx contains a span context, the returned span will also contain that // span context. If the span context in ctx is for a non-recording span, that // span instance will be returned directly. 
-func (t Tracer) Start(ctx context.Context, _ string, _ ...trace.SpanStartOption) (context.Context, trace.Span) { +func (Tracer) Start(ctx context.Context, _ string, _ ...trace.SpanStartOption) (context.Context, trace.Span) { span := trace.SpanFromContext(ctx) // If the parent context contains a non-zero span context, that span diff --git a/vendor/go.opentelemetry.io/otel/trace/trace.go b/vendor/go.opentelemetry.io/otel/trace/trace.go index d49adf671..ee6f4bcb2 100644 --- a/vendor/go.opentelemetry.io/otel/trace/trace.go +++ b/vendor/go.opentelemetry.io/otel/trace/trace.go @@ -4,8 +4,6 @@ package trace // import "go.opentelemetry.io/otel/trace" import ( - "bytes" - "encoding/hex" "encoding/json" ) @@ -38,21 +36,47 @@ var ( _ json.Marshaler = nilTraceID ) -// IsValid checks whether the trace TraceID is valid. A valid trace ID does +// IsValid reports whether the trace TraceID is valid. A valid trace ID does // not consist of zeros only. func (t TraceID) IsValid() bool { - return !bytes.Equal(t[:], nilTraceID[:]) + return t != nilTraceID } // MarshalJSON implements a custom marshal function to encode TraceID // as a hex string. func (t TraceID) MarshalJSON() ([]byte, error) { - return json.Marshal(t.String()) + b := [32 + 2]byte{0: '"', 33: '"'} + h := t.hexBytes() + copy(b[1:], h[:]) + return b[:], nil } // String returns the hex string representation form of a TraceID. func (t TraceID) String() string { - return hex.EncodeToString(t[:]) + h := t.hexBytes() + return string(h[:]) +} + +// hexBytes returns the hex string representation form of a TraceID. 
+func (t TraceID) hexBytes() [32]byte { + return [32]byte{ + hexLU[t[0x0]>>4], hexLU[t[0x0]&0xf], + hexLU[t[0x1]>>4], hexLU[t[0x1]&0xf], + hexLU[t[0x2]>>4], hexLU[t[0x2]&0xf], + hexLU[t[0x3]>>4], hexLU[t[0x3]&0xf], + hexLU[t[0x4]>>4], hexLU[t[0x4]&0xf], + hexLU[t[0x5]>>4], hexLU[t[0x5]&0xf], + hexLU[t[0x6]>>4], hexLU[t[0x6]&0xf], + hexLU[t[0x7]>>4], hexLU[t[0x7]&0xf], + hexLU[t[0x8]>>4], hexLU[t[0x8]&0xf], + hexLU[t[0x9]>>4], hexLU[t[0x9]&0xf], + hexLU[t[0xa]>>4], hexLU[t[0xa]&0xf], + hexLU[t[0xb]>>4], hexLU[t[0xb]&0xf], + hexLU[t[0xc]>>4], hexLU[t[0xc]&0xf], + hexLU[t[0xd]>>4], hexLU[t[0xd]&0xf], + hexLU[t[0xe]>>4], hexLU[t[0xe]&0xf], + hexLU[t[0xf]>>4], hexLU[t[0xf]&0xf], + } } // SpanID is a unique identity of a span in a trace. @@ -63,21 +87,38 @@ var ( _ json.Marshaler = nilSpanID ) -// IsValid checks whether the SpanID is valid. A valid SpanID does not consist +// IsValid reports whether the SpanID is valid. A valid SpanID does not consist // of zeros only. func (s SpanID) IsValid() bool { - return !bytes.Equal(s[:], nilSpanID[:]) + return s != nilSpanID } // MarshalJSON implements a custom marshal function to encode SpanID // as a hex string. func (s SpanID) MarshalJSON() ([]byte, error) { - return json.Marshal(s.String()) + b := [16 + 2]byte{0: '"', 17: '"'} + h := s.hexBytes() + copy(b[1:], h[:]) + return b[:], nil } // String returns the hex string representation form of a SpanID. 
func (s SpanID) String() string { - return hex.EncodeToString(s[:]) + b := s.hexBytes() + return string(b[:]) +} + +func (s SpanID) hexBytes() [16]byte { + return [16]byte{ + hexLU[s[0]>>4], hexLU[s[0]&0xf], + hexLU[s[1]>>4], hexLU[s[1]&0xf], + hexLU[s[2]>>4], hexLU[s[2]&0xf], + hexLU[s[3]>>4], hexLU[s[3]&0xf], + hexLU[s[4]>>4], hexLU[s[4]&0xf], + hexLU[s[5]>>4], hexLU[s[5]&0xf], + hexLU[s[6]>>4], hexLU[s[6]&0xf], + hexLU[s[7]>>4], hexLU[s[7]&0xf], + } } // TraceIDFromHex returns a TraceID from a hex string if it is compliant with @@ -85,65 +126,58 @@ func (s SpanID) String() string { // https://www.w3.org/TR/trace-context/#trace-id // nolint:revive // revive complains about stutter of `trace.TraceIDFromHex`. func TraceIDFromHex(h string) (TraceID, error) { - t := TraceID{} if len(h) != 32 { - return t, errInvalidTraceIDLength + return [16]byte{}, errInvalidTraceIDLength } - - if err := decodeHex(h, t[:]); err != nil { - return t, err + var b [16]byte + invalidMark := byte(0) + for i := 0; i < len(h); i += 4 { + b[i/2] = (hexRev[h[i]] << 4) | hexRev[h[i+1]] + b[i/2+1] = (hexRev[h[i+2]] << 4) | hexRev[h[i+3]] + invalidMark |= hexRev[h[i]] | hexRev[h[i+1]] | hexRev[h[i+2]] | hexRev[h[i+3]] } - - if !t.IsValid() { - return t, errNilTraceID + // If the upper 4 bits of any byte are not zero, there was an invalid hex + // character since invalid hex characters are 0xff in hexRev. + if invalidMark&0xf0 != 0 { + return [16]byte{}, errInvalidHexID + } + // If we didn't set any bits, then h was all zeros. + if invalidMark == 0 { + return [16]byte{}, errNilTraceID } - return t, nil + return b, nil } // SpanIDFromHex returns a SpanID from a hex string if it is compliant // with the w3c trace-context specification. 
// See more at https://www.w3.org/TR/trace-context/#parent-id func SpanIDFromHex(h string) (SpanID, error) { - s := SpanID{} if len(h) != 16 { - return s, errInvalidSpanIDLength - } - - if err := decodeHex(h, s[:]); err != nil { - return s, err + return [8]byte{}, errInvalidSpanIDLength } - - if !s.IsValid() { - return s, errNilSpanID + var b [8]byte + invalidMark := byte(0) + for i := 0; i < len(h); i += 4 { + b[i/2] = (hexRev[h[i]] << 4) | hexRev[h[i+1]] + b[i/2+1] = (hexRev[h[i+2]] << 4) | hexRev[h[i+3]] + invalidMark |= hexRev[h[i]] | hexRev[h[i+1]] | hexRev[h[i+2]] | hexRev[h[i+3]] } - return s, nil -} - -func decodeHex(h string, b []byte) error { - for _, r := range h { - switch { - case 'a' <= r && r <= 'f': - continue - case '0' <= r && r <= '9': - continue - default: - return errInvalidHexID - } + // If the upper 4 bits of any byte are not zero, there was an invalid hex + // character since invalid hex characters are 0xff in hexRev. + if invalidMark&0xf0 != 0 { + return [8]byte{}, errInvalidHexID } - - decoded, err := hex.DecodeString(h) - if err != nil { - return err + // If we didn't set any bits, then h was all zeros. + if invalidMark == 0 { + return [8]byte{}, errNilSpanID } - - copy(b, decoded) - return nil + return b, nil } // TraceFlags contains flags that can be set on a SpanContext. type TraceFlags byte //nolint:revive // revive complains about stutter of `trace.TraceFlags`. -// IsSampled returns if the sampling bit is set in the TraceFlags. +// IsSampled reports whether the sampling bit is set in the TraceFlags. func (tf TraceFlags) IsSampled() bool { return tf&FlagsSampled == FlagsSampled } @@ -160,12 +194,20 @@ func (tf TraceFlags) WithSampled(sampled bool) TraceFlags { // nolint:revive // // MarshalJSON implements a custom marshal function to encode TraceFlags // as a hex string. 
func (tf TraceFlags) MarshalJSON() ([]byte, error) { - return json.Marshal(tf.String()) + b := [2 + 2]byte{0: '"', 3: '"'} + h := tf.hexBytes() + copy(b[1:], h[:]) + return b[:], nil } // String returns the hex string representation form of TraceFlags. func (tf TraceFlags) String() string { - return hex.EncodeToString([]byte{byte(tf)}[:]) + h := tf.hexBytes() + return string(h[:]) +} + +func (tf TraceFlags) hexBytes() [2]byte { + return [2]byte{hexLU[tf>>4], hexLU[tf&0xf]} } // SpanContextConfig contains mutable fields usable for constructing @@ -201,13 +243,13 @@ type SpanContext struct { var _ json.Marshaler = SpanContext{} -// IsValid returns if the SpanContext is valid. A valid span context has a +// IsValid reports whether the SpanContext is valid. A valid span context has a // valid TraceID and SpanID. func (sc SpanContext) IsValid() bool { return sc.HasTraceID() && sc.HasSpanID() } -// IsRemote indicates whether the SpanContext represents a remotely-created Span. +// IsRemote reports whether the SpanContext represents a remotely-created Span. func (sc SpanContext) IsRemote() bool { return sc.remote } @@ -228,7 +270,7 @@ func (sc SpanContext) TraceID() TraceID { return sc.traceID } -// HasTraceID checks if the SpanContext has a valid TraceID. +// HasTraceID reports whether the SpanContext has a valid TraceID. func (sc SpanContext) HasTraceID() bool { return sc.traceID.IsValid() } @@ -249,7 +291,7 @@ func (sc SpanContext) SpanID() SpanID { return sc.spanID } -// HasSpanID checks if the SpanContext has a valid SpanID. +// HasSpanID reports whether the SpanContext has a valid SpanID. func (sc SpanContext) HasSpanID() bool { return sc.spanID.IsValid() } @@ -270,7 +312,7 @@ func (sc SpanContext) TraceFlags() TraceFlags { return sc.traceFlags } -// IsSampled returns if the sampling bit is set in the SpanContext's TraceFlags. +// IsSampled reports whether the sampling bit is set in the SpanContext's TraceFlags. 
func (sc SpanContext) IsSampled() bool { return sc.traceFlags.IsSampled() } @@ -302,7 +344,7 @@ func (sc SpanContext) WithTraceState(state TraceState) SpanContext { } } -// Equal is a predicate that determines whether two SpanContext values are equal. +// Equal reports whether two SpanContext values are equal. func (sc SpanContext) Equal(other SpanContext) bool { return sc.traceID == other.traceID && sc.spanID == other.spanID && diff --git a/vendor/go.opentelemetry.io/otel/trace/tracestate.go b/vendor/go.opentelemetry.io/otel/trace/tracestate.go index dc5e34cad..073adae2f 100644 --- a/vendor/go.opentelemetry.io/otel/trace/tracestate.go +++ b/vendor/go.opentelemetry.io/otel/trace/tracestate.go @@ -80,7 +80,7 @@ func checkKeyRemain(key string) bool { // // param n is remain part length, should be 255 in simple-key or 13 in system-id. func checkKeyPart(key string, n int) bool { - if len(key) == 0 { + if key == "" { return false } first := key[0] // key's first char @@ -102,7 +102,7 @@ func isAlphaNum(c byte) bool { // // param n is remain part length, should be 240 exactly. func checkKeyTenant(key string, n int) bool { - if len(key) == 0 { + if key == "" { return false } return isAlphaNum(key[0]) && len(key[1:]) <= n && checkKeyRemain(key[1:]) @@ -191,7 +191,7 @@ func ParseTraceState(ts string) (TraceState, error) { for ts != "" { var memberStr string memberStr, ts, _ = strings.Cut(ts, listDelimiters) - if len(memberStr) == 0 { + if memberStr == "" { continue } diff --git a/vendor/go.opentelemetry.io/otel/verify_readmes.sh b/vendor/go.opentelemetry.io/otel/verify_readmes.sh deleted file mode 100644 index 1e87855ee..000000000 --- a/vendor/go.opentelemetry.io/otel/verify_readmes.sh +++ /dev/null @@ -1,21 +0,0 @@ -#!/bin/bash - -# Copyright The OpenTelemetry Authors -# SPDX-License-Identifier: Apache-2.0 - -set -euo pipefail - -dirs=$(find . 
-type d -not -path "*/internal*" -not -path "*/test*" -not -path "*/example*" -not -path "*/.*" | sort) - -missingReadme=false -for dir in $dirs; do - if [ ! -f "$dir/README.md" ]; then - echo "couldn't find README.md for $dir" - missingReadme=true - fi -done - -if [ "$missingReadme" = true ] ; then - echo "Error: some READMEs couldn't be found." - exit 1 -fi diff --git a/vendor/go.opentelemetry.io/otel/version.go b/vendor/go.opentelemetry.io/otel/version.go index eb22002d8..bcaa5aa53 100644 --- a/vendor/go.opentelemetry.io/otel/version.go +++ b/vendor/go.opentelemetry.io/otel/version.go @@ -5,5 +5,5 @@ package otel // import "go.opentelemetry.io/otel" // Version is the current release version of OpenTelemetry in use. func Version() string { - return "1.34.0" + return "1.38.0" } diff --git a/vendor/go.opentelemetry.io/otel/versions.yaml b/vendor/go.opentelemetry.io/otel/versions.yaml index ce4fe59b0..07145e254 100644 --- a/vendor/go.opentelemetry.io/otel/versions.yaml +++ b/vendor/go.opentelemetry.io/otel/versions.yaml @@ -3,13 +3,12 @@ module-sets: stable-v1: - version: v1.34.0 + version: v1.38.0 modules: - go.opentelemetry.io/otel - go.opentelemetry.io/otel/bridge/opencensus - go.opentelemetry.io/otel/bridge/opencensus/test - go.opentelemetry.io/otel/bridge/opentracing - - go.opentelemetry.io/otel/bridge/opentracing/test - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp - go.opentelemetry.io/otel/exporters/otlp/otlptrace @@ -23,20 +22,23 @@ module-sets: - go.opentelemetry.io/otel/sdk/metric - go.opentelemetry.io/otel/trace experimental-metrics: - version: v0.56.0 + version: v0.60.0 modules: - go.opentelemetry.io/otel/exporters/prometheus experimental-logs: - version: v0.10.0 + version: v0.14.0 modules: - go.opentelemetry.io/otel/log + - go.opentelemetry.io/otel/log/logtest - go.opentelemetry.io/otel/sdk/log + - go.opentelemetry.io/otel/sdk/log/logtest - 
go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc - go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp - go.opentelemetry.io/otel/exporters/stdout/stdoutlog experimental-schema: - version: v0.0.12 + version: v0.0.13 modules: - go.opentelemetry.io/otel/schema excluded-modules: - go.opentelemetry.io/otel/internal/tools + - go.opentelemetry.io/otel/trace/internal/telemetry/test diff --git a/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service_grpc.pb.go b/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service_grpc.pb.go index dd1b73f1e..892864ea6 100644 --- a/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service_grpc.pb.go +++ b/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service_grpc.pb.go @@ -22,8 +22,6 @@ const _ = grpc.SupportPackageIsVersion7 // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. type TraceServiceClient interface { - // For performance reasons, it is recommended to keep this RPC - // alive for the entire life of the application. Export(ctx context.Context, in *ExportTraceServiceRequest, opts ...grpc.CallOption) (*ExportTraceServiceResponse, error) } @@ -48,8 +46,6 @@ func (c *traceServiceClient) Export(ctx context.Context, in *ExportTraceServiceR // All implementations must embed UnimplementedTraceServiceServer // for forward compatibility type TraceServiceServer interface { - // For performance reasons, it is recommended to keep this RPC - // alive for the entire life of the application. 
Export(context.Context, *ExportTraceServiceRequest) (*ExportTraceServiceResponse, error) mustEmbedUnimplementedTraceServiceServer() } diff --git a/vendor/go.opentelemetry.io/proto/otlp/common/v1/common.pb.go b/vendor/go.opentelemetry.io/proto/otlp/common/v1/common.pb.go index 852209b09..a7c5d19bf 100644 --- a/vendor/go.opentelemetry.io/proto/otlp/common/v1/common.pb.go +++ b/vendor/go.opentelemetry.io/proto/otlp/common/v1/common.pb.go @@ -430,6 +430,101 @@ func (x *InstrumentationScope) GetDroppedAttributesCount() uint32 { return 0 } +// A reference to an Entity. +// Entity represents an object of interest associated with produced telemetry: e.g spans, metrics, profiles, or logs. +// +// Status: [Development] +type EntityRef struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The Schema URL, if known. This is the identifier of the Schema that the entity data + // is recorded in. To learn more about Schema URL see + // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url + // + // This schema_url applies to the data in this message and to the Resource attributes + // referenced by id_keys and description_keys. + // TODO: discuss if we are happy with this somewhat complicated definition of what + // the schema_url applies to. + // + // This field obsoletes the schema_url field in ResourceMetrics/ResourceSpans/ResourceLogs. + SchemaUrl string `protobuf:"bytes,1,opt,name=schema_url,json=schemaUrl,proto3" json:"schema_url,omitempty"` + // Defines the type of the entity. MUST not change during the lifetime of the entity. + // For example: "service" or "host". This field is required and MUST not be empty + // for valid entities. + Type string `protobuf:"bytes,2,opt,name=type,proto3" json:"type,omitempty"` + // Attribute Keys that identify the entity. + // MUST not change during the lifetime of the entity. The Id must contain at least one attribute. 
+ // These keys MUST exist in the containing {message}.attributes. + IdKeys []string `protobuf:"bytes,3,rep,name=id_keys,json=idKeys,proto3" json:"id_keys,omitempty"` + // Descriptive (non-identifying) attribute keys of the entity. + // MAY change over the lifetime of the entity. MAY be empty. + // These attribute keys are not part of entity's identity. + // These keys MUST exist in the containing {message}.attributes. + DescriptionKeys []string `protobuf:"bytes,4,rep,name=description_keys,json=descriptionKeys,proto3" json:"description_keys,omitempty"` +} + +func (x *EntityRef) Reset() { + *x = EntityRef{} + if protoimpl.UnsafeEnabled { + mi := &file_opentelemetry_proto_common_v1_common_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *EntityRef) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EntityRef) ProtoMessage() {} + +func (x *EntityRef) ProtoReflect() protoreflect.Message { + mi := &file_opentelemetry_proto_common_v1_common_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use EntityRef.ProtoReflect.Descriptor instead. 
+func (*EntityRef) Descriptor() ([]byte, []int) { + return file_opentelemetry_proto_common_v1_common_proto_rawDescGZIP(), []int{5} +} + +func (x *EntityRef) GetSchemaUrl() string { + if x != nil { + return x.SchemaUrl + } + return "" +} + +func (x *EntityRef) GetType() string { + if x != nil { + return x.Type + } + return "" +} + +func (x *EntityRef) GetIdKeys() []string { + if x != nil { + return x.IdKeys + } + return nil +} + +func (x *EntityRef) GetDescriptionKeys() []string { + if x != nil { + return x.DescriptionKeys + } + return nil +} + var File_opentelemetry_proto_common_v1_common_proto protoreflect.FileDescriptor var file_opentelemetry_proto_common_v1_common_proto_rawDesc = []byte{ @@ -488,15 +583,23 @@ var file_opentelemetry_proto_common_v1_common_proto_rawDesc = []byte{ 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x16, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, - 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x42, 0x7b, 0x0a, 0x20, 0x69, 0x6f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, - 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, - 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x42, 0x0b, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, - 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x28, 0x67, 0x6f, 0x2e, 0x6f, 0x70, 0x65, - 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x69, 0x6f, 0x2f, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x2f, 0x6f, 0x74, 0x6c, 0x70, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, - 0x76, 0x31, 0xaa, 0x02, 0x1d, 0x4f, 0x70, 0x65, 0x6e, 0x54, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, - 0x72, 0x79, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, - 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x82, 0x01, 0x0a, 0x09, 0x45, 0x6e, 
0x74, 0x69, 0x74, 0x79, + 0x52, 0x65, 0x66, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x5f, 0x75, 0x72, + 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x55, + 0x72, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x17, 0x0a, 0x07, 0x69, 0x64, 0x5f, 0x6b, 0x65, 0x79, + 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x69, 0x64, 0x4b, 0x65, 0x79, 0x73, 0x12, + 0x29, 0x0a, 0x10, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, + 0x65, 0x79, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0f, 0x64, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x73, 0x42, 0x7b, 0x0a, 0x20, 0x69, 0x6f, + 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x42, 0x0b, + 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x28, 0x67, + 0x6f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, + 0x69, 0x6f, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x6f, 0x74, 0x6c, 0x70, 0x2f, 0x63, 0x6f, + 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x76, 0x31, 0xaa, 0x02, 0x1d, 0x4f, 0x70, 0x65, 0x6e, 0x54, 0x65, + 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x43, 0x6f, + 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -511,13 +614,14 @@ func file_opentelemetry_proto_common_v1_common_proto_rawDescGZIP() []byte { return file_opentelemetry_proto_common_v1_common_proto_rawDescData } -var file_opentelemetry_proto_common_v1_common_proto_msgTypes = make([]protoimpl.MessageInfo, 5) +var file_opentelemetry_proto_common_v1_common_proto_msgTypes = make([]protoimpl.MessageInfo, 6) var 
file_opentelemetry_proto_common_v1_common_proto_goTypes = []interface{}{ (*AnyValue)(nil), // 0: opentelemetry.proto.common.v1.AnyValue (*ArrayValue)(nil), // 1: opentelemetry.proto.common.v1.ArrayValue (*KeyValueList)(nil), // 2: opentelemetry.proto.common.v1.KeyValueList (*KeyValue)(nil), // 3: opentelemetry.proto.common.v1.KeyValue (*InstrumentationScope)(nil), // 4: opentelemetry.proto.common.v1.InstrumentationScope + (*EntityRef)(nil), // 5: opentelemetry.proto.common.v1.EntityRef } var file_opentelemetry_proto_common_v1_common_proto_depIdxs = []int32{ 1, // 0: opentelemetry.proto.common.v1.AnyValue.array_value:type_name -> opentelemetry.proto.common.v1.ArrayValue @@ -599,6 +703,18 @@ func file_opentelemetry_proto_common_v1_common_proto_init() { return nil } } + file_opentelemetry_proto_common_v1_common_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*EntityRef); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } } file_opentelemetry_proto_common_v1_common_proto_msgTypes[0].OneofWrappers = []interface{}{ (*AnyValue_StringValue)(nil), @@ -615,7 +731,7 @@ func file_opentelemetry_proto_common_v1_common_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_opentelemetry_proto_common_v1_common_proto_rawDesc, NumEnums: 0, - NumMessages: 5, + NumMessages: 6, NumExtensions: 0, NumServices: 0, }, diff --git a/vendor/go.opentelemetry.io/proto/otlp/resource/v1/resource.pb.go b/vendor/go.opentelemetry.io/proto/otlp/resource/v1/resource.pb.go index b7545b03b..eb7745d66 100644 --- a/vendor/go.opentelemetry.io/proto/otlp/resource/v1/resource.pb.go +++ b/vendor/go.opentelemetry.io/proto/otlp/resource/v1/resource.pb.go @@ -48,6 +48,12 @@ type Resource struct { // dropped_attributes_count is the number of dropped attributes. If the value is 0, then // no attributes were dropped. 
DroppedAttributesCount uint32 `protobuf:"varint,2,opt,name=dropped_attributes_count,json=droppedAttributesCount,proto3" json:"dropped_attributes_count,omitempty"` + // Set of entities that participate in this Resource. + // + // Note: keys in the references MUST exist in attributes of this message. + // + // Status: [Development] + EntityRefs []*v1.EntityRef `protobuf:"bytes,3,rep,name=entity_refs,json=entityRefs,proto3" json:"entity_refs,omitempty"` } func (x *Resource) Reset() { @@ -96,6 +102,13 @@ func (x *Resource) GetDroppedAttributesCount() uint32 { return 0 } +func (x *Resource) GetEntityRefs() []*v1.EntityRef { + if x != nil { + return x.EntityRefs + } + return nil +} + var File_opentelemetry_proto_resource_v1_resource_proto protoreflect.FileDescriptor var file_opentelemetry_proto_resource_v1_resource_proto_rawDesc = []byte{ @@ -106,7 +119,7 @@ var file_opentelemetry_proto_resource_v1_resource_proto_rawDesc = []byte{ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x1a, 0x2a, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x76, 0x31, - 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x8d, 0x01, + 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xd8, 0x01, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x47, 0x0a, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, @@ -115,16 +128,21 @@ var file_opentelemetry_proto_resource_v1_resource_proto_rawDesc = []byte{ 0x74, 0x65, 0x73, 0x12, 0x38, 0x0a, 0x18, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 
0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x16, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x41, 0x74, - 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x42, 0x83, 0x01, - 0x0a, 0x22, 0x69, 0x6f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, - 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x2e, 0x76, 0x31, 0x42, 0x0d, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x50, 0x72, - 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2a, 0x67, 0x6f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, - 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x69, 0x6f, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x2f, 0x6f, 0x74, 0x6c, 0x70, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2f, 0x76, - 0x31, 0xaa, 0x02, 0x1f, 0x4f, 0x70, 0x65, 0x6e, 0x54, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, - 0x79, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x2e, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x49, 0x0a, + 0x0b, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x72, 0x65, 0x66, 0x73, 0x18, 0x03, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, + 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, + 0x76, 0x31, 0x2e, 0x45, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x65, 0x66, 0x52, 0x0a, 0x65, 0x6e, + 0x74, 0x69, 0x74, 0x79, 0x52, 0x65, 0x66, 0x73, 0x42, 0x83, 0x01, 0x0a, 0x22, 0x69, 0x6f, 0x2e, + 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x2e, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x42, + 0x0d, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, + 0x5a, 0x2a, 0x67, 0x6f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 
0x6c, 0x65, 0x6d, 0x65, 0x74, + 0x72, 0x79, 0x2e, 0x69, 0x6f, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x6f, 0x74, 0x6c, 0x70, + 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2f, 0x76, 0x31, 0xaa, 0x02, 0x1f, 0x4f, + 0x70, 0x65, 0x6e, 0x54, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x50, 0x72, 0x6f, + 0x74, 0x6f, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x56, 0x31, 0x62, 0x06, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -141,16 +159,18 @@ func file_opentelemetry_proto_resource_v1_resource_proto_rawDescGZIP() []byte { var file_opentelemetry_proto_resource_v1_resource_proto_msgTypes = make([]protoimpl.MessageInfo, 1) var file_opentelemetry_proto_resource_v1_resource_proto_goTypes = []interface{}{ - (*Resource)(nil), // 0: opentelemetry.proto.resource.v1.Resource - (*v1.KeyValue)(nil), // 1: opentelemetry.proto.common.v1.KeyValue + (*Resource)(nil), // 0: opentelemetry.proto.resource.v1.Resource + (*v1.KeyValue)(nil), // 1: opentelemetry.proto.common.v1.KeyValue + (*v1.EntityRef)(nil), // 2: opentelemetry.proto.common.v1.EntityRef } var file_opentelemetry_proto_resource_v1_resource_proto_depIdxs = []int32{ 1, // 0: opentelemetry.proto.resource.v1.Resource.attributes:type_name -> opentelemetry.proto.common.v1.KeyValue - 1, // [1:1] is the sub-list for method output_type - 1, // [1:1] is the sub-list for method input_type - 1, // [1:1] is the sub-list for extension type_name - 1, // [1:1] is the sub-list for extension extendee - 0, // [0:1] is the sub-list for field type_name + 2, // 1: opentelemetry.proto.resource.v1.Resource.entity_refs:type_name -> opentelemetry.proto.common.v1.EntityRef + 2, // [2:2] is the sub-list for method output_type + 2, // [2:2] is the sub-list for method input_type + 2, // [2:2] is the sub-list for extension type_name + 2, // [2:2] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name } func init() { file_opentelemetry_proto_resource_v1_resource_proto_init() } 
diff --git a/vendor/go.uber.org/zap/.golangci.yml b/vendor/go.uber.org/zap/.golangci.yml index 2346df135..74faaa71d 100644 --- a/vendor/go.uber.org/zap/.golangci.yml +++ b/vendor/go.uber.org/zap/.golangci.yml @@ -25,7 +25,7 @@ linters-settings: govet: # These govet checks are disabled by default, but they're useful. enable: - - niliness + - nilness - reflectvaluecompare - sortslice - unusedwrite diff --git a/vendor/go.uber.org/zap/CHANGELOG.md b/vendor/go.uber.org/zap/CHANGELOG.md index 6d6cd5f4d..86e7e6f98 100644 --- a/vendor/go.uber.org/zap/CHANGELOG.md +++ b/vendor/go.uber.org/zap/CHANGELOG.md @@ -3,6 +3,16 @@ All notable changes to this project will be documented in this file. This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). +## 1.27.1 (19 Nov 2025) +Enhancements: +* [#1501][]: prevent `Object` from panicking on nils +* [#1511][]: Fix a race condition in `WithLazy`. + +Thanks to @rabbbit, @alshopov, @jquirke, @arukiidou for their contributions to this release. + +[#1501]: https://github.com/uber-go/zap/pull/1501 +[#1511]: https://github.com/uber-go/zap/pull/1511 + ## 1.27.0 (20 Feb 2024) Enhancements: * [#1378][]: Add `WithLazy` method for `SugaredLogger`. diff --git a/vendor/go.uber.org/zap/CODE_OF_CONDUCT.md b/vendor/go.uber.org/zap/CODE_OF_CONDUCT.md index e327d9aa5..bc988b72e 100644 --- a/vendor/go.uber.org/zap/CODE_OF_CONDUCT.md +++ b/vendor/go.uber.org/zap/CODE_OF_CONDUCT.md @@ -71,5 +71,5 @@ This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [http://contributor-covenant.org/version/1/4][version]. 
-[homepage]: http://contributor-covenant.org -[version]: http://contributor-covenant.org/version/1/4/ +[homepage]: https://contributor-covenant.org +[version]: https://contributor-covenant.org/version/1/4/ diff --git a/vendor/go.uber.org/zap/LICENSE b/vendor/go.uber.org/zap/LICENSE index 6652bed45..3883b9a7e 100644 --- a/vendor/go.uber.org/zap/LICENSE +++ b/vendor/go.uber.org/zap/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2016-2017 Uber Technologies, Inc. +Copyright (c) 2016-2024 Uber Technologies, Inc. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/vendor/go.uber.org/zap/Makefile b/vendor/go.uber.org/zap/Makefile index eb1cee53b..f9db385b3 100644 --- a/vendor/go.uber.org/zap/Makefile +++ b/vendor/go.uber.org/zap/Makefile @@ -24,7 +24,7 @@ golangci-lint: @$(foreach mod,$(MODULE_DIRS), \ (cd $(mod) && \ echo "[lint] golangci-lint: $(mod)" && \ - golangci-lint run --path-prefix $(mod)) &&) true + golangci-lint run --path-prefix $(mod) ./...) &&) true .PHONY: tidy tidy: diff --git a/vendor/go.uber.org/zap/field.go b/vendor/go.uber.org/zap/field.go index 6743930b8..1884afabc 100644 --- a/vendor/go.uber.org/zap/field.go +++ b/vendor/go.uber.org/zap/field.go @@ -398,6 +398,9 @@ func Durationp(key string, val *time.Duration) Field { // struct-like user-defined types to the logging context. The struct's // MarshalLogObject method is called lazily. func Object(key string, val zapcore.ObjectMarshaler) Field { + if val == nil { + return nilField(key) + } return Field{Key: key, Type: zapcore.ObjectMarshalerType, Interface: val} } @@ -431,6 +434,13 @@ func (d dictObject) MarshalLogObject(enc zapcore.ObjectEncoder) error { return nil } +// DictObject constructs a [zapcore.ObjectMarshaler] with the given list of fields. +// The resulting object marshaler can be used as input to [Object], [Objects], or +// any other functions that expect an object marshaler. 
+func DictObject(val ...Field) zapcore.ObjectMarshaler { + return dictObject(val) +} + // We discovered an issue where zap.Any can cause a performance degradation // when used in new goroutines. // diff --git a/vendor/go.uber.org/zap/http_handler.go b/vendor/go.uber.org/zap/http_handler.go index 2be8f6515..1cae2c164 100644 --- a/vendor/go.uber.org/zap/http_handler.go +++ b/vendor/go.uber.org/zap/http_handler.go @@ -71,7 +71,7 @@ import ( func (lvl AtomicLevel) ServeHTTP(w http.ResponseWriter, r *http.Request) { if err := lvl.serveHTTP(w, r); err != nil { w.WriteHeader(http.StatusInternalServerError) - fmt.Fprintf(w, "internal error: %v", err) + _, _ = fmt.Fprintf(w, "internal error: %v", err) } } diff --git a/vendor/go.uber.org/zap/logger.go b/vendor/go.uber.org/zap/logger.go index c4d300323..2d0ef141b 100644 --- a/vendor/go.uber.org/zap/logger.go +++ b/vendor/go.uber.org/zap/logger.go @@ -381,7 +381,11 @@ func (log *Logger) check(lvl zapcore.Level, msg string) *zapcore.CheckedEntry { if stack.Count() == 0 { if log.addCaller { - fmt.Fprintf(log.errorOutput, "%v Logger.check error: failed to get caller\n", ent.Time.UTC()) + _, _ = fmt.Fprintf( + log.errorOutput, + "%v Logger.check error: failed to get caller\n", + ent.Time.UTC(), + ) _ = log.errorOutput.Sync() } return ce diff --git a/vendor/go.uber.org/zap/options.go b/vendor/go.uber.org/zap/options.go index 43d357ac9..04a3c1e63 100644 --- a/vendor/go.uber.org/zap/options.go +++ b/vendor/go.uber.org/zap/options.go @@ -125,7 +125,11 @@ func IncreaseLevel(lvl zapcore.LevelEnabler) Option { return optionFunc(func(log *Logger) { core, err := zapcore.NewIncreaseLevelCore(log.core, lvl) if err != nil { - fmt.Fprintf(log.errorOutput, "failed to IncreaseLevel: %v\n", err) + _, _ = fmt.Fprintf( + log.errorOutput, + "failed to IncreaseLevel: %v\n", + err, + ) } else { log.core = core } diff --git a/vendor/go.uber.org/zap/sink.go b/vendor/go.uber.org/zap/sink.go index 499772a00..92202280f 100644 --- 
a/vendor/go.uber.org/zap/sink.go +++ b/vendor/go.uber.org/zap/sink.go @@ -71,7 +71,7 @@ func newSinkRegistry() *sinkRegistry { return sr } -// RegisterScheme registers the given factory for the specific scheme. +// RegisterSink registers the given factory for the specific scheme. func (sr *sinkRegistry) RegisterSink(scheme string, factory func(*url.URL) (Sink, error)) error { sr.mu.Lock() defer sr.mu.Unlock() diff --git a/vendor/go.uber.org/zap/zapcore/buffered_write_syncer.go b/vendor/go.uber.org/zap/zapcore/buffered_write_syncer.go index a40e93b3e..4b426a564 100644 --- a/vendor/go.uber.org/zap/zapcore/buffered_write_syncer.go +++ b/vendor/go.uber.org/zap/zapcore/buffered_write_syncer.go @@ -188,32 +188,33 @@ func (s *BufferedWriteSyncer) flushLoop() { // Stop closes the buffer, cleans up background goroutines, and flushes // remaining unwritten data. func (s *BufferedWriteSyncer) Stop() (err error) { - var stopped bool - // Critical section. - func() { + stopped := func() bool { s.mu.Lock() defer s.mu.Unlock() if !s.initialized { - return + return false } - stopped = s.stopped - if stopped { - return + if s.stopped { + return false } s.stopped = true s.ticker.Stop() close(s.stop) // tell flushLoop to stop - <-s.done // and wait until it has + return true }() - // Don't call Sync on consecutive Stops. + // Not initialized, or already stopped, no need for any cleanup. if !stopped { - err = s.Sync() + return } - return err + // Wait for flushLoop to end outside of the lock, as it may need the lock to complete. + // See https://github.com/uber-go/zap/issues/1428 for details. 
+ <-s.done + + return s.Sync() } diff --git a/vendor/go.uber.org/zap/zapcore/console_encoder.go b/vendor/go.uber.org/zap/zapcore/console_encoder.go index cc2b4e07b..98eea5154 100644 --- a/vendor/go.uber.org/zap/zapcore/console_encoder.go +++ b/vendor/go.uber.org/zap/zapcore/console_encoder.go @@ -105,7 +105,7 @@ func (c consoleEncoder) EncodeEntry(ent Entry, fields []Field) (*buffer.Buffer, if i > 0 { line.AppendString(c.ConsoleSeparator) } - fmt.Fprint(line, arr.elems[i]) + _, _ = fmt.Fprint(line, arr.elems[i]) } putSliceEncoder(arr) diff --git a/vendor/go.uber.org/zap/zapcore/entry.go b/vendor/go.uber.org/zap/zapcore/entry.go index 459a5d7ce..841752f2e 100644 --- a/vendor/go.uber.org/zap/zapcore/entry.go +++ b/vendor/go.uber.org/zap/zapcore/entry.go @@ -241,7 +241,12 @@ func (ce *CheckedEntry) Write(fields ...Field) { // If the entry is dirty, log an internal error; because the // CheckedEntry is being used after it was returned to the pool, // the message may be an amalgamation from multiple call sites. 
- fmt.Fprintf(ce.ErrorOutput, "%v Unsafe CheckedEntry re-use near Entry %+v.\n", ce.Time, ce.Entry) + _, _ = fmt.Fprintf( + ce.ErrorOutput, + "%v Unsafe CheckedEntry re-use near Entry %+v.\n", + ce.Time, + ce.Entry, + ) _ = ce.ErrorOutput.Sync() // ignore error } return @@ -253,7 +258,12 @@ func (ce *CheckedEntry) Write(fields ...Field) { err = multierr.Append(err, ce.cores[i].Write(ce.Entry, fields)) } if err != nil && ce.ErrorOutput != nil { - fmt.Fprintf(ce.ErrorOutput, "%v write error: %v\n", ce.Time, err) + _, _ = fmt.Fprintf( + ce.ErrorOutput, + "%v write error: %v\n", + ce.Time, + err, + ) _ = ce.ErrorOutput.Sync() // ignore error } diff --git a/vendor/go.uber.org/zap/zapcore/lazy_with.go b/vendor/go.uber.org/zap/zapcore/lazy_with.go index 05288d6a8..500809de0 100644 --- a/vendor/go.uber.org/zap/zapcore/lazy_with.go +++ b/vendor/go.uber.org/zap/zapcore/lazy_with.go @@ -23,7 +23,8 @@ package zapcore import "sync" type lazyWithCore struct { - Core + core Core + originalCore Core sync.Once fields []Field } @@ -32,23 +33,45 @@ type lazyWithCore struct { // the logger is written to (or is further chained in a lon-lazy manner). func NewLazyWith(core Core, fields []Field) Core { return &lazyWithCore{ - Core: core, - fields: fields, + core: nil, // core is allocated once `initOnce` is called. + originalCore: core, + fields: fields, } } func (d *lazyWithCore) initOnce() { d.Once.Do(func() { - d.Core = d.Core.With(d.fields) + d.core = d.originalCore.With(d.fields) }) } func (d *lazyWithCore) With(fields []Field) Core { d.initOnce() - return d.Core.With(fields) + return d.core.With(fields) } func (d *lazyWithCore) Check(e Entry, ce *CheckedEntry) *CheckedEntry { + // This is safe because `lazyWithCore` doesn't change the level. + // So we can delagate the level check, any not `initOnce` + // just for the check. 
+ if !d.originalCore.Enabled(e.Level) { + return ce + } + d.initOnce() + return d.core.Check(e, ce) +} + +func (d *lazyWithCore) Enabled(level Level) bool { + // Like above, this is safe because `lazyWithCore` doesn't change the level. + return d.originalCore.Enabled(level) +} + +func (d *lazyWithCore) Write(e Entry, fields []Field) error { + d.initOnce() + return d.core.Write(e, fields) +} + +func (d *lazyWithCore) Sync() error { d.initOnce() - return d.Core.Check(e, ce) + return d.core.Sync() } diff --git a/vendor/go.uber.org/zap/zapcore/level.go b/vendor/go.uber.org/zap/zapcore/level.go index e01a24131..f3e166d67 100644 --- a/vendor/go.uber.org/zap/zapcore/level.go +++ b/vendor/go.uber.org/zap/zapcore/level.go @@ -179,19 +179,19 @@ func (l *Level) UnmarshalText(text []byte) error { func (l *Level) unmarshalText(text []byte) bool { switch string(text) { - case "debug", "DEBUG": + case "debug": *l = DebugLevel - case "info", "INFO", "": // make the zero value useful + case "info", "": // make the zero value useful *l = InfoLevel - case "warn", "WARN": + case "warn", "warning": *l = WarnLevel - case "error", "ERROR": + case "error": *l = ErrorLevel - case "dpanic", "DPANIC": + case "dpanic": *l = DPanicLevel - case "panic", "PANIC": + case "panic": *l = PanicLevel - case "fatal", "FATAL": + case "fatal": *l = FatalLevel default: return false diff --git a/vendor/go.yaml.in/yaml/v2/.travis.yml b/vendor/go.yaml.in/yaml/v2/.travis.yml new file mode 100644 index 000000000..7348c50c0 --- /dev/null +++ b/vendor/go.yaml.in/yaml/v2/.travis.yml @@ -0,0 +1,17 @@ +language: go + +go: + - "1.4.x" + - "1.5.x" + - "1.6.x" + - "1.7.x" + - "1.8.x" + - "1.9.x" + - "1.10.x" + - "1.11.x" + - "1.12.x" + - "1.13.x" + - "1.14.x" + - "tip" + +go_import_path: gopkg.in/yaml.v2 diff --git a/vendor/go.yaml.in/yaml/v2/LICENSE b/vendor/go.yaml.in/yaml/v2/LICENSE new file mode 100644 index 000000000..8dada3eda --- /dev/null +++ b/vendor/go.yaml.in/yaml/v2/LICENSE @@ -0,0 +1,201 @@ + Apache 
License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/LICENSE.libyaml b/vendor/go.yaml.in/yaml/v2/LICENSE.libyaml similarity index 100% rename from vendor/sigs.k8s.io/yaml/goyaml.v2/LICENSE.libyaml rename to vendor/go.yaml.in/yaml/v2/LICENSE.libyaml diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/NOTICE b/vendor/go.yaml.in/yaml/v2/NOTICE similarity index 100% rename from vendor/sigs.k8s.io/yaml/goyaml.v2/NOTICE rename to vendor/go.yaml.in/yaml/v2/NOTICE diff --git a/vendor/go.yaml.in/yaml/v2/README.md b/vendor/go.yaml.in/yaml/v2/README.md new file mode 100644 index 000000000..c9388da42 --- /dev/null +++ b/vendor/go.yaml.in/yaml/v2/README.md @@ -0,0 +1,131 @@ +# YAML support for the Go language + +Introduction +------------ + +The yaml package enables Go programs to comfortably encode and decode YAML +values. 
It was developed within [Canonical](https://www.canonical.com) as +part of the [juju](https://juju.ubuntu.com) project, and is based on a +pure Go port of the well-known [libyaml](http://pyyaml.org/wiki/LibYAML) +C library to parse and generate YAML data quickly and reliably. + +Compatibility +------------- + +The yaml package supports most of YAML 1.1 and 1.2, including support for +anchors, tags, map merging, etc. Multi-document unmarshalling is not yet +implemented, and base-60 floats from YAML 1.1 are purposefully not +supported since they're a poor design and are gone in YAML 1.2. + +Installation and usage +---------------------- + +The import path for the package is *go.yaml.in/yaml/v2*. + +To install it, run: + + go get go.yaml.in/yaml/v2 + +API documentation +----------------- + +See: + +API stability +------------- + +The package API for yaml v2 will remain stable as described in [gopkg.in](https://gopkg.in). + + +License +------- + +The yaml package is licensed under the Apache License 2.0. Please see the LICENSE file for details. + + +Example +------- + +```Go +package main + +import ( + "fmt" + "log" + + "go.yaml.in/yaml/v2" +) + +var data = ` +a: Easy! +b: + c: 2 + d: [3, 4] +` + +// Note: struct fields must be public in order for unmarshal to +// correctly populate the data. 
+type T struct { + A string + B struct { + RenamedC int `yaml:"c"` + D []int `yaml:",flow"` + } +} + +func main() { + t := T{} + + err := yaml.Unmarshal([]byte(data), &t) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- t:\n%v\n\n", t) + + d, err := yaml.Marshal(&t) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- t dump:\n%s\n\n", string(d)) + + m := make(map[interface{}]interface{}) + + err = yaml.Unmarshal([]byte(data), &m) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- m:\n%v\n\n", m) + + d, err = yaml.Marshal(&m) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- m dump:\n%s\n\n", string(d)) +} +``` + +This example will generate the following output: + +``` +--- t: +{Easy! {2 [3 4]}} + +--- t dump: +a: Easy! +b: + c: 2 + d: [3, 4] + + +--- m: +map[a:Easy! b:map[c:2 d:[3 4]]] + +--- m dump: +a: Easy! +b: + c: 2 + d: + - 3 + - 4 +``` + diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/apic.go b/vendor/go.yaml.in/yaml/v2/apic.go similarity index 100% rename from vendor/sigs.k8s.io/yaml/goyaml.v2/apic.go rename to vendor/go.yaml.in/yaml/v2/apic.go diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/decode.go b/vendor/go.yaml.in/yaml/v2/decode.go similarity index 100% rename from vendor/sigs.k8s.io/yaml/goyaml.v2/decode.go rename to vendor/go.yaml.in/yaml/v2/decode.go diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/emitterc.go b/vendor/go.yaml.in/yaml/v2/emitterc.go similarity index 100% rename from vendor/sigs.k8s.io/yaml/goyaml.v2/emitterc.go rename to vendor/go.yaml.in/yaml/v2/emitterc.go diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/encode.go b/vendor/go.yaml.in/yaml/v2/encode.go similarity index 100% rename from vendor/sigs.k8s.io/yaml/goyaml.v2/encode.go rename to vendor/go.yaml.in/yaml/v2/encode.go diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/parserc.go b/vendor/go.yaml.in/yaml/v2/parserc.go similarity index 100% rename from vendor/sigs.k8s.io/yaml/goyaml.v2/parserc.go rename 
to vendor/go.yaml.in/yaml/v2/parserc.go diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/readerc.go b/vendor/go.yaml.in/yaml/v2/readerc.go similarity index 100% rename from vendor/sigs.k8s.io/yaml/goyaml.v2/readerc.go rename to vendor/go.yaml.in/yaml/v2/readerc.go diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/resolve.go b/vendor/go.yaml.in/yaml/v2/resolve.go similarity index 100% rename from vendor/sigs.k8s.io/yaml/goyaml.v2/resolve.go rename to vendor/go.yaml.in/yaml/v2/resolve.go diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/scannerc.go b/vendor/go.yaml.in/yaml/v2/scannerc.go similarity index 100% rename from vendor/sigs.k8s.io/yaml/goyaml.v2/scannerc.go rename to vendor/go.yaml.in/yaml/v2/scannerc.go diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/sorter.go b/vendor/go.yaml.in/yaml/v2/sorter.go similarity index 100% rename from vendor/sigs.k8s.io/yaml/goyaml.v2/sorter.go rename to vendor/go.yaml.in/yaml/v2/sorter.go diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/writerc.go b/vendor/go.yaml.in/yaml/v2/writerc.go similarity index 100% rename from vendor/sigs.k8s.io/yaml/goyaml.v2/writerc.go rename to vendor/go.yaml.in/yaml/v2/writerc.go diff --git a/vendor/go.yaml.in/yaml/v2/yaml.go b/vendor/go.yaml.in/yaml/v2/yaml.go new file mode 100644 index 000000000..5248e1263 --- /dev/null +++ b/vendor/go.yaml.in/yaml/v2/yaml.go @@ -0,0 +1,478 @@ +// Package yaml implements YAML support for the Go language. +// +// Source code and other details for the project are available at GitHub: +// +// https://github.com/yaml/go-yaml +// +package yaml + +import ( + "errors" + "fmt" + "io" + "reflect" + "strings" + "sync" +) + +// MapSlice encodes and decodes as a YAML map. +// The order of keys is preserved when encoding and decoding. +type MapSlice []MapItem + +// MapItem is an item in a MapSlice. +type MapItem struct { + Key, Value interface{} +} + +// The Unmarshaler interface may be implemented by types to customize their +// behavior when being unmarshaled from a YAML document. 
The UnmarshalYAML +// method receives a function that may be called to unmarshal the original +// YAML value into a field or variable. It is safe to call the unmarshal +// function parameter more than once if necessary. +type Unmarshaler interface { + UnmarshalYAML(unmarshal func(interface{}) error) error +} + +// The Marshaler interface may be implemented by types to customize their +// behavior when being marshaled into a YAML document. The returned value +// is marshaled in place of the original value implementing Marshaler. +// +// If an error is returned by MarshalYAML, the marshaling procedure stops +// and returns with the provided error. +type Marshaler interface { + MarshalYAML() (interface{}, error) +} + +// Unmarshal decodes the first document found within the in byte slice +// and assigns decoded values into the out value. +// +// Maps and pointers (to a struct, string, int, etc) are accepted as out +// values. If an internal pointer within a struct is not initialized, +// the yaml package will initialize it if necessary for unmarshalling +// the provided data. The out parameter must not be nil. +// +// The type of the decoded values should be compatible with the respective +// values in out. If one or more values cannot be decoded due to a type +// mismatches, decoding continues partially until the end of the YAML +// content, and a *yaml.TypeError is returned with details for all +// missed values. +// +// Struct fields are only unmarshalled if they are exported (have an +// upper case first letter), and are unmarshalled using the field name +// lowercased as the default key. Custom keys may be defined via the +// "yaml" name in the field tag: the content preceding the first comma +// is used as the key, and the following comma-separated options are +// used to tweak the marshalling process (see Marshal). +// Conflicting names result in a runtime error. 
+// +// For example: +// +// type T struct { +// F int `yaml:"a,omitempty"` +// B int +// } +// var t T +// yaml.Unmarshal([]byte("a: 1\nb: 2"), &t) +// +// See the documentation of Marshal for the format of tags and a list of +// supported tag options. +// +func Unmarshal(in []byte, out interface{}) (err error) { + return unmarshal(in, out, false) +} + +// UnmarshalStrict is like Unmarshal except that any fields that are found +// in the data that do not have corresponding struct members, or mapping +// keys that are duplicates, will result in +// an error. +func UnmarshalStrict(in []byte, out interface{}) (err error) { + return unmarshal(in, out, true) +} + +// A Decoder reads and decodes YAML values from an input stream. +type Decoder struct { + strict bool + parser *parser +} + +// NewDecoder returns a new decoder that reads from r. +// +// The decoder introduces its own buffering and may read +// data from r beyond the YAML values requested. +func NewDecoder(r io.Reader) *Decoder { + return &Decoder{ + parser: newParserFromReader(r), + } +} + +// SetStrict sets whether strict decoding behaviour is enabled when +// decoding items in the data (see UnmarshalStrict). By default, decoding is not strict. +func (dec *Decoder) SetStrict(strict bool) { + dec.strict = strict +} + +// Decode reads the next YAML-encoded value from its input +// and stores it in the value pointed to by v. +// +// See the documentation for Unmarshal for details about the +// conversion of YAML into a Go value. 
+func (dec *Decoder) Decode(v interface{}) (err error) { + d := newDecoder(dec.strict) + defer handleErr(&err) + node := dec.parser.parse() + if node == nil { + return io.EOF + } + out := reflect.ValueOf(v) + if out.Kind() == reflect.Ptr && !out.IsNil() { + out = out.Elem() + } + d.unmarshal(node, out) + if len(d.terrors) > 0 { + return &TypeError{d.terrors} + } + return nil +} + +func unmarshal(in []byte, out interface{}, strict bool) (err error) { + defer handleErr(&err) + d := newDecoder(strict) + p := newParser(in) + defer p.destroy() + node := p.parse() + if node != nil { + v := reflect.ValueOf(out) + if v.Kind() == reflect.Ptr && !v.IsNil() { + v = v.Elem() + } + d.unmarshal(node, v) + } + if len(d.terrors) > 0 { + return &TypeError{d.terrors} + } + return nil +} + +// Marshal serializes the value provided into a YAML document. The structure +// of the generated document will reflect the structure of the value itself. +// Maps and pointers (to struct, string, int, etc) are accepted as the in value. +// +// Struct fields are only marshalled if they are exported (have an upper case +// first letter), and are marshalled using the field name lowercased as the +// default key. Custom keys may be defined via the "yaml" name in the field +// tag: the content preceding the first comma is used as the key, and the +// following comma-separated options are used to tweak the marshalling process. +// Conflicting names result in a runtime error. +// +// The field tag format accepted is: +// +// `(...) yaml:"[][,[,]]" (...)` +// +// The following flags are currently supported: +// +// omitempty Only include the field if it's not set to the zero +// value for the type or to empty slices or maps. +// Zero valued structs will be omitted if all their public +// fields are zero, unless they implement an IsZero +// method (see the IsZeroer interface type), in which +// case the field will be excluded if IsZero returns true. 
+// +// flow Marshal using a flow style (useful for structs, +// sequences and maps). +// +// inline Inline the field, which must be a struct or a map, +// causing all of its fields or keys to be processed as if +// they were part of the outer struct. For maps, keys must +// not conflict with the yaml keys of other struct fields. +// +// In addition, if the key is "-", the field is ignored. +// +// For example: +// +// type T struct { +// F int `yaml:"a,omitempty"` +// B int +// } +// yaml.Marshal(&T{B: 2}) // Returns "b: 2\n" +// yaml.Marshal(&T{F: 1}} // Returns "a: 1\nb: 0\n" +// +func Marshal(in interface{}) (out []byte, err error) { + defer handleErr(&err) + e := newEncoder() + defer e.destroy() + e.marshalDoc("", reflect.ValueOf(in)) + e.finish() + out = e.out + return +} + +// An Encoder writes YAML values to an output stream. +type Encoder struct { + encoder *encoder +} + +// NewEncoder returns a new encoder that writes to w. +// The Encoder should be closed after use to flush all data +// to w. +func NewEncoder(w io.Writer) *Encoder { + return &Encoder{ + encoder: newEncoderWithWriter(w), + } +} + +// Encode writes the YAML encoding of v to the stream. +// If multiple items are encoded to the stream, the +// second and subsequent document will be preceded +// with a "---" document separator, but the first will not. +// +// See the documentation for Marshal for details about the conversion of Go +// values to YAML. +func (e *Encoder) Encode(v interface{}) (err error) { + defer handleErr(&err) + e.encoder.marshalDoc("", reflect.ValueOf(v)) + return nil +} + +// Close closes the encoder by writing any remaining data. +// It does not write a stream terminating string "...". 
+func (e *Encoder) Close() (err error) { + defer handleErr(&err) + e.encoder.finish() + return nil +} + +func handleErr(err *error) { + if v := recover(); v != nil { + if e, ok := v.(yamlError); ok { + *err = e.err + } else { + panic(v) + } + } +} + +type yamlError struct { + err error +} + +func fail(err error) { + panic(yamlError{err}) +} + +func failf(format string, args ...interface{}) { + panic(yamlError{fmt.Errorf("yaml: "+format, args...)}) +} + +// A TypeError is returned by Unmarshal when one or more fields in +// the YAML document cannot be properly decoded into the requested +// types. When this error is returned, the value is still +// unmarshaled partially. +type TypeError struct { + Errors []string +} + +func (e *TypeError) Error() string { + return fmt.Sprintf("yaml: unmarshal errors:\n %s", strings.Join(e.Errors, "\n ")) +} + +// -------------------------------------------------------------------------- +// Maintain a mapping of keys to structure field indexes + +// The code in this section was copied from mgo/bson. + +// structInfo holds details for the serialization of fields of +// a given struct. +type structInfo struct { + FieldsMap map[string]fieldInfo + FieldsList []fieldInfo + + // InlineMap is the number of the field in the struct that + // contains an ,inline map, or -1 if there's none. + InlineMap int +} + +type fieldInfo struct { + Key string + Num int + OmitEmpty bool + Flow bool + // Id holds the unique field identifier, so we can cheaply + // check for field duplicates without maintaining an extra map. + Id int + + // Inline holds the field index if the field is part of an inlined struct. 
+ Inline []int +} + +var structMap = make(map[reflect.Type]*structInfo) +var fieldMapMutex sync.RWMutex + +func getStructInfo(st reflect.Type) (*structInfo, error) { + fieldMapMutex.RLock() + sinfo, found := structMap[st] + fieldMapMutex.RUnlock() + if found { + return sinfo, nil + } + + n := st.NumField() + fieldsMap := make(map[string]fieldInfo) + fieldsList := make([]fieldInfo, 0, n) + inlineMap := -1 + for i := 0; i != n; i++ { + field := st.Field(i) + if field.PkgPath != "" && !field.Anonymous { + continue // Private field + } + + info := fieldInfo{Num: i} + + tag := field.Tag.Get("yaml") + if tag == "" && strings.Index(string(field.Tag), ":") < 0 { + tag = string(field.Tag) + } + if tag == "-" { + continue + } + + inline := false + fields := strings.Split(tag, ",") + if len(fields) > 1 { + for _, flag := range fields[1:] { + switch flag { + case "omitempty": + info.OmitEmpty = true + case "flow": + info.Flow = true + case "inline": + inline = true + default: + return nil, errors.New(fmt.Sprintf("Unsupported flag %q in tag %q of type %s", flag, tag, st)) + } + } + tag = fields[0] + } + + if inline { + switch field.Type.Kind() { + case reflect.Map: + if inlineMap >= 0 { + return nil, errors.New("Multiple ,inline maps in struct " + st.String()) + } + if field.Type.Key() != reflect.TypeOf("") { + return nil, errors.New("Option ,inline needs a map with string keys in struct " + st.String()) + } + inlineMap = info.Num + case reflect.Struct: + sinfo, err := getStructInfo(field.Type) + if err != nil { + return nil, err + } + for _, finfo := range sinfo.FieldsList { + if _, found := fieldsMap[finfo.Key]; found { + msg := "Duplicated key '" + finfo.Key + "' in struct " + st.String() + return nil, errors.New(msg) + } + if finfo.Inline == nil { + finfo.Inline = []int{i, finfo.Num} + } else { + finfo.Inline = append([]int{i}, finfo.Inline...) 
+ } + finfo.Id = len(fieldsList) + fieldsMap[finfo.Key] = finfo + fieldsList = append(fieldsList, finfo) + } + default: + //return nil, errors.New("Option ,inline needs a struct value or map field") + return nil, errors.New("Option ,inline needs a struct value field") + } + continue + } + + if tag != "" { + info.Key = tag + } else { + info.Key = strings.ToLower(field.Name) + } + + if _, found = fieldsMap[info.Key]; found { + msg := "Duplicated key '" + info.Key + "' in struct " + st.String() + return nil, errors.New(msg) + } + + info.Id = len(fieldsList) + fieldsList = append(fieldsList, info) + fieldsMap[info.Key] = info + } + + sinfo = &structInfo{ + FieldsMap: fieldsMap, + FieldsList: fieldsList, + InlineMap: inlineMap, + } + + fieldMapMutex.Lock() + structMap[st] = sinfo + fieldMapMutex.Unlock() + return sinfo, nil +} + +// IsZeroer is used to check whether an object is zero to +// determine whether it should be omitted when marshaling +// with the omitempty flag. One notable implementation +// is time.Time. 
+type IsZeroer interface { + IsZero() bool +} + +func isZero(v reflect.Value) bool { + kind := v.Kind() + if z, ok := v.Interface().(IsZeroer); ok { + if (kind == reflect.Ptr || kind == reflect.Interface) && v.IsNil() { + return true + } + return z.IsZero() + } + switch kind { + case reflect.String: + return len(v.String()) == 0 + case reflect.Interface, reflect.Ptr: + return v.IsNil() + case reflect.Slice: + return v.Len() == 0 + case reflect.Map: + return v.Len() == 0 + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Struct: + vt := v.Type() + for i := v.NumField() - 1; i >= 0; i-- { + if vt.Field(i).PkgPath != "" { + continue // Private field + } + if !isZero(v.Field(i)) { + return false + } + } + return true + } + return false +} + +// FutureLineWrap globally disables line wrapping when encoding long strings. +// This is a temporary and thus deprecated method introduced to faciliate +// migration towards v3, which offers more control of line lengths on +// individual encodings, and has a default matching the behavior introduced +// by this function. +// +// The default formatting of v2 was erroneously changed in v2.3.0 and reverted +// in v2.4.0, at which point this function was introduced to help migration. 
+func FutureLineWrap() { + disableLineWrapping = true +} diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/yamlh.go b/vendor/go.yaml.in/yaml/v2/yamlh.go similarity index 100% rename from vendor/sigs.k8s.io/yaml/goyaml.v2/yamlh.go rename to vendor/go.yaml.in/yaml/v2/yamlh.go diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/yamlprivateh.go b/vendor/go.yaml.in/yaml/v2/yamlprivateh.go similarity index 100% rename from vendor/sigs.k8s.io/yaml/goyaml.v2/yamlprivateh.go rename to vendor/go.yaml.in/yaml/v2/yamlprivateh.go diff --git a/vendor/go.yaml.in/yaml/v3/LICENSE b/vendor/go.yaml.in/yaml/v3/LICENSE new file mode 100644 index 000000000..2683e4bb1 --- /dev/null +++ b/vendor/go.yaml.in/yaml/v3/LICENSE @@ -0,0 +1,50 @@ + +This project is covered by two different licenses: MIT and Apache. + +#### MIT License #### + +The following files were ported to Go from C files of libyaml, and thus +are still covered by their original MIT license, with the additional +copyright staring in 2011 when the project was ported over: + + apic.go emitterc.go parserc.go readerc.go scannerc.go + writerc.go yamlh.go yamlprivateh.go + +Copyright (c) 2006-2010 Kirill Simonov +Copyright (c) 2006-2011 Kirill Simonov + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +### Apache License ### + +All the remaining project files are covered by the Apache license: + +Copyright (c) 2011-2019 Canonical Ltd + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/vendor/go.yaml.in/yaml/v3/NOTICE b/vendor/go.yaml.in/yaml/v3/NOTICE new file mode 100644 index 000000000..866d74a7a --- /dev/null +++ b/vendor/go.yaml.in/yaml/v3/NOTICE @@ -0,0 +1,13 @@ +Copyright 2011-2016 Canonical Ltd. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
diff --git a/vendor/go.yaml.in/yaml/v3/README.md b/vendor/go.yaml.in/yaml/v3/README.md new file mode 100644 index 000000000..15a85a635 --- /dev/null +++ b/vendor/go.yaml.in/yaml/v3/README.md @@ -0,0 +1,171 @@ +go.yaml.in/yaml +=============== + +YAML Support for the Go Language + + +## Introduction + +The `yaml` package enables [Go](https://go.dev/) programs to comfortably encode +and decode [YAML](https://yaml.org/) values. + +It was originally developed within [Canonical](https://www.canonical.com) as +part of the [juju](https://juju.ubuntu.com) project, and is based on a pure Go +port of the well-known [libyaml](http://pyyaml.org/wiki/LibYAML) C library to +parse and generate YAML data quickly and reliably. + + +## Project Status + +This project started as a fork of the extremely popular [go-yaml]( +https://github.com/go-yaml/yaml/) +project, and is being maintained by the official [YAML organization]( +https://github.com/yaml/). + +The YAML team took over ongoing maintenance and development of the project after +discussion with go-yaml's author, @niemeyer, following his decision to +[label the project repository as "unmaintained"]( +https://github.com/go-yaml/yaml/blob/944c86a7d2/README.md) in April 2025. + +We have put together a team of dedicated maintainers including representatives +of go-yaml's most important downstream projects. + +We will strive to earn the trust of the various go-yaml forks to switch back to +this repository as their upstream. + +Please [contact us](https://cloud-native.slack.com/archives/C08PPAT8PS7) if you +would like to contribute or be involved. + + +## Compatibility + +The `yaml` package supports most of YAML 1.2, but preserves some behavior from +1.1 for backwards compatibility. + +Specifically, v3 of the `yaml` package: + +* Supports YAML 1.1 bools (`yes`/`no`, `on`/`off`) as long as they are being + decoded into a typed bool value. + Otherwise they behave as a string. + Booleans in YAML 1.2 are `true`/`false` only. 
+* Supports octals encoded and decoded as `0777` per YAML 1.1, rather than + `0o777` as specified in YAML 1.2, because most parsers still use the old + format. + Octals in the `0o777` format are supported though, so new files work. +* Does not support base-60 floats. + These are gone from YAML 1.2, and were actually never supported by this + package as it's clearly a poor choice. + + +## Installation and Usage + +The import path for the package is *go.yaml.in/yaml/v3*. + +To install it, run: + +```bash +go get go.yaml.in/yaml/v3 +``` + + +## API Documentation + +See: + + +## API Stability + +The package API for yaml v3 will remain stable as described in [gopkg.in]( +https://gopkg.in). + + +## Example + +```go +package main + +import ( + "fmt" + "log" + + "go.yaml.in/yaml/v3" +) + +var data = ` +a: Easy! +b: + c: 2 + d: [3, 4] +` + +// Note: struct fields must be public in order for unmarshal to +// correctly populate the data. +type T struct { + A string + B struct { + RenamedC int `yaml:"c"` + D []int `yaml:",flow"` + } +} + +func main() { + t := T{} + + err := yaml.Unmarshal([]byte(data), &t) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- t:\n%v\n\n", t) + + d, err := yaml.Marshal(&t) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- t dump:\n%s\n\n", string(d)) + + m := make(map[interface{}]interface{}) + + err = yaml.Unmarshal([]byte(data), &m) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- m:\n%v\n\n", m) + + d, err = yaml.Marshal(&m) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- m dump:\n%s\n\n", string(d)) +} +``` + +This example will generate the following output: + +``` +--- t: +{Easy! {2 [3 4]}} + +--- t dump: +a: Easy! +b: + c: 2 + d: [3, 4] + + +--- m: +map[a:Easy! b:map[c:2 d:[3 4]]] + +--- m dump: +a: Easy! +b: + c: 2 + d: + - 3 + - 4 +``` + + +## License + +The yaml package is licensed under the MIT and Apache License 2.0 licenses. 
+Please see the LICENSE file for details. diff --git a/vendor/go.yaml.in/yaml/v3/apic.go b/vendor/go.yaml.in/yaml/v3/apic.go new file mode 100644 index 000000000..05fd305da --- /dev/null +++ b/vendor/go.yaml.in/yaml/v3/apic.go @@ -0,0 +1,747 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// Copyright (c) 2006-2010 Kirill Simonov +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of +// this software and associated documentation files (the "Software"), to deal in +// the Software without restriction, including without limitation the rights to +// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +// of the Software, and to permit persons to whom the Software is furnished to do +// so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +package yaml + +import ( + "io" +) + +func yaml_insert_token(parser *yaml_parser_t, pos int, token *yaml_token_t) { + //fmt.Println("yaml_insert_token", "pos:", pos, "typ:", token.typ, "head:", parser.tokens_head, "len:", len(parser.tokens)) + + // Check if we can move the queue at the beginning of the buffer. 
+ if parser.tokens_head > 0 && len(parser.tokens) == cap(parser.tokens) { + if parser.tokens_head != len(parser.tokens) { + copy(parser.tokens, parser.tokens[parser.tokens_head:]) + } + parser.tokens = parser.tokens[:len(parser.tokens)-parser.tokens_head] + parser.tokens_head = 0 + } + parser.tokens = append(parser.tokens, *token) + if pos < 0 { + return + } + copy(parser.tokens[parser.tokens_head+pos+1:], parser.tokens[parser.tokens_head+pos:]) + parser.tokens[parser.tokens_head+pos] = *token +} + +// Create a new parser object. +func yaml_parser_initialize(parser *yaml_parser_t) bool { + *parser = yaml_parser_t{ + raw_buffer: make([]byte, 0, input_raw_buffer_size), + buffer: make([]byte, 0, input_buffer_size), + } + return true +} + +// Destroy a parser object. +func yaml_parser_delete(parser *yaml_parser_t) { + *parser = yaml_parser_t{} +} + +// String read handler. +func yaml_string_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) { + if parser.input_pos == len(parser.input) { + return 0, io.EOF + } + n = copy(buffer, parser.input[parser.input_pos:]) + parser.input_pos += n + return n, nil +} + +// Reader read handler. +func yaml_reader_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) { + return parser.input_reader.Read(buffer) +} + +// Set a string input. +func yaml_parser_set_input_string(parser *yaml_parser_t, input []byte) { + if parser.read_handler != nil { + panic("must set the input source only once") + } + parser.read_handler = yaml_string_read_handler + parser.input = input + parser.input_pos = 0 +} + +// Set a file input. +func yaml_parser_set_input_reader(parser *yaml_parser_t, r io.Reader) { + if parser.read_handler != nil { + panic("must set the input source only once") + } + parser.read_handler = yaml_reader_read_handler + parser.input_reader = r +} + +// Set the source encoding. 
+func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) { + if parser.encoding != yaml_ANY_ENCODING { + panic("must set the encoding only once") + } + parser.encoding = encoding +} + +// Create a new emitter object. +func yaml_emitter_initialize(emitter *yaml_emitter_t) { + *emitter = yaml_emitter_t{ + buffer: make([]byte, output_buffer_size), + raw_buffer: make([]byte, 0, output_raw_buffer_size), + states: make([]yaml_emitter_state_t, 0, initial_stack_size), + events: make([]yaml_event_t, 0, initial_queue_size), + best_width: -1, + } +} + +// Destroy an emitter object. +func yaml_emitter_delete(emitter *yaml_emitter_t) { + *emitter = yaml_emitter_t{} +} + +// String write handler. +func yaml_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error { + *emitter.output_buffer = append(*emitter.output_buffer, buffer...) + return nil +} + +// yaml_writer_write_handler uses emitter.output_writer to write the +// emitted text. +func yaml_writer_write_handler(emitter *yaml_emitter_t, buffer []byte) error { + _, err := emitter.output_writer.Write(buffer) + return err +} + +// Set a string output. +func yaml_emitter_set_output_string(emitter *yaml_emitter_t, output_buffer *[]byte) { + if emitter.write_handler != nil { + panic("must set the output target only once") + } + emitter.write_handler = yaml_string_write_handler + emitter.output_buffer = output_buffer +} + +// Set a file output. +func yaml_emitter_set_output_writer(emitter *yaml_emitter_t, w io.Writer) { + if emitter.write_handler != nil { + panic("must set the output target only once") + } + emitter.write_handler = yaml_writer_write_handler + emitter.output_writer = w +} + +// Set the output encoding. +func yaml_emitter_set_encoding(emitter *yaml_emitter_t, encoding yaml_encoding_t) { + if emitter.encoding != yaml_ANY_ENCODING { + panic("must set the output encoding only once") + } + emitter.encoding = encoding +} + +// Set the canonical output style. 
+func yaml_emitter_set_canonical(emitter *yaml_emitter_t, canonical bool) { + emitter.canonical = canonical +} + +// Set the indentation increment. +func yaml_emitter_set_indent(emitter *yaml_emitter_t, indent int) { + if indent < 2 || indent > 9 { + indent = 2 + } + emitter.best_indent = indent +} + +// Set the preferred line width. +func yaml_emitter_set_width(emitter *yaml_emitter_t, width int) { + if width < 0 { + width = -1 + } + emitter.best_width = width +} + +// Set if unescaped non-ASCII characters are allowed. +func yaml_emitter_set_unicode(emitter *yaml_emitter_t, unicode bool) { + emitter.unicode = unicode +} + +// Set the preferred line break character. +func yaml_emitter_set_break(emitter *yaml_emitter_t, line_break yaml_break_t) { + emitter.line_break = line_break +} + +///* +// * Destroy a token object. +// */ +// +//YAML_DECLARE(void) +//yaml_token_delete(yaml_token_t *token) +//{ +// assert(token); // Non-NULL token object expected. +// +// switch (token.type) +// { +// case YAML_TAG_DIRECTIVE_TOKEN: +// yaml_free(token.data.tag_directive.handle); +// yaml_free(token.data.tag_directive.prefix); +// break; +// +// case YAML_ALIAS_TOKEN: +// yaml_free(token.data.alias.value); +// break; +// +// case YAML_ANCHOR_TOKEN: +// yaml_free(token.data.anchor.value); +// break; +// +// case YAML_TAG_TOKEN: +// yaml_free(token.data.tag.handle); +// yaml_free(token.data.tag.suffix); +// break; +// +// case YAML_SCALAR_TOKEN: +// yaml_free(token.data.scalar.value); +// break; +// +// default: +// break; +// } +// +// memset(token, 0, sizeof(yaml_token_t)); +//} +// +///* +// * Check if a string is a valid UTF-8 sequence. +// * +// * Check 'reader.c' for more details on UTF-8 encoding. 
+// */ +// +//static int +//yaml_check_utf8(yaml_char_t *start, size_t length) +//{ +// yaml_char_t *end = start+length; +// yaml_char_t *pointer = start; +// +// while (pointer < end) { +// unsigned char octet; +// unsigned int width; +// unsigned int value; +// size_t k; +// +// octet = pointer[0]; +// width = (octet & 0x80) == 0x00 ? 1 : +// (octet & 0xE0) == 0xC0 ? 2 : +// (octet & 0xF0) == 0xE0 ? 3 : +// (octet & 0xF8) == 0xF0 ? 4 : 0; +// value = (octet & 0x80) == 0x00 ? octet & 0x7F : +// (octet & 0xE0) == 0xC0 ? octet & 0x1F : +// (octet & 0xF0) == 0xE0 ? octet & 0x0F : +// (octet & 0xF8) == 0xF0 ? octet & 0x07 : 0; +// if (!width) return 0; +// if (pointer+width > end) return 0; +// for (k = 1; k < width; k ++) { +// octet = pointer[k]; +// if ((octet & 0xC0) != 0x80) return 0; +// value = (value << 6) + (octet & 0x3F); +// } +// if (!((width == 1) || +// (width == 2 && value >= 0x80) || +// (width == 3 && value >= 0x800) || +// (width == 4 && value >= 0x10000))) return 0; +// +// pointer += width; +// } +// +// return 1; +//} +// + +// Create STREAM-START. +func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) { + *event = yaml_event_t{ + typ: yaml_STREAM_START_EVENT, + encoding: encoding, + } +} + +// Create STREAM-END. +func yaml_stream_end_event_initialize(event *yaml_event_t) { + *event = yaml_event_t{ + typ: yaml_STREAM_END_EVENT, + } +} + +// Create DOCUMENT-START. +func yaml_document_start_event_initialize( + event *yaml_event_t, + version_directive *yaml_version_directive_t, + tag_directives []yaml_tag_directive_t, + implicit bool, +) { + *event = yaml_event_t{ + typ: yaml_DOCUMENT_START_EVENT, + version_directive: version_directive, + tag_directives: tag_directives, + implicit: implicit, + } +} + +// Create DOCUMENT-END. +func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) { + *event = yaml_event_t{ + typ: yaml_DOCUMENT_END_EVENT, + implicit: implicit, + } +} + +// Create ALIAS. 
+func yaml_alias_event_initialize(event *yaml_event_t, anchor []byte) bool { + *event = yaml_event_t{ + typ: yaml_ALIAS_EVENT, + anchor: anchor, + } + return true +} + +// Create SCALAR. +func yaml_scalar_event_initialize(event *yaml_event_t, anchor, tag, value []byte, plain_implicit, quoted_implicit bool, style yaml_scalar_style_t) bool { + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + anchor: anchor, + tag: tag, + value: value, + implicit: plain_implicit, + quoted_implicit: quoted_implicit, + style: yaml_style_t(style), + } + return true +} + +// Create SEQUENCE-START. +func yaml_sequence_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_sequence_style_t) bool { + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(style), + } + return true +} + +// Create SEQUENCE-END. +func yaml_sequence_end_event_initialize(event *yaml_event_t) bool { + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + } + return true +} + +// Create MAPPING-START. +func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_mapping_style_t) { + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(style), + } +} + +// Create MAPPING-END. +func yaml_mapping_end_event_initialize(event *yaml_event_t) { + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + } +} + +// Destroy an event object. +func yaml_event_delete(event *yaml_event_t) { + *event = yaml_event_t{} +} + +///* +// * Create a document object. 
+// */ +// +//YAML_DECLARE(int) +//yaml_document_initialize(document *yaml_document_t, +// version_directive *yaml_version_directive_t, +// tag_directives_start *yaml_tag_directive_t, +// tag_directives_end *yaml_tag_directive_t, +// start_implicit int, end_implicit int) +//{ +// struct { +// error yaml_error_type_t +// } context +// struct { +// start *yaml_node_t +// end *yaml_node_t +// top *yaml_node_t +// } nodes = { NULL, NULL, NULL } +// version_directive_copy *yaml_version_directive_t = NULL +// struct { +// start *yaml_tag_directive_t +// end *yaml_tag_directive_t +// top *yaml_tag_directive_t +// } tag_directives_copy = { NULL, NULL, NULL } +// value yaml_tag_directive_t = { NULL, NULL } +// mark yaml_mark_t = { 0, 0, 0 } +// +// assert(document) // Non-NULL document object is expected. +// assert((tag_directives_start && tag_directives_end) || +// (tag_directives_start == tag_directives_end)) +// // Valid tag directives are expected. +// +// if (!STACK_INIT(&context, nodes, INITIAL_STACK_SIZE)) goto error +// +// if (version_directive) { +// version_directive_copy = yaml_malloc(sizeof(yaml_version_directive_t)) +// if (!version_directive_copy) goto error +// version_directive_copy.major = version_directive.major +// version_directive_copy.minor = version_directive.minor +// } +// +// if (tag_directives_start != tag_directives_end) { +// tag_directive *yaml_tag_directive_t +// if (!STACK_INIT(&context, tag_directives_copy, INITIAL_STACK_SIZE)) +// goto error +// for (tag_directive = tag_directives_start +// tag_directive != tag_directives_end; tag_directive ++) { +// assert(tag_directive.handle) +// assert(tag_directive.prefix) +// if (!yaml_check_utf8(tag_directive.handle, +// strlen((char *)tag_directive.handle))) +// goto error +// if (!yaml_check_utf8(tag_directive.prefix, +// strlen((char *)tag_directive.prefix))) +// goto error +// value.handle = yaml_strdup(tag_directive.handle) +// value.prefix = yaml_strdup(tag_directive.prefix) +// if 
(!value.handle || !value.prefix) goto error +// if (!PUSH(&context, tag_directives_copy, value)) +// goto error +// value.handle = NULL +// value.prefix = NULL +// } +// } +// +// DOCUMENT_INIT(*document, nodes.start, nodes.end, version_directive_copy, +// tag_directives_copy.start, tag_directives_copy.top, +// start_implicit, end_implicit, mark, mark) +// +// return 1 +// +//error: +// STACK_DEL(&context, nodes) +// yaml_free(version_directive_copy) +// while (!STACK_EMPTY(&context, tag_directives_copy)) { +// value yaml_tag_directive_t = POP(&context, tag_directives_copy) +// yaml_free(value.handle) +// yaml_free(value.prefix) +// } +// STACK_DEL(&context, tag_directives_copy) +// yaml_free(value.handle) +// yaml_free(value.prefix) +// +// return 0 +//} +// +///* +// * Destroy a document object. +// */ +// +//YAML_DECLARE(void) +//yaml_document_delete(document *yaml_document_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// tag_directive *yaml_tag_directive_t +// +// context.error = YAML_NO_ERROR // Eliminate a compiler warning. +// +// assert(document) // Non-NULL document object is expected. +// +// while (!STACK_EMPTY(&context, document.nodes)) { +// node yaml_node_t = POP(&context, document.nodes) +// yaml_free(node.tag) +// switch (node.type) { +// case YAML_SCALAR_NODE: +// yaml_free(node.data.scalar.value) +// break +// case YAML_SEQUENCE_NODE: +// STACK_DEL(&context, node.data.sequence.items) +// break +// case YAML_MAPPING_NODE: +// STACK_DEL(&context, node.data.mapping.pairs) +// break +// default: +// assert(0) // Should not happen. 
+// } +// } +// STACK_DEL(&context, document.nodes) +// +// yaml_free(document.version_directive) +// for (tag_directive = document.tag_directives.start +// tag_directive != document.tag_directives.end +// tag_directive++) { +// yaml_free(tag_directive.handle) +// yaml_free(tag_directive.prefix) +// } +// yaml_free(document.tag_directives.start) +// +// memset(document, 0, sizeof(yaml_document_t)) +//} +// +///** +// * Get a document node. +// */ +// +//YAML_DECLARE(yaml_node_t *) +//yaml_document_get_node(document *yaml_document_t, index int) +//{ +// assert(document) // Non-NULL document object is expected. +// +// if (index > 0 && document.nodes.start + index <= document.nodes.top) { +// return document.nodes.start + index - 1 +// } +// return NULL +//} +// +///** +// * Get the root object. +// */ +// +//YAML_DECLARE(yaml_node_t *) +//yaml_document_get_root_node(document *yaml_document_t) +//{ +// assert(document) // Non-NULL document object is expected. +// +// if (document.nodes.top != document.nodes.start) { +// return document.nodes.start +// } +// return NULL +//} +// +///* +// * Add a scalar node to a document. +// */ +// +//YAML_DECLARE(int) +//yaml_document_add_scalar(document *yaml_document_t, +// tag *yaml_char_t, value *yaml_char_t, length int, +// style yaml_scalar_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// value_copy *yaml_char_t = NULL +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. +// assert(value) // Non-NULL value is expected. 
+// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_SCALAR_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (length < 0) { +// length = strlen((char *)value) +// } +// +// if (!yaml_check_utf8(value, length)) goto error +// value_copy = yaml_malloc(length+1) +// if (!value_copy) goto error +// memcpy(value_copy, value, length) +// value_copy[length] = '\0' +// +// SCALAR_NODE_INIT(node, tag_copy, value_copy, length, style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// yaml_free(tag_copy) +// yaml_free(value_copy) +// +// return 0 +//} +// +///* +// * Add a sequence node to a document. +// */ +// +//YAML_DECLARE(int) +//yaml_document_add_sequence(document *yaml_document_t, +// tag *yaml_char_t, style yaml_sequence_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// struct { +// start *yaml_node_item_t +// end *yaml_node_item_t +// top *yaml_node_item_t +// } items = { NULL, NULL, NULL } +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. +// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_SEQUENCE_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (!STACK_INIT(&context, items, INITIAL_STACK_SIZE)) goto error +// +// SEQUENCE_NODE_INIT(node, tag_copy, items.start, items.end, +// style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// STACK_DEL(&context, items) +// yaml_free(tag_copy) +// +// return 0 +//} +// +///* +// * Add a mapping node to a document. 
+// */ +// +//YAML_DECLARE(int) +//yaml_document_add_mapping(document *yaml_document_t, +// tag *yaml_char_t, style yaml_mapping_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// struct { +// start *yaml_node_pair_t +// end *yaml_node_pair_t +// top *yaml_node_pair_t +// } pairs = { NULL, NULL, NULL } +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. +// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_MAPPING_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (!STACK_INIT(&context, pairs, INITIAL_STACK_SIZE)) goto error +// +// MAPPING_NODE_INIT(node, tag_copy, pairs.start, pairs.end, +// style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// STACK_DEL(&context, pairs) +// yaml_free(tag_copy) +// +// return 0 +//} +// +///* +// * Append an item to a sequence node. +// */ +// +//YAML_DECLARE(int) +//yaml_document_append_sequence_item(document *yaml_document_t, +// sequence int, item int) +//{ +// struct { +// error yaml_error_type_t +// } context +// +// assert(document) // Non-NULL document is required. +// assert(sequence > 0 +// && document.nodes.start + sequence <= document.nodes.top) +// // Valid sequence id is required. +// assert(document.nodes.start[sequence-1].type == YAML_SEQUENCE_NODE) +// // A sequence node is required. +// assert(item > 0 && document.nodes.start + item <= document.nodes.top) +// // Valid item id is required. +// +// if (!PUSH(&context, +// document.nodes.start[sequence-1].data.sequence.items, item)) +// return 0 +// +// return 1 +//} +// +///* +// * Append a pair of a key and a value to a mapping node. 
+// */ +// +//YAML_DECLARE(int) +//yaml_document_append_mapping_pair(document *yaml_document_t, +// mapping int, key int, value int) +//{ +// struct { +// error yaml_error_type_t +// } context +// +// pair yaml_node_pair_t +// +// assert(document) // Non-NULL document is required. +// assert(mapping > 0 +// && document.nodes.start + mapping <= document.nodes.top) +// // Valid mapping id is required. +// assert(document.nodes.start[mapping-1].type == YAML_MAPPING_NODE) +// // A mapping node is required. +// assert(key > 0 && document.nodes.start + key <= document.nodes.top) +// // Valid key id is required. +// assert(value > 0 && document.nodes.start + value <= document.nodes.top) +// // Valid value id is required. +// +// pair.key = key +// pair.value = value +// +// if (!PUSH(&context, +// document.nodes.start[mapping-1].data.mapping.pairs, pair)) +// return 0 +// +// return 1 +//} +// +// diff --git a/vendor/go.yaml.in/yaml/v3/decode.go b/vendor/go.yaml.in/yaml/v3/decode.go new file mode 100644 index 000000000..02e2b17bf --- /dev/null +++ b/vendor/go.yaml.in/yaml/v3/decode.go @@ -0,0 +1,1018 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package yaml + +import ( + "encoding" + "encoding/base64" + "fmt" + "io" + "math" + "reflect" + "strconv" + "time" +) + +// ---------------------------------------------------------------------------- +// Parser, produces a node tree out of a libyaml event stream. 
+ +type parser struct { + parser yaml_parser_t + event yaml_event_t + doc *Node + anchors map[string]*Node + doneInit bool + textless bool +} + +func newParser(b []byte) *parser { + p := parser{} + if !yaml_parser_initialize(&p.parser) { + panic("failed to initialize YAML emitter") + } + if len(b) == 0 { + b = []byte{'\n'} + } + yaml_parser_set_input_string(&p.parser, b) + return &p +} + +func newParserFromReader(r io.Reader) *parser { + p := parser{} + if !yaml_parser_initialize(&p.parser) { + panic("failed to initialize YAML emitter") + } + yaml_parser_set_input_reader(&p.parser, r) + return &p +} + +func (p *parser) init() { + if p.doneInit { + return + } + p.anchors = make(map[string]*Node) + p.expect(yaml_STREAM_START_EVENT) + p.doneInit = true +} + +func (p *parser) destroy() { + if p.event.typ != yaml_NO_EVENT { + yaml_event_delete(&p.event) + } + yaml_parser_delete(&p.parser) +} + +// expect consumes an event from the event stream and +// checks that it's of the expected type. +func (p *parser) expect(e yaml_event_type_t) { + if p.event.typ == yaml_NO_EVENT { + if !yaml_parser_parse(&p.parser, &p.event) { + p.fail() + } + } + if p.event.typ == yaml_STREAM_END_EVENT { + failf("attempted to go past the end of stream; corrupted value?") + } + if p.event.typ != e { + p.parser.problem = fmt.Sprintf("expected %s event but got %s", e, p.event.typ) + p.fail() + } + yaml_event_delete(&p.event) + p.event.typ = yaml_NO_EVENT +} + +// peek peeks at the next event in the event stream, +// puts the results into p.event and returns the event type. +func (p *parser) peek() yaml_event_type_t { + if p.event.typ != yaml_NO_EVENT { + return p.event.typ + } + // It's curious choice from the underlying API to generally return a + // positive result on success, but on this case return true in an error + // scenario. This was the source of bugs in the past (issue #666). 
+ if !yaml_parser_parse(&p.parser, &p.event) || p.parser.error != yaml_NO_ERROR { + p.fail() + } + return p.event.typ +} + +func (p *parser) fail() { + var where string + var line int + if p.parser.context_mark.line != 0 { + line = p.parser.context_mark.line + // Scanner errors don't iterate line before returning error + if p.parser.error == yaml_SCANNER_ERROR { + line++ + } + } else if p.parser.problem_mark.line != 0 { + line = p.parser.problem_mark.line + // Scanner errors don't iterate line before returning error + if p.parser.error == yaml_SCANNER_ERROR { + line++ + } + } + if line != 0 { + where = "line " + strconv.Itoa(line) + ": " + } + var msg string + if len(p.parser.problem) > 0 { + msg = p.parser.problem + } else { + msg = "unknown problem parsing YAML content" + } + failf("%s%s", where, msg) +} + +func (p *parser) anchor(n *Node, anchor []byte) { + if anchor != nil { + n.Anchor = string(anchor) + p.anchors[n.Anchor] = n + } +} + +func (p *parser) parse() *Node { + p.init() + switch p.peek() { + case yaml_SCALAR_EVENT: + return p.scalar() + case yaml_ALIAS_EVENT: + return p.alias() + case yaml_MAPPING_START_EVENT: + return p.mapping() + case yaml_SEQUENCE_START_EVENT: + return p.sequence() + case yaml_DOCUMENT_START_EVENT: + return p.document() + case yaml_STREAM_END_EVENT: + // Happens when attempting to decode an empty buffer. + return nil + case yaml_TAIL_COMMENT_EVENT: + panic("internal error: unexpected tail comment event (please report)") + default: + panic("internal error: attempted to parse unknown event (please report): " + p.event.typ.String()) + } +} + +func (p *parser) node(kind Kind, defaultTag, tag, value string) *Node { + var style Style + if tag != "" && tag != "!" 
{ + tag = shortTag(tag) + style = TaggedStyle + } else if defaultTag != "" { + tag = defaultTag + } else if kind == ScalarNode { + tag, _ = resolve("", value) + } + n := &Node{ + Kind: kind, + Tag: tag, + Value: value, + Style: style, + } + if !p.textless { + n.Line = p.event.start_mark.line + 1 + n.Column = p.event.start_mark.column + 1 + n.HeadComment = string(p.event.head_comment) + n.LineComment = string(p.event.line_comment) + n.FootComment = string(p.event.foot_comment) + } + return n +} + +func (p *parser) parseChild(parent *Node) *Node { + child := p.parse() + parent.Content = append(parent.Content, child) + return child +} + +func (p *parser) document() *Node { + n := p.node(DocumentNode, "", "", "") + p.doc = n + p.expect(yaml_DOCUMENT_START_EVENT) + p.parseChild(n) + if p.peek() == yaml_DOCUMENT_END_EVENT { + n.FootComment = string(p.event.foot_comment) + } + p.expect(yaml_DOCUMENT_END_EVENT) + return n +} + +func (p *parser) alias() *Node { + n := p.node(AliasNode, "", "", string(p.event.anchor)) + n.Alias = p.anchors[n.Value] + if n.Alias == nil { + failf("unknown anchor '%s' referenced", n.Value) + } + p.expect(yaml_ALIAS_EVENT) + return n +} + +func (p *parser) scalar() *Node { + var parsedStyle = p.event.scalar_style() + var nodeStyle Style + switch { + case parsedStyle&yaml_DOUBLE_QUOTED_SCALAR_STYLE != 0: + nodeStyle = DoubleQuotedStyle + case parsedStyle&yaml_SINGLE_QUOTED_SCALAR_STYLE != 0: + nodeStyle = SingleQuotedStyle + case parsedStyle&yaml_LITERAL_SCALAR_STYLE != 0: + nodeStyle = LiteralStyle + case parsedStyle&yaml_FOLDED_SCALAR_STYLE != 0: + nodeStyle = FoldedStyle + } + var nodeValue = string(p.event.value) + var nodeTag = string(p.event.tag) + var defaultTag string + if nodeStyle == 0 { + if nodeValue == "<<" { + defaultTag = mergeTag + } + } else { + defaultTag = strTag + } + n := p.node(ScalarNode, defaultTag, nodeTag, nodeValue) + n.Style |= nodeStyle + p.anchor(n, p.event.anchor) + p.expect(yaml_SCALAR_EVENT) + return n +} + +func 
(p *parser) sequence() *Node { + n := p.node(SequenceNode, seqTag, string(p.event.tag), "") + if p.event.sequence_style()&yaml_FLOW_SEQUENCE_STYLE != 0 { + n.Style |= FlowStyle + } + p.anchor(n, p.event.anchor) + p.expect(yaml_SEQUENCE_START_EVENT) + for p.peek() != yaml_SEQUENCE_END_EVENT { + p.parseChild(n) + } + n.LineComment = string(p.event.line_comment) + n.FootComment = string(p.event.foot_comment) + p.expect(yaml_SEQUENCE_END_EVENT) + return n +} + +func (p *parser) mapping() *Node { + n := p.node(MappingNode, mapTag, string(p.event.tag), "") + block := true + if p.event.mapping_style()&yaml_FLOW_MAPPING_STYLE != 0 { + block = false + n.Style |= FlowStyle + } + p.anchor(n, p.event.anchor) + p.expect(yaml_MAPPING_START_EVENT) + for p.peek() != yaml_MAPPING_END_EVENT { + k := p.parseChild(n) + if block && k.FootComment != "" { + // Must be a foot comment for the prior value when being dedented. + if len(n.Content) > 2 { + n.Content[len(n.Content)-3].FootComment = k.FootComment + k.FootComment = "" + } + } + v := p.parseChild(n) + if k.FootComment == "" && v.FootComment != "" { + k.FootComment = v.FootComment + v.FootComment = "" + } + if p.peek() == yaml_TAIL_COMMENT_EVENT { + if k.FootComment == "" { + k.FootComment = string(p.event.foot_comment) + } + p.expect(yaml_TAIL_COMMENT_EVENT) + } + } + n.LineComment = string(p.event.line_comment) + n.FootComment = string(p.event.foot_comment) + if n.Style&FlowStyle == 0 && n.FootComment != "" && len(n.Content) > 1 { + n.Content[len(n.Content)-2].FootComment = n.FootComment + n.FootComment = "" + } + p.expect(yaml_MAPPING_END_EVENT) + return n +} + +// ---------------------------------------------------------------------------- +// Decoder, unmarshals a node into a provided value. 
+ +type decoder struct { + doc *Node + aliases map[*Node]bool + terrors []string + + stringMapType reflect.Type + generalMapType reflect.Type + + knownFields bool + uniqueKeys bool + decodeCount int + aliasCount int + aliasDepth int + + mergedFields map[interface{}]bool +} + +var ( + nodeType = reflect.TypeOf(Node{}) + durationType = reflect.TypeOf(time.Duration(0)) + stringMapType = reflect.TypeOf(map[string]interface{}{}) + generalMapType = reflect.TypeOf(map[interface{}]interface{}{}) + ifaceType = generalMapType.Elem() + timeType = reflect.TypeOf(time.Time{}) + ptrTimeType = reflect.TypeOf(&time.Time{}) +) + +func newDecoder() *decoder { + d := &decoder{ + stringMapType: stringMapType, + generalMapType: generalMapType, + uniqueKeys: true, + } + d.aliases = make(map[*Node]bool) + return d +} + +func (d *decoder) terror(n *Node, tag string, out reflect.Value) { + if n.Tag != "" { + tag = n.Tag + } + value := n.Value + if tag != seqTag && tag != mapTag { + if len(value) > 10 { + value = " `" + value[:7] + "...`" + } else { + value = " `" + value + "`" + } + } + d.terrors = append(d.terrors, fmt.Sprintf("line %d: cannot unmarshal %s%s into %s", n.Line, shortTag(tag), value, out.Type())) +} + +func (d *decoder) callUnmarshaler(n *Node, u Unmarshaler) (good bool) { + err := u.UnmarshalYAML(n) + if e, ok := err.(*TypeError); ok { + d.terrors = append(d.terrors, e.Errors...) + return false + } + if err != nil { + fail(err) + } + return true +} + +func (d *decoder) callObsoleteUnmarshaler(n *Node, u obsoleteUnmarshaler) (good bool) { + terrlen := len(d.terrors) + err := u.UnmarshalYAML(func(v interface{}) (err error) { + defer handleErr(&err) + d.unmarshal(n, reflect.ValueOf(v)) + if len(d.terrors) > terrlen { + issues := d.terrors[terrlen:] + d.terrors = d.terrors[:terrlen] + return &TypeError{issues} + } + return nil + }) + if e, ok := err.(*TypeError); ok { + d.terrors = append(d.terrors, e.Errors...) 
+ return false + } + if err != nil { + fail(err) + } + return true +} + +// d.prepare initializes and dereferences pointers and calls UnmarshalYAML +// if a value is found to implement it. +// It returns the initialized and dereferenced out value, whether +// unmarshalling was already done by UnmarshalYAML, and if so whether +// its types unmarshalled appropriately. +// +// If n holds a null value, prepare returns before doing anything. +func (d *decoder) prepare(n *Node, out reflect.Value) (newout reflect.Value, unmarshaled, good bool) { + if n.ShortTag() == nullTag { + return out, false, false + } + again := true + for again { + again = false + if out.Kind() == reflect.Ptr { + if out.IsNil() { + out.Set(reflect.New(out.Type().Elem())) + } + out = out.Elem() + again = true + } + if out.CanAddr() { + outi := out.Addr().Interface() + if u, ok := outi.(Unmarshaler); ok { + good = d.callUnmarshaler(n, u) + return out, true, good + } + if u, ok := outi.(obsoleteUnmarshaler); ok { + good = d.callObsoleteUnmarshaler(n, u) + return out, true, good + } + } + } + return out, false, false +} + +func (d *decoder) fieldByIndex(n *Node, v reflect.Value, index []int) (field reflect.Value) { + if n.ShortTag() == nullTag { + return reflect.Value{} + } + for _, num := range index { + for { + if v.Kind() == reflect.Ptr { + if v.IsNil() { + v.Set(reflect.New(v.Type().Elem())) + } + v = v.Elem() + continue + } + break + } + v = v.Field(num) + } + return v +} + +const ( + // 400,000 decode operations is ~500kb of dense object declarations, or + // ~5kb of dense object declarations with 10000% alias expansion + alias_ratio_range_low = 400000 + + // 4,000,000 decode operations is ~5MB of dense object declarations, or + // ~4.5MB of dense object declarations with 10% alias expansion + alias_ratio_range_high = 4000000 + + // alias_ratio_range is the range over which we scale allowed alias ratios + alias_ratio_range = float64(alias_ratio_range_high - alias_ratio_range_low) +) + +func 
allowedAliasRatio(decodeCount int) float64 { + switch { + case decodeCount <= alias_ratio_range_low: + // allow 99% to come from alias expansion for small-to-medium documents + return 0.99 + case decodeCount >= alias_ratio_range_high: + // allow 10% to come from alias expansion for very large documents + return 0.10 + default: + // scale smoothly from 99% down to 10% over the range. + // this maps to 396,000 - 400,000 allowed alias-driven decodes over the range. + // 400,000 decode operations is ~100MB of allocations in worst-case scenarios (single-item maps). + return 0.99 - 0.89*(float64(decodeCount-alias_ratio_range_low)/alias_ratio_range) + } +} + +func (d *decoder) unmarshal(n *Node, out reflect.Value) (good bool) { + d.decodeCount++ + if d.aliasDepth > 0 { + d.aliasCount++ + } + if d.aliasCount > 100 && d.decodeCount > 1000 && float64(d.aliasCount)/float64(d.decodeCount) > allowedAliasRatio(d.decodeCount) { + failf("document contains excessive aliasing") + } + if out.Type() == nodeType { + out.Set(reflect.ValueOf(n).Elem()) + return true + } + switch n.Kind { + case DocumentNode: + return d.document(n, out) + case AliasNode: + return d.alias(n, out) + } + out, unmarshaled, good := d.prepare(n, out) + if unmarshaled { + return good + } + switch n.Kind { + case ScalarNode: + good = d.scalar(n, out) + case MappingNode: + good = d.mapping(n, out) + case SequenceNode: + good = d.sequence(n, out) + case 0: + if n.IsZero() { + return d.null(out) + } + fallthrough + default: + failf("cannot decode node with unknown kind %d", n.Kind) + } + return good +} + +func (d *decoder) document(n *Node, out reflect.Value) (good bool) { + if len(n.Content) == 1 { + d.doc = n + d.unmarshal(n.Content[0], out) + return true + } + return false +} + +func (d *decoder) alias(n *Node, out reflect.Value) (good bool) { + if d.aliases[n] { + // TODO this could actually be allowed in some circumstances. 
+ failf("anchor '%s' value contains itself", n.Value) + } + d.aliases[n] = true + d.aliasDepth++ + good = d.unmarshal(n.Alias, out) + d.aliasDepth-- + delete(d.aliases, n) + return good +} + +var zeroValue reflect.Value + +func resetMap(out reflect.Value) { + for _, k := range out.MapKeys() { + out.SetMapIndex(k, zeroValue) + } +} + +func (d *decoder) null(out reflect.Value) bool { + if out.CanAddr() { + switch out.Kind() { + case reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice: + out.Set(reflect.Zero(out.Type())) + return true + } + } + return false +} + +func (d *decoder) scalar(n *Node, out reflect.Value) bool { + var tag string + var resolved interface{} + if n.indicatedString() { + tag = strTag + resolved = n.Value + } else { + tag, resolved = resolve(n.Tag, n.Value) + if tag == binaryTag { + data, err := base64.StdEncoding.DecodeString(resolved.(string)) + if err != nil { + failf("!!binary value contains invalid base64 data") + } + resolved = string(data) + } + } + if resolved == nil { + return d.null(out) + } + if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() { + // We've resolved to exactly the type we want, so use that. + out.Set(resolvedv) + return true + } + // Perhaps we can use the value as a TextUnmarshaler to + // set its value. + if out.CanAddr() { + u, ok := out.Addr().Interface().(encoding.TextUnmarshaler) + if ok { + var text []byte + if tag == binaryTag { + text = []byte(resolved.(string)) + } else { + // We let any value be unmarshaled into TextUnmarshaler. + // That might be more lax than we'd like, but the + // TextUnmarshaler itself should bowl out any dubious values. 
+ text = []byte(n.Value) + } + err := u.UnmarshalText(text) + if err != nil { + fail(err) + } + return true + } + } + switch out.Kind() { + case reflect.String: + if tag == binaryTag { + out.SetString(resolved.(string)) + return true + } + out.SetString(n.Value) + return true + case reflect.Interface: + out.Set(reflect.ValueOf(resolved)) + return true + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + // This used to work in v2, but it's very unfriendly. + isDuration := out.Type() == durationType + + switch resolved := resolved.(type) { + case int: + if !isDuration && !out.OverflowInt(int64(resolved)) { + out.SetInt(int64(resolved)) + return true + } + case int64: + if !isDuration && !out.OverflowInt(resolved) { + out.SetInt(resolved) + return true + } + case uint64: + if !isDuration && resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) { + out.SetInt(int64(resolved)) + return true + } + case float64: + if !isDuration && resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) { + out.SetInt(int64(resolved)) + return true + } + case string: + if out.Type() == durationType { + d, err := time.ParseDuration(resolved) + if err == nil { + out.SetInt(int64(d)) + return true + } + } + } + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + switch resolved := resolved.(type) { + case int: + if resolved >= 0 && !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + return true + } + case int64: + if resolved >= 0 && !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + return true + } + case uint64: + if !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + return true + } + case float64: + if resolved <= math.MaxUint64 && !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + return true + } + } + case reflect.Bool: + switch resolved := resolved.(type) { + case bool: + out.SetBool(resolved) + return true + 
case string: + // This offers some compatibility with the 1.1 spec (https://yaml.org/type/bool.html). + // It only works if explicitly attempting to unmarshal into a typed bool value. + switch resolved { + case "y", "Y", "yes", "Yes", "YES", "on", "On", "ON": + out.SetBool(true) + return true + case "n", "N", "no", "No", "NO", "off", "Off", "OFF": + out.SetBool(false) + return true + } + } + case reflect.Float32, reflect.Float64: + switch resolved := resolved.(type) { + case int: + out.SetFloat(float64(resolved)) + return true + case int64: + out.SetFloat(float64(resolved)) + return true + case uint64: + out.SetFloat(float64(resolved)) + return true + case float64: + out.SetFloat(resolved) + return true + } + case reflect.Struct: + if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() { + out.Set(resolvedv) + return true + } + case reflect.Ptr: + panic("yaml internal error: please report the issue") + } + d.terror(n, tag, out) + return false +} + +func settableValueOf(i interface{}) reflect.Value { + v := reflect.ValueOf(i) + sv := reflect.New(v.Type()).Elem() + sv.Set(v) + return sv +} + +func (d *decoder) sequence(n *Node, out reflect.Value) (good bool) { + l := len(n.Content) + + var iface reflect.Value + switch out.Kind() { + case reflect.Slice: + out.Set(reflect.MakeSlice(out.Type(), l, l)) + case reflect.Array: + if l != out.Len() { + failf("invalid array: want %d elements but got %d", out.Len(), l) + } + case reflect.Interface: + // No type hints. Will have to use a generic sequence. 
+ iface = out + out = settableValueOf(make([]interface{}, l)) + default: + d.terror(n, seqTag, out) + return false + } + et := out.Type().Elem() + + j := 0 + for i := 0; i < l; i++ { + e := reflect.New(et).Elem() + if ok := d.unmarshal(n.Content[i], e); ok { + out.Index(j).Set(e) + j++ + } + } + if out.Kind() != reflect.Array { + out.Set(out.Slice(0, j)) + } + if iface.IsValid() { + iface.Set(out) + } + return true +} + +func (d *decoder) mapping(n *Node, out reflect.Value) (good bool) { + l := len(n.Content) + if d.uniqueKeys { + nerrs := len(d.terrors) + for i := 0; i < l; i += 2 { + ni := n.Content[i] + for j := i + 2; j < l; j += 2 { + nj := n.Content[j] + if ni.Kind == nj.Kind && ni.Value == nj.Value { + d.terrors = append(d.terrors, fmt.Sprintf("line %d: mapping key %#v already defined at line %d", nj.Line, nj.Value, ni.Line)) + } + } + } + if len(d.terrors) > nerrs { + return false + } + } + switch out.Kind() { + case reflect.Struct: + return d.mappingStruct(n, out) + case reflect.Map: + // okay + case reflect.Interface: + iface := out + if isStringMap(n) { + out = reflect.MakeMap(d.stringMapType) + } else { + out = reflect.MakeMap(d.generalMapType) + } + iface.Set(out) + default: + d.terror(n, mapTag, out) + return false + } + + outt := out.Type() + kt := outt.Key() + et := outt.Elem() + + stringMapType := d.stringMapType + generalMapType := d.generalMapType + if outt.Elem() == ifaceType { + if outt.Key().Kind() == reflect.String { + d.stringMapType = outt + } else if outt.Key() == ifaceType { + d.generalMapType = outt + } + } + + mergedFields := d.mergedFields + d.mergedFields = nil + + var mergeNode *Node + + mapIsNew := false + if out.IsNil() { + out.Set(reflect.MakeMap(outt)) + mapIsNew = true + } + for i := 0; i < l; i += 2 { + if isMerge(n.Content[i]) { + mergeNode = n.Content[i+1] + continue + } + k := reflect.New(kt).Elem() + if d.unmarshal(n.Content[i], k) { + if mergedFields != nil { + ki := k.Interface() + if 
d.getPossiblyUnhashableKey(mergedFields, ki) { + continue + } + d.setPossiblyUnhashableKey(mergedFields, ki, true) + } + kkind := k.Kind() + if kkind == reflect.Interface { + kkind = k.Elem().Kind() + } + if kkind == reflect.Map || kkind == reflect.Slice { + failf("invalid map key: %#v", k.Interface()) + } + e := reflect.New(et).Elem() + if d.unmarshal(n.Content[i+1], e) || n.Content[i+1].ShortTag() == nullTag && (mapIsNew || !out.MapIndex(k).IsValid()) { + out.SetMapIndex(k, e) + } + } + } + + d.mergedFields = mergedFields + if mergeNode != nil { + d.merge(n, mergeNode, out) + } + + d.stringMapType = stringMapType + d.generalMapType = generalMapType + return true +} + +func isStringMap(n *Node) bool { + if n.Kind != MappingNode { + return false + } + l := len(n.Content) + for i := 0; i < l; i += 2 { + shortTag := n.Content[i].ShortTag() + if shortTag != strTag && shortTag != mergeTag { + return false + } + } + return true +} + +func (d *decoder) mappingStruct(n *Node, out reflect.Value) (good bool) { + sinfo, err := getStructInfo(out.Type()) + if err != nil { + panic(err) + } + + var inlineMap reflect.Value + var elemType reflect.Type + if sinfo.InlineMap != -1 { + inlineMap = out.Field(sinfo.InlineMap) + elemType = inlineMap.Type().Elem() + } + + for _, index := range sinfo.InlineUnmarshalers { + field := d.fieldByIndex(n, out, index) + d.prepare(n, field) + } + + mergedFields := d.mergedFields + d.mergedFields = nil + var mergeNode *Node + var doneFields []bool + if d.uniqueKeys { + doneFields = make([]bool, len(sinfo.FieldsList)) + } + name := settableValueOf("") + l := len(n.Content) + for i := 0; i < l; i += 2 { + ni := n.Content[i] + if isMerge(ni) { + mergeNode = n.Content[i+1] + continue + } + if !d.unmarshal(ni, name) { + continue + } + sname := name.String() + if mergedFields != nil { + if mergedFields[sname] { + continue + } + mergedFields[sname] = true + } + if info, ok := sinfo.FieldsMap[sname]; ok { + if d.uniqueKeys { + if doneFields[info.Id] { + 
d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s already set in type %s", ni.Line, name.String(), out.Type())) + continue + } + doneFields[info.Id] = true + } + var field reflect.Value + if info.Inline == nil { + field = out.Field(info.Num) + } else { + field = d.fieldByIndex(n, out, info.Inline) + } + d.unmarshal(n.Content[i+1], field) + } else if sinfo.InlineMap != -1 { + if inlineMap.IsNil() { + inlineMap.Set(reflect.MakeMap(inlineMap.Type())) + } + value := reflect.New(elemType).Elem() + d.unmarshal(n.Content[i+1], value) + inlineMap.SetMapIndex(name, value) + } else if d.knownFields { + d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s not found in type %s", ni.Line, name.String(), out.Type())) + } + } + + d.mergedFields = mergedFields + if mergeNode != nil { + d.merge(n, mergeNode, out) + } + return true +} + +func failWantMap() { + failf("map merge requires map or sequence of maps as the value") +} + +func (d *decoder) setPossiblyUnhashableKey(m map[interface{}]bool, key interface{}, value bool) { + defer func() { + if err := recover(); err != nil { + failf("%v", err) + } + }() + m[key] = value +} + +func (d *decoder) getPossiblyUnhashableKey(m map[interface{}]bool, key interface{}) bool { + defer func() { + if err := recover(); err != nil { + failf("%v", err) + } + }() + return m[key] +} + +func (d *decoder) merge(parent *Node, merge *Node, out reflect.Value) { + mergedFields := d.mergedFields + if mergedFields == nil { + d.mergedFields = make(map[interface{}]bool) + for i := 0; i < len(parent.Content); i += 2 { + k := reflect.New(ifaceType).Elem() + if d.unmarshal(parent.Content[i], k) { + d.setPossiblyUnhashableKey(d.mergedFields, k.Interface(), true) + } + } + } + + switch merge.Kind { + case MappingNode: + d.unmarshal(merge, out) + case AliasNode: + if merge.Alias != nil && merge.Alias.Kind != MappingNode { + failWantMap() + } + d.unmarshal(merge, out) + case SequenceNode: + for i := 0; i < len(merge.Content); i++ { + ni := 
merge.Content[i] + if ni.Kind == AliasNode { + if ni.Alias != nil && ni.Alias.Kind != MappingNode { + failWantMap() + } + } else if ni.Kind != MappingNode { + failWantMap() + } + d.unmarshal(ni, out) + } + default: + failWantMap() + } + + d.mergedFields = mergedFields +} + +func isMerge(n *Node) bool { + return n.Kind == ScalarNode && n.Value == "<<" && (n.Tag == "" || n.Tag == "!" || shortTag(n.Tag) == mergeTag) +} diff --git a/vendor/go.yaml.in/yaml/v3/emitterc.go b/vendor/go.yaml.in/yaml/v3/emitterc.go new file mode 100644 index 000000000..ab4e03ba7 --- /dev/null +++ b/vendor/go.yaml.in/yaml/v3/emitterc.go @@ -0,0 +1,2054 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// Copyright (c) 2006-2010 Kirill Simonov +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of +// this software and associated documentation files (the "Software"), to deal in +// the Software without restriction, including without limitation the rights to +// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +// of the Software, and to permit persons to whom the Software is furnished to do +// so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +package yaml + +import ( + "bytes" + "fmt" +) + +// Flush the buffer if needed. 
+func flush(emitter *yaml_emitter_t) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) { + return yaml_emitter_flush(emitter) + } + return true +} + +// Put a character to the output buffer. +func put(emitter *yaml_emitter_t, value byte) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { + return false + } + emitter.buffer[emitter.buffer_pos] = value + emitter.buffer_pos++ + emitter.column++ + return true +} + +// Put a line break to the output buffer. +func put_break(emitter *yaml_emitter_t) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { + return false + } + switch emitter.line_break { + case yaml_CR_BREAK: + emitter.buffer[emitter.buffer_pos] = '\r' + emitter.buffer_pos += 1 + case yaml_LN_BREAK: + emitter.buffer[emitter.buffer_pos] = '\n' + emitter.buffer_pos += 1 + case yaml_CRLN_BREAK: + emitter.buffer[emitter.buffer_pos+0] = '\r' + emitter.buffer[emitter.buffer_pos+1] = '\n' + emitter.buffer_pos += 2 + default: + panic("unknown line break setting") + } + if emitter.column == 0 { + emitter.space_above = true + } + emitter.column = 0 + emitter.line++ + // [Go] Do this here and below and drop from everywhere else (see commented lines). + emitter.indention = true + return true +} + +// Copy a character from a string into buffer. +func write(emitter *yaml_emitter_t, s []byte, i *int) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { + return false + } + p := emitter.buffer_pos + w := width(s[*i]) + switch w { + case 4: + emitter.buffer[p+3] = s[*i+3] + fallthrough + case 3: + emitter.buffer[p+2] = s[*i+2] + fallthrough + case 2: + emitter.buffer[p+1] = s[*i+1] + fallthrough + case 1: + emitter.buffer[p+0] = s[*i+0] + default: + panic("unknown character width") + } + emitter.column++ + emitter.buffer_pos += w + *i += w + return true +} + +// Write a whole string into buffer. 
+func write_all(emitter *yaml_emitter_t, s []byte) bool { + for i := 0; i < len(s); { + if !write(emitter, s, &i) { + return false + } + } + return true +} + +// Copy a line break character from a string into buffer. +func write_break(emitter *yaml_emitter_t, s []byte, i *int) bool { + if s[*i] == '\n' { + if !put_break(emitter) { + return false + } + *i++ + } else { + if !write(emitter, s, i) { + return false + } + if emitter.column == 0 { + emitter.space_above = true + } + emitter.column = 0 + emitter.line++ + // [Go] Do this here and above and drop from everywhere else (see commented lines). + emitter.indention = true + } + return true +} + +// Set an emitter error and return false. +func yaml_emitter_set_emitter_error(emitter *yaml_emitter_t, problem string) bool { + emitter.error = yaml_EMITTER_ERROR + emitter.problem = problem + return false +} + +// Emit an event. +func yaml_emitter_emit(emitter *yaml_emitter_t, event *yaml_event_t) bool { + emitter.events = append(emitter.events, *event) + for !yaml_emitter_need_more_events(emitter) { + event := &emitter.events[emitter.events_head] + if !yaml_emitter_analyze_event(emitter, event) { + return false + } + if !yaml_emitter_state_machine(emitter, event) { + return false + } + yaml_event_delete(event) + emitter.events_head++ + } + return true +} + +// Check if we need to accumulate more events before emitting. 
+// +// We accumulate extra +// - 1 event for DOCUMENT-START +// - 2 events for SEQUENCE-START +// - 3 events for MAPPING-START +func yaml_emitter_need_more_events(emitter *yaml_emitter_t) bool { + if emitter.events_head == len(emitter.events) { + return true + } + var accumulate int + switch emitter.events[emitter.events_head].typ { + case yaml_DOCUMENT_START_EVENT: + accumulate = 1 + break + case yaml_SEQUENCE_START_EVENT: + accumulate = 2 + break + case yaml_MAPPING_START_EVENT: + accumulate = 3 + break + default: + return false + } + if len(emitter.events)-emitter.events_head > accumulate { + return false + } + var level int + for i := emitter.events_head; i < len(emitter.events); i++ { + switch emitter.events[i].typ { + case yaml_STREAM_START_EVENT, yaml_DOCUMENT_START_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT: + level++ + case yaml_STREAM_END_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_END_EVENT, yaml_MAPPING_END_EVENT: + level-- + } + if level == 0 { + return false + } + } + return true +} + +// Append a directive to the directives stack. +func yaml_emitter_append_tag_directive(emitter *yaml_emitter_t, value *yaml_tag_directive_t, allow_duplicates bool) bool { + for i := 0; i < len(emitter.tag_directives); i++ { + if bytes.Equal(value.handle, emitter.tag_directives[i].handle) { + if allow_duplicates { + return true + } + return yaml_emitter_set_emitter_error(emitter, "duplicate %TAG directive") + } + } + + // [Go] Do we actually need to copy this given garbage collection + // and the lack of deallocating destructors? + tag_copy := yaml_tag_directive_t{ + handle: make([]byte, len(value.handle)), + prefix: make([]byte, len(value.prefix)), + } + copy(tag_copy.handle, value.handle) + copy(tag_copy.prefix, value.prefix) + emitter.tag_directives = append(emitter.tag_directives, tag_copy) + return true +} + +// Increase the indentation level. 
+func yaml_emitter_increase_indent_compact(emitter *yaml_emitter_t, flow, indentless bool, compact_seq bool) bool { + emitter.indents = append(emitter.indents, emitter.indent) + if emitter.indent < 0 { + if flow { + emitter.indent = emitter.best_indent + } else { + emitter.indent = 0 + } + } else if !indentless { + // [Go] This was changed so that indentations are more regular. + if emitter.states[len(emitter.states)-1] == yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE { + // The first indent inside a sequence will just skip the "- " indicator. + emitter.indent += 2 + } else { + // Everything else aligns to the chosen indentation. + emitter.indent = emitter.best_indent * ((emitter.indent + emitter.best_indent) / emitter.best_indent) + if compact_seq { + // The value compact_seq passed in is almost always set to `false` when this function is called, + // except when we are dealing with sequence nodes. So this gets triggered to subtract 2 only when we + // are increasing the indent to account for sequence nodes, which will be correct because we need to + // subtract 2 to account for the - at the beginning of the sequence node. + emitter.indent = emitter.indent - 2 + } + } + } + return true +} + +// State dispatcher. 
+func yaml_emitter_state_machine(emitter *yaml_emitter_t, event *yaml_event_t) bool { + switch emitter.state { + default: + case yaml_EMIT_STREAM_START_STATE: + return yaml_emitter_emit_stream_start(emitter, event) + + case yaml_EMIT_FIRST_DOCUMENT_START_STATE: + return yaml_emitter_emit_document_start(emitter, event, true) + + case yaml_EMIT_DOCUMENT_START_STATE: + return yaml_emitter_emit_document_start(emitter, event, false) + + case yaml_EMIT_DOCUMENT_CONTENT_STATE: + return yaml_emitter_emit_document_content(emitter, event) + + case yaml_EMIT_DOCUMENT_END_STATE: + return yaml_emitter_emit_document_end(emitter, event) + + case yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE: + return yaml_emitter_emit_flow_sequence_item(emitter, event, true, false) + + case yaml_EMIT_FLOW_SEQUENCE_TRAIL_ITEM_STATE: + return yaml_emitter_emit_flow_sequence_item(emitter, event, false, true) + + case yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE: + return yaml_emitter_emit_flow_sequence_item(emitter, event, false, false) + + case yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE: + return yaml_emitter_emit_flow_mapping_key(emitter, event, true, false) + + case yaml_EMIT_FLOW_MAPPING_TRAIL_KEY_STATE: + return yaml_emitter_emit_flow_mapping_key(emitter, event, false, true) + + case yaml_EMIT_FLOW_MAPPING_KEY_STATE: + return yaml_emitter_emit_flow_mapping_key(emitter, event, false, false) + + case yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE: + return yaml_emitter_emit_flow_mapping_value(emitter, event, true) + + case yaml_EMIT_FLOW_MAPPING_VALUE_STATE: + return yaml_emitter_emit_flow_mapping_value(emitter, event, false) + + case yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE: + return yaml_emitter_emit_block_sequence_item(emitter, event, true) + + case yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE: + return yaml_emitter_emit_block_sequence_item(emitter, event, false) + + case yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE: + return yaml_emitter_emit_block_mapping_key(emitter, event, true) + + case yaml_EMIT_BLOCK_MAPPING_KEY_STATE: + 
return yaml_emitter_emit_block_mapping_key(emitter, event, false) + + case yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE: + return yaml_emitter_emit_block_mapping_value(emitter, event, true) + + case yaml_EMIT_BLOCK_MAPPING_VALUE_STATE: + return yaml_emitter_emit_block_mapping_value(emitter, event, false) + + case yaml_EMIT_END_STATE: + return yaml_emitter_set_emitter_error(emitter, "expected nothing after STREAM-END") + } + panic("invalid emitter state") +} + +// Expect STREAM-START. +func yaml_emitter_emit_stream_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if event.typ != yaml_STREAM_START_EVENT { + return yaml_emitter_set_emitter_error(emitter, "expected STREAM-START") + } + if emitter.encoding == yaml_ANY_ENCODING { + emitter.encoding = event.encoding + if emitter.encoding == yaml_ANY_ENCODING { + emitter.encoding = yaml_UTF8_ENCODING + } + } + if emitter.best_indent < 2 || emitter.best_indent > 9 { + emitter.best_indent = 2 + } + if emitter.best_width >= 0 && emitter.best_width <= emitter.best_indent*2 { + emitter.best_width = 80 + } + if emitter.best_width < 0 { + emitter.best_width = 1<<31 - 1 + } + if emitter.line_break == yaml_ANY_BREAK { + emitter.line_break = yaml_LN_BREAK + } + + emitter.indent = -1 + emitter.line = 0 + emitter.column = 0 + emitter.whitespace = true + emitter.indention = true + emitter.space_above = true + emitter.foot_indent = -1 + + if emitter.encoding != yaml_UTF8_ENCODING { + if !yaml_emitter_write_bom(emitter) { + return false + } + } + emitter.state = yaml_EMIT_FIRST_DOCUMENT_START_STATE + return true +} + +// Expect DOCUMENT-START or STREAM-END. 
+func yaml_emitter_emit_document_start(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + + if event.typ == yaml_DOCUMENT_START_EVENT { + + if event.version_directive != nil { + if !yaml_emitter_analyze_version_directive(emitter, event.version_directive) { + return false + } + } + + for i := 0; i < len(event.tag_directives); i++ { + tag_directive := &event.tag_directives[i] + if !yaml_emitter_analyze_tag_directive(emitter, tag_directive) { + return false + } + if !yaml_emitter_append_tag_directive(emitter, tag_directive, false) { + return false + } + } + + for i := 0; i < len(default_tag_directives); i++ { + tag_directive := &default_tag_directives[i] + if !yaml_emitter_append_tag_directive(emitter, tag_directive, true) { + return false + } + } + + implicit := event.implicit + if !first || emitter.canonical { + implicit = false + } + + if emitter.open_ended && (event.version_directive != nil || len(event.tag_directives) > 0) { + if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if event.version_directive != nil { + implicit = false + if !yaml_emitter_write_indicator(emitter, []byte("%YAML"), true, false, false) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte("1.1"), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if len(event.tag_directives) > 0 { + implicit = false + for i := 0; i < len(event.tag_directives); i++ { + tag_directive := &event.tag_directives[i] + if !yaml_emitter_write_indicator(emitter, []byte("%TAG"), true, false, false) { + return false + } + if !yaml_emitter_write_tag_handle(emitter, tag_directive.handle) { + return false + } + if !yaml_emitter_write_tag_content(emitter, tag_directive.prefix, true) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + } + + if 
yaml_emitter_check_empty_document(emitter) { + implicit = false + } + if !implicit { + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte("---"), true, false, false) { + return false + } + if emitter.canonical || true { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + } + + if len(emitter.head_comment) > 0 { + if !yaml_emitter_process_head_comment(emitter) { + return false + } + if !put_break(emitter) { + return false + } + } + + emitter.state = yaml_EMIT_DOCUMENT_CONTENT_STATE + return true + } + + if event.typ == yaml_STREAM_END_EVENT { + if emitter.open_ended { + if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_flush(emitter) { + return false + } + emitter.state = yaml_EMIT_END_STATE + return true + } + + return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-START or STREAM-END") +} + +// yaml_emitter_increase_indent preserves the original signature and delegates to +// yaml_emitter_increase_indent_compact without compact-sequence indentation +func yaml_emitter_increase_indent(emitter *yaml_emitter_t, flow, indentless bool) bool { + return yaml_emitter_increase_indent_compact(emitter, flow, indentless, false) +} + +// yaml_emitter_process_line_comment preserves the original signature and delegates to +// yaml_emitter_process_line_comment_linebreak passing false for linebreak +func yaml_emitter_process_line_comment(emitter *yaml_emitter_t) bool { + return yaml_emitter_process_line_comment_linebreak(emitter, false) +} + +// Expect the root node. 
+func yaml_emitter_emit_document_content(emitter *yaml_emitter_t, event *yaml_event_t) bool { + emitter.states = append(emitter.states, yaml_EMIT_DOCUMENT_END_STATE) + + if !yaml_emitter_process_head_comment(emitter) { + return false + } + if !yaml_emitter_emit_node(emitter, event, true, false, false, false) { + return false + } + if !yaml_emitter_process_line_comment(emitter) { + return false + } + if !yaml_emitter_process_foot_comment(emitter) { + return false + } + return true +} + +// Expect DOCUMENT-END. +func yaml_emitter_emit_document_end(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if event.typ != yaml_DOCUMENT_END_EVENT { + return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-END") + } + // [Go] Force document foot separation. + emitter.foot_indent = 0 + if !yaml_emitter_process_foot_comment(emitter) { + return false + } + emitter.foot_indent = -1 + if !yaml_emitter_write_indent(emitter) { + return false + } + if !event.implicit { + // [Go] Allocate the slice elsewhere. + if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_flush(emitter) { + return false + } + emitter.state = yaml_EMIT_DOCUMENT_START_STATE + emitter.tag_directives = emitter.tag_directives[:0] + return true +} + +// Expect a flow item node. 
+func yaml_emitter_emit_flow_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first, trail bool) bool { + if first { + if !yaml_emitter_write_indicator(emitter, []byte{'['}, true, true, false) { + return false + } + if !yaml_emitter_increase_indent(emitter, true, false) { + return false + } + emitter.flow_level++ + } + + if event.typ == yaml_SEQUENCE_END_EVENT { + if emitter.canonical && !first && !trail { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + emitter.flow_level-- + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + if emitter.column == 0 || emitter.canonical && !first { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{']'}, false, false, false) { + return false + } + if !yaml_emitter_process_line_comment(emitter) { + return false + } + if !yaml_emitter_process_foot_comment(emitter) { + return false + } + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + + return true + } + + if !first && !trail { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + + if !yaml_emitter_process_head_comment(emitter) { + return false + } + if emitter.column == 0 { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if emitter.canonical || emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if len(emitter.line_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0 { + emitter.states = append(emitter.states, yaml_EMIT_FLOW_SEQUENCE_TRAIL_ITEM_STATE) + } else { + emitter.states = append(emitter.states, yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE) + } + if !yaml_emitter_emit_node(emitter, event, false, true, false, false) { + return false + } + if 
len(emitter.line_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0 { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + if !yaml_emitter_process_line_comment(emitter) { + return false + } + if !yaml_emitter_process_foot_comment(emitter) { + return false + } + return true +} + +// Expect a flow key node. +func yaml_emitter_emit_flow_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first, trail bool) bool { + if first { + if !yaml_emitter_write_indicator(emitter, []byte{'{'}, true, true, false) { + return false + } + if !yaml_emitter_increase_indent(emitter, true, false) { + return false + } + emitter.flow_level++ + } + + if event.typ == yaml_MAPPING_END_EVENT { + if (emitter.canonical || len(emitter.head_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0) && !first && !trail { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + if !yaml_emitter_process_head_comment(emitter) { + return false + } + emitter.flow_level-- + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + if emitter.canonical && !first { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{'}'}, false, false, false) { + return false + } + if !yaml_emitter_process_line_comment(emitter) { + return false + } + if !yaml_emitter_process_foot_comment(emitter) { + return false + } + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true + } + + if !first && !trail { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + + if !yaml_emitter_process_head_comment(emitter) { + return false + } + + if emitter.column == 0 { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if emitter.canonical || 
emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if !emitter.canonical && yaml_emitter_check_simple_key(emitter) { + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, true) + } + if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, false) { + return false + } + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a flow value node. +func yaml_emitter_emit_flow_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool { + if simple { + if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) { + return false + } + } else { + if emitter.canonical || emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, false) { + return false + } + } + if len(emitter.line_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0 { + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_TRAIL_KEY_STATE) + } else { + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_KEY_STATE) + } + if !yaml_emitter_emit_node(emitter, event, false, false, true, false) { + return false + } + if len(emitter.line_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0 { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + if !yaml_emitter_process_line_comment(emitter) { + return false + } + if !yaml_emitter_process_foot_comment(emitter) { + return false + } + return true +} + +// Expect a block item node. 
+func yaml_emitter_emit_block_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + if first { + // emitter.mapping context tells us if we are currently in a mapping context. + // emiiter.column tells us which column we are in in the yaml output. 0 is the first char of the column. + // emitter.indentation tells us if the last character was an indentation character. + // emitter.compact_sequence_indent tells us if '- ' is considered part of the indentation for sequence elements. + // So, `seq` means that we are in a mapping context, and we are either at the first char of the column or + // the last character was not an indentation character, and we consider '- ' part of the indentation + // for sequence elements. + seq := emitter.mapping_context && (emitter.column == 0 || !emitter.indention) && + emitter.compact_sequence_indent + if !yaml_emitter_increase_indent_compact(emitter, false, false, seq) { + return false + } + } + if event.typ == yaml_SEQUENCE_END_EVENT { + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true + } + if !yaml_emitter_process_head_comment(emitter) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte{'-'}, true, false, true) { + return false + } + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE) + if !yaml_emitter_emit_node(emitter, event, false, true, false, false) { + return false + } + if !yaml_emitter_process_line_comment(emitter) { + return false + } + if !yaml_emitter_process_foot_comment(emitter) { + return false + } + return true +} + +// Expect a block key node. 
+func yaml_emitter_emit_block_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + if first { + if !yaml_emitter_increase_indent(emitter, false, false) { + return false + } + } + if !yaml_emitter_process_head_comment(emitter) { + return false + } + if event.typ == yaml_MAPPING_END_EVENT { + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true + } + if !yaml_emitter_write_indent(emitter) { + return false + } + if len(emitter.line_comment) > 0 { + // [Go] A line comment was provided for the key. That's unusual as the + // scanner associates line comments with the value. Either way, + // save the line comment and render it appropriately later. + emitter.key_line_comment = emitter.line_comment + emitter.line_comment = nil + } + if yaml_emitter_check_simple_key(emitter) { + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, true) + } + if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, true) { + return false + } + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a block value node. 
+func yaml_emitter_emit_block_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool { + if simple { + if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) { + return false + } + } else { + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, true) { + return false + } + } + if len(emitter.key_line_comment) > 0 { + // [Go] Line comments are generally associated with the value, but when there's + // no value on the same line as a mapping key they end up attached to the + // key itself. + if event.typ == yaml_SCALAR_EVENT { + if len(emitter.line_comment) == 0 { + // A scalar is coming and it has no line comments by itself yet, + // so just let it handle the line comment as usual. If it has a + // line comment, we can't have both so the one from the key is lost. + emitter.line_comment = emitter.key_line_comment + emitter.key_line_comment = nil + } + } else if event.sequence_style() != yaml_FLOW_SEQUENCE_STYLE && (event.typ == yaml_MAPPING_START_EVENT || event.typ == yaml_SEQUENCE_START_EVENT) { + // An indented block follows, so write the comment right now. 
+ emitter.line_comment, emitter.key_line_comment = emitter.key_line_comment, emitter.line_comment + if !yaml_emitter_process_line_comment(emitter) { + return false + } + emitter.line_comment, emitter.key_line_comment = emitter.key_line_comment, emitter.line_comment + } + } + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_KEY_STATE) + if !yaml_emitter_emit_node(emitter, event, false, false, true, false) { + return false + } + if !yaml_emitter_process_line_comment(emitter) { + return false + } + if !yaml_emitter_process_foot_comment(emitter) { + return false + } + return true +} + +func yaml_emitter_silent_nil_event(emitter *yaml_emitter_t, event *yaml_event_t) bool { + return event.typ == yaml_SCALAR_EVENT && event.implicit && !emitter.canonical && len(emitter.scalar_data.value) == 0 +} + +// Expect a node. +func yaml_emitter_emit_node(emitter *yaml_emitter_t, event *yaml_event_t, + root bool, sequence bool, mapping bool, simple_key bool) bool { + + emitter.root_context = root + emitter.sequence_context = sequence + emitter.mapping_context = mapping + emitter.simple_key_context = simple_key + + switch event.typ { + case yaml_ALIAS_EVENT: + return yaml_emitter_emit_alias(emitter, event) + case yaml_SCALAR_EVENT: + return yaml_emitter_emit_scalar(emitter, event) + case yaml_SEQUENCE_START_EVENT: + return yaml_emitter_emit_sequence_start(emitter, event) + case yaml_MAPPING_START_EVENT: + return yaml_emitter_emit_mapping_start(emitter, event) + default: + return yaml_emitter_set_emitter_error(emitter, + fmt.Sprintf("expected SCALAR, SEQUENCE-START, MAPPING-START, or ALIAS, but got %v", event.typ)) + } +} + +// Expect ALIAS. +func yaml_emitter_emit_alias(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_process_anchor(emitter) { + return false + } + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true +} + +// Expect SCALAR. 
+func yaml_emitter_emit_scalar(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_select_scalar_style(emitter, event) { + return false + } + if !yaml_emitter_process_anchor(emitter) { + return false + } + if !yaml_emitter_process_tag(emitter) { + return false + } + if !yaml_emitter_increase_indent(emitter, true, false) { + return false + } + if !yaml_emitter_process_scalar(emitter) { + return false + } + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true +} + +// Expect SEQUENCE-START. +func yaml_emitter_emit_sequence_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_process_anchor(emitter) { + return false + } + if !yaml_emitter_process_tag(emitter) { + return false + } + if emitter.flow_level > 0 || emitter.canonical || event.sequence_style() == yaml_FLOW_SEQUENCE_STYLE || + yaml_emitter_check_empty_sequence(emitter) { + emitter.state = yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE + } else { + emitter.state = yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE + } + return true +} + +// Expect MAPPING-START. +func yaml_emitter_emit_mapping_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_process_anchor(emitter) { + return false + } + if !yaml_emitter_process_tag(emitter) { + return false + } + if emitter.flow_level > 0 || emitter.canonical || event.mapping_style() == yaml_FLOW_MAPPING_STYLE || + yaml_emitter_check_empty_mapping(emitter) { + emitter.state = yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE + } else { + emitter.state = yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE + } + return true +} + +// Check if the document content is an empty scalar. +func yaml_emitter_check_empty_document(emitter *yaml_emitter_t) bool { + return false // [Go] Huh? +} + +// Check if the next events represent an empty sequence. 
+func yaml_emitter_check_empty_sequence(emitter *yaml_emitter_t) bool { + if len(emitter.events)-emitter.events_head < 2 { + return false + } + return emitter.events[emitter.events_head].typ == yaml_SEQUENCE_START_EVENT && + emitter.events[emitter.events_head+1].typ == yaml_SEQUENCE_END_EVENT +} + +// Check if the next events represent an empty mapping. +func yaml_emitter_check_empty_mapping(emitter *yaml_emitter_t) bool { + if len(emitter.events)-emitter.events_head < 2 { + return false + } + return emitter.events[emitter.events_head].typ == yaml_MAPPING_START_EVENT && + emitter.events[emitter.events_head+1].typ == yaml_MAPPING_END_EVENT +} + +// Check if the next node can be expressed as a simple key. +func yaml_emitter_check_simple_key(emitter *yaml_emitter_t) bool { + length := 0 + switch emitter.events[emitter.events_head].typ { + case yaml_ALIAS_EVENT: + length += len(emitter.anchor_data.anchor) + case yaml_SCALAR_EVENT: + if emitter.scalar_data.multiline { + return false + } + length += len(emitter.anchor_data.anchor) + + len(emitter.tag_data.handle) + + len(emitter.tag_data.suffix) + + len(emitter.scalar_data.value) + case yaml_SEQUENCE_START_EVENT: + if !yaml_emitter_check_empty_sequence(emitter) { + return false + } + length += len(emitter.anchor_data.anchor) + + len(emitter.tag_data.handle) + + len(emitter.tag_data.suffix) + case yaml_MAPPING_START_EVENT: + if !yaml_emitter_check_empty_mapping(emitter) { + return false + } + length += len(emitter.anchor_data.anchor) + + len(emitter.tag_data.handle) + + len(emitter.tag_data.suffix) + default: + return false + } + return length <= 128 +} + +// Determine an acceptable scalar style. 
+func yaml_emitter_select_scalar_style(emitter *yaml_emitter_t, event *yaml_event_t) bool { + + no_tag := len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 + if no_tag && !event.implicit && !event.quoted_implicit { + return yaml_emitter_set_emitter_error(emitter, "neither tag nor implicit flags are specified") + } + + style := event.scalar_style() + if style == yaml_ANY_SCALAR_STYLE { + style = yaml_PLAIN_SCALAR_STYLE + } + if emitter.canonical { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + if emitter.simple_key_context && emitter.scalar_data.multiline { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + + if style == yaml_PLAIN_SCALAR_STYLE { + if emitter.flow_level > 0 && !emitter.scalar_data.flow_plain_allowed || + emitter.flow_level == 0 && !emitter.scalar_data.block_plain_allowed { + style = yaml_SINGLE_QUOTED_SCALAR_STYLE + } + if len(emitter.scalar_data.value) == 0 && (emitter.flow_level > 0 || emitter.simple_key_context) { + style = yaml_SINGLE_QUOTED_SCALAR_STYLE + } + if no_tag && !event.implicit { + style = yaml_SINGLE_QUOTED_SCALAR_STYLE + } + } + if style == yaml_SINGLE_QUOTED_SCALAR_STYLE { + if !emitter.scalar_data.single_quoted_allowed { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + } + if style == yaml_LITERAL_SCALAR_STYLE || style == yaml_FOLDED_SCALAR_STYLE { + if !emitter.scalar_data.block_allowed || emitter.flow_level > 0 || emitter.simple_key_context { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + } + + if no_tag && !event.quoted_implicit && style != yaml_PLAIN_SCALAR_STYLE { + emitter.tag_data.handle = []byte{'!'} + } + emitter.scalar_data.style = style + return true +} + +// Write an anchor. 
+func yaml_emitter_process_anchor(emitter *yaml_emitter_t) bool { + if emitter.anchor_data.anchor == nil { + return true + } + c := []byte{'&'} + if emitter.anchor_data.alias { + c[0] = '*' + } + if !yaml_emitter_write_indicator(emitter, c, true, false, false) { + return false + } + return yaml_emitter_write_anchor(emitter, emitter.anchor_data.anchor) +} + +// Write a tag. +func yaml_emitter_process_tag(emitter *yaml_emitter_t) bool { + if len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 { + return true + } + if len(emitter.tag_data.handle) > 0 { + if !yaml_emitter_write_tag_handle(emitter, emitter.tag_data.handle) { + return false + } + if len(emitter.tag_data.suffix) > 0 { + if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) { + return false + } + } + } else { + // [Go] Allocate these slices elsewhere. + if !yaml_emitter_write_indicator(emitter, []byte("!<"), true, false, false) { + return false + } + if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte{'>'}, false, false, false) { + return false + } + } + return true +} + +// Write a scalar. 
+func yaml_emitter_process_scalar(emitter *yaml_emitter_t) bool { + switch emitter.scalar_data.style { + case yaml_PLAIN_SCALAR_STYLE: + return yaml_emitter_write_plain_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) + + case yaml_SINGLE_QUOTED_SCALAR_STYLE: + return yaml_emitter_write_single_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) + + case yaml_DOUBLE_QUOTED_SCALAR_STYLE: + return yaml_emitter_write_double_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) + + case yaml_LITERAL_SCALAR_STYLE: + return yaml_emitter_write_literal_scalar(emitter, emitter.scalar_data.value) + + case yaml_FOLDED_SCALAR_STYLE: + return yaml_emitter_write_folded_scalar(emitter, emitter.scalar_data.value) + } + panic("unknown scalar style") +} + +// Write a head comment. +func yaml_emitter_process_head_comment(emitter *yaml_emitter_t) bool { + if len(emitter.tail_comment) > 0 { + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_comment(emitter, emitter.tail_comment) { + return false + } + emitter.tail_comment = emitter.tail_comment[:0] + emitter.foot_indent = emitter.indent + if emitter.foot_indent < 0 { + emitter.foot_indent = 0 + } + } + + if len(emitter.head_comment) == 0 { + return true + } + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_comment(emitter, emitter.head_comment) { + return false + } + emitter.head_comment = emitter.head_comment[:0] + return true +} + +// Write an line comment. +func yaml_emitter_process_line_comment_linebreak(emitter *yaml_emitter_t, linebreak bool) bool { + if len(emitter.line_comment) == 0 { + // The next 3 lines are needed to resolve an issue with leading newlines + // See https://github.com/go-yaml/yaml/issues/755 + // When linebreak is set to true, put_break will be called and will add + // the needed newline. 
+ if linebreak && !put_break(emitter) { + return false + } + return true + } + if !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + if !yaml_emitter_write_comment(emitter, emitter.line_comment) { + return false + } + emitter.line_comment = emitter.line_comment[:0] + return true +} + +// Write a foot comment. +func yaml_emitter_process_foot_comment(emitter *yaml_emitter_t) bool { + if len(emitter.foot_comment) == 0 { + return true + } + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_comment(emitter, emitter.foot_comment) { + return false + } + emitter.foot_comment = emitter.foot_comment[:0] + emitter.foot_indent = emitter.indent + if emitter.foot_indent < 0 { + emitter.foot_indent = 0 + } + return true +} + +// Check if a %YAML directive is valid. +func yaml_emitter_analyze_version_directive(emitter *yaml_emitter_t, version_directive *yaml_version_directive_t) bool { + if version_directive.major != 1 || version_directive.minor != 1 { + return yaml_emitter_set_emitter_error(emitter, "incompatible %YAML directive") + } + return true +} + +// Check if a %TAG directive is valid. +func yaml_emitter_analyze_tag_directive(emitter *yaml_emitter_t, tag_directive *yaml_tag_directive_t) bool { + handle := tag_directive.handle + prefix := tag_directive.prefix + if len(handle) == 0 { + return yaml_emitter_set_emitter_error(emitter, "tag handle must not be empty") + } + if handle[0] != '!' { + return yaml_emitter_set_emitter_error(emitter, "tag handle must start with '!'") + } + if handle[len(handle)-1] != '!' 
{ + return yaml_emitter_set_emitter_error(emitter, "tag handle must end with '!'") + } + for i := 1; i < len(handle)-1; i += width(handle[i]) { + if !is_alpha(handle, i) { + return yaml_emitter_set_emitter_error(emitter, "tag handle must contain alphanumerical characters only") + } + } + if len(prefix) == 0 { + return yaml_emitter_set_emitter_error(emitter, "tag prefix must not be empty") + } + return true +} + +// Check if an anchor is valid. +func yaml_emitter_analyze_anchor(emitter *yaml_emitter_t, anchor []byte, alias bool) bool { + if len(anchor) == 0 { + problem := "anchor value must not be empty" + if alias { + problem = "alias value must not be empty" + } + return yaml_emitter_set_emitter_error(emitter, problem) + } + for i := 0; i < len(anchor); i += width(anchor[i]) { + if !is_alpha(anchor, i) { + problem := "anchor value must contain alphanumerical characters only" + if alias { + problem = "alias value must contain alphanumerical characters only" + } + return yaml_emitter_set_emitter_error(emitter, problem) + } + } + emitter.anchor_data.anchor = anchor + emitter.anchor_data.alias = alias + return true +} + +// Check if a tag is valid. +func yaml_emitter_analyze_tag(emitter *yaml_emitter_t, tag []byte) bool { + if len(tag) == 0 { + return yaml_emitter_set_emitter_error(emitter, "tag value must not be empty") + } + for i := 0; i < len(emitter.tag_directives); i++ { + tag_directive := &emitter.tag_directives[i] + if bytes.HasPrefix(tag, tag_directive.prefix) { + emitter.tag_data.handle = tag_directive.handle + emitter.tag_data.suffix = tag[len(tag_directive.prefix):] + return true + } + } + emitter.tag_data.suffix = tag + return true +} + +// Check if a scalar is valid. 
+func yaml_emitter_analyze_scalar(emitter *yaml_emitter_t, value []byte) bool { + var ( + block_indicators = false + flow_indicators = false + line_breaks = false + special_characters = false + tab_characters = false + + leading_space = false + leading_break = false + trailing_space = false + trailing_break = false + break_space = false + space_break = false + + preceded_by_whitespace = false + followed_by_whitespace = false + previous_space = false + previous_break = false + ) + + emitter.scalar_data.value = value + + if len(value) == 0 { + emitter.scalar_data.multiline = false + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = true + emitter.scalar_data.single_quoted_allowed = true + emitter.scalar_data.block_allowed = false + return true + } + + if len(value) >= 3 && ((value[0] == '-' && value[1] == '-' && value[2] == '-') || (value[0] == '.' && value[1] == '.' && value[2] == '.')) { + block_indicators = true + flow_indicators = true + } + + preceded_by_whitespace = true + for i, w := 0, 0; i < len(value); i += w { + w = width(value[i]) + followed_by_whitespace = i+w >= len(value) || is_blank(value, i+w) + + if i == 0 { + switch value[i] { + case '#', ',', '[', ']', '{', '}', '&', '*', '!', '|', '>', '\'', '"', '%', '@', '`': + flow_indicators = true + block_indicators = true + case '?', ':': + flow_indicators = true + if followed_by_whitespace { + block_indicators = true + } + case '-': + if followed_by_whitespace { + flow_indicators = true + block_indicators = true + } + } + } else { + switch value[i] { + case ',', '?', '[', ']', '{', '}': + flow_indicators = true + case ':': + flow_indicators = true + if followed_by_whitespace { + block_indicators = true + } + case '#': + if preceded_by_whitespace { + flow_indicators = true + block_indicators = true + } + } + } + + if value[i] == '\t' { + tab_characters = true + } else if !is_printable(value, i) || !is_ascii(value, i) && !emitter.unicode { + special_characters = true 
+ } + if is_space(value, i) { + if i == 0 { + leading_space = true + } + if i+width(value[i]) == len(value) { + trailing_space = true + } + if previous_break { + break_space = true + } + previous_space = true + previous_break = false + } else if is_break(value, i) { + line_breaks = true + if i == 0 { + leading_break = true + } + if i+width(value[i]) == len(value) { + trailing_break = true + } + if previous_space { + space_break = true + } + previous_space = false + previous_break = true + } else { + previous_space = false + previous_break = false + } + + // [Go]: Why 'z'? Couldn't be the end of the string as that's the loop condition. + preceded_by_whitespace = is_blankz(value, i) + } + + emitter.scalar_data.multiline = line_breaks + emitter.scalar_data.flow_plain_allowed = true + emitter.scalar_data.block_plain_allowed = true + emitter.scalar_data.single_quoted_allowed = true + emitter.scalar_data.block_allowed = true + + if leading_space || leading_break || trailing_space || trailing_break { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + } + if trailing_space { + emitter.scalar_data.block_allowed = false + } + if break_space { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + emitter.scalar_data.single_quoted_allowed = false + } + if space_break || tab_characters || special_characters { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + emitter.scalar_data.single_quoted_allowed = false + } + if space_break || special_characters { + emitter.scalar_data.block_allowed = false + } + if line_breaks { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + } + if flow_indicators { + emitter.scalar_data.flow_plain_allowed = false + } + if block_indicators { + emitter.scalar_data.block_plain_allowed = false + } + return true +} + +// Check if the event data is valid. 
+func yaml_emitter_analyze_event(emitter *yaml_emitter_t, event *yaml_event_t) bool { + + emitter.anchor_data.anchor = nil + emitter.tag_data.handle = nil + emitter.tag_data.suffix = nil + emitter.scalar_data.value = nil + + if len(event.head_comment) > 0 { + emitter.head_comment = event.head_comment + } + if len(event.line_comment) > 0 { + emitter.line_comment = event.line_comment + } + if len(event.foot_comment) > 0 { + emitter.foot_comment = event.foot_comment + } + if len(event.tail_comment) > 0 { + emitter.tail_comment = event.tail_comment + } + + switch event.typ { + case yaml_ALIAS_EVENT: + if !yaml_emitter_analyze_anchor(emitter, event.anchor, true) { + return false + } + + case yaml_SCALAR_EVENT: + if len(event.anchor) > 0 { + if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { + return false + } + } + if len(event.tag) > 0 && (emitter.canonical || (!event.implicit && !event.quoted_implicit)) { + if !yaml_emitter_analyze_tag(emitter, event.tag) { + return false + } + } + if !yaml_emitter_analyze_scalar(emitter, event.value) { + return false + } + + case yaml_SEQUENCE_START_EVENT: + if len(event.anchor) > 0 { + if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { + return false + } + } + if len(event.tag) > 0 && (emitter.canonical || !event.implicit) { + if !yaml_emitter_analyze_tag(emitter, event.tag) { + return false + } + } + + case yaml_MAPPING_START_EVENT: + if len(event.anchor) > 0 { + if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { + return false + } + } + if len(event.tag) > 0 && (emitter.canonical || !event.implicit) { + if !yaml_emitter_analyze_tag(emitter, event.tag) { + return false + } + } + } + return true +} + +// Write the BOM character. 
+func yaml_emitter_write_bom(emitter *yaml_emitter_t) bool { + if !flush(emitter) { + return false + } + pos := emitter.buffer_pos + emitter.buffer[pos+0] = '\xEF' + emitter.buffer[pos+1] = '\xBB' + emitter.buffer[pos+2] = '\xBF' + emitter.buffer_pos += 3 + return true +} + +func yaml_emitter_write_indent(emitter *yaml_emitter_t) bool { + indent := emitter.indent + if indent < 0 { + indent = 0 + } + if !emitter.indention || emitter.column > indent || (emitter.column == indent && !emitter.whitespace) { + if !put_break(emitter) { + return false + } + } + if emitter.foot_indent == indent { + if !put_break(emitter) { + return false + } + } + for emitter.column < indent { + if !put(emitter, ' ') { + return false + } + } + emitter.whitespace = true + //emitter.indention = true + emitter.space_above = false + emitter.foot_indent = -1 + return true +} + +func yaml_emitter_write_indicator(emitter *yaml_emitter_t, indicator []byte, need_whitespace, is_whitespace, is_indention bool) bool { + if need_whitespace && !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + if !write_all(emitter, indicator) { + return false + } + emitter.whitespace = is_whitespace + emitter.indention = (emitter.indention && is_indention) + emitter.open_ended = false + return true +} + +func yaml_emitter_write_anchor(emitter *yaml_emitter_t, value []byte) bool { + if !write_all(emitter, value) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_tag_handle(emitter *yaml_emitter_t, value []byte) bool { + if !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + if !write_all(emitter, value) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_tag_content(emitter *yaml_emitter_t, value []byte, need_whitespace bool) bool { + if need_whitespace && !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + for i := 0; i 
< len(value); { + var must_write bool + switch value[i] { + case ';', '/', '?', ':', '@', '&', '=', '+', '$', ',', '_', '.', '~', '*', '\'', '(', ')', '[', ']': + must_write = true + default: + must_write = is_alpha(value, i) + } + if must_write { + if !write(emitter, value, &i) { + return false + } + } else { + w := width(value[i]) + for k := 0; k < w; k++ { + octet := value[i] + i++ + if !put(emitter, '%') { + return false + } + + c := octet >> 4 + if c < 10 { + c += '0' + } else { + c += 'A' - 10 + } + if !put(emitter, c) { + return false + } + + c = octet & 0x0f + if c < 10 { + c += '0' + } else { + c += 'A' - 10 + } + if !put(emitter, c) { + return false + } + } + } + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_plain_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { + if len(value) > 0 && !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + + spaces := false + breaks := false + for i := 0; i < len(value); { + if is_space(value, i) { + if allow_breaks && !spaces && emitter.column > emitter.best_width && !is_space(value, i+1) { + if !yaml_emitter_write_indent(emitter) { + return false + } + i += width(value[i]) + } else { + if !write(emitter, value, &i) { + return false + } + } + spaces = true + } else if is_break(value, i) { + if !breaks && value[i] == '\n' { + if !put_break(emitter) { + return false + } + } + if !write_break(emitter, value, &i) { + return false + } + //emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !write(emitter, value, &i) { + return false + } + emitter.indention = false + spaces = false + breaks = false + } + } + + if len(value) > 0 { + emitter.whitespace = false + } + emitter.indention = false + if emitter.root_context { + emitter.open_ended = true + } + + return true +} + +func yaml_emitter_write_single_quoted_scalar(emitter *yaml_emitter_t, value 
[]byte, allow_breaks bool) bool { + + if !yaml_emitter_write_indicator(emitter, []byte{'\''}, true, false, false) { + return false + } + + spaces := false + breaks := false + for i := 0; i < len(value); { + if is_space(value, i) { + if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 && !is_space(value, i+1) { + if !yaml_emitter_write_indent(emitter) { + return false + } + i += width(value[i]) + } else { + if !write(emitter, value, &i) { + return false + } + } + spaces = true + } else if is_break(value, i) { + if !breaks && value[i] == '\n' { + if !put_break(emitter) { + return false + } + } + if !write_break(emitter, value, &i) { + return false + } + //emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if value[i] == '\'' { + if !put(emitter, '\'') { + return false + } + } + if !write(emitter, value, &i) { + return false + } + emitter.indention = false + spaces = false + breaks = false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{'\''}, false, false, false) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_double_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { + spaces := false + if !yaml_emitter_write_indicator(emitter, []byte{'"'}, true, false, false) { + return false + } + + for i := 0; i < len(value); { + if !is_printable(value, i) || (!emitter.unicode && !is_ascii(value, i)) || + is_bom(value, i) || is_break(value, i) || + value[i] == '"' || value[i] == '\\' { + + octet := value[i] + + var w int + var v rune + switch { + case octet&0x80 == 0x00: + w, v = 1, rune(octet&0x7F) + case octet&0xE0 == 0xC0: + w, v = 2, rune(octet&0x1F) + case octet&0xF0 == 0xE0: + w, v = 3, rune(octet&0x0F) + case octet&0xF8 == 0xF0: + w, v = 4, rune(octet&0x07) + } + for k := 1; k < w; k++ { + octet = value[i+k] + v = (v << 6) + (rune(octet) & 
0x3F) + } + i += w + + if !put(emitter, '\\') { + return false + } + + var ok bool + switch v { + case 0x00: + ok = put(emitter, '0') + case 0x07: + ok = put(emitter, 'a') + case 0x08: + ok = put(emitter, 'b') + case 0x09: + ok = put(emitter, 't') + case 0x0A: + ok = put(emitter, 'n') + case 0x0b: + ok = put(emitter, 'v') + case 0x0c: + ok = put(emitter, 'f') + case 0x0d: + ok = put(emitter, 'r') + case 0x1b: + ok = put(emitter, 'e') + case 0x22: + ok = put(emitter, '"') + case 0x5c: + ok = put(emitter, '\\') + case 0x85: + ok = put(emitter, 'N') + case 0xA0: + ok = put(emitter, '_') + case 0x2028: + ok = put(emitter, 'L') + case 0x2029: + ok = put(emitter, 'P') + default: + if v <= 0xFF { + ok = put(emitter, 'x') + w = 2 + } else if v <= 0xFFFF { + ok = put(emitter, 'u') + w = 4 + } else { + ok = put(emitter, 'U') + w = 8 + } + for k := (w - 1) * 4; ok && k >= 0; k -= 4 { + digit := byte((v >> uint(k)) & 0x0F) + if digit < 10 { + ok = put(emitter, digit+'0') + } else { + ok = put(emitter, digit+'A'-10) + } + } + } + if !ok { + return false + } + spaces = false + } else if is_space(value, i) { + if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 { + if !yaml_emitter_write_indent(emitter) { + return false + } + if is_space(value, i+1) { + if !put(emitter, '\\') { + return false + } + } + i += width(value[i]) + } else if !write(emitter, value, &i) { + return false + } + spaces = true + } else { + if !write(emitter, value, &i) { + return false + } + spaces = false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{'"'}, false, false, false) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_block_scalar_hints(emitter *yaml_emitter_t, value []byte) bool { + if is_space(value, 0) || is_break(value, 0) { + indent_hint := []byte{'0' + byte(emitter.best_indent)} + if !yaml_emitter_write_indicator(emitter, indent_hint, false, false, false) { + return 
false + } + } + + emitter.open_ended = false + + var chomp_hint [1]byte + if len(value) == 0 { + chomp_hint[0] = '-' + } else { + i := len(value) - 1 + for value[i]&0xC0 == 0x80 { + i-- + } + if !is_break(value, i) { + chomp_hint[0] = '-' + } else if i == 0 { + chomp_hint[0] = '+' + emitter.open_ended = true + } else { + i-- + for value[i]&0xC0 == 0x80 { + i-- + } + if is_break(value, i) { + chomp_hint[0] = '+' + emitter.open_ended = true + } + } + } + if chomp_hint[0] != 0 { + if !yaml_emitter_write_indicator(emitter, chomp_hint[:], false, false, false) { + return false + } + } + return true +} + +func yaml_emitter_write_literal_scalar(emitter *yaml_emitter_t, value []byte) bool { + if !yaml_emitter_write_indicator(emitter, []byte{'|'}, true, false, false) { + return false + } + if !yaml_emitter_write_block_scalar_hints(emitter, value) { + return false + } + if !yaml_emitter_process_line_comment_linebreak(emitter, true) { + return false + } + //emitter.indention = true + emitter.whitespace = true + breaks := true + for i := 0; i < len(value); { + if is_break(value, i) { + if !write_break(emitter, value, &i) { + return false + } + //emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !write(emitter, value, &i) { + return false + } + emitter.indention = false + breaks = false + } + } + + return true +} + +func yaml_emitter_write_folded_scalar(emitter *yaml_emitter_t, value []byte) bool { + if !yaml_emitter_write_indicator(emitter, []byte{'>'}, true, false, false) { + return false + } + if !yaml_emitter_write_block_scalar_hints(emitter, value) { + return false + } + if !yaml_emitter_process_line_comment_linebreak(emitter, true) { + return false + } + + //emitter.indention = true + emitter.whitespace = true + + breaks := true + leading_spaces := true + for i := 0; i < len(value); { + if is_break(value, i) { + if !breaks && !leading_spaces && value[i] == '\n' { + k := 0 + for 
is_break(value, k) { + k += width(value[k]) + } + if !is_blankz(value, k) { + if !put_break(emitter) { + return false + } + } + } + if !write_break(emitter, value, &i) { + return false + } + //emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + leading_spaces = is_blank(value, i) + } + if !breaks && is_space(value, i) && !is_space(value, i+1) && emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + i += width(value[i]) + } else { + if !write(emitter, value, &i) { + return false + } + } + emitter.indention = false + breaks = false + } + } + return true +} + +func yaml_emitter_write_comment(emitter *yaml_emitter_t, comment []byte) bool { + breaks := false + pound := false + for i := 0; i < len(comment); { + if is_break(comment, i) { + if !write_break(emitter, comment, &i) { + return false + } + //emitter.indention = true + breaks = true + pound = false + } else { + if breaks && !yaml_emitter_write_indent(emitter) { + return false + } + if !pound { + if comment[i] != '#' && (!put(emitter, '#') || !put(emitter, ' ')) { + return false + } + pound = true + } + if !write(emitter, comment, &i) { + return false + } + emitter.indention = false + breaks = false + } + } + if !breaks && !put_break(emitter) { + return false + } + + emitter.whitespace = true + //emitter.indention = true + return true +} diff --git a/vendor/go.yaml.in/yaml/v3/encode.go b/vendor/go.yaml.in/yaml/v3/encode.go new file mode 100644 index 000000000..de9e72a3e --- /dev/null +++ b/vendor/go.yaml.in/yaml/v3/encode.go @@ -0,0 +1,577 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package yaml + +import ( + "encoding" + "fmt" + "io" + "reflect" + "regexp" + "sort" + "strconv" + "strings" + "time" + "unicode/utf8" +) + +type encoder struct { + emitter yaml_emitter_t + event yaml_event_t + out []byte + flow bool + indent int + doneInit bool +} + +func newEncoder() *encoder { + e := &encoder{} + yaml_emitter_initialize(&e.emitter) + yaml_emitter_set_output_string(&e.emitter, &e.out) + yaml_emitter_set_unicode(&e.emitter, true) + return e +} + +func newEncoderWithWriter(w io.Writer) *encoder { + e := &encoder{} + yaml_emitter_initialize(&e.emitter) + yaml_emitter_set_output_writer(&e.emitter, w) + yaml_emitter_set_unicode(&e.emitter, true) + return e +} + +func (e *encoder) init() { + if e.doneInit { + return + } + if e.indent == 0 { + e.indent = 4 + } + e.emitter.best_indent = e.indent + yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING) + e.emit() + e.doneInit = true +} + +func (e *encoder) finish() { + e.emitter.open_ended = false + yaml_stream_end_event_initialize(&e.event) + e.emit() +} + +func (e *encoder) destroy() { + yaml_emitter_delete(&e.emitter) +} + +func (e *encoder) emit() { + // This will internally delete the e.event value. 
+ e.must(yaml_emitter_emit(&e.emitter, &e.event)) +} + +func (e *encoder) must(ok bool) { + if !ok { + msg := e.emitter.problem + if msg == "" { + msg = "unknown problem generating YAML content" + } + failf("%s", msg) + } +} + +func (e *encoder) marshalDoc(tag string, in reflect.Value) { + e.init() + var node *Node + if in.IsValid() { + node, _ = in.Interface().(*Node) + } + if node != nil && node.Kind == DocumentNode { + e.nodev(in) + } else { + yaml_document_start_event_initialize(&e.event, nil, nil, true) + e.emit() + e.marshal(tag, in) + yaml_document_end_event_initialize(&e.event, true) + e.emit() + } +} + +func (e *encoder) marshal(tag string, in reflect.Value) { + tag = shortTag(tag) + if !in.IsValid() || in.Kind() == reflect.Ptr && in.IsNil() { + e.nilv() + return + } + iface := in.Interface() + switch value := iface.(type) { + case *Node: + e.nodev(in) + return + case Node: + if !in.CanAddr() { + var n = reflect.New(in.Type()).Elem() + n.Set(in) + in = n + } + e.nodev(in.Addr()) + return + case time.Time: + e.timev(tag, in) + return + case *time.Time: + e.timev(tag, in.Elem()) + return + case time.Duration: + e.stringv(tag, reflect.ValueOf(value.String())) + return + case Marshaler: + v, err := value.MarshalYAML() + if err != nil { + fail(err) + } + if v == nil { + e.nilv() + return + } + e.marshal(tag, reflect.ValueOf(v)) + return + case encoding.TextMarshaler: + text, err := value.MarshalText() + if err != nil { + fail(err) + } + in = reflect.ValueOf(string(text)) + case nil: + e.nilv() + return + } + switch in.Kind() { + case reflect.Interface: + e.marshal(tag, in.Elem()) + case reflect.Map: + e.mapv(tag, in) + case reflect.Ptr: + e.marshal(tag, in.Elem()) + case reflect.Struct: + e.structv(tag, in) + case reflect.Slice, reflect.Array: + e.slicev(tag, in) + case reflect.String: + e.stringv(tag, in) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + e.intv(tag, in) + case reflect.Uint, reflect.Uint8, reflect.Uint16, 
reflect.Uint32, reflect.Uint64, reflect.Uintptr: + e.uintv(tag, in) + case reflect.Float32, reflect.Float64: + e.floatv(tag, in) + case reflect.Bool: + e.boolv(tag, in) + default: + panic("cannot marshal type: " + in.Type().String()) + } +} + +func (e *encoder) mapv(tag string, in reflect.Value) { + e.mappingv(tag, func() { + keys := keyList(in.MapKeys()) + sort.Sort(keys) + for _, k := range keys { + e.marshal("", k) + e.marshal("", in.MapIndex(k)) + } + }) +} + +func (e *encoder) fieldByIndex(v reflect.Value, index []int) (field reflect.Value) { + for _, num := range index { + for { + if v.Kind() == reflect.Ptr { + if v.IsNil() { + return reflect.Value{} + } + v = v.Elem() + continue + } + break + } + v = v.Field(num) + } + return v +} + +func (e *encoder) structv(tag string, in reflect.Value) { + sinfo, err := getStructInfo(in.Type()) + if err != nil { + panic(err) + } + e.mappingv(tag, func() { + for _, info := range sinfo.FieldsList { + var value reflect.Value + if info.Inline == nil { + value = in.Field(info.Num) + } else { + value = e.fieldByIndex(in, info.Inline) + if !value.IsValid() { + continue + } + } + if info.OmitEmpty && isZero(value) { + continue + } + e.marshal("", reflect.ValueOf(info.Key)) + e.flow = info.Flow + e.marshal("", value) + } + if sinfo.InlineMap >= 0 { + m := in.Field(sinfo.InlineMap) + if m.Len() > 0 { + e.flow = false + keys := keyList(m.MapKeys()) + sort.Sort(keys) + for _, k := range keys { + if _, found := sinfo.FieldsMap[k.String()]; found { + panic(fmt.Sprintf("cannot have key %q in inlined map: conflicts with struct field", k.String())) + } + e.marshal("", k) + e.flow = false + e.marshal("", m.MapIndex(k)) + } + } + } + }) +} + +func (e *encoder) mappingv(tag string, f func()) { + implicit := tag == "" + style := yaml_BLOCK_MAPPING_STYLE + if e.flow { + e.flow = false + style = yaml_FLOW_MAPPING_STYLE + } + yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style) + e.emit() + f() + 
yaml_mapping_end_event_initialize(&e.event) + e.emit() +} + +func (e *encoder) slicev(tag string, in reflect.Value) { + implicit := tag == "" + style := yaml_BLOCK_SEQUENCE_STYLE + if e.flow { + e.flow = false + style = yaml_FLOW_SEQUENCE_STYLE + } + e.must(yaml_sequence_start_event_initialize(&e.event, nil, []byte(tag), implicit, style)) + e.emit() + n := in.Len() + for i := 0; i < n; i++ { + e.marshal("", in.Index(i)) + } + e.must(yaml_sequence_end_event_initialize(&e.event)) + e.emit() +} + +// isBase60 returns whether s is in base 60 notation as defined in YAML 1.1. +// +// The base 60 float notation in YAML 1.1 is a terrible idea and is unsupported +// in YAML 1.2 and by this package, but these should be marshalled quoted for +// the time being for compatibility with other parsers. +func isBase60Float(s string) (result bool) { + // Fast path. + if s == "" { + return false + } + c := s[0] + if !(c == '+' || c == '-' || c >= '0' && c <= '9') || strings.IndexByte(s, ':') < 0 { + return false + } + // Do the full match. + return base60float.MatchString(s) +} + +// From http://yaml.org/type/float.html, except the regular expression there +// is bogus. In practice parsers do not enforce the "\.[0-9_]*" suffix. +var base60float = regexp.MustCompile(`^[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+(?:\.[0-9_]*)?$`) + +// isOldBool returns whether s is bool notation as defined in YAML 1.1. +// +// We continue to force strings that YAML 1.1 would interpret as booleans to be +// rendered as quotes strings so that the marshalled output valid for YAML 1.1 +// parsing. 
+func isOldBool(s string) (result bool) { + switch s { + case "y", "Y", "yes", "Yes", "YES", "on", "On", "ON", + "n", "N", "no", "No", "NO", "off", "Off", "OFF": + return true + default: + return false + } +} + +func (e *encoder) stringv(tag string, in reflect.Value) { + var style yaml_scalar_style_t + s := in.String() + canUsePlain := true + switch { + case !utf8.ValidString(s): + if tag == binaryTag { + failf("explicitly tagged !!binary data must be base64-encoded") + } + if tag != "" { + failf("cannot marshal invalid UTF-8 data as %s", shortTag(tag)) + } + // It can't be encoded directly as YAML so use a binary tag + // and encode it as base64. + tag = binaryTag + s = encodeBase64(s) + case tag == "": + // Check to see if it would resolve to a specific + // tag when encoded unquoted. If it doesn't, + // there's no need to quote it. + rtag, _ := resolve("", s) + canUsePlain = rtag == strTag && !(isBase60Float(s) || isOldBool(s)) + } + // Note: it's possible for user code to emit invalid YAML + // if they explicitly specify a tag and a string containing + // text that's incompatible with that tag. 
+ switch { + case strings.Contains(s, "\n"): + if e.flow { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } else { + style = yaml_LITERAL_SCALAR_STYLE + } + case canUsePlain: + style = yaml_PLAIN_SCALAR_STYLE + default: + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + e.emitScalar(s, "", tag, style, nil, nil, nil, nil) +} + +func (e *encoder) boolv(tag string, in reflect.Value) { + var s string + if in.Bool() { + s = "true" + } else { + s = "false" + } + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil) +} + +func (e *encoder) intv(tag string, in reflect.Value) { + s := strconv.FormatInt(in.Int(), 10) + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil) +} + +func (e *encoder) uintv(tag string, in reflect.Value) { + s := strconv.FormatUint(in.Uint(), 10) + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil) +} + +func (e *encoder) timev(tag string, in reflect.Value) { + t := in.Interface().(time.Time) + s := t.Format(time.RFC3339Nano) + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil) +} + +func (e *encoder) floatv(tag string, in reflect.Value) { + // Issue #352: When formatting, use the precision of the underlying value + precision := 64 + if in.Kind() == reflect.Float32 { + precision = 32 + } + + s := strconv.FormatFloat(in.Float(), 'g', -1, precision) + switch s { + case "+Inf": + s = ".inf" + case "-Inf": + s = "-.inf" + case "NaN": + s = ".nan" + } + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil) +} + +func (e *encoder) nilv() { + e.emitScalar("null", "", "", yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil) +} + +func (e *encoder) emitScalar(value, anchor, tag string, style yaml_scalar_style_t, head, line, foot, tail []byte) { + // TODO Kill this function. Replace all initialize calls by their underlining Go literals. 
+ implicit := tag == "" + if !implicit { + tag = longTag(tag) + } + e.must(yaml_scalar_event_initialize(&e.event, []byte(anchor), []byte(tag), []byte(value), implicit, implicit, style)) + e.event.head_comment = head + e.event.line_comment = line + e.event.foot_comment = foot + e.event.tail_comment = tail + e.emit() +} + +func (e *encoder) nodev(in reflect.Value) { + e.node(in.Interface().(*Node), "") +} + +func (e *encoder) node(node *Node, tail string) { + // Zero nodes behave as nil. + if node.Kind == 0 && node.IsZero() { + e.nilv() + return + } + + // If the tag was not explicitly requested, and dropping it won't change the + // implicit tag of the value, don't include it in the presentation. + var tag = node.Tag + var stag = shortTag(tag) + var forceQuoting bool + if tag != "" && node.Style&TaggedStyle == 0 { + if node.Kind == ScalarNode { + if stag == strTag && node.Style&(SingleQuotedStyle|DoubleQuotedStyle|LiteralStyle|FoldedStyle) != 0 { + tag = "" + } else { + rtag, _ := resolve("", node.Value) + if rtag == stag { + tag = "" + } else if stag == strTag { + tag = "" + forceQuoting = true + } + } + } else { + var rtag string + switch node.Kind { + case MappingNode: + rtag = mapTag + case SequenceNode: + rtag = seqTag + } + if rtag == stag { + tag = "" + } + } + } + + switch node.Kind { + case DocumentNode: + yaml_document_start_event_initialize(&e.event, nil, nil, true) + e.event.head_comment = []byte(node.HeadComment) + e.emit() + for _, node := range node.Content { + e.node(node, "") + } + yaml_document_end_event_initialize(&e.event, true) + e.event.foot_comment = []byte(node.FootComment) + e.emit() + + case SequenceNode: + style := yaml_BLOCK_SEQUENCE_STYLE + if node.Style&FlowStyle != 0 { + style = yaml_FLOW_SEQUENCE_STYLE + } + e.must(yaml_sequence_start_event_initialize(&e.event, []byte(node.Anchor), []byte(longTag(tag)), tag == "", style)) + e.event.head_comment = []byte(node.HeadComment) + e.emit() + for _, node := range node.Content { + e.node(node, 
"") + } + e.must(yaml_sequence_end_event_initialize(&e.event)) + e.event.line_comment = []byte(node.LineComment) + e.event.foot_comment = []byte(node.FootComment) + e.emit() + + case MappingNode: + style := yaml_BLOCK_MAPPING_STYLE + if node.Style&FlowStyle != 0 { + style = yaml_FLOW_MAPPING_STYLE + } + yaml_mapping_start_event_initialize(&e.event, []byte(node.Anchor), []byte(longTag(tag)), tag == "", style) + e.event.tail_comment = []byte(tail) + e.event.head_comment = []byte(node.HeadComment) + e.emit() + + // The tail logic below moves the foot comment of prior keys to the following key, + // since the value for each key may be a nested structure and the foot needs to be + // processed only the entirety of the value is streamed. The last tail is processed + // with the mapping end event. + var tail string + for i := 0; i+1 < len(node.Content); i += 2 { + k := node.Content[i] + foot := k.FootComment + if foot != "" { + kopy := *k + kopy.FootComment = "" + k = &kopy + } + e.node(k, tail) + tail = foot + + v := node.Content[i+1] + e.node(v, "") + } + + yaml_mapping_end_event_initialize(&e.event) + e.event.tail_comment = []byte(tail) + e.event.line_comment = []byte(node.LineComment) + e.event.foot_comment = []byte(node.FootComment) + e.emit() + + case AliasNode: + yaml_alias_event_initialize(&e.event, []byte(node.Value)) + e.event.head_comment = []byte(node.HeadComment) + e.event.line_comment = []byte(node.LineComment) + e.event.foot_comment = []byte(node.FootComment) + e.emit() + + case ScalarNode: + value := node.Value + if !utf8.ValidString(value) { + if stag == binaryTag { + failf("explicitly tagged !!binary data must be base64-encoded") + } + if stag != "" { + failf("cannot marshal invalid UTF-8 data as %s", stag) + } + // It can't be encoded directly as YAML so use a binary tag + // and encode it as base64. 
+ tag = binaryTag + value = encodeBase64(value) + } + + style := yaml_PLAIN_SCALAR_STYLE + switch { + case node.Style&DoubleQuotedStyle != 0: + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + case node.Style&SingleQuotedStyle != 0: + style = yaml_SINGLE_QUOTED_SCALAR_STYLE + case node.Style&LiteralStyle != 0: + style = yaml_LITERAL_SCALAR_STYLE + case node.Style&FoldedStyle != 0: + style = yaml_FOLDED_SCALAR_STYLE + case strings.Contains(value, "\n"): + style = yaml_LITERAL_SCALAR_STYLE + case forceQuoting: + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + + e.emitScalar(value, node.Anchor, tag, style, []byte(node.HeadComment), []byte(node.LineComment), []byte(node.FootComment), []byte(tail)) + default: + failf("cannot encode node with unknown kind %d", node.Kind) + } +} diff --git a/vendor/go.yaml.in/yaml/v3/parserc.go b/vendor/go.yaml.in/yaml/v3/parserc.go new file mode 100644 index 000000000..25fe82363 --- /dev/null +++ b/vendor/go.yaml.in/yaml/v3/parserc.go @@ -0,0 +1,1274 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// Copyright (c) 2006-2010 Kirill Simonov +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of +// this software and associated documentation files (the "Software"), to deal in +// the Software without restriction, including without limitation the rights to +// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +// of the Software, and to permit persons to whom the Software is furnished to do +// so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +package yaml + +import ( + "bytes" +) + +// The parser implements the following grammar: +// +// stream ::= STREAM-START implicit_document? explicit_document* STREAM-END +// implicit_document ::= block_node DOCUMENT-END* +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +// block_node_or_indentless_sequence ::= +// ALIAS +// | properties (block_content | indentless_block_sequence)? +// | block_content +// | indentless_block_sequence +// block_node ::= ALIAS +// | properties block_content? +// | block_content +// flow_node ::= ALIAS +// | properties flow_content? +// | flow_content +// properties ::= TAG ANCHOR? | ANCHOR TAG? +// block_content ::= block_collection | flow_collection | SCALAR +// flow_content ::= flow_collection | SCALAR +// block_collection ::= block_sequence | block_mapping +// flow_collection ::= flow_sequence | flow_mapping +// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END +// indentless_sequence ::= (BLOCK-ENTRY block_node?)+ +// block_mapping ::= BLOCK-MAPPING_START +// ((KEY block_node_or_indentless_sequence?)? +// (VALUE block_node_or_indentless_sequence?)?)* +// BLOCK-END +// flow_sequence ::= FLOW-SEQUENCE-START +// (flow_sequence_entry FLOW-ENTRY)* +// flow_sequence_entry? +// FLOW-SEQUENCE-END +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// flow_mapping ::= FLOW-MAPPING-START +// (flow_mapping_entry FLOW-ENTRY)* +// flow_mapping_entry? +// FLOW-MAPPING-END +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? + +// Peek the next token in the token queue. 
+func peek_token(parser *yaml_parser_t) *yaml_token_t { + if parser.token_available || yaml_parser_fetch_more_tokens(parser) { + token := &parser.tokens[parser.tokens_head] + yaml_parser_unfold_comments(parser, token) + return token + } + return nil +} + +// yaml_parser_unfold_comments walks through the comments queue and joins all +// comments behind the position of the provided token into the respective +// top-level comment slices in the parser. +func yaml_parser_unfold_comments(parser *yaml_parser_t, token *yaml_token_t) { + for parser.comments_head < len(parser.comments) && token.start_mark.index >= parser.comments[parser.comments_head].token_mark.index { + comment := &parser.comments[parser.comments_head] + if len(comment.head) > 0 { + if token.typ == yaml_BLOCK_END_TOKEN { + // No heads on ends, so keep comment.head for a follow up token. + break + } + if len(parser.head_comment) > 0 { + parser.head_comment = append(parser.head_comment, '\n') + } + parser.head_comment = append(parser.head_comment, comment.head...) + } + if len(comment.foot) > 0 { + if len(parser.foot_comment) > 0 { + parser.foot_comment = append(parser.foot_comment, '\n') + } + parser.foot_comment = append(parser.foot_comment, comment.foot...) + } + if len(comment.line) > 0 { + if len(parser.line_comment) > 0 { + parser.line_comment = append(parser.line_comment, '\n') + } + parser.line_comment = append(parser.line_comment, comment.line...) + } + *comment = yaml_comment_t{} + parser.comments_head++ + } +} + +// Remove the next token from the queue (must be called after peek_token). +func skip_token(parser *yaml_parser_t) { + parser.token_available = false + parser.tokens_parsed++ + parser.stream_end_produced = parser.tokens[parser.tokens_head].typ == yaml_STREAM_END_TOKEN + parser.tokens_head++ +} + +// Get the next event. +func yaml_parser_parse(parser *yaml_parser_t, event *yaml_event_t) bool { + // Erase the event object. 
+ *event = yaml_event_t{} + + // No events after the end of the stream or error. + if parser.stream_end_produced || parser.error != yaml_NO_ERROR || parser.state == yaml_PARSE_END_STATE { + return true + } + + // Generate the next event. + return yaml_parser_state_machine(parser, event) +} + +// Set parser error. +func yaml_parser_set_parser_error(parser *yaml_parser_t, problem string, problem_mark yaml_mark_t) bool { + parser.error = yaml_PARSER_ERROR + parser.problem = problem + parser.problem_mark = problem_mark + return false +} + +func yaml_parser_set_parser_error_context(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string, problem_mark yaml_mark_t) bool { + parser.error = yaml_PARSER_ERROR + parser.context = context + parser.context_mark = context_mark + parser.problem = problem + parser.problem_mark = problem_mark + return false +} + +// State dispatcher. +func yaml_parser_state_machine(parser *yaml_parser_t, event *yaml_event_t) bool { + //trace("yaml_parser_state_machine", "state:", parser.state.String()) + + switch parser.state { + case yaml_PARSE_STREAM_START_STATE: + return yaml_parser_parse_stream_start(parser, event) + + case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE: + return yaml_parser_parse_document_start(parser, event, true) + + case yaml_PARSE_DOCUMENT_START_STATE: + return yaml_parser_parse_document_start(parser, event, false) + + case yaml_PARSE_DOCUMENT_CONTENT_STATE: + return yaml_parser_parse_document_content(parser, event) + + case yaml_PARSE_DOCUMENT_END_STATE: + return yaml_parser_parse_document_end(parser, event) + + case yaml_PARSE_BLOCK_NODE_STATE: + return yaml_parser_parse_node(parser, event, true, false) + + case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE: + return yaml_parser_parse_node(parser, event, true, true) + + case yaml_PARSE_FLOW_NODE_STATE: + return yaml_parser_parse_node(parser, event, false, false) + + case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE: + return 
yaml_parser_parse_block_sequence_entry(parser, event, true) + + case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE: + return yaml_parser_parse_block_sequence_entry(parser, event, false) + + case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE: + return yaml_parser_parse_indentless_sequence_entry(parser, event) + + case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE: + return yaml_parser_parse_block_mapping_key(parser, event, true) + + case yaml_PARSE_BLOCK_MAPPING_KEY_STATE: + return yaml_parser_parse_block_mapping_key(parser, event, false) + + case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE: + return yaml_parser_parse_block_mapping_value(parser, event) + + case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE: + return yaml_parser_parse_flow_sequence_entry(parser, event, true) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE: + return yaml_parser_parse_flow_sequence_entry(parser, event, false) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE: + return yaml_parser_parse_flow_sequence_entry_mapping_key(parser, event) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE: + return yaml_parser_parse_flow_sequence_entry_mapping_value(parser, event) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE: + return yaml_parser_parse_flow_sequence_entry_mapping_end(parser, event) + + case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE: + return yaml_parser_parse_flow_mapping_key(parser, event, true) + + case yaml_PARSE_FLOW_MAPPING_KEY_STATE: + return yaml_parser_parse_flow_mapping_key(parser, event, false) + + case yaml_PARSE_FLOW_MAPPING_VALUE_STATE: + return yaml_parser_parse_flow_mapping_value(parser, event, false) + + case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE: + return yaml_parser_parse_flow_mapping_value(parser, event, true) + + default: + panic("invalid parser state") + } +} + +// Parse the production: +// stream ::= STREAM-START implicit_document? 
explicit_document* STREAM-END +// +// ************ +func yaml_parser_parse_stream_start(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_STREAM_START_TOKEN { + return yaml_parser_set_parser_error(parser, "did not find expected ", token.start_mark) + } + parser.state = yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE + *event = yaml_event_t{ + typ: yaml_STREAM_START_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + encoding: token.encoding, + } + skip_token(parser) + return true +} + +// Parse the productions: +// implicit_document ::= block_node DOCUMENT-END* +// +// * +// +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +// +// ************************* +func yaml_parser_parse_document_start(parser *yaml_parser_t, event *yaml_event_t, implicit bool) bool { + + token := peek_token(parser) + if token == nil { + return false + } + + // Parse extra document end indicators. + if !implicit { + for token.typ == yaml_DOCUMENT_END_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + } + + if implicit && token.typ != yaml_VERSION_DIRECTIVE_TOKEN && + token.typ != yaml_TAG_DIRECTIVE_TOKEN && + token.typ != yaml_DOCUMENT_START_TOKEN && + token.typ != yaml_STREAM_END_TOKEN { + // Parse an implicit document. + if !yaml_parser_process_directives(parser, nil, nil) { + return false + } + parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE) + parser.state = yaml_PARSE_BLOCK_NODE_STATE + + var head_comment []byte + if len(parser.head_comment) > 0 { + // [Go] Scan the header comment backwards, and if an empty line is found, break + // the header so the part before the last empty line goes into the + // document header, while the bottom of it goes into a follow up event. 
+ for i := len(parser.head_comment) - 1; i > 0; i-- { + if parser.head_comment[i] == '\n' { + if i == len(parser.head_comment)-1 { + head_comment = parser.head_comment[:i] + parser.head_comment = parser.head_comment[i+1:] + break + } else if parser.head_comment[i-1] == '\n' { + head_comment = parser.head_comment[:i-1] + parser.head_comment = parser.head_comment[i+1:] + break + } + } + } + } + + *event = yaml_event_t{ + typ: yaml_DOCUMENT_START_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + + head_comment: head_comment, + } + + } else if token.typ != yaml_STREAM_END_TOKEN { + // Parse an explicit document. + var version_directive *yaml_version_directive_t + var tag_directives []yaml_tag_directive_t + start_mark := token.start_mark + if !yaml_parser_process_directives(parser, &version_directive, &tag_directives) { + return false + } + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_DOCUMENT_START_TOKEN { + yaml_parser_set_parser_error(parser, + "did not find expected ", token.start_mark) + return false + } + parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE) + parser.state = yaml_PARSE_DOCUMENT_CONTENT_STATE + end_mark := token.end_mark + + *event = yaml_event_t{ + typ: yaml_DOCUMENT_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + version_directive: version_directive, + tag_directives: tag_directives, + implicit: false, + } + skip_token(parser) + + } else { + // Parse the stream end. + parser.state = yaml_PARSE_END_STATE + *event = yaml_event_t{ + typ: yaml_STREAM_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + skip_token(parser) + } + + return true +} + +// Parse the productions: +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? 
DOCUMENT-END* +// +// *********** +func yaml_parser_parse_document_content(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_VERSION_DIRECTIVE_TOKEN || + token.typ == yaml_TAG_DIRECTIVE_TOKEN || + token.typ == yaml_DOCUMENT_START_TOKEN || + token.typ == yaml_DOCUMENT_END_TOKEN || + token.typ == yaml_STREAM_END_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + return yaml_parser_process_empty_scalar(parser, event, + token.start_mark) + } + return yaml_parser_parse_node(parser, event, true, false) +} + +// Parse the productions: +// implicit_document ::= block_node DOCUMENT-END* +// +// ************* +// +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +func yaml_parser_parse_document_end(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + + start_mark := token.start_mark + end_mark := token.start_mark + + implicit := true + if token.typ == yaml_DOCUMENT_END_TOKEN { + end_mark = token.end_mark + skip_token(parser) + implicit = false + } + + parser.tag_directives = parser.tag_directives[:0] + + parser.state = yaml_PARSE_DOCUMENT_START_STATE + *event = yaml_event_t{ + typ: yaml_DOCUMENT_END_EVENT, + start_mark: start_mark, + end_mark: end_mark, + implicit: implicit, + } + yaml_parser_set_event_comments(parser, event) + if len(event.head_comment) > 0 && len(event.foot_comment) == 0 { + event.foot_comment = event.head_comment + event.head_comment = nil + } + return true +} + +func yaml_parser_set_event_comments(parser *yaml_parser_t, event *yaml_event_t) { + event.head_comment = parser.head_comment + event.line_comment = parser.line_comment + event.foot_comment = parser.foot_comment + parser.head_comment = nil + parser.line_comment = nil + parser.foot_comment = nil + parser.tail_comment = nil + parser.stem_comment 
= nil +} + +// Parse the productions: +// block_node_or_indentless_sequence ::= +// +// ALIAS +// ***** +// | properties (block_content | indentless_block_sequence)? +// ********** * +// | block_content | indentless_block_sequence +// * +// +// block_node ::= ALIAS +// +// ***** +// | properties block_content? +// ********** * +// | block_content +// * +// +// flow_node ::= ALIAS +// +// ***** +// | properties flow_content? +// ********** * +// | flow_content +// * +// +// properties ::= TAG ANCHOR? | ANCHOR TAG? +// +// ************************* +// +// block_content ::= block_collection | flow_collection | SCALAR +// +// ****** +// +// flow_content ::= flow_collection | SCALAR +// +// ****** +func yaml_parser_parse_node(parser *yaml_parser_t, event *yaml_event_t, block, indentless_sequence bool) bool { + //defer trace("yaml_parser_parse_node", "block:", block, "indentless_sequence:", indentless_sequence)() + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_ALIAS_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + *event = yaml_event_t{ + typ: yaml_ALIAS_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + anchor: token.value, + } + yaml_parser_set_event_comments(parser, event) + skip_token(parser) + return true + } + + start_mark := token.start_mark + end_mark := token.start_mark + + var tag_token bool + var tag_handle, tag_suffix, anchor []byte + var tag_mark yaml_mark_t + if token.typ == yaml_ANCHOR_TOKEN { + anchor = token.value + start_mark = token.start_mark + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_TAG_TOKEN { + tag_token = true + tag_handle = token.value + tag_suffix = token.suffix + tag_mark = token.start_mark + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + } 
else if token.typ == yaml_TAG_TOKEN { + tag_token = true + tag_handle = token.value + tag_suffix = token.suffix + start_mark = token.start_mark + tag_mark = token.start_mark + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_ANCHOR_TOKEN { + anchor = token.value + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + } + + var tag []byte + if tag_token { + if len(tag_handle) == 0 { + tag = tag_suffix + tag_suffix = nil + } else { + for i := range parser.tag_directives { + if bytes.Equal(parser.tag_directives[i].handle, tag_handle) { + tag = append([]byte(nil), parser.tag_directives[i].prefix...) + tag = append(tag, tag_suffix...) + break + } + } + if len(tag) == 0 { + yaml_parser_set_parser_error_context(parser, + "while parsing a node", start_mark, + "found undefined tag handle", tag_mark) + return false + } + } + } + + implicit := len(tag) == 0 + if indentless_sequence && token.typ == yaml_BLOCK_ENTRY_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE), + } + return true + } + if token.typ == yaml_SCALAR_TOKEN { + var plain_implicit, quoted_implicit bool + end_mark = token.end_mark + if (len(tag) == 0 && token.style == yaml_PLAIN_SCALAR_STYLE) || (len(tag) == 1 && tag[0] == '!') { + plain_implicit = true + } else if len(tag) == 0 { + quoted_implicit = true + } + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + value: token.value, + implicit: plain_implicit, + quoted_implicit: 
quoted_implicit, + style: yaml_style_t(token.style), + } + yaml_parser_set_event_comments(parser, event) + skip_token(parser) + return true + } + if token.typ == yaml_FLOW_SEQUENCE_START_TOKEN { + // [Go] Some of the events below can be merged as they differ only on style. + end_mark = token.end_mark + parser.state = yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_FLOW_SEQUENCE_STYLE), + } + yaml_parser_set_event_comments(parser, event) + return true + } + if token.typ == yaml_FLOW_MAPPING_START_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_FLOW_MAPPING_STYLE), + } + yaml_parser_set_event_comments(parser, event) + return true + } + if block && token.typ == yaml_BLOCK_SEQUENCE_START_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE), + } + if parser.stem_comment != nil { + event.head_comment = parser.stem_comment + parser.stem_comment = nil + } + return true + } + if block && token.typ == yaml_BLOCK_MAPPING_START_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_BLOCK_MAPPING_STYLE), + } + if parser.stem_comment != nil { + event.head_comment = parser.stem_comment + parser.stem_comment = 
nil + } + return true + } + if len(anchor) > 0 || len(tag) > 0 { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + quoted_implicit: false, + style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE), + } + return true + } + + context := "while parsing a flow node" + if block { + context = "while parsing a block node" + } + yaml_parser_set_parser_error_context(parser, context, start_mark, + "did not find expected node content", token.start_mark) + return false +} + +// Parse the productions: +// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END +// +// ******************** *********** * ********* +func yaml_parser_parse_block_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + if token == nil { + return false + } + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_BLOCK_ENTRY_TOKEN { + mark := token.end_mark + prior_head_len := len(parser.head_comment) + skip_token(parser) + yaml_parser_split_stem_comment(parser, prior_head_len) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_BLOCK_ENTRY_TOKEN && token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE) + return yaml_parser_parse_node(parser, event, true, false) + } else { + parser.state = yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + } + if token.typ == yaml_BLOCK_END_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + + 
*event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + + skip_token(parser) + return true + } + + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a block collection", context_mark, + "did not find expected '-' indicator", token.start_mark) +} + +// Parse the productions: +// indentless_sequence ::= (BLOCK-ENTRY block_node?)+ +// +// *********** * +func yaml_parser_parse_indentless_sequence_entry(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_BLOCK_ENTRY_TOKEN { + mark := token.end_mark + prior_head_len := len(parser.head_comment) + skip_token(parser) + yaml_parser_split_stem_comment(parser, prior_head_len) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_BLOCK_ENTRY_TOKEN && + token.typ != yaml_KEY_TOKEN && + token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE) + return yaml_parser_parse_node(parser, event, true, false) + } + parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + start_mark: token.start_mark, + end_mark: token.start_mark, // [Go] Shouldn't this be token.end_mark? + } + return true +} + +// Split stem comment from head comment. +// +// When a sequence or map is found under a sequence entry, the former head comment +// is assigned to the underlying sequence or map as a whole, not the individual +// sequence or map entry as would be expected otherwise. 
To handle this case the +// previous head comment is moved aside as the stem comment. +func yaml_parser_split_stem_comment(parser *yaml_parser_t, stem_len int) { + if stem_len == 0 { + return + } + + token := peek_token(parser) + if token == nil || token.typ != yaml_BLOCK_SEQUENCE_START_TOKEN && token.typ != yaml_BLOCK_MAPPING_START_TOKEN { + return + } + + parser.stem_comment = parser.head_comment[:stem_len] + if len(parser.head_comment) == stem_len { + parser.head_comment = nil + } else { + // Copy suffix to prevent very strange bugs if someone ever appends + // further bytes to the prefix in the stem_comment slice above. + parser.head_comment = append([]byte(nil), parser.head_comment[stem_len+1:]...) + } +} + +// Parse the productions: +// block_mapping ::= BLOCK-MAPPING_START +// +// ******************* +// ((KEY block_node_or_indentless_sequence?)? +// *** * +// (VALUE block_node_or_indentless_sequence?)?)* +// +// BLOCK-END +// ********* +func yaml_parser_parse_block_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + if token == nil { + return false + } + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + + token := peek_token(parser) + if token == nil { + return false + } + + // [Go] A tail comment was left from the prior mapping value processed. Emit an event + // as it needs to be processed with that value and not the following key. 
+ if len(parser.tail_comment) > 0 { + *event = yaml_event_t{ + typ: yaml_TAIL_COMMENT_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + foot_comment: parser.tail_comment, + } + parser.tail_comment = nil + return true + } + + if token.typ == yaml_KEY_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_KEY_TOKEN && + token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_VALUE_STATE) + return yaml_parser_parse_node(parser, event, true, true) + } else { + parser.state = yaml_PARSE_BLOCK_MAPPING_VALUE_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + } else if token.typ == yaml_BLOCK_END_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + yaml_parser_set_event_comments(parser, event) + skip_token(parser) + return true + } + + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a block mapping", context_mark, + "did not find expected key", token.start_mark) +} + +// Parse the productions: +// block_mapping ::= BLOCK-MAPPING_START +// +// ((KEY block_node_or_indentless_sequence?)? 
+// +// (VALUE block_node_or_indentless_sequence?)?)* +// ***** * +// BLOCK-END +func yaml_parser_parse_block_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_VALUE_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_KEY_TOKEN && + token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_KEY_STATE) + return yaml_parser_parse_node(parser, event, true, true) + } + parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) +} + +// Parse the productions: +// flow_sequence ::= FLOW-SEQUENCE-START +// +// ******************* +// (flow_sequence_entry FLOW-ENTRY)* +// * ********** +// flow_sequence_entry? +// * +// FLOW-SEQUENCE-END +// ***************** +// +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
+// +// * +func yaml_parser_parse_flow_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + if token == nil { + return false + } + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + if !first { + if token.typ == yaml_FLOW_ENTRY_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } else { + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a flow sequence", context_mark, + "did not find expected ',' or ']'", token.start_mark) + } + } + + if token.typ == yaml_KEY_TOKEN { + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + implicit: true, + style: yaml_style_t(yaml_FLOW_MAPPING_STYLE), + } + skip_token(parser) + return true + } else if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + yaml_parser_set_event_comments(parser, event) + + skip_token(parser) + return true +} + +// Parse the productions: +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
+// +// *** * +func yaml_parser_parse_flow_sequence_entry_mapping_key(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_FLOW_ENTRY_TOKEN && + token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + mark := token.end_mark + skip_token(parser) + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) +} + +// Parse the productions: +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// +// ***** * +func yaml_parser_parse_flow_sequence_entry_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_VALUE_TOKEN { + skip_token(parser) + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) +} + +// Parse the productions: +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// +// * +func yaml_parser_parse_flow_sequence_entry_mapping_end(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + start_mark: token.start_mark, + end_mark: token.start_mark, // [Go] Shouldn't this be end_mark? 
+ } + return true +} + +// Parse the productions: +// flow_mapping ::= FLOW-MAPPING-START +// +// ****************** +// (flow_mapping_entry FLOW-ENTRY)* +// * ********** +// flow_mapping_entry? +// ****************** +// FLOW-MAPPING-END +// **************** +// +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// - *** * +func yaml_parser_parse_flow_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ != yaml_FLOW_MAPPING_END_TOKEN { + if !first { + if token.typ == yaml_FLOW_ENTRY_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } else { + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a flow mapping", context_mark, + "did not find expected ',' or '}'", token.start_mark) + } + } + + if token.typ == yaml_KEY_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_FLOW_ENTRY_TOKEN && + token.typ != yaml_FLOW_MAPPING_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_VALUE_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } else { + parser.state = yaml_PARSE_FLOW_MAPPING_VALUE_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) + } + } else if token.typ != yaml_FLOW_MAPPING_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = 
parser.marks[:len(parser.marks)-1] + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + yaml_parser_set_event_comments(parser, event) + skip_token(parser) + return true +} + +// Parse the productions: +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// - ***** * +func yaml_parser_parse_flow_mapping_value(parser *yaml_parser_t, event *yaml_event_t, empty bool) bool { + token := peek_token(parser) + if token == nil { + return false + } + if empty { + parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) + } + if token.typ == yaml_VALUE_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_MAPPING_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_KEY_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) +} + +// Generate an empty scalar event. +func yaml_parser_process_empty_scalar(parser *yaml_parser_t, event *yaml_event_t, mark yaml_mark_t) bool { + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + start_mark: mark, + end_mark: mark, + value: nil, // Empty + implicit: true, + style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE), + } + return true +} + +var default_tag_directives = []yaml_tag_directive_t{ + {[]byte("!"), []byte("!")}, + {[]byte("!!"), []byte("tag:yaml.org,2002:")}, +} + +// Parse directives. 
+func yaml_parser_process_directives(parser *yaml_parser_t, + version_directive_ref **yaml_version_directive_t, + tag_directives_ref *[]yaml_tag_directive_t) bool { + + var version_directive *yaml_version_directive_t + var tag_directives []yaml_tag_directive_t + + token := peek_token(parser) + if token == nil { + return false + } + + for token.typ == yaml_VERSION_DIRECTIVE_TOKEN || token.typ == yaml_TAG_DIRECTIVE_TOKEN { + if token.typ == yaml_VERSION_DIRECTIVE_TOKEN { + if version_directive != nil { + yaml_parser_set_parser_error(parser, + "found duplicate %YAML directive", token.start_mark) + return false + } + if token.major != 1 || token.minor != 1 { + yaml_parser_set_parser_error(parser, + "found incompatible YAML document", token.start_mark) + return false + } + version_directive = &yaml_version_directive_t{ + major: token.major, + minor: token.minor, + } + } else if token.typ == yaml_TAG_DIRECTIVE_TOKEN { + value := yaml_tag_directive_t{ + handle: token.value, + prefix: token.prefix, + } + if !yaml_parser_append_tag_directive(parser, value, false, token.start_mark) { + return false + } + tag_directives = append(tag_directives, value) + } + + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + + for i := range default_tag_directives { + if !yaml_parser_append_tag_directive(parser, default_tag_directives[i], true, token.start_mark) { + return false + } + } + + if version_directive_ref != nil { + *version_directive_ref = version_directive + } + if tag_directives_ref != nil { + *tag_directives_ref = tag_directives + } + return true +} + +// Append a tag directive to the directives stack. 
+func yaml_parser_append_tag_directive(parser *yaml_parser_t, value yaml_tag_directive_t, allow_duplicates bool, mark yaml_mark_t) bool { + for i := range parser.tag_directives { + if bytes.Equal(value.handle, parser.tag_directives[i].handle) { + if allow_duplicates { + return true + } + return yaml_parser_set_parser_error(parser, "found duplicate %TAG directive", mark) + } + } + + // [Go] I suspect the copy is unnecessary. This was likely done + // because there was no way to track ownership of the data. + value_copy := yaml_tag_directive_t{ + handle: make([]byte, len(value.handle)), + prefix: make([]byte, len(value.prefix)), + } + copy(value_copy.handle, value.handle) + copy(value_copy.prefix, value.prefix) + parser.tag_directives = append(parser.tag_directives, value_copy) + return true +} diff --git a/vendor/go.yaml.in/yaml/v3/readerc.go b/vendor/go.yaml.in/yaml/v3/readerc.go new file mode 100644 index 000000000..56af24536 --- /dev/null +++ b/vendor/go.yaml.in/yaml/v3/readerc.go @@ -0,0 +1,434 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// Copyright (c) 2006-2010 Kirill Simonov +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of +// this software and associated documentation files (the "Software"), to deal in +// the Software without restriction, including without limitation the rights to +// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +// of the Software, and to permit persons to whom the Software is furnished to do +// so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +package yaml + +import ( + "io" +) + +// Set the reader error and return 0. +func yaml_parser_set_reader_error(parser *yaml_parser_t, problem string, offset int, value int) bool { + parser.error = yaml_READER_ERROR + parser.problem = problem + parser.problem_offset = offset + parser.problem_value = value + return false +} + +// Byte order marks. +const ( + bom_UTF8 = "\xef\xbb\xbf" + bom_UTF16LE = "\xff\xfe" + bom_UTF16BE = "\xfe\xff" +) + +// Determine the input stream encoding by checking the BOM symbol. If no BOM is +// found, the UTF-8 encoding is assumed. Return 1 on success, 0 on failure. +func yaml_parser_determine_encoding(parser *yaml_parser_t) bool { + // Ensure that we had enough bytes in the raw buffer. + for !parser.eof && len(parser.raw_buffer)-parser.raw_buffer_pos < 3 { + if !yaml_parser_update_raw_buffer(parser) { + return false + } + } + + // Determine the encoding. + buf := parser.raw_buffer + pos := parser.raw_buffer_pos + avail := len(buf) - pos + if avail >= 2 && buf[pos] == bom_UTF16LE[0] && buf[pos+1] == bom_UTF16LE[1] { + parser.encoding = yaml_UTF16LE_ENCODING + parser.raw_buffer_pos += 2 + parser.offset += 2 + } else if avail >= 2 && buf[pos] == bom_UTF16BE[0] && buf[pos+1] == bom_UTF16BE[1] { + parser.encoding = yaml_UTF16BE_ENCODING + parser.raw_buffer_pos += 2 + parser.offset += 2 + } else if avail >= 3 && buf[pos] == bom_UTF8[0] && buf[pos+1] == bom_UTF8[1] && buf[pos+2] == bom_UTF8[2] { + parser.encoding = yaml_UTF8_ENCODING + parser.raw_buffer_pos += 3 + parser.offset += 3 + } else { + parser.encoding = yaml_UTF8_ENCODING + } + return true +} + +// Update the raw buffer. 
+func yaml_parser_update_raw_buffer(parser *yaml_parser_t) bool { + size_read := 0 + + // Return if the raw buffer is full. + if parser.raw_buffer_pos == 0 && len(parser.raw_buffer) == cap(parser.raw_buffer) { + return true + } + + // Return on EOF. + if parser.eof { + return true + } + + // Move the remaining bytes in the raw buffer to the beginning. + if parser.raw_buffer_pos > 0 && parser.raw_buffer_pos < len(parser.raw_buffer) { + copy(parser.raw_buffer, parser.raw_buffer[parser.raw_buffer_pos:]) + } + parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)-parser.raw_buffer_pos] + parser.raw_buffer_pos = 0 + + // Call the read handler to fill the buffer. + size_read, err := parser.read_handler(parser, parser.raw_buffer[len(parser.raw_buffer):cap(parser.raw_buffer)]) + parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)+size_read] + if err == io.EOF { + parser.eof = true + } else if err != nil { + return yaml_parser_set_reader_error(parser, "input error: "+err.Error(), parser.offset, -1) + } + return true +} + +// Ensure that the buffer contains at least `length` characters. +// Return true on success, false on failure. +// +// The length is supposed to be significantly less that the buffer size. +func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool { + if parser.read_handler == nil { + panic("read handler must be set") + } + + // [Go] This function was changed to guarantee the requested length size at EOF. + // The fact we need to do this is pretty awful, but the description above implies + // for that to be the case, and there are tests + + // If the EOF flag is set and the raw buffer is empty, do nothing. + if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) { + // [Go] ACTUALLY! Read the documentation of this function above. + // This is just broken. To return true, we need to have the + // given length in the buffer. 
Not doing that means every single + // check that calls this function to make sure the buffer has a + // given length is Go) panicking; or C) accessing invalid memory. + //return true + } + + // Return if the buffer contains enough characters. + if parser.unread >= length { + return true + } + + // Determine the input encoding if it is not known yet. + if parser.encoding == yaml_ANY_ENCODING { + if !yaml_parser_determine_encoding(parser) { + return false + } + } + + // Move the unread characters to the beginning of the buffer. + buffer_len := len(parser.buffer) + if parser.buffer_pos > 0 && parser.buffer_pos < buffer_len { + copy(parser.buffer, parser.buffer[parser.buffer_pos:]) + buffer_len -= parser.buffer_pos + parser.buffer_pos = 0 + } else if parser.buffer_pos == buffer_len { + buffer_len = 0 + parser.buffer_pos = 0 + } + + // Open the whole buffer for writing, and cut it before returning. + parser.buffer = parser.buffer[:cap(parser.buffer)] + + // Fill the buffer until it has enough characters. + first := true + for parser.unread < length { + + // Fill the raw buffer if necessary. + if !first || parser.raw_buffer_pos == len(parser.raw_buffer) { + if !yaml_parser_update_raw_buffer(parser) { + parser.buffer = parser.buffer[:buffer_len] + return false + } + } + first = false + + // Decode the raw buffer. + inner: + for parser.raw_buffer_pos != len(parser.raw_buffer) { + var value rune + var width int + + raw_unread := len(parser.raw_buffer) - parser.raw_buffer_pos + + // Decode the next character. + switch parser.encoding { + case yaml_UTF8_ENCODING: + // Decode a UTF-8 character. Check RFC 3629 + // (http://www.ietf.org/rfc/rfc3629.txt) for more details. + // + // The following table (taken from the RFC) is used for + // decoding. + // + // Char. 
number range | UTF-8 octet sequence + // (hexadecimal) | (binary) + // --------------------+------------------------------------ + // 0000 0000-0000 007F | 0xxxxxxx + // 0000 0080-0000 07FF | 110xxxxx 10xxxxxx + // 0000 0800-0000 FFFF | 1110xxxx 10xxxxxx 10xxxxxx + // 0001 0000-0010 FFFF | 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx + // + // Additionally, the characters in the range 0xD800-0xDFFF + // are prohibited as they are reserved for use with UTF-16 + // surrogate pairs. + + // Determine the length of the UTF-8 sequence. + octet := parser.raw_buffer[parser.raw_buffer_pos] + switch { + case octet&0x80 == 0x00: + width = 1 + case octet&0xE0 == 0xC0: + width = 2 + case octet&0xF0 == 0xE0: + width = 3 + case octet&0xF8 == 0xF0: + width = 4 + default: + // The leading octet is invalid. + return yaml_parser_set_reader_error(parser, + "invalid leading UTF-8 octet", + parser.offset, int(octet)) + } + + // Check if the raw buffer contains an incomplete character. + if width > raw_unread { + if parser.eof { + return yaml_parser_set_reader_error(parser, + "incomplete UTF-8 octet sequence", + parser.offset, -1) + } + break inner + } + + // Decode the leading octet. + switch { + case octet&0x80 == 0x00: + value = rune(octet & 0x7F) + case octet&0xE0 == 0xC0: + value = rune(octet & 0x1F) + case octet&0xF0 == 0xE0: + value = rune(octet & 0x0F) + case octet&0xF8 == 0xF0: + value = rune(octet & 0x07) + default: + value = 0 + } + + // Check and decode the trailing octets. + for k := 1; k < width; k++ { + octet = parser.raw_buffer[parser.raw_buffer_pos+k] + + // Check if the octet is valid. + if (octet & 0xC0) != 0x80 { + return yaml_parser_set_reader_error(parser, + "invalid trailing UTF-8 octet", + parser.offset+k, int(octet)) + } + + // Decode the octet. + value = (value << 6) + rune(octet&0x3F) + } + + // Check the length of the sequence against the value. 
+ switch { + case width == 1: + case width == 2 && value >= 0x80: + case width == 3 && value >= 0x800: + case width == 4 && value >= 0x10000: + default: + return yaml_parser_set_reader_error(parser, + "invalid length of a UTF-8 sequence", + parser.offset, -1) + } + + // Check the range of the value. + if value >= 0xD800 && value <= 0xDFFF || value > 0x10FFFF { + return yaml_parser_set_reader_error(parser, + "invalid Unicode character", + parser.offset, int(value)) + } + + case yaml_UTF16LE_ENCODING, yaml_UTF16BE_ENCODING: + var low, high int + if parser.encoding == yaml_UTF16LE_ENCODING { + low, high = 0, 1 + } else { + low, high = 1, 0 + } + + // The UTF-16 encoding is not as simple as one might + // naively think. Check RFC 2781 + // (http://www.ietf.org/rfc/rfc2781.txt). + // + // Normally, two subsequent bytes describe a Unicode + // character. However a special technique (called a + // surrogate pair) is used for specifying character + // values larger than 0xFFFF. + // + // A surrogate pair consists of two pseudo-characters: + // high surrogate area (0xD800-0xDBFF) + // low surrogate area (0xDC00-0xDFFF) + // + // The following formulas are used for decoding + // and encoding characters using surrogate pairs: + // + // U = U' + 0x10000 (0x01 00 00 <= U <= 0x10 FF FF) + // U' = yyyyyyyyyyxxxxxxxxxx (0 <= U' <= 0x0F FF FF) + // W1 = 110110yyyyyyyyyy + // W2 = 110111xxxxxxxxxx + // + // where U is the character value, W1 is the high surrogate + // area, W2 is the low surrogate area. + + // Check for incomplete UTF-16 character. + if raw_unread < 2 { + if parser.eof { + return yaml_parser_set_reader_error(parser, + "incomplete UTF-16 character", + parser.offset, -1) + } + break inner + } + + // Get the character. + value = rune(parser.raw_buffer[parser.raw_buffer_pos+low]) + + (rune(parser.raw_buffer[parser.raw_buffer_pos+high]) << 8) + + // Check for unexpected low surrogate area. 
+ if value&0xFC00 == 0xDC00 { + return yaml_parser_set_reader_error(parser, + "unexpected low surrogate area", + parser.offset, int(value)) + } + + // Check for a high surrogate area. + if value&0xFC00 == 0xD800 { + width = 4 + + // Check for incomplete surrogate pair. + if raw_unread < 4 { + if parser.eof { + return yaml_parser_set_reader_error(parser, + "incomplete UTF-16 surrogate pair", + parser.offset, -1) + } + break inner + } + + // Get the next character. + value2 := rune(parser.raw_buffer[parser.raw_buffer_pos+low+2]) + + (rune(parser.raw_buffer[parser.raw_buffer_pos+high+2]) << 8) + + // Check for a low surrogate area. + if value2&0xFC00 != 0xDC00 { + return yaml_parser_set_reader_error(parser, + "expected low surrogate area", + parser.offset+2, int(value2)) + } + + // Generate the value of the surrogate pair. + value = 0x10000 + ((value & 0x3FF) << 10) + (value2 & 0x3FF) + } else { + width = 2 + } + + default: + panic("impossible") + } + + // Check if the character is in the allowed range: + // #x9 | #xA | #xD | [#x20-#x7E] (8 bit) + // | #x85 | [#xA0-#xD7FF] | [#xE000-#xFFFD] (16 bit) + // | [#x10000-#x10FFFF] (32 bit) + switch { + case value == 0x09: + case value == 0x0A: + case value == 0x0D: + case value >= 0x20 && value <= 0x7E: + case value == 0x85: + case value >= 0xA0 && value <= 0xD7FF: + case value >= 0xE000 && value <= 0xFFFD: + case value >= 0x10000 && value <= 0x10FFFF: + default: + return yaml_parser_set_reader_error(parser, + "control characters are not allowed", + parser.offset, int(value)) + } + + // Move the raw pointers. + parser.raw_buffer_pos += width + parser.offset += width + + // Finally put the character into the buffer. + if value <= 0x7F { + // 0000 0000-0000 007F . 0xxxxxxx + parser.buffer[buffer_len+0] = byte(value) + buffer_len += 1 + } else if value <= 0x7FF { + // 0000 0080-0000 07FF . 
110xxxxx 10xxxxxx + parser.buffer[buffer_len+0] = byte(0xC0 + (value >> 6)) + parser.buffer[buffer_len+1] = byte(0x80 + (value & 0x3F)) + buffer_len += 2 + } else if value <= 0xFFFF { + // 0000 0800-0000 FFFF . 1110xxxx 10xxxxxx 10xxxxxx + parser.buffer[buffer_len+0] = byte(0xE0 + (value >> 12)) + parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 6) & 0x3F)) + parser.buffer[buffer_len+2] = byte(0x80 + (value & 0x3F)) + buffer_len += 3 + } else { + // 0001 0000-0010 FFFF . 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx + parser.buffer[buffer_len+0] = byte(0xF0 + (value >> 18)) + parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 12) & 0x3F)) + parser.buffer[buffer_len+2] = byte(0x80 + ((value >> 6) & 0x3F)) + parser.buffer[buffer_len+3] = byte(0x80 + (value & 0x3F)) + buffer_len += 4 + } + + parser.unread++ + } + + // On EOF, put NUL into the buffer and return. + if parser.eof { + parser.buffer[buffer_len] = 0 + buffer_len++ + parser.unread++ + break + } + } + // [Go] Read the documentation of this function above. To return true, + // we need to have the given length in the buffer. Not doing that means + // every single check that calls this function to make sure the buffer + // has a given length is Go) panicking; or C) accessing invalid memory. + // This happens here due to the EOF above breaking early. + for buffer_len < length { + parser.buffer[buffer_len] = 0 + buffer_len++ + } + parser.buffer = parser.buffer[:buffer_len] + return true +} diff --git a/vendor/go.yaml.in/yaml/v3/resolve.go b/vendor/go.yaml.in/yaml/v3/resolve.go new file mode 100644 index 000000000..64ae88805 --- /dev/null +++ b/vendor/go.yaml.in/yaml/v3/resolve.go @@ -0,0 +1,326 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package yaml + +import ( + "encoding/base64" + "math" + "regexp" + "strconv" + "strings" + "time" +) + +type resolveMapItem struct { + value interface{} + tag string +} + +var resolveTable = make([]byte, 256) +var resolveMap = make(map[string]resolveMapItem) + +func init() { + t := resolveTable + t[int('+')] = 'S' // Sign + t[int('-')] = 'S' + for _, c := range "0123456789" { + t[int(c)] = 'D' // Digit + } + for _, c := range "yYnNtTfFoO~" { + t[int(c)] = 'M' // In map + } + t[int('.')] = '.' // Float (potentially in map) + + var resolveMapList = []struct { + v interface{} + tag string + l []string + }{ + {true, boolTag, []string{"true", "True", "TRUE"}}, + {false, boolTag, []string{"false", "False", "FALSE"}}, + {nil, nullTag, []string{"", "~", "null", "Null", "NULL"}}, + {math.NaN(), floatTag, []string{".nan", ".NaN", ".NAN"}}, + {math.Inf(+1), floatTag, []string{".inf", ".Inf", ".INF"}}, + {math.Inf(+1), floatTag, []string{"+.inf", "+.Inf", "+.INF"}}, + {math.Inf(-1), floatTag, []string{"-.inf", "-.Inf", "-.INF"}}, + {"<<", mergeTag, []string{"<<"}}, + } + + m := resolveMap + for _, item := range resolveMapList { + for _, s := range item.l { + m[s] = resolveMapItem{item.v, item.tag} + } + } +} + +const ( + nullTag = "!!null" + boolTag = "!!bool" + strTag = "!!str" + intTag = "!!int" + floatTag = "!!float" + timestampTag = "!!timestamp" + seqTag = "!!seq" + mapTag = "!!map" + binaryTag = "!!binary" + mergeTag = "!!merge" +) + +var longTags = make(map[string]string) +var shortTags = make(map[string]string) + +func init() { + for _, stag := 
range []string{nullTag, boolTag, strTag, intTag, floatTag, timestampTag, seqTag, mapTag, binaryTag, mergeTag} { + ltag := longTag(stag) + longTags[stag] = ltag + shortTags[ltag] = stag + } +} + +const longTagPrefix = "tag:yaml.org,2002:" + +func shortTag(tag string) string { + if strings.HasPrefix(tag, longTagPrefix) { + if stag, ok := shortTags[tag]; ok { + return stag + } + return "!!" + tag[len(longTagPrefix):] + } + return tag +} + +func longTag(tag string) string { + if strings.HasPrefix(tag, "!!") { + if ltag, ok := longTags[tag]; ok { + return ltag + } + return longTagPrefix + tag[2:] + } + return tag +} + +func resolvableTag(tag string) bool { + switch tag { + case "", strTag, boolTag, intTag, floatTag, nullTag, timestampTag: + return true + } + return false +} + +var yamlStyleFloat = regexp.MustCompile(`^[-+]?(\.[0-9]+|[0-9]+(\.[0-9]*)?)([eE][-+]?[0-9]+)?$`) + +func resolve(tag string, in string) (rtag string, out interface{}) { + tag = shortTag(tag) + if !resolvableTag(tag) { + return tag, in + } + + defer func() { + switch tag { + case "", rtag, strTag, binaryTag: + return + case floatTag: + if rtag == intTag { + switch v := out.(type) { + case int64: + rtag = floatTag + out = float64(v) + return + case int: + rtag = floatTag + out = float64(v) + return + } + } + } + failf("cannot decode %s `%s` as a %s", shortTag(rtag), in, shortTag(tag)) + }() + + // Any data is accepted as a !!str or !!binary. + // Otherwise, the prefix is enough of a hint about what it might be. + hint := byte('N') + if in != "" { + hint = resolveTable[in[0]] + } + if hint != 0 && tag != strTag && tag != binaryTag { + // Handle things we can lookup in a map. + if item, ok := resolveMap[in]; ok { + return item.tag, item.value + } + + // Base 60 floats are a bad idea, were dropped in YAML 1.2, and + // are purposefully unsupported here. They're still quoted on + // the way out for compatibility with other parser, though. 
+ + switch hint { + case 'M': + // We've already checked the map above. + + case '.': + // Not in the map, so maybe a normal float. + floatv, err := strconv.ParseFloat(in, 64) + if err == nil { + return floatTag, floatv + } + + case 'D', 'S': + // Int, float, or timestamp. + // Only try values as a timestamp if the value is unquoted or there's an explicit + // !!timestamp tag. + if tag == "" || tag == timestampTag { + t, ok := parseTimestamp(in) + if ok { + return timestampTag, t + } + } + + plain := strings.Replace(in, "_", "", -1) + intv, err := strconv.ParseInt(plain, 0, 64) + if err == nil { + if intv == int64(int(intv)) { + return intTag, int(intv) + } else { + return intTag, intv + } + } + uintv, err := strconv.ParseUint(plain, 0, 64) + if err == nil { + return intTag, uintv + } + if yamlStyleFloat.MatchString(plain) { + floatv, err := strconv.ParseFloat(plain, 64) + if err == nil { + return floatTag, floatv + } + } + if strings.HasPrefix(plain, "0b") { + intv, err := strconv.ParseInt(plain[2:], 2, 64) + if err == nil { + if intv == int64(int(intv)) { + return intTag, int(intv) + } else { + return intTag, intv + } + } + uintv, err := strconv.ParseUint(plain[2:], 2, 64) + if err == nil { + return intTag, uintv + } + } else if strings.HasPrefix(plain, "-0b") { + intv, err := strconv.ParseInt("-"+plain[3:], 2, 64) + if err == nil { + if true || intv == int64(int(intv)) { + return intTag, int(intv) + } else { + return intTag, intv + } + } + } + // Octals as introduced in version 1.2 of the spec. + // Octals from the 1.1 spec, spelled as 0777, are still + // decoded by default in v3 as well for compatibility. + // May be dropped in v4 depending on how usage evolves. 
+ if strings.HasPrefix(plain, "0o") { + intv, err := strconv.ParseInt(plain[2:], 8, 64) + if err == nil { + if intv == int64(int(intv)) { + return intTag, int(intv) + } else { + return intTag, intv + } + } + uintv, err := strconv.ParseUint(plain[2:], 8, 64) + if err == nil { + return intTag, uintv + } + } else if strings.HasPrefix(plain, "-0o") { + intv, err := strconv.ParseInt("-"+plain[3:], 8, 64) + if err == nil { + if true || intv == int64(int(intv)) { + return intTag, int(intv) + } else { + return intTag, intv + } + } + } + default: + panic("internal error: missing handler for resolver table: " + string(rune(hint)) + " (with " + in + ")") + } + } + return strTag, in +} + +// encodeBase64 encodes s as base64 that is broken up into multiple lines +// as appropriate for the resulting length. +func encodeBase64(s string) string { + const lineLen = 70 + encLen := base64.StdEncoding.EncodedLen(len(s)) + lines := encLen/lineLen + 1 + buf := make([]byte, encLen*2+lines) + in := buf[0:encLen] + out := buf[encLen:] + base64.StdEncoding.Encode(in, []byte(s)) + k := 0 + for i := 0; i < len(in); i += lineLen { + j := i + lineLen + if j > len(in) { + j = len(in) + } + k += copy(out[k:], in[i:j]) + if lines > 1 { + out[k] = '\n' + k++ + } + } + return string(out[:k]) +} + +// This is a subset of the formats allowed by the regular expression +// defined at http://yaml.org/type/timestamp.html. +var allowedTimestampFormats = []string{ + "2006-1-2T15:4:5.999999999Z07:00", // RCF3339Nano with short date fields. + "2006-1-2t15:4:5.999999999Z07:00", // RFC3339Nano with short date fields and lower-case "t". + "2006-1-2 15:4:5.999999999", // space separated with no time zone + "2006-1-2", // date only + // Notable exception: time.Parse cannot handle: "2001-12-14 21:59:43.10 -5" + // from the set of examples. +} + +// parseTimestamp parses s as a timestamp string and +// returns the timestamp and reports whether it succeeded. 
+// Timestamp formats are defined at http://yaml.org/type/timestamp.html +func parseTimestamp(s string) (time.Time, bool) { + // TODO write code to check all the formats supported by + // http://yaml.org/type/timestamp.html instead of using time.Parse. + + // Quick check: all date formats start with YYYY-. + i := 0 + for ; i < len(s); i++ { + if c := s[i]; c < '0' || c > '9' { + break + } + } + if i != 4 || i == len(s) || s[i] != '-' { + return time.Time{}, false + } + for _, format := range allowedTimestampFormats { + if t, err := time.Parse(format, s); err == nil { + return t, true + } + } + return time.Time{}, false +} diff --git a/vendor/go.yaml.in/yaml/v3/scannerc.go b/vendor/go.yaml.in/yaml/v3/scannerc.go new file mode 100644 index 000000000..30b1f0892 --- /dev/null +++ b/vendor/go.yaml.in/yaml/v3/scannerc.go @@ -0,0 +1,3040 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// Copyright (c) 2006-2010 Kirill Simonov +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of +// this software and associated documentation files (the "Software"), to deal in +// the Software without restriction, including without limitation the rights to +// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +// of the Software, and to permit persons to whom the Software is furnished to do +// so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +package yaml + +import ( + "bytes" + "fmt" +) + +// Introduction +// ************ +// +// The following notes assume that you are familiar with the YAML specification +// (http://yaml.org/spec/1.2/spec.html). We mostly follow it, although in +// some cases we are less restrictive that it requires. +// +// The process of transforming a YAML stream into a sequence of events is +// divided on two steps: Scanning and Parsing. +// +// The Scanner transforms the input stream into a sequence of tokens, while the +// parser transform the sequence of tokens produced by the Scanner into a +// sequence of parsing events. +// +// The Scanner is rather clever and complicated. The Parser, on the contrary, +// is a straightforward implementation of a recursive-descendant parser (or, +// LL(1) parser, as it is usually called). +// +// Actually there are two issues of Scanning that might be called "clever", the +// rest is quite straightforward. The issues are "block collection start" and +// "simple keys". Both issues are explained below in details. +// +// Here the Scanning step is explained and implemented. We start with the list +// of all the tokens produced by the Scanner together with short descriptions. +// +// Now, tokens: +// +// STREAM-START(encoding) # The stream start. +// STREAM-END # The stream end. +// VERSION-DIRECTIVE(major,minor) # The '%YAML' directive. +// TAG-DIRECTIVE(handle,prefix) # The '%TAG' directive. +// DOCUMENT-START # '---' +// DOCUMENT-END # '...' +// BLOCK-SEQUENCE-START # Indentation increase denoting a block +// BLOCK-MAPPING-START # sequence or a block mapping. +// BLOCK-END # Indentation decrease. 
+// FLOW-SEQUENCE-START # '[' +// FLOW-SEQUENCE-END # ']' +// BLOCK-SEQUENCE-START # '{' +// BLOCK-SEQUENCE-END # '}' +// BLOCK-ENTRY # '-' +// FLOW-ENTRY # ',' +// KEY # '?' or nothing (simple keys). +// VALUE # ':' +// ALIAS(anchor) # '*anchor' +// ANCHOR(anchor) # '&anchor' +// TAG(handle,suffix) # '!handle!suffix' +// SCALAR(value,style) # A scalar. +// +// The following two tokens are "virtual" tokens denoting the beginning and the +// end of the stream: +// +// STREAM-START(encoding) +// STREAM-END +// +// We pass the information about the input stream encoding with the +// STREAM-START token. +// +// The next two tokens are responsible for tags: +// +// VERSION-DIRECTIVE(major,minor) +// TAG-DIRECTIVE(handle,prefix) +// +// Example: +// +// %YAML 1.1 +// %TAG ! !foo +// %TAG !yaml! tag:yaml.org,2002: +// --- +// +// The correspoding sequence of tokens: +// +// STREAM-START(utf-8) +// VERSION-DIRECTIVE(1,1) +// TAG-DIRECTIVE("!","!foo") +// TAG-DIRECTIVE("!yaml","tag:yaml.org,2002:") +// DOCUMENT-START +// STREAM-END +// +// Note that the VERSION-DIRECTIVE and TAG-DIRECTIVE tokens occupy a whole +// line. +// +// The document start and end indicators are represented by: +// +// DOCUMENT-START +// DOCUMENT-END +// +// Note that if a YAML stream contains an implicit document (without '---' +// and '...' indicators), no DOCUMENT-START and DOCUMENT-END tokens will be +// produced. +// +// In the following examples, we present whole documents together with the +// produced tokens. +// +// 1. An implicit document: +// +// 'a scalar' +// +// Tokens: +// +// STREAM-START(utf-8) +// SCALAR("a scalar",single-quoted) +// STREAM-END +// +// 2. An explicit document: +// +// --- +// 'a scalar' +// ... +// +// Tokens: +// +// STREAM-START(utf-8) +// DOCUMENT-START +// SCALAR("a scalar",single-quoted) +// DOCUMENT-END +// STREAM-END +// +// 3. 
Several documents in a stream: +// +// 'a scalar' +// --- +// 'another scalar' +// --- +// 'yet another scalar' +// +// Tokens: +// +// STREAM-START(utf-8) +// SCALAR("a scalar",single-quoted) +// DOCUMENT-START +// SCALAR("another scalar",single-quoted) +// DOCUMENT-START +// SCALAR("yet another scalar",single-quoted) +// STREAM-END +// +// We have already introduced the SCALAR token above. The following tokens are +// used to describe aliases, anchors, tag, and scalars: +// +// ALIAS(anchor) +// ANCHOR(anchor) +// TAG(handle,suffix) +// SCALAR(value,style) +// +// The following series of examples illustrate the usage of these tokens: +// +// 1. A recursive sequence: +// +// &A [ *A ] +// +// Tokens: +// +// STREAM-START(utf-8) +// ANCHOR("A") +// FLOW-SEQUENCE-START +// ALIAS("A") +// FLOW-SEQUENCE-END +// STREAM-END +// +// 2. A tagged scalar: +// +// !!float "3.14" # A good approximation. +// +// Tokens: +// +// STREAM-START(utf-8) +// TAG("!!","float") +// SCALAR("3.14",double-quoted) +// STREAM-END +// +// 3. Various scalar styles: +// +// --- # Implicit empty plain scalars do not produce tokens. +// --- a plain scalar +// --- 'a single-quoted scalar' +// --- "a double-quoted scalar" +// --- |- +// a literal scalar +// --- >- +// a folded +// scalar +// +// Tokens: +// +// STREAM-START(utf-8) +// DOCUMENT-START +// DOCUMENT-START +// SCALAR("a plain scalar",plain) +// DOCUMENT-START +// SCALAR("a single-quoted scalar",single-quoted) +// DOCUMENT-START +// SCALAR("a double-quoted scalar",double-quoted) +// DOCUMENT-START +// SCALAR("a literal scalar",literal) +// DOCUMENT-START +// SCALAR("a folded scalar",folded) +// STREAM-END +// +// Now it's time to review collection-related tokens. 
We will start with +// flow collections: +// +// FLOW-SEQUENCE-START +// FLOW-SEQUENCE-END +// FLOW-MAPPING-START +// FLOW-MAPPING-END +// FLOW-ENTRY +// KEY +// VALUE +// +// The tokens FLOW-SEQUENCE-START, FLOW-SEQUENCE-END, FLOW-MAPPING-START, and +// FLOW-MAPPING-END represent the indicators '[', ']', '{', and '}' +// correspondingly. FLOW-ENTRY represent the ',' indicator. Finally the +// indicators '?' and ':', which are used for denoting mapping keys and values, +// are represented by the KEY and VALUE tokens. +// +// The following examples show flow collections: +// +// 1. A flow sequence: +// +// [item 1, item 2, item 3] +// +// Tokens: +// +// STREAM-START(utf-8) +// FLOW-SEQUENCE-START +// SCALAR("item 1",plain) +// FLOW-ENTRY +// SCALAR("item 2",plain) +// FLOW-ENTRY +// SCALAR("item 3",plain) +// FLOW-SEQUENCE-END +// STREAM-END +// +// 2. A flow mapping: +// +// { +// a simple key: a value, # Note that the KEY token is produced. +// ? a complex key: another value, +// } +// +// Tokens: +// +// STREAM-START(utf-8) +// FLOW-MAPPING-START +// KEY +// SCALAR("a simple key",plain) +// VALUE +// SCALAR("a value",plain) +// FLOW-ENTRY +// KEY +// SCALAR("a complex key",plain) +// VALUE +// SCALAR("another value",plain) +// FLOW-ENTRY +// FLOW-MAPPING-END +// STREAM-END +// +// A simple key is a key which is not denoted by the '?' indicator. Note that +// the Scanner still produce the KEY token whenever it encounters a simple key. +// +// For scanning block collections, the following tokens are used (note that we +// repeat KEY and VALUE here): +// +// BLOCK-SEQUENCE-START +// BLOCK-MAPPING-START +// BLOCK-END +// BLOCK-ENTRY +// KEY +// VALUE +// +// The tokens BLOCK-SEQUENCE-START and BLOCK-MAPPING-START denote indentation +// increase that precedes a block collection (cf. the INDENT token in Python). +// The token BLOCK-END denote indentation decrease that ends a block collection +// (cf. the DEDENT token in Python). 
However YAML has some syntax pecularities +// that makes detections of these tokens more complex. +// +// The tokens BLOCK-ENTRY, KEY, and VALUE are used to represent the indicators +// '-', '?', and ':' correspondingly. +// +// The following examples show how the tokens BLOCK-SEQUENCE-START, +// BLOCK-MAPPING-START, and BLOCK-END are emitted by the Scanner: +// +// 1. Block sequences: +// +// - item 1 +// - item 2 +// - +// - item 3.1 +// - item 3.2 +// - +// key 1: value 1 +// key 2: value 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-ENTRY +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 3.1",plain) +// BLOCK-ENTRY +// SCALAR("item 3.2",plain) +// BLOCK-END +// BLOCK-ENTRY +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// 2. Block mappings: +// +// a simple key: a value # The KEY token is produced here. +// ? a complex key +// : another value +// a mapping: +// key 1: value 1 +// key 2: value 2 +// a sequence: +// - item 1 +// - item 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-MAPPING-START +// KEY +// SCALAR("a simple key",plain) +// VALUE +// SCALAR("a value",plain) +// KEY +// SCALAR("a complex key",plain) +// VALUE +// SCALAR("another value",plain) +// KEY +// SCALAR("a mapping",plain) +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// KEY +// SCALAR("a sequence",plain) +// VALUE +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// YAML does not always require to start a new block collection from a new +// line. 
If the current line contains only '-', '?', and ':' indicators, a new +// block collection may start at the current line. The following examples +// illustrate this case: +// +// 1. Collections in a sequence: +// +// - - item 1 +// - item 2 +// - key 1: value 1 +// key 2: value 2 +// - ? complex key +// : complex value +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// BLOCK-ENTRY +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// BLOCK-ENTRY +// BLOCK-MAPPING-START +// KEY +// SCALAR("complex key") +// VALUE +// SCALAR("complex value") +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// 2. Collections in a mapping: +// +// ? a sequence +// : - item 1 +// - item 2 +// ? a mapping +// : key 1: value 1 +// key 2: value 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-MAPPING-START +// KEY +// SCALAR("a sequence",plain) +// VALUE +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// KEY +// SCALAR("a mapping",plain) +// VALUE +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// YAML also permits non-indented sequences if they are included into a block +// mapping. In this case, the token BLOCK-SEQUENCE-START is not produced: +// +// key: +// - item 1 # BLOCK-SEQUENCE-START is NOT produced here. 
+// - item 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-MAPPING-START +// KEY +// SCALAR("key",plain) +// VALUE +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// + +// Ensure that the buffer contains the required number of characters. +// Return true on success, false on failure (reader error or memory error). +func cache(parser *yaml_parser_t, length int) bool { + // [Go] This was inlined: !cache(A, B) -> unread < B && !update(A, B) + return parser.unread >= length || yaml_parser_update_buffer(parser, length) +} + +// Advance the buffer pointer. +func skip(parser *yaml_parser_t) { + if !is_blank(parser.buffer, parser.buffer_pos) { + parser.newlines = 0 + } + parser.mark.index++ + parser.mark.column++ + parser.unread-- + parser.buffer_pos += width(parser.buffer[parser.buffer_pos]) +} + +func skip_line(parser *yaml_parser_t) { + if is_crlf(parser.buffer, parser.buffer_pos) { + parser.mark.index += 2 + parser.mark.column = 0 + parser.mark.line++ + parser.unread -= 2 + parser.buffer_pos += 2 + parser.newlines++ + } else if is_break(parser.buffer, parser.buffer_pos) { + parser.mark.index++ + parser.mark.column = 0 + parser.mark.line++ + parser.unread-- + parser.buffer_pos += width(parser.buffer[parser.buffer_pos]) + parser.newlines++ + } +} + +// Copy a character to a string buffer and advance pointers. +func read(parser *yaml_parser_t, s []byte) []byte { + if !is_blank(parser.buffer, parser.buffer_pos) { + parser.newlines = 0 + } + w := width(parser.buffer[parser.buffer_pos]) + if w == 0 { + panic("invalid character sequence") + } + if len(s) == 0 { + s = make([]byte, 0, 32) + } + if w == 1 && len(s)+w <= cap(s) { + s = s[:len(s)+1] + s[len(s)-1] = parser.buffer[parser.buffer_pos] + parser.buffer_pos++ + } else { + s = append(s, parser.buffer[parser.buffer_pos:parser.buffer_pos+w]...) 
+ parser.buffer_pos += w + } + parser.mark.index++ + parser.mark.column++ + parser.unread-- + return s +} + +// Copy a line break character to a string buffer and advance pointers. +func read_line(parser *yaml_parser_t, s []byte) []byte { + buf := parser.buffer + pos := parser.buffer_pos + switch { + case buf[pos] == '\r' && buf[pos+1] == '\n': + // CR LF . LF + s = append(s, '\n') + parser.buffer_pos += 2 + parser.mark.index++ + parser.unread-- + case buf[pos] == '\r' || buf[pos] == '\n': + // CR|LF . LF + s = append(s, '\n') + parser.buffer_pos += 1 + case buf[pos] == '\xC2' && buf[pos+1] == '\x85': + // NEL . LF + s = append(s, '\n') + parser.buffer_pos += 2 + case buf[pos] == '\xE2' && buf[pos+1] == '\x80' && (buf[pos+2] == '\xA8' || buf[pos+2] == '\xA9'): + // LS|PS . LS|PS + s = append(s, buf[parser.buffer_pos:pos+3]...) + parser.buffer_pos += 3 + default: + return s + } + parser.mark.index++ + parser.mark.column = 0 + parser.mark.line++ + parser.unread-- + parser.newlines++ + return s +} + +// Get the next token. +func yaml_parser_scan(parser *yaml_parser_t, token *yaml_token_t) bool { + // Erase the token object. + *token = yaml_token_t{} // [Go] Is this necessary? + + // No tokens after STREAM-END or error. + if parser.stream_end_produced || parser.error != yaml_NO_ERROR { + return true + } + + // Ensure that the tokens queue contains enough tokens. + if !parser.token_available { + if !yaml_parser_fetch_more_tokens(parser) { + return false + } + } + + // Fetch the next token from the queue. + *token = parser.tokens[parser.tokens_head] + parser.tokens_head++ + parser.tokens_parsed++ + parser.token_available = false + + if token.typ == yaml_STREAM_END_TOKEN { + parser.stream_end_produced = true + } + return true +} + +// Set the scanner error and return false. 
+func yaml_parser_set_scanner_error(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string) bool { + parser.error = yaml_SCANNER_ERROR + parser.context = context + parser.context_mark = context_mark + parser.problem = problem + parser.problem_mark = parser.mark + return false +} + +func yaml_parser_set_scanner_tag_error(parser *yaml_parser_t, directive bool, context_mark yaml_mark_t, problem string) bool { + context := "while parsing a tag" + if directive { + context = "while parsing a %TAG directive" + } + return yaml_parser_set_scanner_error(parser, context, context_mark, problem) +} + +func trace(args ...interface{}) func() { + pargs := append([]interface{}{"+++"}, args...) + fmt.Println(pargs...) + pargs = append([]interface{}{"---"}, args...) + return func() { fmt.Println(pargs...) } +} + +// Ensure that the tokens queue contains at least one token which can be +// returned to the Parser. +func yaml_parser_fetch_more_tokens(parser *yaml_parser_t) bool { + // While we need more tokens to fetch, do it. + for { + // [Go] The comment parsing logic requires a lookahead of two tokens + // so that foot comments may be parsed in time of associating them + // with the tokens that are parsed before them, and also for line + // comments to be transformed into head comments in some edge cases. + if parser.tokens_head < len(parser.tokens)-2 { + // If a potential simple key is at the head position, we need to fetch + // the next token to disambiguate it. + head_tok_idx, ok := parser.simple_keys_by_tok[parser.tokens_parsed] + if !ok { + break + } else if valid, ok := yaml_simple_key_is_valid(parser, &parser.simple_keys[head_tok_idx]); !ok { + return false + } else if !valid { + break + } + } + // Fetch the next token. + if !yaml_parser_fetch_next_token(parser) { + return false + } + } + + parser.token_available = true + return true +} + +// The dispatcher for token fetchers. 
+func yaml_parser_fetch_next_token(parser *yaml_parser_t) (ok bool) { + // Ensure that the buffer is initialized. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // Check if we just started scanning. Fetch STREAM-START then. + if !parser.stream_start_produced { + return yaml_parser_fetch_stream_start(parser) + } + + scan_mark := parser.mark + + // Eat whitespaces and comments until we reach the next token. + if !yaml_parser_scan_to_next_token(parser) { + return false + } + + // [Go] While unrolling indents, transform the head comments of prior + // indentation levels observed after scan_start into foot comments at + // the respective indexes. + + // Check the indentation level against the current column. + if !yaml_parser_unroll_indent(parser, parser.mark.column, scan_mark) { + return false + } + + // Ensure that the buffer contains at least 4 characters. 4 is the length + // of the longest indicators ('--- ' and '... '). + if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { + return false + } + + // Is it the end of the stream? + if is_z(parser.buffer, parser.buffer_pos) { + return yaml_parser_fetch_stream_end(parser) + } + + // Is it a directive? + if parser.mark.column == 0 && parser.buffer[parser.buffer_pos] == '%' { + return yaml_parser_fetch_directive(parser) + } + + buf := parser.buffer + pos := parser.buffer_pos + + // Is it the document start indicator? + if parser.mark.column == 0 && buf[pos] == '-' && buf[pos+1] == '-' && buf[pos+2] == '-' && is_blankz(buf, pos+3) { + return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_START_TOKEN) + } + + // Is it the document end indicator? + if parser.mark.column == 0 && buf[pos] == '.' && buf[pos+1] == '.' && buf[pos+2] == '.' 
&& is_blankz(buf, pos+3) { + return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_END_TOKEN) + } + + comment_mark := parser.mark + if len(parser.tokens) > 0 && (parser.flow_level == 0 && buf[pos] == ':' || parser.flow_level > 0 && buf[pos] == ',') { + // Associate any following comments with the prior token. + comment_mark = parser.tokens[len(parser.tokens)-1].start_mark + } + defer func() { + if !ok { + return + } + if len(parser.tokens) > 0 && parser.tokens[len(parser.tokens)-1].typ == yaml_BLOCK_ENTRY_TOKEN { + // Sequence indicators alone have no line comments. It becomes + // a head comment for whatever follows. + return + } + if !yaml_parser_scan_line_comment(parser, comment_mark) { + ok = false + return + } + }() + + // Is it the flow sequence start indicator? + if buf[pos] == '[' { + return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_SEQUENCE_START_TOKEN) + } + + // Is it the flow mapping start indicator? + if parser.buffer[parser.buffer_pos] == '{' { + return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_MAPPING_START_TOKEN) + } + + // Is it the flow sequence end indicator? + if parser.buffer[parser.buffer_pos] == ']' { + return yaml_parser_fetch_flow_collection_end(parser, + yaml_FLOW_SEQUENCE_END_TOKEN) + } + + // Is it the flow mapping end indicator? + if parser.buffer[parser.buffer_pos] == '}' { + return yaml_parser_fetch_flow_collection_end(parser, + yaml_FLOW_MAPPING_END_TOKEN) + } + + // Is it the flow entry indicator? + if parser.buffer[parser.buffer_pos] == ',' { + return yaml_parser_fetch_flow_entry(parser) + } + + // Is it the block entry indicator? + if parser.buffer[parser.buffer_pos] == '-' && is_blankz(parser.buffer, parser.buffer_pos+1) { + return yaml_parser_fetch_block_entry(parser) + } + + // Is it the key indicator? + if parser.buffer[parser.buffer_pos] == '?' 
&& (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) { + return yaml_parser_fetch_key(parser) + } + + // Is it the value indicator? + if parser.buffer[parser.buffer_pos] == ':' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) { + return yaml_parser_fetch_value(parser) + } + + // Is it an alias? + if parser.buffer[parser.buffer_pos] == '*' { + return yaml_parser_fetch_anchor(parser, yaml_ALIAS_TOKEN) + } + + // Is it an anchor? + if parser.buffer[parser.buffer_pos] == '&' { + return yaml_parser_fetch_anchor(parser, yaml_ANCHOR_TOKEN) + } + + // Is it a tag? + if parser.buffer[parser.buffer_pos] == '!' { + return yaml_parser_fetch_tag(parser) + } + + // Is it a literal scalar? + if parser.buffer[parser.buffer_pos] == '|' && parser.flow_level == 0 { + return yaml_parser_fetch_block_scalar(parser, true) + } + + // Is it a folded scalar? + if parser.buffer[parser.buffer_pos] == '>' && parser.flow_level == 0 { + return yaml_parser_fetch_block_scalar(parser, false) + } + + // Is it a single-quoted scalar? + if parser.buffer[parser.buffer_pos] == '\'' { + return yaml_parser_fetch_flow_scalar(parser, true) + } + + // Is it a double-quoted scalar? + if parser.buffer[parser.buffer_pos] == '"' { + return yaml_parser_fetch_flow_scalar(parser, false) + } + + // Is it a plain scalar? + // + // A plain scalar may start with any non-blank characters except + // + // '-', '?', ':', ',', '[', ']', '{', '}', + // '#', '&', '*', '!', '|', '>', '\'', '\"', + // '%', '@', '`'. + // + // In the block context (and, for the '-' indicator, in the flow context + // too), it may also start with the characters + // + // '-', '?', ':' + // + // if it is followed by a non-space character. + // + // The last rule is more restrictive than the specification requires. + // [Go] TODO Make this logic more reasonable. 
+ //switch parser.buffer[parser.buffer_pos] { + //case '-', '?', ':', ',', '?', '-', ',', ':', ']', '[', '}', '{', '&', '#', '!', '*', '>', '|', '"', '\'', '@', '%', '-', '`': + //} + if !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '-' || + parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':' || + parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '[' || + parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' || + parser.buffer[parser.buffer_pos] == '}' || parser.buffer[parser.buffer_pos] == '#' || + parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '*' || + parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '|' || + parser.buffer[parser.buffer_pos] == '>' || parser.buffer[parser.buffer_pos] == '\'' || + parser.buffer[parser.buffer_pos] == '"' || parser.buffer[parser.buffer_pos] == '%' || + parser.buffer[parser.buffer_pos] == '@' || parser.buffer[parser.buffer_pos] == '`') || + (parser.buffer[parser.buffer_pos] == '-' && !is_blank(parser.buffer, parser.buffer_pos+1)) || + (parser.flow_level == 0 && + (parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':') && + !is_blankz(parser.buffer, parser.buffer_pos+1)) { + return yaml_parser_fetch_plain_scalar(parser) + } + + // If we don't determine the token type so far, it is an error. + return yaml_parser_set_scanner_error(parser, + "while scanning for the next token", parser.mark, + "found character that cannot start any token") +} + +func yaml_simple_key_is_valid(parser *yaml_parser_t, simple_key *yaml_simple_key_t) (valid, ok bool) { + if !simple_key.possible { + return false, true + } + + // The 1.2 specification says: + // + // "If the ? indicator is omitted, parsing needs to see past the + // implicit key to recognize it as such. 
To limit the amount of + // lookahead required, the “:” indicator must appear at most 1024 + // Unicode characters beyond the start of the key. In addition, the key + // is restricted to a single line." + // + if simple_key.mark.line < parser.mark.line || simple_key.mark.index+1024 < parser.mark.index { + // Check if the potential simple key to be removed is required. + if simple_key.required { + return false, yaml_parser_set_scanner_error(parser, + "while scanning a simple key", simple_key.mark, + "could not find expected ':'") + } + simple_key.possible = false + return false, true + } + return true, true +} + +// Check if a simple key may start at the current position and add it if +// needed. +func yaml_parser_save_simple_key(parser *yaml_parser_t) bool { + // A simple key is required at the current position if the scanner is in + // the block context and the current column coincides with the indentation + // level. + + required := parser.flow_level == 0 && parser.indent == parser.mark.column + + // + // If the current position may start a simple key, save it. + // + if parser.simple_key_allowed { + simple_key := yaml_simple_key_t{ + possible: true, + required: required, + token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head), + mark: parser.mark, + } + + if !yaml_parser_remove_simple_key(parser) { + return false + } + parser.simple_keys[len(parser.simple_keys)-1] = simple_key + parser.simple_keys_by_tok[simple_key.token_number] = len(parser.simple_keys) - 1 + } + return true +} + +// Remove a potential simple key at the current flow level. +func yaml_parser_remove_simple_key(parser *yaml_parser_t) bool { + i := len(parser.simple_keys) - 1 + if parser.simple_keys[i].possible { + // If the key is required, it is an error. + if parser.simple_keys[i].required { + return yaml_parser_set_scanner_error(parser, + "while scanning a simple key", parser.simple_keys[i].mark, + "could not find expected ':'") + } + // Remove the key from the stack. 
+ parser.simple_keys[i].possible = false + delete(parser.simple_keys_by_tok, parser.simple_keys[i].token_number) + } + return true +} + +// max_flow_level limits the flow_level +const max_flow_level = 10000 + +// Increase the flow level and resize the simple key list if needed. +func yaml_parser_increase_flow_level(parser *yaml_parser_t) bool { + // Reset the simple key on the next level. + parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{ + possible: false, + required: false, + token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head), + mark: parser.mark, + }) + + // Increase the flow level. + parser.flow_level++ + if parser.flow_level > max_flow_level { + return yaml_parser_set_scanner_error(parser, + "while increasing flow level", parser.simple_keys[len(parser.simple_keys)-1].mark, + fmt.Sprintf("exceeded max depth of %d", max_flow_level)) + } + return true +} + +// Decrease the flow level. +func yaml_parser_decrease_flow_level(parser *yaml_parser_t) bool { + if parser.flow_level > 0 { + parser.flow_level-- + last := len(parser.simple_keys) - 1 + delete(parser.simple_keys_by_tok, parser.simple_keys[last].token_number) + parser.simple_keys = parser.simple_keys[:last] + } + return true +} + +// max_indents limits the indents stack size +const max_indents = 10000 + +// Push the current indentation level to the stack and set the new level +// the current column is greater than the indentation level. In this case, +// append or insert the specified token into the token queue. +func yaml_parser_roll_indent(parser *yaml_parser_t, column, number int, typ yaml_token_type_t, mark yaml_mark_t) bool { + // In the flow context, do nothing. + if parser.flow_level > 0 { + return true + } + + if parser.indent < column { + // Push the current indentation level to the stack and set the new + // indentation level. 
+ parser.indents = append(parser.indents, parser.indent) + parser.indent = column + if len(parser.indents) > max_indents { + return yaml_parser_set_scanner_error(parser, + "while increasing indent level", parser.simple_keys[len(parser.simple_keys)-1].mark, + fmt.Sprintf("exceeded max depth of %d", max_indents)) + } + + // Create a token and insert it into the queue. + token := yaml_token_t{ + typ: typ, + start_mark: mark, + end_mark: mark, + } + if number > -1 { + number -= parser.tokens_parsed + } + yaml_insert_token(parser, number, &token) + } + return true +} + +// Pop indentation levels from the indents stack until the current level +// becomes less or equal to the column. For each indentation level, append +// the BLOCK-END token. +func yaml_parser_unroll_indent(parser *yaml_parser_t, column int, scan_mark yaml_mark_t) bool { + // In the flow context, do nothing. + if parser.flow_level > 0 { + return true + } + + block_mark := scan_mark + block_mark.index-- + + // Loop through the indentation levels in the stack. + for parser.indent > column { + + // [Go] Reposition the end token before potential following + // foot comments of parent blocks. For that, search + // backwards for recent comments that were at the same + // indent as the block that is ending now. + stop_index := block_mark.index + for i := len(parser.comments) - 1; i >= 0; i-- { + comment := &parser.comments[i] + + if comment.end_mark.index < stop_index { + // Don't go back beyond the start of the comment/whitespace scan, unless column < 0. + // If requested indent column is < 0, then the document is over and everything else + // is a foot anyway. + break + } + if comment.start_mark.column == parser.indent+1 { + // This is a good match. But maybe there's a former comment + // at that same indent level, so keep searching. 
+ block_mark = comment.start_mark + } + + // While the end of the former comment matches with + // the start of the following one, we know there's + // nothing in between and scanning is still safe. + stop_index = comment.scan_mark.index + } + + // Create a token and append it to the queue. + token := yaml_token_t{ + typ: yaml_BLOCK_END_TOKEN, + start_mark: block_mark, + end_mark: block_mark, + } + yaml_insert_token(parser, -1, &token) + + // Pop the indentation level. + parser.indent = parser.indents[len(parser.indents)-1] + parser.indents = parser.indents[:len(parser.indents)-1] + } + return true +} + +// Initialize the scanner and produce the STREAM-START token. +func yaml_parser_fetch_stream_start(parser *yaml_parser_t) bool { + + // Set the initial indentation. + parser.indent = -1 + + // Initialize the simple key stack. + parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{}) + + parser.simple_keys_by_tok = make(map[int]int) + + // A simple key is allowed at the beginning of the stream. + parser.simple_key_allowed = true + + // We have started. + parser.stream_start_produced = true + + // Create the STREAM-START token and append it to the queue. + token := yaml_token_t{ + typ: yaml_STREAM_START_TOKEN, + start_mark: parser.mark, + end_mark: parser.mark, + encoding: parser.encoding, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the STREAM-END token and shut down the scanner. +func yaml_parser_fetch_stream_end(parser *yaml_parser_t) bool { + + // Force new line. + if parser.mark.column != 0 { + parser.mark.column = 0 + parser.mark.line++ + } + + // Reset the indentation level. + if !yaml_parser_unroll_indent(parser, -1, parser.mark) { + return false + } + + // Reset simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + parser.simple_key_allowed = false + + // Create the STREAM-END token and append it to the queue. 
+ token := yaml_token_t{ + typ: yaml_STREAM_END_TOKEN, + start_mark: parser.mark, + end_mark: parser.mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce a VERSION-DIRECTIVE or TAG-DIRECTIVE token. +func yaml_parser_fetch_directive(parser *yaml_parser_t) bool { + // Reset the indentation level. + if !yaml_parser_unroll_indent(parser, -1, parser.mark) { + return false + } + + // Reset simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + parser.simple_key_allowed = false + + // Create the YAML-DIRECTIVE or TAG-DIRECTIVE token. + token := yaml_token_t{} + if !yaml_parser_scan_directive(parser, &token) { + return false + } + // Append the token to the queue. + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the DOCUMENT-START or DOCUMENT-END token. +func yaml_parser_fetch_document_indicator(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // Reset the indentation level. + if !yaml_parser_unroll_indent(parser, -1, parser.mark) { + return false + } + + // Reset simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + parser.simple_key_allowed = false + + // Consume the token. + start_mark := parser.mark + + skip(parser) + skip(parser) + skip(parser) + + end_mark := parser.mark + + // Create the DOCUMENT-START or DOCUMENT-END token. + token := yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + } + // Append the token to the queue. + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the FLOW-SEQUENCE-START or FLOW-MAPPING-START token. +func yaml_parser_fetch_flow_collection_start(parser *yaml_parser_t, typ yaml_token_type_t) bool { + + // The indicators '[' and '{' may start a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // Increase the flow level. + if !yaml_parser_increase_flow_level(parser) { + return false + } + + // A simple key may follow the indicators '[' and '{'. 
+ parser.simple_key_allowed = true + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the FLOW-SEQUENCE-START of FLOW-MAPPING-START token. + token := yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + } + // Append the token to the queue. + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the FLOW-SEQUENCE-END or FLOW-MAPPING-END token. +func yaml_parser_fetch_flow_collection_end(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // Reset any potential simple key on the current flow level. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // Decrease the flow level. + if !yaml_parser_decrease_flow_level(parser) { + return false + } + + // No simple keys after the indicators ']' and '}'. + parser.simple_key_allowed = false + + // Consume the token. + + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the FLOW-SEQUENCE-END of FLOW-MAPPING-END token. + token := yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + } + // Append the token to the queue. + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the FLOW-ENTRY token. +func yaml_parser_fetch_flow_entry(parser *yaml_parser_t) bool { + // Reset any potential simple keys on the current flow level. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // Simple keys are allowed after ','. + parser.simple_key_allowed = true + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the FLOW-ENTRY token and append it to the queue. + token := yaml_token_t{ + typ: yaml_FLOW_ENTRY_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the BLOCK-ENTRY token. +func yaml_parser_fetch_block_entry(parser *yaml_parser_t) bool { + // Check if the scanner is in the block context. 
+ if parser.flow_level == 0 { + // Check if we are allowed to start a new entry. + if !parser.simple_key_allowed { + return yaml_parser_set_scanner_error(parser, "", parser.mark, + "block sequence entries are not allowed in this context") + } + // Add the BLOCK-SEQUENCE-START token if needed. + if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_SEQUENCE_START_TOKEN, parser.mark) { + return false + } + } else { + // It is an error for the '-' indicator to occur in the flow context, + // but we let the Parser detect and report about it because the Parser + // is able to point to the context. + } + + // Reset any potential simple keys on the current flow level. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // Simple keys are allowed after '-'. + parser.simple_key_allowed = true + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the BLOCK-ENTRY token and append it to the queue. + token := yaml_token_t{ + typ: yaml_BLOCK_ENTRY_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the KEY token. +func yaml_parser_fetch_key(parser *yaml_parser_t) bool { + + // In the block context, additional checks are required. + if parser.flow_level == 0 { + // Check if we are allowed to start a new key (not nessesary simple). + if !parser.simple_key_allowed { + return yaml_parser_set_scanner_error(parser, "", parser.mark, + "mapping keys are not allowed in this context") + } + // Add the BLOCK-MAPPING-START token if needed. + if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) { + return false + } + } + + // Reset any potential simple keys on the current flow level. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // Simple keys are allowed after '?' in the block context. 
+ parser.simple_key_allowed = parser.flow_level == 0 + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the KEY token and append it to the queue. + token := yaml_token_t{ + typ: yaml_KEY_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the VALUE token. +func yaml_parser_fetch_value(parser *yaml_parser_t) bool { + + simple_key := &parser.simple_keys[len(parser.simple_keys)-1] + + // Have we found a simple key? + if valid, ok := yaml_simple_key_is_valid(parser, simple_key); !ok { + return false + + } else if valid { + + // Create the KEY token and insert it into the queue. + token := yaml_token_t{ + typ: yaml_KEY_TOKEN, + start_mark: simple_key.mark, + end_mark: simple_key.mark, + } + yaml_insert_token(parser, simple_key.token_number-parser.tokens_parsed, &token) + + // In the block context, we may need to add the BLOCK-MAPPING-START token. + if !yaml_parser_roll_indent(parser, simple_key.mark.column, + simple_key.token_number, + yaml_BLOCK_MAPPING_START_TOKEN, simple_key.mark) { + return false + } + + // Remove the simple key. + simple_key.possible = false + delete(parser.simple_keys_by_tok, simple_key.token_number) + + // A simple key cannot follow another simple key. + parser.simple_key_allowed = false + + } else { + // The ':' indicator follows a complex key. + + // In the block context, extra checks are required. + if parser.flow_level == 0 { + + // Check if we are allowed to start a complex value. + if !parser.simple_key_allowed { + return yaml_parser_set_scanner_error(parser, "", parser.mark, + "mapping values are not allowed in this context") + } + + // Add the BLOCK-MAPPING-START token if needed. + if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) { + return false + } + } + + // Simple keys after ':' are allowed in the block context. 
+ parser.simple_key_allowed = parser.flow_level == 0 + } + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the VALUE token and append it to the queue. + token := yaml_token_t{ + typ: yaml_VALUE_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the ALIAS or ANCHOR token. +func yaml_parser_fetch_anchor(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // An anchor or an alias could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow an anchor or an alias. + parser.simple_key_allowed = false + + // Create the ALIAS or ANCHOR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_anchor(parser, &token, typ) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the TAG token. +func yaml_parser_fetch_tag(parser *yaml_parser_t) bool { + // A tag could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow a tag. + parser.simple_key_allowed = false + + // Create the TAG token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_tag(parser, &token) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the SCALAR(...,literal) or SCALAR(...,folded) tokens. +func yaml_parser_fetch_block_scalar(parser *yaml_parser_t, literal bool) bool { + // Remove any potential simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // A simple key may follow a block scalar. + parser.simple_key_allowed = true + + // Create the SCALAR token and append it to the queue. 
+ var token yaml_token_t + if !yaml_parser_scan_block_scalar(parser, &token, literal) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the SCALAR(...,single-quoted) or SCALAR(...,double-quoted) tokens. +func yaml_parser_fetch_flow_scalar(parser *yaml_parser_t, single bool) bool { + // A plain scalar could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow a flow scalar. + parser.simple_key_allowed = false + + // Create the SCALAR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_flow_scalar(parser, &token, single) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the SCALAR(...,plain) token. +func yaml_parser_fetch_plain_scalar(parser *yaml_parser_t) bool { + // A plain scalar could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow a flow scalar. + parser.simple_key_allowed = false + + // Create the SCALAR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_plain_scalar(parser, &token) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Eat whitespaces and comments until the next token is found. +func yaml_parser_scan_to_next_token(parser *yaml_parser_t) bool { + + scan_mark := parser.mark + + // Until the next token is not found. + for { + // Allow the BOM mark to start a line. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if parser.mark.column == 0 && is_bom(parser.buffer, parser.buffer_pos) { + skip(parser) + } + + // Eat whitespaces. + // Tabs are allowed: + // - in the flow context + // - in the block context, but not at the beginning of the line or + // after '-', '?', or ':' (complex value). 
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for parser.buffer[parser.buffer_pos] == ' ' || ((parser.flow_level > 0 || !parser.simple_key_allowed) && parser.buffer[parser.buffer_pos] == '\t') { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if we just had a line comment under a sequence entry that + // looks more like a header to the following content. Similar to this: + // + // - # The comment + // - Some data + // + // If so, transform the line comment to a head comment and reposition. + if len(parser.comments) > 0 && len(parser.tokens) > 1 { + tokenA := parser.tokens[len(parser.tokens)-2] + tokenB := parser.tokens[len(parser.tokens)-1] + comment := &parser.comments[len(parser.comments)-1] + if tokenA.typ == yaml_BLOCK_SEQUENCE_START_TOKEN && tokenB.typ == yaml_BLOCK_ENTRY_TOKEN && len(comment.line) > 0 && !is_break(parser.buffer, parser.buffer_pos) { + // If it was in the prior line, reposition so it becomes a + // header of the follow up token. Otherwise, keep it in place + // so it becomes a header of the former. + comment.head = comment.line + comment.line = nil + if comment.start_mark.line == parser.mark.line-1 { + comment.token_mark = parser.mark + } + } + } + + // Eat a comment until a line break. + if parser.buffer[parser.buffer_pos] == '#' { + if !yaml_parser_scan_comments(parser, scan_mark) { + return false + } + } + + // If it is a line break, eat it. + if is_break(parser.buffer, parser.buffer_pos) { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + + // In the block context, a new line may start a simple key. + if parser.flow_level == 0 { + parser.simple_key_allowed = true + } + } else { + break // We have found a token. + } + } + + return true +} + +// Scan a YAML-DIRECTIVE or TAG-DIRECTIVE token. 
+// +// Scope: +// +// %YAML 1.1 # a comment \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +// %TAG !yaml! tag:yaml.org,2002: \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +func yaml_parser_scan_directive(parser *yaml_parser_t, token *yaml_token_t) bool { + // Eat '%'. + start_mark := parser.mark + skip(parser) + + // Scan the directive name. + var name []byte + if !yaml_parser_scan_directive_name(parser, start_mark, &name) { + return false + } + + // Is it a YAML directive? + if bytes.Equal(name, []byte("YAML")) { + // Scan the VERSION directive value. + var major, minor int8 + if !yaml_parser_scan_version_directive_value(parser, start_mark, &major, &minor) { + return false + } + end_mark := parser.mark + + // Create a VERSION-DIRECTIVE token. + *token = yaml_token_t{ + typ: yaml_VERSION_DIRECTIVE_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + major: major, + minor: minor, + } + + // Is it a TAG directive? + } else if bytes.Equal(name, []byte("TAG")) { + // Scan the TAG directive value. + var handle, prefix []byte + if !yaml_parser_scan_tag_directive_value(parser, start_mark, &handle, &prefix) { + return false + } + end_mark := parser.mark + + // Create a TAG-DIRECTIVE token. + *token = yaml_token_t{ + typ: yaml_TAG_DIRECTIVE_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: handle, + prefix: prefix, + } + + // Unknown directive. + } else { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "found unknown directive name") + return false + } + + // Eat the rest of the line including any comments. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + if parser.buffer[parser.buffer_pos] == '#' { + // [Go] Discard this inline comment for the time being. 
+ //if !yaml_parser_scan_line_comment(parser, start_mark) { + // return false + //} + for !is_breakz(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + } + + // Check if we are at the end of the line. + if !is_breakz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "did not find expected comment or line break") + return false + } + + // Eat a line break. + if is_break(parser.buffer, parser.buffer_pos) { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + } + + return true +} + +// Scan the directive name. +// +// Scope: +// +// %YAML 1.1 # a comment \n +// ^^^^ +// %TAG !yaml! tag:yaml.org,2002: \n +// ^^^ +func yaml_parser_scan_directive_name(parser *yaml_parser_t, start_mark yaml_mark_t, name *[]byte) bool { + // Consume the directive name. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + var s []byte + for is_alpha(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if the name is empty. + if len(s) == 0 { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "could not find expected directive name") + return false + } + + // Check for an blank character after the name. + if !is_blankz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "found unexpected non-alphabetical character") + return false + } + *name = s + return true +} + +// Scan the value of VERSION-DIRECTIVE. +// +// Scope: +// +// %YAML 1.1 # a comment \n +// ^^^^^^ +func yaml_parser_scan_version_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, major, minor *int8) bool { + // Eat whitespaces. 
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Consume the major version number. + if !yaml_parser_scan_version_directive_number(parser, start_mark, major) { + return false + } + + // Eat '.'. + if parser.buffer[parser.buffer_pos] != '.' { + return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", + start_mark, "did not find expected digit or '.' character") + } + + skip(parser) + + // Consume the minor version number. + if !yaml_parser_scan_version_directive_number(parser, start_mark, minor) { + return false + } + return true +} + +const max_number_length = 2 + +// Scan the version number of VERSION-DIRECTIVE. +// +// Scope: +// +// %YAML 1.1 # a comment \n +// ^ +// %YAML 1.1 # a comment \n +// ^ +func yaml_parser_scan_version_directive_number(parser *yaml_parser_t, start_mark yaml_mark_t, number *int8) bool { + + // Repeat while the next character is digit. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + var value, length int8 + for is_digit(parser.buffer, parser.buffer_pos) { + // Check if the number is too long. + length++ + if length > max_number_length { + return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", + start_mark, "found extremely long version number") + } + value = value*10 + int8(as_digit(parser.buffer, parser.buffer_pos)) + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if the number was present. + if length == 0 { + return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", + start_mark, "did not find expected version number") + } + *number = value + return true +} + +// Scan the value of a TAG-DIRECTIVE token. +// +// Scope: +// +// %TAG !yaml! 
tag:yaml.org,2002: \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +func yaml_parser_scan_tag_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, handle, prefix *[]byte) bool { + var handle_value, prefix_value []byte + + // Eat whitespaces. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Scan a handle. + if !yaml_parser_scan_tag_handle(parser, true, start_mark, &handle_value) { + return false + } + + // Expect a whitespace. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if !is_blank(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive", + start_mark, "did not find expected whitespace") + return false + } + + // Eat whitespaces. + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Scan a prefix. + if !yaml_parser_scan_tag_uri(parser, true, nil, start_mark, &prefix_value) { + return false + } + + // Expect a whitespace or line break. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if !is_blankz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive", + start_mark, "did not find expected whitespace or line break") + return false + } + + *handle = handle_value + *prefix = prefix_value + return true +} + +func yaml_parser_scan_anchor(parser *yaml_parser_t, token *yaml_token_t, typ yaml_token_type_t) bool { + var s []byte + + // Eat the indicator character. + start_mark := parser.mark + skip(parser) + + // Consume the value. 
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_alpha(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + end_mark := parser.mark + + /* + * Check if length of the anchor is greater than 0 and it is followed by + * a whitespace character or one of the indicators: + * + * '?', ':', ',', ']', '}', '%', '@', '`'. + */ + + if len(s) == 0 || + !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '?' || + parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == ',' || + parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '}' || + parser.buffer[parser.buffer_pos] == '%' || parser.buffer[parser.buffer_pos] == '@' || + parser.buffer[parser.buffer_pos] == '`') { + context := "while scanning an alias" + if typ == yaml_ANCHOR_TOKEN { + context = "while scanning an anchor" + } + yaml_parser_set_scanner_error(parser, context, start_mark, + "did not find expected alphabetic or numeric character") + return false + } + + // Create a token. + *token = yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + value: s, + } + + return true +} + +/* + * Scan a TAG token. + */ + +func yaml_parser_scan_tag(parser *yaml_parser_t, token *yaml_token_t) bool { + var handle, suffix []byte + + start_mark := parser.mark + + // Check if the tag is in the canonical form. + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + if parser.buffer[parser.buffer_pos+1] == '<' { + // Keep the handle as '' + + // Eat '!<' + skip(parser) + skip(parser) + + // Consume the tag value. + if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) { + return false + } + + // Check for '>' and eat it. 
+ if parser.buffer[parser.buffer_pos] != '>' { + yaml_parser_set_scanner_error(parser, "while scanning a tag", + start_mark, "did not find the expected '>'") + return false + } + + skip(parser) + } else { + // The tag has either the '!suffix' or the '!handle!suffix' form. + + // First, try to scan a handle. + if !yaml_parser_scan_tag_handle(parser, false, start_mark, &handle) { + return false + } + + // Check if it is, indeed, handle. + if handle[0] == '!' && len(handle) > 1 && handle[len(handle)-1] == '!' { + // Scan the suffix now. + if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) { + return false + } + } else { + // It wasn't a handle after all. Scan the rest of the tag. + if !yaml_parser_scan_tag_uri(parser, false, handle, start_mark, &suffix) { + return false + } + + // Set the handle to '!'. + handle = []byte{'!'} + + // A special case: the '!' tag. Set the handle to '' and the + // suffix to '!'. + if len(suffix) == 0 { + handle, suffix = suffix, handle + } + } + } + + // Check the character which ends the tag. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if !is_blankz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a tag", + start_mark, "did not find expected whitespace or line break") + return false + } + + end_mark := parser.mark + + // Create a token. + *token = yaml_token_t{ + typ: yaml_TAG_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: handle, + suffix: suffix, + } + return true +} + +// Scan a tag handle. +func yaml_parser_scan_tag_handle(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, handle *[]byte) bool { + // Check the initial '!' character. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if parser.buffer[parser.buffer_pos] != '!' 
{ + yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find expected '!'") + return false + } + + var s []byte + + // Copy the '!' character. + s = read(parser, s) + + // Copy all subsequent alphabetical and numerical characters. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for is_alpha(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if the trailing character is '!' and copy it. + if parser.buffer[parser.buffer_pos] == '!' { + s = read(parser, s) + } else { + // It's either the '!' tag or not really a tag handle. If it's a %TAG + // directive, it's an error. If it's a tag token, it must be a part of URI. + if directive && string(s) != "!" { + yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find expected '!'") + return false + } + } + + *handle = s + return true +} + +// Scan a tag. +func yaml_parser_scan_tag_uri(parser *yaml_parser_t, directive bool, head []byte, start_mark yaml_mark_t, uri *[]byte) bool { + //size_t length = head ? strlen((char *)head) : 0 + var s []byte + hasTag := len(head) > 0 + + // Copy the head if needed. + // + // Note that we don't copy the leading '!' character. + if len(head) > 1 { + s = append(s, head[1:]...) + } + + // Scan the tag. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // The set of characters that may appear in URI is as follows: + // + // '0'-'9', 'A'-'Z', 'a'-'z', '_', '-', ';', '/', '?', ':', '@', '&', + // '=', '+', '$', ',', '.', '!', '~', '*', '\'', '(', ')', '[', ']', + // '%'. + // [Go] TODO Convert this into more reasonable logic. + for is_alpha(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == ';' || + parser.buffer[parser.buffer_pos] == '/' || parser.buffer[parser.buffer_pos] == '?' 
|| + parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == '@' || + parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '=' || + parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '$' || + parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '.' || + parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '~' || + parser.buffer[parser.buffer_pos] == '*' || parser.buffer[parser.buffer_pos] == '\'' || + parser.buffer[parser.buffer_pos] == '(' || parser.buffer[parser.buffer_pos] == ')' || + parser.buffer[parser.buffer_pos] == '[' || parser.buffer[parser.buffer_pos] == ']' || + parser.buffer[parser.buffer_pos] == '%' { + // Check if it is a URI-escape sequence. + if parser.buffer[parser.buffer_pos] == '%' { + if !yaml_parser_scan_uri_escapes(parser, directive, start_mark, &s) { + return false + } + } else { + s = read(parser, s) + } + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + hasTag = true + } + + if !hasTag { + yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find expected tag URI") + return false + } + *uri = s + return true +} + +// Decode an URI-escape sequence corresponding to a single UTF-8 character. +func yaml_parser_scan_uri_escapes(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, s *[]byte) bool { + + // Decode the required number of characters. + w := 1024 + for w > 0 { + // Check for a URI-escaped octet. + if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) { + return false + } + + if !(parser.buffer[parser.buffer_pos] == '%' && + is_hex(parser.buffer, parser.buffer_pos+1) && + is_hex(parser.buffer, parser.buffer_pos+2)) { + return yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find URI escaped octet") + } + + // Get the octet. 
+ octet := byte((as_hex(parser.buffer, parser.buffer_pos+1) << 4) + as_hex(parser.buffer, parser.buffer_pos+2)) + + // If it is the leading octet, determine the length of the UTF-8 sequence. + if w == 1024 { + w = width(octet) + if w == 0 { + return yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "found an incorrect leading UTF-8 octet") + } + } else { + // Check if the trailing octet is correct. + if octet&0xC0 != 0x80 { + return yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "found an incorrect trailing UTF-8 octet") + } + } + + // Copy the octet and move the pointers. + *s = append(*s, octet) + skip(parser) + skip(parser) + skip(parser) + w-- + } + return true +} + +// Scan a block scalar. +func yaml_parser_scan_block_scalar(parser *yaml_parser_t, token *yaml_token_t, literal bool) bool { + // Eat the indicator '|' or '>'. + start_mark := parser.mark + skip(parser) + + // Scan the additional block scalar indicators. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // Check for a chomping indicator. + var chomping, increment int + if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' { + // Set the chomping method and eat the indicator. + if parser.buffer[parser.buffer_pos] == '+' { + chomping = +1 + } else { + chomping = -1 + } + skip(parser) + + // Check for an indentation indicator. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if is_digit(parser.buffer, parser.buffer_pos) { + // Check that the indentation is greater than 0. + if parser.buffer[parser.buffer_pos] == '0' { + yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "found an indentation indicator equal to 0") + return false + } + + // Get the indentation level and eat the indicator. 
+ increment = as_digit(parser.buffer, parser.buffer_pos) + skip(parser) + } + + } else if is_digit(parser.buffer, parser.buffer_pos) { + // Do the same as above, but in the opposite order. + + if parser.buffer[parser.buffer_pos] == '0' { + yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "found an indentation indicator equal to 0") + return false + } + increment = as_digit(parser.buffer, parser.buffer_pos) + skip(parser) + + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' { + if parser.buffer[parser.buffer_pos] == '+' { + chomping = +1 + } else { + chomping = -1 + } + skip(parser) + } + } + + // Eat whitespaces and comments to the end of the line. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + if parser.buffer[parser.buffer_pos] == '#' { + if !yaml_parser_scan_line_comment(parser, start_mark) { + return false + } + for !is_breakz(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + } + + // Check if we are at the end of the line. + if !is_breakz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "did not find expected comment or line break") + return false + } + + // Eat a line break. + if is_break(parser.buffer, parser.buffer_pos) { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + } + + end_mark := parser.mark + + // Set the indentation level if it was specified. 
+ var indent int + if increment > 0 { + if parser.indent >= 0 { + indent = parser.indent + increment + } else { + indent = increment + } + } + + // Scan the leading line breaks and determine the indentation level if needed. + var s, leading_break, trailing_breaks []byte + if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) { + return false + } + + // Scan the block scalar content. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + var leading_blank, trailing_blank bool + for parser.mark.column == indent && !is_z(parser.buffer, parser.buffer_pos) { + // We are at the beginning of a non-empty line. + + // Is it a trailing whitespace? + trailing_blank = is_blank(parser.buffer, parser.buffer_pos) + + // Check if we need to fold the leading line break. + if !literal && !leading_blank && !trailing_blank && len(leading_break) > 0 && leading_break[0] == '\n' { + // Do we need to join the lines by space? + if len(trailing_breaks) == 0 { + s = append(s, ' ') + } + } else { + s = append(s, leading_break...) + } + leading_break = leading_break[:0] + + // Append the remaining line breaks. + s = append(s, trailing_breaks...) + trailing_breaks = trailing_breaks[:0] + + // Is it a leading whitespace? + leading_blank = is_blank(parser.buffer, parser.buffer_pos) + + // Consume the current line. + for !is_breakz(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Consume the line break. + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + leading_break = read_line(parser, leading_break) + + // Eat the following indentation spaces and line breaks. + if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) { + return false + } + } + + // Chomp the tail. + if chomping != -1 { + s = append(s, leading_break...) 
+ } + if chomping == 1 { + s = append(s, trailing_breaks...) + } + + // Create a token. + *token = yaml_token_t{ + typ: yaml_SCALAR_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: s, + style: yaml_LITERAL_SCALAR_STYLE, + } + if !literal { + token.style = yaml_FOLDED_SCALAR_STYLE + } + return true +} + +// Scan indentation spaces and line breaks for a block scalar. Determine the +// indentation level if needed. +func yaml_parser_scan_block_scalar_breaks(parser *yaml_parser_t, indent *int, breaks *[]byte, start_mark yaml_mark_t, end_mark *yaml_mark_t) bool { + *end_mark = parser.mark + + // Eat the indentation spaces and line breaks. + max_indent := 0 + for { + // Eat the indentation spaces. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for (*indent == 0 || parser.mark.column < *indent) && is_space(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + if parser.mark.column > max_indent { + max_indent = parser.mark.column + } + + // Check for a tab character messing the indentation. + if (*indent == 0 || parser.mark.column < *indent) && is_tab(parser.buffer, parser.buffer_pos) { + return yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "found a tab character where an indentation space is expected") + } + + // Have we found a non-empty line? + if !is_break(parser.buffer, parser.buffer_pos) { + break + } + + // Consume the line break. + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + // [Go] Should really be returning breaks instead. + *breaks = read_line(parser, *breaks) + *end_mark = parser.mark + } + + // Determine the indentation level if needed. + if *indent == 0 { + *indent = max_indent + if *indent < parser.indent+1 { + *indent = parser.indent + 1 + } + if *indent < 1 { + *indent = 1 + } + } + return true +} + +// Scan a quoted scalar. 
+func yaml_parser_scan_flow_scalar(parser *yaml_parser_t, token *yaml_token_t, single bool) bool { + // Eat the left quote. + start_mark := parser.mark + skip(parser) + + // Consume the content of the quoted scalar. + var s, leading_break, trailing_breaks, whitespaces []byte + for { + // Check that there are no document indicators at the beginning of the line. + if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { + return false + } + + if parser.mark.column == 0 && + ((parser.buffer[parser.buffer_pos+0] == '-' && + parser.buffer[parser.buffer_pos+1] == '-' && + parser.buffer[parser.buffer_pos+2] == '-') || + (parser.buffer[parser.buffer_pos+0] == '.' && + parser.buffer[parser.buffer_pos+1] == '.' && + parser.buffer[parser.buffer_pos+2] == '.')) && + is_blankz(parser.buffer, parser.buffer_pos+3) { + yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar", + start_mark, "found unexpected document indicator") + return false + } + + // Check for EOF. + if is_z(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar", + start_mark, "found unexpected end of stream") + return false + } + + // Consume non-blank characters. + leading_blanks := false + for !is_blankz(parser.buffer, parser.buffer_pos) { + if single && parser.buffer[parser.buffer_pos] == '\'' && parser.buffer[parser.buffer_pos+1] == '\'' { + // Is is an escaped single quote. + s = append(s, '\'') + skip(parser) + skip(parser) + + } else if single && parser.buffer[parser.buffer_pos] == '\'' { + // It is a right single quote. + break + } else if !single && parser.buffer[parser.buffer_pos] == '"' { + // It is a right double quote. + break + + } else if !single && parser.buffer[parser.buffer_pos] == '\\' && is_break(parser.buffer, parser.buffer_pos+1) { + // It is an escaped line break. 
+ if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) { + return false + } + skip(parser) + skip_line(parser) + leading_blanks = true + break + + } else if !single && parser.buffer[parser.buffer_pos] == '\\' { + // It is an escape sequence. + code_length := 0 + + // Check the escape character. + switch parser.buffer[parser.buffer_pos+1] { + case '0': + s = append(s, 0) + case 'a': + s = append(s, '\x07') + case 'b': + s = append(s, '\x08') + case 't', '\t': + s = append(s, '\x09') + case 'n': + s = append(s, '\x0A') + case 'v': + s = append(s, '\x0B') + case 'f': + s = append(s, '\x0C') + case 'r': + s = append(s, '\x0D') + case 'e': + s = append(s, '\x1B') + case ' ': + s = append(s, '\x20') + case '"': + s = append(s, '"') + case '\'': + s = append(s, '\'') + case '\\': + s = append(s, '\\') + case 'N': // NEL (#x85) + s = append(s, '\xC2') + s = append(s, '\x85') + case '_': // #xA0 + s = append(s, '\xC2') + s = append(s, '\xA0') + case 'L': // LS (#x2028) + s = append(s, '\xE2') + s = append(s, '\x80') + s = append(s, '\xA8') + case 'P': // PS (#x2029) + s = append(s, '\xE2') + s = append(s, '\x80') + s = append(s, '\xA9') + case 'x': + code_length = 2 + case 'u': + code_length = 4 + case 'U': + code_length = 8 + default: + yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar", + start_mark, "found unknown escape character") + return false + } + + skip(parser) + skip(parser) + + // Consume an arbitrary escape code. + if code_length > 0 { + var value int + + // Scan the character value. 
+ if parser.unread < code_length && !yaml_parser_update_buffer(parser, code_length) { + return false + } + for k := 0; k < code_length; k++ { + if !is_hex(parser.buffer, parser.buffer_pos+k) { + yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar", + start_mark, "did not find expected hexdecimal number") + return false + } + value = (value << 4) + as_hex(parser.buffer, parser.buffer_pos+k) + } + + // Check the value and write the character. + if (value >= 0xD800 && value <= 0xDFFF) || value > 0x10FFFF { + yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar", + start_mark, "found invalid Unicode character escape code") + return false + } + if value <= 0x7F { + s = append(s, byte(value)) + } else if value <= 0x7FF { + s = append(s, byte(0xC0+(value>>6))) + s = append(s, byte(0x80+(value&0x3F))) + } else if value <= 0xFFFF { + s = append(s, byte(0xE0+(value>>12))) + s = append(s, byte(0x80+((value>>6)&0x3F))) + s = append(s, byte(0x80+(value&0x3F))) + } else { + s = append(s, byte(0xF0+(value>>18))) + s = append(s, byte(0x80+((value>>12)&0x3F))) + s = append(s, byte(0x80+((value>>6)&0x3F))) + s = append(s, byte(0x80+(value&0x3F))) + } + + // Advance the pointer. + for k := 0; k < code_length; k++ { + skip(parser) + } + } + } else { + // It is a non-escaped non-blank character. + s = read(parser, s) + } + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + } + + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // Check if we are at the end of the scalar. + if single { + if parser.buffer[parser.buffer_pos] == '\'' { + break + } + } else { + if parser.buffer[parser.buffer_pos] == '"' { + break + } + } + + // Consume blank characters. + for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) { + if is_blank(parser.buffer, parser.buffer_pos) { + // Consume a space or a tab character. 
+ if !leading_blanks { + whitespaces = read(parser, whitespaces) + } else { + skip(parser) + } + } else { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + // Check if it is a first line break. + if !leading_blanks { + whitespaces = whitespaces[:0] + leading_break = read_line(parser, leading_break) + leading_blanks = true + } else { + trailing_breaks = read_line(parser, trailing_breaks) + } + } + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Join the whitespaces or fold line breaks. + if leading_blanks { + // Do we need to fold line breaks? + if len(leading_break) > 0 && leading_break[0] == '\n' { + if len(trailing_breaks) == 0 { + s = append(s, ' ') + } else { + s = append(s, trailing_breaks...) + } + } else { + s = append(s, leading_break...) + s = append(s, trailing_breaks...) + } + trailing_breaks = trailing_breaks[:0] + leading_break = leading_break[:0] + } else { + s = append(s, whitespaces...) + whitespaces = whitespaces[:0] + } + } + + // Eat the right quote. + skip(parser) + end_mark := parser.mark + + // Create a token. + *token = yaml_token_t{ + typ: yaml_SCALAR_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: s, + style: yaml_SINGLE_QUOTED_SCALAR_STYLE, + } + if !single { + token.style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + return true +} + +// Scan a plain scalar. +func yaml_parser_scan_plain_scalar(parser *yaml_parser_t, token *yaml_token_t) bool { + + var s, leading_break, trailing_breaks, whitespaces []byte + var leading_blanks bool + var indent = parser.indent + 1 + + start_mark := parser.mark + end_mark := parser.mark + + // Consume the content of the plain scalar. + for { + // Check for a document indicator. 
+ if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { + return false + } + if parser.mark.column == 0 && + ((parser.buffer[parser.buffer_pos+0] == '-' && + parser.buffer[parser.buffer_pos+1] == '-' && + parser.buffer[parser.buffer_pos+2] == '-') || + (parser.buffer[parser.buffer_pos+0] == '.' && + parser.buffer[parser.buffer_pos+1] == '.' && + parser.buffer[parser.buffer_pos+2] == '.')) && + is_blankz(parser.buffer, parser.buffer_pos+3) { + break + } + + // Check for a comment. + if parser.buffer[parser.buffer_pos] == '#' { + break + } + + // Consume non-blank characters. + for !is_blankz(parser.buffer, parser.buffer_pos) { + + // Check for indicators that may end a plain scalar. + if (parser.buffer[parser.buffer_pos] == ':' && is_blankz(parser.buffer, parser.buffer_pos+1)) || + (parser.flow_level > 0 && + (parser.buffer[parser.buffer_pos] == ',' || + parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == '[' || + parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' || + parser.buffer[parser.buffer_pos] == '}')) { + break + } + + // Check if we need to join whitespaces and breaks. + if leading_blanks || len(whitespaces) > 0 { + if leading_blanks { + // Do we need to fold line breaks? + if leading_break[0] == '\n' { + if len(trailing_breaks) == 0 { + s = append(s, ' ') + } else { + s = append(s, trailing_breaks...) + } + } else { + s = append(s, leading_break...) + s = append(s, trailing_breaks...) + } + trailing_breaks = trailing_breaks[:0] + leading_break = leading_break[:0] + leading_blanks = false + } else { + s = append(s, whitespaces...) + whitespaces = whitespaces[:0] + } + } + + // Copy the character. + s = read(parser, s) + + end_mark = parser.mark + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + } + + // Is it the end? 
+ if !(is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos)) { + break + } + + // Consume blank characters. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) { + if is_blank(parser.buffer, parser.buffer_pos) { + + // Check for tab characters that abuse indentation. + if leading_blanks && parser.mark.column < indent && is_tab(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a plain scalar", + start_mark, "found a tab character that violates indentation") + return false + } + + // Consume a space or a tab character. + if !leading_blanks { + whitespaces = read(parser, whitespaces) + } else { + skip(parser) + } + } else { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + // Check if it is a first line break. + if !leading_blanks { + whitespaces = whitespaces[:0] + leading_break = read_line(parser, leading_break) + leading_blanks = true + } else { + trailing_breaks = read_line(parser, trailing_breaks) + } + } + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check indentation level. + if parser.flow_level == 0 && parser.mark.column < indent { + break + } + } + + // Create a token. + *token = yaml_token_t{ + typ: yaml_SCALAR_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: s, + style: yaml_PLAIN_SCALAR_STYLE, + } + + // Note that we change the 'simple_key_allowed' flag. 
+ if leading_blanks { + parser.simple_key_allowed = true + } + return true +} + +func yaml_parser_scan_line_comment(parser *yaml_parser_t, token_mark yaml_mark_t) bool { + if parser.newlines > 0 { + return true + } + + var start_mark yaml_mark_t + var text []byte + + for peek := 0; peek < 512; peek++ { + if parser.unread < peek+1 && !yaml_parser_update_buffer(parser, peek+1) { + break + } + if is_blank(parser.buffer, parser.buffer_pos+peek) { + continue + } + if parser.buffer[parser.buffer_pos+peek] == '#' { + seen := parser.mark.index + peek + for { + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if is_breakz(parser.buffer, parser.buffer_pos) { + if parser.mark.index >= seen { + break + } + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + } else if parser.mark.index >= seen { + if len(text) == 0 { + start_mark = parser.mark + } + text = read(parser, text) + } else { + skip(parser) + } + } + } + break + } + if len(text) > 0 { + parser.comments = append(parser.comments, yaml_comment_t{ + token_mark: token_mark, + start_mark: start_mark, + line: text, + }) + } + return true +} + +func yaml_parser_scan_comments(parser *yaml_parser_t, scan_mark yaml_mark_t) bool { + token := parser.tokens[len(parser.tokens)-1] + + if token.typ == yaml_FLOW_ENTRY_TOKEN && len(parser.tokens) > 1 { + token = parser.tokens[len(parser.tokens)-2] + } + + var token_mark = token.start_mark + var start_mark yaml_mark_t + var next_indent = parser.indent + if next_indent < 0 { + next_indent = 0 + } + + var recent_empty = false + var first_empty = parser.newlines <= 1 + + var line = parser.mark.line + var column = parser.mark.column + + var text []byte + + // The foot line is the place where a comment must start to + // still be considered as a foot of the prior content. + // If there's some content in the currently parsed line, then + // the foot is the line below it. 
+ var foot_line = -1 + if scan_mark.line > 0 { + foot_line = parser.mark.line - parser.newlines + 1 + if parser.newlines == 0 && parser.mark.column > 1 { + foot_line++ + } + } + + var peek = 0 + for ; peek < 512; peek++ { + if parser.unread < peek+1 && !yaml_parser_update_buffer(parser, peek+1) { + break + } + column++ + if is_blank(parser.buffer, parser.buffer_pos+peek) { + continue + } + c := parser.buffer[parser.buffer_pos+peek] + var close_flow = parser.flow_level > 0 && (c == ']' || c == '}') + if close_flow || is_breakz(parser.buffer, parser.buffer_pos+peek) { + // Got line break or terminator. + if close_flow || !recent_empty { + if close_flow || first_empty && (start_mark.line == foot_line && token.typ != yaml_VALUE_TOKEN || start_mark.column-1 < next_indent) { + // This is the first empty line and there were no empty lines before, + // so this initial part of the comment is a foot of the prior token + // instead of being a head for the following one. Split it up. + // Alternatively, this might also be the last comment inside a flow + // scope, so it must be a footer. + if len(text) > 0 { + if start_mark.column-1 < next_indent { + // If dedented it's unrelated to the prior token. 
+ token_mark = start_mark + } + parser.comments = append(parser.comments, yaml_comment_t{ + scan_mark: scan_mark, + token_mark: token_mark, + start_mark: start_mark, + end_mark: yaml_mark_t{parser.mark.index + peek, line, column}, + foot: text, + }) + scan_mark = yaml_mark_t{parser.mark.index + peek, line, column} + token_mark = scan_mark + text = nil + } + } else { + if len(text) > 0 && parser.buffer[parser.buffer_pos+peek] != 0 { + text = append(text, '\n') + } + } + } + if !is_break(parser.buffer, parser.buffer_pos+peek) { + break + } + first_empty = false + recent_empty = true + column = 0 + line++ + continue + } + + if len(text) > 0 && (close_flow || column-1 < next_indent && column != start_mark.column) { + // The comment at the different indentation is a foot of the + // preceding data rather than a head of the upcoming one. + parser.comments = append(parser.comments, yaml_comment_t{ + scan_mark: scan_mark, + token_mark: token_mark, + start_mark: start_mark, + end_mark: yaml_mark_t{parser.mark.index + peek, line, column}, + foot: text, + }) + scan_mark = yaml_mark_t{parser.mark.index + peek, line, column} + token_mark = scan_mark + text = nil + } + + if parser.buffer[parser.buffer_pos+peek] != '#' { + break + } + + if len(text) == 0 { + start_mark = yaml_mark_t{parser.mark.index + peek, line, column} + } else { + text = append(text, '\n') + } + + recent_empty = false + + // Consume until after the consumed comment line. 
+ seen := parser.mark.index + peek + for { + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if is_breakz(parser.buffer, parser.buffer_pos) { + if parser.mark.index >= seen { + break + } + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + } else if parser.mark.index >= seen { + text = read(parser, text) + } else { + skip(parser) + } + } + + peek = 0 + column = 0 + line = parser.mark.line + next_indent = parser.indent + if next_indent < 0 { + next_indent = 0 + } + } + + if len(text) > 0 { + parser.comments = append(parser.comments, yaml_comment_t{ + scan_mark: scan_mark, + token_mark: start_mark, + start_mark: start_mark, + end_mark: yaml_mark_t{parser.mark.index + peek - 1, line, column}, + head: text, + }) + } + return true +} diff --git a/vendor/go.yaml.in/yaml/v3/sorter.go b/vendor/go.yaml.in/yaml/v3/sorter.go new file mode 100644 index 000000000..9210ece7e --- /dev/null +++ b/vendor/go.yaml.in/yaml/v3/sorter.go @@ -0,0 +1,134 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package yaml + +import ( + "reflect" + "unicode" +) + +type keyList []reflect.Value + +func (l keyList) Len() int { return len(l) } +func (l keyList) Swap(i, j int) { l[i], l[j] = l[j], l[i] } +func (l keyList) Less(i, j int) bool { + a := l[i] + b := l[j] + ak := a.Kind() + bk := b.Kind() + for (ak == reflect.Interface || ak == reflect.Ptr) && !a.IsNil() { + a = a.Elem() + ak = a.Kind() + } + for (bk == reflect.Interface || bk == reflect.Ptr) && !b.IsNil() { + b = b.Elem() + bk = b.Kind() + } + af, aok := keyFloat(a) + bf, bok := keyFloat(b) + if aok && bok { + if af != bf { + return af < bf + } + if ak != bk { + return ak < bk + } + return numLess(a, b) + } + if ak != reflect.String || bk != reflect.String { + return ak < bk + } + ar, br := []rune(a.String()), []rune(b.String()) + digits := false + for i := 0; i < len(ar) && i < len(br); i++ { + if ar[i] == br[i] { + digits = unicode.IsDigit(ar[i]) + continue + } + al := unicode.IsLetter(ar[i]) + bl := unicode.IsLetter(br[i]) + if al && bl { + return ar[i] < br[i] + } + if al || bl { + if digits { + return al + } else { + return bl + } + } + var ai, bi int + var an, bn int64 + if ar[i] == '0' || br[i] == '0' { + for j := i - 1; j >= 0 && unicode.IsDigit(ar[j]); j-- { + if ar[j] != '0' { + an = 1 + bn = 1 + break + } + } + } + for ai = i; ai < len(ar) && unicode.IsDigit(ar[ai]); ai++ { + an = an*10 + int64(ar[ai]-'0') + } + for bi = i; bi < len(br) && unicode.IsDigit(br[bi]); bi++ { + bn = bn*10 + int64(br[bi]-'0') + } + if an != bn { + return an < bn + } + if ai != bi { + return ai < bi + } + return ar[i] < br[i] + } + return len(ar) < len(br) +} + +// keyFloat returns a float value for v if it is a number/bool +// and whether it is a number/bool or not. 
+func keyFloat(v reflect.Value) (f float64, ok bool) { + switch v.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return float64(v.Int()), true + case reflect.Float32, reflect.Float64: + return v.Float(), true + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return float64(v.Uint()), true + case reflect.Bool: + if v.Bool() { + return 1, true + } + return 0, true + } + return 0, false +} + +// numLess returns whether a < b. +// a and b must necessarily have the same kind. +func numLess(a, b reflect.Value) bool { + switch a.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return a.Int() < b.Int() + case reflect.Float32, reflect.Float64: + return a.Float() < b.Float() + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return a.Uint() < b.Uint() + case reflect.Bool: + return !a.Bool() && b.Bool() + } + panic("not a number") +} diff --git a/vendor/go.yaml.in/yaml/v3/writerc.go b/vendor/go.yaml.in/yaml/v3/writerc.go new file mode 100644 index 000000000..266d0b092 --- /dev/null +++ b/vendor/go.yaml.in/yaml/v3/writerc.go @@ -0,0 +1,48 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// Copyright (c) 2006-2010 Kirill Simonov +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of +// this software and associated documentation files (the "Software"), to deal in +// the Software without restriction, including without limitation the rights to +// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +// of the Software, and to permit persons to whom the Software is furnished to do +// so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. 
+// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +package yaml + +// Set the writer error and return false. +func yaml_emitter_set_writer_error(emitter *yaml_emitter_t, problem string) bool { + emitter.error = yaml_WRITER_ERROR + emitter.problem = problem + return false +} + +// Flush the output buffer. +func yaml_emitter_flush(emitter *yaml_emitter_t) bool { + if emitter.write_handler == nil { + panic("write handler not set") + } + + // Check if the buffer is empty. + if emitter.buffer_pos == 0 { + return true + } + + if err := emitter.write_handler(emitter, emitter.buffer[:emitter.buffer_pos]); err != nil { + return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error()) + } + emitter.buffer_pos = 0 + return true +} diff --git a/vendor/go.yaml.in/yaml/v3/yaml.go b/vendor/go.yaml.in/yaml/v3/yaml.go new file mode 100644 index 000000000..0b101cd20 --- /dev/null +++ b/vendor/go.yaml.in/yaml/v3/yaml.go @@ -0,0 +1,703 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +// Package yaml implements YAML support for the Go language. +// +// Source code and other details for the project are available at GitHub: +// +// https://github.com/yaml/go-yaml +package yaml + +import ( + "errors" + "fmt" + "io" + "reflect" + "strings" + "sync" + "unicode/utf8" +) + +// The Unmarshaler interface may be implemented by types to customize their +// behavior when being unmarshaled from a YAML document. +type Unmarshaler interface { + UnmarshalYAML(value *Node) error +} + +type obsoleteUnmarshaler interface { + UnmarshalYAML(unmarshal func(interface{}) error) error +} + +// The Marshaler interface may be implemented by types to customize their +// behavior when being marshaled into a YAML document. The returned value +// is marshaled in place of the original value implementing Marshaler. +// +// If an error is returned by MarshalYAML, the marshaling procedure stops +// and returns with the provided error. +type Marshaler interface { + MarshalYAML() (interface{}, error) +} + +// Unmarshal decodes the first document found within the in byte slice +// and assigns decoded values into the out value. +// +// Maps and pointers (to a struct, string, int, etc) are accepted as out +// values. If an internal pointer within a struct is not initialized, +// the yaml package will initialize it if necessary for unmarshalling +// the provided data. The out parameter must not be nil. +// +// The type of the decoded values should be compatible with the respective +// values in out. If one or more values cannot be decoded due to a type +// mismatches, decoding continues partially until the end of the YAML +// content, and a *yaml.TypeError is returned with details for all +// missed values. 
+// +// Struct fields are only unmarshalled if they are exported (have an +// upper case first letter), and are unmarshalled using the field name +// lowercased as the default key. Custom keys may be defined via the +// "yaml" name in the field tag: the content preceding the first comma +// is used as the key, and the following comma-separated options are +// used to tweak the marshalling process (see Marshal). +// Conflicting names result in a runtime error. +// +// For example: +// +// type T struct { +// F int `yaml:"a,omitempty"` +// B int +// } +// var t T +// yaml.Unmarshal([]byte("a: 1\nb: 2"), &t) +// +// See the documentation of Marshal for the format of tags and a list of +// supported tag options. +func Unmarshal(in []byte, out interface{}) (err error) { + return unmarshal(in, out, false) +} + +// A Decoder reads and decodes YAML values from an input stream. +type Decoder struct { + parser *parser + knownFields bool +} + +// NewDecoder returns a new decoder that reads from r. +// +// The decoder introduces its own buffering and may read +// data from r beyond the YAML values requested. +func NewDecoder(r io.Reader) *Decoder { + return &Decoder{ + parser: newParserFromReader(r), + } +} + +// KnownFields ensures that the keys in decoded mappings to +// exist as fields in the struct being decoded into. +func (dec *Decoder) KnownFields(enable bool) { + dec.knownFields = enable +} + +// Decode reads the next YAML-encoded value from its input +// and stores it in the value pointed to by v. +// +// See the documentation for Unmarshal for details about the +// conversion of YAML into a Go value. 
+func (dec *Decoder) Decode(v interface{}) (err error) { + d := newDecoder() + d.knownFields = dec.knownFields + defer handleErr(&err) + node := dec.parser.parse() + if node == nil { + return io.EOF + } + out := reflect.ValueOf(v) + if out.Kind() == reflect.Ptr && !out.IsNil() { + out = out.Elem() + } + d.unmarshal(node, out) + if len(d.terrors) > 0 { + return &TypeError{d.terrors} + } + return nil +} + +// Decode decodes the node and stores its data into the value pointed to by v. +// +// See the documentation for Unmarshal for details about the +// conversion of YAML into a Go value. +func (n *Node) Decode(v interface{}) (err error) { + d := newDecoder() + defer handleErr(&err) + out := reflect.ValueOf(v) + if out.Kind() == reflect.Ptr && !out.IsNil() { + out = out.Elem() + } + d.unmarshal(n, out) + if len(d.terrors) > 0 { + return &TypeError{d.terrors} + } + return nil +} + +func unmarshal(in []byte, out interface{}, strict bool) (err error) { + defer handleErr(&err) + d := newDecoder() + p := newParser(in) + defer p.destroy() + node := p.parse() + if node != nil { + v := reflect.ValueOf(out) + if v.Kind() == reflect.Ptr && !v.IsNil() { + v = v.Elem() + } + d.unmarshal(node, v) + } + if len(d.terrors) > 0 { + return &TypeError{d.terrors} + } + return nil +} + +// Marshal serializes the value provided into a YAML document. The structure +// of the generated document will reflect the structure of the value itself. +// Maps and pointers (to struct, string, int, etc) are accepted as the in value. +// +// Struct fields are only marshalled if they are exported (have an upper case +// first letter), and are marshalled using the field name lowercased as the +// default key. Custom keys may be defined via the "yaml" name in the field +// tag: the content preceding the first comma is used as the key, and the +// following comma-separated options are used to tweak the marshalling process. +// Conflicting names result in a runtime error. 
+// +// The field tag format accepted is: +// +// `(...) yaml:"[][,[,]]" (...)` +// +// The following flags are currently supported: +// +// omitempty Only include the field if it's not set to the zero +// value for the type or to empty slices or maps. +// Zero valued structs will be omitted if all their public +// fields are zero, unless they implement an IsZero +// method (see the IsZeroer interface type), in which +// case the field will be excluded if IsZero returns true. +// +// flow Marshal using a flow style (useful for structs, +// sequences and maps). +// +// inline Inline the field, which must be a struct or a map, +// causing all of its fields or keys to be processed as if +// they were part of the outer struct. For maps, keys must +// not conflict with the yaml keys of other struct fields. +// +// In addition, if the key is "-", the field is ignored. +// +// For example: +// +// type T struct { +// F int `yaml:"a,omitempty"` +// B int +// } +// yaml.Marshal(&T{B: 2}) // Returns "b: 2\n" +// yaml.Marshal(&T{F: 1}} // Returns "a: 1\nb: 0\n" +func Marshal(in interface{}) (out []byte, err error) { + defer handleErr(&err) + e := newEncoder() + defer e.destroy() + e.marshalDoc("", reflect.ValueOf(in)) + e.finish() + out = e.out + return +} + +// An Encoder writes YAML values to an output stream. +type Encoder struct { + encoder *encoder +} + +// NewEncoder returns a new encoder that writes to w. +// The Encoder should be closed after use to flush all data +// to w. +func NewEncoder(w io.Writer) *Encoder { + return &Encoder{ + encoder: newEncoderWithWriter(w), + } +} + +// Encode writes the YAML encoding of v to the stream. +// If multiple items are encoded to the stream, the +// second and subsequent document will be preceded +// with a "---" document separator, but the first will not. +// +// See the documentation for Marshal for details about the conversion of Go +// values to YAML. 
+func (e *Encoder) Encode(v interface{}) (err error) { + defer handleErr(&err) + e.encoder.marshalDoc("", reflect.ValueOf(v)) + return nil +} + +// Encode encodes value v and stores its representation in n. +// +// See the documentation for Marshal for details about the +// conversion of Go values into YAML. +func (n *Node) Encode(v interface{}) (err error) { + defer handleErr(&err) + e := newEncoder() + defer e.destroy() + e.marshalDoc("", reflect.ValueOf(v)) + e.finish() + p := newParser(e.out) + p.textless = true + defer p.destroy() + doc := p.parse() + *n = *doc.Content[0] + return nil +} + +// SetIndent changes the used indentation used when encoding. +func (e *Encoder) SetIndent(spaces int) { + if spaces < 0 { + panic("yaml: cannot indent to a negative number of spaces") + } + e.encoder.indent = spaces +} + +// CompactSeqIndent makes it so that '- ' is considered part of the indentation. +func (e *Encoder) CompactSeqIndent() { + e.encoder.emitter.compact_sequence_indent = true +} + +// DefaultSeqIndent makes it so that '- ' is not considered part of the indentation. +func (e *Encoder) DefaultSeqIndent() { + e.encoder.emitter.compact_sequence_indent = false +} + +// Close closes the encoder by writing any remaining data. +// It does not write a stream terminating string "...". +func (e *Encoder) Close() (err error) { + defer handleErr(&err) + e.encoder.finish() + return nil +} + +func handleErr(err *error) { + if v := recover(); v != nil { + if e, ok := v.(yamlError); ok { + *err = e.err + } else { + panic(v) + } + } +} + +type yamlError struct { + err error +} + +func fail(err error) { + panic(yamlError{err}) +} + +func failf(format string, args ...interface{}) { + panic(yamlError{fmt.Errorf("yaml: "+format, args...)}) +} + +// A TypeError is returned by Unmarshal when one or more fields in +// the YAML document cannot be properly decoded into the requested +// types. When this error is returned, the value is still +// unmarshaled partially. 
+type TypeError struct { + Errors []string +} + +func (e *TypeError) Error() string { + return fmt.Sprintf("yaml: unmarshal errors:\n %s", strings.Join(e.Errors, "\n ")) +} + +type Kind uint32 + +const ( + DocumentNode Kind = 1 << iota + SequenceNode + MappingNode + ScalarNode + AliasNode +) + +type Style uint32 + +const ( + TaggedStyle Style = 1 << iota + DoubleQuotedStyle + SingleQuotedStyle + LiteralStyle + FoldedStyle + FlowStyle +) + +// Node represents an element in the YAML document hierarchy. While documents +// are typically encoded and decoded into higher level types, such as structs +// and maps, Node is an intermediate representation that allows detailed +// control over the content being decoded or encoded. +// +// It's worth noting that although Node offers access into details such as +// line numbers, colums, and comments, the content when re-encoded will not +// have its original textual representation preserved. An effort is made to +// render the data plesantly, and to preserve comments near the data they +// describe, though. +// +// Values that make use of the Node type interact with the yaml package in the +// same way any other type would do, by encoding and decoding yaml data +// directly or indirectly into them. +// +// For example: +// +// var person struct { +// Name string +// Address yaml.Node +// } +// err := yaml.Unmarshal(data, &person) +// +// Or by itself: +// +// var person Node +// err := yaml.Unmarshal(data, &person) +type Node struct { + // Kind defines whether the node is a document, a mapping, a sequence, + // a scalar value, or an alias to another node. The specific data type of + // scalar nodes may be obtained via the ShortTag and LongTag methods. + Kind Kind + + // Style allows customizing the apperance of the node in the tree. + Style Style + + // Tag holds the YAML tag defining the data type for the value. 
+ // When decoding, this field will always be set to the resolved tag, + // even when it wasn't explicitly provided in the YAML content. + // When encoding, if this field is unset the value type will be + // implied from the node properties, and if it is set, it will only + // be serialized into the representation if TaggedStyle is used or + // the implicit tag diverges from the provided one. + Tag string + + // Value holds the unescaped and unquoted represenation of the value. + Value string + + // Anchor holds the anchor name for this node, which allows aliases to point to it. + Anchor string + + // Alias holds the node that this alias points to. Only valid when Kind is AliasNode. + Alias *Node + + // Content holds contained nodes for documents, mappings, and sequences. + Content []*Node + + // HeadComment holds any comments in the lines preceding the node and + // not separated by an empty line. + HeadComment string + + // LineComment holds any comments at the end of the line where the node is in. + LineComment string + + // FootComment holds any comments following the node and before empty lines. + FootComment string + + // Line and Column hold the node position in the decoded YAML text. + // These fields are not respected when encoding the node. + Line int + Column int +} + +// IsZero returns whether the node has all of its fields unset. +func (n *Node) IsZero() bool { + return n.Kind == 0 && n.Style == 0 && n.Tag == "" && n.Value == "" && n.Anchor == "" && n.Alias == nil && n.Content == nil && + n.HeadComment == "" && n.LineComment == "" && n.FootComment == "" && n.Line == 0 && n.Column == 0 +} + +// LongTag returns the long form of the tag that indicates the data type for +// the node. If the Tag field isn't explicitly defined, one will be computed +// based on the node properties. +func (n *Node) LongTag() string { + return longTag(n.ShortTag()) +} + +// ShortTag returns the short form of the YAML tag that indicates data type for +// the node. 
If the Tag field isn't explicitly defined, one will be computed +// based on the node properties. +func (n *Node) ShortTag() string { + if n.indicatedString() { + return strTag + } + if n.Tag == "" || n.Tag == "!" { + switch n.Kind { + case MappingNode: + return mapTag + case SequenceNode: + return seqTag + case AliasNode: + if n.Alias != nil { + return n.Alias.ShortTag() + } + case ScalarNode: + tag, _ := resolve("", n.Value) + return tag + case 0: + // Special case to make the zero value convenient. + if n.IsZero() { + return nullTag + } + } + return "" + } + return shortTag(n.Tag) +} + +func (n *Node) indicatedString() bool { + return n.Kind == ScalarNode && + (shortTag(n.Tag) == strTag || + (n.Tag == "" || n.Tag == "!") && n.Style&(SingleQuotedStyle|DoubleQuotedStyle|LiteralStyle|FoldedStyle) != 0) +} + +// SetString is a convenience function that sets the node to a string value +// and defines its style in a pleasant way depending on its content. +func (n *Node) SetString(s string) { + n.Kind = ScalarNode + if utf8.ValidString(s) { + n.Value = s + n.Tag = strTag + } else { + n.Value = encodeBase64(s) + n.Tag = binaryTag + } + if strings.Contains(n.Value, "\n") { + n.Style = LiteralStyle + } +} + +// -------------------------------------------------------------------------- +// Maintain a mapping of keys to structure field indexes + +// The code in this section was copied from mgo/bson. + +// structInfo holds details for the serialization of fields of +// a given struct. +type structInfo struct { + FieldsMap map[string]fieldInfo + FieldsList []fieldInfo + + // InlineMap is the number of the field in the struct that + // contains an ,inline map, or -1 if there's none. + InlineMap int + + // InlineUnmarshalers holds indexes to inlined fields that + // contain unmarshaler values. 
+ InlineUnmarshalers [][]int +} + +type fieldInfo struct { + Key string + Num int + OmitEmpty bool + Flow bool + // Id holds the unique field identifier, so we can cheaply + // check for field duplicates without maintaining an extra map. + Id int + + // Inline holds the field index if the field is part of an inlined struct. + Inline []int +} + +var structMap = make(map[reflect.Type]*structInfo) +var fieldMapMutex sync.RWMutex +var unmarshalerType reflect.Type + +func init() { + var v Unmarshaler + unmarshalerType = reflect.ValueOf(&v).Elem().Type() +} + +func getStructInfo(st reflect.Type) (*structInfo, error) { + fieldMapMutex.RLock() + sinfo, found := structMap[st] + fieldMapMutex.RUnlock() + if found { + return sinfo, nil + } + + n := st.NumField() + fieldsMap := make(map[string]fieldInfo) + fieldsList := make([]fieldInfo, 0, n) + inlineMap := -1 + inlineUnmarshalers := [][]int(nil) + for i := 0; i != n; i++ { + field := st.Field(i) + if field.PkgPath != "" && !field.Anonymous { + continue // Private field + } + + info := fieldInfo{Num: i} + + tag := field.Tag.Get("yaml") + if tag == "" && strings.Index(string(field.Tag), ":") < 0 { + tag = string(field.Tag) + } + if tag == "-" { + continue + } + + inline := false + fields := strings.Split(tag, ",") + if len(fields) > 1 { + for _, flag := range fields[1:] { + switch flag { + case "omitempty": + info.OmitEmpty = true + case "flow": + info.Flow = true + case "inline": + inline = true + default: + return nil, errors.New(fmt.Sprintf("unsupported flag %q in tag %q of type %s", flag, tag, st)) + } + } + tag = fields[0] + } + + if inline { + switch field.Type.Kind() { + case reflect.Map: + if inlineMap >= 0 { + return nil, errors.New("multiple ,inline maps in struct " + st.String()) + } + if field.Type.Key() != reflect.TypeOf("") { + return nil, errors.New("option ,inline needs a map with string keys in struct " + st.String()) + } + inlineMap = info.Num + case reflect.Struct, reflect.Ptr: + ftype := field.Type + for 
ftype.Kind() == reflect.Ptr { + ftype = ftype.Elem() + } + if ftype.Kind() != reflect.Struct { + return nil, errors.New("option ,inline may only be used on a struct or map field") + } + if reflect.PtrTo(ftype).Implements(unmarshalerType) { + inlineUnmarshalers = append(inlineUnmarshalers, []int{i}) + } else { + sinfo, err := getStructInfo(ftype) + if err != nil { + return nil, err + } + for _, index := range sinfo.InlineUnmarshalers { + inlineUnmarshalers = append(inlineUnmarshalers, append([]int{i}, index...)) + } + for _, finfo := range sinfo.FieldsList { + if _, found := fieldsMap[finfo.Key]; found { + msg := "duplicated key '" + finfo.Key + "' in struct " + st.String() + return nil, errors.New(msg) + } + if finfo.Inline == nil { + finfo.Inline = []int{i, finfo.Num} + } else { + finfo.Inline = append([]int{i}, finfo.Inline...) + } + finfo.Id = len(fieldsList) + fieldsMap[finfo.Key] = finfo + fieldsList = append(fieldsList, finfo) + } + } + default: + return nil, errors.New("option ,inline may only be used on a struct or map field") + } + continue + } + + if tag != "" { + info.Key = tag + } else { + info.Key = strings.ToLower(field.Name) + } + + if _, found = fieldsMap[info.Key]; found { + msg := "duplicated key '" + info.Key + "' in struct " + st.String() + return nil, errors.New(msg) + } + + info.Id = len(fieldsList) + fieldsList = append(fieldsList, info) + fieldsMap[info.Key] = info + } + + sinfo = &structInfo{ + FieldsMap: fieldsMap, + FieldsList: fieldsList, + InlineMap: inlineMap, + InlineUnmarshalers: inlineUnmarshalers, + } + + fieldMapMutex.Lock() + structMap[st] = sinfo + fieldMapMutex.Unlock() + return sinfo, nil +} + +// IsZeroer is used to check whether an object is zero to +// determine whether it should be omitted when marshaling +// with the omitempty flag. One notable implementation +// is time.Time. 
+type IsZeroer interface { + IsZero() bool +} + +func isZero(v reflect.Value) bool { + kind := v.Kind() + if z, ok := v.Interface().(IsZeroer); ok { + if (kind == reflect.Ptr || kind == reflect.Interface) && v.IsNil() { + return true + } + return z.IsZero() + } + switch kind { + case reflect.String: + return len(v.String()) == 0 + case reflect.Interface, reflect.Ptr: + return v.IsNil() + case reflect.Slice: + return v.Len() == 0 + case reflect.Map: + return v.Len() == 0 + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Struct: + vt := v.Type() + for i := v.NumField() - 1; i >= 0; i-- { + if vt.Field(i).PkgPath != "" { + continue // Private field + } + if !isZero(v.Field(i)) { + return false + } + } + return true + } + return false +} diff --git a/vendor/go.yaml.in/yaml/v3/yamlh.go b/vendor/go.yaml.in/yaml/v3/yamlh.go new file mode 100644 index 000000000..f59aa40f6 --- /dev/null +++ b/vendor/go.yaml.in/yaml/v3/yamlh.go @@ -0,0 +1,811 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// Copyright (c) 2006-2010 Kirill Simonov +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of +// this software and associated documentation files (the "Software"), to deal in +// the Software without restriction, including without limitation the rights to +// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +// of the Software, and to permit persons to whom the Software is furnished to do +// so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. 
+// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +package yaml + +import ( + "fmt" + "io" +) + +// The version directive data. +type yaml_version_directive_t struct { + major int8 // The major version number. + minor int8 // The minor version number. +} + +// The tag directive data. +type yaml_tag_directive_t struct { + handle []byte // The tag handle. + prefix []byte // The tag prefix. +} + +type yaml_encoding_t int + +// The stream encoding. +const ( + // Let the parser choose the encoding. + yaml_ANY_ENCODING yaml_encoding_t = iota + + yaml_UTF8_ENCODING // The default UTF-8 encoding. + yaml_UTF16LE_ENCODING // The UTF-16-LE encoding with BOM. + yaml_UTF16BE_ENCODING // The UTF-16-BE encoding with BOM. +) + +type yaml_break_t int + +// Line break types. +const ( + // Let the parser choose the break type. + yaml_ANY_BREAK yaml_break_t = iota + + yaml_CR_BREAK // Use CR for line breaks (Mac style). + yaml_LN_BREAK // Use LN for line breaks (Unix style). + yaml_CRLN_BREAK // Use CR LN for line breaks (DOS style). +) + +type yaml_error_type_t int + +// Many bad things could happen with the parser and emitter. +const ( + // No error is produced. + yaml_NO_ERROR yaml_error_type_t = iota + + yaml_MEMORY_ERROR // Cannot allocate or reallocate a block of memory. + yaml_READER_ERROR // Cannot read or decode the input stream. + yaml_SCANNER_ERROR // Cannot scan the input stream. + yaml_PARSER_ERROR // Cannot parse the input stream. + yaml_COMPOSER_ERROR // Cannot compose a YAML document. 
+ yaml_WRITER_ERROR // Cannot write to the output stream. + yaml_EMITTER_ERROR // Cannot emit a YAML stream. +) + +// The pointer position. +type yaml_mark_t struct { + index int // The position index. + line int // The position line. + column int // The position column. +} + +// Node Styles + +type yaml_style_t int8 + +type yaml_scalar_style_t yaml_style_t + +// Scalar styles. +const ( + // Let the emitter choose the style. + yaml_ANY_SCALAR_STYLE yaml_scalar_style_t = 0 + + yaml_PLAIN_SCALAR_STYLE yaml_scalar_style_t = 1 << iota // The plain scalar style. + yaml_SINGLE_QUOTED_SCALAR_STYLE // The single-quoted scalar style. + yaml_DOUBLE_QUOTED_SCALAR_STYLE // The double-quoted scalar style. + yaml_LITERAL_SCALAR_STYLE // The literal scalar style. + yaml_FOLDED_SCALAR_STYLE // The folded scalar style. +) + +type yaml_sequence_style_t yaml_style_t + +// Sequence styles. +const ( + // Let the emitter choose the style. + yaml_ANY_SEQUENCE_STYLE yaml_sequence_style_t = iota + + yaml_BLOCK_SEQUENCE_STYLE // The block sequence style. + yaml_FLOW_SEQUENCE_STYLE // The flow sequence style. +) + +type yaml_mapping_style_t yaml_style_t + +// Mapping styles. +const ( + // Let the emitter choose the style. + yaml_ANY_MAPPING_STYLE yaml_mapping_style_t = iota + + yaml_BLOCK_MAPPING_STYLE // The block mapping style. + yaml_FLOW_MAPPING_STYLE // The flow mapping style. +) + +// Tokens + +type yaml_token_type_t int + +// Token types. +const ( + // An empty token. + yaml_NO_TOKEN yaml_token_type_t = iota + + yaml_STREAM_START_TOKEN // A STREAM-START token. + yaml_STREAM_END_TOKEN // A STREAM-END token. + + yaml_VERSION_DIRECTIVE_TOKEN // A VERSION-DIRECTIVE token. + yaml_TAG_DIRECTIVE_TOKEN // A TAG-DIRECTIVE token. + yaml_DOCUMENT_START_TOKEN // A DOCUMENT-START token. + yaml_DOCUMENT_END_TOKEN // A DOCUMENT-END token. + + yaml_BLOCK_SEQUENCE_START_TOKEN // A BLOCK-SEQUENCE-START token. + yaml_BLOCK_MAPPING_START_TOKEN // A BLOCK-SEQUENCE-END token. 
+ yaml_BLOCK_END_TOKEN // A BLOCK-END token. + + yaml_FLOW_SEQUENCE_START_TOKEN // A FLOW-SEQUENCE-START token. + yaml_FLOW_SEQUENCE_END_TOKEN // A FLOW-SEQUENCE-END token. + yaml_FLOW_MAPPING_START_TOKEN // A FLOW-MAPPING-START token. + yaml_FLOW_MAPPING_END_TOKEN // A FLOW-MAPPING-END token. + + yaml_BLOCK_ENTRY_TOKEN // A BLOCK-ENTRY token. + yaml_FLOW_ENTRY_TOKEN // A FLOW-ENTRY token. + yaml_KEY_TOKEN // A KEY token. + yaml_VALUE_TOKEN // A VALUE token. + + yaml_ALIAS_TOKEN // An ALIAS token. + yaml_ANCHOR_TOKEN // An ANCHOR token. + yaml_TAG_TOKEN // A TAG token. + yaml_SCALAR_TOKEN // A SCALAR token. +) + +func (tt yaml_token_type_t) String() string { + switch tt { + case yaml_NO_TOKEN: + return "yaml_NO_TOKEN" + case yaml_STREAM_START_TOKEN: + return "yaml_STREAM_START_TOKEN" + case yaml_STREAM_END_TOKEN: + return "yaml_STREAM_END_TOKEN" + case yaml_VERSION_DIRECTIVE_TOKEN: + return "yaml_VERSION_DIRECTIVE_TOKEN" + case yaml_TAG_DIRECTIVE_TOKEN: + return "yaml_TAG_DIRECTIVE_TOKEN" + case yaml_DOCUMENT_START_TOKEN: + return "yaml_DOCUMENT_START_TOKEN" + case yaml_DOCUMENT_END_TOKEN: + return "yaml_DOCUMENT_END_TOKEN" + case yaml_BLOCK_SEQUENCE_START_TOKEN: + return "yaml_BLOCK_SEQUENCE_START_TOKEN" + case yaml_BLOCK_MAPPING_START_TOKEN: + return "yaml_BLOCK_MAPPING_START_TOKEN" + case yaml_BLOCK_END_TOKEN: + return "yaml_BLOCK_END_TOKEN" + case yaml_FLOW_SEQUENCE_START_TOKEN: + return "yaml_FLOW_SEQUENCE_START_TOKEN" + case yaml_FLOW_SEQUENCE_END_TOKEN: + return "yaml_FLOW_SEQUENCE_END_TOKEN" + case yaml_FLOW_MAPPING_START_TOKEN: + return "yaml_FLOW_MAPPING_START_TOKEN" + case yaml_FLOW_MAPPING_END_TOKEN: + return "yaml_FLOW_MAPPING_END_TOKEN" + case yaml_BLOCK_ENTRY_TOKEN: + return "yaml_BLOCK_ENTRY_TOKEN" + case yaml_FLOW_ENTRY_TOKEN: + return "yaml_FLOW_ENTRY_TOKEN" + case yaml_KEY_TOKEN: + return "yaml_KEY_TOKEN" + case yaml_VALUE_TOKEN: + return "yaml_VALUE_TOKEN" + case yaml_ALIAS_TOKEN: + return "yaml_ALIAS_TOKEN" + case yaml_ANCHOR_TOKEN: + return 
"yaml_ANCHOR_TOKEN" + case yaml_TAG_TOKEN: + return "yaml_TAG_TOKEN" + case yaml_SCALAR_TOKEN: + return "yaml_SCALAR_TOKEN" + } + return "" +} + +// The token structure. +type yaml_token_t struct { + // The token type. + typ yaml_token_type_t + + // The start/end of the token. + start_mark, end_mark yaml_mark_t + + // The stream encoding (for yaml_STREAM_START_TOKEN). + encoding yaml_encoding_t + + // The alias/anchor/scalar value or tag/tag directive handle + // (for yaml_ALIAS_TOKEN, yaml_ANCHOR_TOKEN, yaml_SCALAR_TOKEN, yaml_TAG_TOKEN, yaml_TAG_DIRECTIVE_TOKEN). + value []byte + + // The tag suffix (for yaml_TAG_TOKEN). + suffix []byte + + // The tag directive prefix (for yaml_TAG_DIRECTIVE_TOKEN). + prefix []byte + + // The scalar style (for yaml_SCALAR_TOKEN). + style yaml_scalar_style_t + + // The version directive major/minor (for yaml_VERSION_DIRECTIVE_TOKEN). + major, minor int8 +} + +// Events + +type yaml_event_type_t int8 + +// Event types. +const ( + // An empty event. + yaml_NO_EVENT yaml_event_type_t = iota + + yaml_STREAM_START_EVENT // A STREAM-START event. + yaml_STREAM_END_EVENT // A STREAM-END event. + yaml_DOCUMENT_START_EVENT // A DOCUMENT-START event. + yaml_DOCUMENT_END_EVENT // A DOCUMENT-END event. + yaml_ALIAS_EVENT // An ALIAS event. + yaml_SCALAR_EVENT // A SCALAR event. + yaml_SEQUENCE_START_EVENT // A SEQUENCE-START event. + yaml_SEQUENCE_END_EVENT // A SEQUENCE-END event. + yaml_MAPPING_START_EVENT // A MAPPING-START event. + yaml_MAPPING_END_EVENT // A MAPPING-END event. 
+ yaml_TAIL_COMMENT_EVENT +) + +var eventStrings = []string{ + yaml_NO_EVENT: "none", + yaml_STREAM_START_EVENT: "stream start", + yaml_STREAM_END_EVENT: "stream end", + yaml_DOCUMENT_START_EVENT: "document start", + yaml_DOCUMENT_END_EVENT: "document end", + yaml_ALIAS_EVENT: "alias", + yaml_SCALAR_EVENT: "scalar", + yaml_SEQUENCE_START_EVENT: "sequence start", + yaml_SEQUENCE_END_EVENT: "sequence end", + yaml_MAPPING_START_EVENT: "mapping start", + yaml_MAPPING_END_EVENT: "mapping end", + yaml_TAIL_COMMENT_EVENT: "tail comment", +} + +func (e yaml_event_type_t) String() string { + if e < 0 || int(e) >= len(eventStrings) { + return fmt.Sprintf("unknown event %d", e) + } + return eventStrings[e] +} + +// The event structure. +type yaml_event_t struct { + + // The event type. + typ yaml_event_type_t + + // The start and end of the event. + start_mark, end_mark yaml_mark_t + + // The document encoding (for yaml_STREAM_START_EVENT). + encoding yaml_encoding_t + + // The version directive (for yaml_DOCUMENT_START_EVENT). + version_directive *yaml_version_directive_t + + // The list of tag directives (for yaml_DOCUMENT_START_EVENT). + tag_directives []yaml_tag_directive_t + + // The comments + head_comment []byte + line_comment []byte + foot_comment []byte + tail_comment []byte + + // The anchor (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_ALIAS_EVENT). + anchor []byte + + // The tag (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). + tag []byte + + // The scalar value (for yaml_SCALAR_EVENT). + value []byte + + // Is the document start/end indicator implicit, or the tag optional? + // (for yaml_DOCUMENT_START_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_SCALAR_EVENT). + implicit bool + + // Is the tag optional for any non-plain style? (for yaml_SCALAR_EVENT). 
+ quoted_implicit bool + + // The style (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). + style yaml_style_t +} + +func (e *yaml_event_t) scalar_style() yaml_scalar_style_t { return yaml_scalar_style_t(e.style) } +func (e *yaml_event_t) sequence_style() yaml_sequence_style_t { return yaml_sequence_style_t(e.style) } +func (e *yaml_event_t) mapping_style() yaml_mapping_style_t { return yaml_mapping_style_t(e.style) } + +// Nodes + +const ( + yaml_NULL_TAG = "tag:yaml.org,2002:null" // The tag !!null with the only possible value: null. + yaml_BOOL_TAG = "tag:yaml.org,2002:bool" // The tag !!bool with the values: true and false. + yaml_STR_TAG = "tag:yaml.org,2002:str" // The tag !!str for string values. + yaml_INT_TAG = "tag:yaml.org,2002:int" // The tag !!int for integer values. + yaml_FLOAT_TAG = "tag:yaml.org,2002:float" // The tag !!float for float values. + yaml_TIMESTAMP_TAG = "tag:yaml.org,2002:timestamp" // The tag !!timestamp for date and time values. + + yaml_SEQ_TAG = "tag:yaml.org,2002:seq" // The tag !!seq is used to denote sequences. + yaml_MAP_TAG = "tag:yaml.org,2002:map" // The tag !!map is used to denote mapping. + + // Not in original libyaml. + yaml_BINARY_TAG = "tag:yaml.org,2002:binary" + yaml_MERGE_TAG = "tag:yaml.org,2002:merge" + + yaml_DEFAULT_SCALAR_TAG = yaml_STR_TAG // The default scalar tag is !!str. + yaml_DEFAULT_SEQUENCE_TAG = yaml_SEQ_TAG // The default sequence tag is !!seq. + yaml_DEFAULT_MAPPING_TAG = yaml_MAP_TAG // The default mapping tag is !!map. +) + +type yaml_node_type_t int + +// Node types. +const ( + // An empty node. + yaml_NO_NODE yaml_node_type_t = iota + + yaml_SCALAR_NODE // A scalar node. + yaml_SEQUENCE_NODE // A sequence node. + yaml_MAPPING_NODE // A mapping node. +) + +// An element of a sequence node. +type yaml_node_item_t int + +// An element of a mapping node. +type yaml_node_pair_t struct { + key int // The key of the element. + value int // The value of the element. 
+} + +// The node structure. +type yaml_node_t struct { + typ yaml_node_type_t // The node type. + tag []byte // The node tag. + + // The node data. + + // The scalar parameters (for yaml_SCALAR_NODE). + scalar struct { + value []byte // The scalar value. + length int // The length of the scalar value. + style yaml_scalar_style_t // The scalar style. + } + + // The sequence parameters (for YAML_SEQUENCE_NODE). + sequence struct { + items_data []yaml_node_item_t // The stack of sequence items. + style yaml_sequence_style_t // The sequence style. + } + + // The mapping parameters (for yaml_MAPPING_NODE). + mapping struct { + pairs_data []yaml_node_pair_t // The stack of mapping pairs (key, value). + pairs_start *yaml_node_pair_t // The beginning of the stack. + pairs_end *yaml_node_pair_t // The end of the stack. + pairs_top *yaml_node_pair_t // The top of the stack. + style yaml_mapping_style_t // The mapping style. + } + + start_mark yaml_mark_t // The beginning of the node. + end_mark yaml_mark_t // The end of the node. + +} + +// The document structure. +type yaml_document_t struct { + + // The document nodes. + nodes []yaml_node_t + + // The version directive. + version_directive *yaml_version_directive_t + + // The list of tag directives. + tag_directives_data []yaml_tag_directive_t + tag_directives_start int // The beginning of the tag directives list. + tag_directives_end int // The end of the tag directives list. + + start_implicit int // Is the document start indicator implicit? + end_implicit int // Is the document end indicator implicit? + + // The start/end of the document. + start_mark, end_mark yaml_mark_t +} + +// The prototype of a read handler. +// +// The read handler is called when the parser needs to read more bytes from the +// source. The handler should write not more than size bytes to the buffer. +// The number of written bytes should be set to the size_read variable. 
+// +// [in,out] data A pointer to an application data specified by +// +// yaml_parser_set_input(). +// +// [out] buffer The buffer to write the data from the source. +// [in] size The size of the buffer. +// [out] size_read The actual number of bytes read from the source. +// +// On success, the handler should return 1. If the handler failed, +// the returned value should be 0. On EOF, the handler should set the +// size_read to 0 and return 1. +type yaml_read_handler_t func(parser *yaml_parser_t, buffer []byte) (n int, err error) + +// This structure holds information about a potential simple key. +type yaml_simple_key_t struct { + possible bool // Is a simple key possible? + required bool // Is a simple key required? + token_number int // The number of the token. + mark yaml_mark_t // The position mark. +} + +// The states of the parser. +type yaml_parser_state_t int + +const ( + yaml_PARSE_STREAM_START_STATE yaml_parser_state_t = iota + + yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE // Expect the beginning of an implicit document. + yaml_PARSE_DOCUMENT_START_STATE // Expect DOCUMENT-START. + yaml_PARSE_DOCUMENT_CONTENT_STATE // Expect the content of a document. + yaml_PARSE_DOCUMENT_END_STATE // Expect DOCUMENT-END. + yaml_PARSE_BLOCK_NODE_STATE // Expect a block node. + yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE // Expect a block node or indentless sequence. + yaml_PARSE_FLOW_NODE_STATE // Expect a flow node. + yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a block sequence. + yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE // Expect an entry of a block sequence. + yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE // Expect an entry of an indentless sequence. + yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping. + yaml_PARSE_BLOCK_MAPPING_KEY_STATE // Expect a block mapping key. + yaml_PARSE_BLOCK_MAPPING_VALUE_STATE // Expect a block mapping value. 
+ yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a flow sequence. + yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE // Expect an entry of a flow sequence. + yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE // Expect a key of an ordered mapping. + yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE // Expect a value of an ordered mapping. + yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE // Expect the and of an ordered mapping entry. + yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping. + yaml_PARSE_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping. + yaml_PARSE_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping. + yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE // Expect an empty value of a flow mapping. + yaml_PARSE_END_STATE // Expect nothing. +) + +func (ps yaml_parser_state_t) String() string { + switch ps { + case yaml_PARSE_STREAM_START_STATE: + return "yaml_PARSE_STREAM_START_STATE" + case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE: + return "yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE" + case yaml_PARSE_DOCUMENT_START_STATE: + return "yaml_PARSE_DOCUMENT_START_STATE" + case yaml_PARSE_DOCUMENT_CONTENT_STATE: + return "yaml_PARSE_DOCUMENT_CONTENT_STATE" + case yaml_PARSE_DOCUMENT_END_STATE: + return "yaml_PARSE_DOCUMENT_END_STATE" + case yaml_PARSE_BLOCK_NODE_STATE: + return "yaml_PARSE_BLOCK_NODE_STATE" + case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE: + return "yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE" + case yaml_PARSE_FLOW_NODE_STATE: + return "yaml_PARSE_FLOW_NODE_STATE" + case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE: + return "yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE" + case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE: + return "yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE" + case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE: + return "yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE" + case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE: + return "yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE" + case 
yaml_PARSE_BLOCK_MAPPING_KEY_STATE: + return "yaml_PARSE_BLOCK_MAPPING_KEY_STATE" + case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE: + return "yaml_PARSE_BLOCK_MAPPING_VALUE_STATE" + case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE" + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE" + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE" + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE" + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE" + case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE: + return "yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE" + case yaml_PARSE_FLOW_MAPPING_KEY_STATE: + return "yaml_PARSE_FLOW_MAPPING_KEY_STATE" + case yaml_PARSE_FLOW_MAPPING_VALUE_STATE: + return "yaml_PARSE_FLOW_MAPPING_VALUE_STATE" + case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE: + return "yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE" + case yaml_PARSE_END_STATE: + return "yaml_PARSE_END_STATE" + } + return "" +} + +// This structure holds aliases data. +type yaml_alias_data_t struct { + anchor []byte // The anchor. + index int // The node id. + mark yaml_mark_t // The anchor mark. +} + +// The parser structure. +// +// All members are internal. Manage the structure using the +// yaml_parser_ family of functions. +type yaml_parser_t struct { + + // Error handling + + error yaml_error_type_t // Error type. + + problem string // Error description. + + // The byte about which the problem occurred. + problem_offset int + problem_value int + problem_mark yaml_mark_t + + // The error context. + context string + context_mark yaml_mark_t + + // Reader stuff + + read_handler yaml_read_handler_t // Read handler. + + input_reader io.Reader // File input data. + input []byte // String input data. 
+ input_pos int + + eof bool // EOF flag + + buffer []byte // The working buffer. + buffer_pos int // The current position of the buffer. + + unread int // The number of unread characters in the buffer. + + newlines int // The number of line breaks since last non-break/non-blank character + + raw_buffer []byte // The raw buffer. + raw_buffer_pos int // The current position of the buffer. + + encoding yaml_encoding_t // The input encoding. + + offset int // The offset of the current position (in bytes). + mark yaml_mark_t // The mark of the current position. + + // Comments + + head_comment []byte // The current head comments + line_comment []byte // The current line comments + foot_comment []byte // The current foot comments + tail_comment []byte // Foot comment that happens at the end of a block. + stem_comment []byte // Comment in item preceding a nested structure (list inside list item, etc) + + comments []yaml_comment_t // The folded comments for all parsed tokens + comments_head int + + // Scanner stuff + + stream_start_produced bool // Have we started to scan the input stream? + stream_end_produced bool // Have we reached the end of the input stream? + + flow_level int // The number of unclosed '[' and '{' indicators. + + tokens []yaml_token_t // The tokens queue. + tokens_head int // The head of the tokens queue. + tokens_parsed int // The number of tokens fetched from the queue. + token_available bool // Does the tokens queue contain a token ready for dequeueing. + + indent int // The current indentation level. + indents []int // The indentation levels stack. + + simple_key_allowed bool // May a simple key occur at the current position? + simple_keys []yaml_simple_key_t // The stack of simple keys. + simple_keys_by_tok map[int]int // possible simple_key indexes indexed by token_number + + // Parser stuff + + state yaml_parser_state_t // The current parser state. + states []yaml_parser_state_t // The parser states stack. 
+ marks []yaml_mark_t // The stack of marks. + tag_directives []yaml_tag_directive_t // The list of TAG directives. + + // Dumper stuff + + aliases []yaml_alias_data_t // The alias data. + + document *yaml_document_t // The currently parsed document. +} + +type yaml_comment_t struct { + scan_mark yaml_mark_t // Position where scanning for comments started + token_mark yaml_mark_t // Position after which tokens will be associated with this comment + start_mark yaml_mark_t // Position of '#' comment mark + end_mark yaml_mark_t // Position where comment terminated + + head []byte + line []byte + foot []byte +} + +// Emitter Definitions + +// The prototype of a write handler. +// +// The write handler is called when the emitter needs to flush the accumulated +// characters to the output. The handler should write @a size bytes of the +// @a buffer to the output. +// +// @param[in,out] data A pointer to an application data specified by +// +// yaml_emitter_set_output(). +// +// @param[in] buffer The buffer with bytes to be written. +// @param[in] size The size of the buffer. +// +// @returns On success, the handler should return @c 1. If the handler failed, +// the returned value should be @c 0. +type yaml_write_handler_t func(emitter *yaml_emitter_t, buffer []byte) error + +type yaml_emitter_state_t int + +// The emitter states. +const ( + // Expect STREAM-START. + yaml_EMIT_STREAM_START_STATE yaml_emitter_state_t = iota + + yaml_EMIT_FIRST_DOCUMENT_START_STATE // Expect the first DOCUMENT-START or STREAM-END. + yaml_EMIT_DOCUMENT_START_STATE // Expect DOCUMENT-START or STREAM-END. + yaml_EMIT_DOCUMENT_CONTENT_STATE // Expect the content of a document. + yaml_EMIT_DOCUMENT_END_STATE // Expect DOCUMENT-END. + yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a flow sequence. 
+ yaml_EMIT_FLOW_SEQUENCE_TRAIL_ITEM_STATE // Expect the next item of a flow sequence, with the comma already written out + yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE // Expect an item of a flow sequence. + yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping. + yaml_EMIT_FLOW_MAPPING_TRAIL_KEY_STATE // Expect the next key of a flow mapping, with the comma already written out + yaml_EMIT_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping. + yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a flow mapping. + yaml_EMIT_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping. + yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a block sequence. + yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE // Expect an item of a block sequence. + yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping. + yaml_EMIT_BLOCK_MAPPING_KEY_STATE // Expect the key of a block mapping. + yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a block mapping. + yaml_EMIT_BLOCK_MAPPING_VALUE_STATE // Expect a value of a block mapping. + yaml_EMIT_END_STATE // Expect nothing. +) + +// The emitter structure. +// +// All members are internal. Manage the structure using the @c yaml_emitter_ +// family of functions. +type yaml_emitter_t struct { + + // Error handling + + error yaml_error_type_t // Error type. + problem string // Error description. + + // Writer stuff + + write_handler yaml_write_handler_t // Write handler. + + output_buffer *[]byte // String output data. + output_writer io.Writer // File output data. + + buffer []byte // The working buffer. + buffer_pos int // The current position of the buffer. + + raw_buffer []byte // The raw buffer. + raw_buffer_pos int // The current position of the buffer. + + encoding yaml_encoding_t // The stream encoding. + + // Emitter stuff + + canonical bool // If the output is in the canonical style? 
+ best_indent int // The number of indentation spaces. + best_width int // The preferred width of the output lines. + unicode bool // Allow unescaped non-ASCII characters? + line_break yaml_break_t // The preferred line break. + + state yaml_emitter_state_t // The current emitter state. + states []yaml_emitter_state_t // The stack of states. + + events []yaml_event_t // The event queue. + events_head int // The head of the event queue. + + indents []int // The stack of indentation levels. + + tag_directives []yaml_tag_directive_t // The list of tag directives. + + indent int // The current indentation level. + + compact_sequence_indent bool // Is '- ' is considered part of the indentation for sequence elements? + + flow_level int // The current flow level. + + root_context bool // Is it the document root context? + sequence_context bool // Is it a sequence context? + mapping_context bool // Is it a mapping context? + simple_key_context bool // Is it a simple mapping key context? + + line int // The current line. + column int // The current column. + whitespace bool // If the last character was a whitespace? + indention bool // If the last character was an indentation character (' ', '-', '?', ':')? + open_ended bool // If an explicit document end is required? + + space_above bool // Is there's an empty line above? + foot_indent int // The indent used to write the foot comment above, or -1 if none. + + // Anchor analysis. + anchor_data struct { + anchor []byte // The anchor value. + alias bool // Is it an alias? + } + + // Tag analysis. + tag_data struct { + handle []byte // The tag handle. + suffix []byte // The tag suffix. + } + + // Scalar analysis. + scalar_data struct { + value []byte // The scalar value. + multiline bool // Does the scalar contain line breaks? + flow_plain_allowed bool // Can the scalar be expessed in the flow plain style? + block_plain_allowed bool // Can the scalar be expressed in the block plain style? 
+ single_quoted_allowed bool // Can the scalar be expressed in the single quoted style? + block_allowed bool // Can the scalar be expressed in the literal or folded styles? + style yaml_scalar_style_t // The output style. + } + + // Comments + head_comment []byte + line_comment []byte + foot_comment []byte + tail_comment []byte + + key_line_comment []byte + + // Dumper stuff + + opened bool // If the stream was already opened? + closed bool // If the stream was already closed? + + // The information associated with the document nodes. + anchors *struct { + references int // The number of references. + anchor int // The anchor id. + serialized bool // If the node has been emitted? + } + + last_anchor_id int // The last assigned anchor id. + + document *yaml_document_t // The currently emitted document. +} diff --git a/vendor/go.yaml.in/yaml/v3/yamlprivateh.go b/vendor/go.yaml.in/yaml/v3/yamlprivateh.go new file mode 100644 index 000000000..dea1ba961 --- /dev/null +++ b/vendor/go.yaml.in/yaml/v3/yamlprivateh.go @@ -0,0 +1,198 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// Copyright (c) 2006-2010 Kirill Simonov +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of +// this software and associated documentation files (the "Software"), to deal in +// the Software without restriction, including without limitation the rights to +// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +// of the Software, and to permit persons to whom the Software is furnished to do +// so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +package yaml + +const ( + // The size of the input raw buffer. + input_raw_buffer_size = 512 + + // The size of the input buffer. + // It should be possible to decode the whole raw buffer. + input_buffer_size = input_raw_buffer_size * 3 + + // The size of the output buffer. + output_buffer_size = 128 + + // The size of the output raw buffer. + // It should be possible to encode the whole output buffer. + output_raw_buffer_size = (output_buffer_size*2 + 2) + + // The size of other stacks and queues. + initial_stack_size = 16 + initial_queue_size = 16 + initial_string_size = 16 +) + +// Check if the character at the specified position is an alphabetical +// character, a digit, '_', or '-'. +func is_alpha(b []byte, i int) bool { + return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'Z' || b[i] >= 'a' && b[i] <= 'z' || b[i] == '_' || b[i] == '-' +} + +// Check if the character at the specified position is a digit. +func is_digit(b []byte, i int) bool { + return b[i] >= '0' && b[i] <= '9' +} + +// Get the value of a digit. +func as_digit(b []byte, i int) int { + return int(b[i]) - '0' +} + +// Check if the character at the specified position is a hex-digit. +func is_hex(b []byte, i int) bool { + return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'F' || b[i] >= 'a' && b[i] <= 'f' +} + +// Get the value of a hex-digit. +func as_hex(b []byte, i int) int { + bi := b[i] + if bi >= 'A' && bi <= 'F' { + return int(bi) - 'A' + 10 + } + if bi >= 'a' && bi <= 'f' { + return int(bi) - 'a' + 10 + } + return int(bi) - '0' +} + +// Check if the character is ASCII. 
+func is_ascii(b []byte, i int) bool { + return b[i] <= 0x7F +} + +// Check if the character at the start of the buffer can be printed unescaped. +func is_printable(b []byte, i int) bool { + return ((b[i] == 0x0A) || // . == #x0A + (b[i] >= 0x20 && b[i] <= 0x7E) || // #x20 <= . <= #x7E + (b[i] == 0xC2 && b[i+1] >= 0xA0) || // #0xA0 <= . <= #xD7FF + (b[i] > 0xC2 && b[i] < 0xED) || + (b[i] == 0xED && b[i+1] < 0xA0) || + (b[i] == 0xEE) || + (b[i] == 0xEF && // #xE000 <= . <= #xFFFD + !(b[i+1] == 0xBB && b[i+2] == 0xBF) && // && . != #xFEFF + !(b[i+1] == 0xBF && (b[i+2] == 0xBE || b[i+2] == 0xBF)))) +} + +// Check if the character at the specified position is NUL. +func is_z(b []byte, i int) bool { + return b[i] == 0x00 +} + +// Check if the beginning of the buffer is a BOM. +func is_bom(b []byte, i int) bool { + return b[0] == 0xEF && b[1] == 0xBB && b[2] == 0xBF +} + +// Check if the character at the specified position is space. +func is_space(b []byte, i int) bool { + return b[i] == ' ' +} + +// Check if the character at the specified position is tab. +func is_tab(b []byte, i int) bool { + return b[i] == '\t' +} + +// Check if the character at the specified position is blank (space or tab). +func is_blank(b []byte, i int) bool { + //return is_space(b, i) || is_tab(b, i) + return b[i] == ' ' || b[i] == '\t' +} + +// Check if the character at the specified position is a line break. +func is_break(b []byte, i int) bool { + return (b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9) // PS (#x2029) +} + +func is_crlf(b []byte, i int) bool { + return b[i] == '\r' && b[i+1] == '\n' +} + +// Check if the character is a line break or NUL. 
+func is_breakz(b []byte, i int) bool { + //return is_break(b, i) || is_z(b, i) + return ( + // is_break: + b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) + // is_z: + b[i] == 0) +} + +// Check if the character is a line break, space, or NUL. +func is_spacez(b []byte, i int) bool { + //return is_space(b, i) || is_breakz(b, i) + return ( + // is_space: + b[i] == ' ' || + // is_breakz: + b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) + b[i] == 0) +} + +// Check if the character is a line break, space, tab, or NUL. +func is_blankz(b []byte, i int) bool { + //return is_blank(b, i) || is_breakz(b, i) + return ( + // is_blank: + b[i] == ' ' || b[i] == '\t' || + // is_breakz: + b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) + b[i] == 0) +} + +// Determine the width of the character. +func width(b byte) int { + // Don't replace these by a switch without first + // confirming that it is being inlined. 
+ if b&0x80 == 0x00 { + return 1 + } + if b&0xE0 == 0xC0 { + return 2 + } + if b&0xF0 == 0xE0 { + return 3 + } + if b&0xF8 == 0xF0 { + return 4 + } + return 0 + +} diff --git a/vendor/golang.org/x/exp/slices/slices.go b/vendor/golang.org/x/exp/slices/slices.go index 757383ea1..da0df370d 100644 --- a/vendor/golang.org/x/exp/slices/slices.go +++ b/vendor/golang.org/x/exp/slices/slices.go @@ -10,16 +10,13 @@ import ( "slices" ) -// TODO(adonovan): when https://go.dev/issue/32816 is accepted, all of -// these functions should be annotated (provisionally with "//go:fix -// inline") so that tools can safely and automatically replace calls -// to exp/slices with calls to std slices by inlining them. - // Equal reports whether two slices are equal: the same length and all // elements equal. If the lengths are different, Equal returns false. // Otherwise, the elements are compared in increasing index order, and the // comparison stops at the first unequal pair. // Floating point NaNs are not considered equal. +// +//go:fix inline func Equal[S ~[]E, E comparable](s1, s2 S) bool { return slices.Equal(s1, s2) } @@ -29,6 +26,8 @@ func Equal[S ~[]E, E comparable](s1, s2 S) bool { // EqualFunc returns false. Otherwise, the elements are compared in // increasing index order, and the comparison stops at the first index // for which eq returns false. +// +//go:fix inline func EqualFunc[S1 ~[]E1, S2 ~[]E2, E1, E2 any](s1 S1, s2 S2, eq func(E1, E2) bool) bool { return slices.EqualFunc(s1, s2, eq) } @@ -40,6 +39,8 @@ func EqualFunc[S1 ~[]E1, S2 ~[]E2, E1, E2 any](s1 S1, s2 S2, eq func(E1, E2) boo // If both slices are equal until one of them ends, the shorter slice is // considered less than the longer one. // The result is 0 if s1 == s2, -1 if s1 < s2, and +1 if s1 > s2. 
+// +//go:fix inline func Compare[S ~[]E, E cmp.Ordered](s1, s2 S) int { return slices.Compare(s1, s2) } @@ -49,29 +50,39 @@ func Compare[S ~[]E, E cmp.Ordered](s1, s2 S) int { // The result is the first non-zero result of cmp; if cmp always // returns 0 the result is 0 if len(s1) == len(s2), -1 if len(s1) < len(s2), // and +1 if len(s1) > len(s2). +// +//go:fix inline func CompareFunc[S1 ~[]E1, S2 ~[]E2, E1, E2 any](s1 S1, s2 S2, cmp func(E1, E2) int) int { return slices.CompareFunc(s1, s2, cmp) } // Index returns the index of the first occurrence of v in s, // or -1 if not present. +// +//go:fix inline func Index[S ~[]E, E comparable](s S, v E) int { return slices.Index(s, v) } // IndexFunc returns the first index i satisfying f(s[i]), // or -1 if none do. +// +//go:fix inline func IndexFunc[S ~[]E, E any](s S, f func(E) bool) int { return slices.IndexFunc(s, f) } // Contains reports whether v is present in s. +// +//go:fix inline func Contains[S ~[]E, E comparable](s S, v E) bool { return slices.Contains(s, v) } // ContainsFunc reports whether at least one // element e of s satisfies f(e). +// +//go:fix inline func ContainsFunc[S ~[]E, E any](s S, f func(E) bool) bool { return slices.ContainsFunc(s, f) } @@ -83,6 +94,8 @@ func ContainsFunc[S ~[]E, E any](s S, f func(E) bool) bool { // and r[i+len(v)] == value originally at r[i]. // Insert panics if i is out of range. // This function is O(len(s) + len(v)). +// +//go:fix inline func Insert[S ~[]E, E any](s S, i int, v ...E) S { return slices.Insert(s, i, v...) } @@ -92,6 +105,8 @@ func Insert[S ~[]E, E any](s S, i int, v ...E) S { // Delete is O(len(s)-i), so if many items must be deleted, it is better to // make a single call deleting them all together than to delete one at a time. // Delete zeroes the elements s[len(s)-(j-i):len(s)]. 
+// +//go:fix inline func Delete[S ~[]E, E any](s S, i, j int) S { return slices.Delete(s, i, j) } @@ -99,6 +114,8 @@ func Delete[S ~[]E, E any](s S, i, j int) S { // DeleteFunc removes any elements from s for which del returns true, // returning the modified slice. // DeleteFunc zeroes the elements between the new length and the original length. +// +//go:fix inline func DeleteFunc[S ~[]E, E any](s S, del func(E) bool) S { return slices.DeleteFunc(s, del) } @@ -106,12 +123,16 @@ func DeleteFunc[S ~[]E, E any](s S, del func(E) bool) S { // Replace replaces the elements s[i:j] by the given v, and returns the // modified slice. Replace panics if s[i:j] is not a valid slice of s. // When len(v) < (j-i), Replace zeroes the elements between the new length and the original length. +// +//go:fix inline func Replace[S ~[]E, E any](s S, i, j int, v ...E) S { return slices.Replace(s, i, j, v...) } // Clone returns a copy of the slice. // The elements are copied using assignment, so this is a shallow clone. +// +//go:fix inline func Clone[S ~[]E, E any](s S) S { return slices.Clone(s) } @@ -121,6 +142,8 @@ func Clone[S ~[]E, E any](s S) S { // Compact modifies the contents of the slice s and returns the modified slice, // which may have a smaller length. // Compact zeroes the elements between the new length and the original length. +// +//go:fix inline func Compact[S ~[]E, E comparable](s S) S { return slices.Compact(s) } @@ -128,6 +151,8 @@ func Compact[S ~[]E, E comparable](s S) S { // CompactFunc is like [Compact] but uses an equality function to compare elements. // For runs of elements that compare equal, CompactFunc keeps the first one. // CompactFunc zeroes the elements between the new length and the original length. +// +//go:fix inline func CompactFunc[S ~[]E, E any](s S, eq func(E, E) bool) S { return slices.CompactFunc(s, eq) } @@ -136,16 +161,22 @@ func CompactFunc[S ~[]E, E any](s S, eq func(E, E) bool) S { // another n elements. 
After Grow(n), at least n elements can be appended // to the slice without another allocation. If n is negative or too large to // allocate the memory, Grow panics. +// +//go:fix inline func Grow[S ~[]E, E any](s S, n int) S { return slices.Grow(s, n) } // Clip removes unused capacity from the slice, returning s[:len(s):len(s)]. +// +//go:fix inline func Clip[S ~[]E, E any](s S) S { return slices.Clip(s) } // Reverse reverses the elements of the slice in place. +// +//go:fix inline func Reverse[S ~[]E, E any](s S) { slices.Reverse(s) } diff --git a/vendor/golang.org/x/exp/slices/sort.go b/vendor/golang.org/x/exp/slices/sort.go index e270a7465..bd91a8d40 100644 --- a/vendor/golang.org/x/exp/slices/sort.go +++ b/vendor/golang.org/x/exp/slices/sort.go @@ -9,11 +9,10 @@ import ( "slices" ) -// TODO(adonovan): add a "//go:fix inline" annotation to each function -// in this file; see https://go.dev/issue/32816. - // Sort sorts a slice of any ordered type in ascending order. // When sorting floating-point numbers, NaNs are ordered before other values. +// +//go:fix inline func Sort[S ~[]E, E cmp.Ordered](x S) { slices.Sort(x) } @@ -27,23 +26,31 @@ func Sort[S ~[]E, E cmp.Ordered](x S) { // SortFunc requires that cmp is a strict weak ordering. // See https://en.wikipedia.org/wiki/Weak_ordering#Strict_weak_orderings. // To indicate 'uncomparable', return 0 from the function. +// +//go:fix inline func SortFunc[S ~[]E, E any](x S, cmp func(a, b E) int) { slices.SortFunc(x, cmp) } // SortStableFunc sorts the slice x while keeping the original order of equal // elements, using cmp to compare elements in the same way as [SortFunc]. +// +//go:fix inline func SortStableFunc[S ~[]E, E any](x S, cmp func(a, b E) int) { slices.SortStableFunc(x, cmp) } // IsSorted reports whether x is sorted in ascending order. 
+// +//go:fix inline func IsSorted[S ~[]E, E cmp.Ordered](x S) bool { return slices.IsSorted(x) } // IsSortedFunc reports whether x is sorted in ascending order, with cmp as the // comparison function as defined by [SortFunc]. +// +//go:fix inline func IsSortedFunc[S ~[]E, E any](x S, cmp func(a, b E) int) bool { return slices.IsSortedFunc(x, cmp) } @@ -51,6 +58,8 @@ func IsSortedFunc[S ~[]E, E any](x S, cmp func(a, b E) int) bool { // Min returns the minimal value in x. It panics if x is empty. // For floating-point numbers, Min propagates NaNs (any NaN value in x // forces the output to be NaN). +// +//go:fix inline func Min[S ~[]E, E cmp.Ordered](x S) E { return slices.Min(x) } @@ -58,6 +67,8 @@ func Min[S ~[]E, E cmp.Ordered](x S) E { // MinFunc returns the minimal value in x, using cmp to compare elements. // It panics if x is empty. If there is more than one minimal element // according to the cmp function, MinFunc returns the first one. +// +//go:fix inline func MinFunc[S ~[]E, E any](x S, cmp func(a, b E) int) E { return slices.MinFunc(x, cmp) } @@ -65,6 +76,8 @@ func MinFunc[S ~[]E, E any](x S, cmp func(a, b E) int) E { // Max returns the maximal value in x. It panics if x is empty. // For floating-point E, Max propagates NaNs (any NaN value in x // forces the output to be NaN). +// +//go:fix inline func Max[S ~[]E, E cmp.Ordered](x S) E { return slices.Max(x) } @@ -72,6 +85,8 @@ func Max[S ~[]E, E cmp.Ordered](x S) E { // MaxFunc returns the maximal value in x, using cmp to compare elements. // It panics if x is empty. If there is more than one maximal element // according to the cmp function, MaxFunc returns the first one. 
+// +//go:fix inline func MaxFunc[S ~[]E, E any](x S, cmp func(a, b E) int) E { return slices.MaxFunc(x, cmp) } @@ -80,6 +95,8 @@ func MaxFunc[S ~[]E, E any](x S, cmp func(a, b E) int) E { // where target is found, or the position where target would appear in the // sort order; it also returns a bool saying whether the target is really found // in the slice. The slice must be sorted in increasing order. +// +//go:fix inline func BinarySearch[S ~[]E, E cmp.Ordered](x S, target E) (int, bool) { return slices.BinarySearch(x, target) } @@ -91,6 +108,8 @@ func BinarySearch[S ~[]E, E cmp.Ordered](x S, target E) (int, bool) { // or a positive number if the slice element follows the target. // cmp must implement the same ordering as the slice, such that if // cmp(a, t) < 0 and cmp(b, t) >= 0, then a must precede b in the slice. +// +//go:fix inline func BinarySearchFunc[S ~[]E, E, T any](x S, target T, cmp func(E, T) int) (int, bool) { return slices.BinarySearchFunc(x, target, cmp) } diff --git a/vendor/golang.org/x/mod/module/module.go b/vendor/golang.org/x/mod/module/module.go index 2a364b229..16e1aa7ab 100644 --- a/vendor/golang.org/x/mod/module/module.go +++ b/vendor/golang.org/x/mod/module/module.go @@ -96,10 +96,11 @@ package module // Changes to the semantics in this file require approval from rsc. import ( + "cmp" "errors" "fmt" "path" - "sort" + "slices" "strings" "unicode" "unicode/utf8" @@ -657,17 +658,15 @@ func CanonicalVersion(v string) string { // optionally followed by a tie-breaking suffix introduced by a slash character, // like in "v0.0.1/go.mod". func Sort(list []Version) { - sort.Slice(list, func(i, j int) bool { - mi := list[i] - mj := list[j] - if mi.Path != mj.Path { - return mi.Path < mj.Path + slices.SortFunc(list, func(i, j Version) int { + if i.Path != j.Path { + return strings.Compare(i.Path, j.Path) } // To help go.sum formatting, allow version/file. // Compare semver prefix by semver rules, // file by string order. 
- vi := mi.Version - vj := mj.Version + vi := i.Version + vj := j.Version var fi, fj string if k := strings.Index(vi, "/"); k >= 0 { vi, fi = vi[:k], vi[k:] @@ -676,9 +675,9 @@ func Sort(list []Version) { vj, fj = vj[:k], vj[k:] } if vi != vj { - return semver.Compare(vi, vj) < 0 + return semver.Compare(vi, vj) } - return fi < fj + return cmp.Compare(fi, fj) }) } diff --git a/vendor/golang.org/x/mod/semver/semver.go b/vendor/golang.org/x/mod/semver/semver.go index 9a2dfd33a..628f8fd68 100644 --- a/vendor/golang.org/x/mod/semver/semver.go +++ b/vendor/golang.org/x/mod/semver/semver.go @@ -22,7 +22,10 @@ // as shorthands for vMAJOR.0.0 and vMAJOR.MINOR.0. package semver -import "sort" +import ( + "slices" + "strings" +) // parsed returns the parsed form of a semantic version string. type parsed struct { @@ -154,19 +157,22 @@ func Max(v, w string) string { // ByVersion implements [sort.Interface] for sorting semantic version strings. type ByVersion []string -func (vs ByVersion) Len() int { return len(vs) } -func (vs ByVersion) Swap(i, j int) { vs[i], vs[j] = vs[j], vs[i] } -func (vs ByVersion) Less(i, j int) bool { - cmp := Compare(vs[i], vs[j]) - if cmp != 0 { - return cmp < 0 - } - return vs[i] < vs[j] -} +func (vs ByVersion) Len() int { return len(vs) } +func (vs ByVersion) Swap(i, j int) { vs[i], vs[j] = vs[j], vs[i] } +func (vs ByVersion) Less(i, j int) bool { return compareVersion(vs[i], vs[j]) < 0 } -// Sort sorts a list of semantic version strings using [ByVersion]. +// Sort sorts a list of semantic version strings using [Compare] and falls back +// to use [strings.Compare] if both versions are considered equal. 
func Sort(list []string) { - sort.Sort(ByVersion(list)) + slices.SortFunc(list, compareVersion) +} + +func compareVersion(a, b string) int { + cmp := Compare(a, b) + if cmp != 0 { + return cmp + } + return strings.Compare(a, b) } func parse(v string) (p parsed, ok bool) { diff --git a/vendor/golang.org/x/net/context/context.go b/vendor/golang.org/x/net/context/context.go new file mode 100644 index 000000000..24cea6882 --- /dev/null +++ b/vendor/golang.org/x/net/context/context.go @@ -0,0 +1,118 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package context has been superseded by the standard library [context] package. +// +// Deprecated: Use the standard library context package instead. +package context + +import ( + "context" // standard library's context, as of Go 1.7 + "time" +) + +// A Context carries a deadline, a cancellation signal, and other values across +// API boundaries. +// +// Context's methods may be called by multiple goroutines simultaneously. +// +//go:fix inline +type Context = context.Context + +// Canceled is the error returned by [Context.Err] when the context is canceled +// for some reason other than its deadline passing. +// +//go:fix inline +var Canceled = context.Canceled + +// DeadlineExceeded is the error returned by [Context.Err] when the context is canceled +// due to its deadline passing. +// +//go:fix inline +var DeadlineExceeded = context.DeadlineExceeded + +// Background returns a non-nil, empty Context. It is never canceled, has no +// values, and has no deadline. It is typically used by the main function, +// initialization, and tests, and as the top-level Context for incoming +// requests. +// +//go:fix inline +func Background() Context { return context.Background() } + +// TODO returns a non-nil, empty Context. 
Code should use context.TODO when +// it's unclear which Context to use or it is not yet available (because the +// surrounding function has not yet been extended to accept a Context +// parameter). +// +//go:fix inline +func TODO() Context { return context.TODO() } + +// A CancelFunc tells an operation to abandon its work. +// A CancelFunc does not wait for the work to stop. +// A CancelFunc may be called by multiple goroutines simultaneously. +// After the first call, subsequent calls to a CancelFunc do nothing. +type CancelFunc = context.CancelFunc + +// WithCancel returns a derived context that points to the parent context +// but has a new Done channel. The returned context's Done channel is closed +// when the returned cancel function is called or when the parent context's +// Done channel is closed, whichever happens first. +// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this [Context] complete. +// +//go:fix inline +func WithCancel(parent Context) (ctx Context, cancel CancelFunc) { + return context.WithCancel(parent) +} + +// WithDeadline returns a derived context that points to the parent context +// but has the deadline adjusted to be no later than d. If the parent's +// deadline is already earlier than d, WithDeadline(parent, d) is semantically +// equivalent to parent. The returned [Context.Done] channel is closed when +// the deadline expires, when the returned cancel function is called, +// or when the parent context's Done channel is closed, whichever happens first. +// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this [Context] complete. +// +//go:fix inline +func WithDeadline(parent Context, d time.Time) (Context, CancelFunc) { + return context.WithDeadline(parent, d) +} + +// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)). 
+// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this [Context] complete: +// +// func slowOperationWithTimeout(ctx context.Context) (Result, error) { +// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) +// defer cancel() // releases resources if slowOperation completes before timeout elapses +// return slowOperation(ctx) +// } +// +//go:fix inline +func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) { + return context.WithTimeout(parent, timeout) +} + +// WithValue returns a derived context that points to the parent Context. +// In the derived context, the value associated with key is val. +// +// Use context Values only for request-scoped data that transits processes and +// APIs, not for passing optional parameters to functions. +// +// The provided key must be comparable and should not be of type +// string or any other built-in type to avoid collisions between +// packages using context. Users of WithValue should define their own +// types for keys. To avoid allocating when assigning to an +// interface{}, context keys often have concrete type +// struct{}. Alternatively, exported context key variables' static +// type should be a pointer or interface. 
+// +//go:fix inline +func WithValue(parent Context, key, val interface{}) Context { + return context.WithValue(parent, key, val) +} diff --git a/vendor/golang.org/x/net/html/escape.go b/vendor/golang.org/x/net/html/escape.go index 04c6bec21..12f227370 100644 --- a/vendor/golang.org/x/net/html/escape.go +++ b/vendor/golang.org/x/net/html/escape.go @@ -299,7 +299,7 @@ func escape(w writer, s string) error { case '\r': esc = " " default: - panic("unrecognized escape character") + panic("html: unrecognized escape character") } s = s[i+1:] if _, err := w.WriteString(esc); err != nil { diff --git a/vendor/golang.org/x/net/html/parse.go b/vendor/golang.org/x/net/html/parse.go index 518ee4c94..88fc0056a 100644 --- a/vendor/golang.org/x/net/html/parse.go +++ b/vendor/golang.org/x/net/html/parse.go @@ -136,7 +136,7 @@ func (p *parser) indexOfElementInScope(s scope, matchTags ...a.Atom) int { return -1 } default: - panic("unreachable") + panic(fmt.Sprintf("html: internal error: indexOfElementInScope unknown scope: %d", s)) } } switch s { @@ -179,7 +179,7 @@ func (p *parser) clearStackToContext(s scope) { return } default: - panic("unreachable") + panic(fmt.Sprintf("html: internal error: clearStackToContext unknown scope: %d", s)) } } } @@ -231,7 +231,14 @@ func (p *parser) addChild(n *Node) { } if n.Type == ElementNode { - p.oe = append(p.oe, n) + p.insertOpenElement(n) + } +} + +func (p *parser) insertOpenElement(n *Node) { + p.oe = append(p.oe, n) + if len(p.oe) > 512 { + panic("html: open stack of elements exceeds 512 nodes") } } @@ -810,7 +817,7 @@ func afterHeadIM(p *parser) bool { p.im = inFramesetIM return true case a.Base, a.Basefont, a.Bgsound, a.Link, a.Meta, a.Noframes, a.Script, a.Style, a.Template, a.Title: - p.oe = append(p.oe, p.head) + p.insertOpenElement(p.head) defer p.oe.remove(p.head) return inHeadIM(p) case a.Head: @@ -1678,7 +1685,7 @@ func inTableBodyIM(p *parser) bool { return inTableIM(p) } -// Section 12.2.6.4.14. +// Section 13.2.6.4.14. 
func inRowIM(p *parser) bool { switch p.tok.Type { case StartTagToken: @@ -1690,7 +1697,9 @@ func inRowIM(p *parser) bool { p.im = inCellIM return true case a.Caption, a.Col, a.Colgroup, a.Tbody, a.Tfoot, a.Thead, a.Tr: - if p.popUntil(tableScope, a.Tr) { + if p.elementInScope(tableScope, a.Tr) { + p.clearStackToContext(tableRowScope) + p.oe.pop() p.im = inTableBodyIM return false } @@ -1700,22 +1709,28 @@ func inRowIM(p *parser) bool { case EndTagToken: switch p.tok.DataAtom { case a.Tr: - if p.popUntil(tableScope, a.Tr) { + if p.elementInScope(tableScope, a.Tr) { + p.clearStackToContext(tableRowScope) + p.oe.pop() p.im = inTableBodyIM return true } // Ignore the token. return true case a.Table: - if p.popUntil(tableScope, a.Tr) { + if p.elementInScope(tableScope, a.Tr) { + p.clearStackToContext(tableRowScope) + p.oe.pop() p.im = inTableBodyIM return false } // Ignore the token. return true case a.Tbody, a.Tfoot, a.Thead: - if p.elementInScope(tableScope, p.tok.DataAtom) { - p.parseImpliedToken(EndTagToken, a.Tr, a.Tr.String()) + if p.elementInScope(tableScope, p.tok.DataAtom) && p.elementInScope(tableScope, a.Tr) { + p.clearStackToContext(tableRowScope) + p.oe.pop() + p.im = inTableBodyIM return false } // Ignore the token. @@ -2222,16 +2237,20 @@ func parseForeignContent(p *parser) bool { p.acknowledgeSelfClosingTag() } case EndTagToken: + if strings.EqualFold(p.oe[len(p.oe)-1].Data, p.tok.Data) { + p.oe = p.oe[:len(p.oe)-1] + return true + } for i := len(p.oe) - 1; i >= 0; i-- { - if p.oe[i].Namespace == "" { - return p.im(p) - } if strings.EqualFold(p.oe[i].Data, p.tok.Data) { p.oe = p.oe[:i] + return true + } + if i > 0 && p.oe[i-1].Namespace == "" { break } } - return true + return p.im(p) default: // Ignore the token. 
} @@ -2312,9 +2331,13 @@ func (p *parser) parseCurrentToken() { } } -func (p *parser) parse() error { +func (p *parser) parse() (err error) { + defer func() { + if panicErr := recover(); panicErr != nil { + err = fmt.Errorf("%s", panicErr) + } + }() // Iterate until EOF. Any other error will cause an early return. - var err error for err != io.EOF { // CDATA sections are allowed only in foreign content. n := p.oe.top() @@ -2343,6 +2366,8 @@ func (p *parser) parse() error { // s. Conversely, explicit s in r's data can be silently dropped, // with no corresponding node in the resulting tree. // +// Parse will reject HTML that is nested deeper than 512 elements. +// // The input is assumed to be UTF-8 encoded. func Parse(r io.Reader) (*Node, error) { return ParseWithOptions(r) diff --git a/vendor/golang.org/x/net/html/render.go b/vendor/golang.org/x/net/html/render.go index e8c123345..0157d89e1 100644 --- a/vendor/golang.org/x/net/html/render.go +++ b/vendor/golang.org/x/net/html/render.go @@ -184,7 +184,7 @@ func render1(w writer, n *Node) error { return err } - // Add initial newline where there is danger of a newline beging ignored. + // Add initial newline where there is danger of a newline being ignored. if c := n.FirstChild; c != nil && c.Type == TextNode && strings.HasPrefix(c.Data, "\n") { switch n.Data { case "pre", "listing", "textarea": diff --git a/vendor/golang.org/x/net/http/httpproxy/proxy.go b/vendor/golang.org/x/net/http/httpproxy/proxy.go new file mode 100644 index 000000000..d89c257ae --- /dev/null +++ b/vendor/golang.org/x/net/http/httpproxy/proxy.go @@ -0,0 +1,373 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package httpproxy provides support for HTTP proxy determination +// based on environment variables, as provided by net/http's +// ProxyFromEnvironment function. 
+// +// The API is not subject to the Go 1 compatibility promise and may change at +// any time. +package httpproxy + +import ( + "errors" + "fmt" + "net" + "net/netip" + "net/url" + "os" + "strings" + "unicode/utf8" + + "golang.org/x/net/idna" +) + +// Config holds configuration for HTTP proxy settings. See +// FromEnvironment for details. +type Config struct { + // HTTPProxy represents the value of the HTTP_PROXY or + // http_proxy environment variable. It will be used as the proxy + // URL for HTTP requests unless overridden by NoProxy. + HTTPProxy string + + // HTTPSProxy represents the HTTPS_PROXY or https_proxy + // environment variable. It will be used as the proxy URL for + // HTTPS requests unless overridden by NoProxy. + HTTPSProxy string + + // NoProxy represents the NO_PROXY or no_proxy environment + // variable. It specifies a string that contains comma-separated values + // specifying hosts that should be excluded from proxying. Each value is + // represented by an IP address prefix (1.2.3.4), an IP address prefix in + // CIDR notation (1.2.3.4/8), a domain name, or a special DNS label (*). + // An IP address prefix and domain name can also include a literal port + // number (1.2.3.4:80). + // A domain name matches that name and all subdomains. A domain name with + // a leading "." matches subdomains only. For example "foo.com" matches + // "foo.com" and "bar.foo.com"; ".y.com" matches "x.y.com" but not "y.com". + // A single asterisk (*) indicates that no proxying should be done. + // A best effort is made to parse the string and errors are + // ignored. + NoProxy string + + // CGI holds whether the current process is running + // as a CGI handler (FromEnvironment infers this from the + // presence of a REQUEST_METHOD environment variable). + // When this is set, ProxyForURL will return an error + // when HTTPProxy applies, because a client could be + // setting HTTP_PROXY maliciously. See https://golang.org/s/cgihttpproxy. 
+ CGI bool +} + +// config holds the parsed configuration for HTTP proxy settings. +type config struct { + // Config represents the original configuration as defined above. + Config + + // httpsProxy is the parsed URL of the HTTPSProxy if defined. + httpsProxy *url.URL + + // httpProxy is the parsed URL of the HTTPProxy if defined. + httpProxy *url.URL + + // ipMatchers represent all values in the NoProxy that are IP address + // prefixes or an IP address in CIDR notation. + ipMatchers []matcher + + // domainMatchers represent all values in the NoProxy that are a domain + // name or hostname & domain name + domainMatchers []matcher +} + +// FromEnvironment returns a Config instance populated from the +// environment variables HTTP_PROXY, HTTPS_PROXY and NO_PROXY (or the +// lowercase versions thereof). +// +// The environment values may be either a complete URL or a +// "host[:port]", in which case the "http" scheme is assumed. An error +// is returned if the value is a different form. +func FromEnvironment() *Config { + return &Config{ + HTTPProxy: getEnvAny("HTTP_PROXY", "http_proxy"), + HTTPSProxy: getEnvAny("HTTPS_PROXY", "https_proxy"), + NoProxy: getEnvAny("NO_PROXY", "no_proxy"), + CGI: os.Getenv("REQUEST_METHOD") != "", + } +} + +func getEnvAny(names ...string) string { + for _, n := range names { + if val := os.Getenv(n); val != "" { + return val + } + } + return "" +} + +// ProxyFunc returns a function that determines the proxy URL to use for +// a given request URL. Changing the contents of cfg will not affect +// proxy functions created earlier. +// +// A nil URL and nil error are returned if no proxy is defined in the +// environment, or a proxy should not be used for the given request, as +// defined by NO_PROXY. +// +// As a special case, if req.URL.Host is "localhost" or a loopback address +// (with or without a port number), then a nil URL and nil error will be returned. 
+func (cfg *Config) ProxyFunc() func(reqURL *url.URL) (*url.URL, error) { + // Preprocess the Config settings for more efficient evaluation. + cfg1 := &config{ + Config: *cfg, + } + cfg1.init() + return cfg1.proxyForURL +} + +func (cfg *config) proxyForURL(reqURL *url.URL) (*url.URL, error) { + var proxy *url.URL + if reqURL.Scheme == "https" { + proxy = cfg.httpsProxy + } else if reqURL.Scheme == "http" { + proxy = cfg.httpProxy + if proxy != nil && cfg.CGI { + return nil, errors.New("refusing to use HTTP_PROXY value in CGI environment; see golang.org/s/cgihttpproxy") + } + } + if proxy == nil { + return nil, nil + } + if !cfg.useProxy(canonicalAddr(reqURL)) { + return nil, nil + } + + return proxy, nil +} + +func parseProxy(proxy string) (*url.URL, error) { + if proxy == "" { + return nil, nil + } + + proxyURL, err := url.Parse(proxy) + if err != nil || proxyURL.Scheme == "" || proxyURL.Host == "" { + // proxy was bogus. Try prepending "http://" to it and + // see if that parses correctly. If not, we fall + // through and complain about the original one. + if proxyURL, err := url.Parse("http://" + proxy); err == nil { + return proxyURL, nil + } + } + if err != nil { + return nil, fmt.Errorf("invalid proxy address %q: %v", proxy, err) + } + return proxyURL, nil +} + +// useProxy reports whether requests to addr should use a proxy, +// according to the NO_PROXY or no_proxy environment variable. +// addr is always a canonicalAddr with a host and port. 
+func (cfg *config) useProxy(addr string) bool { + if len(addr) == 0 { + return true + } + host, port, err := net.SplitHostPort(addr) + if err != nil { + return false + } + if host == "localhost" { + return false + } + nip, err := netip.ParseAddr(host) + var ip net.IP + if err == nil { + ip = net.IP(nip.AsSlice()) + if ip.IsLoopback() { + return false + } + } + + addr = strings.ToLower(strings.TrimSpace(host)) + + if ip != nil { + for _, m := range cfg.ipMatchers { + if m.match(addr, port, ip) { + return false + } + } + } + for _, m := range cfg.domainMatchers { + if m.match(addr, port, ip) { + return false + } + } + return true +} + +func (c *config) init() { + if parsed, err := parseProxy(c.HTTPProxy); err == nil { + c.httpProxy = parsed + } + if parsed, err := parseProxy(c.HTTPSProxy); err == nil { + c.httpsProxy = parsed + } + + for _, p := range strings.Split(c.NoProxy, ",") { + p = strings.ToLower(strings.TrimSpace(p)) + if len(p) == 0 { + continue + } + + if p == "*" { + c.ipMatchers = []matcher{allMatch{}} + c.domainMatchers = []matcher{allMatch{}} + return + } + + // IPv4/CIDR, IPv6/CIDR + if _, pnet, err := net.ParseCIDR(p); err == nil { + c.ipMatchers = append(c.ipMatchers, cidrMatch{cidr: pnet}) + continue + } + + // IPv4:port, [IPv6]:port + phost, pport, err := net.SplitHostPort(p) + if err == nil { + if len(phost) == 0 { + // There is no host part, likely the entry is malformed; ignore. + continue + } + if phost[0] == '[' && phost[len(phost)-1] == ']' { + phost = phost[1 : len(phost)-1] + } + } else { + phost = p + } + // IPv4, IPv6 + if pip := net.ParseIP(phost); pip != nil { + c.ipMatchers = append(c.ipMatchers, ipMatch{ip: pip, port: pport}) + continue + } + + if len(phost) == 0 { + // There is no host part, likely the entry is malformed; ignore. 
+ continue + } + + // domain.com or domain.com:80 + // foo.com matches bar.foo.com + // .domain.com or .domain.com:port + // *.domain.com or *.domain.com:port + if strings.HasPrefix(phost, "*.") { + phost = phost[1:] + } + matchHost := false + if phost[0] != '.' { + matchHost = true + phost = "." + phost + } + if v, err := idnaASCII(phost); err == nil { + phost = v + } + c.domainMatchers = append(c.domainMatchers, domainMatch{host: phost, port: pport, matchHost: matchHost}) + } +} + +var portMap = map[string]string{ + "http": "80", + "https": "443", + "socks5": "1080", +} + +// canonicalAddr returns url.Host but always with a ":port" suffix +func canonicalAddr(url *url.URL) string { + addr := url.Hostname() + if v, err := idnaASCII(addr); err == nil { + addr = v + } + port := url.Port() + if port == "" { + port = portMap[url.Scheme] + } + return net.JoinHostPort(addr, port) +} + +// Given a string of the form "host", "host:port", or "[ipv6::address]:port", +// return true if the string includes a port. +func hasPort(s string) bool { return strings.LastIndex(s, ":") > strings.LastIndex(s, "]") } + +func idnaASCII(v string) (string, error) { + // TODO: Consider removing this check after verifying performance is okay. + // Right now punycode verification, length checks, context checks, and the + // permissible character tests are all omitted. It also prevents the ToASCII + // call from salvaging an invalid IDN, when possible. As a result it may be + // possible to have two IDNs that appear identical to the user where the + // ASCII-only version causes an error downstream whereas the non-ASCII + // version does not. + // Note that for correct ASCII IDNs ToASCII will only do considerably more + // work, but it will not cause an allocation. 
+ if isASCII(v) { + return v, nil + } + return idna.Lookup.ToASCII(v) +} + +func isASCII(s string) bool { + for i := 0; i < len(s); i++ { + if s[i] >= utf8.RuneSelf { + return false + } + } + return true +} + +// matcher represents the matching rule for a given value in the NO_PROXY list +type matcher interface { + // match returns true if the host and optional port or ip and optional port + // are allowed + match(host, port string, ip net.IP) bool +} + +// allMatch matches on all possible inputs +type allMatch struct{} + +func (a allMatch) match(host, port string, ip net.IP) bool { + return true +} + +type cidrMatch struct { + cidr *net.IPNet +} + +func (m cidrMatch) match(host, port string, ip net.IP) bool { + return m.cidr.Contains(ip) +} + +type ipMatch struct { + ip net.IP + port string +} + +func (m ipMatch) match(host, port string, ip net.IP) bool { + if m.ip.Equal(ip) { + return m.port == "" || m.port == port + } + return false +} + +type domainMatch struct { + host string + port string + + matchHost bool +} + +func (m domainMatch) match(host, port string, ip net.IP) bool { + if ip != nil { + return false + } + if strings.HasSuffix(host, m.host) || (m.matchHost && host == m.host[1:]) { + return m.port == "" || m.port == port + } + return false +} diff --git a/vendor/golang.org/x/net/http2/config.go b/vendor/golang.org/x/net/http2/config.go index ca645d9a1..8a7a89d01 100644 --- a/vendor/golang.org/x/net/http2/config.go +++ b/vendor/golang.org/x/net/http2/config.go @@ -27,6 +27,7 @@ import ( // - If the resulting value is zero or out of range, use a default. 
type http2Config struct { MaxConcurrentStreams uint32 + StrictMaxConcurrentRequests bool MaxDecoderHeaderTableSize uint32 MaxEncoderHeaderTableSize uint32 MaxReadFrameSize uint32 @@ -55,7 +56,7 @@ func configFromServer(h1 *http.Server, h2 *Server) http2Config { PermitProhibitedCipherSuites: h2.PermitProhibitedCipherSuites, CountError: h2.CountError, } - fillNetHTTPServerConfig(&conf, h1) + fillNetHTTPConfig(&conf, h1.HTTP2) setConfigDefaults(&conf, true) return conf } @@ -64,12 +65,13 @@ func configFromServer(h1 *http.Server, h2 *Server) http2Config { // (the net/http Transport). func configFromTransport(h2 *Transport) http2Config { conf := http2Config{ - MaxEncoderHeaderTableSize: h2.MaxEncoderHeaderTableSize, - MaxDecoderHeaderTableSize: h2.MaxDecoderHeaderTableSize, - MaxReadFrameSize: h2.MaxReadFrameSize, - SendPingTimeout: h2.ReadIdleTimeout, - PingTimeout: h2.PingTimeout, - WriteByteTimeout: h2.WriteByteTimeout, + StrictMaxConcurrentRequests: h2.StrictMaxConcurrentStreams, + MaxEncoderHeaderTableSize: h2.MaxEncoderHeaderTableSize, + MaxDecoderHeaderTableSize: h2.MaxDecoderHeaderTableSize, + MaxReadFrameSize: h2.MaxReadFrameSize, + SendPingTimeout: h2.ReadIdleTimeout, + PingTimeout: h2.PingTimeout, + WriteByteTimeout: h2.WriteByteTimeout, } // Unlike most config fields, where out-of-range values revert to the default, @@ -81,7 +83,7 @@ func configFromTransport(h2 *Transport) http2Config { } if h2.t1 != nil { - fillNetHTTPTransportConfig(&conf, h2.t1) + fillNetHTTPConfig(&conf, h2.t1.HTTP2) } setConfigDefaults(&conf, false) return conf @@ -120,3 +122,48 @@ func adjustHTTP1MaxHeaderSize(n int64) int64 { const typicalHeaders = 10 // conservative return n + typicalHeaders*perFieldOverhead } + +func fillNetHTTPConfig(conf *http2Config, h2 *http.HTTP2Config) { + if h2 == nil { + return + } + if h2.MaxConcurrentStreams != 0 { + conf.MaxConcurrentStreams = uint32(h2.MaxConcurrentStreams) + } + if http2ConfigStrictMaxConcurrentRequests(h2) { + 
conf.StrictMaxConcurrentRequests = true + } + if h2.MaxEncoderHeaderTableSize != 0 { + conf.MaxEncoderHeaderTableSize = uint32(h2.MaxEncoderHeaderTableSize) + } + if h2.MaxDecoderHeaderTableSize != 0 { + conf.MaxDecoderHeaderTableSize = uint32(h2.MaxDecoderHeaderTableSize) + } + if h2.MaxConcurrentStreams != 0 { + conf.MaxConcurrentStreams = uint32(h2.MaxConcurrentStreams) + } + if h2.MaxReadFrameSize != 0 { + conf.MaxReadFrameSize = uint32(h2.MaxReadFrameSize) + } + if h2.MaxReceiveBufferPerConnection != 0 { + conf.MaxUploadBufferPerConnection = int32(h2.MaxReceiveBufferPerConnection) + } + if h2.MaxReceiveBufferPerStream != 0 { + conf.MaxUploadBufferPerStream = int32(h2.MaxReceiveBufferPerStream) + } + if h2.SendPingTimeout != 0 { + conf.SendPingTimeout = h2.SendPingTimeout + } + if h2.PingTimeout != 0 { + conf.PingTimeout = h2.PingTimeout + } + if h2.WriteByteTimeout != 0 { + conf.WriteByteTimeout = h2.WriteByteTimeout + } + if h2.PermitProhibitedCipherSuites { + conf.PermitProhibitedCipherSuites = true + } + if h2.CountError != nil { + conf.CountError = h2.CountError + } +} diff --git a/vendor/golang.org/x/net/http2/config_go124.go b/vendor/golang.org/x/net/http2/config_go124.go deleted file mode 100644 index 5b516c55f..000000000 --- a/vendor/golang.org/x/net/http2/config_go124.go +++ /dev/null @@ -1,61 +0,0 @@ -// Copyright 2024 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.24 - -package http2 - -import "net/http" - -// fillNetHTTPServerConfig sets fields in conf from srv.HTTP2. -func fillNetHTTPServerConfig(conf *http2Config, srv *http.Server) { - fillNetHTTPConfig(conf, srv.HTTP2) -} - -// fillNetHTTPTransportConfig sets fields in conf from tr.HTTP2. 
-func fillNetHTTPTransportConfig(conf *http2Config, tr *http.Transport) { - fillNetHTTPConfig(conf, tr.HTTP2) -} - -func fillNetHTTPConfig(conf *http2Config, h2 *http.HTTP2Config) { - if h2 == nil { - return - } - if h2.MaxConcurrentStreams != 0 { - conf.MaxConcurrentStreams = uint32(h2.MaxConcurrentStreams) - } - if h2.MaxEncoderHeaderTableSize != 0 { - conf.MaxEncoderHeaderTableSize = uint32(h2.MaxEncoderHeaderTableSize) - } - if h2.MaxDecoderHeaderTableSize != 0 { - conf.MaxDecoderHeaderTableSize = uint32(h2.MaxDecoderHeaderTableSize) - } - if h2.MaxConcurrentStreams != 0 { - conf.MaxConcurrentStreams = uint32(h2.MaxConcurrentStreams) - } - if h2.MaxReadFrameSize != 0 { - conf.MaxReadFrameSize = uint32(h2.MaxReadFrameSize) - } - if h2.MaxReceiveBufferPerConnection != 0 { - conf.MaxUploadBufferPerConnection = int32(h2.MaxReceiveBufferPerConnection) - } - if h2.MaxReceiveBufferPerStream != 0 { - conf.MaxUploadBufferPerStream = int32(h2.MaxReceiveBufferPerStream) - } - if h2.SendPingTimeout != 0 { - conf.SendPingTimeout = h2.SendPingTimeout - } - if h2.PingTimeout != 0 { - conf.PingTimeout = h2.PingTimeout - } - if h2.WriteByteTimeout != 0 { - conf.WriteByteTimeout = h2.WriteByteTimeout - } - if h2.PermitProhibitedCipherSuites { - conf.PermitProhibitedCipherSuites = true - } - if h2.CountError != nil { - conf.CountError = h2.CountError - } -} diff --git a/vendor/golang.org/x/net/http2/config_go125.go b/vendor/golang.org/x/net/http2/config_go125.go new file mode 100644 index 000000000..b4373fe33 --- /dev/null +++ b/vendor/golang.org/x/net/http2/config_go125.go @@ -0,0 +1,15 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build !go1.26 + +package http2 + +import ( + "net/http" +) + +func http2ConfigStrictMaxConcurrentRequests(h2 *http.HTTP2Config) bool { + return false +} diff --git a/vendor/golang.org/x/net/http2/config_go126.go b/vendor/golang.org/x/net/http2/config_go126.go new file mode 100644 index 000000000..6b071c149 --- /dev/null +++ b/vendor/golang.org/x/net/http2/config_go126.go @@ -0,0 +1,15 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.26 + +package http2 + +import ( + "net/http" +) + +func http2ConfigStrictMaxConcurrentRequests(h2 *http.HTTP2Config) bool { + return h2.StrictMaxConcurrentRequests +} diff --git a/vendor/golang.org/x/net/http2/config_pre_go124.go b/vendor/golang.org/x/net/http2/config_pre_go124.go deleted file mode 100644 index 060fd6c64..000000000 --- a/vendor/golang.org/x/net/http2/config_pre_go124.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2024 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.24 - -package http2 - -import "net/http" - -// Pre-Go 1.24 fallback. -// The Server.HTTP2 and Transport.HTTP2 config fields were added in Go 1.24. 
- -func fillNetHTTPServerConfig(conf *http2Config, srv *http.Server) {} - -func fillNetHTTPTransportConfig(conf *http2Config, tr *http.Transport) {} diff --git a/vendor/golang.org/x/net/http2/frame.go b/vendor/golang.org/x/net/http2/frame.go index 97bd8b06f..9a4bd123c 100644 --- a/vendor/golang.org/x/net/http2/frame.go +++ b/vendor/golang.org/x/net/http2/frame.go @@ -39,7 +39,7 @@ const ( FrameContinuation FrameType = 0x9 ) -var frameName = map[FrameType]string{ +var frameNames = [...]string{ FrameData: "DATA", FrameHeaders: "HEADERS", FramePriority: "PRIORITY", @@ -53,10 +53,10 @@ var frameName = map[FrameType]string{ } func (t FrameType) String() string { - if s, ok := frameName[t]; ok { - return s + if int(t) < len(frameNames) { + return frameNames[t] } - return fmt.Sprintf("UNKNOWN_FRAME_TYPE_%d", uint8(t)) + return fmt.Sprintf("UNKNOWN_FRAME_TYPE_%d", t) } // Flags is a bitmask of HTTP/2 flags. @@ -124,7 +124,7 @@ var flagName = map[FrameType]map[Flags]string{ // might be 0). type frameParser func(fc *frameCache, fh FrameHeader, countError func(string), payload []byte) (Frame, error) -var frameParsers = map[FrameType]frameParser{ +var frameParsers = [...]frameParser{ FrameData: parseDataFrame, FrameHeaders: parseHeadersFrame, FramePriority: parsePriorityFrame, @@ -138,8 +138,8 @@ var frameParsers = map[FrameType]frameParser{ } func typeFrameParser(t FrameType) frameParser { - if f := frameParsers[t]; f != nil { - return f + if int(t) < len(frameParsers) { + return frameParsers[t] } return parseUnknownFrame } @@ -280,6 +280,8 @@ type Framer struct { // lastHeaderStream is non-zero if the last frame was an // unfinished HEADERS/CONTINUATION. lastHeaderStream uint32 + // lastFrameType holds the type of the last frame for verifying frame order. 
+ lastFrameType FrameType maxReadSize uint32 headerBuf [frameHeaderLen]byte @@ -347,7 +349,7 @@ func (fr *Framer) maxHeaderListSize() uint32 { func (f *Framer) startWrite(ftype FrameType, flags Flags, streamID uint32) { // Write the FrameHeader. f.wbuf = append(f.wbuf[:0], - 0, // 3 bytes of length, filled in in endWrite + 0, // 3 bytes of length, filled in endWrite 0, 0, byte(ftype), @@ -488,30 +490,41 @@ func terminalReadFrameError(err error) bool { return err != nil } -// ReadFrame reads a single frame. The returned Frame is only valid -// until the next call to ReadFrame. +// ReadFrameHeader reads the header of the next frame. +// It reads the 9-byte fixed frame header, and does not read any portion of the +// frame payload. The caller is responsible for consuming the payload, either +// with ReadFrameForHeader or directly from the Framer's io.Reader. // -// If the frame is larger than previously set with SetMaxReadFrameSize, the -// returned error is ErrFrameTooLarge. Other errors may be of type -// ConnectionError, StreamError, or anything else from the underlying -// reader. +// If the frame is larger than previously set with SetMaxReadFrameSize, it +// returns the frame header and ErrFrameTooLarge. // -// If ReadFrame returns an error and a non-nil Frame, the Frame's StreamID -// indicates the stream responsible for the error. -func (fr *Framer) ReadFrame() (Frame, error) { +// If the returned FrameHeader.StreamID is non-zero, it indicates the stream +// responsible for the error. 
+func (fr *Framer) ReadFrameHeader() (FrameHeader, error) { fr.errDetail = nil - if fr.lastFrame != nil { - fr.lastFrame.invalidate() - } fh, err := readFrameHeader(fr.headerBuf[:], fr.r) if err != nil { - return nil, err + return fh, err } if fh.Length > fr.maxReadSize { if fh == invalidHTTP1LookingFrameHeader() { - return nil, fmt.Errorf("http2: failed reading the frame payload: %w, note that the frame header looked like an HTTP/1.1 header", err) + return fh, fmt.Errorf("http2: failed reading the frame payload: %w, note that the frame header looked like an HTTP/1.1 header", ErrFrameTooLarge) } - return nil, ErrFrameTooLarge + return fh, ErrFrameTooLarge + } + if err := fr.checkFrameOrder(fh); err != nil { + return fh, err + } + return fh, nil +} + +// ReadFrameForHeader reads the payload for the frame with the given FrameHeader. +// +// It behaves identically to ReadFrame, other than not checking the maximum +// frame size. +func (fr *Framer) ReadFrameForHeader(fh FrameHeader) (Frame, error) { + if fr.lastFrame != nil { + fr.lastFrame.invalidate() } payload := fr.getReadBuf(fh.Length) if _, err := io.ReadFull(fr.r, payload); err != nil { @@ -527,9 +540,7 @@ func (fr *Framer) ReadFrame() (Frame, error) { } return nil, err } - if err := fr.checkFrameOrder(f); err != nil { - return nil, err - } + fr.lastFrame = f if fr.logReads { fr.debugReadLoggerf("http2: Framer %p: read %v", fr, summarizeFrame(f)) } @@ -539,6 +550,24 @@ func (fr *Framer) ReadFrame() (Frame, error) { return f, nil } +// ReadFrame reads a single frame. The returned Frame is only valid +// until the next call to ReadFrame or ReadFrameBodyForHeader. +// +// If the frame is larger than previously set with SetMaxReadFrameSize, the +// returned error is ErrFrameTooLarge. Other errors may be of type +// ConnectionError, StreamError, or anything else from the underlying +// reader. 
+// +// If ReadFrame returns an error and a non-nil Frame, the Frame's StreamID +// indicates the stream responsible for the error. +func (fr *Framer) ReadFrame() (Frame, error) { + fh, err := fr.ReadFrameHeader() + if err != nil { + return nil, err + } + return fr.ReadFrameForHeader(fh) +} + // connError returns ConnectionError(code) but first // stashes away a public reason to the caller can optionally relay it // to the peer before hanging up on them. This might help others debug @@ -551,20 +580,19 @@ func (fr *Framer) connError(code ErrCode, reason string) error { // checkFrameOrder reports an error if f is an invalid frame to return // next from ReadFrame. Mostly it checks whether HEADERS and // CONTINUATION frames are contiguous. -func (fr *Framer) checkFrameOrder(f Frame) error { - last := fr.lastFrame - fr.lastFrame = f +func (fr *Framer) checkFrameOrder(fh FrameHeader) error { + lastType := fr.lastFrameType + fr.lastFrameType = fh.Type if fr.AllowIllegalReads { return nil } - fh := f.Header() if fr.lastHeaderStream != 0 { if fh.Type != FrameContinuation { return fr.connError(ErrCodeProtocol, fmt.Sprintf("got %s for stream %d; expected CONTINUATION following %s for stream %d", fh.Type, fh.StreamID, - last.Header().Type, fr.lastHeaderStream)) + lastType, fr.lastHeaderStream)) } if fh.StreamID != fr.lastHeaderStream { return fr.connError(ErrCodeProtocol, @@ -1152,7 +1180,16 @@ type PriorityFrame struct { PriorityParam } -// PriorityParam are the stream prioritzation parameters. +var defaultRFC9218Priority = PriorityParam{ + incremental: 0, + urgency: 3, +} + +// Note that HTTP/2 has had two different prioritization schemes, and +// PriorityParam struct below is a superset of both schemes. The exported +// symbols are from RFC 7540 and the non-exported ones are from RFC 9218. + +// PriorityParam are the stream prioritization parameters. type PriorityParam struct { // StreamDep is a 31-bit stream identifier for the // stream that this stream depends on. 
Zero means no @@ -1167,6 +1204,20 @@ type PriorityParam struct { // the spec, "Add one to the value to obtain a weight between // 1 and 256." Weight uint8 + + // "The urgency (u) parameter value is Integer (see Section 3.3.1 of + // [STRUCTURED-FIELDS]), between 0 and 7 inclusive, in descending order of + // priority. The default is 3." + urgency uint8 + + // "The incremental (i) parameter value is Boolean (see Section 3.3.6 of + // [STRUCTURED-FIELDS]). It indicates if an HTTP response can be processed + // incrementally, i.e., provide some meaningful output as chunks of the + // response arrive." + // + // We use uint8 (i.e. 0 is false, 1 is true) instead of bool so we can + // avoid unnecessary type conversions and because either type takes 1 byte. + incremental uint8 } func (p PriorityParam) IsZero() bool { diff --git a/vendor/golang.org/x/net/http2/gotrack.go b/vendor/golang.org/x/net/http2/gotrack.go index 9933c9f8c..9921ca096 100644 --- a/vendor/golang.org/x/net/http2/gotrack.go +++ b/vendor/golang.org/x/net/http2/gotrack.go @@ -15,21 +15,32 @@ import ( "runtime" "strconv" "sync" + "sync/atomic" ) var DebugGoroutines = os.Getenv("DEBUG_HTTP2_GOROUTINES") == "1" +// Setting DebugGoroutines to false during a test to disable goroutine debugging +// results in race detector complaints when a test leaves goroutines running before +// returning. Tests shouldn't do this, of course, but when they do it generally shows +// up as infrequent, hard-to-debug flakes. (See #66519.) +// +// Disable goroutine debugging during individual tests with an atomic bool. +// (Note that it's safe to enable/disable debugging mid-test, so the actual race condition +// here is harmless.) 
+var disableDebugGoroutines atomic.Bool + type goroutineLock uint64 func newGoroutineLock() goroutineLock { - if !DebugGoroutines { + if !DebugGoroutines || disableDebugGoroutines.Load() { return 0 } return goroutineLock(curGoroutineID()) } func (g goroutineLock) check() { - if !DebugGoroutines { + if !DebugGoroutines || disableDebugGoroutines.Load() { return } if curGoroutineID() != uint64(g) { @@ -38,7 +49,7 @@ func (g goroutineLock) check() { } func (g goroutineLock) checkNotOn() { - if !DebugGoroutines { + if !DebugGoroutines || disableDebugGoroutines.Load() { return } if curGoroutineID() == uint64(g) { diff --git a/vendor/golang.org/x/net/http2/http2.go b/vendor/golang.org/x/net/http2/http2.go index 6c18ea230..105fe12fe 100644 --- a/vendor/golang.org/x/net/http2/http2.go +++ b/vendor/golang.org/x/net/http2/http2.go @@ -11,13 +11,10 @@ // requires Go 1.6 or later) // // See https://http2.github.io/ for more information on HTTP/2. -// -// See https://http2.golang.org/ for a test server running this code. package http2 // import "golang.org/x/net/http2" import ( "bufio" - "context" "crypto/tls" "errors" "fmt" @@ -37,7 +34,6 @@ var ( VerboseLogs bool logFrameWrites bool logFrameReads bool - inTests bool // Enabling extended CONNECT by causes browsers to attempt to use // WebSockets-over-HTTP/2. This results in problems when the server's websocket @@ -257,15 +253,13 @@ func (cw closeWaiter) Wait() { // idle memory usage with many connections. 
type bufferedWriter struct { _ incomparable - group synctestGroupInterface // immutable - conn net.Conn // immutable - bw *bufio.Writer // non-nil when data is buffered - byteTimeout time.Duration // immutable, WriteByteTimeout + conn net.Conn // immutable + bw *bufio.Writer // non-nil when data is buffered + byteTimeout time.Duration // immutable, WriteByteTimeout } -func newBufferedWriter(group synctestGroupInterface, conn net.Conn, timeout time.Duration) *bufferedWriter { +func newBufferedWriter(conn net.Conn, timeout time.Duration) *bufferedWriter { return &bufferedWriter{ - group: group, conn: conn, byteTimeout: timeout, } @@ -316,24 +310,18 @@ func (w *bufferedWriter) Flush() error { type bufferedWriterTimeoutWriter bufferedWriter func (w *bufferedWriterTimeoutWriter) Write(p []byte) (n int, err error) { - return writeWithByteTimeout(w.group, w.conn, w.byteTimeout, p) + return writeWithByteTimeout(w.conn, w.byteTimeout, p) } // writeWithByteTimeout writes to conn. // If more than timeout passes without any bytes being written to the connection, // the write fails. -func writeWithByteTimeout(group synctestGroupInterface, conn net.Conn, timeout time.Duration, p []byte) (n int, err error) { +func writeWithByteTimeout(conn net.Conn, timeout time.Duration, p []byte) (n int, err error) { if timeout <= 0 { return conn.Write(p) } for { - var now time.Time - if group == nil { - now = time.Now() - } else { - now = group.Now() - } - conn.SetWriteDeadline(now.Add(timeout)) + conn.SetWriteDeadline(time.Now().Add(timeout)) nn, err := conn.Write(p[n:]) n += nn if n == len(p) || nn == 0 || !errors.Is(err, os.ErrDeadlineExceeded) { @@ -419,14 +407,3 @@ func (s *sorter) SortStrings(ss []string) { // makes that struct also non-comparable, and generally doesn't add // any size (as long as it's first). type incomparable [0]func() - -// synctestGroupInterface is the methods of synctestGroup used by Server and Transport. 
-// It's defined as an interface here to let us keep synctestGroup entirely test-only -// and not a part of non-test builds. -type synctestGroupInterface interface { - Join() - Now() time.Time - NewTimer(d time.Duration) timer - AfterFunc(d time.Duration, f func()) timer - ContextWithTimeout(ctx context.Context, d time.Duration) (context.Context, context.CancelFunc) -} diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go index 51fca38f6..bdc5520eb 100644 --- a/vendor/golang.org/x/net/http2/server.go +++ b/vendor/golang.org/x/net/http2/server.go @@ -176,44 +176,15 @@ type Server struct { // so that we don't embed a Mutex in this struct, which will make the // struct non-copyable, which might break some callers. state *serverInternalState - - // Synchronization group used for testing. - // Outside of tests, this is nil. - group synctestGroupInterface -} - -func (s *Server) markNewGoroutine() { - if s.group != nil { - s.group.Join() - } -} - -func (s *Server) now() time.Time { - if s.group != nil { - return s.group.Now() - } - return time.Now() -} - -// newTimer creates a new time.Timer, or a synthetic timer in tests. -func (s *Server) newTimer(d time.Duration) timer { - if s.group != nil { - return s.group.NewTimer(d) - } - return timeTimer{time.NewTimer(d)} -} - -// afterFunc creates a new time.AfterFunc timer, or a synthetic timer in tests. -func (s *Server) afterFunc(d time.Duration, f func()) timer { - if s.group != nil { - return s.group.AfterFunc(d, f) - } - return timeTimer{time.AfterFunc(d, f)} } type serverInternalState struct { mu sync.Mutex activeConns map[*serverConn]struct{} + + // Pool of error channels. This is per-Server rather than global + // because channels can't be reused across synctest bubbles. 
+ errChanPool sync.Pool } func (s *serverInternalState) registerConn(sc *serverConn) { @@ -245,6 +216,27 @@ func (s *serverInternalState) startGracefulShutdown() { s.mu.Unlock() } +// Global error channel pool used for uninitialized Servers. +// We use a per-Server pool when possible to avoid using channels across synctest bubbles. +var errChanPool = sync.Pool{ + New: func() any { return make(chan error, 1) }, +} + +func (s *serverInternalState) getErrChan() chan error { + if s == nil { + return errChanPool.Get().(chan error) // Server used without calling ConfigureServer + } + return s.errChanPool.Get().(chan error) +} + +func (s *serverInternalState) putErrChan(ch chan error) { + if s == nil { + errChanPool.Put(ch) // Server used without calling ConfigureServer + return + } + s.errChanPool.Put(ch) +} + // ConfigureServer adds HTTP/2 support to a net/http Server. // // The configuration conf may be nil. @@ -257,7 +249,10 @@ func ConfigureServer(s *http.Server, conf *Server) error { if conf == nil { conf = new(Server) } - conf.state = &serverInternalState{activeConns: make(map[*serverConn]struct{})} + conf.state = &serverInternalState{ + activeConns: make(map[*serverConn]struct{}), + errChanPool: sync.Pool{New: func() any { return make(chan error, 1) }}, + } if h1, h2 := s, conf; h2.IdleTimeout == 0 { if h1.IdleTimeout != 0 { h2.IdleTimeout = h1.IdleTimeout @@ -423,6 +418,9 @@ func (o *ServeConnOpts) handler() http.Handler { // // The opts parameter is optional. If nil, default values are used. 
func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) { + if opts == nil { + opts = &ServeConnOpts{} + } s.serveConn(c, opts, nil) } @@ -438,7 +436,7 @@ func (s *Server) serveConn(c net.Conn, opts *ServeConnOpts, newf func(*serverCon conn: c, baseCtx: baseCtx, remoteAddrStr: c.RemoteAddr().String(), - bw: newBufferedWriter(s.group, c, conf.WriteByteTimeout), + bw: newBufferedWriter(c, conf.WriteByteTimeout), handler: opts.handler(), streams: make(map[uint32]*stream), readFrameCh: make(chan readFrameResult), @@ -638,11 +636,11 @@ type serverConn struct { pingSent bool sentPingData [8]byte goAwayCode ErrCode - shutdownTimer timer // nil until used - idleTimer timer // nil if unused + shutdownTimer *time.Timer // nil until used + idleTimer *time.Timer // nil if unused readIdleTimeout time.Duration pingTimeout time.Duration - readIdleTimer timer // nil if unused + readIdleTimer *time.Timer // nil if unused // Owned by the writeFrameAsync goroutine: headerWriteBuf bytes.Buffer @@ -687,12 +685,12 @@ type stream struct { flow outflow // limits writing from Handler to client inflow inflow // what the client is allowed to POST/etc to us state streamState - resetQueued bool // RST_STREAM queued for write; set by sc.resetStream - gotTrailerHeader bool // HEADER frame for trailers was seen - wroteHeaders bool // whether we wrote headers (not status 100) - readDeadline timer // nil if unused - writeDeadline timer // nil if unused - closeErr error // set before cw is closed + resetQueued bool // RST_STREAM queued for write; set by sc.resetStream + gotTrailerHeader bool // HEADER frame for trailers was seen + wroteHeaders bool // whether we wrote headers (not status 100) + readDeadline *time.Timer // nil if unused + writeDeadline *time.Timer // nil if unused + closeErr error // set before cw is closed trailer http.Header // accumulated trailers reqTrailer http.Header // handler's Request.Trailer @@ -848,7 +846,6 @@ type readFrameResult struct { // consumer is done with the 
frame. // It's run on its own goroutine. func (sc *serverConn) readFrames() { - sc.srv.markNewGoroutine() gate := make(chan struct{}) gateDone := func() { gate <- struct{}{} } for { @@ -881,7 +878,6 @@ type frameWriteResult struct { // At most one goroutine can be running writeFrameAsync at a time per // serverConn. func (sc *serverConn) writeFrameAsync(wr FrameWriteRequest, wd *writeData) { - sc.srv.markNewGoroutine() var err error if wd == nil { err = wr.write.writeFrame(sc) @@ -965,22 +961,22 @@ func (sc *serverConn) serve(conf http2Config) { sc.setConnState(http.StateIdle) if sc.srv.IdleTimeout > 0 { - sc.idleTimer = sc.srv.afterFunc(sc.srv.IdleTimeout, sc.onIdleTimer) + sc.idleTimer = time.AfterFunc(sc.srv.IdleTimeout, sc.onIdleTimer) defer sc.idleTimer.Stop() } if conf.SendPingTimeout > 0 { sc.readIdleTimeout = conf.SendPingTimeout - sc.readIdleTimer = sc.srv.afterFunc(conf.SendPingTimeout, sc.onReadIdleTimer) + sc.readIdleTimer = time.AfterFunc(conf.SendPingTimeout, sc.onReadIdleTimer) defer sc.readIdleTimer.Stop() } go sc.readFrames() // closed by defer sc.conn.Close above - settingsTimer := sc.srv.afterFunc(firstSettingsTimeout, sc.onSettingsTimer) + settingsTimer := time.AfterFunc(firstSettingsTimeout, sc.onSettingsTimer) defer settingsTimer.Stop() - lastFrameTime := sc.srv.now() + lastFrameTime := time.Now() loopNum := 0 for { loopNum++ @@ -994,7 +990,7 @@ func (sc *serverConn) serve(conf http2Config) { case res := <-sc.wroteFrameCh: sc.wroteFrame(res) case res := <-sc.readFrameCh: - lastFrameTime = sc.srv.now() + lastFrameTime = time.Now() // Process any written frames before reading new frames from the client since a // written frame could have triggered a new stream to be started. 
if sc.writingFrameAsync { @@ -1077,7 +1073,7 @@ func (sc *serverConn) handlePingTimer(lastFrameReadTime time.Time) { } pingAt := lastFrameReadTime.Add(sc.readIdleTimeout) - now := sc.srv.now() + now := time.Now() if pingAt.After(now) { // We received frames since arming the ping timer. // Reset it for the next possible timeout. @@ -1141,10 +1137,10 @@ func (sc *serverConn) readPreface() error { errc <- nil } }() - timer := sc.srv.newTimer(prefaceTimeout) // TODO: configurable on *Server? + timer := time.NewTimer(prefaceTimeout) // TODO: configurable on *Server? defer timer.Stop() select { - case <-timer.C(): + case <-timer.C: return errPrefaceTimeout case err := <-errc: if err == nil { @@ -1156,10 +1152,6 @@ func (sc *serverConn) readPreface() error { } } -var errChanPool = sync.Pool{ - New: func() interface{} { return make(chan error, 1) }, -} - var writeDataPool = sync.Pool{ New: func() interface{} { return new(writeData) }, } @@ -1167,7 +1159,7 @@ var writeDataPool = sync.Pool{ // writeDataFromHandler writes DATA response frames from a handler on // the given stream. 
func (sc *serverConn) writeDataFromHandler(stream *stream, data []byte, endStream bool) error { - ch := errChanPool.Get().(chan error) + ch := sc.srv.state.getErrChan() writeArg := writeDataPool.Get().(*writeData) *writeArg = writeData{stream.id, data, endStream} err := sc.writeFrameFromHandler(FrameWriteRequest{ @@ -1199,7 +1191,7 @@ func (sc *serverConn) writeDataFromHandler(stream *stream, data []byte, endStrea return errStreamClosed } } - errChanPool.Put(ch) + sc.srv.state.putErrChan(ch) if frameWriteDone { writeDataPool.Put(writeArg) } @@ -1513,7 +1505,7 @@ func (sc *serverConn) goAway(code ErrCode) { func (sc *serverConn) shutDownIn(d time.Duration) { sc.serveG.check() - sc.shutdownTimer = sc.srv.afterFunc(d, sc.onShutdownTimer) + sc.shutdownTimer = time.AfterFunc(d, sc.onShutdownTimer) } func (sc *serverConn) resetStream(se StreamError) { @@ -2118,7 +2110,7 @@ func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error { // (in Go 1.8), though. That's a more sane option anyway. if sc.hs.ReadTimeout > 0 { sc.conn.SetReadDeadline(time.Time{}) - st.readDeadline = sc.srv.afterFunc(sc.hs.ReadTimeout, st.onReadTimeout) + st.readDeadline = time.AfterFunc(sc.hs.ReadTimeout, st.onReadTimeout) } return sc.scheduleHandler(id, rw, req, handler) @@ -2216,7 +2208,7 @@ func (sc *serverConn) newStream(id, pusherID uint32, state streamState) *stream st.flow.add(sc.initialStreamSendWindowSize) st.inflow.init(sc.initialStreamRecvWindowSize) if sc.hs.WriteTimeout > 0 { - st.writeDeadline = sc.srv.afterFunc(sc.hs.WriteTimeout, st.onWriteTimeout) + st.writeDeadline = time.AfterFunc(sc.hs.WriteTimeout, st.onWriteTimeout) } sc.streams[id] = st @@ -2405,7 +2397,6 @@ func (sc *serverConn) handlerDone() { // Run on its own goroutine. 
func (sc *serverConn) runHandler(rw *responseWriter, req *http.Request, handler func(http.ResponseWriter, *http.Request)) { - sc.srv.markNewGoroutine() defer sc.sendServeMsg(handlerDoneMsg) didPanic := true defer func() { @@ -2454,7 +2445,7 @@ func (sc *serverConn) writeHeaders(st *stream, headerData *writeResHeaders) erro // waiting for this frame to be written, so an http.Flush mid-handler // writes out the correct value of keys, before a handler later potentially // mutates it. - errc = errChanPool.Get().(chan error) + errc = sc.srv.state.getErrChan() } if err := sc.writeFrameFromHandler(FrameWriteRequest{ write: headerData, @@ -2466,7 +2457,7 @@ func (sc *serverConn) writeHeaders(st *stream, headerData *writeResHeaders) erro if errc != nil { select { case err := <-errc: - errChanPool.Put(errc) + sc.srv.state.putErrChan(errc) return err case <-sc.doneServing: return errClientDisconnected @@ -2573,7 +2564,7 @@ func (b *requestBody) Read(p []byte) (n int, err error) { if err == io.EOF { b.sawEOF = true } - if b.conn == nil && inTests { + if b.conn == nil { return } b.conn.noteBodyReadFromHandler(b.stream, n, err) @@ -2702,7 +2693,7 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) { var date string if _, ok := rws.snapHeader["Date"]; !ok { // TODO(bradfitz): be faster here, like net/http? measure. - date = rws.conn.srv.now().UTC().Format(http.TimeFormat) + date = time.Now().UTC().Format(http.TimeFormat) } for _, v := range rws.snapHeader["Trailer"] { @@ -2824,7 +2815,7 @@ func (rws *responseWriterState) promoteUndeclaredTrailers() { func (w *responseWriter) SetReadDeadline(deadline time.Time) error { st := w.rws.stream - if !deadline.IsZero() && deadline.Before(w.rws.conn.srv.now()) { + if !deadline.IsZero() && deadline.Before(time.Now()) { // If we're setting a deadline in the past, reset the stream immediately // so writes after SetWriteDeadline returns will fail. 
st.onReadTimeout() @@ -2840,9 +2831,9 @@ func (w *responseWriter) SetReadDeadline(deadline time.Time) error { if deadline.IsZero() { st.readDeadline = nil } else if st.readDeadline == nil { - st.readDeadline = sc.srv.afterFunc(deadline.Sub(sc.srv.now()), st.onReadTimeout) + st.readDeadline = time.AfterFunc(deadline.Sub(time.Now()), st.onReadTimeout) } else { - st.readDeadline.Reset(deadline.Sub(sc.srv.now())) + st.readDeadline.Reset(deadline.Sub(time.Now())) } }) return nil @@ -2850,7 +2841,7 @@ func (w *responseWriter) SetReadDeadline(deadline time.Time) error { func (w *responseWriter) SetWriteDeadline(deadline time.Time) error { st := w.rws.stream - if !deadline.IsZero() && deadline.Before(w.rws.conn.srv.now()) { + if !deadline.IsZero() && deadline.Before(time.Now()) { // If we're setting a deadline in the past, reset the stream immediately // so writes after SetWriteDeadline returns will fail. st.onWriteTimeout() @@ -2866,9 +2857,9 @@ func (w *responseWriter) SetWriteDeadline(deadline time.Time) error { if deadline.IsZero() { st.writeDeadline = nil } else if st.writeDeadline == nil { - st.writeDeadline = sc.srv.afterFunc(deadline.Sub(sc.srv.now()), st.onWriteTimeout) + st.writeDeadline = time.AfterFunc(deadline.Sub(time.Now()), st.onWriteTimeout) } else { - st.writeDeadline.Reset(deadline.Sub(sc.srv.now())) + st.writeDeadline.Reset(deadline.Sub(time.Now())) } }) return nil @@ -3147,7 +3138,7 @@ func (w *responseWriter) Push(target string, opts *http.PushOptions) error { method: opts.Method, url: u, header: cloneHeader(opts.Header), - done: errChanPool.Get().(chan error), + done: sc.srv.state.getErrChan(), } select { @@ -3164,7 +3155,7 @@ func (w *responseWriter) Push(target string, opts *http.PushOptions) error { case <-st.cw: return errStreamClosed case err := <-msg.done: - errChanPool.Put(msg.done) + sc.srv.state.putErrChan(msg.done) return err } } diff --git a/vendor/golang.org/x/net/http2/timer.go b/vendor/golang.org/x/net/http2/timer.go deleted file mode 
100644 index 0b1c17b81..000000000 --- a/vendor/golang.org/x/net/http2/timer.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright 2024 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. -package http2 - -import "time" - -// A timer is a time.Timer, as an interface which can be replaced in tests. -type timer = interface { - C() <-chan time.Time - Reset(d time.Duration) bool - Stop() bool -} - -// timeTimer adapts a time.Timer to the timer interface. -type timeTimer struct { - *time.Timer -} - -func (t timeTimer) C() <-chan time.Time { return t.Timer.C } diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go index f26356b9c..1965913e5 100644 --- a/vendor/golang.org/x/net/http2/transport.go +++ b/vendor/golang.org/x/net/http2/transport.go @@ -9,6 +9,7 @@ package http2 import ( "bufio" "bytes" + "compress/flate" "compress/gzip" "context" "crypto/rand" @@ -193,50 +194,6 @@ type Transport struct { type transportTestHooks struct { newclientconn func(*ClientConn) - group synctestGroupInterface -} - -func (t *Transport) markNewGoroutine() { - if t != nil && t.transportTestHooks != nil { - t.transportTestHooks.group.Join() - } -} - -func (t *Transport) now() time.Time { - if t != nil && t.transportTestHooks != nil { - return t.transportTestHooks.group.Now() - } - return time.Now() -} - -func (t *Transport) timeSince(when time.Time) time.Duration { - if t != nil && t.transportTestHooks != nil { - return t.now().Sub(when) - } - return time.Since(when) -} - -// newTimer creates a new time.Timer, or a synthetic timer in tests. -func (t *Transport) newTimer(d time.Duration) timer { - if t.transportTestHooks != nil { - return t.transportTestHooks.group.NewTimer(d) - } - return timeTimer{time.NewTimer(d)} -} - -// afterFunc creates a new time.AfterFunc timer, or a synthetic timer in tests. 
-func (t *Transport) afterFunc(d time.Duration, f func()) timer { - if t.transportTestHooks != nil { - return t.transportTestHooks.group.AfterFunc(d, f) - } - return timeTimer{time.AfterFunc(d, f)} -} - -func (t *Transport) contextWithTimeout(ctx context.Context, d time.Duration) (context.Context, context.CancelFunc) { - if t.transportTestHooks != nil { - return t.transportTestHooks.group.ContextWithTimeout(ctx, d) - } - return context.WithTimeout(ctx, d) } func (t *Transport) maxHeaderListSize() uint32 { @@ -366,7 +323,7 @@ type ClientConn struct { readerErr error // set before readerDone is closed idleTimeout time.Duration // or 0 for never - idleTimer timer + idleTimer *time.Timer mu sync.Mutex // guards following cond *sync.Cond // hold mu; broadcast on flow/closed changes @@ -399,6 +356,7 @@ type ClientConn struct { readIdleTimeout time.Duration pingTimeout time.Duration extendedConnectAllowed bool + strictMaxConcurrentStreams bool // rstStreamPingsBlocked works around an unfortunate gRPC behavior. // gRPC strictly limits the number of PING frames that it will receive. 
@@ -534,14 +492,12 @@ func (cs *clientStream) closeReqBodyLocked() { cs.reqBodyClosed = make(chan struct{}) reqBodyClosed := cs.reqBodyClosed go func() { - cs.cc.t.markNewGoroutine() cs.reqBody.Close() close(reqBodyClosed) }() } type stickyErrWriter struct { - group synctestGroupInterface conn net.Conn timeout time.Duration err *error @@ -551,7 +507,7 @@ func (sew stickyErrWriter) Write(p []byte) (n int, err error) { if *sew.err != nil { return 0, *sew.err } - n, err = writeWithByteTimeout(sew.group, sew.conn, sew.timeout, p) + n, err = writeWithByteTimeout(sew.conn, sew.timeout, p) *sew.err = err return n, err } @@ -650,9 +606,9 @@ func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Res backoff := float64(uint(1) << (uint(retry) - 1)) backoff += backoff * (0.1 * mathrand.Float64()) d := time.Second * time.Duration(backoff) - tm := t.newTimer(d) + tm := time.NewTimer(d) select { - case <-tm.C(): + case <-tm.C: t.vlogf("RoundTrip retrying after failure: %v", roundTripErr) continue case <-req.Context().Done(): @@ -699,6 +655,7 @@ var ( errClientConnUnusable = errors.New("http2: client conn not usable") errClientConnNotEstablished = errors.New("http2: client conn could not be established") errClientConnGotGoAway = errors.New("http2: Transport received Server's graceful shutdown GOAWAY") + errClientConnForceClosed = errors.New("http2: client connection force closed via ClientConn.Close") ) // shouldRetryRequest is called by RoundTrip when a request fails to get @@ -829,7 +786,8 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro initialWindowSize: 65535, // spec default initialStreamRecvWindowSize: conf.MaxUploadBufferPerStream, maxConcurrentStreams: initialMaxConcurrentStreams, // "infinite", per spec. Use a smaller value until we have received server settings. - peerMaxHeaderListSize: 0xffffffffffffffff, // "infinite", per spec. Use 2^64-1 instead. 
+ strictMaxConcurrentStreams: conf.StrictMaxConcurrentRequests, + peerMaxHeaderListSize: 0xffffffffffffffff, // "infinite", per spec. Use 2^64-1 instead. streams: make(map[uint32]*clientStream), singleUse: singleUse, seenSettingsChan: make(chan struct{}), @@ -838,14 +796,11 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro pingTimeout: conf.PingTimeout, pings: make(map[[8]byte]chan struct{}), reqHeaderMu: make(chan struct{}, 1), - lastActive: t.now(), + lastActive: time.Now(), } - var group synctestGroupInterface if t.transportTestHooks != nil { - t.markNewGoroutine() t.transportTestHooks.newclientconn(cc) c = cc.tconn - group = t.group } if VerboseLogs { t.vlogf("http2: Transport creating client conn %p to %v", cc, c.RemoteAddr()) @@ -857,7 +812,6 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro // TODO: adjust this writer size to account for frame size + // MTU + crypto/tls record padding. cc.bw = bufio.NewWriter(stickyErrWriter{ - group: group, conn: c, timeout: conf.WriteByteTimeout, err: &cc.werr, @@ -906,7 +860,7 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro // Start the idle timer after the connection is fully initialized. if d := t.idleConnTimeout(); d != 0 { cc.idleTimeout = d - cc.idleTimer = t.afterFunc(d, cc.onIdleTimeout) + cc.idleTimer = time.AfterFunc(d, cc.onIdleTimeout) } go cc.readLoop() @@ -917,7 +871,7 @@ func (cc *ClientConn) healthCheck() { pingTimeout := cc.pingTimeout // We don't need to periodically ping in the health check, because the readLoop of ClientConn will // trigger the healthCheck again if there is no frame received. 
- ctx, cancel := cc.t.contextWithTimeout(context.Background(), pingTimeout) + ctx, cancel := context.WithTimeout(context.Background(), pingTimeout) defer cancel() cc.vlogf("http2: Transport sending health check") err := cc.Ping(ctx) @@ -1067,7 +1021,7 @@ func (cc *ClientConn) idleStateLocked() (st clientConnIdleState) { return } var maxConcurrentOkay bool - if cc.t.StrictMaxConcurrentStreams { + if cc.strictMaxConcurrentStreams { // We'll tell the caller we can take a new request to // prevent the caller from dialing a new TCP // connection, but then we'll block later before @@ -1120,7 +1074,7 @@ func (cc *ClientConn) tooIdleLocked() bool { // times are compared based on their wall time. We don't want // to reuse a connection that's been sitting idle during // VM/laptop suspend if monotonic time was also frozen. - return cc.idleTimeout != 0 && !cc.lastIdle.IsZero() && cc.t.timeSince(cc.lastIdle.Round(0)) > cc.idleTimeout + return cc.idleTimeout != 0 && !cc.lastIdle.IsZero() && time.Since(cc.lastIdle.Round(0)) > cc.idleTimeout } // onIdleTimeout is called from a time.AfterFunc goroutine. It will @@ -1186,7 +1140,6 @@ func (cc *ClientConn) Shutdown(ctx context.Context) error { done := make(chan struct{}) cancelled := false // guarded by cc.mu go func() { - cc.t.markNewGoroutine() cc.mu.Lock() defer cc.mu.Unlock() for { @@ -1257,8 +1210,7 @@ func (cc *ClientConn) closeForError(err error) { // // In-flight requests are interrupted. For a graceful shutdown, use Shutdown instead. func (cc *ClientConn) Close() error { - err := errors.New("http2: client connection force closed via ClientConn.Close") - cc.closeForError(err) + cc.closeForError(errClientConnForceClosed) return nil } @@ -1427,7 +1379,6 @@ func (cc *ClientConn) roundTrip(req *http.Request, streamf func(*clientStream)) // // It sends the request and performs post-request cleanup (closing Request.Body, etc.). 
func (cs *clientStream) doRequest(req *http.Request, streamf func(*clientStream)) { - cs.cc.t.markNewGoroutine() err := cs.writeRequest(req, streamf) cs.cleanupWriteRequest(err) } @@ -1558,9 +1509,9 @@ func (cs *clientStream) writeRequest(req *http.Request, streamf func(*clientStre var respHeaderTimer <-chan time.Time var respHeaderRecv chan struct{} if d := cc.responseHeaderTimeout(); d != 0 { - timer := cc.t.newTimer(d) + timer := time.NewTimer(d) defer timer.Stop() - respHeaderTimer = timer.C() + respHeaderTimer = timer.C respHeaderRecv = cs.respHeaderRecv } // Wait until the peer half-closes its end of the stream, @@ -1753,7 +1704,7 @@ func (cc *ClientConn) awaitOpenSlotForStreamLocked(cs *clientStream) error { // Return a fatal error which aborts the retry loop. return errClientConnNotEstablished } - cc.lastActive = cc.t.now() + cc.lastActive = time.Now() if cc.closed || !cc.canTakeNewRequestLocked() { return errClientConnUnusable } @@ -2092,10 +2043,10 @@ func (cc *ClientConn) forgetStreamID(id uint32) { if len(cc.streams) != slen-1 { panic("forgetting unknown stream id") } - cc.lastActive = cc.t.now() + cc.lastActive = time.Now() if len(cc.streams) == 0 && cc.idleTimer != nil { cc.idleTimer.Reset(cc.idleTimeout) - cc.lastIdle = cc.t.now() + cc.lastIdle = time.Now() } // Wake up writeRequestBody via clientStream.awaitFlowControl and // wake up RoundTrip if there is a pending request. @@ -2121,7 +2072,6 @@ type clientConnReadLoop struct { // readLoop runs in its own goroutine and reads and dispatches frames. 
func (cc *ClientConn) readLoop() { - cc.t.markNewGoroutine() rl := &clientConnReadLoop{cc: cc} defer rl.cleanup() cc.readerErr = rl.run() @@ -2188,9 +2138,9 @@ func (rl *clientConnReadLoop) cleanup() { if cc.idleTimeout > 0 && unusedWaitTime > cc.idleTimeout { unusedWaitTime = cc.idleTimeout } - idleTime := cc.t.now().Sub(cc.lastActive) + idleTime := time.Now().Sub(cc.lastActive) if atomic.LoadUint32(&cc.atomicReused) == 0 && idleTime < unusedWaitTime && !cc.closedOnIdle { - cc.idleTimer = cc.t.afterFunc(unusedWaitTime-idleTime, func() { + cc.idleTimer = time.AfterFunc(unusedWaitTime-idleTime, func() { cc.t.connPool().MarkDead(cc) }) } else { @@ -2250,9 +2200,9 @@ func (rl *clientConnReadLoop) run() error { cc := rl.cc gotSettings := false readIdleTimeout := cc.readIdleTimeout - var t timer + var t *time.Timer if readIdleTimeout != 0 { - t = cc.t.afterFunc(readIdleTimeout, cc.healthCheck) + t = time.AfterFunc(readIdleTimeout, cc.healthCheck) } for { f, err := cc.fr.ReadFrame() @@ -2998,7 +2948,6 @@ func (cc *ClientConn) Ping(ctx context.Context) error { var pingError error errc := make(chan struct{}) go func() { - cc.t.markNewGoroutine() cc.wmu.Lock() defer cc.wmu.Unlock() if pingError = cc.fr.WritePing(false, p); pingError != nil { @@ -3128,35 +3077,102 @@ type erringRoundTripper struct{ err error } func (rt erringRoundTripper) RoundTripErr() error { return rt.err } func (rt erringRoundTripper) RoundTrip(*http.Request) (*http.Response, error) { return nil, rt.err } +var errConcurrentReadOnResBody = errors.New("http2: concurrent read on response body") + // gzipReader wraps a response body so it can lazily -// call gzip.NewReader on the first call to Read +// get gzip.Reader from the pool on the first call to Read. +// After Close is called it puts gzip.Reader to the pool immediately +// if there is no Read in progress or later when Read completes. 
type gzipReader struct { _ incomparable body io.ReadCloser // underlying Response.Body - zr *gzip.Reader // lazily-initialized gzip reader - zerr error // sticky error + mu sync.Mutex // guards zr and zerr + zr *gzip.Reader // stores gzip reader from the pool between reads + zerr error // sticky gzip reader init error or sentinel value to detect concurrent read and read after close } -func (gz *gzipReader) Read(p []byte) (n int, err error) { +type eofReader struct{} + +func (eofReader) Read([]byte) (int, error) { return 0, io.EOF } +func (eofReader) ReadByte() (byte, error) { return 0, io.EOF } + +var gzipPool = sync.Pool{New: func() any { return new(gzip.Reader) }} + +// gzipPoolGet gets a gzip.Reader from the pool and resets it to read from r. +func gzipPoolGet(r io.Reader) (*gzip.Reader, error) { + zr := gzipPool.Get().(*gzip.Reader) + if err := zr.Reset(r); err != nil { + gzipPoolPut(zr) + return nil, err + } + return zr, nil +} + +// gzipPoolPut puts a gzip.Reader back into the pool. +func gzipPoolPut(zr *gzip.Reader) { + // Reset will allocate bufio.Reader if we pass it anything + // other than a flate.Reader, so ensure that it's getting one. + var r flate.Reader = eofReader{} + zr.Reset(r) + gzipPool.Put(zr) +} + +// acquire returns a gzip.Reader for reading response body. +// The reader must be released after use. +func (gz *gzipReader) acquire() (*gzip.Reader, error) { + gz.mu.Lock() + defer gz.mu.Unlock() if gz.zerr != nil { - return 0, gz.zerr + return nil, gz.zerr } if gz.zr == nil { - gz.zr, err = gzip.NewReader(gz.body) - if err != nil { - gz.zerr = err - return 0, err + gz.zr, gz.zerr = gzipPoolGet(gz.body) + if gz.zerr != nil { + return nil, gz.zerr } } - return gz.zr.Read(p) + ret := gz.zr + gz.zr, gz.zerr = nil, errConcurrentReadOnResBody + return ret, nil } -func (gz *gzipReader) Close() error { - if err := gz.body.Close(); err != nil { - return err +// release returns the gzip.Reader to the pool if Close was called during Read. 
+func (gz *gzipReader) release(zr *gzip.Reader) { + gz.mu.Lock() + defer gz.mu.Unlock() + if gz.zerr == errConcurrentReadOnResBody { + gz.zr, gz.zerr = zr, nil + } else { // fs.ErrClosed + gzipPoolPut(zr) + } +} + +// close returns the gzip.Reader to the pool immediately or +// signals release to do so after Read completes. +func (gz *gzipReader) close() { + gz.mu.Lock() + defer gz.mu.Unlock() + if gz.zerr == nil && gz.zr != nil { + gzipPoolPut(gz.zr) + gz.zr = nil } gz.zerr = fs.ErrClosed - return nil +} + +func (gz *gzipReader) Read(p []byte) (n int, err error) { + zr, err := gz.acquire() + if err != nil { + return 0, err + } + defer gz.release(zr) + + return zr.Read(p) +} + +func (gz *gzipReader) Close() error { + gz.close() + + return gz.body.Close() } type errorReader struct{ err error } @@ -3228,7 +3244,7 @@ func traceGotConn(req *http.Request, cc *ClientConn, reused bool) { cc.mu.Lock() ci.WasIdle = len(cc.streams) == 0 && reused if ci.WasIdle && !cc.lastActive.IsZero() { - ci.IdleTime = cc.t.timeSince(cc.lastActive) + ci.IdleTime = time.Since(cc.lastActive) } cc.mu.Unlock() diff --git a/vendor/golang.org/x/net/http2/writesched.go b/vendor/golang.org/x/net/http2/writesched.go index cc893adc2..7de27be52 100644 --- a/vendor/golang.org/x/net/http2/writesched.go +++ b/vendor/golang.org/x/net/http2/writesched.go @@ -42,6 +42,8 @@ type OpenStreamOptions struct { // PusherID is zero if the stream was initiated by the client. Otherwise, // PusherID names the stream that pushed the newly opened stream. PusherID uint32 + // priority is used to set the priority of the newly opened stream. + priority PriorityParam } // FrameWriteRequest is a request to write a frame. @@ -183,45 +185,75 @@ func (wr *FrameWriteRequest) replyToWriter(err error) { } // writeQueue is used by implementations of WriteScheduler. +// +// Each writeQueue contains a queue of FrameWriteRequests, meant to store all +// FrameWriteRequests associated with a given stream. 
This is implemented as a +// two-stage queue: currQueue[currPos:] and nextQueue. Removing an item is done +// by incrementing currPos of currQueue. Adding an item is done by appending it +// to the nextQueue. If currQueue is empty when trying to remove an item, we +// can swap currQueue and nextQueue to remedy the situation. +// This two-stage queue is analogous to the use of two lists in Okasaki's +// purely functional queue but without the overhead of reversing the list when +// swapping stages. +// +// writeQueue also contains prev and next, this can be used by implementations +// of WriteScheduler to construct data structures that represent the order of +// writing between different streams (e.g. circular linked list). type writeQueue struct { - s []FrameWriteRequest + currQueue []FrameWriteRequest + nextQueue []FrameWriteRequest + currPos int + prev, next *writeQueue } -func (q *writeQueue) empty() bool { return len(q.s) == 0 } +func (q *writeQueue) empty() bool { + return (len(q.currQueue) - q.currPos + len(q.nextQueue)) == 0 +} func (q *writeQueue) push(wr FrameWriteRequest) { - q.s = append(q.s, wr) + q.nextQueue = append(q.nextQueue, wr) } func (q *writeQueue) shift() FrameWriteRequest { - if len(q.s) == 0 { + if q.empty() { panic("invalid use of queue") } - wr := q.s[0] - // TODO: less copy-happy queue. - copy(q.s, q.s[1:]) - q.s[len(q.s)-1] = FrameWriteRequest{} - q.s = q.s[:len(q.s)-1] + if q.currPos >= len(q.currQueue) { + q.currQueue, q.currPos, q.nextQueue = q.nextQueue, 0, q.currQueue[:0] + } + wr := q.currQueue[q.currPos] + q.currQueue[q.currPos] = FrameWriteRequest{} + q.currPos++ return wr } +func (q *writeQueue) peek() *FrameWriteRequest { + if q.currPos < len(q.currQueue) { + return &q.currQueue[q.currPos] + } + if len(q.nextQueue) > 0 { + return &q.nextQueue[0] + } + return nil +} + // consume consumes up to n bytes from q.s[0]. If the frame is // entirely consumed, it is removed from the queue. 
If the frame // is partially consumed, the frame is kept with the consumed // bytes removed. Returns true iff any bytes were consumed. func (q *writeQueue) consume(n int32) (FrameWriteRequest, bool) { - if len(q.s) == 0 { + if q.empty() { return FrameWriteRequest{}, false } - consumed, rest, numresult := q.s[0].Consume(n) + consumed, rest, numresult := q.peek().Consume(n) switch numresult { case 0: return FrameWriteRequest{}, false case 1: q.shift() case 2: - q.s[0] = rest + *q.peek() = rest } return consumed, true } @@ -230,10 +262,15 @@ type writeQueuePool []*writeQueue // put inserts an unused writeQueue into the pool. func (p *writeQueuePool) put(q *writeQueue) { - for i := range q.s { - q.s[i] = FrameWriteRequest{} + for i := range q.currQueue { + q.currQueue[i] = FrameWriteRequest{} + } + for i := range q.nextQueue { + q.nextQueue[i] = FrameWriteRequest{} } - q.s = q.s[:0] + q.currQueue = q.currQueue[:0] + q.nextQueue = q.nextQueue[:0] + q.currPos = 0 *p = append(*p, q) } diff --git a/vendor/golang.org/x/net/http2/writesched_priority.go b/vendor/golang.org/x/net/http2/writesched_priority.go deleted file mode 100644 index f6783339d..000000000 --- a/vendor/golang.org/x/net/http2/writesched_priority.go +++ /dev/null @@ -1,451 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package http2 - -import ( - "fmt" - "math" - "sort" -) - -// RFC 7540, Section 5.3.5: the default weight is 16. -const priorityDefaultWeight = 15 // 16 = 15 + 1 - -// PriorityWriteSchedulerConfig configures a priorityWriteScheduler. -type PriorityWriteSchedulerConfig struct { - // MaxClosedNodesInTree controls the maximum number of closed streams to - // retain in the priority tree. Setting this to zero saves a small amount - // of memory at the cost of performance. 
- // - // See RFC 7540, Section 5.3.4: - // "It is possible for a stream to become closed while prioritization - // information ... is in transit. ... This potentially creates suboptimal - // prioritization, since the stream could be given a priority that is - // different from what is intended. To avoid these problems, an endpoint - // SHOULD retain stream prioritization state for a period after streams - // become closed. The longer state is retained, the lower the chance that - // streams are assigned incorrect or default priority values." - MaxClosedNodesInTree int - - // MaxIdleNodesInTree controls the maximum number of idle streams to - // retain in the priority tree. Setting this to zero saves a small amount - // of memory at the cost of performance. - // - // See RFC 7540, Section 5.3.4: - // Similarly, streams that are in the "idle" state can be assigned - // priority or become a parent of other streams. This allows for the - // creation of a grouping node in the dependency tree, which enables - // more flexible expressions of priority. Idle streams begin with a - // default priority (Section 5.3.5). - MaxIdleNodesInTree int - - // ThrottleOutOfOrderWrites enables write throttling to help ensure that - // data is delivered in priority order. This works around a race where - // stream B depends on stream A and both streams are about to call Write - // to queue DATA frames. If B wins the race, a naive scheduler would eagerly - // write as much data from B as possible, but this is suboptimal because A - // is a higher-priority stream. With throttling enabled, we write a small - // amount of data from B to minimize the amount of bandwidth that B can - // steal from A. - ThrottleOutOfOrderWrites bool -} - -// NewPriorityWriteScheduler constructs a WriteScheduler that schedules -// frames by following HTTP/2 priorities as described in RFC 7540 Section 5.3. -// If cfg is nil, default options are used. 
-func NewPriorityWriteScheduler(cfg *PriorityWriteSchedulerConfig) WriteScheduler { - if cfg == nil { - // For justification of these defaults, see: - // https://docs.google.com/document/d/1oLhNg1skaWD4_DtaoCxdSRN5erEXrH-KnLrMwEpOtFY - cfg = &PriorityWriteSchedulerConfig{ - MaxClosedNodesInTree: 10, - MaxIdleNodesInTree: 10, - ThrottleOutOfOrderWrites: false, - } - } - - ws := &priorityWriteScheduler{ - nodes: make(map[uint32]*priorityNode), - maxClosedNodesInTree: cfg.MaxClosedNodesInTree, - maxIdleNodesInTree: cfg.MaxIdleNodesInTree, - enableWriteThrottle: cfg.ThrottleOutOfOrderWrites, - } - ws.nodes[0] = &ws.root - if cfg.ThrottleOutOfOrderWrites { - ws.writeThrottleLimit = 1024 - } else { - ws.writeThrottleLimit = math.MaxInt32 - } - return ws -} - -type priorityNodeState int - -const ( - priorityNodeOpen priorityNodeState = iota - priorityNodeClosed - priorityNodeIdle -) - -// priorityNode is a node in an HTTP/2 priority tree. -// Each node is associated with a single stream ID. -// See RFC 7540, Section 5.3. -type priorityNode struct { - q writeQueue // queue of pending frames to write - id uint32 // id of the stream, or 0 for the root of the tree - weight uint8 // the actual weight is weight+1, so the value is in [1,256] - state priorityNodeState // open | closed | idle - bytes int64 // number of bytes written by this node, or 0 if closed - subtreeBytes int64 // sum(node.bytes) of all nodes in this subtree - - // These links form the priority tree. - parent *priorityNode - kids *priorityNode // start of the kids list - prev, next *priorityNode // doubly-linked list of siblings -} - -func (n *priorityNode) setParent(parent *priorityNode) { - if n == parent { - panic("setParent to self") - } - if n.parent == parent { - return - } - // Unlink from current parent. - if parent := n.parent; parent != nil { - if n.prev == nil { - parent.kids = n.next - } else { - n.prev.next = n.next - } - if n.next != nil { - n.next.prev = n.prev - } - } - // Link to new parent. 
- // If parent=nil, remove n from the tree. - // Always insert at the head of parent.kids (this is assumed by walkReadyInOrder). - n.parent = parent - if parent == nil { - n.next = nil - n.prev = nil - } else { - n.next = parent.kids - n.prev = nil - if n.next != nil { - n.next.prev = n - } - parent.kids = n - } -} - -func (n *priorityNode) addBytes(b int64) { - n.bytes += b - for ; n != nil; n = n.parent { - n.subtreeBytes += b - } -} - -// walkReadyInOrder iterates over the tree in priority order, calling f for each node -// with a non-empty write queue. When f returns true, this function returns true and the -// walk halts. tmp is used as scratch space for sorting. -// -// f(n, openParent) takes two arguments: the node to visit, n, and a bool that is true -// if any ancestor p of n is still open (ignoring the root node). -func (n *priorityNode) walkReadyInOrder(openParent bool, tmp *[]*priorityNode, f func(*priorityNode, bool) bool) bool { - if !n.q.empty() && f(n, openParent) { - return true - } - if n.kids == nil { - return false - } - - // Don't consider the root "open" when updating openParent since - // we can't send data frames on the root stream (only control frames). - if n.id != 0 { - openParent = openParent || (n.state == priorityNodeOpen) - } - - // Common case: only one kid or all kids have the same weight. - // Some clients don't use weights; other clients (like web browsers) - // use mostly-linear priority trees. - w := n.kids.weight - needSort := false - for k := n.kids.next; k != nil; k = k.next { - if k.weight != w { - needSort = true - break - } - } - if !needSort { - for k := n.kids; k != nil; k = k.next { - if k.walkReadyInOrder(openParent, tmp, f) { - return true - } - } - return false - } - - // Uncommon case: sort the child nodes. We remove the kids from the parent, - // then re-insert after sorting so we can reuse tmp for future sort calls. 
- *tmp = (*tmp)[:0] - for n.kids != nil { - *tmp = append(*tmp, n.kids) - n.kids.setParent(nil) - } - sort.Sort(sortPriorityNodeSiblings(*tmp)) - for i := len(*tmp) - 1; i >= 0; i-- { - (*tmp)[i].setParent(n) // setParent inserts at the head of n.kids - } - for k := n.kids; k != nil; k = k.next { - if k.walkReadyInOrder(openParent, tmp, f) { - return true - } - } - return false -} - -type sortPriorityNodeSiblings []*priorityNode - -func (z sortPriorityNodeSiblings) Len() int { return len(z) } -func (z sortPriorityNodeSiblings) Swap(i, k int) { z[i], z[k] = z[k], z[i] } -func (z sortPriorityNodeSiblings) Less(i, k int) bool { - // Prefer the subtree that has sent fewer bytes relative to its weight. - // See sections 5.3.2 and 5.3.4. - wi, bi := float64(z[i].weight+1), float64(z[i].subtreeBytes) - wk, bk := float64(z[k].weight+1), float64(z[k].subtreeBytes) - if bi == 0 && bk == 0 { - return wi >= wk - } - if bk == 0 { - return false - } - return bi/bk <= wi/wk -} - -type priorityWriteScheduler struct { - // root is the root of the priority tree, where root.id = 0. - // The root queues control frames that are not associated with any stream. - root priorityNode - - // nodes maps stream ids to priority tree nodes. - nodes map[uint32]*priorityNode - - // maxID is the maximum stream id in nodes. - maxID uint32 - - // lists of nodes that have been closed or are idle, but are kept in - // the tree for improved prioritization. When the lengths exceed either - // maxClosedNodesInTree or maxIdleNodesInTree, old nodes are discarded. - closedNodes, idleNodes []*priorityNode - - // From the config. - maxClosedNodesInTree int - maxIdleNodesInTree int - writeThrottleLimit int32 - enableWriteThrottle bool - - // tmp is scratch space for priorityNode.walkReadyInOrder to reduce allocations. - tmp []*priorityNode - - // pool of empty queues for reuse. 
- queuePool writeQueuePool -} - -func (ws *priorityWriteScheduler) OpenStream(streamID uint32, options OpenStreamOptions) { - // The stream may be currently idle but cannot be opened or closed. - if curr := ws.nodes[streamID]; curr != nil { - if curr.state != priorityNodeIdle { - panic(fmt.Sprintf("stream %d already opened", streamID)) - } - curr.state = priorityNodeOpen - return - } - - // RFC 7540, Section 5.3.5: - // "All streams are initially assigned a non-exclusive dependency on stream 0x0. - // Pushed streams initially depend on their associated stream. In both cases, - // streams are assigned a default weight of 16." - parent := ws.nodes[options.PusherID] - if parent == nil { - parent = &ws.root - } - n := &priorityNode{ - q: *ws.queuePool.get(), - id: streamID, - weight: priorityDefaultWeight, - state: priorityNodeOpen, - } - n.setParent(parent) - ws.nodes[streamID] = n - if streamID > ws.maxID { - ws.maxID = streamID - } -} - -func (ws *priorityWriteScheduler) CloseStream(streamID uint32) { - if streamID == 0 { - panic("violation of WriteScheduler interface: cannot close stream 0") - } - if ws.nodes[streamID] == nil { - panic(fmt.Sprintf("violation of WriteScheduler interface: unknown stream %d", streamID)) - } - if ws.nodes[streamID].state != priorityNodeOpen { - panic(fmt.Sprintf("violation of WriteScheduler interface: stream %d already closed", streamID)) - } - - n := ws.nodes[streamID] - n.state = priorityNodeClosed - n.addBytes(-n.bytes) - - q := n.q - ws.queuePool.put(&q) - n.q.s = nil - if ws.maxClosedNodesInTree > 0 { - ws.addClosedOrIdleNode(&ws.closedNodes, ws.maxClosedNodesInTree, n) - } else { - ws.removeNode(n) - } -} - -func (ws *priorityWriteScheduler) AdjustStream(streamID uint32, priority PriorityParam) { - if streamID == 0 { - panic("adjustPriority on root") - } - - // If streamID does not exist, there are two cases: - // - A closed stream that has been removed (this will have ID <= maxID) - // - An idle stream that is being used for 
"grouping" (this will have ID > maxID) - n := ws.nodes[streamID] - if n == nil { - if streamID <= ws.maxID || ws.maxIdleNodesInTree == 0 { - return - } - ws.maxID = streamID - n = &priorityNode{ - q: *ws.queuePool.get(), - id: streamID, - weight: priorityDefaultWeight, - state: priorityNodeIdle, - } - n.setParent(&ws.root) - ws.nodes[streamID] = n - ws.addClosedOrIdleNode(&ws.idleNodes, ws.maxIdleNodesInTree, n) - } - - // Section 5.3.1: A dependency on a stream that is not currently in the tree - // results in that stream being given a default priority (Section 5.3.5). - parent := ws.nodes[priority.StreamDep] - if parent == nil { - n.setParent(&ws.root) - n.weight = priorityDefaultWeight - return - } - - // Ignore if the client tries to make a node its own parent. - if n == parent { - return - } - - // Section 5.3.3: - // "If a stream is made dependent on one of its own dependencies, the - // formerly dependent stream is first moved to be dependent on the - // reprioritized stream's previous parent. The moved dependency retains - // its weight." - // - // That is: if parent depends on n, move parent to depend on n.parent. - for x := parent.parent; x != nil; x = x.parent { - if x == n { - parent.setParent(n.parent) - break - } - } - - // Section 5.3.3: The exclusive flag causes the stream to become the sole - // dependency of its parent stream, causing other dependencies to become - // dependent on the exclusive stream. - if priority.Exclusive { - k := parent.kids - for k != nil { - next := k.next - if k != n { - k.setParent(n) - } - k = next - } - } - - n.setParent(parent) - n.weight = priority.Weight -} - -func (ws *priorityWriteScheduler) Push(wr FrameWriteRequest) { - var n *priorityNode - if wr.isControl() { - n = &ws.root - } else { - id := wr.StreamID() - n = ws.nodes[id] - if n == nil { - // id is an idle or closed stream. wr should not be a HEADERS or - // DATA frame. In other case, we push wr onto the root, rather - // than creating a new priorityNode. 
- if wr.DataSize() > 0 { - panic("add DATA on non-open stream") - } - n = &ws.root - } - } - n.q.push(wr) -} - -func (ws *priorityWriteScheduler) Pop() (wr FrameWriteRequest, ok bool) { - ws.root.walkReadyInOrder(false, &ws.tmp, func(n *priorityNode, openParent bool) bool { - limit := int32(math.MaxInt32) - if openParent { - limit = ws.writeThrottleLimit - } - wr, ok = n.q.consume(limit) - if !ok { - return false - } - n.addBytes(int64(wr.DataSize())) - // If B depends on A and B continuously has data available but A - // does not, gradually increase the throttling limit to allow B to - // steal more and more bandwidth from A. - if openParent { - ws.writeThrottleLimit += 1024 - if ws.writeThrottleLimit < 0 { - ws.writeThrottleLimit = math.MaxInt32 - } - } else if ws.enableWriteThrottle { - ws.writeThrottleLimit = 1024 - } - return true - }) - return wr, ok -} - -func (ws *priorityWriteScheduler) addClosedOrIdleNode(list *[]*priorityNode, maxSize int, n *priorityNode) { - if maxSize == 0 { - return - } - if len(*list) == maxSize { - // Remove the oldest node, then shift left. - ws.removeNode((*list)[0]) - x := (*list)[1:] - copy(*list, x) - *list = (*list)[:len(x)] - } - *list = append(*list, n) -} - -func (ws *priorityWriteScheduler) removeNode(n *priorityNode) { - for n.kids != nil { - n.kids.setParent(n.parent) - } - n.setParent(nil) - delete(ws.nodes, n.id) -} diff --git a/vendor/golang.org/x/net/http2/writesched_priority_rfc7540.go b/vendor/golang.org/x/net/http2/writesched_priority_rfc7540.go new file mode 100644 index 000000000..4e33c29a2 --- /dev/null +++ b/vendor/golang.org/x/net/http2/writesched_priority_rfc7540.go @@ -0,0 +1,450 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +import ( + "fmt" + "math" + "sort" +) + +// RFC 7540, Section 5.3.5: the default weight is 16. 
+const priorityDefaultWeightRFC7540 = 15 // 16 = 15 + 1 + +// PriorityWriteSchedulerConfig configures a priorityWriteScheduler. +type PriorityWriteSchedulerConfig struct { + // MaxClosedNodesInTree controls the maximum number of closed streams to + // retain in the priority tree. Setting this to zero saves a small amount + // of memory at the cost of performance. + // + // See RFC 7540, Section 5.3.4: + // "It is possible for a stream to become closed while prioritization + // information ... is in transit. ... This potentially creates suboptimal + // prioritization, since the stream could be given a priority that is + // different from what is intended. To avoid these problems, an endpoint + // SHOULD retain stream prioritization state for a period after streams + // become closed. The longer state is retained, the lower the chance that + // streams are assigned incorrect or default priority values." + MaxClosedNodesInTree int + + // MaxIdleNodesInTree controls the maximum number of idle streams to + // retain in the priority tree. Setting this to zero saves a small amount + // of memory at the cost of performance. + // + // See RFC 7540, Section 5.3.4: + // Similarly, streams that are in the "idle" state can be assigned + // priority or become a parent of other streams. This allows for the + // creation of a grouping node in the dependency tree, which enables + // more flexible expressions of priority. Idle streams begin with a + // default priority (Section 5.3.5). + MaxIdleNodesInTree int + + // ThrottleOutOfOrderWrites enables write throttling to help ensure that + // data is delivered in priority order. This works around a race where + // stream B depends on stream A and both streams are about to call Write + // to queue DATA frames. If B wins the race, a naive scheduler would eagerly + // write as much data from B as possible, but this is suboptimal because A + // is a higher-priority stream. 
With throttling enabled, we write a small + // amount of data from B to minimize the amount of bandwidth that B can + // steal from A. + ThrottleOutOfOrderWrites bool +} + +// NewPriorityWriteScheduler constructs a WriteScheduler that schedules +// frames by following HTTP/2 priorities as described in RFC 7540 Section 5.3. +// If cfg is nil, default options are used. +func NewPriorityWriteScheduler(cfg *PriorityWriteSchedulerConfig) WriteScheduler { + if cfg == nil { + // For justification of these defaults, see: + // https://docs.google.com/document/d/1oLhNg1skaWD4_DtaoCxdSRN5erEXrH-KnLrMwEpOtFY + cfg = &PriorityWriteSchedulerConfig{ + MaxClosedNodesInTree: 10, + MaxIdleNodesInTree: 10, + ThrottleOutOfOrderWrites: false, + } + } + + ws := &priorityWriteSchedulerRFC7540{ + nodes: make(map[uint32]*priorityNodeRFC7540), + maxClosedNodesInTree: cfg.MaxClosedNodesInTree, + maxIdleNodesInTree: cfg.MaxIdleNodesInTree, + enableWriteThrottle: cfg.ThrottleOutOfOrderWrites, + } + ws.nodes[0] = &ws.root + if cfg.ThrottleOutOfOrderWrites { + ws.writeThrottleLimit = 1024 + } else { + ws.writeThrottleLimit = math.MaxInt32 + } + return ws +} + +type priorityNodeStateRFC7540 int + +const ( + priorityNodeOpenRFC7540 priorityNodeStateRFC7540 = iota + priorityNodeClosedRFC7540 + priorityNodeIdleRFC7540 +) + +// priorityNodeRFC7540 is a node in an HTTP/2 priority tree. +// Each node is associated with a single stream ID. +// See RFC 7540, Section 5.3. +type priorityNodeRFC7540 struct { + q writeQueue // queue of pending frames to write + id uint32 // id of the stream, or 0 for the root of the tree + weight uint8 // the actual weight is weight+1, so the value is in [1,256] + state priorityNodeStateRFC7540 // open | closed | idle + bytes int64 // number of bytes written by this node, or 0 if closed + subtreeBytes int64 // sum(node.bytes) of all nodes in this subtree + + // These links form the priority tree. 
+ parent *priorityNodeRFC7540 + kids *priorityNodeRFC7540 // start of the kids list + prev, next *priorityNodeRFC7540 // doubly-linked list of siblings +} + +func (n *priorityNodeRFC7540) setParent(parent *priorityNodeRFC7540) { + if n == parent { + panic("setParent to self") + } + if n.parent == parent { + return + } + // Unlink from current parent. + if parent := n.parent; parent != nil { + if n.prev == nil { + parent.kids = n.next + } else { + n.prev.next = n.next + } + if n.next != nil { + n.next.prev = n.prev + } + } + // Link to new parent. + // If parent=nil, remove n from the tree. + // Always insert at the head of parent.kids (this is assumed by walkReadyInOrder). + n.parent = parent + if parent == nil { + n.next = nil + n.prev = nil + } else { + n.next = parent.kids + n.prev = nil + if n.next != nil { + n.next.prev = n + } + parent.kids = n + } +} + +func (n *priorityNodeRFC7540) addBytes(b int64) { + n.bytes += b + for ; n != nil; n = n.parent { + n.subtreeBytes += b + } +} + +// walkReadyInOrder iterates over the tree in priority order, calling f for each node +// with a non-empty write queue. When f returns true, this function returns true and the +// walk halts. tmp is used as scratch space for sorting. +// +// f(n, openParent) takes two arguments: the node to visit, n, and a bool that is true +// if any ancestor p of n is still open (ignoring the root node). +func (n *priorityNodeRFC7540) walkReadyInOrder(openParent bool, tmp *[]*priorityNodeRFC7540, f func(*priorityNodeRFC7540, bool) bool) bool { + if !n.q.empty() && f(n, openParent) { + return true + } + if n.kids == nil { + return false + } + + // Don't consider the root "open" when updating openParent since + // we can't send data frames on the root stream (only control frames). + if n.id != 0 { + openParent = openParent || (n.state == priorityNodeOpenRFC7540) + } + + // Common case: only one kid or all kids have the same weight. 
+ // Some clients don't use weights; other clients (like web browsers) + // use mostly-linear priority trees. + w := n.kids.weight + needSort := false + for k := n.kids.next; k != nil; k = k.next { + if k.weight != w { + needSort = true + break + } + } + if !needSort { + for k := n.kids; k != nil; k = k.next { + if k.walkReadyInOrder(openParent, tmp, f) { + return true + } + } + return false + } + + // Uncommon case: sort the child nodes. We remove the kids from the parent, + // then re-insert after sorting so we can reuse tmp for future sort calls. + *tmp = (*tmp)[:0] + for n.kids != nil { + *tmp = append(*tmp, n.kids) + n.kids.setParent(nil) + } + sort.Sort(sortPriorityNodeSiblingsRFC7540(*tmp)) + for i := len(*tmp) - 1; i >= 0; i-- { + (*tmp)[i].setParent(n) // setParent inserts at the head of n.kids + } + for k := n.kids; k != nil; k = k.next { + if k.walkReadyInOrder(openParent, tmp, f) { + return true + } + } + return false +} + +type sortPriorityNodeSiblingsRFC7540 []*priorityNodeRFC7540 + +func (z sortPriorityNodeSiblingsRFC7540) Len() int { return len(z) } +func (z sortPriorityNodeSiblingsRFC7540) Swap(i, k int) { z[i], z[k] = z[k], z[i] } +func (z sortPriorityNodeSiblingsRFC7540) Less(i, k int) bool { + // Prefer the subtree that has sent fewer bytes relative to its weight. + // See sections 5.3.2 and 5.3.4. + wi, bi := float64(z[i].weight)+1, float64(z[i].subtreeBytes) + wk, bk := float64(z[k].weight)+1, float64(z[k].subtreeBytes) + if bi == 0 && bk == 0 { + return wi >= wk + } + if bk == 0 { + return false + } + return bi/bk <= wi/wk +} + +type priorityWriteSchedulerRFC7540 struct { + // root is the root of the priority tree, where root.id = 0. + // The root queues control frames that are not associated with any stream. + root priorityNodeRFC7540 + + // nodes maps stream ids to priority tree nodes. + nodes map[uint32]*priorityNodeRFC7540 + + // maxID is the maximum stream id in nodes. 
+ maxID uint32 + + // lists of nodes that have been closed or are idle, but are kept in + // the tree for improved prioritization. When the lengths exceed either + // maxClosedNodesInTree or maxIdleNodesInTree, old nodes are discarded. + closedNodes, idleNodes []*priorityNodeRFC7540 + + // From the config. + maxClosedNodesInTree int + maxIdleNodesInTree int + writeThrottleLimit int32 + enableWriteThrottle bool + + // tmp is scratch space for priorityNode.walkReadyInOrder to reduce allocations. + tmp []*priorityNodeRFC7540 + + // pool of empty queues for reuse. + queuePool writeQueuePool +} + +func (ws *priorityWriteSchedulerRFC7540) OpenStream(streamID uint32, options OpenStreamOptions) { + // The stream may be currently idle but cannot be opened or closed. + if curr := ws.nodes[streamID]; curr != nil { + if curr.state != priorityNodeIdleRFC7540 { + panic(fmt.Sprintf("stream %d already opened", streamID)) + } + curr.state = priorityNodeOpenRFC7540 + return + } + + // RFC 7540, Section 5.3.5: + // "All streams are initially assigned a non-exclusive dependency on stream 0x0. + // Pushed streams initially depend on their associated stream. In both cases, + // streams are assigned a default weight of 16." 
+ parent := ws.nodes[options.PusherID] + if parent == nil { + parent = &ws.root + } + n := &priorityNodeRFC7540{ + q: *ws.queuePool.get(), + id: streamID, + weight: priorityDefaultWeightRFC7540, + state: priorityNodeOpenRFC7540, + } + n.setParent(parent) + ws.nodes[streamID] = n + if streamID > ws.maxID { + ws.maxID = streamID + } +} + +func (ws *priorityWriteSchedulerRFC7540) CloseStream(streamID uint32) { + if streamID == 0 { + panic("violation of WriteScheduler interface: cannot close stream 0") + } + if ws.nodes[streamID] == nil { + panic(fmt.Sprintf("violation of WriteScheduler interface: unknown stream %d", streamID)) + } + if ws.nodes[streamID].state != priorityNodeOpenRFC7540 { + panic(fmt.Sprintf("violation of WriteScheduler interface: stream %d already closed", streamID)) + } + + n := ws.nodes[streamID] + n.state = priorityNodeClosedRFC7540 + n.addBytes(-n.bytes) + + q := n.q + ws.queuePool.put(&q) + if ws.maxClosedNodesInTree > 0 { + ws.addClosedOrIdleNode(&ws.closedNodes, ws.maxClosedNodesInTree, n) + } else { + ws.removeNode(n) + } +} + +func (ws *priorityWriteSchedulerRFC7540) AdjustStream(streamID uint32, priority PriorityParam) { + if streamID == 0 { + panic("adjustPriority on root") + } + + // If streamID does not exist, there are two cases: + // - A closed stream that has been removed (this will have ID <= maxID) + // - An idle stream that is being used for "grouping" (this will have ID > maxID) + n := ws.nodes[streamID] + if n == nil { + if streamID <= ws.maxID || ws.maxIdleNodesInTree == 0 { + return + } + ws.maxID = streamID + n = &priorityNodeRFC7540{ + q: *ws.queuePool.get(), + id: streamID, + weight: priorityDefaultWeightRFC7540, + state: priorityNodeIdleRFC7540, + } + n.setParent(&ws.root) + ws.nodes[streamID] = n + ws.addClosedOrIdleNode(&ws.idleNodes, ws.maxIdleNodesInTree, n) + } + + // Section 5.3.1: A dependency on a stream that is not currently in the tree + // results in that stream being given a default priority (Section 5.3.5). 
+ parent := ws.nodes[priority.StreamDep] + if parent == nil { + n.setParent(&ws.root) + n.weight = priorityDefaultWeightRFC7540 + return + } + + // Ignore if the client tries to make a node its own parent. + if n == parent { + return + } + + // Section 5.3.3: + // "If a stream is made dependent on one of its own dependencies, the + // formerly dependent stream is first moved to be dependent on the + // reprioritized stream's previous parent. The moved dependency retains + // its weight." + // + // That is: if parent depends on n, move parent to depend on n.parent. + for x := parent.parent; x != nil; x = x.parent { + if x == n { + parent.setParent(n.parent) + break + } + } + + // Section 5.3.3: The exclusive flag causes the stream to become the sole + // dependency of its parent stream, causing other dependencies to become + // dependent on the exclusive stream. + if priority.Exclusive { + k := parent.kids + for k != nil { + next := k.next + if k != n { + k.setParent(n) + } + k = next + } + } + + n.setParent(parent) + n.weight = priority.Weight +} + +func (ws *priorityWriteSchedulerRFC7540) Push(wr FrameWriteRequest) { + var n *priorityNodeRFC7540 + if wr.isControl() { + n = &ws.root + } else { + id := wr.StreamID() + n = ws.nodes[id] + if n == nil { + // id is an idle or closed stream. wr should not be a HEADERS or + // DATA frame. In other case, we push wr onto the root, rather + // than creating a new priorityNode. 
+ if wr.DataSize() > 0 { + panic("add DATA on non-open stream") + } + n = &ws.root + } + } + n.q.push(wr) +} + +func (ws *priorityWriteSchedulerRFC7540) Pop() (wr FrameWriteRequest, ok bool) { + ws.root.walkReadyInOrder(false, &ws.tmp, func(n *priorityNodeRFC7540, openParent bool) bool { + limit := int32(math.MaxInt32) + if openParent { + limit = ws.writeThrottleLimit + } + wr, ok = n.q.consume(limit) + if !ok { + return false + } + n.addBytes(int64(wr.DataSize())) + // If B depends on A and B continuously has data available but A + // does not, gradually increase the throttling limit to allow B to + // steal more and more bandwidth from A. + if openParent { + ws.writeThrottleLimit += 1024 + if ws.writeThrottleLimit < 0 { + ws.writeThrottleLimit = math.MaxInt32 + } + } else if ws.enableWriteThrottle { + ws.writeThrottleLimit = 1024 + } + return true + }) + return wr, ok +} + +func (ws *priorityWriteSchedulerRFC7540) addClosedOrIdleNode(list *[]*priorityNodeRFC7540, maxSize int, n *priorityNodeRFC7540) { + if maxSize == 0 { + return + } + if len(*list) == maxSize { + // Remove the oldest node, then shift left. + ws.removeNode((*list)[0]) + x := (*list)[1:] + copy(*list, x) + *list = (*list)[:len(x)] + } + *list = append(*list, n) +} + +func (ws *priorityWriteSchedulerRFC7540) removeNode(n *priorityNodeRFC7540) { + for n.kids != nil { + n.kids.setParent(n.parent) + } + n.setParent(nil) + delete(ws.nodes, n.id) +} diff --git a/vendor/golang.org/x/net/http2/writesched_priority_rfc9218.go b/vendor/golang.org/x/net/http2/writesched_priority_rfc9218.go new file mode 100644 index 000000000..cb4cadc32 --- /dev/null +++ b/vendor/golang.org/x/net/http2/writesched_priority_rfc9218.go @@ -0,0 +1,209 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package http2 + +import ( + "fmt" + "math" +) + +type streamMetadata struct { + location *writeQueue + priority PriorityParam +} + +type priorityWriteSchedulerRFC9218 struct { + // control contains control frames (SETTINGS, PING, etc.). + control writeQueue + + // heads contain the head of a circular list of streams. + // We put these heads within a nested array that represents urgency and + // incremental, as defined in + // https://www.rfc-editor.org/rfc/rfc9218.html#name-priority-parameters. + // 8 represents u=0 up to u=7, and 2 represents i=false and i=true. + heads [8][2]*writeQueue + + // streams contains a mapping between each stream ID and their metadata, so + // we can quickly locate them when needing to, for example, adjust their + // priority. + streams map[uint32]streamMetadata + + // queuePool are empty queues for reuse. + queuePool writeQueuePool + + // prioritizeIncremental is used to determine whether we should prioritize + // incremental streams or not, when urgency is the same in a given Pop() + // call. + prioritizeIncremental bool +} + +func newPriorityWriteSchedulerRFC9218() WriteScheduler { + ws := &priorityWriteSchedulerRFC9218{ + streams: make(map[uint32]streamMetadata), + } + return ws +} + +func (ws *priorityWriteSchedulerRFC9218) OpenStream(streamID uint32, opt OpenStreamOptions) { + if ws.streams[streamID].location != nil { + panic(fmt.Errorf("stream %d already opened", streamID)) + } + q := ws.queuePool.get() + ws.streams[streamID] = streamMetadata{ + location: q, + priority: opt.priority, + } + + u, i := opt.priority.urgency, opt.priority.incremental + if ws.heads[u][i] == nil { + ws.heads[u][i] = q + q.next = q + q.prev = q + } else { + // Queues are stored in a ring. + // Insert the new stream before ws.head, putting it at the end of the list. 
+ q.prev = ws.heads[u][i].prev + q.next = ws.heads[u][i] + q.prev.next = q + q.next.prev = q + } +} + +func (ws *priorityWriteSchedulerRFC9218) CloseStream(streamID uint32) { + metadata := ws.streams[streamID] + q, u, i := metadata.location, metadata.priority.urgency, metadata.priority.incremental + if q == nil { + return + } + if q.next == q { + // This was the only open stream. + ws.heads[u][i] = nil + } else { + q.prev.next = q.next + q.next.prev = q.prev + if ws.heads[u][i] == q { + ws.heads[u][i] = q.next + } + } + delete(ws.streams, streamID) + ws.queuePool.put(q) +} + +func (ws *priorityWriteSchedulerRFC9218) AdjustStream(streamID uint32, priority PriorityParam) { + metadata := ws.streams[streamID] + q, u, i := metadata.location, metadata.priority.urgency, metadata.priority.incremental + if q == nil { + return + } + + // Remove stream from current location. + if q.next == q { + // This was the only open stream. + ws.heads[u][i] = nil + } else { + q.prev.next = q.next + q.next.prev = q.prev + if ws.heads[u][i] == q { + ws.heads[u][i] = q.next + } + } + + // Insert stream to the new queue. + u, i = priority.urgency, priority.incremental + if ws.heads[u][i] == nil { + ws.heads[u][i] = q + q.next = q + q.prev = q + } else { + // Queues are stored in a ring. + // Insert the new stream before ws.head, putting it at the end of the list. + q.prev = ws.heads[u][i].prev + q.next = ws.heads[u][i] + q.prev.next = q + q.next.prev = q + } + + // Update the metadata. + ws.streams[streamID] = streamMetadata{ + location: q, + priority: priority, + } +} + +func (ws *priorityWriteSchedulerRFC9218) Push(wr FrameWriteRequest) { + if wr.isControl() { + ws.control.push(wr) + return + } + q := ws.streams[wr.StreamID()].location + if q == nil { + // This is a closed stream. + // wr should not be a HEADERS or DATA frame. + // We push the request onto the control queue. 
+ if wr.DataSize() > 0 { + panic("add DATA on non-open stream") + } + ws.control.push(wr) + return + } + q.push(wr) +} + +func (ws *priorityWriteSchedulerRFC9218) Pop() (FrameWriteRequest, bool) { + // Control and RST_STREAM frames first. + if !ws.control.empty() { + return ws.control.shift(), true + } + + // On the next Pop(), we want to prioritize incremental if we prioritized + // non-incremental request of the same urgency this time. Vice-versa. + // i.e. when there are incremental and non-incremental requests at the same + // priority, we give 50% of our bandwidth to the incremental ones in + // aggregate and 50% to the first non-incremental one (since + // non-incremental streams do not use round-robin writes). + ws.prioritizeIncremental = !ws.prioritizeIncremental + + // Always prioritize lowest u (i.e. highest urgency level). + for u := range ws.heads { + for i := range ws.heads[u] { + // When we want to prioritize incremental, we try to pop i=true + // first before i=false when u is the same. + if ws.prioritizeIncremental { + i = (i + 1) % 2 + } + q := ws.heads[u][i] + if q == nil { + continue + } + for { + if wr, ok := q.consume(math.MaxInt32); ok { + if i == 1 { + // For incremental streams, we update head to q.next so + // we can round-robin between multiple streams that can + // immediately benefit from partial writes. + ws.heads[u][i] = q.next + } else { + // For non-incremental streams, we try to finish one to + // completion rather than doing round-robin. However, + // we update head here so that if q.consume() is !ok + // (e.g. the stream has no more frame to consume), head + // is updated to the next q that has frames to consume + // on future iterations. This way, we do not prioritize + // writing to unavailable stream on next Pop() calls, + // preventing head-of-line blocking. 
+ ws.heads[u][i] = q + } + return wr, true + } + q = q.next + if q == ws.heads[u][i] { + break + } + } + + } + } + return FrameWriteRequest{}, false +} diff --git a/vendor/golang.org/x/net/http2/writesched_roundrobin.go b/vendor/golang.org/x/net/http2/writesched_roundrobin.go index 54fe86322..737cff9ec 100644 --- a/vendor/golang.org/x/net/http2/writesched_roundrobin.go +++ b/vendor/golang.org/x/net/http2/writesched_roundrobin.go @@ -25,7 +25,7 @@ type roundRobinWriteScheduler struct { } // newRoundRobinWriteScheduler constructs a new write scheduler. -// The round robin scheduler priorizes control frames +// The round robin scheduler prioritizes control frames // like SETTINGS and PING over DATA frames. // When there are no control frames to send, it performs a round-robin // selection from the ready streams. diff --git a/vendor/golang.org/x/net/internal/httpcommon/request.go b/vendor/golang.org/x/net/internal/httpcommon/request.go index 4b7055317..1e10f89eb 100644 --- a/vendor/golang.org/x/net/internal/httpcommon/request.go +++ b/vendor/golang.org/x/net/internal/httpcommon/request.go @@ -51,7 +51,7 @@ type EncodeHeadersParam struct { DefaultUserAgent string } -// EncodeHeadersParam is the result of EncodeHeaders. +// EncodeHeadersResult is the result of EncodeHeaders. type EncodeHeadersResult struct { HasBody bool HasTrailers bool @@ -399,7 +399,7 @@ type ServerRequestResult struct { // If the request should be rejected, this is a short string suitable for passing // to the http2 package's CountError function. - // It might be a bit odd to return errors this way rather than returing an error, + // It might be a bit odd to return errors this way rather than returning an error, // but this ensures we don't forget to include a CountError reason. 
InvalidReason string } diff --git a/vendor/golang.org/x/net/internal/socks/socks.go b/vendor/golang.org/x/net/internal/socks/socks.go index 84fcc32b6..8eedb84ce 100644 --- a/vendor/golang.org/x/net/internal/socks/socks.go +++ b/vendor/golang.org/x/net/internal/socks/socks.go @@ -297,7 +297,7 @@ func (up *UsernamePassword) Authenticate(ctx context.Context, rw io.ReadWriter, b = append(b, up.Username...) b = append(b, byte(len(up.Password))) b = append(b, up.Password...) - // TODO(mikio): handle IO deadlines and cancelation if + // TODO(mikio): handle IO deadlines and cancellation if // necessary if _, err := rw.Write(b); err != nil { return err diff --git a/vendor/golang.org/x/net/trace/events.go b/vendor/golang.org/x/net/trace/events.go index c646a6952..3aaffdd1f 100644 --- a/vendor/golang.org/x/net/trace/events.go +++ b/vendor/golang.org/x/net/trace/events.go @@ -508,7 +508,7 @@ const eventsHTML = ` {{$el.When}} {{$el.ElapsedTime}} - {{$el.Title}} + {{$el.Title}} {{if $.Expanded}} diff --git a/vendor/golang.org/x/oauth2/clientcredentials/clientcredentials.go b/vendor/golang.org/x/oauth2/clientcredentials/clientcredentials.go new file mode 100644 index 000000000..3ee122d1e --- /dev/null +++ b/vendor/golang.org/x/oauth2/clientcredentials/clientcredentials.go @@ -0,0 +1,124 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package clientcredentials implements the OAuth2.0 "client credentials" token flow, +// also known as "two-legged OAuth 2.0". +// +// This should be used when the client is acting on its own behalf or when the client +// is the resource owner. It may also be used when requesting access to protected +// resources based on an authorization previously arranged with the authorization +// server. 
+// +// See https://tools.ietf.org/html/rfc6749#section-4.4 +package clientcredentials // import "golang.org/x/oauth2/clientcredentials" + +import ( + "context" + "fmt" + "net/http" + "net/url" + "strings" + + "golang.org/x/oauth2" + "golang.org/x/oauth2/internal" +) + +// Config describes a 2-legged OAuth2 flow, with both the +// client application information and the server's endpoint URLs. +type Config struct { + // ClientID is the application's ID. + ClientID string + + // ClientSecret is the application's secret. + ClientSecret string + + // TokenURL is the resource server's token endpoint + // URL. This is a constant specific to each server. + TokenURL string + + // Scopes specifies optional requested permissions. + Scopes []string + + // EndpointParams specifies additional parameters for requests to the token endpoint. + EndpointParams url.Values + + // AuthStyle optionally specifies how the endpoint wants the + // client ID & client secret sent. The zero value means to + // auto-detect. + AuthStyle oauth2.AuthStyle + + // authStyleCache caches which auth style to use when Endpoint.AuthStyle is + // the zero value (AuthStyleAutoDetect). + authStyleCache internal.LazyAuthStyleCache +} + +// Token uses client credentials to retrieve a token. +// +// The provided context optionally controls which HTTP client is used. See the [oauth2.HTTPClient] variable. +func (c *Config) Token(ctx context.Context) (*oauth2.Token, error) { + return c.TokenSource(ctx).Token() +} + +// Client returns an HTTP client using the provided token. +// The token will auto-refresh as necessary. +// +// The provided context optionally controls which HTTP client +// is returned. See the [oauth2.HTTPClient] variable. +// +// The returned [http.Client] and its Transport should not be modified. 
+func (c *Config) Client(ctx context.Context) *http.Client { + return oauth2.NewClient(ctx, c.TokenSource(ctx)) +} + +// TokenSource returns a [oauth2.TokenSource] that returns t until t expires, +// automatically refreshing it as necessary using the provided context and the +// client ID and client secret. +// +// Most users will use [Config.Client] instead. +func (c *Config) TokenSource(ctx context.Context) oauth2.TokenSource { + source := &tokenSource{ + ctx: ctx, + conf: c, + } + return oauth2.ReuseTokenSource(nil, source) +} + +type tokenSource struct { + ctx context.Context + conf *Config +} + +// Token refreshes the token by using a new client credentials request. +// tokens received this way do not include a refresh token +func (c *tokenSource) Token() (*oauth2.Token, error) { + v := url.Values{ + "grant_type": {"client_credentials"}, + } + if len(c.conf.Scopes) > 0 { + v.Set("scope", strings.Join(c.conf.Scopes, " ")) + } + for k, p := range c.conf.EndpointParams { + // Allow grant_type to be overridden to allow interoperability with + // non-compliant implementations. 
+ if _, ok := v[k]; ok && k != "grant_type" { + return nil, fmt.Errorf("oauth2: cannot overwrite parameter %q", k) + } + v[k] = p + } + + tk, err := internal.RetrieveToken(c.ctx, c.conf.ClientID, c.conf.ClientSecret, c.conf.TokenURL, v, internal.AuthStyle(c.conf.AuthStyle), c.conf.authStyleCache.Get()) + if err != nil { + if rErr, ok := err.(*internal.RetrieveError); ok { + return nil, (*oauth2.RetrieveError)(rErr) + } + return nil, err + } + t := &oauth2.Token{ + AccessToken: tk.AccessToken, + TokenType: tk.TokenType, + RefreshToken: tk.RefreshToken, + Expiry: tk.Expiry, + } + return t.WithExtra(tk.Raw), nil +} diff --git a/vendor/golang.org/x/oauth2/deviceauth.go b/vendor/golang.org/x/oauth2/deviceauth.go index e99c92f39..e783a9437 100644 --- a/vendor/golang.org/x/oauth2/deviceauth.go +++ b/vendor/golang.org/x/oauth2/deviceauth.go @@ -6,6 +6,7 @@ import ( "errors" "fmt" "io" + "mime" "net/http" "net/url" "strings" @@ -116,10 +117,38 @@ func retrieveDeviceAuth(ctx context.Context, c *Config, v url.Values) (*DeviceAu return nil, fmt.Errorf("oauth2: cannot auth device: %v", err) } if code := r.StatusCode; code < 200 || code > 299 { - return nil, &RetrieveError{ + retrieveError := &RetrieveError{ Response: r, Body: body, } + + content, _, _ := mime.ParseMediaType(r.Header.Get("Content-Type")) + switch content { + case "application/x-www-form-urlencoded", "text/plain": + // some endpoints return a query string + vals, err := url.ParseQuery(string(body)) + if err != nil { + return nil, retrieveError + } + retrieveError.ErrorCode = vals.Get("error") + retrieveError.ErrorDescription = vals.Get("error_description") + retrieveError.ErrorURI = vals.Get("error_uri") + default: + var tj struct { + // https://datatracker.ietf.org/doc/html/rfc6749#section-5.2 + ErrorCode string `json:"error"` + ErrorDescription string `json:"error_description"` + ErrorURI string `json:"error_uri"` + } + if json.Unmarshal(body, &tj) != nil { + return nil, retrieveError + } + 
retrieveError.ErrorCode = tj.ErrorCode + retrieveError.ErrorDescription = tj.ErrorDescription + retrieveError.ErrorURI = tj.ErrorURI + } + + return nil, retrieveError } da := &DeviceAuthResponse{} diff --git a/vendor/golang.org/x/oauth2/internal/doc.go b/vendor/golang.org/x/oauth2/internal/doc.go index 03265e888..8c7c475f2 100644 --- a/vendor/golang.org/x/oauth2/internal/doc.go +++ b/vendor/golang.org/x/oauth2/internal/doc.go @@ -2,5 +2,5 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// Package internal contains support packages for oauth2 package. +// Package internal contains support packages for [golang.org/x/oauth2]. package internal diff --git a/vendor/golang.org/x/oauth2/internal/oauth2.go b/vendor/golang.org/x/oauth2/internal/oauth2.go index 14989beaf..71ea6ad1f 100644 --- a/vendor/golang.org/x/oauth2/internal/oauth2.go +++ b/vendor/golang.org/x/oauth2/internal/oauth2.go @@ -13,7 +13,7 @@ import ( ) // ParseKey converts the binary contents of a private key file -// to an *rsa.PrivateKey. It detects whether the private key is in a +// to an [*rsa.PrivateKey]. It detects whether the private key is in a // PEM container or not. If so, it extracts the private key // from PEM container before conversion. It only supports PEM // containers with no passphrase. diff --git a/vendor/golang.org/x/oauth2/internal/token.go b/vendor/golang.org/x/oauth2/internal/token.go index e83ddeef0..8389f2462 100644 --- a/vendor/golang.org/x/oauth2/internal/token.go +++ b/vendor/golang.org/x/oauth2/internal/token.go @@ -10,7 +10,6 @@ import ( "errors" "fmt" "io" - "io/ioutil" "math" "mime" "net/http" @@ -26,9 +25,9 @@ import ( // the requests to access protected resources on the OAuth 2.0 // provider's backend. // -// This type is a mirror of oauth2.Token and exists to break +// This type is a mirror of [golang.org/x/oauth2.Token] and exists to break // an otherwise-circular dependency. 
Other internal packages -// should convert this Token into an oauth2.Token before use. +// should convert this Token into an [golang.org/x/oauth2.Token] before use. type Token struct { // AccessToken is the token that authorizes and authenticates // the requests. @@ -50,9 +49,16 @@ type Token struct { // mechanisms for that TokenSource will not be used. Expiry time.Time + // ExpiresIn is the OAuth2 wire format "expires_in" field, + // which specifies how many seconds later the token expires, + // relative to an unknown time base approximately around "now". + // It is the application's responsibility to populate + // `Expiry` from `ExpiresIn` when required. + ExpiresIn int64 `json:"expires_in,omitempty"` + // Raw optionally contains extra metadata from the server // when updating a token. - Raw interface{} + Raw any } // tokenJSON is the struct representing the HTTP response from OAuth2 @@ -99,14 +105,6 @@ func (e *expirationTime) UnmarshalJSON(b []byte) error { return nil } -// RegisterBrokenAuthHeaderProvider previously did something. It is now a no-op. -// -// Deprecated: this function no longer does anything. Caller code that -// wants to avoid potential extra HTTP requests made during -// auto-probing of the provider's auth style should set -// Endpoint.AuthStyle. -func RegisterBrokenAuthHeaderProvider(tokenURL string) {} - // AuthStyle is a copy of the golang.org/x/oauth2 package's AuthStyle type. type AuthStyle int @@ -143,6 +141,11 @@ func (lc *LazyAuthStyleCache) Get() *AuthStyleCache { return c } +type authStyleCacheKey struct { + url string + clientID string +} + // AuthStyleCache is the set of tokenURLs we've successfully used via // RetrieveToken and which style auth we ended up using. // It's called a cache, but it doesn't (yet?) shrink. It's expected that @@ -150,26 +153,26 @@ func (lc *LazyAuthStyleCache) Get() *AuthStyleCache { // small. 
type AuthStyleCache struct { mu sync.Mutex - m map[string]AuthStyle // keyed by tokenURL + m map[authStyleCacheKey]AuthStyle } // lookupAuthStyle reports which auth style we last used with tokenURL // when calling RetrieveToken and whether we have ever done so. -func (c *AuthStyleCache) lookupAuthStyle(tokenURL string) (style AuthStyle, ok bool) { +func (c *AuthStyleCache) lookupAuthStyle(tokenURL, clientID string) (style AuthStyle, ok bool) { c.mu.Lock() defer c.mu.Unlock() - style, ok = c.m[tokenURL] + style, ok = c.m[authStyleCacheKey{tokenURL, clientID}] return } // setAuthStyle adds an entry to authStyleCache, documented above. -func (c *AuthStyleCache) setAuthStyle(tokenURL string, v AuthStyle) { +func (c *AuthStyleCache) setAuthStyle(tokenURL, clientID string, v AuthStyle) { c.mu.Lock() defer c.mu.Unlock() if c.m == nil { - c.m = make(map[string]AuthStyle) + c.m = make(map[authStyleCacheKey]AuthStyle) } - c.m[tokenURL] = v + c.m[authStyleCacheKey{tokenURL, clientID}] = v } // newTokenRequest returns a new *http.Request to retrieve a new token @@ -210,9 +213,9 @@ func cloneURLValues(v url.Values) url.Values { } func RetrieveToken(ctx context.Context, clientID, clientSecret, tokenURL string, v url.Values, authStyle AuthStyle, styleCache *AuthStyleCache) (*Token, error) { - needsAuthStyleProbe := authStyle == 0 + needsAuthStyleProbe := authStyle == AuthStyleUnknown if needsAuthStyleProbe { - if style, ok := styleCache.lookupAuthStyle(tokenURL); ok { + if style, ok := styleCache.lookupAuthStyle(tokenURL, clientID); ok { authStyle = style needsAuthStyleProbe = false } else { @@ -242,7 +245,7 @@ func RetrieveToken(ctx context.Context, clientID, clientSecret, tokenURL string, token, err = doTokenRoundTrip(ctx, req) } if needsAuthStyleProbe && err == nil { - styleCache.setAuthStyle(tokenURL, authStyle) + styleCache.setAuthStyle(tokenURL, clientID, authStyle) } // Don't overwrite `RefreshToken` with an empty value // if this was a token refreshing request. 
@@ -257,7 +260,7 @@ func doTokenRoundTrip(ctx context.Context, req *http.Request) (*Token, error) { if err != nil { return nil, err } - body, err := ioutil.ReadAll(io.LimitReader(r.Body, 1<<20)) + body, err := io.ReadAll(io.LimitReader(r.Body, 1<<20)) r.Body.Close() if err != nil { return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err) @@ -312,7 +315,8 @@ func doTokenRoundTrip(ctx context.Context, req *http.Request) (*Token, error) { TokenType: tj.TokenType, RefreshToken: tj.RefreshToken, Expiry: tj.expiry(), - Raw: make(map[string]interface{}), + ExpiresIn: int64(tj.ExpiresIn), + Raw: make(map[string]any), } json.Unmarshal(body, &token.Raw) // no error checks for optional fields } diff --git a/vendor/golang.org/x/oauth2/internal/transport.go b/vendor/golang.org/x/oauth2/internal/transport.go index b9db01ddf..afc0aeb27 100644 --- a/vendor/golang.org/x/oauth2/internal/transport.go +++ b/vendor/golang.org/x/oauth2/internal/transport.go @@ -9,8 +9,8 @@ import ( "net/http" ) -// HTTPClient is the context key to use with golang.org/x/net/context's -// WithValue function to associate an *http.Client value with a context. +// HTTPClient is the context key to use with [context.WithValue] +// to associate an [*http.Client] value with a context. var HTTPClient ContextKey // ContextKey is just an empty struct. It exists so HTTPClient can be diff --git a/vendor/golang.org/x/oauth2/oauth2.go b/vendor/golang.org/x/oauth2/oauth2.go index 74f052aa9..5c527d31f 100644 --- a/vendor/golang.org/x/oauth2/oauth2.go +++ b/vendor/golang.org/x/oauth2/oauth2.go @@ -9,7 +9,6 @@ package oauth2 // import "golang.org/x/oauth2" import ( - "bytes" "context" "errors" "net/http" @@ -22,9 +21,9 @@ import ( ) // NoContext is the default context you should supply if not using -// your own context.Context (see https://golang.org/x/net/context). +// your own [context.Context]. // -// Deprecated: Use context.Background() or context.TODO() instead. 
+// Deprecated: Use [context.Background] or [context.TODO] instead. var NoContext = context.TODO() // RegisterBrokenAuthHeaderProvider previously did something. It is now a no-op. @@ -37,8 +36,8 @@ func RegisterBrokenAuthHeaderProvider(tokenURL string) {} // Config describes a typical 3-legged OAuth2 flow, with both the // client application information and the server's endpoint URLs. -// For the client credentials 2-legged OAuth2 flow, see the clientcredentials -// package (https://golang.org/x/oauth2/clientcredentials). +// For the client credentials 2-legged OAuth2 flow, see the +// [golang.org/x/oauth2/clientcredentials] package. type Config struct { // ClientID is the application's ID. ClientID string @@ -46,7 +45,7 @@ type Config struct { // ClientSecret is the application's secret. ClientSecret string - // Endpoint contains the resource server's token endpoint + // Endpoint contains the authorization server's token endpoint // URLs. These are constants specific to each server and are // often available via site-specific packages, such as // google.Endpoint or github.Endpoint. @@ -99,7 +98,7 @@ const ( // in the POST body as application/x-www-form-urlencoded parameters. AuthStyleInParams AuthStyle = 1 - // AuthStyleInHeader sends the client_id and client_password + // AuthStyleInHeader sends the client_id and client_secret // using HTTP Basic Authorization. This is an optional style // described in the OAuth2 RFC 6749 section 2.3.1. AuthStyleInHeader AuthStyle = 2 @@ -135,7 +134,7 @@ type setParam struct{ k, v string } func (p setParam) setValue(m url.Values) { m.Set(p.k, p.v) } -// SetAuthURLParam builds an AuthCodeOption which passes key/value parameters +// SetAuthURLParam builds an [AuthCodeOption] which passes key/value parameters // to a provider's authorization endpoint. 
func SetAuthURLParam(key, value string) AuthCodeOption { return setParam{key, value} @@ -148,8 +147,8 @@ func SetAuthURLParam(key, value string) AuthCodeOption { // request and callback. The authorization server includes this value when // redirecting the user agent back to the client. // -// Opts may include AccessTypeOnline or AccessTypeOffline, as well -// as ApprovalForce. +// Opts may include [AccessTypeOnline] or [AccessTypeOffline], as well +// as [ApprovalForce]. // // To protect against CSRF attacks, opts should include a PKCE challenge // (S256ChallengeOption). Not all servers support PKCE. An alternative is to @@ -158,7 +157,7 @@ func SetAuthURLParam(key, value string) AuthCodeOption { // PKCE), https://www.oauth.com/oauth2-servers/pkce/ and // https://www.ietf.org/archive/id/draft-ietf-oauth-v2-1-09.html#name-cross-site-request-forgery (describing both approaches) func (c *Config) AuthCodeURL(state string, opts ...AuthCodeOption) string { - var buf bytes.Buffer + var buf strings.Builder buf.WriteString(c.Endpoint.AuthURL) v := url.Values{ "response_type": {"code"}, @@ -194,7 +193,7 @@ func (c *Config) AuthCodeURL(state string, opts ...AuthCodeOption) string { // and when other authorization grant types are not available." // See https://tools.ietf.org/html/rfc6749#section-4.3 for more info. // -// The provided context optionally controls which HTTP client is used. See the HTTPClient variable. +// The provided context optionally controls which HTTP client is used. See the [HTTPClient] variable. func (c *Config) PasswordCredentialsToken(ctx context.Context, username, password string) (*Token, error) { v := url.Values{ "grant_type": {"password"}, @@ -212,10 +211,10 @@ func (c *Config) PasswordCredentialsToken(ctx context.Context, username, passwor // It is used after a resource provider redirects the user back // to the Redirect URI (the URL obtained from AuthCodeURL). // -// The provided context optionally controls which HTTP client is used. 
See the HTTPClient variable. +// The provided context optionally controls which HTTP client is used. See the [HTTPClient] variable. // -// The code will be in the *http.Request.FormValue("code"). Before -// calling Exchange, be sure to validate FormValue("state") if you are +// The code will be in the [http.Request.FormValue]("code"). Before +// calling Exchange, be sure to validate [http.Request.FormValue]("state") if you are // using it to protect against CSRF attacks. // // If using PKCE to protect against CSRF attacks, opts should include a @@ -242,10 +241,10 @@ func (c *Config) Client(ctx context.Context, t *Token) *http.Client { return NewClient(ctx, c.TokenSource(ctx, t)) } -// TokenSource returns a TokenSource that returns t until t expires, +// TokenSource returns a [TokenSource] that returns t until t expires, // automatically refreshing it as necessary using the provided context. // -// Most users will use Config.Client instead. +// Most users will use [Config.Client] instead. func (c *Config) TokenSource(ctx context.Context, t *Token) TokenSource { tkr := &tokenRefresher{ ctx: ctx, @@ -260,7 +259,7 @@ func (c *Config) TokenSource(ctx context.Context, t *Token) TokenSource { } } -// tokenRefresher is a TokenSource that makes "grant_type"=="refresh_token" +// tokenRefresher is a TokenSource that makes "grant_type=refresh_token" // HTTP requests to renew a token using a RefreshToken. type tokenRefresher struct { ctx context.Context // used to get HTTP requests @@ -288,7 +287,7 @@ func (tf *tokenRefresher) Token() (*Token, error) { if tf.refreshToken != tk.RefreshToken { tf.refreshToken = tk.RefreshToken } - return tk, err + return tk, nil } // reuseTokenSource is a TokenSource that holds a single token in memory @@ -305,8 +304,7 @@ type reuseTokenSource struct { } // Token returns the current token if it's still valid, else will -// refresh the current token (using r.Context for HTTP client -// information) and return the new one. 
+// refresh the current token and return the new one. func (s *reuseTokenSource) Token() (*Token, error) { s.mu.Lock() defer s.mu.Unlock() @@ -322,7 +320,7 @@ func (s *reuseTokenSource) Token() (*Token, error) { return t, nil } -// StaticTokenSource returns a TokenSource that always returns the same token. +// StaticTokenSource returns a [TokenSource] that always returns the same token. // Because the provided token t is never refreshed, StaticTokenSource is only // useful for tokens that never expire. func StaticTokenSource(t *Token) TokenSource { @@ -338,16 +336,16 @@ func (s staticTokenSource) Token() (*Token, error) { return s.t, nil } -// HTTPClient is the context key to use with golang.org/x/net/context's -// WithValue function to associate an *http.Client value with a context. +// HTTPClient is the context key to use with [context.WithValue] +// to associate a [*http.Client] value with a context. var HTTPClient internal.ContextKey -// NewClient creates an *http.Client from a Context and TokenSource. +// NewClient creates an [*http.Client] from a [context.Context] and [TokenSource]. // The returned client is not valid beyond the lifetime of the context. // -// Note that if a custom *http.Client is provided via the Context it +// Note that if a custom [*http.Client] is provided via the [context.Context] it // is used only for token acquisition and is not used to configure the -// *http.Client returned from NewClient. +// [*http.Client] returned from NewClient. // // As a special case, if src is nil, a non-OAuth2 client is returned // using the provided context. 
This exists to support related OAuth2 @@ -356,15 +354,19 @@ func NewClient(ctx context.Context, src TokenSource) *http.Client { if src == nil { return internal.ContextClient(ctx) } + cc := internal.ContextClient(ctx) return &http.Client{ Transport: &Transport{ - Base: internal.ContextClient(ctx).Transport, + Base: cc.Transport, Source: ReuseTokenSource(nil, src), }, + CheckRedirect: cc.CheckRedirect, + Jar: cc.Jar, + Timeout: cc.Timeout, } } -// ReuseTokenSource returns a TokenSource which repeatedly returns the +// ReuseTokenSource returns a [TokenSource] which repeatedly returns the // same token as long as it's valid, starting with t. // When its cached token is invalid, a new token is obtained from src. // @@ -372,10 +374,10 @@ func NewClient(ctx context.Context, src TokenSource) *http.Client { // (such as a file on disk) between runs of a program, rather than // obtaining new tokens unnecessarily. // -// The initial token t may be nil, in which case the TokenSource is +// The initial token t may be nil, in which case the [TokenSource] is // wrapped in a caching version if it isn't one already. This also // means it's always safe to wrap ReuseTokenSource around any other -// TokenSource without adverse effects. +// [TokenSource] without adverse effects. func ReuseTokenSource(t *Token, src TokenSource) TokenSource { // Don't wrap a reuseTokenSource in itself. That would work, // but cause an unnecessary number of mutex operations. @@ -393,8 +395,8 @@ func ReuseTokenSource(t *Token, src TokenSource) TokenSource { } } -// ReuseTokenSourceWithExpiry returns a TokenSource that acts in the same manner as the -// TokenSource returned by ReuseTokenSource, except the expiry buffer is +// ReuseTokenSourceWithExpiry returns a [TokenSource] that acts in the same manner as the +// [TokenSource] returned by [ReuseTokenSource], except the expiry buffer is // configurable. The expiration time of a token is calculated as // t.Expiry.Add(-earlyExpiry). 
func ReuseTokenSourceWithExpiry(t *Token, src TokenSource, earlyExpiry time.Duration) TokenSource { diff --git a/vendor/golang.org/x/oauth2/pkce.go b/vendor/golang.org/x/oauth2/pkce.go index 50593b6df..f99384f0f 100644 --- a/vendor/golang.org/x/oauth2/pkce.go +++ b/vendor/golang.org/x/oauth2/pkce.go @@ -1,6 +1,7 @@ // Copyright 2023 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. + package oauth2 import ( @@ -20,9 +21,9 @@ const ( // This follows recommendations in RFC 7636. // // A fresh verifier should be generated for each authorization. -// S256ChallengeOption(verifier) should then be passed to Config.AuthCodeURL -// (or Config.DeviceAccess) and VerifierOption(verifier) to Config.Exchange -// (or Config.DeviceAccessToken). +// The resulting verifier should be passed to [Config.AuthCodeURL] or [Config.DeviceAuth] +// with [S256ChallengeOption], and to [Config.Exchange] or [Config.DeviceAccessToken] +// with [VerifierOption]. func GenerateVerifier() string { // "RECOMMENDED that the output of a suitable random number generator be // used to create a 32-octet sequence. The octet sequence is then @@ -36,22 +37,22 @@ func GenerateVerifier() string { return base64.RawURLEncoding.EncodeToString(data) } -// VerifierOption returns a PKCE code verifier AuthCodeOption. It should be -// passed to Config.Exchange or Config.DeviceAccessToken only. +// VerifierOption returns a PKCE code verifier [AuthCodeOption]. It should only be +// passed to [Config.Exchange] or [Config.DeviceAccessToken]. func VerifierOption(verifier string) AuthCodeOption { return setParam{k: codeVerifierKey, v: verifier} } // S256ChallengeFromVerifier returns a PKCE code challenge derived from verifier with method S256. // -// Prefer to use S256ChallengeOption where possible. +// Prefer to use [S256ChallengeOption] where possible. 
func S256ChallengeFromVerifier(verifier string) string { sha := sha256.Sum256([]byte(verifier)) return base64.RawURLEncoding.EncodeToString(sha[:]) } -// S256ChallengeOption derives a PKCE code challenge derived from verifier with -// method S256. It should be passed to Config.AuthCodeURL or Config.DeviceAccess +// S256ChallengeOption derives a PKCE code challenge from the verifier with +// method S256. It should be passed to [Config.AuthCodeURL] or [Config.DeviceAuth] // only. func S256ChallengeOption(verifier string) AuthCodeOption { return challengeOption{ diff --git a/vendor/golang.org/x/oauth2/token.go b/vendor/golang.org/x/oauth2/token.go index 109997d77..e995eebb5 100644 --- a/vendor/golang.org/x/oauth2/token.go +++ b/vendor/golang.org/x/oauth2/token.go @@ -44,7 +44,7 @@ type Token struct { // Expiry is the optional expiration time of the access token. // - // If zero, TokenSource implementations will reuse the same + // If zero, [TokenSource] implementations will reuse the same // token forever and RefreshToken or equivalent // mechanisms for that TokenSource will not be used. Expiry time.Time `json:"expiry,omitempty"` @@ -58,7 +58,7 @@ type Token struct { // raw optionally contains extra metadata from the server // when updating a token. - raw interface{} + raw any // expiryDelta is used to calculate when a token is considered // expired, by subtracting from Expiry. If zero, defaultExpiryDelta @@ -86,16 +86,16 @@ func (t *Token) Type() string { // SetAuthHeader sets the Authorization header to r using the access // token in t. // -// This method is unnecessary when using Transport or an HTTP Client +// This method is unnecessary when using [Transport] or an HTTP Client // returned by this package. 
func (t *Token) SetAuthHeader(r *http.Request) { r.Header.Set("Authorization", t.Type()+" "+t.AccessToken) } -// WithExtra returns a new Token that's a clone of t, but using the +// WithExtra returns a new [Token] that's a clone of t, but using the // provided raw extra map. This is only intended for use by packages // implementing derivative OAuth2 flows. -func (t *Token) WithExtra(extra interface{}) *Token { +func (t *Token) WithExtra(extra any) *Token { t2 := new(Token) *t2 = *t t2.raw = extra @@ -103,10 +103,10 @@ func (t *Token) WithExtra(extra interface{}) *Token { } // Extra returns an extra field. -// Extra fields are key-value pairs returned by the server as a +// Extra fields are key-value pairs returned by the server as // part of the token retrieval response. -func (t *Token) Extra(key string) interface{} { - if raw, ok := t.raw.(map[string]interface{}); ok { +func (t *Token) Extra(key string) any { + if raw, ok := t.raw.(map[string]any); ok { return raw[key] } @@ -163,13 +163,14 @@ func tokenFromInternal(t *internal.Token) *Token { TokenType: t.TokenType, RefreshToken: t.RefreshToken, Expiry: t.Expiry, + ExpiresIn: t.ExpiresIn, raw: t.Raw, } } // retrieveToken takes a *Config and uses that to retrieve an *internal.Token. // This token is then mapped from *internal.Token into an *oauth2.Token which is returned along -// with an error.. +// with an error. 
func retrieveToken(ctx context.Context, c *Config, v url.Values) (*Token, error) { tk, err := internal.RetrieveToken(ctx, c.ClientID, c.ClientSecret, c.Endpoint.TokenURL, v, internal.AuthStyle(c.Endpoint.AuthStyle), c.authStyleCache.Get()) if err != nil { diff --git a/vendor/golang.org/x/oauth2/transport.go b/vendor/golang.org/x/oauth2/transport.go index 90657915f..9922ec331 100644 --- a/vendor/golang.org/x/oauth2/transport.go +++ b/vendor/golang.org/x/oauth2/transport.go @@ -11,12 +11,12 @@ import ( "sync" ) -// Transport is an http.RoundTripper that makes OAuth 2.0 HTTP requests, -// wrapping a base RoundTripper and adding an Authorization header -// with a token from the supplied Sources. +// Transport is an [http.RoundTripper] that makes OAuth 2.0 HTTP requests, +// wrapping a base [http.RoundTripper] and adding an Authorization header +// with a token from the supplied [TokenSource]. // // Transport is a low-level mechanism. Most code will use the -// higher-level Config.Client method instead. +// higher-level [Config.Client] method instead. type Transport struct { // Source supplies the token to add to outgoing requests' // Authorization headers. @@ -47,7 +47,7 @@ func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) { return nil, err } - req2 := cloneRequest(req) // per RoundTripper contract + req2 := req.Clone(req.Context()) token.SetAuthHeader(req2) // req.Body is assumed to be closed by the base RoundTripper. @@ -58,7 +58,7 @@ func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) { var cancelOnce sync.Once // CancelRequest does nothing. It used to be a legacy cancellation mechanism -// but now only it only logs on first use to warn that it's deprecated. +// but now only logs on first use to warn that it's deprecated. // // Deprecated: use contexts for cancellation instead. 
func (t *Transport) CancelRequest(req *http.Request) { @@ -73,17 +73,3 @@ func (t *Transport) base() http.RoundTripper { } return http.DefaultTransport } - -// cloneRequest returns a clone of the provided *http.Request. -// The clone is a shallow copy of the struct and its Header map. -func cloneRequest(r *http.Request) *http.Request { - // shallow copy of the struct - r2 := new(http.Request) - *r2 = *r - // deep copy of the Header - r2.Header = make(http.Header, len(r.Header)) - for k, s := range r.Header { - r2.Header[k] = append([]string(nil), s...) - } - return r2 -} diff --git a/vendor/golang.org/x/sync/errgroup/errgroup.go b/vendor/golang.org/x/sync/errgroup/errgroup.go index a4ea5d14f..f69fd7546 100644 --- a/vendor/golang.org/x/sync/errgroup/errgroup.go +++ b/vendor/golang.org/x/sync/errgroup/errgroup.go @@ -3,7 +3,7 @@ // license that can be found in the LICENSE file. // Package errgroup provides synchronization, error propagation, and Context -// cancelation for groups of goroutines working on subtasks of a common task. +// cancellation for groups of goroutines working on subtasks of a common task. // // [errgroup.Group] is related to [sync.WaitGroup] but adds handling of tasks // returning errors. @@ -18,7 +18,7 @@ import ( type token struct{} // A Group is a collection of goroutines working on subtasks that are part of -// the same overall task. +// the same overall task. A Group should not be reused for different tasks. // // A zero Group is valid, has no limit on the number of active goroutines, // and does not cancel on error. @@ -61,11 +61,14 @@ func (g *Group) Wait() error { } // Go calls the given function in a new goroutine. +// +// The first call to Go must happen before a Wait. // It blocks until the new goroutine can be added without the number of -// active goroutines in the group exceeding the configured limit. +// goroutines in the group exceeding the configured limit. 
// -// The first call to return a non-nil error cancels the group's context, if the -// group was created by calling WithContext. The error will be returned by Wait. +// The first goroutine in the group that returns a non-nil error will +// cancel the associated Context, if any. The error will be returned +// by Wait. func (g *Group) Go(f func() error) { if g.sem != nil { g.sem <- token{} @@ -75,6 +78,18 @@ func (g *Group) Go(f func() error) { go func() { defer g.done() + // It is tempting to propagate panics from f() + // up to the goroutine that calls Wait, but + // it creates more problems than it solves: + // - it delays panics arbitrarily, + // making bugs harder to detect; + // - it turns f's panic stack into a mere value, + // hiding it from crash-monitoring tools; + // - it risks deadlocks that hide the panic entirely, + // if f's panic leaves the program in a state + // that prevents the Wait call from being reached. + // See #53757, #74275, #74304, #74306. + if err := f(); err != nil { g.errOnce.Do(func() { g.err = err @@ -129,8 +144,8 @@ func (g *Group) SetLimit(n int) { g.sem = nil return } - if len(g.sem) != 0 { - panic(fmt.Errorf("errgroup: modify limit while %v goroutines in the group are still active", len(g.sem))) + if active := len(g.sem); active != 0 { + panic(fmt.Errorf("errgroup: modify limit while %v goroutines in the group are still active", active)) } g.sem = make(chan token, n) } diff --git a/vendor/golang.org/x/sys/plan9/pwd_go15_plan9.go b/vendor/golang.org/x/sys/plan9/pwd_go15_plan9.go deleted file mode 100644 index 73687de74..000000000 --- a/vendor/golang.org/x/sys/plan9/pwd_go15_plan9.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build go1.5 - -package plan9 - -import "syscall" - -func fixwd() { - syscall.Fixwd() -} - -func Getwd() (wd string, err error) { - return syscall.Getwd() -} - -func Chdir(path string) error { - return syscall.Chdir(path) -} diff --git a/vendor/golang.org/x/sys/plan9/pwd_plan9.go b/vendor/golang.org/x/sys/plan9/pwd_plan9.go index fb9458218..7a76489db 100644 --- a/vendor/golang.org/x/sys/plan9/pwd_plan9.go +++ b/vendor/golang.org/x/sys/plan9/pwd_plan9.go @@ -2,22 +2,18 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build !go1.5 - package plan9 +import "syscall" + func fixwd() { + syscall.Fixwd() } func Getwd() (wd string, err error) { - fd, err := open(".", O_RDONLY) - if err != nil { - return "", err - } - defer Close(fd) - return Fd2path(fd) + return syscall.Getwd() } func Chdir(path string) error { - return chdir(path) + return syscall.Chdir(path) } diff --git a/vendor/golang.org/x/sys/unix/affinity_linux.go b/vendor/golang.org/x/sys/unix/affinity_linux.go index 6e5c81acd..3ea470387 100644 --- a/vendor/golang.org/x/sys/unix/affinity_linux.go +++ b/vendor/golang.org/x/sys/unix/affinity_linux.go @@ -38,8 +38,15 @@ func SchedSetaffinity(pid int, set *CPUSet) error { // Zero clears the set s, so that it contains no CPUs. func (s *CPUSet) Zero() { + clear(s[:]) +} + +// Fill adds all possible CPU bits to the set s. On Linux, [SchedSetaffinity] +// will silently ignore any invalid CPU bits in [CPUSet] so this is an +// efficient way of resetting the CPU affinity of a process. +func (s *CPUSet) Fill() { for i := range s { - s[i] = 0 + s[i] = ^cpuMask(0) } } diff --git a/vendor/golang.org/x/sys/unix/fdset.go b/vendor/golang.org/x/sys/unix/fdset.go index 9e83d18cd..62ed12645 100644 --- a/vendor/golang.org/x/sys/unix/fdset.go +++ b/vendor/golang.org/x/sys/unix/fdset.go @@ -23,7 +23,5 @@ func (fds *FdSet) IsSet(fd int) bool { // Zero clears the set fds. 
func (fds *FdSet) Zero() { - for i := range fds.Bits { - fds.Bits[i] = 0 - } + clear(fds.Bits[:]) } diff --git a/vendor/golang.org/x/sys/unix/ifreq_linux.go b/vendor/golang.org/x/sys/unix/ifreq_linux.go index 848840ae4..309f5a2b0 100644 --- a/vendor/golang.org/x/sys/unix/ifreq_linux.go +++ b/vendor/golang.org/x/sys/unix/ifreq_linux.go @@ -111,9 +111,7 @@ func (ifr *Ifreq) SetUint32(v uint32) { // clear zeroes the ifreq's union field to prevent trailing garbage data from // being sent to the kernel if an ifreq is reused. func (ifr *Ifreq) clear() { - for i := range ifr.raw.Ifru { - ifr.raw.Ifru[i] = 0 - } + clear(ifr.raw.Ifru[:]) } // TODO(mdlayher): export as IfreqData? For now we can provide helpers such as diff --git a/vendor/golang.org/x/sys/unix/mkall.sh b/vendor/golang.org/x/sys/unix/mkall.sh index e6f31d374..d0ed61191 100644 --- a/vendor/golang.org/x/sys/unix/mkall.sh +++ b/vendor/golang.org/x/sys/unix/mkall.sh @@ -49,6 +49,7 @@ esac if [[ "$GOOS" = "linux" ]]; then # Use the Docker-based build system # Files generated through docker (use $cmd so you can Ctl-C the build or run) + set -e $cmd docker build --tag generate:$GOOS $GOOS $cmd docker run --interactive --tty --volume $(cd -- "$(dirname -- "$0")/.." 
&& pwd):/build generate:$GOOS exit diff --git a/vendor/golang.org/x/sys/unix/mkerrors.sh b/vendor/golang.org/x/sys/unix/mkerrors.sh index 6ab02b6c3..fd39be4ef 100644 --- a/vendor/golang.org/x/sys/unix/mkerrors.sh +++ b/vendor/golang.org/x/sys/unix/mkerrors.sh @@ -226,6 +226,7 @@ struct ltchars { #include #include #include +#include #include #include #include @@ -255,6 +256,7 @@ struct ltchars { #include #include #include +#include #include #include #include @@ -349,6 +351,9 @@ struct ltchars { #define _HIDIOCGRAWPHYS HIDIOCGRAWPHYS(_HIDIOCGRAWPHYS_LEN) #define _HIDIOCGRAWUNIQ HIDIOCGRAWUNIQ(_HIDIOCGRAWUNIQ_LEN) +// Renamed in v6.16, commit c6d732c38f93 ("net: ethtool: remove duplicate defines for family info") +#define ETHTOOL_FAMILY_NAME ETHTOOL_GENL_NAME +#define ETHTOOL_FAMILY_VERSION ETHTOOL_GENL_VERSION ' includes_NetBSD=' @@ -526,6 +531,7 @@ ccflags="$@" $2 ~ /^O[CNPFPL][A-Z]+[^_][A-Z]+$/ || $2 ~ /^(NL|CR|TAB|BS|VT|FF)DLY$/ || $2 ~ /^(NL|CR|TAB|BS|VT|FF)[0-9]$/ || + $2 ~ /^(DT|EI|ELF|EV|NN|NT|PF|SHF|SHN|SHT|STB|STT|VER)_/ || $2 ~ /^O?XTABS$/ || $2 ~ /^TC[IO](ON|OFF)$/ || $2 ~ /^IN_/ || @@ -608,7 +614,7 @@ ccflags="$@" $2 !~ /IOC_MAGIC/ && $2 ~ /^[A-Z][A-Z0-9_]+_MAGIC2?$/ || $2 ~ /^(VM|VMADDR)_/ || - $2 ~ /^IOCTL_VM_SOCKETS_/ || + $2 ~ /^(IOCTL_VM_SOCKETS_|IOCTL_MEI_)/ || $2 ~ /^(TASKSTATS|TS)_/ || $2 ~ /^CGROUPSTATS_/ || $2 ~ /^GENL_/ || diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin.go b/vendor/golang.org/x/sys/unix/syscall_darwin.go index 099867dee..7838ca5db 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin.go @@ -602,6 +602,95 @@ func Connectx(fd int, srcIf uint32, srcAddr, dstAddr Sockaddr, associd SaeAssocI return } +const minIovec = 8 + +func Readv(fd int, iovs [][]byte) (n int, err error) { + iovecs := make([]Iovec, 0, minIovec) + iovecs = appendBytes(iovecs, iovs) + n, err = readv(fd, iovecs) + readvRacedetect(iovecs, n, err) + return n, err +} + +func Preadv(fd int, iovs [][]byte, 
offset int64) (n int, err error) { + iovecs := make([]Iovec, 0, minIovec) + iovecs = appendBytes(iovecs, iovs) + n, err = preadv(fd, iovecs, offset) + readvRacedetect(iovecs, n, err) + return n, err +} + +func Writev(fd int, iovs [][]byte) (n int, err error) { + iovecs := make([]Iovec, 0, minIovec) + iovecs = appendBytes(iovecs, iovs) + if raceenabled { + raceReleaseMerge(unsafe.Pointer(&ioSync)) + } + n, err = writev(fd, iovecs) + writevRacedetect(iovecs, n) + return n, err +} + +func Pwritev(fd int, iovs [][]byte, offset int64) (n int, err error) { + iovecs := make([]Iovec, 0, minIovec) + iovecs = appendBytes(iovecs, iovs) + if raceenabled { + raceReleaseMerge(unsafe.Pointer(&ioSync)) + } + n, err = pwritev(fd, iovecs, offset) + writevRacedetect(iovecs, n) + return n, err +} + +func appendBytes(vecs []Iovec, bs [][]byte) []Iovec { + for _, b := range bs { + var v Iovec + v.SetLen(len(b)) + if len(b) > 0 { + v.Base = &b[0] + } else { + v.Base = (*byte)(unsafe.Pointer(&_zero)) + } + vecs = append(vecs, v) + } + return vecs +} + +func writevRacedetect(iovecs []Iovec, n int) { + if !raceenabled { + return + } + for i := 0; n > 0 && i < len(iovecs); i++ { + m := int(iovecs[i].Len) + if m > n { + m = n + } + n -= m + if m > 0 { + raceReadRange(unsafe.Pointer(iovecs[i].Base), m) + } + } +} + +func readvRacedetect(iovecs []Iovec, n int, err error) { + if !raceenabled { + return + } + for i := 0; n > 0 && i < len(iovecs); i++ { + m := int(iovecs[i].Len) + if m > n { + m = n + } + n -= m + if m > 0 { + raceWriteRange(unsafe.Pointer(iovecs[i].Base), m) + } + } + if err == nil { + raceAcquire(unsafe.Pointer(&ioSync)) + } +} + //sys connectx(fd int, endpoints *SaEndpoints, associd SaeAssocID, flags uint32, iov []Iovec, n *uintptr, connid *SaeConnID) (err error) //sys sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer, flags int) (err error) @@ -705,3 +794,7 @@ func Connectx(fd int, srcIf uint32, srcAddr, dstAddr Sockaddr, associd SaeAssocI //sys 
write(fd int, p []byte) (n int, err error) //sys mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) //sys munmap(addr uintptr, length uintptr) (err error) +//sys readv(fd int, iovecs []Iovec) (n int, err error) +//sys preadv(fd int, iovecs []Iovec, offset int64) (n int, err error) +//sys writev(fd int, iovecs []Iovec) (n int, err error) +//sys pwritev(fd int, iovecs []Iovec, offset int64) (n int, err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_linux.go b/vendor/golang.org/x/sys/unix/syscall_linux.go index 230a94549..06c0eea6f 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux.go @@ -13,6 +13,7 @@ package unix import ( "encoding/binary" + "slices" "strconv" "syscall" "time" @@ -417,7 +418,7 @@ func (sa *SockaddrUnix) sockaddr() (unsafe.Pointer, _Socklen, error) { return nil, 0, EINVAL } sa.raw.Family = AF_UNIX - for i := 0; i < n; i++ { + for i := range n { sa.raw.Path[i] = int8(name[i]) } // length is family (uint16), name, NUL. 
@@ -507,7 +508,7 @@ func (sa *SockaddrL2) sockaddr() (unsafe.Pointer, _Socklen, error) { psm := (*[2]byte)(unsafe.Pointer(&sa.raw.Psm)) psm[0] = byte(sa.PSM) psm[1] = byte(sa.PSM >> 8) - for i := 0; i < len(sa.Addr); i++ { + for i := range len(sa.Addr) { sa.raw.Bdaddr[i] = sa.Addr[len(sa.Addr)-1-i] } cid := (*[2]byte)(unsafe.Pointer(&sa.raw.Cid)) @@ -589,11 +590,11 @@ func (sa *SockaddrCAN) sockaddr() (unsafe.Pointer, _Socklen, error) { sa.raw.Family = AF_CAN sa.raw.Ifindex = int32(sa.Ifindex) rx := (*[4]byte)(unsafe.Pointer(&sa.RxID)) - for i := 0; i < 4; i++ { + for i := range 4 { sa.raw.Addr[i] = rx[i] } tx := (*[4]byte)(unsafe.Pointer(&sa.TxID)) - for i := 0; i < 4; i++ { + for i := range 4 { sa.raw.Addr[i+4] = tx[i] } return unsafe.Pointer(&sa.raw), SizeofSockaddrCAN, nil @@ -618,11 +619,11 @@ func (sa *SockaddrCANJ1939) sockaddr() (unsafe.Pointer, _Socklen, error) { sa.raw.Family = AF_CAN sa.raw.Ifindex = int32(sa.Ifindex) n := (*[8]byte)(unsafe.Pointer(&sa.Name)) - for i := 0; i < 8; i++ { + for i := range 8 { sa.raw.Addr[i] = n[i] } p := (*[4]byte)(unsafe.Pointer(&sa.PGN)) - for i := 0; i < 4; i++ { + for i := range 4 { sa.raw.Addr[i+8] = p[i] } sa.raw.Addr[12] = sa.Addr @@ -800,9 +801,7 @@ func (sa *SockaddrPPPoE) sockaddr() (unsafe.Pointer, _Socklen, error) { // one. The kernel expects SID to be in network byte order. binary.BigEndian.PutUint16(sa.raw[6:8], sa.SID) copy(sa.raw[8:14], sa.Remote) - for i := 14; i < 14+IFNAMSIZ; i++ { - sa.raw[i] = 0 - } + clear(sa.raw[14 : 14+IFNAMSIZ]) copy(sa.raw[14:], sa.Dev) return unsafe.Pointer(&sa.raw), SizeofSockaddrPPPoX, nil } @@ -911,7 +910,7 @@ func (sa *SockaddrIUCV) sockaddr() (unsafe.Pointer, _Socklen, error) { // These are EBCDIC encoded by the kernel, but we still need to pad them // with blanks. Initializing with blanks allows the caller to feed in either // a padded or an unpadded string. 
- for i := 0; i < 8; i++ { + for i := range 8 { sa.raw.Nodeid[i] = ' ' sa.raw.User_id[i] = ' ' sa.raw.Name[i] = ' ' @@ -1148,7 +1147,7 @@ func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) { var user [8]byte var name [8]byte - for i := 0; i < 8; i++ { + for i := range 8 { user[i] = byte(pp.User_id[i]) name[i] = byte(pp.Name[i]) } @@ -1173,11 +1172,11 @@ func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) { Ifindex: int(pp.Ifindex), } name := (*[8]byte)(unsafe.Pointer(&sa.Name)) - for i := 0; i < 8; i++ { + for i := range 8 { name[i] = pp.Addr[i] } pgn := (*[4]byte)(unsafe.Pointer(&sa.PGN)) - for i := 0; i < 4; i++ { + for i := range 4 { pgn[i] = pp.Addr[i+8] } addr := (*[1]byte)(unsafe.Pointer(&sa.Addr)) @@ -1188,11 +1187,11 @@ func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) { Ifindex: int(pp.Ifindex), } rx := (*[4]byte)(unsafe.Pointer(&sa.RxID)) - for i := 0; i < 4; i++ { + for i := range 4 { rx[i] = pp.Addr[i] } tx := (*[4]byte)(unsafe.Pointer(&sa.TxID)) - for i := 0; i < 4; i++ { + for i := range 4 { tx[i] = pp.Addr[i+4] } return sa, nil @@ -2216,10 +2215,7 @@ func readvRacedetect(iovecs []Iovec, n int, err error) { return } for i := 0; n > 0 && i < len(iovecs); i++ { - m := int(iovecs[i].Len) - if m > n { - m = n - } + m := min(int(iovecs[i].Len), n) n -= m if m > 0 { raceWriteRange(unsafe.Pointer(iovecs[i].Base), m) @@ -2270,10 +2266,7 @@ func writevRacedetect(iovecs []Iovec, n int) { return } for i := 0; n > 0 && i < len(iovecs); i++ { - m := int(iovecs[i].Len) - if m > n { - m = n - } + m := min(int(iovecs[i].Len), n) n -= m if m > 0 { raceReadRange(unsafe.Pointer(iovecs[i].Base), m) @@ -2320,12 +2313,7 @@ func isGroupMember(gid int) bool { return false } - for _, g := range groups { - if g == gid { - return true - } - } - return false + return slices.Contains(groups, gid) } func isCapDacOverrideSet() bool { @@ -2655,3 +2643,9 @@ func SchedGetAttr(pid int, flags uint) (*SchedAttr, error) { //sys Cachestat(fd 
uint, crange *CachestatRange, cstat *Cachestat_t, flags uint) (err error) //sys Mseal(b []byte, flags uint) (err error) + +//sys setMemPolicy(mode int, mask *CPUSet, size int) (err error) = SYS_SET_MEMPOLICY + +func SetMemPolicy(mode int, mask *CPUSet) error { + return setMemPolicy(mode, mask, _CPU_SETSIZE) +} diff --git a/vendor/golang.org/x/sys/unix/syscall_netbsd.go b/vendor/golang.org/x/sys/unix/syscall_netbsd.go index 88162099a..34a467697 100644 --- a/vendor/golang.org/x/sys/unix/syscall_netbsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_netbsd.go @@ -248,6 +248,23 @@ func Statvfs(path string, buf *Statvfs_t) (err error) { return Statvfs1(path, buf, ST_WAIT) } +func Getvfsstat(buf []Statvfs_t, flags int) (n int, err error) { + var ( + _p0 unsafe.Pointer + bufsize uintptr + ) + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + bufsize = unsafe.Sizeof(Statvfs_t{}) * uintptr(len(buf)) + } + r0, _, e1 := Syscall(SYS_GETVFSSTAT, uintptr(_p0), bufsize, uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = e1 + } + return +} + /* * Exposed directly */ diff --git a/vendor/golang.org/x/sys/unix/syscall_solaris.go b/vendor/golang.org/x/sys/unix/syscall_solaris.go index abc395547..18a3d9bda 100644 --- a/vendor/golang.org/x/sys/unix/syscall_solaris.go +++ b/vendor/golang.org/x/sys/unix/syscall_solaris.go @@ -629,7 +629,7 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e //sys Kill(pid int, signum syscall.Signal) (err error) //sys Lchown(path string, uid int, gid int) (err error) //sys Link(path string, link string) (err error) -//sys Listen(s int, backlog int) (err error) = libsocket.__xnet_llisten +//sys Listen(s int, backlog int) (err error) = libsocket.__xnet_listen //sys Lstat(path string, stat *Stat_t) (err error) //sys Madvise(b []byte, advice int) (err error) //sys Mkdir(path string, mode uint32) (err error) diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux.go b/vendor/golang.org/x/sys/unix/zerrors_linux.go index 
4f432bfe8..120a7b35d 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux.go @@ -319,6 +319,7 @@ const ( AUDIT_INTEGRITY_POLICY_RULE = 0x70f AUDIT_INTEGRITY_RULE = 0x70d AUDIT_INTEGRITY_STATUS = 0x70a + AUDIT_INTEGRITY_USERSPACE = 0x710 AUDIT_IPC = 0x517 AUDIT_IPC_SET_PERM = 0x51f AUDIT_IPE_ACCESS = 0x58c @@ -327,6 +328,8 @@ const ( AUDIT_KERNEL = 0x7d0 AUDIT_KERNEL_OTHER = 0x524 AUDIT_KERN_MODULE = 0x532 + AUDIT_LANDLOCK_ACCESS = 0x58f + AUDIT_LANDLOCK_DOMAIN = 0x590 AUDIT_LAST_FEATURE = 0x1 AUDIT_LAST_KERN_ANOM_MSG = 0x707 AUDIT_LAST_USER_MSG = 0x4af @@ -491,6 +494,7 @@ const ( BPF_F_BEFORE = 0x8 BPF_F_ID = 0x20 BPF_F_NETFILTER_IP_DEFRAG = 0x1 + BPF_F_PREORDER = 0x40 BPF_F_QUERY_EFFECTIVE = 0x1 BPF_F_REDIRECT_FLAGS = 0x19 BPF_F_REPLACE = 0x4 @@ -527,6 +531,7 @@ const ( BPF_LDX = 0x1 BPF_LEN = 0x80 BPF_LL_OFF = -0x200000 + BPF_LOAD_ACQ = 0x100 BPF_LSH = 0x60 BPF_MAJOR_VERSION = 0x1 BPF_MAXINSNS = 0x1000 @@ -554,6 +559,7 @@ const ( BPF_RET = 0x6 BPF_RSH = 0x70 BPF_ST = 0x2 + BPF_STORE_REL = 0x110 BPF_STX = 0x3 BPF_SUB = 0x10 BPF_TAG_SIZE = 0x8 @@ -843,24 +849,90 @@ const ( DM_UUID_FLAG = 0x4000 DM_UUID_LEN = 0x81 DM_VERSION = 0xc138fd00 - DM_VERSION_EXTRA = "-ioctl (2023-03-01)" + DM_VERSION_EXTRA = "-ioctl (2025-04-28)" DM_VERSION_MAJOR = 0x4 - DM_VERSION_MINOR = 0x30 + DM_VERSION_MINOR = 0x32 DM_VERSION_PATCHLEVEL = 0x0 + DT_ADDRRNGHI = 0x6ffffeff + DT_ADDRRNGLO = 0x6ffffe00 DT_BLK = 0x6 DT_CHR = 0x2 + DT_DEBUG = 0x15 DT_DIR = 0x4 + DT_ENCODING = 0x20 DT_FIFO = 0x1 + DT_FINI = 0xd + DT_FLAGS_1 = 0x6ffffffb + DT_GNU_HASH = 0x6ffffef5 + DT_HASH = 0x4 + DT_HIOS = 0x6ffff000 + DT_HIPROC = 0x7fffffff + DT_INIT = 0xc + DT_JMPREL = 0x17 DT_LNK = 0xa + DT_LOOS = 0x6000000d + DT_LOPROC = 0x70000000 + DT_NEEDED = 0x1 + DT_NULL = 0x0 + DT_PLTGOT = 0x3 + DT_PLTREL = 0x14 + DT_PLTRELSZ = 0x2 DT_REG = 0x8 + DT_REL = 0x11 + DT_RELA = 0x7 + DT_RELACOUNT = 0x6ffffff9 + DT_RELAENT = 0x9 + DT_RELASZ = 0x8 + DT_RELCOUNT = 
0x6ffffffa + DT_RELENT = 0x13 + DT_RELSZ = 0x12 + DT_RPATH = 0xf DT_SOCK = 0xc + DT_SONAME = 0xe + DT_STRSZ = 0xa + DT_STRTAB = 0x5 + DT_SYMBOLIC = 0x10 + DT_SYMENT = 0xb + DT_SYMTAB = 0x6 + DT_TEXTREL = 0x16 DT_UNKNOWN = 0x0 + DT_VALRNGHI = 0x6ffffdff + DT_VALRNGLO = 0x6ffffd00 + DT_VERDEF = 0x6ffffffc + DT_VERDEFNUM = 0x6ffffffd + DT_VERNEED = 0x6ffffffe + DT_VERNEEDNUM = 0x6fffffff + DT_VERSYM = 0x6ffffff0 DT_WHT = 0xe ECHO = 0x8 ECRYPTFS_SUPER_MAGIC = 0xf15f EFD_SEMAPHORE = 0x1 EFIVARFS_MAGIC = 0xde5e81e4 EFS_SUPER_MAGIC = 0x414a53 + EI_CLASS = 0x4 + EI_DATA = 0x5 + EI_MAG0 = 0x0 + EI_MAG1 = 0x1 + EI_MAG2 = 0x2 + EI_MAG3 = 0x3 + EI_NIDENT = 0x10 + EI_OSABI = 0x7 + EI_PAD = 0x8 + EI_VERSION = 0x6 + ELFCLASS32 = 0x1 + ELFCLASS64 = 0x2 + ELFCLASSNONE = 0x0 + ELFCLASSNUM = 0x3 + ELFDATA2LSB = 0x1 + ELFDATA2MSB = 0x2 + ELFDATANONE = 0x0 + ELFMAG = "\177ELF" + ELFMAG0 = 0x7f + ELFMAG1 = 'E' + ELFMAG2 = 'L' + ELFMAG3 = 'F' + ELFOSABI_LINUX = 0x3 + ELFOSABI_NONE = 0x0 EM_386 = 0x3 EM_486 = 0x6 EM_68K = 0x4 @@ -936,11 +1008,10 @@ const ( EPOLL_CTL_MOD = 0x3 EPOLL_IOC_TYPE = 0x8a EROFS_SUPER_MAGIC_V1 = 0xe0f5e1e2 - ESP_V4_FLOW = 0xa - ESP_V6_FLOW = 0xc - ETHER_FLOW = 0x12 ETHTOOL_BUSINFO_LEN = 0x20 ETHTOOL_EROMVERS_LEN = 0x20 + ETHTOOL_FAMILY_NAME = "ethtool" + ETHTOOL_FAMILY_VERSION = 0x1 ETHTOOL_FEC_AUTO = 0x2 ETHTOOL_FEC_BASER = 0x10 ETHTOOL_FEC_LLRS = 0x20 @@ -1147,14 +1218,24 @@ const ( ETH_P_WCCP = 0x883e ETH_P_X25 = 0x805 ETH_P_XDSA = 0xf8 + ET_CORE = 0x4 + ET_DYN = 0x3 + ET_EXEC = 0x2 + ET_HIPROC = 0xffff + ET_LOPROC = 0xff00 + ET_NONE = 0x0 + ET_REL = 0x1 EV_ABS = 0x3 EV_CNT = 0x20 + EV_CURRENT = 0x1 EV_FF = 0x15 EV_FF_STATUS = 0x17 EV_KEY = 0x1 EV_LED = 0x11 EV_MAX = 0x1f EV_MSC = 0x4 + EV_NONE = 0x0 + EV_NUM = 0x2 EV_PWR = 0x16 EV_REL = 0x2 EV_REP = 0x14 @@ -1203,13 +1284,18 @@ const ( FAN_DENY = 0x2 FAN_ENABLE_AUDIT = 0x40 FAN_EPIDFD = -0x2 + FAN_ERRNO_BITS = 0x8 + FAN_ERRNO_MASK = 0xff + FAN_ERRNO_SHIFT = 0x18 FAN_EVENT_INFO_TYPE_DFID = 0x3 
FAN_EVENT_INFO_TYPE_DFID_NAME = 0x2 FAN_EVENT_INFO_TYPE_ERROR = 0x5 FAN_EVENT_INFO_TYPE_FID = 0x1 + FAN_EVENT_INFO_TYPE_MNT = 0x7 FAN_EVENT_INFO_TYPE_NEW_DFID_NAME = 0xc FAN_EVENT_INFO_TYPE_OLD_DFID_NAME = 0xa FAN_EVENT_INFO_TYPE_PIDFD = 0x4 + FAN_EVENT_INFO_TYPE_RANGE = 0x6 FAN_EVENT_METADATA_LEN = 0x18 FAN_EVENT_ON_CHILD = 0x8000000 FAN_FS_ERROR = 0x8000 @@ -1224,9 +1310,12 @@ const ( FAN_MARK_IGNORED_SURV_MODIFY = 0x40 FAN_MARK_IGNORE_SURV = 0x440 FAN_MARK_INODE = 0x0 + FAN_MARK_MNTNS = 0x110 FAN_MARK_MOUNT = 0x10 FAN_MARK_ONLYDIR = 0x8 FAN_MARK_REMOVE = 0x2 + FAN_MNT_ATTACH = 0x1000000 + FAN_MNT_DETACH = 0x2000000 FAN_MODIFY = 0x2 FAN_MOVE = 0xc0 FAN_MOVED_FROM = 0x40 @@ -1240,6 +1329,7 @@ const ( FAN_OPEN_EXEC = 0x1000 FAN_OPEN_EXEC_PERM = 0x40000 FAN_OPEN_PERM = 0x10000 + FAN_PRE_ACCESS = 0x100000 FAN_Q_OVERFLOW = 0x4000 FAN_RENAME = 0x10000000 FAN_REPORT_DFID_NAME = 0xc00 @@ -1247,6 +1337,7 @@ const ( FAN_REPORT_DIR_FID = 0x400 FAN_REPORT_FD_ERROR = 0x2000 FAN_REPORT_FID = 0x200 + FAN_REPORT_MNT = 0x4000 FAN_REPORT_NAME = 0x800 FAN_REPORT_PIDFD = 0x80 FAN_REPORT_TARGET_FID = 0x1000 @@ -1266,6 +1357,7 @@ const ( FIB_RULE_PERMANENT = 0x1 FIB_RULE_UNRESOLVED = 0x4 FIDEDUPERANGE = 0xc0189436 + FSCRYPT_ADD_KEY_FLAG_HW_WRAPPED = 0x1 FSCRYPT_KEY_DESCRIPTOR_SIZE = 0x8 FSCRYPT_KEY_DESC_PREFIX = "fscrypt:" FSCRYPT_KEY_DESC_PREFIX_SIZE = 0x8 @@ -1523,6 +1615,8 @@ const ( IN_OPEN = 0x20 IN_Q_OVERFLOW = 0x4000 IN_UNMOUNT = 0x2000 + IOCTL_MEI_CONNECT_CLIENT = 0xc0104801 + IOCTL_MEI_CONNECT_CLIENT_VTAG = 0xc0144804 IPPROTO_AH = 0x33 IPPROTO_BEETPH = 0x5e IPPROTO_COMP = 0x6c @@ -1574,7 +1668,6 @@ const ( IPV6_DONTFRAG = 0x3e IPV6_DROP_MEMBERSHIP = 0x15 IPV6_DSTOPTS = 0x3b - IPV6_FLOW = 0x11 IPV6_FREEBIND = 0x4e IPV6_HDRINCL = 0x24 IPV6_HOPLIMIT = 0x34 @@ -1625,7 +1718,6 @@ const ( IPV6_TRANSPARENT = 0x4b IPV6_UNICAST_HOPS = 0x10 IPV6_UNICAST_IF = 0x4c - IPV6_USER_FLOW = 0xe IPV6_V6ONLY = 0x1a IPV6_VERSION = 0x60 IPV6_VERSION_MASK = 0xf0 @@ -1687,7 +1779,6 @@ const ( IP_TTL 
= 0x2 IP_UNBLOCK_SOURCE = 0x25 IP_UNICAST_IF = 0x32 - IP_USER_FLOW = 0xd IP_XFRM_POLICY = 0x11 ISOFS_SUPER_MAGIC = 0x9660 ISTRIP = 0x20 @@ -1809,7 +1900,11 @@ const ( LANDLOCK_ACCESS_FS_WRITE_FILE = 0x2 LANDLOCK_ACCESS_NET_BIND_TCP = 0x1 LANDLOCK_ACCESS_NET_CONNECT_TCP = 0x2 + LANDLOCK_CREATE_RULESET_ERRATA = 0x2 LANDLOCK_CREATE_RULESET_VERSION = 0x1 + LANDLOCK_RESTRICT_SELF_LOG_NEW_EXEC_ON = 0x2 + LANDLOCK_RESTRICT_SELF_LOG_SAME_EXEC_OFF = 0x1 + LANDLOCK_RESTRICT_SELF_LOG_SUBDOMAINS_OFF = 0x4 LANDLOCK_SCOPE_ABSTRACT_UNIX_SOCKET = 0x1 LANDLOCK_SCOPE_SIGNAL = 0x2 LINUX_REBOOT_CMD_CAD_OFF = 0x0 @@ -2259,7 +2354,167 @@ const ( NLM_F_REPLACE = 0x100 NLM_F_REQUEST = 0x1 NLM_F_ROOT = 0x100 + NN_386_IOPERM = "LINUX" + NN_386_TLS = "LINUX" + NN_ARC_V2 = "LINUX" + NN_ARM_FPMR = "LINUX" + NN_ARM_GCS = "LINUX" + NN_ARM_HW_BREAK = "LINUX" + NN_ARM_HW_WATCH = "LINUX" + NN_ARM_PACA_KEYS = "LINUX" + NN_ARM_PACG_KEYS = "LINUX" + NN_ARM_PAC_ENABLED_KEYS = "LINUX" + NN_ARM_PAC_MASK = "LINUX" + NN_ARM_POE = "LINUX" + NN_ARM_SSVE = "LINUX" + NN_ARM_SVE = "LINUX" + NN_ARM_SYSTEM_CALL = "LINUX" + NN_ARM_TAGGED_ADDR_CTRL = "LINUX" + NN_ARM_TLS = "LINUX" + NN_ARM_VFP = "LINUX" + NN_ARM_ZA = "LINUX" + NN_ARM_ZT = "LINUX" + NN_AUXV = "CORE" + NN_FILE = "CORE" + NN_GNU_PROPERTY_TYPE_0 = "GNU" + NN_LOONGARCH_CPUCFG = "LINUX" + NN_LOONGARCH_CSR = "LINUX" + NN_LOONGARCH_HW_BREAK = "LINUX" + NN_LOONGARCH_HW_WATCH = "LINUX" + NN_LOONGARCH_LASX = "LINUX" + NN_LOONGARCH_LBT = "LINUX" + NN_LOONGARCH_LSX = "LINUX" + NN_MIPS_DSP = "LINUX" + NN_MIPS_FP_MODE = "LINUX" + NN_MIPS_MSA = "LINUX" + NN_PPC_DEXCR = "LINUX" + NN_PPC_DSCR = "LINUX" + NN_PPC_EBB = "LINUX" + NN_PPC_HASHKEYR = "LINUX" + NN_PPC_PKEY = "LINUX" + NN_PPC_PMU = "LINUX" + NN_PPC_PPR = "LINUX" + NN_PPC_SPE = "LINUX" + NN_PPC_TAR = "LINUX" + NN_PPC_TM_CDSCR = "LINUX" + NN_PPC_TM_CFPR = "LINUX" + NN_PPC_TM_CGPR = "LINUX" + NN_PPC_TM_CPPR = "LINUX" + NN_PPC_TM_CTAR = "LINUX" + NN_PPC_TM_CVMX = "LINUX" + NN_PPC_TM_CVSX = "LINUX" + 
NN_PPC_TM_SPR = "LINUX" + NN_PPC_VMX = "LINUX" + NN_PPC_VSX = "LINUX" + NN_PRFPREG = "CORE" + NN_PRPSINFO = "CORE" + NN_PRSTATUS = "CORE" + NN_PRXFPREG = "LINUX" + NN_RISCV_CSR = "LINUX" + NN_RISCV_TAGGED_ADDR_CTRL = "LINUX" + NN_RISCV_VECTOR = "LINUX" + NN_S390_CTRS = "LINUX" + NN_S390_GS_BC = "LINUX" + NN_S390_GS_CB = "LINUX" + NN_S390_HIGH_GPRS = "LINUX" + NN_S390_LAST_BREAK = "LINUX" + NN_S390_PREFIX = "LINUX" + NN_S390_PV_CPU_DATA = "LINUX" + NN_S390_RI_CB = "LINUX" + NN_S390_SYSTEM_CALL = "LINUX" + NN_S390_TDB = "LINUX" + NN_S390_TIMER = "LINUX" + NN_S390_TODCMP = "LINUX" + NN_S390_TODPREG = "LINUX" + NN_S390_VXRS_HIGH = "LINUX" + NN_S390_VXRS_LOW = "LINUX" + NN_SIGINFO = "CORE" + NN_TASKSTRUCT = "CORE" + NN_VMCOREDD = "LINUX" + NN_X86_SHSTK = "LINUX" + NN_X86_XSAVE_LAYOUT = "LINUX" + NN_X86_XSTATE = "LINUX" NSFS_MAGIC = 0x6e736673 + NT_386_IOPERM = 0x201 + NT_386_TLS = 0x200 + NT_ARC_V2 = 0x600 + NT_ARM_FPMR = 0x40e + NT_ARM_GCS = 0x410 + NT_ARM_HW_BREAK = 0x402 + NT_ARM_HW_WATCH = 0x403 + NT_ARM_PACA_KEYS = 0x407 + NT_ARM_PACG_KEYS = 0x408 + NT_ARM_PAC_ENABLED_KEYS = 0x40a + NT_ARM_PAC_MASK = 0x406 + NT_ARM_POE = 0x40f + NT_ARM_SSVE = 0x40b + NT_ARM_SVE = 0x405 + NT_ARM_SYSTEM_CALL = 0x404 + NT_ARM_TAGGED_ADDR_CTRL = 0x409 + NT_ARM_TLS = 0x401 + NT_ARM_VFP = 0x400 + NT_ARM_ZA = 0x40c + NT_ARM_ZT = 0x40d + NT_AUXV = 0x6 + NT_FILE = 0x46494c45 + NT_GNU_PROPERTY_TYPE_0 = 0x5 + NT_LOONGARCH_CPUCFG = 0xa00 + NT_LOONGARCH_CSR = 0xa01 + NT_LOONGARCH_HW_BREAK = 0xa05 + NT_LOONGARCH_HW_WATCH = 0xa06 + NT_LOONGARCH_LASX = 0xa03 + NT_LOONGARCH_LBT = 0xa04 + NT_LOONGARCH_LSX = 0xa02 + NT_MIPS_DSP = 0x800 + NT_MIPS_FP_MODE = 0x801 + NT_MIPS_MSA = 0x802 + NT_PPC_DEXCR = 0x111 + NT_PPC_DSCR = 0x105 + NT_PPC_EBB = 0x106 + NT_PPC_HASHKEYR = 0x112 + NT_PPC_PKEY = 0x110 + NT_PPC_PMU = 0x107 + NT_PPC_PPR = 0x104 + NT_PPC_SPE = 0x101 + NT_PPC_TAR = 0x103 + NT_PPC_TM_CDSCR = 0x10f + NT_PPC_TM_CFPR = 0x109 + NT_PPC_TM_CGPR = 0x108 + NT_PPC_TM_CPPR = 0x10e + NT_PPC_TM_CTAR = 0x10d 
+ NT_PPC_TM_CVMX = 0x10a + NT_PPC_TM_CVSX = 0x10b + NT_PPC_TM_SPR = 0x10c + NT_PPC_VMX = 0x100 + NT_PPC_VSX = 0x102 + NT_PRFPREG = 0x2 + NT_PRPSINFO = 0x3 + NT_PRSTATUS = 0x1 + NT_PRXFPREG = 0x46e62b7f + NT_RISCV_CSR = 0x900 + NT_RISCV_TAGGED_ADDR_CTRL = 0x902 + NT_RISCV_VECTOR = 0x901 + NT_S390_CTRS = 0x304 + NT_S390_GS_BC = 0x30c + NT_S390_GS_CB = 0x30b + NT_S390_HIGH_GPRS = 0x300 + NT_S390_LAST_BREAK = 0x306 + NT_S390_PREFIX = 0x305 + NT_S390_PV_CPU_DATA = 0x30e + NT_S390_RI_CB = 0x30d + NT_S390_SYSTEM_CALL = 0x307 + NT_S390_TDB = 0x308 + NT_S390_TIMER = 0x301 + NT_S390_TODCMP = 0x302 + NT_S390_TODPREG = 0x303 + NT_S390_VXRS_HIGH = 0x30a + NT_S390_VXRS_LOW = 0x309 + NT_SIGINFO = 0x53494749 + NT_TASKSTRUCT = 0x4 + NT_VMCOREDD = 0x700 + NT_X86_SHSTK = 0x204 + NT_X86_XSAVE_LAYOUT = 0x205 + NT_X86_XSTATE = 0x202 OCFS2_SUPER_MAGIC = 0x7461636f OCRNL = 0x8 OFDEL = 0x80 @@ -2446,6 +2701,59 @@ const ( PERF_RECORD_MISC_USER = 0x2 PERF_SAMPLE_BRANCH_PLM_ALL = 0x7 PERF_SAMPLE_WEIGHT_TYPE = 0x1004000 + PF_ALG = 0x26 + PF_APPLETALK = 0x5 + PF_ASH = 0x12 + PF_ATMPVC = 0x8 + PF_ATMSVC = 0x14 + PF_AX25 = 0x3 + PF_BLUETOOTH = 0x1f + PF_BRIDGE = 0x7 + PF_CAIF = 0x25 + PF_CAN = 0x1d + PF_DECnet = 0xc + PF_ECONET = 0x13 + PF_FILE = 0x1 + PF_IB = 0x1b + PF_IEEE802154 = 0x24 + PF_INET = 0x2 + PF_INET6 = 0xa + PF_IPX = 0x4 + PF_IRDA = 0x17 + PF_ISDN = 0x22 + PF_IUCV = 0x20 + PF_KCM = 0x29 + PF_KEY = 0xf + PF_LLC = 0x1a + PF_LOCAL = 0x1 + PF_MAX = 0x2e + PF_MCTP = 0x2d + PF_MPLS = 0x1c + PF_NETBEUI = 0xd + PF_NETLINK = 0x10 + PF_NETROM = 0x6 + PF_NFC = 0x27 + PF_PACKET = 0x11 + PF_PHONET = 0x23 + PF_PPPOX = 0x18 + PF_QIPCRTR = 0x2a + PF_R = 0x4 + PF_RDS = 0x15 + PF_ROSE = 0xb + PF_ROUTE = 0x10 + PF_RXRPC = 0x21 + PF_SECURITY = 0xe + PF_SMC = 0x2b + PF_SNA = 0x16 + PF_TIPC = 0x1e + PF_UNIX = 0x1 + PF_UNSPEC = 0x0 + PF_VSOCK = 0x28 + PF_W = 0x2 + PF_WANPIPE = 0x19 + PF_X = 0x1 + PF_X25 = 0x9 + PF_XDP = 0x2c PID_FS_MAGIC = 0x50494446 PIPEFS_MAGIC = 0x50495045 PPPIOCGNPMODE = 0xc008744c @@ 
-2485,6 +2793,10 @@ const ( PR_FP_EXC_UND = 0x40000 PR_FP_MODE_FR = 0x1 PR_FP_MODE_FRE = 0x2 + PR_FUTEX_HASH = 0x4e + PR_FUTEX_HASH_GET_IMMUTABLE = 0x3 + PR_FUTEX_HASH_GET_SLOTS = 0x2 + PR_FUTEX_HASH_SET_SLOTS = 0x1 PR_GET_AUXV = 0x41555856 PR_GET_CHILD_SUBREAPER = 0x25 PR_GET_DUMPABLE = 0x3 @@ -2644,6 +2956,10 @@ const ( PR_TAGGED_ADDR_ENABLE = 0x1 PR_TASK_PERF_EVENTS_DISABLE = 0x1f PR_TASK_PERF_EVENTS_ENABLE = 0x20 + PR_TIMER_CREATE_RESTORE_IDS = 0x4d + PR_TIMER_CREATE_RESTORE_IDS_GET = 0x2 + PR_TIMER_CREATE_RESTORE_IDS_OFF = 0x0 + PR_TIMER_CREATE_RESTORE_IDS_ON = 0x1 PR_TIMING_STATISTICAL = 0x0 PR_TIMING_TIMESTAMP = 0x1 PR_TSC_ENABLE = 0x1 @@ -2724,6 +3040,7 @@ const ( PTRACE_SETREGSET = 0x4205 PTRACE_SETSIGINFO = 0x4203 PTRACE_SETSIGMASK = 0x420b + PTRACE_SET_SYSCALL_INFO = 0x4212 PTRACE_SET_SYSCALL_USER_DISPATCH_CONFIG = 0x4210 PTRACE_SINGLESTEP = 0x9 PTRACE_SYSCALL = 0x18 @@ -2732,6 +3049,23 @@ const ( PTRACE_SYSCALL_INFO_NONE = 0x0 PTRACE_SYSCALL_INFO_SECCOMP = 0x3 PTRACE_TRACEME = 0x0 + PT_AARCH64_MEMTAG_MTE = 0x70000002 + PT_DYNAMIC = 0x2 + PT_GNU_EH_FRAME = 0x6474e550 + PT_GNU_PROPERTY = 0x6474e553 + PT_GNU_RELRO = 0x6474e552 + PT_GNU_STACK = 0x6474e551 + PT_HIOS = 0x6fffffff + PT_HIPROC = 0x7fffffff + PT_INTERP = 0x3 + PT_LOAD = 0x1 + PT_LOOS = 0x60000000 + PT_LOPROC = 0x70000000 + PT_NOTE = 0x4 + PT_NULL = 0x0 + PT_PHDR = 0x6 + PT_SHLIB = 0x5 + PT_TLS = 0x7 P_ALL = 0x0 P_PGID = 0x2 P_PID = 0x1 @@ -2787,7 +3121,7 @@ const ( RTAX_UNSPEC = 0x0 RTAX_WINDOW = 0x3 RTA_ALIGNTO = 0x4 - RTA_MAX = 0x1e + RTA_MAX = 0x1f RTCF_DIRECTSRC = 0x4000000 RTCF_DOREDIRECT = 0x1000000 RTCF_LOG = 0x2000000 @@ -2864,10 +3198,12 @@ const ( RTM_DELACTION = 0x31 RTM_DELADDR = 0x15 RTM_DELADDRLABEL = 0x49 + RTM_DELANYCAST = 0x3d RTM_DELCHAIN = 0x65 RTM_DELLINK = 0x11 RTM_DELLINKPROP = 0x6d RTM_DELMDB = 0x55 + RTM_DELMULTICAST = 0x39 RTM_DELNEIGH = 0x1d RTM_DELNETCONF = 0x51 RTM_DELNEXTHOP = 0x69 @@ -2917,11 +3253,13 @@ const ( RTM_NEWACTION = 0x30 RTM_NEWADDR = 0x14 
RTM_NEWADDRLABEL = 0x48 + RTM_NEWANYCAST = 0x3c RTM_NEWCACHEREPORT = 0x60 RTM_NEWCHAIN = 0x64 RTM_NEWLINK = 0x10 RTM_NEWLINKPROP = 0x6c RTM_NEWMDB = 0x54 + RTM_NEWMULTICAST = 0x38 RTM_NEWNDUSEROPT = 0x44 RTM_NEWNEIGH = 0x1c RTM_NEWNEIGHTBL = 0x40 @@ -2970,6 +3308,7 @@ const ( RTPROT_NTK = 0xf RTPROT_OPENR = 0x63 RTPROT_OSPF = 0xbc + RTPROT_OVN = 0x54 RTPROT_RA = 0x9 RTPROT_REDIRECT = 0x1 RTPROT_RIP = 0xbd @@ -2987,11 +3326,12 @@ const ( RUSAGE_THREAD = 0x1 RWF_APPEND = 0x10 RWF_ATOMIC = 0x40 + RWF_DONTCACHE = 0x80 RWF_DSYNC = 0x2 RWF_HIPRI = 0x1 RWF_NOAPPEND = 0x20 RWF_NOWAIT = 0x8 - RWF_SUPPORTED = 0x7f + RWF_SUPPORTED = 0xff RWF_SYNC = 0x4 RWF_WRITE_LIFE_NOT_SET = 0x0 SCHED_BATCH = 0x3 @@ -3059,6 +3399,47 @@ const ( SEEK_MAX = 0x4 SEEK_SET = 0x0 SELINUX_MAGIC = 0xf97cff8c + SHF_ALLOC = 0x2 + SHF_EXCLUDE = 0x8000000 + SHF_EXECINSTR = 0x4 + SHF_GROUP = 0x200 + SHF_INFO_LINK = 0x40 + SHF_LINK_ORDER = 0x80 + SHF_MASKOS = 0xff00000 + SHF_MASKPROC = 0xf0000000 + SHF_MERGE = 0x10 + SHF_ORDERED = 0x4000000 + SHF_OS_NONCONFORMING = 0x100 + SHF_RELA_LIVEPATCH = 0x100000 + SHF_RO_AFTER_INIT = 0x200000 + SHF_STRINGS = 0x20 + SHF_TLS = 0x400 + SHF_WRITE = 0x1 + SHN_ABS = 0xfff1 + SHN_COMMON = 0xfff2 + SHN_HIPROC = 0xff1f + SHN_HIRESERVE = 0xffff + SHN_LIVEPATCH = 0xff20 + SHN_LOPROC = 0xff00 + SHN_LORESERVE = 0xff00 + SHN_UNDEF = 0x0 + SHT_DYNAMIC = 0x6 + SHT_DYNSYM = 0xb + SHT_HASH = 0x5 + SHT_HIPROC = 0x7fffffff + SHT_HIUSER = 0xffffffff + SHT_LOPROC = 0x70000000 + SHT_LOUSER = 0x80000000 + SHT_NOBITS = 0x8 + SHT_NOTE = 0x7 + SHT_NULL = 0x0 + SHT_NUM = 0xc + SHT_PROGBITS = 0x1 + SHT_REL = 0x9 + SHT_RELA = 0x4 + SHT_SHLIB = 0xa + SHT_STRTAB = 0x3 + SHT_SYMTAB = 0x2 SHUT_RD = 0x0 SHUT_RDWR = 0x2 SHUT_WR = 0x1 @@ -3271,6 +3652,7 @@ const ( STATX_BTIME = 0x800 STATX_CTIME = 0x80 STATX_DIOALIGN = 0x2000 + STATX_DIO_READ_ALIGN = 0x20000 STATX_GID = 0x10 STATX_INO = 0x100 STATX_MNT_ID = 0x1000 @@ -3284,6 +3666,16 @@ const ( STATX_UID = 0x8 STATX_WRITE_ATOMIC = 0x10000 
STATX__RESERVED = 0x80000000 + STB_GLOBAL = 0x1 + STB_LOCAL = 0x0 + STB_WEAK = 0x2 + STT_COMMON = 0x5 + STT_FILE = 0x4 + STT_FUNC = 0x2 + STT_NOTYPE = 0x0 + STT_OBJECT = 0x1 + STT_SECTION = 0x3 + STT_TLS = 0x6 SYNC_FILE_RANGE_WAIT_AFTER = 0x4 SYNC_FILE_RANGE_WAIT_BEFORE = 0x1 SYNC_FILE_RANGE_WRITE = 0x2 @@ -3322,7 +3714,7 @@ const ( TASKSTATS_GENL_NAME = "TASKSTATS" TASKSTATS_GENL_VERSION = 0x1 TASKSTATS_TYPE_MAX = 0x6 - TASKSTATS_VERSION = 0xe + TASKSTATS_VERSION = 0x10 TCIFLUSH = 0x0 TCIOFF = 0x2 TCIOFLUSH = 0x2 @@ -3392,8 +3784,6 @@ const ( TCP_TX_DELAY = 0x25 TCP_ULP = 0x1f TCP_USER_TIMEOUT = 0x12 - TCP_V4_FLOW = 0x1 - TCP_V6_FLOW = 0x5 TCP_WINDOW_CLAMP = 0xa TCP_ZEROCOPY_RECEIVE = 0x23 TFD_TIMER_ABSTIME = 0x1 @@ -3503,6 +3893,7 @@ const ( TP_STATUS_WRONG_FORMAT = 0x4 TRACEFS_MAGIC = 0x74726163 TS_COMM_LEN = 0x20 + UBI_IOCECNFO = 0xc01c6f06 UDF_SUPER_MAGIC = 0x15013346 UDP_CORK = 0x1 UDP_ENCAP = 0x64 @@ -3515,14 +3906,14 @@ const ( UDP_NO_CHECK6_RX = 0x66 UDP_NO_CHECK6_TX = 0x65 UDP_SEGMENT = 0x67 - UDP_V4_FLOW = 0x2 - UDP_V6_FLOW = 0x6 UMOUNT_NOFOLLOW = 0x8 USBDEVICE_SUPER_MAGIC = 0x9fa2 UTIME_NOW = 0x3fffffff UTIME_OMIT = 0x3ffffffe V9FS_MAGIC = 0x1021997 VERASE = 0x2 + VER_FLG_BASE = 0x1 + VER_FLG_WEAK = 0x2 VINTR = 0x0 VKILL = 0x3 VLNEXT = 0xf @@ -3559,7 +3950,7 @@ const ( WDIOS_TEMPPANIC = 0x4 WDIOS_UNKNOWN = -0x1 WEXITED = 0x4 - WGALLOWEDIP_A_MAX = 0x3 + WGALLOWEDIP_A_MAX = 0x4 WGDEVICE_A_MAX = 0x8 WGPEER_A_MAX = 0xa WG_CMD_MAX = 0x1 @@ -3673,6 +4064,7 @@ const ( XDP_SHARED_UMEM = 0x1 XDP_STATISTICS = 0x7 XDP_TXMD_FLAGS_CHECKSUM = 0x2 + XDP_TXMD_FLAGS_LAUNCH_TIME = 0x4 XDP_TXMD_FLAGS_TIMESTAMP = 0x1 XDP_TX_METADATA = 0x2 XDP_TX_RING = 0x3 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go index 75207613c..97a61fc5b 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go @@ -68,6 +68,7 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 
0x40 + DM_MPATH_PROBE_PATHS = 0xfd12 ECCGETLAYOUT = 0x81484d11 ECCGETSTATS = 0x80104d12 ECHOCTL = 0x200 @@ -115,6 +116,8 @@ const ( IEXTEN = 0x8000 IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 + IOCTL_MEI_NOTIFY_GET = 0x80044803 + IOCTL_MEI_NOTIFY_SET = 0x40044802 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 IPV6_FLOWINFO_MASK = 0xffffff0f IPV6_FLOWLABEL_MASK = 0xffff0f00 @@ -360,6 +363,7 @@ const ( SO_OOBINLINE = 0xa SO_PASSCRED = 0x10 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x11 @@ -372,6 +376,7 @@ const ( SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x12 SO_RCVMARK = 0x4b + SO_RCVPRIORITY = 0x52 SO_RCVTIMEO = 0x14 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x14 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go index c68acda53..a0d6d498c 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go @@ -68,6 +68,7 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + DM_MPATH_PROBE_PATHS = 0xfd12 ECCGETLAYOUT = 0x81484d11 ECCGETSTATS = 0x80104d12 ECHOCTL = 0x200 @@ -115,6 +116,8 @@ const ( IEXTEN = 0x8000 IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 + IOCTL_MEI_NOTIFY_GET = 0x80044803 + IOCTL_MEI_NOTIFY_SET = 0x40044802 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 IPV6_FLOWINFO_MASK = 0xffffff0f IPV6_FLOWLABEL_MASK = 0xffff0f00 @@ -361,6 +364,7 @@ const ( SO_OOBINLINE = 0xa SO_PASSCRED = 0x10 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x11 @@ -373,6 +377,7 @@ const ( SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x12 SO_RCVMARK = 0x4b + SO_RCVPRIORITY = 0x52 SO_RCVTIMEO = 0x14 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x14 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go index a8c607ab8..dd9c903f9 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go +++ 
b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go @@ -68,6 +68,7 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + DM_MPATH_PROBE_PATHS = 0xfd12 ECCGETLAYOUT = 0x81484d11 ECCGETSTATS = 0x80104d12 ECHOCTL = 0x200 @@ -114,6 +115,8 @@ const ( IEXTEN = 0x8000 IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 + IOCTL_MEI_NOTIFY_GET = 0x80044803 + IOCTL_MEI_NOTIFY_SET = 0x40044802 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 IPV6_FLOWINFO_MASK = 0xffffff0f IPV6_FLOWLABEL_MASK = 0xffff0f00 @@ -366,6 +369,7 @@ const ( SO_OOBINLINE = 0xa SO_PASSCRED = 0x10 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x11 @@ -378,6 +382,7 @@ const ( SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x12 SO_RCVMARK = 0x4b + SO_RCVPRIORITY = 0x52 SO_RCVTIMEO = 0x14 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x14 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go index 18563dd8d..384c61ca3 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go @@ -68,6 +68,7 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + DM_MPATH_PROBE_PATHS = 0xfd12 ECCGETLAYOUT = 0x81484d11 ECCGETSTATS = 0x80104d12 ECHOCTL = 0x200 @@ -119,6 +120,8 @@ const ( IEXTEN = 0x8000 IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 + IOCTL_MEI_NOTIFY_GET = 0x80044803 + IOCTL_MEI_NOTIFY_SET = 0x40044802 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 IPV6_FLOWINFO_MASK = 0xffffff0f IPV6_FLOWLABEL_MASK = 0xffff0f00 @@ -359,6 +362,7 @@ const ( SO_OOBINLINE = 0xa SO_PASSCRED = 0x10 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x11 @@ -371,6 +375,7 @@ const ( SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x12 SO_RCVMARK = 0x4b + SO_RCVPRIORITY = 0x52 SO_RCVTIMEO = 0x14 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x14 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go index 
22912cdaa..6384c9831 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go @@ -68,6 +68,7 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + DM_MPATH_PROBE_PATHS = 0xfd12 ECCGETLAYOUT = 0x81484d11 ECCGETSTATS = 0x80104d12 ECHOCTL = 0x200 @@ -115,6 +116,8 @@ const ( IEXTEN = 0x8000 IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 + IOCTL_MEI_NOTIFY_GET = 0x80044803 + IOCTL_MEI_NOTIFY_SET = 0x40044802 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 IPV6_FLOWINFO_MASK = 0xffffff0f IPV6_FLOWLABEL_MASK = 0xffff0f00 @@ -353,6 +356,7 @@ const ( SO_OOBINLINE = 0xa SO_PASSCRED = 0x10 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x11 @@ -365,6 +369,7 @@ const ( SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x12 SO_RCVMARK = 0x4b + SO_RCVPRIORITY = 0x52 SO_RCVTIMEO = 0x14 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x14 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go index 29344eb37..553c1c6f1 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go @@ -68,6 +68,7 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + DM_MPATH_PROBE_PATHS = 0x2000fd12 ECCGETLAYOUT = 0x41484d11 ECCGETSTATS = 0x40104d12 ECHOCTL = 0x200 @@ -114,6 +115,8 @@ const ( IEXTEN = 0x100 IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x80 + IOCTL_MEI_NOTIFY_GET = 0x40044803 + IOCTL_MEI_NOTIFY_SET = 0x80044802 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 IPV6_FLOWINFO_MASK = 0xfffffff IPV6_FLOWLABEL_MASK = 0xfffff @@ -359,6 +362,7 @@ const ( SO_OOBINLINE = 0x100 SO_PASSCRED = 0x11 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x12 @@ -371,6 +375,7 @@ const ( SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x1004 SO_RCVMARK = 0x4b + SO_RCVPRIORITY = 0x52 SO_RCVTIMEO = 0x1006 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x1006 diff --git 
a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go index 20d51fb96..b3339f209 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go @@ -68,6 +68,7 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + DM_MPATH_PROBE_PATHS = 0x2000fd12 ECCGETLAYOUT = 0x41484d11 ECCGETSTATS = 0x40104d12 ECHOCTL = 0x200 @@ -114,6 +115,8 @@ const ( IEXTEN = 0x100 IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x80 + IOCTL_MEI_NOTIFY_GET = 0x40044803 + IOCTL_MEI_NOTIFY_SET = 0x80044802 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 IPV6_FLOWINFO_MASK = 0xfffffff IPV6_FLOWLABEL_MASK = 0xfffff @@ -359,6 +362,7 @@ const ( SO_OOBINLINE = 0x100 SO_PASSCRED = 0x11 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x12 @@ -371,6 +375,7 @@ const ( SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x1004 SO_RCVMARK = 0x4b + SO_RCVPRIORITY = 0x52 SO_RCVTIMEO = 0x1006 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x1006 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go index 321b60902..177091d2b 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go @@ -68,6 +68,7 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + DM_MPATH_PROBE_PATHS = 0x2000fd12 ECCGETLAYOUT = 0x41484d11 ECCGETSTATS = 0x40104d12 ECHOCTL = 0x200 @@ -114,6 +115,8 @@ const ( IEXTEN = 0x100 IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x80 + IOCTL_MEI_NOTIFY_GET = 0x40044803 + IOCTL_MEI_NOTIFY_SET = 0x80044802 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 IPV6_FLOWINFO_MASK = 0xffffff0f IPV6_FLOWLABEL_MASK = 0xffff0f00 @@ -359,6 +362,7 @@ const ( SO_OOBINLINE = 0x100 SO_PASSCRED = 0x11 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x12 @@ -371,6 +375,7 @@ const ( SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x1004 SO_RCVMARK 
= 0x4b + SO_RCVPRIORITY = 0x52 SO_RCVTIMEO = 0x1006 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x1006 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go index 9bacdf1e2..c5abf156d 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go @@ -68,6 +68,7 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + DM_MPATH_PROBE_PATHS = 0x2000fd12 ECCGETLAYOUT = 0x41484d11 ECCGETSTATS = 0x40104d12 ECHOCTL = 0x200 @@ -114,6 +115,8 @@ const ( IEXTEN = 0x100 IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x80 + IOCTL_MEI_NOTIFY_GET = 0x40044803 + IOCTL_MEI_NOTIFY_SET = 0x80044802 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 IPV6_FLOWINFO_MASK = 0xffffff0f IPV6_FLOWLABEL_MASK = 0xffff0f00 @@ -359,6 +362,7 @@ const ( SO_OOBINLINE = 0x100 SO_PASSCRED = 0x11 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x12 @@ -371,6 +375,7 @@ const ( SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x1004 SO_RCVMARK = 0x4b + SO_RCVPRIORITY = 0x52 SO_RCVTIMEO = 0x1006 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x1006 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go index c22427261..f1f3fadf5 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go @@ -68,6 +68,7 @@ const ( CS8 = 0x300 CSIZE = 0x300 CSTOPB = 0x400 + DM_MPATH_PROBE_PATHS = 0x2000fd12 ECCGETLAYOUT = 0x41484d11 ECCGETSTATS = 0x40104d12 ECHOCTL = 0x40 @@ -114,6 +115,8 @@ const ( IEXTEN = 0x400 IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 + IOCTL_MEI_NOTIFY_GET = 0x40044803 + IOCTL_MEI_NOTIFY_SET = 0x80044802 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 IPV6_FLOWINFO_MASK = 0xfffffff IPV6_FLOWLABEL_MASK = 0xfffff @@ -414,6 +417,7 @@ const ( SO_OOBINLINE = 0xa SO_PASSCRED = 0x14 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a 
SO_PEERCRED = 0x15 @@ -426,6 +430,7 @@ const ( SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x10 SO_RCVMARK = 0x4b + SO_RCVPRIORITY = 0x52 SO_RCVTIMEO = 0x12 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x12 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go index 6270c8ee1..203ad9c54 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go @@ -68,6 +68,7 @@ const ( CS8 = 0x300 CSIZE = 0x300 CSTOPB = 0x400 + DM_MPATH_PROBE_PATHS = 0x2000fd12 ECCGETLAYOUT = 0x41484d11 ECCGETSTATS = 0x40104d12 ECHOCTL = 0x40 @@ -114,6 +115,8 @@ const ( IEXTEN = 0x400 IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 + IOCTL_MEI_NOTIFY_GET = 0x40044803 + IOCTL_MEI_NOTIFY_SET = 0x80044802 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 IPV6_FLOWINFO_MASK = 0xfffffff IPV6_FLOWLABEL_MASK = 0xfffff @@ -418,6 +421,7 @@ const ( SO_OOBINLINE = 0xa SO_PASSCRED = 0x14 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x15 @@ -430,6 +434,7 @@ const ( SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x10 SO_RCVMARK = 0x4b + SO_RCVPRIORITY = 0x52 SO_RCVTIMEO = 0x12 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x12 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go index 9966c1941..4b9abcb21 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go @@ -68,6 +68,7 @@ const ( CS8 = 0x300 CSIZE = 0x300 CSTOPB = 0x400 + DM_MPATH_PROBE_PATHS = 0x2000fd12 ECCGETLAYOUT = 0x41484d11 ECCGETSTATS = 0x40104d12 ECHOCTL = 0x40 @@ -114,6 +115,8 @@ const ( IEXTEN = 0x400 IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 + IOCTL_MEI_NOTIFY_GET = 0x40044803 + IOCTL_MEI_NOTIFY_SET = 0x80044802 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 IPV6_FLOWINFO_MASK = 0xffffff0f IPV6_FLOWLABEL_MASK = 0xffff0f00 @@ -418,6 +421,7 @@ const ( SO_OOBINLINE = 0xa 
SO_PASSCRED = 0x14 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x15 @@ -430,6 +434,7 @@ const ( SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x10 SO_RCVMARK = 0x4b + SO_RCVPRIORITY = 0x52 SO_RCVTIMEO = 0x12 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x12 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go index 848e5fcc4..f87983037 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go @@ -68,6 +68,7 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + DM_MPATH_PROBE_PATHS = 0xfd12 ECCGETLAYOUT = 0x81484d11 ECCGETSTATS = 0x80104d12 ECHOCTL = 0x200 @@ -114,6 +115,8 @@ const ( IEXTEN = 0x8000 IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 + IOCTL_MEI_NOTIFY_GET = 0x80044803 + IOCTL_MEI_NOTIFY_SET = 0x40044802 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 IPV6_FLOWINFO_MASK = 0xffffff0f IPV6_FLOWLABEL_MASK = 0xffff0f00 @@ -350,6 +353,7 @@ const ( SO_OOBINLINE = 0xa SO_PASSCRED = 0x10 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x11 @@ -362,6 +366,7 @@ const ( SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x12 SO_RCVMARK = 0x4b + SO_RCVPRIORITY = 0x52 SO_RCVTIMEO = 0x14 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x14 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go index 669b2adb8..64347eb35 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go @@ -68,6 +68,7 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + DM_MPATH_PROBE_PATHS = 0xfd12 ECCGETLAYOUT = 0x81484d11 ECCGETSTATS = 0x80104d12 ECHOCTL = 0x200 @@ -114,6 +115,8 @@ const ( IEXTEN = 0x8000 IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 + IOCTL_MEI_NOTIFY_GET = 0x80044803 + IOCTL_MEI_NOTIFY_SET = 0x40044802 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 IPV6_FLOWINFO_MASK = 0xfffffff 
IPV6_FLOWLABEL_MASK = 0xfffff @@ -422,6 +425,7 @@ const ( SO_OOBINLINE = 0xa SO_PASSCRED = 0x10 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x11 @@ -434,6 +438,7 @@ const ( SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x12 SO_RCVMARK = 0x4b + SO_RCVPRIORITY = 0x52 SO_RCVTIMEO = 0x14 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x14 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go index 4834e5751..7d7191171 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go @@ -71,6 +71,7 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + DM_MPATH_PROBE_PATHS = 0x2000fd12 ECCGETLAYOUT = 0x41484d11 ECCGETSTATS = 0x40104d12 ECHOCTL = 0x200 @@ -118,6 +119,8 @@ const ( IEXTEN = 0x8000 IN_CLOEXEC = 0x400000 IN_NONBLOCK = 0x4000 + IOCTL_MEI_NOTIFY_GET = 0x40044803 + IOCTL_MEI_NOTIFY_SET = 0x80044802 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 IPV6_FLOWINFO_MASK = 0xfffffff IPV6_FLOWLABEL_MASK = 0xfffff @@ -461,6 +464,7 @@ const ( SO_OOBINLINE = 0x100 SO_PASSCRED = 0x2 SO_PASSPIDFD = 0x55 + SO_PASSRIGHTS = 0x5c SO_PASSSEC = 0x1f SO_PEEK_OFF = 0x26 SO_PEERCRED = 0x40 @@ -473,6 +477,7 @@ const ( SO_RCVBUFFORCE = 0x100b SO_RCVLOWAT = 0x800 SO_RCVMARK = 0x54 + SO_RCVPRIORITY = 0x5b SO_RCVTIMEO = 0x2000 SO_RCVTIMEO_NEW = 0x44 SO_RCVTIMEO_OLD = 0x2000 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go index 24b346e1a..813c05b66 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go @@ -2512,6 +2512,90 @@ var libc_munmap_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func readv(fd int, iovecs []Iovec) (n int, err error) { + var _p0 unsafe.Pointer + if len(iovecs) > 0 { + _p0 = unsafe.Pointer(&iovecs[0]) + } else { + _p0 = 
unsafe.Pointer(&_zero) + } + r0, _, e1 := syscall_syscall(libc_readv_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(iovecs))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_readv_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_readv readv "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func preadv(fd int, iovecs []Iovec, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(iovecs) > 0 { + _p0 = unsafe.Pointer(&iovecs[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := syscall_syscall6(libc_preadv_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(iovecs)), uintptr(offset), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_preadv_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_preadv preadv "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func writev(fd int, iovecs []Iovec) (n int, err error) { + var _p0 unsafe.Pointer + if len(iovecs) > 0 { + _p0 = unsafe.Pointer(&iovecs[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := syscall_syscall(libc_writev_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(iovecs))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_writev_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_writev writev "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pwritev(fd int, iovecs []Iovec, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(iovecs) > 0 { + _p0 = unsafe.Pointer(&iovecs[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := syscall_syscall6(libc_pwritev_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(iovecs)), uintptr(offset), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_pwritev_trampoline_addr uintptr + +//go:cgo_import_dynamic 
libc_pwritev pwritev "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fstat(fd int, stat *Stat_t) (err error) { _, _, e1 := syscall_syscall(libc_fstat64_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s index ebd213100..fda328582 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s @@ -738,6 +738,26 @@ TEXT libc_munmap_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_munmap_trampoline_addr(SB), RODATA, $8 DATA ·libc_munmap_trampoline_addr(SB)/8, $libc_munmap_trampoline<>(SB) +TEXT libc_readv_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_readv(SB) +GLOBL ·libc_readv_trampoline_addr(SB), RODATA, $8 +DATA ·libc_readv_trampoline_addr(SB)/8, $libc_readv_trampoline<>(SB) + +TEXT libc_preadv_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_preadv(SB) +GLOBL ·libc_preadv_trampoline_addr(SB), RODATA, $8 +DATA ·libc_preadv_trampoline_addr(SB)/8, $libc_preadv_trampoline<>(SB) + +TEXT libc_writev_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_writev(SB) +GLOBL ·libc_writev_trampoline_addr(SB), RODATA, $8 +DATA ·libc_writev_trampoline_addr(SB)/8, $libc_writev_trampoline<>(SB) + +TEXT libc_pwritev_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pwritev(SB) +GLOBL ·libc_pwritev_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pwritev_trampoline_addr(SB)/8, $libc_pwritev_trampoline<>(SB) + TEXT libc_fstat64_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fstat64(SB) GLOBL ·libc_fstat64_trampoline_addr(SB), RODATA, $8 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go index 824b9c2d5..e6f58f3c6 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go @@ -2512,6 +2512,90 @@ var libc_munmap_trampoline_addr uintptr // 
THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func readv(fd int, iovecs []Iovec) (n int, err error) { + var _p0 unsafe.Pointer + if len(iovecs) > 0 { + _p0 = unsafe.Pointer(&iovecs[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := syscall_syscall(libc_readv_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(iovecs))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_readv_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_readv readv "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func preadv(fd int, iovecs []Iovec, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(iovecs) > 0 { + _p0 = unsafe.Pointer(&iovecs[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := syscall_syscall6(libc_preadv_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(iovecs)), uintptr(offset), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_preadv_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_preadv preadv "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func writev(fd int, iovecs []Iovec) (n int, err error) { + var _p0 unsafe.Pointer + if len(iovecs) > 0 { + _p0 = unsafe.Pointer(&iovecs[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := syscall_syscall(libc_writev_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(iovecs))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_writev_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_writev writev "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pwritev(fd int, iovecs []Iovec, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(iovecs) > 0 { + _p0 = unsafe.Pointer(&iovecs[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := 
syscall_syscall6(libc_pwritev_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(iovecs)), uintptr(offset), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_pwritev_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pwritev pwritev "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fstat(fd int, stat *Stat_t) (err error) { _, _, e1 := syscall_syscall(libc_fstat_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s index 4f178a229..7f8998b90 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s @@ -738,6 +738,26 @@ TEXT libc_munmap_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_munmap_trampoline_addr(SB), RODATA, $8 DATA ·libc_munmap_trampoline_addr(SB)/8, $libc_munmap_trampoline<>(SB) +TEXT libc_readv_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_readv(SB) +GLOBL ·libc_readv_trampoline_addr(SB), RODATA, $8 +DATA ·libc_readv_trampoline_addr(SB)/8, $libc_readv_trampoline<>(SB) + +TEXT libc_preadv_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_preadv(SB) +GLOBL ·libc_preadv_trampoline_addr(SB), RODATA, $8 +DATA ·libc_preadv_trampoline_addr(SB)/8, $libc_preadv_trampoline<>(SB) + +TEXT libc_writev_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_writev(SB) +GLOBL ·libc_writev_trampoline_addr(SB), RODATA, $8 +DATA ·libc_writev_trampoline_addr(SB)/8, $libc_writev_trampoline<>(SB) + +TEXT libc_pwritev_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pwritev(SB) +GLOBL ·libc_pwritev_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pwritev_trampoline_addr(SB)/8, $libc_pwritev_trampoline<>(SB) + TEXT libc_fstat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fstat(SB) GLOBL ·libc_fstat_trampoline_addr(SB), RODATA, $8 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux.go 
b/vendor/golang.org/x/sys/unix/zsyscall_linux.go index 5cc1e8eb2..8935d10a3 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux.go @@ -2238,3 +2238,13 @@ func Mseal(b []byte, flags uint) (err error) { } return } + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setMemPolicy(mode int, mask *CPUSet, size int) (err error) { + _, _, e1 := Syscall(SYS_SET_MEMPOLICY, uintptr(mode), uintptr(unsafe.Pointer(mask)), uintptr(size)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} diff --git a/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go index c6545413c..b4609c20c 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go @@ -72,7 +72,7 @@ import ( //go:cgo_import_dynamic libc_kill kill "libc.so" //go:cgo_import_dynamic libc_lchown lchown "libc.so" //go:cgo_import_dynamic libc_link link "libc.so" -//go:cgo_import_dynamic libc___xnet_llisten __xnet_llisten "libsocket.so" +//go:cgo_import_dynamic libc___xnet_listen __xnet_listen "libsocket.so" //go:cgo_import_dynamic libc_lstat lstat "libc.so" //go:cgo_import_dynamic libc_madvise madvise "libc.so" //go:cgo_import_dynamic libc_mkdir mkdir "libc.so" @@ -221,7 +221,7 @@ import ( //go:linkname procKill libc_kill //go:linkname procLchown libc_lchown //go:linkname procLink libc_link -//go:linkname proc__xnet_llisten libc___xnet_llisten +//go:linkname proc__xnet_listen libc___xnet_listen //go:linkname procLstat libc_lstat //go:linkname procMadvise libc_madvise //go:linkname procMkdir libc_mkdir @@ -371,7 +371,7 @@ var ( procKill, procLchown, procLink, - proc__xnet_llisten, + proc__xnet_listen, procLstat, procMadvise, procMkdir, @@ -1178,7 +1178,7 @@ func Link(path string, link string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Listen(s int, backlog int) (err error) { - _, _, 
e1 := sysvicall6(uintptr(unsafe.Pointer(&proc__xnet_llisten)), 2, uintptr(s), uintptr(backlog), 0, 0, 0, 0) + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&proc__xnet_listen)), 2, uintptr(s), uintptr(backlog), 0, 0, 0, 0) if e1 != 0 { err = errnoErr(e1) } diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go index c79aaff30..aca56ee49 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go @@ -462,4 +462,5 @@ const ( SYS_GETXATTRAT = 464 SYS_LISTXATTRAT = 465 SYS_REMOVEXATTRAT = 466 + SYS_OPEN_TREE_ATTR = 467 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go index 5eb450695..2ea1ef58c 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go @@ -385,4 +385,5 @@ const ( SYS_GETXATTRAT = 464 SYS_LISTXATTRAT = 465 SYS_REMOVEXATTRAT = 466 + SYS_OPEN_TREE_ATTR = 467 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go index 05e502974..d22c8af31 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go @@ -426,4 +426,5 @@ const ( SYS_GETXATTRAT = 464 SYS_LISTXATTRAT = 465 SYS_REMOVEXATTRAT = 466 + SYS_OPEN_TREE_ATTR = 467 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go index 38c53ec51..5ee264ae9 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go @@ -329,4 +329,5 @@ const ( SYS_GETXATTRAT = 464 SYS_LISTXATTRAT = 465 SYS_REMOVEXATTRAT = 466 + SYS_OPEN_TREE_ATTR = 467 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go index 31d2e71a1..f9f03ebf5 100644 --- 
a/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go @@ -325,4 +325,5 @@ const ( SYS_GETXATTRAT = 464 SYS_LISTXATTRAT = 465 SYS_REMOVEXATTRAT = 466 + SYS_OPEN_TREE_ATTR = 467 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go index f4184a336..87c2118e8 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go @@ -446,4 +446,5 @@ const ( SYS_GETXATTRAT = 4464 SYS_LISTXATTRAT = 4465 SYS_REMOVEXATTRAT = 4466 + SYS_OPEN_TREE_ATTR = 4467 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go index 05b996227..391ad102f 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go @@ -376,4 +376,5 @@ const ( SYS_GETXATTRAT = 5464 SYS_LISTXATTRAT = 5465 SYS_REMOVEXATTRAT = 5466 + SYS_OPEN_TREE_ATTR = 5467 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go index 43a256e9e..565615775 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go @@ -376,4 +376,5 @@ const ( SYS_GETXATTRAT = 5464 SYS_LISTXATTRAT = 5465 SYS_REMOVEXATTRAT = 5466 + SYS_OPEN_TREE_ATTR = 5467 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go index eea5ddfc2..0482b52e3 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go @@ -446,4 +446,5 @@ const ( SYS_GETXATTRAT = 4464 SYS_LISTXATTRAT = 4465 SYS_REMOVEXATTRAT = 4466 + SYS_OPEN_TREE_ATTR = 4467 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go index 0d777bfbb..71806f08f 100644 --- 
a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go @@ -453,4 +453,5 @@ const ( SYS_GETXATTRAT = 464 SYS_LISTXATTRAT = 465 SYS_REMOVEXATTRAT = 466 + SYS_OPEN_TREE_ATTR = 467 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go index b44636502..e35a71058 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go @@ -425,4 +425,5 @@ const ( SYS_GETXATTRAT = 464 SYS_LISTXATTRAT = 465 SYS_REMOVEXATTRAT = 466 + SYS_OPEN_TREE_ATTR = 467 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go index 0c7d21c18..2aea47670 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go @@ -425,4 +425,5 @@ const ( SYS_GETXATTRAT = 464 SYS_LISTXATTRAT = 465 SYS_REMOVEXATTRAT = 466 + SYS_OPEN_TREE_ATTR = 467 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go index 840539169..6c9bb4e56 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go @@ -330,4 +330,5 @@ const ( SYS_GETXATTRAT = 464 SYS_LISTXATTRAT = 465 SYS_REMOVEXATTRAT = 466 + SYS_OPEN_TREE_ATTR = 467 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go index fcf1b790d..680bc9915 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go @@ -391,4 +391,5 @@ const ( SYS_GETXATTRAT = 464 SYS_LISTXATTRAT = 465 SYS_REMOVEXATTRAT = 466 + SYS_OPEN_TREE_ATTR = 467 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go index 52d15b5f9..620f27105 100644 --- 
a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go @@ -404,4 +404,5 @@ const ( SYS_GETXATTRAT = 464 SYS_LISTXATTRAT = 465 SYS_REMOVEXATTRAT = 466 + SYS_OPEN_TREE_ATTR = 467 ) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux.go b/vendor/golang.org/x/sys/unix/ztypes_linux.go index a46abe647..c1a467017 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux.go @@ -114,8 +114,10 @@ type Statx_t struct { Atomic_write_unit_min uint32 Atomic_write_unit_max uint32 Atomic_write_segments_max uint32 + Dio_read_offset_align uint32 + Atomic_write_unit_max_opt uint32 _ [1]uint32 - _ [9]uint64 + _ [8]uint64 } type Fsid struct { @@ -199,7 +201,8 @@ type FscryptAddKeyArg struct { Key_spec FscryptKeySpecifier Raw_size uint32 Key_id uint32 - _ [8]uint32 + Flags uint32 + _ [7]uint32 } type FscryptRemoveKeyArg struct { @@ -629,6 +632,8 @@ const ( IFA_FLAGS = 0x8 IFA_RT_PRIORITY = 0x9 IFA_TARGET_NETNSID = 0xa + IFAL_LABEL = 0x2 + IFAL_ADDRESS = 0x1 RT_SCOPE_UNIVERSE = 0x0 RT_SCOPE_SITE = 0xc8 RT_SCOPE_LINK = 0xfd @@ -686,6 +691,7 @@ const ( SizeofRtAttr = 0x4 SizeofIfInfomsg = 0x10 SizeofIfAddrmsg = 0x8 + SizeofIfAddrlblmsg = 0xc SizeofIfaCacheinfo = 0x10 SizeofRtMsg = 0xc SizeofRtNexthop = 0x8 @@ -737,6 +743,15 @@ type IfAddrmsg struct { Index uint32 } +type IfAddrlblmsg struct { + Family uint8 + _ uint8 + Prefixlen uint8 + Flags uint8 + Index uint32 + Seq uint32 +} + type IfaCacheinfo struct { Prefered uint32 Valid uint32 @@ -2226,8 +2241,11 @@ const ( NFT_PAYLOAD_LL_HEADER = 0x0 NFT_PAYLOAD_NETWORK_HEADER = 0x1 NFT_PAYLOAD_TRANSPORT_HEADER = 0x2 + NFT_PAYLOAD_INNER_HEADER = 0x3 + NFT_PAYLOAD_TUN_HEADER = 0x4 NFT_PAYLOAD_CSUM_NONE = 0x0 NFT_PAYLOAD_CSUM_INET = 0x1 + NFT_PAYLOAD_CSUM_SCTP = 0x2 NFT_PAYLOAD_L4CSUM_PSEUDOHDR = 0x1 NFTA_PAYLOAD_UNSPEC = 0x0 NFTA_PAYLOAD_DREG = 0x1 @@ -2314,6 +2332,11 @@ const ( NFT_CT_AVGPKT = 0x10 NFT_CT_ZONE = 0x11 NFT_CT_EVENTMASK = 
0x12 + NFT_CT_SRC_IP = 0x13 + NFT_CT_DST_IP = 0x14 + NFT_CT_SRC_IP6 = 0x15 + NFT_CT_DST_IP6 = 0x16 + NFT_CT_ID = 0x17 NFTA_CT_UNSPEC = 0x0 NFTA_CT_DREG = 0x1 NFTA_CT_KEY = 0x2 @@ -2594,8 +2617,8 @@ const ( SOF_TIMESTAMPING_BIND_PHC = 0x8000 SOF_TIMESTAMPING_OPT_ID_TCP = 0x10000 - SOF_TIMESTAMPING_LAST = 0x20000 - SOF_TIMESTAMPING_MASK = 0x3ffff + SOF_TIMESTAMPING_LAST = 0x40000 + SOF_TIMESTAMPING_MASK = 0x7ffff SCM_TSTAMP_SND = 0x0 SCM_TSTAMP_SCHED = 0x1 @@ -3041,6 +3064,23 @@ const ( ) const ( + TCA_UNSPEC = 0x0 + TCA_KIND = 0x1 + TCA_OPTIONS = 0x2 + TCA_STATS = 0x3 + TCA_XSTATS = 0x4 + TCA_RATE = 0x5 + TCA_FCNT = 0x6 + TCA_STATS2 = 0x7 + TCA_STAB = 0x8 + TCA_PAD = 0x9 + TCA_DUMP_INVISIBLE = 0xa + TCA_CHAIN = 0xb + TCA_HW_OFFLOAD = 0xc + TCA_INGRESS_BLOCK = 0xd + TCA_EGRESS_BLOCK = 0xe + TCA_DUMP_FLAGS = 0xf + TCA_EXT_WARN_MSG = 0x10 RTNLGRP_NONE = 0x0 RTNLGRP_LINK = 0x1 RTNLGRP_NOTIFY = 0x2 @@ -3075,6 +3115,18 @@ const ( RTNLGRP_IPV6_MROUTE_R = 0x1f RTNLGRP_NEXTHOP = 0x20 RTNLGRP_BRVLAN = 0x21 + RTNLGRP_MCTP_IFADDR = 0x22 + RTNLGRP_TUNNEL = 0x23 + RTNLGRP_STATS = 0x24 + RTNLGRP_IPV4_MCADDR = 0x25 + RTNLGRP_IPV6_MCADDR = 0x26 + RTNLGRP_IPV6_ACADDR = 0x27 + TCA_ROOT_UNSPEC = 0x0 + TCA_ROOT_TAB = 0x1 + TCA_ROOT_FLAGS = 0x2 + TCA_ROOT_COUNT = 0x3 + TCA_ROOT_TIME_DELTA = 0x4 + TCA_ROOT_EXT_WARN_MSG = 0x5 ) type CapUserHeader struct { @@ -3538,6 +3590,8 @@ type Nhmsg struct { Flags uint32 } +const SizeofNhmsg = 0x8 + type NexthopGrp struct { Id uint32 Weight uint8 @@ -3545,6 +3599,8 @@ type NexthopGrp struct { Resvd2 uint16 } +const SizeofNexthopGrp = 0x8 + const ( NHA_UNSPEC = 0x0 NHA_ID = 0x1 @@ -3802,7 +3858,16 @@ const ( ETHTOOL_MSG_PSE_GET = 0x24 ETHTOOL_MSG_PSE_SET = 0x25 ETHTOOL_MSG_RSS_GET = 0x26 - ETHTOOL_MSG_USER_MAX = 0x2d + ETHTOOL_MSG_PLCA_GET_CFG = 0x27 + ETHTOOL_MSG_PLCA_SET_CFG = 0x28 + ETHTOOL_MSG_PLCA_GET_STATUS = 0x29 + ETHTOOL_MSG_MM_GET = 0x2a + ETHTOOL_MSG_MM_SET = 0x2b + ETHTOOL_MSG_MODULE_FW_FLASH_ACT = 0x2c + ETHTOOL_MSG_PHY_GET = 0x2d + 
ETHTOOL_MSG_TSCONFIG_GET = 0x2e + ETHTOOL_MSG_TSCONFIG_SET = 0x2f + ETHTOOL_MSG_USER_MAX = 0x2f ETHTOOL_MSG_KERNEL_NONE = 0x0 ETHTOOL_MSG_STRSET_GET_REPLY = 0x1 ETHTOOL_MSG_LINKINFO_GET_REPLY = 0x2 @@ -3842,7 +3907,17 @@ const ( ETHTOOL_MSG_MODULE_NTF = 0x24 ETHTOOL_MSG_PSE_GET_REPLY = 0x25 ETHTOOL_MSG_RSS_GET_REPLY = 0x26 - ETHTOOL_MSG_KERNEL_MAX = 0x2e + ETHTOOL_MSG_PLCA_GET_CFG_REPLY = 0x27 + ETHTOOL_MSG_PLCA_GET_STATUS_REPLY = 0x28 + ETHTOOL_MSG_PLCA_NTF = 0x29 + ETHTOOL_MSG_MM_GET_REPLY = 0x2a + ETHTOOL_MSG_MM_NTF = 0x2b + ETHTOOL_MSG_MODULE_FW_FLASH_NTF = 0x2c + ETHTOOL_MSG_PHY_GET_REPLY = 0x2d + ETHTOOL_MSG_PHY_NTF = 0x2e + ETHTOOL_MSG_TSCONFIG_GET_REPLY = 0x2f + ETHTOOL_MSG_TSCONFIG_SET_REPLY = 0x30 + ETHTOOL_MSG_KERNEL_MAX = 0x30 ETHTOOL_FLAG_COMPACT_BITSETS = 0x1 ETHTOOL_FLAG_OMIT_REPLY = 0x2 ETHTOOL_FLAG_STATS = 0x4 @@ -3949,7 +4024,12 @@ const ( ETHTOOL_A_RINGS_TCP_DATA_SPLIT = 0xb ETHTOOL_A_RINGS_CQE_SIZE = 0xc ETHTOOL_A_RINGS_TX_PUSH = 0xd - ETHTOOL_A_RINGS_MAX = 0x10 + ETHTOOL_A_RINGS_RX_PUSH = 0xe + ETHTOOL_A_RINGS_TX_PUSH_BUF_LEN = 0xf + ETHTOOL_A_RINGS_TX_PUSH_BUF_LEN_MAX = 0x10 + ETHTOOL_A_RINGS_HDS_THRESH = 0x11 + ETHTOOL_A_RINGS_HDS_THRESH_MAX = 0x12 + ETHTOOL_A_RINGS_MAX = 0x12 ETHTOOL_A_CHANNELS_UNSPEC = 0x0 ETHTOOL_A_CHANNELS_HEADER = 0x1 ETHTOOL_A_CHANNELS_RX_MAX = 0x2 @@ -4015,7 +4095,9 @@ const ( ETHTOOL_A_TSINFO_TX_TYPES = 0x3 ETHTOOL_A_TSINFO_RX_FILTERS = 0x4 ETHTOOL_A_TSINFO_PHC_INDEX = 0x5 - ETHTOOL_A_TSINFO_MAX = 0x6 + ETHTOOL_A_TSINFO_STATS = 0x6 + ETHTOOL_A_TSINFO_HWTSTAMP_PROVIDER = 0x7 + ETHTOOL_A_TSINFO_MAX = 0x9 ETHTOOL_A_CABLE_TEST_UNSPEC = 0x0 ETHTOOL_A_CABLE_TEST_HEADER = 0x1 ETHTOOL_A_CABLE_TEST_MAX = 0x1 @@ -4101,6 +4183,19 @@ const ( ETHTOOL_A_TUNNEL_INFO_MAX = 0x2 ) +const ( + TCP_V4_FLOW = 0x1 + UDP_V4_FLOW = 0x2 + TCP_V6_FLOW = 0x5 + UDP_V6_FLOW = 0x6 + ESP_V4_FLOW = 0xa + ESP_V6_FLOW = 0xc + IP_USER_FLOW = 0xd + IPV6_USER_FLOW = 0xe + IPV6_FLOW = 0x11 + ETHER_FLOW = 0x12 +) + const SPEED_UNKNOWN = -0x1 type 
EthtoolDrvinfo struct { @@ -4613,6 +4708,7 @@ const ( NL80211_ATTR_AKM_SUITES = 0x4c NL80211_ATTR_AP_ISOLATE = 0x60 NL80211_ATTR_AP_SETTINGS_FLAGS = 0x135 + NL80211_ATTR_ASSOC_SPP_AMSDU = 0x14a NL80211_ATTR_AUTH_DATA = 0x9c NL80211_ATTR_AUTH_TYPE = 0x35 NL80211_ATTR_BANDS = 0xef @@ -4623,6 +4719,7 @@ const ( NL80211_ATTR_BSS_BASIC_RATES = 0x24 NL80211_ATTR_BSS = 0x2f NL80211_ATTR_BSS_CTS_PROT = 0x1c + NL80211_ATTR_BSS_DUMP_INCLUDE_USE_DATA = 0x147 NL80211_ATTR_BSS_HT_OPMODE = 0x6d NL80211_ATTR_BSSID = 0xf5 NL80211_ATTR_BSS_SELECT = 0xe3 @@ -4682,6 +4779,7 @@ const ( NL80211_ATTR_DTIM_PERIOD = 0xd NL80211_ATTR_DURATION = 0x57 NL80211_ATTR_EHT_CAPABILITY = 0x136 + NL80211_ATTR_EMA_RNR_ELEMS = 0x145 NL80211_ATTR_EML_CAPABILITY = 0x13d NL80211_ATTR_EXT_CAPA = 0xa9 NL80211_ATTR_EXT_CAPA_MASK = 0xaa @@ -4717,6 +4815,7 @@ const ( NL80211_ATTR_HIDDEN_SSID = 0x7e NL80211_ATTR_HT_CAPABILITY = 0x1f NL80211_ATTR_HT_CAPABILITY_MASK = 0x94 + NL80211_ATTR_HW_TIMESTAMP_ENABLED = 0x144 NL80211_ATTR_IE_ASSOC_RESP = 0x80 NL80211_ATTR_IE = 0x2a NL80211_ATTR_IE_PROBE_RESP = 0x7f @@ -4747,9 +4846,10 @@ const ( NL80211_ATTR_MAC_HINT = 0xc8 NL80211_ATTR_MAC_MASK = 0xd7 NL80211_ATTR_MAX_AP_ASSOC_STA = 0xca - NL80211_ATTR_MAX = 0x14d + NL80211_ATTR_MAX = 0x151 NL80211_ATTR_MAX_CRIT_PROT_DURATION = 0xb4 NL80211_ATTR_MAX_CSA_COUNTERS = 0xce + NL80211_ATTR_MAX_HW_TIMESTAMP_PEERS = 0x143 NL80211_ATTR_MAX_MATCH_SETS = 0x85 NL80211_ATTR_MAX_NUM_AKM_SUITES = 0x13c NL80211_ATTR_MAX_NUM_PMKIDS = 0x56 @@ -4774,9 +4874,12 @@ const ( NL80211_ATTR_MGMT_SUBTYPE = 0x29 NL80211_ATTR_MLD_ADDR = 0x13a NL80211_ATTR_MLD_CAPA_AND_OPS = 0x13e + NL80211_ATTR_MLO_LINK_DISABLED = 0x146 NL80211_ATTR_MLO_LINK_ID = 0x139 NL80211_ATTR_MLO_LINKS = 0x138 NL80211_ATTR_MLO_SUPPORT = 0x13b + NL80211_ATTR_MLO_TTLM_DLINK = 0x148 + NL80211_ATTR_MLO_TTLM_ULINK = 0x149 NL80211_ATTR_MNTR_FLAGS = 0x17 NL80211_ATTR_MPATH_INFO = 0x1b NL80211_ATTR_MPATH_NEXT_HOP = 0x1a @@ -4809,12 +4912,14 @@ const ( NL80211_ATTR_PORT_AUTHORIZED = 
0x103 NL80211_ATTR_POWER_RULE_MAX_ANT_GAIN = 0x5 NL80211_ATTR_POWER_RULE_MAX_EIRP = 0x6 + NL80211_ATTR_POWER_RULE_PSD = 0x8 NL80211_ATTR_PREV_BSSID = 0x4f NL80211_ATTR_PRIVACY = 0x46 NL80211_ATTR_PROBE_RESP = 0x91 NL80211_ATTR_PROBE_RESP_OFFLOAD = 0x90 NL80211_ATTR_PROTOCOL_FEATURES = 0xad NL80211_ATTR_PS_STATE = 0x5d + NL80211_ATTR_PUNCT_BITMAP = 0x142 NL80211_ATTR_QOS_MAP = 0xc7 NL80211_ATTR_RADAR_BACKGROUND = 0x134 NL80211_ATTR_RADAR_EVENT = 0xa8 @@ -4943,7 +5048,9 @@ const ( NL80211_ATTR_WIPHY_FREQ = 0x26 NL80211_ATTR_WIPHY_FREQ_HINT = 0xc9 NL80211_ATTR_WIPHY_FREQ_OFFSET = 0x122 + NL80211_ATTR_WIPHY_INTERFACE_COMBINATIONS = 0x14c NL80211_ATTR_WIPHY_NAME = 0x2 + NL80211_ATTR_WIPHY_RADIOS = 0x14b NL80211_ATTR_WIPHY_RETRY_LONG = 0x3e NL80211_ATTR_WIPHY_RETRY_SHORT = 0x3d NL80211_ATTR_WIPHY_RTS_THRESHOLD = 0x40 @@ -4978,6 +5085,8 @@ const ( NL80211_BAND_ATTR_IFTYPE_DATA = 0x9 NL80211_BAND_ATTR_MAX = 0xd NL80211_BAND_ATTR_RATES = 0x2 + NL80211_BAND_ATTR_S1G_CAPA = 0xd + NL80211_BAND_ATTR_S1G_MCS_NSS_SET = 0xc NL80211_BAND_ATTR_VHT_CAPA = 0x8 NL80211_BAND_ATTR_VHT_MCS_SET = 0x7 NL80211_BAND_IFTYPE_ATTR_EHT_CAP_MAC = 0x8 @@ -5001,6 +5110,10 @@ const ( NL80211_BSS_BEACON_INTERVAL = 0x4 NL80211_BSS_BEACON_TSF = 0xd NL80211_BSS_BSSID = 0x1 + NL80211_BSS_CANNOT_USE_6GHZ_PWR_MISMATCH = 0x2 + NL80211_BSS_CANNOT_USE_NSTR_NONPRIMARY = 0x1 + NL80211_BSS_CANNOT_USE_REASONS = 0x18 + NL80211_BSS_CANNOT_USE_UHB_PWR_MISMATCH = 0x2 NL80211_BSS_CAPABILITY = 0x5 NL80211_BSS_CHAIN_SIGNAL = 0x13 NL80211_BSS_CHAN_WIDTH_10 = 0x1 @@ -5032,6 +5145,9 @@ const ( NL80211_BSS_STATUS = 0x9 NL80211_BSS_STATUS_IBSS_JOINED = 0x2 NL80211_BSS_TSF = 0x3 + NL80211_BSS_USE_FOR = 0x17 + NL80211_BSS_USE_FOR_MLD_LINK = 0x2 + NL80211_BSS_USE_FOR_NORMAL = 0x1 NL80211_CHAN_HT20 = 0x1 NL80211_CHAN_HT40MINUS = 0x2 NL80211_CHAN_HT40PLUS = 0x3 @@ -5117,7 +5233,8 @@ const ( NL80211_CMD_LEAVE_IBSS = 0x2c NL80211_CMD_LEAVE_MESH = 0x45 NL80211_CMD_LEAVE_OCB = 0x6d - NL80211_CMD_MAX = 0x9b + NL80211_CMD_LINKS_REMOVED 
= 0x9a + NL80211_CMD_MAX = 0x9d NL80211_CMD_MICHAEL_MIC_FAILURE = 0x29 NL80211_CMD_MODIFY_LINK_STA = 0x97 NL80211_CMD_NAN_MATCH = 0x78 @@ -5161,6 +5278,7 @@ const ( NL80211_CMD_SET_COALESCE = 0x65 NL80211_CMD_SET_CQM = 0x3f NL80211_CMD_SET_FILS_AAD = 0x92 + NL80211_CMD_SET_HW_TIMESTAMP = 0x99 NL80211_CMD_SET_INTERFACE = 0x6 NL80211_CMD_SET_KEY = 0xa NL80211_CMD_SET_MAC_ACL = 0x5d @@ -5180,6 +5298,7 @@ const ( NL80211_CMD_SET_SAR_SPECS = 0x8c NL80211_CMD_SET_STATION = 0x12 NL80211_CMD_SET_TID_CONFIG = 0x89 + NL80211_CMD_SET_TID_TO_LINK_MAPPING = 0x9b NL80211_CMD_SET_TX_BITRATE_MASK = 0x39 NL80211_CMD_SET_WDS_PEER = 0x42 NL80211_CMD_SET_WIPHY = 0x2 @@ -5247,6 +5366,7 @@ const ( NL80211_EXT_FEATURE_AIRTIME_FAIRNESS = 0x21 NL80211_EXT_FEATURE_AP_PMKSA_CACHING = 0x22 NL80211_EXT_FEATURE_AQL = 0x28 + NL80211_EXT_FEATURE_AUTH_AND_DEAUTH_RANDOM_TA = 0x40 NL80211_EXT_FEATURE_BEACON_PROTECTION_CLIENT = 0x2e NL80211_EXT_FEATURE_BEACON_PROTECTION = 0x29 NL80211_EXT_FEATURE_BEACON_RATE_HE = 0x36 @@ -5262,6 +5382,7 @@ const ( NL80211_EXT_FEATURE_CQM_RSSI_LIST = 0xd NL80211_EXT_FEATURE_DATA_ACK_SIGNAL_SUPPORT = 0x1b NL80211_EXT_FEATURE_DEL_IBSS_STA = 0x2c + NL80211_EXT_FEATURE_DFS_CONCURRENT = 0x43 NL80211_EXT_FEATURE_DFS_OFFLOAD = 0x19 NL80211_EXT_FEATURE_ENABLE_FTM_RESPONDER = 0x20 NL80211_EXT_FEATURE_EXT_KEY_ID = 0x24 @@ -5281,9 +5402,12 @@ const ( NL80211_EXT_FEATURE_OCE_PROBE_REQ_DEFERRAL_SUPPRESSION = 0x14 NL80211_EXT_FEATURE_OCE_PROBE_REQ_HIGH_TX_RATE = 0x13 NL80211_EXT_FEATURE_OPERATING_CHANNEL_VALIDATION = 0x31 + NL80211_EXT_FEATURE_OWE_OFFLOAD_AP = 0x42 + NL80211_EXT_FEATURE_OWE_OFFLOAD = 0x41 NL80211_EXT_FEATURE_POWERED_ADDR_CHANGE = 0x3d NL80211_EXT_FEATURE_PROTECTED_TWT = 0x2b NL80211_EXT_FEATURE_PROT_RANGE_NEGO_AND_MEASURE = 0x39 + NL80211_EXT_FEATURE_PUNCT = 0x3e NL80211_EXT_FEATURE_RADAR_BACKGROUND = 0x3c NL80211_EXT_FEATURE_RRM = 0x1 NL80211_EXT_FEATURE_SAE_OFFLOAD_AP = 0x33 @@ -5295,8 +5419,10 @@ const ( NL80211_EXT_FEATURE_SCHED_SCAN_BAND_SPECIFIC_RSSI_THOLD = 
0x23 NL80211_EXT_FEATURE_SCHED_SCAN_RELATIVE_RSSI = 0xc NL80211_EXT_FEATURE_SECURE_LTF = 0x37 + NL80211_EXT_FEATURE_SECURE_NAN = 0x3f NL80211_EXT_FEATURE_SECURE_RTT = 0x38 NL80211_EXT_FEATURE_SET_SCAN_DWELL = 0x5 + NL80211_EXT_FEATURE_SPP_AMSDU_SUPPORT = 0x44 NL80211_EXT_FEATURE_STA_TX_PWR = 0x25 NL80211_EXT_FEATURE_TXQS = 0x1c NL80211_EXT_FEATURE_UNSOL_BCAST_PROBE_RESP = 0x35 @@ -5343,7 +5469,10 @@ const ( NL80211_FREQUENCY_ATTR_2MHZ = 0x16 NL80211_FREQUENCY_ATTR_4MHZ = 0x17 NL80211_FREQUENCY_ATTR_8MHZ = 0x18 + NL80211_FREQUENCY_ATTR_ALLOW_6GHZ_VLP_AP = 0x21 + NL80211_FREQUENCY_ATTR_CAN_MONITOR = 0x20 NL80211_FREQUENCY_ATTR_DFS_CAC_TIME = 0xd + NL80211_FREQUENCY_ATTR_DFS_CONCURRENT = 0x1d NL80211_FREQUENCY_ATTR_DFS_STATE = 0x7 NL80211_FREQUENCY_ATTR_DFS_TIME = 0x8 NL80211_FREQUENCY_ATTR_DISABLED = 0x2 @@ -5351,12 +5480,14 @@ const ( NL80211_FREQUENCY_ATTR_GO_CONCURRENT = 0xf NL80211_FREQUENCY_ATTR_INDOOR_ONLY = 0xe NL80211_FREQUENCY_ATTR_IR_CONCURRENT = 0xf - NL80211_FREQUENCY_ATTR_MAX = 0x21 + NL80211_FREQUENCY_ATTR_MAX = 0x22 NL80211_FREQUENCY_ATTR_MAX_TX_POWER = 0x6 NL80211_FREQUENCY_ATTR_NO_10MHZ = 0x11 NL80211_FREQUENCY_ATTR_NO_160MHZ = 0xc NL80211_FREQUENCY_ATTR_NO_20MHZ = 0x10 NL80211_FREQUENCY_ATTR_NO_320MHZ = 0x1a + NL80211_FREQUENCY_ATTR_NO_6GHZ_AFC_CLIENT = 0x1f + NL80211_FREQUENCY_ATTR_NO_6GHZ_VLP_CLIENT = 0x1e NL80211_FREQUENCY_ATTR_NO_80MHZ = 0xb NL80211_FREQUENCY_ATTR_NO_EHT = 0x1b NL80211_FREQUENCY_ATTR_NO_HE = 0x13 @@ -5364,8 +5495,11 @@ const ( NL80211_FREQUENCY_ATTR_NO_HT40_PLUS = 0xa NL80211_FREQUENCY_ATTR_NO_IBSS = 0x3 NL80211_FREQUENCY_ATTR_NO_IR = 0x3 + NL80211_FREQUENCY_ATTR_NO_UHB_AFC_CLIENT = 0x1f + NL80211_FREQUENCY_ATTR_NO_UHB_VLP_CLIENT = 0x1e NL80211_FREQUENCY_ATTR_OFFSET = 0x14 NL80211_FREQUENCY_ATTR_PASSIVE_SCAN = 0x3 + NL80211_FREQUENCY_ATTR_PSD = 0x1c NL80211_FREQUENCY_ATTR_RADAR = 0x5 NL80211_FREQUENCY_ATTR_WMM = 0x12 NL80211_FTM_RESP_ATTR_CIVICLOC = 0x3 @@ -5430,6 +5564,7 @@ const ( NL80211_IFTYPE_STATION = 0x2 
NL80211_IFTYPE_UNSPECIFIED = 0x0 NL80211_IFTYPE_WDS = 0x5 + NL80211_KCK_EXT_LEN_32 = 0x20 NL80211_KCK_EXT_LEN = 0x18 NL80211_KCK_LEN = 0x10 NL80211_KEK_EXT_LEN = 0x20 @@ -5458,9 +5593,10 @@ const ( NL80211_MAX_SUPP_HT_RATES = 0x4d NL80211_MAX_SUPP_RATES = 0x20 NL80211_MAX_SUPP_REG_RULES = 0x80 + NL80211_MAX_SUPP_SELECTORS = 0x80 NL80211_MBSSID_CONFIG_ATTR_EMA = 0x5 NL80211_MBSSID_CONFIG_ATTR_INDEX = 0x3 - NL80211_MBSSID_CONFIG_ATTR_MAX = 0x5 + NL80211_MBSSID_CONFIG_ATTR_MAX = 0x6 NL80211_MBSSID_CONFIG_ATTR_MAX_EMA_PROFILE_PERIODICITY = 0x2 NL80211_MBSSID_CONFIG_ATTR_MAX_INTERFACES = 0x1 NL80211_MBSSID_CONFIG_ATTR_TX_IFINDEX = 0x4 @@ -5703,11 +5839,16 @@ const ( NL80211_RADAR_PRE_CAC_EXPIRED = 0x4 NL80211_RATE_INFO_10_MHZ_WIDTH = 0xb NL80211_RATE_INFO_160_MHZ_WIDTH = 0xa + NL80211_RATE_INFO_16_MHZ_WIDTH = 0x1d + NL80211_RATE_INFO_1_MHZ_WIDTH = 0x19 + NL80211_RATE_INFO_2_MHZ_WIDTH = 0x1a NL80211_RATE_INFO_320_MHZ_WIDTH = 0x12 NL80211_RATE_INFO_40_MHZ_WIDTH = 0x3 + NL80211_RATE_INFO_4_MHZ_WIDTH = 0x1b NL80211_RATE_INFO_5_MHZ_WIDTH = 0xc NL80211_RATE_INFO_80_MHZ_WIDTH = 0x8 NL80211_RATE_INFO_80P80_MHZ_WIDTH = 0x9 + NL80211_RATE_INFO_8_MHZ_WIDTH = 0x1c NL80211_RATE_INFO_BITRATE32 = 0x5 NL80211_RATE_INFO_BITRATE = 0x1 NL80211_RATE_INFO_EHT_GI_0_8 = 0x0 @@ -5753,6 +5894,8 @@ const ( NL80211_RATE_INFO_HE_RU_ALLOC = 0x11 NL80211_RATE_INFO_MAX = 0x1d NL80211_RATE_INFO_MCS = 0x2 + NL80211_RATE_INFO_S1G_MCS = 0x17 + NL80211_RATE_INFO_S1G_NSS = 0x18 NL80211_RATE_INFO_SHORT_GI = 0x4 NL80211_RATE_INFO_VHT_MCS = 0x6 NL80211_RATE_INFO_VHT_NSS = 0x7 @@ -5770,14 +5913,19 @@ const ( NL80211_REKEY_DATA_KEK = 0x1 NL80211_REKEY_DATA_REPLAY_CTR = 0x3 NL80211_REPLAY_CTR_LEN = 0x8 + NL80211_RRF_ALLOW_6GHZ_VLP_AP = 0x1000000 NL80211_RRF_AUTO_BW = 0x800 NL80211_RRF_DFS = 0x10 + NL80211_RRF_DFS_CONCURRENT = 0x200000 NL80211_RRF_GO_CONCURRENT = 0x1000 NL80211_RRF_IR_CONCURRENT = 0x1000 NL80211_RRF_NO_160MHZ = 0x10000 NL80211_RRF_NO_320MHZ = 0x40000 + NL80211_RRF_NO_6GHZ_AFC_CLIENT = 0x800000 + 
NL80211_RRF_NO_6GHZ_VLP_CLIENT = 0x400000 NL80211_RRF_NO_80MHZ = 0x8000 NL80211_RRF_NO_CCK = 0x2 + NL80211_RRF_NO_EHT = 0x80000 NL80211_RRF_NO_HE = 0x20000 NL80211_RRF_NO_HT40 = 0x6000 NL80211_RRF_NO_HT40MINUS = 0x2000 @@ -5788,7 +5936,10 @@ const ( NL80211_RRF_NO_IR = 0x80 NL80211_RRF_NO_OFDM = 0x1 NL80211_RRF_NO_OUTDOOR = 0x8 + NL80211_RRF_NO_UHB_AFC_CLIENT = 0x800000 + NL80211_RRF_NO_UHB_VLP_CLIENT = 0x400000 NL80211_RRF_PASSIVE_SCAN = 0x80 + NL80211_RRF_PSD = 0x100000 NL80211_RRF_PTMP_ONLY = 0x40 NL80211_RRF_PTP_ONLY = 0x20 NL80211_RXMGMT_FLAG_ANSWERED = 0x1 @@ -5849,6 +6000,7 @@ const ( NL80211_STA_FLAG_MAX_OLD_API = 0x6 NL80211_STA_FLAG_MFP = 0x4 NL80211_STA_FLAG_SHORT_PREAMBLE = 0x2 + NL80211_STA_FLAG_SPP_AMSDU = 0x8 NL80211_STA_FLAG_TDLS_PEER = 0x6 NL80211_STA_FLAG_WME = 0x3 NL80211_STA_INFO_ACK_SIGNAL_AVG = 0x23 @@ -6007,6 +6159,13 @@ const ( NL80211_VHT_CAPABILITY_LEN = 0xc NL80211_VHT_NSS_MAX = 0x8 NL80211_WIPHY_NAME_MAXLEN = 0x40 + NL80211_WIPHY_RADIO_ATTR_FREQ_RANGE = 0x2 + NL80211_WIPHY_RADIO_ATTR_INDEX = 0x1 + NL80211_WIPHY_RADIO_ATTR_INTERFACE_COMBINATION = 0x3 + NL80211_WIPHY_RADIO_ATTR_MAX = 0x4 + NL80211_WIPHY_RADIO_FREQ_ATTR_END = 0x2 + NL80211_WIPHY_RADIO_FREQ_ATTR_MAX = 0x2 + NL80211_WIPHY_RADIO_FREQ_ATTR_START = 0x1 NL80211_WMMR_AIFSN = 0x3 NL80211_WMMR_CW_MAX = 0x2 NL80211_WMMR_CW_MIN = 0x1 @@ -6038,6 +6197,7 @@ const ( NL80211_WOWLAN_TRIG_PKT_PATTERN = 0x4 NL80211_WOWLAN_TRIG_RFKILL_RELEASE = 0x9 NL80211_WOWLAN_TRIG_TCP_CONNECTION = 0xe + NL80211_WOWLAN_TRIG_UNPROTECTED_DEAUTH_DISASSOC = 0x14 NL80211_WOWLAN_TRIG_WAKEUP_PKT_80211 = 0xa NL80211_WOWLAN_TRIG_WAKEUP_PKT_80211_LEN = 0xb NL80211_WOWLAN_TRIG_WAKEUP_PKT_8023 = 0xc @@ -6176,3 +6336,30 @@ type SockDiagReq struct { } const RTM_NEWNVLAN = 0x70 + +const ( + MPOL_BIND = 0x2 + MPOL_DEFAULT = 0x0 + MPOL_F_ADDR = 0x2 + MPOL_F_MEMS_ALLOWED = 0x4 + MPOL_F_MOF = 0x8 + MPOL_F_MORON = 0x10 + MPOL_F_NODE = 0x1 + MPOL_F_NUMA_BALANCING = 0x2000 + MPOL_F_RELATIVE_NODES = 0x4000 + MPOL_F_SHARED = 0x1 
+ MPOL_F_STATIC_NODES = 0x8000 + MPOL_INTERLEAVE = 0x3 + MPOL_LOCAL = 0x4 + MPOL_MAX = 0x7 + MPOL_MF_INTERNAL = 0x10 + MPOL_MF_LAZY = 0x8 + MPOL_MF_MOVE_ALL = 0x4 + MPOL_MF_MOVE = 0x2 + MPOL_MF_STRICT = 0x1 + MPOL_MF_VALID = 0x7 + MPOL_MODE_FLAGS = 0xe000 + MPOL_PREFERRED = 0x1 + MPOL_PREFERRED_MANY = 0x5 + MPOL_WEIGHTED_INTERLEAVE = 0x6 +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go index fd402da43..485f2d3a1 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go @@ -282,7 +282,7 @@ type Taskstats struct { Ac_exitcode uint32 Ac_flag uint8 Ac_nice uint8 - _ [4]byte + _ [6]byte Cpu_count uint64 Cpu_delay_total uint64 Blkio_count uint64 @@ -338,6 +338,22 @@ type Taskstats struct { Wpcopy_delay_total uint64 Irq_count uint64 Irq_delay_total uint64 + Cpu_delay_max uint64 + Cpu_delay_min uint64 + Blkio_delay_max uint64 + Blkio_delay_min uint64 + Swapin_delay_max uint64 + Swapin_delay_min uint64 + Freepages_delay_max uint64 + Freepages_delay_min uint64 + Thrashing_delay_max uint64 + Thrashing_delay_min uint64 + Compact_delay_max uint64 + Compact_delay_min uint64 + Wpcopy_delay_max uint64 + Wpcopy_delay_min uint64 + Irq_delay_max uint64 + Irq_delay_min uint64 } type cpuMask uint32 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go index eb7a5e186..ecbd1ad8b 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go @@ -351,6 +351,22 @@ type Taskstats struct { Wpcopy_delay_total uint64 Irq_count uint64 Irq_delay_total uint64 + Cpu_delay_max uint64 + Cpu_delay_min uint64 + Blkio_delay_max uint64 + Blkio_delay_min uint64 + Swapin_delay_max uint64 + Swapin_delay_min uint64 + Freepages_delay_max uint64 + Freepages_delay_min uint64 + Thrashing_delay_max uint64 + Thrashing_delay_min uint64 + Compact_delay_max uint64 + 
Compact_delay_min uint64 + Wpcopy_delay_max uint64 + Wpcopy_delay_min uint64 + Irq_delay_max uint64 + Irq_delay_min uint64 } type cpuMask uint64 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go index d78ac108b..02f0463a4 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go @@ -91,7 +91,7 @@ type Stat_t struct { Gid uint32 Rdev uint64 _ uint16 - _ [4]byte + _ [6]byte Size int64 Blksize int32 _ [4]byte @@ -273,7 +273,7 @@ type Taskstats struct { Ac_exitcode uint32 Ac_flag uint8 Ac_nice uint8 - _ [4]byte + _ [6]byte Cpu_count uint64 Cpu_delay_total uint64 Blkio_count uint64 @@ -329,6 +329,22 @@ type Taskstats struct { Wpcopy_delay_total uint64 Irq_count uint64 Irq_delay_total uint64 + Cpu_delay_max uint64 + Cpu_delay_min uint64 + Blkio_delay_max uint64 + Blkio_delay_min uint64 + Swapin_delay_max uint64 + Swapin_delay_min uint64 + Freepages_delay_max uint64 + Freepages_delay_min uint64 + Thrashing_delay_max uint64 + Thrashing_delay_min uint64 + Compact_delay_max uint64 + Compact_delay_min uint64 + Wpcopy_delay_max uint64 + Wpcopy_delay_min uint64 + Irq_delay_max uint64 + Irq_delay_min uint64 } type cpuMask uint32 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go index cd06d47f1..6f4d400d2 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go @@ -330,6 +330,22 @@ type Taskstats struct { Wpcopy_delay_total uint64 Irq_count uint64 Irq_delay_total uint64 + Cpu_delay_max uint64 + Cpu_delay_min uint64 + Blkio_delay_max uint64 + Blkio_delay_min uint64 + Swapin_delay_max uint64 + Swapin_delay_min uint64 + Freepages_delay_max uint64 + Freepages_delay_min uint64 + Thrashing_delay_max uint64 + Thrashing_delay_min uint64 + Compact_delay_max uint64 + Compact_delay_min uint64 + Wpcopy_delay_max uint64 + Wpcopy_delay_min 
uint64 + Irq_delay_max uint64 + Irq_delay_min uint64 } type cpuMask uint64 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go index 2f28fe26c..cd532cfa5 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go @@ -331,6 +331,22 @@ type Taskstats struct { Wpcopy_delay_total uint64 Irq_count uint64 Irq_delay_total uint64 + Cpu_delay_max uint64 + Cpu_delay_min uint64 + Blkio_delay_max uint64 + Blkio_delay_min uint64 + Swapin_delay_max uint64 + Swapin_delay_min uint64 + Freepages_delay_max uint64 + Freepages_delay_min uint64 + Thrashing_delay_max uint64 + Thrashing_delay_min uint64 + Compact_delay_max uint64 + Compact_delay_min uint64 + Wpcopy_delay_max uint64 + Wpcopy_delay_min uint64 + Irq_delay_max uint64 + Irq_delay_min uint64 } type cpuMask uint64 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go index 71d6cac2f..413362085 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go @@ -278,7 +278,7 @@ type Taskstats struct { Ac_exitcode uint32 Ac_flag uint8 Ac_nice uint8 - _ [4]byte + _ [6]byte Cpu_count uint64 Cpu_delay_total uint64 Blkio_count uint64 @@ -334,6 +334,22 @@ type Taskstats struct { Wpcopy_delay_total uint64 Irq_count uint64 Irq_delay_total uint64 + Cpu_delay_max uint64 + Cpu_delay_min uint64 + Blkio_delay_max uint64 + Blkio_delay_min uint64 + Swapin_delay_max uint64 + Swapin_delay_min uint64 + Freepages_delay_max uint64 + Freepages_delay_min uint64 + Thrashing_delay_max uint64 + Thrashing_delay_min uint64 + Compact_delay_max uint64 + Compact_delay_min uint64 + Wpcopy_delay_max uint64 + Wpcopy_delay_min uint64 + Irq_delay_max uint64 + Irq_delay_min uint64 } type cpuMask uint32 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go 
index 8596d4535..eaa37eb71 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go @@ -333,6 +333,22 @@ type Taskstats struct { Wpcopy_delay_total uint64 Irq_count uint64 Irq_delay_total uint64 + Cpu_delay_max uint64 + Cpu_delay_min uint64 + Blkio_delay_max uint64 + Blkio_delay_min uint64 + Swapin_delay_max uint64 + Swapin_delay_min uint64 + Freepages_delay_max uint64 + Freepages_delay_min uint64 + Thrashing_delay_max uint64 + Thrashing_delay_min uint64 + Compact_delay_max uint64 + Compact_delay_min uint64 + Wpcopy_delay_max uint64 + Wpcopy_delay_min uint64 + Irq_delay_max uint64 + Irq_delay_min uint64 } type cpuMask uint64 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go index cd60ea186..98ae6a1e4 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go @@ -333,6 +333,22 @@ type Taskstats struct { Wpcopy_delay_total uint64 Irq_count uint64 Irq_delay_total uint64 + Cpu_delay_max uint64 + Cpu_delay_min uint64 + Blkio_delay_max uint64 + Blkio_delay_min uint64 + Swapin_delay_max uint64 + Swapin_delay_min uint64 + Freepages_delay_max uint64 + Freepages_delay_min uint64 + Thrashing_delay_max uint64 + Thrashing_delay_min uint64 + Compact_delay_max uint64 + Compact_delay_min uint64 + Wpcopy_delay_max uint64 + Wpcopy_delay_min uint64 + Irq_delay_max uint64 + Irq_delay_min uint64 } type cpuMask uint64 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go index b0ae420c4..cae196159 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go @@ -278,7 +278,7 @@ type Taskstats struct { Ac_exitcode uint32 Ac_flag uint8 Ac_nice uint8 - _ [4]byte + _ [6]byte Cpu_count uint64 Cpu_delay_total uint64 Blkio_count uint64 @@ -334,6 +334,22 @@ type 
Taskstats struct { Wpcopy_delay_total uint64 Irq_count uint64 Irq_delay_total uint64 + Cpu_delay_max uint64 + Cpu_delay_min uint64 + Blkio_delay_max uint64 + Blkio_delay_min uint64 + Swapin_delay_max uint64 + Swapin_delay_min uint64 + Freepages_delay_max uint64 + Freepages_delay_min uint64 + Thrashing_delay_max uint64 + Thrashing_delay_min uint64 + Compact_delay_max uint64 + Compact_delay_min uint64 + Wpcopy_delay_max uint64 + Wpcopy_delay_min uint64 + Irq_delay_max uint64 + Irq_delay_min uint64 } type cpuMask uint32 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go index 835972875..6ce3b4e02 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go @@ -90,7 +90,7 @@ type Stat_t struct { Gid uint32 Rdev uint64 _ uint16 - _ [4]byte + _ [6]byte Size int64 Blksize int32 _ [4]byte @@ -285,7 +285,7 @@ type Taskstats struct { Ac_exitcode uint32 Ac_flag uint8 Ac_nice uint8 - _ [4]byte + _ [6]byte Cpu_count uint64 Cpu_delay_total uint64 Blkio_count uint64 @@ -341,6 +341,22 @@ type Taskstats struct { Wpcopy_delay_total uint64 Irq_count uint64 Irq_delay_total uint64 + Cpu_delay_max uint64 + Cpu_delay_min uint64 + Blkio_delay_max uint64 + Blkio_delay_min uint64 + Swapin_delay_max uint64 + Swapin_delay_min uint64 + Freepages_delay_max uint64 + Freepages_delay_min uint64 + Thrashing_delay_max uint64 + Thrashing_delay_min uint64 + Compact_delay_max uint64 + Compact_delay_min uint64 + Wpcopy_delay_max uint64 + Wpcopy_delay_min uint64 + Irq_delay_max uint64 + Irq_delay_min uint64 } type cpuMask uint32 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go index 69eb6a5c6..c7429c6a1 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go @@ -340,6 +340,22 @@ type Taskstats struct { Wpcopy_delay_total uint64 Irq_count uint64 
Irq_delay_total uint64 + Cpu_delay_max uint64 + Cpu_delay_min uint64 + Blkio_delay_max uint64 + Blkio_delay_min uint64 + Swapin_delay_max uint64 + Swapin_delay_min uint64 + Freepages_delay_max uint64 + Freepages_delay_min uint64 + Thrashing_delay_max uint64 + Thrashing_delay_min uint64 + Compact_delay_max uint64 + Compact_delay_min uint64 + Wpcopy_delay_max uint64 + Wpcopy_delay_min uint64 + Irq_delay_max uint64 + Irq_delay_min uint64 } type cpuMask uint64 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go index 5f583cb62..4bf4baf4c 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go @@ -340,6 +340,22 @@ type Taskstats struct { Wpcopy_delay_total uint64 Irq_count uint64 Irq_delay_total uint64 + Cpu_delay_max uint64 + Cpu_delay_min uint64 + Blkio_delay_max uint64 + Blkio_delay_min uint64 + Swapin_delay_max uint64 + Swapin_delay_min uint64 + Freepages_delay_max uint64 + Freepages_delay_min uint64 + Thrashing_delay_max uint64 + Thrashing_delay_min uint64 + Compact_delay_max uint64 + Compact_delay_min uint64 + Wpcopy_delay_max uint64 + Wpcopy_delay_min uint64 + Irq_delay_max uint64 + Irq_delay_min uint64 } type cpuMask uint64 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go index ad05b51a6..e9709d70a 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go @@ -358,6 +358,22 @@ type Taskstats struct { Wpcopy_delay_total uint64 Irq_count uint64 Irq_delay_total uint64 + Cpu_delay_max uint64 + Cpu_delay_min uint64 + Blkio_delay_max uint64 + Blkio_delay_min uint64 + Swapin_delay_max uint64 + Swapin_delay_min uint64 + Freepages_delay_max uint64 + Freepages_delay_min uint64 + Thrashing_delay_max uint64 + Thrashing_delay_min uint64 + Compact_delay_max uint64 + Compact_delay_min uint64 + 
Wpcopy_delay_max uint64 + Wpcopy_delay_min uint64 + Irq_delay_max uint64 + Irq_delay_min uint64 } type cpuMask uint64 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go b/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go index cf3ce9003..fb44268ca 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go @@ -353,6 +353,22 @@ type Taskstats struct { Wpcopy_delay_total uint64 Irq_count uint64 Irq_delay_total uint64 + Cpu_delay_max uint64 + Cpu_delay_min uint64 + Blkio_delay_max uint64 + Blkio_delay_min uint64 + Swapin_delay_max uint64 + Swapin_delay_min uint64 + Freepages_delay_max uint64 + Freepages_delay_min uint64 + Thrashing_delay_max uint64 + Thrashing_delay_min uint64 + Compact_delay_max uint64 + Compact_delay_min uint64 + Wpcopy_delay_max uint64 + Wpcopy_delay_min uint64 + Irq_delay_max uint64 + Irq_delay_min uint64 } type cpuMask uint64 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go index 590b56739..9c38265c7 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go @@ -335,6 +335,22 @@ type Taskstats struct { Wpcopy_delay_total uint64 Irq_count uint64 Irq_delay_total uint64 + Cpu_delay_max uint64 + Cpu_delay_min uint64 + Blkio_delay_max uint64 + Blkio_delay_min uint64 + Swapin_delay_max uint64 + Swapin_delay_min uint64 + Freepages_delay_max uint64 + Freepages_delay_min uint64 + Thrashing_delay_max uint64 + Thrashing_delay_min uint64 + Compact_delay_max uint64 + Compact_delay_min uint64 + Wpcopy_delay_max uint64 + Wpcopy_delay_min uint64 + Irq_delay_max uint64 + Irq_delay_min uint64 } type cpuMask uint64 diff --git a/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go b/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go index 439548ec9..50e8e6449 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go +++ 
b/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go @@ -104,7 +104,7 @@ type Statvfs_t struct { Fsid uint32 Namemax uint32 Owner uint32 - Spare [4]uint32 + Spare [4]uint64 Fstypename [32]byte Mntonname [1024]byte Mntfromname [1024]byte diff --git a/vendor/golang.org/x/sys/windows/registry/key.go b/vendor/golang.org/x/sys/windows/registry/key.go index fd8632444..39aeeb644 100644 --- a/vendor/golang.org/x/sys/windows/registry/key.go +++ b/vendor/golang.org/x/sys/windows/registry/key.go @@ -164,7 +164,12 @@ loopItems: func CreateKey(k Key, path string, access uint32) (newk Key, openedExisting bool, err error) { var h syscall.Handle var d uint32 - err = regCreateKeyEx(syscall.Handle(k), syscall.StringToUTF16Ptr(path), + var pathPointer *uint16 + pathPointer, err = syscall.UTF16PtrFromString(path) + if err != nil { + return 0, false, err + } + err = regCreateKeyEx(syscall.Handle(k), pathPointer, 0, nil, _REG_OPTION_NON_VOLATILE, access, nil, &h, &d) if err != nil { return 0, false, err @@ -174,7 +179,11 @@ func CreateKey(k Key, path string, access uint32) (newk Key, openedExisting bool // DeleteKey deletes the subkey path of key k and its values. func DeleteKey(k Key, path string) error { - return regDeleteKey(syscall.Handle(k), syscall.StringToUTF16Ptr(path)) + pathPointer, err := syscall.UTF16PtrFromString(path) + if err != nil { + return err + } + return regDeleteKey(syscall.Handle(k), pathPointer) } // A KeyInfo describes the statistics of a key. It is returned by Stat. diff --git a/vendor/golang.org/x/sys/windows/registry/value.go b/vendor/golang.org/x/sys/windows/registry/value.go index 74db26b94..a1bcbb236 100644 --- a/vendor/golang.org/x/sys/windows/registry/value.go +++ b/vendor/golang.org/x/sys/windows/registry/value.go @@ -340,7 +340,11 @@ func (k Key) SetBinaryValue(name string, value []byte) error { // DeleteValue removes a named value from the key k. 
func (k Key) DeleteValue(name string) error { - return regDeleteValue(syscall.Handle(k), syscall.StringToUTF16Ptr(name)) + namePointer, err := syscall.UTF16PtrFromString(name) + if err != nil { + return err + } + return regDeleteValue(syscall.Handle(k), namePointer) } // ReadValueNames returns the value names of key k. diff --git a/vendor/golang.org/x/sys/windows/registry/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/registry/zsyscall_windows.go index fc1835d8a..bc1ce4360 100644 --- a/vendor/golang.org/x/sys/windows/registry/zsyscall_windows.go +++ b/vendor/golang.org/x/sys/windows/registry/zsyscall_windows.go @@ -52,7 +52,7 @@ var ( ) func regConnectRegistry(machinename *uint16, key syscall.Handle, result *syscall.Handle) (regerrno error) { - r0, _, _ := syscall.Syscall(procRegConnectRegistryW.Addr(), 3, uintptr(unsafe.Pointer(machinename)), uintptr(key), uintptr(unsafe.Pointer(result))) + r0, _, _ := syscall.SyscallN(procRegConnectRegistryW.Addr(), uintptr(unsafe.Pointer(machinename)), uintptr(key), uintptr(unsafe.Pointer(result))) if r0 != 0 { regerrno = syscall.Errno(r0) } @@ -60,7 +60,7 @@ func regConnectRegistry(machinename *uint16, key syscall.Handle, result *syscall } func regCreateKeyEx(key syscall.Handle, subkey *uint16, reserved uint32, class *uint16, options uint32, desired uint32, sa *syscall.SecurityAttributes, result *syscall.Handle, disposition *uint32) (regerrno error) { - r0, _, _ := syscall.Syscall9(procRegCreateKeyExW.Addr(), 9, uintptr(key), uintptr(unsafe.Pointer(subkey)), uintptr(reserved), uintptr(unsafe.Pointer(class)), uintptr(options), uintptr(desired), uintptr(unsafe.Pointer(sa)), uintptr(unsafe.Pointer(result)), uintptr(unsafe.Pointer(disposition))) + r0, _, _ := syscall.SyscallN(procRegCreateKeyExW.Addr(), uintptr(key), uintptr(unsafe.Pointer(subkey)), uintptr(reserved), uintptr(unsafe.Pointer(class)), uintptr(options), uintptr(desired), uintptr(unsafe.Pointer(sa)), uintptr(unsafe.Pointer(result)), 
uintptr(unsafe.Pointer(disposition))) if r0 != 0 { regerrno = syscall.Errno(r0) } @@ -68,7 +68,7 @@ func regCreateKeyEx(key syscall.Handle, subkey *uint16, reserved uint32, class * } func regDeleteKey(key syscall.Handle, subkey *uint16) (regerrno error) { - r0, _, _ := syscall.Syscall(procRegDeleteKeyW.Addr(), 2, uintptr(key), uintptr(unsafe.Pointer(subkey)), 0) + r0, _, _ := syscall.SyscallN(procRegDeleteKeyW.Addr(), uintptr(key), uintptr(unsafe.Pointer(subkey))) if r0 != 0 { regerrno = syscall.Errno(r0) } @@ -76,7 +76,7 @@ func regDeleteKey(key syscall.Handle, subkey *uint16) (regerrno error) { } func regDeleteValue(key syscall.Handle, name *uint16) (regerrno error) { - r0, _, _ := syscall.Syscall(procRegDeleteValueW.Addr(), 2, uintptr(key), uintptr(unsafe.Pointer(name)), 0) + r0, _, _ := syscall.SyscallN(procRegDeleteValueW.Addr(), uintptr(key), uintptr(unsafe.Pointer(name))) if r0 != 0 { regerrno = syscall.Errno(r0) } @@ -84,7 +84,7 @@ func regDeleteValue(key syscall.Handle, name *uint16) (regerrno error) { } func regEnumValue(key syscall.Handle, index uint32, name *uint16, nameLen *uint32, reserved *uint32, valtype *uint32, buf *byte, buflen *uint32) (regerrno error) { - r0, _, _ := syscall.Syscall9(procRegEnumValueW.Addr(), 8, uintptr(key), uintptr(index), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(nameLen)), uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(valtype)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(buflen)), 0) + r0, _, _ := syscall.SyscallN(procRegEnumValueW.Addr(), uintptr(key), uintptr(index), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(nameLen)), uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(valtype)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(buflen))) if r0 != 0 { regerrno = syscall.Errno(r0) } @@ -92,7 +92,7 @@ func regEnumValue(key syscall.Handle, index uint32, name *uint16, nameLen *uint3 } func regLoadMUIString(key syscall.Handle, name *uint16, buf *uint16, buflen uint32, 
buflenCopied *uint32, flags uint32, dir *uint16) (regerrno error) { - r0, _, _ := syscall.Syscall9(procRegLoadMUIStringW.Addr(), 7, uintptr(key), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(buf)), uintptr(buflen), uintptr(unsafe.Pointer(buflenCopied)), uintptr(flags), uintptr(unsafe.Pointer(dir)), 0, 0) + r0, _, _ := syscall.SyscallN(procRegLoadMUIStringW.Addr(), uintptr(key), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(buf)), uintptr(buflen), uintptr(unsafe.Pointer(buflenCopied)), uintptr(flags), uintptr(unsafe.Pointer(dir))) if r0 != 0 { regerrno = syscall.Errno(r0) } @@ -100,7 +100,7 @@ func regLoadMUIString(key syscall.Handle, name *uint16, buf *uint16, buflen uint } func regSetValueEx(key syscall.Handle, valueName *uint16, reserved uint32, vtype uint32, buf *byte, bufsize uint32) (regerrno error) { - r0, _, _ := syscall.Syscall6(procRegSetValueExW.Addr(), 6, uintptr(key), uintptr(unsafe.Pointer(valueName)), uintptr(reserved), uintptr(vtype), uintptr(unsafe.Pointer(buf)), uintptr(bufsize)) + r0, _, _ := syscall.SyscallN(procRegSetValueExW.Addr(), uintptr(key), uintptr(unsafe.Pointer(valueName)), uintptr(reserved), uintptr(vtype), uintptr(unsafe.Pointer(buf)), uintptr(bufsize)) if r0 != 0 { regerrno = syscall.Errno(r0) } @@ -108,7 +108,7 @@ func regSetValueEx(key syscall.Handle, valueName *uint16, reserved uint32, vtype } func expandEnvironmentStrings(src *uint16, dst *uint16, size uint32) (n uint32, err error) { - r0, _, e1 := syscall.Syscall(procExpandEnvironmentStringsW.Addr(), 3, uintptr(unsafe.Pointer(src)), uintptr(unsafe.Pointer(dst)), uintptr(size)) + r0, _, e1 := syscall.SyscallN(procExpandEnvironmentStringsW.Addr(), uintptr(unsafe.Pointer(src)), uintptr(unsafe.Pointer(dst)), uintptr(size)) n = uint32(r0) if n == 0 { err = errnoErr(e1) diff --git a/vendor/golang.org/x/sys/windows/security_windows.go b/vendor/golang.org/x/sys/windows/security_windows.go index b6e1ab76f..a8b0364c7 100644 --- 
a/vendor/golang.org/x/sys/windows/security_windows.go +++ b/vendor/golang.org/x/sys/windows/security_windows.go @@ -1303,7 +1303,10 @@ func (selfRelativeSD *SECURITY_DESCRIPTOR) ToAbsolute() (absoluteSD *SECURITY_DE return nil, err } if absoluteSDSize > 0 { - absoluteSD = (*SECURITY_DESCRIPTOR)(unsafe.Pointer(&make([]byte, absoluteSDSize)[0])) + absoluteSD = new(SECURITY_DESCRIPTOR) + if unsafe.Sizeof(*absoluteSD) < uintptr(absoluteSDSize) { + panic("sizeof(SECURITY_DESCRIPTOR) too small") + } } var ( dacl *ACL @@ -1312,19 +1315,55 @@ func (selfRelativeSD *SECURITY_DESCRIPTOR) ToAbsolute() (absoluteSD *SECURITY_DE group *SID ) if daclSize > 0 { - dacl = (*ACL)(unsafe.Pointer(&make([]byte, daclSize)[0])) + dacl = (*ACL)(unsafe.Pointer(unsafe.SliceData(make([]byte, daclSize)))) } if saclSize > 0 { - sacl = (*ACL)(unsafe.Pointer(&make([]byte, saclSize)[0])) + sacl = (*ACL)(unsafe.Pointer(unsafe.SliceData(make([]byte, saclSize)))) } if ownerSize > 0 { - owner = (*SID)(unsafe.Pointer(&make([]byte, ownerSize)[0])) + owner = (*SID)(unsafe.Pointer(unsafe.SliceData(make([]byte, ownerSize)))) } if groupSize > 0 { - group = (*SID)(unsafe.Pointer(&make([]byte, groupSize)[0])) + group = (*SID)(unsafe.Pointer(unsafe.SliceData(make([]byte, groupSize)))) } + // We call into Windows via makeAbsoluteSD, which sets up + // pointers within absoluteSD that point to other chunks of memory + // we pass into makeAbsoluteSD, and that happens outside the view of the GC. + // We therefore take some care here to then verify the pointers are as we expect + // and set them explicitly in view of the GC. See https://go.dev/issue/73199. + // TODO: consider weak pointers once Go 1.24 is appropriate. See suggestion in https://go.dev/cl/663575. err = makeAbsoluteSD(selfRelativeSD, absoluteSD, &absoluteSDSize, dacl, &daclSize, sacl, &saclSize, owner, &ownerSize, group, &groupSize) + if err != nil { + // Don't return absoluteSD, which might be partially initialized. 
+ return nil, err + } + // Before using any fields, verify absoluteSD is in the format we expect according to Windows. + // See https://learn.microsoft.com/en-us/windows/win32/secauthz/absolute-and-self-relative-security-descriptors + absControl, _, err := absoluteSD.Control() + if err != nil { + panic("absoluteSD: " + err.Error()) + } + if absControl&SE_SELF_RELATIVE != 0 { + panic("absoluteSD not in absolute format") + } + if absoluteSD.dacl != dacl { + panic("dacl pointer mismatch") + } + if absoluteSD.sacl != sacl { + panic("sacl pointer mismatch") + } + if absoluteSD.owner != owner { + panic("owner pointer mismatch") + } + if absoluteSD.group != group { + panic("group pointer mismatch") + } + absoluteSD.dacl = dacl + absoluteSD.sacl = sacl + absoluteSD.owner = owner + absoluteSD.group = group + return } diff --git a/vendor/golang.org/x/sys/windows/syscall_windows.go b/vendor/golang.org/x/sys/windows/syscall_windows.go index 4a3254386..69439df2a 100644 --- a/vendor/golang.org/x/sys/windows/syscall_windows.go +++ b/vendor/golang.org/x/sys/windows/syscall_windows.go @@ -321,6 +321,8 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys SetConsoleOutputCP(cp uint32) (err error) = kernel32.SetConsoleOutputCP //sys WriteConsole(console Handle, buf *uint16, towrite uint32, written *uint32, reserved *byte) (err error) = kernel32.WriteConsoleW //sys ReadConsole(console Handle, buf *uint16, toread uint32, read *uint32, inputControl *byte) (err error) = kernel32.ReadConsoleW +//sys GetNumberOfConsoleInputEvents(console Handle, numevents *uint32) (err error) = kernel32.GetNumberOfConsoleInputEvents +//sys FlushConsoleInputBuffer(console Handle) (err error) = kernel32.FlushConsoleInputBuffer //sys resizePseudoConsole(pconsole Handle, size uint32) (hr error) = kernel32.ResizePseudoConsole //sys CreateToolhelp32Snapshot(flags uint32, processId uint32) (handle Handle, err error) [failretval==InvalidHandle] = kernel32.CreateToolhelp32Snapshot //sys Module32First(snapshot 
Handle, moduleEntry *ModuleEntry32) (err error) = kernel32.Module32FirstW @@ -870,6 +872,7 @@ const socket_error = uintptr(^uint32(0)) //sys WSARecvFrom(s Handle, bufs *WSABuf, bufcnt uint32, recvd *uint32, flags *uint32, from *RawSockaddrAny, fromlen *int32, overlapped *Overlapped, croutine *byte) (err error) [failretval==socket_error] = ws2_32.WSARecvFrom //sys WSASendTo(s Handle, bufs *WSABuf, bufcnt uint32, sent *uint32, flags uint32, to *RawSockaddrAny, tolen int32, overlapped *Overlapped, croutine *byte) (err error) [failretval==socket_error] = ws2_32.WSASendTo //sys WSASocket(af int32, typ int32, protocol int32, protoInfo *WSAProtocolInfo, group uint32, flags uint32) (handle Handle, err error) [failretval==InvalidHandle] = ws2_32.WSASocketW +//sys WSADuplicateSocket(s Handle, processID uint32, info *WSAProtocolInfo) (err error) [failretval!=0] = ws2_32.WSADuplicateSocketW //sys GetHostByName(name string) (h *Hostent, err error) [failretval==nil] = ws2_32.gethostbyname //sys GetServByName(name string, proto string) (s *Servent, err error) [failretval==nil] = ws2_32.getservbyname //sys Ntohs(netshort uint16) (u uint16) = ws2_32.ntohs @@ -889,8 +892,12 @@ const socket_error = uintptr(^uint32(0)) //sys MultiByteToWideChar(codePage uint32, dwFlags uint32, str *byte, nstr int32, wchar *uint16, nwchar int32) (nwrite int32, err error) = kernel32.MultiByteToWideChar //sys getBestInterfaceEx(sockaddr unsafe.Pointer, pdwBestIfIndex *uint32) (errcode error) = iphlpapi.GetBestInterfaceEx //sys GetIfEntry2Ex(level uint32, row *MibIfRow2) (errcode error) = iphlpapi.GetIfEntry2Ex +//sys GetIpForwardEntry2(row *MibIpForwardRow2) (errcode error) = iphlpapi.GetIpForwardEntry2 +//sys GetIpForwardTable2(family uint16, table **MibIpForwardTable2) (errcode error) = iphlpapi.GetIpForwardTable2 //sys GetUnicastIpAddressEntry(row *MibUnicastIpAddressRow) (errcode error) = iphlpapi.GetUnicastIpAddressEntry +//sys FreeMibTable(memory unsafe.Pointer) = iphlpapi.FreeMibTable //sys 
NotifyIpInterfaceChange(family uint16, callback uintptr, callerContext unsafe.Pointer, initialNotification bool, notificationHandle *Handle) (errcode error) = iphlpapi.NotifyIpInterfaceChange +//sys NotifyRouteChange2(family uint16, callback uintptr, callerContext unsafe.Pointer, initialNotification bool, notificationHandle *Handle) (errcode error) = iphlpapi.NotifyRouteChange2 //sys NotifyUnicastIpAddressChange(family uint16, callback uintptr, callerContext unsafe.Pointer, initialNotification bool, notificationHandle *Handle) (errcode error) = iphlpapi.NotifyUnicastIpAddressChange //sys CancelMibChangeNotify2(notificationHandle Handle) (errcode error) = iphlpapi.CancelMibChangeNotify2 @@ -913,6 +920,17 @@ type RawSockaddrInet6 struct { Scope_id uint32 } +// RawSockaddrInet is a union that contains an IPv4, an IPv6 address, or an address family. See +// https://learn.microsoft.com/en-us/windows/win32/api/ws2ipdef/ns-ws2ipdef-sockaddr_inet. +// +// A [*RawSockaddrInet] may be converted to a [*RawSockaddrInet4] or [*RawSockaddrInet6] using +// unsafe, depending on the address family. +type RawSockaddrInet struct { + Family uint16 + Port uint16 + Data [6]uint32 +} + type RawSockaddr struct { Family uint16 Data [14]int8 @@ -1698,8 +1716,9 @@ func NewNTUnicodeString(s string) (*NTUnicodeString, error) { // Slice returns a uint16 slice that aliases the data in the NTUnicodeString. func (s *NTUnicodeString) Slice() []uint16 { - slice := unsafe.Slice(s.Buffer, s.MaximumLength) - return slice[:s.Length] + // Note: this rounds the length down, if it happens + // to (incorrectly) be odd. Probably safer than rounding up. 
+ return unsafe.Slice(s.Buffer, s.MaximumLength/2)[:s.Length/2] } func (s *NTUnicodeString) String() string { diff --git a/vendor/golang.org/x/sys/windows/types_windows.go b/vendor/golang.org/x/sys/windows/types_windows.go index 9d138de5f..6e4f50eb4 100644 --- a/vendor/golang.org/x/sys/windows/types_windows.go +++ b/vendor/golang.org/x/sys/windows/types_windows.go @@ -65,6 +65,22 @@ var signals = [...]string{ 15: "terminated", } +// File flags for [os.OpenFile]. The O_ prefix is used to indicate +// that these flags are specific to the OpenFile function. +const ( + O_FILE_FLAG_OPEN_NO_RECALL = FILE_FLAG_OPEN_NO_RECALL + O_FILE_FLAG_OPEN_REPARSE_POINT = FILE_FLAG_OPEN_REPARSE_POINT + O_FILE_FLAG_SESSION_AWARE = FILE_FLAG_SESSION_AWARE + O_FILE_FLAG_POSIX_SEMANTICS = FILE_FLAG_POSIX_SEMANTICS + O_FILE_FLAG_BACKUP_SEMANTICS = FILE_FLAG_BACKUP_SEMANTICS + O_FILE_FLAG_DELETE_ON_CLOSE = FILE_FLAG_DELETE_ON_CLOSE + O_FILE_FLAG_SEQUENTIAL_SCAN = FILE_FLAG_SEQUENTIAL_SCAN + O_FILE_FLAG_RANDOM_ACCESS = FILE_FLAG_RANDOM_ACCESS + O_FILE_FLAG_NO_BUFFERING = FILE_FLAG_NO_BUFFERING + O_FILE_FLAG_OVERLAPPED = FILE_FLAG_OVERLAPPED + O_FILE_FLAG_WRITE_THROUGH = FILE_FLAG_WRITE_THROUGH +) + const ( FILE_READ_DATA = 0x00000001 FILE_READ_ATTRIBUTES = 0x00000080 @@ -1074,6 +1090,7 @@ const ( IP_ADD_MEMBERSHIP = 0xc IP_DROP_MEMBERSHIP = 0xd IP_PKTINFO = 0x13 + IP_MTU_DISCOVER = 0x47 IPV6_V6ONLY = 0x1b IPV6_UNICAST_HOPS = 0x4 @@ -1083,6 +1100,7 @@ const ( IPV6_JOIN_GROUP = 0xc IPV6_LEAVE_GROUP = 0xd IPV6_PKTINFO = 0x13 + IPV6_MTU_DISCOVER = 0x47 MSG_OOB = 0x1 MSG_PEEK = 0x2 @@ -1132,6 +1150,15 @@ const ( WSASYS_STATUS_LEN = 128 ) +// enum PMTUD_STATE from ws2ipdef.h +const ( + IP_PMTUDISC_NOT_SET = 0 + IP_PMTUDISC_DO = 1 + IP_PMTUDISC_DONT = 2 + IP_PMTUDISC_PROBE = 3 + IP_PMTUDISC_MAX = 4 +) + type WSABuf struct { Len uint32 Buf *byte @@ -1146,6 +1173,22 @@ type WSAMsg struct { Flags uint32 } +type WSACMSGHDR struct { + Len uintptr + Level int32 + Type int32 +} + +type IN_PKTINFO struct { 
+ Addr [4]byte + Ifindex uint32 +} + +type IN6_PKTINFO struct { + Addr [16]byte + Ifindex uint32 +} + // Flags for WSASocket const ( WSA_FLAG_OVERLAPPED = 0x01 @@ -1949,6 +1992,12 @@ const ( SYMBOLIC_LINK_FLAG_DIRECTORY = 0x1 ) +// FILE_ZERO_DATA_INFORMATION from winioctl.h +type FileZeroDataInformation struct { + FileOffset int64 + BeyondFinalZero int64 +} + const ( ComputerNameNetBIOS = 0 ComputerNameDnsHostname = 1 @@ -2271,6 +2320,82 @@ type MibIfRow2 struct { OutQLen uint64 } +// IP_ADDRESS_PREFIX stores an IP address prefix. See +// https://learn.microsoft.com/en-us/windows/win32/api/netioapi/ns-netioapi-ip_address_prefix. +type IpAddressPrefix struct { + Prefix RawSockaddrInet + PrefixLength uint8 +} + +// NL_ROUTE_ORIGIN enumeration from nldef.h or +// https://learn.microsoft.com/en-us/windows/win32/api/nldef/ne-nldef-nl_route_origin. +const ( + NlroManual = 0 + NlroWellKnown = 1 + NlroDHCP = 2 + NlroRouterAdvertisement = 3 + Nlro6to4 = 4 +) + +// NL_ROUTE_ORIGIN enumeration from nldef.h or +// https://learn.microsoft.com/en-us/windows/win32/api/nldef/ne-nldef-nl_route_protocol. +const ( + MIB_IPPROTO_OTHER = 1 + MIB_IPPROTO_LOCAL = 2 + MIB_IPPROTO_NETMGMT = 3 + MIB_IPPROTO_ICMP = 4 + MIB_IPPROTO_EGP = 5 + MIB_IPPROTO_GGP = 6 + MIB_IPPROTO_HELLO = 7 + MIB_IPPROTO_RIP = 8 + MIB_IPPROTO_IS_IS = 9 + MIB_IPPROTO_ES_IS = 10 + MIB_IPPROTO_CISCO = 11 + MIB_IPPROTO_BBN = 12 + MIB_IPPROTO_OSPF = 13 + MIB_IPPROTO_BGP = 14 + MIB_IPPROTO_IDPR = 15 + MIB_IPPROTO_EIGRP = 16 + MIB_IPPROTO_DVMRP = 17 + MIB_IPPROTO_RPL = 18 + MIB_IPPROTO_DHCP = 19 + MIB_IPPROTO_NT_AUTOSTATIC = 10002 + MIB_IPPROTO_NT_STATIC = 10006 + MIB_IPPROTO_NT_STATIC_NON_DOD = 10007 +) + +// MIB_IPFORWARD_ROW2 stores information about an IP route entry. See +// https://learn.microsoft.com/en-us/windows/win32/api/netioapi/ns-netioapi-mib_ipforward_row2. 
+type MibIpForwardRow2 struct { + InterfaceLuid uint64 + InterfaceIndex uint32 + DestinationPrefix IpAddressPrefix + NextHop RawSockaddrInet + SitePrefixLength uint8 + ValidLifetime uint32 + PreferredLifetime uint32 + Metric uint32 + Protocol uint32 + Loopback uint8 + AutoconfigureAddress uint8 + Publish uint8 + Immortal uint8 + Age uint32 + Origin uint32 +} + +// MIB_IPFORWARD_TABLE2 contains a table of IP route entries. See +// https://learn.microsoft.com/en-us/windows/win32/api/netioapi/ns-netioapi-mib_ipforward_table2. +type MibIpForwardTable2 struct { + NumEntries uint32 + Table [1]MibIpForwardRow2 +} + +// Rows returns the IP route entries in the table. +func (t *MibIpForwardTable2) Rows() []MibIpForwardRow2 { + return unsafe.Slice(&t.Table[0], t.NumEntries) +} + // MIB_UNICASTIPADDRESS_ROW stores information about a unicast IP address. See // https://learn.microsoft.com/en-us/windows/win32/api/netioapi/ns-netioapi-mib_unicastipaddress_row. type MibUnicastIpAddressRow struct { @@ -2673,6 +2798,8 @@ type CommTimeouts struct { // NTUnicodeString is a UTF-16 string for NT native APIs, corresponding to UNICODE_STRING. type NTUnicodeString struct { + // Note: Length and MaximumLength are in *bytes*, not uint16s. + // They should always be even. 
Length uint16 MaximumLength uint16 Buffer *uint16 @@ -3601,3 +3728,213 @@ const ( KLF_NOTELLSHELL = 0x00000080 KLF_SETFORPROCESS = 0x00000100 ) + +// Virtual Key codes +// https://docs.microsoft.com/en-us/windows/win32/inputdev/virtual-key-codes +const ( + VK_LBUTTON = 0x01 + VK_RBUTTON = 0x02 + VK_CANCEL = 0x03 + VK_MBUTTON = 0x04 + VK_XBUTTON1 = 0x05 + VK_XBUTTON2 = 0x06 + VK_BACK = 0x08 + VK_TAB = 0x09 + VK_CLEAR = 0x0C + VK_RETURN = 0x0D + VK_SHIFT = 0x10 + VK_CONTROL = 0x11 + VK_MENU = 0x12 + VK_PAUSE = 0x13 + VK_CAPITAL = 0x14 + VK_KANA = 0x15 + VK_HANGEUL = 0x15 + VK_HANGUL = 0x15 + VK_IME_ON = 0x16 + VK_JUNJA = 0x17 + VK_FINAL = 0x18 + VK_HANJA = 0x19 + VK_KANJI = 0x19 + VK_IME_OFF = 0x1A + VK_ESCAPE = 0x1B + VK_CONVERT = 0x1C + VK_NONCONVERT = 0x1D + VK_ACCEPT = 0x1E + VK_MODECHANGE = 0x1F + VK_SPACE = 0x20 + VK_PRIOR = 0x21 + VK_NEXT = 0x22 + VK_END = 0x23 + VK_HOME = 0x24 + VK_LEFT = 0x25 + VK_UP = 0x26 + VK_RIGHT = 0x27 + VK_DOWN = 0x28 + VK_SELECT = 0x29 + VK_PRINT = 0x2A + VK_EXECUTE = 0x2B + VK_SNAPSHOT = 0x2C + VK_INSERT = 0x2D + VK_DELETE = 0x2E + VK_HELP = 0x2F + VK_LWIN = 0x5B + VK_RWIN = 0x5C + VK_APPS = 0x5D + VK_SLEEP = 0x5F + VK_NUMPAD0 = 0x60 + VK_NUMPAD1 = 0x61 + VK_NUMPAD2 = 0x62 + VK_NUMPAD3 = 0x63 + VK_NUMPAD4 = 0x64 + VK_NUMPAD5 = 0x65 + VK_NUMPAD6 = 0x66 + VK_NUMPAD7 = 0x67 + VK_NUMPAD8 = 0x68 + VK_NUMPAD9 = 0x69 + VK_MULTIPLY = 0x6A + VK_ADD = 0x6B + VK_SEPARATOR = 0x6C + VK_SUBTRACT = 0x6D + VK_DECIMAL = 0x6E + VK_DIVIDE = 0x6F + VK_F1 = 0x70 + VK_F2 = 0x71 + VK_F3 = 0x72 + VK_F4 = 0x73 + VK_F5 = 0x74 + VK_F6 = 0x75 + VK_F7 = 0x76 + VK_F8 = 0x77 + VK_F9 = 0x78 + VK_F10 = 0x79 + VK_F11 = 0x7A + VK_F12 = 0x7B + VK_F13 = 0x7C + VK_F14 = 0x7D + VK_F15 = 0x7E + VK_F16 = 0x7F + VK_F17 = 0x80 + VK_F18 = 0x81 + VK_F19 = 0x82 + VK_F20 = 0x83 + VK_F21 = 0x84 + VK_F22 = 0x85 + VK_F23 = 0x86 + VK_F24 = 0x87 + VK_NUMLOCK = 0x90 + VK_SCROLL = 0x91 + VK_OEM_NEC_EQUAL = 0x92 + VK_OEM_FJ_JISHO = 0x92 + VK_OEM_FJ_MASSHOU = 0x93 + VK_OEM_FJ_TOUROKU = 
0x94 + VK_OEM_FJ_LOYA = 0x95 + VK_OEM_FJ_ROYA = 0x96 + VK_LSHIFT = 0xA0 + VK_RSHIFT = 0xA1 + VK_LCONTROL = 0xA2 + VK_RCONTROL = 0xA3 + VK_LMENU = 0xA4 + VK_RMENU = 0xA5 + VK_BROWSER_BACK = 0xA6 + VK_BROWSER_FORWARD = 0xA7 + VK_BROWSER_REFRESH = 0xA8 + VK_BROWSER_STOP = 0xA9 + VK_BROWSER_SEARCH = 0xAA + VK_BROWSER_FAVORITES = 0xAB + VK_BROWSER_HOME = 0xAC + VK_VOLUME_MUTE = 0xAD + VK_VOLUME_DOWN = 0xAE + VK_VOLUME_UP = 0xAF + VK_MEDIA_NEXT_TRACK = 0xB0 + VK_MEDIA_PREV_TRACK = 0xB1 + VK_MEDIA_STOP = 0xB2 + VK_MEDIA_PLAY_PAUSE = 0xB3 + VK_LAUNCH_MAIL = 0xB4 + VK_LAUNCH_MEDIA_SELECT = 0xB5 + VK_LAUNCH_APP1 = 0xB6 + VK_LAUNCH_APP2 = 0xB7 + VK_OEM_1 = 0xBA + VK_OEM_PLUS = 0xBB + VK_OEM_COMMA = 0xBC + VK_OEM_MINUS = 0xBD + VK_OEM_PERIOD = 0xBE + VK_OEM_2 = 0xBF + VK_OEM_3 = 0xC0 + VK_OEM_4 = 0xDB + VK_OEM_5 = 0xDC + VK_OEM_6 = 0xDD + VK_OEM_7 = 0xDE + VK_OEM_8 = 0xDF + VK_OEM_AX = 0xE1 + VK_OEM_102 = 0xE2 + VK_ICO_HELP = 0xE3 + VK_ICO_00 = 0xE4 + VK_PROCESSKEY = 0xE5 + VK_ICO_CLEAR = 0xE6 + VK_OEM_RESET = 0xE9 + VK_OEM_JUMP = 0xEA + VK_OEM_PA1 = 0xEB + VK_OEM_PA2 = 0xEC + VK_OEM_PA3 = 0xED + VK_OEM_WSCTRL = 0xEE + VK_OEM_CUSEL = 0xEF + VK_OEM_ATTN = 0xF0 + VK_OEM_FINISH = 0xF1 + VK_OEM_COPY = 0xF2 + VK_OEM_AUTO = 0xF3 + VK_OEM_ENLW = 0xF4 + VK_OEM_BACKTAB = 0xF5 + VK_ATTN = 0xF6 + VK_CRSEL = 0xF7 + VK_EXSEL = 0xF8 + VK_EREOF = 0xF9 + VK_PLAY = 0xFA + VK_ZOOM = 0xFB + VK_NONAME = 0xFC + VK_PA1 = 0xFD + VK_OEM_CLEAR = 0xFE +) + +// Mouse button constants. +// https://docs.microsoft.com/en-us/windows/console/mouse-event-record-str +const ( + FROM_LEFT_1ST_BUTTON_PRESSED = 0x0001 + RIGHTMOST_BUTTON_PRESSED = 0x0002 + FROM_LEFT_2ND_BUTTON_PRESSED = 0x0004 + FROM_LEFT_3RD_BUTTON_PRESSED = 0x0008 + FROM_LEFT_4TH_BUTTON_PRESSED = 0x0010 +) + +// Control key state constaints. 
+// https://docs.microsoft.com/en-us/windows/console/key-event-record-str +// https://docs.microsoft.com/en-us/windows/console/mouse-event-record-str +const ( + CAPSLOCK_ON = 0x0080 + ENHANCED_KEY = 0x0100 + LEFT_ALT_PRESSED = 0x0002 + LEFT_CTRL_PRESSED = 0x0008 + NUMLOCK_ON = 0x0020 + RIGHT_ALT_PRESSED = 0x0001 + RIGHT_CTRL_PRESSED = 0x0004 + SCROLLLOCK_ON = 0x0040 + SHIFT_PRESSED = 0x0010 +) + +// Mouse event record event flags. +// https://docs.microsoft.com/en-us/windows/console/mouse-event-record-str +const ( + MOUSE_MOVED = 0x0001 + DOUBLE_CLICK = 0x0002 + MOUSE_WHEELED = 0x0004 + MOUSE_HWHEELED = 0x0008 +) + +// Input Record Event Types +// https://learn.microsoft.com/en-us/windows/console/input-record-str +const ( + FOCUS_EVENT = 0x0010 + KEY_EVENT = 0x0001 + MENU_EVENT = 0x0008 + MOUSE_EVENT = 0x0002 + WINDOW_BUFFER_SIZE_EVENT = 0x0004 +) diff --git a/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/zsyscall_windows.go index 01c0716c2..f25b7308a 100644 --- a/vendor/golang.org/x/sys/windows/zsyscall_windows.go +++ b/vendor/golang.org/x/sys/windows/zsyscall_windows.go @@ -182,13 +182,17 @@ var ( procDwmGetWindowAttribute = moddwmapi.NewProc("DwmGetWindowAttribute") procDwmSetWindowAttribute = moddwmapi.NewProc("DwmSetWindowAttribute") procCancelMibChangeNotify2 = modiphlpapi.NewProc("CancelMibChangeNotify2") + procFreeMibTable = modiphlpapi.NewProc("FreeMibTable") procGetAdaptersAddresses = modiphlpapi.NewProc("GetAdaptersAddresses") procGetAdaptersInfo = modiphlpapi.NewProc("GetAdaptersInfo") procGetBestInterfaceEx = modiphlpapi.NewProc("GetBestInterfaceEx") procGetIfEntry = modiphlpapi.NewProc("GetIfEntry") procGetIfEntry2Ex = modiphlpapi.NewProc("GetIfEntry2Ex") + procGetIpForwardEntry2 = modiphlpapi.NewProc("GetIpForwardEntry2") + procGetIpForwardTable2 = modiphlpapi.NewProc("GetIpForwardTable2") procGetUnicastIpAddressEntry = modiphlpapi.NewProc("GetUnicastIpAddressEntry") procNotifyIpInterfaceChange = 
modiphlpapi.NewProc("NotifyIpInterfaceChange") + procNotifyRouteChange2 = modiphlpapi.NewProc("NotifyRouteChange2") procNotifyUnicastIpAddressChange = modiphlpapi.NewProc("NotifyUnicastIpAddressChange") procAddDllDirectory = modkernel32.NewProc("AddDllDirectory") procAssignProcessToJobObject = modkernel32.NewProc("AssignProcessToJobObject") @@ -238,6 +242,7 @@ var ( procFindResourceW = modkernel32.NewProc("FindResourceW") procFindVolumeClose = modkernel32.NewProc("FindVolumeClose") procFindVolumeMountPointClose = modkernel32.NewProc("FindVolumeMountPointClose") + procFlushConsoleInputBuffer = modkernel32.NewProc("FlushConsoleInputBuffer") procFlushFileBuffers = modkernel32.NewProc("FlushFileBuffers") procFlushViewOfFile = modkernel32.NewProc("FlushViewOfFile") procFormatMessageW = modkernel32.NewProc("FormatMessageW") @@ -284,6 +289,7 @@ var ( procGetNamedPipeHandleStateW = modkernel32.NewProc("GetNamedPipeHandleStateW") procGetNamedPipeInfo = modkernel32.NewProc("GetNamedPipeInfo") procGetNamedPipeServerProcessId = modkernel32.NewProc("GetNamedPipeServerProcessId") + procGetNumberOfConsoleInputEvents = modkernel32.NewProc("GetNumberOfConsoleInputEvents") procGetOverlappedResult = modkernel32.NewProc("GetOverlappedResult") procGetPriorityClass = modkernel32.NewProc("GetPriorityClass") procGetProcAddress = modkernel32.NewProc("GetProcAddress") @@ -511,6 +517,7 @@ var ( procFreeAddrInfoW = modws2_32.NewProc("FreeAddrInfoW") procGetAddrInfoW = modws2_32.NewProc("GetAddrInfoW") procWSACleanup = modws2_32.NewProc("WSACleanup") + procWSADuplicateSocketW = modws2_32.NewProc("WSADuplicateSocketW") procWSAEnumProtocolsW = modws2_32.NewProc("WSAEnumProtocolsW") procWSAGetOverlappedResult = modws2_32.NewProc("WSAGetOverlappedResult") procWSAIoctl = modws2_32.NewProc("WSAIoctl") @@ -545,25 +552,25 @@ var ( ) func cm_Get_DevNode_Status(status *uint32, problemNumber *uint32, devInst DEVINST, flags uint32) (ret CONFIGRET) { - r0, _, _ := 
syscall.Syscall6(procCM_Get_DevNode_Status.Addr(), 4, uintptr(unsafe.Pointer(status)), uintptr(unsafe.Pointer(problemNumber)), uintptr(devInst), uintptr(flags), 0, 0) + r0, _, _ := syscall.SyscallN(procCM_Get_DevNode_Status.Addr(), uintptr(unsafe.Pointer(status)), uintptr(unsafe.Pointer(problemNumber)), uintptr(devInst), uintptr(flags)) ret = CONFIGRET(r0) return } func cm_Get_Device_Interface_List(interfaceClass *GUID, deviceID *uint16, buffer *uint16, bufferLen uint32, flags uint32) (ret CONFIGRET) { - r0, _, _ := syscall.Syscall6(procCM_Get_Device_Interface_ListW.Addr(), 5, uintptr(unsafe.Pointer(interfaceClass)), uintptr(unsafe.Pointer(deviceID)), uintptr(unsafe.Pointer(buffer)), uintptr(bufferLen), uintptr(flags), 0) + r0, _, _ := syscall.SyscallN(procCM_Get_Device_Interface_ListW.Addr(), uintptr(unsafe.Pointer(interfaceClass)), uintptr(unsafe.Pointer(deviceID)), uintptr(unsafe.Pointer(buffer)), uintptr(bufferLen), uintptr(flags)) ret = CONFIGRET(r0) return } func cm_Get_Device_Interface_List_Size(len *uint32, interfaceClass *GUID, deviceID *uint16, flags uint32) (ret CONFIGRET) { - r0, _, _ := syscall.Syscall6(procCM_Get_Device_Interface_List_SizeW.Addr(), 4, uintptr(unsafe.Pointer(len)), uintptr(unsafe.Pointer(interfaceClass)), uintptr(unsafe.Pointer(deviceID)), uintptr(flags), 0, 0) + r0, _, _ := syscall.SyscallN(procCM_Get_Device_Interface_List_SizeW.Addr(), uintptr(unsafe.Pointer(len)), uintptr(unsafe.Pointer(interfaceClass)), uintptr(unsafe.Pointer(deviceID)), uintptr(flags)) ret = CONFIGRET(r0) return } func cm_MapCrToWin32Err(configRet CONFIGRET, defaultWin32Error Errno) (ret Errno) { - r0, _, _ := syscall.Syscall(procCM_MapCrToWin32Err.Addr(), 2, uintptr(configRet), uintptr(defaultWin32Error), 0) + r0, _, _ := syscall.SyscallN(procCM_MapCrToWin32Err.Addr(), uintptr(configRet), uintptr(defaultWin32Error)) ret = Errno(r0) return } @@ -573,7 +580,7 @@ func AdjustTokenGroups(token Token, resetToDefault bool, newstate *Tokengroups, if resetToDefault { _p0 
= 1 } - r1, _, e1 := syscall.Syscall6(procAdjustTokenGroups.Addr(), 6, uintptr(token), uintptr(_p0), uintptr(unsafe.Pointer(newstate)), uintptr(buflen), uintptr(unsafe.Pointer(prevstate)), uintptr(unsafe.Pointer(returnlen))) + r1, _, e1 := syscall.SyscallN(procAdjustTokenGroups.Addr(), uintptr(token), uintptr(_p0), uintptr(unsafe.Pointer(newstate)), uintptr(buflen), uintptr(unsafe.Pointer(prevstate)), uintptr(unsafe.Pointer(returnlen))) if r1 == 0 { err = errnoErr(e1) } @@ -585,7 +592,7 @@ func AdjustTokenPrivileges(token Token, disableAllPrivileges bool, newstate *Tok if disableAllPrivileges { _p0 = 1 } - r1, _, e1 := syscall.Syscall6(procAdjustTokenPrivileges.Addr(), 6, uintptr(token), uintptr(_p0), uintptr(unsafe.Pointer(newstate)), uintptr(buflen), uintptr(unsafe.Pointer(prevstate)), uintptr(unsafe.Pointer(returnlen))) + r1, _, e1 := syscall.SyscallN(procAdjustTokenPrivileges.Addr(), uintptr(token), uintptr(_p0), uintptr(unsafe.Pointer(newstate)), uintptr(buflen), uintptr(unsafe.Pointer(prevstate)), uintptr(unsafe.Pointer(returnlen))) if r1 == 0 { err = errnoErr(e1) } @@ -593,7 +600,7 @@ func AdjustTokenPrivileges(token Token, disableAllPrivileges bool, newstate *Tok } func AllocateAndInitializeSid(identAuth *SidIdentifierAuthority, subAuth byte, subAuth0 uint32, subAuth1 uint32, subAuth2 uint32, subAuth3 uint32, subAuth4 uint32, subAuth5 uint32, subAuth6 uint32, subAuth7 uint32, sid **SID) (err error) { - r1, _, e1 := syscall.Syscall12(procAllocateAndInitializeSid.Addr(), 11, uintptr(unsafe.Pointer(identAuth)), uintptr(subAuth), uintptr(subAuth0), uintptr(subAuth1), uintptr(subAuth2), uintptr(subAuth3), uintptr(subAuth4), uintptr(subAuth5), uintptr(subAuth6), uintptr(subAuth7), uintptr(unsafe.Pointer(sid)), 0) + r1, _, e1 := syscall.SyscallN(procAllocateAndInitializeSid.Addr(), uintptr(unsafe.Pointer(identAuth)), uintptr(subAuth), uintptr(subAuth0), uintptr(subAuth1), uintptr(subAuth2), uintptr(subAuth3), uintptr(subAuth4), uintptr(subAuth5), 
uintptr(subAuth6), uintptr(subAuth7), uintptr(unsafe.Pointer(sid))) if r1 == 0 { err = errnoErr(e1) } @@ -601,7 +608,7 @@ func AllocateAndInitializeSid(identAuth *SidIdentifierAuthority, subAuth byte, s } func buildSecurityDescriptor(owner *TRUSTEE, group *TRUSTEE, countAccessEntries uint32, accessEntries *EXPLICIT_ACCESS, countAuditEntries uint32, auditEntries *EXPLICIT_ACCESS, oldSecurityDescriptor *SECURITY_DESCRIPTOR, sizeNewSecurityDescriptor *uint32, newSecurityDescriptor **SECURITY_DESCRIPTOR) (ret error) { - r0, _, _ := syscall.Syscall9(procBuildSecurityDescriptorW.Addr(), 9, uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(countAccessEntries), uintptr(unsafe.Pointer(accessEntries)), uintptr(countAuditEntries), uintptr(unsafe.Pointer(auditEntries)), uintptr(unsafe.Pointer(oldSecurityDescriptor)), uintptr(unsafe.Pointer(sizeNewSecurityDescriptor)), uintptr(unsafe.Pointer(newSecurityDescriptor))) + r0, _, _ := syscall.SyscallN(procBuildSecurityDescriptorW.Addr(), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(countAccessEntries), uintptr(unsafe.Pointer(accessEntries)), uintptr(countAuditEntries), uintptr(unsafe.Pointer(auditEntries)), uintptr(unsafe.Pointer(oldSecurityDescriptor)), uintptr(unsafe.Pointer(sizeNewSecurityDescriptor)), uintptr(unsafe.Pointer(newSecurityDescriptor))) if r0 != 0 { ret = syscall.Errno(r0) } @@ -609,7 +616,7 @@ func buildSecurityDescriptor(owner *TRUSTEE, group *TRUSTEE, countAccessEntries } func ChangeServiceConfig2(service Handle, infoLevel uint32, info *byte) (err error) { - r1, _, e1 := syscall.Syscall(procChangeServiceConfig2W.Addr(), 3, uintptr(service), uintptr(infoLevel), uintptr(unsafe.Pointer(info))) + r1, _, e1 := syscall.SyscallN(procChangeServiceConfig2W.Addr(), uintptr(service), uintptr(infoLevel), uintptr(unsafe.Pointer(info))) if r1 == 0 { err = errnoErr(e1) } @@ -617,7 +624,7 @@ func ChangeServiceConfig2(service Handle, infoLevel uint32, info *byte) (err err } func 
ChangeServiceConfig(service Handle, serviceType uint32, startType uint32, errorControl uint32, binaryPathName *uint16, loadOrderGroup *uint16, tagId *uint32, dependencies *uint16, serviceStartName *uint16, password *uint16, displayName *uint16) (err error) { - r1, _, e1 := syscall.Syscall12(procChangeServiceConfigW.Addr(), 11, uintptr(service), uintptr(serviceType), uintptr(startType), uintptr(errorControl), uintptr(unsafe.Pointer(binaryPathName)), uintptr(unsafe.Pointer(loadOrderGroup)), uintptr(unsafe.Pointer(tagId)), uintptr(unsafe.Pointer(dependencies)), uintptr(unsafe.Pointer(serviceStartName)), uintptr(unsafe.Pointer(password)), uintptr(unsafe.Pointer(displayName)), 0) + r1, _, e1 := syscall.SyscallN(procChangeServiceConfigW.Addr(), uintptr(service), uintptr(serviceType), uintptr(startType), uintptr(errorControl), uintptr(unsafe.Pointer(binaryPathName)), uintptr(unsafe.Pointer(loadOrderGroup)), uintptr(unsafe.Pointer(tagId)), uintptr(unsafe.Pointer(dependencies)), uintptr(unsafe.Pointer(serviceStartName)), uintptr(unsafe.Pointer(password)), uintptr(unsafe.Pointer(displayName))) if r1 == 0 { err = errnoErr(e1) } @@ -625,7 +632,7 @@ func ChangeServiceConfig(service Handle, serviceType uint32, startType uint32, e } func checkTokenMembership(tokenHandle Token, sidToCheck *SID, isMember *int32) (err error) { - r1, _, e1 := syscall.Syscall(procCheckTokenMembership.Addr(), 3, uintptr(tokenHandle), uintptr(unsafe.Pointer(sidToCheck)), uintptr(unsafe.Pointer(isMember))) + r1, _, e1 := syscall.SyscallN(procCheckTokenMembership.Addr(), uintptr(tokenHandle), uintptr(unsafe.Pointer(sidToCheck)), uintptr(unsafe.Pointer(isMember))) if r1 == 0 { err = errnoErr(e1) } @@ -633,7 +640,7 @@ func checkTokenMembership(tokenHandle Token, sidToCheck *SID, isMember *int32) ( } func CloseServiceHandle(handle Handle) (err error) { - r1, _, e1 := syscall.Syscall(procCloseServiceHandle.Addr(), 1, uintptr(handle), 0, 0) + r1, _, e1 := syscall.SyscallN(procCloseServiceHandle.Addr(), 
uintptr(handle)) if r1 == 0 { err = errnoErr(e1) } @@ -641,7 +648,7 @@ func CloseServiceHandle(handle Handle) (err error) { } func ControlService(service Handle, control uint32, status *SERVICE_STATUS) (err error) { - r1, _, e1 := syscall.Syscall(procControlService.Addr(), 3, uintptr(service), uintptr(control), uintptr(unsafe.Pointer(status))) + r1, _, e1 := syscall.SyscallN(procControlService.Addr(), uintptr(service), uintptr(control), uintptr(unsafe.Pointer(status))) if r1 == 0 { err = errnoErr(e1) } @@ -649,7 +656,7 @@ func ControlService(service Handle, control uint32, status *SERVICE_STATUS) (err } func convertSecurityDescriptorToStringSecurityDescriptor(sd *SECURITY_DESCRIPTOR, revision uint32, securityInformation SECURITY_INFORMATION, str **uint16, strLen *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procConvertSecurityDescriptorToStringSecurityDescriptorW.Addr(), 5, uintptr(unsafe.Pointer(sd)), uintptr(revision), uintptr(securityInformation), uintptr(unsafe.Pointer(str)), uintptr(unsafe.Pointer(strLen)), 0) + r1, _, e1 := syscall.SyscallN(procConvertSecurityDescriptorToStringSecurityDescriptorW.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(revision), uintptr(securityInformation), uintptr(unsafe.Pointer(str)), uintptr(unsafe.Pointer(strLen))) if r1 == 0 { err = errnoErr(e1) } @@ -657,7 +664,7 @@ func convertSecurityDescriptorToStringSecurityDescriptor(sd *SECURITY_DESCRIPTOR } func ConvertSidToStringSid(sid *SID, stringSid **uint16) (err error) { - r1, _, e1 := syscall.Syscall(procConvertSidToStringSidW.Addr(), 2, uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(stringSid)), 0) + r1, _, e1 := syscall.SyscallN(procConvertSidToStringSidW.Addr(), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(stringSid))) if r1 == 0 { err = errnoErr(e1) } @@ -674,7 +681,7 @@ func convertStringSecurityDescriptorToSecurityDescriptor(str string, revision ui } func _convertStringSecurityDescriptorToSecurityDescriptor(str *uint16, revision uint32, sd 
**SECURITY_DESCRIPTOR, size *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procConvertStringSecurityDescriptorToSecurityDescriptorW.Addr(), 4, uintptr(unsafe.Pointer(str)), uintptr(revision), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(size)), 0, 0) + r1, _, e1 := syscall.SyscallN(procConvertStringSecurityDescriptorToSecurityDescriptorW.Addr(), uintptr(unsafe.Pointer(str)), uintptr(revision), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(size))) if r1 == 0 { err = errnoErr(e1) } @@ -682,7 +689,7 @@ func _convertStringSecurityDescriptorToSecurityDescriptor(str *uint16, revision } func ConvertStringSidToSid(stringSid *uint16, sid **SID) (err error) { - r1, _, e1 := syscall.Syscall(procConvertStringSidToSidW.Addr(), 2, uintptr(unsafe.Pointer(stringSid)), uintptr(unsafe.Pointer(sid)), 0) + r1, _, e1 := syscall.SyscallN(procConvertStringSidToSidW.Addr(), uintptr(unsafe.Pointer(stringSid)), uintptr(unsafe.Pointer(sid))) if r1 == 0 { err = errnoErr(e1) } @@ -690,7 +697,7 @@ func ConvertStringSidToSid(stringSid *uint16, sid **SID) (err error) { } func CopySid(destSidLen uint32, destSid *SID, srcSid *SID) (err error) { - r1, _, e1 := syscall.Syscall(procCopySid.Addr(), 3, uintptr(destSidLen), uintptr(unsafe.Pointer(destSid)), uintptr(unsafe.Pointer(srcSid))) + r1, _, e1 := syscall.SyscallN(procCopySid.Addr(), uintptr(destSidLen), uintptr(unsafe.Pointer(destSid)), uintptr(unsafe.Pointer(srcSid))) if r1 == 0 { err = errnoErr(e1) } @@ -702,7 +709,7 @@ func CreateProcessAsUser(token Token, appName *uint16, commandLine *uint16, proc if inheritHandles { _p0 = 1 } - r1, _, e1 := syscall.Syscall12(procCreateProcessAsUserW.Addr(), 11, uintptr(token), uintptr(unsafe.Pointer(appName)), uintptr(unsafe.Pointer(commandLine)), uintptr(unsafe.Pointer(procSecurity)), uintptr(unsafe.Pointer(threadSecurity)), uintptr(_p0), uintptr(creationFlags), uintptr(unsafe.Pointer(env)), uintptr(unsafe.Pointer(currentDir)), uintptr(unsafe.Pointer(startupInfo)), 
uintptr(unsafe.Pointer(outProcInfo)), 0) + r1, _, e1 := syscall.SyscallN(procCreateProcessAsUserW.Addr(), uintptr(token), uintptr(unsafe.Pointer(appName)), uintptr(unsafe.Pointer(commandLine)), uintptr(unsafe.Pointer(procSecurity)), uintptr(unsafe.Pointer(threadSecurity)), uintptr(_p0), uintptr(creationFlags), uintptr(unsafe.Pointer(env)), uintptr(unsafe.Pointer(currentDir)), uintptr(unsafe.Pointer(startupInfo)), uintptr(unsafe.Pointer(outProcInfo))) if r1 == 0 { err = errnoErr(e1) } @@ -710,7 +717,7 @@ func CreateProcessAsUser(token Token, appName *uint16, commandLine *uint16, proc } func CreateService(mgr Handle, serviceName *uint16, displayName *uint16, access uint32, srvType uint32, startType uint32, errCtl uint32, pathName *uint16, loadOrderGroup *uint16, tagId *uint32, dependencies *uint16, serviceStartName *uint16, password *uint16) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall15(procCreateServiceW.Addr(), 13, uintptr(mgr), uintptr(unsafe.Pointer(serviceName)), uintptr(unsafe.Pointer(displayName)), uintptr(access), uintptr(srvType), uintptr(startType), uintptr(errCtl), uintptr(unsafe.Pointer(pathName)), uintptr(unsafe.Pointer(loadOrderGroup)), uintptr(unsafe.Pointer(tagId)), uintptr(unsafe.Pointer(dependencies)), uintptr(unsafe.Pointer(serviceStartName)), uintptr(unsafe.Pointer(password)), 0, 0) + r0, _, e1 := syscall.SyscallN(procCreateServiceW.Addr(), uintptr(mgr), uintptr(unsafe.Pointer(serviceName)), uintptr(unsafe.Pointer(displayName)), uintptr(access), uintptr(srvType), uintptr(startType), uintptr(errCtl), uintptr(unsafe.Pointer(pathName)), uintptr(unsafe.Pointer(loadOrderGroup)), uintptr(unsafe.Pointer(tagId)), uintptr(unsafe.Pointer(dependencies)), uintptr(unsafe.Pointer(serviceStartName)), uintptr(unsafe.Pointer(password))) handle = Handle(r0) if handle == 0 { err = errnoErr(e1) @@ -719,7 +726,7 @@ func CreateService(mgr Handle, serviceName *uint16, displayName *uint16, access } func createWellKnownSid(sidType WELL_KNOWN_SID_TYPE, 
domainSid *SID, sid *SID, sizeSid *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procCreateWellKnownSid.Addr(), 4, uintptr(sidType), uintptr(unsafe.Pointer(domainSid)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(sizeSid)), 0, 0) + r1, _, e1 := syscall.SyscallN(procCreateWellKnownSid.Addr(), uintptr(sidType), uintptr(unsafe.Pointer(domainSid)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(sizeSid))) if r1 == 0 { err = errnoErr(e1) } @@ -727,7 +734,7 @@ func createWellKnownSid(sidType WELL_KNOWN_SID_TYPE, domainSid *SID, sid *SID, s } func CryptAcquireContext(provhandle *Handle, container *uint16, provider *uint16, provtype uint32, flags uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procCryptAcquireContextW.Addr(), 5, uintptr(unsafe.Pointer(provhandle)), uintptr(unsafe.Pointer(container)), uintptr(unsafe.Pointer(provider)), uintptr(provtype), uintptr(flags), 0) + r1, _, e1 := syscall.SyscallN(procCryptAcquireContextW.Addr(), uintptr(unsafe.Pointer(provhandle)), uintptr(unsafe.Pointer(container)), uintptr(unsafe.Pointer(provider)), uintptr(provtype), uintptr(flags)) if r1 == 0 { err = errnoErr(e1) } @@ -735,7 +742,7 @@ func CryptAcquireContext(provhandle *Handle, container *uint16, provider *uint16 } func CryptGenRandom(provhandle Handle, buflen uint32, buf *byte) (err error) { - r1, _, e1 := syscall.Syscall(procCryptGenRandom.Addr(), 3, uintptr(provhandle), uintptr(buflen), uintptr(unsafe.Pointer(buf))) + r1, _, e1 := syscall.SyscallN(procCryptGenRandom.Addr(), uintptr(provhandle), uintptr(buflen), uintptr(unsafe.Pointer(buf))) if r1 == 0 { err = errnoErr(e1) } @@ -743,7 +750,7 @@ func CryptGenRandom(provhandle Handle, buflen uint32, buf *byte) (err error) { } func CryptReleaseContext(provhandle Handle, flags uint32) (err error) { - r1, _, e1 := syscall.Syscall(procCryptReleaseContext.Addr(), 2, uintptr(provhandle), uintptr(flags), 0) + r1, _, e1 := syscall.SyscallN(procCryptReleaseContext.Addr(), uintptr(provhandle), uintptr(flags)) 
if r1 == 0 { err = errnoErr(e1) } @@ -751,7 +758,7 @@ func CryptReleaseContext(provhandle Handle, flags uint32) (err error) { } func DeleteService(service Handle) (err error) { - r1, _, e1 := syscall.Syscall(procDeleteService.Addr(), 1, uintptr(service), 0, 0) + r1, _, e1 := syscall.SyscallN(procDeleteService.Addr(), uintptr(service)) if r1 == 0 { err = errnoErr(e1) } @@ -759,7 +766,7 @@ func DeleteService(service Handle) (err error) { } func DeregisterEventSource(handle Handle) (err error) { - r1, _, e1 := syscall.Syscall(procDeregisterEventSource.Addr(), 1, uintptr(handle), 0, 0) + r1, _, e1 := syscall.SyscallN(procDeregisterEventSource.Addr(), uintptr(handle)) if r1 == 0 { err = errnoErr(e1) } @@ -767,7 +774,7 @@ func DeregisterEventSource(handle Handle) (err error) { } func DuplicateTokenEx(existingToken Token, desiredAccess uint32, tokenAttributes *SecurityAttributes, impersonationLevel uint32, tokenType uint32, newToken *Token) (err error) { - r1, _, e1 := syscall.Syscall6(procDuplicateTokenEx.Addr(), 6, uintptr(existingToken), uintptr(desiredAccess), uintptr(unsafe.Pointer(tokenAttributes)), uintptr(impersonationLevel), uintptr(tokenType), uintptr(unsafe.Pointer(newToken))) + r1, _, e1 := syscall.SyscallN(procDuplicateTokenEx.Addr(), uintptr(existingToken), uintptr(desiredAccess), uintptr(unsafe.Pointer(tokenAttributes)), uintptr(impersonationLevel), uintptr(tokenType), uintptr(unsafe.Pointer(newToken))) if r1 == 0 { err = errnoErr(e1) } @@ -775,7 +782,7 @@ func DuplicateTokenEx(existingToken Token, desiredAccess uint32, tokenAttributes } func EnumDependentServices(service Handle, activityState uint32, services *ENUM_SERVICE_STATUS, buffSize uint32, bytesNeeded *uint32, servicesReturned *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procEnumDependentServicesW.Addr(), 6, uintptr(service), uintptr(activityState), uintptr(unsafe.Pointer(services)), uintptr(buffSize), uintptr(unsafe.Pointer(bytesNeeded)), uintptr(unsafe.Pointer(servicesReturned))) + r1, 
_, e1 := syscall.SyscallN(procEnumDependentServicesW.Addr(), uintptr(service), uintptr(activityState), uintptr(unsafe.Pointer(services)), uintptr(buffSize), uintptr(unsafe.Pointer(bytesNeeded)), uintptr(unsafe.Pointer(servicesReturned))) if r1 == 0 { err = errnoErr(e1) } @@ -783,7 +790,7 @@ func EnumDependentServices(service Handle, activityState uint32, services *ENUM_ } func EnumServicesStatusEx(mgr Handle, infoLevel uint32, serviceType uint32, serviceState uint32, services *byte, bufSize uint32, bytesNeeded *uint32, servicesReturned *uint32, resumeHandle *uint32, groupName *uint16) (err error) { - r1, _, e1 := syscall.Syscall12(procEnumServicesStatusExW.Addr(), 10, uintptr(mgr), uintptr(infoLevel), uintptr(serviceType), uintptr(serviceState), uintptr(unsafe.Pointer(services)), uintptr(bufSize), uintptr(unsafe.Pointer(bytesNeeded)), uintptr(unsafe.Pointer(servicesReturned)), uintptr(unsafe.Pointer(resumeHandle)), uintptr(unsafe.Pointer(groupName)), 0, 0) + r1, _, e1 := syscall.SyscallN(procEnumServicesStatusExW.Addr(), uintptr(mgr), uintptr(infoLevel), uintptr(serviceType), uintptr(serviceState), uintptr(unsafe.Pointer(services)), uintptr(bufSize), uintptr(unsafe.Pointer(bytesNeeded)), uintptr(unsafe.Pointer(servicesReturned)), uintptr(unsafe.Pointer(resumeHandle)), uintptr(unsafe.Pointer(groupName))) if r1 == 0 { err = errnoErr(e1) } @@ -791,13 +798,13 @@ func EnumServicesStatusEx(mgr Handle, infoLevel uint32, serviceType uint32, serv } func EqualSid(sid1 *SID, sid2 *SID) (isEqual bool) { - r0, _, _ := syscall.Syscall(procEqualSid.Addr(), 2, uintptr(unsafe.Pointer(sid1)), uintptr(unsafe.Pointer(sid2)), 0) + r0, _, _ := syscall.SyscallN(procEqualSid.Addr(), uintptr(unsafe.Pointer(sid1)), uintptr(unsafe.Pointer(sid2))) isEqual = r0 != 0 return } func FreeSid(sid *SID) (err error) { - r1, _, e1 := syscall.Syscall(procFreeSid.Addr(), 1, uintptr(unsafe.Pointer(sid)), 0, 0) + r1, _, e1 := syscall.SyscallN(procFreeSid.Addr(), uintptr(unsafe.Pointer(sid))) if r1 != 0 { 
err = errnoErr(e1) } @@ -805,7 +812,7 @@ func FreeSid(sid *SID) (err error) { } func GetAce(acl *ACL, aceIndex uint32, pAce **ACCESS_ALLOWED_ACE) (err error) { - r1, _, e1 := syscall.Syscall(procGetAce.Addr(), 3, uintptr(unsafe.Pointer(acl)), uintptr(aceIndex), uintptr(unsafe.Pointer(pAce))) + r1, _, e1 := syscall.SyscallN(procGetAce.Addr(), uintptr(unsafe.Pointer(acl)), uintptr(aceIndex), uintptr(unsafe.Pointer(pAce))) if r1 == 0 { err = errnoErr(e1) } @@ -813,7 +820,7 @@ func GetAce(acl *ACL, aceIndex uint32, pAce **ACCESS_ALLOWED_ACE) (err error) { } func GetLengthSid(sid *SID) (len uint32) { - r0, _, _ := syscall.Syscall(procGetLengthSid.Addr(), 1, uintptr(unsafe.Pointer(sid)), 0, 0) + r0, _, _ := syscall.SyscallN(procGetLengthSid.Addr(), uintptr(unsafe.Pointer(sid))) len = uint32(r0) return } @@ -828,7 +835,7 @@ func getNamedSecurityInfo(objectName string, objectType SE_OBJECT_TYPE, security } func _getNamedSecurityInfo(objectName *uint16, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner **SID, group **SID, dacl **ACL, sacl **ACL, sd **SECURITY_DESCRIPTOR) (ret error) { - r0, _, _ := syscall.Syscall9(procGetNamedSecurityInfoW.Addr(), 8, uintptr(unsafe.Pointer(objectName)), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl)), uintptr(unsafe.Pointer(sd)), 0) + r0, _, _ := syscall.SyscallN(procGetNamedSecurityInfoW.Addr(), uintptr(unsafe.Pointer(objectName)), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl)), uintptr(unsafe.Pointer(sd))) if r0 != 0 { ret = syscall.Errno(r0) } @@ -836,7 +843,7 @@ func _getNamedSecurityInfo(objectName *uint16, objectType SE_OBJECT_TYPE, securi } func getSecurityDescriptorControl(sd *SECURITY_DESCRIPTOR, control *SECURITY_DESCRIPTOR_CONTROL, revision *uint32) 
(err error) { - r1, _, e1 := syscall.Syscall(procGetSecurityDescriptorControl.Addr(), 3, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(control)), uintptr(unsafe.Pointer(revision))) + r1, _, e1 := syscall.SyscallN(procGetSecurityDescriptorControl.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(control)), uintptr(unsafe.Pointer(revision))) if r1 == 0 { err = errnoErr(e1) } @@ -852,7 +859,7 @@ func getSecurityDescriptorDacl(sd *SECURITY_DESCRIPTOR, daclPresent *bool, dacl if *daclDefaulted { _p1 = 1 } - r1, _, e1 := syscall.Syscall6(procGetSecurityDescriptorDacl.Addr(), 4, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(&_p0)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(&_p1)), 0, 0) + r1, _, e1 := syscall.SyscallN(procGetSecurityDescriptorDacl.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(&_p0)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(&_p1))) *daclPresent = _p0 != 0 *daclDefaulted = _p1 != 0 if r1 == 0 { @@ -866,7 +873,7 @@ func getSecurityDescriptorGroup(sd *SECURITY_DESCRIPTOR, group **SID, groupDefau if *groupDefaulted { _p0 = 1 } - r1, _, e1 := syscall.Syscall(procGetSecurityDescriptorGroup.Addr(), 3, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(&_p0))) + r1, _, e1 := syscall.SyscallN(procGetSecurityDescriptorGroup.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(&_p0))) *groupDefaulted = _p0 != 0 if r1 == 0 { err = errnoErr(e1) @@ -875,7 +882,7 @@ func getSecurityDescriptorGroup(sd *SECURITY_DESCRIPTOR, group **SID, groupDefau } func getSecurityDescriptorLength(sd *SECURITY_DESCRIPTOR) (len uint32) { - r0, _, _ := syscall.Syscall(procGetSecurityDescriptorLength.Addr(), 1, uintptr(unsafe.Pointer(sd)), 0, 0) + r0, _, _ := syscall.SyscallN(procGetSecurityDescriptorLength.Addr(), uintptr(unsafe.Pointer(sd))) len = uint32(r0) return } @@ -885,7 +892,7 @@ func getSecurityDescriptorOwner(sd *SECURITY_DESCRIPTOR, owner **SID, 
ownerDefau if *ownerDefaulted { _p0 = 1 } - r1, _, e1 := syscall.Syscall(procGetSecurityDescriptorOwner.Addr(), 3, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(&_p0))) + r1, _, e1 := syscall.SyscallN(procGetSecurityDescriptorOwner.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(&_p0))) *ownerDefaulted = _p0 != 0 if r1 == 0 { err = errnoErr(e1) @@ -894,7 +901,7 @@ func getSecurityDescriptorOwner(sd *SECURITY_DESCRIPTOR, owner **SID, ownerDefau } func getSecurityDescriptorRMControl(sd *SECURITY_DESCRIPTOR, rmControl *uint8) (ret error) { - r0, _, _ := syscall.Syscall(procGetSecurityDescriptorRMControl.Addr(), 2, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(rmControl)), 0) + r0, _, _ := syscall.SyscallN(procGetSecurityDescriptorRMControl.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(rmControl))) if r0 != 0 { ret = syscall.Errno(r0) } @@ -910,7 +917,7 @@ func getSecurityDescriptorSacl(sd *SECURITY_DESCRIPTOR, saclPresent *bool, sacl if *saclDefaulted { _p1 = 1 } - r1, _, e1 := syscall.Syscall6(procGetSecurityDescriptorSacl.Addr(), 4, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(&_p0)), uintptr(unsafe.Pointer(sacl)), uintptr(unsafe.Pointer(&_p1)), 0, 0) + r1, _, e1 := syscall.SyscallN(procGetSecurityDescriptorSacl.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(&_p0)), uintptr(unsafe.Pointer(sacl)), uintptr(unsafe.Pointer(&_p1))) *saclPresent = _p0 != 0 *saclDefaulted = _p1 != 0 if r1 == 0 { @@ -920,7 +927,7 @@ func getSecurityDescriptorSacl(sd *SECURITY_DESCRIPTOR, saclPresent *bool, sacl } func getSecurityInfo(handle Handle, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner **SID, group **SID, dacl **ACL, sacl **ACL, sd **SECURITY_DESCRIPTOR) (ret error) { - r0, _, _ := syscall.Syscall9(procGetSecurityInfo.Addr(), 8, uintptr(handle), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(owner)), 
uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl)), uintptr(unsafe.Pointer(sd)), 0) + r0, _, _ := syscall.SyscallN(procGetSecurityInfo.Addr(), uintptr(handle), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl)), uintptr(unsafe.Pointer(sd))) if r0 != 0 { ret = syscall.Errno(r0) } @@ -928,25 +935,25 @@ func getSecurityInfo(handle Handle, objectType SE_OBJECT_TYPE, securityInformati } func getSidIdentifierAuthority(sid *SID) (authority *SidIdentifierAuthority) { - r0, _, _ := syscall.Syscall(procGetSidIdentifierAuthority.Addr(), 1, uintptr(unsafe.Pointer(sid)), 0, 0) + r0, _, _ := syscall.SyscallN(procGetSidIdentifierAuthority.Addr(), uintptr(unsafe.Pointer(sid))) authority = (*SidIdentifierAuthority)(unsafe.Pointer(r0)) return } func getSidSubAuthority(sid *SID, index uint32) (subAuthority *uint32) { - r0, _, _ := syscall.Syscall(procGetSidSubAuthority.Addr(), 2, uintptr(unsafe.Pointer(sid)), uintptr(index), 0) + r0, _, _ := syscall.SyscallN(procGetSidSubAuthority.Addr(), uintptr(unsafe.Pointer(sid)), uintptr(index)) subAuthority = (*uint32)(unsafe.Pointer(r0)) return } func getSidSubAuthorityCount(sid *SID) (count *uint8) { - r0, _, _ := syscall.Syscall(procGetSidSubAuthorityCount.Addr(), 1, uintptr(unsafe.Pointer(sid)), 0, 0) + r0, _, _ := syscall.SyscallN(procGetSidSubAuthorityCount.Addr(), uintptr(unsafe.Pointer(sid))) count = (*uint8)(unsafe.Pointer(r0)) return } func GetTokenInformation(token Token, infoClass uint32, info *byte, infoLen uint32, returnedLen *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procGetTokenInformation.Addr(), 5, uintptr(token), uintptr(infoClass), uintptr(unsafe.Pointer(info)), uintptr(infoLen), uintptr(unsafe.Pointer(returnedLen)), 0) + r1, _, e1 := syscall.SyscallN(procGetTokenInformation.Addr(), uintptr(token), uintptr(infoClass), uintptr(unsafe.Pointer(info)), 
uintptr(infoLen), uintptr(unsafe.Pointer(returnedLen))) if r1 == 0 { err = errnoErr(e1) } @@ -954,7 +961,7 @@ func GetTokenInformation(token Token, infoClass uint32, info *byte, infoLen uint } func ImpersonateSelf(impersonationlevel uint32) (err error) { - r1, _, e1 := syscall.Syscall(procImpersonateSelf.Addr(), 1, uintptr(impersonationlevel), 0, 0) + r1, _, e1 := syscall.SyscallN(procImpersonateSelf.Addr(), uintptr(impersonationlevel)) if r1 == 0 { err = errnoErr(e1) } @@ -962,7 +969,7 @@ func ImpersonateSelf(impersonationlevel uint32) (err error) { } func initializeSecurityDescriptor(absoluteSD *SECURITY_DESCRIPTOR, revision uint32) (err error) { - r1, _, e1 := syscall.Syscall(procInitializeSecurityDescriptor.Addr(), 2, uintptr(unsafe.Pointer(absoluteSD)), uintptr(revision), 0) + r1, _, e1 := syscall.SyscallN(procInitializeSecurityDescriptor.Addr(), uintptr(unsafe.Pointer(absoluteSD)), uintptr(revision)) if r1 == 0 { err = errnoErr(e1) } @@ -978,7 +985,7 @@ func InitiateSystemShutdownEx(machineName *uint16, message *uint16, timeout uint if rebootAfterShutdown { _p1 = 1 } - r1, _, e1 := syscall.Syscall6(procInitiateSystemShutdownExW.Addr(), 6, uintptr(unsafe.Pointer(machineName)), uintptr(unsafe.Pointer(message)), uintptr(timeout), uintptr(_p0), uintptr(_p1), uintptr(reason)) + r1, _, e1 := syscall.SyscallN(procInitiateSystemShutdownExW.Addr(), uintptr(unsafe.Pointer(machineName)), uintptr(unsafe.Pointer(message)), uintptr(timeout), uintptr(_p0), uintptr(_p1), uintptr(reason)) if r1 == 0 { err = errnoErr(e1) } @@ -986,7 +993,7 @@ func InitiateSystemShutdownEx(machineName *uint16, message *uint16, timeout uint } func isTokenRestricted(tokenHandle Token) (ret bool, err error) { - r0, _, e1 := syscall.Syscall(procIsTokenRestricted.Addr(), 1, uintptr(tokenHandle), 0, 0) + r0, _, e1 := syscall.SyscallN(procIsTokenRestricted.Addr(), uintptr(tokenHandle)) ret = r0 != 0 if !ret { err = errnoErr(e1) @@ -995,25 +1002,25 @@ func isTokenRestricted(tokenHandle Token) (ret 
bool, err error) { } func isValidSecurityDescriptor(sd *SECURITY_DESCRIPTOR) (isValid bool) { - r0, _, _ := syscall.Syscall(procIsValidSecurityDescriptor.Addr(), 1, uintptr(unsafe.Pointer(sd)), 0, 0) + r0, _, _ := syscall.SyscallN(procIsValidSecurityDescriptor.Addr(), uintptr(unsafe.Pointer(sd))) isValid = r0 != 0 return } func isValidSid(sid *SID) (isValid bool) { - r0, _, _ := syscall.Syscall(procIsValidSid.Addr(), 1, uintptr(unsafe.Pointer(sid)), 0, 0) + r0, _, _ := syscall.SyscallN(procIsValidSid.Addr(), uintptr(unsafe.Pointer(sid))) isValid = r0 != 0 return } func isWellKnownSid(sid *SID, sidType WELL_KNOWN_SID_TYPE) (isWellKnown bool) { - r0, _, _ := syscall.Syscall(procIsWellKnownSid.Addr(), 2, uintptr(unsafe.Pointer(sid)), uintptr(sidType), 0) + r0, _, _ := syscall.SyscallN(procIsWellKnownSid.Addr(), uintptr(unsafe.Pointer(sid)), uintptr(sidType)) isWellKnown = r0 != 0 return } func LookupAccountName(systemName *uint16, accountName *uint16, sid *SID, sidLen *uint32, refdDomainName *uint16, refdDomainNameLen *uint32, use *uint32) (err error) { - r1, _, e1 := syscall.Syscall9(procLookupAccountNameW.Addr(), 7, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(accountName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(sidLen)), uintptr(unsafe.Pointer(refdDomainName)), uintptr(unsafe.Pointer(refdDomainNameLen)), uintptr(unsafe.Pointer(use)), 0, 0) + r1, _, e1 := syscall.SyscallN(procLookupAccountNameW.Addr(), uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(accountName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(sidLen)), uintptr(unsafe.Pointer(refdDomainName)), uintptr(unsafe.Pointer(refdDomainNameLen)), uintptr(unsafe.Pointer(use))) if r1 == 0 { err = errnoErr(e1) } @@ -1021,7 +1028,7 @@ func LookupAccountName(systemName *uint16, accountName *uint16, sid *SID, sidLen } func LookupAccountSid(systemName *uint16, sid *SID, name *uint16, nameLen *uint32, refdDomainName *uint16, refdDomainNameLen *uint32, use *uint32) (err 
error) { - r1, _, e1 := syscall.Syscall9(procLookupAccountSidW.Addr(), 7, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(nameLen)), uintptr(unsafe.Pointer(refdDomainName)), uintptr(unsafe.Pointer(refdDomainNameLen)), uintptr(unsafe.Pointer(use)), 0, 0) + r1, _, e1 := syscall.SyscallN(procLookupAccountSidW.Addr(), uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(nameLen)), uintptr(unsafe.Pointer(refdDomainName)), uintptr(unsafe.Pointer(refdDomainNameLen)), uintptr(unsafe.Pointer(use))) if r1 == 0 { err = errnoErr(e1) } @@ -1029,7 +1036,7 @@ func LookupAccountSid(systemName *uint16, sid *SID, name *uint16, nameLen *uint3 } func LookupPrivilegeValue(systemname *uint16, name *uint16, luid *LUID) (err error) { - r1, _, e1 := syscall.Syscall(procLookupPrivilegeValueW.Addr(), 3, uintptr(unsafe.Pointer(systemname)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(luid))) + r1, _, e1 := syscall.SyscallN(procLookupPrivilegeValueW.Addr(), uintptr(unsafe.Pointer(systemname)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(luid))) if r1 == 0 { err = errnoErr(e1) } @@ -1037,7 +1044,7 @@ func LookupPrivilegeValue(systemname *uint16, name *uint16, luid *LUID) (err err } func makeAbsoluteSD(selfRelativeSD *SECURITY_DESCRIPTOR, absoluteSD *SECURITY_DESCRIPTOR, absoluteSDSize *uint32, dacl *ACL, daclSize *uint32, sacl *ACL, saclSize *uint32, owner *SID, ownerSize *uint32, group *SID, groupSize *uint32) (err error) { - r1, _, e1 := syscall.Syscall12(procMakeAbsoluteSD.Addr(), 11, uintptr(unsafe.Pointer(selfRelativeSD)), uintptr(unsafe.Pointer(absoluteSD)), uintptr(unsafe.Pointer(absoluteSDSize)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(daclSize)), uintptr(unsafe.Pointer(sacl)), uintptr(unsafe.Pointer(saclSize)), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(ownerSize)), uintptr(unsafe.Pointer(group)), 
uintptr(unsafe.Pointer(groupSize)), 0) + r1, _, e1 := syscall.SyscallN(procMakeAbsoluteSD.Addr(), uintptr(unsafe.Pointer(selfRelativeSD)), uintptr(unsafe.Pointer(absoluteSD)), uintptr(unsafe.Pointer(absoluteSDSize)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(daclSize)), uintptr(unsafe.Pointer(sacl)), uintptr(unsafe.Pointer(saclSize)), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(ownerSize)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(groupSize))) if r1 == 0 { err = errnoErr(e1) } @@ -1045,7 +1052,7 @@ func makeAbsoluteSD(selfRelativeSD *SECURITY_DESCRIPTOR, absoluteSD *SECURITY_DE } func makeSelfRelativeSD(absoluteSD *SECURITY_DESCRIPTOR, selfRelativeSD *SECURITY_DESCRIPTOR, selfRelativeSDSize *uint32) (err error) { - r1, _, e1 := syscall.Syscall(procMakeSelfRelativeSD.Addr(), 3, uintptr(unsafe.Pointer(absoluteSD)), uintptr(unsafe.Pointer(selfRelativeSD)), uintptr(unsafe.Pointer(selfRelativeSDSize))) + r1, _, e1 := syscall.SyscallN(procMakeSelfRelativeSD.Addr(), uintptr(unsafe.Pointer(absoluteSD)), uintptr(unsafe.Pointer(selfRelativeSD)), uintptr(unsafe.Pointer(selfRelativeSDSize))) if r1 == 0 { err = errnoErr(e1) } @@ -1053,7 +1060,7 @@ func makeSelfRelativeSD(absoluteSD *SECURITY_DESCRIPTOR, selfRelativeSD *SECURIT } func NotifyServiceStatusChange(service Handle, notifyMask uint32, notifier *SERVICE_NOTIFY) (ret error) { - r0, _, _ := syscall.Syscall(procNotifyServiceStatusChangeW.Addr(), 3, uintptr(service), uintptr(notifyMask), uintptr(unsafe.Pointer(notifier))) + r0, _, _ := syscall.SyscallN(procNotifyServiceStatusChangeW.Addr(), uintptr(service), uintptr(notifyMask), uintptr(unsafe.Pointer(notifier))) if r0 != 0 { ret = syscall.Errno(r0) } @@ -1061,7 +1068,7 @@ func NotifyServiceStatusChange(service Handle, notifyMask uint32, notifier *SERV } func OpenProcessToken(process Handle, access uint32, token *Token) (err error) { - r1, _, e1 := syscall.Syscall(procOpenProcessToken.Addr(), 3, uintptr(process), uintptr(access), 
uintptr(unsafe.Pointer(token))) + r1, _, e1 := syscall.SyscallN(procOpenProcessToken.Addr(), uintptr(process), uintptr(access), uintptr(unsafe.Pointer(token))) if r1 == 0 { err = errnoErr(e1) } @@ -1069,7 +1076,7 @@ func OpenProcessToken(process Handle, access uint32, token *Token) (err error) { } func OpenSCManager(machineName *uint16, databaseName *uint16, access uint32) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procOpenSCManagerW.Addr(), 3, uintptr(unsafe.Pointer(machineName)), uintptr(unsafe.Pointer(databaseName)), uintptr(access)) + r0, _, e1 := syscall.SyscallN(procOpenSCManagerW.Addr(), uintptr(unsafe.Pointer(machineName)), uintptr(unsafe.Pointer(databaseName)), uintptr(access)) handle = Handle(r0) if handle == 0 { err = errnoErr(e1) @@ -1078,7 +1085,7 @@ func OpenSCManager(machineName *uint16, databaseName *uint16, access uint32) (ha } func OpenService(mgr Handle, serviceName *uint16, access uint32) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procOpenServiceW.Addr(), 3, uintptr(mgr), uintptr(unsafe.Pointer(serviceName)), uintptr(access)) + r0, _, e1 := syscall.SyscallN(procOpenServiceW.Addr(), uintptr(mgr), uintptr(unsafe.Pointer(serviceName)), uintptr(access)) handle = Handle(r0) if handle == 0 { err = errnoErr(e1) @@ -1091,7 +1098,7 @@ func OpenThreadToken(thread Handle, access uint32, openAsSelf bool, token *Token if openAsSelf { _p0 = 1 } - r1, _, e1 := syscall.Syscall6(procOpenThreadToken.Addr(), 4, uintptr(thread), uintptr(access), uintptr(_p0), uintptr(unsafe.Pointer(token)), 0, 0) + r1, _, e1 := syscall.SyscallN(procOpenThreadToken.Addr(), uintptr(thread), uintptr(access), uintptr(_p0), uintptr(unsafe.Pointer(token))) if r1 == 0 { err = errnoErr(e1) } @@ -1099,7 +1106,7 @@ func OpenThreadToken(thread Handle, access uint32, openAsSelf bool, token *Token } func QueryServiceConfig2(service Handle, infoLevel uint32, buff *byte, buffSize uint32, bytesNeeded *uint32) (err error) { - r1, _, e1 := 
syscall.Syscall6(procQueryServiceConfig2W.Addr(), 5, uintptr(service), uintptr(infoLevel), uintptr(unsafe.Pointer(buff)), uintptr(buffSize), uintptr(unsafe.Pointer(bytesNeeded)), 0) + r1, _, e1 := syscall.SyscallN(procQueryServiceConfig2W.Addr(), uintptr(service), uintptr(infoLevel), uintptr(unsafe.Pointer(buff)), uintptr(buffSize), uintptr(unsafe.Pointer(bytesNeeded))) if r1 == 0 { err = errnoErr(e1) } @@ -1107,7 +1114,7 @@ func QueryServiceConfig2(service Handle, infoLevel uint32, buff *byte, buffSize } func QueryServiceConfig(service Handle, serviceConfig *QUERY_SERVICE_CONFIG, bufSize uint32, bytesNeeded *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procQueryServiceConfigW.Addr(), 4, uintptr(service), uintptr(unsafe.Pointer(serviceConfig)), uintptr(bufSize), uintptr(unsafe.Pointer(bytesNeeded)), 0, 0) + r1, _, e1 := syscall.SyscallN(procQueryServiceConfigW.Addr(), uintptr(service), uintptr(unsafe.Pointer(serviceConfig)), uintptr(bufSize), uintptr(unsafe.Pointer(bytesNeeded))) if r1 == 0 { err = errnoErr(e1) } @@ -1119,7 +1126,7 @@ func QueryServiceDynamicInformation(service Handle, infoLevel uint32, dynamicInf if err != nil { return } - r1, _, e1 := syscall.Syscall(procQueryServiceDynamicInformation.Addr(), 3, uintptr(service), uintptr(infoLevel), uintptr(dynamicInfo)) + r1, _, e1 := syscall.SyscallN(procQueryServiceDynamicInformation.Addr(), uintptr(service), uintptr(infoLevel), uintptr(dynamicInfo)) if r1 == 0 { err = errnoErr(e1) } @@ -1127,7 +1134,7 @@ func QueryServiceDynamicInformation(service Handle, infoLevel uint32, dynamicInf } func QueryServiceLockStatus(mgr Handle, lockStatus *QUERY_SERVICE_LOCK_STATUS, bufSize uint32, bytesNeeded *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procQueryServiceLockStatusW.Addr(), 4, uintptr(mgr), uintptr(unsafe.Pointer(lockStatus)), uintptr(bufSize), uintptr(unsafe.Pointer(bytesNeeded)), 0, 0) + r1, _, e1 := syscall.SyscallN(procQueryServiceLockStatusW.Addr(), uintptr(mgr), 
uintptr(unsafe.Pointer(lockStatus)), uintptr(bufSize), uintptr(unsafe.Pointer(bytesNeeded))) if r1 == 0 { err = errnoErr(e1) } @@ -1135,7 +1142,7 @@ func QueryServiceLockStatus(mgr Handle, lockStatus *QUERY_SERVICE_LOCK_STATUS, b } func QueryServiceStatus(service Handle, status *SERVICE_STATUS) (err error) { - r1, _, e1 := syscall.Syscall(procQueryServiceStatus.Addr(), 2, uintptr(service), uintptr(unsafe.Pointer(status)), 0) + r1, _, e1 := syscall.SyscallN(procQueryServiceStatus.Addr(), uintptr(service), uintptr(unsafe.Pointer(status))) if r1 == 0 { err = errnoErr(e1) } @@ -1143,7 +1150,7 @@ func QueryServiceStatus(service Handle, status *SERVICE_STATUS) (err error) { } func QueryServiceStatusEx(service Handle, infoLevel uint32, buff *byte, buffSize uint32, bytesNeeded *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procQueryServiceStatusEx.Addr(), 5, uintptr(service), uintptr(infoLevel), uintptr(unsafe.Pointer(buff)), uintptr(buffSize), uintptr(unsafe.Pointer(bytesNeeded)), 0) + r1, _, e1 := syscall.SyscallN(procQueryServiceStatusEx.Addr(), uintptr(service), uintptr(infoLevel), uintptr(unsafe.Pointer(buff)), uintptr(buffSize), uintptr(unsafe.Pointer(bytesNeeded))) if r1 == 0 { err = errnoErr(e1) } @@ -1151,7 +1158,7 @@ func QueryServiceStatusEx(service Handle, infoLevel uint32, buff *byte, buffSize } func RegCloseKey(key Handle) (regerrno error) { - r0, _, _ := syscall.Syscall(procRegCloseKey.Addr(), 1, uintptr(key), 0, 0) + r0, _, _ := syscall.SyscallN(procRegCloseKey.Addr(), uintptr(key)) if r0 != 0 { regerrno = syscall.Errno(r0) } @@ -1159,7 +1166,7 @@ func RegCloseKey(key Handle) (regerrno error) { } func RegEnumKeyEx(key Handle, index uint32, name *uint16, nameLen *uint32, reserved *uint32, class *uint16, classLen *uint32, lastWriteTime *Filetime) (regerrno error) { - r0, _, _ := syscall.Syscall9(procRegEnumKeyExW.Addr(), 8, uintptr(key), uintptr(index), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(nameLen)), uintptr(unsafe.Pointer(reserved)), 
uintptr(unsafe.Pointer(class)), uintptr(unsafe.Pointer(classLen)), uintptr(unsafe.Pointer(lastWriteTime)), 0) + r0, _, _ := syscall.SyscallN(procRegEnumKeyExW.Addr(), uintptr(key), uintptr(index), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(nameLen)), uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(class)), uintptr(unsafe.Pointer(classLen)), uintptr(unsafe.Pointer(lastWriteTime))) if r0 != 0 { regerrno = syscall.Errno(r0) } @@ -1175,7 +1182,7 @@ func RegNotifyChangeKeyValue(key Handle, watchSubtree bool, notifyFilter uint32, if asynchronous { _p1 = 1 } - r0, _, _ := syscall.Syscall6(procRegNotifyChangeKeyValue.Addr(), 5, uintptr(key), uintptr(_p0), uintptr(notifyFilter), uintptr(event), uintptr(_p1), 0) + r0, _, _ := syscall.SyscallN(procRegNotifyChangeKeyValue.Addr(), uintptr(key), uintptr(_p0), uintptr(notifyFilter), uintptr(event), uintptr(_p1)) if r0 != 0 { regerrno = syscall.Errno(r0) } @@ -1183,7 +1190,7 @@ func RegNotifyChangeKeyValue(key Handle, watchSubtree bool, notifyFilter uint32, } func RegOpenKeyEx(key Handle, subkey *uint16, options uint32, desiredAccess uint32, result *Handle) (regerrno error) { - r0, _, _ := syscall.Syscall6(procRegOpenKeyExW.Addr(), 5, uintptr(key), uintptr(unsafe.Pointer(subkey)), uintptr(options), uintptr(desiredAccess), uintptr(unsafe.Pointer(result)), 0) + r0, _, _ := syscall.SyscallN(procRegOpenKeyExW.Addr(), uintptr(key), uintptr(unsafe.Pointer(subkey)), uintptr(options), uintptr(desiredAccess), uintptr(unsafe.Pointer(result))) if r0 != 0 { regerrno = syscall.Errno(r0) } @@ -1191,7 +1198,7 @@ func RegOpenKeyEx(key Handle, subkey *uint16, options uint32, desiredAccess uint } func RegQueryInfoKey(key Handle, class *uint16, classLen *uint32, reserved *uint32, subkeysLen *uint32, maxSubkeyLen *uint32, maxClassLen *uint32, valuesLen *uint32, maxValueNameLen *uint32, maxValueLen *uint32, saLen *uint32, lastWriteTime *Filetime) (regerrno error) { - r0, _, _ := syscall.Syscall12(procRegQueryInfoKeyW.Addr(), 12, 
uintptr(key), uintptr(unsafe.Pointer(class)), uintptr(unsafe.Pointer(classLen)), uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(subkeysLen)), uintptr(unsafe.Pointer(maxSubkeyLen)), uintptr(unsafe.Pointer(maxClassLen)), uintptr(unsafe.Pointer(valuesLen)), uintptr(unsafe.Pointer(maxValueNameLen)), uintptr(unsafe.Pointer(maxValueLen)), uintptr(unsafe.Pointer(saLen)), uintptr(unsafe.Pointer(lastWriteTime))) + r0, _, _ := syscall.SyscallN(procRegQueryInfoKeyW.Addr(), uintptr(key), uintptr(unsafe.Pointer(class)), uintptr(unsafe.Pointer(classLen)), uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(subkeysLen)), uintptr(unsafe.Pointer(maxSubkeyLen)), uintptr(unsafe.Pointer(maxClassLen)), uintptr(unsafe.Pointer(valuesLen)), uintptr(unsafe.Pointer(maxValueNameLen)), uintptr(unsafe.Pointer(maxValueLen)), uintptr(unsafe.Pointer(saLen)), uintptr(unsafe.Pointer(lastWriteTime))) if r0 != 0 { regerrno = syscall.Errno(r0) } @@ -1199,7 +1206,7 @@ func RegQueryInfoKey(key Handle, class *uint16, classLen *uint32, reserved *uint } func RegQueryValueEx(key Handle, name *uint16, reserved *uint32, valtype *uint32, buf *byte, buflen *uint32) (regerrno error) { - r0, _, _ := syscall.Syscall6(procRegQueryValueExW.Addr(), 6, uintptr(key), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(valtype)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(buflen))) + r0, _, _ := syscall.SyscallN(procRegQueryValueExW.Addr(), uintptr(key), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(valtype)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(buflen))) if r0 != 0 { regerrno = syscall.Errno(r0) } @@ -1207,7 +1214,7 @@ func RegQueryValueEx(key Handle, name *uint16, reserved *uint32, valtype *uint32 } func RegisterEventSource(uncServerName *uint16, sourceName *uint16) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procRegisterEventSourceW.Addr(), 2, 
uintptr(unsafe.Pointer(uncServerName)), uintptr(unsafe.Pointer(sourceName)), 0) + r0, _, e1 := syscall.SyscallN(procRegisterEventSourceW.Addr(), uintptr(unsafe.Pointer(uncServerName)), uintptr(unsafe.Pointer(sourceName))) handle = Handle(r0) if handle == 0 { err = errnoErr(e1) @@ -1216,7 +1223,7 @@ func RegisterEventSource(uncServerName *uint16, sourceName *uint16) (handle Hand } func RegisterServiceCtrlHandlerEx(serviceName *uint16, handlerProc uintptr, context uintptr) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procRegisterServiceCtrlHandlerExW.Addr(), 3, uintptr(unsafe.Pointer(serviceName)), uintptr(handlerProc), uintptr(context)) + r0, _, e1 := syscall.SyscallN(procRegisterServiceCtrlHandlerExW.Addr(), uintptr(unsafe.Pointer(serviceName)), uintptr(handlerProc), uintptr(context)) handle = Handle(r0) if handle == 0 { err = errnoErr(e1) @@ -1225,7 +1232,7 @@ func RegisterServiceCtrlHandlerEx(serviceName *uint16, handlerProc uintptr, cont } func ReportEvent(log Handle, etype uint16, category uint16, eventId uint32, usrSId uintptr, numStrings uint16, dataSize uint32, strings **uint16, rawData *byte) (err error) { - r1, _, e1 := syscall.Syscall9(procReportEventW.Addr(), 9, uintptr(log), uintptr(etype), uintptr(category), uintptr(eventId), uintptr(usrSId), uintptr(numStrings), uintptr(dataSize), uintptr(unsafe.Pointer(strings)), uintptr(unsafe.Pointer(rawData))) + r1, _, e1 := syscall.SyscallN(procReportEventW.Addr(), uintptr(log), uintptr(etype), uintptr(category), uintptr(eventId), uintptr(usrSId), uintptr(numStrings), uintptr(dataSize), uintptr(unsafe.Pointer(strings)), uintptr(unsafe.Pointer(rawData))) if r1 == 0 { err = errnoErr(e1) } @@ -1233,7 +1240,7 @@ func ReportEvent(log Handle, etype uint16, category uint16, eventId uint32, usrS } func RevertToSelf() (err error) { - r1, _, e1 := syscall.Syscall(procRevertToSelf.Addr(), 0, 0, 0, 0) + r1, _, e1 := syscall.SyscallN(procRevertToSelf.Addr()) if r1 == 0 { err = errnoErr(e1) } @@ -1241,7 +1248,7 
@@ func RevertToSelf() (err error) { } func setEntriesInAcl(countExplicitEntries uint32, explicitEntries *EXPLICIT_ACCESS, oldACL *ACL, newACL **ACL) (ret error) { - r0, _, _ := syscall.Syscall6(procSetEntriesInAclW.Addr(), 4, uintptr(countExplicitEntries), uintptr(unsafe.Pointer(explicitEntries)), uintptr(unsafe.Pointer(oldACL)), uintptr(unsafe.Pointer(newACL)), 0, 0) + r0, _, _ := syscall.SyscallN(procSetEntriesInAclW.Addr(), uintptr(countExplicitEntries), uintptr(unsafe.Pointer(explicitEntries)), uintptr(unsafe.Pointer(oldACL)), uintptr(unsafe.Pointer(newACL))) if r0 != 0 { ret = syscall.Errno(r0) } @@ -1249,7 +1256,7 @@ func setEntriesInAcl(countExplicitEntries uint32, explicitEntries *EXPLICIT_ACCE } func SetKernelObjectSecurity(handle Handle, securityInformation SECURITY_INFORMATION, securityDescriptor *SECURITY_DESCRIPTOR) (err error) { - r1, _, e1 := syscall.Syscall(procSetKernelObjectSecurity.Addr(), 3, uintptr(handle), uintptr(securityInformation), uintptr(unsafe.Pointer(securityDescriptor))) + r1, _, e1 := syscall.SyscallN(procSetKernelObjectSecurity.Addr(), uintptr(handle), uintptr(securityInformation), uintptr(unsafe.Pointer(securityDescriptor))) if r1 == 0 { err = errnoErr(e1) } @@ -1266,7 +1273,7 @@ func SetNamedSecurityInfo(objectName string, objectType SE_OBJECT_TYPE, security } func _SetNamedSecurityInfo(objectName *uint16, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner *SID, group *SID, dacl *ACL, sacl *ACL) (ret error) { - r0, _, _ := syscall.Syscall9(procSetNamedSecurityInfoW.Addr(), 7, uintptr(unsafe.Pointer(objectName)), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl)), 0, 0) + r0, _, _ := syscall.SyscallN(procSetNamedSecurityInfoW.Addr(), uintptr(unsafe.Pointer(objectName)), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), 
uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl))) if r0 != 0 { ret = syscall.Errno(r0) } @@ -1274,7 +1281,7 @@ func _SetNamedSecurityInfo(objectName *uint16, objectType SE_OBJECT_TYPE, securi } func setSecurityDescriptorControl(sd *SECURITY_DESCRIPTOR, controlBitsOfInterest SECURITY_DESCRIPTOR_CONTROL, controlBitsToSet SECURITY_DESCRIPTOR_CONTROL) (err error) { - r1, _, e1 := syscall.Syscall(procSetSecurityDescriptorControl.Addr(), 3, uintptr(unsafe.Pointer(sd)), uintptr(controlBitsOfInterest), uintptr(controlBitsToSet)) + r1, _, e1 := syscall.SyscallN(procSetSecurityDescriptorControl.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(controlBitsOfInterest), uintptr(controlBitsToSet)) if r1 == 0 { err = errnoErr(e1) } @@ -1290,7 +1297,7 @@ func setSecurityDescriptorDacl(sd *SECURITY_DESCRIPTOR, daclPresent bool, dacl * if daclDefaulted { _p1 = 1 } - r1, _, e1 := syscall.Syscall6(procSetSecurityDescriptorDacl.Addr(), 4, uintptr(unsafe.Pointer(sd)), uintptr(_p0), uintptr(unsafe.Pointer(dacl)), uintptr(_p1), 0, 0) + r1, _, e1 := syscall.SyscallN(procSetSecurityDescriptorDacl.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(_p0), uintptr(unsafe.Pointer(dacl)), uintptr(_p1)) if r1 == 0 { err = errnoErr(e1) } @@ -1302,7 +1309,7 @@ func setSecurityDescriptorGroup(sd *SECURITY_DESCRIPTOR, group *SID, groupDefaul if groupDefaulted { _p0 = 1 } - r1, _, e1 := syscall.Syscall(procSetSecurityDescriptorGroup.Addr(), 3, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(group)), uintptr(_p0)) + r1, _, e1 := syscall.SyscallN(procSetSecurityDescriptorGroup.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(group)), uintptr(_p0)) if r1 == 0 { err = errnoErr(e1) } @@ -1314,7 +1321,7 @@ func setSecurityDescriptorOwner(sd *SECURITY_DESCRIPTOR, owner *SID, ownerDefaul if ownerDefaulted { _p0 = 1 } - r1, _, e1 := syscall.Syscall(procSetSecurityDescriptorOwner.Addr(), 3, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(owner)), uintptr(_p0)) + r1, _, e1 := 
syscall.SyscallN(procSetSecurityDescriptorOwner.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(owner)), uintptr(_p0)) if r1 == 0 { err = errnoErr(e1) } @@ -1322,7 +1329,7 @@ func setSecurityDescriptorOwner(sd *SECURITY_DESCRIPTOR, owner *SID, ownerDefaul } func setSecurityDescriptorRMControl(sd *SECURITY_DESCRIPTOR, rmControl *uint8) { - syscall.Syscall(procSetSecurityDescriptorRMControl.Addr(), 2, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(rmControl)), 0) + syscall.SyscallN(procSetSecurityDescriptorRMControl.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(rmControl))) return } @@ -1335,7 +1342,7 @@ func setSecurityDescriptorSacl(sd *SECURITY_DESCRIPTOR, saclPresent bool, sacl * if saclDefaulted { _p1 = 1 } - r1, _, e1 := syscall.Syscall6(procSetSecurityDescriptorSacl.Addr(), 4, uintptr(unsafe.Pointer(sd)), uintptr(_p0), uintptr(unsafe.Pointer(sacl)), uintptr(_p1), 0, 0) + r1, _, e1 := syscall.SyscallN(procSetSecurityDescriptorSacl.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(_p0), uintptr(unsafe.Pointer(sacl)), uintptr(_p1)) if r1 == 0 { err = errnoErr(e1) } @@ -1343,7 +1350,7 @@ func setSecurityDescriptorSacl(sd *SECURITY_DESCRIPTOR, saclPresent bool, sacl * } func SetSecurityInfo(handle Handle, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner *SID, group *SID, dacl *ACL, sacl *ACL) (ret error) { - r0, _, _ := syscall.Syscall9(procSetSecurityInfo.Addr(), 7, uintptr(handle), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl)), 0, 0) + r0, _, _ := syscall.SyscallN(procSetSecurityInfo.Addr(), uintptr(handle), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl))) if r0 != 0 { ret = syscall.Errno(r0) } @@ -1351,7 +1358,7 @@ func SetSecurityInfo(handle Handle, 
objectType SE_OBJECT_TYPE, securityInformati } func SetServiceStatus(service Handle, serviceStatus *SERVICE_STATUS) (err error) { - r1, _, e1 := syscall.Syscall(procSetServiceStatus.Addr(), 2, uintptr(service), uintptr(unsafe.Pointer(serviceStatus)), 0) + r1, _, e1 := syscall.SyscallN(procSetServiceStatus.Addr(), uintptr(service), uintptr(unsafe.Pointer(serviceStatus))) if r1 == 0 { err = errnoErr(e1) } @@ -1359,7 +1366,7 @@ func SetServiceStatus(service Handle, serviceStatus *SERVICE_STATUS) (err error) } func SetThreadToken(thread *Handle, token Token) (err error) { - r1, _, e1 := syscall.Syscall(procSetThreadToken.Addr(), 2, uintptr(unsafe.Pointer(thread)), uintptr(token), 0) + r1, _, e1 := syscall.SyscallN(procSetThreadToken.Addr(), uintptr(unsafe.Pointer(thread)), uintptr(token)) if r1 == 0 { err = errnoErr(e1) } @@ -1367,7 +1374,7 @@ func SetThreadToken(thread *Handle, token Token) (err error) { } func SetTokenInformation(token Token, infoClass uint32, info *byte, infoLen uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procSetTokenInformation.Addr(), 4, uintptr(token), uintptr(infoClass), uintptr(unsafe.Pointer(info)), uintptr(infoLen), 0, 0) + r1, _, e1 := syscall.SyscallN(procSetTokenInformation.Addr(), uintptr(token), uintptr(infoClass), uintptr(unsafe.Pointer(info)), uintptr(infoLen)) if r1 == 0 { err = errnoErr(e1) } @@ -1375,7 +1382,7 @@ func SetTokenInformation(token Token, infoClass uint32, info *byte, infoLen uint } func StartServiceCtrlDispatcher(serviceTable *SERVICE_TABLE_ENTRY) (err error) { - r1, _, e1 := syscall.Syscall(procStartServiceCtrlDispatcherW.Addr(), 1, uintptr(unsafe.Pointer(serviceTable)), 0, 0) + r1, _, e1 := syscall.SyscallN(procStartServiceCtrlDispatcherW.Addr(), uintptr(unsafe.Pointer(serviceTable))) if r1 == 0 { err = errnoErr(e1) } @@ -1383,7 +1390,7 @@ func StartServiceCtrlDispatcher(serviceTable *SERVICE_TABLE_ENTRY) (err error) { } func StartService(service Handle, numArgs uint32, argVectors **uint16) (err error) { - 
r1, _, e1 := syscall.Syscall(procStartServiceW.Addr(), 3, uintptr(service), uintptr(numArgs), uintptr(unsafe.Pointer(argVectors))) + r1, _, e1 := syscall.SyscallN(procStartServiceW.Addr(), uintptr(service), uintptr(numArgs), uintptr(unsafe.Pointer(argVectors))) if r1 == 0 { err = errnoErr(e1) } @@ -1391,7 +1398,7 @@ func StartService(service Handle, numArgs uint32, argVectors **uint16) (err erro } func CertAddCertificateContextToStore(store Handle, certContext *CertContext, addDisposition uint32, storeContext **CertContext) (err error) { - r1, _, e1 := syscall.Syscall6(procCertAddCertificateContextToStore.Addr(), 4, uintptr(store), uintptr(unsafe.Pointer(certContext)), uintptr(addDisposition), uintptr(unsafe.Pointer(storeContext)), 0, 0) + r1, _, e1 := syscall.SyscallN(procCertAddCertificateContextToStore.Addr(), uintptr(store), uintptr(unsafe.Pointer(certContext)), uintptr(addDisposition), uintptr(unsafe.Pointer(storeContext))) if r1 == 0 { err = errnoErr(e1) } @@ -1399,7 +1406,7 @@ func CertAddCertificateContextToStore(store Handle, certContext *CertContext, ad } func CertCloseStore(store Handle, flags uint32) (err error) { - r1, _, e1 := syscall.Syscall(procCertCloseStore.Addr(), 2, uintptr(store), uintptr(flags), 0) + r1, _, e1 := syscall.SyscallN(procCertCloseStore.Addr(), uintptr(store), uintptr(flags)) if r1 == 0 { err = errnoErr(e1) } @@ -1407,7 +1414,7 @@ func CertCloseStore(store Handle, flags uint32) (err error) { } func CertCreateCertificateContext(certEncodingType uint32, certEncoded *byte, encodedLen uint32) (context *CertContext, err error) { - r0, _, e1 := syscall.Syscall(procCertCreateCertificateContext.Addr(), 3, uintptr(certEncodingType), uintptr(unsafe.Pointer(certEncoded)), uintptr(encodedLen)) + r0, _, e1 := syscall.SyscallN(procCertCreateCertificateContext.Addr(), uintptr(certEncodingType), uintptr(unsafe.Pointer(certEncoded)), uintptr(encodedLen)) context = (*CertContext)(unsafe.Pointer(r0)) if context == nil { err = errnoErr(e1) @@ -1416,7 
+1423,7 @@ func CertCreateCertificateContext(certEncodingType uint32, certEncoded *byte, en } func CertDeleteCertificateFromStore(certContext *CertContext) (err error) { - r1, _, e1 := syscall.Syscall(procCertDeleteCertificateFromStore.Addr(), 1, uintptr(unsafe.Pointer(certContext)), 0, 0) + r1, _, e1 := syscall.SyscallN(procCertDeleteCertificateFromStore.Addr(), uintptr(unsafe.Pointer(certContext))) if r1 == 0 { err = errnoErr(e1) } @@ -1424,13 +1431,13 @@ func CertDeleteCertificateFromStore(certContext *CertContext) (err error) { } func CertDuplicateCertificateContext(certContext *CertContext) (dupContext *CertContext) { - r0, _, _ := syscall.Syscall(procCertDuplicateCertificateContext.Addr(), 1, uintptr(unsafe.Pointer(certContext)), 0, 0) + r0, _, _ := syscall.SyscallN(procCertDuplicateCertificateContext.Addr(), uintptr(unsafe.Pointer(certContext))) dupContext = (*CertContext)(unsafe.Pointer(r0)) return } func CertEnumCertificatesInStore(store Handle, prevContext *CertContext) (context *CertContext, err error) { - r0, _, e1 := syscall.Syscall(procCertEnumCertificatesInStore.Addr(), 2, uintptr(store), uintptr(unsafe.Pointer(prevContext)), 0) + r0, _, e1 := syscall.SyscallN(procCertEnumCertificatesInStore.Addr(), uintptr(store), uintptr(unsafe.Pointer(prevContext))) context = (*CertContext)(unsafe.Pointer(r0)) if context == nil { err = errnoErr(e1) @@ -1439,7 +1446,7 @@ func CertEnumCertificatesInStore(store Handle, prevContext *CertContext) (contex } func CertFindCertificateInStore(store Handle, certEncodingType uint32, findFlags uint32, findType uint32, findPara unsafe.Pointer, prevCertContext *CertContext) (cert *CertContext, err error) { - r0, _, e1 := syscall.Syscall6(procCertFindCertificateInStore.Addr(), 6, uintptr(store), uintptr(certEncodingType), uintptr(findFlags), uintptr(findType), uintptr(findPara), uintptr(unsafe.Pointer(prevCertContext))) + r0, _, e1 := syscall.SyscallN(procCertFindCertificateInStore.Addr(), uintptr(store), 
uintptr(certEncodingType), uintptr(findFlags), uintptr(findType), uintptr(findPara), uintptr(unsafe.Pointer(prevCertContext))) cert = (*CertContext)(unsafe.Pointer(r0)) if cert == nil { err = errnoErr(e1) @@ -1448,7 +1455,7 @@ func CertFindCertificateInStore(store Handle, certEncodingType uint32, findFlags } func CertFindChainInStore(store Handle, certEncodingType uint32, findFlags uint32, findType uint32, findPara unsafe.Pointer, prevChainContext *CertChainContext) (certchain *CertChainContext, err error) { - r0, _, e1 := syscall.Syscall6(procCertFindChainInStore.Addr(), 6, uintptr(store), uintptr(certEncodingType), uintptr(findFlags), uintptr(findType), uintptr(findPara), uintptr(unsafe.Pointer(prevChainContext))) + r0, _, e1 := syscall.SyscallN(procCertFindChainInStore.Addr(), uintptr(store), uintptr(certEncodingType), uintptr(findFlags), uintptr(findType), uintptr(findPara), uintptr(unsafe.Pointer(prevChainContext))) certchain = (*CertChainContext)(unsafe.Pointer(r0)) if certchain == nil { err = errnoErr(e1) @@ -1457,18 +1464,18 @@ func CertFindChainInStore(store Handle, certEncodingType uint32, findFlags uint3 } func CertFindExtension(objId *byte, countExtensions uint32, extensions *CertExtension) (ret *CertExtension) { - r0, _, _ := syscall.Syscall(procCertFindExtension.Addr(), 3, uintptr(unsafe.Pointer(objId)), uintptr(countExtensions), uintptr(unsafe.Pointer(extensions))) + r0, _, _ := syscall.SyscallN(procCertFindExtension.Addr(), uintptr(unsafe.Pointer(objId)), uintptr(countExtensions), uintptr(unsafe.Pointer(extensions))) ret = (*CertExtension)(unsafe.Pointer(r0)) return } func CertFreeCertificateChain(ctx *CertChainContext) { - syscall.Syscall(procCertFreeCertificateChain.Addr(), 1, uintptr(unsafe.Pointer(ctx)), 0, 0) + syscall.SyscallN(procCertFreeCertificateChain.Addr(), uintptr(unsafe.Pointer(ctx))) return } func CertFreeCertificateContext(ctx *CertContext) (err error) { - r1, _, e1 := syscall.Syscall(procCertFreeCertificateContext.Addr(), 1, 
uintptr(unsafe.Pointer(ctx)), 0, 0) + r1, _, e1 := syscall.SyscallN(procCertFreeCertificateContext.Addr(), uintptr(unsafe.Pointer(ctx))) if r1 == 0 { err = errnoErr(e1) } @@ -1476,7 +1483,7 @@ func CertFreeCertificateContext(ctx *CertContext) (err error) { } func CertGetCertificateChain(engine Handle, leaf *CertContext, time *Filetime, additionalStore Handle, para *CertChainPara, flags uint32, reserved uintptr, chainCtx **CertChainContext) (err error) { - r1, _, e1 := syscall.Syscall9(procCertGetCertificateChain.Addr(), 8, uintptr(engine), uintptr(unsafe.Pointer(leaf)), uintptr(unsafe.Pointer(time)), uintptr(additionalStore), uintptr(unsafe.Pointer(para)), uintptr(flags), uintptr(reserved), uintptr(unsafe.Pointer(chainCtx)), 0) + r1, _, e1 := syscall.SyscallN(procCertGetCertificateChain.Addr(), uintptr(engine), uintptr(unsafe.Pointer(leaf)), uintptr(unsafe.Pointer(time)), uintptr(additionalStore), uintptr(unsafe.Pointer(para)), uintptr(flags), uintptr(reserved), uintptr(unsafe.Pointer(chainCtx))) if r1 == 0 { err = errnoErr(e1) } @@ -1484,13 +1491,13 @@ func CertGetCertificateChain(engine Handle, leaf *CertContext, time *Filetime, a } func CertGetNameString(certContext *CertContext, nameType uint32, flags uint32, typePara unsafe.Pointer, name *uint16, size uint32) (chars uint32) { - r0, _, _ := syscall.Syscall6(procCertGetNameStringW.Addr(), 6, uintptr(unsafe.Pointer(certContext)), uintptr(nameType), uintptr(flags), uintptr(typePara), uintptr(unsafe.Pointer(name)), uintptr(size)) + r0, _, _ := syscall.SyscallN(procCertGetNameStringW.Addr(), uintptr(unsafe.Pointer(certContext)), uintptr(nameType), uintptr(flags), uintptr(typePara), uintptr(unsafe.Pointer(name)), uintptr(size)) chars = uint32(r0) return } func CertOpenStore(storeProvider uintptr, msgAndCertEncodingType uint32, cryptProv uintptr, flags uint32, para uintptr) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall6(procCertOpenStore.Addr(), 5, uintptr(storeProvider), uintptr(msgAndCertEncodingType), 
uintptr(cryptProv), uintptr(flags), uintptr(para), 0) + r0, _, e1 := syscall.SyscallN(procCertOpenStore.Addr(), uintptr(storeProvider), uintptr(msgAndCertEncodingType), uintptr(cryptProv), uintptr(flags), uintptr(para)) handle = Handle(r0) if handle == 0 { err = errnoErr(e1) @@ -1499,7 +1506,7 @@ func CertOpenStore(storeProvider uintptr, msgAndCertEncodingType uint32, cryptPr } func CertOpenSystemStore(hprov Handle, name *uint16) (store Handle, err error) { - r0, _, e1 := syscall.Syscall(procCertOpenSystemStoreW.Addr(), 2, uintptr(hprov), uintptr(unsafe.Pointer(name)), 0) + r0, _, e1 := syscall.SyscallN(procCertOpenSystemStoreW.Addr(), uintptr(hprov), uintptr(unsafe.Pointer(name))) store = Handle(r0) if store == 0 { err = errnoErr(e1) @@ -1508,7 +1515,7 @@ func CertOpenSystemStore(hprov Handle, name *uint16) (store Handle, err error) { } func CertVerifyCertificateChainPolicy(policyOID uintptr, chain *CertChainContext, para *CertChainPolicyPara, status *CertChainPolicyStatus) (err error) { - r1, _, e1 := syscall.Syscall6(procCertVerifyCertificateChainPolicy.Addr(), 4, uintptr(policyOID), uintptr(unsafe.Pointer(chain)), uintptr(unsafe.Pointer(para)), uintptr(unsafe.Pointer(status)), 0, 0) + r1, _, e1 := syscall.SyscallN(procCertVerifyCertificateChainPolicy.Addr(), uintptr(policyOID), uintptr(unsafe.Pointer(chain)), uintptr(unsafe.Pointer(para)), uintptr(unsafe.Pointer(status))) if r1 == 0 { err = errnoErr(e1) } @@ -1520,7 +1527,7 @@ func CryptAcquireCertificatePrivateKey(cert *CertContext, flags uint32, paramete if *callerFreeProvOrNCryptKey { _p0 = 1 } - r1, _, e1 := syscall.Syscall6(procCryptAcquireCertificatePrivateKey.Addr(), 6, uintptr(unsafe.Pointer(cert)), uintptr(flags), uintptr(parameters), uintptr(unsafe.Pointer(cryptProvOrNCryptKey)), uintptr(unsafe.Pointer(keySpec)), uintptr(unsafe.Pointer(&_p0))) + r1, _, e1 := syscall.SyscallN(procCryptAcquireCertificatePrivateKey.Addr(), uintptr(unsafe.Pointer(cert)), uintptr(flags), uintptr(parameters), 
uintptr(unsafe.Pointer(cryptProvOrNCryptKey)), uintptr(unsafe.Pointer(keySpec)), uintptr(unsafe.Pointer(&_p0))) *callerFreeProvOrNCryptKey = _p0 != 0 if r1 == 0 { err = errnoErr(e1) @@ -1529,7 +1536,7 @@ func CryptAcquireCertificatePrivateKey(cert *CertContext, flags uint32, paramete } func CryptDecodeObject(encodingType uint32, structType *byte, encodedBytes *byte, lenEncodedBytes uint32, flags uint32, decoded unsafe.Pointer, decodedLen *uint32) (err error) { - r1, _, e1 := syscall.Syscall9(procCryptDecodeObject.Addr(), 7, uintptr(encodingType), uintptr(unsafe.Pointer(structType)), uintptr(unsafe.Pointer(encodedBytes)), uintptr(lenEncodedBytes), uintptr(flags), uintptr(decoded), uintptr(unsafe.Pointer(decodedLen)), 0, 0) + r1, _, e1 := syscall.SyscallN(procCryptDecodeObject.Addr(), uintptr(encodingType), uintptr(unsafe.Pointer(structType)), uintptr(unsafe.Pointer(encodedBytes)), uintptr(lenEncodedBytes), uintptr(flags), uintptr(decoded), uintptr(unsafe.Pointer(decodedLen))) if r1 == 0 { err = errnoErr(e1) } @@ -1537,7 +1544,7 @@ func CryptDecodeObject(encodingType uint32, structType *byte, encodedBytes *byte } func CryptProtectData(dataIn *DataBlob, name *uint16, optionalEntropy *DataBlob, reserved uintptr, promptStruct *CryptProtectPromptStruct, flags uint32, dataOut *DataBlob) (err error) { - r1, _, e1 := syscall.Syscall9(procCryptProtectData.Addr(), 7, uintptr(unsafe.Pointer(dataIn)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(optionalEntropy)), uintptr(reserved), uintptr(unsafe.Pointer(promptStruct)), uintptr(flags), uintptr(unsafe.Pointer(dataOut)), 0, 0) + r1, _, e1 := syscall.SyscallN(procCryptProtectData.Addr(), uintptr(unsafe.Pointer(dataIn)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(optionalEntropy)), uintptr(reserved), uintptr(unsafe.Pointer(promptStruct)), uintptr(flags), uintptr(unsafe.Pointer(dataOut))) if r1 == 0 { err = errnoErr(e1) } @@ -1545,7 +1552,7 @@ func CryptProtectData(dataIn *DataBlob, name *uint16, optionalEntropy 
*DataBlob, } func CryptQueryObject(objectType uint32, object unsafe.Pointer, expectedContentTypeFlags uint32, expectedFormatTypeFlags uint32, flags uint32, msgAndCertEncodingType *uint32, contentType *uint32, formatType *uint32, certStore *Handle, msg *Handle, context *unsafe.Pointer) (err error) { - r1, _, e1 := syscall.Syscall12(procCryptQueryObject.Addr(), 11, uintptr(objectType), uintptr(object), uintptr(expectedContentTypeFlags), uintptr(expectedFormatTypeFlags), uintptr(flags), uintptr(unsafe.Pointer(msgAndCertEncodingType)), uintptr(unsafe.Pointer(contentType)), uintptr(unsafe.Pointer(formatType)), uintptr(unsafe.Pointer(certStore)), uintptr(unsafe.Pointer(msg)), uintptr(unsafe.Pointer(context)), 0) + r1, _, e1 := syscall.SyscallN(procCryptQueryObject.Addr(), uintptr(objectType), uintptr(object), uintptr(expectedContentTypeFlags), uintptr(expectedFormatTypeFlags), uintptr(flags), uintptr(unsafe.Pointer(msgAndCertEncodingType)), uintptr(unsafe.Pointer(contentType)), uintptr(unsafe.Pointer(formatType)), uintptr(unsafe.Pointer(certStore)), uintptr(unsafe.Pointer(msg)), uintptr(unsafe.Pointer(context))) if r1 == 0 { err = errnoErr(e1) } @@ -1553,7 +1560,7 @@ func CryptQueryObject(objectType uint32, object unsafe.Pointer, expectedContentT } func CryptUnprotectData(dataIn *DataBlob, name **uint16, optionalEntropy *DataBlob, reserved uintptr, promptStruct *CryptProtectPromptStruct, flags uint32, dataOut *DataBlob) (err error) { - r1, _, e1 := syscall.Syscall9(procCryptUnprotectData.Addr(), 7, uintptr(unsafe.Pointer(dataIn)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(optionalEntropy)), uintptr(reserved), uintptr(unsafe.Pointer(promptStruct)), uintptr(flags), uintptr(unsafe.Pointer(dataOut)), 0, 0) + r1, _, e1 := syscall.SyscallN(procCryptUnprotectData.Addr(), uintptr(unsafe.Pointer(dataIn)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(optionalEntropy)), uintptr(reserved), uintptr(unsafe.Pointer(promptStruct)), uintptr(flags), 
uintptr(unsafe.Pointer(dataOut))) if r1 == 0 { err = errnoErr(e1) } @@ -1561,7 +1568,7 @@ func CryptUnprotectData(dataIn *DataBlob, name **uint16, optionalEntropy *DataBl } func PFXImportCertStore(pfx *CryptDataBlob, password *uint16, flags uint32) (store Handle, err error) { - r0, _, e1 := syscall.Syscall(procPFXImportCertStore.Addr(), 3, uintptr(unsafe.Pointer(pfx)), uintptr(unsafe.Pointer(password)), uintptr(flags)) + r0, _, e1 := syscall.SyscallN(procPFXImportCertStore.Addr(), uintptr(unsafe.Pointer(pfx)), uintptr(unsafe.Pointer(password)), uintptr(flags)) store = Handle(r0) if store == 0 { err = errnoErr(e1) @@ -1570,7 +1577,7 @@ func PFXImportCertStore(pfx *CryptDataBlob, password *uint16, flags uint32) (sto } func DnsNameCompare(name1 *uint16, name2 *uint16) (same bool) { - r0, _, _ := syscall.Syscall(procDnsNameCompare_W.Addr(), 2, uintptr(unsafe.Pointer(name1)), uintptr(unsafe.Pointer(name2)), 0) + r0, _, _ := syscall.SyscallN(procDnsNameCompare_W.Addr(), uintptr(unsafe.Pointer(name1)), uintptr(unsafe.Pointer(name2))) same = r0 != 0 return } @@ -1585,7 +1592,7 @@ func DnsQuery(name string, qtype uint16, options uint32, extra *byte, qrs **DNSR } func _DnsQuery(name *uint16, qtype uint16, options uint32, extra *byte, qrs **DNSRecord, pr *byte) (status error) { - r0, _, _ := syscall.Syscall6(procDnsQuery_W.Addr(), 6, uintptr(unsafe.Pointer(name)), uintptr(qtype), uintptr(options), uintptr(unsafe.Pointer(extra)), uintptr(unsafe.Pointer(qrs)), uintptr(unsafe.Pointer(pr))) + r0, _, _ := syscall.SyscallN(procDnsQuery_W.Addr(), uintptr(unsafe.Pointer(name)), uintptr(qtype), uintptr(options), uintptr(unsafe.Pointer(extra)), uintptr(unsafe.Pointer(qrs)), uintptr(unsafe.Pointer(pr))) if r0 != 0 { status = syscall.Errno(r0) } @@ -1593,12 +1600,12 @@ func _DnsQuery(name *uint16, qtype uint16, options uint32, extra *byte, qrs **DN } func DnsRecordListFree(rl *DNSRecord, freetype uint32) { - syscall.Syscall(procDnsRecordListFree.Addr(), 2, uintptr(unsafe.Pointer(rl)), 
uintptr(freetype), 0) + syscall.SyscallN(procDnsRecordListFree.Addr(), uintptr(unsafe.Pointer(rl)), uintptr(freetype)) return } func DwmGetWindowAttribute(hwnd HWND, attribute uint32, value unsafe.Pointer, size uint32) (ret error) { - r0, _, _ := syscall.Syscall6(procDwmGetWindowAttribute.Addr(), 4, uintptr(hwnd), uintptr(attribute), uintptr(value), uintptr(size), 0, 0) + r0, _, _ := syscall.SyscallN(procDwmGetWindowAttribute.Addr(), uintptr(hwnd), uintptr(attribute), uintptr(value), uintptr(size)) if r0 != 0 { ret = syscall.Errno(r0) } @@ -1606,7 +1613,7 @@ func DwmGetWindowAttribute(hwnd HWND, attribute uint32, value unsafe.Pointer, si } func DwmSetWindowAttribute(hwnd HWND, attribute uint32, value unsafe.Pointer, size uint32) (ret error) { - r0, _, _ := syscall.Syscall6(procDwmSetWindowAttribute.Addr(), 4, uintptr(hwnd), uintptr(attribute), uintptr(value), uintptr(size), 0, 0) + r0, _, _ := syscall.SyscallN(procDwmSetWindowAttribute.Addr(), uintptr(hwnd), uintptr(attribute), uintptr(value), uintptr(size)) if r0 != 0 { ret = syscall.Errno(r0) } @@ -1614,15 +1621,20 @@ func DwmSetWindowAttribute(hwnd HWND, attribute uint32, value unsafe.Pointer, si } func CancelMibChangeNotify2(notificationHandle Handle) (errcode error) { - r0, _, _ := syscall.Syscall(procCancelMibChangeNotify2.Addr(), 1, uintptr(notificationHandle), 0, 0) + r0, _, _ := syscall.SyscallN(procCancelMibChangeNotify2.Addr(), uintptr(notificationHandle)) if r0 != 0 { errcode = syscall.Errno(r0) } return } +func FreeMibTable(memory unsafe.Pointer) { + syscall.SyscallN(procFreeMibTable.Addr(), uintptr(memory)) + return +} + func GetAdaptersAddresses(family uint32, flags uint32, reserved uintptr, adapterAddresses *IpAdapterAddresses, sizePointer *uint32) (errcode error) { - r0, _, _ := syscall.Syscall6(procGetAdaptersAddresses.Addr(), 5, uintptr(family), uintptr(flags), uintptr(reserved), uintptr(unsafe.Pointer(adapterAddresses)), uintptr(unsafe.Pointer(sizePointer)), 0) + r0, _, _ := 
syscall.SyscallN(procGetAdaptersAddresses.Addr(), uintptr(family), uintptr(flags), uintptr(reserved), uintptr(unsafe.Pointer(adapterAddresses)), uintptr(unsafe.Pointer(sizePointer))) if r0 != 0 { errcode = syscall.Errno(r0) } @@ -1630,7 +1642,7 @@ func GetAdaptersAddresses(family uint32, flags uint32, reserved uintptr, adapter } func GetAdaptersInfo(ai *IpAdapterInfo, ol *uint32) (errcode error) { - r0, _, _ := syscall.Syscall(procGetAdaptersInfo.Addr(), 2, uintptr(unsafe.Pointer(ai)), uintptr(unsafe.Pointer(ol)), 0) + r0, _, _ := syscall.SyscallN(procGetAdaptersInfo.Addr(), uintptr(unsafe.Pointer(ai)), uintptr(unsafe.Pointer(ol))) if r0 != 0 { errcode = syscall.Errno(r0) } @@ -1638,7 +1650,7 @@ func GetAdaptersInfo(ai *IpAdapterInfo, ol *uint32) (errcode error) { } func getBestInterfaceEx(sockaddr unsafe.Pointer, pdwBestIfIndex *uint32) (errcode error) { - r0, _, _ := syscall.Syscall(procGetBestInterfaceEx.Addr(), 2, uintptr(sockaddr), uintptr(unsafe.Pointer(pdwBestIfIndex)), 0) + r0, _, _ := syscall.SyscallN(procGetBestInterfaceEx.Addr(), uintptr(sockaddr), uintptr(unsafe.Pointer(pdwBestIfIndex))) if r0 != 0 { errcode = syscall.Errno(r0) } @@ -1646,7 +1658,7 @@ func getBestInterfaceEx(sockaddr unsafe.Pointer, pdwBestIfIndex *uint32) (errcod } func GetIfEntry(pIfRow *MibIfRow) (errcode error) { - r0, _, _ := syscall.Syscall(procGetIfEntry.Addr(), 1, uintptr(unsafe.Pointer(pIfRow)), 0, 0) + r0, _, _ := syscall.SyscallN(procGetIfEntry.Addr(), uintptr(unsafe.Pointer(pIfRow))) if r0 != 0 { errcode = syscall.Errno(r0) } @@ -1654,7 +1666,23 @@ func GetIfEntry(pIfRow *MibIfRow) (errcode error) { } func GetIfEntry2Ex(level uint32, row *MibIfRow2) (errcode error) { - r0, _, _ := syscall.Syscall(procGetIfEntry2Ex.Addr(), 2, uintptr(level), uintptr(unsafe.Pointer(row)), 0) + r0, _, _ := syscall.SyscallN(procGetIfEntry2Ex.Addr(), uintptr(level), uintptr(unsafe.Pointer(row))) + if r0 != 0 { + errcode = syscall.Errno(r0) + } + return +} + +func GetIpForwardEntry2(row 
*MibIpForwardRow2) (errcode error) { + r0, _, _ := syscall.SyscallN(procGetIpForwardEntry2.Addr(), uintptr(unsafe.Pointer(row))) + if r0 != 0 { + errcode = syscall.Errno(r0) + } + return +} + +func GetIpForwardTable2(family uint16, table **MibIpForwardTable2) (errcode error) { + r0, _, _ := syscall.SyscallN(procGetIpForwardTable2.Addr(), uintptr(family), uintptr(unsafe.Pointer(table))) if r0 != 0 { errcode = syscall.Errno(r0) } @@ -1662,7 +1690,7 @@ func GetIfEntry2Ex(level uint32, row *MibIfRow2) (errcode error) { } func GetUnicastIpAddressEntry(row *MibUnicastIpAddressRow) (errcode error) { - r0, _, _ := syscall.Syscall(procGetUnicastIpAddressEntry.Addr(), 1, uintptr(unsafe.Pointer(row)), 0, 0) + r0, _, _ := syscall.SyscallN(procGetUnicastIpAddressEntry.Addr(), uintptr(unsafe.Pointer(row))) if r0 != 0 { errcode = syscall.Errno(r0) } @@ -1674,7 +1702,19 @@ func NotifyIpInterfaceChange(family uint16, callback uintptr, callerContext unsa if initialNotification { _p0 = 1 } - r0, _, _ := syscall.Syscall6(procNotifyIpInterfaceChange.Addr(), 5, uintptr(family), uintptr(callback), uintptr(callerContext), uintptr(_p0), uintptr(unsafe.Pointer(notificationHandle)), 0) + r0, _, _ := syscall.SyscallN(procNotifyIpInterfaceChange.Addr(), uintptr(family), uintptr(callback), uintptr(callerContext), uintptr(_p0), uintptr(unsafe.Pointer(notificationHandle))) + if r0 != 0 { + errcode = syscall.Errno(r0) + } + return +} + +func NotifyRouteChange2(family uint16, callback uintptr, callerContext unsafe.Pointer, initialNotification bool, notificationHandle *Handle) (errcode error) { + var _p0 uint32 + if initialNotification { + _p0 = 1 + } + r0, _, _ := syscall.SyscallN(procNotifyRouteChange2.Addr(), uintptr(family), uintptr(callback), uintptr(callerContext), uintptr(_p0), uintptr(unsafe.Pointer(notificationHandle))) if r0 != 0 { errcode = syscall.Errno(r0) } @@ -1686,7 +1726,7 @@ func NotifyUnicastIpAddressChange(family uint16, callback uintptr, callerContext if initialNotification { 
_p0 = 1 } - r0, _, _ := syscall.Syscall6(procNotifyUnicastIpAddressChange.Addr(), 5, uintptr(family), uintptr(callback), uintptr(callerContext), uintptr(_p0), uintptr(unsafe.Pointer(notificationHandle)), 0) + r0, _, _ := syscall.SyscallN(procNotifyUnicastIpAddressChange.Addr(), uintptr(family), uintptr(callback), uintptr(callerContext), uintptr(_p0), uintptr(unsafe.Pointer(notificationHandle))) if r0 != 0 { errcode = syscall.Errno(r0) } @@ -1694,7 +1734,7 @@ func NotifyUnicastIpAddressChange(family uint16, callback uintptr, callerContext } func AddDllDirectory(path *uint16) (cookie uintptr, err error) { - r0, _, e1 := syscall.Syscall(procAddDllDirectory.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0) + r0, _, e1 := syscall.SyscallN(procAddDllDirectory.Addr(), uintptr(unsafe.Pointer(path))) cookie = uintptr(r0) if cookie == 0 { err = errnoErr(e1) @@ -1703,7 +1743,7 @@ func AddDllDirectory(path *uint16) (cookie uintptr, err error) { } func AssignProcessToJobObject(job Handle, process Handle) (err error) { - r1, _, e1 := syscall.Syscall(procAssignProcessToJobObject.Addr(), 2, uintptr(job), uintptr(process), 0) + r1, _, e1 := syscall.SyscallN(procAssignProcessToJobObject.Addr(), uintptr(job), uintptr(process)) if r1 == 0 { err = errnoErr(e1) } @@ -1711,7 +1751,7 @@ func AssignProcessToJobObject(job Handle, process Handle) (err error) { } func CancelIo(s Handle) (err error) { - r1, _, e1 := syscall.Syscall(procCancelIo.Addr(), 1, uintptr(s), 0, 0) + r1, _, e1 := syscall.SyscallN(procCancelIo.Addr(), uintptr(s)) if r1 == 0 { err = errnoErr(e1) } @@ -1719,7 +1759,7 @@ func CancelIo(s Handle) (err error) { } func CancelIoEx(s Handle, o *Overlapped) (err error) { - r1, _, e1 := syscall.Syscall(procCancelIoEx.Addr(), 2, uintptr(s), uintptr(unsafe.Pointer(o)), 0) + r1, _, e1 := syscall.SyscallN(procCancelIoEx.Addr(), uintptr(s), uintptr(unsafe.Pointer(o))) if r1 == 0 { err = errnoErr(e1) } @@ -1727,7 +1767,7 @@ func CancelIoEx(s Handle, o *Overlapped) (err error) { } func 
ClearCommBreak(handle Handle) (err error) { - r1, _, e1 := syscall.Syscall(procClearCommBreak.Addr(), 1, uintptr(handle), 0, 0) + r1, _, e1 := syscall.SyscallN(procClearCommBreak.Addr(), uintptr(handle)) if r1 == 0 { err = errnoErr(e1) } @@ -1735,7 +1775,7 @@ func ClearCommBreak(handle Handle) (err error) { } func ClearCommError(handle Handle, lpErrors *uint32, lpStat *ComStat) (err error) { - r1, _, e1 := syscall.Syscall(procClearCommError.Addr(), 3, uintptr(handle), uintptr(unsafe.Pointer(lpErrors)), uintptr(unsafe.Pointer(lpStat))) + r1, _, e1 := syscall.SyscallN(procClearCommError.Addr(), uintptr(handle), uintptr(unsafe.Pointer(lpErrors)), uintptr(unsafe.Pointer(lpStat))) if r1 == 0 { err = errnoErr(e1) } @@ -1743,7 +1783,7 @@ func ClearCommError(handle Handle, lpErrors *uint32, lpStat *ComStat) (err error } func CloseHandle(handle Handle) (err error) { - r1, _, e1 := syscall.Syscall(procCloseHandle.Addr(), 1, uintptr(handle), 0, 0) + r1, _, e1 := syscall.SyscallN(procCloseHandle.Addr(), uintptr(handle)) if r1 == 0 { err = errnoErr(e1) } @@ -1751,12 +1791,12 @@ func CloseHandle(handle Handle) (err error) { } func ClosePseudoConsole(console Handle) { - syscall.Syscall(procClosePseudoConsole.Addr(), 1, uintptr(console), 0, 0) + syscall.SyscallN(procClosePseudoConsole.Addr(), uintptr(console)) return } func ConnectNamedPipe(pipe Handle, overlapped *Overlapped) (err error) { - r1, _, e1 := syscall.Syscall(procConnectNamedPipe.Addr(), 2, uintptr(pipe), uintptr(unsafe.Pointer(overlapped)), 0) + r1, _, e1 := syscall.SyscallN(procConnectNamedPipe.Addr(), uintptr(pipe), uintptr(unsafe.Pointer(overlapped))) if r1 == 0 { err = errnoErr(e1) } @@ -1764,7 +1804,7 @@ func ConnectNamedPipe(pipe Handle, overlapped *Overlapped) (err error) { } func CreateDirectory(path *uint16, sa *SecurityAttributes) (err error) { - r1, _, e1 := syscall.Syscall(procCreateDirectoryW.Addr(), 2, uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(sa)), 0) + r1, _, e1 := 
syscall.SyscallN(procCreateDirectoryW.Addr(), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(sa))) if r1 == 0 { err = errnoErr(e1) } @@ -1772,7 +1812,7 @@ func CreateDirectory(path *uint16, sa *SecurityAttributes) (err error) { } func CreateEventEx(eventAttrs *SecurityAttributes, name *uint16, flags uint32, desiredAccess uint32) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall6(procCreateEventExW.Addr(), 4, uintptr(unsafe.Pointer(eventAttrs)), uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(desiredAccess), 0, 0) + r0, _, e1 := syscall.SyscallN(procCreateEventExW.Addr(), uintptr(unsafe.Pointer(eventAttrs)), uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(desiredAccess)) handle = Handle(r0) if handle == 0 || e1 == ERROR_ALREADY_EXISTS { err = errnoErr(e1) @@ -1781,7 +1821,7 @@ func CreateEventEx(eventAttrs *SecurityAttributes, name *uint16, flags uint32, d } func CreateEvent(eventAttrs *SecurityAttributes, manualReset uint32, initialState uint32, name *uint16) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall6(procCreateEventW.Addr(), 4, uintptr(unsafe.Pointer(eventAttrs)), uintptr(manualReset), uintptr(initialState), uintptr(unsafe.Pointer(name)), 0, 0) + r0, _, e1 := syscall.SyscallN(procCreateEventW.Addr(), uintptr(unsafe.Pointer(eventAttrs)), uintptr(manualReset), uintptr(initialState), uintptr(unsafe.Pointer(name))) handle = Handle(r0) if handle == 0 || e1 == ERROR_ALREADY_EXISTS { err = errnoErr(e1) @@ -1790,7 +1830,7 @@ func CreateEvent(eventAttrs *SecurityAttributes, manualReset uint32, initialStat } func CreateFileMapping(fhandle Handle, sa *SecurityAttributes, prot uint32, maxSizeHigh uint32, maxSizeLow uint32, name *uint16) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall6(procCreateFileMappingW.Addr(), 6, uintptr(fhandle), uintptr(unsafe.Pointer(sa)), uintptr(prot), uintptr(maxSizeHigh), uintptr(maxSizeLow), uintptr(unsafe.Pointer(name))) + r0, _, e1 := syscall.SyscallN(procCreateFileMappingW.Addr(), 
uintptr(fhandle), uintptr(unsafe.Pointer(sa)), uintptr(prot), uintptr(maxSizeHigh), uintptr(maxSizeLow), uintptr(unsafe.Pointer(name))) handle = Handle(r0) if handle == 0 || e1 == ERROR_ALREADY_EXISTS { err = errnoErr(e1) @@ -1799,7 +1839,7 @@ func CreateFileMapping(fhandle Handle, sa *SecurityAttributes, prot uint32, maxS } func CreateFile(name *uint16, access uint32, mode uint32, sa *SecurityAttributes, createmode uint32, attrs uint32, templatefile Handle) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall9(procCreateFileW.Addr(), 7, uintptr(unsafe.Pointer(name)), uintptr(access), uintptr(mode), uintptr(unsafe.Pointer(sa)), uintptr(createmode), uintptr(attrs), uintptr(templatefile), 0, 0) + r0, _, e1 := syscall.SyscallN(procCreateFileW.Addr(), uintptr(unsafe.Pointer(name)), uintptr(access), uintptr(mode), uintptr(unsafe.Pointer(sa)), uintptr(createmode), uintptr(attrs), uintptr(templatefile)) handle = Handle(r0) if handle == InvalidHandle { err = errnoErr(e1) @@ -1808,7 +1848,7 @@ func CreateFile(name *uint16, access uint32, mode uint32, sa *SecurityAttributes } func CreateHardLink(filename *uint16, existingfilename *uint16, reserved uintptr) (err error) { - r1, _, e1 := syscall.Syscall(procCreateHardLinkW.Addr(), 3, uintptr(unsafe.Pointer(filename)), uintptr(unsafe.Pointer(existingfilename)), uintptr(reserved)) + r1, _, e1 := syscall.SyscallN(procCreateHardLinkW.Addr(), uintptr(unsafe.Pointer(filename)), uintptr(unsafe.Pointer(existingfilename)), uintptr(reserved)) if r1&0xff == 0 { err = errnoErr(e1) } @@ -1816,7 +1856,7 @@ func CreateHardLink(filename *uint16, existingfilename *uint16, reserved uintptr } func CreateIoCompletionPort(filehandle Handle, cphandle Handle, key uintptr, threadcnt uint32) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall6(procCreateIoCompletionPort.Addr(), 4, uintptr(filehandle), uintptr(cphandle), uintptr(key), uintptr(threadcnt), 0, 0) + r0, _, e1 := syscall.SyscallN(procCreateIoCompletionPort.Addr(), 
uintptr(filehandle), uintptr(cphandle), uintptr(key), uintptr(threadcnt)) handle = Handle(r0) if handle == 0 { err = errnoErr(e1) @@ -1825,7 +1865,7 @@ func CreateIoCompletionPort(filehandle Handle, cphandle Handle, key uintptr, thr } func CreateJobObject(jobAttr *SecurityAttributes, name *uint16) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procCreateJobObjectW.Addr(), 2, uintptr(unsafe.Pointer(jobAttr)), uintptr(unsafe.Pointer(name)), 0) + r0, _, e1 := syscall.SyscallN(procCreateJobObjectW.Addr(), uintptr(unsafe.Pointer(jobAttr)), uintptr(unsafe.Pointer(name))) handle = Handle(r0) if handle == 0 { err = errnoErr(e1) @@ -1834,7 +1874,7 @@ func CreateJobObject(jobAttr *SecurityAttributes, name *uint16) (handle Handle, } func CreateMutexEx(mutexAttrs *SecurityAttributes, name *uint16, flags uint32, desiredAccess uint32) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall6(procCreateMutexExW.Addr(), 4, uintptr(unsafe.Pointer(mutexAttrs)), uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(desiredAccess), 0, 0) + r0, _, e1 := syscall.SyscallN(procCreateMutexExW.Addr(), uintptr(unsafe.Pointer(mutexAttrs)), uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(desiredAccess)) handle = Handle(r0) if handle == 0 || e1 == ERROR_ALREADY_EXISTS { err = errnoErr(e1) @@ -1847,7 +1887,7 @@ func CreateMutex(mutexAttrs *SecurityAttributes, initialOwner bool, name *uint16 if initialOwner { _p0 = 1 } - r0, _, e1 := syscall.Syscall(procCreateMutexW.Addr(), 3, uintptr(unsafe.Pointer(mutexAttrs)), uintptr(_p0), uintptr(unsafe.Pointer(name))) + r0, _, e1 := syscall.SyscallN(procCreateMutexW.Addr(), uintptr(unsafe.Pointer(mutexAttrs)), uintptr(_p0), uintptr(unsafe.Pointer(name))) handle = Handle(r0) if handle == 0 || e1 == ERROR_ALREADY_EXISTS { err = errnoErr(e1) @@ -1856,7 +1896,7 @@ func CreateMutex(mutexAttrs *SecurityAttributes, initialOwner bool, name *uint16 } func CreateNamedPipe(name *uint16, flags uint32, pipeMode uint32, maxInstances uint32, 
outSize uint32, inSize uint32, defaultTimeout uint32, sa *SecurityAttributes) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall9(procCreateNamedPipeW.Addr(), 8, uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(pipeMode), uintptr(maxInstances), uintptr(outSize), uintptr(inSize), uintptr(defaultTimeout), uintptr(unsafe.Pointer(sa)), 0) + r0, _, e1 := syscall.SyscallN(procCreateNamedPipeW.Addr(), uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(pipeMode), uintptr(maxInstances), uintptr(outSize), uintptr(inSize), uintptr(defaultTimeout), uintptr(unsafe.Pointer(sa))) handle = Handle(r0) if handle == InvalidHandle { err = errnoErr(e1) @@ -1865,7 +1905,7 @@ func CreateNamedPipe(name *uint16, flags uint32, pipeMode uint32, maxInstances u } func CreatePipe(readhandle *Handle, writehandle *Handle, sa *SecurityAttributes, size uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procCreatePipe.Addr(), 4, uintptr(unsafe.Pointer(readhandle)), uintptr(unsafe.Pointer(writehandle)), uintptr(unsafe.Pointer(sa)), uintptr(size), 0, 0) + r1, _, e1 := syscall.SyscallN(procCreatePipe.Addr(), uintptr(unsafe.Pointer(readhandle)), uintptr(unsafe.Pointer(writehandle)), uintptr(unsafe.Pointer(sa)), uintptr(size)) if r1 == 0 { err = errnoErr(e1) } @@ -1877,7 +1917,7 @@ func CreateProcess(appName *uint16, commandLine *uint16, procSecurity *SecurityA if inheritHandles { _p0 = 1 } - r1, _, e1 := syscall.Syscall12(procCreateProcessW.Addr(), 10, uintptr(unsafe.Pointer(appName)), uintptr(unsafe.Pointer(commandLine)), uintptr(unsafe.Pointer(procSecurity)), uintptr(unsafe.Pointer(threadSecurity)), uintptr(_p0), uintptr(creationFlags), uintptr(unsafe.Pointer(env)), uintptr(unsafe.Pointer(currentDir)), uintptr(unsafe.Pointer(startupInfo)), uintptr(unsafe.Pointer(outProcInfo)), 0, 0) + r1, _, e1 := syscall.SyscallN(procCreateProcessW.Addr(), uintptr(unsafe.Pointer(appName)), uintptr(unsafe.Pointer(commandLine)), uintptr(unsafe.Pointer(procSecurity)), 
uintptr(unsafe.Pointer(threadSecurity)), uintptr(_p0), uintptr(creationFlags), uintptr(unsafe.Pointer(env)), uintptr(unsafe.Pointer(currentDir)), uintptr(unsafe.Pointer(startupInfo)), uintptr(unsafe.Pointer(outProcInfo))) if r1 == 0 { err = errnoErr(e1) } @@ -1885,7 +1925,7 @@ func CreateProcess(appName *uint16, commandLine *uint16, procSecurity *SecurityA } func createPseudoConsole(size uint32, in Handle, out Handle, flags uint32, pconsole *Handle) (hr error) { - r0, _, _ := syscall.Syscall6(procCreatePseudoConsole.Addr(), 5, uintptr(size), uintptr(in), uintptr(out), uintptr(flags), uintptr(unsafe.Pointer(pconsole)), 0) + r0, _, _ := syscall.SyscallN(procCreatePseudoConsole.Addr(), uintptr(size), uintptr(in), uintptr(out), uintptr(flags), uintptr(unsafe.Pointer(pconsole))) if r0 != 0 { hr = syscall.Errno(r0) } @@ -1893,7 +1933,7 @@ func createPseudoConsole(size uint32, in Handle, out Handle, flags uint32, pcons } func CreateSymbolicLink(symlinkfilename *uint16, targetfilename *uint16, flags uint32) (err error) { - r1, _, e1 := syscall.Syscall(procCreateSymbolicLinkW.Addr(), 3, uintptr(unsafe.Pointer(symlinkfilename)), uintptr(unsafe.Pointer(targetfilename)), uintptr(flags)) + r1, _, e1 := syscall.SyscallN(procCreateSymbolicLinkW.Addr(), uintptr(unsafe.Pointer(symlinkfilename)), uintptr(unsafe.Pointer(targetfilename)), uintptr(flags)) if r1&0xff == 0 { err = errnoErr(e1) } @@ -1901,7 +1941,7 @@ func CreateSymbolicLink(symlinkfilename *uint16, targetfilename *uint16, flags u } func CreateToolhelp32Snapshot(flags uint32, processId uint32) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procCreateToolhelp32Snapshot.Addr(), 2, uintptr(flags), uintptr(processId), 0) + r0, _, e1 := syscall.SyscallN(procCreateToolhelp32Snapshot.Addr(), uintptr(flags), uintptr(processId)) handle = Handle(r0) if handle == InvalidHandle { err = errnoErr(e1) @@ -1910,7 +1950,7 @@ func CreateToolhelp32Snapshot(flags uint32, processId uint32) (handle Handle, er } func 
DefineDosDevice(flags uint32, deviceName *uint16, targetPath *uint16) (err error) { - r1, _, e1 := syscall.Syscall(procDefineDosDeviceW.Addr(), 3, uintptr(flags), uintptr(unsafe.Pointer(deviceName)), uintptr(unsafe.Pointer(targetPath))) + r1, _, e1 := syscall.SyscallN(procDefineDosDeviceW.Addr(), uintptr(flags), uintptr(unsafe.Pointer(deviceName)), uintptr(unsafe.Pointer(targetPath))) if r1 == 0 { err = errnoErr(e1) } @@ -1918,7 +1958,7 @@ func DefineDosDevice(flags uint32, deviceName *uint16, targetPath *uint16) (err } func DeleteFile(path *uint16) (err error) { - r1, _, e1 := syscall.Syscall(procDeleteFileW.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0) + r1, _, e1 := syscall.SyscallN(procDeleteFileW.Addr(), uintptr(unsafe.Pointer(path))) if r1 == 0 { err = errnoErr(e1) } @@ -1926,12 +1966,12 @@ func DeleteFile(path *uint16) (err error) { } func deleteProcThreadAttributeList(attrlist *ProcThreadAttributeList) { - syscall.Syscall(procDeleteProcThreadAttributeList.Addr(), 1, uintptr(unsafe.Pointer(attrlist)), 0, 0) + syscall.SyscallN(procDeleteProcThreadAttributeList.Addr(), uintptr(unsafe.Pointer(attrlist))) return } func DeleteVolumeMountPoint(volumeMountPoint *uint16) (err error) { - r1, _, e1 := syscall.Syscall(procDeleteVolumeMountPointW.Addr(), 1, uintptr(unsafe.Pointer(volumeMountPoint)), 0, 0) + r1, _, e1 := syscall.SyscallN(procDeleteVolumeMountPointW.Addr(), uintptr(unsafe.Pointer(volumeMountPoint))) if r1 == 0 { err = errnoErr(e1) } @@ -1939,7 +1979,7 @@ func DeleteVolumeMountPoint(volumeMountPoint *uint16) (err error) { } func DeviceIoControl(handle Handle, ioControlCode uint32, inBuffer *byte, inBufferSize uint32, outBuffer *byte, outBufferSize uint32, bytesReturned *uint32, overlapped *Overlapped) (err error) { - r1, _, e1 := syscall.Syscall9(procDeviceIoControl.Addr(), 8, uintptr(handle), uintptr(ioControlCode), uintptr(unsafe.Pointer(inBuffer)), uintptr(inBufferSize), uintptr(unsafe.Pointer(outBuffer)), uintptr(outBufferSize), 
uintptr(unsafe.Pointer(bytesReturned)), uintptr(unsafe.Pointer(overlapped)), 0) + r1, _, e1 := syscall.SyscallN(procDeviceIoControl.Addr(), uintptr(handle), uintptr(ioControlCode), uintptr(unsafe.Pointer(inBuffer)), uintptr(inBufferSize), uintptr(unsafe.Pointer(outBuffer)), uintptr(outBufferSize), uintptr(unsafe.Pointer(bytesReturned)), uintptr(unsafe.Pointer(overlapped))) if r1 == 0 { err = errnoErr(e1) } @@ -1947,7 +1987,7 @@ func DeviceIoControl(handle Handle, ioControlCode uint32, inBuffer *byte, inBuff } func DisconnectNamedPipe(pipe Handle) (err error) { - r1, _, e1 := syscall.Syscall(procDisconnectNamedPipe.Addr(), 1, uintptr(pipe), 0, 0) + r1, _, e1 := syscall.SyscallN(procDisconnectNamedPipe.Addr(), uintptr(pipe)) if r1 == 0 { err = errnoErr(e1) } @@ -1959,7 +1999,7 @@ func DuplicateHandle(hSourceProcessHandle Handle, hSourceHandle Handle, hTargetP if bInheritHandle { _p0 = 1 } - r1, _, e1 := syscall.Syscall9(procDuplicateHandle.Addr(), 7, uintptr(hSourceProcessHandle), uintptr(hSourceHandle), uintptr(hTargetProcessHandle), uintptr(unsafe.Pointer(lpTargetHandle)), uintptr(dwDesiredAccess), uintptr(_p0), uintptr(dwOptions), 0, 0) + r1, _, e1 := syscall.SyscallN(procDuplicateHandle.Addr(), uintptr(hSourceProcessHandle), uintptr(hSourceHandle), uintptr(hTargetProcessHandle), uintptr(unsafe.Pointer(lpTargetHandle)), uintptr(dwDesiredAccess), uintptr(_p0), uintptr(dwOptions)) if r1 == 0 { err = errnoErr(e1) } @@ -1967,7 +2007,7 @@ func DuplicateHandle(hSourceProcessHandle Handle, hSourceHandle Handle, hTargetP } func EscapeCommFunction(handle Handle, dwFunc uint32) (err error) { - r1, _, e1 := syscall.Syscall(procEscapeCommFunction.Addr(), 2, uintptr(handle), uintptr(dwFunc), 0) + r1, _, e1 := syscall.SyscallN(procEscapeCommFunction.Addr(), uintptr(handle), uintptr(dwFunc)) if r1 == 0 { err = errnoErr(e1) } @@ -1975,12 +2015,12 @@ func EscapeCommFunction(handle Handle, dwFunc uint32) (err error) { } func ExitProcess(exitcode uint32) { - 
syscall.Syscall(procExitProcess.Addr(), 1, uintptr(exitcode), 0, 0) + syscall.SyscallN(procExitProcess.Addr(), uintptr(exitcode)) return } func ExpandEnvironmentStrings(src *uint16, dst *uint16, size uint32) (n uint32, err error) { - r0, _, e1 := syscall.Syscall(procExpandEnvironmentStringsW.Addr(), 3, uintptr(unsafe.Pointer(src)), uintptr(unsafe.Pointer(dst)), uintptr(size)) + r0, _, e1 := syscall.SyscallN(procExpandEnvironmentStringsW.Addr(), uintptr(unsafe.Pointer(src)), uintptr(unsafe.Pointer(dst)), uintptr(size)) n = uint32(r0) if n == 0 { err = errnoErr(e1) @@ -1989,7 +2029,7 @@ func ExpandEnvironmentStrings(src *uint16, dst *uint16, size uint32) (n uint32, } func FindClose(handle Handle) (err error) { - r1, _, e1 := syscall.Syscall(procFindClose.Addr(), 1, uintptr(handle), 0, 0) + r1, _, e1 := syscall.SyscallN(procFindClose.Addr(), uintptr(handle)) if r1 == 0 { err = errnoErr(e1) } @@ -1997,7 +2037,7 @@ func FindClose(handle Handle) (err error) { } func FindCloseChangeNotification(handle Handle) (err error) { - r1, _, e1 := syscall.Syscall(procFindCloseChangeNotification.Addr(), 1, uintptr(handle), 0, 0) + r1, _, e1 := syscall.SyscallN(procFindCloseChangeNotification.Addr(), uintptr(handle)) if r1 == 0 { err = errnoErr(e1) } @@ -2018,7 +2058,7 @@ func _FindFirstChangeNotification(path *uint16, watchSubtree bool, notifyFilter if watchSubtree { _p1 = 1 } - r0, _, e1 := syscall.Syscall(procFindFirstChangeNotificationW.Addr(), 3, uintptr(unsafe.Pointer(path)), uintptr(_p1), uintptr(notifyFilter)) + r0, _, e1 := syscall.SyscallN(procFindFirstChangeNotificationW.Addr(), uintptr(unsafe.Pointer(path)), uintptr(_p1), uintptr(notifyFilter)) handle = Handle(r0) if handle == InvalidHandle { err = errnoErr(e1) @@ -2027,7 +2067,7 @@ func _FindFirstChangeNotification(path *uint16, watchSubtree bool, notifyFilter } func findFirstFile1(name *uint16, data *win32finddata1) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procFindFirstFileW.Addr(), 2, 
uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(data)), 0) + r0, _, e1 := syscall.SyscallN(procFindFirstFileW.Addr(), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(data))) handle = Handle(r0) if handle == InvalidHandle { err = errnoErr(e1) @@ -2036,7 +2076,7 @@ func findFirstFile1(name *uint16, data *win32finddata1) (handle Handle, err erro } func FindFirstVolumeMountPoint(rootPathName *uint16, volumeMountPoint *uint16, bufferLength uint32) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procFindFirstVolumeMountPointW.Addr(), 3, uintptr(unsafe.Pointer(rootPathName)), uintptr(unsafe.Pointer(volumeMountPoint)), uintptr(bufferLength)) + r0, _, e1 := syscall.SyscallN(procFindFirstVolumeMountPointW.Addr(), uintptr(unsafe.Pointer(rootPathName)), uintptr(unsafe.Pointer(volumeMountPoint)), uintptr(bufferLength)) handle = Handle(r0) if handle == InvalidHandle { err = errnoErr(e1) @@ -2045,7 +2085,7 @@ func FindFirstVolumeMountPoint(rootPathName *uint16, volumeMountPoint *uint16, b } func FindFirstVolume(volumeName *uint16, bufferLength uint32) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procFindFirstVolumeW.Addr(), 2, uintptr(unsafe.Pointer(volumeName)), uintptr(bufferLength), 0) + r0, _, e1 := syscall.SyscallN(procFindFirstVolumeW.Addr(), uintptr(unsafe.Pointer(volumeName)), uintptr(bufferLength)) handle = Handle(r0) if handle == InvalidHandle { err = errnoErr(e1) @@ -2054,7 +2094,7 @@ func FindFirstVolume(volumeName *uint16, bufferLength uint32) (handle Handle, er } func FindNextChangeNotification(handle Handle) (err error) { - r1, _, e1 := syscall.Syscall(procFindNextChangeNotification.Addr(), 1, uintptr(handle), 0, 0) + r1, _, e1 := syscall.SyscallN(procFindNextChangeNotification.Addr(), uintptr(handle)) if r1 == 0 { err = errnoErr(e1) } @@ -2062,7 +2102,7 @@ func FindNextChangeNotification(handle Handle) (err error) { } func findNextFile1(handle Handle, data *win32finddata1) (err error) { - r1, _, e1 := 
syscall.Syscall(procFindNextFileW.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(data)), 0) + r1, _, e1 := syscall.SyscallN(procFindNextFileW.Addr(), uintptr(handle), uintptr(unsafe.Pointer(data))) if r1 == 0 { err = errnoErr(e1) } @@ -2070,7 +2110,7 @@ func findNextFile1(handle Handle, data *win32finddata1) (err error) { } func FindNextVolumeMountPoint(findVolumeMountPoint Handle, volumeMountPoint *uint16, bufferLength uint32) (err error) { - r1, _, e1 := syscall.Syscall(procFindNextVolumeMountPointW.Addr(), 3, uintptr(findVolumeMountPoint), uintptr(unsafe.Pointer(volumeMountPoint)), uintptr(bufferLength)) + r1, _, e1 := syscall.SyscallN(procFindNextVolumeMountPointW.Addr(), uintptr(findVolumeMountPoint), uintptr(unsafe.Pointer(volumeMountPoint)), uintptr(bufferLength)) if r1 == 0 { err = errnoErr(e1) } @@ -2078,7 +2118,7 @@ func FindNextVolumeMountPoint(findVolumeMountPoint Handle, volumeMountPoint *uin } func FindNextVolume(findVolume Handle, volumeName *uint16, bufferLength uint32) (err error) { - r1, _, e1 := syscall.Syscall(procFindNextVolumeW.Addr(), 3, uintptr(findVolume), uintptr(unsafe.Pointer(volumeName)), uintptr(bufferLength)) + r1, _, e1 := syscall.SyscallN(procFindNextVolumeW.Addr(), uintptr(findVolume), uintptr(unsafe.Pointer(volumeName)), uintptr(bufferLength)) if r1 == 0 { err = errnoErr(e1) } @@ -2086,7 +2126,7 @@ func FindNextVolume(findVolume Handle, volumeName *uint16, bufferLength uint32) } func findResource(module Handle, name uintptr, resType uintptr) (resInfo Handle, err error) { - r0, _, e1 := syscall.Syscall(procFindResourceW.Addr(), 3, uintptr(module), uintptr(name), uintptr(resType)) + r0, _, e1 := syscall.SyscallN(procFindResourceW.Addr(), uintptr(module), uintptr(name), uintptr(resType)) resInfo = Handle(r0) if resInfo == 0 { err = errnoErr(e1) @@ -2095,7 +2135,7 @@ func findResource(module Handle, name uintptr, resType uintptr) (resInfo Handle, } func FindVolumeClose(findVolume Handle) (err error) { - r1, _, e1 := 
syscall.Syscall(procFindVolumeClose.Addr(), 1, uintptr(findVolume), 0, 0) + r1, _, e1 := syscall.SyscallN(procFindVolumeClose.Addr(), uintptr(findVolume)) if r1 == 0 { err = errnoErr(e1) } @@ -2103,7 +2143,15 @@ func FindVolumeClose(findVolume Handle) (err error) { } func FindVolumeMountPointClose(findVolumeMountPoint Handle) (err error) { - r1, _, e1 := syscall.Syscall(procFindVolumeMountPointClose.Addr(), 1, uintptr(findVolumeMountPoint), 0, 0) + r1, _, e1 := syscall.SyscallN(procFindVolumeMountPointClose.Addr(), uintptr(findVolumeMountPoint)) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func FlushConsoleInputBuffer(console Handle) (err error) { + r1, _, e1 := syscall.SyscallN(procFlushConsoleInputBuffer.Addr(), uintptr(console)) if r1 == 0 { err = errnoErr(e1) } @@ -2111,7 +2159,7 @@ func FindVolumeMountPointClose(findVolumeMountPoint Handle) (err error) { } func FlushFileBuffers(handle Handle) (err error) { - r1, _, e1 := syscall.Syscall(procFlushFileBuffers.Addr(), 1, uintptr(handle), 0, 0) + r1, _, e1 := syscall.SyscallN(procFlushFileBuffers.Addr(), uintptr(handle)) if r1 == 0 { err = errnoErr(e1) } @@ -2119,7 +2167,7 @@ func FlushFileBuffers(handle Handle) (err error) { } func FlushViewOfFile(addr uintptr, length uintptr) (err error) { - r1, _, e1 := syscall.Syscall(procFlushViewOfFile.Addr(), 2, uintptr(addr), uintptr(length), 0) + r1, _, e1 := syscall.SyscallN(procFlushViewOfFile.Addr(), uintptr(addr), uintptr(length)) if r1 == 0 { err = errnoErr(e1) } @@ -2131,7 +2179,7 @@ func FormatMessage(flags uint32, msgsrc uintptr, msgid uint32, langid uint32, bu if len(buf) > 0 { _p0 = &buf[0] } - r0, _, e1 := syscall.Syscall9(procFormatMessageW.Addr(), 7, uintptr(flags), uintptr(msgsrc), uintptr(msgid), uintptr(langid), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(unsafe.Pointer(args)), 0, 0) + r0, _, e1 := syscall.SyscallN(procFormatMessageW.Addr(), uintptr(flags), uintptr(msgsrc), uintptr(msgid), uintptr(langid), 
uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(unsafe.Pointer(args))) n = uint32(r0) if n == 0 { err = errnoErr(e1) @@ -2140,7 +2188,7 @@ func FormatMessage(flags uint32, msgsrc uintptr, msgid uint32, langid uint32, bu } func FreeEnvironmentStrings(envs *uint16) (err error) { - r1, _, e1 := syscall.Syscall(procFreeEnvironmentStringsW.Addr(), 1, uintptr(unsafe.Pointer(envs)), 0, 0) + r1, _, e1 := syscall.SyscallN(procFreeEnvironmentStringsW.Addr(), uintptr(unsafe.Pointer(envs))) if r1 == 0 { err = errnoErr(e1) } @@ -2148,7 +2196,7 @@ func FreeEnvironmentStrings(envs *uint16) (err error) { } func FreeLibrary(handle Handle) (err error) { - r1, _, e1 := syscall.Syscall(procFreeLibrary.Addr(), 1, uintptr(handle), 0, 0) + r1, _, e1 := syscall.SyscallN(procFreeLibrary.Addr(), uintptr(handle)) if r1 == 0 { err = errnoErr(e1) } @@ -2156,7 +2204,7 @@ func FreeLibrary(handle Handle) (err error) { } func GenerateConsoleCtrlEvent(ctrlEvent uint32, processGroupID uint32) (err error) { - r1, _, e1 := syscall.Syscall(procGenerateConsoleCtrlEvent.Addr(), 2, uintptr(ctrlEvent), uintptr(processGroupID), 0) + r1, _, e1 := syscall.SyscallN(procGenerateConsoleCtrlEvent.Addr(), uintptr(ctrlEvent), uintptr(processGroupID)) if r1 == 0 { err = errnoErr(e1) } @@ -2164,19 +2212,19 @@ func GenerateConsoleCtrlEvent(ctrlEvent uint32, processGroupID uint32) (err erro } func GetACP() (acp uint32) { - r0, _, _ := syscall.Syscall(procGetACP.Addr(), 0, 0, 0, 0) + r0, _, _ := syscall.SyscallN(procGetACP.Addr()) acp = uint32(r0) return } func GetActiveProcessorCount(groupNumber uint16) (ret uint32) { - r0, _, _ := syscall.Syscall(procGetActiveProcessorCount.Addr(), 1, uintptr(groupNumber), 0, 0) + r0, _, _ := syscall.SyscallN(procGetActiveProcessorCount.Addr(), uintptr(groupNumber)) ret = uint32(r0) return } func GetCommModemStatus(handle Handle, lpModemStat *uint32) (err error) { - r1, _, e1 := syscall.Syscall(procGetCommModemStatus.Addr(), 2, uintptr(handle), 
uintptr(unsafe.Pointer(lpModemStat)), 0) + r1, _, e1 := syscall.SyscallN(procGetCommModemStatus.Addr(), uintptr(handle), uintptr(unsafe.Pointer(lpModemStat))) if r1 == 0 { err = errnoErr(e1) } @@ -2184,7 +2232,7 @@ func GetCommModemStatus(handle Handle, lpModemStat *uint32) (err error) { } func GetCommState(handle Handle, lpDCB *DCB) (err error) { - r1, _, e1 := syscall.Syscall(procGetCommState.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(lpDCB)), 0) + r1, _, e1 := syscall.SyscallN(procGetCommState.Addr(), uintptr(handle), uintptr(unsafe.Pointer(lpDCB))) if r1 == 0 { err = errnoErr(e1) } @@ -2192,7 +2240,7 @@ func GetCommState(handle Handle, lpDCB *DCB) (err error) { } func GetCommTimeouts(handle Handle, timeouts *CommTimeouts) (err error) { - r1, _, e1 := syscall.Syscall(procGetCommTimeouts.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(timeouts)), 0) + r1, _, e1 := syscall.SyscallN(procGetCommTimeouts.Addr(), uintptr(handle), uintptr(unsafe.Pointer(timeouts))) if r1 == 0 { err = errnoErr(e1) } @@ -2200,13 +2248,13 @@ func GetCommTimeouts(handle Handle, timeouts *CommTimeouts) (err error) { } func GetCommandLine() (cmd *uint16) { - r0, _, _ := syscall.Syscall(procGetCommandLineW.Addr(), 0, 0, 0, 0) + r0, _, _ := syscall.SyscallN(procGetCommandLineW.Addr()) cmd = (*uint16)(unsafe.Pointer(r0)) return } func GetComputerNameEx(nametype uint32, buf *uint16, n *uint32) (err error) { - r1, _, e1 := syscall.Syscall(procGetComputerNameExW.Addr(), 3, uintptr(nametype), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(n))) + r1, _, e1 := syscall.SyscallN(procGetComputerNameExW.Addr(), uintptr(nametype), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(n))) if r1 == 0 { err = errnoErr(e1) } @@ -2214,7 +2262,7 @@ func GetComputerNameEx(nametype uint32, buf *uint16, n *uint32) (err error) { } func GetComputerName(buf *uint16, n *uint32) (err error) { - r1, _, e1 := syscall.Syscall(procGetComputerNameW.Addr(), 2, uintptr(unsafe.Pointer(buf)), 
uintptr(unsafe.Pointer(n)), 0) + r1, _, e1 := syscall.SyscallN(procGetComputerNameW.Addr(), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(n))) if r1 == 0 { err = errnoErr(e1) } @@ -2222,7 +2270,7 @@ func GetComputerName(buf *uint16, n *uint32) (err error) { } func GetConsoleCP() (cp uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetConsoleCP.Addr(), 0, 0, 0, 0) + r0, _, e1 := syscall.SyscallN(procGetConsoleCP.Addr()) cp = uint32(r0) if cp == 0 { err = errnoErr(e1) @@ -2231,7 +2279,7 @@ func GetConsoleCP() (cp uint32, err error) { } func GetConsoleMode(console Handle, mode *uint32) (err error) { - r1, _, e1 := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(console), uintptr(unsafe.Pointer(mode)), 0) + r1, _, e1 := syscall.SyscallN(procGetConsoleMode.Addr(), uintptr(console), uintptr(unsafe.Pointer(mode))) if r1 == 0 { err = errnoErr(e1) } @@ -2239,7 +2287,7 @@ func GetConsoleMode(console Handle, mode *uint32) (err error) { } func GetConsoleOutputCP() (cp uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetConsoleOutputCP.Addr(), 0, 0, 0, 0) + r0, _, e1 := syscall.SyscallN(procGetConsoleOutputCP.Addr()) cp = uint32(r0) if cp == 0 { err = errnoErr(e1) @@ -2248,7 +2296,7 @@ func GetConsoleOutputCP() (cp uint32, err error) { } func GetConsoleScreenBufferInfo(console Handle, info *ConsoleScreenBufferInfo) (err error) { - r1, _, e1 := syscall.Syscall(procGetConsoleScreenBufferInfo.Addr(), 2, uintptr(console), uintptr(unsafe.Pointer(info)), 0) + r1, _, e1 := syscall.SyscallN(procGetConsoleScreenBufferInfo.Addr(), uintptr(console), uintptr(unsafe.Pointer(info))) if r1 == 0 { err = errnoErr(e1) } @@ -2256,7 +2304,7 @@ func GetConsoleScreenBufferInfo(console Handle, info *ConsoleScreenBufferInfo) ( } func GetCurrentDirectory(buflen uint32, buf *uint16) (n uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetCurrentDirectoryW.Addr(), 2, uintptr(buflen), uintptr(unsafe.Pointer(buf)), 0) + r0, _, e1 := 
syscall.SyscallN(procGetCurrentDirectoryW.Addr(), uintptr(buflen), uintptr(unsafe.Pointer(buf))) n = uint32(r0) if n == 0 { err = errnoErr(e1) @@ -2265,19 +2313,19 @@ func GetCurrentDirectory(buflen uint32, buf *uint16) (n uint32, err error) { } func GetCurrentProcessId() (pid uint32) { - r0, _, _ := syscall.Syscall(procGetCurrentProcessId.Addr(), 0, 0, 0, 0) + r0, _, _ := syscall.SyscallN(procGetCurrentProcessId.Addr()) pid = uint32(r0) return } func GetCurrentThreadId() (id uint32) { - r0, _, _ := syscall.Syscall(procGetCurrentThreadId.Addr(), 0, 0, 0, 0) + r0, _, _ := syscall.SyscallN(procGetCurrentThreadId.Addr()) id = uint32(r0) return } func GetDiskFreeSpaceEx(directoryName *uint16, freeBytesAvailableToCaller *uint64, totalNumberOfBytes *uint64, totalNumberOfFreeBytes *uint64) (err error) { - r1, _, e1 := syscall.Syscall6(procGetDiskFreeSpaceExW.Addr(), 4, uintptr(unsafe.Pointer(directoryName)), uintptr(unsafe.Pointer(freeBytesAvailableToCaller)), uintptr(unsafe.Pointer(totalNumberOfBytes)), uintptr(unsafe.Pointer(totalNumberOfFreeBytes)), 0, 0) + r1, _, e1 := syscall.SyscallN(procGetDiskFreeSpaceExW.Addr(), uintptr(unsafe.Pointer(directoryName)), uintptr(unsafe.Pointer(freeBytesAvailableToCaller)), uintptr(unsafe.Pointer(totalNumberOfBytes)), uintptr(unsafe.Pointer(totalNumberOfFreeBytes))) if r1 == 0 { err = errnoErr(e1) } @@ -2285,13 +2333,13 @@ func GetDiskFreeSpaceEx(directoryName *uint16, freeBytesAvailableToCaller *uint6 } func GetDriveType(rootPathName *uint16) (driveType uint32) { - r0, _, _ := syscall.Syscall(procGetDriveTypeW.Addr(), 1, uintptr(unsafe.Pointer(rootPathName)), 0, 0) + r0, _, _ := syscall.SyscallN(procGetDriveTypeW.Addr(), uintptr(unsafe.Pointer(rootPathName))) driveType = uint32(r0) return } func GetEnvironmentStrings() (envs *uint16, err error) { - r0, _, e1 := syscall.Syscall(procGetEnvironmentStringsW.Addr(), 0, 0, 0, 0) + r0, _, e1 := syscall.SyscallN(procGetEnvironmentStringsW.Addr()) envs = (*uint16)(unsafe.Pointer(r0)) if envs 
== nil { err = errnoErr(e1) @@ -2300,7 +2348,7 @@ func GetEnvironmentStrings() (envs *uint16, err error) { } func GetEnvironmentVariable(name *uint16, buffer *uint16, size uint32) (n uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetEnvironmentVariableW.Addr(), 3, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(buffer)), uintptr(size)) + r0, _, e1 := syscall.SyscallN(procGetEnvironmentVariableW.Addr(), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(buffer)), uintptr(size)) n = uint32(r0) if n == 0 { err = errnoErr(e1) @@ -2309,7 +2357,7 @@ func GetEnvironmentVariable(name *uint16, buffer *uint16, size uint32) (n uint32 } func GetExitCodeProcess(handle Handle, exitcode *uint32) (err error) { - r1, _, e1 := syscall.Syscall(procGetExitCodeProcess.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(exitcode)), 0) + r1, _, e1 := syscall.SyscallN(procGetExitCodeProcess.Addr(), uintptr(handle), uintptr(unsafe.Pointer(exitcode))) if r1 == 0 { err = errnoErr(e1) } @@ -2317,7 +2365,7 @@ func GetExitCodeProcess(handle Handle, exitcode *uint32) (err error) { } func GetFileAttributesEx(name *uint16, level uint32, info *byte) (err error) { - r1, _, e1 := syscall.Syscall(procGetFileAttributesExW.Addr(), 3, uintptr(unsafe.Pointer(name)), uintptr(level), uintptr(unsafe.Pointer(info))) + r1, _, e1 := syscall.SyscallN(procGetFileAttributesExW.Addr(), uintptr(unsafe.Pointer(name)), uintptr(level), uintptr(unsafe.Pointer(info))) if r1 == 0 { err = errnoErr(e1) } @@ -2325,7 +2373,7 @@ func GetFileAttributesEx(name *uint16, level uint32, info *byte) (err error) { } func GetFileAttributes(name *uint16) (attrs uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetFileAttributesW.Addr(), 1, uintptr(unsafe.Pointer(name)), 0, 0) + r0, _, e1 := syscall.SyscallN(procGetFileAttributesW.Addr(), uintptr(unsafe.Pointer(name))) attrs = uint32(r0) if attrs == INVALID_FILE_ATTRIBUTES { err = errnoErr(e1) @@ -2334,7 +2382,7 @@ func GetFileAttributes(name *uint16) (attrs 
uint32, err error) { } func GetFileInformationByHandle(handle Handle, data *ByHandleFileInformation) (err error) { - r1, _, e1 := syscall.Syscall(procGetFileInformationByHandle.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(data)), 0) + r1, _, e1 := syscall.SyscallN(procGetFileInformationByHandle.Addr(), uintptr(handle), uintptr(unsafe.Pointer(data))) if r1 == 0 { err = errnoErr(e1) } @@ -2342,7 +2390,7 @@ func GetFileInformationByHandle(handle Handle, data *ByHandleFileInformation) (e } func GetFileInformationByHandleEx(handle Handle, class uint32, outBuffer *byte, outBufferLen uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procGetFileInformationByHandleEx.Addr(), 4, uintptr(handle), uintptr(class), uintptr(unsafe.Pointer(outBuffer)), uintptr(outBufferLen), 0, 0) + r1, _, e1 := syscall.SyscallN(procGetFileInformationByHandleEx.Addr(), uintptr(handle), uintptr(class), uintptr(unsafe.Pointer(outBuffer)), uintptr(outBufferLen)) if r1 == 0 { err = errnoErr(e1) } @@ -2350,7 +2398,7 @@ func GetFileInformationByHandleEx(handle Handle, class uint32, outBuffer *byte, } func GetFileTime(handle Handle, ctime *Filetime, atime *Filetime, wtime *Filetime) (err error) { - r1, _, e1 := syscall.Syscall6(procGetFileTime.Addr(), 4, uintptr(handle), uintptr(unsafe.Pointer(ctime)), uintptr(unsafe.Pointer(atime)), uintptr(unsafe.Pointer(wtime)), 0, 0) + r1, _, e1 := syscall.SyscallN(procGetFileTime.Addr(), uintptr(handle), uintptr(unsafe.Pointer(ctime)), uintptr(unsafe.Pointer(atime)), uintptr(unsafe.Pointer(wtime))) if r1 == 0 { err = errnoErr(e1) } @@ -2358,7 +2406,7 @@ func GetFileTime(handle Handle, ctime *Filetime, atime *Filetime, wtime *Filetim } func GetFileType(filehandle Handle) (n uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetFileType.Addr(), 1, uintptr(filehandle), 0, 0) + r0, _, e1 := syscall.SyscallN(procGetFileType.Addr(), uintptr(filehandle)) n = uint32(r0) if n == 0 { err = errnoErr(e1) @@ -2367,7 +2415,7 @@ func GetFileType(filehandle Handle) 
(n uint32, err error) { } func GetFinalPathNameByHandle(file Handle, filePath *uint16, filePathSize uint32, flags uint32) (n uint32, err error) { - r0, _, e1 := syscall.Syscall6(procGetFinalPathNameByHandleW.Addr(), 4, uintptr(file), uintptr(unsafe.Pointer(filePath)), uintptr(filePathSize), uintptr(flags), 0, 0) + r0, _, e1 := syscall.SyscallN(procGetFinalPathNameByHandleW.Addr(), uintptr(file), uintptr(unsafe.Pointer(filePath)), uintptr(filePathSize), uintptr(flags)) n = uint32(r0) if n == 0 { err = errnoErr(e1) @@ -2376,7 +2424,7 @@ func GetFinalPathNameByHandle(file Handle, filePath *uint16, filePathSize uint32 } func GetFullPathName(path *uint16, buflen uint32, buf *uint16, fname **uint16) (n uint32, err error) { - r0, _, e1 := syscall.Syscall6(procGetFullPathNameW.Addr(), 4, uintptr(unsafe.Pointer(path)), uintptr(buflen), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(fname)), 0, 0) + r0, _, e1 := syscall.SyscallN(procGetFullPathNameW.Addr(), uintptr(unsafe.Pointer(path)), uintptr(buflen), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(fname))) n = uint32(r0) if n == 0 { err = errnoErr(e1) @@ -2385,13 +2433,13 @@ func GetFullPathName(path *uint16, buflen uint32, buf *uint16, fname **uint16) ( } func GetLargePageMinimum() (size uintptr) { - r0, _, _ := syscall.Syscall(procGetLargePageMinimum.Addr(), 0, 0, 0, 0) + r0, _, _ := syscall.SyscallN(procGetLargePageMinimum.Addr()) size = uintptr(r0) return } func GetLastError() (lasterr error) { - r0, _, _ := syscall.Syscall(procGetLastError.Addr(), 0, 0, 0, 0) + r0, _, _ := syscall.SyscallN(procGetLastError.Addr()) if r0 != 0 { lasterr = syscall.Errno(r0) } @@ -2399,7 +2447,7 @@ func GetLastError() (lasterr error) { } func GetLogicalDriveStrings(bufferLength uint32, buffer *uint16) (n uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetLogicalDriveStringsW.Addr(), 2, uintptr(bufferLength), uintptr(unsafe.Pointer(buffer)), 0) + r0, _, e1 := syscall.SyscallN(procGetLogicalDriveStringsW.Addr(), 
uintptr(bufferLength), uintptr(unsafe.Pointer(buffer))) n = uint32(r0) if n == 0 { err = errnoErr(e1) @@ -2408,7 +2456,7 @@ func GetLogicalDriveStrings(bufferLength uint32, buffer *uint16) (n uint32, err } func GetLogicalDrives() (drivesBitMask uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetLogicalDrives.Addr(), 0, 0, 0, 0) + r0, _, e1 := syscall.SyscallN(procGetLogicalDrives.Addr()) drivesBitMask = uint32(r0) if drivesBitMask == 0 { err = errnoErr(e1) @@ -2417,7 +2465,7 @@ func GetLogicalDrives() (drivesBitMask uint32, err error) { } func GetLongPathName(path *uint16, buf *uint16, buflen uint32) (n uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetLongPathNameW.Addr(), 3, uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(buf)), uintptr(buflen)) + r0, _, e1 := syscall.SyscallN(procGetLongPathNameW.Addr(), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(buf)), uintptr(buflen)) n = uint32(r0) if n == 0 { err = errnoErr(e1) @@ -2426,13 +2474,13 @@ func GetLongPathName(path *uint16, buf *uint16, buflen uint32) (n uint32, err er } func GetMaximumProcessorCount(groupNumber uint16) (ret uint32) { - r0, _, _ := syscall.Syscall(procGetMaximumProcessorCount.Addr(), 1, uintptr(groupNumber), 0, 0) + r0, _, _ := syscall.SyscallN(procGetMaximumProcessorCount.Addr(), uintptr(groupNumber)) ret = uint32(r0) return } func GetModuleFileName(module Handle, filename *uint16, size uint32) (n uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetModuleFileNameW.Addr(), 3, uintptr(module), uintptr(unsafe.Pointer(filename)), uintptr(size)) + r0, _, e1 := syscall.SyscallN(procGetModuleFileNameW.Addr(), uintptr(module), uintptr(unsafe.Pointer(filename)), uintptr(size)) n = uint32(r0) if n == 0 { err = errnoErr(e1) @@ -2441,7 +2489,7 @@ func GetModuleFileName(module Handle, filename *uint16, size uint32) (n uint32, } func GetModuleHandleEx(flags uint32, moduleName *uint16, module *Handle) (err error) { - r1, _, e1 := 
syscall.Syscall(procGetModuleHandleExW.Addr(), 3, uintptr(flags), uintptr(unsafe.Pointer(moduleName)), uintptr(unsafe.Pointer(module))) + r1, _, e1 := syscall.SyscallN(procGetModuleHandleExW.Addr(), uintptr(flags), uintptr(unsafe.Pointer(moduleName)), uintptr(unsafe.Pointer(module))) if r1 == 0 { err = errnoErr(e1) } @@ -2449,7 +2497,7 @@ func GetModuleHandleEx(flags uint32, moduleName *uint16, module *Handle) (err er } func GetNamedPipeClientProcessId(pipe Handle, clientProcessID *uint32) (err error) { - r1, _, e1 := syscall.Syscall(procGetNamedPipeClientProcessId.Addr(), 2, uintptr(pipe), uintptr(unsafe.Pointer(clientProcessID)), 0) + r1, _, e1 := syscall.SyscallN(procGetNamedPipeClientProcessId.Addr(), uintptr(pipe), uintptr(unsafe.Pointer(clientProcessID))) if r1 == 0 { err = errnoErr(e1) } @@ -2457,7 +2505,7 @@ func GetNamedPipeClientProcessId(pipe Handle, clientProcessID *uint32) (err erro } func GetNamedPipeHandleState(pipe Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) { - r1, _, e1 := syscall.Syscall9(procGetNamedPipeHandleStateW.Addr(), 7, uintptr(pipe), uintptr(unsafe.Pointer(state)), uintptr(unsafe.Pointer(curInstances)), uintptr(unsafe.Pointer(maxCollectionCount)), uintptr(unsafe.Pointer(collectDataTimeout)), uintptr(unsafe.Pointer(userName)), uintptr(maxUserNameSize), 0, 0) + r1, _, e1 := syscall.SyscallN(procGetNamedPipeHandleStateW.Addr(), uintptr(pipe), uintptr(unsafe.Pointer(state)), uintptr(unsafe.Pointer(curInstances)), uintptr(unsafe.Pointer(maxCollectionCount)), uintptr(unsafe.Pointer(collectDataTimeout)), uintptr(unsafe.Pointer(userName)), uintptr(maxUserNameSize)) if r1 == 0 { err = errnoErr(e1) } @@ -2465,7 +2513,7 @@ func GetNamedPipeHandleState(pipe Handle, state *uint32, curInstances *uint32, m } func GetNamedPipeInfo(pipe Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) { - r1, _, e1 := 
syscall.Syscall6(procGetNamedPipeInfo.Addr(), 5, uintptr(pipe), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(outSize)), uintptr(unsafe.Pointer(inSize)), uintptr(unsafe.Pointer(maxInstances)), 0) + r1, _, e1 := syscall.SyscallN(procGetNamedPipeInfo.Addr(), uintptr(pipe), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(outSize)), uintptr(unsafe.Pointer(inSize)), uintptr(unsafe.Pointer(maxInstances))) if r1 == 0 { err = errnoErr(e1) } @@ -2473,7 +2521,15 @@ func GetNamedPipeInfo(pipe Handle, flags *uint32, outSize *uint32, inSize *uint3 } func GetNamedPipeServerProcessId(pipe Handle, serverProcessID *uint32) (err error) { - r1, _, e1 := syscall.Syscall(procGetNamedPipeServerProcessId.Addr(), 2, uintptr(pipe), uintptr(unsafe.Pointer(serverProcessID)), 0) + r1, _, e1 := syscall.SyscallN(procGetNamedPipeServerProcessId.Addr(), uintptr(pipe), uintptr(unsafe.Pointer(serverProcessID))) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func GetNumberOfConsoleInputEvents(console Handle, numevents *uint32) (err error) { + r1, _, e1 := syscall.SyscallN(procGetNumberOfConsoleInputEvents.Addr(), uintptr(console), uintptr(unsafe.Pointer(numevents))) if r1 == 0 { err = errnoErr(e1) } @@ -2485,7 +2541,7 @@ func GetOverlappedResult(handle Handle, overlapped *Overlapped, done *uint32, wa if wait { _p0 = 1 } - r1, _, e1 := syscall.Syscall6(procGetOverlappedResult.Addr(), 4, uintptr(handle), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(done)), uintptr(_p0), 0, 0) + r1, _, e1 := syscall.SyscallN(procGetOverlappedResult.Addr(), uintptr(handle), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(done)), uintptr(_p0)) if r1 == 0 { err = errnoErr(e1) } @@ -2493,7 +2549,7 @@ func GetOverlappedResult(handle Handle, overlapped *Overlapped, done *uint32, wa } func GetPriorityClass(process Handle) (ret uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetPriorityClass.Addr(), 1, uintptr(process), 0, 0) + r0, _, e1 := 
syscall.SyscallN(procGetPriorityClass.Addr(), uintptr(process)) ret = uint32(r0) if ret == 0 { err = errnoErr(e1) @@ -2511,7 +2567,7 @@ func GetProcAddress(module Handle, procname string) (proc uintptr, err error) { } func _GetProcAddress(module Handle, procname *byte) (proc uintptr, err error) { - r0, _, e1 := syscall.Syscall(procGetProcAddress.Addr(), 2, uintptr(module), uintptr(unsafe.Pointer(procname)), 0) + r0, _, e1 := syscall.SyscallN(procGetProcAddress.Addr(), uintptr(module), uintptr(unsafe.Pointer(procname))) proc = uintptr(r0) if proc == 0 { err = errnoErr(e1) @@ -2520,7 +2576,7 @@ func _GetProcAddress(module Handle, procname *byte) (proc uintptr, err error) { } func GetProcessId(process Handle) (id uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetProcessId.Addr(), 1, uintptr(process), 0, 0) + r0, _, e1 := syscall.SyscallN(procGetProcessId.Addr(), uintptr(process)) id = uint32(r0) if id == 0 { err = errnoErr(e1) @@ -2529,7 +2585,7 @@ func GetProcessId(process Handle) (id uint32, err error) { } func getProcessPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint16, bufSize *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procGetProcessPreferredUILanguages.Addr(), 4, uintptr(flags), uintptr(unsafe.Pointer(numLanguages)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(bufSize)), 0, 0) + r1, _, e1 := syscall.SyscallN(procGetProcessPreferredUILanguages.Addr(), uintptr(flags), uintptr(unsafe.Pointer(numLanguages)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(bufSize))) if r1 == 0 { err = errnoErr(e1) } @@ -2537,7 +2593,7 @@ func getProcessPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uin } func GetProcessShutdownParameters(level *uint32, flags *uint32) (err error) { - r1, _, e1 := syscall.Syscall(procGetProcessShutdownParameters.Addr(), 2, uintptr(unsafe.Pointer(level)), uintptr(unsafe.Pointer(flags)), 0) + r1, _, e1 := syscall.SyscallN(procGetProcessShutdownParameters.Addr(), 
uintptr(unsafe.Pointer(level)), uintptr(unsafe.Pointer(flags))) if r1 == 0 { err = errnoErr(e1) } @@ -2545,7 +2601,7 @@ func GetProcessShutdownParameters(level *uint32, flags *uint32) (err error) { } func GetProcessTimes(handle Handle, creationTime *Filetime, exitTime *Filetime, kernelTime *Filetime, userTime *Filetime) (err error) { - r1, _, e1 := syscall.Syscall6(procGetProcessTimes.Addr(), 5, uintptr(handle), uintptr(unsafe.Pointer(creationTime)), uintptr(unsafe.Pointer(exitTime)), uintptr(unsafe.Pointer(kernelTime)), uintptr(unsafe.Pointer(userTime)), 0) + r1, _, e1 := syscall.SyscallN(procGetProcessTimes.Addr(), uintptr(handle), uintptr(unsafe.Pointer(creationTime)), uintptr(unsafe.Pointer(exitTime)), uintptr(unsafe.Pointer(kernelTime)), uintptr(unsafe.Pointer(userTime))) if r1 == 0 { err = errnoErr(e1) } @@ -2553,12 +2609,12 @@ func GetProcessTimes(handle Handle, creationTime *Filetime, exitTime *Filetime, } func GetProcessWorkingSetSizeEx(hProcess Handle, lpMinimumWorkingSetSize *uintptr, lpMaximumWorkingSetSize *uintptr, flags *uint32) { - syscall.Syscall6(procGetProcessWorkingSetSizeEx.Addr(), 4, uintptr(hProcess), uintptr(unsafe.Pointer(lpMinimumWorkingSetSize)), uintptr(unsafe.Pointer(lpMaximumWorkingSetSize)), uintptr(unsafe.Pointer(flags)), 0, 0) + syscall.SyscallN(procGetProcessWorkingSetSizeEx.Addr(), uintptr(hProcess), uintptr(unsafe.Pointer(lpMinimumWorkingSetSize)), uintptr(unsafe.Pointer(lpMaximumWorkingSetSize)), uintptr(unsafe.Pointer(flags))) return } func GetQueuedCompletionStatus(cphandle Handle, qty *uint32, key *uintptr, overlapped **Overlapped, timeout uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procGetQueuedCompletionStatus.Addr(), 5, uintptr(cphandle), uintptr(unsafe.Pointer(qty)), uintptr(unsafe.Pointer(key)), uintptr(unsafe.Pointer(overlapped)), uintptr(timeout), 0) + r1, _, e1 := syscall.SyscallN(procGetQueuedCompletionStatus.Addr(), uintptr(cphandle), uintptr(unsafe.Pointer(qty)), uintptr(unsafe.Pointer(key)), 
uintptr(unsafe.Pointer(overlapped)), uintptr(timeout)) if r1 == 0 { err = errnoErr(e1) } @@ -2566,7 +2622,7 @@ func GetQueuedCompletionStatus(cphandle Handle, qty *uint32, key *uintptr, overl } func GetShortPathName(longpath *uint16, shortpath *uint16, buflen uint32) (n uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetShortPathNameW.Addr(), 3, uintptr(unsafe.Pointer(longpath)), uintptr(unsafe.Pointer(shortpath)), uintptr(buflen)) + r0, _, e1 := syscall.SyscallN(procGetShortPathNameW.Addr(), uintptr(unsafe.Pointer(longpath)), uintptr(unsafe.Pointer(shortpath)), uintptr(buflen)) n = uint32(r0) if n == 0 { err = errnoErr(e1) @@ -2575,12 +2631,12 @@ func GetShortPathName(longpath *uint16, shortpath *uint16, buflen uint32) (n uin } func getStartupInfo(startupInfo *StartupInfo) { - syscall.Syscall(procGetStartupInfoW.Addr(), 1, uintptr(unsafe.Pointer(startupInfo)), 0, 0) + syscall.SyscallN(procGetStartupInfoW.Addr(), uintptr(unsafe.Pointer(startupInfo))) return } func GetStdHandle(stdhandle uint32) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procGetStdHandle.Addr(), 1, uintptr(stdhandle), 0, 0) + r0, _, e1 := syscall.SyscallN(procGetStdHandle.Addr(), uintptr(stdhandle)) handle = Handle(r0) if handle == InvalidHandle { err = errnoErr(e1) @@ -2589,7 +2645,7 @@ func GetStdHandle(stdhandle uint32) (handle Handle, err error) { } func getSystemDirectory(dir *uint16, dirLen uint32) (len uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetSystemDirectoryW.Addr(), 2, uintptr(unsafe.Pointer(dir)), uintptr(dirLen), 0) + r0, _, e1 := syscall.SyscallN(procGetSystemDirectoryW.Addr(), uintptr(unsafe.Pointer(dir)), uintptr(dirLen)) len = uint32(r0) if len == 0 { err = errnoErr(e1) @@ -2598,7 +2654,7 @@ func getSystemDirectory(dir *uint16, dirLen uint32) (len uint32, err error) { } func getSystemPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint16, bufSize *uint32) (err error) { - r1, _, e1 := 
syscall.Syscall6(procGetSystemPreferredUILanguages.Addr(), 4, uintptr(flags), uintptr(unsafe.Pointer(numLanguages)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(bufSize)), 0, 0) + r1, _, e1 := syscall.SyscallN(procGetSystemPreferredUILanguages.Addr(), uintptr(flags), uintptr(unsafe.Pointer(numLanguages)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(bufSize))) if r1 == 0 { err = errnoErr(e1) } @@ -2606,17 +2662,17 @@ func getSystemPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint } func GetSystemTimeAsFileTime(time *Filetime) { - syscall.Syscall(procGetSystemTimeAsFileTime.Addr(), 1, uintptr(unsafe.Pointer(time)), 0, 0) + syscall.SyscallN(procGetSystemTimeAsFileTime.Addr(), uintptr(unsafe.Pointer(time))) return } func GetSystemTimePreciseAsFileTime(time *Filetime) { - syscall.Syscall(procGetSystemTimePreciseAsFileTime.Addr(), 1, uintptr(unsafe.Pointer(time)), 0, 0) + syscall.SyscallN(procGetSystemTimePreciseAsFileTime.Addr(), uintptr(unsafe.Pointer(time))) return } func getSystemWindowsDirectory(dir *uint16, dirLen uint32) (len uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetSystemWindowsDirectoryW.Addr(), 2, uintptr(unsafe.Pointer(dir)), uintptr(dirLen), 0) + r0, _, e1 := syscall.SyscallN(procGetSystemWindowsDirectoryW.Addr(), uintptr(unsafe.Pointer(dir)), uintptr(dirLen)) len = uint32(r0) if len == 0 { err = errnoErr(e1) @@ -2625,7 +2681,7 @@ func getSystemWindowsDirectory(dir *uint16, dirLen uint32) (len uint32, err erro } func GetTempPath(buflen uint32, buf *uint16) (n uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetTempPathW.Addr(), 2, uintptr(buflen), uintptr(unsafe.Pointer(buf)), 0) + r0, _, e1 := syscall.SyscallN(procGetTempPathW.Addr(), uintptr(buflen), uintptr(unsafe.Pointer(buf))) n = uint32(r0) if n == 0 { err = errnoErr(e1) @@ -2634,7 +2690,7 @@ func GetTempPath(buflen uint32, buf *uint16) (n uint32, err error) { } func getThreadPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint16, 
bufSize *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procGetThreadPreferredUILanguages.Addr(), 4, uintptr(flags), uintptr(unsafe.Pointer(numLanguages)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(bufSize)), 0, 0) + r1, _, e1 := syscall.SyscallN(procGetThreadPreferredUILanguages.Addr(), uintptr(flags), uintptr(unsafe.Pointer(numLanguages)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(bufSize))) if r1 == 0 { err = errnoErr(e1) } @@ -2642,13 +2698,13 @@ func getThreadPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint } func getTickCount64() (ms uint64) { - r0, _, _ := syscall.Syscall(procGetTickCount64.Addr(), 0, 0, 0, 0) + r0, _, _ := syscall.SyscallN(procGetTickCount64.Addr()) ms = uint64(r0) return } func GetTimeZoneInformation(tzi *Timezoneinformation) (rc uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetTimeZoneInformation.Addr(), 1, uintptr(unsafe.Pointer(tzi)), 0, 0) + r0, _, e1 := syscall.SyscallN(procGetTimeZoneInformation.Addr(), uintptr(unsafe.Pointer(tzi))) rc = uint32(r0) if rc == 0xffffffff { err = errnoErr(e1) @@ -2657,7 +2713,7 @@ func GetTimeZoneInformation(tzi *Timezoneinformation) (rc uint32, err error) { } func getUserPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint16, bufSize *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procGetUserPreferredUILanguages.Addr(), 4, uintptr(flags), uintptr(unsafe.Pointer(numLanguages)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(bufSize)), 0, 0) + r1, _, e1 := syscall.SyscallN(procGetUserPreferredUILanguages.Addr(), uintptr(flags), uintptr(unsafe.Pointer(numLanguages)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(bufSize))) if r1 == 0 { err = errnoErr(e1) } @@ -2665,7 +2721,7 @@ func getUserPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint16 } func GetVersion() (ver uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetVersion.Addr(), 0, 0, 0, 0) + r0, _, e1 := 
syscall.SyscallN(procGetVersion.Addr()) ver = uint32(r0) if ver == 0 { err = errnoErr(e1) @@ -2674,7 +2730,7 @@ func GetVersion() (ver uint32, err error) { } func GetVolumeInformationByHandle(file Handle, volumeNameBuffer *uint16, volumeNameSize uint32, volumeNameSerialNumber *uint32, maximumComponentLength *uint32, fileSystemFlags *uint32, fileSystemNameBuffer *uint16, fileSystemNameSize uint32) (err error) { - r1, _, e1 := syscall.Syscall9(procGetVolumeInformationByHandleW.Addr(), 8, uintptr(file), uintptr(unsafe.Pointer(volumeNameBuffer)), uintptr(volumeNameSize), uintptr(unsafe.Pointer(volumeNameSerialNumber)), uintptr(unsafe.Pointer(maximumComponentLength)), uintptr(unsafe.Pointer(fileSystemFlags)), uintptr(unsafe.Pointer(fileSystemNameBuffer)), uintptr(fileSystemNameSize), 0) + r1, _, e1 := syscall.SyscallN(procGetVolumeInformationByHandleW.Addr(), uintptr(file), uintptr(unsafe.Pointer(volumeNameBuffer)), uintptr(volumeNameSize), uintptr(unsafe.Pointer(volumeNameSerialNumber)), uintptr(unsafe.Pointer(maximumComponentLength)), uintptr(unsafe.Pointer(fileSystemFlags)), uintptr(unsafe.Pointer(fileSystemNameBuffer)), uintptr(fileSystemNameSize)) if r1 == 0 { err = errnoErr(e1) } @@ -2682,7 +2738,7 @@ func GetVolumeInformationByHandle(file Handle, volumeNameBuffer *uint16, volumeN } func GetVolumeInformation(rootPathName *uint16, volumeNameBuffer *uint16, volumeNameSize uint32, volumeNameSerialNumber *uint32, maximumComponentLength *uint32, fileSystemFlags *uint32, fileSystemNameBuffer *uint16, fileSystemNameSize uint32) (err error) { - r1, _, e1 := syscall.Syscall9(procGetVolumeInformationW.Addr(), 8, uintptr(unsafe.Pointer(rootPathName)), uintptr(unsafe.Pointer(volumeNameBuffer)), uintptr(volumeNameSize), uintptr(unsafe.Pointer(volumeNameSerialNumber)), uintptr(unsafe.Pointer(maximumComponentLength)), uintptr(unsafe.Pointer(fileSystemFlags)), uintptr(unsafe.Pointer(fileSystemNameBuffer)), uintptr(fileSystemNameSize), 0) + r1, _, e1 := 
syscall.SyscallN(procGetVolumeInformationW.Addr(), uintptr(unsafe.Pointer(rootPathName)), uintptr(unsafe.Pointer(volumeNameBuffer)), uintptr(volumeNameSize), uintptr(unsafe.Pointer(volumeNameSerialNumber)), uintptr(unsafe.Pointer(maximumComponentLength)), uintptr(unsafe.Pointer(fileSystemFlags)), uintptr(unsafe.Pointer(fileSystemNameBuffer)), uintptr(fileSystemNameSize)) if r1 == 0 { err = errnoErr(e1) } @@ -2690,7 +2746,7 @@ func GetVolumeInformation(rootPathName *uint16, volumeNameBuffer *uint16, volume } func GetVolumeNameForVolumeMountPoint(volumeMountPoint *uint16, volumeName *uint16, bufferlength uint32) (err error) { - r1, _, e1 := syscall.Syscall(procGetVolumeNameForVolumeMountPointW.Addr(), 3, uintptr(unsafe.Pointer(volumeMountPoint)), uintptr(unsafe.Pointer(volumeName)), uintptr(bufferlength)) + r1, _, e1 := syscall.SyscallN(procGetVolumeNameForVolumeMountPointW.Addr(), uintptr(unsafe.Pointer(volumeMountPoint)), uintptr(unsafe.Pointer(volumeName)), uintptr(bufferlength)) if r1 == 0 { err = errnoErr(e1) } @@ -2698,7 +2754,7 @@ func GetVolumeNameForVolumeMountPoint(volumeMountPoint *uint16, volumeName *uint } func GetVolumePathName(fileName *uint16, volumePathName *uint16, bufferLength uint32) (err error) { - r1, _, e1 := syscall.Syscall(procGetVolumePathNameW.Addr(), 3, uintptr(unsafe.Pointer(fileName)), uintptr(unsafe.Pointer(volumePathName)), uintptr(bufferLength)) + r1, _, e1 := syscall.SyscallN(procGetVolumePathNameW.Addr(), uintptr(unsafe.Pointer(fileName)), uintptr(unsafe.Pointer(volumePathName)), uintptr(bufferLength)) if r1 == 0 { err = errnoErr(e1) } @@ -2706,7 +2762,7 @@ func GetVolumePathName(fileName *uint16, volumePathName *uint16, bufferLength ui } func GetVolumePathNamesForVolumeName(volumeName *uint16, volumePathNames *uint16, bufferLength uint32, returnLength *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procGetVolumePathNamesForVolumeNameW.Addr(), 4, uintptr(unsafe.Pointer(volumeName)), uintptr(unsafe.Pointer(volumePathNames)), 
uintptr(bufferLength), uintptr(unsafe.Pointer(returnLength)), 0, 0) + r1, _, e1 := syscall.SyscallN(procGetVolumePathNamesForVolumeNameW.Addr(), uintptr(unsafe.Pointer(volumeName)), uintptr(unsafe.Pointer(volumePathNames)), uintptr(bufferLength), uintptr(unsafe.Pointer(returnLength))) if r1 == 0 { err = errnoErr(e1) } @@ -2714,7 +2770,7 @@ func GetVolumePathNamesForVolumeName(volumeName *uint16, volumePathNames *uint16 } func getWindowsDirectory(dir *uint16, dirLen uint32) (len uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetWindowsDirectoryW.Addr(), 2, uintptr(unsafe.Pointer(dir)), uintptr(dirLen), 0) + r0, _, e1 := syscall.SyscallN(procGetWindowsDirectoryW.Addr(), uintptr(unsafe.Pointer(dir)), uintptr(dirLen)) len = uint32(r0) if len == 0 { err = errnoErr(e1) @@ -2723,7 +2779,7 @@ func getWindowsDirectory(dir *uint16, dirLen uint32) (len uint32, err error) { } func initializeProcThreadAttributeList(attrlist *ProcThreadAttributeList, attrcount uint32, flags uint32, size *uintptr) (err error) { - r1, _, e1 := syscall.Syscall6(procInitializeProcThreadAttributeList.Addr(), 4, uintptr(unsafe.Pointer(attrlist)), uintptr(attrcount), uintptr(flags), uintptr(unsafe.Pointer(size)), 0, 0) + r1, _, e1 := syscall.SyscallN(procInitializeProcThreadAttributeList.Addr(), uintptr(unsafe.Pointer(attrlist)), uintptr(attrcount), uintptr(flags), uintptr(unsafe.Pointer(size))) if r1 == 0 { err = errnoErr(e1) } @@ -2735,7 +2791,7 @@ func IsWow64Process(handle Handle, isWow64 *bool) (err error) { if *isWow64 { _p0 = 1 } - r1, _, e1 := syscall.Syscall(procIsWow64Process.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(&_p0)), 0) + r1, _, e1 := syscall.SyscallN(procIsWow64Process.Addr(), uintptr(handle), uintptr(unsafe.Pointer(&_p0))) *isWow64 = _p0 != 0 if r1 == 0 { err = errnoErr(e1) @@ -2748,7 +2804,7 @@ func IsWow64Process2(handle Handle, processMachine *uint16, nativeMachine *uint1 if err != nil { return } - r1, _, e1 := syscall.Syscall(procIsWow64Process2.Addr(), 3, 
uintptr(handle), uintptr(unsafe.Pointer(processMachine)), uintptr(unsafe.Pointer(nativeMachine))) + r1, _, e1 := syscall.SyscallN(procIsWow64Process2.Addr(), uintptr(handle), uintptr(unsafe.Pointer(processMachine)), uintptr(unsafe.Pointer(nativeMachine))) if r1 == 0 { err = errnoErr(e1) } @@ -2765,7 +2821,7 @@ func LoadLibraryEx(libname string, zero Handle, flags uintptr) (handle Handle, e } func _LoadLibraryEx(libname *uint16, zero Handle, flags uintptr) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procLoadLibraryExW.Addr(), 3, uintptr(unsafe.Pointer(libname)), uintptr(zero), uintptr(flags)) + r0, _, e1 := syscall.SyscallN(procLoadLibraryExW.Addr(), uintptr(unsafe.Pointer(libname)), uintptr(zero), uintptr(flags)) handle = Handle(r0) if handle == 0 { err = errnoErr(e1) @@ -2783,7 +2839,7 @@ func LoadLibrary(libname string) (handle Handle, err error) { } func _LoadLibrary(libname *uint16) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procLoadLibraryW.Addr(), 1, uintptr(unsafe.Pointer(libname)), 0, 0) + r0, _, e1 := syscall.SyscallN(procLoadLibraryW.Addr(), uintptr(unsafe.Pointer(libname))) handle = Handle(r0) if handle == 0 { err = errnoErr(e1) @@ -2792,7 +2848,7 @@ func _LoadLibrary(libname *uint16) (handle Handle, err error) { } func LoadResource(module Handle, resInfo Handle) (resData Handle, err error) { - r0, _, e1 := syscall.Syscall(procLoadResource.Addr(), 2, uintptr(module), uintptr(resInfo), 0) + r0, _, e1 := syscall.SyscallN(procLoadResource.Addr(), uintptr(module), uintptr(resInfo)) resData = Handle(r0) if resData == 0 { err = errnoErr(e1) @@ -2801,7 +2857,7 @@ func LoadResource(module Handle, resInfo Handle) (resData Handle, err error) { } func LocalAlloc(flags uint32, length uint32) (ptr uintptr, err error) { - r0, _, e1 := syscall.Syscall(procLocalAlloc.Addr(), 2, uintptr(flags), uintptr(length), 0) + r0, _, e1 := syscall.SyscallN(procLocalAlloc.Addr(), uintptr(flags), uintptr(length)) ptr = uintptr(r0) if ptr == 0 { err = 
errnoErr(e1) @@ -2810,7 +2866,7 @@ func LocalAlloc(flags uint32, length uint32) (ptr uintptr, err error) { } func LocalFree(hmem Handle) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procLocalFree.Addr(), 1, uintptr(hmem), 0, 0) + r0, _, e1 := syscall.SyscallN(procLocalFree.Addr(), uintptr(hmem)) handle = Handle(r0) if handle != 0 { err = errnoErr(e1) @@ -2819,7 +2875,7 @@ func LocalFree(hmem Handle) (handle Handle, err error) { } func LockFileEx(file Handle, flags uint32, reserved uint32, bytesLow uint32, bytesHigh uint32, overlapped *Overlapped) (err error) { - r1, _, e1 := syscall.Syscall6(procLockFileEx.Addr(), 6, uintptr(file), uintptr(flags), uintptr(reserved), uintptr(bytesLow), uintptr(bytesHigh), uintptr(unsafe.Pointer(overlapped))) + r1, _, e1 := syscall.SyscallN(procLockFileEx.Addr(), uintptr(file), uintptr(flags), uintptr(reserved), uintptr(bytesLow), uintptr(bytesHigh), uintptr(unsafe.Pointer(overlapped))) if r1 == 0 { err = errnoErr(e1) } @@ -2827,7 +2883,7 @@ func LockFileEx(file Handle, flags uint32, reserved uint32, bytesLow uint32, byt } func LockResource(resData Handle) (addr uintptr, err error) { - r0, _, e1 := syscall.Syscall(procLockResource.Addr(), 1, uintptr(resData), 0, 0) + r0, _, e1 := syscall.SyscallN(procLockResource.Addr(), uintptr(resData)) addr = uintptr(r0) if addr == 0 { err = errnoErr(e1) @@ -2836,7 +2892,7 @@ func LockResource(resData Handle) (addr uintptr, err error) { } func MapViewOfFile(handle Handle, access uint32, offsetHigh uint32, offsetLow uint32, length uintptr) (addr uintptr, err error) { - r0, _, e1 := syscall.Syscall6(procMapViewOfFile.Addr(), 5, uintptr(handle), uintptr(access), uintptr(offsetHigh), uintptr(offsetLow), uintptr(length), 0) + r0, _, e1 := syscall.SyscallN(procMapViewOfFile.Addr(), uintptr(handle), uintptr(access), uintptr(offsetHigh), uintptr(offsetLow), uintptr(length)) addr = uintptr(r0) if addr == 0 { err = errnoErr(e1) @@ -2845,7 +2901,7 @@ func MapViewOfFile(handle Handle, access 
uint32, offsetHigh uint32, offsetLow ui } func Module32First(snapshot Handle, moduleEntry *ModuleEntry32) (err error) { - r1, _, e1 := syscall.Syscall(procModule32FirstW.Addr(), 2, uintptr(snapshot), uintptr(unsafe.Pointer(moduleEntry)), 0) + r1, _, e1 := syscall.SyscallN(procModule32FirstW.Addr(), uintptr(snapshot), uintptr(unsafe.Pointer(moduleEntry))) if r1 == 0 { err = errnoErr(e1) } @@ -2853,7 +2909,7 @@ func Module32First(snapshot Handle, moduleEntry *ModuleEntry32) (err error) { } func Module32Next(snapshot Handle, moduleEntry *ModuleEntry32) (err error) { - r1, _, e1 := syscall.Syscall(procModule32NextW.Addr(), 2, uintptr(snapshot), uintptr(unsafe.Pointer(moduleEntry)), 0) + r1, _, e1 := syscall.SyscallN(procModule32NextW.Addr(), uintptr(snapshot), uintptr(unsafe.Pointer(moduleEntry))) if r1 == 0 { err = errnoErr(e1) } @@ -2861,7 +2917,7 @@ func Module32Next(snapshot Handle, moduleEntry *ModuleEntry32) (err error) { } func MoveFileEx(from *uint16, to *uint16, flags uint32) (err error) { - r1, _, e1 := syscall.Syscall(procMoveFileExW.Addr(), 3, uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(to)), uintptr(flags)) + r1, _, e1 := syscall.SyscallN(procMoveFileExW.Addr(), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(to)), uintptr(flags)) if r1 == 0 { err = errnoErr(e1) } @@ -2869,7 +2925,7 @@ func MoveFileEx(from *uint16, to *uint16, flags uint32) (err error) { } func MoveFile(from *uint16, to *uint16) (err error) { - r1, _, e1 := syscall.Syscall(procMoveFileW.Addr(), 2, uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(to)), 0) + r1, _, e1 := syscall.SyscallN(procMoveFileW.Addr(), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(to))) if r1 == 0 { err = errnoErr(e1) } @@ -2877,7 +2933,7 @@ func MoveFile(from *uint16, to *uint16) (err error) { } func MultiByteToWideChar(codePage uint32, dwFlags uint32, str *byte, nstr int32, wchar *uint16, nwchar int32) (nwrite int32, err error) { - r0, _, e1 := 
syscall.Syscall6(procMultiByteToWideChar.Addr(), 6, uintptr(codePage), uintptr(dwFlags), uintptr(unsafe.Pointer(str)), uintptr(nstr), uintptr(unsafe.Pointer(wchar)), uintptr(nwchar)) + r0, _, e1 := syscall.SyscallN(procMultiByteToWideChar.Addr(), uintptr(codePage), uintptr(dwFlags), uintptr(unsafe.Pointer(str)), uintptr(nstr), uintptr(unsafe.Pointer(wchar)), uintptr(nwchar)) nwrite = int32(r0) if nwrite == 0 { err = errnoErr(e1) @@ -2890,7 +2946,7 @@ func OpenEvent(desiredAccess uint32, inheritHandle bool, name *uint16) (handle H if inheritHandle { _p0 = 1 } - r0, _, e1 := syscall.Syscall(procOpenEventW.Addr(), 3, uintptr(desiredAccess), uintptr(_p0), uintptr(unsafe.Pointer(name))) + r0, _, e1 := syscall.SyscallN(procOpenEventW.Addr(), uintptr(desiredAccess), uintptr(_p0), uintptr(unsafe.Pointer(name))) handle = Handle(r0) if handle == 0 { err = errnoErr(e1) @@ -2903,7 +2959,7 @@ func OpenMutex(desiredAccess uint32, inheritHandle bool, name *uint16) (handle H if inheritHandle { _p0 = 1 } - r0, _, e1 := syscall.Syscall(procOpenMutexW.Addr(), 3, uintptr(desiredAccess), uintptr(_p0), uintptr(unsafe.Pointer(name))) + r0, _, e1 := syscall.SyscallN(procOpenMutexW.Addr(), uintptr(desiredAccess), uintptr(_p0), uintptr(unsafe.Pointer(name))) handle = Handle(r0) if handle == 0 { err = errnoErr(e1) @@ -2916,7 +2972,7 @@ func OpenProcess(desiredAccess uint32, inheritHandle bool, processId uint32) (ha if inheritHandle { _p0 = 1 } - r0, _, e1 := syscall.Syscall(procOpenProcess.Addr(), 3, uintptr(desiredAccess), uintptr(_p0), uintptr(processId)) + r0, _, e1 := syscall.SyscallN(procOpenProcess.Addr(), uintptr(desiredAccess), uintptr(_p0), uintptr(processId)) handle = Handle(r0) if handle == 0 { err = errnoErr(e1) @@ -2929,7 +2985,7 @@ func OpenThread(desiredAccess uint32, inheritHandle bool, threadId uint32) (hand if inheritHandle { _p0 = 1 } - r0, _, e1 := syscall.Syscall(procOpenThread.Addr(), 3, uintptr(desiredAccess), uintptr(_p0), uintptr(threadId)) + r0, _, e1 := 
syscall.SyscallN(procOpenThread.Addr(), uintptr(desiredAccess), uintptr(_p0), uintptr(threadId)) handle = Handle(r0) if handle == 0 { err = errnoErr(e1) @@ -2938,7 +2994,7 @@ func OpenThread(desiredAccess uint32, inheritHandle bool, threadId uint32) (hand } func PostQueuedCompletionStatus(cphandle Handle, qty uint32, key uintptr, overlapped *Overlapped) (err error) { - r1, _, e1 := syscall.Syscall6(procPostQueuedCompletionStatus.Addr(), 4, uintptr(cphandle), uintptr(qty), uintptr(key), uintptr(unsafe.Pointer(overlapped)), 0, 0) + r1, _, e1 := syscall.SyscallN(procPostQueuedCompletionStatus.Addr(), uintptr(cphandle), uintptr(qty), uintptr(key), uintptr(unsafe.Pointer(overlapped))) if r1 == 0 { err = errnoErr(e1) } @@ -2946,7 +3002,7 @@ func PostQueuedCompletionStatus(cphandle Handle, qty uint32, key uintptr, overla } func Process32First(snapshot Handle, procEntry *ProcessEntry32) (err error) { - r1, _, e1 := syscall.Syscall(procProcess32FirstW.Addr(), 2, uintptr(snapshot), uintptr(unsafe.Pointer(procEntry)), 0) + r1, _, e1 := syscall.SyscallN(procProcess32FirstW.Addr(), uintptr(snapshot), uintptr(unsafe.Pointer(procEntry))) if r1 == 0 { err = errnoErr(e1) } @@ -2954,7 +3010,7 @@ func Process32First(snapshot Handle, procEntry *ProcessEntry32) (err error) { } func Process32Next(snapshot Handle, procEntry *ProcessEntry32) (err error) { - r1, _, e1 := syscall.Syscall(procProcess32NextW.Addr(), 2, uintptr(snapshot), uintptr(unsafe.Pointer(procEntry)), 0) + r1, _, e1 := syscall.SyscallN(procProcess32NextW.Addr(), uintptr(snapshot), uintptr(unsafe.Pointer(procEntry))) if r1 == 0 { err = errnoErr(e1) } @@ -2962,7 +3018,7 @@ func Process32Next(snapshot Handle, procEntry *ProcessEntry32) (err error) { } func ProcessIdToSessionId(pid uint32, sessionid *uint32) (err error) { - r1, _, e1 := syscall.Syscall(procProcessIdToSessionId.Addr(), 2, uintptr(pid), uintptr(unsafe.Pointer(sessionid)), 0) + r1, _, e1 := syscall.SyscallN(procProcessIdToSessionId.Addr(), uintptr(pid), 
uintptr(unsafe.Pointer(sessionid))) if r1 == 0 { err = errnoErr(e1) } @@ -2970,7 +3026,7 @@ func ProcessIdToSessionId(pid uint32, sessionid *uint32) (err error) { } func PulseEvent(event Handle) (err error) { - r1, _, e1 := syscall.Syscall(procPulseEvent.Addr(), 1, uintptr(event), 0, 0) + r1, _, e1 := syscall.SyscallN(procPulseEvent.Addr(), uintptr(event)) if r1 == 0 { err = errnoErr(e1) } @@ -2978,7 +3034,7 @@ func PulseEvent(event Handle) (err error) { } func PurgeComm(handle Handle, dwFlags uint32) (err error) { - r1, _, e1 := syscall.Syscall(procPurgeComm.Addr(), 2, uintptr(handle), uintptr(dwFlags), 0) + r1, _, e1 := syscall.SyscallN(procPurgeComm.Addr(), uintptr(handle), uintptr(dwFlags)) if r1 == 0 { err = errnoErr(e1) } @@ -2986,7 +3042,7 @@ func PurgeComm(handle Handle, dwFlags uint32) (err error) { } func QueryDosDevice(deviceName *uint16, targetPath *uint16, max uint32) (n uint32, err error) { - r0, _, e1 := syscall.Syscall(procQueryDosDeviceW.Addr(), 3, uintptr(unsafe.Pointer(deviceName)), uintptr(unsafe.Pointer(targetPath)), uintptr(max)) + r0, _, e1 := syscall.SyscallN(procQueryDosDeviceW.Addr(), uintptr(unsafe.Pointer(deviceName)), uintptr(unsafe.Pointer(targetPath)), uintptr(max)) n = uint32(r0) if n == 0 { err = errnoErr(e1) @@ -2995,7 +3051,7 @@ func QueryDosDevice(deviceName *uint16, targetPath *uint16, max uint32) (n uint3 } func QueryFullProcessImageName(proc Handle, flags uint32, exeName *uint16, size *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procQueryFullProcessImageNameW.Addr(), 4, uintptr(proc), uintptr(flags), uintptr(unsafe.Pointer(exeName)), uintptr(unsafe.Pointer(size)), 0, 0) + r1, _, e1 := syscall.SyscallN(procQueryFullProcessImageNameW.Addr(), uintptr(proc), uintptr(flags), uintptr(unsafe.Pointer(exeName)), uintptr(unsafe.Pointer(size))) if r1 == 0 { err = errnoErr(e1) } @@ -3003,7 +3059,7 @@ func QueryFullProcessImageName(proc Handle, flags uint32, exeName *uint16, size } func QueryInformationJobObject(job Handle, 
JobObjectInformationClass int32, JobObjectInformation uintptr, JobObjectInformationLength uint32, retlen *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procQueryInformationJobObject.Addr(), 5, uintptr(job), uintptr(JobObjectInformationClass), uintptr(JobObjectInformation), uintptr(JobObjectInformationLength), uintptr(unsafe.Pointer(retlen)), 0) + r1, _, e1 := syscall.SyscallN(procQueryInformationJobObject.Addr(), uintptr(job), uintptr(JobObjectInformationClass), uintptr(JobObjectInformation), uintptr(JobObjectInformationLength), uintptr(unsafe.Pointer(retlen))) if r1 == 0 { err = errnoErr(e1) } @@ -3011,7 +3067,7 @@ func QueryInformationJobObject(job Handle, JobObjectInformationClass int32, JobO } func ReadConsole(console Handle, buf *uint16, toread uint32, read *uint32, inputControl *byte) (err error) { - r1, _, e1 := syscall.Syscall6(procReadConsoleW.Addr(), 5, uintptr(console), uintptr(unsafe.Pointer(buf)), uintptr(toread), uintptr(unsafe.Pointer(read)), uintptr(unsafe.Pointer(inputControl)), 0) + r1, _, e1 := syscall.SyscallN(procReadConsoleW.Addr(), uintptr(console), uintptr(unsafe.Pointer(buf)), uintptr(toread), uintptr(unsafe.Pointer(read)), uintptr(unsafe.Pointer(inputControl))) if r1 == 0 { err = errnoErr(e1) } @@ -3023,7 +3079,7 @@ func ReadDirectoryChanges(handle Handle, buf *byte, buflen uint32, watchSubTree if watchSubTree { _p0 = 1 } - r1, _, e1 := syscall.Syscall9(procReadDirectoryChangesW.Addr(), 8, uintptr(handle), uintptr(unsafe.Pointer(buf)), uintptr(buflen), uintptr(_p0), uintptr(mask), uintptr(unsafe.Pointer(retlen)), uintptr(unsafe.Pointer(overlapped)), uintptr(completionRoutine), 0) + r1, _, e1 := syscall.SyscallN(procReadDirectoryChangesW.Addr(), uintptr(handle), uintptr(unsafe.Pointer(buf)), uintptr(buflen), uintptr(_p0), uintptr(mask), uintptr(unsafe.Pointer(retlen)), uintptr(unsafe.Pointer(overlapped)), uintptr(completionRoutine)) if r1 == 0 { err = errnoErr(e1) } @@ -3035,7 +3091,7 @@ func readFile(handle Handle, buf []byte, done 
*uint32, overlapped *Overlapped) ( if len(buf) > 0 { _p0 = &buf[0] } - r1, _, e1 := syscall.Syscall6(procReadFile.Addr(), 5, uintptr(handle), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(unsafe.Pointer(done)), uintptr(unsafe.Pointer(overlapped)), 0) + r1, _, e1 := syscall.SyscallN(procReadFile.Addr(), uintptr(handle), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(unsafe.Pointer(done)), uintptr(unsafe.Pointer(overlapped))) if r1 == 0 { err = errnoErr(e1) } @@ -3043,7 +3099,7 @@ func readFile(handle Handle, buf []byte, done *uint32, overlapped *Overlapped) ( } func ReadProcessMemory(process Handle, baseAddress uintptr, buffer *byte, size uintptr, numberOfBytesRead *uintptr) (err error) { - r1, _, e1 := syscall.Syscall6(procReadProcessMemory.Addr(), 5, uintptr(process), uintptr(baseAddress), uintptr(unsafe.Pointer(buffer)), uintptr(size), uintptr(unsafe.Pointer(numberOfBytesRead)), 0) + r1, _, e1 := syscall.SyscallN(procReadProcessMemory.Addr(), uintptr(process), uintptr(baseAddress), uintptr(unsafe.Pointer(buffer)), uintptr(size), uintptr(unsafe.Pointer(numberOfBytesRead))) if r1 == 0 { err = errnoErr(e1) } @@ -3051,7 +3107,7 @@ func ReadProcessMemory(process Handle, baseAddress uintptr, buffer *byte, size u } func ReleaseMutex(mutex Handle) (err error) { - r1, _, e1 := syscall.Syscall(procReleaseMutex.Addr(), 1, uintptr(mutex), 0, 0) + r1, _, e1 := syscall.SyscallN(procReleaseMutex.Addr(), uintptr(mutex)) if r1 == 0 { err = errnoErr(e1) } @@ -3059,7 +3115,7 @@ func ReleaseMutex(mutex Handle) (err error) { } func RemoveDirectory(path *uint16) (err error) { - r1, _, e1 := syscall.Syscall(procRemoveDirectoryW.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0) + r1, _, e1 := syscall.SyscallN(procRemoveDirectoryW.Addr(), uintptr(unsafe.Pointer(path))) if r1 == 0 { err = errnoErr(e1) } @@ -3067,7 +3123,7 @@ func RemoveDirectory(path *uint16) (err error) { } func RemoveDllDirectory(cookie uintptr) (err error) { - r1, _, e1 := 
syscall.Syscall(procRemoveDllDirectory.Addr(), 1, uintptr(cookie), 0, 0) + r1, _, e1 := syscall.SyscallN(procRemoveDllDirectory.Addr(), uintptr(cookie)) if r1 == 0 { err = errnoErr(e1) } @@ -3075,7 +3131,7 @@ func RemoveDllDirectory(cookie uintptr) (err error) { } func ResetEvent(event Handle) (err error) { - r1, _, e1 := syscall.Syscall(procResetEvent.Addr(), 1, uintptr(event), 0, 0) + r1, _, e1 := syscall.SyscallN(procResetEvent.Addr(), uintptr(event)) if r1 == 0 { err = errnoErr(e1) } @@ -3083,7 +3139,7 @@ func ResetEvent(event Handle) (err error) { } func resizePseudoConsole(pconsole Handle, size uint32) (hr error) { - r0, _, _ := syscall.Syscall(procResizePseudoConsole.Addr(), 2, uintptr(pconsole), uintptr(size), 0) + r0, _, _ := syscall.SyscallN(procResizePseudoConsole.Addr(), uintptr(pconsole), uintptr(size)) if r0 != 0 { hr = syscall.Errno(r0) } @@ -3091,7 +3147,7 @@ func resizePseudoConsole(pconsole Handle, size uint32) (hr error) { } func ResumeThread(thread Handle) (ret uint32, err error) { - r0, _, e1 := syscall.Syscall(procResumeThread.Addr(), 1, uintptr(thread), 0, 0) + r0, _, e1 := syscall.SyscallN(procResumeThread.Addr(), uintptr(thread)) ret = uint32(r0) if ret == 0xffffffff { err = errnoErr(e1) @@ -3100,7 +3156,7 @@ func ResumeThread(thread Handle) (ret uint32, err error) { } func SetCommBreak(handle Handle) (err error) { - r1, _, e1 := syscall.Syscall(procSetCommBreak.Addr(), 1, uintptr(handle), 0, 0) + r1, _, e1 := syscall.SyscallN(procSetCommBreak.Addr(), uintptr(handle)) if r1 == 0 { err = errnoErr(e1) } @@ -3108,7 +3164,7 @@ func SetCommBreak(handle Handle) (err error) { } func SetCommMask(handle Handle, dwEvtMask uint32) (err error) { - r1, _, e1 := syscall.Syscall(procSetCommMask.Addr(), 2, uintptr(handle), uintptr(dwEvtMask), 0) + r1, _, e1 := syscall.SyscallN(procSetCommMask.Addr(), uintptr(handle), uintptr(dwEvtMask)) if r1 == 0 { err = errnoErr(e1) } @@ -3116,7 +3172,7 @@ func SetCommMask(handle Handle, dwEvtMask uint32) (err error) { } 
func SetCommState(handle Handle, lpDCB *DCB) (err error) { - r1, _, e1 := syscall.Syscall(procSetCommState.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(lpDCB)), 0) + r1, _, e1 := syscall.SyscallN(procSetCommState.Addr(), uintptr(handle), uintptr(unsafe.Pointer(lpDCB))) if r1 == 0 { err = errnoErr(e1) } @@ -3124,7 +3180,7 @@ func SetCommState(handle Handle, lpDCB *DCB) (err error) { } func SetCommTimeouts(handle Handle, timeouts *CommTimeouts) (err error) { - r1, _, e1 := syscall.Syscall(procSetCommTimeouts.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(timeouts)), 0) + r1, _, e1 := syscall.SyscallN(procSetCommTimeouts.Addr(), uintptr(handle), uintptr(unsafe.Pointer(timeouts))) if r1 == 0 { err = errnoErr(e1) } @@ -3132,7 +3188,7 @@ func SetCommTimeouts(handle Handle, timeouts *CommTimeouts) (err error) { } func SetConsoleCP(cp uint32) (err error) { - r1, _, e1 := syscall.Syscall(procSetConsoleCP.Addr(), 1, uintptr(cp), 0, 0) + r1, _, e1 := syscall.SyscallN(procSetConsoleCP.Addr(), uintptr(cp)) if r1 == 0 { err = errnoErr(e1) } @@ -3140,7 +3196,7 @@ func SetConsoleCP(cp uint32) (err error) { } func setConsoleCursorPosition(console Handle, position uint32) (err error) { - r1, _, e1 := syscall.Syscall(procSetConsoleCursorPosition.Addr(), 2, uintptr(console), uintptr(position), 0) + r1, _, e1 := syscall.SyscallN(procSetConsoleCursorPosition.Addr(), uintptr(console), uintptr(position)) if r1 == 0 { err = errnoErr(e1) } @@ -3148,7 +3204,7 @@ func setConsoleCursorPosition(console Handle, position uint32) (err error) { } func SetConsoleMode(console Handle, mode uint32) (err error) { - r1, _, e1 := syscall.Syscall(procSetConsoleMode.Addr(), 2, uintptr(console), uintptr(mode), 0) + r1, _, e1 := syscall.SyscallN(procSetConsoleMode.Addr(), uintptr(console), uintptr(mode)) if r1 == 0 { err = errnoErr(e1) } @@ -3156,7 +3212,7 @@ func SetConsoleMode(console Handle, mode uint32) (err error) { } func SetConsoleOutputCP(cp uint32) (err error) { - r1, _, e1 := 
syscall.Syscall(procSetConsoleOutputCP.Addr(), 1, uintptr(cp), 0, 0) + r1, _, e1 := syscall.SyscallN(procSetConsoleOutputCP.Addr(), uintptr(cp)) if r1 == 0 { err = errnoErr(e1) } @@ -3164,7 +3220,7 @@ func SetConsoleOutputCP(cp uint32) (err error) { } func SetCurrentDirectory(path *uint16) (err error) { - r1, _, e1 := syscall.Syscall(procSetCurrentDirectoryW.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0) + r1, _, e1 := syscall.SyscallN(procSetCurrentDirectoryW.Addr(), uintptr(unsafe.Pointer(path))) if r1 == 0 { err = errnoErr(e1) } @@ -3172,7 +3228,7 @@ func SetCurrentDirectory(path *uint16) (err error) { } func SetDefaultDllDirectories(directoryFlags uint32) (err error) { - r1, _, e1 := syscall.Syscall(procSetDefaultDllDirectories.Addr(), 1, uintptr(directoryFlags), 0, 0) + r1, _, e1 := syscall.SyscallN(procSetDefaultDllDirectories.Addr(), uintptr(directoryFlags)) if r1 == 0 { err = errnoErr(e1) } @@ -3189,7 +3245,7 @@ func SetDllDirectory(path string) (err error) { } func _SetDllDirectory(path *uint16) (err error) { - r1, _, e1 := syscall.Syscall(procSetDllDirectoryW.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0) + r1, _, e1 := syscall.SyscallN(procSetDllDirectoryW.Addr(), uintptr(unsafe.Pointer(path))) if r1 == 0 { err = errnoErr(e1) } @@ -3197,7 +3253,7 @@ func _SetDllDirectory(path *uint16) (err error) { } func SetEndOfFile(handle Handle) (err error) { - r1, _, e1 := syscall.Syscall(procSetEndOfFile.Addr(), 1, uintptr(handle), 0, 0) + r1, _, e1 := syscall.SyscallN(procSetEndOfFile.Addr(), uintptr(handle)) if r1 == 0 { err = errnoErr(e1) } @@ -3205,7 +3261,7 @@ func SetEndOfFile(handle Handle) (err error) { } func SetEnvironmentVariable(name *uint16, value *uint16) (err error) { - r1, _, e1 := syscall.Syscall(procSetEnvironmentVariableW.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(value)), 0) + r1, _, e1 := syscall.SyscallN(procSetEnvironmentVariableW.Addr(), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(value))) if r1 == 0 { err = 
errnoErr(e1) } @@ -3213,13 +3269,13 @@ func SetEnvironmentVariable(name *uint16, value *uint16) (err error) { } func SetErrorMode(mode uint32) (ret uint32) { - r0, _, _ := syscall.Syscall(procSetErrorMode.Addr(), 1, uintptr(mode), 0, 0) + r0, _, _ := syscall.SyscallN(procSetErrorMode.Addr(), uintptr(mode)) ret = uint32(r0) return } func SetEvent(event Handle) (err error) { - r1, _, e1 := syscall.Syscall(procSetEvent.Addr(), 1, uintptr(event), 0, 0) + r1, _, e1 := syscall.SyscallN(procSetEvent.Addr(), uintptr(event)) if r1 == 0 { err = errnoErr(e1) } @@ -3227,7 +3283,7 @@ func SetEvent(event Handle) (err error) { } func SetFileAttributes(name *uint16, attrs uint32) (err error) { - r1, _, e1 := syscall.Syscall(procSetFileAttributesW.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(attrs), 0) + r1, _, e1 := syscall.SyscallN(procSetFileAttributesW.Addr(), uintptr(unsafe.Pointer(name)), uintptr(attrs)) if r1 == 0 { err = errnoErr(e1) } @@ -3235,7 +3291,7 @@ func SetFileAttributes(name *uint16, attrs uint32) (err error) { } func SetFileCompletionNotificationModes(handle Handle, flags uint8) (err error) { - r1, _, e1 := syscall.Syscall(procSetFileCompletionNotificationModes.Addr(), 2, uintptr(handle), uintptr(flags), 0) + r1, _, e1 := syscall.SyscallN(procSetFileCompletionNotificationModes.Addr(), uintptr(handle), uintptr(flags)) if r1 == 0 { err = errnoErr(e1) } @@ -3243,7 +3299,7 @@ func SetFileCompletionNotificationModes(handle Handle, flags uint8) (err error) } func SetFileInformationByHandle(handle Handle, class uint32, inBuffer *byte, inBufferLen uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procSetFileInformationByHandle.Addr(), 4, uintptr(handle), uintptr(class), uintptr(unsafe.Pointer(inBuffer)), uintptr(inBufferLen), 0, 0) + r1, _, e1 := syscall.SyscallN(procSetFileInformationByHandle.Addr(), uintptr(handle), uintptr(class), uintptr(unsafe.Pointer(inBuffer)), uintptr(inBufferLen)) if r1 == 0 { err = errnoErr(e1) } @@ -3251,7 +3307,7 @@ func 
SetFileInformationByHandle(handle Handle, class uint32, inBuffer *byte, inB } func SetFilePointer(handle Handle, lowoffset int32, highoffsetptr *int32, whence uint32) (newlowoffset uint32, err error) { - r0, _, e1 := syscall.Syscall6(procSetFilePointer.Addr(), 4, uintptr(handle), uintptr(lowoffset), uintptr(unsafe.Pointer(highoffsetptr)), uintptr(whence), 0, 0) + r0, _, e1 := syscall.SyscallN(procSetFilePointer.Addr(), uintptr(handle), uintptr(lowoffset), uintptr(unsafe.Pointer(highoffsetptr)), uintptr(whence)) newlowoffset = uint32(r0) if newlowoffset == 0xffffffff { err = errnoErr(e1) @@ -3260,7 +3316,7 @@ func SetFilePointer(handle Handle, lowoffset int32, highoffsetptr *int32, whence } func SetFileTime(handle Handle, ctime *Filetime, atime *Filetime, wtime *Filetime) (err error) { - r1, _, e1 := syscall.Syscall6(procSetFileTime.Addr(), 4, uintptr(handle), uintptr(unsafe.Pointer(ctime)), uintptr(unsafe.Pointer(atime)), uintptr(unsafe.Pointer(wtime)), 0, 0) + r1, _, e1 := syscall.SyscallN(procSetFileTime.Addr(), uintptr(handle), uintptr(unsafe.Pointer(ctime)), uintptr(unsafe.Pointer(atime)), uintptr(unsafe.Pointer(wtime))) if r1 == 0 { err = errnoErr(e1) } @@ -3268,7 +3324,7 @@ func SetFileTime(handle Handle, ctime *Filetime, atime *Filetime, wtime *Filetim } func SetFileValidData(handle Handle, validDataLength int64) (err error) { - r1, _, e1 := syscall.Syscall(procSetFileValidData.Addr(), 2, uintptr(handle), uintptr(validDataLength), 0) + r1, _, e1 := syscall.SyscallN(procSetFileValidData.Addr(), uintptr(handle), uintptr(validDataLength)) if r1 == 0 { err = errnoErr(e1) } @@ -3276,7 +3332,7 @@ func SetFileValidData(handle Handle, validDataLength int64) (err error) { } func SetHandleInformation(handle Handle, mask uint32, flags uint32) (err error) { - r1, _, e1 := syscall.Syscall(procSetHandleInformation.Addr(), 3, uintptr(handle), uintptr(mask), uintptr(flags)) + r1, _, e1 := syscall.SyscallN(procSetHandleInformation.Addr(), uintptr(handle), uintptr(mask), 
uintptr(flags)) if r1 == 0 { err = errnoErr(e1) } @@ -3284,7 +3340,7 @@ func SetHandleInformation(handle Handle, mask uint32, flags uint32) (err error) } func SetInformationJobObject(job Handle, JobObjectInformationClass uint32, JobObjectInformation uintptr, JobObjectInformationLength uint32) (ret int, err error) { - r0, _, e1 := syscall.Syscall6(procSetInformationJobObject.Addr(), 4, uintptr(job), uintptr(JobObjectInformationClass), uintptr(JobObjectInformation), uintptr(JobObjectInformationLength), 0, 0) + r0, _, e1 := syscall.SyscallN(procSetInformationJobObject.Addr(), uintptr(job), uintptr(JobObjectInformationClass), uintptr(JobObjectInformation), uintptr(JobObjectInformationLength)) ret = int(r0) if ret == 0 { err = errnoErr(e1) @@ -3293,7 +3349,7 @@ func SetInformationJobObject(job Handle, JobObjectInformationClass uint32, JobOb } func SetNamedPipeHandleState(pipe Handle, state *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procSetNamedPipeHandleState.Addr(), 4, uintptr(pipe), uintptr(unsafe.Pointer(state)), uintptr(unsafe.Pointer(maxCollectionCount)), uintptr(unsafe.Pointer(collectDataTimeout)), 0, 0) + r1, _, e1 := syscall.SyscallN(procSetNamedPipeHandleState.Addr(), uintptr(pipe), uintptr(unsafe.Pointer(state)), uintptr(unsafe.Pointer(maxCollectionCount)), uintptr(unsafe.Pointer(collectDataTimeout))) if r1 == 0 { err = errnoErr(e1) } @@ -3301,7 +3357,7 @@ func SetNamedPipeHandleState(pipe Handle, state *uint32, maxCollectionCount *uin } func SetPriorityClass(process Handle, priorityClass uint32) (err error) { - r1, _, e1 := syscall.Syscall(procSetPriorityClass.Addr(), 2, uintptr(process), uintptr(priorityClass), 0) + r1, _, e1 := syscall.SyscallN(procSetPriorityClass.Addr(), uintptr(process), uintptr(priorityClass)) if r1 == 0 { err = errnoErr(e1) } @@ -3313,7 +3369,7 @@ func SetProcessPriorityBoost(process Handle, disable bool) (err error) { if disable { _p0 = 1 } - r1, _, e1 := 
syscall.Syscall(procSetProcessPriorityBoost.Addr(), 2, uintptr(process), uintptr(_p0), 0) + r1, _, e1 := syscall.SyscallN(procSetProcessPriorityBoost.Addr(), uintptr(process), uintptr(_p0)) if r1 == 0 { err = errnoErr(e1) } @@ -3321,7 +3377,7 @@ func SetProcessPriorityBoost(process Handle, disable bool) (err error) { } func SetProcessShutdownParameters(level uint32, flags uint32) (err error) { - r1, _, e1 := syscall.Syscall(procSetProcessShutdownParameters.Addr(), 2, uintptr(level), uintptr(flags), 0) + r1, _, e1 := syscall.SyscallN(procSetProcessShutdownParameters.Addr(), uintptr(level), uintptr(flags)) if r1 == 0 { err = errnoErr(e1) } @@ -3329,7 +3385,7 @@ func SetProcessShutdownParameters(level uint32, flags uint32) (err error) { } func SetProcessWorkingSetSizeEx(hProcess Handle, dwMinimumWorkingSetSize uintptr, dwMaximumWorkingSetSize uintptr, flags uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procSetProcessWorkingSetSizeEx.Addr(), 4, uintptr(hProcess), uintptr(dwMinimumWorkingSetSize), uintptr(dwMaximumWorkingSetSize), uintptr(flags), 0, 0) + r1, _, e1 := syscall.SyscallN(procSetProcessWorkingSetSizeEx.Addr(), uintptr(hProcess), uintptr(dwMinimumWorkingSetSize), uintptr(dwMaximumWorkingSetSize), uintptr(flags)) if r1 == 0 { err = errnoErr(e1) } @@ -3337,7 +3393,7 @@ func SetProcessWorkingSetSizeEx(hProcess Handle, dwMinimumWorkingSetSize uintptr } func SetStdHandle(stdhandle uint32, handle Handle) (err error) { - r1, _, e1 := syscall.Syscall(procSetStdHandle.Addr(), 2, uintptr(stdhandle), uintptr(handle), 0) + r1, _, e1 := syscall.SyscallN(procSetStdHandle.Addr(), uintptr(stdhandle), uintptr(handle)) if r1 == 0 { err = errnoErr(e1) } @@ -3345,7 +3401,7 @@ func SetStdHandle(stdhandle uint32, handle Handle) (err error) { } func SetVolumeLabel(rootPathName *uint16, volumeName *uint16) (err error) { - r1, _, e1 := syscall.Syscall(procSetVolumeLabelW.Addr(), 2, uintptr(unsafe.Pointer(rootPathName)), uintptr(unsafe.Pointer(volumeName)), 0) + r1, _, e1 := 
syscall.SyscallN(procSetVolumeLabelW.Addr(), uintptr(unsafe.Pointer(rootPathName)), uintptr(unsafe.Pointer(volumeName))) if r1 == 0 { err = errnoErr(e1) } @@ -3353,7 +3409,7 @@ func SetVolumeLabel(rootPathName *uint16, volumeName *uint16) (err error) { } func SetVolumeMountPoint(volumeMountPoint *uint16, volumeName *uint16) (err error) { - r1, _, e1 := syscall.Syscall(procSetVolumeMountPointW.Addr(), 2, uintptr(unsafe.Pointer(volumeMountPoint)), uintptr(unsafe.Pointer(volumeName)), 0) + r1, _, e1 := syscall.SyscallN(procSetVolumeMountPointW.Addr(), uintptr(unsafe.Pointer(volumeMountPoint)), uintptr(unsafe.Pointer(volumeName))) if r1 == 0 { err = errnoErr(e1) } @@ -3361,7 +3417,7 @@ func SetVolumeMountPoint(volumeMountPoint *uint16, volumeName *uint16) (err erro } func SetupComm(handle Handle, dwInQueue uint32, dwOutQueue uint32) (err error) { - r1, _, e1 := syscall.Syscall(procSetupComm.Addr(), 3, uintptr(handle), uintptr(dwInQueue), uintptr(dwOutQueue)) + r1, _, e1 := syscall.SyscallN(procSetupComm.Addr(), uintptr(handle), uintptr(dwInQueue), uintptr(dwOutQueue)) if r1 == 0 { err = errnoErr(e1) } @@ -3369,7 +3425,7 @@ func SetupComm(handle Handle, dwInQueue uint32, dwOutQueue uint32) (err error) { } func SizeofResource(module Handle, resInfo Handle) (size uint32, err error) { - r0, _, e1 := syscall.Syscall(procSizeofResource.Addr(), 2, uintptr(module), uintptr(resInfo), 0) + r0, _, e1 := syscall.SyscallN(procSizeofResource.Addr(), uintptr(module), uintptr(resInfo)) size = uint32(r0) if size == 0 { err = errnoErr(e1) @@ -3382,13 +3438,13 @@ func SleepEx(milliseconds uint32, alertable bool) (ret uint32) { if alertable { _p0 = 1 } - r0, _, _ := syscall.Syscall(procSleepEx.Addr(), 2, uintptr(milliseconds), uintptr(_p0), 0) + r0, _, _ := syscall.SyscallN(procSleepEx.Addr(), uintptr(milliseconds), uintptr(_p0)) ret = uint32(r0) return } func TerminateJobObject(job Handle, exitCode uint32) (err error) { - r1, _, e1 := syscall.Syscall(procTerminateJobObject.Addr(), 2, 
uintptr(job), uintptr(exitCode), 0) + r1, _, e1 := syscall.SyscallN(procTerminateJobObject.Addr(), uintptr(job), uintptr(exitCode)) if r1 == 0 { err = errnoErr(e1) } @@ -3396,7 +3452,7 @@ func TerminateJobObject(job Handle, exitCode uint32) (err error) { } func TerminateProcess(handle Handle, exitcode uint32) (err error) { - r1, _, e1 := syscall.Syscall(procTerminateProcess.Addr(), 2, uintptr(handle), uintptr(exitcode), 0) + r1, _, e1 := syscall.SyscallN(procTerminateProcess.Addr(), uintptr(handle), uintptr(exitcode)) if r1 == 0 { err = errnoErr(e1) } @@ -3404,7 +3460,7 @@ func TerminateProcess(handle Handle, exitcode uint32) (err error) { } func Thread32First(snapshot Handle, threadEntry *ThreadEntry32) (err error) { - r1, _, e1 := syscall.Syscall(procThread32First.Addr(), 2, uintptr(snapshot), uintptr(unsafe.Pointer(threadEntry)), 0) + r1, _, e1 := syscall.SyscallN(procThread32First.Addr(), uintptr(snapshot), uintptr(unsafe.Pointer(threadEntry))) if r1 == 0 { err = errnoErr(e1) } @@ -3412,7 +3468,7 @@ func Thread32First(snapshot Handle, threadEntry *ThreadEntry32) (err error) { } func Thread32Next(snapshot Handle, threadEntry *ThreadEntry32) (err error) { - r1, _, e1 := syscall.Syscall(procThread32Next.Addr(), 2, uintptr(snapshot), uintptr(unsafe.Pointer(threadEntry)), 0) + r1, _, e1 := syscall.SyscallN(procThread32Next.Addr(), uintptr(snapshot), uintptr(unsafe.Pointer(threadEntry))) if r1 == 0 { err = errnoErr(e1) } @@ -3420,7 +3476,7 @@ func Thread32Next(snapshot Handle, threadEntry *ThreadEntry32) (err error) { } func UnlockFileEx(file Handle, reserved uint32, bytesLow uint32, bytesHigh uint32, overlapped *Overlapped) (err error) { - r1, _, e1 := syscall.Syscall6(procUnlockFileEx.Addr(), 5, uintptr(file), uintptr(reserved), uintptr(bytesLow), uintptr(bytesHigh), uintptr(unsafe.Pointer(overlapped)), 0) + r1, _, e1 := syscall.SyscallN(procUnlockFileEx.Addr(), uintptr(file), uintptr(reserved), uintptr(bytesLow), uintptr(bytesHigh), 
uintptr(unsafe.Pointer(overlapped))) if r1 == 0 { err = errnoErr(e1) } @@ -3428,7 +3484,7 @@ func UnlockFileEx(file Handle, reserved uint32, bytesLow uint32, bytesHigh uint3 } func UnmapViewOfFile(addr uintptr) (err error) { - r1, _, e1 := syscall.Syscall(procUnmapViewOfFile.Addr(), 1, uintptr(addr), 0, 0) + r1, _, e1 := syscall.SyscallN(procUnmapViewOfFile.Addr(), uintptr(addr)) if r1 == 0 { err = errnoErr(e1) } @@ -3436,7 +3492,7 @@ func UnmapViewOfFile(addr uintptr) (err error) { } func updateProcThreadAttribute(attrlist *ProcThreadAttributeList, flags uint32, attr uintptr, value unsafe.Pointer, size uintptr, prevvalue unsafe.Pointer, returnedsize *uintptr) (err error) { - r1, _, e1 := syscall.Syscall9(procUpdateProcThreadAttribute.Addr(), 7, uintptr(unsafe.Pointer(attrlist)), uintptr(flags), uintptr(attr), uintptr(value), uintptr(size), uintptr(prevvalue), uintptr(unsafe.Pointer(returnedsize)), 0, 0) + r1, _, e1 := syscall.SyscallN(procUpdateProcThreadAttribute.Addr(), uintptr(unsafe.Pointer(attrlist)), uintptr(flags), uintptr(attr), uintptr(value), uintptr(size), uintptr(prevvalue), uintptr(unsafe.Pointer(returnedsize))) if r1 == 0 { err = errnoErr(e1) } @@ -3444,7 +3500,7 @@ func updateProcThreadAttribute(attrlist *ProcThreadAttributeList, flags uint32, } func VirtualAlloc(address uintptr, size uintptr, alloctype uint32, protect uint32) (value uintptr, err error) { - r0, _, e1 := syscall.Syscall6(procVirtualAlloc.Addr(), 4, uintptr(address), uintptr(size), uintptr(alloctype), uintptr(protect), 0, 0) + r0, _, e1 := syscall.SyscallN(procVirtualAlloc.Addr(), uintptr(address), uintptr(size), uintptr(alloctype), uintptr(protect)) value = uintptr(r0) if value == 0 { err = errnoErr(e1) @@ -3453,7 +3509,7 @@ func VirtualAlloc(address uintptr, size uintptr, alloctype uint32, protect uint3 } func VirtualFree(address uintptr, size uintptr, freetype uint32) (err error) { - r1, _, e1 := syscall.Syscall(procVirtualFree.Addr(), 3, uintptr(address), uintptr(size), 
uintptr(freetype)) + r1, _, e1 := syscall.SyscallN(procVirtualFree.Addr(), uintptr(address), uintptr(size), uintptr(freetype)) if r1 == 0 { err = errnoErr(e1) } @@ -3461,7 +3517,7 @@ func VirtualFree(address uintptr, size uintptr, freetype uint32) (err error) { } func VirtualLock(addr uintptr, length uintptr) (err error) { - r1, _, e1 := syscall.Syscall(procVirtualLock.Addr(), 2, uintptr(addr), uintptr(length), 0) + r1, _, e1 := syscall.SyscallN(procVirtualLock.Addr(), uintptr(addr), uintptr(length)) if r1 == 0 { err = errnoErr(e1) } @@ -3469,7 +3525,7 @@ func VirtualLock(addr uintptr, length uintptr) (err error) { } func VirtualProtect(address uintptr, size uintptr, newprotect uint32, oldprotect *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procVirtualProtect.Addr(), 4, uintptr(address), uintptr(size), uintptr(newprotect), uintptr(unsafe.Pointer(oldprotect)), 0, 0) + r1, _, e1 := syscall.SyscallN(procVirtualProtect.Addr(), uintptr(address), uintptr(size), uintptr(newprotect), uintptr(unsafe.Pointer(oldprotect))) if r1 == 0 { err = errnoErr(e1) } @@ -3477,7 +3533,7 @@ func VirtualProtect(address uintptr, size uintptr, newprotect uint32, oldprotect } func VirtualProtectEx(process Handle, address uintptr, size uintptr, newProtect uint32, oldProtect *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procVirtualProtectEx.Addr(), 5, uintptr(process), uintptr(address), uintptr(size), uintptr(newProtect), uintptr(unsafe.Pointer(oldProtect)), 0) + r1, _, e1 := syscall.SyscallN(procVirtualProtectEx.Addr(), uintptr(process), uintptr(address), uintptr(size), uintptr(newProtect), uintptr(unsafe.Pointer(oldProtect))) if r1 == 0 { err = errnoErr(e1) } @@ -3485,7 +3541,7 @@ func VirtualProtectEx(process Handle, address uintptr, size uintptr, newProtect } func VirtualQuery(address uintptr, buffer *MemoryBasicInformation, length uintptr) (err error) { - r1, _, e1 := syscall.Syscall(procVirtualQuery.Addr(), 3, uintptr(address), uintptr(unsafe.Pointer(buffer)), 
uintptr(length)) + r1, _, e1 := syscall.SyscallN(procVirtualQuery.Addr(), uintptr(address), uintptr(unsafe.Pointer(buffer)), uintptr(length)) if r1 == 0 { err = errnoErr(e1) } @@ -3493,7 +3549,7 @@ func VirtualQuery(address uintptr, buffer *MemoryBasicInformation, length uintpt } func VirtualQueryEx(process Handle, address uintptr, buffer *MemoryBasicInformation, length uintptr) (err error) { - r1, _, e1 := syscall.Syscall6(procVirtualQueryEx.Addr(), 4, uintptr(process), uintptr(address), uintptr(unsafe.Pointer(buffer)), uintptr(length), 0, 0) + r1, _, e1 := syscall.SyscallN(procVirtualQueryEx.Addr(), uintptr(process), uintptr(address), uintptr(unsafe.Pointer(buffer)), uintptr(length)) if r1 == 0 { err = errnoErr(e1) } @@ -3501,7 +3557,7 @@ func VirtualQueryEx(process Handle, address uintptr, buffer *MemoryBasicInformat } func VirtualUnlock(addr uintptr, length uintptr) (err error) { - r1, _, e1 := syscall.Syscall(procVirtualUnlock.Addr(), 2, uintptr(addr), uintptr(length), 0) + r1, _, e1 := syscall.SyscallN(procVirtualUnlock.Addr(), uintptr(addr), uintptr(length)) if r1 == 0 { err = errnoErr(e1) } @@ -3509,13 +3565,13 @@ func VirtualUnlock(addr uintptr, length uintptr) (err error) { } func WTSGetActiveConsoleSessionId() (sessionID uint32) { - r0, _, _ := syscall.Syscall(procWTSGetActiveConsoleSessionId.Addr(), 0, 0, 0, 0) + r0, _, _ := syscall.SyscallN(procWTSGetActiveConsoleSessionId.Addr()) sessionID = uint32(r0) return } func WaitCommEvent(handle Handle, lpEvtMask *uint32, lpOverlapped *Overlapped) (err error) { - r1, _, e1 := syscall.Syscall(procWaitCommEvent.Addr(), 3, uintptr(handle), uintptr(unsafe.Pointer(lpEvtMask)), uintptr(unsafe.Pointer(lpOverlapped))) + r1, _, e1 := syscall.SyscallN(procWaitCommEvent.Addr(), uintptr(handle), uintptr(unsafe.Pointer(lpEvtMask)), uintptr(unsafe.Pointer(lpOverlapped))) if r1 == 0 { err = errnoErr(e1) } @@ -3527,7 +3583,7 @@ func waitForMultipleObjects(count uint32, handles uintptr, waitAll bool, waitMil if waitAll { _p0 = 
1 } - r0, _, e1 := syscall.Syscall6(procWaitForMultipleObjects.Addr(), 4, uintptr(count), uintptr(handles), uintptr(_p0), uintptr(waitMilliseconds), 0, 0) + r0, _, e1 := syscall.SyscallN(procWaitForMultipleObjects.Addr(), uintptr(count), uintptr(handles), uintptr(_p0), uintptr(waitMilliseconds)) event = uint32(r0) if event == 0xffffffff { err = errnoErr(e1) @@ -3536,7 +3592,7 @@ func waitForMultipleObjects(count uint32, handles uintptr, waitAll bool, waitMil } func WaitForSingleObject(handle Handle, waitMilliseconds uint32) (event uint32, err error) { - r0, _, e1 := syscall.Syscall(procWaitForSingleObject.Addr(), 2, uintptr(handle), uintptr(waitMilliseconds), 0) + r0, _, e1 := syscall.SyscallN(procWaitForSingleObject.Addr(), uintptr(handle), uintptr(waitMilliseconds)) event = uint32(r0) if event == 0xffffffff { err = errnoErr(e1) @@ -3545,7 +3601,7 @@ func WaitForSingleObject(handle Handle, waitMilliseconds uint32) (event uint32, } func WriteConsole(console Handle, buf *uint16, towrite uint32, written *uint32, reserved *byte) (err error) { - r1, _, e1 := syscall.Syscall6(procWriteConsoleW.Addr(), 5, uintptr(console), uintptr(unsafe.Pointer(buf)), uintptr(towrite), uintptr(unsafe.Pointer(written)), uintptr(unsafe.Pointer(reserved)), 0) + r1, _, e1 := syscall.SyscallN(procWriteConsoleW.Addr(), uintptr(console), uintptr(unsafe.Pointer(buf)), uintptr(towrite), uintptr(unsafe.Pointer(written)), uintptr(unsafe.Pointer(reserved))) if r1 == 0 { err = errnoErr(e1) } @@ -3557,7 +3613,7 @@ func writeFile(handle Handle, buf []byte, done *uint32, overlapped *Overlapped) if len(buf) > 0 { _p0 = &buf[0] } - r1, _, e1 := syscall.Syscall6(procWriteFile.Addr(), 5, uintptr(handle), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(unsafe.Pointer(done)), uintptr(unsafe.Pointer(overlapped)), 0) + r1, _, e1 := syscall.SyscallN(procWriteFile.Addr(), uintptr(handle), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(unsafe.Pointer(done)), uintptr(unsafe.Pointer(overlapped))) 
if r1 == 0 { err = errnoErr(e1) } @@ -3565,7 +3621,7 @@ func writeFile(handle Handle, buf []byte, done *uint32, overlapped *Overlapped) } func WriteProcessMemory(process Handle, baseAddress uintptr, buffer *byte, size uintptr, numberOfBytesWritten *uintptr) (err error) { - r1, _, e1 := syscall.Syscall6(procWriteProcessMemory.Addr(), 5, uintptr(process), uintptr(baseAddress), uintptr(unsafe.Pointer(buffer)), uintptr(size), uintptr(unsafe.Pointer(numberOfBytesWritten)), 0) + r1, _, e1 := syscall.SyscallN(procWriteProcessMemory.Addr(), uintptr(process), uintptr(baseAddress), uintptr(unsafe.Pointer(buffer)), uintptr(size), uintptr(unsafe.Pointer(numberOfBytesWritten))) if r1 == 0 { err = errnoErr(e1) } @@ -3573,7 +3629,7 @@ func WriteProcessMemory(process Handle, baseAddress uintptr, buffer *byte, size } func AcceptEx(ls Handle, as Handle, buf *byte, rxdatalen uint32, laddrlen uint32, raddrlen uint32, recvd *uint32, overlapped *Overlapped) (err error) { - r1, _, e1 := syscall.Syscall9(procAcceptEx.Addr(), 8, uintptr(ls), uintptr(as), uintptr(unsafe.Pointer(buf)), uintptr(rxdatalen), uintptr(laddrlen), uintptr(raddrlen), uintptr(unsafe.Pointer(recvd)), uintptr(unsafe.Pointer(overlapped)), 0) + r1, _, e1 := syscall.SyscallN(procAcceptEx.Addr(), uintptr(ls), uintptr(as), uintptr(unsafe.Pointer(buf)), uintptr(rxdatalen), uintptr(laddrlen), uintptr(raddrlen), uintptr(unsafe.Pointer(recvd)), uintptr(unsafe.Pointer(overlapped))) if r1 == 0 { err = errnoErr(e1) } @@ -3581,12 +3637,12 @@ func AcceptEx(ls Handle, as Handle, buf *byte, rxdatalen uint32, laddrlen uint32 } func GetAcceptExSockaddrs(buf *byte, rxdatalen uint32, laddrlen uint32, raddrlen uint32, lrsa **RawSockaddrAny, lrsalen *int32, rrsa **RawSockaddrAny, rrsalen *int32) { - syscall.Syscall9(procGetAcceptExSockaddrs.Addr(), 8, uintptr(unsafe.Pointer(buf)), uintptr(rxdatalen), uintptr(laddrlen), uintptr(raddrlen), uintptr(unsafe.Pointer(lrsa)), uintptr(unsafe.Pointer(lrsalen)), uintptr(unsafe.Pointer(rrsa)), 
uintptr(unsafe.Pointer(rrsalen)), 0) + syscall.SyscallN(procGetAcceptExSockaddrs.Addr(), uintptr(unsafe.Pointer(buf)), uintptr(rxdatalen), uintptr(laddrlen), uintptr(raddrlen), uintptr(unsafe.Pointer(lrsa)), uintptr(unsafe.Pointer(lrsalen)), uintptr(unsafe.Pointer(rrsa)), uintptr(unsafe.Pointer(rrsalen))) return } func TransmitFile(s Handle, handle Handle, bytesToWrite uint32, bytsPerSend uint32, overlapped *Overlapped, transmitFileBuf *TransmitFileBuffers, flags uint32) (err error) { - r1, _, e1 := syscall.Syscall9(procTransmitFile.Addr(), 7, uintptr(s), uintptr(handle), uintptr(bytesToWrite), uintptr(bytsPerSend), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(transmitFileBuf)), uintptr(flags), 0, 0) + r1, _, e1 := syscall.SyscallN(procTransmitFile.Addr(), uintptr(s), uintptr(handle), uintptr(bytesToWrite), uintptr(bytsPerSend), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(transmitFileBuf)), uintptr(flags)) if r1 == 0 { err = errnoErr(e1) } @@ -3594,7 +3650,7 @@ func TransmitFile(s Handle, handle Handle, bytesToWrite uint32, bytsPerSend uint } func NetApiBufferFree(buf *byte) (neterr error) { - r0, _, _ := syscall.Syscall(procNetApiBufferFree.Addr(), 1, uintptr(unsafe.Pointer(buf)), 0, 0) + r0, _, _ := syscall.SyscallN(procNetApiBufferFree.Addr(), uintptr(unsafe.Pointer(buf))) if r0 != 0 { neterr = syscall.Errno(r0) } @@ -3602,7 +3658,7 @@ func NetApiBufferFree(buf *byte) (neterr error) { } func NetGetJoinInformation(server *uint16, name **uint16, bufType *uint32) (neterr error) { - r0, _, _ := syscall.Syscall(procNetGetJoinInformation.Addr(), 3, uintptr(unsafe.Pointer(server)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(bufType))) + r0, _, _ := syscall.SyscallN(procNetGetJoinInformation.Addr(), uintptr(unsafe.Pointer(server)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(bufType))) if r0 != 0 { neterr = syscall.Errno(r0) } @@ -3610,7 +3666,7 @@ func NetGetJoinInformation(server *uint16, name **uint16, bufType 
*uint32) (nete } func NetUserEnum(serverName *uint16, level uint32, filter uint32, buf **byte, prefMaxLen uint32, entriesRead *uint32, totalEntries *uint32, resumeHandle *uint32) (neterr error) { - r0, _, _ := syscall.Syscall9(procNetUserEnum.Addr(), 8, uintptr(unsafe.Pointer(serverName)), uintptr(level), uintptr(filter), uintptr(unsafe.Pointer(buf)), uintptr(prefMaxLen), uintptr(unsafe.Pointer(entriesRead)), uintptr(unsafe.Pointer(totalEntries)), uintptr(unsafe.Pointer(resumeHandle)), 0) + r0, _, _ := syscall.SyscallN(procNetUserEnum.Addr(), uintptr(unsafe.Pointer(serverName)), uintptr(level), uintptr(filter), uintptr(unsafe.Pointer(buf)), uintptr(prefMaxLen), uintptr(unsafe.Pointer(entriesRead)), uintptr(unsafe.Pointer(totalEntries)), uintptr(unsafe.Pointer(resumeHandle))) if r0 != 0 { neterr = syscall.Errno(r0) } @@ -3618,7 +3674,7 @@ func NetUserEnum(serverName *uint16, level uint32, filter uint32, buf **byte, pr } func NetUserGetInfo(serverName *uint16, userName *uint16, level uint32, buf **byte) (neterr error) { - r0, _, _ := syscall.Syscall6(procNetUserGetInfo.Addr(), 4, uintptr(unsafe.Pointer(serverName)), uintptr(unsafe.Pointer(userName)), uintptr(level), uintptr(unsafe.Pointer(buf)), 0, 0) + r0, _, _ := syscall.SyscallN(procNetUserGetInfo.Addr(), uintptr(unsafe.Pointer(serverName)), uintptr(unsafe.Pointer(userName)), uintptr(level), uintptr(unsafe.Pointer(buf))) if r0 != 0 { neterr = syscall.Errno(r0) } @@ -3626,7 +3682,7 @@ func NetUserGetInfo(serverName *uint16, userName *uint16, level uint32, buf **by } func NtCreateFile(handle *Handle, access uint32, oa *OBJECT_ATTRIBUTES, iosb *IO_STATUS_BLOCK, allocationSize *int64, attributes uint32, share uint32, disposition uint32, options uint32, eabuffer uintptr, ealength uint32) (ntstatus error) { - r0, _, _ := syscall.Syscall12(procNtCreateFile.Addr(), 11, uintptr(unsafe.Pointer(handle)), uintptr(access), uintptr(unsafe.Pointer(oa)), uintptr(unsafe.Pointer(iosb)), uintptr(unsafe.Pointer(allocationSize)), 
uintptr(attributes), uintptr(share), uintptr(disposition), uintptr(options), uintptr(eabuffer), uintptr(ealength), 0) + r0, _, _ := syscall.SyscallN(procNtCreateFile.Addr(), uintptr(unsafe.Pointer(handle)), uintptr(access), uintptr(unsafe.Pointer(oa)), uintptr(unsafe.Pointer(iosb)), uintptr(unsafe.Pointer(allocationSize)), uintptr(attributes), uintptr(share), uintptr(disposition), uintptr(options), uintptr(eabuffer), uintptr(ealength)) if r0 != 0 { ntstatus = NTStatus(r0) } @@ -3634,7 +3690,7 @@ func NtCreateFile(handle *Handle, access uint32, oa *OBJECT_ATTRIBUTES, iosb *IO } func NtCreateNamedPipeFile(pipe *Handle, access uint32, oa *OBJECT_ATTRIBUTES, iosb *IO_STATUS_BLOCK, share uint32, disposition uint32, options uint32, typ uint32, readMode uint32, completionMode uint32, maxInstances uint32, inboundQuota uint32, outputQuota uint32, timeout *int64) (ntstatus error) { - r0, _, _ := syscall.Syscall15(procNtCreateNamedPipeFile.Addr(), 14, uintptr(unsafe.Pointer(pipe)), uintptr(access), uintptr(unsafe.Pointer(oa)), uintptr(unsafe.Pointer(iosb)), uintptr(share), uintptr(disposition), uintptr(options), uintptr(typ), uintptr(readMode), uintptr(completionMode), uintptr(maxInstances), uintptr(inboundQuota), uintptr(outputQuota), uintptr(unsafe.Pointer(timeout)), 0) + r0, _, _ := syscall.SyscallN(procNtCreateNamedPipeFile.Addr(), uintptr(unsafe.Pointer(pipe)), uintptr(access), uintptr(unsafe.Pointer(oa)), uintptr(unsafe.Pointer(iosb)), uintptr(share), uintptr(disposition), uintptr(options), uintptr(typ), uintptr(readMode), uintptr(completionMode), uintptr(maxInstances), uintptr(inboundQuota), uintptr(outputQuota), uintptr(unsafe.Pointer(timeout))) if r0 != 0 { ntstatus = NTStatus(r0) } @@ -3642,7 +3698,7 @@ func NtCreateNamedPipeFile(pipe *Handle, access uint32, oa *OBJECT_ATTRIBUTES, i } func NtQueryInformationProcess(proc Handle, procInfoClass int32, procInfo unsafe.Pointer, procInfoLen uint32, retLen *uint32) (ntstatus error) { - r0, _, _ := 
syscall.Syscall6(procNtQueryInformationProcess.Addr(), 5, uintptr(proc), uintptr(procInfoClass), uintptr(procInfo), uintptr(procInfoLen), uintptr(unsafe.Pointer(retLen)), 0) + r0, _, _ := syscall.SyscallN(procNtQueryInformationProcess.Addr(), uintptr(proc), uintptr(procInfoClass), uintptr(procInfo), uintptr(procInfoLen), uintptr(unsafe.Pointer(retLen))) if r0 != 0 { ntstatus = NTStatus(r0) } @@ -3650,7 +3706,7 @@ func NtQueryInformationProcess(proc Handle, procInfoClass int32, procInfo unsafe } func NtQuerySystemInformation(sysInfoClass int32, sysInfo unsafe.Pointer, sysInfoLen uint32, retLen *uint32) (ntstatus error) { - r0, _, _ := syscall.Syscall6(procNtQuerySystemInformation.Addr(), 4, uintptr(sysInfoClass), uintptr(sysInfo), uintptr(sysInfoLen), uintptr(unsafe.Pointer(retLen)), 0, 0) + r0, _, _ := syscall.SyscallN(procNtQuerySystemInformation.Addr(), uintptr(sysInfoClass), uintptr(sysInfo), uintptr(sysInfoLen), uintptr(unsafe.Pointer(retLen))) if r0 != 0 { ntstatus = NTStatus(r0) } @@ -3658,7 +3714,7 @@ func NtQuerySystemInformation(sysInfoClass int32, sysInfo unsafe.Pointer, sysInf } func NtSetInformationFile(handle Handle, iosb *IO_STATUS_BLOCK, inBuffer *byte, inBufferLen uint32, class uint32) (ntstatus error) { - r0, _, _ := syscall.Syscall6(procNtSetInformationFile.Addr(), 5, uintptr(handle), uintptr(unsafe.Pointer(iosb)), uintptr(unsafe.Pointer(inBuffer)), uintptr(inBufferLen), uintptr(class), 0) + r0, _, _ := syscall.SyscallN(procNtSetInformationFile.Addr(), uintptr(handle), uintptr(unsafe.Pointer(iosb)), uintptr(unsafe.Pointer(inBuffer)), uintptr(inBufferLen), uintptr(class)) if r0 != 0 { ntstatus = NTStatus(r0) } @@ -3666,7 +3722,7 @@ func NtSetInformationFile(handle Handle, iosb *IO_STATUS_BLOCK, inBuffer *byte, } func NtSetInformationProcess(proc Handle, procInfoClass int32, procInfo unsafe.Pointer, procInfoLen uint32) (ntstatus error) { - r0, _, _ := syscall.Syscall6(procNtSetInformationProcess.Addr(), 4, uintptr(proc), uintptr(procInfoClass), 
uintptr(procInfo), uintptr(procInfoLen), 0, 0) + r0, _, _ := syscall.SyscallN(procNtSetInformationProcess.Addr(), uintptr(proc), uintptr(procInfoClass), uintptr(procInfo), uintptr(procInfoLen)) if r0 != 0 { ntstatus = NTStatus(r0) } @@ -3674,7 +3730,7 @@ func NtSetInformationProcess(proc Handle, procInfoClass int32, procInfo unsafe.P } func NtSetSystemInformation(sysInfoClass int32, sysInfo unsafe.Pointer, sysInfoLen uint32) (ntstatus error) { - r0, _, _ := syscall.Syscall(procNtSetSystemInformation.Addr(), 3, uintptr(sysInfoClass), uintptr(sysInfo), uintptr(sysInfoLen)) + r0, _, _ := syscall.SyscallN(procNtSetSystemInformation.Addr(), uintptr(sysInfoClass), uintptr(sysInfo), uintptr(sysInfoLen)) if r0 != 0 { ntstatus = NTStatus(r0) } @@ -3682,13 +3738,13 @@ func NtSetSystemInformation(sysInfoClass int32, sysInfo unsafe.Pointer, sysInfoL } func RtlAddFunctionTable(functionTable *RUNTIME_FUNCTION, entryCount uint32, baseAddress uintptr) (ret bool) { - r0, _, _ := syscall.Syscall(procRtlAddFunctionTable.Addr(), 3, uintptr(unsafe.Pointer(functionTable)), uintptr(entryCount), uintptr(baseAddress)) + r0, _, _ := syscall.SyscallN(procRtlAddFunctionTable.Addr(), uintptr(unsafe.Pointer(functionTable)), uintptr(entryCount), uintptr(baseAddress)) ret = r0 != 0 return } func RtlDefaultNpAcl(acl **ACL) (ntstatus error) { - r0, _, _ := syscall.Syscall(procRtlDefaultNpAcl.Addr(), 1, uintptr(unsafe.Pointer(acl)), 0, 0) + r0, _, _ := syscall.SyscallN(procRtlDefaultNpAcl.Addr(), uintptr(unsafe.Pointer(acl))) if r0 != 0 { ntstatus = NTStatus(r0) } @@ -3696,13 +3752,13 @@ func RtlDefaultNpAcl(acl **ACL) (ntstatus error) { } func RtlDeleteFunctionTable(functionTable *RUNTIME_FUNCTION) (ret bool) { - r0, _, _ := syscall.Syscall(procRtlDeleteFunctionTable.Addr(), 1, uintptr(unsafe.Pointer(functionTable)), 0, 0) + r0, _, _ := syscall.SyscallN(procRtlDeleteFunctionTable.Addr(), uintptr(unsafe.Pointer(functionTable))) ret = r0 != 0 return } func RtlDosPathNameToNtPathName(dosName *uint16, 
ntName *NTUnicodeString, ntFileNamePart *uint16, relativeName *RTL_RELATIVE_NAME) (ntstatus error) { - r0, _, _ := syscall.Syscall6(procRtlDosPathNameToNtPathName_U_WithStatus.Addr(), 4, uintptr(unsafe.Pointer(dosName)), uintptr(unsafe.Pointer(ntName)), uintptr(unsafe.Pointer(ntFileNamePart)), uintptr(unsafe.Pointer(relativeName)), 0, 0) + r0, _, _ := syscall.SyscallN(procRtlDosPathNameToNtPathName_U_WithStatus.Addr(), uintptr(unsafe.Pointer(dosName)), uintptr(unsafe.Pointer(ntName)), uintptr(unsafe.Pointer(ntFileNamePart)), uintptr(unsafe.Pointer(relativeName))) if r0 != 0 { ntstatus = NTStatus(r0) } @@ -3710,7 +3766,7 @@ func RtlDosPathNameToNtPathName(dosName *uint16, ntName *NTUnicodeString, ntFile } func RtlDosPathNameToRelativeNtPathName(dosName *uint16, ntName *NTUnicodeString, ntFileNamePart *uint16, relativeName *RTL_RELATIVE_NAME) (ntstatus error) { - r0, _, _ := syscall.Syscall6(procRtlDosPathNameToRelativeNtPathName_U_WithStatus.Addr(), 4, uintptr(unsafe.Pointer(dosName)), uintptr(unsafe.Pointer(ntName)), uintptr(unsafe.Pointer(ntFileNamePart)), uintptr(unsafe.Pointer(relativeName)), 0, 0) + r0, _, _ := syscall.SyscallN(procRtlDosPathNameToRelativeNtPathName_U_WithStatus.Addr(), uintptr(unsafe.Pointer(dosName)), uintptr(unsafe.Pointer(ntName)), uintptr(unsafe.Pointer(ntFileNamePart)), uintptr(unsafe.Pointer(relativeName))) if r0 != 0 { ntstatus = NTStatus(r0) } @@ -3718,18 +3774,18 @@ func RtlDosPathNameToRelativeNtPathName(dosName *uint16, ntName *NTUnicodeString } func RtlGetCurrentPeb() (peb *PEB) { - r0, _, _ := syscall.Syscall(procRtlGetCurrentPeb.Addr(), 0, 0, 0, 0) + r0, _, _ := syscall.SyscallN(procRtlGetCurrentPeb.Addr()) peb = (*PEB)(unsafe.Pointer(r0)) return } func rtlGetNtVersionNumbers(majorVersion *uint32, minorVersion *uint32, buildNumber *uint32) { - syscall.Syscall(procRtlGetNtVersionNumbers.Addr(), 3, uintptr(unsafe.Pointer(majorVersion)), uintptr(unsafe.Pointer(minorVersion)), uintptr(unsafe.Pointer(buildNumber))) + 
syscall.SyscallN(procRtlGetNtVersionNumbers.Addr(), uintptr(unsafe.Pointer(majorVersion)), uintptr(unsafe.Pointer(minorVersion)), uintptr(unsafe.Pointer(buildNumber))) return } func rtlGetVersion(info *OsVersionInfoEx) (ntstatus error) { - r0, _, _ := syscall.Syscall(procRtlGetVersion.Addr(), 1, uintptr(unsafe.Pointer(info)), 0, 0) + r0, _, _ := syscall.SyscallN(procRtlGetVersion.Addr(), uintptr(unsafe.Pointer(info))) if r0 != 0 { ntstatus = NTStatus(r0) } @@ -3737,23 +3793,23 @@ func rtlGetVersion(info *OsVersionInfoEx) (ntstatus error) { } func RtlInitString(destinationString *NTString, sourceString *byte) { - syscall.Syscall(procRtlInitString.Addr(), 2, uintptr(unsafe.Pointer(destinationString)), uintptr(unsafe.Pointer(sourceString)), 0) + syscall.SyscallN(procRtlInitString.Addr(), uintptr(unsafe.Pointer(destinationString)), uintptr(unsafe.Pointer(sourceString))) return } func RtlInitUnicodeString(destinationString *NTUnicodeString, sourceString *uint16) { - syscall.Syscall(procRtlInitUnicodeString.Addr(), 2, uintptr(unsafe.Pointer(destinationString)), uintptr(unsafe.Pointer(sourceString)), 0) + syscall.SyscallN(procRtlInitUnicodeString.Addr(), uintptr(unsafe.Pointer(destinationString)), uintptr(unsafe.Pointer(sourceString))) return } func rtlNtStatusToDosErrorNoTeb(ntstatus NTStatus) (ret syscall.Errno) { - r0, _, _ := syscall.Syscall(procRtlNtStatusToDosErrorNoTeb.Addr(), 1, uintptr(ntstatus), 0, 0) + r0, _, _ := syscall.SyscallN(procRtlNtStatusToDosErrorNoTeb.Addr(), uintptr(ntstatus)) ret = syscall.Errno(r0) return } func clsidFromString(lpsz *uint16, pclsid *GUID) (ret error) { - r0, _, _ := syscall.Syscall(procCLSIDFromString.Addr(), 2, uintptr(unsafe.Pointer(lpsz)), uintptr(unsafe.Pointer(pclsid)), 0) + r0, _, _ := syscall.SyscallN(procCLSIDFromString.Addr(), uintptr(unsafe.Pointer(lpsz)), uintptr(unsafe.Pointer(pclsid))) if r0 != 0 { ret = syscall.Errno(r0) } @@ -3761,7 +3817,7 @@ func clsidFromString(lpsz *uint16, pclsid *GUID) (ret error) { } func 
coCreateGuid(pguid *GUID) (ret error) { - r0, _, _ := syscall.Syscall(procCoCreateGuid.Addr(), 1, uintptr(unsafe.Pointer(pguid)), 0, 0) + r0, _, _ := syscall.SyscallN(procCoCreateGuid.Addr(), uintptr(unsafe.Pointer(pguid))) if r0 != 0 { ret = syscall.Errno(r0) } @@ -3769,7 +3825,7 @@ func coCreateGuid(pguid *GUID) (ret error) { } func CoGetObject(name *uint16, bindOpts *BIND_OPTS3, guid *GUID, functionTable **uintptr) (ret error) { - r0, _, _ := syscall.Syscall6(procCoGetObject.Addr(), 4, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(bindOpts)), uintptr(unsafe.Pointer(guid)), uintptr(unsafe.Pointer(functionTable)), 0, 0) + r0, _, _ := syscall.SyscallN(procCoGetObject.Addr(), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(bindOpts)), uintptr(unsafe.Pointer(guid)), uintptr(unsafe.Pointer(functionTable))) if r0 != 0 { ret = syscall.Errno(r0) } @@ -3777,7 +3833,7 @@ func CoGetObject(name *uint16, bindOpts *BIND_OPTS3, guid *GUID, functionTable * } func CoInitializeEx(reserved uintptr, coInit uint32) (ret error) { - r0, _, _ := syscall.Syscall(procCoInitializeEx.Addr(), 2, uintptr(reserved), uintptr(coInit), 0) + r0, _, _ := syscall.SyscallN(procCoInitializeEx.Addr(), uintptr(reserved), uintptr(coInit)) if r0 != 0 { ret = syscall.Errno(r0) } @@ -3785,23 +3841,23 @@ func CoInitializeEx(reserved uintptr, coInit uint32) (ret error) { } func CoTaskMemFree(address unsafe.Pointer) { - syscall.Syscall(procCoTaskMemFree.Addr(), 1, uintptr(address), 0, 0) + syscall.SyscallN(procCoTaskMemFree.Addr(), uintptr(address)) return } func CoUninitialize() { - syscall.Syscall(procCoUninitialize.Addr(), 0, 0, 0, 0) + syscall.SyscallN(procCoUninitialize.Addr()) return } func stringFromGUID2(rguid *GUID, lpsz *uint16, cchMax int32) (chars int32) { - r0, _, _ := syscall.Syscall(procStringFromGUID2.Addr(), 3, uintptr(unsafe.Pointer(rguid)), uintptr(unsafe.Pointer(lpsz)), uintptr(cchMax)) + r0, _, _ := syscall.SyscallN(procStringFromGUID2.Addr(), uintptr(unsafe.Pointer(rguid)), 
uintptr(unsafe.Pointer(lpsz)), uintptr(cchMax)) chars = int32(r0) return } func EnumProcessModules(process Handle, module *Handle, cb uint32, cbNeeded *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procEnumProcessModules.Addr(), 4, uintptr(process), uintptr(unsafe.Pointer(module)), uintptr(cb), uintptr(unsafe.Pointer(cbNeeded)), 0, 0) + r1, _, e1 := syscall.SyscallN(procEnumProcessModules.Addr(), uintptr(process), uintptr(unsafe.Pointer(module)), uintptr(cb), uintptr(unsafe.Pointer(cbNeeded))) if r1 == 0 { err = errnoErr(e1) } @@ -3809,7 +3865,7 @@ func EnumProcessModules(process Handle, module *Handle, cb uint32, cbNeeded *uin } func EnumProcessModulesEx(process Handle, module *Handle, cb uint32, cbNeeded *uint32, filterFlag uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procEnumProcessModulesEx.Addr(), 5, uintptr(process), uintptr(unsafe.Pointer(module)), uintptr(cb), uintptr(unsafe.Pointer(cbNeeded)), uintptr(filterFlag), 0) + r1, _, e1 := syscall.SyscallN(procEnumProcessModulesEx.Addr(), uintptr(process), uintptr(unsafe.Pointer(module)), uintptr(cb), uintptr(unsafe.Pointer(cbNeeded)), uintptr(filterFlag)) if r1 == 0 { err = errnoErr(e1) } @@ -3817,7 +3873,7 @@ func EnumProcessModulesEx(process Handle, module *Handle, cb uint32, cbNeeded *u } func enumProcesses(processIds *uint32, nSize uint32, bytesReturned *uint32) (err error) { - r1, _, e1 := syscall.Syscall(procEnumProcesses.Addr(), 3, uintptr(unsafe.Pointer(processIds)), uintptr(nSize), uintptr(unsafe.Pointer(bytesReturned))) + r1, _, e1 := syscall.SyscallN(procEnumProcesses.Addr(), uintptr(unsafe.Pointer(processIds)), uintptr(nSize), uintptr(unsafe.Pointer(bytesReturned))) if r1 == 0 { err = errnoErr(e1) } @@ -3825,7 +3881,7 @@ func enumProcesses(processIds *uint32, nSize uint32, bytesReturned *uint32) (err } func GetModuleBaseName(process Handle, module Handle, baseName *uint16, size uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procGetModuleBaseNameW.Addr(), 4, uintptr(process), 
uintptr(module), uintptr(unsafe.Pointer(baseName)), uintptr(size), 0, 0) + r1, _, e1 := syscall.SyscallN(procGetModuleBaseNameW.Addr(), uintptr(process), uintptr(module), uintptr(unsafe.Pointer(baseName)), uintptr(size)) if r1 == 0 { err = errnoErr(e1) } @@ -3833,7 +3889,7 @@ func GetModuleBaseName(process Handle, module Handle, baseName *uint16, size uin } func GetModuleFileNameEx(process Handle, module Handle, filename *uint16, size uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procGetModuleFileNameExW.Addr(), 4, uintptr(process), uintptr(module), uintptr(unsafe.Pointer(filename)), uintptr(size), 0, 0) + r1, _, e1 := syscall.SyscallN(procGetModuleFileNameExW.Addr(), uintptr(process), uintptr(module), uintptr(unsafe.Pointer(filename)), uintptr(size)) if r1 == 0 { err = errnoErr(e1) } @@ -3841,7 +3897,7 @@ func GetModuleFileNameEx(process Handle, module Handle, filename *uint16, size u } func GetModuleInformation(process Handle, module Handle, modinfo *ModuleInfo, cb uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procGetModuleInformation.Addr(), 4, uintptr(process), uintptr(module), uintptr(unsafe.Pointer(modinfo)), uintptr(cb), 0, 0) + r1, _, e1 := syscall.SyscallN(procGetModuleInformation.Addr(), uintptr(process), uintptr(module), uintptr(unsafe.Pointer(modinfo)), uintptr(cb)) if r1 == 0 { err = errnoErr(e1) } @@ -3849,7 +3905,7 @@ func GetModuleInformation(process Handle, module Handle, modinfo *ModuleInfo, cb } func QueryWorkingSetEx(process Handle, pv uintptr, cb uint32) (err error) { - r1, _, e1 := syscall.Syscall(procQueryWorkingSetEx.Addr(), 3, uintptr(process), uintptr(pv), uintptr(cb)) + r1, _, e1 := syscall.SyscallN(procQueryWorkingSetEx.Addr(), uintptr(process), uintptr(pv), uintptr(cb)) if r1 == 0 { err = errnoErr(e1) } @@ -3861,7 +3917,7 @@ func SubscribeServiceChangeNotifications(service Handle, eventType uint32, callb if ret != nil { return } - r0, _, _ := syscall.Syscall6(procSubscribeServiceChangeNotifications.Addr(), 5, 
uintptr(service), uintptr(eventType), uintptr(callback), uintptr(callbackCtx), uintptr(unsafe.Pointer(subscription)), 0) + r0, _, _ := syscall.SyscallN(procSubscribeServiceChangeNotifications.Addr(), uintptr(service), uintptr(eventType), uintptr(callback), uintptr(callbackCtx), uintptr(unsafe.Pointer(subscription))) if r0 != 0 { ret = syscall.Errno(r0) } @@ -3873,12 +3929,12 @@ func UnsubscribeServiceChangeNotifications(subscription uintptr) (err error) { if err != nil { return } - syscall.Syscall(procUnsubscribeServiceChangeNotifications.Addr(), 1, uintptr(subscription), 0, 0) + syscall.SyscallN(procUnsubscribeServiceChangeNotifications.Addr(), uintptr(subscription)) return } func GetUserNameEx(nameFormat uint32, nameBuffre *uint16, nSize *uint32) (err error) { - r1, _, e1 := syscall.Syscall(procGetUserNameExW.Addr(), 3, uintptr(nameFormat), uintptr(unsafe.Pointer(nameBuffre)), uintptr(unsafe.Pointer(nSize))) + r1, _, e1 := syscall.SyscallN(procGetUserNameExW.Addr(), uintptr(nameFormat), uintptr(unsafe.Pointer(nameBuffre)), uintptr(unsafe.Pointer(nSize))) if r1&0xff == 0 { err = errnoErr(e1) } @@ -3886,7 +3942,7 @@ func GetUserNameEx(nameFormat uint32, nameBuffre *uint16, nSize *uint32) (err er } func TranslateName(accName *uint16, accNameFormat uint32, desiredNameFormat uint32, translatedName *uint16, nSize *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procTranslateNameW.Addr(), 5, uintptr(unsafe.Pointer(accName)), uintptr(accNameFormat), uintptr(desiredNameFormat), uintptr(unsafe.Pointer(translatedName)), uintptr(unsafe.Pointer(nSize)), 0) + r1, _, e1 := syscall.SyscallN(procTranslateNameW.Addr(), uintptr(unsafe.Pointer(accName)), uintptr(accNameFormat), uintptr(desiredNameFormat), uintptr(unsafe.Pointer(translatedName)), uintptr(unsafe.Pointer(nSize))) if r1&0xff == 0 { err = errnoErr(e1) } @@ -3894,7 +3950,7 @@ func TranslateName(accName *uint16, accNameFormat uint32, desiredNameFormat uint } func SetupDiBuildDriverInfoList(deviceInfoSet DevInfo, 
deviceInfoData *DevInfoData, driverType SPDIT) (err error) { - r1, _, e1 := syscall.Syscall(procSetupDiBuildDriverInfoList.Addr(), 3, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(driverType)) + r1, _, e1 := syscall.SyscallN(procSetupDiBuildDriverInfoList.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(driverType)) if r1 == 0 { err = errnoErr(e1) } @@ -3902,7 +3958,7 @@ func SetupDiBuildDriverInfoList(deviceInfoSet DevInfo, deviceInfoData *DevInfoDa } func SetupDiCallClassInstaller(installFunction DI_FUNCTION, deviceInfoSet DevInfo, deviceInfoData *DevInfoData) (err error) { - r1, _, e1 := syscall.Syscall(procSetupDiCallClassInstaller.Addr(), 3, uintptr(installFunction), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData))) + r1, _, e1 := syscall.SyscallN(procSetupDiCallClassInstaller.Addr(), uintptr(installFunction), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData))) if r1 == 0 { err = errnoErr(e1) } @@ -3910,7 +3966,7 @@ func SetupDiCallClassInstaller(installFunction DI_FUNCTION, deviceInfoSet DevInf } func SetupDiCancelDriverInfoSearch(deviceInfoSet DevInfo) (err error) { - r1, _, e1 := syscall.Syscall(procSetupDiCancelDriverInfoSearch.Addr(), 1, uintptr(deviceInfoSet), 0, 0) + r1, _, e1 := syscall.SyscallN(procSetupDiCancelDriverInfoSearch.Addr(), uintptr(deviceInfoSet)) if r1 == 0 { err = errnoErr(e1) } @@ -3918,7 +3974,7 @@ func SetupDiCancelDriverInfoSearch(deviceInfoSet DevInfo) (err error) { } func setupDiClassGuidsFromNameEx(className *uint16, classGuidList *GUID, classGuidListSize uint32, requiredSize *uint32, machineName *uint16, reserved uintptr) (err error) { - r1, _, e1 := syscall.Syscall6(procSetupDiClassGuidsFromNameExW.Addr(), 6, uintptr(unsafe.Pointer(className)), uintptr(unsafe.Pointer(classGuidList)), uintptr(classGuidListSize), uintptr(unsafe.Pointer(requiredSize)), uintptr(unsafe.Pointer(machineName)), uintptr(reserved)) + r1, _, e1 := 
syscall.SyscallN(procSetupDiClassGuidsFromNameExW.Addr(), uintptr(unsafe.Pointer(className)), uintptr(unsafe.Pointer(classGuidList)), uintptr(classGuidListSize), uintptr(unsafe.Pointer(requiredSize)), uintptr(unsafe.Pointer(machineName)), uintptr(reserved)) if r1 == 0 { err = errnoErr(e1) } @@ -3926,7 +3982,7 @@ func setupDiClassGuidsFromNameEx(className *uint16, classGuidList *GUID, classGu } func setupDiClassNameFromGuidEx(classGUID *GUID, className *uint16, classNameSize uint32, requiredSize *uint32, machineName *uint16, reserved uintptr) (err error) { - r1, _, e1 := syscall.Syscall6(procSetupDiClassNameFromGuidExW.Addr(), 6, uintptr(unsafe.Pointer(classGUID)), uintptr(unsafe.Pointer(className)), uintptr(classNameSize), uintptr(unsafe.Pointer(requiredSize)), uintptr(unsafe.Pointer(machineName)), uintptr(reserved)) + r1, _, e1 := syscall.SyscallN(procSetupDiClassNameFromGuidExW.Addr(), uintptr(unsafe.Pointer(classGUID)), uintptr(unsafe.Pointer(className)), uintptr(classNameSize), uintptr(unsafe.Pointer(requiredSize)), uintptr(unsafe.Pointer(machineName)), uintptr(reserved)) if r1 == 0 { err = errnoErr(e1) } @@ -3934,7 +3990,7 @@ func setupDiClassNameFromGuidEx(classGUID *GUID, className *uint16, classNameSiz } func setupDiCreateDeviceInfoListEx(classGUID *GUID, hwndParent uintptr, machineName *uint16, reserved uintptr) (handle DevInfo, err error) { - r0, _, e1 := syscall.Syscall6(procSetupDiCreateDeviceInfoListExW.Addr(), 4, uintptr(unsafe.Pointer(classGUID)), uintptr(hwndParent), uintptr(unsafe.Pointer(machineName)), uintptr(reserved), 0, 0) + r0, _, e1 := syscall.SyscallN(procSetupDiCreateDeviceInfoListExW.Addr(), uintptr(unsafe.Pointer(classGUID)), uintptr(hwndParent), uintptr(unsafe.Pointer(machineName)), uintptr(reserved)) handle = DevInfo(r0) if handle == DevInfo(InvalidHandle) { err = errnoErr(e1) @@ -3943,7 +3999,7 @@ func setupDiCreateDeviceInfoListEx(classGUID *GUID, hwndParent uintptr, machineN } func setupDiCreateDeviceInfo(deviceInfoSet DevInfo, 
DeviceName *uint16, classGUID *GUID, DeviceDescription *uint16, hwndParent uintptr, CreationFlags DICD, deviceInfoData *DevInfoData) (err error) { - r1, _, e1 := syscall.Syscall9(procSetupDiCreateDeviceInfoW.Addr(), 7, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(DeviceName)), uintptr(unsafe.Pointer(classGUID)), uintptr(unsafe.Pointer(DeviceDescription)), uintptr(hwndParent), uintptr(CreationFlags), uintptr(unsafe.Pointer(deviceInfoData)), 0, 0) + r1, _, e1 := syscall.SyscallN(procSetupDiCreateDeviceInfoW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(DeviceName)), uintptr(unsafe.Pointer(classGUID)), uintptr(unsafe.Pointer(DeviceDescription)), uintptr(hwndParent), uintptr(CreationFlags), uintptr(unsafe.Pointer(deviceInfoData))) if r1 == 0 { err = errnoErr(e1) } @@ -3951,7 +4007,7 @@ func setupDiCreateDeviceInfo(deviceInfoSet DevInfo, DeviceName *uint16, classGUI } func SetupDiDestroyDeviceInfoList(deviceInfoSet DevInfo) (err error) { - r1, _, e1 := syscall.Syscall(procSetupDiDestroyDeviceInfoList.Addr(), 1, uintptr(deviceInfoSet), 0, 0) + r1, _, e1 := syscall.SyscallN(procSetupDiDestroyDeviceInfoList.Addr(), uintptr(deviceInfoSet)) if r1 == 0 { err = errnoErr(e1) } @@ -3959,7 +4015,7 @@ func SetupDiDestroyDeviceInfoList(deviceInfoSet DevInfo) (err error) { } func SetupDiDestroyDriverInfoList(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, driverType SPDIT) (err error) { - r1, _, e1 := syscall.Syscall(procSetupDiDestroyDriverInfoList.Addr(), 3, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(driverType)) + r1, _, e1 := syscall.SyscallN(procSetupDiDestroyDriverInfoList.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(driverType)) if r1 == 0 { err = errnoErr(e1) } @@ -3967,7 +4023,7 @@ func SetupDiDestroyDriverInfoList(deviceInfoSet DevInfo, deviceInfoData *DevInfo } func setupDiEnumDeviceInfo(deviceInfoSet DevInfo, memberIndex uint32, deviceInfoData *DevInfoData) (err error) { - r1, _, e1 := 
syscall.Syscall(procSetupDiEnumDeviceInfo.Addr(), 3, uintptr(deviceInfoSet), uintptr(memberIndex), uintptr(unsafe.Pointer(deviceInfoData))) + r1, _, e1 := syscall.SyscallN(procSetupDiEnumDeviceInfo.Addr(), uintptr(deviceInfoSet), uintptr(memberIndex), uintptr(unsafe.Pointer(deviceInfoData))) if r1 == 0 { err = errnoErr(e1) } @@ -3975,7 +4031,7 @@ func setupDiEnumDeviceInfo(deviceInfoSet DevInfo, memberIndex uint32, deviceInfo } func setupDiEnumDriverInfo(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, driverType SPDIT, memberIndex uint32, driverInfoData *DrvInfoData) (err error) { - r1, _, e1 := syscall.Syscall6(procSetupDiEnumDriverInfoW.Addr(), 5, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(driverType), uintptr(memberIndex), uintptr(unsafe.Pointer(driverInfoData)), 0) + r1, _, e1 := syscall.SyscallN(procSetupDiEnumDriverInfoW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(driverType), uintptr(memberIndex), uintptr(unsafe.Pointer(driverInfoData))) if r1 == 0 { err = errnoErr(e1) } @@ -3983,7 +4039,7 @@ func setupDiEnumDriverInfo(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, d } func setupDiGetClassDevsEx(classGUID *GUID, Enumerator *uint16, hwndParent uintptr, Flags DIGCF, deviceInfoSet DevInfo, machineName *uint16, reserved uintptr) (handle DevInfo, err error) { - r0, _, e1 := syscall.Syscall9(procSetupDiGetClassDevsExW.Addr(), 7, uintptr(unsafe.Pointer(classGUID)), uintptr(unsafe.Pointer(Enumerator)), uintptr(hwndParent), uintptr(Flags), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(machineName)), uintptr(reserved), 0, 0) + r0, _, e1 := syscall.SyscallN(procSetupDiGetClassDevsExW.Addr(), uintptr(unsafe.Pointer(classGUID)), uintptr(unsafe.Pointer(Enumerator)), uintptr(hwndParent), uintptr(Flags), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(machineName)), uintptr(reserved)) handle = DevInfo(r0) if handle == DevInfo(InvalidHandle) { err = errnoErr(e1) @@ -3992,7 +4048,7 @@ func 
setupDiGetClassDevsEx(classGUID *GUID, Enumerator *uint16, hwndParent uintp } func SetupDiGetClassInstallParams(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, classInstallParams *ClassInstallHeader, classInstallParamsSize uint32, requiredSize *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procSetupDiGetClassInstallParamsW.Addr(), 5, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(classInstallParams)), uintptr(classInstallParamsSize), uintptr(unsafe.Pointer(requiredSize)), 0) + r1, _, e1 := syscall.SyscallN(procSetupDiGetClassInstallParamsW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(classInstallParams)), uintptr(classInstallParamsSize), uintptr(unsafe.Pointer(requiredSize))) if r1 == 0 { err = errnoErr(e1) } @@ -4000,7 +4056,7 @@ func SetupDiGetClassInstallParams(deviceInfoSet DevInfo, deviceInfoData *DevInfo } func setupDiGetDeviceInfoListDetail(deviceInfoSet DevInfo, deviceInfoSetDetailData *DevInfoListDetailData) (err error) { - r1, _, e1 := syscall.Syscall(procSetupDiGetDeviceInfoListDetailW.Addr(), 2, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoSetDetailData)), 0) + r1, _, e1 := syscall.SyscallN(procSetupDiGetDeviceInfoListDetailW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoSetDetailData))) if r1 == 0 { err = errnoErr(e1) } @@ -4008,7 +4064,7 @@ func setupDiGetDeviceInfoListDetail(deviceInfoSet DevInfo, deviceInfoSetDetailDa } func setupDiGetDeviceInstallParams(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, deviceInstallParams *DevInstallParams) (err error) { - r1, _, e1 := syscall.Syscall(procSetupDiGetDeviceInstallParamsW.Addr(), 3, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(deviceInstallParams))) + r1, _, e1 := syscall.SyscallN(procSetupDiGetDeviceInstallParamsW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), 
uintptr(unsafe.Pointer(deviceInstallParams))) if r1 == 0 { err = errnoErr(e1) } @@ -4016,7 +4072,7 @@ func setupDiGetDeviceInstallParams(deviceInfoSet DevInfo, deviceInfoData *DevInf } func setupDiGetDeviceInstanceId(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, instanceId *uint16, instanceIdSize uint32, instanceIdRequiredSize *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procSetupDiGetDeviceInstanceIdW.Addr(), 5, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(instanceId)), uintptr(instanceIdSize), uintptr(unsafe.Pointer(instanceIdRequiredSize)), 0) + r1, _, e1 := syscall.SyscallN(procSetupDiGetDeviceInstanceIdW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(instanceId)), uintptr(instanceIdSize), uintptr(unsafe.Pointer(instanceIdRequiredSize))) if r1 == 0 { err = errnoErr(e1) } @@ -4024,7 +4080,7 @@ func setupDiGetDeviceInstanceId(deviceInfoSet DevInfo, deviceInfoData *DevInfoDa } func setupDiGetDeviceProperty(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, propertyKey *DEVPROPKEY, propertyType *DEVPROPTYPE, propertyBuffer *byte, propertyBufferSize uint32, requiredSize *uint32, flags uint32) (err error) { - r1, _, e1 := syscall.Syscall9(procSetupDiGetDevicePropertyW.Addr(), 8, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(propertyKey)), uintptr(unsafe.Pointer(propertyType)), uintptr(unsafe.Pointer(propertyBuffer)), uintptr(propertyBufferSize), uintptr(unsafe.Pointer(requiredSize)), uintptr(flags), 0) + r1, _, e1 := syscall.SyscallN(procSetupDiGetDevicePropertyW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(propertyKey)), uintptr(unsafe.Pointer(propertyType)), uintptr(unsafe.Pointer(propertyBuffer)), uintptr(propertyBufferSize), uintptr(unsafe.Pointer(requiredSize)), uintptr(flags)) if r1 == 0 { err = errnoErr(e1) } @@ -4032,7 +4088,7 @@ func 
setupDiGetDeviceProperty(deviceInfoSet DevInfo, deviceInfoData *DevInfoData } func setupDiGetDeviceRegistryProperty(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, property SPDRP, propertyRegDataType *uint32, propertyBuffer *byte, propertyBufferSize uint32, requiredSize *uint32) (err error) { - r1, _, e1 := syscall.Syscall9(procSetupDiGetDeviceRegistryPropertyW.Addr(), 7, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(property), uintptr(unsafe.Pointer(propertyRegDataType)), uintptr(unsafe.Pointer(propertyBuffer)), uintptr(propertyBufferSize), uintptr(unsafe.Pointer(requiredSize)), 0, 0) + r1, _, e1 := syscall.SyscallN(procSetupDiGetDeviceRegistryPropertyW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(property), uintptr(unsafe.Pointer(propertyRegDataType)), uintptr(unsafe.Pointer(propertyBuffer)), uintptr(propertyBufferSize), uintptr(unsafe.Pointer(requiredSize))) if r1 == 0 { err = errnoErr(e1) } @@ -4040,7 +4096,7 @@ func setupDiGetDeviceRegistryProperty(deviceInfoSet DevInfo, deviceInfoData *Dev } func setupDiGetDriverInfoDetail(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, driverInfoData *DrvInfoData, driverInfoDetailData *DrvInfoDetailData, driverInfoDetailDataSize uint32, requiredSize *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procSetupDiGetDriverInfoDetailW.Addr(), 6, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(driverInfoData)), uintptr(unsafe.Pointer(driverInfoDetailData)), uintptr(driverInfoDetailDataSize), uintptr(unsafe.Pointer(requiredSize))) + r1, _, e1 := syscall.SyscallN(procSetupDiGetDriverInfoDetailW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(driverInfoData)), uintptr(unsafe.Pointer(driverInfoDetailData)), uintptr(driverInfoDetailDataSize), uintptr(unsafe.Pointer(requiredSize))) if r1 == 0 { err = errnoErr(e1) } @@ -4048,7 +4104,7 @@ func 
setupDiGetDriverInfoDetail(deviceInfoSet DevInfo, deviceInfoData *DevInfoDa } func setupDiGetSelectedDevice(deviceInfoSet DevInfo, deviceInfoData *DevInfoData) (err error) { - r1, _, e1 := syscall.Syscall(procSetupDiGetSelectedDevice.Addr(), 2, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), 0) + r1, _, e1 := syscall.SyscallN(procSetupDiGetSelectedDevice.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData))) if r1 == 0 { err = errnoErr(e1) } @@ -4056,7 +4112,7 @@ func setupDiGetSelectedDevice(deviceInfoSet DevInfo, deviceInfoData *DevInfoData } func setupDiGetSelectedDriver(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, driverInfoData *DrvInfoData) (err error) { - r1, _, e1 := syscall.Syscall(procSetupDiGetSelectedDriverW.Addr(), 3, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(driverInfoData))) + r1, _, e1 := syscall.SyscallN(procSetupDiGetSelectedDriverW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(driverInfoData))) if r1 == 0 { err = errnoErr(e1) } @@ -4064,7 +4120,7 @@ func setupDiGetSelectedDriver(deviceInfoSet DevInfo, deviceInfoData *DevInfoData } func SetupDiOpenDevRegKey(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, Scope DICS_FLAG, HwProfile uint32, KeyType DIREG, samDesired uint32) (key Handle, err error) { - r0, _, e1 := syscall.Syscall6(procSetupDiOpenDevRegKey.Addr(), 6, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(Scope), uintptr(HwProfile), uintptr(KeyType), uintptr(samDesired)) + r0, _, e1 := syscall.SyscallN(procSetupDiOpenDevRegKey.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(Scope), uintptr(HwProfile), uintptr(KeyType), uintptr(samDesired)) key = Handle(r0) if key == InvalidHandle { err = errnoErr(e1) @@ -4073,7 +4129,7 @@ func SetupDiOpenDevRegKey(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, Sc } func 
SetupDiSetClassInstallParams(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, classInstallParams *ClassInstallHeader, classInstallParamsSize uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procSetupDiSetClassInstallParamsW.Addr(), 4, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(classInstallParams)), uintptr(classInstallParamsSize), 0, 0) + r1, _, e1 := syscall.SyscallN(procSetupDiSetClassInstallParamsW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(classInstallParams)), uintptr(classInstallParamsSize)) if r1 == 0 { err = errnoErr(e1) } @@ -4081,7 +4137,7 @@ func SetupDiSetClassInstallParams(deviceInfoSet DevInfo, deviceInfoData *DevInfo } func SetupDiSetDeviceInstallParams(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, deviceInstallParams *DevInstallParams) (err error) { - r1, _, e1 := syscall.Syscall(procSetupDiSetDeviceInstallParamsW.Addr(), 3, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(deviceInstallParams))) + r1, _, e1 := syscall.SyscallN(procSetupDiSetDeviceInstallParamsW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(deviceInstallParams))) if r1 == 0 { err = errnoErr(e1) } @@ -4089,7 +4145,7 @@ func SetupDiSetDeviceInstallParams(deviceInfoSet DevInfo, deviceInfoData *DevInf } func setupDiSetDeviceRegistryProperty(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, property SPDRP, propertyBuffer *byte, propertyBufferSize uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procSetupDiSetDeviceRegistryPropertyW.Addr(), 5, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(property), uintptr(unsafe.Pointer(propertyBuffer)), uintptr(propertyBufferSize), 0) + r1, _, e1 := syscall.SyscallN(procSetupDiSetDeviceRegistryPropertyW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(property), 
uintptr(unsafe.Pointer(propertyBuffer)), uintptr(propertyBufferSize)) if r1 == 0 { err = errnoErr(e1) } @@ -4097,7 +4153,7 @@ func setupDiSetDeviceRegistryProperty(deviceInfoSet DevInfo, deviceInfoData *Dev } func SetupDiSetSelectedDevice(deviceInfoSet DevInfo, deviceInfoData *DevInfoData) (err error) { - r1, _, e1 := syscall.Syscall(procSetupDiSetSelectedDevice.Addr(), 2, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), 0) + r1, _, e1 := syscall.SyscallN(procSetupDiSetSelectedDevice.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData))) if r1 == 0 { err = errnoErr(e1) } @@ -4105,7 +4161,7 @@ func SetupDiSetSelectedDevice(deviceInfoSet DevInfo, deviceInfoData *DevInfoData } func SetupDiSetSelectedDriver(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, driverInfoData *DrvInfoData) (err error) { - r1, _, e1 := syscall.Syscall(procSetupDiSetSelectedDriverW.Addr(), 3, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(driverInfoData))) + r1, _, e1 := syscall.SyscallN(procSetupDiSetSelectedDriverW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(driverInfoData))) if r1 == 0 { err = errnoErr(e1) } @@ -4113,7 +4169,7 @@ func SetupDiSetSelectedDriver(deviceInfoSet DevInfo, deviceInfoData *DevInfoData } func setupUninstallOEMInf(infFileName *uint16, flags SUOI, reserved uintptr) (err error) { - r1, _, e1 := syscall.Syscall(procSetupUninstallOEMInfW.Addr(), 3, uintptr(unsafe.Pointer(infFileName)), uintptr(flags), uintptr(reserved)) + r1, _, e1 := syscall.SyscallN(procSetupUninstallOEMInfW.Addr(), uintptr(unsafe.Pointer(infFileName)), uintptr(flags), uintptr(reserved)) if r1 == 0 { err = errnoErr(e1) } @@ -4121,7 +4177,7 @@ func setupUninstallOEMInf(infFileName *uint16, flags SUOI, reserved uintptr) (er } func commandLineToArgv(cmd *uint16, argc *int32) (argv **uint16, err error) { - r0, _, e1 := syscall.Syscall(procCommandLineToArgvW.Addr(), 2, 
uintptr(unsafe.Pointer(cmd)), uintptr(unsafe.Pointer(argc)), 0) + r0, _, e1 := syscall.SyscallN(procCommandLineToArgvW.Addr(), uintptr(unsafe.Pointer(cmd)), uintptr(unsafe.Pointer(argc))) argv = (**uint16)(unsafe.Pointer(r0)) if argv == nil { err = errnoErr(e1) @@ -4130,7 +4186,7 @@ func commandLineToArgv(cmd *uint16, argc *int32) (argv **uint16, err error) { } func shGetKnownFolderPath(id *KNOWNFOLDERID, flags uint32, token Token, path **uint16) (ret error) { - r0, _, _ := syscall.Syscall6(procSHGetKnownFolderPath.Addr(), 4, uintptr(unsafe.Pointer(id)), uintptr(flags), uintptr(token), uintptr(unsafe.Pointer(path)), 0, 0) + r0, _, _ := syscall.SyscallN(procSHGetKnownFolderPath.Addr(), uintptr(unsafe.Pointer(id)), uintptr(flags), uintptr(token), uintptr(unsafe.Pointer(path))) if r0 != 0 { ret = syscall.Errno(r0) } @@ -4138,7 +4194,7 @@ func shGetKnownFolderPath(id *KNOWNFOLDERID, flags uint32, token Token, path **u } func ShellExecute(hwnd Handle, verb *uint16, file *uint16, args *uint16, cwd *uint16, showCmd int32) (err error) { - r1, _, e1 := syscall.Syscall6(procShellExecuteW.Addr(), 6, uintptr(hwnd), uintptr(unsafe.Pointer(verb)), uintptr(unsafe.Pointer(file)), uintptr(unsafe.Pointer(args)), uintptr(unsafe.Pointer(cwd)), uintptr(showCmd)) + r1, _, e1 := syscall.SyscallN(procShellExecuteW.Addr(), uintptr(hwnd), uintptr(unsafe.Pointer(verb)), uintptr(unsafe.Pointer(file)), uintptr(unsafe.Pointer(args)), uintptr(unsafe.Pointer(cwd)), uintptr(showCmd)) if r1 <= 32 { err = errnoErr(e1) } @@ -4146,12 +4202,12 @@ func ShellExecute(hwnd Handle, verb *uint16, file *uint16, args *uint16, cwd *ui } func EnumChildWindows(hwnd HWND, enumFunc uintptr, param unsafe.Pointer) { - syscall.Syscall(procEnumChildWindows.Addr(), 3, uintptr(hwnd), uintptr(enumFunc), uintptr(param)) + syscall.SyscallN(procEnumChildWindows.Addr(), uintptr(hwnd), uintptr(enumFunc), uintptr(param)) return } func EnumWindows(enumFunc uintptr, param unsafe.Pointer) (err error) { - r1, _, e1 := 
syscall.Syscall(procEnumWindows.Addr(), 2, uintptr(enumFunc), uintptr(param), 0) + r1, _, e1 := syscall.SyscallN(procEnumWindows.Addr(), uintptr(enumFunc), uintptr(param)) if r1 == 0 { err = errnoErr(e1) } @@ -4159,7 +4215,7 @@ func EnumWindows(enumFunc uintptr, param unsafe.Pointer) (err error) { } func ExitWindowsEx(flags uint32, reason uint32) (err error) { - r1, _, e1 := syscall.Syscall(procExitWindowsEx.Addr(), 2, uintptr(flags), uintptr(reason), 0) + r1, _, e1 := syscall.SyscallN(procExitWindowsEx.Addr(), uintptr(flags), uintptr(reason)) if r1 == 0 { err = errnoErr(e1) } @@ -4167,7 +4223,7 @@ func ExitWindowsEx(flags uint32, reason uint32) (err error) { } func GetClassName(hwnd HWND, className *uint16, maxCount int32) (copied int32, err error) { - r0, _, e1 := syscall.Syscall(procGetClassNameW.Addr(), 3, uintptr(hwnd), uintptr(unsafe.Pointer(className)), uintptr(maxCount)) + r0, _, e1 := syscall.SyscallN(procGetClassNameW.Addr(), uintptr(hwnd), uintptr(unsafe.Pointer(className)), uintptr(maxCount)) copied = int32(r0) if copied == 0 { err = errnoErr(e1) @@ -4176,19 +4232,19 @@ func GetClassName(hwnd HWND, className *uint16, maxCount int32) (copied int32, e } func GetDesktopWindow() (hwnd HWND) { - r0, _, _ := syscall.Syscall(procGetDesktopWindow.Addr(), 0, 0, 0, 0) + r0, _, _ := syscall.SyscallN(procGetDesktopWindow.Addr()) hwnd = HWND(r0) return } func GetForegroundWindow() (hwnd HWND) { - r0, _, _ := syscall.Syscall(procGetForegroundWindow.Addr(), 0, 0, 0, 0) + r0, _, _ := syscall.SyscallN(procGetForegroundWindow.Addr()) hwnd = HWND(r0) return } func GetGUIThreadInfo(thread uint32, info *GUIThreadInfo) (err error) { - r1, _, e1 := syscall.Syscall(procGetGUIThreadInfo.Addr(), 2, uintptr(thread), uintptr(unsafe.Pointer(info)), 0) + r1, _, e1 := syscall.SyscallN(procGetGUIThreadInfo.Addr(), uintptr(thread), uintptr(unsafe.Pointer(info))) if r1 == 0 { err = errnoErr(e1) } @@ -4196,19 +4252,19 @@ func GetGUIThreadInfo(thread uint32, info *GUIThreadInfo) (err 
error) { } func GetKeyboardLayout(tid uint32) (hkl Handle) { - r0, _, _ := syscall.Syscall(procGetKeyboardLayout.Addr(), 1, uintptr(tid), 0, 0) + r0, _, _ := syscall.SyscallN(procGetKeyboardLayout.Addr(), uintptr(tid)) hkl = Handle(r0) return } func GetShellWindow() (shellWindow HWND) { - r0, _, _ := syscall.Syscall(procGetShellWindow.Addr(), 0, 0, 0, 0) + r0, _, _ := syscall.SyscallN(procGetShellWindow.Addr()) shellWindow = HWND(r0) return } func GetWindowThreadProcessId(hwnd HWND, pid *uint32) (tid uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetWindowThreadProcessId.Addr(), 2, uintptr(hwnd), uintptr(unsafe.Pointer(pid)), 0) + r0, _, e1 := syscall.SyscallN(procGetWindowThreadProcessId.Addr(), uintptr(hwnd), uintptr(unsafe.Pointer(pid))) tid = uint32(r0) if tid == 0 { err = errnoErr(e1) @@ -4217,25 +4273,25 @@ func GetWindowThreadProcessId(hwnd HWND, pid *uint32) (tid uint32, err error) { } func IsWindow(hwnd HWND) (isWindow bool) { - r0, _, _ := syscall.Syscall(procIsWindow.Addr(), 1, uintptr(hwnd), 0, 0) + r0, _, _ := syscall.SyscallN(procIsWindow.Addr(), uintptr(hwnd)) isWindow = r0 != 0 return } func IsWindowUnicode(hwnd HWND) (isUnicode bool) { - r0, _, _ := syscall.Syscall(procIsWindowUnicode.Addr(), 1, uintptr(hwnd), 0, 0) + r0, _, _ := syscall.SyscallN(procIsWindowUnicode.Addr(), uintptr(hwnd)) isUnicode = r0 != 0 return } func IsWindowVisible(hwnd HWND) (isVisible bool) { - r0, _, _ := syscall.Syscall(procIsWindowVisible.Addr(), 1, uintptr(hwnd), 0, 0) + r0, _, _ := syscall.SyscallN(procIsWindowVisible.Addr(), uintptr(hwnd)) isVisible = r0 != 0 return } func LoadKeyboardLayout(name *uint16, flags uint32) (hkl Handle, err error) { - r0, _, e1 := syscall.Syscall(procLoadKeyboardLayoutW.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(flags), 0) + r0, _, e1 := syscall.SyscallN(procLoadKeyboardLayoutW.Addr(), uintptr(unsafe.Pointer(name)), uintptr(flags)) hkl = Handle(r0) if hkl == 0 { err = errnoErr(e1) @@ -4244,7 +4300,7 @@ func 
LoadKeyboardLayout(name *uint16, flags uint32) (hkl Handle, err error) { } func MessageBox(hwnd HWND, text *uint16, caption *uint16, boxtype uint32) (ret int32, err error) { - r0, _, e1 := syscall.Syscall6(procMessageBoxW.Addr(), 4, uintptr(hwnd), uintptr(unsafe.Pointer(text)), uintptr(unsafe.Pointer(caption)), uintptr(boxtype), 0, 0) + r0, _, e1 := syscall.SyscallN(procMessageBoxW.Addr(), uintptr(hwnd), uintptr(unsafe.Pointer(text)), uintptr(unsafe.Pointer(caption)), uintptr(boxtype)) ret = int32(r0) if ret == 0 { err = errnoErr(e1) @@ -4253,13 +4309,13 @@ func MessageBox(hwnd HWND, text *uint16, caption *uint16, boxtype uint32) (ret i } func ToUnicodeEx(vkey uint32, scancode uint32, keystate *byte, pwszBuff *uint16, cchBuff int32, flags uint32, hkl Handle) (ret int32) { - r0, _, _ := syscall.Syscall9(procToUnicodeEx.Addr(), 7, uintptr(vkey), uintptr(scancode), uintptr(unsafe.Pointer(keystate)), uintptr(unsafe.Pointer(pwszBuff)), uintptr(cchBuff), uintptr(flags), uintptr(hkl), 0, 0) + r0, _, _ := syscall.SyscallN(procToUnicodeEx.Addr(), uintptr(vkey), uintptr(scancode), uintptr(unsafe.Pointer(keystate)), uintptr(unsafe.Pointer(pwszBuff)), uintptr(cchBuff), uintptr(flags), uintptr(hkl)) ret = int32(r0) return } func UnloadKeyboardLayout(hkl Handle) (err error) { - r1, _, e1 := syscall.Syscall(procUnloadKeyboardLayout.Addr(), 1, uintptr(hkl), 0, 0) + r1, _, e1 := syscall.SyscallN(procUnloadKeyboardLayout.Addr(), uintptr(hkl)) if r1 == 0 { err = errnoErr(e1) } @@ -4271,7 +4327,7 @@ func CreateEnvironmentBlock(block **uint16, token Token, inheritExisting bool) ( if inheritExisting { _p0 = 1 } - r1, _, e1 := syscall.Syscall(procCreateEnvironmentBlock.Addr(), 3, uintptr(unsafe.Pointer(block)), uintptr(token), uintptr(_p0)) + r1, _, e1 := syscall.SyscallN(procCreateEnvironmentBlock.Addr(), uintptr(unsafe.Pointer(block)), uintptr(token), uintptr(_p0)) if r1 == 0 { err = errnoErr(e1) } @@ -4279,7 +4335,7 @@ func CreateEnvironmentBlock(block **uint16, token Token, 
inheritExisting bool) ( } func DestroyEnvironmentBlock(block *uint16) (err error) { - r1, _, e1 := syscall.Syscall(procDestroyEnvironmentBlock.Addr(), 1, uintptr(unsafe.Pointer(block)), 0, 0) + r1, _, e1 := syscall.SyscallN(procDestroyEnvironmentBlock.Addr(), uintptr(unsafe.Pointer(block))) if r1 == 0 { err = errnoErr(e1) } @@ -4287,7 +4343,7 @@ func DestroyEnvironmentBlock(block *uint16) (err error) { } func GetUserProfileDirectory(t Token, dir *uint16, dirLen *uint32) (err error) { - r1, _, e1 := syscall.Syscall(procGetUserProfileDirectoryW.Addr(), 3, uintptr(t), uintptr(unsafe.Pointer(dir)), uintptr(unsafe.Pointer(dirLen))) + r1, _, e1 := syscall.SyscallN(procGetUserProfileDirectoryW.Addr(), uintptr(t), uintptr(unsafe.Pointer(dir)), uintptr(unsafe.Pointer(dirLen))) if r1 == 0 { err = errnoErr(e1) } @@ -4304,7 +4360,7 @@ func GetFileVersionInfoSize(filename string, zeroHandle *Handle) (bufSize uint32 } func _GetFileVersionInfoSize(filename *uint16, zeroHandle *Handle) (bufSize uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetFileVersionInfoSizeW.Addr(), 2, uintptr(unsafe.Pointer(filename)), uintptr(unsafe.Pointer(zeroHandle)), 0) + r0, _, e1 := syscall.SyscallN(procGetFileVersionInfoSizeW.Addr(), uintptr(unsafe.Pointer(filename)), uintptr(unsafe.Pointer(zeroHandle))) bufSize = uint32(r0) if bufSize == 0 { err = errnoErr(e1) @@ -4322,7 +4378,7 @@ func GetFileVersionInfo(filename string, handle uint32, bufSize uint32, buffer u } func _GetFileVersionInfo(filename *uint16, handle uint32, bufSize uint32, buffer unsafe.Pointer) (err error) { - r1, _, e1 := syscall.Syscall6(procGetFileVersionInfoW.Addr(), 4, uintptr(unsafe.Pointer(filename)), uintptr(handle), uintptr(bufSize), uintptr(buffer), 0, 0) + r1, _, e1 := syscall.SyscallN(procGetFileVersionInfoW.Addr(), uintptr(unsafe.Pointer(filename)), uintptr(handle), uintptr(bufSize), uintptr(buffer)) if r1 == 0 { err = errnoErr(e1) } @@ -4339,7 +4395,7 @@ func VerQueryValue(block unsafe.Pointer, subBlock string, 
pointerToBufferPointer } func _VerQueryValue(block unsafe.Pointer, subBlock *uint16, pointerToBufferPointer unsafe.Pointer, bufSize *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procVerQueryValueW.Addr(), 4, uintptr(block), uintptr(unsafe.Pointer(subBlock)), uintptr(pointerToBufferPointer), uintptr(unsafe.Pointer(bufSize)), 0, 0) + r1, _, e1 := syscall.SyscallN(procVerQueryValueW.Addr(), uintptr(block), uintptr(unsafe.Pointer(subBlock)), uintptr(pointerToBufferPointer), uintptr(unsafe.Pointer(bufSize))) if r1 == 0 { err = errnoErr(e1) } @@ -4347,7 +4403,7 @@ func _VerQueryValue(block unsafe.Pointer, subBlock *uint16, pointerToBufferPoint } func TimeBeginPeriod(period uint32) (err error) { - r1, _, e1 := syscall.Syscall(proctimeBeginPeriod.Addr(), 1, uintptr(period), 0, 0) + r1, _, e1 := syscall.SyscallN(proctimeBeginPeriod.Addr(), uintptr(period)) if r1 != 0 { err = errnoErr(e1) } @@ -4355,7 +4411,7 @@ func TimeBeginPeriod(period uint32) (err error) { } func TimeEndPeriod(period uint32) (err error) { - r1, _, e1 := syscall.Syscall(proctimeEndPeriod.Addr(), 1, uintptr(period), 0, 0) + r1, _, e1 := syscall.SyscallN(proctimeEndPeriod.Addr(), uintptr(period)) if r1 != 0 { err = errnoErr(e1) } @@ -4363,7 +4419,7 @@ func TimeEndPeriod(period uint32) (err error) { } func WinVerifyTrustEx(hwnd HWND, actionId *GUID, data *WinTrustData) (ret error) { - r0, _, _ := syscall.Syscall(procWinVerifyTrustEx.Addr(), 3, uintptr(hwnd), uintptr(unsafe.Pointer(actionId)), uintptr(unsafe.Pointer(data))) + r0, _, _ := syscall.SyscallN(procWinVerifyTrustEx.Addr(), uintptr(hwnd), uintptr(unsafe.Pointer(actionId)), uintptr(unsafe.Pointer(data))) if r0 != 0 { ret = syscall.Errno(r0) } @@ -4371,12 +4427,12 @@ func WinVerifyTrustEx(hwnd HWND, actionId *GUID, data *WinTrustData) (ret error) } func FreeAddrInfoW(addrinfo *AddrinfoW) { - syscall.Syscall(procFreeAddrInfoW.Addr(), 1, uintptr(unsafe.Pointer(addrinfo)), 0, 0) + syscall.SyscallN(procFreeAddrInfoW.Addr(), 
uintptr(unsafe.Pointer(addrinfo))) return } func GetAddrInfoW(nodename *uint16, servicename *uint16, hints *AddrinfoW, result **AddrinfoW) (sockerr error) { - r0, _, _ := syscall.Syscall6(procGetAddrInfoW.Addr(), 4, uintptr(unsafe.Pointer(nodename)), uintptr(unsafe.Pointer(servicename)), uintptr(unsafe.Pointer(hints)), uintptr(unsafe.Pointer(result)), 0, 0) + r0, _, _ := syscall.SyscallN(procGetAddrInfoW.Addr(), uintptr(unsafe.Pointer(nodename)), uintptr(unsafe.Pointer(servicename)), uintptr(unsafe.Pointer(hints)), uintptr(unsafe.Pointer(result))) if r0 != 0 { sockerr = syscall.Errno(r0) } @@ -4384,15 +4440,23 @@ func GetAddrInfoW(nodename *uint16, servicename *uint16, hints *AddrinfoW, resul } func WSACleanup() (err error) { - r1, _, e1 := syscall.Syscall(procWSACleanup.Addr(), 0, 0, 0, 0) + r1, _, e1 := syscall.SyscallN(procWSACleanup.Addr()) if r1 == socket_error { err = errnoErr(e1) } return } +func WSADuplicateSocket(s Handle, processID uint32, info *WSAProtocolInfo) (err error) { + r1, _, e1 := syscall.SyscallN(procWSADuplicateSocketW.Addr(), uintptr(s), uintptr(processID), uintptr(unsafe.Pointer(info))) + if r1 != 0 { + err = errnoErr(e1) + } + return +} + func WSAEnumProtocols(protocols *int32, protocolBuffer *WSAProtocolInfo, bufferLength *uint32) (n int32, err error) { - r0, _, e1 := syscall.Syscall(procWSAEnumProtocolsW.Addr(), 3, uintptr(unsafe.Pointer(protocols)), uintptr(unsafe.Pointer(protocolBuffer)), uintptr(unsafe.Pointer(bufferLength))) + r0, _, e1 := syscall.SyscallN(procWSAEnumProtocolsW.Addr(), uintptr(unsafe.Pointer(protocols)), uintptr(unsafe.Pointer(protocolBuffer)), uintptr(unsafe.Pointer(bufferLength))) n = int32(r0) if n == -1 { err = errnoErr(e1) @@ -4405,7 +4469,7 @@ func WSAGetOverlappedResult(h Handle, o *Overlapped, bytes *uint32, wait bool, f if wait { _p0 = 1 } - r1, _, e1 := syscall.Syscall6(procWSAGetOverlappedResult.Addr(), 5, uintptr(h), uintptr(unsafe.Pointer(o)), uintptr(unsafe.Pointer(bytes)), uintptr(_p0), 
uintptr(unsafe.Pointer(flags)), 0) + r1, _, e1 := syscall.SyscallN(procWSAGetOverlappedResult.Addr(), uintptr(h), uintptr(unsafe.Pointer(o)), uintptr(unsafe.Pointer(bytes)), uintptr(_p0), uintptr(unsafe.Pointer(flags))) if r1 == 0 { err = errnoErr(e1) } @@ -4413,7 +4477,7 @@ func WSAGetOverlappedResult(h Handle, o *Overlapped, bytes *uint32, wait bool, f } func WSAIoctl(s Handle, iocc uint32, inbuf *byte, cbif uint32, outbuf *byte, cbob uint32, cbbr *uint32, overlapped *Overlapped, completionRoutine uintptr) (err error) { - r1, _, e1 := syscall.Syscall9(procWSAIoctl.Addr(), 9, uintptr(s), uintptr(iocc), uintptr(unsafe.Pointer(inbuf)), uintptr(cbif), uintptr(unsafe.Pointer(outbuf)), uintptr(cbob), uintptr(unsafe.Pointer(cbbr)), uintptr(unsafe.Pointer(overlapped)), uintptr(completionRoutine)) + r1, _, e1 := syscall.SyscallN(procWSAIoctl.Addr(), uintptr(s), uintptr(iocc), uintptr(unsafe.Pointer(inbuf)), uintptr(cbif), uintptr(unsafe.Pointer(outbuf)), uintptr(cbob), uintptr(unsafe.Pointer(cbbr)), uintptr(unsafe.Pointer(overlapped)), uintptr(completionRoutine)) if r1 == socket_error { err = errnoErr(e1) } @@ -4421,7 +4485,7 @@ func WSAIoctl(s Handle, iocc uint32, inbuf *byte, cbif uint32, outbuf *byte, cbo } func WSALookupServiceBegin(querySet *WSAQUERYSET, flags uint32, handle *Handle) (err error) { - r1, _, e1 := syscall.Syscall(procWSALookupServiceBeginW.Addr(), 3, uintptr(unsafe.Pointer(querySet)), uintptr(flags), uintptr(unsafe.Pointer(handle))) + r1, _, e1 := syscall.SyscallN(procWSALookupServiceBeginW.Addr(), uintptr(unsafe.Pointer(querySet)), uintptr(flags), uintptr(unsafe.Pointer(handle))) if r1 == socket_error { err = errnoErr(e1) } @@ -4429,7 +4493,7 @@ func WSALookupServiceBegin(querySet *WSAQUERYSET, flags uint32, handle *Handle) } func WSALookupServiceEnd(handle Handle) (err error) { - r1, _, e1 := syscall.Syscall(procWSALookupServiceEnd.Addr(), 1, uintptr(handle), 0, 0) + r1, _, e1 := syscall.SyscallN(procWSALookupServiceEnd.Addr(), uintptr(handle)) if r1 
== socket_error { err = errnoErr(e1) } @@ -4437,7 +4501,7 @@ func WSALookupServiceEnd(handle Handle) (err error) { } func WSALookupServiceNext(handle Handle, flags uint32, size *int32, querySet *WSAQUERYSET) (err error) { - r1, _, e1 := syscall.Syscall6(procWSALookupServiceNextW.Addr(), 4, uintptr(handle), uintptr(flags), uintptr(unsafe.Pointer(size)), uintptr(unsafe.Pointer(querySet)), 0, 0) + r1, _, e1 := syscall.SyscallN(procWSALookupServiceNextW.Addr(), uintptr(handle), uintptr(flags), uintptr(unsafe.Pointer(size)), uintptr(unsafe.Pointer(querySet))) if r1 == socket_error { err = errnoErr(e1) } @@ -4445,7 +4509,7 @@ func WSALookupServiceNext(handle Handle, flags uint32, size *int32, querySet *WS } func WSARecv(s Handle, bufs *WSABuf, bufcnt uint32, recvd *uint32, flags *uint32, overlapped *Overlapped, croutine *byte) (err error) { - r1, _, e1 := syscall.Syscall9(procWSARecv.Addr(), 7, uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(recvd)), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine)), 0, 0) + r1, _, e1 := syscall.SyscallN(procWSARecv.Addr(), uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(recvd)), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine))) if r1 == socket_error { err = errnoErr(e1) } @@ -4453,7 +4517,7 @@ func WSARecv(s Handle, bufs *WSABuf, bufcnt uint32, recvd *uint32, flags *uint32 } func WSARecvFrom(s Handle, bufs *WSABuf, bufcnt uint32, recvd *uint32, flags *uint32, from *RawSockaddrAny, fromlen *int32, overlapped *Overlapped, croutine *byte) (err error) { - r1, _, e1 := syscall.Syscall9(procWSARecvFrom.Addr(), 9, uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(recvd)), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen)), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine))) 
+ r1, _, e1 := syscall.SyscallN(procWSARecvFrom.Addr(), uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(recvd)), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen)), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine))) if r1 == socket_error { err = errnoErr(e1) } @@ -4461,7 +4525,7 @@ func WSARecvFrom(s Handle, bufs *WSABuf, bufcnt uint32, recvd *uint32, flags *ui } func WSASend(s Handle, bufs *WSABuf, bufcnt uint32, sent *uint32, flags uint32, overlapped *Overlapped, croutine *byte) (err error) { - r1, _, e1 := syscall.Syscall9(procWSASend.Addr(), 7, uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(sent)), uintptr(flags), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine)), 0, 0) + r1, _, e1 := syscall.SyscallN(procWSASend.Addr(), uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(sent)), uintptr(flags), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine))) if r1 == socket_error { err = errnoErr(e1) } @@ -4469,7 +4533,7 @@ func WSASend(s Handle, bufs *WSABuf, bufcnt uint32, sent *uint32, flags uint32, } func WSASendTo(s Handle, bufs *WSABuf, bufcnt uint32, sent *uint32, flags uint32, to *RawSockaddrAny, tolen int32, overlapped *Overlapped, croutine *byte) (err error) { - r1, _, e1 := syscall.Syscall9(procWSASendTo.Addr(), 9, uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(sent)), uintptr(flags), uintptr(unsafe.Pointer(to)), uintptr(tolen), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine))) + r1, _, e1 := syscall.SyscallN(procWSASendTo.Addr(), uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(sent)), uintptr(flags), uintptr(unsafe.Pointer(to)), uintptr(tolen), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine))) if r1 == socket_error { err = errnoErr(e1) } @@ 
-4477,7 +4541,7 @@ func WSASendTo(s Handle, bufs *WSABuf, bufcnt uint32, sent *uint32, flags uint32 } func WSASocket(af int32, typ int32, protocol int32, protoInfo *WSAProtocolInfo, group uint32, flags uint32) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall6(procWSASocketW.Addr(), 6, uintptr(af), uintptr(typ), uintptr(protocol), uintptr(unsafe.Pointer(protoInfo)), uintptr(group), uintptr(flags)) + r0, _, e1 := syscall.SyscallN(procWSASocketW.Addr(), uintptr(af), uintptr(typ), uintptr(protocol), uintptr(unsafe.Pointer(protoInfo)), uintptr(group), uintptr(flags)) handle = Handle(r0) if handle == InvalidHandle { err = errnoErr(e1) @@ -4486,7 +4550,7 @@ func WSASocket(af int32, typ int32, protocol int32, protoInfo *WSAProtocolInfo, } func WSAStartup(verreq uint32, data *WSAData) (sockerr error) { - r0, _, _ := syscall.Syscall(procWSAStartup.Addr(), 2, uintptr(verreq), uintptr(unsafe.Pointer(data)), 0) + r0, _, _ := syscall.SyscallN(procWSAStartup.Addr(), uintptr(verreq), uintptr(unsafe.Pointer(data))) if r0 != 0 { sockerr = syscall.Errno(r0) } @@ -4494,7 +4558,7 @@ func WSAStartup(verreq uint32, data *WSAData) (sockerr error) { } func bind(s Handle, name unsafe.Pointer, namelen int32) (err error) { - r1, _, e1 := syscall.Syscall(procbind.Addr(), 3, uintptr(s), uintptr(name), uintptr(namelen)) + r1, _, e1 := syscall.SyscallN(procbind.Addr(), uintptr(s), uintptr(name), uintptr(namelen)) if r1 == socket_error { err = errnoErr(e1) } @@ -4502,7 +4566,7 @@ func bind(s Handle, name unsafe.Pointer, namelen int32) (err error) { } func Closesocket(s Handle) (err error) { - r1, _, e1 := syscall.Syscall(procclosesocket.Addr(), 1, uintptr(s), 0, 0) + r1, _, e1 := syscall.SyscallN(procclosesocket.Addr(), uintptr(s)) if r1 == socket_error { err = errnoErr(e1) } @@ -4510,7 +4574,7 @@ func Closesocket(s Handle) (err error) { } func connect(s Handle, name unsafe.Pointer, namelen int32) (err error) { - r1, _, e1 := syscall.Syscall(procconnect.Addr(), 3, uintptr(s), 
uintptr(name), uintptr(namelen)) + r1, _, e1 := syscall.SyscallN(procconnect.Addr(), uintptr(s), uintptr(name), uintptr(namelen)) if r1 == socket_error { err = errnoErr(e1) } @@ -4527,7 +4591,7 @@ func GetHostByName(name string) (h *Hostent, err error) { } func _GetHostByName(name *byte) (h *Hostent, err error) { - r0, _, e1 := syscall.Syscall(procgethostbyname.Addr(), 1, uintptr(unsafe.Pointer(name)), 0, 0) + r0, _, e1 := syscall.SyscallN(procgethostbyname.Addr(), uintptr(unsafe.Pointer(name))) h = (*Hostent)(unsafe.Pointer(r0)) if h == nil { err = errnoErr(e1) @@ -4536,7 +4600,7 @@ func _GetHostByName(name *byte) (h *Hostent, err error) { } func getpeername(s Handle, rsa *RawSockaddrAny, addrlen *int32) (err error) { - r1, _, e1 := syscall.Syscall(procgetpeername.Addr(), 3, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + r1, _, e1 := syscall.SyscallN(procgetpeername.Addr(), uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) if r1 == socket_error { err = errnoErr(e1) } @@ -4553,7 +4617,7 @@ func GetProtoByName(name string) (p *Protoent, err error) { } func _GetProtoByName(name *byte) (p *Protoent, err error) { - r0, _, e1 := syscall.Syscall(procgetprotobyname.Addr(), 1, uintptr(unsafe.Pointer(name)), 0, 0) + r0, _, e1 := syscall.SyscallN(procgetprotobyname.Addr(), uintptr(unsafe.Pointer(name))) p = (*Protoent)(unsafe.Pointer(r0)) if p == nil { err = errnoErr(e1) @@ -4576,7 +4640,7 @@ func GetServByName(name string, proto string) (s *Servent, err error) { } func _GetServByName(name *byte, proto *byte) (s *Servent, err error) { - r0, _, e1 := syscall.Syscall(procgetservbyname.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(proto)), 0) + r0, _, e1 := syscall.SyscallN(procgetservbyname.Addr(), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(proto))) s = (*Servent)(unsafe.Pointer(r0)) if s == nil { err = errnoErr(e1) @@ -4585,7 +4649,7 @@ func _GetServByName(name *byte, proto *byte) (s 
*Servent, err error) { } func getsockname(s Handle, rsa *RawSockaddrAny, addrlen *int32) (err error) { - r1, _, e1 := syscall.Syscall(procgetsockname.Addr(), 3, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + r1, _, e1 := syscall.SyscallN(procgetsockname.Addr(), uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) if r1 == socket_error { err = errnoErr(e1) } @@ -4593,7 +4657,7 @@ func getsockname(s Handle, rsa *RawSockaddrAny, addrlen *int32) (err error) { } func Getsockopt(s Handle, level int32, optname int32, optval *byte, optlen *int32) (err error) { - r1, _, e1 := syscall.Syscall6(procgetsockopt.Addr(), 5, uintptr(s), uintptr(level), uintptr(optname), uintptr(unsafe.Pointer(optval)), uintptr(unsafe.Pointer(optlen)), 0) + r1, _, e1 := syscall.SyscallN(procgetsockopt.Addr(), uintptr(s), uintptr(level), uintptr(optname), uintptr(unsafe.Pointer(optval)), uintptr(unsafe.Pointer(optlen))) if r1 == socket_error { err = errnoErr(e1) } @@ -4601,7 +4665,7 @@ func Getsockopt(s Handle, level int32, optname int32, optval *byte, optlen *int3 } func listen(s Handle, backlog int32) (err error) { - r1, _, e1 := syscall.Syscall(proclisten.Addr(), 2, uintptr(s), uintptr(backlog), 0) + r1, _, e1 := syscall.SyscallN(proclisten.Addr(), uintptr(s), uintptr(backlog)) if r1 == socket_error { err = errnoErr(e1) } @@ -4609,7 +4673,7 @@ func listen(s Handle, backlog int32) (err error) { } func Ntohs(netshort uint16) (u uint16) { - r0, _, _ := syscall.Syscall(procntohs.Addr(), 1, uintptr(netshort), 0, 0) + r0, _, _ := syscall.SyscallN(procntohs.Addr(), uintptr(netshort)) u = uint16(r0) return } @@ -4619,7 +4683,7 @@ func recvfrom(s Handle, buf []byte, flags int32, from *RawSockaddrAny, fromlen * if len(buf) > 0 { _p0 = &buf[0] } - r0, _, e1 := syscall.Syscall6(procrecvfrom.Addr(), 6, uintptr(s), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) + r0, _, e1 := 
syscall.SyscallN(procrecvfrom.Addr(), uintptr(s), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) n = int32(r0) if n == -1 { err = errnoErr(e1) @@ -4632,7 +4696,7 @@ func sendto(s Handle, buf []byte, flags int32, to unsafe.Pointer, tolen int32) ( if len(buf) > 0 { _p0 = &buf[0] } - r1, _, e1 := syscall.Syscall6(procsendto.Addr(), 6, uintptr(s), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(tolen)) + r1, _, e1 := syscall.SyscallN(procsendto.Addr(), uintptr(s), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(tolen)) if r1 == socket_error { err = errnoErr(e1) } @@ -4640,7 +4704,7 @@ func sendto(s Handle, buf []byte, flags int32, to unsafe.Pointer, tolen int32) ( } func Setsockopt(s Handle, level int32, optname int32, optval *byte, optlen int32) (err error) { - r1, _, e1 := syscall.Syscall6(procsetsockopt.Addr(), 5, uintptr(s), uintptr(level), uintptr(optname), uintptr(unsafe.Pointer(optval)), uintptr(optlen), 0) + r1, _, e1 := syscall.SyscallN(procsetsockopt.Addr(), uintptr(s), uintptr(level), uintptr(optname), uintptr(unsafe.Pointer(optval)), uintptr(optlen)) if r1 == socket_error { err = errnoErr(e1) } @@ -4648,7 +4712,7 @@ func Setsockopt(s Handle, level int32, optname int32, optval *byte, optlen int32 } func shutdown(s Handle, how int32) (err error) { - r1, _, e1 := syscall.Syscall(procshutdown.Addr(), 2, uintptr(s), uintptr(how), 0) + r1, _, e1 := syscall.SyscallN(procshutdown.Addr(), uintptr(s), uintptr(how)) if r1 == socket_error { err = errnoErr(e1) } @@ -4656,7 +4720,7 @@ func shutdown(s Handle, how int32) (err error) { } func socket(af int32, typ int32, protocol int32) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procsocket.Addr(), 3, uintptr(af), uintptr(typ), uintptr(protocol)) + r0, _, e1 := syscall.SyscallN(procsocket.Addr(), uintptr(af), uintptr(typ), uintptr(protocol)) handle = 
Handle(r0) if handle == InvalidHandle { err = errnoErr(e1) @@ -4665,7 +4729,7 @@ func socket(af int32, typ int32, protocol int32) (handle Handle, err error) { } func WTSEnumerateSessions(handle Handle, reserved uint32, version uint32, sessions **WTS_SESSION_INFO, count *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procWTSEnumerateSessionsW.Addr(), 5, uintptr(handle), uintptr(reserved), uintptr(version), uintptr(unsafe.Pointer(sessions)), uintptr(unsafe.Pointer(count)), 0) + r1, _, e1 := syscall.SyscallN(procWTSEnumerateSessionsW.Addr(), uintptr(handle), uintptr(reserved), uintptr(version), uintptr(unsafe.Pointer(sessions)), uintptr(unsafe.Pointer(count))) if r1 == 0 { err = errnoErr(e1) } @@ -4673,12 +4737,12 @@ func WTSEnumerateSessions(handle Handle, reserved uint32, version uint32, sessio } func WTSFreeMemory(ptr uintptr) { - syscall.Syscall(procWTSFreeMemory.Addr(), 1, uintptr(ptr), 0, 0) + syscall.SyscallN(procWTSFreeMemory.Addr(), uintptr(ptr)) return } func WTSQueryUserToken(session uint32, token *Token) (err error) { - r1, _, e1 := syscall.Syscall(procWTSQueryUserToken.Addr(), 2, uintptr(session), uintptr(unsafe.Pointer(token)), 0) + r1, _, e1 := syscall.SyscallN(procWTSQueryUserToken.Addr(), uintptr(session), uintptr(unsafe.Pointer(token))) if r1 == 0 { err = errnoErr(e1) } diff --git a/vendor/golang.org/x/term/term_windows.go b/vendor/golang.org/x/term/term_windows.go index df6bf948e..0ddd81c02 100644 --- a/vendor/golang.org/x/term/term_windows.go +++ b/vendor/golang.org/x/term/term_windows.go @@ -20,12 +20,14 @@ func isTerminal(fd int) bool { return err == nil } +// This is intended to be used on a console input handle. 
+// See https://learn.microsoft.com/en-us/windows/console/setconsolemode func makeRaw(fd int) (*State, error) { var st uint32 if err := windows.GetConsoleMode(windows.Handle(fd), &st); err != nil { return nil, err } - raw := st &^ (windows.ENABLE_ECHO_INPUT | windows.ENABLE_PROCESSED_INPUT | windows.ENABLE_LINE_INPUT | windows.ENABLE_PROCESSED_OUTPUT) + raw := st &^ (windows.ENABLE_ECHO_INPUT | windows.ENABLE_PROCESSED_INPUT | windows.ENABLE_LINE_INPUT) raw |= windows.ENABLE_VIRTUAL_TERMINAL_INPUT if err := windows.SetConsoleMode(windows.Handle(fd), raw); err != nil { return nil, err diff --git a/vendor/golang.org/x/term/terminal.go b/vendor/golang.org/x/term/terminal.go index f636667fb..9255449b9 100644 --- a/vendor/golang.org/x/term/terminal.go +++ b/vendor/golang.org/x/term/terminal.go @@ -6,6 +6,7 @@ package term import ( "bytes" + "fmt" "io" "runtime" "strconv" @@ -36,6 +37,26 @@ var vt100EscapeCodes = EscapeCodes{ Reset: []byte{keyEscape, '[', '0', 'm'}, } +// A History provides a (possibly bounded) queue of input lines read by [Terminal.ReadLine]. +type History interface { + // Add will be called by [Terminal.ReadLine] to add + // a new, most recent entry to the history. + // It is allowed to drop any entry, including + // the entry being added (e.g., if it's deemed an invalid entry), + // the least-recent entry (e.g., to keep the history bounded), + // or any other entry. + Add(entry string) + + // Len returns the number of entries in the history. + Len() int + + // At returns an entry from the history. + // Index 0 is the most-recently added entry and + // index Len()-1 is the least-recently added entry. + // If index is < 0 or >= Len(), it panics. + At(idx int) string +} + // Terminal contains the state for running a VT100 terminal that is capable of // reading lines of input. type Terminal struct { @@ -44,6 +65,8 @@ type Terminal struct { // bytes, as an index into |line|). If it returns ok=false, the key // press is processed normally. 
Otherwise it returns a replacement line // and the new cursor position. + // + // This will be disabled during ReadPassword. AutoCompleteCallback func(line string, pos int, key rune) (newLine string, newPos int, ok bool) // Escape contains a pointer to the escape codes for this terminal. @@ -84,9 +107,14 @@ type Terminal struct { remainder []byte inBuf [256]byte - // history contains previously entered commands so that they can be - // accessed with the up and down keys. - history stRingBuffer + // History records and retrieves lines of input read by [ReadLine] which + // a user can retrieve and navigate using the up and down arrow keys. + // + // It is not safe to call ReadLine concurrently with any methods on History. + // + // [NewTerminal] sets this to a default implementation that records the + // last 100 lines of input. + History History // historyIndex stores the currently accessed history entry, where zero // means the immediately previous entry. historyIndex int @@ -109,6 +137,7 @@ func NewTerminal(c io.ReadWriter, prompt string) *Terminal { termHeight: 24, echo: true, historyIndex: -1, + History: &stRingBuffer{}, } } @@ -117,6 +146,7 @@ const ( keyCtrlD = 4 keyCtrlU = 21 keyEnter = '\r' + keyLF = '\n' keyEscape = 27 keyBackspace = 127 keyUnknown = 0xd800 /* UTF-16 surrogate area */ + iota @@ -383,7 +413,7 @@ func (t *Terminal) eraseNPreviousChars(n int) { } } -// countToLeftWord returns then number of characters from the cursor to the +// countToLeftWord returns the number of characters from the cursor to the // start of the previous word. func (t *Terminal) countToLeftWord() int { if t.pos == 0 { @@ -408,7 +438,7 @@ func (t *Terminal) countToLeftWord() int { return t.pos - pos } -// countToRightWord returns then number of characters from the cursor to the +// countToRightWord returns the number of characters from the cursor to the // start of the next word. 
func (t *Terminal) countToRightWord() int { pos := t.pos @@ -448,10 +478,27 @@ func visualLength(runes []rune) int { return length } +// historyAt unlocks the terminal and relocks it while calling History.At. +func (t *Terminal) historyAt(idx int) (string, bool) { + t.lock.Unlock() // Unlock to avoid deadlock if History methods use the output writer. + defer t.lock.Lock() // panic in At (or Len) protection. + if idx < 0 || idx >= t.History.Len() { + return "", false + } + return t.History.At(idx), true +} + +// historyAdd unlocks the terminal and relocks it while calling History.Add. +func (t *Terminal) historyAdd(entry string) { + t.lock.Unlock() // Unlock to avoid deadlock if History methods use the output writer. + defer t.lock.Lock() // panic in Add protection. + t.History.Add(entry) +} + // handleKey processes the given key and, optionally, returns a line of text // that the user has entered. func (t *Terminal) handleKey(key rune) (line string, ok bool) { - if t.pasteActive && key != keyEnter { + if t.pasteActive && key != keyEnter && key != keyLF { t.addKeyToLine(key) return } @@ -495,7 +542,7 @@ func (t *Terminal) handleKey(key rune) (line string, ok bool) { t.pos = len(t.line) t.moveCursorToPos(t.pos) case keyUp: - entry, ok := t.history.NthPreviousEntry(t.historyIndex + 1) + entry, ok := t.historyAt(t.historyIndex + 1) if !ok { return "", false } @@ -514,14 +561,14 @@ func (t *Terminal) handleKey(key rune) (line string, ok bool) { t.setLine(runes, len(runes)) t.historyIndex-- default: - entry, ok := t.history.NthPreviousEntry(t.historyIndex - 1) + entry, ok := t.historyAt(t.historyIndex - 1) if ok { t.historyIndex-- runes := []rune(entry) t.setLine(runes, len(runes)) } } - case keyEnter: + case keyEnter, keyLF: t.moveCursorToPos(len(t.line)) t.queue([]rune("\r\n")) line = string(t.line) @@ -692,6 +739,8 @@ func (t *Terminal) Write(buf []byte) (n int, err error) { // ReadPassword temporarily changes the prompt and reads a password, without // echo, from the 
terminal. +// +// The AutoCompleteCallback is disabled during this call. func (t *Terminal) ReadPassword(prompt string) (line string, err error) { t.lock.Lock() defer t.lock.Unlock() @@ -699,6 +748,11 @@ func (t *Terminal) ReadPassword(prompt string) (line string, err error) { oldPrompt := t.prompt t.prompt = []rune(prompt) t.echo = false + oldAutoCompleteCallback := t.AutoCompleteCallback + t.AutoCompleteCallback = nil + defer func() { + t.AutoCompleteCallback = oldAutoCompleteCallback + }() line, err = t.readLine() @@ -759,6 +813,10 @@ func (t *Terminal) readLine() (line string, err error) { if !t.pasteActive { lineIsPasted = false } + // If we have CR, consume LF if present (CRLF sequence) to avoid returning an extra empty line. + if key == keyEnter && len(rest) > 0 && rest[0] == keyLF { + rest = rest[1:] + } line, lineOk = t.handleKey(key) } if len(rest) > 0 { @@ -772,7 +830,7 @@ func (t *Terminal) readLine() (line string, err error) { if lineOk { if t.echo { t.historyIndex = -1 - t.history.Add(line) + t.historyAdd(line) } if lineIsPasted { err = ErrPasteIndicator @@ -929,19 +987,23 @@ func (s *stRingBuffer) Add(a string) { } } -// NthPreviousEntry returns the value passed to the nth previous call to Add. +func (s *stRingBuffer) Len() int { + return s.size +} + +// At returns the value passed to the nth previous call to Add. // If n is zero then the immediately prior value is returned, if one, then the // next most recent, and so on. If such an element doesn't exist then ok is // false. -func (s *stRingBuffer) NthPreviousEntry(n int) (value string, ok bool) { +func (s *stRingBuffer) At(n int) string { if n < 0 || n >= s.size { - return "", false + panic(fmt.Sprintf("term: history index [%d] out of range [0,%d)", n, s.size)) } index := s.head - n if index < 0 { index += s.max } - return s.entries[index], true + return s.entries[index] } // readPasswordLine reads from reader until it finds \n or io.EOF. 
diff --git a/vendor/golang.org/x/text/unicode/bidi/core.go b/vendor/golang.org/x/text/unicode/bidi/core.go index 9d2ae547b..fb8273236 100644 --- a/vendor/golang.org/x/text/unicode/bidi/core.go +++ b/vendor/golang.org/x/text/unicode/bidi/core.go @@ -427,13 +427,6 @@ type isolatingRunSequence struct { func (i *isolatingRunSequence) Len() int { return len(i.indexes) } -func maxLevel(a, b level) level { - if a > b { - return a - } - return b -} - // Rule X10, second bullet: Determine the start-of-sequence (sos) and end-of-sequence (eos) types, // either L or R, for each isolating run sequence. func (p *paragraph) isolatingRunSequence(indexes []int) *isolatingRunSequence { @@ -474,8 +467,8 @@ func (p *paragraph) isolatingRunSequence(indexes []int) *isolatingRunSequence { indexes: indexes, types: types, level: level, - sos: typeForLevel(maxLevel(prevLevel, level)), - eos: typeForLevel(maxLevel(succLevel, level)), + sos: typeForLevel(max(prevLevel, level)), + eos: typeForLevel(max(succLevel, level)), } } diff --git a/vendor/golang.org/x/time/rate/rate.go b/vendor/golang.org/x/time/rate/rate.go index 93a798ab6..563270c15 100644 --- a/vendor/golang.org/x/time/rate/rate.go +++ b/vendor/golang.org/x/time/rate/rate.go @@ -85,7 +85,7 @@ func (lim *Limiter) Burst() int { // TokensAt returns the number of tokens available at time t. 
func (lim *Limiter) TokensAt(t time.Time) float64 { lim.mu.Lock() - _, tokens := lim.advance(t) // does not mutate lim + tokens := lim.advance(t) // does not mutate lim lim.mu.Unlock() return tokens } @@ -186,7 +186,7 @@ func (r *Reservation) CancelAt(t time.Time) { return } // advance time to now - t, tokens := r.lim.advance(t) + tokens := r.lim.advance(t) // calculate new number of tokens tokens += restoreTokens if burst := float64(r.lim.burst); tokens > burst { @@ -195,7 +195,7 @@ func (r *Reservation) CancelAt(t time.Time) { // update state r.lim.last = t r.lim.tokens = tokens - if r.timeToAct == r.lim.lastEvent { + if r.timeToAct.Equal(r.lim.lastEvent) { prevEvent := r.timeToAct.Add(r.limit.durationFromTokens(float64(-r.tokens))) if !prevEvent.Before(t) { r.lim.lastEvent = prevEvent @@ -307,7 +307,7 @@ func (lim *Limiter) SetLimitAt(t time.Time, newLimit Limit) { lim.mu.Lock() defer lim.mu.Unlock() - t, tokens := lim.advance(t) + tokens := lim.advance(t) lim.last = t lim.tokens = tokens @@ -324,7 +324,7 @@ func (lim *Limiter) SetBurstAt(t time.Time, newBurst int) { lim.mu.Lock() defer lim.mu.Unlock() - t, tokens := lim.advance(t) + tokens := lim.advance(t) lim.last = t lim.tokens = tokens @@ -347,7 +347,7 @@ func (lim *Limiter) reserveN(t time.Time, n int, maxFutureReserve time.Duration) } } - t, tokens := lim.advance(t) + tokens := lim.advance(t) // Calculate the remaining number of tokens resulting from the request. tokens -= float64(n) @@ -380,10 +380,11 @@ func (lim *Limiter) reserveN(t time.Time, n int, maxFutureReserve time.Duration) return r } -// advance calculates and returns an updated state for lim resulting from the passage of time. +// advance calculates and returns an updated number of tokens for lim +// resulting from the passage of time. // lim is not changed. // advance requires that lim.mu is held. 
-func (lim *Limiter) advance(t time.Time) (newT time.Time, newTokens float64) { +func (lim *Limiter) advance(t time.Time) (newTokens float64) { last := lim.last if t.Before(last) { last = t @@ -396,7 +397,7 @@ func (lim *Limiter) advance(t time.Time) (newT time.Time, newTokens float64) { if burst := float64(lim.burst); tokens > burst { tokens = burst } - return t, tokens + return tokens } // durationFromTokens is a unit conversion function from the number of tokens to the duration @@ -405,8 +406,15 @@ func (limit Limit) durationFromTokens(tokens float64) time.Duration { if limit <= 0 { return InfDuration } - seconds := tokens / float64(limit) - return time.Duration(float64(time.Second) * seconds) + + duration := (tokens / float64(limit)) * float64(time.Second) + + // Cap the duration to the maximum representable int64 value, to avoid overflow. + if duration > float64(math.MaxInt64) { + return InfDuration + } + + return time.Duration(duration) } // tokensFromDuration is a unit conversion function from a time duration to the number of tokens diff --git a/vendor/golang.org/x/time/rate/sometimes.go b/vendor/golang.org/x/time/rate/sometimes.go index 6ba99ddb6..9b8393269 100644 --- a/vendor/golang.org/x/time/rate/sometimes.go +++ b/vendor/golang.org/x/time/rate/sometimes.go @@ -61,7 +61,9 @@ func (s *Sometimes) Do(f func()) { (s.Every > 0 && s.count%s.Every == 0) || (s.Interval > 0 && time.Since(s.last) >= s.Interval) { f() - s.last = time.Now() + if s.Interval > 0 { + s.last = time.Now() + } } s.count++ } diff --git a/vendor/golang.org/x/tools/go/ast/astutil/enclosing.go b/vendor/golang.org/x/tools/go/ast/astutil/enclosing.go index 6e34df461..0fb4e7eea 100644 --- a/vendor/golang.org/x/tools/go/ast/astutil/enclosing.go +++ b/vendor/golang.org/x/tools/go/ast/astutil/enclosing.go @@ -113,7 +113,7 @@ func PathEnclosingInterval(root *ast.File, start, end token.Pos) (path []ast.Nod // childrenOf elides the FuncType node beneath FuncDecl. 
// Add it back here for TypeParams, Params, Results, // all FieldLists). But we don't add it back for the "func" token - // even though it is is the tree at FuncDecl.Type.Func. + // even though it is the tree at FuncDecl.Type.Func. if decl, ok := node.(*ast.FuncDecl); ok { if fields, ok := child.(*ast.FieldList); ok && fields != decl.Recv { path = append(path, decl.Type) @@ -207,6 +207,9 @@ func childrenOf(n ast.Node) []ast.Node { return false // no recursion }) + // TODO(adonovan): be more careful about missing (!Pos.Valid) + // tokens in trees produced from invalid input. + // Then add fake Nodes for bare tokens. switch n := n.(type) { case *ast.ArrayType: @@ -226,9 +229,12 @@ func childrenOf(n ast.Node) []ast.Node { children = append(children, tok(n.OpPos, len(n.Op.String()))) case *ast.BlockStmt: - children = append(children, - tok(n.Lbrace, len("{")), - tok(n.Rbrace, len("}"))) + if n.Lbrace.IsValid() { + children = append(children, tok(n.Lbrace, len("{"))) + } + if n.Rbrace.IsValid() { + children = append(children, tok(n.Rbrace, len("}"))) + } case *ast.BranchStmt: children = append(children, @@ -304,9 +310,12 @@ func childrenOf(n ast.Node) []ast.Node { // TODO(adonovan): Field.{Doc,Comment,Tag}? 
case *ast.FieldList: - children = append(children, - tok(n.Opening, len("(")), // or len("[") - tok(n.Closing, len(")"))) // or len("]") + if n.Opening.IsValid() { + children = append(children, tok(n.Opening, len("("))) + } + if n.Closing.IsValid() { + children = append(children, tok(n.Closing, len(")"))) + } case *ast.File: // TODO test: Doc diff --git a/vendor/golang.org/x/tools/go/ast/astutil/imports.go b/vendor/golang.org/x/tools/go/ast/astutil/imports.go index a6b5ed0a8..5bacc0fa4 100644 --- a/vendor/golang.org/x/tools/go/ast/astutil/imports.go +++ b/vendor/golang.org/x/tools/go/ast/astutil/imports.go @@ -9,6 +9,7 @@ import ( "fmt" "go/ast" "go/token" + "slices" "strconv" "strings" ) @@ -186,7 +187,7 @@ func AddNamedImport(fset *token.FileSet, f *ast.File, name, path string) (added spec.(*ast.ImportSpec).Path.ValuePos = first.Pos() first.Specs = append(first.Specs, spec) } - f.Decls = append(f.Decls[:i], f.Decls[i+1:]...) + f.Decls = slices.Delete(f.Decls, i, i+1) i-- } @@ -208,48 +209,46 @@ func DeleteImport(fset *token.FileSet, f *ast.File, path string) (deleted bool) // DeleteNamedImport deletes the import with the given name and path from the file f, if present. // If there are duplicate import declarations, all matching ones are deleted. func DeleteNamedImport(fset *token.FileSet, f *ast.File, name, path string) (deleted bool) { - var delspecs []*ast.ImportSpec - var delcomments []*ast.CommentGroup + var ( + delspecs = make(map[*ast.ImportSpec]bool) + delcomments = make(map[*ast.CommentGroup]bool) + ) // Find the import nodes that import path, if any. 
for i := 0; i < len(f.Decls); i++ { - decl := f.Decls[i] - gen, ok := decl.(*ast.GenDecl) + gen, ok := f.Decls[i].(*ast.GenDecl) if !ok || gen.Tok != token.IMPORT { continue } for j := 0; j < len(gen.Specs); j++ { - spec := gen.Specs[j] - impspec := spec.(*ast.ImportSpec) + impspec := gen.Specs[j].(*ast.ImportSpec) if importName(impspec) != name || importPath(impspec) != path { continue } // We found an import spec that imports path. // Delete it. - delspecs = append(delspecs, impspec) + delspecs[impspec] = true deleted = true - copy(gen.Specs[j:], gen.Specs[j+1:]) - gen.Specs = gen.Specs[:len(gen.Specs)-1] + gen.Specs = slices.Delete(gen.Specs, j, j+1) // If this was the last import spec in this decl, // delete the decl, too. if len(gen.Specs) == 0 { - copy(f.Decls[i:], f.Decls[i+1:]) - f.Decls = f.Decls[:len(f.Decls)-1] + f.Decls = slices.Delete(f.Decls, i, i+1) i-- break } else if len(gen.Specs) == 1 { if impspec.Doc != nil { - delcomments = append(delcomments, impspec.Doc) + delcomments[impspec.Doc] = true } if impspec.Comment != nil { - delcomments = append(delcomments, impspec.Comment) + delcomments[impspec.Comment] = true } for _, cg := range f.Comments { // Found comment on the same line as the import spec. if cg.End() < impspec.Pos() && fset.Position(cg.End()).Line == fset.Position(impspec.Pos()).Line { - delcomments = append(delcomments, cg) + delcomments[cg] = true break } } @@ -293,38 +292,21 @@ func DeleteNamedImport(fset *token.FileSet, f *ast.File, name, path string) (del } // Delete imports from f.Imports. 
- for i := 0; i < len(f.Imports); i++ { - imp := f.Imports[i] - for j, del := range delspecs { - if imp == del { - copy(f.Imports[i:], f.Imports[i+1:]) - f.Imports = f.Imports[:len(f.Imports)-1] - copy(delspecs[j:], delspecs[j+1:]) - delspecs = delspecs[:len(delspecs)-1] - i-- - break - } - } + before := len(f.Imports) + f.Imports = slices.DeleteFunc(f.Imports, func(imp *ast.ImportSpec) bool { + _, ok := delspecs[imp] + return ok + }) + if len(f.Imports)+len(delspecs) != before { + // This can happen when the AST is invalid (i.e. imports differ between f.Decls and f.Imports). + panic(fmt.Sprintf("deleted specs from Decls but not Imports: %v", delspecs)) } // Delete comments from f.Comments. - for i := 0; i < len(f.Comments); i++ { - cg := f.Comments[i] - for j, del := range delcomments { - if cg == del { - copy(f.Comments[i:], f.Comments[i+1:]) - f.Comments = f.Comments[:len(f.Comments)-1] - copy(delcomments[j:], delcomments[j+1:]) - delcomments = delcomments[:len(delcomments)-1] - i-- - break - } - } - } - - if len(delspecs) > 0 { - panic(fmt.Sprintf("deleted specs from Decls but not Imports: %v", delspecs)) - } + f.Comments = slices.DeleteFunc(f.Comments, func(cg *ast.CommentGroup) bool { + _, ok := delcomments[cg] + return ok + }) return } diff --git a/vendor/golang.org/x/tools/go/ast/astutil/rewrite.go b/vendor/golang.org/x/tools/go/ast/astutil/rewrite.go index 58934f766..4ad054930 100644 --- a/vendor/golang.org/x/tools/go/ast/astutil/rewrite.go +++ b/vendor/golang.org/x/tools/go/ast/astutil/rewrite.go @@ -67,6 +67,10 @@ var abort = new(int) // singleton, to signal termination of Apply // // The methods Replace, Delete, InsertBefore, and InsertAfter // can be used to change the AST without disrupting Apply. +// +// This type is not to be confused with [inspector.Cursor] from +// package [golang.org/x/tools/go/ast/inspector], which provides +// stateless navigation of immutable syntax trees. 
type Cursor struct { parent ast.Node name string @@ -183,7 +187,7 @@ type application struct { func (a *application) apply(parent ast.Node, name string, iter *iterator, n ast.Node) { // convert typed nil into untyped nil - if v := reflect.ValueOf(n); v.Kind() == reflect.Ptr && v.IsNil() { + if v := reflect.ValueOf(n); v.Kind() == reflect.Pointer && v.IsNil() { n = nil } diff --git a/vendor/golang.org/x/tools/go/ast/astutil/util.go b/vendor/golang.org/x/tools/go/ast/astutil/util.go index ca71e3e10..c820b2084 100644 --- a/vendor/golang.org/x/tools/go/ast/astutil/util.go +++ b/vendor/golang.org/x/tools/go/ast/astutil/util.go @@ -8,4 +8,6 @@ import "go/ast" // Unparen returns e with any enclosing parentheses stripped. // Deprecated: use [ast.Unparen]. +// +//go:fix inline func Unparen(e ast.Expr) ast.Expr { return ast.Unparen(e) } diff --git a/vendor/golang.org/x/tools/go/ast/edge/edge.go b/vendor/golang.org/x/tools/go/ast/edge/edge.go new file mode 100644 index 000000000..4f6ccfd6e --- /dev/null +++ b/vendor/golang.org/x/tools/go/ast/edge/edge.go @@ -0,0 +1,295 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package edge defines identifiers for each field of an ast.Node +// struct type that refers to another Node. +package edge + +import ( + "fmt" + "go/ast" + "reflect" +) + +// A Kind describes a field of an ast.Node struct. +type Kind uint8 + +// String returns a description of the edge kind. +func (k Kind) String() string { + if k == Invalid { + return "" + } + info := fieldInfos[k] + return fmt.Sprintf("%v.%s", info.nodeType.Elem().Name(), info.name) +} + +// NodeType returns the pointer-to-struct type of the ast.Node implementation. +func (k Kind) NodeType() reflect.Type { return fieldInfos[k].nodeType } + +// FieldName returns the name of the field. 
+func (k Kind) FieldName() string { return fieldInfos[k].name } + +// FieldType returns the declared type of the field. +func (k Kind) FieldType() reflect.Type { return fieldInfos[k].fieldType } + +// Get returns the direct child of n identified by (k, idx). +// n's type must match k.NodeType(). +// idx must be a valid slice index, or -1 for a non-slice. +func (k Kind) Get(n ast.Node, idx int) ast.Node { + if k.NodeType() != reflect.TypeOf(n) { + panic(fmt.Sprintf("%v.Get(%T): invalid node type", k, n)) + } + v := reflect.ValueOf(n).Elem().Field(fieldInfos[k].index) + if idx != -1 { + v = v.Index(idx) // asserts valid index + } else { + // (The type assertion below asserts that v is not a slice.) + } + return v.Interface().(ast.Node) // may be nil +} + +const ( + Invalid Kind = iota // for nodes at the root of the traversal + + // Kinds are sorted alphabetically. + // Numbering is not stable. + // Each is named Type_Field, where Type is the + // ast.Node struct type and Field is the name of the field + + ArrayType_Elt + ArrayType_Len + AssignStmt_Lhs + AssignStmt_Rhs + BinaryExpr_X + BinaryExpr_Y + BlockStmt_List + BranchStmt_Label + CallExpr_Args + CallExpr_Fun + CaseClause_Body + CaseClause_List + ChanType_Value + CommClause_Body + CommClause_Comm + CommentGroup_List + CompositeLit_Elts + CompositeLit_Type + DeclStmt_Decl + DeferStmt_Call + Ellipsis_Elt + ExprStmt_X + FieldList_List + Field_Comment + Field_Doc + Field_Names + Field_Tag + Field_Type + File_Decls + File_Doc + File_Name + ForStmt_Body + ForStmt_Cond + ForStmt_Init + ForStmt_Post + FuncDecl_Body + FuncDecl_Doc + FuncDecl_Name + FuncDecl_Recv + FuncDecl_Type + FuncLit_Body + FuncLit_Type + FuncType_Params + FuncType_Results + FuncType_TypeParams + GenDecl_Doc + GenDecl_Specs + GoStmt_Call + IfStmt_Body + IfStmt_Cond + IfStmt_Else + IfStmt_Init + ImportSpec_Comment + ImportSpec_Doc + ImportSpec_Name + ImportSpec_Path + IncDecStmt_X + IndexExpr_Index + IndexExpr_X + IndexListExpr_Indices + 
IndexListExpr_X + InterfaceType_Methods + KeyValueExpr_Key + KeyValueExpr_Value + LabeledStmt_Label + LabeledStmt_Stmt + MapType_Key + MapType_Value + ParenExpr_X + RangeStmt_Body + RangeStmt_Key + RangeStmt_Value + RangeStmt_X + ReturnStmt_Results + SelectStmt_Body + SelectorExpr_Sel + SelectorExpr_X + SendStmt_Chan + SendStmt_Value + SliceExpr_High + SliceExpr_Low + SliceExpr_Max + SliceExpr_X + StarExpr_X + StructType_Fields + SwitchStmt_Body + SwitchStmt_Init + SwitchStmt_Tag + TypeAssertExpr_Type + TypeAssertExpr_X + TypeSpec_Comment + TypeSpec_Doc + TypeSpec_Name + TypeSpec_Type + TypeSpec_TypeParams + TypeSwitchStmt_Assign + TypeSwitchStmt_Body + TypeSwitchStmt_Init + UnaryExpr_X + ValueSpec_Comment + ValueSpec_Doc + ValueSpec_Names + ValueSpec_Type + ValueSpec_Values + + maxKind +) + +// Assert that the encoding fits in 7 bits, +// as the inspector relies on this. +// (We are currently at 104.) +var _ = [1 << 7]struct{}{}[maxKind] + +type fieldInfo struct { + nodeType reflect.Type // pointer-to-struct type of ast.Node implementation + name string + index int + fieldType reflect.Type +} + +func info[N ast.Node](fieldName string) fieldInfo { + nodePtrType := reflect.TypeFor[N]() + f, ok := nodePtrType.Elem().FieldByName(fieldName) + if !ok { + panic(fieldName) + } + return fieldInfo{nodePtrType, fieldName, f.Index[0], f.Type} +} + +var fieldInfos = [...]fieldInfo{ + Invalid: {}, + ArrayType_Elt: info[*ast.ArrayType]("Elt"), + ArrayType_Len: info[*ast.ArrayType]("Len"), + AssignStmt_Lhs: info[*ast.AssignStmt]("Lhs"), + AssignStmt_Rhs: info[*ast.AssignStmt]("Rhs"), + BinaryExpr_X: info[*ast.BinaryExpr]("X"), + BinaryExpr_Y: info[*ast.BinaryExpr]("Y"), + BlockStmt_List: info[*ast.BlockStmt]("List"), + BranchStmt_Label: info[*ast.BranchStmt]("Label"), + CallExpr_Args: info[*ast.CallExpr]("Args"), + CallExpr_Fun: info[*ast.CallExpr]("Fun"), + CaseClause_Body: info[*ast.CaseClause]("Body"), + CaseClause_List: info[*ast.CaseClause]("List"), + ChanType_Value: 
info[*ast.ChanType]("Value"), + CommClause_Body: info[*ast.CommClause]("Body"), + CommClause_Comm: info[*ast.CommClause]("Comm"), + CommentGroup_List: info[*ast.CommentGroup]("List"), + CompositeLit_Elts: info[*ast.CompositeLit]("Elts"), + CompositeLit_Type: info[*ast.CompositeLit]("Type"), + DeclStmt_Decl: info[*ast.DeclStmt]("Decl"), + DeferStmt_Call: info[*ast.DeferStmt]("Call"), + Ellipsis_Elt: info[*ast.Ellipsis]("Elt"), + ExprStmt_X: info[*ast.ExprStmt]("X"), + FieldList_List: info[*ast.FieldList]("List"), + Field_Comment: info[*ast.Field]("Comment"), + Field_Doc: info[*ast.Field]("Doc"), + Field_Names: info[*ast.Field]("Names"), + Field_Tag: info[*ast.Field]("Tag"), + Field_Type: info[*ast.Field]("Type"), + File_Decls: info[*ast.File]("Decls"), + File_Doc: info[*ast.File]("Doc"), + File_Name: info[*ast.File]("Name"), + ForStmt_Body: info[*ast.ForStmt]("Body"), + ForStmt_Cond: info[*ast.ForStmt]("Cond"), + ForStmt_Init: info[*ast.ForStmt]("Init"), + ForStmt_Post: info[*ast.ForStmt]("Post"), + FuncDecl_Body: info[*ast.FuncDecl]("Body"), + FuncDecl_Doc: info[*ast.FuncDecl]("Doc"), + FuncDecl_Name: info[*ast.FuncDecl]("Name"), + FuncDecl_Recv: info[*ast.FuncDecl]("Recv"), + FuncDecl_Type: info[*ast.FuncDecl]("Type"), + FuncLit_Body: info[*ast.FuncLit]("Body"), + FuncLit_Type: info[*ast.FuncLit]("Type"), + FuncType_Params: info[*ast.FuncType]("Params"), + FuncType_Results: info[*ast.FuncType]("Results"), + FuncType_TypeParams: info[*ast.FuncType]("TypeParams"), + GenDecl_Doc: info[*ast.GenDecl]("Doc"), + GenDecl_Specs: info[*ast.GenDecl]("Specs"), + GoStmt_Call: info[*ast.GoStmt]("Call"), + IfStmt_Body: info[*ast.IfStmt]("Body"), + IfStmt_Cond: info[*ast.IfStmt]("Cond"), + IfStmt_Else: info[*ast.IfStmt]("Else"), + IfStmt_Init: info[*ast.IfStmt]("Init"), + ImportSpec_Comment: info[*ast.ImportSpec]("Comment"), + ImportSpec_Doc: info[*ast.ImportSpec]("Doc"), + ImportSpec_Name: info[*ast.ImportSpec]("Name"), + ImportSpec_Path: info[*ast.ImportSpec]("Path"), + 
IncDecStmt_X: info[*ast.IncDecStmt]("X"), + IndexExpr_Index: info[*ast.IndexExpr]("Index"), + IndexExpr_X: info[*ast.IndexExpr]("X"), + IndexListExpr_Indices: info[*ast.IndexListExpr]("Indices"), + IndexListExpr_X: info[*ast.IndexListExpr]("X"), + InterfaceType_Methods: info[*ast.InterfaceType]("Methods"), + KeyValueExpr_Key: info[*ast.KeyValueExpr]("Key"), + KeyValueExpr_Value: info[*ast.KeyValueExpr]("Value"), + LabeledStmt_Label: info[*ast.LabeledStmt]("Label"), + LabeledStmt_Stmt: info[*ast.LabeledStmt]("Stmt"), + MapType_Key: info[*ast.MapType]("Key"), + MapType_Value: info[*ast.MapType]("Value"), + ParenExpr_X: info[*ast.ParenExpr]("X"), + RangeStmt_Body: info[*ast.RangeStmt]("Body"), + RangeStmt_Key: info[*ast.RangeStmt]("Key"), + RangeStmt_Value: info[*ast.RangeStmt]("Value"), + RangeStmt_X: info[*ast.RangeStmt]("X"), + ReturnStmt_Results: info[*ast.ReturnStmt]("Results"), + SelectStmt_Body: info[*ast.SelectStmt]("Body"), + SelectorExpr_Sel: info[*ast.SelectorExpr]("Sel"), + SelectorExpr_X: info[*ast.SelectorExpr]("X"), + SendStmt_Chan: info[*ast.SendStmt]("Chan"), + SendStmt_Value: info[*ast.SendStmt]("Value"), + SliceExpr_High: info[*ast.SliceExpr]("High"), + SliceExpr_Low: info[*ast.SliceExpr]("Low"), + SliceExpr_Max: info[*ast.SliceExpr]("Max"), + SliceExpr_X: info[*ast.SliceExpr]("X"), + StarExpr_X: info[*ast.StarExpr]("X"), + StructType_Fields: info[*ast.StructType]("Fields"), + SwitchStmt_Body: info[*ast.SwitchStmt]("Body"), + SwitchStmt_Init: info[*ast.SwitchStmt]("Init"), + SwitchStmt_Tag: info[*ast.SwitchStmt]("Tag"), + TypeAssertExpr_Type: info[*ast.TypeAssertExpr]("Type"), + TypeAssertExpr_X: info[*ast.TypeAssertExpr]("X"), + TypeSpec_Comment: info[*ast.TypeSpec]("Comment"), + TypeSpec_Doc: info[*ast.TypeSpec]("Doc"), + TypeSpec_Name: info[*ast.TypeSpec]("Name"), + TypeSpec_Type: info[*ast.TypeSpec]("Type"), + TypeSpec_TypeParams: info[*ast.TypeSpec]("TypeParams"), + TypeSwitchStmt_Assign: info[*ast.TypeSwitchStmt]("Assign"), + 
TypeSwitchStmt_Body: info[*ast.TypeSwitchStmt]("Body"), + TypeSwitchStmt_Init: info[*ast.TypeSwitchStmt]("Init"), + UnaryExpr_X: info[*ast.UnaryExpr]("X"), + ValueSpec_Comment: info[*ast.ValueSpec]("Comment"), + ValueSpec_Doc: info[*ast.ValueSpec]("Doc"), + ValueSpec_Names: info[*ast.ValueSpec]("Names"), + ValueSpec_Type: info[*ast.ValueSpec]("Type"), + ValueSpec_Values: info[*ast.ValueSpec]("Values"), +} diff --git a/vendor/golang.org/x/tools/go/ast/inspector/cursor.go b/vendor/golang.org/x/tools/go/ast/inspector/cursor.go new file mode 100644 index 000000000..7e72d3c28 --- /dev/null +++ b/vendor/golang.org/x/tools/go/ast/inspector/cursor.go @@ -0,0 +1,502 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package inspector + +import ( + "fmt" + "go/ast" + "go/token" + "iter" + "reflect" + + "golang.org/x/tools/go/ast/edge" +) + +// A Cursor represents an [ast.Node]. It is immutable. +// +// Two Cursors compare equal if they represent the same node. +// +// Call [Inspector.Root] to obtain a valid cursor for the virtual root +// node of the traversal. +// +// Use the following methods to navigate efficiently around the tree: +// - for ancestors, use [Cursor.Parent] and [Cursor.Enclosing]; +// - for children, use [Cursor.Child], [Cursor.Children], +// [Cursor.FirstChild], and [Cursor.LastChild]; +// - for siblings, use [Cursor.PrevSibling] and [Cursor.NextSibling]; +// - for descendants, use [Cursor.FindByPos], [Cursor.FindNode], +// [Cursor.Inspect], and [Cursor.Preorder]. +// +// Use the [Cursor.ChildAt] and [Cursor.ParentEdge] methods for +// information about the edges in a tree: which field (and slice +// element) of the parent node holds the child. 
+type Cursor struct { + in *Inspector + index int32 // index of push node; -1 for virtual root node +} + +// Root returns a cursor for the virtual root node, +// whose children are the files provided to [New]. +// +// Its [Cursor.Node] method return nil. +func (in *Inspector) Root() Cursor { + return Cursor{in, -1} +} + +// At returns the cursor at the specified index in the traversal, +// which must have been obtained from [Cursor.Index] on a Cursor +// belonging to the same Inspector (see [Cursor.Inspector]). +func (in *Inspector) At(index int32) Cursor { + if index < 0 { + panic("negative index") + } + if int(index) >= len(in.events) { + panic("index out of range for this inspector") + } + if in.events[index].index < index { + panic("invalid index") // (a push, not a pop) + } + return Cursor{in, index} +} + +// Inspector returns the cursor's Inspector. +func (c Cursor) Inspector() *Inspector { return c.in } + +// Index returns the index of this cursor position within the package. +// +// Clients should not assume anything about the numeric Index value +// except that it increases monotonically throughout the traversal. +// It is provided for use with [At]. +// +// Index must not be called on the Root node. +func (c Cursor) Index() int32 { + if c.index < 0 { + panic("Index called on Root node") + } + return c.index +} + +// Node returns the node at the current cursor position, +// or nil for the cursor returned by [Inspector.Root]. +func (c Cursor) Node() ast.Node { + if c.index < 0 { + return nil + } + return c.in.events[c.index].node +} + +// String returns information about the cursor's node, if any. +func (c Cursor) String() string { + if c.in == nil { + return "(invalid)" + } + if c.index < 0 { + return "(root)" + } + return reflect.TypeOf(c.Node()).String() +} + +// indices return the [start, end) half-open interval of event indices. 
+func (c Cursor) indices() (int32, int32) { + if c.index < 0 { + return 0, int32(len(c.in.events)) // root: all events + } else { + return c.index, c.in.events[c.index].index + 1 // just one subtree + } +} + +// Preorder returns an iterator over the nodes of the subtree +// represented by c in depth-first order. Each node in the sequence is +// represented by a Cursor that allows access to the Node, but may +// also be used to start a new traversal, or to obtain the stack of +// nodes enclosing the cursor. +// +// The traversal sequence is determined by [ast.Inspect]. The types +// argument, if non-empty, enables type-based filtering of events. The +// function f if is called only for nodes whose type matches an +// element of the types slice. +// +// If you need control over descent into subtrees, +// or need both pre- and post-order notifications, use [Cursor.Inspect] +func (c Cursor) Preorder(types ...ast.Node) iter.Seq[Cursor] { + mask := maskOf(types) + + return func(yield func(Cursor) bool) { + events := c.in.events + + for i, limit := c.indices(); i < limit; { + ev := events[i] + if ev.index > i { // push? + if ev.typ&mask != 0 && !yield(Cursor{c.in, i}) { + break + } + pop := ev.index + if events[pop].typ&mask == 0 { + // Subtree does not contain types: skip. + i = pop + 1 + continue + } + } + i++ + } + } +} + +// Inspect visits the nodes of the subtree represented by c in +// depth-first order. It calls f(n) for each node n before it +// visits n's children. If f returns true, Inspect invokes f +// recursively for each of the non-nil children of the node. +// +// Each node is represented by a Cursor that allows access to the +// Node, but may also be used to start a new traversal, or to obtain +// the stack of nodes enclosing the cursor. +// +// The complete traversal sequence is determined by [ast.Inspect]. +// The types argument, if non-empty, enables type-based filtering of +// events. 
The function f if is called only for nodes whose type +// matches an element of the types slice. +func (c Cursor) Inspect(types []ast.Node, f func(c Cursor) (descend bool)) { + mask := maskOf(types) + events := c.in.events + for i, limit := c.indices(); i < limit; { + ev := events[i] + if ev.index > i { + // push + pop := ev.index + if ev.typ&mask != 0 && !f(Cursor{c.in, i}) || + events[pop].typ&mask == 0 { + // The user opted not to descend, or the + // subtree does not contain types: + // skip past the pop. + i = pop + 1 + continue + } + } + i++ + } +} + +// Enclosing returns an iterator over the nodes enclosing the current +// current node, starting with the Cursor itself. +// +// Enclosing must not be called on the Root node (whose [Cursor.Node] returns nil). +// +// The types argument, if non-empty, enables type-based filtering of +// events: the sequence includes only enclosing nodes whose type +// matches an element of the types slice. +func (c Cursor) Enclosing(types ...ast.Node) iter.Seq[Cursor] { + if c.index < 0 { + panic("Cursor.Enclosing called on Root node") + } + + mask := maskOf(types) + + return func(yield func(Cursor) bool) { + events := c.in.events + for i := c.index; i >= 0; i = events[i].parent { + if events[i].typ&mask != 0 && !yield(Cursor{c.in, i}) { + break + } + } + } +} + +// Parent returns the parent of the current node. +// +// Parent must not be called on the Root node (whose [Cursor.Node] returns nil). +func (c Cursor) Parent() Cursor { + if c.index < 0 { + panic("Cursor.Parent called on Root node") + } + + return Cursor{c.in, c.in.events[c.index].parent} +} + +// ParentEdge returns the identity of the field in the parent node +// that holds this cursor's node, and if it is a list, the index within it. +// +// For example, f(x, y) is a CallExpr whose three children are Idents. +// f has edge kind [edge.CallExpr_Fun] and index -1. +// x and y have kind [edge.CallExpr_Args] and indices 0 and 1, respectively. 
+// +// If called on a child of the Root node, it returns ([edge.Invalid], -1). +// +// ParentEdge must not be called on the Root node (whose [Cursor.Node] returns nil). +func (c Cursor) ParentEdge() (edge.Kind, int) { + if c.index < 0 { + panic("Cursor.ParentEdge called on Root node") + } + events := c.in.events + pop := events[c.index].index + return unpackEdgeKindAndIndex(events[pop].parent) +} + +// ChildAt returns the cursor for the child of the +// current node identified by its edge and index. +// The index must be -1 if the edge.Kind is not a slice. +// The indicated child node must exist. +// +// ChildAt must not be called on the Root node (whose [Cursor.Node] returns nil). +// +// Invariant: c.Parent().ChildAt(c.ParentEdge()) == c. +func (c Cursor) ChildAt(k edge.Kind, idx int) Cursor { + target := packEdgeKindAndIndex(k, idx) + + // Unfortunately there's no shortcut to looping. + events := c.in.events + i := c.index + 1 + for { + pop := events[i].index + if pop < i { + break + } + if events[pop].parent == target { + return Cursor{c.in, i} + } + i = pop + 1 + } + panic(fmt.Sprintf("ChildAt(%v, %d): no such child of %v", k, idx, c)) +} + +// Child returns the cursor for n, which must be a direct child of c's Node. +// +// Child must not be called on the Root node (whose [Cursor.Node] returns nil). 
+func (c Cursor) Child(n ast.Node) Cursor { + if c.index < 0 { + panic("Cursor.Child called on Root node") + } + + if false { + // reference implementation + for child := range c.Children() { + if child.Node() == n { + return child + } + } + + } else { + // optimized implementation + events := c.in.events + for i := c.index + 1; events[i].index > i; i = events[i].index + 1 { + if events[i].node == n { + return Cursor{c.in, i} + } + } + } + panic(fmt.Sprintf("Child(%T): not a child of %v", n, c)) +} + +// NextSibling returns the cursor for the next sibling node in the same list +// (for example, of files, decls, specs, statements, fields, or expressions) as +// the current node. It returns (zero, false) if the node is the last node in +// the list, or is not part of a list. +// +// NextSibling must not be called on the Root node. +// +// See note at [Cursor.Children]. +func (c Cursor) NextSibling() (Cursor, bool) { + if c.index < 0 { + panic("Cursor.NextSibling called on Root node") + } + + events := c.in.events + i := events[c.index].index + 1 // after corresponding pop + if i < int32(len(events)) { + if events[i].index > i { // push? + return Cursor{c.in, i}, true + } + } + return Cursor{}, false +} + +// PrevSibling returns the cursor for the previous sibling node in the +// same list (for example, of files, decls, specs, statements, fields, +// or expressions) as the current node. It returns zero if the node is +// the first node in the list, or is not part of a list. +// +// It must not be called on the Root node. +// +// See note at [Cursor.Children]. +func (c Cursor) PrevSibling() (Cursor, bool) { + if c.index < 0 { + panic("Cursor.PrevSibling called on Root node") + } + + events := c.in.events + i := c.index - 1 + if i >= 0 { + if j := events[i].index; j < i { // pop? + return Cursor{c.in, j}, true + } + } + return Cursor{}, false +} + +// FirstChild returns the first direct child of the current node, +// or zero if it has no children. 
+func (c Cursor) FirstChild() (Cursor, bool) { + events := c.in.events + i := c.index + 1 // i=0 if c is root + if i < int32(len(events)) && events[i].index > i { // push? + return Cursor{c.in, i}, true + } + return Cursor{}, false +} + +// LastChild returns the last direct child of the current node, +// or zero if it has no children. +func (c Cursor) LastChild() (Cursor, bool) { + events := c.in.events + if c.index < 0 { // root? + if len(events) > 0 { + // return push of final event (a pop) + return Cursor{c.in, events[len(events)-1].index}, true + } + } else { + j := events[c.index].index - 1 // before corresponding pop + // Inv: j == c.index if c has no children + // or j is last child's pop. + if j > c.index { // c has children + return Cursor{c.in, events[j].index}, true + } + } + return Cursor{}, false +} + +// Children returns an iterator over the direct children of the +// current node, if any. +// +// When using Children, NextChild, and PrevChild, bear in mind that a +// Node's children may come from different fields, some of which may +// be lists of nodes without a distinguished intervening container +// such as [ast.BlockStmt]. +// +// For example, [ast.CaseClause] has a field List of expressions and a +// field Body of statements, so the children of a CaseClause are a mix +// of expressions and statements. Other nodes that have "uncontained" +// list fields include: +// +// - [ast.ValueSpec] (Names, Values) +// - [ast.CompositeLit] (Type, Elts) +// - [ast.IndexListExpr] (X, Indices) +// - [ast.CallExpr] (Fun, Args) +// - [ast.AssignStmt] (Lhs, Rhs) +// +// So, do not assume that the previous sibling of an ast.Stmt is also +// an ast.Stmt, or if it is, that they are executed sequentially, +// unless you have established that, say, its parent is a BlockStmt +// or its [Cursor.ParentEdge] is [edge.BlockStmt_List]. +// For example, given "for S1; ; S2 {}", the predecessor of S2 is S1, +// even though they are not executed in sequence. 
+func (c Cursor) Children() iter.Seq[Cursor] { + return func(yield func(Cursor) bool) { + c, ok := c.FirstChild() + for ok && yield(c) { + c, ok = c.NextSibling() + } + } +} + +// Contains reports whether c contains or is equal to c2. +// +// Both Cursors must belong to the same [Inspector]; +// neither may be its Root node. +func (c Cursor) Contains(c2 Cursor) bool { + if c.in != c2.in { + panic("different inspectors") + } + events := c.in.events + return c.index <= c2.index && events[c2.index].index <= events[c.index].index +} + +// FindNode returns the cursor for node n if it belongs to the subtree +// rooted at c. It returns zero if n is not found. +func (c Cursor) FindNode(n ast.Node) (Cursor, bool) { + + // FindNode is equivalent to this code, + // but more convenient and 15-20% faster: + if false { + for candidate := range c.Preorder(n) { + if candidate.Node() == n { + return candidate, true + } + } + return Cursor{}, false + } + + // TODO(adonovan): opt: should we assume Node.Pos is accurate + // and combine type-based filtering with position filtering + // like FindByPos? + + mask := maskOf([]ast.Node{n}) + events := c.in.events + + for i, limit := c.indices(); i < limit; i++ { + ev := events[i] + if ev.index > i { // push? + if ev.typ&mask != 0 && ev.node == n { + return Cursor{c.in, i}, true + } + pop := ev.index + if events[pop].typ&mask == 0 { + // Subtree does not contain type of n: skip. + i = pop + } + } + } + return Cursor{}, false +} + +// FindByPos returns the cursor for the innermost node n in the tree +// rooted at c such that n.Pos() <= start && end <= n.End(). +// (For an *ast.File, it uses the bounds n.FileStart-n.FileEnd.) +// +// It returns zero if none is found. +// Precondition: start <= end. +// +// See also [astutil.PathEnclosingInterval], which +// tolerates adjoining whitespace. 
+func (c Cursor) FindByPos(start, end token.Pos) (Cursor, bool) { + if end < start { + panic("end < start") + } + events := c.in.events + + // This algorithm could be implemented using c.Inspect, + // but it is about 2.5x slower. + + best := int32(-1) // push index of latest (=innermost) node containing range + for i, limit := c.indices(); i < limit; i++ { + ev := events[i] + if ev.index > i { // push? + n := ev.node + var nodeEnd token.Pos + if file, ok := n.(*ast.File); ok { + nodeEnd = file.FileEnd + // Note: files may be out of Pos order. + if file.FileStart > start { + i = ev.index // disjoint, after; skip to next file + continue + } + } else { + nodeEnd = n.End() + if n.Pos() > start { + break // disjoint, after; stop + } + } + // Inv: node.{Pos,FileStart} <= start + if end <= nodeEnd { + // node fully contains target range + best = i + } else if nodeEnd < start { + i = ev.index // disjoint, before; skip forward + } + } + } + if best >= 0 { + return Cursor{c.in, best}, true + } + return Cursor{}, false +} diff --git a/vendor/golang.org/x/tools/go/ast/inspector/inspector.go b/vendor/golang.org/x/tools/go/ast/inspector/inspector.go new file mode 100644 index 000000000..a703cdfcf --- /dev/null +++ b/vendor/golang.org/x/tools/go/ast/inspector/inspector.go @@ -0,0 +1,311 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package inspector provides helper functions for traversal over the +// syntax trees of a package, including node filtering by type, and +// materialization of the traversal stack. +// +// During construction, the inspector does a complete traversal and +// builds a list of push/pop events and their node type. Subsequent +// method calls that request a traversal scan this list, rather than walk +// the AST, and perform type filtering using efficient bit sets. 
+// This representation is sometimes called a "balanced parenthesis tree." +// +// Experiments suggest the inspector's traversals are about 2.5x faster +// than [ast.Inspect], but it may take around 5 traversals for this +// benefit to amortize the inspector's construction cost. +// If efficiency is the primary concern, do not use Inspector for +// one-off traversals. +// +// The [Cursor] type provides a more flexible API for efficient +// navigation of syntax trees in all four "cardinal directions". For +// example, traversals may be nested, so you can find each node of +// type A and then search within it for nodes of type B. Or you can +// traverse from a node to its immediate neighbors: its parent, its +// previous and next sibling, or its first and last child. We +// recommend using methods of Cursor in preference to Inspector where +// possible. +package inspector + +// There are four orthogonal features in a traversal: +// 1 type filtering +// 2 pruning +// 3 postorder calls to f +// 4 stack +// Rather than offer all of them in the API, +// only a few combinations are exposed: +// - Preorder is the fastest and has fewest features, +// but is the most commonly needed traversal. +// - Nodes and WithStack both provide pruning and postorder calls, +// even though few clients need it, because supporting two versions +// is not justified. +// More combinations could be supported by expressing them as +// wrappers around a more generic traversal, but this was measured +// and found to degrade performance significantly (30%). + +import ( + "go/ast" + + "golang.org/x/tools/go/ast/edge" +) + +// An Inspector provides methods for inspecting +// (traversing) the syntax trees of a package. 
+type Inspector struct { + events []event +} + +func packEdgeKindAndIndex(ek edge.Kind, index int) int32 { + return int32(uint32(index+1)<<7 | uint32(ek)) +} + +// unpackEdgeKindAndIndex unpacks the edge kind and edge index (within +// an []ast.Node slice) from the parent field of a pop event. +func unpackEdgeKindAndIndex(x int32) (edge.Kind, int) { + // The "parent" field of a pop node holds the + // edge Kind in the lower 7 bits and the index+1 + // in the upper 25. + return edge.Kind(x & 0x7f), int(x>>7) - 1 +} + +// New returns an Inspector for the specified syntax trees. +func New(files []*ast.File) *Inspector { + return &Inspector{traverse(files)} +} + +// An event represents a push or a pop +// of an ast.Node during a traversal. +type event struct { + node ast.Node + typ uint64 // typeOf(node) on push event, or union of typ strictly between push and pop events on pop events + index int32 // index of corresponding push or pop event + parent int32 // index of parent's push node (push nodes only), or packed edge kind/index (pop nodes only) +} + +// TODO: Experiment with storing only the second word of event.node (unsafe.Pointer). +// Type can be recovered from the sole bit in typ. +// [Tried this, wasn't faster. --adonovan] + +// Preorder visits all the nodes of the files supplied to New in +// depth-first order. It calls f(n) for each node n before it visits +// n's children. +// +// The complete traversal sequence is determined by [ast.Inspect]. +// The types argument, if non-empty, enables type-based filtering of +// events. The function f is called only for nodes whose type +// matches an element of the types slice. +// +// The [Cursor.Preorder] method provides a richer alternative interface. +// Example: +// +// for c := range in.Root().Preorder(types) { ... } +func (in *Inspector) Preorder(types []ast.Node, f func(ast.Node)) { + // Because it avoids postorder calls to f, and the pruning + // check, Preorder is almost twice as fast as Nodes. 
The two + // features seem to contribute similar slowdowns (~1.4x each). + + // This function is equivalent to the PreorderSeq call below, + // but to avoid the additional dynamic call (which adds 13-35% + // to the benchmarks), we expand it out. + // + // in.PreorderSeq(types...)(func(n ast.Node) bool { + // f(n) + // return true + // }) + + mask := maskOf(types) + for i := int32(0); i < int32(len(in.events)); { + ev := in.events[i] + if ev.index > i { + // push + if ev.typ&mask != 0 { + f(ev.node) + } + pop := ev.index + if in.events[pop].typ&mask == 0 { + // Subtrees do not contain types: skip them and pop. + i = pop + 1 + continue + } + } + i++ + } +} + +// Nodes visits the nodes of the files supplied to New in depth-first +// order. It calls f(n, true) for each node n before it visits n's +// children. If f returns true, Nodes invokes f recursively for each +// of the non-nil children of the node, followed by a call of +// f(n, false). +// +// The complete traversal sequence is determined by [ast.Inspect]. +// The types argument, if non-empty, enables type-based filtering of +// events. The function f if is called only for nodes whose type +// matches an element of the types slice. +// +// The [Cursor.Inspect] method provides a richer alternative interface. +// Example: +// +// in.Root().Inspect(types, func(c Cursor) bool { +// ... +// return true +// } +func (in *Inspector) Nodes(types []ast.Node, f func(n ast.Node, push bool) (proceed bool)) { + mask := maskOf(types) + for i := int32(0); i < int32(len(in.events)); { + ev := in.events[i] + if ev.index > i { + // push + pop := ev.index + if ev.typ&mask != 0 { + if !f(ev.node, true) { + i = pop + 1 // jump to corresponding pop + 1 + continue + } + } + if in.events[pop].typ&mask == 0 { + // Subtrees do not contain types: skip them. 
+ i = pop + continue + } + } else { + // pop + push := ev.index + if in.events[push].typ&mask != 0 { + f(ev.node, false) + } + } + i++ + } +} + +// WithStack visits nodes in a similar manner to Nodes, but it +// supplies each call to f an additional argument, the current +// traversal stack. The stack's first element is the outermost node, +// an *ast.File; its last is the innermost, n. +// +// The [Cursor.Inspect] method provides a richer alternative interface. +// Example: +// +// in.Root().Inspect(types, func(c Cursor) bool { +// stack := slices.Collect(c.Enclosing()) +// ... +// return true +// }) +func (in *Inspector) WithStack(types []ast.Node, f func(n ast.Node, push bool, stack []ast.Node) (proceed bool)) { + mask := maskOf(types) + var stack []ast.Node + for i := int32(0); i < int32(len(in.events)); { + ev := in.events[i] + if ev.index > i { + // push + pop := ev.index + stack = append(stack, ev.node) + if ev.typ&mask != 0 { + if !f(ev.node, true, stack) { + i = pop + 1 + stack = stack[:len(stack)-1] + continue + } + } + if in.events[pop].typ&mask == 0 { + // Subtrees does not contain types: skip them. + i = pop + continue + } + } else { + // pop + push := ev.index + if in.events[push].typ&mask != 0 { + f(ev.node, false, stack) + } + stack = stack[:len(stack)-1] + } + i++ + } +} + +// traverse builds the table of events representing a traversal. +func traverse(files []*ast.File) []event { + // Preallocate approximate number of events + // based on source file extent of the declarations. + // (We use End-Pos not FileStart-FileEnd to neglect + // the effect of long doc comments.) + // This makes traverse faster by 4x (!). + var extent int + for _, f := range files { + extent += int(f.End() - f.Pos()) + } + // This estimate is based on the net/http package. 
+ capacity := min(extent*33/100, 1e6) // impose some reasonable maximum (1M) + + v := &visitor{ + events: make([]event, 0, capacity), + stack: []item{{index: -1}}, // include an extra event so file nodes have a parent + } + for _, file := range files { + walk(v, edge.Invalid, -1, file) + } + return v.events +} + +type visitor struct { + events []event + stack []item +} + +type item struct { + index int32 // index of current node's push event + parentIndex int32 // index of parent node's push event + typAccum uint64 // accumulated type bits of current node's descendants + edgeKindAndIndex int32 // edge.Kind and index, bit packed +} + +func (v *visitor) push(ek edge.Kind, eindex int, node ast.Node) { + var ( + index = int32(len(v.events)) + parentIndex = v.stack[len(v.stack)-1].index + ) + v.events = append(v.events, event{ + node: node, + parent: parentIndex, + typ: typeOf(node), + index: 0, // (pop index is set later by visitor.pop) + }) + v.stack = append(v.stack, item{ + index: index, + parentIndex: parentIndex, + edgeKindAndIndex: packEdgeKindAndIndex(ek, eindex), + }) + + // 2B nodes ought to be enough for anyone! + if int32(len(v.events)) < 0 { + panic("event index exceeded int32") + } + + // 32M elements in an []ast.Node ought to be enough for anyone! 
+ if ek2, eindex2 := unpackEdgeKindAndIndex(packEdgeKindAndIndex(ek, eindex)); ek2 != ek || eindex2 != eindex { + panic("Node slice index exceeded uint25") + } +} + +func (v *visitor) pop(node ast.Node) { + top := len(v.stack) - 1 + current := v.stack[top] + + push := &v.events[current.index] + parent := &v.stack[top-1] + + push.index = int32(len(v.events)) // make push event refer to pop + parent.typAccum |= current.typAccum | push.typ // accumulate type bits into parent + + v.stack = v.stack[:top] + + v.events = append(v.events, event{ + node: node, + typ: current.typAccum, + index: current.index, + parent: current.edgeKindAndIndex, // see [unpackEdgeKindAndIndex] + }) +} diff --git a/vendor/golang.org/x/tools/go/ast/inspector/iter.go b/vendor/golang.org/x/tools/go/ast/inspector/iter.go new file mode 100644 index 000000000..c576dc70a --- /dev/null +++ b/vendor/golang.org/x/tools/go/ast/inspector/iter.go @@ -0,0 +1,85 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.23 + +package inspector + +import ( + "go/ast" + "iter" +) + +// PreorderSeq returns an iterator that visits all the +// nodes of the files supplied to New in depth-first order. +// It visits each node n before n's children. +// The complete traversal sequence is determined by ast.Inspect. +// +// The types argument, if non-empty, enables type-based +// filtering of events: only nodes whose type matches an +// element of the types slice are included in the sequence. +func (in *Inspector) PreorderSeq(types ...ast.Node) iter.Seq[ast.Node] { + + // This implementation is identical to Preorder, + // except that it supports breaking out of the loop. 
+ + return func(yield func(ast.Node) bool) { + mask := maskOf(types) + for i := int32(0); i < int32(len(in.events)); { + ev := in.events[i] + if ev.index > i { + // push + if ev.typ&mask != 0 { + if !yield(ev.node) { + break + } + } + pop := ev.index + if in.events[pop].typ&mask == 0 { + // Subtrees do not contain types: skip them and pop. + i = pop + 1 + continue + } + } + i++ + } + } +} + +// All[N] returns an iterator over all the nodes of type N. +// N must be a pointer-to-struct type that implements ast.Node. +// +// Example: +// +// for call := range All[*ast.CallExpr](in) { ... } +func All[N interface { + *S + ast.Node +}, S any](in *Inspector) iter.Seq[N] { + + // To avoid additional dynamic call overheads, + // we duplicate rather than call the logic of PreorderSeq. + + mask := typeOf((N)(nil)) + return func(yield func(N) bool) { + for i := int32(0); i < int32(len(in.events)); { + ev := in.events[i] + if ev.index > i { + // push + if ev.typ&mask != 0 { + if !yield(ev.node.(N)) { + break + } + } + pop := ev.index + if in.events[pop].typ&mask == 0 { + // Subtrees do not contain types: skip them and pop. + i = pop + 1 + continue + } + } + i++ + } + } +} diff --git a/vendor/golang.org/x/tools/go/ast/inspector/typeof.go b/vendor/golang.org/x/tools/go/ast/inspector/typeof.go new file mode 100644 index 000000000..9852331a3 --- /dev/null +++ b/vendor/golang.org/x/tools/go/ast/inspector/typeof.go @@ -0,0 +1,227 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package inspector + +// This file defines func typeOf(ast.Node) uint64. 
+// +// The initial map-based implementation was too slow; +// see https://go-review.googlesource.com/c/tools/+/135655/1/go/ast/inspector/inspector.go#196 + +import ( + "go/ast" + "math" +) + +const ( + nArrayType = iota + nAssignStmt + nBadDecl + nBadExpr + nBadStmt + nBasicLit + nBinaryExpr + nBlockStmt + nBranchStmt + nCallExpr + nCaseClause + nChanType + nCommClause + nComment + nCommentGroup + nCompositeLit + nDeclStmt + nDeferStmt + nEllipsis + nEmptyStmt + nExprStmt + nField + nFieldList + nFile + nForStmt + nFuncDecl + nFuncLit + nFuncType + nGenDecl + nGoStmt + nIdent + nIfStmt + nImportSpec + nIncDecStmt + nIndexExpr + nIndexListExpr + nInterfaceType + nKeyValueExpr + nLabeledStmt + nMapType + nPackage + nParenExpr + nRangeStmt + nReturnStmt + nSelectStmt + nSelectorExpr + nSendStmt + nSliceExpr + nStarExpr + nStructType + nSwitchStmt + nTypeAssertExpr + nTypeSpec + nTypeSwitchStmt + nUnaryExpr + nValueSpec +) + +// typeOf returns a distinct single-bit value that represents the type of n. +// +// Various implementations were benchmarked with BenchmarkNewInspector: +// +// GOGC=off +// - type switch 4.9-5.5ms 2.1ms +// - binary search over a sorted list of types 5.5-5.9ms 2.5ms +// - linear scan, frequency-ordered list 5.9-6.1ms 2.7ms +// - linear scan, unordered list 6.4ms 2.7ms +// - hash table 6.5ms 3.1ms +// +// A perfect hash seemed like overkill. +// +// The compiler's switch statement is the clear winner +// as it produces a binary tree in code, +// with constant conditions and good branch prediction. +// (Sadly it is the most verbose in source code.) +// Binary search suffered from poor branch prediction. +func typeOf(n ast.Node) uint64 { + // Fast path: nearly half of all nodes are identifiers. + if _, ok := n.(*ast.Ident); ok { + return 1 << nIdent + } + + // These cases include all nodes encountered by ast.Inspect. 
+ switch n.(type) { + case *ast.ArrayType: + return 1 << nArrayType + case *ast.AssignStmt: + return 1 << nAssignStmt + case *ast.BadDecl: + return 1 << nBadDecl + case *ast.BadExpr: + return 1 << nBadExpr + case *ast.BadStmt: + return 1 << nBadStmt + case *ast.BasicLit: + return 1 << nBasicLit + case *ast.BinaryExpr: + return 1 << nBinaryExpr + case *ast.BlockStmt: + return 1 << nBlockStmt + case *ast.BranchStmt: + return 1 << nBranchStmt + case *ast.CallExpr: + return 1 << nCallExpr + case *ast.CaseClause: + return 1 << nCaseClause + case *ast.ChanType: + return 1 << nChanType + case *ast.CommClause: + return 1 << nCommClause + case *ast.Comment: + return 1 << nComment + case *ast.CommentGroup: + return 1 << nCommentGroup + case *ast.CompositeLit: + return 1 << nCompositeLit + case *ast.DeclStmt: + return 1 << nDeclStmt + case *ast.DeferStmt: + return 1 << nDeferStmt + case *ast.Ellipsis: + return 1 << nEllipsis + case *ast.EmptyStmt: + return 1 << nEmptyStmt + case *ast.ExprStmt: + return 1 << nExprStmt + case *ast.Field: + return 1 << nField + case *ast.FieldList: + return 1 << nFieldList + case *ast.File: + return 1 << nFile + case *ast.ForStmt: + return 1 << nForStmt + case *ast.FuncDecl: + return 1 << nFuncDecl + case *ast.FuncLit: + return 1 << nFuncLit + case *ast.FuncType: + return 1 << nFuncType + case *ast.GenDecl: + return 1 << nGenDecl + case *ast.GoStmt: + return 1 << nGoStmt + case *ast.Ident: + return 1 << nIdent + case *ast.IfStmt: + return 1 << nIfStmt + case *ast.ImportSpec: + return 1 << nImportSpec + case *ast.IncDecStmt: + return 1 << nIncDecStmt + case *ast.IndexExpr: + return 1 << nIndexExpr + case *ast.IndexListExpr: + return 1 << nIndexListExpr + case *ast.InterfaceType: + return 1 << nInterfaceType + case *ast.KeyValueExpr: + return 1 << nKeyValueExpr + case *ast.LabeledStmt: + return 1 << nLabeledStmt + case *ast.MapType: + return 1 << nMapType + case *ast.Package: + return 1 << nPackage + case *ast.ParenExpr: + return 1 << nParenExpr + 
case *ast.RangeStmt: + return 1 << nRangeStmt + case *ast.ReturnStmt: + return 1 << nReturnStmt + case *ast.SelectStmt: + return 1 << nSelectStmt + case *ast.SelectorExpr: + return 1 << nSelectorExpr + case *ast.SendStmt: + return 1 << nSendStmt + case *ast.SliceExpr: + return 1 << nSliceExpr + case *ast.StarExpr: + return 1 << nStarExpr + case *ast.StructType: + return 1 << nStructType + case *ast.SwitchStmt: + return 1 << nSwitchStmt + case *ast.TypeAssertExpr: + return 1 << nTypeAssertExpr + case *ast.TypeSpec: + return 1 << nTypeSpec + case *ast.TypeSwitchStmt: + return 1 << nTypeSwitchStmt + case *ast.UnaryExpr: + return 1 << nUnaryExpr + case *ast.ValueSpec: + return 1 << nValueSpec + } + return 0 +} + +func maskOf(nodes []ast.Node) uint64 { + if len(nodes) == 0 { + return math.MaxUint64 // match all node types + } + var mask uint64 + for _, n := range nodes { + mask |= typeOf(n) + } + return mask +} diff --git a/vendor/golang.org/x/tools/go/ast/inspector/walk.go b/vendor/golang.org/x/tools/go/ast/inspector/walk.go new file mode 100644 index 000000000..5f1c93c8a --- /dev/null +++ b/vendor/golang.org/x/tools/go/ast/inspector/walk.go @@ -0,0 +1,341 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package inspector + +// This file is a fork of ast.Inspect to reduce unnecessary dynamic +// calls and to gather edge information. +// +// Consistency with the original is ensured by TestInspectAllNodes. 
+ +import ( + "fmt" + "go/ast" + + "golang.org/x/tools/go/ast/edge" +) + +func walkList[N ast.Node](v *visitor, ek edge.Kind, list []N) { + for i, node := range list { + walk(v, ek, i, node) + } +} + +func walk(v *visitor, ek edge.Kind, index int, node ast.Node) { + v.push(ek, index, node) + + // walk children + // (the order of the cases matches the order + // of the corresponding node types in ast.go) + switch n := node.(type) { + // Comments and fields + case *ast.Comment: + // nothing to do + + case *ast.CommentGroup: + walkList(v, edge.CommentGroup_List, n.List) + + case *ast.Field: + if n.Doc != nil { + walk(v, edge.Field_Doc, -1, n.Doc) + } + walkList(v, edge.Field_Names, n.Names) + if n.Type != nil { + walk(v, edge.Field_Type, -1, n.Type) + } + if n.Tag != nil { + walk(v, edge.Field_Tag, -1, n.Tag) + } + if n.Comment != nil { + walk(v, edge.Field_Comment, -1, n.Comment) + } + + case *ast.FieldList: + walkList(v, edge.FieldList_List, n.List) + + // Expressions + case *ast.BadExpr, *ast.Ident, *ast.BasicLit: + // nothing to do + + case *ast.Ellipsis: + if n.Elt != nil { + walk(v, edge.Ellipsis_Elt, -1, n.Elt) + } + + case *ast.FuncLit: + walk(v, edge.FuncLit_Type, -1, n.Type) + walk(v, edge.FuncLit_Body, -1, n.Body) + + case *ast.CompositeLit: + if n.Type != nil { + walk(v, edge.CompositeLit_Type, -1, n.Type) + } + walkList(v, edge.CompositeLit_Elts, n.Elts) + + case *ast.ParenExpr: + walk(v, edge.ParenExpr_X, -1, n.X) + + case *ast.SelectorExpr: + walk(v, edge.SelectorExpr_X, -1, n.X) + walk(v, edge.SelectorExpr_Sel, -1, n.Sel) + + case *ast.IndexExpr: + walk(v, edge.IndexExpr_X, -1, n.X) + walk(v, edge.IndexExpr_Index, -1, n.Index) + + case *ast.IndexListExpr: + walk(v, edge.IndexListExpr_X, -1, n.X) + walkList(v, edge.IndexListExpr_Indices, n.Indices) + + case *ast.SliceExpr: + walk(v, edge.SliceExpr_X, -1, n.X) + if n.Low != nil { + walk(v, edge.SliceExpr_Low, -1, n.Low) + } + if n.High != nil { + walk(v, edge.SliceExpr_High, -1, n.High) + } + if n.Max != 
nil { + walk(v, edge.SliceExpr_Max, -1, n.Max) + } + + case *ast.TypeAssertExpr: + walk(v, edge.TypeAssertExpr_X, -1, n.X) + if n.Type != nil { + walk(v, edge.TypeAssertExpr_Type, -1, n.Type) + } + + case *ast.CallExpr: + walk(v, edge.CallExpr_Fun, -1, n.Fun) + walkList(v, edge.CallExpr_Args, n.Args) + + case *ast.StarExpr: + walk(v, edge.StarExpr_X, -1, n.X) + + case *ast.UnaryExpr: + walk(v, edge.UnaryExpr_X, -1, n.X) + + case *ast.BinaryExpr: + walk(v, edge.BinaryExpr_X, -1, n.X) + walk(v, edge.BinaryExpr_Y, -1, n.Y) + + case *ast.KeyValueExpr: + walk(v, edge.KeyValueExpr_Key, -1, n.Key) + walk(v, edge.KeyValueExpr_Value, -1, n.Value) + + // Types + case *ast.ArrayType: + if n.Len != nil { + walk(v, edge.ArrayType_Len, -1, n.Len) + } + walk(v, edge.ArrayType_Elt, -1, n.Elt) + + case *ast.StructType: + walk(v, edge.StructType_Fields, -1, n.Fields) + + case *ast.FuncType: + if n.TypeParams != nil { + walk(v, edge.FuncType_TypeParams, -1, n.TypeParams) + } + if n.Params != nil { + walk(v, edge.FuncType_Params, -1, n.Params) + } + if n.Results != nil { + walk(v, edge.FuncType_Results, -1, n.Results) + } + + case *ast.InterfaceType: + walk(v, edge.InterfaceType_Methods, -1, n.Methods) + + case *ast.MapType: + walk(v, edge.MapType_Key, -1, n.Key) + walk(v, edge.MapType_Value, -1, n.Value) + + case *ast.ChanType: + walk(v, edge.ChanType_Value, -1, n.Value) + + // Statements + case *ast.BadStmt: + // nothing to do + + case *ast.DeclStmt: + walk(v, edge.DeclStmt_Decl, -1, n.Decl) + + case *ast.EmptyStmt: + // nothing to do + + case *ast.LabeledStmt: + walk(v, edge.LabeledStmt_Label, -1, n.Label) + walk(v, edge.LabeledStmt_Stmt, -1, n.Stmt) + + case *ast.ExprStmt: + walk(v, edge.ExprStmt_X, -1, n.X) + + case *ast.SendStmt: + walk(v, edge.SendStmt_Chan, -1, n.Chan) + walk(v, edge.SendStmt_Value, -1, n.Value) + + case *ast.IncDecStmt: + walk(v, edge.IncDecStmt_X, -1, n.X) + + case *ast.AssignStmt: + walkList(v, edge.AssignStmt_Lhs, n.Lhs) + walkList(v, edge.AssignStmt_Rhs, 
n.Rhs) + + case *ast.GoStmt: + walk(v, edge.GoStmt_Call, -1, n.Call) + + case *ast.DeferStmt: + walk(v, edge.DeferStmt_Call, -1, n.Call) + + case *ast.ReturnStmt: + walkList(v, edge.ReturnStmt_Results, n.Results) + + case *ast.BranchStmt: + if n.Label != nil { + walk(v, edge.BranchStmt_Label, -1, n.Label) + } + + case *ast.BlockStmt: + walkList(v, edge.BlockStmt_List, n.List) + + case *ast.IfStmt: + if n.Init != nil { + walk(v, edge.IfStmt_Init, -1, n.Init) + } + walk(v, edge.IfStmt_Cond, -1, n.Cond) + walk(v, edge.IfStmt_Body, -1, n.Body) + if n.Else != nil { + walk(v, edge.IfStmt_Else, -1, n.Else) + } + + case *ast.CaseClause: + walkList(v, edge.CaseClause_List, n.List) + walkList(v, edge.CaseClause_Body, n.Body) + + case *ast.SwitchStmt: + if n.Init != nil { + walk(v, edge.SwitchStmt_Init, -1, n.Init) + } + if n.Tag != nil { + walk(v, edge.SwitchStmt_Tag, -1, n.Tag) + } + walk(v, edge.SwitchStmt_Body, -1, n.Body) + + case *ast.TypeSwitchStmt: + if n.Init != nil { + walk(v, edge.TypeSwitchStmt_Init, -1, n.Init) + } + walk(v, edge.TypeSwitchStmt_Assign, -1, n.Assign) + walk(v, edge.TypeSwitchStmt_Body, -1, n.Body) + + case *ast.CommClause: + if n.Comm != nil { + walk(v, edge.CommClause_Comm, -1, n.Comm) + } + walkList(v, edge.CommClause_Body, n.Body) + + case *ast.SelectStmt: + walk(v, edge.SelectStmt_Body, -1, n.Body) + + case *ast.ForStmt: + if n.Init != nil { + walk(v, edge.ForStmt_Init, -1, n.Init) + } + if n.Cond != nil { + walk(v, edge.ForStmt_Cond, -1, n.Cond) + } + if n.Post != nil { + walk(v, edge.ForStmt_Post, -1, n.Post) + } + walk(v, edge.ForStmt_Body, -1, n.Body) + + case *ast.RangeStmt: + if n.Key != nil { + walk(v, edge.RangeStmt_Key, -1, n.Key) + } + if n.Value != nil { + walk(v, edge.RangeStmt_Value, -1, n.Value) + } + walk(v, edge.RangeStmt_X, -1, n.X) + walk(v, edge.RangeStmt_Body, -1, n.Body) + + // Declarations + case *ast.ImportSpec: + if n.Doc != nil { + walk(v, edge.ImportSpec_Doc, -1, n.Doc) + } + if n.Name != nil { + walk(v, 
edge.ImportSpec_Name, -1, n.Name) + } + walk(v, edge.ImportSpec_Path, -1, n.Path) + if n.Comment != nil { + walk(v, edge.ImportSpec_Comment, -1, n.Comment) + } + + case *ast.ValueSpec: + if n.Doc != nil { + walk(v, edge.ValueSpec_Doc, -1, n.Doc) + } + walkList(v, edge.ValueSpec_Names, n.Names) + if n.Type != nil { + walk(v, edge.ValueSpec_Type, -1, n.Type) + } + walkList(v, edge.ValueSpec_Values, n.Values) + if n.Comment != nil { + walk(v, edge.ValueSpec_Comment, -1, n.Comment) + } + + case *ast.TypeSpec: + if n.Doc != nil { + walk(v, edge.TypeSpec_Doc, -1, n.Doc) + } + walk(v, edge.TypeSpec_Name, -1, n.Name) + if n.TypeParams != nil { + walk(v, edge.TypeSpec_TypeParams, -1, n.TypeParams) + } + walk(v, edge.TypeSpec_Type, -1, n.Type) + if n.Comment != nil { + walk(v, edge.TypeSpec_Comment, -1, n.Comment) + } + + case *ast.BadDecl: + // nothing to do + + case *ast.GenDecl: + if n.Doc != nil { + walk(v, edge.GenDecl_Doc, -1, n.Doc) + } + walkList(v, edge.GenDecl_Specs, n.Specs) + + case *ast.FuncDecl: + if n.Doc != nil { + walk(v, edge.FuncDecl_Doc, -1, n.Doc) + } + if n.Recv != nil { + walk(v, edge.FuncDecl_Recv, -1, n.Recv) + } + walk(v, edge.FuncDecl_Name, -1, n.Name) + walk(v, edge.FuncDecl_Type, -1, n.Type) + if n.Body != nil { + walk(v, edge.FuncDecl_Body, -1, n.Body) + } + + case *ast.File: + if n.Doc != nil { + walk(v, edge.File_Doc, -1, n.Doc) + } + walk(v, edge.File_Name, -1, n.Name) + walkList(v, edge.File_Decls, n.Decls) + // don't walk n.Comments - they have been + // visited already through the individual + // nodes + + default: + // (includes *ast.Package) + panic(fmt.Sprintf("Walk: unexpected node type %T", n)) + } + + v.pop(node) +} diff --git a/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go b/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go index 65fe2628e..7b90bc923 100644 --- a/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go +++ b/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go @@ -193,10 +193,7 @@ func 
Read(in io.Reader, fset *token.FileSet, imports map[string]*types.Package, return pkg, err default: - l := len(data) - if l > 10 { - l = 10 - } + l := min(len(data), 10) return nil, fmt.Errorf("unexpected export data with prefix %q for path %s", string(data[:l]), path) } } diff --git a/vendor/golang.org/x/tools/go/packages/doc.go b/vendor/golang.org/x/tools/go/packages/doc.go index f1931d10e..366aab6b2 100644 --- a/vendor/golang.org/x/tools/go/packages/doc.go +++ b/vendor/golang.org/x/tools/go/packages/doc.go @@ -76,6 +76,8 @@ uninterpreted to Load, so that it can interpret them according to the conventions of the underlying build system. See the Example function for typical usage. +See also [golang.org/x/tools/go/packages/internal/linecount] +for an example application. # The driver protocol diff --git a/vendor/golang.org/x/tools/go/packages/external.go b/vendor/golang.org/x/tools/go/packages/external.go index 91bd62e83..f37bc6510 100644 --- a/vendor/golang.org/x/tools/go/packages/external.go +++ b/vendor/golang.org/x/tools/go/packages/external.go @@ -90,7 +90,7 @@ func findExternalDriver(cfg *Config) driver { const toolPrefix = "GOPACKAGESDRIVER=" tool := "" for _, env := range cfg.Env { - if val := strings.TrimPrefix(env, toolPrefix); val != env { + if val, ok := strings.CutPrefix(env, toolPrefix); ok { tool = val } } diff --git a/vendor/golang.org/x/tools/go/packages/golist.go b/vendor/golang.org/x/tools/go/packages/golist.go index 870271ed5..680a70ca8 100644 --- a/vendor/golang.org/x/tools/go/packages/golist.go +++ b/vendor/golang.org/x/tools/go/packages/golist.go @@ -224,13 +224,22 @@ extractQueries: return response.dr, nil } +// abs returns an absolute representation of path, based on cfg.Dir. +func (cfg *Config) abs(path string) (string, error) { + if filepath.IsAbs(path) { + return path, nil + } + // In case cfg.Dir is relative, pass it to filepath.Abs. 
+ return filepath.Abs(filepath.Join(cfg.Dir, path)) +} + func (state *golistState) runContainsQueries(response *responseDeduper, queries []string) error { for _, query := range queries { // TODO(matloob): Do only one query per directory. fdir := filepath.Dir(query) // Pass absolute path of directory to go list so that it knows to treat it as a directory, // not a package path. - pattern, err := filepath.Abs(fdir) + pattern, err := state.cfg.abs(fdir) if err != nil { return fmt.Errorf("could not determine absolute path of file= query path %q: %v", query, err) } @@ -322,6 +331,7 @@ type jsonPackage struct { ImportPath string Dir string Name string + Target string Export string GoFiles []string CompiledGoFiles []string @@ -354,12 +364,6 @@ type jsonPackage struct { DepsErrors []*packagesinternal.PackageError } -type jsonPackageError struct { - ImportStack []string - Pos string - Err string -} - func otherFiles(p *jsonPackage) [][]string { return [][]string{p.CFiles, p.CXXFiles, p.MFiles, p.HFiles, p.FFiles, p.SFiles, p.SwigFiles, p.SwigCXXFiles, p.SysoFiles} } @@ -506,6 +510,7 @@ func (state *golistState) createDriverResponse(words ...string) (*DriverResponse Name: p.Name, ID: p.ImportPath, Dir: p.Dir, + Target: p.Target, GoFiles: absJoin(p.Dir, p.GoFiles, p.CgoFiles), CompiledGoFiles: absJoin(p.Dir, p.CompiledGoFiles), OtherFiles: absJoin(p.Dir, otherFiles(p)...), @@ -701,9 +706,8 @@ func (state *golistState) getGoVersion() (int, error) { // getPkgPath finds the package path of a directory if it's relative to a root // directory. 
func (state *golistState) getPkgPath(dir string) (string, bool, error) { - absDir, err := filepath.Abs(dir) - if err != nil { - return "", false, err + if !filepath.IsAbs(dir) { + panic("non-absolute dir passed to getPkgPath") } roots, err := state.determineRootDirs() if err != nil { @@ -713,7 +717,7 @@ func (state *golistState) getPkgPath(dir string) (string, bool, error) { for rdir, rpath := range roots { // Make sure that the directory is in the module, // to avoid creating a path relative to another module. - if !strings.HasPrefix(absDir, rdir) { + if !strings.HasPrefix(dir, rdir) { continue } // TODO(matloob): This doesn't properly handle symlinks. @@ -811,6 +815,9 @@ func jsonFlag(cfg *Config, goVersion int) string { if cfg.Mode&NeedEmbedPatterns != 0 { addFields("EmbedPatterns") } + if cfg.Mode&NeedTarget != 0 { + addFields("Target") + } return "-json=" + strings.Join(fields, ",") } @@ -846,8 +853,6 @@ func (state *golistState) cfgInvocation() gocommand.Invocation { cfg := state.cfg return gocommand.Invocation{ BuildFlags: cfg.BuildFlags, - ModFile: cfg.modFile, - ModFlag: cfg.modFlag, CleanEnv: cfg.Env != nil, Env: cfg.Env, Logf: cfg.Logf, diff --git a/vendor/golang.org/x/tools/go/packages/golist_overlay.go b/vendor/golang.org/x/tools/go/packages/golist_overlay.go index d823c474a..d9d5a45cd 100644 --- a/vendor/golang.org/x/tools/go/packages/golist_overlay.go +++ b/vendor/golang.org/x/tools/go/packages/golist_overlay.go @@ -55,7 +55,7 @@ func (state *golistState) determineRootDirsModules() (map[string]string, error) } if mod.Dir != "" && mod.Path != "" { // This is a valid module; add it to the map. 
- absDir, err := filepath.Abs(mod.Dir) + absDir, err := state.cfg.abs(mod.Dir) if err != nil { return nil, err } diff --git a/vendor/golang.org/x/tools/go/packages/loadmode_string.go b/vendor/golang.org/x/tools/go/packages/loadmode_string.go index 969da4c26..69eec9f44 100644 --- a/vendor/golang.org/x/tools/go/packages/loadmode_string.go +++ b/vendor/golang.org/x/tools/go/packages/loadmode_string.go @@ -27,6 +27,7 @@ var modes = [...]struct { {NeedModule, "NeedModule"}, {NeedEmbedFiles, "NeedEmbedFiles"}, {NeedEmbedPatterns, "NeedEmbedPatterns"}, + {NeedTarget, "NeedTarget"}, } func (mode LoadMode) String() string { diff --git a/vendor/golang.org/x/tools/go/packages/packages.go b/vendor/golang.org/x/tools/go/packages/packages.go index 9dedf9777..060ab08ef 100644 --- a/vendor/golang.org/x/tools/go/packages/packages.go +++ b/vendor/golang.org/x/tools/go/packages/packages.go @@ -59,10 +59,10 @@ import ( // // Unfortunately there are a number of open bugs related to // interactions among the LoadMode bits: -// - https://github.com/golang/go/issues/56633 -// - https://github.com/golang/go/issues/56677 -// - https://github.com/golang/go/issues/58726 -// - https://github.com/golang/go/issues/63517 +// - https://go.dev/issue/56633 +// - https://go.dev/issue/56677 +// - https://go.dev/issue/58726 +// - https://go.dev/issue/63517 type LoadMode int const ( @@ -118,6 +118,9 @@ const ( // NeedEmbedPatterns adds EmbedPatterns. NeedEmbedPatterns + // NeedTarget adds Target. + NeedTarget + // Be sure to update loadmode_string.go when adding new items! ) @@ -138,6 +141,8 @@ const ( LoadAllSyntax = LoadSyntax | NeedDeps // Deprecated: NeedExportsFile is a historical misspelling of NeedExportFile. + // + //go:fix inline NeedExportsFile = NeedExportFile ) @@ -158,7 +163,7 @@ type Config struct { // If the user provides a logger, debug logging is enabled. // If the GOPACKAGESDEBUG environment variable is set to true, // but the logger is nil, default to log.Printf. 
- Logf func(format string, args ...interface{}) + Logf func(format string, args ...any) // Dir is the directory in which to run the build system's query tool // that provides information about the packages. @@ -224,14 +229,6 @@ type Config struct { // consistent package metadata about unsaved files. However, // drivers may vary in their level of support for overlays. Overlay map[string][]byte - - // -- Hidden configuration fields only for use in x/tools -- - - // modFile will be used for -modfile in go command invocations. - modFile string - - // modFlag will be used for -modfile in go command invocations. - modFlag string } // Load loads and returns the Go packages named by the given patterns. @@ -479,6 +476,10 @@ type Package struct { // information for the package as provided by the build system. ExportFile string + // Target is the absolute install path of the .a file, for libraries, + // and of the executable file, for binaries. + Target string + // Imports maps import paths appearing in the package's Go source files // to corresponding loaded Packages. 
Imports map[string]*Package @@ -557,15 +558,9 @@ type ModuleError struct { } func init() { - packagesinternal.GetDepsErrors = func(p interface{}) []*packagesinternal.PackageError { + packagesinternal.GetDepsErrors = func(p any) []*packagesinternal.PackageError { return p.(*Package).depsErrors } - packagesinternal.SetModFile = func(config interface{}, value string) { - config.(*Config).modFile = value - } - packagesinternal.SetModFlag = func(config interface{}, value string) { - config.(*Config).modFlag = value - } packagesinternal.TypecheckCgo = int(typecheckCgo) packagesinternal.DepsErrors = int(needInternalDepsErrors) } @@ -732,7 +727,7 @@ func newLoader(cfg *Config) *loader { if debug { ld.Config.Logf = log.Printf } else { - ld.Config.Logf = func(format string, args ...interface{}) {} + ld.Config.Logf = func(format string, args ...any) {} } } if ld.Config.Mode == 0 { diff --git a/vendor/golang.org/x/tools/go/packages/visit.go b/vendor/golang.org/x/tools/go/packages/visit.go index df14ffd94..af6a60d75 100644 --- a/vendor/golang.org/x/tools/go/packages/visit.go +++ b/vendor/golang.org/x/tools/go/packages/visit.go @@ -5,9 +5,11 @@ package packages import ( + "cmp" "fmt" + "iter" "os" - "sort" + "slices" ) // Visit visits all the packages in the import graph whose roots are @@ -16,6 +18,20 @@ import ( // package's dependencies have been visited (postorder). // The boolean result of pre(pkg) determines whether // the imports of package pkg are visited. +// +// Example: +// +// pkgs, err := Load(...) +// if err != nil { ... 
} +// Visit(pkgs, nil, func(pkg *Package) { +// log.Println(pkg) +// }) +// +// In most cases, it is more convenient to use [Postorder]: +// +// for pkg := range Postorder(pkgs) { +// log.Println(pkg) +// } func Visit(pkgs []*Package, pre func(*Package) bool, post func(*Package)) { seen := make(map[*Package]bool) var visit func(*Package) @@ -24,13 +40,8 @@ func Visit(pkgs []*Package, pre func(*Package) bool, post func(*Package)) { seen[pkg] = true if pre == nil || pre(pkg) { - paths := make([]string, 0, len(pkg.Imports)) - for path := range pkg.Imports { - paths = append(paths, path) - } - sort.Strings(paths) // Imports is a map, this makes visit stable - for _, path := range paths { - visit(pkg.Imports[path]) + for _, imp := range sorted(pkg.Imports) { // for determinism + visit(imp) } } @@ -50,7 +61,7 @@ func Visit(pkgs []*Package, pre func(*Package) bool, post func(*Package)) { func PrintErrors(pkgs []*Package) int { var n int errModules := make(map[*Module]bool) - Visit(pkgs, nil, func(pkg *Package) { + for pkg := range Postorder(pkgs) { for _, err := range pkg.Errors { fmt.Fprintln(os.Stderr, err) n++ @@ -63,6 +74,60 @@ func PrintErrors(pkgs []*Package) int { fmt.Fprintln(os.Stderr, mod.Error.Err) n++ } - }) + } return n } + +// Postorder returns an iterator over the the packages in +// the import graph whose roots are pkg. +// Packages are enumerated in dependencies-first order. 
+func Postorder(pkgs []*Package) iter.Seq[*Package] { + return func(yield func(*Package) bool) { + seen := make(map[*Package]bool) + var visit func(*Package) bool + visit = func(pkg *Package) bool { + if !seen[pkg] { + seen[pkg] = true + for _, imp := range sorted(pkg.Imports) { // for determinism + if !visit(imp) { + return false + } + } + if !yield(pkg) { + return false + } + } + return true + } + for _, pkg := range pkgs { + if !visit(pkg) { + break + } + } + } +} + +// -- copied from golang.org.x/tools/gopls/internal/util/moremaps -- + +// sorted returns an iterator over the entries of m in key order. +func sorted[M ~map[K]V, K cmp.Ordered, V any](m M) iter.Seq2[K, V] { + // TODO(adonovan): use maps.Sorted if proposal #68598 is accepted. + return func(yield func(K, V) bool) { + keys := keySlice(m) + slices.Sort(keys) + for _, k := range keys { + if !yield(k, m[k]) { + break + } + } + } +} + +// KeySlice returns the keys of the map M, like slices.Collect(maps.Keys(m)). +func keySlice[M ~map[K]V, K comparable, V any](m M) []K { + r := make([]K, 0, len(m)) + for k := range m { + r = append(r, k) + } + return r +} diff --git a/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go b/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go index 16ed3c178..6c0c74968 100644 --- a/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go +++ b/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go @@ -603,7 +603,7 @@ func Object(pkg *types.Package, p Path) (types.Object, error) { type hasTypeParams interface { TypeParams() *types.TypeParamList } - // abstraction of *types.{Named,TypeParam} + // abstraction of *types.{Alias,Named,TypeParam} type hasObj interface { Obj() *types.TypeName } @@ -698,7 +698,10 @@ func Object(pkg *types.Package, p Path) (types.Object, error) { } else if false && aliases.Enabled() { // The Enabled check is too expensive, so for now we // simply assume that aliases are not enabled. 
- // TODO(adonovan): replace with "if true {" when go1.24 is assured. + // + // Now that go1.24 is assured, we should be able to + // replace this with "if true {", but it causes tests + // to fail. TODO(adonovan): investigate. return nil, fmt.Errorf("cannot apply %q to %s (got %T, want alias)", code, t, t) } diff --git a/vendor/golang.org/x/tools/go/types/typeutil/callee.go b/vendor/golang.org/x/tools/go/types/typeutil/callee.go index 754380351..5f10f56cb 100644 --- a/vendor/golang.org/x/tools/go/types/typeutil/callee.go +++ b/vendor/golang.org/x/tools/go/types/typeutil/callee.go @@ -7,45 +7,23 @@ package typeutil import ( "go/ast" "go/types" - - "golang.org/x/tools/internal/typeparams" + _ "unsafe" // for linkname ) // Callee returns the named target of a function call, if any: // a function, method, builtin, or variable. // // Functions and methods may potentially have type parameters. +// +// Note: for calls of instantiated functions and methods, Callee returns +// the corresponding generic function or method on the generic type. func Callee(info *types.Info, call *ast.CallExpr) types.Object { - fun := ast.Unparen(call.Fun) - - // Look through type instantiation if necessary. - isInstance := false - switch fun.(type) { - case *ast.IndexExpr, *ast.IndexListExpr: - // When extracting the callee from an *IndexExpr, we need to check that - // it is a *types.Func and not a *types.Var. - // Example: Don't match a slice m within the expression `m[0]()`. - isInstance = true - fun, _, _, _ = typeparams.UnpackIndexExpr(fun) - } - - var obj types.Object - switch fun := fun.(type) { - case *ast.Ident: - obj = info.Uses[fun] // type, var, builtin, or declared func - case *ast.SelectorExpr: - if sel, ok := info.Selections[fun]; ok { - obj = sel.Obj() // method or field - } else { - obj = info.Uses[fun.Sel] // qualified identifier? 
- } + obj := info.Uses[usedIdent(info, call.Fun)] + if obj == nil { + return nil } if _, ok := obj.(*types.TypeName); ok { - return nil // T(x) is a conversion, not a call - } - // A Func is required to match instantiations. - if _, ok := obj.(*types.Func); isInstance && !ok { - return nil // Was not a Func. + return nil } return obj } @@ -56,13 +34,52 @@ func Callee(info *types.Info, call *ast.CallExpr) types.Object { // Note: for calls of instantiated functions and methods, StaticCallee returns // the corresponding generic function or method on the generic type. func StaticCallee(info *types.Info, call *ast.CallExpr) *types.Func { - if f, ok := Callee(info, call).(*types.Func); ok && !interfaceMethod(f) { - return f + obj := info.Uses[usedIdent(info, call.Fun)] + fn, _ := obj.(*types.Func) + if fn == nil || interfaceMethod(fn) { + return nil + } + return fn +} + +// usedIdent is the implementation of [internal/typesinternal.UsedIdent]. +// It returns the identifier associated with e. +// See typesinternal.UsedIdent for a fuller description. +// This function should live in typesinternal, but cannot because it would +// create an import cycle. +// +//go:linkname usedIdent golang.org/x/tools/go/types/typeutil.usedIdent +func usedIdent(info *types.Info, e ast.Expr) *ast.Ident { + if info.Types == nil || info.Uses == nil { + panic("one of info.Types or info.Uses is nil; both must be populated") + } + // Look through type instantiation if necessary. + switch d := ast.Unparen(e).(type) { + case *ast.IndexExpr: + if info.Types[d.Index].IsType() { + e = d.X + } + case *ast.IndexListExpr: + e = d.X + } + + switch e := ast.Unparen(e).(type) { + // info.Uses always has the object we want, even for selector expressions. + // We don't need info.Selections. + // See go/types/recording.go:recordSelection. + case *ast.Ident: + return e + case *ast.SelectorExpr: + return e.Sel } return nil } +// interfaceMethod reports whether its argument is a method of an interface. 
+// This function should live in typesinternal, but cannot because it would create an import cycle. +// +//go:linkname interfaceMethod golang.org/x/tools/go/types/typeutil.interfaceMethod func interfaceMethod(f *types.Func) bool { - recv := f.Type().(*types.Signature).Recv() + recv := f.Signature().Recv() return recv != nil && types.IsInterface(recv.Type()) } diff --git a/vendor/golang.org/x/tools/go/types/typeutil/map.go b/vendor/golang.org/x/tools/go/types/typeutil/map.go index 8d824f714..f035a0b6b 100644 --- a/vendor/golang.org/x/tools/go/types/typeutil/map.go +++ b/vendor/golang.org/x/tools/go/types/typeutil/map.go @@ -2,30 +2,34 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// Package typeutil defines various utilities for types, such as Map, -// a mapping from types.Type to any values. -package typeutil // import "golang.org/x/tools/go/types/typeutil" +// Package typeutil defines various utilities for types, such as [Map], +// a hash table that maps [types.Type] to any value. +package typeutil import ( "bytes" "fmt" "go/types" - "reflect" + "hash/maphash" "golang.org/x/tools/internal/typeparams" ) // Map is a hash-table-based mapping from types (types.Type) to -// arbitrary any values. The concrete types that implement +// arbitrary values. The concrete types that implement // the Type interface are pointers. Since they are not canonicalized, // == cannot be used to check for equivalence, and thus we cannot // simply use a Go map. // // Just as with map[K]V, a nil *Map is a valid empty map. // -// Not thread-safe. +// Read-only map operations ([Map.At], [Map.Len], and so on) may +// safely be called concurrently. +// +// TODO(adonovan): deprecate in favor of https://go.dev/issues/69420 +// and 69559, if the latter proposals for a generic hash-map type and +// a types.Hash function are accepted. 
type Map struct { - hasher Hasher // shared by many Maps table map[uint32][]entry // maps hash to bucket; entry.key==nil means unused length int // number of map entries } @@ -36,35 +40,17 @@ type entry struct { value any } -// SetHasher sets the hasher used by Map. -// -// All Hashers are functionally equivalent but contain internal state -// used to cache the results of hashing previously seen types. -// -// A single Hasher created by MakeHasher() may be shared among many -// Maps. This is recommended if the instances have many keys in -// common, as it will amortize the cost of hash computation. -// -// A Hasher may grow without bound as new types are seen. Even when a -// type is deleted from the map, the Hasher never shrinks, since other -// types in the map may reference the deleted type indirectly. +// SetHasher has no effect. // -// Hashers are not thread-safe, and read-only operations such as -// Map.Lookup require updates to the hasher, so a full Mutex lock (not a -// read-lock) is require around all Map operations if a shared -// hasher is accessed from multiple threads. -// -// If SetHasher is not called, the Map will create a private hasher at -// the first call to Insert. -func (m *Map) SetHasher(hasher Hasher) { - m.hasher = hasher -} +// It is a relic of an optimization that is no longer profitable. Do +// not use [Hasher], [MakeHasher], or [SetHasher] in new code. +func (m *Map) SetHasher(Hasher) {} // Delete removes the entry with the given key, if any. // It returns true if the entry was found. func (m *Map) Delete(key types.Type) bool { if m != nil && m.table != nil { - hash := m.hasher.Hash(key) + hash := hash(key) bucket := m.table[hash] for i, e := range bucket { if e.key != nil && types.Identical(key, e.key) { @@ -83,7 +69,7 @@ func (m *Map) Delete(key types.Type) bool { // The result is nil if the entry is not present. 
func (m *Map) At(key types.Type) any { if m != nil && m.table != nil { - for _, e := range m.table[m.hasher.Hash(key)] { + for _, e := range m.table[hash(key)] { if e.key != nil && types.Identical(key, e.key) { return e.value } @@ -96,7 +82,7 @@ func (m *Map) At(key types.Type) any { // and returns the previous entry, if any. func (m *Map) Set(key types.Type, value any) (prev any) { if m.table != nil { - hash := m.hasher.Hash(key) + hash := hash(key) bucket := m.table[hash] var hole *entry for i, e := range bucket { @@ -115,10 +101,7 @@ func (m *Map) Set(key types.Type, value any) (prev any) { m.table[hash] = append(bucket, entry{key, value}) } } else { - if m.hasher.memo == nil { - m.hasher = MakeHasher() - } - hash := m.hasher.Hash(key) + hash := hash(key) m.table = map[uint32][]entry{hash: {entry{key, value}}} } @@ -195,53 +178,35 @@ func (m *Map) KeysString() string { return m.toString(false) } -//////////////////////////////////////////////////////////////////////// -// Hasher - -// A Hasher maps each type to its hash value. -// For efficiency, a hasher uses memoization; thus its memory -// footprint grows monotonically over time. -// Hashers are not thread-safe. -// Hashers have reference semantics. -// Call MakeHasher to create a Hasher. -type Hasher struct { - memo map[types.Type]uint32 - - // ptrMap records pointer identity. - ptrMap map[any]uint32 - - // sigTParams holds type parameters from the signature being hashed. - // Signatures are considered identical modulo renaming of type parameters, so - // within the scope of a signature type the identity of the signature's type - // parameters is just their index. - // - // Since the language does not currently support referring to uninstantiated - // generic types or functions, and instantiated signatures do not have type - // parameter lists, we should never encounter a second non-empty type - // parameter list when hashing a generic signature. 
- sigTParams *types.TypeParamList -} +// -- Hasher -- -// MakeHasher returns a new Hasher instance. -func MakeHasher() Hasher { - return Hasher{ - memo: make(map[types.Type]uint32), - ptrMap: make(map[any]uint32), - sigTParams: nil, - } +// hash returns the hash of type t. +// TODO(adonovan): replace by types.Hash when Go proposal #69420 is accepted. +func hash(t types.Type) uint32 { + return theHasher.Hash(t) } +// A Hasher provides a [Hasher.Hash] method to map a type to its hash value. +// Hashers are stateless, and all are equivalent. +type Hasher struct{} + +var theHasher Hasher + +// MakeHasher returns Hasher{}. +// Hashers are stateless; all are equivalent. +func MakeHasher() Hasher { return theHasher } + // Hash computes a hash value for the given type t such that // Identical(t, t') => Hash(t) == Hash(t'). func (h Hasher) Hash(t types.Type) uint32 { - hash, ok := h.memo[t] - if !ok { - hash = h.hashFor(t) - h.memo[t] = hash - } - return hash + return hasher{inGenericSig: false}.hash(t) } +// hasher holds the state of a single Hash traversal: whether we are +// inside the signature of a generic function; this is used to +// optimize [hasher.hashTypeParam]. +type hasher struct{ inGenericSig bool } + // hashString computes the Fowler–Noll–Vo hash of s. func hashString(s string) uint32 { var h uint32 @@ -252,21 +217,21 @@ func hashString(s string) uint32 { return h } -// hashFor computes the hash of t. -func (h Hasher) hashFor(t types.Type) uint32 { +// hash computes the hash of t. +func (h hasher) hash(t types.Type) uint32 { // See Identical for rationale. 
switch t := t.(type) { case *types.Basic: return uint32(t.Kind()) case *types.Alias: - return h.Hash(types.Unalias(t)) + return h.hash(types.Unalias(t)) case *types.Array: - return 9043 + 2*uint32(t.Len()) + 3*h.Hash(t.Elem()) + return 9043 + 2*uint32(t.Len()) + 3*h.hash(t.Elem()) case *types.Slice: - return 9049 + 2*h.Hash(t.Elem()) + return 9049 + 2*h.hash(t.Elem()) case *types.Struct: var hash uint32 = 9059 @@ -277,12 +242,12 @@ func (h Hasher) hashFor(t types.Type) uint32 { } hash += hashString(t.Tag(i)) hash += hashString(f.Name()) // (ignore f.Pkg) - hash += h.Hash(f.Type()) + hash += h.hash(f.Type()) } return hash case *types.Pointer: - return 9067 + 2*h.Hash(t.Elem()) + return 9067 + 2*h.hash(t.Elem()) case *types.Signature: var hash uint32 = 9091 @@ -290,33 +255,14 @@ func (h Hasher) hashFor(t types.Type) uint32 { hash *= 8863 } - // Use a separate hasher for types inside of the signature, where type - // parameter identity is modified to be (index, constraint). We must use a - // new memo for this hasher as type identity may be affected by this - // masking. For example, in func[T any](*T), the identity of *T depends on - // whether we are mapping the argument in isolation, or recursively as part - // of hashing the signature. - // - // We should never encounter a generic signature while hashing another - // generic signature, but defensively set sigTParams only if h.mask is - // unset. tparams := t.TypeParams() - if h.sigTParams == nil && tparams.Len() != 0 { - h = Hasher{ - // There may be something more efficient than discarding the existing - // memo, but it would require detecting whether types are 'tainted' by - // references to type parameters. - memo: make(map[types.Type]uint32), - // Re-using ptrMap ensures that pointer identity is preserved in this - // hasher. 
- ptrMap: h.ptrMap, - sigTParams: tparams, - } - } + if n := tparams.Len(); n > 0 { + h.inGenericSig = true // affects constraints, params, and results - for i := 0; i < tparams.Len(); i++ { - tparam := tparams.At(i) - hash += 7 * h.Hash(tparam.Constraint()) + for i := range n { + tparam := tparams.At(i) + hash += 7 * h.hash(tparam.Constraint()) + } } return hash + 3*h.hashTuple(t.Params()) + 5*h.hashTuple(t.Results()) @@ -350,17 +296,17 @@ func (h Hasher) hashFor(t types.Type) uint32 { return hash case *types.Map: - return 9109 + 2*h.Hash(t.Key()) + 3*h.Hash(t.Elem()) + return 9109 + 2*h.hash(t.Key()) + 3*h.hash(t.Elem()) case *types.Chan: - return 9127 + 2*uint32(t.Dir()) + 3*h.Hash(t.Elem()) + return 9127 + 2*uint32(t.Dir()) + 3*h.hash(t.Elem()) case *types.Named: - hash := h.hashPtr(t.Obj()) + hash := h.hashTypeName(t.Obj()) targs := t.TypeArgs() for i := 0; i < targs.Len(); i++ { targ := targs.At(i) - hash += 2 * h.Hash(targ) + hash += 2 * h.hash(targ) } return hash @@ -374,17 +320,17 @@ func (h Hasher) hashFor(t types.Type) uint32 { panic(fmt.Sprintf("%T: %v", t, t)) } -func (h Hasher) hashTuple(tuple *types.Tuple) uint32 { +func (h hasher) hashTuple(tuple *types.Tuple) uint32 { // See go/types.identicalTypes for rationale. n := tuple.Len() hash := 9137 + 2*uint32(n) - for i := 0; i < n; i++ { - hash += 3 * h.Hash(tuple.At(i).Type()) + for i := range n { + hash += 3 * h.hash(tuple.At(i).Type()) } return hash } -func (h Hasher) hashUnion(t *types.Union) uint32 { +func (h hasher) hashUnion(t *types.Union) uint32 { // Hash type restrictions. terms, err := typeparams.UnionTermSet(t) // if err != nil t has invalid type restrictions. 
Fall back on a non-zero @@ -395,11 +341,11 @@ func (h Hasher) hashUnion(t *types.Union) uint32 { return h.hashTermSet(terms) } -func (h Hasher) hashTermSet(terms []*types.Term) uint32 { +func (h hasher) hashTermSet(terms []*types.Term) uint32 { hash := 9157 + 2*uint32(len(terms)) for _, term := range terms { // term order is not significant. - termHash := h.Hash(term.Type()) + termHash := h.hash(term.Type()) if term.Tilde() { termHash *= 9161 } @@ -408,36 +354,33 @@ func (h Hasher) hashTermSet(terms []*types.Term) uint32 { return hash } -// hashTypeParam returns a hash of the type parameter t, with a hash value -// depending on whether t is contained in h.sigTParams. -// -// If h.sigTParams is set and contains t, then we are in the process of hashing -// a signature, and the hash value of t must depend only on t's index and -// constraint: signatures are considered identical modulo type parameter -// renaming. To avoid infinite recursion, we only hash the type parameter -// index, and rely on types.Identical to handle signatures where constraints -// are not identical. -// -// Otherwise the hash of t depends only on t's pointer identity. -func (h Hasher) hashTypeParam(t *types.TypeParam) uint32 { - if h.sigTParams != nil { - i := t.Index() - if i >= 0 && i < h.sigTParams.Len() && t == h.sigTParams.At(i) { - return 9173 + 3*uint32(i) - } +// hashTypeParam returns the hash of a type parameter. +func (h hasher) hashTypeParam(t *types.TypeParam) uint32 { + // Within the signature of a generic function, TypeParams are + // identical if they have the same index and constraint, so we + // hash them based on index. + // + // When we are outside a generic function, free TypeParams are + // identical iff they are the same object, so we can use a + // more discriminating hash consistent with object identity. + // This optimization saves [Map] about 4% when hashing all the + // types.Info.Types in the forward closure of net/http. 
+ if !h.inGenericSig { + // Optimization: outside a generic function signature, + // use a more discrimating hash consistent with object identity. + return h.hashTypeName(t.Obj()) } - return h.hashPtr(t.Obj()) + return 9173 + 3*uint32(t.Index()) } -// hashPtr hashes the pointer identity of ptr. It uses h.ptrMap to ensure that -// pointers values are not dependent on the GC. -func (h Hasher) hashPtr(ptr any) uint32 { - if hash, ok := h.ptrMap[ptr]; ok { - return hash - } - hash := uint32(reflect.ValueOf(ptr).Pointer()) - h.ptrMap[ptr] = hash - return hash +var theSeed = maphash.MakeSeed() + +// hashTypeName hashes the pointer of tname. +func (hasher) hashTypeName(tname *types.TypeName) uint32 { + // Since types.Identical uses == to compare TypeNames, + // the Hash function uses maphash.Comparable. + hash := maphash.Comparable(theSeed, tname) + return uint32(hash ^ (hash >> 32)) } // shallowHash computes a hash of t without looking at any of its @@ -454,7 +397,7 @@ func (h Hasher) hashPtr(ptr any) uint32 { // include m itself; there is no mention of the named type X that // might help us break the cycle. // (See comment in go/types.identical, case *Interface, for more.) 
-func (h Hasher) shallowHash(t types.Type) uint32 { +func (h hasher) shallowHash(t types.Type) uint32 { // t is the type of an interface method (Signature), // its params or results (Tuples), or their immediate // elements (mostly Slice, Pointer, Basic, Named), @@ -475,7 +418,7 @@ func (h Hasher) shallowHash(t types.Type) uint32 { case *types.Tuple: n := t.Len() hash := 9137 + 2*uint32(n) - for i := 0; i < n; i++ { + for i := range n { hash += 53471161 * h.shallowHash(t.At(i).Type()) } return hash @@ -508,10 +451,10 @@ func (h Hasher) shallowHash(t types.Type) uint32 { return 9127 case *types.Named: - return h.hashPtr(t.Obj()) + return h.hashTypeName(t.Obj()) case *types.TypeParam: - return h.hashPtr(t.Obj()) + return h.hashTypeParam(t) } panic(fmt.Sprintf("shallowHash: %T: %v", t, t)) } diff --git a/vendor/golang.org/x/tools/imports/forward.go b/vendor/golang.org/x/tools/imports/forward.go index cb6db8893..22ae77772 100644 --- a/vendor/golang.org/x/tools/imports/forward.go +++ b/vendor/golang.org/x/tools/imports/forward.go @@ -69,9 +69,3 @@ func Process(filename string, src []byte, opt *Options) ([]byte, error) { } return intimp.Process(filename, src, intopt) } - -// VendorlessPath returns the devendorized version of the import path ipath. -// For example, VendorlessPath("foo/bar/vendor/a/b") returns "a/b". -func VendorlessPath(ipath string) string { - return intimp.VendorlessPath(ipath) -} diff --git a/vendor/golang.org/x/tools/internal/event/core/event.go b/vendor/golang.org/x/tools/internal/event/core/event.go index a6cf0e64a..ade5d1e79 100644 --- a/vendor/golang.org/x/tools/internal/event/core/event.go +++ b/vendor/golang.org/x/tools/internal/event/core/event.go @@ -28,11 +28,6 @@ type Event struct { dynamic []label.Label // dynamically sized storage for remaining labels } -// eventLabelMap implements label.Map for a the labels of an Event. 
-type eventLabelMap struct { - event Event -} - func (ev Event) At() time.Time { return ev.at } func (ev Event) Format(f fmt.State, r rune) { diff --git a/vendor/golang.org/x/tools/internal/event/keys/keys.go b/vendor/golang.org/x/tools/internal/event/keys/keys.go index a02206e30..4cfa51b61 100644 --- a/vendor/golang.org/x/tools/internal/event/keys/keys.go +++ b/vendor/golang.org/x/tools/internal/event/keys/keys.go @@ -32,7 +32,7 @@ func (k *Value) Format(w io.Writer, buf []byte, l label.Label) { } // Get can be used to get a label for the key from a label.Map. -func (k *Value) Get(lm label.Map) interface{} { +func (k *Value) Get(lm label.Map) any { if t := lm.Find(k); t.Valid() { return k.From(t) } @@ -40,10 +40,10 @@ func (k *Value) Get(lm label.Map) interface{} { } // From can be used to get a value from a Label. -func (k *Value) From(t label.Label) interface{} { return t.UnpackValue() } +func (k *Value) From(t label.Label) any { return t.UnpackValue() } // Of creates a new Label with this key and the supplied value. -func (k *Value) Of(value interface{}) label.Label { return label.OfValue(k, value) } +func (k *Value) Of(value any) label.Label { return label.OfValue(k, value) } // Tag represents a key for tagging labels that have no value. // These are used when the existence of the label is the entire information it diff --git a/vendor/golang.org/x/tools/internal/event/label/label.go b/vendor/golang.org/x/tools/internal/event/label/label.go index 0f526e1f9..92a391057 100644 --- a/vendor/golang.org/x/tools/internal/event/label/label.go +++ b/vendor/golang.org/x/tools/internal/event/label/label.go @@ -8,6 +8,7 @@ import ( "fmt" "io" "reflect" + "slices" "unsafe" ) @@ -32,7 +33,7 @@ type Key interface { type Label struct { key Key packed uint64 - untyped interface{} + untyped any } // Map is the interface to a collection of Labels indexed by key. @@ -76,13 +77,13 @@ type mapChain struct { // OfValue creates a new label from the key and value. 
// This method is for implementing new key types, label creation should // normally be done with the Of method of the key. -func OfValue(k Key, value interface{}) Label { return Label{key: k, untyped: value} } +func OfValue(k Key, value any) Label { return Label{key: k, untyped: value} } // UnpackValue assumes the label was built using LabelOfValue and returns the value // that was passed to that constructor. // This method is for implementing new key types, for type safety normal // access should be done with the From method of the key. -func (t Label) UnpackValue() interface{} { return t.untyped } +func (t Label) UnpackValue() any { return t.untyped } // Of64 creates a new label from a key and a uint64. This is often // used for non uint64 values that can be packed into a uint64. @@ -154,10 +155,8 @@ func (f *filter) Valid(index int) bool { func (f *filter) Label(index int) Label { l := f.underlying.Label(index) - for _, f := range f.keys { - if l.Key() == f { - return Label{} - } + if slices.Contains(f.keys, l.Key()) { + return Label{} } return l } diff --git a/vendor/golang.org/x/tools/internal/gcimporter/bimport.go b/vendor/golang.org/x/tools/internal/gcimporter/bimport.go index d79a605ed..734c46198 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/bimport.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/bimport.go @@ -14,7 +14,7 @@ import ( "sync" ) -func errorf(format string, args ...interface{}) { +func errorf(format string, args ...any) { panic(fmt.Sprintf(format, args...)) } diff --git a/vendor/golang.org/x/tools/internal/gcimporter/exportdata.go b/vendor/golang.org/x/tools/internal/gcimporter/exportdata.go index 6f5d8a213..5662a311d 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/exportdata.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/exportdata.go @@ -2,52 +2,183 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-// This file is a copy of $GOROOT/src/go/internal/gcimporter/exportdata.go. - -// This file implements FindExportData. +// This file should be kept in sync with $GOROOT/src/internal/exportdata/exportdata.go. +// This file also additionally implements FindExportData for gcexportdata.NewReader. package gcimporter import ( "bufio" + "bytes" + "errors" "fmt" + "go/build" "io" - "strconv" + "os" + "os/exec" + "path/filepath" "strings" + "sync" ) -func readGopackHeader(r *bufio.Reader) (name string, size int64, err error) { - // See $GOROOT/include/ar.h. - hdr := make([]byte, 16+12+6+6+8+10+2) - _, err = io.ReadFull(r, hdr) +// FindExportData positions the reader r at the beginning of the +// export data section of an underlying cmd/compile created archive +// file by reading from it. The reader must be positioned at the +// start of the file before calling this function. +// This returns the length of the export data in bytes. +// +// This function is needed by [gcexportdata.Read], which must +// accept inputs produced by the last two releases of cmd/compile, +// plus tip. +func FindExportData(r *bufio.Reader) (size int64, err error) { + arsize, err := FindPackageDefinition(r) + if err != nil { + return + } + size = int64(arsize) + + objapi, headers, err := ReadObjectHeaders(r) + if err != nil { + return + } + size -= int64(len(objapi)) + for _, h := range headers { + size -= int64(len(h)) + } + + // Check for the binary export data section header "$$B\n". 
+ // TODO(taking): Unify with ReadExportDataHeader so that it stops at the 'u' instead of reading + line, err := r.ReadSlice('\n') if err != nil { return } - // leave for debugging - if false { - fmt.Printf("header: %s", hdr) + hdr := string(line) + if hdr != "$$B\n" { + err = fmt.Errorf("unknown export data header: %q", hdr) + return } - s := strings.TrimSpace(string(hdr[16+12+6+6+8:][:10])) - length, err := strconv.Atoi(s) - size = int64(length) - if err != nil || hdr[len(hdr)-2] != '`' || hdr[len(hdr)-1] != '\n' { - err = fmt.Errorf("invalid archive header") + size -= int64(len(hdr)) + + // For files with a binary export data header "$$B\n", + // these are always terminated by an end-of-section marker "\n$$\n". + // So the last bytes must always be this constant. + // + // The end-of-section marker is not a part of the export data itself. + // Do not include these in size. + // + // It would be nice to have sanity check that the final bytes after + // the export data are indeed the end-of-section marker. The split + // of gcexportdata.NewReader and gcexportdata.Read make checking this + // ugly so gcimporter gives up enforcing this. The compiler and go/types + // importer do enforce this, which seems good enough. + const endofsection = "\n$$\n" + size -= int64(len(endofsection)) + + if size < 0 { + err = fmt.Errorf("invalid size (%d) in the archive file: %d bytes remain without section headers (recompile package)", arsize, size) return } - name = strings.TrimSpace(string(hdr[:16])) + return } -// FindExportData positions the reader r at the beginning of the -// export data section of an underlying cmd/compile created archive -// file by reading from it. The reader must be positioned at the -// start of the file before calling this function. -// The size result is the length of the export data in bytes. +// ReadUnified reads the contents of the unified export data from a reader r +// that contains the contents of a GC-created archive file. 
// -// This function is needed by [gcexportdata.Read], which must -// accept inputs produced by the last two releases of cmd/compile, -// plus tip. -func FindExportData(r *bufio.Reader) (size int64, err error) { +// On success, the reader will be positioned after the end-of-section marker "\n$$\n". +// +// Supported GC-created archive files have 4 layers of nesting: +// - An archive file containing a package definition file. +// - The package definition file contains headers followed by a data section. +// Headers are lines (≤ 4kb) that do not start with "$$". +// - The data section starts with "$$B\n" followed by export data followed +// by an end of section marker "\n$$\n". (The section start "$$\n" is no +// longer supported.) +// - The export data starts with a format byte ('u') followed by the in +// the given format. (See ReadExportDataHeader for older formats.) +// +// Putting this together, the bytes in a GC-created archive files are expected +// to look like the following. +// See cmd/internal/archive for more details on ar file headers. +// +// | \n | ar file signature +// | __.PKGDEF...size...\n | ar header for __.PKGDEF including size. +// | go object <...>\n | objabi header +// | \n | other headers such as build id +// | $$B\n | binary format marker +// | u\n | unified export +// | $$\n | end-of-section marker +// | [optional padding] | padding byte (0x0A) if size is odd +// | [ar file header] | other ar files +// | [ar file data] | +func ReadUnified(r *bufio.Reader) (data []byte, err error) { + // We historically guaranteed headers at the default buffer size (4096) work. + // This ensures we can use ReadSlice throughout. 
+ const minBufferSize = 4096 + r = bufio.NewReaderSize(r, minBufferSize) + + size, err := FindPackageDefinition(r) + if err != nil { + return + } + n := size + + objapi, headers, err := ReadObjectHeaders(r) + if err != nil { + return + } + n -= len(objapi) + for _, h := range headers { + n -= len(h) + } + + hdrlen, err := ReadExportDataHeader(r) + if err != nil { + return + } + n -= hdrlen + + // size also includes the end of section marker. Remove that many bytes from the end. + const marker = "\n$$\n" + n -= len(marker) + + if n < 0 { + err = fmt.Errorf("invalid size (%d) in the archive file: %d bytes remain without section headers (recompile package)", size, n) + return + } + + // Read n bytes from buf. + data = make([]byte, n) + _, err = io.ReadFull(r, data) + if err != nil { + return + } + + // Check for marker at the end. + var suffix [len(marker)]byte + _, err = io.ReadFull(r, suffix[:]) + if err != nil { + return + } + if s := string(suffix[:]); s != marker { + err = fmt.Errorf("read %q instead of end-of-section marker (%q)", s, marker) + return + } + + return +} + +// FindPackageDefinition positions the reader r at the beginning of a package +// definition file ("__.PKGDEF") within a GC-created archive by reading +// from it, and returns the size of the package definition file in the archive. +// +// The reader must be positioned at the start of the archive file before calling +// this function, and "__.PKGDEF" is assumed to be the first file in the archive. +// +// See cmd/internal/archive for details on the archive format. +func FindPackageDefinition(r *bufio.Reader) (size int, err error) { + // Uses ReadSlice to limit risk of malformed inputs. + // Read first line to make sure this is an object file. line, err := r.ReadSlice('\n') if err != nil { @@ -61,56 +192,230 @@ func FindExportData(r *bufio.Reader) (size int64, err error) { return } - // Archive file. Scan to __.PKGDEF. 
- var name string - if name, size, err = readGopackHeader(r); err != nil { + // package export block should be first + size = readArchiveHeader(r, "__.PKGDEF") + if size <= 0 { + err = fmt.Errorf("not a package file") return } - arsize := size - // First entry should be __.PKGDEF. - if name != "__.PKGDEF" { - err = fmt.Errorf("go archive is missing __.PKGDEF") - return - } + return +} + +// ReadObjectHeaders reads object headers from the reader. Object headers are +// lines that do not start with an end-of-section marker "$$". The first header +// is the objabi header. On success, the reader will be positioned at the beginning +// of the end-of-section marker. +// +// It returns an error if any header does not fit in r.Size() bytes. +func ReadObjectHeaders(r *bufio.Reader) (objapi string, headers []string, err error) { + // line is a temporary buffer for headers. + // Use bounded reads (ReadSlice, Peek) to limit risk of malformed inputs. + var line []byte - // Read first line of __.PKGDEF data, so that line - // is once again the first line of the input. + // objapi header should be the first line if line, err = r.ReadSlice('\n'); err != nil { err = fmt.Errorf("can't find export data (%v)", err) return } - size -= int64(len(line)) + objapi = string(line) - // Now at __.PKGDEF in archive or still at beginning of file. - // Either way, line should begin with "go object ". - if !strings.HasPrefix(string(line), "go object ") { - err = fmt.Errorf("not a Go object file") + // objapi header begins with "go object ". + if !strings.HasPrefix(objapi, "go object ") { + err = fmt.Errorf("not a go object file: %s", objapi) return } - // Skip over object headers to get to the export data section header "$$B\n". - // Object headers are lines that do not start with '$'. 
- for line[0] != '$' { - if line, err = r.ReadSlice('\n'); err != nil { - err = fmt.Errorf("can't find export data (%v)", err) + // process remaining object header lines + for { + // check for an end of section marker "$$" + line, err = r.Peek(2) + if err != nil { + return + } + if string(line) == "$$" { + return // stop + } + + // read next header + line, err = r.ReadSlice('\n') + if err != nil { return } - size -= int64(len(line)) + headers = append(headers, string(line)) } +} - // Check for the binary export data section header "$$B\n". - hdr := string(line) - if hdr != "$$B\n" { - err = fmt.Errorf("unknown export data header: %q", hdr) +// ReadExportDataHeader reads the export data header and format from r. +// It returns the number of bytes read, or an error if the format is no longer +// supported or it failed to read. +// +// The only currently supported format is binary export data in the +// unified export format. +func ReadExportDataHeader(r *bufio.Reader) (n int, err error) { + // Read export data header. + line, err := r.ReadSlice('\n') + if err != nil { return } - // TODO(taking): Remove end-of-section marker "\n$$\n" from size. - if size < 0 { - err = fmt.Errorf("invalid size (%d) in the archive file: %d bytes remain without section headers (recompile package)", arsize, size) + hdr := string(line) + switch hdr { + case "$$\n": + err = fmt.Errorf("old textual export format no longer supported (recompile package)") + return + + case "$$B\n": + var format byte + format, err = r.ReadByte() + if err != nil { + return + } + // The unified export format starts with a 'u'. + switch format { + case 'u': + default: + // Older no longer supported export formats include: + // indexed export format which started with an 'i'; and + // the older binary export format which started with a 'c', + // 'd', or 'v' (from "version"). 
+ err = fmt.Errorf("binary export format %q is no longer supported (recompile package)", format) + return + } + + default: + err = fmt.Errorf("unknown export data header: %q", hdr) return } + n = len(hdr) + 1 // + 1 is for 'u' return } + +// FindPkg returns the filename and unique package id for an import +// path based on package information provided by build.Import (using +// the build.Default build.Context). A relative srcDir is interpreted +// relative to the current working directory. +// +// FindPkg is only used in tests within x/tools. +func FindPkg(path, srcDir string) (filename, id string, err error) { + // TODO(taking): Move internal/exportdata.FindPkg into its own file, + // and then this copy into a _test package. + if path == "" { + return "", "", errors.New("path is empty") + } + + var noext string + switch { + default: + // "x" -> "$GOPATH/pkg/$GOOS_$GOARCH/x.ext", "x" + // Don't require the source files to be present. + if abs, err := filepath.Abs(srcDir); err == nil { // see issue 14282 + srcDir = abs + } + var bp *build.Package + bp, err = build.Import(path, srcDir, build.FindOnly|build.AllowBinary) + if bp.PkgObj == "" { + if bp.Goroot && bp.Dir != "" { + filename, err = lookupGorootExport(bp.Dir) + if err == nil { + _, err = os.Stat(filename) + } + if err == nil { + return filename, bp.ImportPath, nil + } + } + goto notfound + } else { + noext = strings.TrimSuffix(bp.PkgObj, ".a") + } + id = bp.ImportPath + + case build.IsLocalImport(path): + // "./x" -> "/this/directory/x.ext", "/this/directory/x" + noext = filepath.Join(srcDir, path) + id = noext + + case filepath.IsAbs(path): + // for completeness only - go/build.Import + // does not support absolute imports + // "/x" -> "/x.ext", "/x" + noext = path + id = path + } + + if false { // for debugging + if path != id { + fmt.Printf("%s -> %s\n", path, id) + } + } + + // try extensions + for _, ext := range pkgExts { + filename = noext + ext + f, statErr := os.Stat(filename) + if statErr == nil && 
!f.IsDir() { + return filename, id, nil + } + if err == nil { + err = statErr + } + } + +notfound: + if err == nil { + return "", path, fmt.Errorf("can't find import: %q", path) + } + return "", path, fmt.Errorf("can't find import: %q: %w", path, err) +} + +var pkgExts = [...]string{".a", ".o"} // a file from the build cache will have no extension + +var exportMap sync.Map // package dir → func() (string, error) + +// lookupGorootExport returns the location of the export data +// (normally found in the build cache, but located in GOROOT/pkg +// in prior Go releases) for the package located in pkgDir. +// +// (We use the package's directory instead of its import path +// mainly to simplify handling of the packages in src/vendor +// and cmd/vendor.) +// +// lookupGorootExport is only used in tests within x/tools. +func lookupGorootExport(pkgDir string) (string, error) { + f, ok := exportMap.Load(pkgDir) + if !ok { + var ( + listOnce sync.Once + exportPath string + err error + ) + f, _ = exportMap.LoadOrStore(pkgDir, func() (string, error) { + listOnce.Do(func() { + cmd := exec.Command(filepath.Join(build.Default.GOROOT, "bin", "go"), "list", "-export", "-f", "{{.Export}}", pkgDir) + cmd.Dir = build.Default.GOROOT + cmd.Env = append(os.Environ(), "PWD="+cmd.Dir, "GOROOT="+build.Default.GOROOT) + var output []byte + output, err = cmd.Output() + if err != nil { + if ee, ok := err.(*exec.ExitError); ok && len(ee.Stderr) > 0 { + err = errors.New(string(ee.Stderr)) + } + return + } + + exports := strings.Split(string(bytes.TrimSpace(output)), "\n") + if len(exports) != 1 { + err = fmt.Errorf("go list reported %d exports; expected 1", len(exports)) + return + } + + exportPath = exports[0] + }) + + return exportPath, err + }) + } + + return f.(func() (string, error))() +} diff --git a/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go b/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go index dbbca8604..3dbd21d1b 100644 --- 
a/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go @@ -23,17 +23,11 @@ package gcimporter // import "golang.org/x/tools/internal/gcimporter" import ( "bufio" - "bytes" "fmt" - "go/build" "go/token" "go/types" "io" "os" - "os/exec" - "path/filepath" - "strings" - "sync" ) const ( @@ -45,127 +39,14 @@ const ( trace = false ) -var exportMap sync.Map // package dir → func() (string, bool) - -// lookupGorootExport returns the location of the export data -// (normally found in the build cache, but located in GOROOT/pkg -// in prior Go releases) for the package located in pkgDir. -// -// (We use the package's directory instead of its import path -// mainly to simplify handling of the packages in src/vendor -// and cmd/vendor.) -func lookupGorootExport(pkgDir string) (string, bool) { - f, ok := exportMap.Load(pkgDir) - if !ok { - var ( - listOnce sync.Once - exportPath string - ) - f, _ = exportMap.LoadOrStore(pkgDir, func() (string, bool) { - listOnce.Do(func() { - cmd := exec.Command("go", "list", "-export", "-f", "{{.Export}}", pkgDir) - cmd.Dir = build.Default.GOROOT - var output []byte - output, err := cmd.Output() - if err != nil { - return - } - - exports := strings.Split(string(bytes.TrimSpace(output)), "\n") - if len(exports) != 1 { - return - } - - exportPath = exports[0] - }) - - return exportPath, exportPath != "" - }) - } - - return f.(func() (string, bool))() -} - -var pkgExts = [...]string{".a", ".o"} - -// FindPkg returns the filename and unique package id for an import -// path based on package information provided by build.Import (using -// the build.Default build.Context). A relative srcDir is interpreted -// relative to the current working directory. -// If no file was found, an empty filename is returned. 
-func FindPkg(path, srcDir string) (filename, id string) { - if path == "" { - return - } - - var noext string - switch { - default: - // "x" -> "$GOPATH/pkg/$GOOS_$GOARCH/x.ext", "x" - // Don't require the source files to be present. - if abs, err := filepath.Abs(srcDir); err == nil { // see issue 14282 - srcDir = abs - } - bp, _ := build.Import(path, srcDir, build.FindOnly|build.AllowBinary) - if bp.PkgObj == "" { - var ok bool - if bp.Goroot && bp.Dir != "" { - filename, ok = lookupGorootExport(bp.Dir) - } - if !ok { - id = path // make sure we have an id to print in error message - return - } - } else { - noext = strings.TrimSuffix(bp.PkgObj, ".a") - id = bp.ImportPath - } - - case build.IsLocalImport(path): - // "./x" -> "/this/directory/x.ext", "/this/directory/x" - noext = filepath.Join(srcDir, path) - id = noext - - case filepath.IsAbs(path): - // for completeness only - go/build.Import - // does not support absolute imports - // "/x" -> "/x.ext", "/x" - noext = path - id = path - } - - if false { // for debugging - if path != id { - fmt.Printf("%s -> %s\n", path, id) - } - } - - if filename != "" { - if f, err := os.Stat(filename); err == nil && !f.IsDir() { - return - } - } - - // try extensions - for _, ext := range pkgExts { - filename = noext + ext - if f, err := os.Stat(filename); err == nil && !f.IsDir() { - return - } - } - - filename = "" // not found - return -} - // Import imports a gc-generated package given its import path and srcDir, adds // the corresponding package object to the packages map, and returns the object. // The packages map must contain all packages already imported. // -// TODO(taking): Import is only used in tests. Move to gcimporter_test. -func Import(packages map[string]*types.Package, path, srcDir string, lookup func(path string) (io.ReadCloser, error)) (pkg *types.Package, err error) { +// Import is only used in tests. 
+func Import(fset *token.FileSet, packages map[string]*types.Package, path, srcDir string, lookup func(path string) (io.ReadCloser, error)) (pkg *types.Package, err error) { var rc io.ReadCloser - var filename, id string + var id string if lookup != nil { // With custom lookup specified, assume that caller has // converted path to a canonical import path for use in the map. @@ -184,12 +65,13 @@ func Import(packages map[string]*types.Package, path, srcDir string, lookup func } rc = f } else { - filename, id = FindPkg(path, srcDir) + var filename string + filename, id, err = FindPkg(path, srcDir) if filename == "" { if path == "unsafe" { return types.Unsafe, nil } - return nil, fmt.Errorf("can't find import: %q", id) + return nil, err } // no need to re-import if the package was imported completely before @@ -212,54 +94,15 @@ func Import(packages map[string]*types.Package, path, srcDir string, lookup func } defer rc.Close() - var size int64 buf := bufio.NewReader(rc) - if size, err = FindExportData(buf); err != nil { - return - } - - var data []byte - data, err = io.ReadAll(buf) + data, err := ReadUnified(buf) if err != nil { + err = fmt.Errorf("import %q: %v", path, err) return } - if len(data) == 0 { - return nil, fmt.Errorf("no data to load a package from for path %s", id) - } - - // TODO(gri): allow clients of go/importer to provide a FileSet. - // Or, define a new standard go/types/gcexportdata package. - fset := token.NewFileSet() - - // Select appropriate importer. - switch data[0] { - case 'v', 'c', 'd': - // binary: emitted by cmd/compile till go1.10; obsolete. - return nil, fmt.Errorf("binary (%c) import format is no longer supported", data[0]) - case 'i': - // indexed: emitted by cmd/compile till go1.19; - // now used only for serializing go/types. - // See https://github.com/golang/go/issues/69491. - _, pkg, err := IImportData(fset, packages, data[1:], id) - return pkg, err + // unified: emitted by cmd/compile since go1.20. 
+ _, pkg, err = UImportData(fset, packages, data, id) - case 'u': - // unified: emitted by cmd/compile since go1.20. - _, pkg, err := UImportData(fset, packages, data[1:size], id) - return pkg, err - - default: - l := len(data) - if l > 10 { - l = 10 - } - return nil, fmt.Errorf("unexpected export data with prefix %q for path %s", string(data[:l]), id) - } + return } - -type byPath []*types.Package - -func (a byPath) Len() int { return len(a) } -func (a byPath) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a byPath) Less(i, j int) bool { return a[i].Path() < a[j].Path() } diff --git a/vendor/golang.org/x/tools/internal/gcimporter/iexport.go b/vendor/golang.org/x/tools/internal/gcimporter/iexport.go index 7dfc31a37..4a4357d2b 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/iexport.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/iexport.go @@ -236,6 +236,7 @@ import ( "io" "math/big" "reflect" + "slices" "sort" "strconv" "strings" @@ -271,10 +272,10 @@ import ( // file system, be sure to include a cryptographic digest of the executable in // the key to avoid version skew. // -// If the provided reportf func is non-nil, it will be used for reporting bugs -// encountered during export. -// TODO(rfindley): remove reportf when we are confident enough in the new -// objectpath encoding. +// If the provided reportf func is non-nil, it is used for reporting +// bugs (e.g. recovered panics) encountered during export, enabling us +// to obtain via telemetry the stack that would otherwise be lost by +// merely returning an error. func IExportShallow(fset *token.FileSet, pkg *types.Package, reportf ReportFunc) ([]byte, error) { // In principle this operation can only fail if out.Write fails, // but that's impossible for bytes.Buffer---and as a matter of @@ -283,7 +284,7 @@ func IExportShallow(fset *token.FileSet, pkg *types.Package, reportf ReportFunc) // TODO(adonovan): use byte slices throughout, avoiding copying. 
const bundle, shallow = false, true var out bytes.Buffer - err := iexportCommon(&out, fset, bundle, shallow, iexportVersion, []*types.Package{pkg}) + err := iexportCommon(&out, fset, bundle, shallow, iexportVersion, []*types.Package{pkg}, reportf) return out.Bytes(), err } @@ -310,7 +311,7 @@ func IImportShallow(fset *token.FileSet, getPackages GetPackagesFunc, data []byt } // ReportFunc is the type of a function used to report formatted bugs. -type ReportFunc = func(string, ...interface{}) +type ReportFunc = func(string, ...any) // Current bundled export format version. Increase with each format change. // 0: initial implementation @@ -323,20 +324,27 @@ const bundleVersion = 0 // so that calls to IImportData can override with a provided package path. func IExportData(out io.Writer, fset *token.FileSet, pkg *types.Package) error { const bundle, shallow = false, false - return iexportCommon(out, fset, bundle, shallow, iexportVersion, []*types.Package{pkg}) + return iexportCommon(out, fset, bundle, shallow, iexportVersion, []*types.Package{pkg}, nil) } // IExportBundle writes an indexed export bundle for pkgs to out. func IExportBundle(out io.Writer, fset *token.FileSet, pkgs []*types.Package) error { const bundle, shallow = true, false - return iexportCommon(out, fset, bundle, shallow, iexportVersion, pkgs) + return iexportCommon(out, fset, bundle, shallow, iexportVersion, pkgs, nil) } -func iexportCommon(out io.Writer, fset *token.FileSet, bundle, shallow bool, version int, pkgs []*types.Package) (err error) { +func iexportCommon(out io.Writer, fset *token.FileSet, bundle, shallow bool, version int, pkgs []*types.Package, reportf ReportFunc) (err error) { if !debug { defer func() { if e := recover(); e != nil { + // Report the stack via telemetry (see #71067). 
+ if reportf != nil { + reportf("panic in exporter") + } if ierr, ok := e.(internalError); ok { + // internalError usually means we exported a + // bad go/types data structure: a violation + // of an implicit precondition of Export. err = ierr return } @@ -458,7 +466,7 @@ func (p *iexporter) encodeFile(w *intWriter, file *token.File, needed []uint64) w.uint64(size) // Sort the set of needed offsets. Duplicates are harmless. - sort.Slice(needed, func(i, j int) bool { return needed[i] < needed[j] }) + slices.Sort(needed) lines := file.Lines() // byte offset of each line start w.uint64(uint64(len(lines))) @@ -561,7 +569,6 @@ func (p *iexporter) exportName(obj types.Object) (res string) { type iexporter struct { fset *token.FileSet - out *bytes.Buffer version int shallow bool // don't put types from other packages in the index @@ -597,7 +604,7 @@ type filePositions struct { needed []uint64 // unordered list of needed file offsets } -func (p *iexporter) trace(format string, args ...interface{}) { +func (p *iexporter) trace(format string, args ...any) { if !trace { // Call sites should also be guarded, but having this check here allows // easily enabling/disabling debug trace statements. 
@@ -812,7 +819,7 @@ func (p *iexporter) doDecl(obj types.Object) { n := named.NumMethods() w.uint64(uint64(n)) - for i := 0; i < n; i++ { + for i := range n { m := named.Method(i) w.pos(m.Pos()) w.string(m.Name()) @@ -1089,7 +1096,7 @@ func (w *exportWriter) doTyp(t types.Type, pkg *types.Package) { w.pkg(fieldPkg) w.uint64(uint64(n)) - for i := 0; i < n; i++ { + for i := range n { f := t.Field(i) if w.p.shallow { w.objectPath(f) @@ -1138,7 +1145,7 @@ func (w *exportWriter) doTyp(t types.Type, pkg *types.Package) { w.startType(unionType) nt := t.Len() w.uint64(uint64(nt)) - for i := 0; i < nt; i++ { + for i := range nt { term := t.Term(i) w.bool(term.Tilde()) w.typ(term.Type(), pkg) @@ -1267,7 +1274,7 @@ func tparamName(exportName string) string { func (w *exportWriter) paramList(tup *types.Tuple) { n := tup.Len() w.uint64(uint64(n)) - for i := 0; i < n; i++ { + for i := range n { w.param(tup.At(i)) } } @@ -1583,6 +1590,6 @@ func (e internalError) Error() string { return "gcimporter: " + string(e) } // "internalErrorf" as the former is used for bugs, whose cause is // internal inconsistency, whereas the latter is used for ordinary // situations like bad input, whose cause is external. -func internalErrorf(format string, args ...interface{}) error { +func internalErrorf(format string, args ...any) error { return internalError(fmt.Sprintf(format, args...)) } diff --git a/vendor/golang.org/x/tools/internal/gcimporter/iimport.go b/vendor/golang.org/x/tools/internal/gcimporter/iimport.go index e260c0e8d..82e6c9d2d 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/iimport.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/iimport.go @@ -5,8 +5,6 @@ // Indexed package import. // See iexport.go for the export data format. -// This file is a copy of $GOROOT/src/go/internal/gcimporter/iimport.go. 
- package gcimporter import ( @@ -18,6 +16,7 @@ import ( "go/types" "io" "math/big" + "slices" "sort" "strings" @@ -316,7 +315,7 @@ func iimportCommon(fset *token.FileSet, getPackages GetPackagesFunc, data []byte pkgs = pkgList[:1] // record all referenced packages as imports - list := append(([]*types.Package)(nil), pkgList[1:]...) + list := slices.Clone(pkgList[1:]) sort.Sort(byPath(list)) pkgs[0].SetImports(list) } @@ -402,7 +401,7 @@ type iimporter struct { indent int // for tracing support } -func (p *iimporter) trace(format string, args ...interface{}) { +func (p *iimporter) trace(format string, args ...any) { if !trace { // Call sites should also be guarded, but having this check here allows // easily enabling/disabling debug trace statements. @@ -673,7 +672,9 @@ func (r *importReader) obj(name string) { case varTag: typ := r.typ() - r.declare(types.NewVar(pos, r.currPkg, name, typ)) + v := types.NewVar(pos, r.currPkg, name, typ) + typesinternal.SetVarKind(v, typesinternal.PackageVar) + r.declare(v) default: errorf("unexpected tag: %v", tag) @@ -1111,3 +1112,9 @@ func (r *importReader) byte() byte { } return x } + +type byPath []*types.Package + +func (a byPath) Len() int { return len(a) } +func (a byPath) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a byPath) Less(i, j int) bool { return a[i].Path() < a[j].Path() } diff --git a/vendor/golang.org/x/tools/internal/gcimporter/iimport_go122.go b/vendor/golang.org/x/tools/internal/gcimporter/iimport_go122.go deleted file mode 100644 index 7586bfaca..000000000 --- a/vendor/golang.org/x/tools/internal/gcimporter/iimport_go122.go +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright 2024 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.22 && !go1.24 - -package gcimporter - -import ( - "go/token" - "go/types" - "unsafe" -) - -// TODO(rfindley): delete this workaround once go1.24 is assured. 
- -func init() { - // Update markBlack so that it correctly sets the color - // of imported TypeNames. - // - // See the doc comment for markBlack for details. - - type color uint32 - const ( - white color = iota - black - grey - ) - type object struct { - _ *types.Scope - _ token.Pos - _ *types.Package - _ string - _ types.Type - _ uint32 - color_ color - _ token.Pos - } - type typeName struct { - object - } - - // If the size of types.TypeName changes, this will fail to compile. - const delta = int64(unsafe.Sizeof(typeName{})) - int64(unsafe.Sizeof(types.TypeName{})) - var _ [-delta * delta]int - - markBlack = func(obj *types.TypeName) { - type uP = unsafe.Pointer - var ptr *typeName - *(*uP)(uP(&ptr)) = uP(obj) - ptr.color_ = black - } -} diff --git a/vendor/golang.org/x/tools/internal/gcimporter/support.go b/vendor/golang.org/x/tools/internal/gcimporter/support.go new file mode 100644 index 000000000..4af810dc4 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/gcimporter/support.go @@ -0,0 +1,30 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gcimporter + +import ( + "bufio" + "io" + "strconv" + "strings" +) + +// Copy of $GOROOT/src/cmd/internal/archive.ReadHeader. 
+func readArchiveHeader(b *bufio.Reader, name string) int { + // architecture-independent object file output + const HeaderSize = 60 + + var buf [HeaderSize]byte + if _, err := io.ReadFull(b, buf[:]); err != nil { + return -1 + } + aname := strings.Trim(string(buf[0:16]), " ") + if !strings.HasPrefix(aname, name) { + return -1 + } + asize := strings.Trim(string(buf[48:58]), " ") + i, _ := strconv.Atoi(asize) + return i +} diff --git a/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go b/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go index 1db408613..37b4a39e9 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go @@ -11,10 +11,10 @@ import ( "go/token" "go/types" "sort" - "strings" "golang.org/x/tools/internal/aliases" "golang.org/x/tools/internal/pkgbits" + "golang.org/x/tools/internal/typesinternal" ) // A pkgReader holds the shared state for reading a unified IR package @@ -71,7 +71,6 @@ func UImportData(fset *token.FileSet, imports map[string]*types.Package, data [] } s := string(data) - s = s[:strings.LastIndex(s, "\n$$\n")] input := pkgbits.NewPkgDecoder(path, s) pkg = readUnifiedPackage(fset, nil, imports, input) return @@ -266,7 +265,12 @@ func (pr *pkgReader) pkgIdx(idx pkgbits.Index) *types.Package { func (r *reader) doPkg() *types.Package { path := r.String() switch path { - case "": + // cmd/compile emits path="main" for main packages because + // that's the linker symbol prefix it used; but we need + // the package's path as it would be reported by go list, + // hence "main" below. + // See test at go/packages.TestMainPackagePathInModeTypes. 
+ case "", "main": path = r.p.PkgPath() case "builtin": return nil // universe @@ -569,7 +573,8 @@ func (pr *pkgReader) objIdx(idx pkgbits.Index) (*types.Package, string) { sig := fn.Type().(*types.Signature) recv := types.NewVar(fn.Pos(), fn.Pkg(), "", named) - methods[i] = types.NewFunc(fn.Pos(), fn.Pkg(), fn.Name(), types.NewSignature(recv, sig.Params(), sig.Results(), sig.Variadic())) + typesinternal.SetVarKind(recv, typesinternal.RecvVar) + methods[i] = types.NewFunc(fn.Pos(), fn.Pkg(), fn.Name(), types.NewSignatureType(recv, nil, nil, sig.Params(), sig.Results(), sig.Variadic())) } embeds := make([]types.Type, iface.NumEmbeddeds()) @@ -616,7 +621,9 @@ func (pr *pkgReader) objIdx(idx pkgbits.Index) (*types.Package, string) { case pkgbits.ObjVar: pos := r.pos() typ := r.typ() - declare(types.NewVar(pos, objPkg, objName, typ)) + v := types.NewVar(pos, objPkg, objName, typ) + typesinternal.SetVarKind(v, typesinternal.PackageVar) + declare(v) } } diff --git a/vendor/golang.org/x/tools/internal/gocommand/invoke.go b/vendor/golang.org/x/tools/internal/gocommand/invoke.go index e333efc87..58721202d 100644 --- a/vendor/golang.org/x/tools/internal/gocommand/invoke.go +++ b/vendor/golang.org/x/tools/internal/gocommand/invoke.go @@ -28,7 +28,7 @@ import ( "golang.org/x/tools/internal/event/label" ) -// An Runner will run go command invocations and serialize +// A Runner will run go command invocations and serialize // them if it sees a concurrency error. type Runner struct { // once guards the runner initialization. @@ -141,7 +141,7 @@ func (runner *Runner) runPiped(ctx context.Context, inv Invocation, stdout, stde // Wait for all in-progress go commands to return before proceeding, // to avoid load concurrency errors. 
- for i := 0; i < maxInFlight; i++ { + for range maxInFlight { select { case <-ctx.Done(): return ctx.Err(), ctx.Err() @@ -179,7 +179,7 @@ type Invocation struct { CleanEnv bool Env []string WorkingDir string - Logf func(format string, args ...interface{}) + Logf func(format string, args ...any) } // Postcondition: both error results have same nilness. @@ -388,7 +388,9 @@ func runCmdContext(ctx context.Context, cmd *exec.Cmd) (err error) { case err := <-resChan: return err case <-timer.C: - HandleHangingGoCommand(startTime, cmd) + // HandleHangingGoCommand terminates this process. + // Pass off resChan in case we can collect the command error. + handleHangingGoCommand(startTime, cmd, resChan) case <-ctx.Done(): } } else { @@ -413,8 +415,6 @@ func runCmdContext(ctx context.Context, cmd *exec.Cmd) (err error) { } // Didn't shut down in response to interrupt. Kill it hard. - // TODO(rfindley): per advice from bcmills@, it may be better to send SIGQUIT - // on certain platforms, such as unix. if err := cmd.Process.Kill(); err != nil && !errors.Is(err, os.ErrProcessDone) && debug { log.Printf("error killing the Go command: %v", err) } @@ -422,15 +422,17 @@ func runCmdContext(ctx context.Context, cmd *exec.Cmd) (err error) { return <-resChan } -func HandleHangingGoCommand(start time.Time, cmd *exec.Cmd) { +// handleHangingGoCommand outputs debugging information to help diagnose the +// cause of a hanging Go command, and then exits with log.Fatalf. +func handleHangingGoCommand(start time.Time, cmd *exec.Cmd, resChan chan error) { switch runtime.GOOS { - case "linux", "darwin", "freebsd", "netbsd": + case "linux", "darwin", "freebsd", "netbsd", "openbsd": fmt.Fprintln(os.Stderr, `DETECTED A HANGING GO COMMAND -The gopls test runner has detected a hanging go command. In order to debug -this, the output of ps and lsof/fstat is printed below. + The gopls test runner has detected a hanging go command. In order to debug + this, the output of ps and lsof/fstat is printed below. 
-See golang/go#54461 for more details.`) + See golang/go#54461 for more details.`) fmt.Fprintln(os.Stderr, "\nps axo ppid,pid,command:") fmt.Fprintln(os.Stderr, "-------------------------") @@ -438,7 +440,7 @@ See golang/go#54461 for more details.`) psCmd.Stdout = os.Stderr psCmd.Stderr = os.Stderr if err := psCmd.Run(); err != nil { - panic(fmt.Sprintf("running ps: %v", err)) + log.Printf("Handling hanging Go command: running ps: %v", err) } listFiles := "lsof" @@ -452,10 +454,24 @@ See golang/go#54461 for more details.`) listFilesCmd.Stdout = os.Stderr listFilesCmd.Stderr = os.Stderr if err := listFilesCmd.Run(); err != nil { - panic(fmt.Sprintf("running %s: %v", listFiles, err)) + log.Printf("Handling hanging Go command: running %s: %v", listFiles, err) + } + // Try to extract information about the slow go process by issuing a SIGQUIT. + if err := cmd.Process.Signal(sigStuckProcess); err == nil { + select { + case err := <-resChan: + stderr := "not a bytes.Buffer" + if buf, _ := cmd.Stderr.(*bytes.Buffer); buf != nil { + stderr = buf.String() + } + log.Printf("Quit hanging go command:\n\terr:%v\n\tstderr:\n%v\n\n", err, stderr) + case <-time.After(5 * time.Second): + } + } else { + log.Printf("Sending signal %d to hanging go command: %v", sigStuckProcess, err) } } - panic(fmt.Sprintf("detected hanging go command (golang/go#54461); waited %s\n\tcommand:%s\n\tpid:%d", time.Since(start), cmd, cmd.Process.Pid)) + log.Fatalf("detected hanging go command (golang/go#54461); waited %s\n\tcommand:%s\n\tpid:%d", time.Since(start), cmd, cmd.Process.Pid) } func cmdDebugStr(cmd *exec.Cmd) string { diff --git a/vendor/golang.org/x/tools/internal/gocommand/invoke_notunix.go b/vendor/golang.org/x/tools/internal/gocommand/invoke_notunix.go new file mode 100644 index 000000000..469c648e4 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/gocommand/invoke_notunix.go @@ -0,0 +1,13 @@ +// Copyright 2025 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !unix + +package gocommand + +import "os" + +// sigStuckProcess is the signal to send to kill a hanging subprocess. +// On Unix we send SIGQUIT, but on non-Unix we only have os.Kill. +var sigStuckProcess = os.Kill diff --git a/vendor/golang.org/x/tools/internal/gocommand/invoke_unix.go b/vendor/golang.org/x/tools/internal/gocommand/invoke_unix.go new file mode 100644 index 000000000..169d37c8e --- /dev/null +++ b/vendor/golang.org/x/tools/internal/gocommand/invoke_unix.go @@ -0,0 +1,13 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build unix + +package gocommand + +import "syscall" + +// Sigstuckprocess is the signal to send to kill a hanging subprocess. +// Send SIGQUIT to get a stack trace. +var sigStuckProcess = syscall.SIGQUIT diff --git a/vendor/golang.org/x/tools/internal/gopathwalk/walk.go b/vendor/golang.org/x/tools/internal/gopathwalk/walk.go index 836151551..5252144d0 100644 --- a/vendor/golang.org/x/tools/internal/gopathwalk/walk.go +++ b/vendor/golang.org/x/tools/internal/gopathwalk/walk.go @@ -14,6 +14,7 @@ import ( "os" "path/filepath" "runtime" + "slices" "strings" "sync" "time" @@ -22,7 +23,7 @@ import ( // Options controls the behavior of a Walk call. type Options struct { // If Logf is non-nil, debug logging is enabled through this function. - Logf func(format string, args ...interface{}) + Logf func(format string, args ...any) // Search module caches. Also disables legacy goimports ignore rules. ModulesEnabled bool @@ -81,7 +82,7 @@ func WalkSkip(roots []Root, add func(root Root, dir string), skip func(root Root // walkDir creates a walker and starts fastwalk with this walker. 
func walkDir(root Root, add func(Root, string), skip func(root Root, dir string) bool, opts Options) { if opts.Logf == nil { - opts.Logf = func(format string, args ...interface{}) {} + opts.Logf = func(format string, args ...any) {} } if _, err := os.Stat(root.Path); os.IsNotExist(err) { opts.Logf("skipping nonexistent directory: %v", root.Path) @@ -195,10 +196,8 @@ func (w *walker) getIgnoredDirs(path string) []string { // shouldSkipDir reports whether the file should be skipped or not. func (w *walker) shouldSkipDir(dir string) bool { - for _, ignoredDir := range w.ignoredDirs { - if dir == ignoredDir { - return true - } + if slices.Contains(w.ignoredDirs, dir) { + return true } if w.skip != nil { // Check with the user specified callback. diff --git a/vendor/golang.org/x/tools/internal/imports/fix.go b/vendor/golang.org/x/tools/internal/imports/fix.go index 5ae576977..1b4dc0cb5 100644 --- a/vendor/golang.org/x/tools/internal/imports/fix.go +++ b/vendor/golang.org/x/tools/internal/imports/fix.go @@ -16,6 +16,7 @@ import ( "go/types" "io/fs" "io/ioutil" + "maps" "os" "path" "path/filepath" @@ -41,7 +42,7 @@ var importToGroup = []func(localPrefix, importPath string) (num int, ok bool){ if localPrefix == "" { return } - for _, p := range strings.Split(localPrefix, ",") { + for p := range strings.SplitSeq(localPrefix, ",") { if strings.HasPrefix(importPath, p) || strings.TrimSuffix(p, "/") == importPath { return 3, true } @@ -289,8 +290,8 @@ func (p *pass) loadPackageNames(ctx context.Context, imports []*ImportInfo) erro return nil } -// if there is a trailing major version, remove it -func withoutVersion(nm string) string { +// WithoutVersion removes a trailing major version, if there is one. 
+func WithoutVersion(nm string) string { if v := path.Base(nm); len(v) > 0 && v[0] == 'v' { if _, err := strconv.Atoi(v[1:]); err == nil { // this is, for instance, called with rand/v2 and returns rand @@ -312,7 +313,7 @@ func (p *pass) importIdentifier(imp *ImportInfo) string { } known := p.knownPackages[imp.ImportPath] if known != nil && known.Name != "" { - return withoutVersion(known.Name) + return WithoutVersion(known.Name) } return ImportPathToAssumedName(imp.ImportPath) } @@ -559,7 +560,7 @@ func fixImportsDefault(fset *token.FileSet, f *ast.File, filename string, env *P return err } apply(fset, f, fixes) - return err + return nil } // getFixes gets the import fixes that need to be made to f in order to fix the imports. @@ -585,7 +586,7 @@ func getFixesWithSource(ctx context.Context, fset *token.FileSet, f *ast.File, f srcDir := filepath.Dir(abs) if logf != nil { - logf("fixImports(filename=%q), srcDir=%q ...", filename, abs, srcDir) + logf("fixImports(filename=%q), srcDir=%q ...", filename, srcDir) } // First pass: looking only at f, and using the naive algorithm to @@ -780,7 +781,7 @@ func GetAllCandidates(ctx context.Context, wrapped func(ImportFix), searchPrefix return true }, dirFound: func(pkg *pkg) bool { - if !canUse(filename, pkg.dir) { + if !CanUse(filename, pkg.dir) { return false } // Try the assumed package name first, then a simpler path match @@ -815,7 +816,7 @@ func GetImportPaths(ctx context.Context, wrapped func(ImportFix), searchPrefix, return true }, dirFound: func(pkg *pkg) bool { - if !canUse(filename, pkg.dir) { + if !CanUse(filename, pkg.dir) { return false } return strings.HasPrefix(pkg.importPathShort, searchPrefix) @@ -927,7 +928,7 @@ type ProcessEnv struct { WorkingDir string // If Logf is non-nil, debug logging is enabled through this function. - Logf func(format string, args ...interface{}) + Logf func(format string, args ...any) // If set, ModCache holds a shared cache of directory info to use across // multiple ProcessEnvs. 
@@ -968,9 +969,7 @@ func (e *ProcessEnv) CopyConfig() *ProcessEnv { resolver: nil, Env: map[string]string{}, } - for k, v := range e.Env { - copy.Env[k] = v - } + maps.Copy(copy.Env, e.Env) return copy } @@ -1003,9 +1002,7 @@ func (e *ProcessEnv) init() error { if err := json.Unmarshal(stdout.Bytes(), &goEnv); err != nil { return err } - for k, v := range goEnv { - e.Env[k] = v - } + maps.Copy(e.Env, goEnv) e.initialized = true return nil } @@ -1030,7 +1027,7 @@ func (e *ProcessEnv) GetResolver() (Resolver, error) { // // For gopls, we can optionally explicitly choose a resolver type, since we // already know the view type. - if len(e.Env["GOMOD"]) == 0 && len(e.Env["GOWORK"]) == 0 { + if e.Env["GOMOD"] == "" && (e.Env["GOWORK"] == "" || e.Env["GOWORK"] == "off") { e.resolver = newGopathResolver(e) e.logf("created gopath resolver") } else if r, err := newModuleResolver(e, e.ModCache); err != nil { @@ -1132,6 +1129,9 @@ func addStdlibCandidates(pass *pass, refs References) error { // but we have no way of figuring out what the user is using // TODO: investigate using the toolchain version to disambiguate in the stdlib add("math/rand/v2") + // math/rand has an overlapping API + // TestIssue66407 fails without this + add("math/rand") continue } for importPath := range stdlib.PackageSymbols { @@ -1250,7 +1250,6 @@ func ImportPathToAssumedName(importPath string) string { // gopathResolver implements resolver for GOPATH workspaces. type gopathResolver struct { env *ProcessEnv - walked bool cache *DirInfoCache scanSema chan struct{} // scanSema prevents concurrent scans. 
} @@ -1736,7 +1735,7 @@ func (s *symbolSearcher) searchOne(ctx context.Context, c pkgDistance, symbols m // searching for "client.New") func pkgIsCandidate(filename string, refs References, pkg *pkg) bool { // Check "internal" and "vendor" visibility: - if !canUse(filename, pkg.dir) { + if !CanUse(filename, pkg.dir) { return false } @@ -1759,9 +1758,9 @@ func pkgIsCandidate(filename string, refs References, pkg *pkg) bool { return false } -// canUse reports whether the package in dir is usable from filename, +// CanUse reports whether the package in dir is usable from filename, // respecting the Go "internal" and "vendor" visibility rules. -func canUse(filename, dir string) bool { +func CanUse(filename, dir string) bool { // Fast path check, before any allocations. If it doesn't contain vendor // or internal, it's not tricky: // Note that this can false-negative on directories like "notinternal", diff --git a/vendor/golang.org/x/tools/internal/imports/imports.go b/vendor/golang.org/x/tools/internal/imports/imports.go index 2215a1288..b5f5218b5 100644 --- a/vendor/golang.org/x/tools/internal/imports/imports.go +++ b/vendor/golang.org/x/tools/internal/imports/imports.go @@ -93,7 +93,7 @@ func FixImports(ctx context.Context, filename string, src []byte, goroot string, // env is needed. func ApplyFixes(fixes []*ImportFix, filename string, src []byte, opt *Options, extraMode parser.Mode) (formatted []byte, err error) { // Don't use parse() -- we don't care about fragments or statement lists - // here, and we need to work with unparseable files. + // here, and we need to work with unparsable files. 
fileSet := token.NewFileSet() parserMode := parser.SkipObjectResolution if opt.Comments { diff --git a/vendor/golang.org/x/tools/internal/imports/mod.go b/vendor/golang.org/x/tools/internal/imports/mod.go index 8555e3f83..df94ec818 100644 --- a/vendor/golang.org/x/tools/internal/imports/mod.go +++ b/vendor/golang.org/x/tools/internal/imports/mod.go @@ -13,6 +13,7 @@ import ( "path" "path/filepath" "regexp" + "slices" "sort" "strconv" "strings" @@ -150,8 +151,8 @@ func newModuleResolver(e *ProcessEnv, moduleCacheCache *DirInfoCache) (*ModuleRe Path: "", Dir: filepath.Join(filepath.Dir(goWork), "vendor"), } - r.modsByModPath = append(append([]*gocommand.ModuleJSON{}, mainModsVendor...), r.dummyVendorMod) - r.modsByDir = append(append([]*gocommand.ModuleJSON{}, mainModsVendor...), r.dummyVendorMod) + r.modsByModPath = append(slices.Clone(mainModsVendor), r.dummyVendorMod) + r.modsByDir = append(slices.Clone(mainModsVendor), r.dummyVendorMod) } } else { // Vendor mode is off, so run go list -m ... to find everything. diff --git a/vendor/golang.org/x/tools/internal/imports/mod_cache.go b/vendor/golang.org/x/tools/internal/imports/mod_cache.go index b1192696b..b96c9d4bf 100644 --- a/vendor/golang.org/x/tools/internal/imports/mod_cache.go +++ b/vendor/golang.org/x/tools/internal/imports/mod_cache.go @@ -128,7 +128,7 @@ func (d *DirInfoCache) ScanAndListen(ctx context.Context, listener cacheListener // are going to be. Setting an arbitrary limit makes it much easier. 
const maxInFlight = 10 sema := make(chan struct{}, maxInFlight) - for i := 0; i < maxInFlight; i++ { + for range maxInFlight { sema <- struct{}{} } @@ -156,7 +156,7 @@ func (d *DirInfoCache) ScanAndListen(ctx context.Context, listener cacheListener d.mu.Lock() delete(d.listeners, cookie) d.mu.Unlock() - for i := 0; i < maxInFlight; i++ { + for range maxInFlight { <-sema } } diff --git a/vendor/golang.org/x/tools/internal/imports/sortimports.go b/vendor/golang.org/x/tools/internal/imports/sortimports.go index da8194fd9..67c17bc43 100644 --- a/vendor/golang.org/x/tools/internal/imports/sortimports.go +++ b/vendor/golang.org/x/tools/internal/imports/sortimports.go @@ -11,6 +11,7 @@ import ( "go/ast" "go/token" "log" + "slices" "sort" "strconv" ) @@ -30,7 +31,7 @@ func sortImports(localPrefix string, tokFile *token.File, f *ast.File) { if len(d.Specs) == 0 { // Empty import block, remove it. - f.Decls = append(f.Decls[:i], f.Decls[i+1:]...) + f.Decls = slices.Delete(f.Decls, i, i+1) } if !d.Lparen.IsValid() { @@ -91,7 +92,7 @@ func mergeImports(f *ast.File) { spec.(*ast.ImportSpec).Path.ValuePos = first.Pos() first.Specs = append(first.Specs, spec) } - f.Decls = append(f.Decls[:i], f.Decls[i+1:]...) + f.Decls = slices.Delete(f.Decls, i, i+1) i-- } } diff --git a/vendor/golang.org/x/tools/internal/imports/source_env.go b/vendor/golang.org/x/tools/internal/imports/source_env.go index d14abaa31..ec996c3cc 100644 --- a/vendor/golang.org/x/tools/internal/imports/source_env.go +++ b/vendor/golang.org/x/tools/internal/imports/source_env.go @@ -67,7 +67,7 @@ func (s *ProcessEnvSource) ResolveReferences(ctx context.Context, filename strin // same package name. Don't try to import ourselves. 
return false } - if !canUse(filename, pkg.dir) { + if !CanUse(filename, pkg.dir) { return false } mu.Lock() diff --git a/vendor/golang.org/x/tools/internal/imports/source_modindex.go b/vendor/golang.org/x/tools/internal/imports/source_modindex.go index 05229f06c..ca745d4a1 100644 --- a/vendor/golang.org/x/tools/internal/imports/source_modindex.go +++ b/vendor/golang.org/x/tools/internal/imports/source_modindex.go @@ -15,6 +15,10 @@ import ( // This code is here rather than in the modindex package // to avoid import loops +// TODO(adonovan): this code is only used by a test in this package. +// Can we delete it? Or is there a plan to call NewIndexSource from +// cmd/goimports? + // implements Source using modindex, so only for module cache. // // this is perhaps over-engineered. A new Index is read at first use. @@ -22,8 +26,8 @@ import ( // is read if the index changed. It is not clear the Mutex is needed. type IndexSource struct { modcachedir string - mutex sync.Mutex - ix *modindex.Index + mu sync.Mutex + index *modindex.Index // (access via getIndex) expires time.Time } @@ -39,13 +43,14 @@ func (s *IndexSource) LoadPackageNames(ctx context.Context, srcDir string, paths } func (s *IndexSource) ResolveReferences(ctx context.Context, filename string, missing References) ([]*Result, error) { - if err := s.maybeReadIndex(); err != nil { + index, err := s.getIndex() + if err != nil { return nil, err } var cs []modindex.Candidate for pkg, nms := range missing { for nm := range nms { - x := s.ix.Lookup(pkg, nm, false) + x := index.Lookup(pkg, nm, false) cs = append(cs, x...) 
} } @@ -74,30 +79,22 @@ func (s *IndexSource) ResolveReferences(ctx context.Context, filename string, mi return ans, nil } -func (s *IndexSource) maybeReadIndex() error { - s.mutex.Lock() - defer s.mutex.Unlock() - - var readIndex bool - if time.Now().After(s.expires) { - ok, err := modindex.Update(s.modcachedir) - if err != nil { - return err - } - if ok { - readIndex = true - } - } +func (s *IndexSource) getIndex() (*modindex.Index, error) { + s.mu.Lock() + defer s.mu.Unlock() - if readIndex || s.ix == nil { - ix, err := modindex.ReadIndex(s.modcachedir) + // (s.index = nil => s.expires is zero, + // so the first condition is strictly redundant. + // But it makes the postcondition very clear.) + if s.index == nil || time.Now().After(s.expires) { + index, err := modindex.Update(s.modcachedir) if err != nil { - return err + return nil, err } - s.ix = ix - // for now refresh every 15 minutes - s.expires = time.Now().Add(time.Minute * 15) + s.index = index + s.expires = index.ValidAt.Add(15 * time.Minute) // (refresh period) } + // Inv: s.index != nil - return nil + return s.index, nil } diff --git a/vendor/golang.org/x/tools/internal/modindex/directories.go b/vendor/golang.org/x/tools/internal/modindex/directories.go index 1e1a02f23..9a963744b 100644 --- a/vendor/golang.org/x/tools/internal/modindex/directories.go +++ b/vendor/golang.org/x/tools/internal/modindex/directories.go @@ -10,7 +10,6 @@ import ( "os" "path/filepath" "regexp" - "slices" "strings" "sync" "time" @@ -20,50 +19,48 @@ import ( ) type directory struct { - path Relpath + path string // relative to GOMODCACHE importPath string version string // semantic version - syms []symbol } -// filterDirs groups the directories by import path, -// sorting the ones with the same import path by semantic version, -// most recent first. 
-func byImportPath(dirs []Relpath) (map[string][]*directory, error) { - ans := make(map[string][]*directory) // key is import path - for _, d := range dirs { - ip, sv, err := DirToImportPathVersion(d) +// bestDirByImportPath returns the best directory for each import +// path, where "best" means most recent semantic version. These import +// paths are inferred from the GOMODCACHE-relative dir names in dirs. +func bestDirByImportPath(dirs []string) (map[string]directory, error) { + dirsByPath := make(map[string]directory) + for _, dir := range dirs { + importPath, version, err := dirToImportPathVersion(dir) if err != nil { return nil, err } - ans[ip] = append(ans[ip], &directory{ - path: d, - importPath: ip, - version: sv, - }) - } - for k, v := range ans { - semanticSort(v) - ans[k] = v + new := directory{ + path: dir, + importPath: importPath, + version: version, + } + if old, ok := dirsByPath[importPath]; !ok || compareDirectory(new, old) < 0 { + dirsByPath[importPath] = new + } } - return ans, nil + return dirsByPath, nil } -// sort the directories by semantic version, latest first -func semanticSort(v []*directory) { - slices.SortFunc(v, func(l, r *directory) int { - if n := semver.Compare(l.version, r.version); n != 0 { - return -n // latest first - } - return strings.Compare(string(l.path), string(r.path)) - }) +// compareDirectory defines an ordering of path@version directories, +// by descending version, then by ascending path. +func compareDirectory(x, y directory) int { + if sign := -semver.Compare(x.version, y.version); sign != 0 { + return sign // latest first + } + return strings.Compare(string(x.path), string(y.path)) } // modCacheRegexp splits a relpathpath into module, module version, and package. 
var modCacheRegexp = regexp.MustCompile(`(.*)@([^/\\]*)(.*)`) -// DirToImportPathVersion computes import path and semantic version -func DirToImportPathVersion(dir Relpath) (string, string, error) { +// dirToImportPathVersion computes import path and semantic version +// from a GOMODCACHE-relative directory name. +func dirToImportPathVersion(dir string) (string, string, error) { m := modCacheRegexp.FindStringSubmatch(string(dir)) // m[1] is the module path // m[2] is the version major.minor.patch(-
     1 && flds[1][1] == 'D',
     			}
    -			if flds[1] == "F" {
    +			if px.Type == Func {
     				n, err := strconv.Atoi(flds[2])
     				if err != nil {
     					continue // should never happen
    @@ -88,7 +120,7 @@ func (ix *Index) Lookup(pkg, name string, prefix bool) []Candidate {
     				px.Results = int16(n)
     				if len(flds) >= 4 {
     					sig := strings.Split(flds[3], " ")
    -					for i := 0; i < len(sig); i++ {
    +					for i := range sig {
     						// $ cannot otherwise occur. removing the spaces
     						// almost works, but for chan struct{}, e.g.
     						sig[i] = strings.Replace(sig[i], "$", " ", -1)
    @@ -104,13 +136,14 @@ func (ix *Index) Lookup(pkg, name string, prefix bool) []Candidate {
     
     func toFields(sig []string) []Field {
     	ans := make([]Field, len(sig)/2)
    -	for i := 0; i < len(ans); i++ {
    +	for i := range ans {
     		ans[i] = Field{Arg: sig[2*i], Type: sig[2*i+1]}
     	}
     	return ans
     }
     
     // benchmarks show this is measurably better than strings.Split
    +// split into first 4 fields separated by single space
     func fastSplit(x string) []string {
     	ans := make([]string, 0, 4)
     	nxt := 0
    diff --git a/vendor/golang.org/x/tools/internal/modindex/modindex.go b/vendor/golang.org/x/tools/internal/modindex/modindex.go
    index 355a53e71..5fa285d98 100644
    --- a/vendor/golang.org/x/tools/internal/modindex/modindex.go
    +++ b/vendor/golang.org/x/tools/internal/modindex/modindex.go
    @@ -2,17 +2,21 @@
     // Use of this source code is governed by a BSD-style
     // license that can be found in the LICENSE file.
     
    -// Package modindex contains code for building and searching an index to
    -// the Go module cache. The directory containing the index, returned by
    -// IndexDir(), contains a file index-name- that contains the name
    +// Package modindex contains code for building and searching an
    +// [Index] of the Go module cache.
    +package modindex
    +
    +// The directory containing the index, returned by
     +// [IndexDir], contains a file index-name-<ver> that contains the name
     // of the current index. We believe writing that short file is atomic.
    -// ReadIndex reads that file to get the file name of the index.
    +// [Read] reads that file to get the file name of the index.
     // WriteIndex writes an index with a unique name and then
     // writes that name into a new version of index-name-.
     // ( stands for the CurrentVersion of the index format.)
    -package modindex
     
     import (
    +	"maps"
    +	"os"
     	"path/filepath"
     	"slices"
     	"strings"
    @@ -21,144 +25,95 @@ import (
     	"golang.org/x/mod/semver"
     )
     
    -// Create always creates a new index for the go module cache that is in cachedir.
    -func Create(cachedir string) error {
    -	_, err := indexModCache(cachedir, true)
    -	return err
    -}
    -
    -// Update the index for the go module cache that is in cachedir,
    -// If there is no existing index it will build one.
    -// If there are changed directories since the last index, it will
    -// write a new one and return true. Otherwise it returns false.
    -func Update(cachedir string) (bool, error) {
    -	return indexModCache(cachedir, false)
    +// Update updates the index for the specified Go
    +// module cache directory, creating it as needed.
    +// On success it returns the current index.
    +func Update(gomodcache string) (*Index, error) {
    +	prev, err := Read(gomodcache)
    +	if err != nil {
    +		if !os.IsNotExist(err) {
    +			return nil, err
    +		}
    +		prev = nil
    +	}
    +	return update(gomodcache, prev)
     }
     
    -// indexModCache writes an index current as of when it is called.
    -// If clear is true the index is constructed from all of GOMODCACHE
    -// otherwise the index is constructed from the last previous index
    -// and the updates to the cache. It returns true if it wrote an index,
    -// false otherwise.
    -func indexModCache(cachedir string, clear bool) (bool, error) {
    -	cachedir, err := filepath.Abs(cachedir)
    +// update builds, writes, and returns the current index.
    +//
    +// If old is nil, the new index is built from all of GOMODCACHE;
    +// otherwise it is built from the old index plus cache updates
    +// since the previous index's time.
    +func update(gomodcache string, old *Index) (*Index, error) {
    +	gomodcache, err := filepath.Abs(gomodcache)
     	if err != nil {
    -		return false, err
    +		return nil, err
     	}
    -	cd := Abspath(cachedir)
    -	future := time.Now().Add(24 * time.Hour) // safely in the future
    -	ok, err := modindexTimed(future, cd, clear)
    +	new, changed, err := build(gomodcache, old)
     	if err != nil {
    -		return false, err
    +		return nil, err
     	}
    -	return ok, nil
    -}
    -
    -// modindexTimed writes an index current as of onlyBefore.
    -// If clear is true the index is constructed from all of GOMODCACHE
    -// otherwise the index is constructed from the last previous index
    -// and all the updates to the cache before onlyBefore.
    -// It returns true if it wrote a new index, false if it wrote nothing.
    -func modindexTimed(onlyBefore time.Time, cachedir Abspath, clear bool) (bool, error) {
    -	var curIndex *Index
    -	if !clear {
    -		var err error
    -		curIndex, err = ReadIndex(string(cachedir))
    -		if clear && err != nil {
    -			return false, err
    +	if old == nil || changed {
    +		if err := write(gomodcache, new); err != nil {
    +			return nil, err
     		}
    -		// TODO(pjw): check that most of those directories still exist
    -	}
    -	cfg := &work{
    -		onlyBefore: onlyBefore,
    -		oldIndex:   curIndex,
    -		cacheDir:   cachedir,
    -	}
    -	if curIndex != nil {
    -		cfg.onlyAfter = curIndex.Changed
    -	}
    -	if err := cfg.buildIndex(); err != nil {
    -		return false, err
     	}
    -	if len(cfg.newIndex.Entries) == 0 && curIndex != nil {
    -		// no changes from existing curIndex, don't write a new index
    -		return false, nil
    -	}
    -	if err := cfg.writeIndex(); err != nil {
    -		return false, err
    -	}
    -	return true, nil
    -}
    -
    -type work struct {
    -	onlyBefore time.Time // do not use directories later than this
    -	onlyAfter  time.Time // only interested in directories after this
    -	// directories from before onlyAfter come from oldIndex
    -	oldIndex *Index
    -	newIndex *Index
    -	cacheDir Abspath
    +	return new, nil
     }
     
    -func (w *work) buildIndex() error {
    -	// The effective date of the new index should be at least
    -	// slightly earlier than when the directories are scanned
    -	// so set it now.
    -	w.newIndex = &Index{Changed: time.Now(), Cachedir: w.cacheDir}
    -	dirs := findDirs(string(w.cacheDir), w.onlyAfter, w.onlyBefore)
    -	if len(dirs) == 0 {
    -		return nil
    +// build returns a new index for the specified Go module cache (an
    +// absolute path).
    +//
     +// If an old index is provided, only directories more recent than
     +// it are scanned; older directories are provided by the old
    +// Index.
    +//
    +// The boolean result indicates whether new entries were found.
    +func build(gomodcache string, old *Index) (*Index, bool, error) {
    +	// Set the time window.
    +	var start time.Time // = dawn of time
    +	if old != nil {
    +		start = old.ValidAt
     	}
    -	newdirs, err := byImportPath(dirs)
    +	now := time.Now()
    +	end := now.Add(24 * time.Hour) // safely in the future
    +
    +	// Enumerate GOMODCACHE package directories.
    +	// Choose the best (latest) package for each import path.
    +	pkgDirs := findDirs(gomodcache, start, end)
    +	dirByPath, err := bestDirByImportPath(pkgDirs)
     	if err != nil {
    -		return err
    +		return nil, false, err
     	}
    -	// for each import path it might occur only in newdirs,
    -	// only in w.oldIndex, or in both.
    -	// If it occurs in both, use the semantically later one
    -	if w.oldIndex != nil {
    -		for _, e := range w.oldIndex.Entries {
    -			found, ok := newdirs[e.ImportPath]
    -			if !ok {
    -				w.newIndex.Entries = append(w.newIndex.Entries, e)
    -				continue // use this one, there is no new one
    -			}
    -			if semver.Compare(found[0].version, e.Version) > 0 {
    -				// use the new one
    -			} else {
    -				// use the old one, forget the new one
    -				w.newIndex.Entries = append(w.newIndex.Entries, e)
    -				delete(newdirs, e.ImportPath)
    +
    +	// For each import path it might occur only in
    +	// dirByPath, only in old, or in both.
    +	// If both, use the semantically later one.
    +	var entries []Entry
    +	if old != nil {
    +		for _, entry := range old.Entries {
    +			dir, ok := dirByPath[entry.ImportPath]
    +			if !ok || semver.Compare(dir.version, entry.Version) <= 0 {
    +				// New dir is missing or not more recent; use old entry.
    +				entries = append(entries, entry)
    +				delete(dirByPath, entry.ImportPath)
     			}
     		}
     	}
    -	// get symbol information for all the new diredtories
    -	getSymbols(w.cacheDir, newdirs)
    -	// assemble the new index entries
    -	for k, v := range newdirs {
    -		d := v[0]
    -		pkg, names := processSyms(d.syms)
    -		if pkg == "" {
    -			continue // PJW: does this ever happen?
    -		}
    -		entry := Entry{
    -			PkgName:    pkg,
    -			Dir:        d.path,
    -			ImportPath: k,
    -			Version:    d.version,
    -			Names:      names,
    -		}
    -		w.newIndex.Entries = append(w.newIndex.Entries, entry)
    -	}
    -	// sort the entries in the new index
    -	slices.SortFunc(w.newIndex.Entries, func(l, r Entry) int {
    -		if n := strings.Compare(l.PkgName, r.PkgName); n != 0 {
    +
    +	// Extract symbol information for all the new directories.
    +	newEntries := extractSymbols(gomodcache, maps.Values(dirByPath))
    +	entries = append(entries, newEntries...)
    +	slices.SortFunc(entries, func(x, y Entry) int {
    +		if n := strings.Compare(x.PkgName, y.PkgName); n != 0 {
     			return n
     		}
    -		return strings.Compare(l.ImportPath, r.ImportPath)
    +		return strings.Compare(x.ImportPath, y.ImportPath)
     	})
    -	return nil
    -}
     
    -func (w *work) writeIndex() error {
    -	return writeIndex(w.cacheDir, w.newIndex)
    +	return &Index{
    +		GOMODCACHE: gomodcache,
    +		ValidAt:    now, // time before the directories were scanned
    +		Entries:    entries,
    +	}, len(newEntries) > 0, nil
     }
    diff --git a/vendor/golang.org/x/tools/internal/modindex/symbols.go b/vendor/golang.org/x/tools/internal/modindex/symbols.go
    index 2e285ed99..8e9702d84 100644
    --- a/vendor/golang.org/x/tools/internal/modindex/symbols.go
    +++ b/vendor/golang.org/x/tools/internal/modindex/symbols.go
    @@ -10,63 +10,91 @@ import (
     	"go/parser"
     	"go/token"
     	"go/types"
    +	"iter"
     	"os"
     	"path/filepath"
    +	"runtime"
     	"slices"
     	"strings"
    +	"sync"
     
     	"golang.org/x/sync/errgroup"
     )
     
     // The name of a symbol contains information about the symbol:
    -//  T for types
    -//  C for consts
    -//  V for vars
    +//  T for types, TD if the type is deprecated
    +//  C for consts, CD if the const is deprecated
    +//  V for vars, VD if the var is deprecated
     // and for funcs:  F  ( )*
     // any spaces in  are replaced by $s so that the fields
    -// of the name are space separated
    +// of the name are space separated. F is replaced by FD if the func
    +// is deprecated.
     type symbol struct {
     	pkg  string // name of the symbols's package
     	name string // declared name
    -	kind string // T, C, V, or F
    +	kind string // T, C, V, or F, followed by D if deprecated
     	sig  string // signature information, for F
     }
     
    -// find the symbols for the best directories
    -func getSymbols(cd Abspath, dirs map[string][]*directory) {
    +// extractSymbols returns a (new, unordered) array of Entries, one for
    +// each provided package directory, describing its exported symbols.
    +func extractSymbols(cwd string, dirs iter.Seq[directory]) []Entry {
    +	var (
    +		mu      sync.Mutex
    +		entries []Entry
    +	)
    +
     	var g errgroup.Group
    -	g.SetLimit(-1) // maybe throttle this some day
    -	for _, vv := range dirs {
    -		// throttling some day?
    -		d := vv[0]
    +	g.SetLimit(max(2, runtime.GOMAXPROCS(0)/2))
    +	for dir := range dirs {
     		g.Go(func() error {
    -			thedir := filepath.Join(string(cd), string(d.path))
    -			mode := parser.SkipObjectResolution
    +			thedir := filepath.Join(cwd, string(dir.path))
    +			mode := parser.SkipObjectResolution | parser.ParseComments
     
    -			fi, err := os.ReadDir(thedir)
    +			// Parse all Go files in dir and extract symbols.
    +			dirents, err := os.ReadDir(thedir)
     			if err != nil {
     				return nil // log this someday?
     			}
    -			for _, fx := range fi {
    -				if !strings.HasSuffix(fx.Name(), ".go") || strings.HasSuffix(fx.Name(), "_test.go") {
    +			var syms []symbol
    +			for _, dirent := range dirents {
    +				if !strings.HasSuffix(dirent.Name(), ".go") ||
    +					strings.HasSuffix(dirent.Name(), "_test.go") {
     					continue
     				}
    -				fname := filepath.Join(thedir, fx.Name())
    +				fname := filepath.Join(thedir, dirent.Name())
     				tr, err := parser.ParseFile(token.NewFileSet(), fname, nil, mode)
     				if err != nil {
     					continue // ignore errors, someday log them?
     				}
    -				d.syms = append(d.syms, getFileExports(tr)...)
    +				syms = append(syms, getFileExports(tr)...)
    +			}
    +
    +			// Create an entry for the package.
    +			pkg, names := processSyms(syms)
    +			if pkg != "" {
    +				mu.Lock()
    +				defer mu.Unlock()
    +				entries = append(entries, Entry{
    +					PkgName:    pkg,
    +					Dir:        dir.path,
    +					ImportPath: dir.importPath,
    +					Version:    dir.version,
    +					Names:      names,
    +				})
     			}
    +
     			return nil
     		})
     	}
    -	g.Wait()
    +	g.Wait() // ignore error
    +
    +	return entries
     }
     
     func getFileExports(f *ast.File) []symbol {
     	pkg := f.Name.Name
    -	if pkg == "main" {
    +	if pkg == "main" || pkg == "" {
     		return nil
     	}
     	var ans []symbol
    @@ -84,6 +112,9 @@ func getFileExports(f *ast.File) []symbol {
     			// generic functions just like non-generic ones.
     			sig := dtype.Params
     			kind := "F"
    +			if isDeprecated(decl.Doc) {
    +				kind += "D"
    +			}
     			result := []string{fmt.Sprintf("%d", dtype.Results.NumFields())}
     			for _, x := range sig.List {
     				// This code creates a string representing the type.
    @@ -105,9 +136,9 @@ func getFileExports(f *ast.File) []symbol {
     				// The only place a $ can occur seems to be in a struct tag, which
     				// can be an arbitrary string literal, and ExprString does not presently
     				// print struct tags. So for this to happen the type of a formal parameter
    -				// has to be a explict struct, e.g. foo(x struct{a int "$"}) and ExprString
    +				// has to be a explicit struct, e.g. foo(x struct{a int "$"}) and ExprString
     				// would have to show the struct tag. Even testing for this case seems
    -				// a waste of effort, but let's not ignore such pathologies
    +				// a waste of effort, but let's remember the possibility
     				if strings.Contains(tp, "$") {
     					continue
     				}
    @@ -127,12 +158,16 @@ func getFileExports(f *ast.File) []symbol {
     				ans = append(ans, *s)
     			}
     		case *ast.GenDecl:
    +			depr := isDeprecated(decl.Doc)
     			switch decl.Tok {
     			case token.CONST, token.VAR:
     				tp := "V"
     				if decl.Tok == token.CONST {
     					tp = "C"
     				}
    +				if depr {
    +					tp += "D"
    +				}
     				for _, sp := range decl.Specs {
     					for _, x := range sp.(*ast.ValueSpec).Names {
     						if s := newsym(pkg, x.Name, tp, ""); s != nil {
    @@ -141,8 +176,12 @@ func getFileExports(f *ast.File) []symbol {
     					}
     				}
     			case token.TYPE:
    +				tp := "T"
    +				if depr {
    +					tp += "D"
    +				}
     				for _, sp := range decl.Specs {
    -					if s := newsym(pkg, sp.(*ast.TypeSpec).Name.Name, "T", ""); s != nil {
    +					if s := newsym(pkg, sp.(*ast.TypeSpec).Name.Name, tp, ""); s != nil {
     						ans = append(ans, *s)
     					}
     				}
    @@ -160,6 +199,21 @@ func newsym(pkg, name, kind, sig string) *symbol {
     	return &sym
     }
     
    +func isDeprecated(doc *ast.CommentGroup) bool {
    +	if doc == nil {
    +		return false
    +	}
    +	// go.dev/wiki/Deprecated Paragraph starting 'Deprecated:'
    +	// This code fails for /* Deprecated: */, but it's the code from
    +	// gopls/internal/analysis/deprecated
    +	for line := range strings.SplitSeq(doc.Text(), "\n\n") {
    +		if strings.HasPrefix(line, "Deprecated:") {
    +			return true
    +		}
    +	}
    +	return false
    +}
    +
     // return the package name and the value for the symbols.
     // if there are multiple packages, choose one arbitrarily
     // the returned slice is sorted lexicographically
    @@ -173,17 +227,18 @@ func processSyms(syms []symbol) (string, []string) {
     	pkg := syms[0].pkg
     	var names []string
     	for _, s := range syms {
    +		if s.pkg != pkg {
    +			// Symbols came from two files in same dir
    +			// with different package declarations.
    +			continue
    +		}
     		var nx string
    -		if s.pkg == pkg {
    -			if s.sig != "" {
    -				nx = fmt.Sprintf("%s %s %s", s.name, s.kind, s.sig)
    -			} else {
    -				nx = fmt.Sprintf("%s %s", s.name, s.kind)
    -			}
    -			names = append(names, nx)
    +		if s.sig != "" {
    +			nx = fmt.Sprintf("%s %s %s", s.name, s.kind, s.sig)
     		} else {
    -			continue // PJW: do we want to keep track of these?
    +			nx = fmt.Sprintf("%s %s", s.name, s.kind)
     		}
    +		names = append(names, nx)
     	}
     	return pkg, names
     }
    diff --git a/vendor/golang.org/x/tools/internal/modindex/types.go b/vendor/golang.org/x/tools/internal/modindex/types.go
    deleted file mode 100644
    index ece448863..000000000
    --- a/vendor/golang.org/x/tools/internal/modindex/types.go
    +++ /dev/null
    @@ -1,25 +0,0 @@
    -// Copyright 2024 The Go Authors. All rights reserved.
    -// Use of this source code is governed by a BSD-style
    -// license that can be found in the LICENSE file.
    -
    -package modindex
    -
    -import (
    -	"strings"
    -)
    -
    -// some special types to avoid confusions
    -
    -// distinguish various types of directory names. It's easy to get confused.
    -type Abspath string // absolute paths
    -type Relpath string // paths with GOMODCACHE prefix removed
    -
    -func toRelpath(cachedir Abspath, s string) Relpath {
    -	if strings.HasPrefix(s, string(cachedir)) {
    -		if s == string(cachedir) {
    -			return Relpath("")
    -		}
    -		return Relpath(s[len(cachedir)+1:])
    -	}
    -	return Relpath(s)
    -}
    diff --git a/vendor/golang.org/x/tools/internal/packagesinternal/packages.go b/vendor/golang.org/x/tools/internal/packagesinternal/packages.go
    index 66e69b438..929b470be 100644
    --- a/vendor/golang.org/x/tools/internal/packagesinternal/packages.go
    +++ b/vendor/golang.org/x/tools/internal/packagesinternal/packages.go
    @@ -5,7 +5,9 @@
     // Package packagesinternal exposes internal-only fields from go/packages.
     package packagesinternal
     
    -var GetDepsErrors = func(p interface{}) []*PackageError { return nil }
    +import "fmt"
    +
    +var GetDepsErrors = func(p any) []*PackageError { return nil }
     
     type PackageError struct {
     	ImportStack []string // shortest path from package named on command line to this one
    @@ -13,8 +15,9 @@ type PackageError struct {
     	Err         string   // the error itself
     }
     
    +func (err PackageError) String() string {
    +	return fmt.Sprintf("%s: %s (import stack: %s)", err.Pos, err.Err, err.ImportStack)
    +}
    +
     var TypecheckCgo int
     var DepsErrors int // must be set as a LoadMode to call GetDepsErrors
    -
    -var SetModFlag = func(config interface{}, value string) {}
    -var SetModFile = func(config interface{}, value string) {}
    diff --git a/vendor/golang.org/x/tools/internal/pkgbits/decoder.go b/vendor/golang.org/x/tools/internal/pkgbits/decoder.go
    index f6cb37c5c..c0aba26c4 100644
    --- a/vendor/golang.org/x/tools/internal/pkgbits/decoder.go
    +++ b/vendor/golang.org/x/tools/internal/pkgbits/decoder.go
    @@ -259,7 +259,7 @@ func (r *Decoder) rawUvarint() uint64 {
     func readUvarint(r *strings.Reader) (uint64, error) {
     	var x uint64
     	var s uint
    -	for i := 0; i < binary.MaxVarintLen64; i++ {
    +	for i := range binary.MaxVarintLen64 {
     		b, err := r.ReadByte()
     		if err != nil {
     			if i > 0 && err == io.EOF {
    diff --git a/vendor/golang.org/x/tools/internal/stdlib/deps.go b/vendor/golang.org/x/tools/internal/stdlib/deps.go
    new file mode 100644
    index 000000000..96ad6c582
    --- /dev/null
    +++ b/vendor/golang.org/x/tools/internal/stdlib/deps.go
    @@ -0,0 +1,365 @@
    +// Copyright 2025 The Go Authors. All rights reserved.
    +// Use of this source code is governed by a BSD-style
    +// license that can be found in the LICENSE file.
    +
    +// Code generated by generate.go. DO NOT EDIT.
    +
    +package stdlib
    +
    +type pkginfo struct {
    +	name string
    +	deps string // list of indices of dependencies, as varint-encoded deltas
    +}
    +
    +var deps = [...]pkginfo{
    +	{"archive/tar", "\x03k\x03E;\x01\n\x01$\x01\x01\x02\x05\b\x02\x01\x02\x02\f"},
    +	{"archive/zip", "\x02\x04a\a\x03\x12\x021;\x01+\x05\x01\x0f\x03\x02\x0e\x04"},
    +	{"bufio", "\x03k\x83\x01D\x14"},
    +	{"bytes", "n*Y\x03\fG\x02\x02"},
    +	{"cmp", ""},
    +	{"compress/bzip2", "\x02\x02\xed\x01A"},
    +	{"compress/flate", "\x02l\x03\x80\x01\f\x033\x01\x03"},
    +	{"compress/gzip", "\x02\x04a\a\x03\x14lT"},
    +	{"compress/lzw", "\x02l\x03\x80\x01"},
    +	{"compress/zlib", "\x02\x04a\a\x03\x12\x01m"},
    +	{"container/heap", "\xb3\x02"},
    +	{"container/list", ""},
    +	{"container/ring", ""},
    +	{"context", "n\\m\x01\r"},
    +	{"crypto", "\x83\x01nC"},
    +	{"crypto/aes", "\x10\n\a\x93\x02"},
    +	{"crypto/cipher", "\x03\x1e\x01\x01\x1e\x11\x1c+X"},
    +	{"crypto/des", "\x10\x13\x1e-+\x9b\x01\x03"},
    +	{"crypto/dsa", "A\x04)\x83\x01\r"},
    +	{"crypto/ecdh", "\x03\v\f\x0e\x04\x15\x04\r\x1c\x83\x01"},
    +	{"crypto/ecdsa", "\x0e\x05\x03\x04\x01\x0e\a\v\x05\x01\x04\f\x01\x1c\x83\x01\r\x05K\x01"},
    +	{"crypto/ed25519", "\x0e\x1c\x11\x06\n\a\x1c\x83\x01C"},
    +	{"crypto/elliptic", "0>\x83\x01\r9"},
    +	{"crypto/fips140", " \x05"},
    +	{"crypto/hkdf", "-\x13\x01-\x15"},
    +	{"crypto/hmac", "\x1a\x14\x12\x01\x111"},
    +	{"crypto/internal/boring", "\x0e\x02\rf"},
    +	{"crypto/internal/boring/bbig", "\x1a\xe4\x01M"},
    +	{"crypto/internal/boring/bcache", "\xb8\x02\x13"},
    +	{"crypto/internal/boring/sig", ""},
    +	{"crypto/internal/cryptotest", "\x03\r\n\x06$\x0e\x19\x06\x12\x12 \x04\a\t\x16\x01\x11\x11\x1b\x01\a\x05\b\x03\x05\v"},
    +	{"crypto/internal/entropy", "F"},
    +	{"crypto/internal/fips140", "?/\x15\xa7\x01\v\x16"},
    +	{"crypto/internal/fips140/aes", "\x03\x1d\x03\x02\x13\x05\x01\x01\x05*\x92\x014"},
    +	{"crypto/internal/fips140/aes/gcm", " \x01\x02\x02\x02\x11\x05\x01\x06*\x8f\x01"},
    +	{"crypto/internal/fips140/alias", "\xcb\x02"},
    +	{"crypto/internal/fips140/bigmod", "%\x18\x01\x06*\x92\x01"},
    +	{"crypto/internal/fips140/check", " \x0e\x06\t\x02\xb2\x01Z"},
    +	{"crypto/internal/fips140/check/checktest", "%\x85\x02!"},
    +	{"crypto/internal/fips140/drbg", "\x03\x1c\x01\x01\x04\x13\x05\b\x01(\x83\x01\x0f7"},
    +	{"crypto/internal/fips140/ecdh", "\x03\x1d\x05\x02\t\r1\x83\x01\x0f7"},
    +	{"crypto/internal/fips140/ecdsa", "\x03\x1d\x04\x01\x02\a\x02\x068\x15nF"},
    +	{"crypto/internal/fips140/ed25519", "\x03\x1d\x05\x02\x04\v8\xc6\x01\x03"},
    +	{"crypto/internal/fips140/edwards25519", "%\a\f\x051\x92\x017"},
    +	{"crypto/internal/fips140/edwards25519/field", "%\x13\x051\x92\x01"},
    +	{"crypto/internal/fips140/hkdf", "\x03\x1d\x05\t\x06:\x15"},
    +	{"crypto/internal/fips140/hmac", "\x03\x1d\x14\x01\x018\x15"},
    +	{"crypto/internal/fips140/mlkem", "\x03\x1d\x05\x02\x0e\x03\x051"},
    +	{"crypto/internal/fips140/nistec", "%\f\a\x051\x92\x01*\r\x14"},
    +	{"crypto/internal/fips140/nistec/fiat", "%\x136\x92\x01"},
    +	{"crypto/internal/fips140/pbkdf2", "\x03\x1d\x05\t\x06:\x15"},
    +	{"crypto/internal/fips140/rsa", "\x03\x1d\x04\x01\x02\r\x01\x01\x026\x15nF"},
    +	{"crypto/internal/fips140/sha256", "\x03\x1d\x1d\x01\x06*\x15}"},
    +	{"crypto/internal/fips140/sha3", "\x03\x1d\x18\x05\x010\x92\x01K"},
    +	{"crypto/internal/fips140/sha512", "\x03\x1d\x1d\x01\x06*\x15}"},
    +	{"crypto/internal/fips140/ssh", "%^"},
    +	{"crypto/internal/fips140/subtle", "#\x1a\xc3\x01"},
    +	{"crypto/internal/fips140/tls12", "\x03\x1d\x05\t\x06\x028\x15"},
    +	{"crypto/internal/fips140/tls13", "\x03\x1d\x05\b\a\t1\x15"},
    +	{"crypto/internal/fips140cache", "\xaa\x02\r&"},
    +	{"crypto/internal/fips140deps", ""},
    +	{"crypto/internal/fips140deps/byteorder", "\x99\x01"},
    +	{"crypto/internal/fips140deps/cpu", "\xae\x01\a"},
    +	{"crypto/internal/fips140deps/godebug", "\xb6\x01"},
    +	{"crypto/internal/fips140hash", "5\x1b3\xc8\x01"},
    +	{"crypto/internal/fips140only", "'\r\x01\x01M3;"},
    +	{"crypto/internal/fips140test", ""},
    +	{"crypto/internal/hpke", "\x0e\x01\x01\x03\x053#+gM"},
    +	{"crypto/internal/impl", "\xb5\x02"},
    +	{"crypto/internal/randutil", "\xf1\x01\x12"},
    +	{"crypto/internal/sysrand", "nn! \r\r\x01\x01\f\x06"},
    +	{"crypto/internal/sysrand/internal/seccomp", "n"},
    +	{"crypto/md5", "\x0e3-\x15\x16g"},
    +	{"crypto/mlkem", "/"},
    +	{"crypto/pbkdf2", "2\x0e\x01-\x15"},
    +	{"crypto/rand", "\x1a\x06\a\x1a\x04\x01(\x83\x01\rM"},
    +	{"crypto/rc4", "#\x1e-\xc6\x01"},
    +	{"crypto/rsa", "\x0e\f\x01\t\x0f\r\x01\x04\x06\a\x1c\x03\x123;\f\x01"},
    +	{"crypto/sha1", "\x0e\f'\x03*\x15\x16\x15R"},
    +	{"crypto/sha256", "\x0e\f\x1aO"},
    +	{"crypto/sha3", "\x0e'N\xc8\x01"},
    +	{"crypto/sha512", "\x0e\f\x1cM"},
    +	{"crypto/subtle", "8\x9b\x01W"},
    +	{"crypto/tls", "\x03\b\x02\x01\x01\x01\x01\x02\x01\x01\x01\x02\x01\x01\a\x01\r\n\x01\t\x05\x03\x01\x01\x01\x01\x02\x01\x02\x01\x17\x02\x03\x12\x16\x15\b;\x16\x16\r\b\x01\x01\x01\x02\x01\r\x06\x02\x01\x0f"},
    +	{"crypto/tls/internal/fips140tls", "\x17\xa1\x02"},
    +	{"crypto/x509", "\x03\v\x01\x01\x01\x01\x01\x01\x01\x012\x05\x01\x01\x02\x05\x0e\x06\x02\x02\x03E\x038\x01\x02\b\x01\x01\x02\a\x10\x05\x01\x06\x02\x05\n\x01\x02\x0e\x02\x01\x01\x02\x03\x01"},
    +	{"crypto/x509/pkix", "d\x06\a\x8d\x01G"},
    +	{"database/sql", "\x03\nK\x16\x03\x80\x01\v\a\"\x05\b\x02\x03\x01\r\x02\x02\x02"},
    +	{"database/sql/driver", "\ra\x03\xb4\x01\x0f\x11"},
    +	{"debug/buildinfo", "\x03X\x02\x01\x01\b\a\x03e\x19\x02\x01+\x0f\x1f"},
    +	{"debug/dwarf", "\x03d\a\x03\x80\x011\x11\x01\x01"},
    +	{"debug/elf", "\x03\x06Q\r\a\x03e\x1a\x01,\x17\x01\x16"},
    +	{"debug/gosym", "\x03d\n\xc2\x01\x01\x01\x02"},
    +	{"debug/macho", "\x03\x06Q\r\ne\x1b,\x17\x01"},
    +	{"debug/pe", "\x03\x06Q\r\a\x03e\x1b,\x17\x01\x16"},
    +	{"debug/plan9obj", "g\a\x03e\x1b,"},
    +	{"embed", "n*@\x19\x01S"},
    +	{"embed/internal/embedtest", ""},
    +	{"encoding", ""},
    +	{"encoding/ascii85", "\xf1\x01C"},
    +	{"encoding/asn1", "\x03k\x03\x8c\x01\x01'\r\x02\x01\x10\x03\x01"},
    +	{"encoding/base32", "\xf1\x01A\x02"},
    +	{"encoding/base64", "\x99\x01XA\x02"},
    +	{"encoding/binary", "n\x83\x01\f(\r\x05"},
    +	{"encoding/csv", "\x02\x01k\x03\x80\x01D\x12\x02"},
    +	{"encoding/gob", "\x02`\x05\a\x03e\x1b\v\x01\x03\x1d\b\x12\x01\x0f\x02"},
    +	{"encoding/hex", "n\x03\x80\x01A\x03"},
    +	{"encoding/json", "\x03\x01^\x04\b\x03\x80\x01\f(\r\x02\x01\x02\x10\x01\x01\x02"},
    +	{"encoding/pem", "\x03c\b\x83\x01A\x03"},
    +	{"encoding/xml", "\x02\x01_\f\x03\x80\x014\x05\n\x01\x02\x10\x02"},
    +	{"errors", "\xca\x01\x81\x01"},
    +	{"expvar", "kK?\b\v\x15\r\b\x02\x03\x01\x11"},
    +	{"flag", "b\f\x03\x80\x01,\b\x05\b\x02\x01\x10"},
    +	{"fmt", "nE>\f \b\r\x02\x03\x12"},
    +	{"go/ast", "\x03\x01m\x0e\x01q\x03)\b\r\x02\x01"},
    +	{"go/build", "\x02\x01k\x03\x01\x02\x02\a\x02\x01\x17\x1f\x04\x02\t\x19\x13\x01+\x01\x04\x01\a\b\x02\x01\x12\x02\x02"},
    +	{"go/build/constraint", "n\xc6\x01\x01\x12\x02"},
    +	{"go/constant", "q\x0f}\x01\x024\x01\x02\x12"},
    +	{"go/doc", "\x04m\x01\x05\t>31\x10\x02\x01\x12\x02"},
    +	{"go/doc/comment", "\x03n\xc1\x01\x01\x01\x01\x12\x02"},
    +	{"go/format", "\x03n\x01\v\x01\x02qD"},
    +	{"go/importer", "s\a\x01\x01\x04\x01p9"},
    +	{"go/internal/gccgoimporter", "\x02\x01X\x13\x03\x04\v\x01n\x02,\x01\x05\x11\x01\f\b"},
    +	{"go/internal/gcimporter", "\x02o\x0f\x010\x05\x0e-,\x15\x03\x02"},
    +	{"go/internal/srcimporter", "q\x01\x01\n\x03\x01p,\x01\x05\x12\x02\x14"},
    +	{"go/parser", "\x03k\x03\x01\x02\v\x01q\x01+\x06\x12"},
    +	{"go/printer", "q\x01\x02\x03\tq\f \x15\x02\x01\x02\v\x05\x02"},
    +	{"go/scanner", "\x03n\x0fq2\x10\x01\x13\x02"},
    +	{"go/token", "\x04m\x83\x01>\x02\x03\x01\x0f\x02"},
    +	{"go/types", "\x03\x01\x06d\x03\x01\x03\b\x03\x02\x15\x1f\x061\x04\x03\t \x06\a\b\x01\x01\x01\x02\x01\x0f\x02\x02"},
    +	{"go/version", "\xbb\x01z"},
    +	{"hash", "\xf1\x01"},
    +	{"hash/adler32", "n\x15\x16"},
    +	{"hash/crc32", "n\x15\x16\x15\x89\x01\x01\x13"},
    +	{"hash/crc64", "n\x15\x16\x9e\x01"},
    +	{"hash/fnv", "n\x15\x16g"},
    +	{"hash/maphash", "\x83\x01\x11!\x03\x93\x01"},
    +	{"html", "\xb5\x02\x02\x12"},
    +	{"html/template", "\x03h\x06\x18-;\x01\n!\x05\x01\x02\x03\f\x01\x02\f\x01\x03\x02"},
    +	{"image", "\x02l\x1ee\x0f4\x03\x01"},
    +	{"image/color", ""},
    +	{"image/color/palette", "\x8c\x01"},
    +	{"image/draw", "\x8b\x01\x01\x04"},
    +	{"image/gif", "\x02\x01\x05f\x03\x1a\x01\x01\x01\vX"},
    +	{"image/internal/imageutil", "\x8b\x01"},
    +	{"image/jpeg", "\x02l\x1d\x01\x04a"},
    +	{"image/png", "\x02\a^\n\x12\x02\x06\x01eC"},
    +	{"index/suffixarray", "\x03d\a\x83\x01\f+\n\x01"},
    +	{"internal/abi", "\xb5\x01\x96\x01"},
    +	{"internal/asan", "\xcb\x02"},
    +	{"internal/bisect", "\xaa\x02\r\x01"},
    +	{"internal/buildcfg", "qGe\x06\x02\x05\n\x01"},
    +	{"internal/bytealg", "\xae\x01\x9d\x01"},
    +	{"internal/byteorder", ""},
    +	{"internal/cfg", ""},
    +	{"internal/cgrouptest", "q[Q\x06\x0f\x02\x01\x04\x01"},
    +	{"internal/chacha8rand", "\x99\x01\x15\a\x96\x01"},
    +	{"internal/copyright", ""},
    +	{"internal/coverage", ""},
    +	{"internal/coverage/calloc", ""},
    +	{"internal/coverage/cfile", "k\x06\x16\x17\x01\x02\x01\x01\x01\x01\x01\x01\x01#\x02$,\x06\a\n\x01\x03\r\x06"},
    +	{"internal/coverage/cformat", "\x04m-\x04O\v6\x01\x02\r"},
    +	{"internal/coverage/cmerge", "q-_"},
    +	{"internal/coverage/decodecounter", "g\n-\v\x02F,\x17\x17"},
    +	{"internal/coverage/decodemeta", "\x02e\n\x16\x17\v\x02F,"},
    +	{"internal/coverage/encodecounter", "\x02e\n-\f\x01\x02D\v!\x15"},
    +	{"internal/coverage/encodemeta", "\x02\x01d\n\x12\x04\x17\r\x02D,."},
    +	{"internal/coverage/pods", "\x04m-\x7f\x06\x05\n\x02\x01"},
    +	{"internal/coverage/rtcov", "\xcb\x02"},
    +	{"internal/coverage/slicereader", "g\n\x80\x01Z"},
    +	{"internal/coverage/slicewriter", "q\x80\x01"},
    +	{"internal/coverage/stringtab", "q8\x04D"},
    +	{"internal/coverage/test", ""},
    +	{"internal/coverage/uleb128", ""},
    +	{"internal/cpu", "\xcb\x02"},
    +	{"internal/dag", "\x04m\xc1\x01\x03"},
    +	{"internal/diff", "\x03n\xc2\x01\x02"},
    +	{"internal/exportdata", "\x02\x01k\x03\x02c\x1b,\x01\x05\x11\x01\x02"},
    +	{"internal/filepathlite", "n*@\x1a@"},
    +	{"internal/fmtsort", "\x04\xa1\x02\r"},
    +	{"internal/fuzz", "\x03\nB\x18\x04\x03\x03\x01\v\x036;\f\x03\x1d\x01\x05\x02\x05\n\x01\x02\x01\x01\f\x04\x02"},
    +	{"internal/goarch", ""},
    +	{"internal/godebug", "\x96\x01!\x80\x01\x01\x13"},
    +	{"internal/godebugs", ""},
    +	{"internal/goexperiment", ""},
    +	{"internal/goos", ""},
    +	{"internal/goroot", "\x9d\x02\x01\x05\x12\x02"},
    +	{"internal/gover", "\x04"},
    +	{"internal/goversion", ""},
    +	{"internal/itoa", ""},
    +	{"internal/lazyregexp", "\x9d\x02\v\r\x02"},
    +	{"internal/lazytemplate", "\xf1\x01,\x18\x02\f"},
    +	{"internal/msan", "\xcb\x02"},
    +	{"internal/nettrace", ""},
    +	{"internal/obscuretestdata", "f\x8b\x01,"},
    +	{"internal/oserror", "n"},
    +	{"internal/pkgbits", "\x03L\x18\a\x03\x04\vq\r\x1f\r\n\x01"},
    +	{"internal/platform", ""},
    +	{"internal/poll", "nO\x1f\x159\r\x01\x01\f\x06"},
    +	{"internal/profile", "\x03\x04g\x03\x80\x017\v\x01\x01\x10"},
    +	{"internal/profilerecord", ""},
    +	{"internal/race", "\x94\x01\xb7\x01"},
    +	{"internal/reflectlite", "\x94\x01!9\b\x13\x01\a\x03E;\x01\x03\a\x01\x03\x02\x02\x01\x02\x06\x02\x01\x01\n\x01\x01\x05\x01\x02\x05\b\x01\x01\x01\x02\x01\r\x02\x02\x02\b\x01\x01\x01"},
    +	{"net/http/cgi", "\x02Q\x1b\x03\x80\x01\x04\a\v\x01\x13\x01\x01\x01\x04\x01\x05\x02\b\x02\x01\x10\x0e"},
    +	{"net/http/cookiejar", "\x04j\x03\x96\x01\x01\b\f\x16\x03\x02\x0e\x04"},
    +	{"net/http/fcgi", "\x02\x01\nZ\a\x03\x80\x01\x16\x01\x01\x14\x18\x02\x0e"},
    +	{"net/http/httptest", "\x02\x01\nF\x02\x1b\x01\x80\x01\x04\x12\x01\n\t\x02\x17\x01\x02\x0e\x0e"},
    +	{"net/http/httptrace", "\rFnF\x14\n "},
    +	{"net/http/httputil", "\x02\x01\na\x03\x80\x01\x04\x0f\x03\x01\x05\x02\x01\v\x01\x19\x02\x0e\x0e"},
    +	{"net/http/internal", "\x02\x01k\x03\x80\x01"},
    +	{"net/http/internal/ascii", "\xb5\x02\x12"},
    +	{"net/http/internal/httpcommon", "\ra\x03\x9c\x01\x0e\x01\x17\x01\x01\x02\x1c\x02"},
    +	{"net/http/internal/testcert", "\xb5\x02"},
    +	{"net/http/pprof", "\x02\x01\nd\x18-\x11*\x04\x13\x14\x01\r\x04\x03\x01\x02\x01\x10"},
    +	{"net/internal/cgotest", ""},
    +	{"net/internal/socktest", "q\xc6\x01\x02"},
    +	{"net/mail", "\x02l\x03\x80\x01\x04\x0f\x03\x14\x1a\x02\x0e\x04"},
    +	{"net/netip", "\x04j*\x01$@\x034\x16"},
    +	{"net/rpc", "\x02g\x05\x03\x0f\ng\x04\x12\x01\x1d\r\x03\x02"},
    +	{"net/rpc/jsonrpc", "k\x03\x03\x80\x01\x16\x11\x1f"},
    +	{"net/smtp", "\x19/\v\x13\b\x03\x80\x01\x16\x14\x1a"},
    +	{"net/textproto", "\x02\x01k\x03\x80\x01\f\n-\x01\x02\x14"},
    +	{"net/url", "n\x03\x8b\x01&\x10\x02\x01\x16"},
    +	{"os", "n*\x01\x19\x03\b\t\x12\x03\x01\x05\x10\x018\b\x05\x01\x01\f\x06"},
    +	{"os/exec", "\x03\naH%\x01\x15\x01+\x06\a\n\x01\x04\f"},
    +	{"os/exec/internal/fdtest", "\xb9\x02"},
    +	{"os/signal", "\r\x90\x02\x15\x05\x02"},
    +	{"os/user", "\x02\x01k\x03\x80\x01,\r\n\x01\x02"},
    +	{"path", "n*\xb1\x01"},
    +	{"path/filepath", "n*\x1a@+\r\b\x03\x04\x10"},
    +	{"plugin", "n"},
    +	{"reflect", "n&\x04\x1d\b\f\x06\x04\x1b\x06\t-\n\x03\x10\x02\x02"},
    +	{"reflect/internal/example1", ""},
    +	{"reflect/internal/example2", ""},
    +	{"regexp", "\x03\xee\x018\t\x02\x01\x02\x10\x02"},
    +	{"regexp/syntax", "\xb2\x02\x01\x01\x01\x02\x10\x02"},
    +	{"runtime", "\x94\x01\x04\x01\x03\f\x06\a\x02\x01\x01\x0f\x03\x01\x01\x01\x01\x01\x02\x01\x01\x04\x10c"},
    +	{"runtime/coverage", "\xa0\x01Q"},
    +	{"runtime/debug", "qUW\r\b\x02\x01\x10\x06"},
    +	{"runtime/metrics", "\xb7\x01F-!"},
    +	{"runtime/pprof", "\x02\x01\x01\x03\x06Z\a\x03#4)\f \r\b\x01\x01\x01\x02\x02\t\x03\x06"},
    +	{"runtime/race", "\xb0\x02"},
    +	{"runtime/race/internal/amd64v1", ""},
    +	{"runtime/trace", "\ra\x03w\t9\b\x05\x01\r\x06"},
    +	{"slices", "\x04\xf0\x01\fK"},
    +	{"sort", "\xca\x0162"},
    +	{"strconv", "n*@%\x03I"},
    +	{"strings", "n&\x04@\x19\x03\f7\x10\x02\x02"},
    +	{"structs", ""},
    +	{"sync", "\xc9\x01\x10\x01P\x0e\x13"},
    +	{"sync/atomic", "\xcb\x02"},
    +	{"syscall", "n'\x03\x01\x1c\b\x03\x03\x06\vV\b\x05\x01\x13"},
    +	{"testing", "\x03\na\x02\x01X\x14\x14\f\x05\x1b\x06\x02\x05\x02\x05\x01\x02\x01\x02\x01\r\x02\x02\x02"},
    +	{"testing/fstest", "n\x03\x80\x01\x01\n&\x10\x03\b\b"},
    +	{"testing/internal/testdeps", "\x02\v\xa7\x01-\x10,\x03\x05\x03\x06\a\x02\x0e"},
    +	{"testing/iotest", "\x03k\x03\x80\x01\x04"},
    +	{"testing/quick", "p\x01\x8c\x01\x05#\x10\x10"},
    +	{"testing/slogtest", "\ra\x03\x86\x01.\x05\x10\v"},
    +	{"testing/synctest", "\xda\x01`\x11"},
    +	{"text/scanner", "\x03n\x80\x01,*\x02"},
    +	{"text/tabwriter", "q\x80\x01X"},
    +	{"text/template", "n\x03B>\x01\n \x01\x05\x01\x02\x05\v\x02\r\x03\x02"},
    +	{"text/template/parse", "\x03n\xb9\x01\n\x01\x12\x02"},
    +	{"time", "n*\x1e\"(*\r\x02\x12"},
    +	{"time/tzdata", "n\xcb\x01\x12"},
    +	{"unicode", ""},
    +	{"unicode/utf16", ""},
    +	{"unicode/utf8", ""},
    +	{"unique", "\x94\x01!#\x01Q\r\x01\x13\x12"},
    +	{"unsafe", ""},
    +	{"vendor/golang.org/x/crypto/chacha20", "\x10W\a\x92\x01*&"},
    +	{"vendor/golang.org/x/crypto/chacha20poly1305", "\x10W\a\xde\x01\x04\x01\a"},
    +	{"vendor/golang.org/x/crypto/cryptobyte", "d\n\x03\x8d\x01' \n"},
    +	{"vendor/golang.org/x/crypto/cryptobyte/asn1", ""},
    +	{"vendor/golang.org/x/crypto/internal/alias", "\xcb\x02"},
    +	{"vendor/golang.org/x/crypto/internal/poly1305", "R\x15\x99\x01"},
    +	{"vendor/golang.org/x/net/dns/dnsmessage", "n"},
    +	{"vendor/golang.org/x/net/http/httpguts", "\x87\x02\x14\x1a\x14\r"},
    +	{"vendor/golang.org/x/net/http/httpproxy", "n\x03\x96\x01\x10\x05\x01\x18\x14\r"},
    +	{"vendor/golang.org/x/net/http2/hpack", "\x03k\x03\x80\x01F"},
    +	{"vendor/golang.org/x/net/idna", "q\x8c\x018\x14\x10\x02\x01"},
    +	{"vendor/golang.org/x/net/nettest", "\x03d\a\x03\x80\x01\x11\x05\x16\x01\f\n\x01\x02\x02\x01\v"},
    +	{"vendor/golang.org/x/sys/cpu", "\x9d\x02\r\n\x01\x16"},
    +	{"vendor/golang.org/x/text/secure/bidirule", "n\xdb\x01\x11\x01"},
    +	{"vendor/golang.org/x/text/transform", "\x03k\x83\x01X"},
    +	{"vendor/golang.org/x/text/unicode/bidi", "\x03\bf\x84\x01>\x16"},
    +	{"vendor/golang.org/x/text/unicode/norm", "g\n\x80\x01F\x12\x11"},
    +	{"weak", "\x94\x01\x96\x01!"},
    +}
    diff --git a/vendor/golang.org/x/tools/internal/stdlib/import.go b/vendor/golang.org/x/tools/internal/stdlib/import.go
    new file mode 100644
    index 000000000..f6909878a
    --- /dev/null
    +++ b/vendor/golang.org/x/tools/internal/stdlib/import.go
    @@ -0,0 +1,89 @@
    +// Copyright 2025 The Go Authors. All rights reserved.
    +// Use of this source code is governed by a BSD-style
    +// license that can be found in the LICENSE file.
    +
    +package stdlib
    +
    +// This file provides the API for the import graph of the standard library.
    +//
    +// Be aware that the compiler-generated code for every package
    +// implicitly depends on package "runtime" and a handful of others
    +// (see runtimePkgs in GOROOT/src/cmd/internal/objabi/pkgspecial.go).
    +
    +import (
    +	"encoding/binary"
    +	"iter"
    +	"slices"
    +	"strings"
    +)
    +
    +// Imports returns the sequence of packages directly imported by the
    +// named standard packages, in name order.
    +// The imports of an unknown package are the empty set.
    +//
    +// The graph is built into the application and may differ from the
    +// graph in the Go source tree being analyzed by the application.
    +func Imports(pkgs ...string) iter.Seq[string] {
    +	return func(yield func(string) bool) {
    +		for _, pkg := range pkgs {
    +			if i, ok := find(pkg); ok {
    +				var depIndex uint64
    +				for data := []byte(deps[i].deps); len(data) > 0; {
    +					delta, n := binary.Uvarint(data)
    +					depIndex += delta
    +					if !yield(deps[depIndex].name) {
    +						return
    +					}
    +					data = data[n:]
    +				}
    +			}
    +		}
    +	}
    +}
    +
    +// Dependencies returns the set of all dependencies of the named
    +// standard packages, including the initial package,
    +// in a deterministic topological order.
    +// The dependencies of an unknown package are the empty set.
    +//
    +// The graph is built into the application and may differ from the
    +// graph in the Go source tree being analyzed by the application.
    +func Dependencies(pkgs ...string) iter.Seq[string] {
    +	return func(yield func(string) bool) {
    +		for _, pkg := range pkgs {
    +			if i, ok := find(pkg); ok {
    +				var seen [1 + len(deps)/8]byte // bit set of seen packages
    +				var visit func(i int) bool
    +				visit = func(i int) bool {
    +					bit := byte(1) << (i % 8)
    +					if seen[i/8]&bit == 0 {
    +						seen[i/8] |= bit
    +						var depIndex uint64
    +						for data := []byte(deps[i].deps); len(data) > 0; {
    +							delta, n := binary.Uvarint(data)
    +							depIndex += delta
    +							if !visit(int(depIndex)) {
    +								return false
    +							}
    +							data = data[n:]
    +						}
    +						if !yield(deps[i].name) {
    +							return false
    +						}
    +					}
    +					return true
    +				}
    +				if !visit(i) {
    +					return
    +				}
    +			}
    +		}
    +	}
    +}
    +
    +// find returns the index of pkg in the deps table.
    +func find(pkg string) (int, bool) {
    +	return slices.BinarySearchFunc(deps[:], pkg, func(p pkginfo, n string) int {
    +		return strings.Compare(p.name, n)
    +	})
    +}
    diff --git a/vendor/golang.org/x/tools/internal/stdlib/manifest.go b/vendor/golang.org/x/tools/internal/stdlib/manifest.go
    index cdaac9ab3..c1faa50d3 100644
    --- a/vendor/golang.org/x/tools/internal/stdlib/manifest.go
    +++ b/vendor/golang.org/x/tools/internal/stdlib/manifest.go
    @@ -1,4 +1,4 @@
    -// Copyright 2024 The Go Authors. All rights reserved.
    +// Copyright 2025 The Go Authors. All rights reserved.
     // Use of this source code is governed by a BSD-style
     // license that can be found in the LICENSE file.
     
    @@ -8,17424 +8,17719 @@ package stdlib
     
     var PackageSymbols = map[string][]Symbol{
     	"archive/tar": {
    -		{"(*Header).FileInfo", Method, 1},
    -		{"(*Reader).Next", Method, 0},
    -		{"(*Reader).Read", Method, 0},
    -		{"(*Writer).AddFS", Method, 22},
    -		{"(*Writer).Close", Method, 0},
    -		{"(*Writer).Flush", Method, 0},
    -		{"(*Writer).Write", Method, 0},
    -		{"(*Writer).WriteHeader", Method, 0},
    -		{"(Format).String", Method, 10},
    -		{"ErrFieldTooLong", Var, 0},
    -		{"ErrHeader", Var, 0},
    -		{"ErrInsecurePath", Var, 20},
    -		{"ErrWriteAfterClose", Var, 0},
    -		{"ErrWriteTooLong", Var, 0},
    -		{"FileInfoHeader", Func, 1},
    -		{"FileInfoNames", Type, 23},
    -		{"Format", Type, 10},
    -		{"FormatGNU", Const, 10},
    -		{"FormatPAX", Const, 10},
    -		{"FormatUSTAR", Const, 10},
    -		{"FormatUnknown", Const, 10},
    -		{"Header", Type, 0},
    -		{"Header.AccessTime", Field, 0},
    -		{"Header.ChangeTime", Field, 0},
    -		{"Header.Devmajor", Field, 0},
    -		{"Header.Devminor", Field, 0},
    -		{"Header.Format", Field, 10},
    -		{"Header.Gid", Field, 0},
    -		{"Header.Gname", Field, 0},
    -		{"Header.Linkname", Field, 0},
    -		{"Header.ModTime", Field, 0},
    -		{"Header.Mode", Field, 0},
    -		{"Header.Name", Field, 0},
    -		{"Header.PAXRecords", Field, 10},
    -		{"Header.Size", Field, 0},
    -		{"Header.Typeflag", Field, 0},
    -		{"Header.Uid", Field, 0},
    -		{"Header.Uname", Field, 0},
    -		{"Header.Xattrs", Field, 3},
    -		{"NewReader", Func, 0},
    -		{"NewWriter", Func, 0},
    -		{"Reader", Type, 0},
    -		{"TypeBlock", Const, 0},
    -		{"TypeChar", Const, 0},
    -		{"TypeCont", Const, 0},
    -		{"TypeDir", Const, 0},
    -		{"TypeFifo", Const, 0},
    -		{"TypeGNULongLink", Const, 1},
    -		{"TypeGNULongName", Const, 1},
    -		{"TypeGNUSparse", Const, 3},
    -		{"TypeLink", Const, 0},
    -		{"TypeReg", Const, 0},
    -		{"TypeRegA", Const, 0},
    -		{"TypeSymlink", Const, 0},
    -		{"TypeXGlobalHeader", Const, 0},
    -		{"TypeXHeader", Const, 0},
    -		{"Writer", Type, 0},
    +		{"(*Header).FileInfo", Method, 1, ""},
    +		{"(*Reader).Next", Method, 0, ""},
    +		{"(*Reader).Read", Method, 0, ""},
    +		{"(*Writer).AddFS", Method, 22, ""},
    +		{"(*Writer).Close", Method, 0, ""},
    +		{"(*Writer).Flush", Method, 0, ""},
    +		{"(*Writer).Write", Method, 0, ""},
    +		{"(*Writer).WriteHeader", Method, 0, ""},
    +		{"(Format).String", Method, 10, ""},
    +		{"ErrFieldTooLong", Var, 0, ""},
    +		{"ErrHeader", Var, 0, ""},
    +		{"ErrInsecurePath", Var, 20, ""},
    +		{"ErrWriteAfterClose", Var, 0, ""},
    +		{"ErrWriteTooLong", Var, 0, ""},
    +		{"FileInfoHeader", Func, 1, "func(fi fs.FileInfo, link string) (*Header, error)"},
    +		{"FileInfoNames", Type, 23, ""},
    +		{"Format", Type, 10, ""},
    +		{"FormatGNU", Const, 10, ""},
    +		{"FormatPAX", Const, 10, ""},
    +		{"FormatUSTAR", Const, 10, ""},
    +		{"FormatUnknown", Const, 10, ""},
    +		{"Header", Type, 0, ""},
    +		{"Header.AccessTime", Field, 0, ""},
    +		{"Header.ChangeTime", Field, 0, ""},
    +		{"Header.Devmajor", Field, 0, ""},
    +		{"Header.Devminor", Field, 0, ""},
    +		{"Header.Format", Field, 10, ""},
    +		{"Header.Gid", Field, 0, ""},
    +		{"Header.Gname", Field, 0, ""},
    +		{"Header.Linkname", Field, 0, ""},
    +		{"Header.ModTime", Field, 0, ""},
    +		{"Header.Mode", Field, 0, ""},
    +		{"Header.Name", Field, 0, ""},
    +		{"Header.PAXRecords", Field, 10, ""},
    +		{"Header.Size", Field, 0, ""},
    +		{"Header.Typeflag", Field, 0, ""},
    +		{"Header.Uid", Field, 0, ""},
    +		{"Header.Uname", Field, 0, ""},
    +		{"Header.Xattrs", Field, 3, ""},
    +		{"NewReader", Func, 0, "func(r io.Reader) *Reader"},
    +		{"NewWriter", Func, 0, "func(w io.Writer) *Writer"},
    +		{"Reader", Type, 0, ""},
    +		{"TypeBlock", Const, 0, ""},
    +		{"TypeChar", Const, 0, ""},
    +		{"TypeCont", Const, 0, ""},
    +		{"TypeDir", Const, 0, ""},
    +		{"TypeFifo", Const, 0, ""},
    +		{"TypeGNULongLink", Const, 1, ""},
    +		{"TypeGNULongName", Const, 1, ""},
    +		{"TypeGNUSparse", Const, 3, ""},
    +		{"TypeLink", Const, 0, ""},
    +		{"TypeReg", Const, 0, ""},
    +		{"TypeRegA", Const, 0, ""},
    +		{"TypeSymlink", Const, 0, ""},
    +		{"TypeXGlobalHeader", Const, 0, ""},
    +		{"TypeXHeader", Const, 0, ""},
    +		{"Writer", Type, 0, ""},
     	},
     	"archive/zip": {
    -		{"(*File).DataOffset", Method, 2},
    -		{"(*File).FileInfo", Method, 0},
    -		{"(*File).ModTime", Method, 0},
    -		{"(*File).Mode", Method, 0},
    -		{"(*File).Open", Method, 0},
    -		{"(*File).OpenRaw", Method, 17},
    -		{"(*File).SetModTime", Method, 0},
    -		{"(*File).SetMode", Method, 0},
    -		{"(*FileHeader).FileInfo", Method, 0},
    -		{"(*FileHeader).ModTime", Method, 0},
    -		{"(*FileHeader).Mode", Method, 0},
    -		{"(*FileHeader).SetModTime", Method, 0},
    -		{"(*FileHeader).SetMode", Method, 0},
    -		{"(*ReadCloser).Close", Method, 0},
    -		{"(*ReadCloser).Open", Method, 16},
    -		{"(*ReadCloser).RegisterDecompressor", Method, 6},
    -		{"(*Reader).Open", Method, 16},
    -		{"(*Reader).RegisterDecompressor", Method, 6},
    -		{"(*Writer).AddFS", Method, 22},
    -		{"(*Writer).Close", Method, 0},
    -		{"(*Writer).Copy", Method, 17},
    -		{"(*Writer).Create", Method, 0},
    -		{"(*Writer).CreateHeader", Method, 0},
    -		{"(*Writer).CreateRaw", Method, 17},
    -		{"(*Writer).Flush", Method, 4},
    -		{"(*Writer).RegisterCompressor", Method, 6},
    -		{"(*Writer).SetComment", Method, 10},
    -		{"(*Writer).SetOffset", Method, 5},
    -		{"Compressor", Type, 2},
    -		{"Decompressor", Type, 2},
    -		{"Deflate", Const, 0},
    -		{"ErrAlgorithm", Var, 0},
    -		{"ErrChecksum", Var, 0},
    -		{"ErrFormat", Var, 0},
    -		{"ErrInsecurePath", Var, 20},
    -		{"File", Type, 0},
    -		{"File.FileHeader", Field, 0},
    -		{"FileHeader", Type, 0},
    -		{"FileHeader.CRC32", Field, 0},
    -		{"FileHeader.Comment", Field, 0},
    -		{"FileHeader.CompressedSize", Field, 0},
    -		{"FileHeader.CompressedSize64", Field, 1},
    -		{"FileHeader.CreatorVersion", Field, 0},
    -		{"FileHeader.ExternalAttrs", Field, 0},
    -		{"FileHeader.Extra", Field, 0},
    -		{"FileHeader.Flags", Field, 0},
    -		{"FileHeader.Method", Field, 0},
    -		{"FileHeader.Modified", Field, 10},
    -		{"FileHeader.ModifiedDate", Field, 0},
    -		{"FileHeader.ModifiedTime", Field, 0},
    -		{"FileHeader.Name", Field, 0},
    -		{"FileHeader.NonUTF8", Field, 10},
    -		{"FileHeader.ReaderVersion", Field, 0},
    -		{"FileHeader.UncompressedSize", Field, 0},
    -		{"FileHeader.UncompressedSize64", Field, 1},
    -		{"FileInfoHeader", Func, 0},
    -		{"NewReader", Func, 0},
    -		{"NewWriter", Func, 0},
    -		{"OpenReader", Func, 0},
    -		{"ReadCloser", Type, 0},
    -		{"ReadCloser.Reader", Field, 0},
    -		{"Reader", Type, 0},
    -		{"Reader.Comment", Field, 0},
    -		{"Reader.File", Field, 0},
    -		{"RegisterCompressor", Func, 2},
    -		{"RegisterDecompressor", Func, 2},
    -		{"Store", Const, 0},
    -		{"Writer", Type, 0},
    +		{"(*File).DataOffset", Method, 2, ""},
    +		{"(*File).FileInfo", Method, 0, ""},
    +		{"(*File).ModTime", Method, 0, ""},
    +		{"(*File).Mode", Method, 0, ""},
    +		{"(*File).Open", Method, 0, ""},
    +		{"(*File).OpenRaw", Method, 17, ""},
    +		{"(*File).SetModTime", Method, 0, ""},
    +		{"(*File).SetMode", Method, 0, ""},
    +		{"(*FileHeader).FileInfo", Method, 0, ""},
    +		{"(*FileHeader).ModTime", Method, 0, ""},
    +		{"(*FileHeader).Mode", Method, 0, ""},
    +		{"(*FileHeader).SetModTime", Method, 0, ""},
    +		{"(*FileHeader).SetMode", Method, 0, ""},
    +		{"(*ReadCloser).Close", Method, 0, ""},
    +		{"(*ReadCloser).Open", Method, 16, ""},
    +		{"(*ReadCloser).RegisterDecompressor", Method, 6, ""},
    +		{"(*Reader).Open", Method, 16, ""},
    +		{"(*Reader).RegisterDecompressor", Method, 6, ""},
    +		{"(*Writer).AddFS", Method, 22, ""},
    +		{"(*Writer).Close", Method, 0, ""},
    +		{"(*Writer).Copy", Method, 17, ""},
    +		{"(*Writer).Create", Method, 0, ""},
    +		{"(*Writer).CreateHeader", Method, 0, ""},
    +		{"(*Writer).CreateRaw", Method, 17, ""},
    +		{"(*Writer).Flush", Method, 4, ""},
    +		{"(*Writer).RegisterCompressor", Method, 6, ""},
    +		{"(*Writer).SetComment", Method, 10, ""},
    +		{"(*Writer).SetOffset", Method, 5, ""},
    +		{"Compressor", Type, 2, ""},
    +		{"Decompressor", Type, 2, ""},
    +		{"Deflate", Const, 0, ""},
    +		{"ErrAlgorithm", Var, 0, ""},
    +		{"ErrChecksum", Var, 0, ""},
    +		{"ErrFormat", Var, 0, ""},
    +		{"ErrInsecurePath", Var, 20, ""},
    +		{"File", Type, 0, ""},
    +		{"File.FileHeader", Field, 0, ""},
    +		{"FileHeader", Type, 0, ""},
    +		{"FileHeader.CRC32", Field, 0, ""},
    +		{"FileHeader.Comment", Field, 0, ""},
    +		{"FileHeader.CompressedSize", Field, 0, ""},
    +		{"FileHeader.CompressedSize64", Field, 1, ""},
    +		{"FileHeader.CreatorVersion", Field, 0, ""},
    +		{"FileHeader.ExternalAttrs", Field, 0, ""},
    +		{"FileHeader.Extra", Field, 0, ""},
    +		{"FileHeader.Flags", Field, 0, ""},
    +		{"FileHeader.Method", Field, 0, ""},
    +		{"FileHeader.Modified", Field, 10, ""},
    +		{"FileHeader.ModifiedDate", Field, 0, ""},
    +		{"FileHeader.ModifiedTime", Field, 0, ""},
    +		{"FileHeader.Name", Field, 0, ""},
    +		{"FileHeader.NonUTF8", Field, 10, ""},
    +		{"FileHeader.ReaderVersion", Field, 0, ""},
    +		{"FileHeader.UncompressedSize", Field, 0, ""},
    +		{"FileHeader.UncompressedSize64", Field, 1, ""},
    +		{"FileInfoHeader", Func, 0, "func(fi fs.FileInfo) (*FileHeader, error)"},
    +		{"NewReader", Func, 0, "func(r io.ReaderAt, size int64) (*Reader, error)"},
    +		{"NewWriter", Func, 0, "func(w io.Writer) *Writer"},
    +		{"OpenReader", Func, 0, "func(name string) (*ReadCloser, error)"},
    +		{"ReadCloser", Type, 0, ""},
    +		{"ReadCloser.Reader", Field, 0, ""},
    +		{"Reader", Type, 0, ""},
    +		{"Reader.Comment", Field, 0, ""},
    +		{"Reader.File", Field, 0, ""},
    +		{"RegisterCompressor", Func, 2, "func(method uint16, comp Compressor)"},
    +		{"RegisterDecompressor", Func, 2, "func(method uint16, dcomp Decompressor)"},
    +		{"Store", Const, 0, ""},
    +		{"Writer", Type, 0, ""},
     	},
     	"bufio": {
    -		{"(*Reader).Buffered", Method, 0},
    -		{"(*Reader).Discard", Method, 5},
    -		{"(*Reader).Peek", Method, 0},
    -		{"(*Reader).Read", Method, 0},
    -		{"(*Reader).ReadByte", Method, 0},
    -		{"(*Reader).ReadBytes", Method, 0},
    -		{"(*Reader).ReadLine", Method, 0},
    -		{"(*Reader).ReadRune", Method, 0},
    -		{"(*Reader).ReadSlice", Method, 0},
    -		{"(*Reader).ReadString", Method, 0},
    -		{"(*Reader).Reset", Method, 2},
    -		{"(*Reader).Size", Method, 10},
    -		{"(*Reader).UnreadByte", Method, 0},
    -		{"(*Reader).UnreadRune", Method, 0},
    -		{"(*Reader).WriteTo", Method, 1},
    -		{"(*Scanner).Buffer", Method, 6},
    -		{"(*Scanner).Bytes", Method, 1},
    -		{"(*Scanner).Err", Method, 1},
    -		{"(*Scanner).Scan", Method, 1},
    -		{"(*Scanner).Split", Method, 1},
    -		{"(*Scanner).Text", Method, 1},
    -		{"(*Writer).Available", Method, 0},
    -		{"(*Writer).AvailableBuffer", Method, 18},
    -		{"(*Writer).Buffered", Method, 0},
    -		{"(*Writer).Flush", Method, 0},
    -		{"(*Writer).ReadFrom", Method, 1},
    -		{"(*Writer).Reset", Method, 2},
    -		{"(*Writer).Size", Method, 10},
    -		{"(*Writer).Write", Method, 0},
    -		{"(*Writer).WriteByte", Method, 0},
    -		{"(*Writer).WriteRune", Method, 0},
    -		{"(*Writer).WriteString", Method, 0},
    -		{"(ReadWriter).Available", Method, 0},
    -		{"(ReadWriter).AvailableBuffer", Method, 18},
    -		{"(ReadWriter).Discard", Method, 5},
    -		{"(ReadWriter).Flush", Method, 0},
    -		{"(ReadWriter).Peek", Method, 0},
    -		{"(ReadWriter).Read", Method, 0},
    -		{"(ReadWriter).ReadByte", Method, 0},
    -		{"(ReadWriter).ReadBytes", Method, 0},
    -		{"(ReadWriter).ReadFrom", Method, 1},
    -		{"(ReadWriter).ReadLine", Method, 0},
    -		{"(ReadWriter).ReadRune", Method, 0},
    -		{"(ReadWriter).ReadSlice", Method, 0},
    -		{"(ReadWriter).ReadString", Method, 0},
    -		{"(ReadWriter).UnreadByte", Method, 0},
    -		{"(ReadWriter).UnreadRune", Method, 0},
    -		{"(ReadWriter).Write", Method, 0},
    -		{"(ReadWriter).WriteByte", Method, 0},
    -		{"(ReadWriter).WriteRune", Method, 0},
    -		{"(ReadWriter).WriteString", Method, 0},
    -		{"(ReadWriter).WriteTo", Method, 1},
    -		{"ErrAdvanceTooFar", Var, 1},
    -		{"ErrBadReadCount", Var, 15},
    -		{"ErrBufferFull", Var, 0},
    -		{"ErrFinalToken", Var, 6},
    -		{"ErrInvalidUnreadByte", Var, 0},
    -		{"ErrInvalidUnreadRune", Var, 0},
    -		{"ErrNegativeAdvance", Var, 1},
    -		{"ErrNegativeCount", Var, 0},
    -		{"ErrTooLong", Var, 1},
    -		{"MaxScanTokenSize", Const, 1},
    -		{"NewReadWriter", Func, 0},
    -		{"NewReader", Func, 0},
    -		{"NewReaderSize", Func, 0},
    -		{"NewScanner", Func, 1},
    -		{"NewWriter", Func, 0},
    -		{"NewWriterSize", Func, 0},
    -		{"ReadWriter", Type, 0},
    -		{"ReadWriter.Reader", Field, 0},
    -		{"ReadWriter.Writer", Field, 0},
    -		{"Reader", Type, 0},
    -		{"ScanBytes", Func, 1},
    -		{"ScanLines", Func, 1},
    -		{"ScanRunes", Func, 1},
    -		{"ScanWords", Func, 1},
    -		{"Scanner", Type, 1},
    -		{"SplitFunc", Type, 1},
    -		{"Writer", Type, 0},
    +		{"(*Reader).Buffered", Method, 0, ""},
    +		{"(*Reader).Discard", Method, 5, ""},
    +		{"(*Reader).Peek", Method, 0, ""},
    +		{"(*Reader).Read", Method, 0, ""},
    +		{"(*Reader).ReadByte", Method, 0, ""},
    +		{"(*Reader).ReadBytes", Method, 0, ""},
    +		{"(*Reader).ReadLine", Method, 0, ""},
    +		{"(*Reader).ReadRune", Method, 0, ""},
    +		{"(*Reader).ReadSlice", Method, 0, ""},
    +		{"(*Reader).ReadString", Method, 0, ""},
    +		{"(*Reader).Reset", Method, 2, ""},
    +		{"(*Reader).Size", Method, 10, ""},
    +		{"(*Reader).UnreadByte", Method, 0, ""},
    +		{"(*Reader).UnreadRune", Method, 0, ""},
    +		{"(*Reader).WriteTo", Method, 1, ""},
    +		{"(*Scanner).Buffer", Method, 6, ""},
    +		{"(*Scanner).Bytes", Method, 1, ""},
    +		{"(*Scanner).Err", Method, 1, ""},
    +		{"(*Scanner).Scan", Method, 1, ""},
    +		{"(*Scanner).Split", Method, 1, ""},
    +		{"(*Scanner).Text", Method, 1, ""},
    +		{"(*Writer).Available", Method, 0, ""},
    +		{"(*Writer).AvailableBuffer", Method, 18, ""},
    +		{"(*Writer).Buffered", Method, 0, ""},
    +		{"(*Writer).Flush", Method, 0, ""},
    +		{"(*Writer).ReadFrom", Method, 1, ""},
    +		{"(*Writer).Reset", Method, 2, ""},
    +		{"(*Writer).Size", Method, 10, ""},
    +		{"(*Writer).Write", Method, 0, ""},
    +		{"(*Writer).WriteByte", Method, 0, ""},
    +		{"(*Writer).WriteRune", Method, 0, ""},
    +		{"(*Writer).WriteString", Method, 0, ""},
    +		{"(ReadWriter).Available", Method, 0, ""},
    +		{"(ReadWriter).AvailableBuffer", Method, 18, ""},
    +		{"(ReadWriter).Discard", Method, 5, ""},
    +		{"(ReadWriter).Flush", Method, 0, ""},
    +		{"(ReadWriter).Peek", Method, 0, ""},
    +		{"(ReadWriter).Read", Method, 0, ""},
    +		{"(ReadWriter).ReadByte", Method, 0, ""},
    +		{"(ReadWriter).ReadBytes", Method, 0, ""},
    +		{"(ReadWriter).ReadFrom", Method, 1, ""},
    +		{"(ReadWriter).ReadLine", Method, 0, ""},
    +		{"(ReadWriter).ReadRune", Method, 0, ""},
    +		{"(ReadWriter).ReadSlice", Method, 0, ""},
    +		{"(ReadWriter).ReadString", Method, 0, ""},
    +		{"(ReadWriter).UnreadByte", Method, 0, ""},
    +		{"(ReadWriter).UnreadRune", Method, 0, ""},
    +		{"(ReadWriter).Write", Method, 0, ""},
    +		{"(ReadWriter).WriteByte", Method, 0, ""},
    +		{"(ReadWriter).WriteRune", Method, 0, ""},
    +		{"(ReadWriter).WriteString", Method, 0, ""},
    +		{"(ReadWriter).WriteTo", Method, 1, ""},
    +		{"ErrAdvanceTooFar", Var, 1, ""},
    +		{"ErrBadReadCount", Var, 15, ""},
    +		{"ErrBufferFull", Var, 0, ""},
    +		{"ErrFinalToken", Var, 6, ""},
    +		{"ErrInvalidUnreadByte", Var, 0, ""},
    +		{"ErrInvalidUnreadRune", Var, 0, ""},
    +		{"ErrNegativeAdvance", Var, 1, ""},
    +		{"ErrNegativeCount", Var, 0, ""},
    +		{"ErrTooLong", Var, 1, ""},
    +		{"MaxScanTokenSize", Const, 1, ""},
    +		{"NewReadWriter", Func, 0, "func(r *Reader, w *Writer) *ReadWriter"},
    +		{"NewReader", Func, 0, "func(rd io.Reader) *Reader"},
    +		{"NewReaderSize", Func, 0, "func(rd io.Reader, size int) *Reader"},
    +		{"NewScanner", Func, 1, "func(r io.Reader) *Scanner"},
    +		{"NewWriter", Func, 0, "func(w io.Writer) *Writer"},
    +		{"NewWriterSize", Func, 0, "func(w io.Writer, size int) *Writer"},
    +		{"ReadWriter", Type, 0, ""},
    +		{"ReadWriter.Reader", Field, 0, ""},
    +		{"ReadWriter.Writer", Field, 0, ""},
    +		{"Reader", Type, 0, ""},
    +		{"ScanBytes", Func, 1, "func(data []byte, atEOF bool) (advance int, token []byte, err error)"},
    +		{"ScanLines", Func, 1, "func(data []byte, atEOF bool) (advance int, token []byte, err error)"},
    +		{"ScanRunes", Func, 1, "func(data []byte, atEOF bool) (advance int, token []byte, err error)"},
    +		{"ScanWords", Func, 1, "func(data []byte, atEOF bool) (advance int, token []byte, err error)"},
    +		{"Scanner", Type, 1, ""},
    +		{"SplitFunc", Type, 1, ""},
    +		{"Writer", Type, 0, ""},
     	},
     	"bytes": {
    -		{"(*Buffer).Available", Method, 21},
    -		{"(*Buffer).AvailableBuffer", Method, 21},
    -		{"(*Buffer).Bytes", Method, 0},
    -		{"(*Buffer).Cap", Method, 5},
    -		{"(*Buffer).Grow", Method, 1},
    -		{"(*Buffer).Len", Method, 0},
    -		{"(*Buffer).Next", Method, 0},
    -		{"(*Buffer).Read", Method, 0},
    -		{"(*Buffer).ReadByte", Method, 0},
    -		{"(*Buffer).ReadBytes", Method, 0},
    -		{"(*Buffer).ReadFrom", Method, 0},
    -		{"(*Buffer).ReadRune", Method, 0},
    -		{"(*Buffer).ReadString", Method, 0},
    -		{"(*Buffer).Reset", Method, 0},
    -		{"(*Buffer).String", Method, 0},
    -		{"(*Buffer).Truncate", Method, 0},
    -		{"(*Buffer).UnreadByte", Method, 0},
    -		{"(*Buffer).UnreadRune", Method, 0},
    -		{"(*Buffer).Write", Method, 0},
    -		{"(*Buffer).WriteByte", Method, 0},
    -		{"(*Buffer).WriteRune", Method, 0},
    -		{"(*Buffer).WriteString", Method, 0},
    -		{"(*Buffer).WriteTo", Method, 0},
    -		{"(*Reader).Len", Method, 0},
    -		{"(*Reader).Read", Method, 0},
    -		{"(*Reader).ReadAt", Method, 0},
    -		{"(*Reader).ReadByte", Method, 0},
    -		{"(*Reader).ReadRune", Method, 0},
    -		{"(*Reader).Reset", Method, 7},
    -		{"(*Reader).Seek", Method, 0},
    -		{"(*Reader).Size", Method, 5},
    -		{"(*Reader).UnreadByte", Method, 0},
    -		{"(*Reader).UnreadRune", Method, 0},
    -		{"(*Reader).WriteTo", Method, 1},
    -		{"Buffer", Type, 0},
    -		{"Clone", Func, 20},
    -		{"Compare", Func, 0},
    -		{"Contains", Func, 0},
    -		{"ContainsAny", Func, 7},
    -		{"ContainsFunc", Func, 21},
    -		{"ContainsRune", Func, 7},
    -		{"Count", Func, 0},
    -		{"Cut", Func, 18},
    -		{"CutPrefix", Func, 20},
    -		{"CutSuffix", Func, 20},
    -		{"Equal", Func, 0},
    -		{"EqualFold", Func, 0},
    -		{"ErrTooLarge", Var, 0},
    -		{"Fields", Func, 0},
    -		{"FieldsFunc", Func, 0},
    -		{"HasPrefix", Func, 0},
    -		{"HasSuffix", Func, 0},
    -		{"Index", Func, 0},
    -		{"IndexAny", Func, 0},
    -		{"IndexByte", Func, 0},
    -		{"IndexFunc", Func, 0},
    -		{"IndexRune", Func, 0},
    -		{"Join", Func, 0},
    -		{"LastIndex", Func, 0},
    -		{"LastIndexAny", Func, 0},
    -		{"LastIndexByte", Func, 5},
    -		{"LastIndexFunc", Func, 0},
    -		{"Map", Func, 0},
    -		{"MinRead", Const, 0},
    -		{"NewBuffer", Func, 0},
    -		{"NewBufferString", Func, 0},
    -		{"NewReader", Func, 0},
    -		{"Reader", Type, 0},
    -		{"Repeat", Func, 0},
    -		{"Replace", Func, 0},
    -		{"ReplaceAll", Func, 12},
    -		{"Runes", Func, 0},
    -		{"Split", Func, 0},
    -		{"SplitAfter", Func, 0},
    -		{"SplitAfterN", Func, 0},
    -		{"SplitN", Func, 0},
    -		{"Title", Func, 0},
    -		{"ToLower", Func, 0},
    -		{"ToLowerSpecial", Func, 0},
    -		{"ToTitle", Func, 0},
    -		{"ToTitleSpecial", Func, 0},
    -		{"ToUpper", Func, 0},
    -		{"ToUpperSpecial", Func, 0},
    -		{"ToValidUTF8", Func, 13},
    -		{"Trim", Func, 0},
    -		{"TrimFunc", Func, 0},
    -		{"TrimLeft", Func, 0},
    -		{"TrimLeftFunc", Func, 0},
    -		{"TrimPrefix", Func, 1},
    -		{"TrimRight", Func, 0},
    -		{"TrimRightFunc", Func, 0},
    -		{"TrimSpace", Func, 0},
    -		{"TrimSuffix", Func, 1},
    +		{"(*Buffer).Available", Method, 21, ""},
    +		{"(*Buffer).AvailableBuffer", Method, 21, ""},
    +		{"(*Buffer).Bytes", Method, 0, ""},
    +		{"(*Buffer).Cap", Method, 5, ""},
    +		{"(*Buffer).Grow", Method, 1, ""},
    +		{"(*Buffer).Len", Method, 0, ""},
    +		{"(*Buffer).Next", Method, 0, ""},
    +		{"(*Buffer).Read", Method, 0, ""},
    +		{"(*Buffer).ReadByte", Method, 0, ""},
    +		{"(*Buffer).ReadBytes", Method, 0, ""},
    +		{"(*Buffer).ReadFrom", Method, 0, ""},
    +		{"(*Buffer).ReadRune", Method, 0, ""},
    +		{"(*Buffer).ReadString", Method, 0, ""},
    +		{"(*Buffer).Reset", Method, 0, ""},
    +		{"(*Buffer).String", Method, 0, ""},
    +		{"(*Buffer).Truncate", Method, 0, ""},
    +		{"(*Buffer).UnreadByte", Method, 0, ""},
    +		{"(*Buffer).UnreadRune", Method, 0, ""},
    +		{"(*Buffer).Write", Method, 0, ""},
    +		{"(*Buffer).WriteByte", Method, 0, ""},
    +		{"(*Buffer).WriteRune", Method, 0, ""},
    +		{"(*Buffer).WriteString", Method, 0, ""},
    +		{"(*Buffer).WriteTo", Method, 0, ""},
    +		{"(*Reader).Len", Method, 0, ""},
    +		{"(*Reader).Read", Method, 0, ""},
    +		{"(*Reader).ReadAt", Method, 0, ""},
    +		{"(*Reader).ReadByte", Method, 0, ""},
    +		{"(*Reader).ReadRune", Method, 0, ""},
    +		{"(*Reader).Reset", Method, 7, ""},
    +		{"(*Reader).Seek", Method, 0, ""},
    +		{"(*Reader).Size", Method, 5, ""},
    +		{"(*Reader).UnreadByte", Method, 0, ""},
    +		{"(*Reader).UnreadRune", Method, 0, ""},
    +		{"(*Reader).WriteTo", Method, 1, ""},
    +		{"Buffer", Type, 0, ""},
    +		{"Clone", Func, 20, "func(b []byte) []byte"},
    +		{"Compare", Func, 0, "func(a []byte, b []byte) int"},
    +		{"Contains", Func, 0, "func(b []byte, subslice []byte) bool"},
    +		{"ContainsAny", Func, 7, "func(b []byte, chars string) bool"},
    +		{"ContainsFunc", Func, 21, "func(b []byte, f func(rune) bool) bool"},
    +		{"ContainsRune", Func, 7, "func(b []byte, r rune) bool"},
    +		{"Count", Func, 0, "func(s []byte, sep []byte) int"},
    +		{"Cut", Func, 18, "func(s []byte, sep []byte) (before []byte, after []byte, found bool)"},
    +		{"CutPrefix", Func, 20, "func(s []byte, prefix []byte) (after []byte, found bool)"},
    +		{"CutSuffix", Func, 20, "func(s []byte, suffix []byte) (before []byte, found bool)"},
    +		{"Equal", Func, 0, "func(a []byte, b []byte) bool"},
    +		{"EqualFold", Func, 0, "func(s []byte, t []byte) bool"},
    +		{"ErrTooLarge", Var, 0, ""},
    +		{"Fields", Func, 0, "func(s []byte) [][]byte"},
    +		{"FieldsFunc", Func, 0, "func(s []byte, f func(rune) bool) [][]byte"},
    +		{"FieldsFuncSeq", Func, 24, "func(s []byte, f func(rune) bool) iter.Seq[[]byte]"},
    +		{"FieldsSeq", Func, 24, "func(s []byte) iter.Seq[[]byte]"},
    +		{"HasPrefix", Func, 0, "func(s []byte, prefix []byte) bool"},
    +		{"HasSuffix", Func, 0, "func(s []byte, suffix []byte) bool"},
    +		{"Index", Func, 0, "func(s []byte, sep []byte) int"},
    +		{"IndexAny", Func, 0, "func(s []byte, chars string) int"},
    +		{"IndexByte", Func, 0, "func(b []byte, c byte) int"},
    +		{"IndexFunc", Func, 0, "func(s []byte, f func(r rune) bool) int"},
    +		{"IndexRune", Func, 0, "func(s []byte, r rune) int"},
    +		{"Join", Func, 0, "func(s [][]byte, sep []byte) []byte"},
    +		{"LastIndex", Func, 0, "func(s []byte, sep []byte) int"},
    +		{"LastIndexAny", Func, 0, "func(s []byte, chars string) int"},
    +		{"LastIndexByte", Func, 5, "func(s []byte, c byte) int"},
    +		{"LastIndexFunc", Func, 0, "func(s []byte, f func(r rune) bool) int"},
    +		{"Lines", Func, 24, "func(s []byte) iter.Seq[[]byte]"},
    +		{"Map", Func, 0, "func(mapping func(r rune) rune, s []byte) []byte"},
    +		{"MinRead", Const, 0, ""},
    +		{"NewBuffer", Func, 0, "func(buf []byte) *Buffer"},
    +		{"NewBufferString", Func, 0, "func(s string) *Buffer"},
    +		{"NewReader", Func, 0, "func(b []byte) *Reader"},
    +		{"Reader", Type, 0, ""},
    +		{"Repeat", Func, 0, "func(b []byte, count int) []byte"},
    +		{"Replace", Func, 0, "func(s []byte, old []byte, new []byte, n int) []byte"},
    +		{"ReplaceAll", Func, 12, "func(s []byte, old []byte, new []byte) []byte"},
    +		{"Runes", Func, 0, "func(s []byte) []rune"},
    +		{"Split", Func, 0, "func(s []byte, sep []byte) [][]byte"},
    +		{"SplitAfter", Func, 0, "func(s []byte, sep []byte) [][]byte"},
    +		{"SplitAfterN", Func, 0, "func(s []byte, sep []byte, n int) [][]byte"},
    +		{"SplitAfterSeq", Func, 24, "func(s []byte, sep []byte) iter.Seq[[]byte]"},
    +		{"SplitN", Func, 0, "func(s []byte, sep []byte, n int) [][]byte"},
    +		{"SplitSeq", Func, 24, "func(s []byte, sep []byte) iter.Seq[[]byte]"},
    +		{"Title", Func, 0, "func(s []byte) []byte"},
    +		{"ToLower", Func, 0, "func(s []byte) []byte"},
    +		{"ToLowerSpecial", Func, 0, "func(c unicode.SpecialCase, s []byte) []byte"},
    +		{"ToTitle", Func, 0, "func(s []byte) []byte"},
    +		{"ToTitleSpecial", Func, 0, "func(c unicode.SpecialCase, s []byte) []byte"},
    +		{"ToUpper", Func, 0, "func(s []byte) []byte"},
    +		{"ToUpperSpecial", Func, 0, "func(c unicode.SpecialCase, s []byte) []byte"},
    +		{"ToValidUTF8", Func, 13, "func(s []byte, replacement []byte) []byte"},
    +		{"Trim", Func, 0, "func(s []byte, cutset string) []byte"},
    +		{"TrimFunc", Func, 0, "func(s []byte, f func(r rune) bool) []byte"},
    +		{"TrimLeft", Func, 0, "func(s []byte, cutset string) []byte"},
    +		{"TrimLeftFunc", Func, 0, "func(s []byte, f func(r rune) bool) []byte"},
    +		{"TrimPrefix", Func, 1, "func(s []byte, prefix []byte) []byte"},
    +		{"TrimRight", Func, 0, "func(s []byte, cutset string) []byte"},
    +		{"TrimRightFunc", Func, 0, "func(s []byte, f func(r rune) bool) []byte"},
    +		{"TrimSpace", Func, 0, "func(s []byte) []byte"},
    +		{"TrimSuffix", Func, 1, "func(s []byte, suffix []byte) []byte"},
     	},
     	"cmp": {
    -		{"Compare", Func, 21},
    -		{"Less", Func, 21},
    -		{"Or", Func, 22},
    -		{"Ordered", Type, 21},
    +		{"Compare", Func, 21, "func[T Ordered](x T, y T) int"},
    +		{"Less", Func, 21, "func[T Ordered](x T, y T) bool"},
    +		{"Or", Func, 22, "func[T comparable](vals ...T) T"},
    +		{"Ordered", Type, 21, ""},
     	},
     	"compress/bzip2": {
    -		{"(StructuralError).Error", Method, 0},
    -		{"NewReader", Func, 0},
    -		{"StructuralError", Type, 0},
    +		{"(StructuralError).Error", Method, 0, ""},
    +		{"NewReader", Func, 0, "func(r io.Reader) io.Reader"},
    +		{"StructuralError", Type, 0, ""},
     	},
     	"compress/flate": {
    -		{"(*ReadError).Error", Method, 0},
    -		{"(*WriteError).Error", Method, 0},
    -		{"(*Writer).Close", Method, 0},
    -		{"(*Writer).Flush", Method, 0},
    -		{"(*Writer).Reset", Method, 2},
    -		{"(*Writer).Write", Method, 0},
    -		{"(CorruptInputError).Error", Method, 0},
    -		{"(InternalError).Error", Method, 0},
    -		{"BestCompression", Const, 0},
    -		{"BestSpeed", Const, 0},
    -		{"CorruptInputError", Type, 0},
    -		{"DefaultCompression", Const, 0},
    -		{"HuffmanOnly", Const, 7},
    -		{"InternalError", Type, 0},
    -		{"NewReader", Func, 0},
    -		{"NewReaderDict", Func, 0},
    -		{"NewWriter", Func, 0},
    -		{"NewWriterDict", Func, 0},
    -		{"NoCompression", Const, 0},
    -		{"ReadError", Type, 0},
    -		{"ReadError.Err", Field, 0},
    -		{"ReadError.Offset", Field, 0},
    -		{"Reader", Type, 0},
    -		{"Resetter", Type, 4},
    -		{"WriteError", Type, 0},
    -		{"WriteError.Err", Field, 0},
    -		{"WriteError.Offset", Field, 0},
    -		{"Writer", Type, 0},
    +		{"(*ReadError).Error", Method, 0, ""},
    +		{"(*WriteError).Error", Method, 0, ""},
    +		{"(*Writer).Close", Method, 0, ""},
    +		{"(*Writer).Flush", Method, 0, ""},
    +		{"(*Writer).Reset", Method, 2, ""},
    +		{"(*Writer).Write", Method, 0, ""},
    +		{"(CorruptInputError).Error", Method, 0, ""},
    +		{"(InternalError).Error", Method, 0, ""},
    +		{"BestCompression", Const, 0, ""},
    +		{"BestSpeed", Const, 0, ""},
    +		{"CorruptInputError", Type, 0, ""},
    +		{"DefaultCompression", Const, 0, ""},
    +		{"HuffmanOnly", Const, 7, ""},
    +		{"InternalError", Type, 0, ""},
    +		{"NewReader", Func, 0, "func(r io.Reader) io.ReadCloser"},
    +		{"NewReaderDict", Func, 0, "func(r io.Reader, dict []byte) io.ReadCloser"},
    +		{"NewWriter", Func, 0, "func(w io.Writer, level int) (*Writer, error)"},
    +		{"NewWriterDict", Func, 0, "func(w io.Writer, level int, dict []byte) (*Writer, error)"},
    +		{"NoCompression", Const, 0, ""},
    +		{"ReadError", Type, 0, ""},
    +		{"ReadError.Err", Field, 0, ""},
    +		{"ReadError.Offset", Field, 0, ""},
    +		{"Reader", Type, 0, ""},
    +		{"Resetter", Type, 4, ""},
    +		{"WriteError", Type, 0, ""},
    +		{"WriteError.Err", Field, 0, ""},
    +		{"WriteError.Offset", Field, 0, ""},
    +		{"Writer", Type, 0, ""},
     	},
     	"compress/gzip": {
    -		{"(*Reader).Close", Method, 0},
    -		{"(*Reader).Multistream", Method, 4},
    -		{"(*Reader).Read", Method, 0},
    -		{"(*Reader).Reset", Method, 3},
    -		{"(*Writer).Close", Method, 0},
    -		{"(*Writer).Flush", Method, 1},
    -		{"(*Writer).Reset", Method, 2},
    -		{"(*Writer).Write", Method, 0},
    -		{"BestCompression", Const, 0},
    -		{"BestSpeed", Const, 0},
    -		{"DefaultCompression", Const, 0},
    -		{"ErrChecksum", Var, 0},
    -		{"ErrHeader", Var, 0},
    -		{"Header", Type, 0},
    -		{"Header.Comment", Field, 0},
    -		{"Header.Extra", Field, 0},
    -		{"Header.ModTime", Field, 0},
    -		{"Header.Name", Field, 0},
    -		{"Header.OS", Field, 0},
    -		{"HuffmanOnly", Const, 8},
    -		{"NewReader", Func, 0},
    -		{"NewWriter", Func, 0},
    -		{"NewWriterLevel", Func, 0},
    -		{"NoCompression", Const, 0},
    -		{"Reader", Type, 0},
    -		{"Reader.Header", Field, 0},
    -		{"Writer", Type, 0},
    -		{"Writer.Header", Field, 0},
    +		{"(*Reader).Close", Method, 0, ""},
    +		{"(*Reader).Multistream", Method, 4, ""},
    +		{"(*Reader).Read", Method, 0, ""},
    +		{"(*Reader).Reset", Method, 3, ""},
    +		{"(*Writer).Close", Method, 0, ""},
    +		{"(*Writer).Flush", Method, 1, ""},
    +		{"(*Writer).Reset", Method, 2, ""},
    +		{"(*Writer).Write", Method, 0, ""},
    +		{"BestCompression", Const, 0, ""},
    +		{"BestSpeed", Const, 0, ""},
    +		{"DefaultCompression", Const, 0, ""},
    +		{"ErrChecksum", Var, 0, ""},
    +		{"ErrHeader", Var, 0, ""},
    +		{"Header", Type, 0, ""},
    +		{"Header.Comment", Field, 0, ""},
    +		{"Header.Extra", Field, 0, ""},
    +		{"Header.ModTime", Field, 0, ""},
    +		{"Header.Name", Field, 0, ""},
    +		{"Header.OS", Field, 0, ""},
    +		{"HuffmanOnly", Const, 8, ""},
    +		{"NewReader", Func, 0, "func(r io.Reader) (*Reader, error)"},
    +		{"NewWriter", Func, 0, "func(w io.Writer) *Writer"},
    +		{"NewWriterLevel", Func, 0, "func(w io.Writer, level int) (*Writer, error)"},
    +		{"NoCompression", Const, 0, ""},
    +		{"Reader", Type, 0, ""},
    +		{"Reader.Header", Field, 0, ""},
    +		{"Writer", Type, 0, ""},
    +		{"Writer.Header", Field, 0, ""},
     	},
     	"compress/lzw": {
    -		{"(*Reader).Close", Method, 17},
    -		{"(*Reader).Read", Method, 17},
    -		{"(*Reader).Reset", Method, 17},
    -		{"(*Writer).Close", Method, 17},
    -		{"(*Writer).Reset", Method, 17},
    -		{"(*Writer).Write", Method, 17},
    -		{"LSB", Const, 0},
    -		{"MSB", Const, 0},
    -		{"NewReader", Func, 0},
    -		{"NewWriter", Func, 0},
    -		{"Order", Type, 0},
    -		{"Reader", Type, 17},
    -		{"Writer", Type, 17},
    +		{"(*Reader).Close", Method, 17, ""},
    +		{"(*Reader).Read", Method, 17, ""},
    +		{"(*Reader).Reset", Method, 17, ""},
    +		{"(*Writer).Close", Method, 17, ""},
    +		{"(*Writer).Reset", Method, 17, ""},
    +		{"(*Writer).Write", Method, 17, ""},
    +		{"LSB", Const, 0, ""},
    +		{"MSB", Const, 0, ""},
    +		{"NewReader", Func, 0, "func(r io.Reader, order Order, litWidth int) io.ReadCloser"},
    +		{"NewWriter", Func, 0, "func(w io.Writer, order Order, litWidth int) io.WriteCloser"},
    +		{"Order", Type, 0, ""},
    +		{"Reader", Type, 17, ""},
    +		{"Writer", Type, 17, ""},
     	},
     	"compress/zlib": {
    -		{"(*Writer).Close", Method, 0},
    -		{"(*Writer).Flush", Method, 0},
    -		{"(*Writer).Reset", Method, 2},
    -		{"(*Writer).Write", Method, 0},
    -		{"BestCompression", Const, 0},
    -		{"BestSpeed", Const, 0},
    -		{"DefaultCompression", Const, 0},
    -		{"ErrChecksum", Var, 0},
    -		{"ErrDictionary", Var, 0},
    -		{"ErrHeader", Var, 0},
    -		{"HuffmanOnly", Const, 8},
    -		{"NewReader", Func, 0},
    -		{"NewReaderDict", Func, 0},
    -		{"NewWriter", Func, 0},
    -		{"NewWriterLevel", Func, 0},
    -		{"NewWriterLevelDict", Func, 0},
    -		{"NoCompression", Const, 0},
    -		{"Resetter", Type, 4},
    -		{"Writer", Type, 0},
    +		{"(*Writer).Close", Method, 0, ""},
    +		{"(*Writer).Flush", Method, 0, ""},
    +		{"(*Writer).Reset", Method, 2, ""},
    +		{"(*Writer).Write", Method, 0, ""},
    +		{"BestCompression", Const, 0, ""},
    +		{"BestSpeed", Const, 0, ""},
    +		{"DefaultCompression", Const, 0, ""},
    +		{"ErrChecksum", Var, 0, ""},
    +		{"ErrDictionary", Var, 0, ""},
    +		{"ErrHeader", Var, 0, ""},
    +		{"HuffmanOnly", Const, 8, ""},
    +		{"NewReader", Func, 0, "func(r io.Reader) (io.ReadCloser, error)"},
    +		{"NewReaderDict", Func, 0, "func(r io.Reader, dict []byte) (io.ReadCloser, error)"},
    +		{"NewWriter", Func, 0, "func(w io.Writer) *Writer"},
    +		{"NewWriterLevel", Func, 0, "func(w io.Writer, level int) (*Writer, error)"},
    +		{"NewWriterLevelDict", Func, 0, "func(w io.Writer, level int, dict []byte) (*Writer, error)"},
    +		{"NoCompression", Const, 0, ""},
    +		{"Resetter", Type, 4, ""},
    +		{"Writer", Type, 0, ""},
     	},
     	"container/heap": {
    -		{"Fix", Func, 2},
    -		{"Init", Func, 0},
    -		{"Interface", Type, 0},
    -		{"Pop", Func, 0},
    -		{"Push", Func, 0},
    -		{"Remove", Func, 0},
    +		{"Fix", Func, 2, "func(h Interface, i int)"},
    +		{"Init", Func, 0, "func(h Interface)"},
    +		{"Interface", Type, 0, ""},
    +		{"Pop", Func, 0, "func(h Interface) any"},
    +		{"Push", Func, 0, "func(h Interface, x any)"},
    +		{"Remove", Func, 0, "func(h Interface, i int) any"},
     	},
     	"container/list": {
    -		{"(*Element).Next", Method, 0},
    -		{"(*Element).Prev", Method, 0},
    -		{"(*List).Back", Method, 0},
    -		{"(*List).Front", Method, 0},
    -		{"(*List).Init", Method, 0},
    -		{"(*List).InsertAfter", Method, 0},
    -		{"(*List).InsertBefore", Method, 0},
    -		{"(*List).Len", Method, 0},
    -		{"(*List).MoveAfter", Method, 2},
    -		{"(*List).MoveBefore", Method, 2},
    -		{"(*List).MoveToBack", Method, 0},
    -		{"(*List).MoveToFront", Method, 0},
    -		{"(*List).PushBack", Method, 0},
    -		{"(*List).PushBackList", Method, 0},
    -		{"(*List).PushFront", Method, 0},
    -		{"(*List).PushFrontList", Method, 0},
    -		{"(*List).Remove", Method, 0},
    -		{"Element", Type, 0},
    -		{"Element.Value", Field, 0},
    -		{"List", Type, 0},
    -		{"New", Func, 0},
    +		{"(*Element).Next", Method, 0, ""},
    +		{"(*Element).Prev", Method, 0, ""},
    +		{"(*List).Back", Method, 0, ""},
    +		{"(*List).Front", Method, 0, ""},
    +		{"(*List).Init", Method, 0, ""},
    +		{"(*List).InsertAfter", Method, 0, ""},
    +		{"(*List).InsertBefore", Method, 0, ""},
    +		{"(*List).Len", Method, 0, ""},
    +		{"(*List).MoveAfter", Method, 2, ""},
    +		{"(*List).MoveBefore", Method, 2, ""},
    +		{"(*List).MoveToBack", Method, 0, ""},
    +		{"(*List).MoveToFront", Method, 0, ""},
    +		{"(*List).PushBack", Method, 0, ""},
    +		{"(*List).PushBackList", Method, 0, ""},
    +		{"(*List).PushFront", Method, 0, ""},
    +		{"(*List).PushFrontList", Method, 0, ""},
    +		{"(*List).Remove", Method, 0, ""},
    +		{"Element", Type, 0, ""},
    +		{"Element.Value", Field, 0, ""},
    +		{"List", Type, 0, ""},
    +		{"New", Func, 0, "func() *List"},
     	},
     	"container/ring": {
    -		{"(*Ring).Do", Method, 0},
    -		{"(*Ring).Len", Method, 0},
    -		{"(*Ring).Link", Method, 0},
    -		{"(*Ring).Move", Method, 0},
    -		{"(*Ring).Next", Method, 0},
    -		{"(*Ring).Prev", Method, 0},
    -		{"(*Ring).Unlink", Method, 0},
    -		{"New", Func, 0},
    -		{"Ring", Type, 0},
    -		{"Ring.Value", Field, 0},
    +		{"(*Ring).Do", Method, 0, ""},
    +		{"(*Ring).Len", Method, 0, ""},
    +		{"(*Ring).Link", Method, 0, ""},
    +		{"(*Ring).Move", Method, 0, ""},
    +		{"(*Ring).Next", Method, 0, ""},
    +		{"(*Ring).Prev", Method, 0, ""},
    +		{"(*Ring).Unlink", Method, 0, ""},
    +		{"New", Func, 0, "func(n int) *Ring"},
    +		{"Ring", Type, 0, ""},
    +		{"Ring.Value", Field, 0, ""},
     	},
     	"context": {
    -		{"AfterFunc", Func, 21},
    -		{"Background", Func, 7},
    -		{"CancelCauseFunc", Type, 20},
    -		{"CancelFunc", Type, 7},
    -		{"Canceled", Var, 7},
    -		{"Cause", Func, 20},
    -		{"Context", Type, 7},
    -		{"DeadlineExceeded", Var, 7},
    -		{"TODO", Func, 7},
    -		{"WithCancel", Func, 7},
    -		{"WithCancelCause", Func, 20},
    -		{"WithDeadline", Func, 7},
    -		{"WithDeadlineCause", Func, 21},
    -		{"WithTimeout", Func, 7},
    -		{"WithTimeoutCause", Func, 21},
    -		{"WithValue", Func, 7},
    -		{"WithoutCancel", Func, 21},
    +		{"AfterFunc", Func, 21, "func(ctx Context, f func()) (stop func() bool)"},
    +		{"Background", Func, 7, "func() Context"},
    +		{"CancelCauseFunc", Type, 20, ""},
    +		{"CancelFunc", Type, 7, ""},
    +		{"Canceled", Var, 7, ""},
    +		{"Cause", Func, 20, "func(c Context) error"},
    +		{"Context", Type, 7, ""},
    +		{"DeadlineExceeded", Var, 7, ""},
    +		{"TODO", Func, 7, "func() Context"},
    +		{"WithCancel", Func, 7, "func(parent Context) (ctx Context, cancel CancelFunc)"},
    +		{"WithCancelCause", Func, 20, "func(parent Context) (ctx Context, cancel CancelCauseFunc)"},
    +		{"WithDeadline", Func, 7, "func(parent Context, d time.Time) (Context, CancelFunc)"},
    +		{"WithDeadlineCause", Func, 21, "func(parent Context, d time.Time, cause error) (Context, CancelFunc)"},
    +		{"WithTimeout", Func, 7, "func(parent Context, timeout time.Duration) (Context, CancelFunc)"},
    +		{"WithTimeoutCause", Func, 21, "func(parent Context, timeout time.Duration, cause error) (Context, CancelFunc)"},
    +		{"WithValue", Func, 7, "func(parent Context, key any, val any) Context"},
    +		{"WithoutCancel", Func, 21, "func(parent Context) Context"},
     	},
     	"crypto": {
    -		{"(Hash).Available", Method, 0},
    -		{"(Hash).HashFunc", Method, 4},
    -		{"(Hash).New", Method, 0},
    -		{"(Hash).Size", Method, 0},
    -		{"(Hash).String", Method, 15},
    -		{"BLAKE2b_256", Const, 9},
    -		{"BLAKE2b_384", Const, 9},
    -		{"BLAKE2b_512", Const, 9},
    -		{"BLAKE2s_256", Const, 9},
    -		{"Decrypter", Type, 5},
    -		{"DecrypterOpts", Type, 5},
    -		{"Hash", Type, 0},
    -		{"MD4", Const, 0},
    -		{"MD5", Const, 0},
    -		{"MD5SHA1", Const, 0},
    -		{"PrivateKey", Type, 0},
    -		{"PublicKey", Type, 2},
    -		{"RIPEMD160", Const, 0},
    -		{"RegisterHash", Func, 0},
    -		{"SHA1", Const, 0},
    -		{"SHA224", Const, 0},
    -		{"SHA256", Const, 0},
    -		{"SHA384", Const, 0},
    -		{"SHA3_224", Const, 4},
    -		{"SHA3_256", Const, 4},
    -		{"SHA3_384", Const, 4},
    -		{"SHA3_512", Const, 4},
    -		{"SHA512", Const, 0},
    -		{"SHA512_224", Const, 5},
    -		{"SHA512_256", Const, 5},
    -		{"Signer", Type, 4},
    -		{"SignerOpts", Type, 4},
    +		{"(Hash).Available", Method, 0, ""},
    +		{"(Hash).HashFunc", Method, 4, ""},
    +		{"(Hash).New", Method, 0, ""},
    +		{"(Hash).Size", Method, 0, ""},
    +		{"(Hash).String", Method, 15, ""},
    +		{"BLAKE2b_256", Const, 9, ""},
    +		{"BLAKE2b_384", Const, 9, ""},
    +		{"BLAKE2b_512", Const, 9, ""},
    +		{"BLAKE2s_256", Const, 9, ""},
    +		{"Decrypter", Type, 5, ""},
    +		{"DecrypterOpts", Type, 5, ""},
    +		{"Hash", Type, 0, ""},
    +		{"MD4", Const, 0, ""},
    +		{"MD5", Const, 0, ""},
    +		{"MD5SHA1", Const, 0, ""},
    +		{"MessageSigner", Type, 25, ""},
    +		{"PrivateKey", Type, 0, ""},
    +		{"PublicKey", Type, 2, ""},
    +		{"RIPEMD160", Const, 0, ""},
    +		{"RegisterHash", Func, 0, "func(h Hash, f func() hash.Hash)"},
    +		{"SHA1", Const, 0, ""},
    +		{"SHA224", Const, 0, ""},
    +		{"SHA256", Const, 0, ""},
    +		{"SHA384", Const, 0, ""},
    +		{"SHA3_224", Const, 4, ""},
    +		{"SHA3_256", Const, 4, ""},
    +		{"SHA3_384", Const, 4, ""},
    +		{"SHA3_512", Const, 4, ""},
    +		{"SHA512", Const, 0, ""},
    +		{"SHA512_224", Const, 5, ""},
    +		{"SHA512_256", Const, 5, ""},
    +		{"SignMessage", Func, 25, "func(signer Signer, rand io.Reader, msg []byte, opts SignerOpts) (signature []byte, err error)"},
    +		{"Signer", Type, 4, ""},
    +		{"SignerOpts", Type, 4, ""},
     	},
     	"crypto/aes": {
    -		{"(KeySizeError).Error", Method, 0},
    -		{"BlockSize", Const, 0},
    -		{"KeySizeError", Type, 0},
    -		{"NewCipher", Func, 0},
    +		{"(KeySizeError).Error", Method, 0, ""},
    +		{"BlockSize", Const, 0, ""},
    +		{"KeySizeError", Type, 0, ""},
    +		{"NewCipher", Func, 0, "func(key []byte) (cipher.Block, error)"},
     	},
     	"crypto/cipher": {
    -		{"(StreamReader).Read", Method, 0},
    -		{"(StreamWriter).Close", Method, 0},
    -		{"(StreamWriter).Write", Method, 0},
    -		{"AEAD", Type, 2},
    -		{"Block", Type, 0},
    -		{"BlockMode", Type, 0},
    -		{"NewCBCDecrypter", Func, 0},
    -		{"NewCBCEncrypter", Func, 0},
    -		{"NewCFBDecrypter", Func, 0},
    -		{"NewCFBEncrypter", Func, 0},
    -		{"NewCTR", Func, 0},
    -		{"NewGCM", Func, 2},
    -		{"NewGCMWithNonceSize", Func, 5},
    -		{"NewGCMWithTagSize", Func, 11},
    -		{"NewOFB", Func, 0},
    -		{"Stream", Type, 0},
    -		{"StreamReader", Type, 0},
    -		{"StreamReader.R", Field, 0},
    -		{"StreamReader.S", Field, 0},
    -		{"StreamWriter", Type, 0},
    -		{"StreamWriter.Err", Field, 0},
    -		{"StreamWriter.S", Field, 0},
    -		{"StreamWriter.W", Field, 0},
    +		{"(StreamReader).Read", Method, 0, ""},
    +		{"(StreamWriter).Close", Method, 0, ""},
    +		{"(StreamWriter).Write", Method, 0, ""},
    +		{"AEAD", Type, 2, ""},
    +		{"Block", Type, 0, ""},
    +		{"BlockMode", Type, 0, ""},
    +		{"NewCBCDecrypter", Func, 0, "func(b Block, iv []byte) BlockMode"},
    +		{"NewCBCEncrypter", Func, 0, "func(b Block, iv []byte) BlockMode"},
    +		{"NewCFBDecrypter", Func, 0, "func(block Block, iv []byte) Stream"},
    +		{"NewCFBEncrypter", Func, 0, "func(block Block, iv []byte) Stream"},
    +		{"NewCTR", Func, 0, "func(block Block, iv []byte) Stream"},
    +		{"NewGCM", Func, 2, "func(cipher Block) (AEAD, error)"},
    +		{"NewGCMWithNonceSize", Func, 5, "func(cipher Block, size int) (AEAD, error)"},
    +		{"NewGCMWithRandomNonce", Func, 24, "func(cipher Block) (AEAD, error)"},
    +		{"NewGCMWithTagSize", Func, 11, "func(cipher Block, tagSize int) (AEAD, error)"},
    +		{"NewOFB", Func, 0, "func(b Block, iv []byte) Stream"},
    +		{"Stream", Type, 0, ""},
    +		{"StreamReader", Type, 0, ""},
    +		{"StreamReader.R", Field, 0, ""},
    +		{"StreamReader.S", Field, 0, ""},
    +		{"StreamWriter", Type, 0, ""},
    +		{"StreamWriter.Err", Field, 0, ""},
    +		{"StreamWriter.S", Field, 0, ""},
    +		{"StreamWriter.W", Field, 0, ""},
     	},
     	"crypto/des": {
    -		{"(KeySizeError).Error", Method, 0},
    -		{"BlockSize", Const, 0},
    -		{"KeySizeError", Type, 0},
    -		{"NewCipher", Func, 0},
    -		{"NewTripleDESCipher", Func, 0},
    +		{"(KeySizeError).Error", Method, 0, ""},
    +		{"BlockSize", Const, 0, ""},
    +		{"KeySizeError", Type, 0, ""},
    +		{"NewCipher", Func, 0, "func(key []byte) (cipher.Block, error)"},
    +		{"NewTripleDESCipher", Func, 0, "func(key []byte) (cipher.Block, error)"},
     	},
     	"crypto/dsa": {
    -		{"ErrInvalidPublicKey", Var, 0},
    -		{"GenerateKey", Func, 0},
    -		{"GenerateParameters", Func, 0},
    -		{"L1024N160", Const, 0},
    -		{"L2048N224", Const, 0},
    -		{"L2048N256", Const, 0},
    -		{"L3072N256", Const, 0},
    -		{"ParameterSizes", Type, 0},
    -		{"Parameters", Type, 0},
    -		{"Parameters.G", Field, 0},
    -		{"Parameters.P", Field, 0},
    -		{"Parameters.Q", Field, 0},
    -		{"PrivateKey", Type, 0},
    -		{"PrivateKey.PublicKey", Field, 0},
    -		{"PrivateKey.X", Field, 0},
    -		{"PublicKey", Type, 0},
    -		{"PublicKey.Parameters", Field, 0},
    -		{"PublicKey.Y", Field, 0},
    -		{"Sign", Func, 0},
    -		{"Verify", Func, 0},
    +		{"ErrInvalidPublicKey", Var, 0, ""},
    +		{"GenerateKey", Func, 0, "func(priv *PrivateKey, rand io.Reader) error"},
    +		{"GenerateParameters", Func, 0, "func(params *Parameters, rand io.Reader, sizes ParameterSizes) error"},
    +		{"L1024N160", Const, 0, ""},
    +		{"L2048N224", Const, 0, ""},
    +		{"L2048N256", Const, 0, ""},
    +		{"L3072N256", Const, 0, ""},
    +		{"ParameterSizes", Type, 0, ""},
    +		{"Parameters", Type, 0, ""},
    +		{"Parameters.G", Field, 0, ""},
    +		{"Parameters.P", Field, 0, ""},
    +		{"Parameters.Q", Field, 0, ""},
    +		{"PrivateKey", Type, 0, ""},
    +		{"PrivateKey.PublicKey", Field, 0, ""},
    +		{"PrivateKey.X", Field, 0, ""},
    +		{"PublicKey", Type, 0, ""},
    +		{"PublicKey.Parameters", Field, 0, ""},
    +		{"PublicKey.Y", Field, 0, ""},
    +		{"Sign", Func, 0, "func(rand io.Reader, priv *PrivateKey, hash []byte) (r *big.Int, s *big.Int, err error)"},
    +		{"Verify", Func, 0, "func(pub *PublicKey, hash []byte, r *big.Int, s *big.Int) bool"},
     	},
     	"crypto/ecdh": {
    -		{"(*PrivateKey).Bytes", Method, 20},
    -		{"(*PrivateKey).Curve", Method, 20},
    -		{"(*PrivateKey).ECDH", Method, 20},
    -		{"(*PrivateKey).Equal", Method, 20},
    -		{"(*PrivateKey).Public", Method, 20},
    -		{"(*PrivateKey).PublicKey", Method, 20},
    -		{"(*PublicKey).Bytes", Method, 20},
    -		{"(*PublicKey).Curve", Method, 20},
    -		{"(*PublicKey).Equal", Method, 20},
    -		{"Curve", Type, 20},
    -		{"P256", Func, 20},
    -		{"P384", Func, 20},
    -		{"P521", Func, 20},
    -		{"PrivateKey", Type, 20},
    -		{"PublicKey", Type, 20},
    -		{"X25519", Func, 20},
    +		{"(*PrivateKey).Bytes", Method, 20, ""},
    +		{"(*PrivateKey).Curve", Method, 20, ""},
    +		{"(*PrivateKey).ECDH", Method, 20, ""},
    +		{"(*PrivateKey).Equal", Method, 20, ""},
    +		{"(*PrivateKey).Public", Method, 20, ""},
    +		{"(*PrivateKey).PublicKey", Method, 20, ""},
    +		{"(*PublicKey).Bytes", Method, 20, ""},
    +		{"(*PublicKey).Curve", Method, 20, ""},
    +		{"(*PublicKey).Equal", Method, 20, ""},
    +		{"Curve", Type, 20, ""},
    +		{"P256", Func, 20, "func() Curve"},
    +		{"P384", Func, 20, "func() Curve"},
    +		{"P521", Func, 20, "func() Curve"},
    +		{"PrivateKey", Type, 20, ""},
    +		{"PublicKey", Type, 20, ""},
    +		{"X25519", Func, 20, "func() Curve"},
     	},
     	"crypto/ecdsa": {
    -		{"(*PrivateKey).ECDH", Method, 20},
    -		{"(*PrivateKey).Equal", Method, 15},
    -		{"(*PrivateKey).Public", Method, 4},
    -		{"(*PrivateKey).Sign", Method, 4},
    -		{"(*PublicKey).ECDH", Method, 20},
    -		{"(*PublicKey).Equal", Method, 15},
    -		{"(PrivateKey).Add", Method, 0},
    -		{"(PrivateKey).Double", Method, 0},
    -		{"(PrivateKey).IsOnCurve", Method, 0},
    -		{"(PrivateKey).Params", Method, 0},
    -		{"(PrivateKey).ScalarBaseMult", Method, 0},
    -		{"(PrivateKey).ScalarMult", Method, 0},
    -		{"(PublicKey).Add", Method, 0},
    -		{"(PublicKey).Double", Method, 0},
    -		{"(PublicKey).IsOnCurve", Method, 0},
    -		{"(PublicKey).Params", Method, 0},
    -		{"(PublicKey).ScalarBaseMult", Method, 0},
    -		{"(PublicKey).ScalarMult", Method, 0},
    -		{"GenerateKey", Func, 0},
    -		{"PrivateKey", Type, 0},
    -		{"PrivateKey.D", Field, 0},
    -		{"PrivateKey.PublicKey", Field, 0},
    -		{"PublicKey", Type, 0},
    -		{"PublicKey.Curve", Field, 0},
    -		{"PublicKey.X", Field, 0},
    -		{"PublicKey.Y", Field, 0},
    -		{"Sign", Func, 0},
    -		{"SignASN1", Func, 15},
    -		{"Verify", Func, 0},
    -		{"VerifyASN1", Func, 15},
    +		{"(*PrivateKey).Bytes", Method, 25, ""},
    +		{"(*PrivateKey).ECDH", Method, 20, ""},
    +		{"(*PrivateKey).Equal", Method, 15, ""},
    +		{"(*PrivateKey).Public", Method, 4, ""},
    +		{"(*PrivateKey).Sign", Method, 4, ""},
    +		{"(*PublicKey).Bytes", Method, 25, ""},
    +		{"(*PublicKey).ECDH", Method, 20, ""},
    +		{"(*PublicKey).Equal", Method, 15, ""},
    +		{"(PrivateKey).Add", Method, 0, ""},
    +		{"(PrivateKey).Double", Method, 0, ""},
    +		{"(PrivateKey).IsOnCurve", Method, 0, ""},
    +		{"(PrivateKey).Params", Method, 0, ""},
    +		{"(PrivateKey).ScalarBaseMult", Method, 0, ""},
    +		{"(PrivateKey).ScalarMult", Method, 0, ""},
    +		{"(PublicKey).Add", Method, 0, ""},
    +		{"(PublicKey).Double", Method, 0, ""},
    +		{"(PublicKey).IsOnCurve", Method, 0, ""},
    +		{"(PublicKey).Params", Method, 0, ""},
    +		{"(PublicKey).ScalarBaseMult", Method, 0, ""},
    +		{"(PublicKey).ScalarMult", Method, 0, ""},
    +		{"GenerateKey", Func, 0, "func(c elliptic.Curve, rand io.Reader) (*PrivateKey, error)"},
    +		{"ParseRawPrivateKey", Func, 25, "func(curve elliptic.Curve, data []byte) (*PrivateKey, error)"},
    +		{"ParseUncompressedPublicKey", Func, 25, "func(curve elliptic.Curve, data []byte) (*PublicKey, error)"},
    +		{"PrivateKey", Type, 0, ""},
    +		{"PrivateKey.D", Field, 0, ""},
    +		{"PrivateKey.PublicKey", Field, 0, ""},
    +		{"PublicKey", Type, 0, ""},
    +		{"PublicKey.Curve", Field, 0, ""},
    +		{"PublicKey.X", Field, 0, ""},
    +		{"PublicKey.Y", Field, 0, ""},
    +		{"Sign", Func, 0, "func(rand io.Reader, priv *PrivateKey, hash []byte) (r *big.Int, s *big.Int, err error)"},
    +		{"SignASN1", Func, 15, "func(rand io.Reader, priv *PrivateKey, hash []byte) ([]byte, error)"},
    +		{"Verify", Func, 0, "func(pub *PublicKey, hash []byte, r *big.Int, s *big.Int) bool"},
    +		{"VerifyASN1", Func, 15, "func(pub *PublicKey, hash []byte, sig []byte) bool"},
     	},
     	"crypto/ed25519": {
    -		{"(*Options).HashFunc", Method, 20},
    -		{"(PrivateKey).Equal", Method, 15},
    -		{"(PrivateKey).Public", Method, 13},
    -		{"(PrivateKey).Seed", Method, 13},
    -		{"(PrivateKey).Sign", Method, 13},
    -		{"(PublicKey).Equal", Method, 15},
    -		{"GenerateKey", Func, 13},
    -		{"NewKeyFromSeed", Func, 13},
    -		{"Options", Type, 20},
    -		{"Options.Context", Field, 20},
    -		{"Options.Hash", Field, 20},
    -		{"PrivateKey", Type, 13},
    -		{"PrivateKeySize", Const, 13},
    -		{"PublicKey", Type, 13},
    -		{"PublicKeySize", Const, 13},
    -		{"SeedSize", Const, 13},
    -		{"Sign", Func, 13},
    -		{"SignatureSize", Const, 13},
    -		{"Verify", Func, 13},
    -		{"VerifyWithOptions", Func, 20},
    +		{"(*Options).HashFunc", Method, 20, ""},
    +		{"(PrivateKey).Equal", Method, 15, ""},
    +		{"(PrivateKey).Public", Method, 13, ""},
    +		{"(PrivateKey).Seed", Method, 13, ""},
    +		{"(PrivateKey).Sign", Method, 13, ""},
    +		{"(PublicKey).Equal", Method, 15, ""},
    +		{"GenerateKey", Func, 13, "func(rand io.Reader) (PublicKey, PrivateKey, error)"},
    +		{"NewKeyFromSeed", Func, 13, "func(seed []byte) PrivateKey"},
    +		{"Options", Type, 20, ""},
    +		{"Options.Context", Field, 20, ""},
    +		{"Options.Hash", Field, 20, ""},
    +		{"PrivateKey", Type, 13, ""},
    +		{"PrivateKeySize", Const, 13, ""},
    +		{"PublicKey", Type, 13, ""},
    +		{"PublicKeySize", Const, 13, ""},
    +		{"SeedSize", Const, 13, ""},
    +		{"Sign", Func, 13, "func(privateKey PrivateKey, message []byte) []byte"},
    +		{"SignatureSize", Const, 13, ""},
    +		{"Verify", Func, 13, "func(publicKey PublicKey, message []byte, sig []byte) bool"},
    +		{"VerifyWithOptions", Func, 20, "func(publicKey PublicKey, message []byte, sig []byte, opts *Options) error"},
     	},
     	"crypto/elliptic": {
    -		{"(*CurveParams).Add", Method, 0},
    -		{"(*CurveParams).Double", Method, 0},
    -		{"(*CurveParams).IsOnCurve", Method, 0},
    -		{"(*CurveParams).Params", Method, 0},
    -		{"(*CurveParams).ScalarBaseMult", Method, 0},
    -		{"(*CurveParams).ScalarMult", Method, 0},
    -		{"Curve", Type, 0},
    -		{"CurveParams", Type, 0},
    -		{"CurveParams.B", Field, 0},
    -		{"CurveParams.BitSize", Field, 0},
    -		{"CurveParams.Gx", Field, 0},
    -		{"CurveParams.Gy", Field, 0},
    -		{"CurveParams.N", Field, 0},
    -		{"CurveParams.Name", Field, 5},
    -		{"CurveParams.P", Field, 0},
    -		{"GenerateKey", Func, 0},
    -		{"Marshal", Func, 0},
    -		{"MarshalCompressed", Func, 15},
    -		{"P224", Func, 0},
    -		{"P256", Func, 0},
    -		{"P384", Func, 0},
    -		{"P521", Func, 0},
    -		{"Unmarshal", Func, 0},
    -		{"UnmarshalCompressed", Func, 15},
    +		{"(*CurveParams).Add", Method, 0, ""},
    +		{"(*CurveParams).Double", Method, 0, ""},
    +		{"(*CurveParams).IsOnCurve", Method, 0, ""},
    +		{"(*CurveParams).Params", Method, 0, ""},
    +		{"(*CurveParams).ScalarBaseMult", Method, 0, ""},
    +		{"(*CurveParams).ScalarMult", Method, 0, ""},
    +		{"Curve", Type, 0, ""},
    +		{"CurveParams", Type, 0, ""},
    +		{"CurveParams.B", Field, 0, ""},
    +		{"CurveParams.BitSize", Field, 0, ""},
    +		{"CurveParams.Gx", Field, 0, ""},
    +		{"CurveParams.Gy", Field, 0, ""},
    +		{"CurveParams.N", Field, 0, ""},
    +		{"CurveParams.Name", Field, 5, ""},
    +		{"CurveParams.P", Field, 0, ""},
    +		{"GenerateKey", Func, 0, "func(curve Curve, rand io.Reader) (priv []byte, x *big.Int, y *big.Int, err error)"},
    +		{"Marshal", Func, 0, "func(curve Curve, x *big.Int, y *big.Int) []byte"},
    +		{"MarshalCompressed", Func, 15, "func(curve Curve, x *big.Int, y *big.Int) []byte"},
    +		{"P224", Func, 0, "func() Curve"},
    +		{"P256", Func, 0, "func() Curve"},
    +		{"P384", Func, 0, "func() Curve"},
    +		{"P521", Func, 0, "func() Curve"},
    +		{"Unmarshal", Func, 0, "func(curve Curve, data []byte) (x *big.Int, y *big.Int)"},
    +		{"UnmarshalCompressed", Func, 15, "func(curve Curve, data []byte) (x *big.Int, y *big.Int)"},
    +	},
    +	"crypto/fips140": {
    +		{"Enabled", Func, 24, "func() bool"},
    +	},
    +	"crypto/hkdf": {
    +		{"Expand", Func, 24, "func[H hash.Hash](h func() H, pseudorandomKey []byte, info string, keyLength int) ([]byte, error)"},
    +		{"Extract", Func, 24, "func[H hash.Hash](h func() H, secret []byte, salt []byte) ([]byte, error)"},
    +		{"Key", Func, 24, "func[Hash hash.Hash](h func() Hash, secret []byte, salt []byte, info string, keyLength int) ([]byte, error)"},
     	},
     	"crypto/hmac": {
    -		{"Equal", Func, 1},
    -		{"New", Func, 0},
    +		{"Equal", Func, 1, "func(mac1 []byte, mac2 []byte) bool"},
    +		{"New", Func, 0, "func(h func() hash.Hash, key []byte) hash.Hash"},
     	},
     	"crypto/md5": {
    -		{"BlockSize", Const, 0},
    -		{"New", Func, 0},
    -		{"Size", Const, 0},
    -		{"Sum", Func, 2},
    +		{"BlockSize", Const, 0, ""},
    +		{"New", Func, 0, "func() hash.Hash"},
    +		{"Size", Const, 0, ""},
    +		{"Sum", Func, 2, "func(data []byte) [16]byte"},
    +	},
    +	"crypto/mlkem": {
    +		{"(*DecapsulationKey1024).Bytes", Method, 24, ""},
    +		{"(*DecapsulationKey1024).Decapsulate", Method, 24, ""},
    +		{"(*DecapsulationKey1024).EncapsulationKey", Method, 24, ""},
    +		{"(*DecapsulationKey768).Bytes", Method, 24, ""},
    +		{"(*DecapsulationKey768).Decapsulate", Method, 24, ""},
    +		{"(*DecapsulationKey768).EncapsulationKey", Method, 24, ""},
    +		{"(*EncapsulationKey1024).Bytes", Method, 24, ""},
    +		{"(*EncapsulationKey1024).Encapsulate", Method, 24, ""},
    +		{"(*EncapsulationKey768).Bytes", Method, 24, ""},
    +		{"(*EncapsulationKey768).Encapsulate", Method, 24, ""},
    +		{"CiphertextSize1024", Const, 24, ""},
    +		{"CiphertextSize768", Const, 24, ""},
    +		{"DecapsulationKey1024", Type, 24, ""},
    +		{"DecapsulationKey768", Type, 24, ""},
    +		{"EncapsulationKey1024", Type, 24, ""},
    +		{"EncapsulationKey768", Type, 24, ""},
    +		{"EncapsulationKeySize1024", Const, 24, ""},
    +		{"EncapsulationKeySize768", Const, 24, ""},
    +		{"GenerateKey1024", Func, 24, "func() (*DecapsulationKey1024, error)"},
    +		{"GenerateKey768", Func, 24, "func() (*DecapsulationKey768, error)"},
    +		{"NewDecapsulationKey1024", Func, 24, "func(seed []byte) (*DecapsulationKey1024, error)"},
    +		{"NewDecapsulationKey768", Func, 24, "func(seed []byte) (*DecapsulationKey768, error)"},
    +		{"NewEncapsulationKey1024", Func, 24, "func(encapsulationKey []byte) (*EncapsulationKey1024, error)"},
    +		{"NewEncapsulationKey768", Func, 24, "func(encapsulationKey []byte) (*EncapsulationKey768, error)"},
    +		{"SeedSize", Const, 24, ""},
    +		{"SharedKeySize", Const, 24, ""},
    +	},
    +	"crypto/pbkdf2": {
    +		{"Key", Func, 24, "func[Hash hash.Hash](h func() Hash, password string, salt []byte, iter int, keyLength int) ([]byte, error)"},
     	},
     	"crypto/rand": {
    -		{"Int", Func, 0},
    -		{"Prime", Func, 0},
    -		{"Read", Func, 0},
    -		{"Reader", Var, 0},
    +		{"Int", Func, 0, "func(rand io.Reader, max *big.Int) (n *big.Int, err error)"},
    +		{"Prime", Func, 0, "func(rand io.Reader, bits int) (*big.Int, error)"},
    +		{"Read", Func, 0, "func(b []byte) (n int, err error)"},
    +		{"Reader", Var, 0, ""},
    +		{"Text", Func, 24, "func() string"},
     	},
     	"crypto/rc4": {
    -		{"(*Cipher).Reset", Method, 0},
    -		{"(*Cipher).XORKeyStream", Method, 0},
    -		{"(KeySizeError).Error", Method, 0},
    -		{"Cipher", Type, 0},
    -		{"KeySizeError", Type, 0},
    -		{"NewCipher", Func, 0},
    +		{"(*Cipher).Reset", Method, 0, ""},
    +		{"(*Cipher).XORKeyStream", Method, 0, ""},
    +		{"(KeySizeError).Error", Method, 0, ""},
    +		{"Cipher", Type, 0, ""},
    +		{"KeySizeError", Type, 0, ""},
    +		{"NewCipher", Func, 0, "func(key []byte) (*Cipher, error)"},
     	},
     	"crypto/rsa": {
    -		{"(*PSSOptions).HashFunc", Method, 4},
    -		{"(*PrivateKey).Decrypt", Method, 5},
    -		{"(*PrivateKey).Equal", Method, 15},
    -		{"(*PrivateKey).Precompute", Method, 0},
    -		{"(*PrivateKey).Public", Method, 4},
    -		{"(*PrivateKey).Sign", Method, 4},
    -		{"(*PrivateKey).Size", Method, 11},
    -		{"(*PrivateKey).Validate", Method, 0},
    -		{"(*PublicKey).Equal", Method, 15},
    -		{"(*PublicKey).Size", Method, 11},
    -		{"CRTValue", Type, 0},
    -		{"CRTValue.Coeff", Field, 0},
    -		{"CRTValue.Exp", Field, 0},
    -		{"CRTValue.R", Field, 0},
    -		{"DecryptOAEP", Func, 0},
    -		{"DecryptPKCS1v15", Func, 0},
    -		{"DecryptPKCS1v15SessionKey", Func, 0},
    -		{"EncryptOAEP", Func, 0},
    -		{"EncryptPKCS1v15", Func, 0},
    -		{"ErrDecryption", Var, 0},
    -		{"ErrMessageTooLong", Var, 0},
    -		{"ErrVerification", Var, 0},
    -		{"GenerateKey", Func, 0},
    -		{"GenerateMultiPrimeKey", Func, 0},
    -		{"OAEPOptions", Type, 5},
    -		{"OAEPOptions.Hash", Field, 5},
    -		{"OAEPOptions.Label", Field, 5},
    -		{"OAEPOptions.MGFHash", Field, 20},
    -		{"PKCS1v15DecryptOptions", Type, 5},
    -		{"PKCS1v15DecryptOptions.SessionKeyLen", Field, 5},
    -		{"PSSOptions", Type, 2},
    -		{"PSSOptions.Hash", Field, 4},
    -		{"PSSOptions.SaltLength", Field, 2},
    -		{"PSSSaltLengthAuto", Const, 2},
    -		{"PSSSaltLengthEqualsHash", Const, 2},
    -		{"PrecomputedValues", Type, 0},
    -		{"PrecomputedValues.CRTValues", Field, 0},
    -		{"PrecomputedValues.Dp", Field, 0},
    -		{"PrecomputedValues.Dq", Field, 0},
    -		{"PrecomputedValues.Qinv", Field, 0},
    -		{"PrivateKey", Type, 0},
    -		{"PrivateKey.D", Field, 0},
    -		{"PrivateKey.Precomputed", Field, 0},
    -		{"PrivateKey.Primes", Field, 0},
    -		{"PrivateKey.PublicKey", Field, 0},
    -		{"PublicKey", Type, 0},
    -		{"PublicKey.E", Field, 0},
    -		{"PublicKey.N", Field, 0},
    -		{"SignPKCS1v15", Func, 0},
    -		{"SignPSS", Func, 2},
    -		{"VerifyPKCS1v15", Func, 0},
    -		{"VerifyPSS", Func, 2},
    +		{"(*PSSOptions).HashFunc", Method, 4, ""},
    +		{"(*PrivateKey).Decrypt", Method, 5, ""},
    +		{"(*PrivateKey).Equal", Method, 15, ""},
    +		{"(*PrivateKey).Precompute", Method, 0, ""},
    +		{"(*PrivateKey).Public", Method, 4, ""},
    +		{"(*PrivateKey).Sign", Method, 4, ""},
    +		{"(*PrivateKey).Size", Method, 11, ""},
    +		{"(*PrivateKey).Validate", Method, 0, ""},
    +		{"(*PublicKey).Equal", Method, 15, ""},
    +		{"(*PublicKey).Size", Method, 11, ""},
    +		{"CRTValue", Type, 0, ""},
    +		{"CRTValue.Coeff", Field, 0, ""},
    +		{"CRTValue.Exp", Field, 0, ""},
    +		{"CRTValue.R", Field, 0, ""},
    +		{"DecryptOAEP", Func, 0, "func(hash hash.Hash, random io.Reader, priv *PrivateKey, ciphertext []byte, label []byte) ([]byte, error)"},
    +		{"DecryptPKCS1v15", Func, 0, "func(random io.Reader, priv *PrivateKey, ciphertext []byte) ([]byte, error)"},
    +		{"DecryptPKCS1v15SessionKey", Func, 0, "func(random io.Reader, priv *PrivateKey, ciphertext []byte, key []byte) error"},
    +		{"EncryptOAEP", Func, 0, "func(hash hash.Hash, random io.Reader, pub *PublicKey, msg []byte, label []byte) ([]byte, error)"},
    +		{"EncryptPKCS1v15", Func, 0, "func(random io.Reader, pub *PublicKey, msg []byte) ([]byte, error)"},
    +		{"ErrDecryption", Var, 0, ""},
    +		{"ErrMessageTooLong", Var, 0, ""},
    +		{"ErrVerification", Var, 0, ""},
    +		{"GenerateKey", Func, 0, "func(random io.Reader, bits int) (*PrivateKey, error)"},
    +		{"GenerateMultiPrimeKey", Func, 0, "func(random io.Reader, nprimes int, bits int) (*PrivateKey, error)"},
    +		{"OAEPOptions", Type, 5, ""},
    +		{"OAEPOptions.Hash", Field, 5, ""},
    +		{"OAEPOptions.Label", Field, 5, ""},
    +		{"OAEPOptions.MGFHash", Field, 20, ""},
    +		{"PKCS1v15DecryptOptions", Type, 5, ""},
    +		{"PKCS1v15DecryptOptions.SessionKeyLen", Field, 5, ""},
    +		{"PSSOptions", Type, 2, ""},
    +		{"PSSOptions.Hash", Field, 4, ""},
    +		{"PSSOptions.SaltLength", Field, 2, ""},
    +		{"PSSSaltLengthAuto", Const, 2, ""},
    +		{"PSSSaltLengthEqualsHash", Const, 2, ""},
    +		{"PrecomputedValues", Type, 0, ""},
    +		{"PrecomputedValues.CRTValues", Field, 0, ""},
    +		{"PrecomputedValues.Dp", Field, 0, ""},
    +		{"PrecomputedValues.Dq", Field, 0, ""},
    +		{"PrecomputedValues.Qinv", Field, 0, ""},
    +		{"PrivateKey", Type, 0, ""},
    +		{"PrivateKey.D", Field, 0, ""},
    +		{"PrivateKey.Precomputed", Field, 0, ""},
    +		{"PrivateKey.Primes", Field, 0, ""},
    +		{"PrivateKey.PublicKey", Field, 0, ""},
    +		{"PublicKey", Type, 0, ""},
    +		{"PublicKey.E", Field, 0, ""},
    +		{"PublicKey.N", Field, 0, ""},
    +		{"SignPKCS1v15", Func, 0, "func(random io.Reader, priv *PrivateKey, hash crypto.Hash, hashed []byte) ([]byte, error)"},
    +		{"SignPSS", Func, 2, "func(rand io.Reader, priv *PrivateKey, hash crypto.Hash, digest []byte, opts *PSSOptions) ([]byte, error)"},
    +		{"VerifyPKCS1v15", Func, 0, "func(pub *PublicKey, hash crypto.Hash, hashed []byte, sig []byte) error"},
    +		{"VerifyPSS", Func, 2, "func(pub *PublicKey, hash crypto.Hash, digest []byte, sig []byte, opts *PSSOptions) error"},
     	},
     	"crypto/sha1": {
    -		{"BlockSize", Const, 0},
    -		{"New", Func, 0},
    -		{"Size", Const, 0},
    -		{"Sum", Func, 2},
    +		{"BlockSize", Const, 0, ""},
    +		{"New", Func, 0, "func() hash.Hash"},
    +		{"Size", Const, 0, ""},
    +		{"Sum", Func, 2, "func(data []byte) [20]byte"},
     	},
     	"crypto/sha256": {
    -		{"BlockSize", Const, 0},
    -		{"New", Func, 0},
    -		{"New224", Func, 0},
    -		{"Size", Const, 0},
    -		{"Size224", Const, 0},
    -		{"Sum224", Func, 2},
    -		{"Sum256", Func, 2},
    +		{"BlockSize", Const, 0, ""},
    +		{"New", Func, 0, "func() hash.Hash"},
    +		{"New224", Func, 0, "func() hash.Hash"},
    +		{"Size", Const, 0, ""},
    +		{"Size224", Const, 0, ""},
    +		{"Sum224", Func, 2, "func(data []byte) [28]byte"},
    +		{"Sum256", Func, 2, "func(data []byte) [32]byte"},
    +	},
    +	"crypto/sha3": {
    +		{"(*SHA3).AppendBinary", Method, 24, ""},
    +		{"(*SHA3).BlockSize", Method, 24, ""},
    +		{"(*SHA3).Clone", Method, 25, ""},
    +		{"(*SHA3).MarshalBinary", Method, 24, ""},
    +		{"(*SHA3).Reset", Method, 24, ""},
    +		{"(*SHA3).Size", Method, 24, ""},
    +		{"(*SHA3).Sum", Method, 24, ""},
    +		{"(*SHA3).UnmarshalBinary", Method, 24, ""},
    +		{"(*SHA3).Write", Method, 24, ""},
    +		{"(*SHAKE).AppendBinary", Method, 24, ""},
    +		{"(*SHAKE).BlockSize", Method, 24, ""},
    +		{"(*SHAKE).MarshalBinary", Method, 24, ""},
    +		{"(*SHAKE).Read", Method, 24, ""},
    +		{"(*SHAKE).Reset", Method, 24, ""},
    +		{"(*SHAKE).UnmarshalBinary", Method, 24, ""},
    +		{"(*SHAKE).Write", Method, 24, ""},
    +		{"New224", Func, 24, "func() *SHA3"},
    +		{"New256", Func, 24, "func() *SHA3"},
    +		{"New384", Func, 24, "func() *SHA3"},
    +		{"New512", Func, 24, "func() *SHA3"},
    +		{"NewCSHAKE128", Func, 24, "func(N []byte, S []byte) *SHAKE"},
    +		{"NewCSHAKE256", Func, 24, "func(N []byte, S []byte) *SHAKE"},
    +		{"NewSHAKE128", Func, 24, "func() *SHAKE"},
    +		{"NewSHAKE256", Func, 24, "func() *SHAKE"},
    +		{"SHA3", Type, 24, ""},
    +		{"SHAKE", Type, 24, ""},
    +		{"Sum224", Func, 24, "func(data []byte) [28]byte"},
    +		{"Sum256", Func, 24, "func(data []byte) [32]byte"},
    +		{"Sum384", Func, 24, "func(data []byte) [48]byte"},
    +		{"Sum512", Func, 24, "func(data []byte) [64]byte"},
    +		{"SumSHAKE128", Func, 24, "func(data []byte, length int) []byte"},
    +		{"SumSHAKE256", Func, 24, "func(data []byte, length int) []byte"},
     	},
     	"crypto/sha512": {
    -		{"BlockSize", Const, 0},
    -		{"New", Func, 0},
    -		{"New384", Func, 0},
    -		{"New512_224", Func, 5},
    -		{"New512_256", Func, 5},
    -		{"Size", Const, 0},
    -		{"Size224", Const, 5},
    -		{"Size256", Const, 5},
    -		{"Size384", Const, 0},
    -		{"Sum384", Func, 2},
    -		{"Sum512", Func, 2},
    -		{"Sum512_224", Func, 5},
    -		{"Sum512_256", Func, 5},
    +		{"BlockSize", Const, 0, ""},
    +		{"New", Func, 0, "func() hash.Hash"},
    +		{"New384", Func, 0, "func() hash.Hash"},
    +		{"New512_224", Func, 5, "func() hash.Hash"},
    +		{"New512_256", Func, 5, "func() hash.Hash"},
    +		{"Size", Const, 0, ""},
    +		{"Size224", Const, 5, ""},
    +		{"Size256", Const, 5, ""},
    +		{"Size384", Const, 0, ""},
    +		{"Sum384", Func, 2, "func(data []byte) [48]byte"},
    +		{"Sum512", Func, 2, "func(data []byte) [64]byte"},
    +		{"Sum512_224", Func, 5, "func(data []byte) [28]byte"},
    +		{"Sum512_256", Func, 5, "func(data []byte) [32]byte"},
     	},
     	"crypto/subtle": {
    -		{"ConstantTimeByteEq", Func, 0},
    -		{"ConstantTimeCompare", Func, 0},
    -		{"ConstantTimeCopy", Func, 0},
    -		{"ConstantTimeEq", Func, 0},
    -		{"ConstantTimeLessOrEq", Func, 2},
    -		{"ConstantTimeSelect", Func, 0},
    -		{"XORBytes", Func, 20},
    +		{"ConstantTimeByteEq", Func, 0, "func(x uint8, y uint8) int"},
    +		{"ConstantTimeCompare", Func, 0, "func(x []byte, y []byte) int"},
    +		{"ConstantTimeCopy", Func, 0, "func(v int, x []byte, y []byte)"},
    +		{"ConstantTimeEq", Func, 0, "func(x int32, y int32) int"},
    +		{"ConstantTimeLessOrEq", Func, 2, "func(x int, y int) int"},
    +		{"ConstantTimeSelect", Func, 0, "func(v int, x int, y int) int"},
    +		{"WithDataIndependentTiming", Func, 24, "func(f func())"},
    +		{"XORBytes", Func, 20, "func(dst []byte, x []byte, y []byte) int"},
     	},
     	"crypto/tls": {
    -		{"(*CertificateRequestInfo).Context", Method, 17},
    -		{"(*CertificateRequestInfo).SupportsCertificate", Method, 14},
    -		{"(*CertificateVerificationError).Error", Method, 20},
    -		{"(*CertificateVerificationError).Unwrap", Method, 20},
    -		{"(*ClientHelloInfo).Context", Method, 17},
    -		{"(*ClientHelloInfo).SupportsCertificate", Method, 14},
    -		{"(*ClientSessionState).ResumptionState", Method, 21},
    -		{"(*Config).BuildNameToCertificate", Method, 0},
    -		{"(*Config).Clone", Method, 8},
    -		{"(*Config).DecryptTicket", Method, 21},
    -		{"(*Config).EncryptTicket", Method, 21},
    -		{"(*Config).SetSessionTicketKeys", Method, 5},
    -		{"(*Conn).Close", Method, 0},
    -		{"(*Conn).CloseWrite", Method, 8},
    -		{"(*Conn).ConnectionState", Method, 0},
    -		{"(*Conn).Handshake", Method, 0},
    -		{"(*Conn).HandshakeContext", Method, 17},
    -		{"(*Conn).LocalAddr", Method, 0},
    -		{"(*Conn).NetConn", Method, 18},
    -		{"(*Conn).OCSPResponse", Method, 0},
    -		{"(*Conn).Read", Method, 0},
    -		{"(*Conn).RemoteAddr", Method, 0},
    -		{"(*Conn).SetDeadline", Method, 0},
    -		{"(*Conn).SetReadDeadline", Method, 0},
    -		{"(*Conn).SetWriteDeadline", Method, 0},
    -		{"(*Conn).VerifyHostname", Method, 0},
    -		{"(*Conn).Write", Method, 0},
    -		{"(*ConnectionState).ExportKeyingMaterial", Method, 11},
    -		{"(*Dialer).Dial", Method, 15},
    -		{"(*Dialer).DialContext", Method, 15},
    -		{"(*ECHRejectionError).Error", Method, 23},
    -		{"(*QUICConn).Close", Method, 21},
    -		{"(*QUICConn).ConnectionState", Method, 21},
    -		{"(*QUICConn).HandleData", Method, 21},
    -		{"(*QUICConn).NextEvent", Method, 21},
    -		{"(*QUICConn).SendSessionTicket", Method, 21},
    -		{"(*QUICConn).SetTransportParameters", Method, 21},
    -		{"(*QUICConn).Start", Method, 21},
    -		{"(*QUICConn).StoreSession", Method, 23},
    -		{"(*SessionState).Bytes", Method, 21},
    -		{"(AlertError).Error", Method, 21},
    -		{"(ClientAuthType).String", Method, 15},
    -		{"(CurveID).String", Method, 15},
    -		{"(QUICEncryptionLevel).String", Method, 21},
    -		{"(RecordHeaderError).Error", Method, 6},
    -		{"(SignatureScheme).String", Method, 15},
    -		{"AlertError", Type, 21},
    -		{"Certificate", Type, 0},
    -		{"Certificate.Certificate", Field, 0},
    -		{"Certificate.Leaf", Field, 0},
    -		{"Certificate.OCSPStaple", Field, 0},
    -		{"Certificate.PrivateKey", Field, 0},
    -		{"Certificate.SignedCertificateTimestamps", Field, 5},
    -		{"Certificate.SupportedSignatureAlgorithms", Field, 14},
    -		{"CertificateRequestInfo", Type, 8},
    -		{"CertificateRequestInfo.AcceptableCAs", Field, 8},
    -		{"CertificateRequestInfo.SignatureSchemes", Field, 8},
    -		{"CertificateRequestInfo.Version", Field, 14},
    -		{"CertificateVerificationError", Type, 20},
    -		{"CertificateVerificationError.Err", Field, 20},
    -		{"CertificateVerificationError.UnverifiedCertificates", Field, 20},
    -		{"CipherSuite", Type, 14},
    -		{"CipherSuite.ID", Field, 14},
    -		{"CipherSuite.Insecure", Field, 14},
    -		{"CipherSuite.Name", Field, 14},
    -		{"CipherSuite.SupportedVersions", Field, 14},
    -		{"CipherSuiteName", Func, 14},
    -		{"CipherSuites", Func, 14},
    -		{"Client", Func, 0},
    -		{"ClientAuthType", Type, 0},
    -		{"ClientHelloInfo", Type, 4},
    -		{"ClientHelloInfo.CipherSuites", Field, 4},
    -		{"ClientHelloInfo.Conn", Field, 8},
    -		{"ClientHelloInfo.ServerName", Field, 4},
    -		{"ClientHelloInfo.SignatureSchemes", Field, 8},
    -		{"ClientHelloInfo.SupportedCurves", Field, 4},
    -		{"ClientHelloInfo.SupportedPoints", Field, 4},
    -		{"ClientHelloInfo.SupportedProtos", Field, 8},
    -		{"ClientHelloInfo.SupportedVersions", Field, 8},
    -		{"ClientSessionCache", Type, 3},
    -		{"ClientSessionState", Type, 3},
    -		{"Config", Type, 0},
    -		{"Config.Certificates", Field, 0},
    -		{"Config.CipherSuites", Field, 0},
    -		{"Config.ClientAuth", Field, 0},
    -		{"Config.ClientCAs", Field, 0},
    -		{"Config.ClientSessionCache", Field, 3},
    -		{"Config.CurvePreferences", Field, 3},
    -		{"Config.DynamicRecordSizingDisabled", Field, 7},
    -		{"Config.EncryptedClientHelloConfigList", Field, 23},
    -		{"Config.EncryptedClientHelloRejectionVerify", Field, 23},
    -		{"Config.GetCertificate", Field, 4},
    -		{"Config.GetClientCertificate", Field, 8},
    -		{"Config.GetConfigForClient", Field, 8},
    -		{"Config.InsecureSkipVerify", Field, 0},
    -		{"Config.KeyLogWriter", Field, 8},
    -		{"Config.MaxVersion", Field, 2},
    -		{"Config.MinVersion", Field, 2},
    -		{"Config.NameToCertificate", Field, 0},
    -		{"Config.NextProtos", Field, 0},
    -		{"Config.PreferServerCipherSuites", Field, 1},
    -		{"Config.Rand", Field, 0},
    -		{"Config.Renegotiation", Field, 7},
    -		{"Config.RootCAs", Field, 0},
    -		{"Config.ServerName", Field, 0},
    -		{"Config.SessionTicketKey", Field, 1},
    -		{"Config.SessionTicketsDisabled", Field, 1},
    -		{"Config.Time", Field, 0},
    -		{"Config.UnwrapSession", Field, 21},
    -		{"Config.VerifyConnection", Field, 15},
    -		{"Config.VerifyPeerCertificate", Field, 8},
    -		{"Config.WrapSession", Field, 21},
    -		{"Conn", Type, 0},
    -		{"ConnectionState", Type, 0},
    -		{"ConnectionState.CipherSuite", Field, 0},
    -		{"ConnectionState.DidResume", Field, 1},
    -		{"ConnectionState.ECHAccepted", Field, 23},
    -		{"ConnectionState.HandshakeComplete", Field, 0},
    -		{"ConnectionState.NegotiatedProtocol", Field, 0},
    -		{"ConnectionState.NegotiatedProtocolIsMutual", Field, 0},
    -		{"ConnectionState.OCSPResponse", Field, 5},
    -		{"ConnectionState.PeerCertificates", Field, 0},
    -		{"ConnectionState.ServerName", Field, 0},
    -		{"ConnectionState.SignedCertificateTimestamps", Field, 5},
    -		{"ConnectionState.TLSUnique", Field, 4},
    -		{"ConnectionState.VerifiedChains", Field, 0},
    -		{"ConnectionState.Version", Field, 3},
    -		{"CurveID", Type, 3},
    -		{"CurveP256", Const, 3},
    -		{"CurveP384", Const, 3},
    -		{"CurveP521", Const, 3},
    -		{"Dial", Func, 0},
    -		{"DialWithDialer", Func, 3},
    -		{"Dialer", Type, 15},
    -		{"Dialer.Config", Field, 15},
    -		{"Dialer.NetDialer", Field, 15},
    -		{"ECDSAWithP256AndSHA256", Const, 8},
    -		{"ECDSAWithP384AndSHA384", Const, 8},
    -		{"ECDSAWithP521AndSHA512", Const, 8},
    -		{"ECDSAWithSHA1", Const, 10},
    -		{"ECHRejectionError", Type, 23},
    -		{"ECHRejectionError.RetryConfigList", Field, 23},
    -		{"Ed25519", Const, 13},
    -		{"InsecureCipherSuites", Func, 14},
    -		{"Listen", Func, 0},
    -		{"LoadX509KeyPair", Func, 0},
    -		{"NewLRUClientSessionCache", Func, 3},
    -		{"NewListener", Func, 0},
    -		{"NewResumptionState", Func, 21},
    -		{"NoClientCert", Const, 0},
    -		{"PKCS1WithSHA1", Const, 8},
    -		{"PKCS1WithSHA256", Const, 8},
    -		{"PKCS1WithSHA384", Const, 8},
    -		{"PKCS1WithSHA512", Const, 8},
    -		{"PSSWithSHA256", Const, 8},
    -		{"PSSWithSHA384", Const, 8},
    -		{"PSSWithSHA512", Const, 8},
    -		{"ParseSessionState", Func, 21},
    -		{"QUICClient", Func, 21},
    -		{"QUICConfig", Type, 21},
    -		{"QUICConfig.EnableSessionEvents", Field, 23},
    -		{"QUICConfig.TLSConfig", Field, 21},
    -		{"QUICConn", Type, 21},
    -		{"QUICEncryptionLevel", Type, 21},
    -		{"QUICEncryptionLevelApplication", Const, 21},
    -		{"QUICEncryptionLevelEarly", Const, 21},
    -		{"QUICEncryptionLevelHandshake", Const, 21},
    -		{"QUICEncryptionLevelInitial", Const, 21},
    -		{"QUICEvent", Type, 21},
    -		{"QUICEvent.Data", Field, 21},
    -		{"QUICEvent.Kind", Field, 21},
    -		{"QUICEvent.Level", Field, 21},
    -		{"QUICEvent.SessionState", Field, 23},
    -		{"QUICEvent.Suite", Field, 21},
    -		{"QUICEventKind", Type, 21},
    -		{"QUICHandshakeDone", Const, 21},
    -		{"QUICNoEvent", Const, 21},
    -		{"QUICRejectedEarlyData", Const, 21},
    -		{"QUICResumeSession", Const, 23},
    -		{"QUICServer", Func, 21},
    -		{"QUICSessionTicketOptions", Type, 21},
    -		{"QUICSessionTicketOptions.EarlyData", Field, 21},
    -		{"QUICSessionTicketOptions.Extra", Field, 23},
    -		{"QUICSetReadSecret", Const, 21},
    -		{"QUICSetWriteSecret", Const, 21},
    -		{"QUICStoreSession", Const, 23},
    -		{"QUICTransportParameters", Const, 21},
    -		{"QUICTransportParametersRequired", Const, 21},
    -		{"QUICWriteData", Const, 21},
    -		{"RecordHeaderError", Type, 6},
    -		{"RecordHeaderError.Conn", Field, 12},
    -		{"RecordHeaderError.Msg", Field, 6},
    -		{"RecordHeaderError.RecordHeader", Field, 6},
    -		{"RenegotiateFreelyAsClient", Const, 7},
    -		{"RenegotiateNever", Const, 7},
    -		{"RenegotiateOnceAsClient", Const, 7},
    -		{"RenegotiationSupport", Type, 7},
    -		{"RequestClientCert", Const, 0},
    -		{"RequireAndVerifyClientCert", Const, 0},
    -		{"RequireAnyClientCert", Const, 0},
    -		{"Server", Func, 0},
    -		{"SessionState", Type, 21},
    -		{"SessionState.EarlyData", Field, 21},
    -		{"SessionState.Extra", Field, 21},
    -		{"SignatureScheme", Type, 8},
    -		{"TLS_AES_128_GCM_SHA256", Const, 12},
    -		{"TLS_AES_256_GCM_SHA384", Const, 12},
    -		{"TLS_CHACHA20_POLY1305_SHA256", Const, 12},
    -		{"TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA", Const, 2},
    -		{"TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256", Const, 8},
    -		{"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", Const, 2},
    -		{"TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA", Const, 2},
    -		{"TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", Const, 5},
    -		{"TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305", Const, 8},
    -		{"TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256", Const, 14},
    -		{"TLS_ECDHE_ECDSA_WITH_RC4_128_SHA", Const, 2},
    -		{"TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA", Const, 0},
    -		{"TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA", Const, 0},
    -		{"TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256", Const, 8},
    -		{"TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", Const, 2},
    -		{"TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA", Const, 1},
    -		{"TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", Const, 5},
    -		{"TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305", Const, 8},
    -		{"TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256", Const, 14},
    -		{"TLS_ECDHE_RSA_WITH_RC4_128_SHA", Const, 0},
    -		{"TLS_FALLBACK_SCSV", Const, 4},
    -		{"TLS_RSA_WITH_3DES_EDE_CBC_SHA", Const, 0},
    -		{"TLS_RSA_WITH_AES_128_CBC_SHA", Const, 0},
    -		{"TLS_RSA_WITH_AES_128_CBC_SHA256", Const, 8},
    -		{"TLS_RSA_WITH_AES_128_GCM_SHA256", Const, 6},
    -		{"TLS_RSA_WITH_AES_256_CBC_SHA", Const, 1},
    -		{"TLS_RSA_WITH_AES_256_GCM_SHA384", Const, 6},
    -		{"TLS_RSA_WITH_RC4_128_SHA", Const, 0},
    -		{"VerifyClientCertIfGiven", Const, 0},
    -		{"VersionName", Func, 21},
    -		{"VersionSSL30", Const, 2},
    -		{"VersionTLS10", Const, 2},
    -		{"VersionTLS11", Const, 2},
    -		{"VersionTLS12", Const, 2},
    -		{"VersionTLS13", Const, 12},
    -		{"X25519", Const, 8},
    -		{"X509KeyPair", Func, 0},
    +		{"(*CertificateRequestInfo).Context", Method, 17, ""},
    +		{"(*CertificateRequestInfo).SupportsCertificate", Method, 14, ""},
    +		{"(*CertificateVerificationError).Error", Method, 20, ""},
    +		{"(*CertificateVerificationError).Unwrap", Method, 20, ""},
    +		{"(*ClientHelloInfo).Context", Method, 17, ""},
    +		{"(*ClientHelloInfo).SupportsCertificate", Method, 14, ""},
    +		{"(*ClientSessionState).ResumptionState", Method, 21, ""},
    +		{"(*Config).BuildNameToCertificate", Method, 0, ""},
    +		{"(*Config).Clone", Method, 8, ""},
    +		{"(*Config).DecryptTicket", Method, 21, ""},
    +		{"(*Config).EncryptTicket", Method, 21, ""},
    +		{"(*Config).SetSessionTicketKeys", Method, 5, ""},
    +		{"(*Conn).Close", Method, 0, ""},
    +		{"(*Conn).CloseWrite", Method, 8, ""},
    +		{"(*Conn).ConnectionState", Method, 0, ""},
    +		{"(*Conn).Handshake", Method, 0, ""},
    +		{"(*Conn).HandshakeContext", Method, 17, ""},
    +		{"(*Conn).LocalAddr", Method, 0, ""},
    +		{"(*Conn).NetConn", Method, 18, ""},
    +		{"(*Conn).OCSPResponse", Method, 0, ""},
    +		{"(*Conn).Read", Method, 0, ""},
    +		{"(*Conn).RemoteAddr", Method, 0, ""},
    +		{"(*Conn).SetDeadline", Method, 0, ""},
    +		{"(*Conn).SetReadDeadline", Method, 0, ""},
    +		{"(*Conn).SetWriteDeadline", Method, 0, ""},
    +		{"(*Conn).VerifyHostname", Method, 0, ""},
    +		{"(*Conn).Write", Method, 0, ""},
    +		{"(*ConnectionState).ExportKeyingMaterial", Method, 11, ""},
    +		{"(*Dialer).Dial", Method, 15, ""},
    +		{"(*Dialer).DialContext", Method, 15, ""},
    +		{"(*ECHRejectionError).Error", Method, 23, ""},
    +		{"(*QUICConn).Close", Method, 21, ""},
    +		{"(*QUICConn).ConnectionState", Method, 21, ""},
    +		{"(*QUICConn).HandleData", Method, 21, ""},
    +		{"(*QUICConn).NextEvent", Method, 21, ""},
    +		{"(*QUICConn).SendSessionTicket", Method, 21, ""},
    +		{"(*QUICConn).SetTransportParameters", Method, 21, ""},
    +		{"(*QUICConn).Start", Method, 21, ""},
    +		{"(*QUICConn).StoreSession", Method, 23, ""},
    +		{"(*SessionState).Bytes", Method, 21, ""},
    +		{"(AlertError).Error", Method, 21, ""},
    +		{"(ClientAuthType).String", Method, 15, ""},
    +		{"(CurveID).String", Method, 15, ""},
    +		{"(QUICEncryptionLevel).String", Method, 21, ""},
    +		{"(RecordHeaderError).Error", Method, 6, ""},
    +		{"(SignatureScheme).String", Method, 15, ""},
    +		{"AlertError", Type, 21, ""},
    +		{"Certificate", Type, 0, ""},
    +		{"Certificate.Certificate", Field, 0, ""},
    +		{"Certificate.Leaf", Field, 0, ""},
    +		{"Certificate.OCSPStaple", Field, 0, ""},
    +		{"Certificate.PrivateKey", Field, 0, ""},
    +		{"Certificate.SignedCertificateTimestamps", Field, 5, ""},
    +		{"Certificate.SupportedSignatureAlgorithms", Field, 14, ""},
    +		{"CertificateRequestInfo", Type, 8, ""},
    +		{"CertificateRequestInfo.AcceptableCAs", Field, 8, ""},
    +		{"CertificateRequestInfo.SignatureSchemes", Field, 8, ""},
    +		{"CertificateRequestInfo.Version", Field, 14, ""},
    +		{"CertificateVerificationError", Type, 20, ""},
    +		{"CertificateVerificationError.Err", Field, 20, ""},
    +		{"CertificateVerificationError.UnverifiedCertificates", Field, 20, ""},
    +		{"CipherSuite", Type, 14, ""},
    +		{"CipherSuite.ID", Field, 14, ""},
    +		{"CipherSuite.Insecure", Field, 14, ""},
    +		{"CipherSuite.Name", Field, 14, ""},
    +		{"CipherSuite.SupportedVersions", Field, 14, ""},
    +		{"CipherSuiteName", Func, 14, "func(id uint16) string"},
    +		{"CipherSuites", Func, 14, "func() []*CipherSuite"},
    +		{"Client", Func, 0, "func(conn net.Conn, config *Config) *Conn"},
    +		{"ClientAuthType", Type, 0, ""},
    +		{"ClientHelloInfo", Type, 4, ""},
    +		{"ClientHelloInfo.CipherSuites", Field, 4, ""},
    +		{"ClientHelloInfo.Conn", Field, 8, ""},
    +		{"ClientHelloInfo.Extensions", Field, 24, ""},
    +		{"ClientHelloInfo.ServerName", Field, 4, ""},
    +		{"ClientHelloInfo.SignatureSchemes", Field, 8, ""},
    +		{"ClientHelloInfo.SupportedCurves", Field, 4, ""},
    +		{"ClientHelloInfo.SupportedPoints", Field, 4, ""},
    +		{"ClientHelloInfo.SupportedProtos", Field, 8, ""},
    +		{"ClientHelloInfo.SupportedVersions", Field, 8, ""},
    +		{"ClientSessionCache", Type, 3, ""},
    +		{"ClientSessionState", Type, 3, ""},
    +		{"Config", Type, 0, ""},
    +		{"Config.Certificates", Field, 0, ""},
    +		{"Config.CipherSuites", Field, 0, ""},
    +		{"Config.ClientAuth", Field, 0, ""},
    +		{"Config.ClientCAs", Field, 0, ""},
    +		{"Config.ClientSessionCache", Field, 3, ""},
    +		{"Config.CurvePreferences", Field, 3, ""},
    +		{"Config.DynamicRecordSizingDisabled", Field, 7, ""},
    +		{"Config.EncryptedClientHelloConfigList", Field, 23, ""},
    +		{"Config.EncryptedClientHelloKeys", Field, 24, ""},
    +		{"Config.EncryptedClientHelloRejectionVerify", Field, 23, ""},
    +		{"Config.GetCertificate", Field, 4, ""},
    +		{"Config.GetClientCertificate", Field, 8, ""},
    +		{"Config.GetConfigForClient", Field, 8, ""},
    +		{"Config.GetEncryptedClientHelloKeys", Field, 25, ""},
    +		{"Config.InsecureSkipVerify", Field, 0, ""},
    +		{"Config.KeyLogWriter", Field, 8, ""},
    +		{"Config.MaxVersion", Field, 2, ""},
    +		{"Config.MinVersion", Field, 2, ""},
    +		{"Config.NameToCertificate", Field, 0, ""},
    +		{"Config.NextProtos", Field, 0, ""},
    +		{"Config.PreferServerCipherSuites", Field, 1, ""},
    +		{"Config.Rand", Field, 0, ""},
    +		{"Config.Renegotiation", Field, 7, ""},
    +		{"Config.RootCAs", Field, 0, ""},
    +		{"Config.ServerName", Field, 0, ""},
    +		{"Config.SessionTicketKey", Field, 1, ""},
    +		{"Config.SessionTicketsDisabled", Field, 1, ""},
    +		{"Config.Time", Field, 0, ""},
    +		{"Config.UnwrapSession", Field, 21, ""},
    +		{"Config.VerifyConnection", Field, 15, ""},
    +		{"Config.VerifyPeerCertificate", Field, 8, ""},
    +		{"Config.WrapSession", Field, 21, ""},
    +		{"Conn", Type, 0, ""},
    +		{"ConnectionState", Type, 0, ""},
    +		{"ConnectionState.CipherSuite", Field, 0, ""},
    +		{"ConnectionState.CurveID", Field, 25, ""},
    +		{"ConnectionState.DidResume", Field, 1, ""},
    +		{"ConnectionState.ECHAccepted", Field, 23, ""},
    +		{"ConnectionState.HandshakeComplete", Field, 0, ""},
    +		{"ConnectionState.NegotiatedProtocol", Field, 0, ""},
    +		{"ConnectionState.NegotiatedProtocolIsMutual", Field, 0, ""},
    +		{"ConnectionState.OCSPResponse", Field, 5, ""},
    +		{"ConnectionState.PeerCertificates", Field, 0, ""},
    +		{"ConnectionState.ServerName", Field, 0, ""},
    +		{"ConnectionState.SignedCertificateTimestamps", Field, 5, ""},
    +		{"ConnectionState.TLSUnique", Field, 4, ""},
    +		{"ConnectionState.VerifiedChains", Field, 0, ""},
    +		{"ConnectionState.Version", Field, 3, ""},
    +		{"CurveID", Type, 3, ""},
    +		{"CurveP256", Const, 3, ""},
    +		{"CurveP384", Const, 3, ""},
    +		{"CurveP521", Const, 3, ""},
    +		{"Dial", Func, 0, "func(network string, addr string, config *Config) (*Conn, error)"},
    +		{"DialWithDialer", Func, 3, "func(dialer *net.Dialer, network string, addr string, config *Config) (*Conn, error)"},
    +		{"Dialer", Type, 15, ""},
    +		{"Dialer.Config", Field, 15, ""},
    +		{"Dialer.NetDialer", Field, 15, ""},
    +		{"ECDSAWithP256AndSHA256", Const, 8, ""},
    +		{"ECDSAWithP384AndSHA384", Const, 8, ""},
    +		{"ECDSAWithP521AndSHA512", Const, 8, ""},
    +		{"ECDSAWithSHA1", Const, 10, ""},
    +		{"ECHRejectionError", Type, 23, ""},
    +		{"ECHRejectionError.RetryConfigList", Field, 23, ""},
    +		{"Ed25519", Const, 13, ""},
    +		{"EncryptedClientHelloKey", Type, 24, ""},
    +		{"EncryptedClientHelloKey.Config", Field, 24, ""},
    +		{"EncryptedClientHelloKey.PrivateKey", Field, 24, ""},
    +		{"EncryptedClientHelloKey.SendAsRetry", Field, 24, ""},
    +		{"InsecureCipherSuites", Func, 14, "func() []*CipherSuite"},
    +		{"Listen", Func, 0, "func(network string, laddr string, config *Config) (net.Listener, error)"},
    +		{"LoadX509KeyPair", Func, 0, "func(certFile string, keyFile string) (Certificate, error)"},
    +		{"NewLRUClientSessionCache", Func, 3, "func(capacity int) ClientSessionCache"},
    +		{"NewListener", Func, 0, "func(inner net.Listener, config *Config) net.Listener"},
    +		{"NewResumptionState", Func, 21, "func(ticket []byte, state *SessionState) (*ClientSessionState, error)"},
    +		{"NoClientCert", Const, 0, ""},
    +		{"PKCS1WithSHA1", Const, 8, ""},
    +		{"PKCS1WithSHA256", Const, 8, ""},
    +		{"PKCS1WithSHA384", Const, 8, ""},
    +		{"PKCS1WithSHA512", Const, 8, ""},
    +		{"PSSWithSHA256", Const, 8, ""},
    +		{"PSSWithSHA384", Const, 8, ""},
    +		{"PSSWithSHA512", Const, 8, ""},
    +		{"ParseSessionState", Func, 21, "func(data []byte) (*SessionState, error)"},
    +		{"QUICClient", Func, 21, "func(config *QUICConfig) *QUICConn"},
    +		{"QUICConfig", Type, 21, ""},
    +		{"QUICConfig.EnableSessionEvents", Field, 23, ""},
    +		{"QUICConfig.TLSConfig", Field, 21, ""},
    +		{"QUICConn", Type, 21, ""},
    +		{"QUICEncryptionLevel", Type, 21, ""},
    +		{"QUICEncryptionLevelApplication", Const, 21, ""},
    +		{"QUICEncryptionLevelEarly", Const, 21, ""},
    +		{"QUICEncryptionLevelHandshake", Const, 21, ""},
    +		{"QUICEncryptionLevelInitial", Const, 21, ""},
    +		{"QUICEvent", Type, 21, ""},
    +		{"QUICEvent.Data", Field, 21, ""},
    +		{"QUICEvent.Kind", Field, 21, ""},
    +		{"QUICEvent.Level", Field, 21, ""},
    +		{"QUICEvent.SessionState", Field, 23, ""},
    +		{"QUICEvent.Suite", Field, 21, ""},
    +		{"QUICEventKind", Type, 21, ""},
    +		{"QUICHandshakeDone", Const, 21, ""},
    +		{"QUICNoEvent", Const, 21, ""},
    +		{"QUICRejectedEarlyData", Const, 21, ""},
    +		{"QUICResumeSession", Const, 23, ""},
    +		{"QUICServer", Func, 21, "func(config *QUICConfig) *QUICConn"},
    +		{"QUICSessionTicketOptions", Type, 21, ""},
    +		{"QUICSessionTicketOptions.EarlyData", Field, 21, ""},
    +		{"QUICSessionTicketOptions.Extra", Field, 23, ""},
    +		{"QUICSetReadSecret", Const, 21, ""},
    +		{"QUICSetWriteSecret", Const, 21, ""},
    +		{"QUICStoreSession", Const, 23, ""},
    +		{"QUICTransportParameters", Const, 21, ""},
    +		{"QUICTransportParametersRequired", Const, 21, ""},
    +		{"QUICWriteData", Const, 21, ""},
    +		{"RecordHeaderError", Type, 6, ""},
    +		{"RecordHeaderError.Conn", Field, 12, ""},
    +		{"RecordHeaderError.Msg", Field, 6, ""},
    +		{"RecordHeaderError.RecordHeader", Field, 6, ""},
    +		{"RenegotiateFreelyAsClient", Const, 7, ""},
    +		{"RenegotiateNever", Const, 7, ""},
    +		{"RenegotiateOnceAsClient", Const, 7, ""},
    +		{"RenegotiationSupport", Type, 7, ""},
    +		{"RequestClientCert", Const, 0, ""},
    +		{"RequireAndVerifyClientCert", Const, 0, ""},
    +		{"RequireAnyClientCert", Const, 0, ""},
    +		{"Server", Func, 0, "func(conn net.Conn, config *Config) *Conn"},
    +		{"SessionState", Type, 21, ""},
    +		{"SessionState.EarlyData", Field, 21, ""},
    +		{"SessionState.Extra", Field, 21, ""},
    +		{"SignatureScheme", Type, 8, ""},
    +		{"TLS_AES_128_GCM_SHA256", Const, 12, ""},
    +		{"TLS_AES_256_GCM_SHA384", Const, 12, ""},
    +		{"TLS_CHACHA20_POLY1305_SHA256", Const, 12, ""},
    +		{"TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA", Const, 2, ""},
    +		{"TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256", Const, 8, ""},
    +		{"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", Const, 2, ""},
    +		{"TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA", Const, 2, ""},
    +		{"TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", Const, 5, ""},
    +		{"TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305", Const, 8, ""},
    +		{"TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256", Const, 14, ""},
    +		{"TLS_ECDHE_ECDSA_WITH_RC4_128_SHA", Const, 2, ""},
    +		{"TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA", Const, 0, ""},
    +		{"TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA", Const, 0, ""},
    +		{"TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256", Const, 8, ""},
    +		{"TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", Const, 2, ""},
    +		{"TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA", Const, 1, ""},
    +		{"TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", Const, 5, ""},
    +		{"TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305", Const, 8, ""},
    +		{"TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256", Const, 14, ""},
    +		{"TLS_ECDHE_RSA_WITH_RC4_128_SHA", Const, 0, ""},
    +		{"TLS_FALLBACK_SCSV", Const, 4, ""},
    +		{"TLS_RSA_WITH_3DES_EDE_CBC_SHA", Const, 0, ""},
    +		{"TLS_RSA_WITH_AES_128_CBC_SHA", Const, 0, ""},
    +		{"TLS_RSA_WITH_AES_128_CBC_SHA256", Const, 8, ""},
    +		{"TLS_RSA_WITH_AES_128_GCM_SHA256", Const, 6, ""},
    +		{"TLS_RSA_WITH_AES_256_CBC_SHA", Const, 1, ""},
    +		{"TLS_RSA_WITH_AES_256_GCM_SHA384", Const, 6, ""},
    +		{"TLS_RSA_WITH_RC4_128_SHA", Const, 0, ""},
    +		{"VerifyClientCertIfGiven", Const, 0, ""},
    +		{"VersionName", Func, 21, "func(version uint16) string"},
    +		{"VersionSSL30", Const, 2, ""},
    +		{"VersionTLS10", Const, 2, ""},
    +		{"VersionTLS11", Const, 2, ""},
    +		{"VersionTLS12", Const, 2, ""},
    +		{"VersionTLS13", Const, 12, ""},
    +		{"X25519", Const, 8, ""},
    +		{"X25519MLKEM768", Const, 24, ""},
    +		{"X509KeyPair", Func, 0, "func(certPEMBlock []byte, keyPEMBlock []byte) (Certificate, error)"},
     	},
     	"crypto/x509": {
    -		{"(*CertPool).AddCert", Method, 0},
    -		{"(*CertPool).AddCertWithConstraint", Method, 22},
    -		{"(*CertPool).AppendCertsFromPEM", Method, 0},
    -		{"(*CertPool).Clone", Method, 19},
    -		{"(*CertPool).Equal", Method, 19},
    -		{"(*CertPool).Subjects", Method, 0},
    -		{"(*Certificate).CheckCRLSignature", Method, 0},
    -		{"(*Certificate).CheckSignature", Method, 0},
    -		{"(*Certificate).CheckSignatureFrom", Method, 0},
    -		{"(*Certificate).CreateCRL", Method, 0},
    -		{"(*Certificate).Equal", Method, 0},
    -		{"(*Certificate).Verify", Method, 0},
    -		{"(*Certificate).VerifyHostname", Method, 0},
    -		{"(*CertificateRequest).CheckSignature", Method, 5},
    -		{"(*OID).UnmarshalBinary", Method, 23},
    -		{"(*OID).UnmarshalText", Method, 23},
    -		{"(*RevocationList).CheckSignatureFrom", Method, 19},
    -		{"(CertificateInvalidError).Error", Method, 0},
    -		{"(ConstraintViolationError).Error", Method, 0},
    -		{"(HostnameError).Error", Method, 0},
    -		{"(InsecureAlgorithmError).Error", Method, 6},
    -		{"(OID).Equal", Method, 22},
    -		{"(OID).EqualASN1OID", Method, 22},
    -		{"(OID).MarshalBinary", Method, 23},
    -		{"(OID).MarshalText", Method, 23},
    -		{"(OID).String", Method, 22},
    -		{"(PublicKeyAlgorithm).String", Method, 10},
    -		{"(SignatureAlgorithm).String", Method, 6},
    -		{"(SystemRootsError).Error", Method, 1},
    -		{"(SystemRootsError).Unwrap", Method, 16},
    -		{"(UnhandledCriticalExtension).Error", Method, 0},
    -		{"(UnknownAuthorityError).Error", Method, 0},
    -		{"CANotAuthorizedForExtKeyUsage", Const, 10},
    -		{"CANotAuthorizedForThisName", Const, 0},
    -		{"CertPool", Type, 0},
    -		{"Certificate", Type, 0},
    -		{"Certificate.AuthorityKeyId", Field, 0},
    -		{"Certificate.BasicConstraintsValid", Field, 0},
    -		{"Certificate.CRLDistributionPoints", Field, 2},
    -		{"Certificate.DNSNames", Field, 0},
    -		{"Certificate.EmailAddresses", Field, 0},
    -		{"Certificate.ExcludedDNSDomains", Field, 9},
    -		{"Certificate.ExcludedEmailAddresses", Field, 10},
    -		{"Certificate.ExcludedIPRanges", Field, 10},
    -		{"Certificate.ExcludedURIDomains", Field, 10},
    -		{"Certificate.ExtKeyUsage", Field, 0},
    -		{"Certificate.Extensions", Field, 2},
    -		{"Certificate.ExtraExtensions", Field, 2},
    -		{"Certificate.IPAddresses", Field, 1},
    -		{"Certificate.IsCA", Field, 0},
    -		{"Certificate.Issuer", Field, 0},
    -		{"Certificate.IssuingCertificateURL", Field, 2},
    -		{"Certificate.KeyUsage", Field, 0},
    -		{"Certificate.MaxPathLen", Field, 0},
    -		{"Certificate.MaxPathLenZero", Field, 4},
    -		{"Certificate.NotAfter", Field, 0},
    -		{"Certificate.NotBefore", Field, 0},
    -		{"Certificate.OCSPServer", Field, 2},
    -		{"Certificate.PermittedDNSDomains", Field, 0},
    -		{"Certificate.PermittedDNSDomainsCritical", Field, 0},
    -		{"Certificate.PermittedEmailAddresses", Field, 10},
    -		{"Certificate.PermittedIPRanges", Field, 10},
    -		{"Certificate.PermittedURIDomains", Field, 10},
    -		{"Certificate.Policies", Field, 22},
    -		{"Certificate.PolicyIdentifiers", Field, 0},
    -		{"Certificate.PublicKey", Field, 0},
    -		{"Certificate.PublicKeyAlgorithm", Field, 0},
    -		{"Certificate.Raw", Field, 0},
    -		{"Certificate.RawIssuer", Field, 0},
    -		{"Certificate.RawSubject", Field, 0},
    -		{"Certificate.RawSubjectPublicKeyInfo", Field, 0},
    -		{"Certificate.RawTBSCertificate", Field, 0},
    -		{"Certificate.SerialNumber", Field, 0},
    -		{"Certificate.Signature", Field, 0},
    -		{"Certificate.SignatureAlgorithm", Field, 0},
    -		{"Certificate.Subject", Field, 0},
    -		{"Certificate.SubjectKeyId", Field, 0},
    -		{"Certificate.URIs", Field, 10},
    -		{"Certificate.UnhandledCriticalExtensions", Field, 5},
    -		{"Certificate.UnknownExtKeyUsage", Field, 0},
    -		{"Certificate.Version", Field, 0},
    -		{"CertificateInvalidError", Type, 0},
    -		{"CertificateInvalidError.Cert", Field, 0},
    -		{"CertificateInvalidError.Detail", Field, 10},
    -		{"CertificateInvalidError.Reason", Field, 0},
    -		{"CertificateRequest", Type, 3},
    -		{"CertificateRequest.Attributes", Field, 3},
    -		{"CertificateRequest.DNSNames", Field, 3},
    -		{"CertificateRequest.EmailAddresses", Field, 3},
    -		{"CertificateRequest.Extensions", Field, 3},
    -		{"CertificateRequest.ExtraExtensions", Field, 3},
    -		{"CertificateRequest.IPAddresses", Field, 3},
    -		{"CertificateRequest.PublicKey", Field, 3},
    -		{"CertificateRequest.PublicKeyAlgorithm", Field, 3},
    -		{"CertificateRequest.Raw", Field, 3},
    -		{"CertificateRequest.RawSubject", Field, 3},
    -		{"CertificateRequest.RawSubjectPublicKeyInfo", Field, 3},
    -		{"CertificateRequest.RawTBSCertificateRequest", Field, 3},
    -		{"CertificateRequest.Signature", Field, 3},
    -		{"CertificateRequest.SignatureAlgorithm", Field, 3},
    -		{"CertificateRequest.Subject", Field, 3},
    -		{"CertificateRequest.URIs", Field, 10},
    -		{"CertificateRequest.Version", Field, 3},
    -		{"ConstraintViolationError", Type, 0},
    -		{"CreateCertificate", Func, 0},
    -		{"CreateCertificateRequest", Func, 3},
    -		{"CreateRevocationList", Func, 15},
    -		{"DSA", Const, 0},
    -		{"DSAWithSHA1", Const, 0},
    -		{"DSAWithSHA256", Const, 0},
    -		{"DecryptPEMBlock", Func, 1},
    -		{"ECDSA", Const, 1},
    -		{"ECDSAWithSHA1", Const, 1},
    -		{"ECDSAWithSHA256", Const, 1},
    -		{"ECDSAWithSHA384", Const, 1},
    -		{"ECDSAWithSHA512", Const, 1},
    -		{"Ed25519", Const, 13},
    -		{"EncryptPEMBlock", Func, 1},
    -		{"ErrUnsupportedAlgorithm", Var, 0},
    -		{"Expired", Const, 0},
    -		{"ExtKeyUsage", Type, 0},
    -		{"ExtKeyUsageAny", Const, 0},
    -		{"ExtKeyUsageClientAuth", Const, 0},
    -		{"ExtKeyUsageCodeSigning", Const, 0},
    -		{"ExtKeyUsageEmailProtection", Const, 0},
    -		{"ExtKeyUsageIPSECEndSystem", Const, 1},
    -		{"ExtKeyUsageIPSECTunnel", Const, 1},
    -		{"ExtKeyUsageIPSECUser", Const, 1},
    -		{"ExtKeyUsageMicrosoftCommercialCodeSigning", Const, 10},
    -		{"ExtKeyUsageMicrosoftKernelCodeSigning", Const, 10},
    -		{"ExtKeyUsageMicrosoftServerGatedCrypto", Const, 1},
    -		{"ExtKeyUsageNetscapeServerGatedCrypto", Const, 1},
    -		{"ExtKeyUsageOCSPSigning", Const, 0},
    -		{"ExtKeyUsageServerAuth", Const, 0},
    -		{"ExtKeyUsageTimeStamping", Const, 0},
    -		{"HostnameError", Type, 0},
    -		{"HostnameError.Certificate", Field, 0},
    -		{"HostnameError.Host", Field, 0},
    -		{"IncompatibleUsage", Const, 1},
    -		{"IncorrectPasswordError", Var, 1},
    -		{"InsecureAlgorithmError", Type, 6},
    -		{"InvalidReason", Type, 0},
    -		{"IsEncryptedPEMBlock", Func, 1},
    -		{"KeyUsage", Type, 0},
    -		{"KeyUsageCRLSign", Const, 0},
    -		{"KeyUsageCertSign", Const, 0},
    -		{"KeyUsageContentCommitment", Const, 0},
    -		{"KeyUsageDataEncipherment", Const, 0},
    -		{"KeyUsageDecipherOnly", Const, 0},
    -		{"KeyUsageDigitalSignature", Const, 0},
    -		{"KeyUsageEncipherOnly", Const, 0},
    -		{"KeyUsageKeyAgreement", Const, 0},
    -		{"KeyUsageKeyEncipherment", Const, 0},
    -		{"MD2WithRSA", Const, 0},
    -		{"MD5WithRSA", Const, 0},
    -		{"MarshalECPrivateKey", Func, 2},
    -		{"MarshalPKCS1PrivateKey", Func, 0},
    -		{"MarshalPKCS1PublicKey", Func, 10},
    -		{"MarshalPKCS8PrivateKey", Func, 10},
    -		{"MarshalPKIXPublicKey", Func, 0},
    -		{"NameConstraintsWithoutSANs", Const, 10},
    -		{"NameMismatch", Const, 8},
    -		{"NewCertPool", Func, 0},
    -		{"NotAuthorizedToSign", Const, 0},
    -		{"OID", Type, 22},
    -		{"OIDFromInts", Func, 22},
    -		{"PEMCipher", Type, 1},
    -		{"PEMCipher3DES", Const, 1},
    -		{"PEMCipherAES128", Const, 1},
    -		{"PEMCipherAES192", Const, 1},
    -		{"PEMCipherAES256", Const, 1},
    -		{"PEMCipherDES", Const, 1},
    -		{"ParseCRL", Func, 0},
    -		{"ParseCertificate", Func, 0},
    -		{"ParseCertificateRequest", Func, 3},
    -		{"ParseCertificates", Func, 0},
    -		{"ParseDERCRL", Func, 0},
    -		{"ParseECPrivateKey", Func, 1},
    -		{"ParseOID", Func, 23},
    -		{"ParsePKCS1PrivateKey", Func, 0},
    -		{"ParsePKCS1PublicKey", Func, 10},
    -		{"ParsePKCS8PrivateKey", Func, 0},
    -		{"ParsePKIXPublicKey", Func, 0},
    -		{"ParseRevocationList", Func, 19},
    -		{"PublicKeyAlgorithm", Type, 0},
    -		{"PureEd25519", Const, 13},
    -		{"RSA", Const, 0},
    -		{"RevocationList", Type, 15},
    -		{"RevocationList.AuthorityKeyId", Field, 19},
    -		{"RevocationList.Extensions", Field, 19},
    -		{"RevocationList.ExtraExtensions", Field, 15},
    -		{"RevocationList.Issuer", Field, 19},
    -		{"RevocationList.NextUpdate", Field, 15},
    -		{"RevocationList.Number", Field, 15},
    -		{"RevocationList.Raw", Field, 19},
    -		{"RevocationList.RawIssuer", Field, 19},
    -		{"RevocationList.RawTBSRevocationList", Field, 19},
    -		{"RevocationList.RevokedCertificateEntries", Field, 21},
    -		{"RevocationList.RevokedCertificates", Field, 15},
    -		{"RevocationList.Signature", Field, 19},
    -		{"RevocationList.SignatureAlgorithm", Field, 15},
    -		{"RevocationList.ThisUpdate", Field, 15},
    -		{"RevocationListEntry", Type, 21},
    -		{"RevocationListEntry.Extensions", Field, 21},
    -		{"RevocationListEntry.ExtraExtensions", Field, 21},
    -		{"RevocationListEntry.Raw", Field, 21},
    -		{"RevocationListEntry.ReasonCode", Field, 21},
    -		{"RevocationListEntry.RevocationTime", Field, 21},
    -		{"RevocationListEntry.SerialNumber", Field, 21},
    -		{"SHA1WithRSA", Const, 0},
    -		{"SHA256WithRSA", Const, 0},
    -		{"SHA256WithRSAPSS", Const, 8},
    -		{"SHA384WithRSA", Const, 0},
    -		{"SHA384WithRSAPSS", Const, 8},
    -		{"SHA512WithRSA", Const, 0},
    -		{"SHA512WithRSAPSS", Const, 8},
    -		{"SetFallbackRoots", Func, 20},
    -		{"SignatureAlgorithm", Type, 0},
    -		{"SystemCertPool", Func, 7},
    -		{"SystemRootsError", Type, 1},
    -		{"SystemRootsError.Err", Field, 7},
    -		{"TooManyConstraints", Const, 10},
    -		{"TooManyIntermediates", Const, 0},
    -		{"UnconstrainedName", Const, 10},
    -		{"UnhandledCriticalExtension", Type, 0},
    -		{"UnknownAuthorityError", Type, 0},
    -		{"UnknownAuthorityError.Cert", Field, 8},
    -		{"UnknownPublicKeyAlgorithm", Const, 0},
    -		{"UnknownSignatureAlgorithm", Const, 0},
    -		{"VerifyOptions", Type, 0},
    -		{"VerifyOptions.CurrentTime", Field, 0},
    -		{"VerifyOptions.DNSName", Field, 0},
    -		{"VerifyOptions.Intermediates", Field, 0},
    -		{"VerifyOptions.KeyUsages", Field, 1},
    -		{"VerifyOptions.MaxConstraintComparisions", Field, 10},
    -		{"VerifyOptions.Roots", Field, 0},
    +		{"(*CertPool).AddCert", Method, 0, ""},
    +		{"(*CertPool).AddCertWithConstraint", Method, 22, ""},
    +		{"(*CertPool).AppendCertsFromPEM", Method, 0, ""},
    +		{"(*CertPool).Clone", Method, 19, ""},
    +		{"(*CertPool).Equal", Method, 19, ""},
    +		{"(*CertPool).Subjects", Method, 0, ""},
    +		{"(*Certificate).CheckCRLSignature", Method, 0, ""},
    +		{"(*Certificate).CheckSignature", Method, 0, ""},
    +		{"(*Certificate).CheckSignatureFrom", Method, 0, ""},
    +		{"(*Certificate).CreateCRL", Method, 0, ""},
    +		{"(*Certificate).Equal", Method, 0, ""},
    +		{"(*Certificate).Verify", Method, 0, ""},
    +		{"(*Certificate).VerifyHostname", Method, 0, ""},
    +		{"(*CertificateRequest).CheckSignature", Method, 5, ""},
    +		{"(*OID).UnmarshalBinary", Method, 23, ""},
    +		{"(*OID).UnmarshalText", Method, 23, ""},
    +		{"(*RevocationList).CheckSignatureFrom", Method, 19, ""},
    +		{"(CertificateInvalidError).Error", Method, 0, ""},
    +		{"(ConstraintViolationError).Error", Method, 0, ""},
    +		{"(HostnameError).Error", Method, 0, ""},
    +		{"(InsecureAlgorithmError).Error", Method, 6, ""},
    +		{"(OID).AppendBinary", Method, 24, ""},
    +		{"(OID).AppendText", Method, 24, ""},
    +		{"(OID).Equal", Method, 22, ""},
    +		{"(OID).EqualASN1OID", Method, 22, ""},
    +		{"(OID).MarshalBinary", Method, 23, ""},
    +		{"(OID).MarshalText", Method, 23, ""},
    +		{"(OID).String", Method, 22, ""},
    +		{"(PublicKeyAlgorithm).String", Method, 10, ""},
    +		{"(SignatureAlgorithm).String", Method, 6, ""},
    +		{"(SystemRootsError).Error", Method, 1, ""},
    +		{"(SystemRootsError).Unwrap", Method, 16, ""},
    +		{"(UnhandledCriticalExtension).Error", Method, 0, ""},
    +		{"(UnknownAuthorityError).Error", Method, 0, ""},
    +		{"CANotAuthorizedForExtKeyUsage", Const, 10, ""},
    +		{"CANotAuthorizedForThisName", Const, 0, ""},
    +		{"CertPool", Type, 0, ""},
    +		{"Certificate", Type, 0, ""},
    +		{"Certificate.AuthorityKeyId", Field, 0, ""},
    +		{"Certificate.BasicConstraintsValid", Field, 0, ""},
    +		{"Certificate.CRLDistributionPoints", Field, 2, ""},
    +		{"Certificate.DNSNames", Field, 0, ""},
    +		{"Certificate.EmailAddresses", Field, 0, ""},
    +		{"Certificate.ExcludedDNSDomains", Field, 9, ""},
    +		{"Certificate.ExcludedEmailAddresses", Field, 10, ""},
    +		{"Certificate.ExcludedIPRanges", Field, 10, ""},
    +		{"Certificate.ExcludedURIDomains", Field, 10, ""},
    +		{"Certificate.ExtKeyUsage", Field, 0, ""},
    +		{"Certificate.Extensions", Field, 2, ""},
    +		{"Certificate.ExtraExtensions", Field, 2, ""},
    +		{"Certificate.IPAddresses", Field, 1, ""},
    +		{"Certificate.InhibitAnyPolicy", Field, 24, ""},
    +		{"Certificate.InhibitAnyPolicyZero", Field, 24, ""},
    +		{"Certificate.InhibitPolicyMapping", Field, 24, ""},
    +		{"Certificate.InhibitPolicyMappingZero", Field, 24, ""},
    +		{"Certificate.IsCA", Field, 0, ""},
    +		{"Certificate.Issuer", Field, 0, ""},
    +		{"Certificate.IssuingCertificateURL", Field, 2, ""},
    +		{"Certificate.KeyUsage", Field, 0, ""},
    +		{"Certificate.MaxPathLen", Field, 0, ""},
    +		{"Certificate.MaxPathLenZero", Field, 4, ""},
    +		{"Certificate.NotAfter", Field, 0, ""},
    +		{"Certificate.NotBefore", Field, 0, ""},
    +		{"Certificate.OCSPServer", Field, 2, ""},
    +		{"Certificate.PermittedDNSDomains", Field, 0, ""},
    +		{"Certificate.PermittedDNSDomainsCritical", Field, 0, ""},
    +		{"Certificate.PermittedEmailAddresses", Field, 10, ""},
    +		{"Certificate.PermittedIPRanges", Field, 10, ""},
    +		{"Certificate.PermittedURIDomains", Field, 10, ""},
    +		{"Certificate.Policies", Field, 22, ""},
    +		{"Certificate.PolicyIdentifiers", Field, 0, ""},
    +		{"Certificate.PolicyMappings", Field, 24, ""},
    +		{"Certificate.PublicKey", Field, 0, ""},
    +		{"Certificate.PublicKeyAlgorithm", Field, 0, ""},
    +		{"Certificate.Raw", Field, 0, ""},
    +		{"Certificate.RawIssuer", Field, 0, ""},
    +		{"Certificate.RawSubject", Field, 0, ""},
    +		{"Certificate.RawSubjectPublicKeyInfo", Field, 0, ""},
    +		{"Certificate.RawTBSCertificate", Field, 0, ""},
    +		{"Certificate.RequireExplicitPolicy", Field, 24, ""},
    +		{"Certificate.RequireExplicitPolicyZero", Field, 24, ""},
    +		{"Certificate.SerialNumber", Field, 0, ""},
    +		{"Certificate.Signature", Field, 0, ""},
    +		{"Certificate.SignatureAlgorithm", Field, 0, ""},
    +		{"Certificate.Subject", Field, 0, ""},
    +		{"Certificate.SubjectKeyId", Field, 0, ""},
    +		{"Certificate.URIs", Field, 10, ""},
    +		{"Certificate.UnhandledCriticalExtensions", Field, 5, ""},
    +		{"Certificate.UnknownExtKeyUsage", Field, 0, ""},
    +		{"Certificate.Version", Field, 0, ""},
    +		{"CertificateInvalidError", Type, 0, ""},
    +		{"CertificateInvalidError.Cert", Field, 0, ""},
    +		{"CertificateInvalidError.Detail", Field, 10, ""},
    +		{"CertificateInvalidError.Reason", Field, 0, ""},
    +		{"CertificateRequest", Type, 3, ""},
    +		{"CertificateRequest.Attributes", Field, 3, ""},
    +		{"CertificateRequest.DNSNames", Field, 3, ""},
    +		{"CertificateRequest.EmailAddresses", Field, 3, ""},
    +		{"CertificateRequest.Extensions", Field, 3, ""},
    +		{"CertificateRequest.ExtraExtensions", Field, 3, ""},
    +		{"CertificateRequest.IPAddresses", Field, 3, ""},
    +		{"CertificateRequest.PublicKey", Field, 3, ""},
    +		{"CertificateRequest.PublicKeyAlgorithm", Field, 3, ""},
    +		{"CertificateRequest.Raw", Field, 3, ""},
    +		{"CertificateRequest.RawSubject", Field, 3, ""},
    +		{"CertificateRequest.RawSubjectPublicKeyInfo", Field, 3, ""},
    +		{"CertificateRequest.RawTBSCertificateRequest", Field, 3, ""},
    +		{"CertificateRequest.Signature", Field, 3, ""},
    +		{"CertificateRequest.SignatureAlgorithm", Field, 3, ""},
    +		{"CertificateRequest.Subject", Field, 3, ""},
    +		{"CertificateRequest.URIs", Field, 10, ""},
    +		{"CertificateRequest.Version", Field, 3, ""},
    +		{"ConstraintViolationError", Type, 0, ""},
    +		{"CreateCertificate", Func, 0, "func(rand io.Reader, template *Certificate, parent *Certificate, pub any, priv any) ([]byte, error)"},
    +		{"CreateCertificateRequest", Func, 3, "func(rand io.Reader, template *CertificateRequest, priv any) (csr []byte, err error)"},
    +		{"CreateRevocationList", Func, 15, "func(rand io.Reader, template *RevocationList, issuer *Certificate, priv crypto.Signer) ([]byte, error)"},
    +		{"DSA", Const, 0, ""},
    +		{"DSAWithSHA1", Const, 0, ""},
    +		{"DSAWithSHA256", Const, 0, ""},
    +		{"DecryptPEMBlock", Func, 1, "func(b *pem.Block, password []byte) ([]byte, error)"},
    +		{"ECDSA", Const, 1, ""},
    +		{"ECDSAWithSHA1", Const, 1, ""},
    +		{"ECDSAWithSHA256", Const, 1, ""},
    +		{"ECDSAWithSHA384", Const, 1, ""},
    +		{"ECDSAWithSHA512", Const, 1, ""},
    +		{"Ed25519", Const, 13, ""},
    +		{"EncryptPEMBlock", Func, 1, "func(rand io.Reader, blockType string, data []byte, password []byte, alg PEMCipher) (*pem.Block, error)"},
    +		{"ErrUnsupportedAlgorithm", Var, 0, ""},
    +		{"Expired", Const, 0, ""},
    +		{"ExtKeyUsage", Type, 0, ""},
    +		{"ExtKeyUsageAny", Const, 0, ""},
    +		{"ExtKeyUsageClientAuth", Const, 0, ""},
    +		{"ExtKeyUsageCodeSigning", Const, 0, ""},
    +		{"ExtKeyUsageEmailProtection", Const, 0, ""},
    +		{"ExtKeyUsageIPSECEndSystem", Const, 1, ""},
    +		{"ExtKeyUsageIPSECTunnel", Const, 1, ""},
    +		{"ExtKeyUsageIPSECUser", Const, 1, ""},
    +		{"ExtKeyUsageMicrosoftCommercialCodeSigning", Const, 10, ""},
    +		{"ExtKeyUsageMicrosoftKernelCodeSigning", Const, 10, ""},
    +		{"ExtKeyUsageMicrosoftServerGatedCrypto", Const, 1, ""},
    +		{"ExtKeyUsageNetscapeServerGatedCrypto", Const, 1, ""},
    +		{"ExtKeyUsageOCSPSigning", Const, 0, ""},
    +		{"ExtKeyUsageServerAuth", Const, 0, ""},
    +		{"ExtKeyUsageTimeStamping", Const, 0, ""},
    +		{"HostnameError", Type, 0, ""},
    +		{"HostnameError.Certificate", Field, 0, ""},
    +		{"HostnameError.Host", Field, 0, ""},
    +		{"IncompatibleUsage", Const, 1, ""},
    +		{"IncorrectPasswordError", Var, 1, ""},
    +		{"InsecureAlgorithmError", Type, 6, ""},
    +		{"InvalidReason", Type, 0, ""},
    +		{"IsEncryptedPEMBlock", Func, 1, "func(b *pem.Block) bool"},
    +		{"KeyUsage", Type, 0, ""},
    +		{"KeyUsageCRLSign", Const, 0, ""},
    +		{"KeyUsageCertSign", Const, 0, ""},
    +		{"KeyUsageContentCommitment", Const, 0, ""},
    +		{"KeyUsageDataEncipherment", Const, 0, ""},
    +		{"KeyUsageDecipherOnly", Const, 0, ""},
    +		{"KeyUsageDigitalSignature", Const, 0, ""},
    +		{"KeyUsageEncipherOnly", Const, 0, ""},
    +		{"KeyUsageKeyAgreement", Const, 0, ""},
    +		{"KeyUsageKeyEncipherment", Const, 0, ""},
    +		{"MD2WithRSA", Const, 0, ""},
    +		{"MD5WithRSA", Const, 0, ""},
    +		{"MarshalECPrivateKey", Func, 2, "func(key *ecdsa.PrivateKey) ([]byte, error)"},
    +		{"MarshalPKCS1PrivateKey", Func, 0, "func(key *rsa.PrivateKey) []byte"},
    +		{"MarshalPKCS1PublicKey", Func, 10, "func(key *rsa.PublicKey) []byte"},
    +		{"MarshalPKCS8PrivateKey", Func, 10, "func(key any) ([]byte, error)"},
    +		{"MarshalPKIXPublicKey", Func, 0, "func(pub any) ([]byte, error)"},
    +		{"NameConstraintsWithoutSANs", Const, 10, ""},
    +		{"NameMismatch", Const, 8, ""},
    +		{"NewCertPool", Func, 0, "func() *CertPool"},
    +		{"NoValidChains", Const, 24, ""},
    +		{"NotAuthorizedToSign", Const, 0, ""},
    +		{"OID", Type, 22, ""},
    +		{"OIDFromInts", Func, 22, "func(oid []uint64) (OID, error)"},
    +		{"PEMCipher", Type, 1, ""},
    +		{"PEMCipher3DES", Const, 1, ""},
    +		{"PEMCipherAES128", Const, 1, ""},
    +		{"PEMCipherAES192", Const, 1, ""},
    +		{"PEMCipherAES256", Const, 1, ""},
    +		{"PEMCipherDES", Const, 1, ""},
    +		{"ParseCRL", Func, 0, "func(crlBytes []byte) (*pkix.CertificateList, error)"},
    +		{"ParseCertificate", Func, 0, "func(der []byte) (*Certificate, error)"},
    +		{"ParseCertificateRequest", Func, 3, "func(asn1Data []byte) (*CertificateRequest, error)"},
    +		{"ParseCertificates", Func, 0, "func(der []byte) ([]*Certificate, error)"},
    +		{"ParseDERCRL", Func, 0, "func(derBytes []byte) (*pkix.CertificateList, error)"},
    +		{"ParseECPrivateKey", Func, 1, "func(der []byte) (*ecdsa.PrivateKey, error)"},
    +		{"ParseOID", Func, 23, "func(oid string) (OID, error)"},
    +		{"ParsePKCS1PrivateKey", Func, 0, "func(der []byte) (*rsa.PrivateKey, error)"},
    +		{"ParsePKCS1PublicKey", Func, 10, "func(der []byte) (*rsa.PublicKey, error)"},
    +		{"ParsePKCS8PrivateKey", Func, 0, "func(der []byte) (key any, err error)"},
    +		{"ParsePKIXPublicKey", Func, 0, "func(derBytes []byte) (pub any, err error)"},
    +		{"ParseRevocationList", Func, 19, "func(der []byte) (*RevocationList, error)"},
    +		{"PolicyMapping", Type, 24, ""},
    +		{"PolicyMapping.IssuerDomainPolicy", Field, 24, ""},
    +		{"PolicyMapping.SubjectDomainPolicy", Field, 24, ""},
    +		{"PublicKeyAlgorithm", Type, 0, ""},
    +		{"PureEd25519", Const, 13, ""},
    +		{"RSA", Const, 0, ""},
    +		{"RevocationList", Type, 15, ""},
    +		{"RevocationList.AuthorityKeyId", Field, 19, ""},
    +		{"RevocationList.Extensions", Field, 19, ""},
    +		{"RevocationList.ExtraExtensions", Field, 15, ""},
    +		{"RevocationList.Issuer", Field, 19, ""},
    +		{"RevocationList.NextUpdate", Field, 15, ""},
    +		{"RevocationList.Number", Field, 15, ""},
    +		{"RevocationList.Raw", Field, 19, ""},
    +		{"RevocationList.RawIssuer", Field, 19, ""},
    +		{"RevocationList.RawTBSRevocationList", Field, 19, ""},
    +		{"RevocationList.RevokedCertificateEntries", Field, 21, ""},
    +		{"RevocationList.RevokedCertificates", Field, 15, ""},
    +		{"RevocationList.Signature", Field, 19, ""},
    +		{"RevocationList.SignatureAlgorithm", Field, 15, ""},
    +		{"RevocationList.ThisUpdate", Field, 15, ""},
    +		{"RevocationListEntry", Type, 21, ""},
    +		{"RevocationListEntry.Extensions", Field, 21, ""},
    +		{"RevocationListEntry.ExtraExtensions", Field, 21, ""},
    +		{"RevocationListEntry.Raw", Field, 21, ""},
    +		{"RevocationListEntry.ReasonCode", Field, 21, ""},
    +		{"RevocationListEntry.RevocationTime", Field, 21, ""},
    +		{"RevocationListEntry.SerialNumber", Field, 21, ""},
    +		{"SHA1WithRSA", Const, 0, ""},
    +		{"SHA256WithRSA", Const, 0, ""},
    +		{"SHA256WithRSAPSS", Const, 8, ""},
    +		{"SHA384WithRSA", Const, 0, ""},
    +		{"SHA384WithRSAPSS", Const, 8, ""},
    +		{"SHA512WithRSA", Const, 0, ""},
    +		{"SHA512WithRSAPSS", Const, 8, ""},
    +		{"SetFallbackRoots", Func, 20, "func(roots *CertPool)"},
    +		{"SignatureAlgorithm", Type, 0, ""},
    +		{"SystemCertPool", Func, 7, "func() (*CertPool, error)"},
    +		{"SystemRootsError", Type, 1, ""},
    +		{"SystemRootsError.Err", Field, 7, ""},
    +		{"TooManyConstraints", Const, 10, ""},
    +		{"TooManyIntermediates", Const, 0, ""},
    +		{"UnconstrainedName", Const, 10, ""},
    +		{"UnhandledCriticalExtension", Type, 0, ""},
    +		{"UnknownAuthorityError", Type, 0, ""},
    +		{"UnknownAuthorityError.Cert", Field, 8, ""},
    +		{"UnknownPublicKeyAlgorithm", Const, 0, ""},
    +		{"UnknownSignatureAlgorithm", Const, 0, ""},
    +		{"VerifyOptions", Type, 0, ""},
    +		{"VerifyOptions.CertificatePolicies", Field, 24, ""},
    +		{"VerifyOptions.CurrentTime", Field, 0, ""},
    +		{"VerifyOptions.DNSName", Field, 0, ""},
    +		{"VerifyOptions.Intermediates", Field, 0, ""},
    +		{"VerifyOptions.KeyUsages", Field, 1, ""},
    +		{"VerifyOptions.MaxConstraintComparisions", Field, 10, ""},
    +		{"VerifyOptions.Roots", Field, 0, ""},
     	},
     	"crypto/x509/pkix": {
    -		{"(*CertificateList).HasExpired", Method, 0},
    -		{"(*Name).FillFromRDNSequence", Method, 0},
    -		{"(Name).String", Method, 10},
    -		{"(Name).ToRDNSequence", Method, 0},
    -		{"(RDNSequence).String", Method, 10},
    -		{"AlgorithmIdentifier", Type, 0},
    -		{"AlgorithmIdentifier.Algorithm", Field, 0},
    -		{"AlgorithmIdentifier.Parameters", Field, 0},
    -		{"AttributeTypeAndValue", Type, 0},
    -		{"AttributeTypeAndValue.Type", Field, 0},
    -		{"AttributeTypeAndValue.Value", Field, 0},
    -		{"AttributeTypeAndValueSET", Type, 3},
    -		{"AttributeTypeAndValueSET.Type", Field, 3},
    -		{"AttributeTypeAndValueSET.Value", Field, 3},
    -		{"CertificateList", Type, 0},
    -		{"CertificateList.SignatureAlgorithm", Field, 0},
    -		{"CertificateList.SignatureValue", Field, 0},
    -		{"CertificateList.TBSCertList", Field, 0},
    -		{"Extension", Type, 0},
    -		{"Extension.Critical", Field, 0},
    -		{"Extension.Id", Field, 0},
    -		{"Extension.Value", Field, 0},
    -		{"Name", Type, 0},
    -		{"Name.CommonName", Field, 0},
    -		{"Name.Country", Field, 0},
    -		{"Name.ExtraNames", Field, 5},
    -		{"Name.Locality", Field, 0},
    -		{"Name.Names", Field, 0},
    -		{"Name.Organization", Field, 0},
    -		{"Name.OrganizationalUnit", Field, 0},
    -		{"Name.PostalCode", Field, 0},
    -		{"Name.Province", Field, 0},
    -		{"Name.SerialNumber", Field, 0},
    -		{"Name.StreetAddress", Field, 0},
    -		{"RDNSequence", Type, 0},
    -		{"RelativeDistinguishedNameSET", Type, 0},
    -		{"RevokedCertificate", Type, 0},
    -		{"RevokedCertificate.Extensions", Field, 0},
    -		{"RevokedCertificate.RevocationTime", Field, 0},
    -		{"RevokedCertificate.SerialNumber", Field, 0},
    -		{"TBSCertificateList", Type, 0},
    -		{"TBSCertificateList.Extensions", Field, 0},
    -		{"TBSCertificateList.Issuer", Field, 0},
    -		{"TBSCertificateList.NextUpdate", Field, 0},
    -		{"TBSCertificateList.Raw", Field, 0},
    -		{"TBSCertificateList.RevokedCertificates", Field, 0},
    -		{"TBSCertificateList.Signature", Field, 0},
    -		{"TBSCertificateList.ThisUpdate", Field, 0},
    -		{"TBSCertificateList.Version", Field, 0},
    +		{"(*CertificateList).HasExpired", Method, 0, ""},
    +		{"(*Name).FillFromRDNSequence", Method, 0, ""},
    +		{"(Name).String", Method, 10, ""},
    +		{"(Name).ToRDNSequence", Method, 0, ""},
    +		{"(RDNSequence).String", Method, 10, ""},
    +		{"AlgorithmIdentifier", Type, 0, ""},
    +		{"AlgorithmIdentifier.Algorithm", Field, 0, ""},
    +		{"AlgorithmIdentifier.Parameters", Field, 0, ""},
    +		{"AttributeTypeAndValue", Type, 0, ""},
    +		{"AttributeTypeAndValue.Type", Field, 0, ""},
    +		{"AttributeTypeAndValue.Value", Field, 0, ""},
    +		{"AttributeTypeAndValueSET", Type, 3, ""},
    +		{"AttributeTypeAndValueSET.Type", Field, 3, ""},
    +		{"AttributeTypeAndValueSET.Value", Field, 3, ""},
    +		{"CertificateList", Type, 0, ""},
    +		{"CertificateList.SignatureAlgorithm", Field, 0, ""},
    +		{"CertificateList.SignatureValue", Field, 0, ""},
    +		{"CertificateList.TBSCertList", Field, 0, ""},
    +		{"Extension", Type, 0, ""},
    +		{"Extension.Critical", Field, 0, ""},
    +		{"Extension.Id", Field, 0, ""},
    +		{"Extension.Value", Field, 0, ""},
    +		{"Name", Type, 0, ""},
    +		{"Name.CommonName", Field, 0, ""},
    +		{"Name.Country", Field, 0, ""},
    +		{"Name.ExtraNames", Field, 5, ""},
    +		{"Name.Locality", Field, 0, ""},
    +		{"Name.Names", Field, 0, ""},
    +		{"Name.Organization", Field, 0, ""},
    +		{"Name.OrganizationalUnit", Field, 0, ""},
    +		{"Name.PostalCode", Field, 0, ""},
    +		{"Name.Province", Field, 0, ""},
    +		{"Name.SerialNumber", Field, 0, ""},
    +		{"Name.StreetAddress", Field, 0, ""},
    +		{"RDNSequence", Type, 0, ""},
    +		{"RelativeDistinguishedNameSET", Type, 0, ""},
    +		{"RevokedCertificate", Type, 0, ""},
    +		{"RevokedCertificate.Extensions", Field, 0, ""},
    +		{"RevokedCertificate.RevocationTime", Field, 0, ""},
    +		{"RevokedCertificate.SerialNumber", Field, 0, ""},
    +		{"TBSCertificateList", Type, 0, ""},
    +		{"TBSCertificateList.Extensions", Field, 0, ""},
    +		{"TBSCertificateList.Issuer", Field, 0, ""},
    +		{"TBSCertificateList.NextUpdate", Field, 0, ""},
    +		{"TBSCertificateList.Raw", Field, 0, ""},
    +		{"TBSCertificateList.RevokedCertificates", Field, 0, ""},
    +		{"TBSCertificateList.Signature", Field, 0, ""},
    +		{"TBSCertificateList.ThisUpdate", Field, 0, ""},
    +		{"TBSCertificateList.Version", Field, 0, ""},
     	},
     	"database/sql": {
    -		{"(*ColumnType).DatabaseTypeName", Method, 8},
    -		{"(*ColumnType).DecimalSize", Method, 8},
    -		{"(*ColumnType).Length", Method, 8},
    -		{"(*ColumnType).Name", Method, 8},
    -		{"(*ColumnType).Nullable", Method, 8},
    -		{"(*ColumnType).ScanType", Method, 8},
    -		{"(*Conn).BeginTx", Method, 9},
    -		{"(*Conn).Close", Method, 9},
    -		{"(*Conn).ExecContext", Method, 9},
    -		{"(*Conn).PingContext", Method, 9},
    -		{"(*Conn).PrepareContext", Method, 9},
    -		{"(*Conn).QueryContext", Method, 9},
    -		{"(*Conn).QueryRowContext", Method, 9},
    -		{"(*Conn).Raw", Method, 13},
    -		{"(*DB).Begin", Method, 0},
    -		{"(*DB).BeginTx", Method, 8},
    -		{"(*DB).Close", Method, 0},
    -		{"(*DB).Conn", Method, 9},
    -		{"(*DB).Driver", Method, 0},
    -		{"(*DB).Exec", Method, 0},
    -		{"(*DB).ExecContext", Method, 8},
    -		{"(*DB).Ping", Method, 1},
    -		{"(*DB).PingContext", Method, 8},
    -		{"(*DB).Prepare", Method, 0},
    -		{"(*DB).PrepareContext", Method, 8},
    -		{"(*DB).Query", Method, 0},
    -		{"(*DB).QueryContext", Method, 8},
    -		{"(*DB).QueryRow", Method, 0},
    -		{"(*DB).QueryRowContext", Method, 8},
    -		{"(*DB).SetConnMaxIdleTime", Method, 15},
    -		{"(*DB).SetConnMaxLifetime", Method, 6},
    -		{"(*DB).SetMaxIdleConns", Method, 1},
    -		{"(*DB).SetMaxOpenConns", Method, 2},
    -		{"(*DB).Stats", Method, 5},
    -		{"(*Null).Scan", Method, 22},
    -		{"(*NullBool).Scan", Method, 0},
    -		{"(*NullByte).Scan", Method, 17},
    -		{"(*NullFloat64).Scan", Method, 0},
    -		{"(*NullInt16).Scan", Method, 17},
    -		{"(*NullInt32).Scan", Method, 13},
    -		{"(*NullInt64).Scan", Method, 0},
    -		{"(*NullString).Scan", Method, 0},
    -		{"(*NullTime).Scan", Method, 13},
    -		{"(*Row).Err", Method, 15},
    -		{"(*Row).Scan", Method, 0},
    -		{"(*Rows).Close", Method, 0},
    -		{"(*Rows).ColumnTypes", Method, 8},
    -		{"(*Rows).Columns", Method, 0},
    -		{"(*Rows).Err", Method, 0},
    -		{"(*Rows).Next", Method, 0},
    -		{"(*Rows).NextResultSet", Method, 8},
    -		{"(*Rows).Scan", Method, 0},
    -		{"(*Stmt).Close", Method, 0},
    -		{"(*Stmt).Exec", Method, 0},
    -		{"(*Stmt).ExecContext", Method, 8},
    -		{"(*Stmt).Query", Method, 0},
    -		{"(*Stmt).QueryContext", Method, 8},
    -		{"(*Stmt).QueryRow", Method, 0},
    -		{"(*Stmt).QueryRowContext", Method, 8},
    -		{"(*Tx).Commit", Method, 0},
    -		{"(*Tx).Exec", Method, 0},
    -		{"(*Tx).ExecContext", Method, 8},
    -		{"(*Tx).Prepare", Method, 0},
    -		{"(*Tx).PrepareContext", Method, 8},
    -		{"(*Tx).Query", Method, 0},
    -		{"(*Tx).QueryContext", Method, 8},
    -		{"(*Tx).QueryRow", Method, 0},
    -		{"(*Tx).QueryRowContext", Method, 8},
    -		{"(*Tx).Rollback", Method, 0},
    -		{"(*Tx).Stmt", Method, 0},
    -		{"(*Tx).StmtContext", Method, 8},
    -		{"(IsolationLevel).String", Method, 11},
    -		{"(Null).Value", Method, 22},
    -		{"(NullBool).Value", Method, 0},
    -		{"(NullByte).Value", Method, 17},
    -		{"(NullFloat64).Value", Method, 0},
    -		{"(NullInt16).Value", Method, 17},
    -		{"(NullInt32).Value", Method, 13},
    -		{"(NullInt64).Value", Method, 0},
    -		{"(NullString).Value", Method, 0},
    -		{"(NullTime).Value", Method, 13},
    -		{"ColumnType", Type, 8},
    -		{"Conn", Type, 9},
    -		{"DB", Type, 0},
    -		{"DBStats", Type, 5},
    -		{"DBStats.Idle", Field, 11},
    -		{"DBStats.InUse", Field, 11},
    -		{"DBStats.MaxIdleClosed", Field, 11},
    -		{"DBStats.MaxIdleTimeClosed", Field, 15},
    -		{"DBStats.MaxLifetimeClosed", Field, 11},
    -		{"DBStats.MaxOpenConnections", Field, 11},
    -		{"DBStats.OpenConnections", Field, 5},
    -		{"DBStats.WaitCount", Field, 11},
    -		{"DBStats.WaitDuration", Field, 11},
    -		{"Drivers", Func, 4},
    -		{"ErrConnDone", Var, 9},
    -		{"ErrNoRows", Var, 0},
    -		{"ErrTxDone", Var, 0},
    -		{"IsolationLevel", Type, 8},
    -		{"LevelDefault", Const, 8},
    -		{"LevelLinearizable", Const, 8},
    -		{"LevelReadCommitted", Const, 8},
    -		{"LevelReadUncommitted", Const, 8},
    -		{"LevelRepeatableRead", Const, 8},
    -		{"LevelSerializable", Const, 8},
    -		{"LevelSnapshot", Const, 8},
    -		{"LevelWriteCommitted", Const, 8},
    -		{"Named", Func, 8},
    -		{"NamedArg", Type, 8},
    -		{"NamedArg.Name", Field, 8},
    -		{"NamedArg.Value", Field, 8},
    -		{"Null", Type, 22},
    -		{"Null.V", Field, 22},
    -		{"Null.Valid", Field, 22},
    -		{"NullBool", Type, 0},
    -		{"NullBool.Bool", Field, 0},
    -		{"NullBool.Valid", Field, 0},
    -		{"NullByte", Type, 17},
    -		{"NullByte.Byte", Field, 17},
    -		{"NullByte.Valid", Field, 17},
    -		{"NullFloat64", Type, 0},
    -		{"NullFloat64.Float64", Field, 0},
    -		{"NullFloat64.Valid", Field, 0},
    -		{"NullInt16", Type, 17},
    -		{"NullInt16.Int16", Field, 17},
    -		{"NullInt16.Valid", Field, 17},
    -		{"NullInt32", Type, 13},
    -		{"NullInt32.Int32", Field, 13},
    -		{"NullInt32.Valid", Field, 13},
    -		{"NullInt64", Type, 0},
    -		{"NullInt64.Int64", Field, 0},
    -		{"NullInt64.Valid", Field, 0},
    -		{"NullString", Type, 0},
    -		{"NullString.String", Field, 0},
    -		{"NullString.Valid", Field, 0},
    -		{"NullTime", Type, 13},
    -		{"NullTime.Time", Field, 13},
    -		{"NullTime.Valid", Field, 13},
    -		{"Open", Func, 0},
    -		{"OpenDB", Func, 10},
    -		{"Out", Type, 9},
    -		{"Out.Dest", Field, 9},
    -		{"Out.In", Field, 9},
    -		{"RawBytes", Type, 0},
    -		{"Register", Func, 0},
    -		{"Result", Type, 0},
    -		{"Row", Type, 0},
    -		{"Rows", Type, 0},
    -		{"Scanner", Type, 0},
    -		{"Stmt", Type, 0},
    -		{"Tx", Type, 0},
    -		{"TxOptions", Type, 8},
    -		{"TxOptions.Isolation", Field, 8},
    -		{"TxOptions.ReadOnly", Field, 8},
    +		{"(*ColumnType).DatabaseTypeName", Method, 8, ""},
    +		{"(*ColumnType).DecimalSize", Method, 8, ""},
    +		{"(*ColumnType).Length", Method, 8, ""},
    +		{"(*ColumnType).Name", Method, 8, ""},
    +		{"(*ColumnType).Nullable", Method, 8, ""},
    +		{"(*ColumnType).ScanType", Method, 8, ""},
    +		{"(*Conn).BeginTx", Method, 9, ""},
    +		{"(*Conn).Close", Method, 9, ""},
    +		{"(*Conn).ExecContext", Method, 9, ""},
    +		{"(*Conn).PingContext", Method, 9, ""},
    +		{"(*Conn).PrepareContext", Method, 9, ""},
    +		{"(*Conn).QueryContext", Method, 9, ""},
    +		{"(*Conn).QueryRowContext", Method, 9, ""},
    +		{"(*Conn).Raw", Method, 13, ""},
    +		{"(*DB).Begin", Method, 0, ""},
    +		{"(*DB).BeginTx", Method, 8, ""},
    +		{"(*DB).Close", Method, 0, ""},
    +		{"(*DB).Conn", Method, 9, ""},
    +		{"(*DB).Driver", Method, 0, ""},
    +		{"(*DB).Exec", Method, 0, ""},
    +		{"(*DB).ExecContext", Method, 8, ""},
    +		{"(*DB).Ping", Method, 1, ""},
    +		{"(*DB).PingContext", Method, 8, ""},
    +		{"(*DB).Prepare", Method, 0, ""},
    +		{"(*DB).PrepareContext", Method, 8, ""},
    +		{"(*DB).Query", Method, 0, ""},
    +		{"(*DB).QueryContext", Method, 8, ""},
    +		{"(*DB).QueryRow", Method, 0, ""},
    +		{"(*DB).QueryRowContext", Method, 8, ""},
    +		{"(*DB).SetConnMaxIdleTime", Method, 15, ""},
    +		{"(*DB).SetConnMaxLifetime", Method, 6, ""},
    +		{"(*DB).SetMaxIdleConns", Method, 1, ""},
    +		{"(*DB).SetMaxOpenConns", Method, 2, ""},
    +		{"(*DB).Stats", Method, 5, ""},
    +		{"(*Null).Scan", Method, 22, ""},
    +		{"(*NullBool).Scan", Method, 0, ""},
    +		{"(*NullByte).Scan", Method, 17, ""},
    +		{"(*NullFloat64).Scan", Method, 0, ""},
    +		{"(*NullInt16).Scan", Method, 17, ""},
    +		{"(*NullInt32).Scan", Method, 13, ""},
    +		{"(*NullInt64).Scan", Method, 0, ""},
    +		{"(*NullString).Scan", Method, 0, ""},
    +		{"(*NullTime).Scan", Method, 13, ""},
    +		{"(*Row).Err", Method, 15, ""},
    +		{"(*Row).Scan", Method, 0, ""},
    +		{"(*Rows).Close", Method, 0, ""},
    +		{"(*Rows).ColumnTypes", Method, 8, ""},
    +		{"(*Rows).Columns", Method, 0, ""},
    +		{"(*Rows).Err", Method, 0, ""},
    +		{"(*Rows).Next", Method, 0, ""},
    +		{"(*Rows).NextResultSet", Method, 8, ""},
    +		{"(*Rows).Scan", Method, 0, ""},
    +		{"(*Stmt).Close", Method, 0, ""},
    +		{"(*Stmt).Exec", Method, 0, ""},
    +		{"(*Stmt).ExecContext", Method, 8, ""},
    +		{"(*Stmt).Query", Method, 0, ""},
    +		{"(*Stmt).QueryContext", Method, 8, ""},
    +		{"(*Stmt).QueryRow", Method, 0, ""},
    +		{"(*Stmt).QueryRowContext", Method, 8, ""},
    +		{"(*Tx).Commit", Method, 0, ""},
    +		{"(*Tx).Exec", Method, 0, ""},
    +		{"(*Tx).ExecContext", Method, 8, ""},
    +		{"(*Tx).Prepare", Method, 0, ""},
    +		{"(*Tx).PrepareContext", Method, 8, ""},
    +		{"(*Tx).Query", Method, 0, ""},
    +		{"(*Tx).QueryContext", Method, 8, ""},
    +		{"(*Tx).QueryRow", Method, 0, ""},
    +		{"(*Tx).QueryRowContext", Method, 8, ""},
    +		{"(*Tx).Rollback", Method, 0, ""},
    +		{"(*Tx).Stmt", Method, 0, ""},
    +		{"(*Tx).StmtContext", Method, 8, ""},
    +		{"(IsolationLevel).String", Method, 11, ""},
    +		{"(Null).Value", Method, 22, ""},
    +		{"(NullBool).Value", Method, 0, ""},
    +		{"(NullByte).Value", Method, 17, ""},
    +		{"(NullFloat64).Value", Method, 0, ""},
    +		{"(NullInt16).Value", Method, 17, ""},
    +		{"(NullInt32).Value", Method, 13, ""},
    +		{"(NullInt64).Value", Method, 0, ""},
    +		{"(NullString).Value", Method, 0, ""},
    +		{"(NullTime).Value", Method, 13, ""},
    +		{"ColumnType", Type, 8, ""},
    +		{"Conn", Type, 9, ""},
    +		{"DB", Type, 0, ""},
    +		{"DBStats", Type, 5, ""},
    +		{"DBStats.Idle", Field, 11, ""},
    +		{"DBStats.InUse", Field, 11, ""},
    +		{"DBStats.MaxIdleClosed", Field, 11, ""},
    +		{"DBStats.MaxIdleTimeClosed", Field, 15, ""},
    +		{"DBStats.MaxLifetimeClosed", Field, 11, ""},
    +		{"DBStats.MaxOpenConnections", Field, 11, ""},
    +		{"DBStats.OpenConnections", Field, 5, ""},
    +		{"DBStats.WaitCount", Field, 11, ""},
    +		{"DBStats.WaitDuration", Field, 11, ""},
    +		{"Drivers", Func, 4, "func() []string"},
    +		{"ErrConnDone", Var, 9, ""},
    +		{"ErrNoRows", Var, 0, ""},
    +		{"ErrTxDone", Var, 0, ""},
    +		{"IsolationLevel", Type, 8, ""},
    +		{"LevelDefault", Const, 8, ""},
    +		{"LevelLinearizable", Const, 8, ""},
    +		{"LevelReadCommitted", Const, 8, ""},
    +		{"LevelReadUncommitted", Const, 8, ""},
    +		{"LevelRepeatableRead", Const, 8, ""},
    +		{"LevelSerializable", Const, 8, ""},
    +		{"LevelSnapshot", Const, 8, ""},
    +		{"LevelWriteCommitted", Const, 8, ""},
    +		{"Named", Func, 8, "func(name string, value any) NamedArg"},
    +		{"NamedArg", Type, 8, ""},
    +		{"NamedArg.Name", Field, 8, ""},
    +		{"NamedArg.Value", Field, 8, ""},
    +		{"Null", Type, 22, ""},
    +		{"Null.V", Field, 22, ""},
    +		{"Null.Valid", Field, 22, ""},
    +		{"NullBool", Type, 0, ""},
    +		{"NullBool.Bool", Field, 0, ""},
    +		{"NullBool.Valid", Field, 0, ""},
    +		{"NullByte", Type, 17, ""},
    +		{"NullByte.Byte", Field, 17, ""},
    +		{"NullByte.Valid", Field, 17, ""},
    +		{"NullFloat64", Type, 0, ""},
    +		{"NullFloat64.Float64", Field, 0, ""},
    +		{"NullFloat64.Valid", Field, 0, ""},
    +		{"NullInt16", Type, 17, ""},
    +		{"NullInt16.Int16", Field, 17, ""},
    +		{"NullInt16.Valid", Field, 17, ""},
    +		{"NullInt32", Type, 13, ""},
    +		{"NullInt32.Int32", Field, 13, ""},
    +		{"NullInt32.Valid", Field, 13, ""},
    +		{"NullInt64", Type, 0, ""},
    +		{"NullInt64.Int64", Field, 0, ""},
    +		{"NullInt64.Valid", Field, 0, ""},
    +		{"NullString", Type, 0, ""},
    +		{"NullString.String", Field, 0, ""},
    +		{"NullString.Valid", Field, 0, ""},
    +		{"NullTime", Type, 13, ""},
    +		{"NullTime.Time", Field, 13, ""},
    +		{"NullTime.Valid", Field, 13, ""},
    +		{"Open", Func, 0, "func(driverName string, dataSourceName string) (*DB, error)"},
    +		{"OpenDB", Func, 10, "func(c driver.Connector) *DB"},
    +		{"Out", Type, 9, ""},
    +		{"Out.Dest", Field, 9, ""},
    +		{"Out.In", Field, 9, ""},
    +		{"RawBytes", Type, 0, ""},
    +		{"Register", Func, 0, "func(name string, driver driver.Driver)"},
    +		{"Result", Type, 0, ""},
    +		{"Row", Type, 0, ""},
    +		{"Rows", Type, 0, ""},
    +		{"Scanner", Type, 0, ""},
    +		{"Stmt", Type, 0, ""},
    +		{"Tx", Type, 0, ""},
    +		{"TxOptions", Type, 8, ""},
    +		{"TxOptions.Isolation", Field, 8, ""},
    +		{"TxOptions.ReadOnly", Field, 8, ""},
     	},
     	"database/sql/driver": {
    -		{"(NotNull).ConvertValue", Method, 0},
    -		{"(Null).ConvertValue", Method, 0},
    -		{"(RowsAffected).LastInsertId", Method, 0},
    -		{"(RowsAffected).RowsAffected", Method, 0},
    -		{"Bool", Var, 0},
    -		{"ColumnConverter", Type, 0},
    -		{"Conn", Type, 0},
    -		{"ConnBeginTx", Type, 8},
    -		{"ConnPrepareContext", Type, 8},
    -		{"Connector", Type, 10},
    -		{"DefaultParameterConverter", Var, 0},
    -		{"Driver", Type, 0},
    -		{"DriverContext", Type, 10},
    -		{"ErrBadConn", Var, 0},
    -		{"ErrRemoveArgument", Var, 9},
    -		{"ErrSkip", Var, 0},
    -		{"Execer", Type, 0},
    -		{"ExecerContext", Type, 8},
    -		{"Int32", Var, 0},
    -		{"IsScanValue", Func, 0},
    -		{"IsValue", Func, 0},
    -		{"IsolationLevel", Type, 8},
    -		{"NamedValue", Type, 8},
    -		{"NamedValue.Name", Field, 8},
    -		{"NamedValue.Ordinal", Field, 8},
    -		{"NamedValue.Value", Field, 8},
    -		{"NamedValueChecker", Type, 9},
    -		{"NotNull", Type, 0},
    -		{"NotNull.Converter", Field, 0},
    -		{"Null", Type, 0},
    -		{"Null.Converter", Field, 0},
    -		{"Pinger", Type, 8},
    -		{"Queryer", Type, 1},
    -		{"QueryerContext", Type, 8},
    -		{"Result", Type, 0},
    -		{"ResultNoRows", Var, 0},
    -		{"Rows", Type, 0},
    -		{"RowsAffected", Type, 0},
    -		{"RowsColumnTypeDatabaseTypeName", Type, 8},
    -		{"RowsColumnTypeLength", Type, 8},
    -		{"RowsColumnTypeNullable", Type, 8},
    -		{"RowsColumnTypePrecisionScale", Type, 8},
    -		{"RowsColumnTypeScanType", Type, 8},
    -		{"RowsNextResultSet", Type, 8},
    -		{"SessionResetter", Type, 10},
    -		{"Stmt", Type, 0},
    -		{"StmtExecContext", Type, 8},
    -		{"StmtQueryContext", Type, 8},
    -		{"String", Var, 0},
    -		{"Tx", Type, 0},
    -		{"TxOptions", Type, 8},
    -		{"TxOptions.Isolation", Field, 8},
    -		{"TxOptions.ReadOnly", Field, 8},
    -		{"Validator", Type, 15},
    -		{"Value", Type, 0},
    -		{"ValueConverter", Type, 0},
    -		{"Valuer", Type, 0},
    +		{"(NotNull).ConvertValue", Method, 0, ""},
    +		{"(Null).ConvertValue", Method, 0, ""},
    +		{"(RowsAffected).LastInsertId", Method, 0, ""},
    +		{"(RowsAffected).RowsAffected", Method, 0, ""},
    +		{"Bool", Var, 0, ""},
    +		{"ColumnConverter", Type, 0, ""},
    +		{"Conn", Type, 0, ""},
    +		{"ConnBeginTx", Type, 8, ""},
    +		{"ConnPrepareContext", Type, 8, ""},
    +		{"Connector", Type, 10, ""},
    +		{"DefaultParameterConverter", Var, 0, ""},
    +		{"Driver", Type, 0, ""},
    +		{"DriverContext", Type, 10, ""},
    +		{"ErrBadConn", Var, 0, ""},
    +		{"ErrRemoveArgument", Var, 9, ""},
    +		{"ErrSkip", Var, 0, ""},
    +		{"Execer", Type, 0, ""},
    +		{"ExecerContext", Type, 8, ""},
    +		{"Int32", Var, 0, ""},
    +		{"IsScanValue", Func, 0, "func(v any) bool"},
    +		{"IsValue", Func, 0, "func(v any) bool"},
    +		{"IsolationLevel", Type, 8, ""},
    +		{"NamedValue", Type, 8, ""},
    +		{"NamedValue.Name", Field, 8, ""},
    +		{"NamedValue.Ordinal", Field, 8, ""},
    +		{"NamedValue.Value", Field, 8, ""},
    +		{"NamedValueChecker", Type, 9, ""},
    +		{"NotNull", Type, 0, ""},
    +		{"NotNull.Converter", Field, 0, ""},
    +		{"Null", Type, 0, ""},
    +		{"Null.Converter", Field, 0, ""},
    +		{"Pinger", Type, 8, ""},
    +		{"Queryer", Type, 1, ""},
    +		{"QueryerContext", Type, 8, ""},
    +		{"Result", Type, 0, ""},
    +		{"ResultNoRows", Var, 0, ""},
    +		{"Rows", Type, 0, ""},
    +		{"RowsAffected", Type, 0, ""},
    +		{"RowsColumnTypeDatabaseTypeName", Type, 8, ""},
    +		{"RowsColumnTypeLength", Type, 8, ""},
    +		{"RowsColumnTypeNullable", Type, 8, ""},
    +		{"RowsColumnTypePrecisionScale", Type, 8, ""},
    +		{"RowsColumnTypeScanType", Type, 8, ""},
    +		{"RowsNextResultSet", Type, 8, ""},
    +		{"SessionResetter", Type, 10, ""},
    +		{"Stmt", Type, 0, ""},
    +		{"StmtExecContext", Type, 8, ""},
    +		{"StmtQueryContext", Type, 8, ""},
    +		{"String", Var, 0, ""},
    +		{"Tx", Type, 0, ""},
    +		{"TxOptions", Type, 8, ""},
    +		{"TxOptions.Isolation", Field, 8, ""},
    +		{"TxOptions.ReadOnly", Field, 8, ""},
    +		{"Validator", Type, 15, ""},
    +		{"Value", Type, 0, ""},
    +		{"ValueConverter", Type, 0, ""},
    +		{"Valuer", Type, 0, ""},
     	},
     	"debug/buildinfo": {
    -		{"BuildInfo", Type, 18},
    -		{"Read", Func, 18},
    -		{"ReadFile", Func, 18},
    +		{"BuildInfo", Type, 18, ""},
    +		{"Read", Func, 18, "func(r io.ReaderAt) (*BuildInfo, error)"},
    +		{"ReadFile", Func, 18, "func(name string) (info *BuildInfo, err error)"},
     	},
     	"debug/dwarf": {
    -		{"(*AddrType).Basic", Method, 0},
    -		{"(*AddrType).Common", Method, 0},
    -		{"(*AddrType).Size", Method, 0},
    -		{"(*AddrType).String", Method, 0},
    -		{"(*ArrayType).Common", Method, 0},
    -		{"(*ArrayType).Size", Method, 0},
    -		{"(*ArrayType).String", Method, 0},
    -		{"(*BasicType).Basic", Method, 0},
    -		{"(*BasicType).Common", Method, 0},
    -		{"(*BasicType).Size", Method, 0},
    -		{"(*BasicType).String", Method, 0},
    -		{"(*BoolType).Basic", Method, 0},
    -		{"(*BoolType).Common", Method, 0},
    -		{"(*BoolType).Size", Method, 0},
    -		{"(*BoolType).String", Method, 0},
    -		{"(*CharType).Basic", Method, 0},
    -		{"(*CharType).Common", Method, 0},
    -		{"(*CharType).Size", Method, 0},
    -		{"(*CharType).String", Method, 0},
    -		{"(*CommonType).Common", Method, 0},
    -		{"(*CommonType).Size", Method, 0},
    -		{"(*ComplexType).Basic", Method, 0},
    -		{"(*ComplexType).Common", Method, 0},
    -		{"(*ComplexType).Size", Method, 0},
    -		{"(*ComplexType).String", Method, 0},
    -		{"(*Data).AddSection", Method, 14},
    -		{"(*Data).AddTypes", Method, 3},
    -		{"(*Data).LineReader", Method, 5},
    -		{"(*Data).Ranges", Method, 7},
    -		{"(*Data).Reader", Method, 0},
    -		{"(*Data).Type", Method, 0},
    -		{"(*DotDotDotType).Common", Method, 0},
    -		{"(*DotDotDotType).Size", Method, 0},
    -		{"(*DotDotDotType).String", Method, 0},
    -		{"(*Entry).AttrField", Method, 5},
    -		{"(*Entry).Val", Method, 0},
    -		{"(*EnumType).Common", Method, 0},
    -		{"(*EnumType).Size", Method, 0},
    -		{"(*EnumType).String", Method, 0},
    -		{"(*FloatType).Basic", Method, 0},
    -		{"(*FloatType).Common", Method, 0},
    -		{"(*FloatType).Size", Method, 0},
    -		{"(*FloatType).String", Method, 0},
    -		{"(*FuncType).Common", Method, 0},
    -		{"(*FuncType).Size", Method, 0},
    -		{"(*FuncType).String", Method, 0},
    -		{"(*IntType).Basic", Method, 0},
    -		{"(*IntType).Common", Method, 0},
    -		{"(*IntType).Size", Method, 0},
    -		{"(*IntType).String", Method, 0},
    -		{"(*LineReader).Files", Method, 14},
    -		{"(*LineReader).Next", Method, 5},
    -		{"(*LineReader).Reset", Method, 5},
    -		{"(*LineReader).Seek", Method, 5},
    -		{"(*LineReader).SeekPC", Method, 5},
    -		{"(*LineReader).Tell", Method, 5},
    -		{"(*PtrType).Common", Method, 0},
    -		{"(*PtrType).Size", Method, 0},
    -		{"(*PtrType).String", Method, 0},
    -		{"(*QualType).Common", Method, 0},
    -		{"(*QualType).Size", Method, 0},
    -		{"(*QualType).String", Method, 0},
    -		{"(*Reader).AddressSize", Method, 5},
    -		{"(*Reader).ByteOrder", Method, 14},
    -		{"(*Reader).Next", Method, 0},
    -		{"(*Reader).Seek", Method, 0},
    -		{"(*Reader).SeekPC", Method, 7},
    -		{"(*Reader).SkipChildren", Method, 0},
    -		{"(*StructType).Common", Method, 0},
    -		{"(*StructType).Defn", Method, 0},
    -		{"(*StructType).Size", Method, 0},
    -		{"(*StructType).String", Method, 0},
    -		{"(*TypedefType).Common", Method, 0},
    -		{"(*TypedefType).Size", Method, 0},
    -		{"(*TypedefType).String", Method, 0},
    -		{"(*UcharType).Basic", Method, 0},
    -		{"(*UcharType).Common", Method, 0},
    -		{"(*UcharType).Size", Method, 0},
    -		{"(*UcharType).String", Method, 0},
    -		{"(*UintType).Basic", Method, 0},
    -		{"(*UintType).Common", Method, 0},
    -		{"(*UintType).Size", Method, 0},
    -		{"(*UintType).String", Method, 0},
    -		{"(*UnspecifiedType).Basic", Method, 4},
    -		{"(*UnspecifiedType).Common", Method, 4},
    -		{"(*UnspecifiedType).Size", Method, 4},
    -		{"(*UnspecifiedType).String", Method, 4},
    -		{"(*UnsupportedType).Common", Method, 13},
    -		{"(*UnsupportedType).Size", Method, 13},
    -		{"(*UnsupportedType).String", Method, 13},
    -		{"(*VoidType).Common", Method, 0},
    -		{"(*VoidType).Size", Method, 0},
    -		{"(*VoidType).String", Method, 0},
    -		{"(Attr).GoString", Method, 0},
    -		{"(Attr).String", Method, 0},
    -		{"(Class).GoString", Method, 5},
    -		{"(Class).String", Method, 5},
    -		{"(DecodeError).Error", Method, 0},
    -		{"(Tag).GoString", Method, 0},
    -		{"(Tag).String", Method, 0},
    -		{"AddrType", Type, 0},
    -		{"AddrType.BasicType", Field, 0},
    -		{"ArrayType", Type, 0},
    -		{"ArrayType.CommonType", Field, 0},
    -		{"ArrayType.Count", Field, 0},
    -		{"ArrayType.StrideBitSize", Field, 0},
    -		{"ArrayType.Type", Field, 0},
    -		{"Attr", Type, 0},
    -		{"AttrAbstractOrigin", Const, 0},
    -		{"AttrAccessibility", Const, 0},
    -		{"AttrAddrBase", Const, 14},
    -		{"AttrAddrClass", Const, 0},
    -		{"AttrAlignment", Const, 14},
    -		{"AttrAllocated", Const, 0},
    -		{"AttrArtificial", Const, 0},
    -		{"AttrAssociated", Const, 0},
    -		{"AttrBaseTypes", Const, 0},
    -		{"AttrBinaryScale", Const, 14},
    -		{"AttrBitOffset", Const, 0},
    -		{"AttrBitSize", Const, 0},
    -		{"AttrByteSize", Const, 0},
    -		{"AttrCallAllCalls", Const, 14},
    -		{"AttrCallAllSourceCalls", Const, 14},
    -		{"AttrCallAllTailCalls", Const, 14},
    -		{"AttrCallColumn", Const, 0},
    -		{"AttrCallDataLocation", Const, 14},
    -		{"AttrCallDataValue", Const, 14},
    -		{"AttrCallFile", Const, 0},
    -		{"AttrCallLine", Const, 0},
    -		{"AttrCallOrigin", Const, 14},
    -		{"AttrCallPC", Const, 14},
    -		{"AttrCallParameter", Const, 14},
    -		{"AttrCallReturnPC", Const, 14},
    -		{"AttrCallTailCall", Const, 14},
    -		{"AttrCallTarget", Const, 14},
    -		{"AttrCallTargetClobbered", Const, 14},
    -		{"AttrCallValue", Const, 14},
    -		{"AttrCalling", Const, 0},
    -		{"AttrCommonRef", Const, 0},
    -		{"AttrCompDir", Const, 0},
    -		{"AttrConstExpr", Const, 14},
    -		{"AttrConstValue", Const, 0},
    -		{"AttrContainingType", Const, 0},
    -		{"AttrCount", Const, 0},
    -		{"AttrDataBitOffset", Const, 14},
    -		{"AttrDataLocation", Const, 0},
    -		{"AttrDataMemberLoc", Const, 0},
    -		{"AttrDecimalScale", Const, 14},
    -		{"AttrDecimalSign", Const, 14},
    -		{"AttrDeclColumn", Const, 0},
    -		{"AttrDeclFile", Const, 0},
    -		{"AttrDeclLine", Const, 0},
    -		{"AttrDeclaration", Const, 0},
    -		{"AttrDefaultValue", Const, 0},
    -		{"AttrDefaulted", Const, 14},
    -		{"AttrDeleted", Const, 14},
    -		{"AttrDescription", Const, 0},
    -		{"AttrDigitCount", Const, 14},
    -		{"AttrDiscr", Const, 0},
    -		{"AttrDiscrList", Const, 0},
    -		{"AttrDiscrValue", Const, 0},
    -		{"AttrDwoName", Const, 14},
    -		{"AttrElemental", Const, 14},
    -		{"AttrEncoding", Const, 0},
    -		{"AttrEndianity", Const, 14},
    -		{"AttrEntrypc", Const, 0},
    -		{"AttrEnumClass", Const, 14},
    -		{"AttrExplicit", Const, 14},
    -		{"AttrExportSymbols", Const, 14},
    -		{"AttrExtension", Const, 0},
    -		{"AttrExternal", Const, 0},
    -		{"AttrFrameBase", Const, 0},
    -		{"AttrFriend", Const, 0},
    -		{"AttrHighpc", Const, 0},
    -		{"AttrIdentifierCase", Const, 0},
    -		{"AttrImport", Const, 0},
    -		{"AttrInline", Const, 0},
    -		{"AttrIsOptional", Const, 0},
    -		{"AttrLanguage", Const, 0},
    -		{"AttrLinkageName", Const, 14},
    -		{"AttrLocation", Const, 0},
    -		{"AttrLoclistsBase", Const, 14},
    -		{"AttrLowerBound", Const, 0},
    -		{"AttrLowpc", Const, 0},
    -		{"AttrMacroInfo", Const, 0},
    -		{"AttrMacros", Const, 14},
    -		{"AttrMainSubprogram", Const, 14},
    -		{"AttrMutable", Const, 14},
    -		{"AttrName", Const, 0},
    -		{"AttrNamelistItem", Const, 0},
    -		{"AttrNoreturn", Const, 14},
    -		{"AttrObjectPointer", Const, 14},
    -		{"AttrOrdering", Const, 0},
    -		{"AttrPictureString", Const, 14},
    -		{"AttrPriority", Const, 0},
    -		{"AttrProducer", Const, 0},
    -		{"AttrPrototyped", Const, 0},
    -		{"AttrPure", Const, 14},
    -		{"AttrRanges", Const, 0},
    -		{"AttrRank", Const, 14},
    -		{"AttrRecursive", Const, 14},
    -		{"AttrReference", Const, 14},
    -		{"AttrReturnAddr", Const, 0},
    -		{"AttrRnglistsBase", Const, 14},
    -		{"AttrRvalueReference", Const, 14},
    -		{"AttrSegment", Const, 0},
    -		{"AttrSibling", Const, 0},
    -		{"AttrSignature", Const, 14},
    -		{"AttrSmall", Const, 14},
    -		{"AttrSpecification", Const, 0},
    -		{"AttrStartScope", Const, 0},
    -		{"AttrStaticLink", Const, 0},
    -		{"AttrStmtList", Const, 0},
    -		{"AttrStrOffsetsBase", Const, 14},
    -		{"AttrStride", Const, 0},
    -		{"AttrStrideSize", Const, 0},
    -		{"AttrStringLength", Const, 0},
    -		{"AttrStringLengthBitSize", Const, 14},
    -		{"AttrStringLengthByteSize", Const, 14},
    -		{"AttrThreadsScaled", Const, 14},
    -		{"AttrTrampoline", Const, 0},
    -		{"AttrType", Const, 0},
    -		{"AttrUpperBound", Const, 0},
    -		{"AttrUseLocation", Const, 0},
    -		{"AttrUseUTF8", Const, 0},
    -		{"AttrVarParam", Const, 0},
    -		{"AttrVirtuality", Const, 0},
    -		{"AttrVisibility", Const, 0},
    -		{"AttrVtableElemLoc", Const, 0},
    -		{"BasicType", Type, 0},
    -		{"BasicType.BitOffset", Field, 0},
    -		{"BasicType.BitSize", Field, 0},
    -		{"BasicType.CommonType", Field, 0},
    -		{"BasicType.DataBitOffset", Field, 18},
    -		{"BoolType", Type, 0},
    -		{"BoolType.BasicType", Field, 0},
    -		{"CharType", Type, 0},
    -		{"CharType.BasicType", Field, 0},
    -		{"Class", Type, 5},
    -		{"ClassAddrPtr", Const, 14},
    -		{"ClassAddress", Const, 5},
    -		{"ClassBlock", Const, 5},
    -		{"ClassConstant", Const, 5},
    -		{"ClassExprLoc", Const, 5},
    -		{"ClassFlag", Const, 5},
    -		{"ClassLinePtr", Const, 5},
    -		{"ClassLocList", Const, 14},
    -		{"ClassLocListPtr", Const, 5},
    -		{"ClassMacPtr", Const, 5},
    -		{"ClassRangeListPtr", Const, 5},
    -		{"ClassReference", Const, 5},
    -		{"ClassReferenceAlt", Const, 5},
    -		{"ClassReferenceSig", Const, 5},
    -		{"ClassRngList", Const, 14},
    -		{"ClassRngListsPtr", Const, 14},
    -		{"ClassStrOffsetsPtr", Const, 14},
    -		{"ClassString", Const, 5},
    -		{"ClassStringAlt", Const, 5},
    -		{"ClassUnknown", Const, 6},
    -		{"CommonType", Type, 0},
    -		{"CommonType.ByteSize", Field, 0},
    -		{"CommonType.Name", Field, 0},
    -		{"ComplexType", Type, 0},
    -		{"ComplexType.BasicType", Field, 0},
    -		{"Data", Type, 0},
    -		{"DecodeError", Type, 0},
    -		{"DecodeError.Err", Field, 0},
    -		{"DecodeError.Name", Field, 0},
    -		{"DecodeError.Offset", Field, 0},
    -		{"DotDotDotType", Type, 0},
    -		{"DotDotDotType.CommonType", Field, 0},
    -		{"Entry", Type, 0},
    -		{"Entry.Children", Field, 0},
    -		{"Entry.Field", Field, 0},
    -		{"Entry.Offset", Field, 0},
    -		{"Entry.Tag", Field, 0},
    -		{"EnumType", Type, 0},
    -		{"EnumType.CommonType", Field, 0},
    -		{"EnumType.EnumName", Field, 0},
    -		{"EnumType.Val", Field, 0},
    -		{"EnumValue", Type, 0},
    -		{"EnumValue.Name", Field, 0},
    -		{"EnumValue.Val", Field, 0},
    -		{"ErrUnknownPC", Var, 5},
    -		{"Field", Type, 0},
    -		{"Field.Attr", Field, 0},
    -		{"Field.Class", Field, 5},
    -		{"Field.Val", Field, 0},
    -		{"FloatType", Type, 0},
    -		{"FloatType.BasicType", Field, 0},
    -		{"FuncType", Type, 0},
    -		{"FuncType.CommonType", Field, 0},
    -		{"FuncType.ParamType", Field, 0},
    -		{"FuncType.ReturnType", Field, 0},
    -		{"IntType", Type, 0},
    -		{"IntType.BasicType", Field, 0},
    -		{"LineEntry", Type, 5},
    -		{"LineEntry.Address", Field, 5},
    -		{"LineEntry.BasicBlock", Field, 5},
    -		{"LineEntry.Column", Field, 5},
    -		{"LineEntry.Discriminator", Field, 5},
    -		{"LineEntry.EndSequence", Field, 5},
    -		{"LineEntry.EpilogueBegin", Field, 5},
    -		{"LineEntry.File", Field, 5},
    -		{"LineEntry.ISA", Field, 5},
    -		{"LineEntry.IsStmt", Field, 5},
    -		{"LineEntry.Line", Field, 5},
    -		{"LineEntry.OpIndex", Field, 5},
    -		{"LineEntry.PrologueEnd", Field, 5},
    -		{"LineFile", Type, 5},
    -		{"LineFile.Length", Field, 5},
    -		{"LineFile.Mtime", Field, 5},
    -		{"LineFile.Name", Field, 5},
    -		{"LineReader", Type, 5},
    -		{"LineReaderPos", Type, 5},
    -		{"New", Func, 0},
    -		{"Offset", Type, 0},
    -		{"PtrType", Type, 0},
    -		{"PtrType.CommonType", Field, 0},
    -		{"PtrType.Type", Field, 0},
    -		{"QualType", Type, 0},
    -		{"QualType.CommonType", Field, 0},
    -		{"QualType.Qual", Field, 0},
    -		{"QualType.Type", Field, 0},
    -		{"Reader", Type, 0},
    -		{"StructField", Type, 0},
    -		{"StructField.BitOffset", Field, 0},
    -		{"StructField.BitSize", Field, 0},
    -		{"StructField.ByteOffset", Field, 0},
    -		{"StructField.ByteSize", Field, 0},
    -		{"StructField.DataBitOffset", Field, 18},
    -		{"StructField.Name", Field, 0},
    -		{"StructField.Type", Field, 0},
    -		{"StructType", Type, 0},
    -		{"StructType.CommonType", Field, 0},
    -		{"StructType.Field", Field, 0},
    -		{"StructType.Incomplete", Field, 0},
    -		{"StructType.Kind", Field, 0},
    -		{"StructType.StructName", Field, 0},
    -		{"Tag", Type, 0},
    -		{"TagAccessDeclaration", Const, 0},
    -		{"TagArrayType", Const, 0},
    -		{"TagAtomicType", Const, 14},
    -		{"TagBaseType", Const, 0},
    -		{"TagCallSite", Const, 14},
    -		{"TagCallSiteParameter", Const, 14},
    -		{"TagCatchDwarfBlock", Const, 0},
    -		{"TagClassType", Const, 0},
    -		{"TagCoarrayType", Const, 14},
    -		{"TagCommonDwarfBlock", Const, 0},
    -		{"TagCommonInclusion", Const, 0},
    -		{"TagCompileUnit", Const, 0},
    -		{"TagCondition", Const, 3},
    -		{"TagConstType", Const, 0},
    -		{"TagConstant", Const, 0},
    -		{"TagDwarfProcedure", Const, 0},
    -		{"TagDynamicType", Const, 14},
    -		{"TagEntryPoint", Const, 0},
    -		{"TagEnumerationType", Const, 0},
    -		{"TagEnumerator", Const, 0},
    -		{"TagFileType", Const, 0},
    -		{"TagFormalParameter", Const, 0},
    -		{"TagFriend", Const, 0},
    -		{"TagGenericSubrange", Const, 14},
    -		{"TagImmutableType", Const, 14},
    -		{"TagImportedDeclaration", Const, 0},
    -		{"TagImportedModule", Const, 0},
    -		{"TagImportedUnit", Const, 0},
    -		{"TagInheritance", Const, 0},
    -		{"TagInlinedSubroutine", Const, 0},
    -		{"TagInterfaceType", Const, 0},
    -		{"TagLabel", Const, 0},
    -		{"TagLexDwarfBlock", Const, 0},
    -		{"TagMember", Const, 0},
    -		{"TagModule", Const, 0},
    -		{"TagMutableType", Const, 0},
    -		{"TagNamelist", Const, 0},
    -		{"TagNamelistItem", Const, 0},
    -		{"TagNamespace", Const, 0},
    -		{"TagPackedType", Const, 0},
    -		{"TagPartialUnit", Const, 0},
    -		{"TagPointerType", Const, 0},
    -		{"TagPtrToMemberType", Const, 0},
    -		{"TagReferenceType", Const, 0},
    -		{"TagRestrictType", Const, 0},
    -		{"TagRvalueReferenceType", Const, 3},
    -		{"TagSetType", Const, 0},
    -		{"TagSharedType", Const, 3},
    -		{"TagSkeletonUnit", Const, 14},
    -		{"TagStringType", Const, 0},
    -		{"TagStructType", Const, 0},
    -		{"TagSubprogram", Const, 0},
    -		{"TagSubrangeType", Const, 0},
    -		{"TagSubroutineType", Const, 0},
    -		{"TagTemplateAlias", Const, 3},
    -		{"TagTemplateTypeParameter", Const, 0},
    -		{"TagTemplateValueParameter", Const, 0},
    -		{"TagThrownType", Const, 0},
    -		{"TagTryDwarfBlock", Const, 0},
    -		{"TagTypeUnit", Const, 3},
    -		{"TagTypedef", Const, 0},
    -		{"TagUnionType", Const, 0},
    -		{"TagUnspecifiedParameters", Const, 0},
    -		{"TagUnspecifiedType", Const, 0},
    -		{"TagVariable", Const, 0},
    -		{"TagVariant", Const, 0},
    -		{"TagVariantPart", Const, 0},
    -		{"TagVolatileType", Const, 0},
    -		{"TagWithStmt", Const, 0},
    -		{"Type", Type, 0},
    -		{"TypedefType", Type, 0},
    -		{"TypedefType.CommonType", Field, 0},
    -		{"TypedefType.Type", Field, 0},
    -		{"UcharType", Type, 0},
    -		{"UcharType.BasicType", Field, 0},
    -		{"UintType", Type, 0},
    -		{"UintType.BasicType", Field, 0},
    -		{"UnspecifiedType", Type, 4},
    -		{"UnspecifiedType.BasicType", Field, 4},
    -		{"UnsupportedType", Type, 13},
    -		{"UnsupportedType.CommonType", Field, 13},
    -		{"UnsupportedType.Tag", Field, 13},
    -		{"VoidType", Type, 0},
    -		{"VoidType.CommonType", Field, 0},
    +		{"(*AddrType).Basic", Method, 0, ""},
    +		{"(*AddrType).Common", Method, 0, ""},
    +		{"(*AddrType).Size", Method, 0, ""},
    +		{"(*AddrType).String", Method, 0, ""},
    +		{"(*ArrayType).Common", Method, 0, ""},
    +		{"(*ArrayType).Size", Method, 0, ""},
    +		{"(*ArrayType).String", Method, 0, ""},
    +		{"(*BasicType).Basic", Method, 0, ""},
    +		{"(*BasicType).Common", Method, 0, ""},
    +		{"(*BasicType).Size", Method, 0, ""},
    +		{"(*BasicType).String", Method, 0, ""},
    +		{"(*BoolType).Basic", Method, 0, ""},
    +		{"(*BoolType).Common", Method, 0, ""},
    +		{"(*BoolType).Size", Method, 0, ""},
    +		{"(*BoolType).String", Method, 0, ""},
    +		{"(*CharType).Basic", Method, 0, ""},
    +		{"(*CharType).Common", Method, 0, ""},
    +		{"(*CharType).Size", Method, 0, ""},
    +		{"(*CharType).String", Method, 0, ""},
    +		{"(*CommonType).Common", Method, 0, ""},
    +		{"(*CommonType).Size", Method, 0, ""},
    +		{"(*ComplexType).Basic", Method, 0, ""},
    +		{"(*ComplexType).Common", Method, 0, ""},
    +		{"(*ComplexType).Size", Method, 0, ""},
    +		{"(*ComplexType).String", Method, 0, ""},
    +		{"(*Data).AddSection", Method, 14, ""},
    +		{"(*Data).AddTypes", Method, 3, ""},
    +		{"(*Data).LineReader", Method, 5, ""},
    +		{"(*Data).Ranges", Method, 7, ""},
    +		{"(*Data).Reader", Method, 0, ""},
    +		{"(*Data).Type", Method, 0, ""},
    +		{"(*DotDotDotType).Common", Method, 0, ""},
    +		{"(*DotDotDotType).Size", Method, 0, ""},
    +		{"(*DotDotDotType).String", Method, 0, ""},
    +		{"(*Entry).AttrField", Method, 5, ""},
    +		{"(*Entry).Val", Method, 0, ""},
    +		{"(*EnumType).Common", Method, 0, ""},
    +		{"(*EnumType).Size", Method, 0, ""},
    +		{"(*EnumType).String", Method, 0, ""},
    +		{"(*FloatType).Basic", Method, 0, ""},
    +		{"(*FloatType).Common", Method, 0, ""},
    +		{"(*FloatType).Size", Method, 0, ""},
    +		{"(*FloatType).String", Method, 0, ""},
    +		{"(*FuncType).Common", Method, 0, ""},
    +		{"(*FuncType).Size", Method, 0, ""},
    +		{"(*FuncType).String", Method, 0, ""},
    +		{"(*IntType).Basic", Method, 0, ""},
    +		{"(*IntType).Common", Method, 0, ""},
    +		{"(*IntType).Size", Method, 0, ""},
    +		{"(*IntType).String", Method, 0, ""},
    +		{"(*LineReader).Files", Method, 14, ""},
    +		{"(*LineReader).Next", Method, 5, ""},
    +		{"(*LineReader).Reset", Method, 5, ""},
    +		{"(*LineReader).Seek", Method, 5, ""},
    +		{"(*LineReader).SeekPC", Method, 5, ""},
    +		{"(*LineReader).Tell", Method, 5, ""},
    +		{"(*PtrType).Common", Method, 0, ""},
    +		{"(*PtrType).Size", Method, 0, ""},
    +		{"(*PtrType).String", Method, 0, ""},
    +		{"(*QualType).Common", Method, 0, ""},
    +		{"(*QualType).Size", Method, 0, ""},
    +		{"(*QualType).String", Method, 0, ""},
    +		{"(*Reader).AddressSize", Method, 5, ""},
    +		{"(*Reader).ByteOrder", Method, 14, ""},
    +		{"(*Reader).Next", Method, 0, ""},
    +		{"(*Reader).Seek", Method, 0, ""},
    +		{"(*Reader).SeekPC", Method, 7, ""},
    +		{"(*Reader).SkipChildren", Method, 0, ""},
    +		{"(*StructType).Common", Method, 0, ""},
    +		{"(*StructType).Defn", Method, 0, ""},
    +		{"(*StructType).Size", Method, 0, ""},
    +		{"(*StructType).String", Method, 0, ""},
    +		{"(*TypedefType).Common", Method, 0, ""},
    +		{"(*TypedefType).Size", Method, 0, ""},
    +		{"(*TypedefType).String", Method, 0, ""},
    +		{"(*UcharType).Basic", Method, 0, ""},
    +		{"(*UcharType).Common", Method, 0, ""},
    +		{"(*UcharType).Size", Method, 0, ""},
    +		{"(*UcharType).String", Method, 0, ""},
    +		{"(*UintType).Basic", Method, 0, ""},
    +		{"(*UintType).Common", Method, 0, ""},
    +		{"(*UintType).Size", Method, 0, ""},
    +		{"(*UintType).String", Method, 0, ""},
    +		{"(*UnspecifiedType).Basic", Method, 4, ""},
    +		{"(*UnspecifiedType).Common", Method, 4, ""},
    +		{"(*UnspecifiedType).Size", Method, 4, ""},
    +		{"(*UnspecifiedType).String", Method, 4, ""},
    +		{"(*UnsupportedType).Common", Method, 13, ""},
    +		{"(*UnsupportedType).Size", Method, 13, ""},
    +		{"(*UnsupportedType).String", Method, 13, ""},
    +		{"(*VoidType).Common", Method, 0, ""},
    +		{"(*VoidType).Size", Method, 0, ""},
    +		{"(*VoidType).String", Method, 0, ""},
    +		{"(Attr).GoString", Method, 0, ""},
    +		{"(Attr).String", Method, 0, ""},
    +		{"(Class).GoString", Method, 5, ""},
    +		{"(Class).String", Method, 5, ""},
    +		{"(DecodeError).Error", Method, 0, ""},
    +		{"(Tag).GoString", Method, 0, ""},
    +		{"(Tag).String", Method, 0, ""},
    +		{"AddrType", Type, 0, ""},
    +		{"AddrType.BasicType", Field, 0, ""},
    +		{"ArrayType", Type, 0, ""},
    +		{"ArrayType.CommonType", Field, 0, ""},
    +		{"ArrayType.Count", Field, 0, ""},
    +		{"ArrayType.StrideBitSize", Field, 0, ""},
    +		{"ArrayType.Type", Field, 0, ""},
    +		{"Attr", Type, 0, ""},
    +		{"AttrAbstractOrigin", Const, 0, ""},
    +		{"AttrAccessibility", Const, 0, ""},
    +		{"AttrAddrBase", Const, 14, ""},
    +		{"AttrAddrClass", Const, 0, ""},
    +		{"AttrAlignment", Const, 14, ""},
    +		{"AttrAllocated", Const, 0, ""},
    +		{"AttrArtificial", Const, 0, ""},
    +		{"AttrAssociated", Const, 0, ""},
    +		{"AttrBaseTypes", Const, 0, ""},
    +		{"AttrBinaryScale", Const, 14, ""},
    +		{"AttrBitOffset", Const, 0, ""},
    +		{"AttrBitSize", Const, 0, ""},
    +		{"AttrByteSize", Const, 0, ""},
    +		{"AttrCallAllCalls", Const, 14, ""},
    +		{"AttrCallAllSourceCalls", Const, 14, ""},
    +		{"AttrCallAllTailCalls", Const, 14, ""},
    +		{"AttrCallColumn", Const, 0, ""},
    +		{"AttrCallDataLocation", Const, 14, ""},
    +		{"AttrCallDataValue", Const, 14, ""},
    +		{"AttrCallFile", Const, 0, ""},
    +		{"AttrCallLine", Const, 0, ""},
    +		{"AttrCallOrigin", Const, 14, ""},
    +		{"AttrCallPC", Const, 14, ""},
    +		{"AttrCallParameter", Const, 14, ""},
    +		{"AttrCallReturnPC", Const, 14, ""},
    +		{"AttrCallTailCall", Const, 14, ""},
    +		{"AttrCallTarget", Const, 14, ""},
    +		{"AttrCallTargetClobbered", Const, 14, ""},
    +		{"AttrCallValue", Const, 14, ""},
    +		{"AttrCalling", Const, 0, ""},
    +		{"AttrCommonRef", Const, 0, ""},
    +		{"AttrCompDir", Const, 0, ""},
    +		{"AttrConstExpr", Const, 14, ""},
    +		{"AttrConstValue", Const, 0, ""},
    +		{"AttrContainingType", Const, 0, ""},
    +		{"AttrCount", Const, 0, ""},
    +		{"AttrDataBitOffset", Const, 14, ""},
    +		{"AttrDataLocation", Const, 0, ""},
    +		{"AttrDataMemberLoc", Const, 0, ""},
    +		{"AttrDecimalScale", Const, 14, ""},
    +		{"AttrDecimalSign", Const, 14, ""},
    +		{"AttrDeclColumn", Const, 0, ""},
    +		{"AttrDeclFile", Const, 0, ""},
    +		{"AttrDeclLine", Const, 0, ""},
    +		{"AttrDeclaration", Const, 0, ""},
    +		{"AttrDefaultValue", Const, 0, ""},
    +		{"AttrDefaulted", Const, 14, ""},
    +		{"AttrDeleted", Const, 14, ""},
    +		{"AttrDescription", Const, 0, ""},
    +		{"AttrDigitCount", Const, 14, ""},
    +		{"AttrDiscr", Const, 0, ""},
    +		{"AttrDiscrList", Const, 0, ""},
    +		{"AttrDiscrValue", Const, 0, ""},
    +		{"AttrDwoName", Const, 14, ""},
    +		{"AttrElemental", Const, 14, ""},
    +		{"AttrEncoding", Const, 0, ""},
    +		{"AttrEndianity", Const, 14, ""},
    +		{"AttrEntrypc", Const, 0, ""},
    +		{"AttrEnumClass", Const, 14, ""},
    +		{"AttrExplicit", Const, 14, ""},
    +		{"AttrExportSymbols", Const, 14, ""},
    +		{"AttrExtension", Const, 0, ""},
    +		{"AttrExternal", Const, 0, ""},
    +		{"AttrFrameBase", Const, 0, ""},
    +		{"AttrFriend", Const, 0, ""},
    +		{"AttrHighpc", Const, 0, ""},
    +		{"AttrIdentifierCase", Const, 0, ""},
    +		{"AttrImport", Const, 0, ""},
    +		{"AttrInline", Const, 0, ""},
    +		{"AttrIsOptional", Const, 0, ""},
    +		{"AttrLanguage", Const, 0, ""},
    +		{"AttrLinkageName", Const, 14, ""},
    +		{"AttrLocation", Const, 0, ""},
    +		{"AttrLoclistsBase", Const, 14, ""},
    +		{"AttrLowerBound", Const, 0, ""},
    +		{"AttrLowpc", Const, 0, ""},
    +		{"AttrMacroInfo", Const, 0, ""},
    +		{"AttrMacros", Const, 14, ""},
    +		{"AttrMainSubprogram", Const, 14, ""},
    +		{"AttrMutable", Const, 14, ""},
    +		{"AttrName", Const, 0, ""},
    +		{"AttrNamelistItem", Const, 0, ""},
    +		{"AttrNoreturn", Const, 14, ""},
    +		{"AttrObjectPointer", Const, 14, ""},
    +		{"AttrOrdering", Const, 0, ""},
    +		{"AttrPictureString", Const, 14, ""},
    +		{"AttrPriority", Const, 0, ""},
    +		{"AttrProducer", Const, 0, ""},
    +		{"AttrPrototyped", Const, 0, ""},
    +		{"AttrPure", Const, 14, ""},
    +		{"AttrRanges", Const, 0, ""},
    +		{"AttrRank", Const, 14, ""},
    +		{"AttrRecursive", Const, 14, ""},
    +		{"AttrReference", Const, 14, ""},
    +		{"AttrReturnAddr", Const, 0, ""},
    +		{"AttrRnglistsBase", Const, 14, ""},
    +		{"AttrRvalueReference", Const, 14, ""},
    +		{"AttrSegment", Const, 0, ""},
    +		{"AttrSibling", Const, 0, ""},
    +		{"AttrSignature", Const, 14, ""},
    +		{"AttrSmall", Const, 14, ""},
    +		{"AttrSpecification", Const, 0, ""},
    +		{"AttrStartScope", Const, 0, ""},
    +		{"AttrStaticLink", Const, 0, ""},
    +		{"AttrStmtList", Const, 0, ""},
    +		{"AttrStrOffsetsBase", Const, 14, ""},
    +		{"AttrStride", Const, 0, ""},
    +		{"AttrStrideSize", Const, 0, ""},
    +		{"AttrStringLength", Const, 0, ""},
    +		{"AttrStringLengthBitSize", Const, 14, ""},
    +		{"AttrStringLengthByteSize", Const, 14, ""},
    +		{"AttrThreadsScaled", Const, 14, ""},
    +		{"AttrTrampoline", Const, 0, ""},
    +		{"AttrType", Const, 0, ""},
    +		{"AttrUpperBound", Const, 0, ""},
    +		{"AttrUseLocation", Const, 0, ""},
    +		{"AttrUseUTF8", Const, 0, ""},
    +		{"AttrVarParam", Const, 0, ""},
    +		{"AttrVirtuality", Const, 0, ""},
    +		{"AttrVisibility", Const, 0, ""},
    +		{"AttrVtableElemLoc", Const, 0, ""},
    +		{"BasicType", Type, 0, ""},
    +		{"BasicType.BitOffset", Field, 0, ""},
    +		{"BasicType.BitSize", Field, 0, ""},
    +		{"BasicType.CommonType", Field, 0, ""},
    +		{"BasicType.DataBitOffset", Field, 18, ""},
    +		{"BoolType", Type, 0, ""},
    +		{"BoolType.BasicType", Field, 0, ""},
    +		{"CharType", Type, 0, ""},
    +		{"CharType.BasicType", Field, 0, ""},
    +		{"Class", Type, 5, ""},
    +		{"ClassAddrPtr", Const, 14, ""},
    +		{"ClassAddress", Const, 5, ""},
    +		{"ClassBlock", Const, 5, ""},
    +		{"ClassConstant", Const, 5, ""},
    +		{"ClassExprLoc", Const, 5, ""},
    +		{"ClassFlag", Const, 5, ""},
    +		{"ClassLinePtr", Const, 5, ""},
    +		{"ClassLocList", Const, 14, ""},
    +		{"ClassLocListPtr", Const, 5, ""},
    +		{"ClassMacPtr", Const, 5, ""},
    +		{"ClassRangeListPtr", Const, 5, ""},
    +		{"ClassReference", Const, 5, ""},
    +		{"ClassReferenceAlt", Const, 5, ""},
    +		{"ClassReferenceSig", Const, 5, ""},
    +		{"ClassRngList", Const, 14, ""},
    +		{"ClassRngListsPtr", Const, 14, ""},
    +		{"ClassStrOffsetsPtr", Const, 14, ""},
    +		{"ClassString", Const, 5, ""},
    +		{"ClassStringAlt", Const, 5, ""},
    +		{"ClassUnknown", Const, 6, ""},
    +		{"CommonType", Type, 0, ""},
    +		{"CommonType.ByteSize", Field, 0, ""},
    +		{"CommonType.Name", Field, 0, ""},
    +		{"ComplexType", Type, 0, ""},
    +		{"ComplexType.BasicType", Field, 0, ""},
    +		{"Data", Type, 0, ""},
    +		{"DecodeError", Type, 0, ""},
    +		{"DecodeError.Err", Field, 0, ""},
    +		{"DecodeError.Name", Field, 0, ""},
    +		{"DecodeError.Offset", Field, 0, ""},
    +		{"DotDotDotType", Type, 0, ""},
    +		{"DotDotDotType.CommonType", Field, 0, ""},
    +		{"Entry", Type, 0, ""},
    +		{"Entry.Children", Field, 0, ""},
    +		{"Entry.Field", Field, 0, ""},
    +		{"Entry.Offset", Field, 0, ""},
    +		{"Entry.Tag", Field, 0, ""},
    +		{"EnumType", Type, 0, ""},
    +		{"EnumType.CommonType", Field, 0, ""},
    +		{"EnumType.EnumName", Field, 0, ""},
    +		{"EnumType.Val", Field, 0, ""},
    +		{"EnumValue", Type, 0, ""},
    +		{"EnumValue.Name", Field, 0, ""},
    +		{"EnumValue.Val", Field, 0, ""},
    +		{"ErrUnknownPC", Var, 5, ""},
    +		{"Field", Type, 0, ""},
    +		{"Field.Attr", Field, 0, ""},
    +		{"Field.Class", Field, 5, ""},
    +		{"Field.Val", Field, 0, ""},
    +		{"FloatType", Type, 0, ""},
    +		{"FloatType.BasicType", Field, 0, ""},
    +		{"FuncType", Type, 0, ""},
    +		{"FuncType.CommonType", Field, 0, ""},
    +		{"FuncType.ParamType", Field, 0, ""},
    +		{"FuncType.ReturnType", Field, 0, ""},
    +		{"IntType", Type, 0, ""},
    +		{"IntType.BasicType", Field, 0, ""},
    +		{"LineEntry", Type, 5, ""},
    +		{"LineEntry.Address", Field, 5, ""},
    +		{"LineEntry.BasicBlock", Field, 5, ""},
    +		{"LineEntry.Column", Field, 5, ""},
    +		{"LineEntry.Discriminator", Field, 5, ""},
    +		{"LineEntry.EndSequence", Field, 5, ""},
    +		{"LineEntry.EpilogueBegin", Field, 5, ""},
    +		{"LineEntry.File", Field, 5, ""},
    +		{"LineEntry.ISA", Field, 5, ""},
    +		{"LineEntry.IsStmt", Field, 5, ""},
    +		{"LineEntry.Line", Field, 5, ""},
    +		{"LineEntry.OpIndex", Field, 5, ""},
    +		{"LineEntry.PrologueEnd", Field, 5, ""},
    +		{"LineFile", Type, 5, ""},
    +		{"LineFile.Length", Field, 5, ""},
    +		{"LineFile.Mtime", Field, 5, ""},
    +		{"LineFile.Name", Field, 5, ""},
    +		{"LineReader", Type, 5, ""},
    +		{"LineReaderPos", Type, 5, ""},
    +		{"New", Func, 0, "func(abbrev []byte, aranges []byte, frame []byte, info []byte, line []byte, pubnames []byte, ranges []byte, str []byte) (*Data, error)"},
    +		{"Offset", Type, 0, ""},
    +		{"PtrType", Type, 0, ""},
    +		{"PtrType.CommonType", Field, 0, ""},
    +		{"PtrType.Type", Field, 0, ""},
    +		{"QualType", Type, 0, ""},
    +		{"QualType.CommonType", Field, 0, ""},
    +		{"QualType.Qual", Field, 0, ""},
    +		{"QualType.Type", Field, 0, ""},
    +		{"Reader", Type, 0, ""},
    +		{"StructField", Type, 0, ""},
    +		{"StructField.BitOffset", Field, 0, ""},
    +		{"StructField.BitSize", Field, 0, ""},
    +		{"StructField.ByteOffset", Field, 0, ""},
    +		{"StructField.ByteSize", Field, 0, ""},
    +		{"StructField.DataBitOffset", Field, 18, ""},
    +		{"StructField.Name", Field, 0, ""},
    +		{"StructField.Type", Field, 0, ""},
    +		{"StructType", Type, 0, ""},
    +		{"StructType.CommonType", Field, 0, ""},
    +		{"StructType.Field", Field, 0, ""},
    +		{"StructType.Incomplete", Field, 0, ""},
    +		{"StructType.Kind", Field, 0, ""},
    +		{"StructType.StructName", Field, 0, ""},
    +		{"Tag", Type, 0, ""},
    +		{"TagAccessDeclaration", Const, 0, ""},
    +		{"TagArrayType", Const, 0, ""},
    +		{"TagAtomicType", Const, 14, ""},
    +		{"TagBaseType", Const, 0, ""},
    +		{"TagCallSite", Const, 14, ""},
    +		{"TagCallSiteParameter", Const, 14, ""},
    +		{"TagCatchDwarfBlock", Const, 0, ""},
    +		{"TagClassType", Const, 0, ""},
    +		{"TagCoarrayType", Const, 14, ""},
    +		{"TagCommonDwarfBlock", Const, 0, ""},
    +		{"TagCommonInclusion", Const, 0, ""},
    +		{"TagCompileUnit", Const, 0, ""},
    +		{"TagCondition", Const, 3, ""},
    +		{"TagConstType", Const, 0, ""},
    +		{"TagConstant", Const, 0, ""},
    +		{"TagDwarfProcedure", Const, 0, ""},
    +		{"TagDynamicType", Const, 14, ""},
    +		{"TagEntryPoint", Const, 0, ""},
    +		{"TagEnumerationType", Const, 0, ""},
    +		{"TagEnumerator", Const, 0, ""},
    +		{"TagFileType", Const, 0, ""},
    +		{"TagFormalParameter", Const, 0, ""},
    +		{"TagFriend", Const, 0, ""},
    +		{"TagGenericSubrange", Const, 14, ""},
    +		{"TagImmutableType", Const, 14, ""},
    +		{"TagImportedDeclaration", Const, 0, ""},
    +		{"TagImportedModule", Const, 0, ""},
    +		{"TagImportedUnit", Const, 0, ""},
    +		{"TagInheritance", Const, 0, ""},
    +		{"TagInlinedSubroutine", Const, 0, ""},
    +		{"TagInterfaceType", Const, 0, ""},
    +		{"TagLabel", Const, 0, ""},
    +		{"TagLexDwarfBlock", Const, 0, ""},
    +		{"TagMember", Const, 0, ""},
    +		{"TagModule", Const, 0, ""},
    +		{"TagMutableType", Const, 0, ""},
    +		{"TagNamelist", Const, 0, ""},
    +		{"TagNamelistItem", Const, 0, ""},
    +		{"TagNamespace", Const, 0, ""},
    +		{"TagPackedType", Const, 0, ""},
    +		{"TagPartialUnit", Const, 0, ""},
    +		{"TagPointerType", Const, 0, ""},
    +		{"TagPtrToMemberType", Const, 0, ""},
    +		{"TagReferenceType", Const, 0, ""},
    +		{"TagRestrictType", Const, 0, ""},
    +		{"TagRvalueReferenceType", Const, 3, ""},
    +		{"TagSetType", Const, 0, ""},
    +		{"TagSharedType", Const, 3, ""},
    +		{"TagSkeletonUnit", Const, 14, ""},
    +		{"TagStringType", Const, 0, ""},
    +		{"TagStructType", Const, 0, ""},
    +		{"TagSubprogram", Const, 0, ""},
    +		{"TagSubrangeType", Const, 0, ""},
    +		{"TagSubroutineType", Const, 0, ""},
    +		{"TagTemplateAlias", Const, 3, ""},
    +		{"TagTemplateTypeParameter", Const, 0, ""},
    +		{"TagTemplateValueParameter", Const, 0, ""},
    +		{"TagThrownType", Const, 0, ""},
    +		{"TagTryDwarfBlock", Const, 0, ""},
    +		{"TagTypeUnit", Const, 3, ""},
    +		{"TagTypedef", Const, 0, ""},
    +		{"TagUnionType", Const, 0, ""},
    +		{"TagUnspecifiedParameters", Const, 0, ""},
    +		{"TagUnspecifiedType", Const, 0, ""},
    +		{"TagVariable", Const, 0, ""},
    +		{"TagVariant", Const, 0, ""},
    +		{"TagVariantPart", Const, 0, ""},
    +		{"TagVolatileType", Const, 0, ""},
    +		{"TagWithStmt", Const, 0, ""},
    +		{"Type", Type, 0, ""},
    +		{"TypedefType", Type, 0, ""},
    +		{"TypedefType.CommonType", Field, 0, ""},
    +		{"TypedefType.Type", Field, 0, ""},
    +		{"UcharType", Type, 0, ""},
    +		{"UcharType.BasicType", Field, 0, ""},
    +		{"UintType", Type, 0, ""},
    +		{"UintType.BasicType", Field, 0, ""},
    +		{"UnspecifiedType", Type, 4, ""},
    +		{"UnspecifiedType.BasicType", Field, 4, ""},
    +		{"UnsupportedType", Type, 13, ""},
    +		{"UnsupportedType.CommonType", Field, 13, ""},
    +		{"UnsupportedType.Tag", Field, 13, ""},
    +		{"VoidType", Type, 0, ""},
    +		{"VoidType.CommonType", Field, 0, ""},
     	},
     	"debug/elf": {
    -		{"(*File).Close", Method, 0},
    -		{"(*File).DWARF", Method, 0},
    -		{"(*File).DynString", Method, 1},
    -		{"(*File).DynValue", Method, 21},
    -		{"(*File).DynamicSymbols", Method, 4},
    -		{"(*File).ImportedLibraries", Method, 0},
    -		{"(*File).ImportedSymbols", Method, 0},
    -		{"(*File).Section", Method, 0},
    -		{"(*File).SectionByType", Method, 0},
    -		{"(*File).Symbols", Method, 0},
    -		{"(*FormatError).Error", Method, 0},
    -		{"(*Prog).Open", Method, 0},
    -		{"(*Section).Data", Method, 0},
    -		{"(*Section).Open", Method, 0},
    -		{"(Class).GoString", Method, 0},
    -		{"(Class).String", Method, 0},
    -		{"(CompressionType).GoString", Method, 6},
    -		{"(CompressionType).String", Method, 6},
    -		{"(Data).GoString", Method, 0},
    -		{"(Data).String", Method, 0},
    -		{"(DynFlag).GoString", Method, 0},
    -		{"(DynFlag).String", Method, 0},
    -		{"(DynFlag1).GoString", Method, 21},
    -		{"(DynFlag1).String", Method, 21},
    -		{"(DynTag).GoString", Method, 0},
    -		{"(DynTag).String", Method, 0},
    -		{"(Machine).GoString", Method, 0},
    -		{"(Machine).String", Method, 0},
    -		{"(NType).GoString", Method, 0},
    -		{"(NType).String", Method, 0},
    -		{"(OSABI).GoString", Method, 0},
    -		{"(OSABI).String", Method, 0},
    -		{"(Prog).ReadAt", Method, 0},
    -		{"(ProgFlag).GoString", Method, 0},
    -		{"(ProgFlag).String", Method, 0},
    -		{"(ProgType).GoString", Method, 0},
    -		{"(ProgType).String", Method, 0},
    -		{"(R_386).GoString", Method, 0},
    -		{"(R_386).String", Method, 0},
    -		{"(R_390).GoString", Method, 7},
    -		{"(R_390).String", Method, 7},
    -		{"(R_AARCH64).GoString", Method, 4},
    -		{"(R_AARCH64).String", Method, 4},
    -		{"(R_ALPHA).GoString", Method, 0},
    -		{"(R_ALPHA).String", Method, 0},
    -		{"(R_ARM).GoString", Method, 0},
    -		{"(R_ARM).String", Method, 0},
    -		{"(R_LARCH).GoString", Method, 19},
    -		{"(R_LARCH).String", Method, 19},
    -		{"(R_MIPS).GoString", Method, 6},
    -		{"(R_MIPS).String", Method, 6},
    -		{"(R_PPC).GoString", Method, 0},
    -		{"(R_PPC).String", Method, 0},
    -		{"(R_PPC64).GoString", Method, 5},
    -		{"(R_PPC64).String", Method, 5},
    -		{"(R_RISCV).GoString", Method, 11},
    -		{"(R_RISCV).String", Method, 11},
    -		{"(R_SPARC).GoString", Method, 0},
    -		{"(R_SPARC).String", Method, 0},
    -		{"(R_X86_64).GoString", Method, 0},
    -		{"(R_X86_64).String", Method, 0},
    -		{"(Section).ReadAt", Method, 0},
    -		{"(SectionFlag).GoString", Method, 0},
    -		{"(SectionFlag).String", Method, 0},
    -		{"(SectionIndex).GoString", Method, 0},
    -		{"(SectionIndex).String", Method, 0},
    -		{"(SectionType).GoString", Method, 0},
    -		{"(SectionType).String", Method, 0},
    -		{"(SymBind).GoString", Method, 0},
    -		{"(SymBind).String", Method, 0},
    -		{"(SymType).GoString", Method, 0},
    -		{"(SymType).String", Method, 0},
    -		{"(SymVis).GoString", Method, 0},
    -		{"(SymVis).String", Method, 0},
    -		{"(Type).GoString", Method, 0},
    -		{"(Type).String", Method, 0},
    -		{"(Version).GoString", Method, 0},
    -		{"(Version).String", Method, 0},
    -		{"ARM_MAGIC_TRAMP_NUMBER", Const, 0},
    -		{"COMPRESS_HIOS", Const, 6},
    -		{"COMPRESS_HIPROC", Const, 6},
    -		{"COMPRESS_LOOS", Const, 6},
    -		{"COMPRESS_LOPROC", Const, 6},
    -		{"COMPRESS_ZLIB", Const, 6},
    -		{"COMPRESS_ZSTD", Const, 21},
    -		{"Chdr32", Type, 6},
    -		{"Chdr32.Addralign", Field, 6},
    -		{"Chdr32.Size", Field, 6},
    -		{"Chdr32.Type", Field, 6},
    -		{"Chdr64", Type, 6},
    -		{"Chdr64.Addralign", Field, 6},
    -		{"Chdr64.Size", Field, 6},
    -		{"Chdr64.Type", Field, 6},
    -		{"Class", Type, 0},
    -		{"CompressionType", Type, 6},
    -		{"DF_1_CONFALT", Const, 21},
    -		{"DF_1_DIRECT", Const, 21},
    -		{"DF_1_DISPRELDNE", Const, 21},
    -		{"DF_1_DISPRELPND", Const, 21},
    -		{"DF_1_EDITED", Const, 21},
    -		{"DF_1_ENDFILTEE", Const, 21},
    -		{"DF_1_GLOBAL", Const, 21},
    -		{"DF_1_GLOBAUDIT", Const, 21},
    -		{"DF_1_GROUP", Const, 21},
    -		{"DF_1_IGNMULDEF", Const, 21},
    -		{"DF_1_INITFIRST", Const, 21},
    -		{"DF_1_INTERPOSE", Const, 21},
    -		{"DF_1_KMOD", Const, 21},
    -		{"DF_1_LOADFLTR", Const, 21},
    -		{"DF_1_NOCOMMON", Const, 21},
    -		{"DF_1_NODEFLIB", Const, 21},
    -		{"DF_1_NODELETE", Const, 21},
    -		{"DF_1_NODIRECT", Const, 21},
    -		{"DF_1_NODUMP", Const, 21},
    -		{"DF_1_NOHDR", Const, 21},
    -		{"DF_1_NOKSYMS", Const, 21},
    -		{"DF_1_NOOPEN", Const, 21},
    -		{"DF_1_NORELOC", Const, 21},
    -		{"DF_1_NOW", Const, 21},
    -		{"DF_1_ORIGIN", Const, 21},
    -		{"DF_1_PIE", Const, 21},
    -		{"DF_1_SINGLETON", Const, 21},
    -		{"DF_1_STUB", Const, 21},
    -		{"DF_1_SYMINTPOSE", Const, 21},
    -		{"DF_1_TRANS", Const, 21},
    -		{"DF_1_WEAKFILTER", Const, 21},
    -		{"DF_BIND_NOW", Const, 0},
    -		{"DF_ORIGIN", Const, 0},
    -		{"DF_STATIC_TLS", Const, 0},
    -		{"DF_SYMBOLIC", Const, 0},
    -		{"DF_TEXTREL", Const, 0},
    -		{"DT_ADDRRNGHI", Const, 16},
    -		{"DT_ADDRRNGLO", Const, 16},
    -		{"DT_AUDIT", Const, 16},
    -		{"DT_AUXILIARY", Const, 16},
    -		{"DT_BIND_NOW", Const, 0},
    -		{"DT_CHECKSUM", Const, 16},
    -		{"DT_CONFIG", Const, 16},
    -		{"DT_DEBUG", Const, 0},
    -		{"DT_DEPAUDIT", Const, 16},
    -		{"DT_ENCODING", Const, 0},
    -		{"DT_FEATURE", Const, 16},
    -		{"DT_FILTER", Const, 16},
    -		{"DT_FINI", Const, 0},
    -		{"DT_FINI_ARRAY", Const, 0},
    -		{"DT_FINI_ARRAYSZ", Const, 0},
    -		{"DT_FLAGS", Const, 0},
    -		{"DT_FLAGS_1", Const, 16},
    -		{"DT_GNU_CONFLICT", Const, 16},
    -		{"DT_GNU_CONFLICTSZ", Const, 16},
    -		{"DT_GNU_HASH", Const, 16},
    -		{"DT_GNU_LIBLIST", Const, 16},
    -		{"DT_GNU_LIBLISTSZ", Const, 16},
    -		{"DT_GNU_PRELINKED", Const, 16},
    -		{"DT_HASH", Const, 0},
    -		{"DT_HIOS", Const, 0},
    -		{"DT_HIPROC", Const, 0},
    -		{"DT_INIT", Const, 0},
    -		{"DT_INIT_ARRAY", Const, 0},
    -		{"DT_INIT_ARRAYSZ", Const, 0},
    -		{"DT_JMPREL", Const, 0},
    -		{"DT_LOOS", Const, 0},
    -		{"DT_LOPROC", Const, 0},
    -		{"DT_MIPS_AUX_DYNAMIC", Const, 16},
    -		{"DT_MIPS_BASE_ADDRESS", Const, 16},
    -		{"DT_MIPS_COMPACT_SIZE", Const, 16},
    -		{"DT_MIPS_CONFLICT", Const, 16},
    -		{"DT_MIPS_CONFLICTNO", Const, 16},
    -		{"DT_MIPS_CXX_FLAGS", Const, 16},
    -		{"DT_MIPS_DELTA_CLASS", Const, 16},
    -		{"DT_MIPS_DELTA_CLASSSYM", Const, 16},
    -		{"DT_MIPS_DELTA_CLASSSYM_NO", Const, 16},
    -		{"DT_MIPS_DELTA_CLASS_NO", Const, 16},
    -		{"DT_MIPS_DELTA_INSTANCE", Const, 16},
    -		{"DT_MIPS_DELTA_INSTANCE_NO", Const, 16},
    -		{"DT_MIPS_DELTA_RELOC", Const, 16},
    -		{"DT_MIPS_DELTA_RELOC_NO", Const, 16},
    -		{"DT_MIPS_DELTA_SYM", Const, 16},
    -		{"DT_MIPS_DELTA_SYM_NO", Const, 16},
    -		{"DT_MIPS_DYNSTR_ALIGN", Const, 16},
    -		{"DT_MIPS_FLAGS", Const, 16},
    -		{"DT_MIPS_GOTSYM", Const, 16},
    -		{"DT_MIPS_GP_VALUE", Const, 16},
    -		{"DT_MIPS_HIDDEN_GOTIDX", Const, 16},
    -		{"DT_MIPS_HIPAGENO", Const, 16},
    -		{"DT_MIPS_ICHECKSUM", Const, 16},
    -		{"DT_MIPS_INTERFACE", Const, 16},
    -		{"DT_MIPS_INTERFACE_SIZE", Const, 16},
    -		{"DT_MIPS_IVERSION", Const, 16},
    -		{"DT_MIPS_LIBLIST", Const, 16},
    -		{"DT_MIPS_LIBLISTNO", Const, 16},
    -		{"DT_MIPS_LOCALPAGE_GOTIDX", Const, 16},
    -		{"DT_MIPS_LOCAL_GOTIDX", Const, 16},
    -		{"DT_MIPS_LOCAL_GOTNO", Const, 16},
    -		{"DT_MIPS_MSYM", Const, 16},
    -		{"DT_MIPS_OPTIONS", Const, 16},
    -		{"DT_MIPS_PERF_SUFFIX", Const, 16},
    -		{"DT_MIPS_PIXIE_INIT", Const, 16},
    -		{"DT_MIPS_PLTGOT", Const, 16},
    -		{"DT_MIPS_PROTECTED_GOTIDX", Const, 16},
    -		{"DT_MIPS_RLD_MAP", Const, 16},
    -		{"DT_MIPS_RLD_MAP_REL", Const, 16},
    -		{"DT_MIPS_RLD_TEXT_RESOLVE_ADDR", Const, 16},
    -		{"DT_MIPS_RLD_VERSION", Const, 16},
    -		{"DT_MIPS_RWPLT", Const, 16},
    -		{"DT_MIPS_SYMBOL_LIB", Const, 16},
    -		{"DT_MIPS_SYMTABNO", Const, 16},
    -		{"DT_MIPS_TIME_STAMP", Const, 16},
    -		{"DT_MIPS_UNREFEXTNO", Const, 16},
    -		{"DT_MOVEENT", Const, 16},
    -		{"DT_MOVESZ", Const, 16},
    -		{"DT_MOVETAB", Const, 16},
    -		{"DT_NEEDED", Const, 0},
    -		{"DT_NULL", Const, 0},
    -		{"DT_PLTGOT", Const, 0},
    -		{"DT_PLTPAD", Const, 16},
    -		{"DT_PLTPADSZ", Const, 16},
    -		{"DT_PLTREL", Const, 0},
    -		{"DT_PLTRELSZ", Const, 0},
    -		{"DT_POSFLAG_1", Const, 16},
    -		{"DT_PPC64_GLINK", Const, 16},
    -		{"DT_PPC64_OPD", Const, 16},
    -		{"DT_PPC64_OPDSZ", Const, 16},
    -		{"DT_PPC64_OPT", Const, 16},
    -		{"DT_PPC_GOT", Const, 16},
    -		{"DT_PPC_OPT", Const, 16},
    -		{"DT_PREINIT_ARRAY", Const, 0},
    -		{"DT_PREINIT_ARRAYSZ", Const, 0},
    -		{"DT_REL", Const, 0},
    -		{"DT_RELA", Const, 0},
    -		{"DT_RELACOUNT", Const, 16},
    -		{"DT_RELAENT", Const, 0},
    -		{"DT_RELASZ", Const, 0},
    -		{"DT_RELCOUNT", Const, 16},
    -		{"DT_RELENT", Const, 0},
    -		{"DT_RELSZ", Const, 0},
    -		{"DT_RPATH", Const, 0},
    -		{"DT_RUNPATH", Const, 0},
    -		{"DT_SONAME", Const, 0},
    -		{"DT_SPARC_REGISTER", Const, 16},
    -		{"DT_STRSZ", Const, 0},
    -		{"DT_STRTAB", Const, 0},
    -		{"DT_SYMBOLIC", Const, 0},
    -		{"DT_SYMENT", Const, 0},
    -		{"DT_SYMINENT", Const, 16},
    -		{"DT_SYMINFO", Const, 16},
    -		{"DT_SYMINSZ", Const, 16},
    -		{"DT_SYMTAB", Const, 0},
    -		{"DT_SYMTAB_SHNDX", Const, 16},
    -		{"DT_TEXTREL", Const, 0},
    -		{"DT_TLSDESC_GOT", Const, 16},
    -		{"DT_TLSDESC_PLT", Const, 16},
    -		{"DT_USED", Const, 16},
    -		{"DT_VALRNGHI", Const, 16},
    -		{"DT_VALRNGLO", Const, 16},
    -		{"DT_VERDEF", Const, 16},
    -		{"DT_VERDEFNUM", Const, 16},
    -		{"DT_VERNEED", Const, 0},
    -		{"DT_VERNEEDNUM", Const, 0},
    -		{"DT_VERSYM", Const, 0},
    -		{"Data", Type, 0},
    -		{"Dyn32", Type, 0},
    -		{"Dyn32.Tag", Field, 0},
    -		{"Dyn32.Val", Field, 0},
    -		{"Dyn64", Type, 0},
    -		{"Dyn64.Tag", Field, 0},
    -		{"Dyn64.Val", Field, 0},
    -		{"DynFlag", Type, 0},
    -		{"DynFlag1", Type, 21},
    -		{"DynTag", Type, 0},
    -		{"EI_ABIVERSION", Const, 0},
    -		{"EI_CLASS", Const, 0},
    -		{"EI_DATA", Const, 0},
    -		{"EI_NIDENT", Const, 0},
    -		{"EI_OSABI", Const, 0},
    -		{"EI_PAD", Const, 0},
    -		{"EI_VERSION", Const, 0},
    -		{"ELFCLASS32", Const, 0},
    -		{"ELFCLASS64", Const, 0},
    -		{"ELFCLASSNONE", Const, 0},
    -		{"ELFDATA2LSB", Const, 0},
    -		{"ELFDATA2MSB", Const, 0},
    -		{"ELFDATANONE", Const, 0},
    -		{"ELFMAG", Const, 0},
    -		{"ELFOSABI_86OPEN", Const, 0},
    -		{"ELFOSABI_AIX", Const, 0},
    -		{"ELFOSABI_ARM", Const, 0},
    -		{"ELFOSABI_AROS", Const, 11},
    -		{"ELFOSABI_CLOUDABI", Const, 11},
    -		{"ELFOSABI_FENIXOS", Const, 11},
    -		{"ELFOSABI_FREEBSD", Const, 0},
    -		{"ELFOSABI_HPUX", Const, 0},
    -		{"ELFOSABI_HURD", Const, 0},
    -		{"ELFOSABI_IRIX", Const, 0},
    -		{"ELFOSABI_LINUX", Const, 0},
    -		{"ELFOSABI_MODESTO", Const, 0},
    -		{"ELFOSABI_NETBSD", Const, 0},
    -		{"ELFOSABI_NONE", Const, 0},
    -		{"ELFOSABI_NSK", Const, 0},
    -		{"ELFOSABI_OPENBSD", Const, 0},
    -		{"ELFOSABI_OPENVMS", Const, 0},
    -		{"ELFOSABI_SOLARIS", Const, 0},
    -		{"ELFOSABI_STANDALONE", Const, 0},
    -		{"ELFOSABI_TRU64", Const, 0},
    -		{"EM_386", Const, 0},
    -		{"EM_486", Const, 0},
    -		{"EM_56800EX", Const, 11},
    -		{"EM_68HC05", Const, 11},
    -		{"EM_68HC08", Const, 11},
    -		{"EM_68HC11", Const, 11},
    -		{"EM_68HC12", Const, 0},
    -		{"EM_68HC16", Const, 11},
    -		{"EM_68K", Const, 0},
    -		{"EM_78KOR", Const, 11},
    -		{"EM_8051", Const, 11},
    -		{"EM_860", Const, 0},
    -		{"EM_88K", Const, 0},
    -		{"EM_960", Const, 0},
    -		{"EM_AARCH64", Const, 4},
    -		{"EM_ALPHA", Const, 0},
    -		{"EM_ALPHA_STD", Const, 0},
    -		{"EM_ALTERA_NIOS2", Const, 11},
    -		{"EM_AMDGPU", Const, 11},
    -		{"EM_ARC", Const, 0},
    -		{"EM_ARCA", Const, 11},
    -		{"EM_ARC_COMPACT", Const, 11},
    -		{"EM_ARC_COMPACT2", Const, 11},
    -		{"EM_ARM", Const, 0},
    -		{"EM_AVR", Const, 11},
    -		{"EM_AVR32", Const, 11},
    -		{"EM_BA1", Const, 11},
    -		{"EM_BA2", Const, 11},
    -		{"EM_BLACKFIN", Const, 11},
    -		{"EM_BPF", Const, 11},
    -		{"EM_C166", Const, 11},
    -		{"EM_CDP", Const, 11},
    -		{"EM_CE", Const, 11},
    -		{"EM_CLOUDSHIELD", Const, 11},
    -		{"EM_COGE", Const, 11},
    -		{"EM_COLDFIRE", Const, 0},
    -		{"EM_COOL", Const, 11},
    -		{"EM_COREA_1ST", Const, 11},
    -		{"EM_COREA_2ND", Const, 11},
    -		{"EM_CR", Const, 11},
    -		{"EM_CR16", Const, 11},
    -		{"EM_CRAYNV2", Const, 11},
    -		{"EM_CRIS", Const, 11},
    -		{"EM_CRX", Const, 11},
    -		{"EM_CSR_KALIMBA", Const, 11},
    -		{"EM_CUDA", Const, 11},
    -		{"EM_CYPRESS_M8C", Const, 11},
    -		{"EM_D10V", Const, 11},
    -		{"EM_D30V", Const, 11},
    -		{"EM_DSP24", Const, 11},
    -		{"EM_DSPIC30F", Const, 11},
    -		{"EM_DXP", Const, 11},
    -		{"EM_ECOG1", Const, 11},
    -		{"EM_ECOG16", Const, 11},
    -		{"EM_ECOG1X", Const, 11},
    -		{"EM_ECOG2", Const, 11},
    -		{"EM_ETPU", Const, 11},
    -		{"EM_EXCESS", Const, 11},
    -		{"EM_F2MC16", Const, 11},
    -		{"EM_FIREPATH", Const, 11},
    -		{"EM_FR20", Const, 0},
    -		{"EM_FR30", Const, 11},
    -		{"EM_FT32", Const, 11},
    -		{"EM_FX66", Const, 11},
    -		{"EM_H8S", Const, 0},
    -		{"EM_H8_300", Const, 0},
    -		{"EM_H8_300H", Const, 0},
    -		{"EM_H8_500", Const, 0},
    -		{"EM_HUANY", Const, 11},
    -		{"EM_IA_64", Const, 0},
    -		{"EM_INTEL205", Const, 11},
    -		{"EM_INTEL206", Const, 11},
    -		{"EM_INTEL207", Const, 11},
    -		{"EM_INTEL208", Const, 11},
    -		{"EM_INTEL209", Const, 11},
    -		{"EM_IP2K", Const, 11},
    -		{"EM_JAVELIN", Const, 11},
    -		{"EM_K10M", Const, 11},
    -		{"EM_KM32", Const, 11},
    -		{"EM_KMX16", Const, 11},
    -		{"EM_KMX32", Const, 11},
    -		{"EM_KMX8", Const, 11},
    -		{"EM_KVARC", Const, 11},
    -		{"EM_L10M", Const, 11},
    -		{"EM_LANAI", Const, 11},
    -		{"EM_LATTICEMICO32", Const, 11},
    -		{"EM_LOONGARCH", Const, 19},
    -		{"EM_M16C", Const, 11},
    -		{"EM_M32", Const, 0},
    -		{"EM_M32C", Const, 11},
    -		{"EM_M32R", Const, 11},
    -		{"EM_MANIK", Const, 11},
    -		{"EM_MAX", Const, 11},
    -		{"EM_MAXQ30", Const, 11},
    -		{"EM_MCHP_PIC", Const, 11},
    -		{"EM_MCST_ELBRUS", Const, 11},
    -		{"EM_ME16", Const, 0},
    -		{"EM_METAG", Const, 11},
    -		{"EM_MICROBLAZE", Const, 11},
    -		{"EM_MIPS", Const, 0},
    -		{"EM_MIPS_RS3_LE", Const, 0},
    -		{"EM_MIPS_RS4_BE", Const, 0},
    -		{"EM_MIPS_X", Const, 0},
    -		{"EM_MMA", Const, 0},
    -		{"EM_MMDSP_PLUS", Const, 11},
    -		{"EM_MMIX", Const, 11},
    -		{"EM_MN10200", Const, 11},
    -		{"EM_MN10300", Const, 11},
    -		{"EM_MOXIE", Const, 11},
    -		{"EM_MSP430", Const, 11},
    -		{"EM_NCPU", Const, 0},
    -		{"EM_NDR1", Const, 0},
    -		{"EM_NDS32", Const, 11},
    -		{"EM_NONE", Const, 0},
    -		{"EM_NORC", Const, 11},
    -		{"EM_NS32K", Const, 11},
    -		{"EM_OPEN8", Const, 11},
    -		{"EM_OPENRISC", Const, 11},
    -		{"EM_PARISC", Const, 0},
    -		{"EM_PCP", Const, 0},
    -		{"EM_PDP10", Const, 11},
    -		{"EM_PDP11", Const, 11},
    -		{"EM_PDSP", Const, 11},
    -		{"EM_PJ", Const, 11},
    -		{"EM_PPC", Const, 0},
    -		{"EM_PPC64", Const, 0},
    -		{"EM_PRISM", Const, 11},
    -		{"EM_QDSP6", Const, 11},
    -		{"EM_R32C", Const, 11},
    -		{"EM_RCE", Const, 0},
    -		{"EM_RH32", Const, 0},
    -		{"EM_RISCV", Const, 11},
    -		{"EM_RL78", Const, 11},
    -		{"EM_RS08", Const, 11},
    -		{"EM_RX", Const, 11},
    -		{"EM_S370", Const, 0},
    -		{"EM_S390", Const, 0},
    -		{"EM_SCORE7", Const, 11},
    -		{"EM_SEP", Const, 11},
    -		{"EM_SE_C17", Const, 11},
    -		{"EM_SE_C33", Const, 11},
    -		{"EM_SH", Const, 0},
    -		{"EM_SHARC", Const, 11},
    -		{"EM_SLE9X", Const, 11},
    -		{"EM_SNP1K", Const, 11},
    -		{"EM_SPARC", Const, 0},
    -		{"EM_SPARC32PLUS", Const, 0},
    -		{"EM_SPARCV9", Const, 0},
    -		{"EM_ST100", Const, 0},
    -		{"EM_ST19", Const, 11},
    -		{"EM_ST200", Const, 11},
    -		{"EM_ST7", Const, 11},
    -		{"EM_ST9PLUS", Const, 11},
    -		{"EM_STARCORE", Const, 0},
    -		{"EM_STM8", Const, 11},
    -		{"EM_STXP7X", Const, 11},
    -		{"EM_SVX", Const, 11},
    -		{"EM_TILE64", Const, 11},
    -		{"EM_TILEGX", Const, 11},
    -		{"EM_TILEPRO", Const, 11},
    -		{"EM_TINYJ", Const, 0},
    -		{"EM_TI_ARP32", Const, 11},
    -		{"EM_TI_C2000", Const, 11},
    -		{"EM_TI_C5500", Const, 11},
    -		{"EM_TI_C6000", Const, 11},
    -		{"EM_TI_PRU", Const, 11},
    -		{"EM_TMM_GPP", Const, 11},
    -		{"EM_TPC", Const, 11},
    -		{"EM_TRICORE", Const, 0},
    -		{"EM_TRIMEDIA", Const, 11},
    -		{"EM_TSK3000", Const, 11},
    -		{"EM_UNICORE", Const, 11},
    -		{"EM_V800", Const, 0},
    -		{"EM_V850", Const, 11},
    -		{"EM_VAX", Const, 11},
    -		{"EM_VIDEOCORE", Const, 11},
    -		{"EM_VIDEOCORE3", Const, 11},
    -		{"EM_VIDEOCORE5", Const, 11},
    -		{"EM_VISIUM", Const, 11},
    -		{"EM_VPP500", Const, 0},
    -		{"EM_X86_64", Const, 0},
    -		{"EM_XCORE", Const, 11},
    -		{"EM_XGATE", Const, 11},
    -		{"EM_XIMO16", Const, 11},
    -		{"EM_XTENSA", Const, 11},
    -		{"EM_Z80", Const, 11},
    -		{"EM_ZSP", Const, 11},
    -		{"ET_CORE", Const, 0},
    -		{"ET_DYN", Const, 0},
    -		{"ET_EXEC", Const, 0},
    -		{"ET_HIOS", Const, 0},
    -		{"ET_HIPROC", Const, 0},
    -		{"ET_LOOS", Const, 0},
    -		{"ET_LOPROC", Const, 0},
    -		{"ET_NONE", Const, 0},
    -		{"ET_REL", Const, 0},
    -		{"EV_CURRENT", Const, 0},
    -		{"EV_NONE", Const, 0},
    -		{"ErrNoSymbols", Var, 4},
    -		{"File", Type, 0},
    -		{"File.FileHeader", Field, 0},
    -		{"File.Progs", Field, 0},
    -		{"File.Sections", Field, 0},
    -		{"FileHeader", Type, 0},
    -		{"FileHeader.ABIVersion", Field, 0},
    -		{"FileHeader.ByteOrder", Field, 0},
    -		{"FileHeader.Class", Field, 0},
    -		{"FileHeader.Data", Field, 0},
    -		{"FileHeader.Entry", Field, 1},
    -		{"FileHeader.Machine", Field, 0},
    -		{"FileHeader.OSABI", Field, 0},
    -		{"FileHeader.Type", Field, 0},
    -		{"FileHeader.Version", Field, 0},
    -		{"FormatError", Type, 0},
    -		{"Header32", Type, 0},
    -		{"Header32.Ehsize", Field, 0},
    -		{"Header32.Entry", Field, 0},
    -		{"Header32.Flags", Field, 0},
    -		{"Header32.Ident", Field, 0},
    -		{"Header32.Machine", Field, 0},
    -		{"Header32.Phentsize", Field, 0},
    -		{"Header32.Phnum", Field, 0},
    -		{"Header32.Phoff", Field, 0},
    -		{"Header32.Shentsize", Field, 0},
    -		{"Header32.Shnum", Field, 0},
    -		{"Header32.Shoff", Field, 0},
    -		{"Header32.Shstrndx", Field, 0},
    -		{"Header32.Type", Field, 0},
    -		{"Header32.Version", Field, 0},
    -		{"Header64", Type, 0},
    -		{"Header64.Ehsize", Field, 0},
    -		{"Header64.Entry", Field, 0},
    -		{"Header64.Flags", Field, 0},
    -		{"Header64.Ident", Field, 0},
    -		{"Header64.Machine", Field, 0},
    -		{"Header64.Phentsize", Field, 0},
    -		{"Header64.Phnum", Field, 0},
    -		{"Header64.Phoff", Field, 0},
    -		{"Header64.Shentsize", Field, 0},
    -		{"Header64.Shnum", Field, 0},
    -		{"Header64.Shoff", Field, 0},
    -		{"Header64.Shstrndx", Field, 0},
    -		{"Header64.Type", Field, 0},
    -		{"Header64.Version", Field, 0},
    -		{"ImportedSymbol", Type, 0},
    -		{"ImportedSymbol.Library", Field, 0},
    -		{"ImportedSymbol.Name", Field, 0},
    -		{"ImportedSymbol.Version", Field, 0},
    -		{"Machine", Type, 0},
    -		{"NT_FPREGSET", Const, 0},
    -		{"NT_PRPSINFO", Const, 0},
    -		{"NT_PRSTATUS", Const, 0},
    -		{"NType", Type, 0},
    -		{"NewFile", Func, 0},
    -		{"OSABI", Type, 0},
    -		{"Open", Func, 0},
    -		{"PF_MASKOS", Const, 0},
    -		{"PF_MASKPROC", Const, 0},
    -		{"PF_R", Const, 0},
    -		{"PF_W", Const, 0},
    -		{"PF_X", Const, 0},
    -		{"PT_AARCH64_ARCHEXT", Const, 16},
    -		{"PT_AARCH64_UNWIND", Const, 16},
    -		{"PT_ARM_ARCHEXT", Const, 16},
    -		{"PT_ARM_EXIDX", Const, 16},
    -		{"PT_DYNAMIC", Const, 0},
    -		{"PT_GNU_EH_FRAME", Const, 16},
    -		{"PT_GNU_MBIND_HI", Const, 16},
    -		{"PT_GNU_MBIND_LO", Const, 16},
    -		{"PT_GNU_PROPERTY", Const, 16},
    -		{"PT_GNU_RELRO", Const, 16},
    -		{"PT_GNU_STACK", Const, 16},
    -		{"PT_HIOS", Const, 0},
    -		{"PT_HIPROC", Const, 0},
    -		{"PT_INTERP", Const, 0},
    -		{"PT_LOAD", Const, 0},
    -		{"PT_LOOS", Const, 0},
    -		{"PT_LOPROC", Const, 0},
    -		{"PT_MIPS_ABIFLAGS", Const, 16},
    -		{"PT_MIPS_OPTIONS", Const, 16},
    -		{"PT_MIPS_REGINFO", Const, 16},
    -		{"PT_MIPS_RTPROC", Const, 16},
    -		{"PT_NOTE", Const, 0},
    -		{"PT_NULL", Const, 0},
    -		{"PT_OPENBSD_BOOTDATA", Const, 16},
    -		{"PT_OPENBSD_NOBTCFI", Const, 23},
    -		{"PT_OPENBSD_RANDOMIZE", Const, 16},
    -		{"PT_OPENBSD_WXNEEDED", Const, 16},
    -		{"PT_PAX_FLAGS", Const, 16},
    -		{"PT_PHDR", Const, 0},
    -		{"PT_S390_PGSTE", Const, 16},
    -		{"PT_SHLIB", Const, 0},
    -		{"PT_SUNWSTACK", Const, 16},
    -		{"PT_SUNW_EH_FRAME", Const, 16},
    -		{"PT_TLS", Const, 0},
    -		{"Prog", Type, 0},
    -		{"Prog.ProgHeader", Field, 0},
    -		{"Prog.ReaderAt", Field, 0},
    -		{"Prog32", Type, 0},
    -		{"Prog32.Align", Field, 0},
    -		{"Prog32.Filesz", Field, 0},
    -		{"Prog32.Flags", Field, 0},
    -		{"Prog32.Memsz", Field, 0},
    -		{"Prog32.Off", Field, 0},
    -		{"Prog32.Paddr", Field, 0},
    -		{"Prog32.Type", Field, 0},
    -		{"Prog32.Vaddr", Field, 0},
    -		{"Prog64", Type, 0},
    -		{"Prog64.Align", Field, 0},
    -		{"Prog64.Filesz", Field, 0},
    -		{"Prog64.Flags", Field, 0},
    -		{"Prog64.Memsz", Field, 0},
    -		{"Prog64.Off", Field, 0},
    -		{"Prog64.Paddr", Field, 0},
    -		{"Prog64.Type", Field, 0},
    -		{"Prog64.Vaddr", Field, 0},
    -		{"ProgFlag", Type, 0},
    -		{"ProgHeader", Type, 0},
    -		{"ProgHeader.Align", Field, 0},
    -		{"ProgHeader.Filesz", Field, 0},
    -		{"ProgHeader.Flags", Field, 0},
    -		{"ProgHeader.Memsz", Field, 0},
    -		{"ProgHeader.Off", Field, 0},
    -		{"ProgHeader.Paddr", Field, 0},
    -		{"ProgHeader.Type", Field, 0},
    -		{"ProgHeader.Vaddr", Field, 0},
    -		{"ProgType", Type, 0},
    -		{"R_386", Type, 0},
    -		{"R_386_16", Const, 10},
    -		{"R_386_32", Const, 0},
    -		{"R_386_32PLT", Const, 10},
    -		{"R_386_8", Const, 10},
    -		{"R_386_COPY", Const, 0},
    -		{"R_386_GLOB_DAT", Const, 0},
    -		{"R_386_GOT32", Const, 0},
    -		{"R_386_GOT32X", Const, 10},
    -		{"R_386_GOTOFF", Const, 0},
    -		{"R_386_GOTPC", Const, 0},
    -		{"R_386_IRELATIVE", Const, 10},
    -		{"R_386_JMP_SLOT", Const, 0},
    -		{"R_386_NONE", Const, 0},
    -		{"R_386_PC16", Const, 10},
    -		{"R_386_PC32", Const, 0},
    -		{"R_386_PC8", Const, 10},
    -		{"R_386_PLT32", Const, 0},
    -		{"R_386_RELATIVE", Const, 0},
    -		{"R_386_SIZE32", Const, 10},
    -		{"R_386_TLS_DESC", Const, 10},
    -		{"R_386_TLS_DESC_CALL", Const, 10},
    -		{"R_386_TLS_DTPMOD32", Const, 0},
    -		{"R_386_TLS_DTPOFF32", Const, 0},
    -		{"R_386_TLS_GD", Const, 0},
    -		{"R_386_TLS_GD_32", Const, 0},
    -		{"R_386_TLS_GD_CALL", Const, 0},
    -		{"R_386_TLS_GD_POP", Const, 0},
    -		{"R_386_TLS_GD_PUSH", Const, 0},
    -		{"R_386_TLS_GOTDESC", Const, 10},
    -		{"R_386_TLS_GOTIE", Const, 0},
    -		{"R_386_TLS_IE", Const, 0},
    -		{"R_386_TLS_IE_32", Const, 0},
    -		{"R_386_TLS_LDM", Const, 0},
    -		{"R_386_TLS_LDM_32", Const, 0},
    -		{"R_386_TLS_LDM_CALL", Const, 0},
    -		{"R_386_TLS_LDM_POP", Const, 0},
    -		{"R_386_TLS_LDM_PUSH", Const, 0},
    -		{"R_386_TLS_LDO_32", Const, 0},
    -		{"R_386_TLS_LE", Const, 0},
    -		{"R_386_TLS_LE_32", Const, 0},
    -		{"R_386_TLS_TPOFF", Const, 0},
    -		{"R_386_TLS_TPOFF32", Const, 0},
    -		{"R_390", Type, 7},
    -		{"R_390_12", Const, 7},
    -		{"R_390_16", Const, 7},
    -		{"R_390_20", Const, 7},
    -		{"R_390_32", Const, 7},
    -		{"R_390_64", Const, 7},
    -		{"R_390_8", Const, 7},
    -		{"R_390_COPY", Const, 7},
    -		{"R_390_GLOB_DAT", Const, 7},
    -		{"R_390_GOT12", Const, 7},
    -		{"R_390_GOT16", Const, 7},
    -		{"R_390_GOT20", Const, 7},
    -		{"R_390_GOT32", Const, 7},
    -		{"R_390_GOT64", Const, 7},
    -		{"R_390_GOTENT", Const, 7},
    -		{"R_390_GOTOFF", Const, 7},
    -		{"R_390_GOTOFF16", Const, 7},
    -		{"R_390_GOTOFF64", Const, 7},
    -		{"R_390_GOTPC", Const, 7},
    -		{"R_390_GOTPCDBL", Const, 7},
    -		{"R_390_GOTPLT12", Const, 7},
    -		{"R_390_GOTPLT16", Const, 7},
    -		{"R_390_GOTPLT20", Const, 7},
    -		{"R_390_GOTPLT32", Const, 7},
    -		{"R_390_GOTPLT64", Const, 7},
    -		{"R_390_GOTPLTENT", Const, 7},
    -		{"R_390_GOTPLTOFF16", Const, 7},
    -		{"R_390_GOTPLTOFF32", Const, 7},
    -		{"R_390_GOTPLTOFF64", Const, 7},
    -		{"R_390_JMP_SLOT", Const, 7},
    -		{"R_390_NONE", Const, 7},
    -		{"R_390_PC16", Const, 7},
    -		{"R_390_PC16DBL", Const, 7},
    -		{"R_390_PC32", Const, 7},
    -		{"R_390_PC32DBL", Const, 7},
    -		{"R_390_PC64", Const, 7},
    -		{"R_390_PLT16DBL", Const, 7},
    -		{"R_390_PLT32", Const, 7},
    -		{"R_390_PLT32DBL", Const, 7},
    -		{"R_390_PLT64", Const, 7},
    -		{"R_390_RELATIVE", Const, 7},
    -		{"R_390_TLS_DTPMOD", Const, 7},
    -		{"R_390_TLS_DTPOFF", Const, 7},
    -		{"R_390_TLS_GD32", Const, 7},
    -		{"R_390_TLS_GD64", Const, 7},
    -		{"R_390_TLS_GDCALL", Const, 7},
    -		{"R_390_TLS_GOTIE12", Const, 7},
    -		{"R_390_TLS_GOTIE20", Const, 7},
    -		{"R_390_TLS_GOTIE32", Const, 7},
    -		{"R_390_TLS_GOTIE64", Const, 7},
    -		{"R_390_TLS_IE32", Const, 7},
    -		{"R_390_TLS_IE64", Const, 7},
    -		{"R_390_TLS_IEENT", Const, 7},
    -		{"R_390_TLS_LDCALL", Const, 7},
    -		{"R_390_TLS_LDM32", Const, 7},
    -		{"R_390_TLS_LDM64", Const, 7},
    -		{"R_390_TLS_LDO32", Const, 7},
    -		{"R_390_TLS_LDO64", Const, 7},
    -		{"R_390_TLS_LE32", Const, 7},
    -		{"R_390_TLS_LE64", Const, 7},
    -		{"R_390_TLS_LOAD", Const, 7},
    -		{"R_390_TLS_TPOFF", Const, 7},
    -		{"R_AARCH64", Type, 4},
    -		{"R_AARCH64_ABS16", Const, 4},
    -		{"R_AARCH64_ABS32", Const, 4},
    -		{"R_AARCH64_ABS64", Const, 4},
    -		{"R_AARCH64_ADD_ABS_LO12_NC", Const, 4},
    -		{"R_AARCH64_ADR_GOT_PAGE", Const, 4},
    -		{"R_AARCH64_ADR_PREL_LO21", Const, 4},
    -		{"R_AARCH64_ADR_PREL_PG_HI21", Const, 4},
    -		{"R_AARCH64_ADR_PREL_PG_HI21_NC", Const, 4},
    -		{"R_AARCH64_CALL26", Const, 4},
    -		{"R_AARCH64_CONDBR19", Const, 4},
    -		{"R_AARCH64_COPY", Const, 4},
    -		{"R_AARCH64_GLOB_DAT", Const, 4},
    -		{"R_AARCH64_GOT_LD_PREL19", Const, 4},
    -		{"R_AARCH64_IRELATIVE", Const, 4},
    -		{"R_AARCH64_JUMP26", Const, 4},
    -		{"R_AARCH64_JUMP_SLOT", Const, 4},
    -		{"R_AARCH64_LD64_GOTOFF_LO15", Const, 10},
    -		{"R_AARCH64_LD64_GOTPAGE_LO15", Const, 10},
    -		{"R_AARCH64_LD64_GOT_LO12_NC", Const, 4},
    -		{"R_AARCH64_LDST128_ABS_LO12_NC", Const, 4},
    -		{"R_AARCH64_LDST16_ABS_LO12_NC", Const, 4},
    -		{"R_AARCH64_LDST32_ABS_LO12_NC", Const, 4},
    -		{"R_AARCH64_LDST64_ABS_LO12_NC", Const, 4},
    -		{"R_AARCH64_LDST8_ABS_LO12_NC", Const, 4},
    -		{"R_AARCH64_LD_PREL_LO19", Const, 4},
    -		{"R_AARCH64_MOVW_SABS_G0", Const, 4},
    -		{"R_AARCH64_MOVW_SABS_G1", Const, 4},
    -		{"R_AARCH64_MOVW_SABS_G2", Const, 4},
    -		{"R_AARCH64_MOVW_UABS_G0", Const, 4},
    -		{"R_AARCH64_MOVW_UABS_G0_NC", Const, 4},
    -		{"R_AARCH64_MOVW_UABS_G1", Const, 4},
    -		{"R_AARCH64_MOVW_UABS_G1_NC", Const, 4},
    -		{"R_AARCH64_MOVW_UABS_G2", Const, 4},
    -		{"R_AARCH64_MOVW_UABS_G2_NC", Const, 4},
    -		{"R_AARCH64_MOVW_UABS_G3", Const, 4},
    -		{"R_AARCH64_NONE", Const, 4},
    -		{"R_AARCH64_NULL", Const, 4},
    -		{"R_AARCH64_P32_ABS16", Const, 4},
    -		{"R_AARCH64_P32_ABS32", Const, 4},
    -		{"R_AARCH64_P32_ADD_ABS_LO12_NC", Const, 4},
    -		{"R_AARCH64_P32_ADR_GOT_PAGE", Const, 4},
    -		{"R_AARCH64_P32_ADR_PREL_LO21", Const, 4},
    -		{"R_AARCH64_P32_ADR_PREL_PG_HI21", Const, 4},
    -		{"R_AARCH64_P32_CALL26", Const, 4},
    -		{"R_AARCH64_P32_CONDBR19", Const, 4},
    -		{"R_AARCH64_P32_COPY", Const, 4},
    -		{"R_AARCH64_P32_GLOB_DAT", Const, 4},
    -		{"R_AARCH64_P32_GOT_LD_PREL19", Const, 4},
    -		{"R_AARCH64_P32_IRELATIVE", Const, 4},
    -		{"R_AARCH64_P32_JUMP26", Const, 4},
    -		{"R_AARCH64_P32_JUMP_SLOT", Const, 4},
    -		{"R_AARCH64_P32_LD32_GOT_LO12_NC", Const, 4},
    -		{"R_AARCH64_P32_LDST128_ABS_LO12_NC", Const, 4},
    -		{"R_AARCH64_P32_LDST16_ABS_LO12_NC", Const, 4},
    -		{"R_AARCH64_P32_LDST32_ABS_LO12_NC", Const, 4},
    -		{"R_AARCH64_P32_LDST64_ABS_LO12_NC", Const, 4},
    -		{"R_AARCH64_P32_LDST8_ABS_LO12_NC", Const, 4},
    -		{"R_AARCH64_P32_LD_PREL_LO19", Const, 4},
    -		{"R_AARCH64_P32_MOVW_SABS_G0", Const, 4},
    -		{"R_AARCH64_P32_MOVW_UABS_G0", Const, 4},
    -		{"R_AARCH64_P32_MOVW_UABS_G0_NC", Const, 4},
    -		{"R_AARCH64_P32_MOVW_UABS_G1", Const, 4},
    -		{"R_AARCH64_P32_PREL16", Const, 4},
    -		{"R_AARCH64_P32_PREL32", Const, 4},
    -		{"R_AARCH64_P32_RELATIVE", Const, 4},
    -		{"R_AARCH64_P32_TLSDESC", Const, 4},
    -		{"R_AARCH64_P32_TLSDESC_ADD_LO12_NC", Const, 4},
    -		{"R_AARCH64_P32_TLSDESC_ADR_PAGE21", Const, 4},
    -		{"R_AARCH64_P32_TLSDESC_ADR_PREL21", Const, 4},
    -		{"R_AARCH64_P32_TLSDESC_CALL", Const, 4},
    -		{"R_AARCH64_P32_TLSDESC_LD32_LO12_NC", Const, 4},
    -		{"R_AARCH64_P32_TLSDESC_LD_PREL19", Const, 4},
    -		{"R_AARCH64_P32_TLSGD_ADD_LO12_NC", Const, 4},
    -		{"R_AARCH64_P32_TLSGD_ADR_PAGE21", Const, 4},
    -		{"R_AARCH64_P32_TLSIE_ADR_GOTTPREL_PAGE21", Const, 4},
    -		{"R_AARCH64_P32_TLSIE_LD32_GOTTPREL_LO12_NC", Const, 4},
    -		{"R_AARCH64_P32_TLSIE_LD_GOTTPREL_PREL19", Const, 4},
    -		{"R_AARCH64_P32_TLSLE_ADD_TPREL_HI12", Const, 4},
    -		{"R_AARCH64_P32_TLSLE_ADD_TPREL_LO12", Const, 4},
    -		{"R_AARCH64_P32_TLSLE_ADD_TPREL_LO12_NC", Const, 4},
    -		{"R_AARCH64_P32_TLSLE_MOVW_TPREL_G0", Const, 4},
    -		{"R_AARCH64_P32_TLSLE_MOVW_TPREL_G0_NC", Const, 4},
    -		{"R_AARCH64_P32_TLSLE_MOVW_TPREL_G1", Const, 4},
    -		{"R_AARCH64_P32_TLS_DTPMOD", Const, 4},
    -		{"R_AARCH64_P32_TLS_DTPREL", Const, 4},
    -		{"R_AARCH64_P32_TLS_TPREL", Const, 4},
    -		{"R_AARCH64_P32_TSTBR14", Const, 4},
    -		{"R_AARCH64_PREL16", Const, 4},
    -		{"R_AARCH64_PREL32", Const, 4},
    -		{"R_AARCH64_PREL64", Const, 4},
    -		{"R_AARCH64_RELATIVE", Const, 4},
    -		{"R_AARCH64_TLSDESC", Const, 4},
    -		{"R_AARCH64_TLSDESC_ADD", Const, 4},
    -		{"R_AARCH64_TLSDESC_ADD_LO12_NC", Const, 4},
    -		{"R_AARCH64_TLSDESC_ADR_PAGE21", Const, 4},
    -		{"R_AARCH64_TLSDESC_ADR_PREL21", Const, 4},
    -		{"R_AARCH64_TLSDESC_CALL", Const, 4},
    -		{"R_AARCH64_TLSDESC_LD64_LO12_NC", Const, 4},
    -		{"R_AARCH64_TLSDESC_LDR", Const, 4},
    -		{"R_AARCH64_TLSDESC_LD_PREL19", Const, 4},
    -		{"R_AARCH64_TLSDESC_OFF_G0_NC", Const, 4},
    -		{"R_AARCH64_TLSDESC_OFF_G1", Const, 4},
    -		{"R_AARCH64_TLSGD_ADD_LO12_NC", Const, 4},
    -		{"R_AARCH64_TLSGD_ADR_PAGE21", Const, 4},
    -		{"R_AARCH64_TLSGD_ADR_PREL21", Const, 10},
    -		{"R_AARCH64_TLSGD_MOVW_G0_NC", Const, 10},
    -		{"R_AARCH64_TLSGD_MOVW_G1", Const, 10},
    -		{"R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21", Const, 4},
    -		{"R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC", Const, 4},
    -		{"R_AARCH64_TLSIE_LD_GOTTPREL_PREL19", Const, 4},
    -		{"R_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC", Const, 4},
    -		{"R_AARCH64_TLSIE_MOVW_GOTTPREL_G1", Const, 4},
    -		{"R_AARCH64_TLSLD_ADR_PAGE21", Const, 10},
    -		{"R_AARCH64_TLSLD_ADR_PREL21", Const, 10},
    -		{"R_AARCH64_TLSLD_LDST128_DTPREL_LO12", Const, 10},
    -		{"R_AARCH64_TLSLD_LDST128_DTPREL_LO12_NC", Const, 10},
    -		{"R_AARCH64_TLSLE_ADD_TPREL_HI12", Const, 4},
    -		{"R_AARCH64_TLSLE_ADD_TPREL_LO12", Const, 4},
    -		{"R_AARCH64_TLSLE_ADD_TPREL_LO12_NC", Const, 4},
    -		{"R_AARCH64_TLSLE_LDST128_TPREL_LO12", Const, 10},
    -		{"R_AARCH64_TLSLE_LDST128_TPREL_LO12_NC", Const, 10},
    -		{"R_AARCH64_TLSLE_MOVW_TPREL_G0", Const, 4},
    -		{"R_AARCH64_TLSLE_MOVW_TPREL_G0_NC", Const, 4},
    -		{"R_AARCH64_TLSLE_MOVW_TPREL_G1", Const, 4},
    -		{"R_AARCH64_TLSLE_MOVW_TPREL_G1_NC", Const, 4},
    -		{"R_AARCH64_TLSLE_MOVW_TPREL_G2", Const, 4},
    -		{"R_AARCH64_TLS_DTPMOD64", Const, 4},
    -		{"R_AARCH64_TLS_DTPREL64", Const, 4},
    -		{"R_AARCH64_TLS_TPREL64", Const, 4},
    -		{"R_AARCH64_TSTBR14", Const, 4},
    -		{"R_ALPHA", Type, 0},
    -		{"R_ALPHA_BRADDR", Const, 0},
    -		{"R_ALPHA_COPY", Const, 0},
    -		{"R_ALPHA_GLOB_DAT", Const, 0},
    -		{"R_ALPHA_GPDISP", Const, 0},
    -		{"R_ALPHA_GPREL32", Const, 0},
    -		{"R_ALPHA_GPRELHIGH", Const, 0},
    -		{"R_ALPHA_GPRELLOW", Const, 0},
    -		{"R_ALPHA_GPVALUE", Const, 0},
    -		{"R_ALPHA_HINT", Const, 0},
    -		{"R_ALPHA_IMMED_BR_HI32", Const, 0},
    -		{"R_ALPHA_IMMED_GP_16", Const, 0},
    -		{"R_ALPHA_IMMED_GP_HI32", Const, 0},
    -		{"R_ALPHA_IMMED_LO32", Const, 0},
    -		{"R_ALPHA_IMMED_SCN_HI32", Const, 0},
    -		{"R_ALPHA_JMP_SLOT", Const, 0},
    -		{"R_ALPHA_LITERAL", Const, 0},
    -		{"R_ALPHA_LITUSE", Const, 0},
    -		{"R_ALPHA_NONE", Const, 0},
    -		{"R_ALPHA_OP_PRSHIFT", Const, 0},
    -		{"R_ALPHA_OP_PSUB", Const, 0},
    -		{"R_ALPHA_OP_PUSH", Const, 0},
    -		{"R_ALPHA_OP_STORE", Const, 0},
    -		{"R_ALPHA_REFLONG", Const, 0},
    -		{"R_ALPHA_REFQUAD", Const, 0},
    -		{"R_ALPHA_RELATIVE", Const, 0},
    -		{"R_ALPHA_SREL16", Const, 0},
    -		{"R_ALPHA_SREL32", Const, 0},
    -		{"R_ALPHA_SREL64", Const, 0},
    -		{"R_ARM", Type, 0},
    -		{"R_ARM_ABS12", Const, 0},
    -		{"R_ARM_ABS16", Const, 0},
    -		{"R_ARM_ABS32", Const, 0},
    -		{"R_ARM_ABS32_NOI", Const, 10},
    -		{"R_ARM_ABS8", Const, 0},
    -		{"R_ARM_ALU_PCREL_15_8", Const, 10},
    -		{"R_ARM_ALU_PCREL_23_15", Const, 10},
    -		{"R_ARM_ALU_PCREL_7_0", Const, 10},
    -		{"R_ARM_ALU_PC_G0", Const, 10},
    -		{"R_ARM_ALU_PC_G0_NC", Const, 10},
    -		{"R_ARM_ALU_PC_G1", Const, 10},
    -		{"R_ARM_ALU_PC_G1_NC", Const, 10},
    -		{"R_ARM_ALU_PC_G2", Const, 10},
    -		{"R_ARM_ALU_SBREL_19_12_NC", Const, 10},
    -		{"R_ARM_ALU_SBREL_27_20_CK", Const, 10},
    -		{"R_ARM_ALU_SB_G0", Const, 10},
    -		{"R_ARM_ALU_SB_G0_NC", Const, 10},
    -		{"R_ARM_ALU_SB_G1", Const, 10},
    -		{"R_ARM_ALU_SB_G1_NC", Const, 10},
    -		{"R_ARM_ALU_SB_G2", Const, 10},
    -		{"R_ARM_AMP_VCALL9", Const, 0},
    -		{"R_ARM_BASE_ABS", Const, 10},
    -		{"R_ARM_CALL", Const, 10},
    -		{"R_ARM_COPY", Const, 0},
    -		{"R_ARM_GLOB_DAT", Const, 0},
    -		{"R_ARM_GNU_VTENTRY", Const, 0},
    -		{"R_ARM_GNU_VTINHERIT", Const, 0},
    -		{"R_ARM_GOT32", Const, 0},
    -		{"R_ARM_GOTOFF", Const, 0},
    -		{"R_ARM_GOTOFF12", Const, 10},
    -		{"R_ARM_GOTPC", Const, 0},
    -		{"R_ARM_GOTRELAX", Const, 10},
    -		{"R_ARM_GOT_ABS", Const, 10},
    -		{"R_ARM_GOT_BREL12", Const, 10},
    -		{"R_ARM_GOT_PREL", Const, 10},
    -		{"R_ARM_IRELATIVE", Const, 10},
    -		{"R_ARM_JUMP24", Const, 10},
    -		{"R_ARM_JUMP_SLOT", Const, 0},
    -		{"R_ARM_LDC_PC_G0", Const, 10},
    -		{"R_ARM_LDC_PC_G1", Const, 10},
    -		{"R_ARM_LDC_PC_G2", Const, 10},
    -		{"R_ARM_LDC_SB_G0", Const, 10},
    -		{"R_ARM_LDC_SB_G1", Const, 10},
    -		{"R_ARM_LDC_SB_G2", Const, 10},
    -		{"R_ARM_LDRS_PC_G0", Const, 10},
    -		{"R_ARM_LDRS_PC_G1", Const, 10},
    -		{"R_ARM_LDRS_PC_G2", Const, 10},
    -		{"R_ARM_LDRS_SB_G0", Const, 10},
    -		{"R_ARM_LDRS_SB_G1", Const, 10},
    -		{"R_ARM_LDRS_SB_G2", Const, 10},
    -		{"R_ARM_LDR_PC_G1", Const, 10},
    -		{"R_ARM_LDR_PC_G2", Const, 10},
    -		{"R_ARM_LDR_SBREL_11_10_NC", Const, 10},
    -		{"R_ARM_LDR_SB_G0", Const, 10},
    -		{"R_ARM_LDR_SB_G1", Const, 10},
    -		{"R_ARM_LDR_SB_G2", Const, 10},
    -		{"R_ARM_ME_TOO", Const, 10},
    -		{"R_ARM_MOVT_ABS", Const, 10},
    -		{"R_ARM_MOVT_BREL", Const, 10},
    -		{"R_ARM_MOVT_PREL", Const, 10},
    -		{"R_ARM_MOVW_ABS_NC", Const, 10},
    -		{"R_ARM_MOVW_BREL", Const, 10},
    -		{"R_ARM_MOVW_BREL_NC", Const, 10},
    -		{"R_ARM_MOVW_PREL_NC", Const, 10},
    -		{"R_ARM_NONE", Const, 0},
    -		{"R_ARM_PC13", Const, 0},
    -		{"R_ARM_PC24", Const, 0},
    -		{"R_ARM_PLT32", Const, 0},
    -		{"R_ARM_PLT32_ABS", Const, 10},
    -		{"R_ARM_PREL31", Const, 10},
    -		{"R_ARM_PRIVATE_0", Const, 10},
    -		{"R_ARM_PRIVATE_1", Const, 10},
    -		{"R_ARM_PRIVATE_10", Const, 10},
    -		{"R_ARM_PRIVATE_11", Const, 10},
    -		{"R_ARM_PRIVATE_12", Const, 10},
    -		{"R_ARM_PRIVATE_13", Const, 10},
    -		{"R_ARM_PRIVATE_14", Const, 10},
    -		{"R_ARM_PRIVATE_15", Const, 10},
    -		{"R_ARM_PRIVATE_2", Const, 10},
    -		{"R_ARM_PRIVATE_3", Const, 10},
    -		{"R_ARM_PRIVATE_4", Const, 10},
    -		{"R_ARM_PRIVATE_5", Const, 10},
    -		{"R_ARM_PRIVATE_6", Const, 10},
    -		{"R_ARM_PRIVATE_7", Const, 10},
    -		{"R_ARM_PRIVATE_8", Const, 10},
    -		{"R_ARM_PRIVATE_9", Const, 10},
    -		{"R_ARM_RABS32", Const, 0},
    -		{"R_ARM_RBASE", Const, 0},
    -		{"R_ARM_REL32", Const, 0},
    -		{"R_ARM_REL32_NOI", Const, 10},
    -		{"R_ARM_RELATIVE", Const, 0},
    -		{"R_ARM_RPC24", Const, 0},
    -		{"R_ARM_RREL32", Const, 0},
    -		{"R_ARM_RSBREL32", Const, 0},
    -		{"R_ARM_RXPC25", Const, 10},
    -		{"R_ARM_SBREL31", Const, 10},
    -		{"R_ARM_SBREL32", Const, 0},
    -		{"R_ARM_SWI24", Const, 0},
    -		{"R_ARM_TARGET1", Const, 10},
    -		{"R_ARM_TARGET2", Const, 10},
    -		{"R_ARM_THM_ABS5", Const, 0},
    -		{"R_ARM_THM_ALU_ABS_G0_NC", Const, 10},
    -		{"R_ARM_THM_ALU_ABS_G1_NC", Const, 10},
    -		{"R_ARM_THM_ALU_ABS_G2_NC", Const, 10},
    -		{"R_ARM_THM_ALU_ABS_G3", Const, 10},
    -		{"R_ARM_THM_ALU_PREL_11_0", Const, 10},
    -		{"R_ARM_THM_GOT_BREL12", Const, 10},
    -		{"R_ARM_THM_JUMP11", Const, 10},
    -		{"R_ARM_THM_JUMP19", Const, 10},
    -		{"R_ARM_THM_JUMP24", Const, 10},
    -		{"R_ARM_THM_JUMP6", Const, 10},
    -		{"R_ARM_THM_JUMP8", Const, 10},
    -		{"R_ARM_THM_MOVT_ABS", Const, 10},
    -		{"R_ARM_THM_MOVT_BREL", Const, 10},
    -		{"R_ARM_THM_MOVT_PREL", Const, 10},
    -		{"R_ARM_THM_MOVW_ABS_NC", Const, 10},
    -		{"R_ARM_THM_MOVW_BREL", Const, 10},
    -		{"R_ARM_THM_MOVW_BREL_NC", Const, 10},
    -		{"R_ARM_THM_MOVW_PREL_NC", Const, 10},
    -		{"R_ARM_THM_PC12", Const, 10},
    -		{"R_ARM_THM_PC22", Const, 0},
    -		{"R_ARM_THM_PC8", Const, 0},
    -		{"R_ARM_THM_RPC22", Const, 0},
    -		{"R_ARM_THM_SWI8", Const, 0},
    -		{"R_ARM_THM_TLS_CALL", Const, 10},
    -		{"R_ARM_THM_TLS_DESCSEQ16", Const, 10},
    -		{"R_ARM_THM_TLS_DESCSEQ32", Const, 10},
    -		{"R_ARM_THM_XPC22", Const, 0},
    -		{"R_ARM_TLS_CALL", Const, 10},
    -		{"R_ARM_TLS_DESCSEQ", Const, 10},
    -		{"R_ARM_TLS_DTPMOD32", Const, 10},
    -		{"R_ARM_TLS_DTPOFF32", Const, 10},
    -		{"R_ARM_TLS_GD32", Const, 10},
    -		{"R_ARM_TLS_GOTDESC", Const, 10},
    -		{"R_ARM_TLS_IE12GP", Const, 10},
    -		{"R_ARM_TLS_IE32", Const, 10},
    -		{"R_ARM_TLS_LDM32", Const, 10},
    -		{"R_ARM_TLS_LDO12", Const, 10},
    -		{"R_ARM_TLS_LDO32", Const, 10},
    -		{"R_ARM_TLS_LE12", Const, 10},
    -		{"R_ARM_TLS_LE32", Const, 10},
    -		{"R_ARM_TLS_TPOFF32", Const, 10},
    -		{"R_ARM_V4BX", Const, 10},
    -		{"R_ARM_XPC25", Const, 0},
    -		{"R_INFO", Func, 0},
    -		{"R_INFO32", Func, 0},
    -		{"R_LARCH", Type, 19},
    -		{"R_LARCH_32", Const, 19},
    -		{"R_LARCH_32_PCREL", Const, 20},
    -		{"R_LARCH_64", Const, 19},
    -		{"R_LARCH_64_PCREL", Const, 22},
    -		{"R_LARCH_ABS64_HI12", Const, 20},
    -		{"R_LARCH_ABS64_LO20", Const, 20},
    -		{"R_LARCH_ABS_HI20", Const, 20},
    -		{"R_LARCH_ABS_LO12", Const, 20},
    -		{"R_LARCH_ADD16", Const, 19},
    -		{"R_LARCH_ADD24", Const, 19},
    -		{"R_LARCH_ADD32", Const, 19},
    -		{"R_LARCH_ADD6", Const, 22},
    -		{"R_LARCH_ADD64", Const, 19},
    -		{"R_LARCH_ADD8", Const, 19},
    -		{"R_LARCH_ADD_ULEB128", Const, 22},
    -		{"R_LARCH_ALIGN", Const, 22},
    -		{"R_LARCH_B16", Const, 20},
    -		{"R_LARCH_B21", Const, 20},
    -		{"R_LARCH_B26", Const, 20},
    -		{"R_LARCH_CFA", Const, 22},
    -		{"R_LARCH_COPY", Const, 19},
    -		{"R_LARCH_DELETE", Const, 22},
    -		{"R_LARCH_GNU_VTENTRY", Const, 20},
    -		{"R_LARCH_GNU_VTINHERIT", Const, 20},
    -		{"R_LARCH_GOT64_HI12", Const, 20},
    -		{"R_LARCH_GOT64_LO20", Const, 20},
    -		{"R_LARCH_GOT64_PC_HI12", Const, 20},
    -		{"R_LARCH_GOT64_PC_LO20", Const, 20},
    -		{"R_LARCH_GOT_HI20", Const, 20},
    -		{"R_LARCH_GOT_LO12", Const, 20},
    -		{"R_LARCH_GOT_PC_HI20", Const, 20},
    -		{"R_LARCH_GOT_PC_LO12", Const, 20},
    -		{"R_LARCH_IRELATIVE", Const, 19},
    -		{"R_LARCH_JUMP_SLOT", Const, 19},
    -		{"R_LARCH_MARK_LA", Const, 19},
    -		{"R_LARCH_MARK_PCREL", Const, 19},
    -		{"R_LARCH_NONE", Const, 19},
    -		{"R_LARCH_PCALA64_HI12", Const, 20},
    -		{"R_LARCH_PCALA64_LO20", Const, 20},
    -		{"R_LARCH_PCALA_HI20", Const, 20},
    -		{"R_LARCH_PCALA_LO12", Const, 20},
    -		{"R_LARCH_PCREL20_S2", Const, 22},
    -		{"R_LARCH_RELATIVE", Const, 19},
    -		{"R_LARCH_RELAX", Const, 20},
    -		{"R_LARCH_SOP_ADD", Const, 19},
    -		{"R_LARCH_SOP_AND", Const, 19},
    -		{"R_LARCH_SOP_ASSERT", Const, 19},
    -		{"R_LARCH_SOP_IF_ELSE", Const, 19},
    -		{"R_LARCH_SOP_NOT", Const, 19},
    -		{"R_LARCH_SOP_POP_32_S_0_10_10_16_S2", Const, 19},
    -		{"R_LARCH_SOP_POP_32_S_0_5_10_16_S2", Const, 19},
    -		{"R_LARCH_SOP_POP_32_S_10_12", Const, 19},
    -		{"R_LARCH_SOP_POP_32_S_10_16", Const, 19},
    -		{"R_LARCH_SOP_POP_32_S_10_16_S2", Const, 19},
    -		{"R_LARCH_SOP_POP_32_S_10_5", Const, 19},
    -		{"R_LARCH_SOP_POP_32_S_5_20", Const, 19},
    -		{"R_LARCH_SOP_POP_32_U", Const, 19},
    -		{"R_LARCH_SOP_POP_32_U_10_12", Const, 19},
    -		{"R_LARCH_SOP_PUSH_ABSOLUTE", Const, 19},
    -		{"R_LARCH_SOP_PUSH_DUP", Const, 19},
    -		{"R_LARCH_SOP_PUSH_GPREL", Const, 19},
    -		{"R_LARCH_SOP_PUSH_PCREL", Const, 19},
    -		{"R_LARCH_SOP_PUSH_PLT_PCREL", Const, 19},
    -		{"R_LARCH_SOP_PUSH_TLS_GD", Const, 19},
    -		{"R_LARCH_SOP_PUSH_TLS_GOT", Const, 19},
    -		{"R_LARCH_SOP_PUSH_TLS_TPREL", Const, 19},
    -		{"R_LARCH_SOP_SL", Const, 19},
    -		{"R_LARCH_SOP_SR", Const, 19},
    -		{"R_LARCH_SOP_SUB", Const, 19},
    -		{"R_LARCH_SUB16", Const, 19},
    -		{"R_LARCH_SUB24", Const, 19},
    -		{"R_LARCH_SUB32", Const, 19},
    -		{"R_LARCH_SUB6", Const, 22},
    -		{"R_LARCH_SUB64", Const, 19},
    -		{"R_LARCH_SUB8", Const, 19},
    -		{"R_LARCH_SUB_ULEB128", Const, 22},
    -		{"R_LARCH_TLS_DTPMOD32", Const, 19},
    -		{"R_LARCH_TLS_DTPMOD64", Const, 19},
    -		{"R_LARCH_TLS_DTPREL32", Const, 19},
    -		{"R_LARCH_TLS_DTPREL64", Const, 19},
    -		{"R_LARCH_TLS_GD_HI20", Const, 20},
    -		{"R_LARCH_TLS_GD_PC_HI20", Const, 20},
    -		{"R_LARCH_TLS_IE64_HI12", Const, 20},
    -		{"R_LARCH_TLS_IE64_LO20", Const, 20},
    -		{"R_LARCH_TLS_IE64_PC_HI12", Const, 20},
    -		{"R_LARCH_TLS_IE64_PC_LO20", Const, 20},
    -		{"R_LARCH_TLS_IE_HI20", Const, 20},
    -		{"R_LARCH_TLS_IE_LO12", Const, 20},
    -		{"R_LARCH_TLS_IE_PC_HI20", Const, 20},
    -		{"R_LARCH_TLS_IE_PC_LO12", Const, 20},
    -		{"R_LARCH_TLS_LD_HI20", Const, 20},
    -		{"R_LARCH_TLS_LD_PC_HI20", Const, 20},
    -		{"R_LARCH_TLS_LE64_HI12", Const, 20},
    -		{"R_LARCH_TLS_LE64_LO20", Const, 20},
    -		{"R_LARCH_TLS_LE_HI20", Const, 20},
    -		{"R_LARCH_TLS_LE_LO12", Const, 20},
    -		{"R_LARCH_TLS_TPREL32", Const, 19},
    -		{"R_LARCH_TLS_TPREL64", Const, 19},
    -		{"R_MIPS", Type, 6},
    -		{"R_MIPS_16", Const, 6},
    -		{"R_MIPS_26", Const, 6},
    -		{"R_MIPS_32", Const, 6},
    -		{"R_MIPS_64", Const, 6},
    -		{"R_MIPS_ADD_IMMEDIATE", Const, 6},
    -		{"R_MIPS_CALL16", Const, 6},
    -		{"R_MIPS_CALL_HI16", Const, 6},
    -		{"R_MIPS_CALL_LO16", Const, 6},
    -		{"R_MIPS_DELETE", Const, 6},
    -		{"R_MIPS_GOT16", Const, 6},
    -		{"R_MIPS_GOT_DISP", Const, 6},
    -		{"R_MIPS_GOT_HI16", Const, 6},
    -		{"R_MIPS_GOT_LO16", Const, 6},
    -		{"R_MIPS_GOT_OFST", Const, 6},
    -		{"R_MIPS_GOT_PAGE", Const, 6},
    -		{"R_MIPS_GPREL16", Const, 6},
    -		{"R_MIPS_GPREL32", Const, 6},
    -		{"R_MIPS_HI16", Const, 6},
    -		{"R_MIPS_HIGHER", Const, 6},
    -		{"R_MIPS_HIGHEST", Const, 6},
    -		{"R_MIPS_INSERT_A", Const, 6},
    -		{"R_MIPS_INSERT_B", Const, 6},
    -		{"R_MIPS_JALR", Const, 6},
    -		{"R_MIPS_LITERAL", Const, 6},
    -		{"R_MIPS_LO16", Const, 6},
    -		{"R_MIPS_NONE", Const, 6},
    -		{"R_MIPS_PC16", Const, 6},
    -		{"R_MIPS_PC32", Const, 22},
    -		{"R_MIPS_PJUMP", Const, 6},
    -		{"R_MIPS_REL16", Const, 6},
    -		{"R_MIPS_REL32", Const, 6},
    -		{"R_MIPS_RELGOT", Const, 6},
    -		{"R_MIPS_SCN_DISP", Const, 6},
    -		{"R_MIPS_SHIFT5", Const, 6},
    -		{"R_MIPS_SHIFT6", Const, 6},
    -		{"R_MIPS_SUB", Const, 6},
    -		{"R_MIPS_TLS_DTPMOD32", Const, 6},
    -		{"R_MIPS_TLS_DTPMOD64", Const, 6},
    -		{"R_MIPS_TLS_DTPREL32", Const, 6},
    -		{"R_MIPS_TLS_DTPREL64", Const, 6},
    -		{"R_MIPS_TLS_DTPREL_HI16", Const, 6},
    -		{"R_MIPS_TLS_DTPREL_LO16", Const, 6},
    -		{"R_MIPS_TLS_GD", Const, 6},
    -		{"R_MIPS_TLS_GOTTPREL", Const, 6},
    -		{"R_MIPS_TLS_LDM", Const, 6},
    -		{"R_MIPS_TLS_TPREL32", Const, 6},
    -		{"R_MIPS_TLS_TPREL64", Const, 6},
    -		{"R_MIPS_TLS_TPREL_HI16", Const, 6},
    -		{"R_MIPS_TLS_TPREL_LO16", Const, 6},
    -		{"R_PPC", Type, 0},
    -		{"R_PPC64", Type, 5},
    -		{"R_PPC64_ADDR14", Const, 5},
    -		{"R_PPC64_ADDR14_BRNTAKEN", Const, 5},
    -		{"R_PPC64_ADDR14_BRTAKEN", Const, 5},
    -		{"R_PPC64_ADDR16", Const, 5},
    -		{"R_PPC64_ADDR16_DS", Const, 5},
    -		{"R_PPC64_ADDR16_HA", Const, 5},
    -		{"R_PPC64_ADDR16_HI", Const, 5},
    -		{"R_PPC64_ADDR16_HIGH", Const, 10},
    -		{"R_PPC64_ADDR16_HIGHA", Const, 10},
    -		{"R_PPC64_ADDR16_HIGHER", Const, 5},
    -		{"R_PPC64_ADDR16_HIGHER34", Const, 20},
    -		{"R_PPC64_ADDR16_HIGHERA", Const, 5},
    -		{"R_PPC64_ADDR16_HIGHERA34", Const, 20},
    -		{"R_PPC64_ADDR16_HIGHEST", Const, 5},
    -		{"R_PPC64_ADDR16_HIGHEST34", Const, 20},
    -		{"R_PPC64_ADDR16_HIGHESTA", Const, 5},
    -		{"R_PPC64_ADDR16_HIGHESTA34", Const, 20},
    -		{"R_PPC64_ADDR16_LO", Const, 5},
    -		{"R_PPC64_ADDR16_LO_DS", Const, 5},
    -		{"R_PPC64_ADDR24", Const, 5},
    -		{"R_PPC64_ADDR32", Const, 5},
    -		{"R_PPC64_ADDR64", Const, 5},
    -		{"R_PPC64_ADDR64_LOCAL", Const, 10},
    -		{"R_PPC64_COPY", Const, 20},
    -		{"R_PPC64_D28", Const, 20},
    -		{"R_PPC64_D34", Const, 20},
    -		{"R_PPC64_D34_HA30", Const, 20},
    -		{"R_PPC64_D34_HI30", Const, 20},
    -		{"R_PPC64_D34_LO", Const, 20},
    -		{"R_PPC64_DTPMOD64", Const, 5},
    -		{"R_PPC64_DTPREL16", Const, 5},
    -		{"R_PPC64_DTPREL16_DS", Const, 5},
    -		{"R_PPC64_DTPREL16_HA", Const, 5},
    -		{"R_PPC64_DTPREL16_HI", Const, 5},
    -		{"R_PPC64_DTPREL16_HIGH", Const, 10},
    -		{"R_PPC64_DTPREL16_HIGHA", Const, 10},
    -		{"R_PPC64_DTPREL16_HIGHER", Const, 5},
    -		{"R_PPC64_DTPREL16_HIGHERA", Const, 5},
    -		{"R_PPC64_DTPREL16_HIGHEST", Const, 5},
    -		{"R_PPC64_DTPREL16_HIGHESTA", Const, 5},
    -		{"R_PPC64_DTPREL16_LO", Const, 5},
    -		{"R_PPC64_DTPREL16_LO_DS", Const, 5},
    -		{"R_PPC64_DTPREL34", Const, 20},
    -		{"R_PPC64_DTPREL64", Const, 5},
    -		{"R_PPC64_ENTRY", Const, 10},
    -		{"R_PPC64_GLOB_DAT", Const, 20},
    -		{"R_PPC64_GNU_VTENTRY", Const, 20},
    -		{"R_PPC64_GNU_VTINHERIT", Const, 20},
    -		{"R_PPC64_GOT16", Const, 5},
    -		{"R_PPC64_GOT16_DS", Const, 5},
    -		{"R_PPC64_GOT16_HA", Const, 5},
    -		{"R_PPC64_GOT16_HI", Const, 5},
    -		{"R_PPC64_GOT16_LO", Const, 5},
    -		{"R_PPC64_GOT16_LO_DS", Const, 5},
    -		{"R_PPC64_GOT_DTPREL16_DS", Const, 5},
    -		{"R_PPC64_GOT_DTPREL16_HA", Const, 5},
    -		{"R_PPC64_GOT_DTPREL16_HI", Const, 5},
    -		{"R_PPC64_GOT_DTPREL16_LO_DS", Const, 5},
    -		{"R_PPC64_GOT_DTPREL_PCREL34", Const, 20},
    -		{"R_PPC64_GOT_PCREL34", Const, 20},
    -		{"R_PPC64_GOT_TLSGD16", Const, 5},
    -		{"R_PPC64_GOT_TLSGD16_HA", Const, 5},
    -		{"R_PPC64_GOT_TLSGD16_HI", Const, 5},
    -		{"R_PPC64_GOT_TLSGD16_LO", Const, 5},
    -		{"R_PPC64_GOT_TLSGD_PCREL34", Const, 20},
    -		{"R_PPC64_GOT_TLSLD16", Const, 5},
    -		{"R_PPC64_GOT_TLSLD16_HA", Const, 5},
    -		{"R_PPC64_GOT_TLSLD16_HI", Const, 5},
    -		{"R_PPC64_GOT_TLSLD16_LO", Const, 5},
    -		{"R_PPC64_GOT_TLSLD_PCREL34", Const, 20},
    -		{"R_PPC64_GOT_TPREL16_DS", Const, 5},
    -		{"R_PPC64_GOT_TPREL16_HA", Const, 5},
    -		{"R_PPC64_GOT_TPREL16_HI", Const, 5},
    -		{"R_PPC64_GOT_TPREL16_LO_DS", Const, 5},
    -		{"R_PPC64_GOT_TPREL_PCREL34", Const, 20},
    -		{"R_PPC64_IRELATIVE", Const, 10},
    -		{"R_PPC64_JMP_IREL", Const, 10},
    -		{"R_PPC64_JMP_SLOT", Const, 5},
    -		{"R_PPC64_NONE", Const, 5},
    -		{"R_PPC64_PCREL28", Const, 20},
    -		{"R_PPC64_PCREL34", Const, 20},
    -		{"R_PPC64_PCREL_OPT", Const, 20},
    -		{"R_PPC64_PLT16_HA", Const, 20},
    -		{"R_PPC64_PLT16_HI", Const, 20},
    -		{"R_PPC64_PLT16_LO", Const, 20},
    -		{"R_PPC64_PLT16_LO_DS", Const, 10},
    -		{"R_PPC64_PLT32", Const, 20},
    -		{"R_PPC64_PLT64", Const, 20},
    -		{"R_PPC64_PLTCALL", Const, 20},
    -		{"R_PPC64_PLTCALL_NOTOC", Const, 20},
    -		{"R_PPC64_PLTGOT16", Const, 10},
    -		{"R_PPC64_PLTGOT16_DS", Const, 10},
    -		{"R_PPC64_PLTGOT16_HA", Const, 10},
    -		{"R_PPC64_PLTGOT16_HI", Const, 10},
    -		{"R_PPC64_PLTGOT16_LO", Const, 10},
    -		{"R_PPC64_PLTGOT_LO_DS", Const, 10},
    -		{"R_PPC64_PLTREL32", Const, 20},
    -		{"R_PPC64_PLTREL64", Const, 20},
    -		{"R_PPC64_PLTSEQ", Const, 20},
    -		{"R_PPC64_PLTSEQ_NOTOC", Const, 20},
    -		{"R_PPC64_PLT_PCREL34", Const, 20},
    -		{"R_PPC64_PLT_PCREL34_NOTOC", Const, 20},
    -		{"R_PPC64_REL14", Const, 5},
    -		{"R_PPC64_REL14_BRNTAKEN", Const, 5},
    -		{"R_PPC64_REL14_BRTAKEN", Const, 5},
    -		{"R_PPC64_REL16", Const, 5},
    -		{"R_PPC64_REL16DX_HA", Const, 10},
    -		{"R_PPC64_REL16_HA", Const, 5},
    -		{"R_PPC64_REL16_HI", Const, 5},
    -		{"R_PPC64_REL16_HIGH", Const, 20},
    -		{"R_PPC64_REL16_HIGHA", Const, 20},
    -		{"R_PPC64_REL16_HIGHER", Const, 20},
    -		{"R_PPC64_REL16_HIGHER34", Const, 20},
    -		{"R_PPC64_REL16_HIGHERA", Const, 20},
    -		{"R_PPC64_REL16_HIGHERA34", Const, 20},
    -		{"R_PPC64_REL16_HIGHEST", Const, 20},
    -		{"R_PPC64_REL16_HIGHEST34", Const, 20},
    -		{"R_PPC64_REL16_HIGHESTA", Const, 20},
    -		{"R_PPC64_REL16_HIGHESTA34", Const, 20},
    -		{"R_PPC64_REL16_LO", Const, 5},
    -		{"R_PPC64_REL24", Const, 5},
    -		{"R_PPC64_REL24_NOTOC", Const, 10},
    -		{"R_PPC64_REL24_P9NOTOC", Const, 21},
    -		{"R_PPC64_REL30", Const, 20},
    -		{"R_PPC64_REL32", Const, 5},
    -		{"R_PPC64_REL64", Const, 5},
    -		{"R_PPC64_RELATIVE", Const, 18},
    -		{"R_PPC64_SECTOFF", Const, 20},
    -		{"R_PPC64_SECTOFF_DS", Const, 10},
    -		{"R_PPC64_SECTOFF_HA", Const, 20},
    -		{"R_PPC64_SECTOFF_HI", Const, 20},
    -		{"R_PPC64_SECTOFF_LO", Const, 20},
    -		{"R_PPC64_SECTOFF_LO_DS", Const, 10},
    -		{"R_PPC64_TLS", Const, 5},
    -		{"R_PPC64_TLSGD", Const, 5},
    -		{"R_PPC64_TLSLD", Const, 5},
    -		{"R_PPC64_TOC", Const, 5},
    -		{"R_PPC64_TOC16", Const, 5},
    -		{"R_PPC64_TOC16_DS", Const, 5},
    -		{"R_PPC64_TOC16_HA", Const, 5},
    -		{"R_PPC64_TOC16_HI", Const, 5},
    -		{"R_PPC64_TOC16_LO", Const, 5},
    -		{"R_PPC64_TOC16_LO_DS", Const, 5},
    -		{"R_PPC64_TOCSAVE", Const, 10},
    -		{"R_PPC64_TPREL16", Const, 5},
    -		{"R_PPC64_TPREL16_DS", Const, 5},
    -		{"R_PPC64_TPREL16_HA", Const, 5},
    -		{"R_PPC64_TPREL16_HI", Const, 5},
    -		{"R_PPC64_TPREL16_HIGH", Const, 10},
    -		{"R_PPC64_TPREL16_HIGHA", Const, 10},
    -		{"R_PPC64_TPREL16_HIGHER", Const, 5},
    -		{"R_PPC64_TPREL16_HIGHERA", Const, 5},
    -		{"R_PPC64_TPREL16_HIGHEST", Const, 5},
    -		{"R_PPC64_TPREL16_HIGHESTA", Const, 5},
    -		{"R_PPC64_TPREL16_LO", Const, 5},
    -		{"R_PPC64_TPREL16_LO_DS", Const, 5},
    -		{"R_PPC64_TPREL34", Const, 20},
    -		{"R_PPC64_TPREL64", Const, 5},
    -		{"R_PPC64_UADDR16", Const, 20},
    -		{"R_PPC64_UADDR32", Const, 20},
    -		{"R_PPC64_UADDR64", Const, 20},
    -		{"R_PPC_ADDR14", Const, 0},
    -		{"R_PPC_ADDR14_BRNTAKEN", Const, 0},
    -		{"R_PPC_ADDR14_BRTAKEN", Const, 0},
    -		{"R_PPC_ADDR16", Const, 0},
    -		{"R_PPC_ADDR16_HA", Const, 0},
    -		{"R_PPC_ADDR16_HI", Const, 0},
    -		{"R_PPC_ADDR16_LO", Const, 0},
    -		{"R_PPC_ADDR24", Const, 0},
    -		{"R_PPC_ADDR32", Const, 0},
    -		{"R_PPC_COPY", Const, 0},
    -		{"R_PPC_DTPMOD32", Const, 0},
    -		{"R_PPC_DTPREL16", Const, 0},
    -		{"R_PPC_DTPREL16_HA", Const, 0},
    -		{"R_PPC_DTPREL16_HI", Const, 0},
    -		{"R_PPC_DTPREL16_LO", Const, 0},
    -		{"R_PPC_DTPREL32", Const, 0},
    -		{"R_PPC_EMB_BIT_FLD", Const, 0},
    -		{"R_PPC_EMB_MRKREF", Const, 0},
    -		{"R_PPC_EMB_NADDR16", Const, 0},
    -		{"R_PPC_EMB_NADDR16_HA", Const, 0},
    -		{"R_PPC_EMB_NADDR16_HI", Const, 0},
    -		{"R_PPC_EMB_NADDR16_LO", Const, 0},
    -		{"R_PPC_EMB_NADDR32", Const, 0},
    -		{"R_PPC_EMB_RELSDA", Const, 0},
    -		{"R_PPC_EMB_RELSEC16", Const, 0},
    -		{"R_PPC_EMB_RELST_HA", Const, 0},
    -		{"R_PPC_EMB_RELST_HI", Const, 0},
    -		{"R_PPC_EMB_RELST_LO", Const, 0},
    -		{"R_PPC_EMB_SDA21", Const, 0},
    -		{"R_PPC_EMB_SDA2I16", Const, 0},
    -		{"R_PPC_EMB_SDA2REL", Const, 0},
    -		{"R_PPC_EMB_SDAI16", Const, 0},
    -		{"R_PPC_GLOB_DAT", Const, 0},
    -		{"R_PPC_GOT16", Const, 0},
    -		{"R_PPC_GOT16_HA", Const, 0},
    -		{"R_PPC_GOT16_HI", Const, 0},
    -		{"R_PPC_GOT16_LO", Const, 0},
    -		{"R_PPC_GOT_TLSGD16", Const, 0},
    -		{"R_PPC_GOT_TLSGD16_HA", Const, 0},
    -		{"R_PPC_GOT_TLSGD16_HI", Const, 0},
    -		{"R_PPC_GOT_TLSGD16_LO", Const, 0},
    -		{"R_PPC_GOT_TLSLD16", Const, 0},
    -		{"R_PPC_GOT_TLSLD16_HA", Const, 0},
    -		{"R_PPC_GOT_TLSLD16_HI", Const, 0},
    -		{"R_PPC_GOT_TLSLD16_LO", Const, 0},
    -		{"R_PPC_GOT_TPREL16", Const, 0},
    -		{"R_PPC_GOT_TPREL16_HA", Const, 0},
    -		{"R_PPC_GOT_TPREL16_HI", Const, 0},
    -		{"R_PPC_GOT_TPREL16_LO", Const, 0},
    -		{"R_PPC_JMP_SLOT", Const, 0},
    -		{"R_PPC_LOCAL24PC", Const, 0},
    -		{"R_PPC_NONE", Const, 0},
    -		{"R_PPC_PLT16_HA", Const, 0},
    -		{"R_PPC_PLT16_HI", Const, 0},
    -		{"R_PPC_PLT16_LO", Const, 0},
    -		{"R_PPC_PLT32", Const, 0},
    -		{"R_PPC_PLTREL24", Const, 0},
    -		{"R_PPC_PLTREL32", Const, 0},
    -		{"R_PPC_REL14", Const, 0},
    -		{"R_PPC_REL14_BRNTAKEN", Const, 0},
    -		{"R_PPC_REL14_BRTAKEN", Const, 0},
    -		{"R_PPC_REL24", Const, 0},
    -		{"R_PPC_REL32", Const, 0},
    -		{"R_PPC_RELATIVE", Const, 0},
    -		{"R_PPC_SDAREL16", Const, 0},
    -		{"R_PPC_SECTOFF", Const, 0},
    -		{"R_PPC_SECTOFF_HA", Const, 0},
    -		{"R_PPC_SECTOFF_HI", Const, 0},
    -		{"R_PPC_SECTOFF_LO", Const, 0},
    -		{"R_PPC_TLS", Const, 0},
    -		{"R_PPC_TPREL16", Const, 0},
    -		{"R_PPC_TPREL16_HA", Const, 0},
    -		{"R_PPC_TPREL16_HI", Const, 0},
    -		{"R_PPC_TPREL16_LO", Const, 0},
    -		{"R_PPC_TPREL32", Const, 0},
    -		{"R_PPC_UADDR16", Const, 0},
    -		{"R_PPC_UADDR32", Const, 0},
    -		{"R_RISCV", Type, 11},
    -		{"R_RISCV_32", Const, 11},
    -		{"R_RISCV_32_PCREL", Const, 12},
    -		{"R_RISCV_64", Const, 11},
    -		{"R_RISCV_ADD16", Const, 11},
    -		{"R_RISCV_ADD32", Const, 11},
    -		{"R_RISCV_ADD64", Const, 11},
    -		{"R_RISCV_ADD8", Const, 11},
    -		{"R_RISCV_ALIGN", Const, 11},
    -		{"R_RISCV_BRANCH", Const, 11},
    -		{"R_RISCV_CALL", Const, 11},
    -		{"R_RISCV_CALL_PLT", Const, 11},
    -		{"R_RISCV_COPY", Const, 11},
    -		{"R_RISCV_GNU_VTENTRY", Const, 11},
    -		{"R_RISCV_GNU_VTINHERIT", Const, 11},
    -		{"R_RISCV_GOT_HI20", Const, 11},
    -		{"R_RISCV_GPREL_I", Const, 11},
    -		{"R_RISCV_GPREL_S", Const, 11},
    -		{"R_RISCV_HI20", Const, 11},
    -		{"R_RISCV_JAL", Const, 11},
    -		{"R_RISCV_JUMP_SLOT", Const, 11},
    -		{"R_RISCV_LO12_I", Const, 11},
    -		{"R_RISCV_LO12_S", Const, 11},
    -		{"R_RISCV_NONE", Const, 11},
    -		{"R_RISCV_PCREL_HI20", Const, 11},
    -		{"R_RISCV_PCREL_LO12_I", Const, 11},
    -		{"R_RISCV_PCREL_LO12_S", Const, 11},
    -		{"R_RISCV_RELATIVE", Const, 11},
    -		{"R_RISCV_RELAX", Const, 11},
    -		{"R_RISCV_RVC_BRANCH", Const, 11},
    -		{"R_RISCV_RVC_JUMP", Const, 11},
    -		{"R_RISCV_RVC_LUI", Const, 11},
    -		{"R_RISCV_SET16", Const, 11},
    -		{"R_RISCV_SET32", Const, 11},
    -		{"R_RISCV_SET6", Const, 11},
    -		{"R_RISCV_SET8", Const, 11},
    -		{"R_RISCV_SUB16", Const, 11},
    -		{"R_RISCV_SUB32", Const, 11},
    -		{"R_RISCV_SUB6", Const, 11},
    -		{"R_RISCV_SUB64", Const, 11},
    -		{"R_RISCV_SUB8", Const, 11},
    -		{"R_RISCV_TLS_DTPMOD32", Const, 11},
    -		{"R_RISCV_TLS_DTPMOD64", Const, 11},
    -		{"R_RISCV_TLS_DTPREL32", Const, 11},
    -		{"R_RISCV_TLS_DTPREL64", Const, 11},
    -		{"R_RISCV_TLS_GD_HI20", Const, 11},
    -		{"R_RISCV_TLS_GOT_HI20", Const, 11},
    -		{"R_RISCV_TLS_TPREL32", Const, 11},
    -		{"R_RISCV_TLS_TPREL64", Const, 11},
    -		{"R_RISCV_TPREL_ADD", Const, 11},
    -		{"R_RISCV_TPREL_HI20", Const, 11},
    -		{"R_RISCV_TPREL_I", Const, 11},
    -		{"R_RISCV_TPREL_LO12_I", Const, 11},
    -		{"R_RISCV_TPREL_LO12_S", Const, 11},
    -		{"R_RISCV_TPREL_S", Const, 11},
    -		{"R_SPARC", Type, 0},
    -		{"R_SPARC_10", Const, 0},
    -		{"R_SPARC_11", Const, 0},
    -		{"R_SPARC_13", Const, 0},
    -		{"R_SPARC_16", Const, 0},
    -		{"R_SPARC_22", Const, 0},
    -		{"R_SPARC_32", Const, 0},
    -		{"R_SPARC_5", Const, 0},
    -		{"R_SPARC_6", Const, 0},
    -		{"R_SPARC_64", Const, 0},
    -		{"R_SPARC_7", Const, 0},
    -		{"R_SPARC_8", Const, 0},
    -		{"R_SPARC_COPY", Const, 0},
    -		{"R_SPARC_DISP16", Const, 0},
    -		{"R_SPARC_DISP32", Const, 0},
    -		{"R_SPARC_DISP64", Const, 0},
    -		{"R_SPARC_DISP8", Const, 0},
    -		{"R_SPARC_GLOB_DAT", Const, 0},
    -		{"R_SPARC_GLOB_JMP", Const, 0},
    -		{"R_SPARC_GOT10", Const, 0},
    -		{"R_SPARC_GOT13", Const, 0},
    -		{"R_SPARC_GOT22", Const, 0},
    -		{"R_SPARC_H44", Const, 0},
    -		{"R_SPARC_HH22", Const, 0},
    -		{"R_SPARC_HI22", Const, 0},
    -		{"R_SPARC_HIPLT22", Const, 0},
    -		{"R_SPARC_HIX22", Const, 0},
    -		{"R_SPARC_HM10", Const, 0},
    -		{"R_SPARC_JMP_SLOT", Const, 0},
    -		{"R_SPARC_L44", Const, 0},
    -		{"R_SPARC_LM22", Const, 0},
    -		{"R_SPARC_LO10", Const, 0},
    -		{"R_SPARC_LOPLT10", Const, 0},
    -		{"R_SPARC_LOX10", Const, 0},
    -		{"R_SPARC_M44", Const, 0},
    -		{"R_SPARC_NONE", Const, 0},
    -		{"R_SPARC_OLO10", Const, 0},
    -		{"R_SPARC_PC10", Const, 0},
    -		{"R_SPARC_PC22", Const, 0},
    -		{"R_SPARC_PCPLT10", Const, 0},
    -		{"R_SPARC_PCPLT22", Const, 0},
    -		{"R_SPARC_PCPLT32", Const, 0},
    -		{"R_SPARC_PC_HH22", Const, 0},
    -		{"R_SPARC_PC_HM10", Const, 0},
    -		{"R_SPARC_PC_LM22", Const, 0},
    -		{"R_SPARC_PLT32", Const, 0},
    -		{"R_SPARC_PLT64", Const, 0},
    -		{"R_SPARC_REGISTER", Const, 0},
    -		{"R_SPARC_RELATIVE", Const, 0},
    -		{"R_SPARC_UA16", Const, 0},
    -		{"R_SPARC_UA32", Const, 0},
    -		{"R_SPARC_UA64", Const, 0},
    -		{"R_SPARC_WDISP16", Const, 0},
    -		{"R_SPARC_WDISP19", Const, 0},
    -		{"R_SPARC_WDISP22", Const, 0},
    -		{"R_SPARC_WDISP30", Const, 0},
    -		{"R_SPARC_WPLT30", Const, 0},
    -		{"R_SYM32", Func, 0},
    -		{"R_SYM64", Func, 0},
    -		{"R_TYPE32", Func, 0},
    -		{"R_TYPE64", Func, 0},
    -		{"R_X86_64", Type, 0},
    -		{"R_X86_64_16", Const, 0},
    -		{"R_X86_64_32", Const, 0},
    -		{"R_X86_64_32S", Const, 0},
    -		{"R_X86_64_64", Const, 0},
    -		{"R_X86_64_8", Const, 0},
    -		{"R_X86_64_COPY", Const, 0},
    -		{"R_X86_64_DTPMOD64", Const, 0},
    -		{"R_X86_64_DTPOFF32", Const, 0},
    -		{"R_X86_64_DTPOFF64", Const, 0},
    -		{"R_X86_64_GLOB_DAT", Const, 0},
    -		{"R_X86_64_GOT32", Const, 0},
    -		{"R_X86_64_GOT64", Const, 10},
    -		{"R_X86_64_GOTOFF64", Const, 10},
    -		{"R_X86_64_GOTPC32", Const, 10},
    -		{"R_X86_64_GOTPC32_TLSDESC", Const, 10},
    -		{"R_X86_64_GOTPC64", Const, 10},
    -		{"R_X86_64_GOTPCREL", Const, 0},
    -		{"R_X86_64_GOTPCREL64", Const, 10},
    -		{"R_X86_64_GOTPCRELX", Const, 10},
    -		{"R_X86_64_GOTPLT64", Const, 10},
    -		{"R_X86_64_GOTTPOFF", Const, 0},
    -		{"R_X86_64_IRELATIVE", Const, 10},
    -		{"R_X86_64_JMP_SLOT", Const, 0},
    -		{"R_X86_64_NONE", Const, 0},
    -		{"R_X86_64_PC16", Const, 0},
    -		{"R_X86_64_PC32", Const, 0},
    -		{"R_X86_64_PC32_BND", Const, 10},
    -		{"R_X86_64_PC64", Const, 10},
    -		{"R_X86_64_PC8", Const, 0},
    -		{"R_X86_64_PLT32", Const, 0},
    -		{"R_X86_64_PLT32_BND", Const, 10},
    -		{"R_X86_64_PLTOFF64", Const, 10},
    -		{"R_X86_64_RELATIVE", Const, 0},
    -		{"R_X86_64_RELATIVE64", Const, 10},
    -		{"R_X86_64_REX_GOTPCRELX", Const, 10},
    -		{"R_X86_64_SIZE32", Const, 10},
    -		{"R_X86_64_SIZE64", Const, 10},
    -		{"R_X86_64_TLSDESC", Const, 10},
    -		{"R_X86_64_TLSDESC_CALL", Const, 10},
    -		{"R_X86_64_TLSGD", Const, 0},
    -		{"R_X86_64_TLSLD", Const, 0},
    -		{"R_X86_64_TPOFF32", Const, 0},
    -		{"R_X86_64_TPOFF64", Const, 0},
    -		{"Rel32", Type, 0},
    -		{"Rel32.Info", Field, 0},
    -		{"Rel32.Off", Field, 0},
    -		{"Rel64", Type, 0},
    -		{"Rel64.Info", Field, 0},
    -		{"Rel64.Off", Field, 0},
    -		{"Rela32", Type, 0},
    -		{"Rela32.Addend", Field, 0},
    -		{"Rela32.Info", Field, 0},
    -		{"Rela32.Off", Field, 0},
    -		{"Rela64", Type, 0},
    -		{"Rela64.Addend", Field, 0},
    -		{"Rela64.Info", Field, 0},
    -		{"Rela64.Off", Field, 0},
    -		{"SHF_ALLOC", Const, 0},
    -		{"SHF_COMPRESSED", Const, 6},
    -		{"SHF_EXECINSTR", Const, 0},
    -		{"SHF_GROUP", Const, 0},
    -		{"SHF_INFO_LINK", Const, 0},
    -		{"SHF_LINK_ORDER", Const, 0},
    -		{"SHF_MASKOS", Const, 0},
    -		{"SHF_MASKPROC", Const, 0},
    -		{"SHF_MERGE", Const, 0},
    -		{"SHF_OS_NONCONFORMING", Const, 0},
    -		{"SHF_STRINGS", Const, 0},
    -		{"SHF_TLS", Const, 0},
    -		{"SHF_WRITE", Const, 0},
    -		{"SHN_ABS", Const, 0},
    -		{"SHN_COMMON", Const, 0},
    -		{"SHN_HIOS", Const, 0},
    -		{"SHN_HIPROC", Const, 0},
    -		{"SHN_HIRESERVE", Const, 0},
    -		{"SHN_LOOS", Const, 0},
    -		{"SHN_LOPROC", Const, 0},
    -		{"SHN_LORESERVE", Const, 0},
    -		{"SHN_UNDEF", Const, 0},
    -		{"SHN_XINDEX", Const, 0},
    -		{"SHT_DYNAMIC", Const, 0},
    -		{"SHT_DYNSYM", Const, 0},
    -		{"SHT_FINI_ARRAY", Const, 0},
    -		{"SHT_GNU_ATTRIBUTES", Const, 0},
    -		{"SHT_GNU_HASH", Const, 0},
    -		{"SHT_GNU_LIBLIST", Const, 0},
    -		{"SHT_GNU_VERDEF", Const, 0},
    -		{"SHT_GNU_VERNEED", Const, 0},
    -		{"SHT_GNU_VERSYM", Const, 0},
    -		{"SHT_GROUP", Const, 0},
    -		{"SHT_HASH", Const, 0},
    -		{"SHT_HIOS", Const, 0},
    -		{"SHT_HIPROC", Const, 0},
    -		{"SHT_HIUSER", Const, 0},
    -		{"SHT_INIT_ARRAY", Const, 0},
    -		{"SHT_LOOS", Const, 0},
    -		{"SHT_LOPROC", Const, 0},
    -		{"SHT_LOUSER", Const, 0},
    -		{"SHT_MIPS_ABIFLAGS", Const, 17},
    -		{"SHT_NOBITS", Const, 0},
    -		{"SHT_NOTE", Const, 0},
    -		{"SHT_NULL", Const, 0},
    -		{"SHT_PREINIT_ARRAY", Const, 0},
    -		{"SHT_PROGBITS", Const, 0},
    -		{"SHT_REL", Const, 0},
    -		{"SHT_RELA", Const, 0},
    -		{"SHT_SHLIB", Const, 0},
    -		{"SHT_STRTAB", Const, 0},
    -		{"SHT_SYMTAB", Const, 0},
    -		{"SHT_SYMTAB_SHNDX", Const, 0},
    -		{"STB_GLOBAL", Const, 0},
    -		{"STB_HIOS", Const, 0},
    -		{"STB_HIPROC", Const, 0},
    -		{"STB_LOCAL", Const, 0},
    -		{"STB_LOOS", Const, 0},
    -		{"STB_LOPROC", Const, 0},
    -		{"STB_WEAK", Const, 0},
    -		{"STT_COMMON", Const, 0},
    -		{"STT_FILE", Const, 0},
    -		{"STT_FUNC", Const, 0},
    -		{"STT_GNU_IFUNC", Const, 23},
    -		{"STT_HIOS", Const, 0},
    -		{"STT_HIPROC", Const, 0},
    -		{"STT_LOOS", Const, 0},
    -		{"STT_LOPROC", Const, 0},
    -		{"STT_NOTYPE", Const, 0},
    -		{"STT_OBJECT", Const, 0},
    -		{"STT_RELC", Const, 23},
    -		{"STT_SECTION", Const, 0},
    -		{"STT_SRELC", Const, 23},
    -		{"STT_TLS", Const, 0},
    -		{"STV_DEFAULT", Const, 0},
    -		{"STV_HIDDEN", Const, 0},
    -		{"STV_INTERNAL", Const, 0},
    -		{"STV_PROTECTED", Const, 0},
    -		{"ST_BIND", Func, 0},
    -		{"ST_INFO", Func, 0},
    -		{"ST_TYPE", Func, 0},
    -		{"ST_VISIBILITY", Func, 0},
    -		{"Section", Type, 0},
    -		{"Section.ReaderAt", Field, 0},
    -		{"Section.SectionHeader", Field, 0},
    -		{"Section32", Type, 0},
    -		{"Section32.Addr", Field, 0},
    -		{"Section32.Addralign", Field, 0},
    -		{"Section32.Entsize", Field, 0},
    -		{"Section32.Flags", Field, 0},
    -		{"Section32.Info", Field, 0},
    -		{"Section32.Link", Field, 0},
    -		{"Section32.Name", Field, 0},
    -		{"Section32.Off", Field, 0},
    -		{"Section32.Size", Field, 0},
    -		{"Section32.Type", Field, 0},
    -		{"Section64", Type, 0},
    -		{"Section64.Addr", Field, 0},
    -		{"Section64.Addralign", Field, 0},
    -		{"Section64.Entsize", Field, 0},
    -		{"Section64.Flags", Field, 0},
    -		{"Section64.Info", Field, 0},
    -		{"Section64.Link", Field, 0},
    -		{"Section64.Name", Field, 0},
    -		{"Section64.Off", Field, 0},
    -		{"Section64.Size", Field, 0},
    -		{"Section64.Type", Field, 0},
    -		{"SectionFlag", Type, 0},
    -		{"SectionHeader", Type, 0},
    -		{"SectionHeader.Addr", Field, 0},
    -		{"SectionHeader.Addralign", Field, 0},
    -		{"SectionHeader.Entsize", Field, 0},
    -		{"SectionHeader.FileSize", Field, 6},
    -		{"SectionHeader.Flags", Field, 0},
    -		{"SectionHeader.Info", Field, 0},
    -		{"SectionHeader.Link", Field, 0},
    -		{"SectionHeader.Name", Field, 0},
    -		{"SectionHeader.Offset", Field, 0},
    -		{"SectionHeader.Size", Field, 0},
    -		{"SectionHeader.Type", Field, 0},
    -		{"SectionIndex", Type, 0},
    -		{"SectionType", Type, 0},
    -		{"Sym32", Type, 0},
    -		{"Sym32.Info", Field, 0},
    -		{"Sym32.Name", Field, 0},
    -		{"Sym32.Other", Field, 0},
    -		{"Sym32.Shndx", Field, 0},
    -		{"Sym32.Size", Field, 0},
    -		{"Sym32.Value", Field, 0},
    -		{"Sym32Size", Const, 0},
    -		{"Sym64", Type, 0},
    -		{"Sym64.Info", Field, 0},
    -		{"Sym64.Name", Field, 0},
    -		{"Sym64.Other", Field, 0},
    -		{"Sym64.Shndx", Field, 0},
    -		{"Sym64.Size", Field, 0},
    -		{"Sym64.Value", Field, 0},
    -		{"Sym64Size", Const, 0},
    -		{"SymBind", Type, 0},
    -		{"SymType", Type, 0},
    -		{"SymVis", Type, 0},
    -		{"Symbol", Type, 0},
    -		{"Symbol.Info", Field, 0},
    -		{"Symbol.Library", Field, 13},
    -		{"Symbol.Name", Field, 0},
    -		{"Symbol.Other", Field, 0},
    -		{"Symbol.Section", Field, 0},
    -		{"Symbol.Size", Field, 0},
    -		{"Symbol.Value", Field, 0},
    -		{"Symbol.Version", Field, 13},
    -		{"Type", Type, 0},
    -		{"Version", Type, 0},
    +		{"(*File).Close", Method, 0, ""},
    +		{"(*File).DWARF", Method, 0, ""},
    +		{"(*File).DynString", Method, 1, ""},
    +		{"(*File).DynValue", Method, 21, ""},
    +		{"(*File).DynamicSymbols", Method, 4, ""},
    +		{"(*File).DynamicVersionNeeds", Method, 24, ""},
    +		{"(*File).DynamicVersions", Method, 24, ""},
    +		{"(*File).ImportedLibraries", Method, 0, ""},
    +		{"(*File).ImportedSymbols", Method, 0, ""},
    +		{"(*File).Section", Method, 0, ""},
    +		{"(*File).SectionByType", Method, 0, ""},
    +		{"(*File).Symbols", Method, 0, ""},
    +		{"(*FormatError).Error", Method, 0, ""},
    +		{"(*Prog).Open", Method, 0, ""},
    +		{"(*Section).Data", Method, 0, ""},
    +		{"(*Section).Open", Method, 0, ""},
    +		{"(Class).GoString", Method, 0, ""},
    +		{"(Class).String", Method, 0, ""},
    +		{"(CompressionType).GoString", Method, 6, ""},
    +		{"(CompressionType).String", Method, 6, ""},
    +		{"(Data).GoString", Method, 0, ""},
    +		{"(Data).String", Method, 0, ""},
    +		{"(DynFlag).GoString", Method, 0, ""},
    +		{"(DynFlag).String", Method, 0, ""},
    +		{"(DynFlag1).GoString", Method, 21, ""},
    +		{"(DynFlag1).String", Method, 21, ""},
    +		{"(DynTag).GoString", Method, 0, ""},
    +		{"(DynTag).String", Method, 0, ""},
    +		{"(Machine).GoString", Method, 0, ""},
    +		{"(Machine).String", Method, 0, ""},
    +		{"(NType).GoString", Method, 0, ""},
    +		{"(NType).String", Method, 0, ""},
    +		{"(OSABI).GoString", Method, 0, ""},
    +		{"(OSABI).String", Method, 0, ""},
    +		{"(Prog).ReadAt", Method, 0, ""},
    +		{"(ProgFlag).GoString", Method, 0, ""},
    +		{"(ProgFlag).String", Method, 0, ""},
    +		{"(ProgType).GoString", Method, 0, ""},
    +		{"(ProgType).String", Method, 0, ""},
    +		{"(R_386).GoString", Method, 0, ""},
    +		{"(R_386).String", Method, 0, ""},
    +		{"(R_390).GoString", Method, 7, ""},
    +		{"(R_390).String", Method, 7, ""},
    +		{"(R_AARCH64).GoString", Method, 4, ""},
    +		{"(R_AARCH64).String", Method, 4, ""},
    +		{"(R_ALPHA).GoString", Method, 0, ""},
    +		{"(R_ALPHA).String", Method, 0, ""},
    +		{"(R_ARM).GoString", Method, 0, ""},
    +		{"(R_ARM).String", Method, 0, ""},
    +		{"(R_LARCH).GoString", Method, 19, ""},
    +		{"(R_LARCH).String", Method, 19, ""},
    +		{"(R_MIPS).GoString", Method, 6, ""},
    +		{"(R_MIPS).String", Method, 6, ""},
    +		{"(R_PPC).GoString", Method, 0, ""},
    +		{"(R_PPC).String", Method, 0, ""},
    +		{"(R_PPC64).GoString", Method, 5, ""},
    +		{"(R_PPC64).String", Method, 5, ""},
    +		{"(R_RISCV).GoString", Method, 11, ""},
    +		{"(R_RISCV).String", Method, 11, ""},
    +		{"(R_SPARC).GoString", Method, 0, ""},
    +		{"(R_SPARC).String", Method, 0, ""},
    +		{"(R_X86_64).GoString", Method, 0, ""},
    +		{"(R_X86_64).String", Method, 0, ""},
    +		{"(Section).ReadAt", Method, 0, ""},
    +		{"(SectionFlag).GoString", Method, 0, ""},
    +		{"(SectionFlag).String", Method, 0, ""},
    +		{"(SectionIndex).GoString", Method, 0, ""},
    +		{"(SectionIndex).String", Method, 0, ""},
    +		{"(SectionType).GoString", Method, 0, ""},
    +		{"(SectionType).String", Method, 0, ""},
    +		{"(SymBind).GoString", Method, 0, ""},
    +		{"(SymBind).String", Method, 0, ""},
    +		{"(SymType).GoString", Method, 0, ""},
    +		{"(SymType).String", Method, 0, ""},
    +		{"(SymVis).GoString", Method, 0, ""},
    +		{"(SymVis).String", Method, 0, ""},
    +		{"(Type).GoString", Method, 0, ""},
    +		{"(Type).String", Method, 0, ""},
    +		{"(Version).GoString", Method, 0, ""},
    +		{"(Version).String", Method, 0, ""},
    +		{"(VersionIndex).Index", Method, 24, ""},
    +		{"(VersionIndex).IsHidden", Method, 24, ""},
    +		{"ARM_MAGIC_TRAMP_NUMBER", Const, 0, ""},
    +		{"COMPRESS_HIOS", Const, 6, ""},
    +		{"COMPRESS_HIPROC", Const, 6, ""},
    +		{"COMPRESS_LOOS", Const, 6, ""},
    +		{"COMPRESS_LOPROC", Const, 6, ""},
    +		{"COMPRESS_ZLIB", Const, 6, ""},
    +		{"COMPRESS_ZSTD", Const, 21, ""},
    +		{"Chdr32", Type, 6, ""},
    +		{"Chdr32.Addralign", Field, 6, ""},
    +		{"Chdr32.Size", Field, 6, ""},
    +		{"Chdr32.Type", Field, 6, ""},
    +		{"Chdr64", Type, 6, ""},
    +		{"Chdr64.Addralign", Field, 6, ""},
    +		{"Chdr64.Size", Field, 6, ""},
    +		{"Chdr64.Type", Field, 6, ""},
    +		{"Class", Type, 0, ""},
    +		{"CompressionType", Type, 6, ""},
    +		{"DF_1_CONFALT", Const, 21, ""},
    +		{"DF_1_DIRECT", Const, 21, ""},
    +		{"DF_1_DISPRELDNE", Const, 21, ""},
    +		{"DF_1_DISPRELPND", Const, 21, ""},
    +		{"DF_1_EDITED", Const, 21, ""},
    +		{"DF_1_ENDFILTEE", Const, 21, ""},
    +		{"DF_1_GLOBAL", Const, 21, ""},
    +		{"DF_1_GLOBAUDIT", Const, 21, ""},
    +		{"DF_1_GROUP", Const, 21, ""},
    +		{"DF_1_IGNMULDEF", Const, 21, ""},
    +		{"DF_1_INITFIRST", Const, 21, ""},
    +		{"DF_1_INTERPOSE", Const, 21, ""},
    +		{"DF_1_KMOD", Const, 21, ""},
    +		{"DF_1_LOADFLTR", Const, 21, ""},
    +		{"DF_1_NOCOMMON", Const, 21, ""},
    +		{"DF_1_NODEFLIB", Const, 21, ""},
    +		{"DF_1_NODELETE", Const, 21, ""},
    +		{"DF_1_NODIRECT", Const, 21, ""},
    +		{"DF_1_NODUMP", Const, 21, ""},
    +		{"DF_1_NOHDR", Const, 21, ""},
    +		{"DF_1_NOKSYMS", Const, 21, ""},
    +		{"DF_1_NOOPEN", Const, 21, ""},
    +		{"DF_1_NORELOC", Const, 21, ""},
    +		{"DF_1_NOW", Const, 21, ""},
    +		{"DF_1_ORIGIN", Const, 21, ""},
    +		{"DF_1_PIE", Const, 21, ""},
    +		{"DF_1_SINGLETON", Const, 21, ""},
    +		{"DF_1_STUB", Const, 21, ""},
    +		{"DF_1_SYMINTPOSE", Const, 21, ""},
    +		{"DF_1_TRANS", Const, 21, ""},
    +		{"DF_1_WEAKFILTER", Const, 21, ""},
    +		{"DF_BIND_NOW", Const, 0, ""},
    +		{"DF_ORIGIN", Const, 0, ""},
    +		{"DF_STATIC_TLS", Const, 0, ""},
    +		{"DF_SYMBOLIC", Const, 0, ""},
    +		{"DF_TEXTREL", Const, 0, ""},
    +		{"DT_ADDRRNGHI", Const, 16, ""},
    +		{"DT_ADDRRNGLO", Const, 16, ""},
    +		{"DT_AUDIT", Const, 16, ""},
    +		{"DT_AUXILIARY", Const, 16, ""},
    +		{"DT_BIND_NOW", Const, 0, ""},
    +		{"DT_CHECKSUM", Const, 16, ""},
    +		{"DT_CONFIG", Const, 16, ""},
    +		{"DT_DEBUG", Const, 0, ""},
    +		{"DT_DEPAUDIT", Const, 16, ""},
    +		{"DT_ENCODING", Const, 0, ""},
    +		{"DT_FEATURE", Const, 16, ""},
    +		{"DT_FILTER", Const, 16, ""},
    +		{"DT_FINI", Const, 0, ""},
    +		{"DT_FINI_ARRAY", Const, 0, ""},
    +		{"DT_FINI_ARRAYSZ", Const, 0, ""},
    +		{"DT_FLAGS", Const, 0, ""},
    +		{"DT_FLAGS_1", Const, 16, ""},
    +		{"DT_GNU_CONFLICT", Const, 16, ""},
    +		{"DT_GNU_CONFLICTSZ", Const, 16, ""},
    +		{"DT_GNU_HASH", Const, 16, ""},
    +		{"DT_GNU_LIBLIST", Const, 16, ""},
    +		{"DT_GNU_LIBLISTSZ", Const, 16, ""},
    +		{"DT_GNU_PRELINKED", Const, 16, ""},
    +		{"DT_HASH", Const, 0, ""},
    +		{"DT_HIOS", Const, 0, ""},
    +		{"DT_HIPROC", Const, 0, ""},
    +		{"DT_INIT", Const, 0, ""},
    +		{"DT_INIT_ARRAY", Const, 0, ""},
    +		{"DT_INIT_ARRAYSZ", Const, 0, ""},
    +		{"DT_JMPREL", Const, 0, ""},
    +		{"DT_LOOS", Const, 0, ""},
    +		{"DT_LOPROC", Const, 0, ""},
    +		{"DT_MIPS_AUX_DYNAMIC", Const, 16, ""},
    +		{"DT_MIPS_BASE_ADDRESS", Const, 16, ""},
    +		{"DT_MIPS_COMPACT_SIZE", Const, 16, ""},
    +		{"DT_MIPS_CONFLICT", Const, 16, ""},
    +		{"DT_MIPS_CONFLICTNO", Const, 16, ""},
    +		{"DT_MIPS_CXX_FLAGS", Const, 16, ""},
    +		{"DT_MIPS_DELTA_CLASS", Const, 16, ""},
    +		{"DT_MIPS_DELTA_CLASSSYM", Const, 16, ""},
    +		{"DT_MIPS_DELTA_CLASSSYM_NO", Const, 16, ""},
    +		{"DT_MIPS_DELTA_CLASS_NO", Const, 16, ""},
    +		{"DT_MIPS_DELTA_INSTANCE", Const, 16, ""},
    +		{"DT_MIPS_DELTA_INSTANCE_NO", Const, 16, ""},
    +		{"DT_MIPS_DELTA_RELOC", Const, 16, ""},
    +		{"DT_MIPS_DELTA_RELOC_NO", Const, 16, ""},
    +		{"DT_MIPS_DELTA_SYM", Const, 16, ""},
    +		{"DT_MIPS_DELTA_SYM_NO", Const, 16, ""},
    +		{"DT_MIPS_DYNSTR_ALIGN", Const, 16, ""},
    +		{"DT_MIPS_FLAGS", Const, 16, ""},
    +		{"DT_MIPS_GOTSYM", Const, 16, ""},
    +		{"DT_MIPS_GP_VALUE", Const, 16, ""},
    +		{"DT_MIPS_HIDDEN_GOTIDX", Const, 16, ""},
    +		{"DT_MIPS_HIPAGENO", Const, 16, ""},
    +		{"DT_MIPS_ICHECKSUM", Const, 16, ""},
    +		{"DT_MIPS_INTERFACE", Const, 16, ""},
    +		{"DT_MIPS_INTERFACE_SIZE", Const, 16, ""},
    +		{"DT_MIPS_IVERSION", Const, 16, ""},
    +		{"DT_MIPS_LIBLIST", Const, 16, ""},
    +		{"DT_MIPS_LIBLISTNO", Const, 16, ""},
    +		{"DT_MIPS_LOCALPAGE_GOTIDX", Const, 16, ""},
    +		{"DT_MIPS_LOCAL_GOTIDX", Const, 16, ""},
    +		{"DT_MIPS_LOCAL_GOTNO", Const, 16, ""},
    +		{"DT_MIPS_MSYM", Const, 16, ""},
    +		{"DT_MIPS_OPTIONS", Const, 16, ""},
    +		{"DT_MIPS_PERF_SUFFIX", Const, 16, ""},
    +		{"DT_MIPS_PIXIE_INIT", Const, 16, ""},
    +		{"DT_MIPS_PLTGOT", Const, 16, ""},
    +		{"DT_MIPS_PROTECTED_GOTIDX", Const, 16, ""},
    +		{"DT_MIPS_RLD_MAP", Const, 16, ""},
    +		{"DT_MIPS_RLD_MAP_REL", Const, 16, ""},
    +		{"DT_MIPS_RLD_TEXT_RESOLVE_ADDR", Const, 16, ""},
    +		{"DT_MIPS_RLD_VERSION", Const, 16, ""},
    +		{"DT_MIPS_RWPLT", Const, 16, ""},
    +		{"DT_MIPS_SYMBOL_LIB", Const, 16, ""},
    +		{"DT_MIPS_SYMTABNO", Const, 16, ""},
    +		{"DT_MIPS_TIME_STAMP", Const, 16, ""},
    +		{"DT_MIPS_UNREFEXTNO", Const, 16, ""},
    +		{"DT_MOVEENT", Const, 16, ""},
    +		{"DT_MOVESZ", Const, 16, ""},
    +		{"DT_MOVETAB", Const, 16, ""},
    +		{"DT_NEEDED", Const, 0, ""},
    +		{"DT_NULL", Const, 0, ""},
    +		{"DT_PLTGOT", Const, 0, ""},
    +		{"DT_PLTPAD", Const, 16, ""},
    +		{"DT_PLTPADSZ", Const, 16, ""},
    +		{"DT_PLTREL", Const, 0, ""},
    +		{"DT_PLTRELSZ", Const, 0, ""},
    +		{"DT_POSFLAG_1", Const, 16, ""},
    +		{"DT_PPC64_GLINK", Const, 16, ""},
    +		{"DT_PPC64_OPD", Const, 16, ""},
    +		{"DT_PPC64_OPDSZ", Const, 16, ""},
    +		{"DT_PPC64_OPT", Const, 16, ""},
    +		{"DT_PPC_GOT", Const, 16, ""},
    +		{"DT_PPC_OPT", Const, 16, ""},
    +		{"DT_PREINIT_ARRAY", Const, 0, ""},
    +		{"DT_PREINIT_ARRAYSZ", Const, 0, ""},
    +		{"DT_REL", Const, 0, ""},
    +		{"DT_RELA", Const, 0, ""},
    +		{"DT_RELACOUNT", Const, 16, ""},
    +		{"DT_RELAENT", Const, 0, ""},
    +		{"DT_RELASZ", Const, 0, ""},
    +		{"DT_RELCOUNT", Const, 16, ""},
    +		{"DT_RELENT", Const, 0, ""},
    +		{"DT_RELSZ", Const, 0, ""},
    +		{"DT_RPATH", Const, 0, ""},
    +		{"DT_RUNPATH", Const, 0, ""},
    +		{"DT_SONAME", Const, 0, ""},
    +		{"DT_SPARC_REGISTER", Const, 16, ""},
    +		{"DT_STRSZ", Const, 0, ""},
    +		{"DT_STRTAB", Const, 0, ""},
    +		{"DT_SYMBOLIC", Const, 0, ""},
    +		{"DT_SYMENT", Const, 0, ""},
    +		{"DT_SYMINENT", Const, 16, ""},
    +		{"DT_SYMINFO", Const, 16, ""},
    +		{"DT_SYMINSZ", Const, 16, ""},
    +		{"DT_SYMTAB", Const, 0, ""},
    +		{"DT_SYMTAB_SHNDX", Const, 16, ""},
    +		{"DT_TEXTREL", Const, 0, ""},
    +		{"DT_TLSDESC_GOT", Const, 16, ""},
    +		{"DT_TLSDESC_PLT", Const, 16, ""},
    +		{"DT_USED", Const, 16, ""},
    +		{"DT_VALRNGHI", Const, 16, ""},
    +		{"DT_VALRNGLO", Const, 16, ""},
    +		{"DT_VERDEF", Const, 16, ""},
    +		{"DT_VERDEFNUM", Const, 16, ""},
    +		{"DT_VERNEED", Const, 0, ""},
    +		{"DT_VERNEEDNUM", Const, 0, ""},
    +		{"DT_VERSYM", Const, 0, ""},
    +		{"Data", Type, 0, ""},
    +		{"Dyn32", Type, 0, ""},
    +		{"Dyn32.Tag", Field, 0, ""},
    +		{"Dyn32.Val", Field, 0, ""},
    +		{"Dyn64", Type, 0, ""},
    +		{"Dyn64.Tag", Field, 0, ""},
    +		{"Dyn64.Val", Field, 0, ""},
    +		{"DynFlag", Type, 0, ""},
    +		{"DynFlag1", Type, 21, ""},
    +		{"DynTag", Type, 0, ""},
    +		{"DynamicVersion", Type, 24, ""},
    +		{"DynamicVersion.Deps", Field, 24, ""},
    +		{"DynamicVersion.Flags", Field, 24, ""},
    +		{"DynamicVersion.Index", Field, 24, ""},
    +		{"DynamicVersion.Name", Field, 24, ""},
    +		{"DynamicVersionDep", Type, 24, ""},
    +		{"DynamicVersionDep.Dep", Field, 24, ""},
    +		{"DynamicVersionDep.Flags", Field, 24, ""},
    +		{"DynamicVersionDep.Index", Field, 24, ""},
    +		{"DynamicVersionFlag", Type, 24, ""},
    +		{"DynamicVersionNeed", Type, 24, ""},
    +		{"DynamicVersionNeed.Name", Field, 24, ""},
    +		{"DynamicVersionNeed.Needs", Field, 24, ""},
    +		{"EI_ABIVERSION", Const, 0, ""},
    +		{"EI_CLASS", Const, 0, ""},
    +		{"EI_DATA", Const, 0, ""},
    +		{"EI_NIDENT", Const, 0, ""},
    +		{"EI_OSABI", Const, 0, ""},
    +		{"EI_PAD", Const, 0, ""},
    +		{"EI_VERSION", Const, 0, ""},
    +		{"ELFCLASS32", Const, 0, ""},
    +		{"ELFCLASS64", Const, 0, ""},
    +		{"ELFCLASSNONE", Const, 0, ""},
    +		{"ELFDATA2LSB", Const, 0, ""},
    +		{"ELFDATA2MSB", Const, 0, ""},
    +		{"ELFDATANONE", Const, 0, ""},
    +		{"ELFMAG", Const, 0, ""},
    +		{"ELFOSABI_86OPEN", Const, 0, ""},
    +		{"ELFOSABI_AIX", Const, 0, ""},
    +		{"ELFOSABI_ARM", Const, 0, ""},
    +		{"ELFOSABI_AROS", Const, 11, ""},
    +		{"ELFOSABI_CLOUDABI", Const, 11, ""},
    +		{"ELFOSABI_FENIXOS", Const, 11, ""},
    +		{"ELFOSABI_FREEBSD", Const, 0, ""},
    +		{"ELFOSABI_HPUX", Const, 0, ""},
    +		{"ELFOSABI_HURD", Const, 0, ""},
    +		{"ELFOSABI_IRIX", Const, 0, ""},
    +		{"ELFOSABI_LINUX", Const, 0, ""},
    +		{"ELFOSABI_MODESTO", Const, 0, ""},
    +		{"ELFOSABI_NETBSD", Const, 0, ""},
    +		{"ELFOSABI_NONE", Const, 0, ""},
    +		{"ELFOSABI_NSK", Const, 0, ""},
    +		{"ELFOSABI_OPENBSD", Const, 0, ""},
    +		{"ELFOSABI_OPENVMS", Const, 0, ""},
    +		{"ELFOSABI_SOLARIS", Const, 0, ""},
    +		{"ELFOSABI_STANDALONE", Const, 0, ""},
    +		{"ELFOSABI_TRU64", Const, 0, ""},
    +		{"EM_386", Const, 0, ""},
    +		{"EM_486", Const, 0, ""},
    +		{"EM_56800EX", Const, 11, ""},
    +		{"EM_68HC05", Const, 11, ""},
    +		{"EM_68HC08", Const, 11, ""},
    +		{"EM_68HC11", Const, 11, ""},
    +		{"EM_68HC12", Const, 0, ""},
    +		{"EM_68HC16", Const, 11, ""},
    +		{"EM_68K", Const, 0, ""},
    +		{"EM_78KOR", Const, 11, ""},
    +		{"EM_8051", Const, 11, ""},
    +		{"EM_860", Const, 0, ""},
    +		{"EM_88K", Const, 0, ""},
    +		{"EM_960", Const, 0, ""},
    +		{"EM_AARCH64", Const, 4, ""},
    +		{"EM_ALPHA", Const, 0, ""},
    +		{"EM_ALPHA_STD", Const, 0, ""},
    +		{"EM_ALTERA_NIOS2", Const, 11, ""},
    +		{"EM_AMDGPU", Const, 11, ""},
    +		{"EM_ARC", Const, 0, ""},
    +		{"EM_ARCA", Const, 11, ""},
    +		{"EM_ARC_COMPACT", Const, 11, ""},
    +		{"EM_ARC_COMPACT2", Const, 11, ""},
    +		{"EM_ARM", Const, 0, ""},
    +		{"EM_AVR", Const, 11, ""},
    +		{"EM_AVR32", Const, 11, ""},
    +		{"EM_BA1", Const, 11, ""},
    +		{"EM_BA2", Const, 11, ""},
    +		{"EM_BLACKFIN", Const, 11, ""},
    +		{"EM_BPF", Const, 11, ""},
    +		{"EM_C166", Const, 11, ""},
    +		{"EM_CDP", Const, 11, ""},
    +		{"EM_CE", Const, 11, ""},
    +		{"EM_CLOUDSHIELD", Const, 11, ""},
    +		{"EM_COGE", Const, 11, ""},
    +		{"EM_COLDFIRE", Const, 0, ""},
    +		{"EM_COOL", Const, 11, ""},
    +		{"EM_COREA_1ST", Const, 11, ""},
    +		{"EM_COREA_2ND", Const, 11, ""},
    +		{"EM_CR", Const, 11, ""},
    +		{"EM_CR16", Const, 11, ""},
    +		{"EM_CRAYNV2", Const, 11, ""},
    +		{"EM_CRIS", Const, 11, ""},
    +		{"EM_CRX", Const, 11, ""},
    +		{"EM_CSR_KALIMBA", Const, 11, ""},
    +		{"EM_CUDA", Const, 11, ""},
    +		{"EM_CYPRESS_M8C", Const, 11, ""},
    +		{"EM_D10V", Const, 11, ""},
    +		{"EM_D30V", Const, 11, ""},
    +		{"EM_DSP24", Const, 11, ""},
    +		{"EM_DSPIC30F", Const, 11, ""},
    +		{"EM_DXP", Const, 11, ""},
    +		{"EM_ECOG1", Const, 11, ""},
    +		{"EM_ECOG16", Const, 11, ""},
    +		{"EM_ECOG1X", Const, 11, ""},
    +		{"EM_ECOG2", Const, 11, ""},
    +		{"EM_ETPU", Const, 11, ""},
    +		{"EM_EXCESS", Const, 11, ""},
    +		{"EM_F2MC16", Const, 11, ""},
    +		{"EM_FIREPATH", Const, 11, ""},
    +		{"EM_FR20", Const, 0, ""},
    +		{"EM_FR30", Const, 11, ""},
    +		{"EM_FT32", Const, 11, ""},
    +		{"EM_FX66", Const, 11, ""},
    +		{"EM_H8S", Const, 0, ""},
    +		{"EM_H8_300", Const, 0, ""},
    +		{"EM_H8_300H", Const, 0, ""},
    +		{"EM_H8_500", Const, 0, ""},
    +		{"EM_HUANY", Const, 11, ""},
    +		{"EM_IA_64", Const, 0, ""},
    +		{"EM_INTEL205", Const, 11, ""},
    +		{"EM_INTEL206", Const, 11, ""},
    +		{"EM_INTEL207", Const, 11, ""},
    +		{"EM_INTEL208", Const, 11, ""},
    +		{"EM_INTEL209", Const, 11, ""},
    +		{"EM_IP2K", Const, 11, ""},
    +		{"EM_JAVELIN", Const, 11, ""},
    +		{"EM_K10M", Const, 11, ""},
    +		{"EM_KM32", Const, 11, ""},
    +		{"EM_KMX16", Const, 11, ""},
    +		{"EM_KMX32", Const, 11, ""},
    +		{"EM_KMX8", Const, 11, ""},
    +		{"EM_KVARC", Const, 11, ""},
    +		{"EM_L10M", Const, 11, ""},
    +		{"EM_LANAI", Const, 11, ""},
    +		{"EM_LATTICEMICO32", Const, 11, ""},
    +		{"EM_LOONGARCH", Const, 19, ""},
    +		{"EM_M16C", Const, 11, ""},
    +		{"EM_M32", Const, 0, ""},
    +		{"EM_M32C", Const, 11, ""},
    +		{"EM_M32R", Const, 11, ""},
    +		{"EM_MANIK", Const, 11, ""},
    +		{"EM_MAX", Const, 11, ""},
    +		{"EM_MAXQ30", Const, 11, ""},
    +		{"EM_MCHP_PIC", Const, 11, ""},
    +		{"EM_MCST_ELBRUS", Const, 11, ""},
    +		{"EM_ME16", Const, 0, ""},
    +		{"EM_METAG", Const, 11, ""},
    +		{"EM_MICROBLAZE", Const, 11, ""},
    +		{"EM_MIPS", Const, 0, ""},
    +		{"EM_MIPS_RS3_LE", Const, 0, ""},
    +		{"EM_MIPS_RS4_BE", Const, 0, ""},
    +		{"EM_MIPS_X", Const, 0, ""},
    +		{"EM_MMA", Const, 0, ""},
    +		{"EM_MMDSP_PLUS", Const, 11, ""},
    +		{"EM_MMIX", Const, 11, ""},
    +		{"EM_MN10200", Const, 11, ""},
    +		{"EM_MN10300", Const, 11, ""},
    +		{"EM_MOXIE", Const, 11, ""},
    +		{"EM_MSP430", Const, 11, ""},
    +		{"EM_NCPU", Const, 0, ""},
    +		{"EM_NDR1", Const, 0, ""},
    +		{"EM_NDS32", Const, 11, ""},
    +		{"EM_NONE", Const, 0, ""},
    +		{"EM_NORC", Const, 11, ""},
    +		{"EM_NS32K", Const, 11, ""},
    +		{"EM_OPEN8", Const, 11, ""},
    +		{"EM_OPENRISC", Const, 11, ""},
    +		{"EM_PARISC", Const, 0, ""},
    +		{"EM_PCP", Const, 0, ""},
    +		{"EM_PDP10", Const, 11, ""},
    +		{"EM_PDP11", Const, 11, ""},
    +		{"EM_PDSP", Const, 11, ""},
    +		{"EM_PJ", Const, 11, ""},
    +		{"EM_PPC", Const, 0, ""},
    +		{"EM_PPC64", Const, 0, ""},
    +		{"EM_PRISM", Const, 11, ""},
    +		{"EM_QDSP6", Const, 11, ""},
    +		{"EM_R32C", Const, 11, ""},
    +		{"EM_RCE", Const, 0, ""},
    +		{"EM_RH32", Const, 0, ""},
    +		{"EM_RISCV", Const, 11, ""},
    +		{"EM_RL78", Const, 11, ""},
    +		{"EM_RS08", Const, 11, ""},
    +		{"EM_RX", Const, 11, ""},
    +		{"EM_S370", Const, 0, ""},
    +		{"EM_S390", Const, 0, ""},
    +		{"EM_SCORE7", Const, 11, ""},
    +		{"EM_SEP", Const, 11, ""},
    +		{"EM_SE_C17", Const, 11, ""},
    +		{"EM_SE_C33", Const, 11, ""},
    +		{"EM_SH", Const, 0, ""},
    +		{"EM_SHARC", Const, 11, ""},
    +		{"EM_SLE9X", Const, 11, ""},
    +		{"EM_SNP1K", Const, 11, ""},
    +		{"EM_SPARC", Const, 0, ""},
    +		{"EM_SPARC32PLUS", Const, 0, ""},
    +		{"EM_SPARCV9", Const, 0, ""},
    +		{"EM_ST100", Const, 0, ""},
    +		{"EM_ST19", Const, 11, ""},
    +		{"EM_ST200", Const, 11, ""},
    +		{"EM_ST7", Const, 11, ""},
    +		{"EM_ST9PLUS", Const, 11, ""},
    +		{"EM_STARCORE", Const, 0, ""},
    +		{"EM_STM8", Const, 11, ""},
    +		{"EM_STXP7X", Const, 11, ""},
    +		{"EM_SVX", Const, 11, ""},
    +		{"EM_TILE64", Const, 11, ""},
    +		{"EM_TILEGX", Const, 11, ""},
    +		{"EM_TILEPRO", Const, 11, ""},
    +		{"EM_TINYJ", Const, 0, ""},
    +		{"EM_TI_ARP32", Const, 11, ""},
    +		{"EM_TI_C2000", Const, 11, ""},
    +		{"EM_TI_C5500", Const, 11, ""},
    +		{"EM_TI_C6000", Const, 11, ""},
    +		{"EM_TI_PRU", Const, 11, ""},
    +		{"EM_TMM_GPP", Const, 11, ""},
    +		{"EM_TPC", Const, 11, ""},
    +		{"EM_TRICORE", Const, 0, ""},
    +		{"EM_TRIMEDIA", Const, 11, ""},
    +		{"EM_TSK3000", Const, 11, ""},
    +		{"EM_UNICORE", Const, 11, ""},
    +		{"EM_V800", Const, 0, ""},
    +		{"EM_V850", Const, 11, ""},
    +		{"EM_VAX", Const, 11, ""},
    +		{"EM_VIDEOCORE", Const, 11, ""},
    +		{"EM_VIDEOCORE3", Const, 11, ""},
    +		{"EM_VIDEOCORE5", Const, 11, ""},
    +		{"EM_VISIUM", Const, 11, ""},
    +		{"EM_VPP500", Const, 0, ""},
    +		{"EM_X86_64", Const, 0, ""},
    +		{"EM_XCORE", Const, 11, ""},
    +		{"EM_XGATE", Const, 11, ""},
    +		{"EM_XIMO16", Const, 11, ""},
    +		{"EM_XTENSA", Const, 11, ""},
    +		{"EM_Z80", Const, 11, ""},
    +		{"EM_ZSP", Const, 11, ""},
    +		{"ET_CORE", Const, 0, ""},
    +		{"ET_DYN", Const, 0, ""},
    +		{"ET_EXEC", Const, 0, ""},
    +		{"ET_HIOS", Const, 0, ""},
    +		{"ET_HIPROC", Const, 0, ""},
    +		{"ET_LOOS", Const, 0, ""},
    +		{"ET_LOPROC", Const, 0, ""},
    +		{"ET_NONE", Const, 0, ""},
    +		{"ET_REL", Const, 0, ""},
    +		{"EV_CURRENT", Const, 0, ""},
    +		{"EV_NONE", Const, 0, ""},
    +		{"ErrNoSymbols", Var, 4, ""},
    +		{"File", Type, 0, ""},
    +		{"File.FileHeader", Field, 0, ""},
    +		{"File.Progs", Field, 0, ""},
    +		{"File.Sections", Field, 0, ""},
    +		{"FileHeader", Type, 0, ""},
    +		{"FileHeader.ABIVersion", Field, 0, ""},
    +		{"FileHeader.ByteOrder", Field, 0, ""},
    +		{"FileHeader.Class", Field, 0, ""},
    +		{"FileHeader.Data", Field, 0, ""},
    +		{"FileHeader.Entry", Field, 1, ""},
    +		{"FileHeader.Machine", Field, 0, ""},
    +		{"FileHeader.OSABI", Field, 0, ""},
    +		{"FileHeader.Type", Field, 0, ""},
    +		{"FileHeader.Version", Field, 0, ""},
    +		{"FormatError", Type, 0, ""},
    +		{"Header32", Type, 0, ""},
    +		{"Header32.Ehsize", Field, 0, ""},
    +		{"Header32.Entry", Field, 0, ""},
    +		{"Header32.Flags", Field, 0, ""},
    +		{"Header32.Ident", Field, 0, ""},
    +		{"Header32.Machine", Field, 0, ""},
    +		{"Header32.Phentsize", Field, 0, ""},
    +		{"Header32.Phnum", Field, 0, ""},
    +		{"Header32.Phoff", Field, 0, ""},
    +		{"Header32.Shentsize", Field, 0, ""},
    +		{"Header32.Shnum", Field, 0, ""},
    +		{"Header32.Shoff", Field, 0, ""},
    +		{"Header32.Shstrndx", Field, 0, ""},
    +		{"Header32.Type", Field, 0, ""},
    +		{"Header32.Version", Field, 0, ""},
    +		{"Header64", Type, 0, ""},
    +		{"Header64.Ehsize", Field, 0, ""},
    +		{"Header64.Entry", Field, 0, ""},
    +		{"Header64.Flags", Field, 0, ""},
    +		{"Header64.Ident", Field, 0, ""},
    +		{"Header64.Machine", Field, 0, ""},
    +		{"Header64.Phentsize", Field, 0, ""},
    +		{"Header64.Phnum", Field, 0, ""},
    +		{"Header64.Phoff", Field, 0, ""},
    +		{"Header64.Shentsize", Field, 0, ""},
    +		{"Header64.Shnum", Field, 0, ""},
    +		{"Header64.Shoff", Field, 0, ""},
    +		{"Header64.Shstrndx", Field, 0, ""},
    +		{"Header64.Type", Field, 0, ""},
    +		{"Header64.Version", Field, 0, ""},
    +		{"ImportedSymbol", Type, 0, ""},
    +		{"ImportedSymbol.Library", Field, 0, ""},
    +		{"ImportedSymbol.Name", Field, 0, ""},
    +		{"ImportedSymbol.Version", Field, 0, ""},
    +		{"Machine", Type, 0, ""},
    +		{"NT_FPREGSET", Const, 0, ""},
    +		{"NT_PRPSINFO", Const, 0, ""},
    +		{"NT_PRSTATUS", Const, 0, ""},
    +		{"NType", Type, 0, ""},
    +		{"NewFile", Func, 0, "func(r io.ReaderAt) (*File, error)"},
    +		{"OSABI", Type, 0, ""},
    +		{"Open", Func, 0, "func(name string) (*File, error)"},
    +		{"PF_MASKOS", Const, 0, ""},
    +		{"PF_MASKPROC", Const, 0, ""},
    +		{"PF_R", Const, 0, ""},
    +		{"PF_W", Const, 0, ""},
    +		{"PF_X", Const, 0, ""},
    +		{"PT_AARCH64_ARCHEXT", Const, 16, ""},
    +		{"PT_AARCH64_UNWIND", Const, 16, ""},
    +		{"PT_ARM_ARCHEXT", Const, 16, ""},
    +		{"PT_ARM_EXIDX", Const, 16, ""},
    +		{"PT_DYNAMIC", Const, 0, ""},
    +		{"PT_GNU_EH_FRAME", Const, 16, ""},
    +		{"PT_GNU_MBIND_HI", Const, 16, ""},
    +		{"PT_GNU_MBIND_LO", Const, 16, ""},
    +		{"PT_GNU_PROPERTY", Const, 16, ""},
    +		{"PT_GNU_RELRO", Const, 16, ""},
    +		{"PT_GNU_STACK", Const, 16, ""},
    +		{"PT_HIOS", Const, 0, ""},
    +		{"PT_HIPROC", Const, 0, ""},
    +		{"PT_INTERP", Const, 0, ""},
    +		{"PT_LOAD", Const, 0, ""},
    +		{"PT_LOOS", Const, 0, ""},
    +		{"PT_LOPROC", Const, 0, ""},
    +		{"PT_MIPS_ABIFLAGS", Const, 16, ""},
    +		{"PT_MIPS_OPTIONS", Const, 16, ""},
    +		{"PT_MIPS_REGINFO", Const, 16, ""},
    +		{"PT_MIPS_RTPROC", Const, 16, ""},
    +		{"PT_NOTE", Const, 0, ""},
    +		{"PT_NULL", Const, 0, ""},
    +		{"PT_OPENBSD_BOOTDATA", Const, 16, ""},
    +		{"PT_OPENBSD_NOBTCFI", Const, 23, ""},
    +		{"PT_OPENBSD_RANDOMIZE", Const, 16, ""},
    +		{"PT_OPENBSD_WXNEEDED", Const, 16, ""},
    +		{"PT_PAX_FLAGS", Const, 16, ""},
    +		{"PT_PHDR", Const, 0, ""},
    +		{"PT_RISCV_ATTRIBUTES", Const, 25, ""},
    +		{"PT_S390_PGSTE", Const, 16, ""},
    +		{"PT_SHLIB", Const, 0, ""},
    +		{"PT_SUNWSTACK", Const, 16, ""},
    +		{"PT_SUNW_EH_FRAME", Const, 16, ""},
    +		{"PT_TLS", Const, 0, ""},
    +		{"Prog", Type, 0, ""},
    +		{"Prog.ProgHeader", Field, 0, ""},
    +		{"Prog.ReaderAt", Field, 0, ""},
    +		{"Prog32", Type, 0, ""},
    +		{"Prog32.Align", Field, 0, ""},
    +		{"Prog32.Filesz", Field, 0, ""},
    +		{"Prog32.Flags", Field, 0, ""},
    +		{"Prog32.Memsz", Field, 0, ""},
    +		{"Prog32.Off", Field, 0, ""},
    +		{"Prog32.Paddr", Field, 0, ""},
    +		{"Prog32.Type", Field, 0, ""},
    +		{"Prog32.Vaddr", Field, 0, ""},
    +		{"Prog64", Type, 0, ""},
    +		{"Prog64.Align", Field, 0, ""},
    +		{"Prog64.Filesz", Field, 0, ""},
    +		{"Prog64.Flags", Field, 0, ""},
    +		{"Prog64.Memsz", Field, 0, ""},
    +		{"Prog64.Off", Field, 0, ""},
    +		{"Prog64.Paddr", Field, 0, ""},
    +		{"Prog64.Type", Field, 0, ""},
    +		{"Prog64.Vaddr", Field, 0, ""},
    +		{"ProgFlag", Type, 0, ""},
    +		{"ProgHeader", Type, 0, ""},
    +		{"ProgHeader.Align", Field, 0, ""},
    +		{"ProgHeader.Filesz", Field, 0, ""},
    +		{"ProgHeader.Flags", Field, 0, ""},
    +		{"ProgHeader.Memsz", Field, 0, ""},
    +		{"ProgHeader.Off", Field, 0, ""},
    +		{"ProgHeader.Paddr", Field, 0, ""},
    +		{"ProgHeader.Type", Field, 0, ""},
    +		{"ProgHeader.Vaddr", Field, 0, ""},
    +		{"ProgType", Type, 0, ""},
    +		{"R_386", Type, 0, ""},
    +		{"R_386_16", Const, 10, ""},
    +		{"R_386_32", Const, 0, ""},
    +		{"R_386_32PLT", Const, 10, ""},
    +		{"R_386_8", Const, 10, ""},
    +		{"R_386_COPY", Const, 0, ""},
    +		{"R_386_GLOB_DAT", Const, 0, ""},
    +		{"R_386_GOT32", Const, 0, ""},
    +		{"R_386_GOT32X", Const, 10, ""},
    +		{"R_386_GOTOFF", Const, 0, ""},
    +		{"R_386_GOTPC", Const, 0, ""},
    +		{"R_386_IRELATIVE", Const, 10, ""},
    +		{"R_386_JMP_SLOT", Const, 0, ""},
    +		{"R_386_NONE", Const, 0, ""},
    +		{"R_386_PC16", Const, 10, ""},
    +		{"R_386_PC32", Const, 0, ""},
    +		{"R_386_PC8", Const, 10, ""},
    +		{"R_386_PLT32", Const, 0, ""},
    +		{"R_386_RELATIVE", Const, 0, ""},
    +		{"R_386_SIZE32", Const, 10, ""},
    +		{"R_386_TLS_DESC", Const, 10, ""},
    +		{"R_386_TLS_DESC_CALL", Const, 10, ""},
    +		{"R_386_TLS_DTPMOD32", Const, 0, ""},
    +		{"R_386_TLS_DTPOFF32", Const, 0, ""},
    +		{"R_386_TLS_GD", Const, 0, ""},
    +		{"R_386_TLS_GD_32", Const, 0, ""},
    +		{"R_386_TLS_GD_CALL", Const, 0, ""},
    +		{"R_386_TLS_GD_POP", Const, 0, ""},
    +		{"R_386_TLS_GD_PUSH", Const, 0, ""},
    +		{"R_386_TLS_GOTDESC", Const, 10, ""},
    +		{"R_386_TLS_GOTIE", Const, 0, ""},
    +		{"R_386_TLS_IE", Const, 0, ""},
    +		{"R_386_TLS_IE_32", Const, 0, ""},
    +		{"R_386_TLS_LDM", Const, 0, ""},
    +		{"R_386_TLS_LDM_32", Const, 0, ""},
    +		{"R_386_TLS_LDM_CALL", Const, 0, ""},
    +		{"R_386_TLS_LDM_POP", Const, 0, ""},
    +		{"R_386_TLS_LDM_PUSH", Const, 0, ""},
    +		{"R_386_TLS_LDO_32", Const, 0, ""},
    +		{"R_386_TLS_LE", Const, 0, ""},
    +		{"R_386_TLS_LE_32", Const, 0, ""},
    +		{"R_386_TLS_TPOFF", Const, 0, ""},
    +		{"R_386_TLS_TPOFF32", Const, 0, ""},
    +		{"R_390", Type, 7, ""},
    +		{"R_390_12", Const, 7, ""},
    +		{"R_390_16", Const, 7, ""},
    +		{"R_390_20", Const, 7, ""},
    +		{"R_390_32", Const, 7, ""},
    +		{"R_390_64", Const, 7, ""},
    +		{"R_390_8", Const, 7, ""},
    +		{"R_390_COPY", Const, 7, ""},
    +		{"R_390_GLOB_DAT", Const, 7, ""},
    +		{"R_390_GOT12", Const, 7, ""},
    +		{"R_390_GOT16", Const, 7, ""},
    +		{"R_390_GOT20", Const, 7, ""},
    +		{"R_390_GOT32", Const, 7, ""},
    +		{"R_390_GOT64", Const, 7, ""},
    +		{"R_390_GOTENT", Const, 7, ""},
    +		{"R_390_GOTOFF", Const, 7, ""},
    +		{"R_390_GOTOFF16", Const, 7, ""},
    +		{"R_390_GOTOFF64", Const, 7, ""},
    +		{"R_390_GOTPC", Const, 7, ""},
    +		{"R_390_GOTPCDBL", Const, 7, ""},
    +		{"R_390_GOTPLT12", Const, 7, ""},
    +		{"R_390_GOTPLT16", Const, 7, ""},
    +		{"R_390_GOTPLT20", Const, 7, ""},
    +		{"R_390_GOTPLT32", Const, 7, ""},
    +		{"R_390_GOTPLT64", Const, 7, ""},
    +		{"R_390_GOTPLTENT", Const, 7, ""},
    +		{"R_390_GOTPLTOFF16", Const, 7, ""},
    +		{"R_390_GOTPLTOFF32", Const, 7, ""},
    +		{"R_390_GOTPLTOFF64", Const, 7, ""},
    +		{"R_390_JMP_SLOT", Const, 7, ""},
    +		{"R_390_NONE", Const, 7, ""},
    +		{"R_390_PC16", Const, 7, ""},
    +		{"R_390_PC16DBL", Const, 7, ""},
    +		{"R_390_PC32", Const, 7, ""},
    +		{"R_390_PC32DBL", Const, 7, ""},
    +		{"R_390_PC64", Const, 7, ""},
    +		{"R_390_PLT16DBL", Const, 7, ""},
    +		{"R_390_PLT32", Const, 7, ""},
    +		{"R_390_PLT32DBL", Const, 7, ""},
    +		{"R_390_PLT64", Const, 7, ""},
    +		{"R_390_RELATIVE", Const, 7, ""},
    +		{"R_390_TLS_DTPMOD", Const, 7, ""},
    +		{"R_390_TLS_DTPOFF", Const, 7, ""},
    +		{"R_390_TLS_GD32", Const, 7, ""},
    +		{"R_390_TLS_GD64", Const, 7, ""},
    +		{"R_390_TLS_GDCALL", Const, 7, ""},
    +		{"R_390_TLS_GOTIE12", Const, 7, ""},
    +		{"R_390_TLS_GOTIE20", Const, 7, ""},
    +		{"R_390_TLS_GOTIE32", Const, 7, ""},
    +		{"R_390_TLS_GOTIE64", Const, 7, ""},
    +		{"R_390_TLS_IE32", Const, 7, ""},
    +		{"R_390_TLS_IE64", Const, 7, ""},
    +		{"R_390_TLS_IEENT", Const, 7, ""},
    +		{"R_390_TLS_LDCALL", Const, 7, ""},
    +		{"R_390_TLS_LDM32", Const, 7, ""},
    +		{"R_390_TLS_LDM64", Const, 7, ""},
    +		{"R_390_TLS_LDO32", Const, 7, ""},
    +		{"R_390_TLS_LDO64", Const, 7, ""},
    +		{"R_390_TLS_LE32", Const, 7, ""},
    +		{"R_390_TLS_LE64", Const, 7, ""},
    +		{"R_390_TLS_LOAD", Const, 7, ""},
    +		{"R_390_TLS_TPOFF", Const, 7, ""},
    +		{"R_AARCH64", Type, 4, ""},
    +		{"R_AARCH64_ABS16", Const, 4, ""},
    +		{"R_AARCH64_ABS32", Const, 4, ""},
    +		{"R_AARCH64_ABS64", Const, 4, ""},
    +		{"R_AARCH64_ADD_ABS_LO12_NC", Const, 4, ""},
    +		{"R_AARCH64_ADR_GOT_PAGE", Const, 4, ""},
    +		{"R_AARCH64_ADR_PREL_LO21", Const, 4, ""},
    +		{"R_AARCH64_ADR_PREL_PG_HI21", Const, 4, ""},
    +		{"R_AARCH64_ADR_PREL_PG_HI21_NC", Const, 4, ""},
    +		{"R_AARCH64_CALL26", Const, 4, ""},
    +		{"R_AARCH64_CONDBR19", Const, 4, ""},
    +		{"R_AARCH64_COPY", Const, 4, ""},
    +		{"R_AARCH64_GLOB_DAT", Const, 4, ""},
    +		{"R_AARCH64_GOT_LD_PREL19", Const, 4, ""},
    +		{"R_AARCH64_IRELATIVE", Const, 4, ""},
    +		{"R_AARCH64_JUMP26", Const, 4, ""},
    +		{"R_AARCH64_JUMP_SLOT", Const, 4, ""},
    +		{"R_AARCH64_LD64_GOTOFF_LO15", Const, 10, ""},
    +		{"R_AARCH64_LD64_GOTPAGE_LO15", Const, 10, ""},
    +		{"R_AARCH64_LD64_GOT_LO12_NC", Const, 4, ""},
    +		{"R_AARCH64_LDST128_ABS_LO12_NC", Const, 4, ""},
    +		{"R_AARCH64_LDST16_ABS_LO12_NC", Const, 4, ""},
    +		{"R_AARCH64_LDST32_ABS_LO12_NC", Const, 4, ""},
    +		{"R_AARCH64_LDST64_ABS_LO12_NC", Const, 4, ""},
    +		{"R_AARCH64_LDST8_ABS_LO12_NC", Const, 4, ""},
    +		{"R_AARCH64_LD_PREL_LO19", Const, 4, ""},
    +		{"R_AARCH64_MOVW_SABS_G0", Const, 4, ""},
    +		{"R_AARCH64_MOVW_SABS_G1", Const, 4, ""},
    +		{"R_AARCH64_MOVW_SABS_G2", Const, 4, ""},
    +		{"R_AARCH64_MOVW_UABS_G0", Const, 4, ""},
    +		{"R_AARCH64_MOVW_UABS_G0_NC", Const, 4, ""},
    +		{"R_AARCH64_MOVW_UABS_G1", Const, 4, ""},
    +		{"R_AARCH64_MOVW_UABS_G1_NC", Const, 4, ""},
    +		{"R_AARCH64_MOVW_UABS_G2", Const, 4, ""},
    +		{"R_AARCH64_MOVW_UABS_G2_NC", Const, 4, ""},
    +		{"R_AARCH64_MOVW_UABS_G3", Const, 4, ""},
    +		{"R_AARCH64_NONE", Const, 4, ""},
    +		{"R_AARCH64_NULL", Const, 4, ""},
    +		{"R_AARCH64_P32_ABS16", Const, 4, ""},
    +		{"R_AARCH64_P32_ABS32", Const, 4, ""},
    +		{"R_AARCH64_P32_ADD_ABS_LO12_NC", Const, 4, ""},
    +		{"R_AARCH64_P32_ADR_GOT_PAGE", Const, 4, ""},
    +		{"R_AARCH64_P32_ADR_PREL_LO21", Const, 4, ""},
    +		{"R_AARCH64_P32_ADR_PREL_PG_HI21", Const, 4, ""},
    +		{"R_AARCH64_P32_CALL26", Const, 4, ""},
    +		{"R_AARCH64_P32_CONDBR19", Const, 4, ""},
    +		{"R_AARCH64_P32_COPY", Const, 4, ""},
    +		{"R_AARCH64_P32_GLOB_DAT", Const, 4, ""},
    +		{"R_AARCH64_P32_GOT_LD_PREL19", Const, 4, ""},
    +		{"R_AARCH64_P32_IRELATIVE", Const, 4, ""},
    +		{"R_AARCH64_P32_JUMP26", Const, 4, ""},
    +		{"R_AARCH64_P32_JUMP_SLOT", Const, 4, ""},
    +		{"R_AARCH64_P32_LD32_GOT_LO12_NC", Const, 4, ""},
    +		{"R_AARCH64_P32_LDST128_ABS_LO12_NC", Const, 4, ""},
    +		{"R_AARCH64_P32_LDST16_ABS_LO12_NC", Const, 4, ""},
    +		{"R_AARCH64_P32_LDST32_ABS_LO12_NC", Const, 4, ""},
    +		{"R_AARCH64_P32_LDST64_ABS_LO12_NC", Const, 4, ""},
    +		{"R_AARCH64_P32_LDST8_ABS_LO12_NC", Const, 4, ""},
    +		{"R_AARCH64_P32_LD_PREL_LO19", Const, 4, ""},
    +		{"R_AARCH64_P32_MOVW_SABS_G0", Const, 4, ""},
    +		{"R_AARCH64_P32_MOVW_UABS_G0", Const, 4, ""},
    +		{"R_AARCH64_P32_MOVW_UABS_G0_NC", Const, 4, ""},
    +		{"R_AARCH64_P32_MOVW_UABS_G1", Const, 4, ""},
    +		{"R_AARCH64_P32_PREL16", Const, 4, ""},
    +		{"R_AARCH64_P32_PREL32", Const, 4, ""},
    +		{"R_AARCH64_P32_RELATIVE", Const, 4, ""},
    +		{"R_AARCH64_P32_TLSDESC", Const, 4, ""},
    +		{"R_AARCH64_P32_TLSDESC_ADD_LO12_NC", Const, 4, ""},
    +		{"R_AARCH64_P32_TLSDESC_ADR_PAGE21", Const, 4, ""},
    +		{"R_AARCH64_P32_TLSDESC_ADR_PREL21", Const, 4, ""},
    +		{"R_AARCH64_P32_TLSDESC_CALL", Const, 4, ""},
    +		{"R_AARCH64_P32_TLSDESC_LD32_LO12_NC", Const, 4, ""},
    +		{"R_AARCH64_P32_TLSDESC_LD_PREL19", Const, 4, ""},
    +		{"R_AARCH64_P32_TLSGD_ADD_LO12_NC", Const, 4, ""},
    +		{"R_AARCH64_P32_TLSGD_ADR_PAGE21", Const, 4, ""},
    +		{"R_AARCH64_P32_TLSIE_ADR_GOTTPREL_PAGE21", Const, 4, ""},
    +		{"R_AARCH64_P32_TLSIE_LD32_GOTTPREL_LO12_NC", Const, 4, ""},
    +		{"R_AARCH64_P32_TLSIE_LD_GOTTPREL_PREL19", Const, 4, ""},
    +		{"R_AARCH64_P32_TLSLE_ADD_TPREL_HI12", Const, 4, ""},
    +		{"R_AARCH64_P32_TLSLE_ADD_TPREL_LO12", Const, 4, ""},
    +		{"R_AARCH64_P32_TLSLE_ADD_TPREL_LO12_NC", Const, 4, ""},
    +		{"R_AARCH64_P32_TLSLE_MOVW_TPREL_G0", Const, 4, ""},
    +		{"R_AARCH64_P32_TLSLE_MOVW_TPREL_G0_NC", Const, 4, ""},
    +		{"R_AARCH64_P32_TLSLE_MOVW_TPREL_G1", Const, 4, ""},
    +		{"R_AARCH64_P32_TLS_DTPMOD", Const, 4, ""},
    +		{"R_AARCH64_P32_TLS_DTPREL", Const, 4, ""},
    +		{"R_AARCH64_P32_TLS_TPREL", Const, 4, ""},
    +		{"R_AARCH64_P32_TSTBR14", Const, 4, ""},
    +		{"R_AARCH64_PREL16", Const, 4, ""},
    +		{"R_AARCH64_PREL32", Const, 4, ""},
    +		{"R_AARCH64_PREL64", Const, 4, ""},
    +		{"R_AARCH64_RELATIVE", Const, 4, ""},
    +		{"R_AARCH64_TLSDESC", Const, 4, ""},
    +		{"R_AARCH64_TLSDESC_ADD", Const, 4, ""},
    +		{"R_AARCH64_TLSDESC_ADD_LO12_NC", Const, 4, ""},
    +		{"R_AARCH64_TLSDESC_ADR_PAGE21", Const, 4, ""},
    +		{"R_AARCH64_TLSDESC_ADR_PREL21", Const, 4, ""},
    +		{"R_AARCH64_TLSDESC_CALL", Const, 4, ""},
    +		{"R_AARCH64_TLSDESC_LD64_LO12_NC", Const, 4, ""},
    +		{"R_AARCH64_TLSDESC_LDR", Const, 4, ""},
    +		{"R_AARCH64_TLSDESC_LD_PREL19", Const, 4, ""},
    +		{"R_AARCH64_TLSDESC_OFF_G0_NC", Const, 4, ""},
    +		{"R_AARCH64_TLSDESC_OFF_G1", Const, 4, ""},
    +		{"R_AARCH64_TLSGD_ADD_LO12_NC", Const, 4, ""},
    +		{"R_AARCH64_TLSGD_ADR_PAGE21", Const, 4, ""},
    +		{"R_AARCH64_TLSGD_ADR_PREL21", Const, 10, ""},
    +		{"R_AARCH64_TLSGD_MOVW_G0_NC", Const, 10, ""},
    +		{"R_AARCH64_TLSGD_MOVW_G1", Const, 10, ""},
    +		{"R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21", Const, 4, ""},
    +		{"R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC", Const, 4, ""},
    +		{"R_AARCH64_TLSIE_LD_GOTTPREL_PREL19", Const, 4, ""},
    +		{"R_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC", Const, 4, ""},
    +		{"R_AARCH64_TLSIE_MOVW_GOTTPREL_G1", Const, 4, ""},
    +		{"R_AARCH64_TLSLD_ADR_PAGE21", Const, 10, ""},
    +		{"R_AARCH64_TLSLD_ADR_PREL21", Const, 10, ""},
    +		{"R_AARCH64_TLSLD_LDST128_DTPREL_LO12", Const, 10, ""},
    +		{"R_AARCH64_TLSLD_LDST128_DTPREL_LO12_NC", Const, 10, ""},
    +		{"R_AARCH64_TLSLE_ADD_TPREL_HI12", Const, 4, ""},
    +		{"R_AARCH64_TLSLE_ADD_TPREL_LO12", Const, 4, ""},
    +		{"R_AARCH64_TLSLE_ADD_TPREL_LO12_NC", Const, 4, ""},
    +		{"R_AARCH64_TLSLE_LDST128_TPREL_LO12", Const, 10, ""},
    +		{"R_AARCH64_TLSLE_LDST128_TPREL_LO12_NC", Const, 10, ""},
    +		{"R_AARCH64_TLSLE_MOVW_TPREL_G0", Const, 4, ""},
    +		{"R_AARCH64_TLSLE_MOVW_TPREL_G0_NC", Const, 4, ""},
    +		{"R_AARCH64_TLSLE_MOVW_TPREL_G1", Const, 4, ""},
    +		{"R_AARCH64_TLSLE_MOVW_TPREL_G1_NC", Const, 4, ""},
    +		{"R_AARCH64_TLSLE_MOVW_TPREL_G2", Const, 4, ""},
    +		{"R_AARCH64_TLS_DTPMOD64", Const, 4, ""},
    +		{"R_AARCH64_TLS_DTPREL64", Const, 4, ""},
    +		{"R_AARCH64_TLS_TPREL64", Const, 4, ""},
    +		{"R_AARCH64_TSTBR14", Const, 4, ""},
    +		{"R_ALPHA", Type, 0, ""},
    +		{"R_ALPHA_BRADDR", Const, 0, ""},
    +		{"R_ALPHA_COPY", Const, 0, ""},
    +		{"R_ALPHA_GLOB_DAT", Const, 0, ""},
    +		{"R_ALPHA_GPDISP", Const, 0, ""},
    +		{"R_ALPHA_GPREL32", Const, 0, ""},
    +		{"R_ALPHA_GPRELHIGH", Const, 0, ""},
    +		{"R_ALPHA_GPRELLOW", Const, 0, ""},
    +		{"R_ALPHA_GPVALUE", Const, 0, ""},
    +		{"R_ALPHA_HINT", Const, 0, ""},
    +		{"R_ALPHA_IMMED_BR_HI32", Const, 0, ""},
    +		{"R_ALPHA_IMMED_GP_16", Const, 0, ""},
    +		{"R_ALPHA_IMMED_GP_HI32", Const, 0, ""},
    +		{"R_ALPHA_IMMED_LO32", Const, 0, ""},
    +		{"R_ALPHA_IMMED_SCN_HI32", Const, 0, ""},
    +		{"R_ALPHA_JMP_SLOT", Const, 0, ""},
    +		{"R_ALPHA_LITERAL", Const, 0, ""},
    +		{"R_ALPHA_LITUSE", Const, 0, ""},
    +		{"R_ALPHA_NONE", Const, 0, ""},
    +		{"R_ALPHA_OP_PRSHIFT", Const, 0, ""},
    +		{"R_ALPHA_OP_PSUB", Const, 0, ""},
    +		{"R_ALPHA_OP_PUSH", Const, 0, ""},
    +		{"R_ALPHA_OP_STORE", Const, 0, ""},
    +		{"R_ALPHA_REFLONG", Const, 0, ""},
    +		{"R_ALPHA_REFQUAD", Const, 0, ""},
    +		{"R_ALPHA_RELATIVE", Const, 0, ""},
    +		{"R_ALPHA_SREL16", Const, 0, ""},
    +		{"R_ALPHA_SREL32", Const, 0, ""},
    +		{"R_ALPHA_SREL64", Const, 0, ""},
    +		{"R_ARM", Type, 0, ""},
    +		{"R_ARM_ABS12", Const, 0, ""},
    +		{"R_ARM_ABS16", Const, 0, ""},
    +		{"R_ARM_ABS32", Const, 0, ""},
    +		{"R_ARM_ABS32_NOI", Const, 10, ""},
    +		{"R_ARM_ABS8", Const, 0, ""},
    +		{"R_ARM_ALU_PCREL_15_8", Const, 10, ""},
    +		{"R_ARM_ALU_PCREL_23_15", Const, 10, ""},
    +		{"R_ARM_ALU_PCREL_7_0", Const, 10, ""},
    +		{"R_ARM_ALU_PC_G0", Const, 10, ""},
    +		{"R_ARM_ALU_PC_G0_NC", Const, 10, ""},
    +		{"R_ARM_ALU_PC_G1", Const, 10, ""},
    +		{"R_ARM_ALU_PC_G1_NC", Const, 10, ""},
    +		{"R_ARM_ALU_PC_G2", Const, 10, ""},
    +		{"R_ARM_ALU_SBREL_19_12_NC", Const, 10, ""},
    +		{"R_ARM_ALU_SBREL_27_20_CK", Const, 10, ""},
    +		{"R_ARM_ALU_SB_G0", Const, 10, ""},
    +		{"R_ARM_ALU_SB_G0_NC", Const, 10, ""},
    +		{"R_ARM_ALU_SB_G1", Const, 10, ""},
    +		{"R_ARM_ALU_SB_G1_NC", Const, 10, ""},
    +		{"R_ARM_ALU_SB_G2", Const, 10, ""},
    +		{"R_ARM_AMP_VCALL9", Const, 0, ""},
    +		{"R_ARM_BASE_ABS", Const, 10, ""},
    +		{"R_ARM_CALL", Const, 10, ""},
    +		{"R_ARM_COPY", Const, 0, ""},
    +		{"R_ARM_GLOB_DAT", Const, 0, ""},
    +		{"R_ARM_GNU_VTENTRY", Const, 0, ""},
    +		{"R_ARM_GNU_VTINHERIT", Const, 0, ""},
    +		{"R_ARM_GOT32", Const, 0, ""},
    +		{"R_ARM_GOTOFF", Const, 0, ""},
    +		{"R_ARM_GOTOFF12", Const, 10, ""},
    +		{"R_ARM_GOTPC", Const, 0, ""},
    +		{"R_ARM_GOTRELAX", Const, 10, ""},
    +		{"R_ARM_GOT_ABS", Const, 10, ""},
    +		{"R_ARM_GOT_BREL12", Const, 10, ""},
    +		{"R_ARM_GOT_PREL", Const, 10, ""},
    +		{"R_ARM_IRELATIVE", Const, 10, ""},
    +		{"R_ARM_JUMP24", Const, 10, ""},
    +		{"R_ARM_JUMP_SLOT", Const, 0, ""},
    +		{"R_ARM_LDC_PC_G0", Const, 10, ""},
    +		{"R_ARM_LDC_PC_G1", Const, 10, ""},
    +		{"R_ARM_LDC_PC_G2", Const, 10, ""},
    +		{"R_ARM_LDC_SB_G0", Const, 10, ""},
    +		{"R_ARM_LDC_SB_G1", Const, 10, ""},
    +		{"R_ARM_LDC_SB_G2", Const, 10, ""},
    +		{"R_ARM_LDRS_PC_G0", Const, 10, ""},
    +		{"R_ARM_LDRS_PC_G1", Const, 10, ""},
    +		{"R_ARM_LDRS_PC_G2", Const, 10, ""},
    +		{"R_ARM_LDRS_SB_G0", Const, 10, ""},
    +		{"R_ARM_LDRS_SB_G1", Const, 10, ""},
    +		{"R_ARM_LDRS_SB_G2", Const, 10, ""},
    +		{"R_ARM_LDR_PC_G1", Const, 10, ""},
    +		{"R_ARM_LDR_PC_G2", Const, 10, ""},
    +		{"R_ARM_LDR_SBREL_11_10_NC", Const, 10, ""},
    +		{"R_ARM_LDR_SB_G0", Const, 10, ""},
    +		{"R_ARM_LDR_SB_G1", Const, 10, ""},
    +		{"R_ARM_LDR_SB_G2", Const, 10, ""},
    +		{"R_ARM_ME_TOO", Const, 10, ""},
    +		{"R_ARM_MOVT_ABS", Const, 10, ""},
    +		{"R_ARM_MOVT_BREL", Const, 10, ""},
    +		{"R_ARM_MOVT_PREL", Const, 10, ""},
    +		{"R_ARM_MOVW_ABS_NC", Const, 10, ""},
    +		{"R_ARM_MOVW_BREL", Const, 10, ""},
    +		{"R_ARM_MOVW_BREL_NC", Const, 10, ""},
    +		{"R_ARM_MOVW_PREL_NC", Const, 10, ""},
    +		{"R_ARM_NONE", Const, 0, ""},
    +		{"R_ARM_PC13", Const, 0, ""},
    +		{"R_ARM_PC24", Const, 0, ""},
    +		{"R_ARM_PLT32", Const, 0, ""},
    +		{"R_ARM_PLT32_ABS", Const, 10, ""},
    +		{"R_ARM_PREL31", Const, 10, ""},
    +		{"R_ARM_PRIVATE_0", Const, 10, ""},
    +		{"R_ARM_PRIVATE_1", Const, 10, ""},
    +		{"R_ARM_PRIVATE_10", Const, 10, ""},
    +		{"R_ARM_PRIVATE_11", Const, 10, ""},
    +		{"R_ARM_PRIVATE_12", Const, 10, ""},
    +		{"R_ARM_PRIVATE_13", Const, 10, ""},
    +		{"R_ARM_PRIVATE_14", Const, 10, ""},
    +		{"R_ARM_PRIVATE_15", Const, 10, ""},
    +		{"R_ARM_PRIVATE_2", Const, 10, ""},
    +		{"R_ARM_PRIVATE_3", Const, 10, ""},
    +		{"R_ARM_PRIVATE_4", Const, 10, ""},
    +		{"R_ARM_PRIVATE_5", Const, 10, ""},
    +		{"R_ARM_PRIVATE_6", Const, 10, ""},
    +		{"R_ARM_PRIVATE_7", Const, 10, ""},
    +		{"R_ARM_PRIVATE_8", Const, 10, ""},
    +		{"R_ARM_PRIVATE_9", Const, 10, ""},
    +		{"R_ARM_RABS32", Const, 0, ""},
    +		{"R_ARM_RBASE", Const, 0, ""},
    +		{"R_ARM_REL32", Const, 0, ""},
    +		{"R_ARM_REL32_NOI", Const, 10, ""},
    +		{"R_ARM_RELATIVE", Const, 0, ""},
    +		{"R_ARM_RPC24", Const, 0, ""},
    +		{"R_ARM_RREL32", Const, 0, ""},
    +		{"R_ARM_RSBREL32", Const, 0, ""},
    +		{"R_ARM_RXPC25", Const, 10, ""},
    +		{"R_ARM_SBREL31", Const, 10, ""},
    +		{"R_ARM_SBREL32", Const, 0, ""},
    +		{"R_ARM_SWI24", Const, 0, ""},
    +		{"R_ARM_TARGET1", Const, 10, ""},
    +		{"R_ARM_TARGET2", Const, 10, ""},
    +		{"R_ARM_THM_ABS5", Const, 0, ""},
    +		{"R_ARM_THM_ALU_ABS_G0_NC", Const, 10, ""},
    +		{"R_ARM_THM_ALU_ABS_G1_NC", Const, 10, ""},
    +		{"R_ARM_THM_ALU_ABS_G2_NC", Const, 10, ""},
    +		{"R_ARM_THM_ALU_ABS_G3", Const, 10, ""},
    +		{"R_ARM_THM_ALU_PREL_11_0", Const, 10, ""},
    +		{"R_ARM_THM_GOT_BREL12", Const, 10, ""},
    +		{"R_ARM_THM_JUMP11", Const, 10, ""},
    +		{"R_ARM_THM_JUMP19", Const, 10, ""},
    +		{"R_ARM_THM_JUMP24", Const, 10, ""},
    +		{"R_ARM_THM_JUMP6", Const, 10, ""},
    +		{"R_ARM_THM_JUMP8", Const, 10, ""},
    +		{"R_ARM_THM_MOVT_ABS", Const, 10, ""},
    +		{"R_ARM_THM_MOVT_BREL", Const, 10, ""},
    +		{"R_ARM_THM_MOVT_PREL", Const, 10, ""},
    +		{"R_ARM_THM_MOVW_ABS_NC", Const, 10, ""},
    +		{"R_ARM_THM_MOVW_BREL", Const, 10, ""},
    +		{"R_ARM_THM_MOVW_BREL_NC", Const, 10, ""},
    +		{"R_ARM_THM_MOVW_PREL_NC", Const, 10, ""},
    +		{"R_ARM_THM_PC12", Const, 10, ""},
    +		{"R_ARM_THM_PC22", Const, 0, ""},
    +		{"R_ARM_THM_PC8", Const, 0, ""},
    +		{"R_ARM_THM_RPC22", Const, 0, ""},
    +		{"R_ARM_THM_SWI8", Const, 0, ""},
    +		{"R_ARM_THM_TLS_CALL", Const, 10, ""},
    +		{"R_ARM_THM_TLS_DESCSEQ16", Const, 10, ""},
    +		{"R_ARM_THM_TLS_DESCSEQ32", Const, 10, ""},
    +		{"R_ARM_THM_XPC22", Const, 0, ""},
    +		{"R_ARM_TLS_CALL", Const, 10, ""},
    +		{"R_ARM_TLS_DESCSEQ", Const, 10, ""},
    +		{"R_ARM_TLS_DTPMOD32", Const, 10, ""},
    +		{"R_ARM_TLS_DTPOFF32", Const, 10, ""},
    +		{"R_ARM_TLS_GD32", Const, 10, ""},
    +		{"R_ARM_TLS_GOTDESC", Const, 10, ""},
    +		{"R_ARM_TLS_IE12GP", Const, 10, ""},
    +		{"R_ARM_TLS_IE32", Const, 10, ""},
    +		{"R_ARM_TLS_LDM32", Const, 10, ""},
    +		{"R_ARM_TLS_LDO12", Const, 10, ""},
    +		{"R_ARM_TLS_LDO32", Const, 10, ""},
    +		{"R_ARM_TLS_LE12", Const, 10, ""},
    +		{"R_ARM_TLS_LE32", Const, 10, ""},
    +		{"R_ARM_TLS_TPOFF32", Const, 10, ""},
    +		{"R_ARM_V4BX", Const, 10, ""},
    +		{"R_ARM_XPC25", Const, 0, ""},
    +		{"R_INFO", Func, 0, "func(sym uint32, typ uint32) uint64"},
    +		{"R_INFO32", Func, 0, "func(sym uint32, typ uint32) uint32"},
    +		{"R_LARCH", Type, 19, ""},
    +		{"R_LARCH_32", Const, 19, ""},
    +		{"R_LARCH_32_PCREL", Const, 20, ""},
    +		{"R_LARCH_64", Const, 19, ""},
    +		{"R_LARCH_64_PCREL", Const, 22, ""},
    +		{"R_LARCH_ABS64_HI12", Const, 20, ""},
    +		{"R_LARCH_ABS64_LO20", Const, 20, ""},
    +		{"R_LARCH_ABS_HI20", Const, 20, ""},
    +		{"R_LARCH_ABS_LO12", Const, 20, ""},
    +		{"R_LARCH_ADD16", Const, 19, ""},
    +		{"R_LARCH_ADD24", Const, 19, ""},
    +		{"R_LARCH_ADD32", Const, 19, ""},
    +		{"R_LARCH_ADD6", Const, 22, ""},
    +		{"R_LARCH_ADD64", Const, 19, ""},
    +		{"R_LARCH_ADD8", Const, 19, ""},
    +		{"R_LARCH_ADD_ULEB128", Const, 22, ""},
    +		{"R_LARCH_ALIGN", Const, 22, ""},
    +		{"R_LARCH_B16", Const, 20, ""},
    +		{"R_LARCH_B21", Const, 20, ""},
    +		{"R_LARCH_B26", Const, 20, ""},
    +		{"R_LARCH_CFA", Const, 22, ""},
    +		{"R_LARCH_COPY", Const, 19, ""},
    +		{"R_LARCH_DELETE", Const, 22, ""},
    +		{"R_LARCH_GNU_VTENTRY", Const, 20, ""},
    +		{"R_LARCH_GNU_VTINHERIT", Const, 20, ""},
    +		{"R_LARCH_GOT64_HI12", Const, 20, ""},
    +		{"R_LARCH_GOT64_LO20", Const, 20, ""},
    +		{"R_LARCH_GOT64_PC_HI12", Const, 20, ""},
    +		{"R_LARCH_GOT64_PC_LO20", Const, 20, ""},
    +		{"R_LARCH_GOT_HI20", Const, 20, ""},
    +		{"R_LARCH_GOT_LO12", Const, 20, ""},
    +		{"R_LARCH_GOT_PC_HI20", Const, 20, ""},
    +		{"R_LARCH_GOT_PC_LO12", Const, 20, ""},
    +		{"R_LARCH_IRELATIVE", Const, 19, ""},
    +		{"R_LARCH_JUMP_SLOT", Const, 19, ""},
    +		{"R_LARCH_MARK_LA", Const, 19, ""},
    +		{"R_LARCH_MARK_PCREL", Const, 19, ""},
    +		{"R_LARCH_NONE", Const, 19, ""},
    +		{"R_LARCH_PCALA64_HI12", Const, 20, ""},
    +		{"R_LARCH_PCALA64_LO20", Const, 20, ""},
    +		{"R_LARCH_PCALA_HI20", Const, 20, ""},
    +		{"R_LARCH_PCALA_LO12", Const, 20, ""},
    +		{"R_LARCH_PCREL20_S2", Const, 22, ""},
    +		{"R_LARCH_RELATIVE", Const, 19, ""},
    +		{"R_LARCH_RELAX", Const, 20, ""},
    +		{"R_LARCH_SOP_ADD", Const, 19, ""},
    +		{"R_LARCH_SOP_AND", Const, 19, ""},
    +		{"R_LARCH_SOP_ASSERT", Const, 19, ""},
    +		{"R_LARCH_SOP_IF_ELSE", Const, 19, ""},
    +		{"R_LARCH_SOP_NOT", Const, 19, ""},
    +		{"R_LARCH_SOP_POP_32_S_0_10_10_16_S2", Const, 19, ""},
    +		{"R_LARCH_SOP_POP_32_S_0_5_10_16_S2", Const, 19, ""},
    +		{"R_LARCH_SOP_POP_32_S_10_12", Const, 19, ""},
    +		{"R_LARCH_SOP_POP_32_S_10_16", Const, 19, ""},
    +		{"R_LARCH_SOP_POP_32_S_10_16_S2", Const, 19, ""},
    +		{"R_LARCH_SOP_POP_32_S_10_5", Const, 19, ""},
    +		{"R_LARCH_SOP_POP_32_S_5_20", Const, 19, ""},
    +		{"R_LARCH_SOP_POP_32_U", Const, 19, ""},
    +		{"R_LARCH_SOP_POP_32_U_10_12", Const, 19, ""},
    +		{"R_LARCH_SOP_PUSH_ABSOLUTE", Const, 19, ""},
    +		{"R_LARCH_SOP_PUSH_DUP", Const, 19, ""},
    +		{"R_LARCH_SOP_PUSH_GPREL", Const, 19, ""},
    +		{"R_LARCH_SOP_PUSH_PCREL", Const, 19, ""},
    +		{"R_LARCH_SOP_PUSH_PLT_PCREL", Const, 19, ""},
    +		{"R_LARCH_SOP_PUSH_TLS_GD", Const, 19, ""},
    +		{"R_LARCH_SOP_PUSH_TLS_GOT", Const, 19, ""},
    +		{"R_LARCH_SOP_PUSH_TLS_TPREL", Const, 19, ""},
    +		{"R_LARCH_SOP_SL", Const, 19, ""},
    +		{"R_LARCH_SOP_SR", Const, 19, ""},
    +		{"R_LARCH_SOP_SUB", Const, 19, ""},
    +		{"R_LARCH_SUB16", Const, 19, ""},
    +		{"R_LARCH_SUB24", Const, 19, ""},
    +		{"R_LARCH_SUB32", Const, 19, ""},
    +		{"R_LARCH_SUB6", Const, 22, ""},
    +		{"R_LARCH_SUB64", Const, 19, ""},
    +		{"R_LARCH_SUB8", Const, 19, ""},
    +		{"R_LARCH_SUB_ULEB128", Const, 22, ""},
    +		{"R_LARCH_TLS_DTPMOD32", Const, 19, ""},
    +		{"R_LARCH_TLS_DTPMOD64", Const, 19, ""},
    +		{"R_LARCH_TLS_DTPREL32", Const, 19, ""},
    +		{"R_LARCH_TLS_DTPREL64", Const, 19, ""},
    +		{"R_LARCH_TLS_GD_HI20", Const, 20, ""},
    +		{"R_LARCH_TLS_GD_PC_HI20", Const, 20, ""},
    +		{"R_LARCH_TLS_IE64_HI12", Const, 20, ""},
    +		{"R_LARCH_TLS_IE64_LO20", Const, 20, ""},
    +		{"R_LARCH_TLS_IE64_PC_HI12", Const, 20, ""},
    +		{"R_LARCH_TLS_IE64_PC_LO20", Const, 20, ""},
    +		{"R_LARCH_TLS_IE_HI20", Const, 20, ""},
    +		{"R_LARCH_TLS_IE_LO12", Const, 20, ""},
    +		{"R_LARCH_TLS_IE_PC_HI20", Const, 20, ""},
    +		{"R_LARCH_TLS_IE_PC_LO12", Const, 20, ""},
    +		{"R_LARCH_TLS_LD_HI20", Const, 20, ""},
    +		{"R_LARCH_TLS_LD_PC_HI20", Const, 20, ""},
    +		{"R_LARCH_TLS_LE64_HI12", Const, 20, ""},
    +		{"R_LARCH_TLS_LE64_LO20", Const, 20, ""},
    +		{"R_LARCH_TLS_LE_HI20", Const, 20, ""},
    +		{"R_LARCH_TLS_LE_LO12", Const, 20, ""},
    +		{"R_LARCH_TLS_TPREL32", Const, 19, ""},
    +		{"R_LARCH_TLS_TPREL64", Const, 19, ""},
    +		{"R_MIPS", Type, 6, ""},
    +		{"R_MIPS_16", Const, 6, ""},
    +		{"R_MIPS_26", Const, 6, ""},
    +		{"R_MIPS_32", Const, 6, ""},
    +		{"R_MIPS_64", Const, 6, ""},
    +		{"R_MIPS_ADD_IMMEDIATE", Const, 6, ""},
    +		{"R_MIPS_CALL16", Const, 6, ""},
    +		{"R_MIPS_CALL_HI16", Const, 6, ""},
    +		{"R_MIPS_CALL_LO16", Const, 6, ""},
    +		{"R_MIPS_DELETE", Const, 6, ""},
    +		{"R_MIPS_GOT16", Const, 6, ""},
    +		{"R_MIPS_GOT_DISP", Const, 6, ""},
    +		{"R_MIPS_GOT_HI16", Const, 6, ""},
    +		{"R_MIPS_GOT_LO16", Const, 6, ""},
    +		{"R_MIPS_GOT_OFST", Const, 6, ""},
    +		{"R_MIPS_GOT_PAGE", Const, 6, ""},
    +		{"R_MIPS_GPREL16", Const, 6, ""},
    +		{"R_MIPS_GPREL32", Const, 6, ""},
    +		{"R_MIPS_HI16", Const, 6, ""},
    +		{"R_MIPS_HIGHER", Const, 6, ""},
    +		{"R_MIPS_HIGHEST", Const, 6, ""},
    +		{"R_MIPS_INSERT_A", Const, 6, ""},
    +		{"R_MIPS_INSERT_B", Const, 6, ""},
    +		{"R_MIPS_JALR", Const, 6, ""},
    +		{"R_MIPS_LITERAL", Const, 6, ""},
    +		{"R_MIPS_LO16", Const, 6, ""},
    +		{"R_MIPS_NONE", Const, 6, ""},
    +		{"R_MIPS_PC16", Const, 6, ""},
    +		{"R_MIPS_PC32", Const, 22, ""},
    +		{"R_MIPS_PJUMP", Const, 6, ""},
    +		{"R_MIPS_REL16", Const, 6, ""},
    +		{"R_MIPS_REL32", Const, 6, ""},
    +		{"R_MIPS_RELGOT", Const, 6, ""},
    +		{"R_MIPS_SCN_DISP", Const, 6, ""},
    +		{"R_MIPS_SHIFT5", Const, 6, ""},
    +		{"R_MIPS_SHIFT6", Const, 6, ""},
    +		{"R_MIPS_SUB", Const, 6, ""},
    +		{"R_MIPS_TLS_DTPMOD32", Const, 6, ""},
    +		{"R_MIPS_TLS_DTPMOD64", Const, 6, ""},
    +		{"R_MIPS_TLS_DTPREL32", Const, 6, ""},
    +		{"R_MIPS_TLS_DTPREL64", Const, 6, ""},
    +		{"R_MIPS_TLS_DTPREL_HI16", Const, 6, ""},
    +		{"R_MIPS_TLS_DTPREL_LO16", Const, 6, ""},
    +		{"R_MIPS_TLS_GD", Const, 6, ""},
    +		{"R_MIPS_TLS_GOTTPREL", Const, 6, ""},
    +		{"R_MIPS_TLS_LDM", Const, 6, ""},
    +		{"R_MIPS_TLS_TPREL32", Const, 6, ""},
    +		{"R_MIPS_TLS_TPREL64", Const, 6, ""},
    +		{"R_MIPS_TLS_TPREL_HI16", Const, 6, ""},
    +		{"R_MIPS_TLS_TPREL_LO16", Const, 6, ""},
    +		{"R_PPC", Type, 0, ""},
    +		{"R_PPC64", Type, 5, ""},
    +		{"R_PPC64_ADDR14", Const, 5, ""},
    +		{"R_PPC64_ADDR14_BRNTAKEN", Const, 5, ""},
    +		{"R_PPC64_ADDR14_BRTAKEN", Const, 5, ""},
    +		{"R_PPC64_ADDR16", Const, 5, ""},
    +		{"R_PPC64_ADDR16_DS", Const, 5, ""},
    +		{"R_PPC64_ADDR16_HA", Const, 5, ""},
    +		{"R_PPC64_ADDR16_HI", Const, 5, ""},
    +		{"R_PPC64_ADDR16_HIGH", Const, 10, ""},
    +		{"R_PPC64_ADDR16_HIGHA", Const, 10, ""},
    +		{"R_PPC64_ADDR16_HIGHER", Const, 5, ""},
    +		{"R_PPC64_ADDR16_HIGHER34", Const, 20, ""},
    +		{"R_PPC64_ADDR16_HIGHERA", Const, 5, ""},
    +		{"R_PPC64_ADDR16_HIGHERA34", Const, 20, ""},
    +		{"R_PPC64_ADDR16_HIGHEST", Const, 5, ""},
    +		{"R_PPC64_ADDR16_HIGHEST34", Const, 20, ""},
    +		{"R_PPC64_ADDR16_HIGHESTA", Const, 5, ""},
    +		{"R_PPC64_ADDR16_HIGHESTA34", Const, 20, ""},
    +		{"R_PPC64_ADDR16_LO", Const, 5, ""},
    +		{"R_PPC64_ADDR16_LO_DS", Const, 5, ""},
    +		{"R_PPC64_ADDR24", Const, 5, ""},
    +		{"R_PPC64_ADDR32", Const, 5, ""},
    +		{"R_PPC64_ADDR64", Const, 5, ""},
    +		{"R_PPC64_ADDR64_LOCAL", Const, 10, ""},
    +		{"R_PPC64_COPY", Const, 20, ""},
    +		{"R_PPC64_D28", Const, 20, ""},
    +		{"R_PPC64_D34", Const, 20, ""},
    +		{"R_PPC64_D34_HA30", Const, 20, ""},
    +		{"R_PPC64_D34_HI30", Const, 20, ""},
    +		{"R_PPC64_D34_LO", Const, 20, ""},
    +		{"R_PPC64_DTPMOD64", Const, 5, ""},
    +		{"R_PPC64_DTPREL16", Const, 5, ""},
    +		{"R_PPC64_DTPREL16_DS", Const, 5, ""},
    +		{"R_PPC64_DTPREL16_HA", Const, 5, ""},
    +		{"R_PPC64_DTPREL16_HI", Const, 5, ""},
    +		{"R_PPC64_DTPREL16_HIGH", Const, 10, ""},
    +		{"R_PPC64_DTPREL16_HIGHA", Const, 10, ""},
    +		{"R_PPC64_DTPREL16_HIGHER", Const, 5, ""},
    +		{"R_PPC64_DTPREL16_HIGHERA", Const, 5, ""},
    +		{"R_PPC64_DTPREL16_HIGHEST", Const, 5, ""},
    +		{"R_PPC64_DTPREL16_HIGHESTA", Const, 5, ""},
    +		{"R_PPC64_DTPREL16_LO", Const, 5, ""},
    +		{"R_PPC64_DTPREL16_LO_DS", Const, 5, ""},
    +		{"R_PPC64_DTPREL34", Const, 20, ""},
    +		{"R_PPC64_DTPREL64", Const, 5, ""},
    +		{"R_PPC64_ENTRY", Const, 10, ""},
    +		{"R_PPC64_GLOB_DAT", Const, 20, ""},
    +		{"R_PPC64_GNU_VTENTRY", Const, 20, ""},
    +		{"R_PPC64_GNU_VTINHERIT", Const, 20, ""},
    +		{"R_PPC64_GOT16", Const, 5, ""},
    +		{"R_PPC64_GOT16_DS", Const, 5, ""},
    +		{"R_PPC64_GOT16_HA", Const, 5, ""},
    +		{"R_PPC64_GOT16_HI", Const, 5, ""},
    +		{"R_PPC64_GOT16_LO", Const, 5, ""},
    +		{"R_PPC64_GOT16_LO_DS", Const, 5, ""},
    +		{"R_PPC64_GOT_DTPREL16_DS", Const, 5, ""},
    +		{"R_PPC64_GOT_DTPREL16_HA", Const, 5, ""},
    +		{"R_PPC64_GOT_DTPREL16_HI", Const, 5, ""},
    +		{"R_PPC64_GOT_DTPREL16_LO_DS", Const, 5, ""},
    +		{"R_PPC64_GOT_DTPREL_PCREL34", Const, 20, ""},
    +		{"R_PPC64_GOT_PCREL34", Const, 20, ""},
    +		{"R_PPC64_GOT_TLSGD16", Const, 5, ""},
    +		{"R_PPC64_GOT_TLSGD16_HA", Const, 5, ""},
    +		{"R_PPC64_GOT_TLSGD16_HI", Const, 5, ""},
    +		{"R_PPC64_GOT_TLSGD16_LO", Const, 5, ""},
    +		{"R_PPC64_GOT_TLSGD_PCREL34", Const, 20, ""},
    +		{"R_PPC64_GOT_TLSLD16", Const, 5, ""},
    +		{"R_PPC64_GOT_TLSLD16_HA", Const, 5, ""},
    +		{"R_PPC64_GOT_TLSLD16_HI", Const, 5, ""},
    +		{"R_PPC64_GOT_TLSLD16_LO", Const, 5, ""},
    +		{"R_PPC64_GOT_TLSLD_PCREL34", Const, 20, ""},
    +		{"R_PPC64_GOT_TPREL16_DS", Const, 5, ""},
    +		{"R_PPC64_GOT_TPREL16_HA", Const, 5, ""},
    +		{"R_PPC64_GOT_TPREL16_HI", Const, 5, ""},
    +		{"R_PPC64_GOT_TPREL16_LO_DS", Const, 5, ""},
    +		{"R_PPC64_GOT_TPREL_PCREL34", Const, 20, ""},
    +		{"R_PPC64_IRELATIVE", Const, 10, ""},
    +		{"R_PPC64_JMP_IREL", Const, 10, ""},
    +		{"R_PPC64_JMP_SLOT", Const, 5, ""},
    +		{"R_PPC64_NONE", Const, 5, ""},
    +		{"R_PPC64_PCREL28", Const, 20, ""},
    +		{"R_PPC64_PCREL34", Const, 20, ""},
    +		{"R_PPC64_PCREL_OPT", Const, 20, ""},
    +		{"R_PPC64_PLT16_HA", Const, 20, ""},
    +		{"R_PPC64_PLT16_HI", Const, 20, ""},
    +		{"R_PPC64_PLT16_LO", Const, 20, ""},
    +		{"R_PPC64_PLT16_LO_DS", Const, 10, ""},
    +		{"R_PPC64_PLT32", Const, 20, ""},
    +		{"R_PPC64_PLT64", Const, 20, ""},
    +		{"R_PPC64_PLTCALL", Const, 20, ""},
    +		{"R_PPC64_PLTCALL_NOTOC", Const, 20, ""},
    +		{"R_PPC64_PLTGOT16", Const, 10, ""},
    +		{"R_PPC64_PLTGOT16_DS", Const, 10, ""},
    +		{"R_PPC64_PLTGOT16_HA", Const, 10, ""},
    +		{"R_PPC64_PLTGOT16_HI", Const, 10, ""},
    +		{"R_PPC64_PLTGOT16_LO", Const, 10, ""},
    +		{"R_PPC64_PLTGOT_LO_DS", Const, 10, ""},
    +		{"R_PPC64_PLTREL32", Const, 20, ""},
    +		{"R_PPC64_PLTREL64", Const, 20, ""},
    +		{"R_PPC64_PLTSEQ", Const, 20, ""},
    +		{"R_PPC64_PLTSEQ_NOTOC", Const, 20, ""},
    +		{"R_PPC64_PLT_PCREL34", Const, 20, ""},
    +		{"R_PPC64_PLT_PCREL34_NOTOC", Const, 20, ""},
    +		{"R_PPC64_REL14", Const, 5, ""},
    +		{"R_PPC64_REL14_BRNTAKEN", Const, 5, ""},
    +		{"R_PPC64_REL14_BRTAKEN", Const, 5, ""},
    +		{"R_PPC64_REL16", Const, 5, ""},
    +		{"R_PPC64_REL16DX_HA", Const, 10, ""},
    +		{"R_PPC64_REL16_HA", Const, 5, ""},
    +		{"R_PPC64_REL16_HI", Const, 5, ""},
    +		{"R_PPC64_REL16_HIGH", Const, 20, ""},
    +		{"R_PPC64_REL16_HIGHA", Const, 20, ""},
    +		{"R_PPC64_REL16_HIGHER", Const, 20, ""},
    +		{"R_PPC64_REL16_HIGHER34", Const, 20, ""},
    +		{"R_PPC64_REL16_HIGHERA", Const, 20, ""},
    +		{"R_PPC64_REL16_HIGHERA34", Const, 20, ""},
    +		{"R_PPC64_REL16_HIGHEST", Const, 20, ""},
    +		{"R_PPC64_REL16_HIGHEST34", Const, 20, ""},
    +		{"R_PPC64_REL16_HIGHESTA", Const, 20, ""},
    +		{"R_PPC64_REL16_HIGHESTA34", Const, 20, ""},
    +		{"R_PPC64_REL16_LO", Const, 5, ""},
    +		{"R_PPC64_REL24", Const, 5, ""},
    +		{"R_PPC64_REL24_NOTOC", Const, 10, ""},
    +		{"R_PPC64_REL24_P9NOTOC", Const, 21, ""},
    +		{"R_PPC64_REL30", Const, 20, ""},
    +		{"R_PPC64_REL32", Const, 5, ""},
    +		{"R_PPC64_REL64", Const, 5, ""},
    +		{"R_PPC64_RELATIVE", Const, 18, ""},
    +		{"R_PPC64_SECTOFF", Const, 20, ""},
    +		{"R_PPC64_SECTOFF_DS", Const, 10, ""},
    +		{"R_PPC64_SECTOFF_HA", Const, 20, ""},
    +		{"R_PPC64_SECTOFF_HI", Const, 20, ""},
    +		{"R_PPC64_SECTOFF_LO", Const, 20, ""},
    +		{"R_PPC64_SECTOFF_LO_DS", Const, 10, ""},
    +		{"R_PPC64_TLS", Const, 5, ""},
    +		{"R_PPC64_TLSGD", Const, 5, ""},
    +		{"R_PPC64_TLSLD", Const, 5, ""},
    +		{"R_PPC64_TOC", Const, 5, ""},
    +		{"R_PPC64_TOC16", Const, 5, ""},
    +		{"R_PPC64_TOC16_DS", Const, 5, ""},
    +		{"R_PPC64_TOC16_HA", Const, 5, ""},
    +		{"R_PPC64_TOC16_HI", Const, 5, ""},
    +		{"R_PPC64_TOC16_LO", Const, 5, ""},
    +		{"R_PPC64_TOC16_LO_DS", Const, 5, ""},
    +		{"R_PPC64_TOCSAVE", Const, 10, ""},
    +		{"R_PPC64_TPREL16", Const, 5, ""},
    +		{"R_PPC64_TPREL16_DS", Const, 5, ""},
    +		{"R_PPC64_TPREL16_HA", Const, 5, ""},
    +		{"R_PPC64_TPREL16_HI", Const, 5, ""},
    +		{"R_PPC64_TPREL16_HIGH", Const, 10, ""},
    +		{"R_PPC64_TPREL16_HIGHA", Const, 10, ""},
    +		{"R_PPC64_TPREL16_HIGHER", Const, 5, ""},
    +		{"R_PPC64_TPREL16_HIGHERA", Const, 5, ""},
    +		{"R_PPC64_TPREL16_HIGHEST", Const, 5, ""},
    +		{"R_PPC64_TPREL16_HIGHESTA", Const, 5, ""},
    +		{"R_PPC64_TPREL16_LO", Const, 5, ""},
    +		{"R_PPC64_TPREL16_LO_DS", Const, 5, ""},
    +		{"R_PPC64_TPREL34", Const, 20, ""},
    +		{"R_PPC64_TPREL64", Const, 5, ""},
    +		{"R_PPC64_UADDR16", Const, 20, ""},
    +		{"R_PPC64_UADDR32", Const, 20, ""},
    +		{"R_PPC64_UADDR64", Const, 20, ""},
    +		{"R_PPC_ADDR14", Const, 0, ""},
    +		{"R_PPC_ADDR14_BRNTAKEN", Const, 0, ""},
    +		{"R_PPC_ADDR14_BRTAKEN", Const, 0, ""},
    +		{"R_PPC_ADDR16", Const, 0, ""},
    +		{"R_PPC_ADDR16_HA", Const, 0, ""},
    +		{"R_PPC_ADDR16_HI", Const, 0, ""},
    +		{"R_PPC_ADDR16_LO", Const, 0, ""},
    +		{"R_PPC_ADDR24", Const, 0, ""},
    +		{"R_PPC_ADDR32", Const, 0, ""},
    +		{"R_PPC_COPY", Const, 0, ""},
    +		{"R_PPC_DTPMOD32", Const, 0, ""},
    +		{"R_PPC_DTPREL16", Const, 0, ""},
    +		{"R_PPC_DTPREL16_HA", Const, 0, ""},
    +		{"R_PPC_DTPREL16_HI", Const, 0, ""},
    +		{"R_PPC_DTPREL16_LO", Const, 0, ""},
    +		{"R_PPC_DTPREL32", Const, 0, ""},
    +		{"R_PPC_EMB_BIT_FLD", Const, 0, ""},
    +		{"R_PPC_EMB_MRKREF", Const, 0, ""},
    +		{"R_PPC_EMB_NADDR16", Const, 0, ""},
    +		{"R_PPC_EMB_NADDR16_HA", Const, 0, ""},
    +		{"R_PPC_EMB_NADDR16_HI", Const, 0, ""},
    +		{"R_PPC_EMB_NADDR16_LO", Const, 0, ""},
    +		{"R_PPC_EMB_NADDR32", Const, 0, ""},
    +		{"R_PPC_EMB_RELSDA", Const, 0, ""},
    +		{"R_PPC_EMB_RELSEC16", Const, 0, ""},
    +		{"R_PPC_EMB_RELST_HA", Const, 0, ""},
    +		{"R_PPC_EMB_RELST_HI", Const, 0, ""},
    +		{"R_PPC_EMB_RELST_LO", Const, 0, ""},
    +		{"R_PPC_EMB_SDA21", Const, 0, ""},
    +		{"R_PPC_EMB_SDA2I16", Const, 0, ""},
    +		{"R_PPC_EMB_SDA2REL", Const, 0, ""},
    +		{"R_PPC_EMB_SDAI16", Const, 0, ""},
    +		{"R_PPC_GLOB_DAT", Const, 0, ""},
    +		{"R_PPC_GOT16", Const, 0, ""},
    +		{"R_PPC_GOT16_HA", Const, 0, ""},
    +		{"R_PPC_GOT16_HI", Const, 0, ""},
    +		{"R_PPC_GOT16_LO", Const, 0, ""},
    +		{"R_PPC_GOT_TLSGD16", Const, 0, ""},
    +		{"R_PPC_GOT_TLSGD16_HA", Const, 0, ""},
    +		{"R_PPC_GOT_TLSGD16_HI", Const, 0, ""},
    +		{"R_PPC_GOT_TLSGD16_LO", Const, 0, ""},
    +		{"R_PPC_GOT_TLSLD16", Const, 0, ""},
    +		{"R_PPC_GOT_TLSLD16_HA", Const, 0, ""},
    +		{"R_PPC_GOT_TLSLD16_HI", Const, 0, ""},
    +		{"R_PPC_GOT_TLSLD16_LO", Const, 0, ""},
    +		{"R_PPC_GOT_TPREL16", Const, 0, ""},
    +		{"R_PPC_GOT_TPREL16_HA", Const, 0, ""},
    +		{"R_PPC_GOT_TPREL16_HI", Const, 0, ""},
    +		{"R_PPC_GOT_TPREL16_LO", Const, 0, ""},
    +		{"R_PPC_JMP_SLOT", Const, 0, ""},
    +		{"R_PPC_LOCAL24PC", Const, 0, ""},
    +		{"R_PPC_NONE", Const, 0, ""},
    +		{"R_PPC_PLT16_HA", Const, 0, ""},
    +		{"R_PPC_PLT16_HI", Const, 0, ""},
    +		{"R_PPC_PLT16_LO", Const, 0, ""},
    +		{"R_PPC_PLT32", Const, 0, ""},
    +		{"R_PPC_PLTREL24", Const, 0, ""},
    +		{"R_PPC_PLTREL32", Const, 0, ""},
    +		{"R_PPC_REL14", Const, 0, ""},
    +		{"R_PPC_REL14_BRNTAKEN", Const, 0, ""},
    +		{"R_PPC_REL14_BRTAKEN", Const, 0, ""},
    +		{"R_PPC_REL24", Const, 0, ""},
    +		{"R_PPC_REL32", Const, 0, ""},
    +		{"R_PPC_RELATIVE", Const, 0, ""},
    +		{"R_PPC_SDAREL16", Const, 0, ""},
    +		{"R_PPC_SECTOFF", Const, 0, ""},
    +		{"R_PPC_SECTOFF_HA", Const, 0, ""},
    +		{"R_PPC_SECTOFF_HI", Const, 0, ""},
    +		{"R_PPC_SECTOFF_LO", Const, 0, ""},
    +		{"R_PPC_TLS", Const, 0, ""},
    +		{"R_PPC_TPREL16", Const, 0, ""},
    +		{"R_PPC_TPREL16_HA", Const, 0, ""},
    +		{"R_PPC_TPREL16_HI", Const, 0, ""},
    +		{"R_PPC_TPREL16_LO", Const, 0, ""},
    +		{"R_PPC_TPREL32", Const, 0, ""},
    +		{"R_PPC_UADDR16", Const, 0, ""},
    +		{"R_PPC_UADDR32", Const, 0, ""},
    +		{"R_RISCV", Type, 11, ""},
    +		{"R_RISCV_32", Const, 11, ""},
    +		{"R_RISCV_32_PCREL", Const, 12, ""},
    +		{"R_RISCV_64", Const, 11, ""},
    +		{"R_RISCV_ADD16", Const, 11, ""},
    +		{"R_RISCV_ADD32", Const, 11, ""},
    +		{"R_RISCV_ADD64", Const, 11, ""},
    +		{"R_RISCV_ADD8", Const, 11, ""},
    +		{"R_RISCV_ALIGN", Const, 11, ""},
    +		{"R_RISCV_BRANCH", Const, 11, ""},
    +		{"R_RISCV_CALL", Const, 11, ""},
    +		{"R_RISCV_CALL_PLT", Const, 11, ""},
    +		{"R_RISCV_COPY", Const, 11, ""},
    +		{"R_RISCV_GNU_VTENTRY", Const, 11, ""},
    +		{"R_RISCV_GNU_VTINHERIT", Const, 11, ""},
    +		{"R_RISCV_GOT_HI20", Const, 11, ""},
    +		{"R_RISCV_GPREL_I", Const, 11, ""},
    +		{"R_RISCV_GPREL_S", Const, 11, ""},
    +		{"R_RISCV_HI20", Const, 11, ""},
    +		{"R_RISCV_JAL", Const, 11, ""},
    +		{"R_RISCV_JUMP_SLOT", Const, 11, ""},
    +		{"R_RISCV_LO12_I", Const, 11, ""},
    +		{"R_RISCV_LO12_S", Const, 11, ""},
    +		{"R_RISCV_NONE", Const, 11, ""},
    +		{"R_RISCV_PCREL_HI20", Const, 11, ""},
    +		{"R_RISCV_PCREL_LO12_I", Const, 11, ""},
    +		{"R_RISCV_PCREL_LO12_S", Const, 11, ""},
    +		{"R_RISCV_RELATIVE", Const, 11, ""},
    +		{"R_RISCV_RELAX", Const, 11, ""},
    +		{"R_RISCV_RVC_BRANCH", Const, 11, ""},
    +		{"R_RISCV_RVC_JUMP", Const, 11, ""},
    +		{"R_RISCV_RVC_LUI", Const, 11, ""},
    +		{"R_RISCV_SET16", Const, 11, ""},
    +		{"R_RISCV_SET32", Const, 11, ""},
    +		{"R_RISCV_SET6", Const, 11, ""},
    +		{"R_RISCV_SET8", Const, 11, ""},
    +		{"R_RISCV_SUB16", Const, 11, ""},
    +		{"R_RISCV_SUB32", Const, 11, ""},
    +		{"R_RISCV_SUB6", Const, 11, ""},
    +		{"R_RISCV_SUB64", Const, 11, ""},
    +		{"R_RISCV_SUB8", Const, 11, ""},
    +		{"R_RISCV_TLS_DTPMOD32", Const, 11, ""},
    +		{"R_RISCV_TLS_DTPMOD64", Const, 11, ""},
    +		{"R_RISCV_TLS_DTPREL32", Const, 11, ""},
    +		{"R_RISCV_TLS_DTPREL64", Const, 11, ""},
    +		{"R_RISCV_TLS_GD_HI20", Const, 11, ""},
    +		{"R_RISCV_TLS_GOT_HI20", Const, 11, ""},
    +		{"R_RISCV_TLS_TPREL32", Const, 11, ""},
    +		{"R_RISCV_TLS_TPREL64", Const, 11, ""},
    +		{"R_RISCV_TPREL_ADD", Const, 11, ""},
    +		{"R_RISCV_TPREL_HI20", Const, 11, ""},
    +		{"R_RISCV_TPREL_I", Const, 11, ""},
    +		{"R_RISCV_TPREL_LO12_I", Const, 11, ""},
    +		{"R_RISCV_TPREL_LO12_S", Const, 11, ""},
    +		{"R_RISCV_TPREL_S", Const, 11, ""},
    +		{"R_SPARC", Type, 0, ""},
    +		{"R_SPARC_10", Const, 0, ""},
    +		{"R_SPARC_11", Const, 0, ""},
    +		{"R_SPARC_13", Const, 0, ""},
    +		{"R_SPARC_16", Const, 0, ""},
    +		{"R_SPARC_22", Const, 0, ""},
    +		{"R_SPARC_32", Const, 0, ""},
    +		{"R_SPARC_5", Const, 0, ""},
    +		{"R_SPARC_6", Const, 0, ""},
    +		{"R_SPARC_64", Const, 0, ""},
    +		{"R_SPARC_7", Const, 0, ""},
    +		{"R_SPARC_8", Const, 0, ""},
    +		{"R_SPARC_COPY", Const, 0, ""},
    +		{"R_SPARC_DISP16", Const, 0, ""},
    +		{"R_SPARC_DISP32", Const, 0, ""},
    +		{"R_SPARC_DISP64", Const, 0, ""},
    +		{"R_SPARC_DISP8", Const, 0, ""},
    +		{"R_SPARC_GLOB_DAT", Const, 0, ""},
    +		{"R_SPARC_GLOB_JMP", Const, 0, ""},
    +		{"R_SPARC_GOT10", Const, 0, ""},
    +		{"R_SPARC_GOT13", Const, 0, ""},
    +		{"R_SPARC_GOT22", Const, 0, ""},
    +		{"R_SPARC_H44", Const, 0, ""},
    +		{"R_SPARC_HH22", Const, 0, ""},
    +		{"R_SPARC_HI22", Const, 0, ""},
    +		{"R_SPARC_HIPLT22", Const, 0, ""},
    +		{"R_SPARC_HIX22", Const, 0, ""},
    +		{"R_SPARC_HM10", Const, 0, ""},
    +		{"R_SPARC_JMP_SLOT", Const, 0, ""},
    +		{"R_SPARC_L44", Const, 0, ""},
    +		{"R_SPARC_LM22", Const, 0, ""},
    +		{"R_SPARC_LO10", Const, 0, ""},
    +		{"R_SPARC_LOPLT10", Const, 0, ""},
    +		{"R_SPARC_LOX10", Const, 0, ""},
    +		{"R_SPARC_M44", Const, 0, ""},
    +		{"R_SPARC_NONE", Const, 0, ""},
    +		{"R_SPARC_OLO10", Const, 0, ""},
    +		{"R_SPARC_PC10", Const, 0, ""},
    +		{"R_SPARC_PC22", Const, 0, ""},
    +		{"R_SPARC_PCPLT10", Const, 0, ""},
    +		{"R_SPARC_PCPLT22", Const, 0, ""},
    +		{"R_SPARC_PCPLT32", Const, 0, ""},
    +		{"R_SPARC_PC_HH22", Const, 0, ""},
    +		{"R_SPARC_PC_HM10", Const, 0, ""},
    +		{"R_SPARC_PC_LM22", Const, 0, ""},
    +		{"R_SPARC_PLT32", Const, 0, ""},
    +		{"R_SPARC_PLT64", Const, 0, ""},
    +		{"R_SPARC_REGISTER", Const, 0, ""},
    +		{"R_SPARC_RELATIVE", Const, 0, ""},
    +		{"R_SPARC_UA16", Const, 0, ""},
    +		{"R_SPARC_UA32", Const, 0, ""},
    +		{"R_SPARC_UA64", Const, 0, ""},
    +		{"R_SPARC_WDISP16", Const, 0, ""},
    +		{"R_SPARC_WDISP19", Const, 0, ""},
    +		{"R_SPARC_WDISP22", Const, 0, ""},
    +		{"R_SPARC_WDISP30", Const, 0, ""},
    +		{"R_SPARC_WPLT30", Const, 0, ""},
    +		{"R_SYM32", Func, 0, "func(info uint32) uint32"},
    +		{"R_SYM64", Func, 0, "func(info uint64) uint32"},
    +		{"R_TYPE32", Func, 0, "func(info uint32) uint32"},
    +		{"R_TYPE64", Func, 0, "func(info uint64) uint32"},
    +		{"R_X86_64", Type, 0, ""},
    +		{"R_X86_64_16", Const, 0, ""},
    +		{"R_X86_64_32", Const, 0, ""},
    +		{"R_X86_64_32S", Const, 0, ""},
    +		{"R_X86_64_64", Const, 0, ""},
    +		{"R_X86_64_8", Const, 0, ""},
    +		{"R_X86_64_COPY", Const, 0, ""},
    +		{"R_X86_64_DTPMOD64", Const, 0, ""},
    +		{"R_X86_64_DTPOFF32", Const, 0, ""},
    +		{"R_X86_64_DTPOFF64", Const, 0, ""},
    +		{"R_X86_64_GLOB_DAT", Const, 0, ""},
    +		{"R_X86_64_GOT32", Const, 0, ""},
    +		{"R_X86_64_GOT64", Const, 10, ""},
    +		{"R_X86_64_GOTOFF64", Const, 10, ""},
    +		{"R_X86_64_GOTPC32", Const, 10, ""},
    +		{"R_X86_64_GOTPC32_TLSDESC", Const, 10, ""},
    +		{"R_X86_64_GOTPC64", Const, 10, ""},
    +		{"R_X86_64_GOTPCREL", Const, 0, ""},
    +		{"R_X86_64_GOTPCREL64", Const, 10, ""},
    +		{"R_X86_64_GOTPCRELX", Const, 10, ""},
    +		{"R_X86_64_GOTPLT64", Const, 10, ""},
    +		{"R_X86_64_GOTTPOFF", Const, 0, ""},
    +		{"R_X86_64_IRELATIVE", Const, 10, ""},
    +		{"R_X86_64_JMP_SLOT", Const, 0, ""},
    +		{"R_X86_64_NONE", Const, 0, ""},
    +		{"R_X86_64_PC16", Const, 0, ""},
    +		{"R_X86_64_PC32", Const, 0, ""},
    +		{"R_X86_64_PC32_BND", Const, 10, ""},
    +		{"R_X86_64_PC64", Const, 10, ""},
    +		{"R_X86_64_PC8", Const, 0, ""},
    +		{"R_X86_64_PLT32", Const, 0, ""},
    +		{"R_X86_64_PLT32_BND", Const, 10, ""},
    +		{"R_X86_64_PLTOFF64", Const, 10, ""},
    +		{"R_X86_64_RELATIVE", Const, 0, ""},
    +		{"R_X86_64_RELATIVE64", Const, 10, ""},
    +		{"R_X86_64_REX_GOTPCRELX", Const, 10, ""},
    +		{"R_X86_64_SIZE32", Const, 10, ""},
    +		{"R_X86_64_SIZE64", Const, 10, ""},
    +		{"R_X86_64_TLSDESC", Const, 10, ""},
    +		{"R_X86_64_TLSDESC_CALL", Const, 10, ""},
    +		{"R_X86_64_TLSGD", Const, 0, ""},
    +		{"R_X86_64_TLSLD", Const, 0, ""},
    +		{"R_X86_64_TPOFF32", Const, 0, ""},
    +		{"R_X86_64_TPOFF64", Const, 0, ""},
    +		{"Rel32", Type, 0, ""},
    +		{"Rel32.Info", Field, 0, ""},
    +		{"Rel32.Off", Field, 0, ""},
    +		{"Rel64", Type, 0, ""},
    +		{"Rel64.Info", Field, 0, ""},
    +		{"Rel64.Off", Field, 0, ""},
    +		{"Rela32", Type, 0, ""},
    +		{"Rela32.Addend", Field, 0, ""},
    +		{"Rela32.Info", Field, 0, ""},
    +		{"Rela32.Off", Field, 0, ""},
    +		{"Rela64", Type, 0, ""},
    +		{"Rela64.Addend", Field, 0, ""},
    +		{"Rela64.Info", Field, 0, ""},
    +		{"Rela64.Off", Field, 0, ""},
    +		{"SHF_ALLOC", Const, 0, ""},
    +		{"SHF_COMPRESSED", Const, 6, ""},
    +		{"SHF_EXECINSTR", Const, 0, ""},
    +		{"SHF_GROUP", Const, 0, ""},
    +		{"SHF_INFO_LINK", Const, 0, ""},
    +		{"SHF_LINK_ORDER", Const, 0, ""},
    +		{"SHF_MASKOS", Const, 0, ""},
    +		{"SHF_MASKPROC", Const, 0, ""},
    +		{"SHF_MERGE", Const, 0, ""},
    +		{"SHF_OS_NONCONFORMING", Const, 0, ""},
    +		{"SHF_STRINGS", Const, 0, ""},
    +		{"SHF_TLS", Const, 0, ""},
    +		{"SHF_WRITE", Const, 0, ""},
    +		{"SHN_ABS", Const, 0, ""},
    +		{"SHN_COMMON", Const, 0, ""},
    +		{"SHN_HIOS", Const, 0, ""},
    +		{"SHN_HIPROC", Const, 0, ""},
    +		{"SHN_HIRESERVE", Const, 0, ""},
    +		{"SHN_LOOS", Const, 0, ""},
    +		{"SHN_LOPROC", Const, 0, ""},
    +		{"SHN_LORESERVE", Const, 0, ""},
    +		{"SHN_UNDEF", Const, 0, ""},
    +		{"SHN_XINDEX", Const, 0, ""},
    +		{"SHT_DYNAMIC", Const, 0, ""},
    +		{"SHT_DYNSYM", Const, 0, ""},
    +		{"SHT_FINI_ARRAY", Const, 0, ""},
    +		{"SHT_GNU_ATTRIBUTES", Const, 0, ""},
    +		{"SHT_GNU_HASH", Const, 0, ""},
    +		{"SHT_GNU_LIBLIST", Const, 0, ""},
    +		{"SHT_GNU_VERDEF", Const, 0, ""},
    +		{"SHT_GNU_VERNEED", Const, 0, ""},
    +		{"SHT_GNU_VERSYM", Const, 0, ""},
    +		{"SHT_GROUP", Const, 0, ""},
    +		{"SHT_HASH", Const, 0, ""},
    +		{"SHT_HIOS", Const, 0, ""},
    +		{"SHT_HIPROC", Const, 0, ""},
    +		{"SHT_HIUSER", Const, 0, ""},
    +		{"SHT_INIT_ARRAY", Const, 0, ""},
    +		{"SHT_LOOS", Const, 0, ""},
    +		{"SHT_LOPROC", Const, 0, ""},
    +		{"SHT_LOUSER", Const, 0, ""},
    +		{"SHT_MIPS_ABIFLAGS", Const, 17, ""},
    +		{"SHT_NOBITS", Const, 0, ""},
    +		{"SHT_NOTE", Const, 0, ""},
    +		{"SHT_NULL", Const, 0, ""},
    +		{"SHT_PREINIT_ARRAY", Const, 0, ""},
    +		{"SHT_PROGBITS", Const, 0, ""},
    +		{"SHT_REL", Const, 0, ""},
    +		{"SHT_RELA", Const, 0, ""},
    +		{"SHT_RISCV_ATTRIBUTES", Const, 25, ""},
    +		{"SHT_SHLIB", Const, 0, ""},
    +		{"SHT_STRTAB", Const, 0, ""},
    +		{"SHT_SYMTAB", Const, 0, ""},
    +		{"SHT_SYMTAB_SHNDX", Const, 0, ""},
    +		{"STB_GLOBAL", Const, 0, ""},
    +		{"STB_HIOS", Const, 0, ""},
    +		{"STB_HIPROC", Const, 0, ""},
    +		{"STB_LOCAL", Const, 0, ""},
    +		{"STB_LOOS", Const, 0, ""},
    +		{"STB_LOPROC", Const, 0, ""},
    +		{"STB_WEAK", Const, 0, ""},
    +		{"STT_COMMON", Const, 0, ""},
    +		{"STT_FILE", Const, 0, ""},
    +		{"STT_FUNC", Const, 0, ""},
    +		{"STT_GNU_IFUNC", Const, 23, ""},
    +		{"STT_HIOS", Const, 0, ""},
    +		{"STT_HIPROC", Const, 0, ""},
    +		{"STT_LOOS", Const, 0, ""},
    +		{"STT_LOPROC", Const, 0, ""},
    +		{"STT_NOTYPE", Const, 0, ""},
    +		{"STT_OBJECT", Const, 0, ""},
    +		{"STT_RELC", Const, 23, ""},
    +		{"STT_SECTION", Const, 0, ""},
    +		{"STT_SRELC", Const, 23, ""},
    +		{"STT_TLS", Const, 0, ""},
    +		{"STV_DEFAULT", Const, 0, ""},
    +		{"STV_HIDDEN", Const, 0, ""},
    +		{"STV_INTERNAL", Const, 0, ""},
    +		{"STV_PROTECTED", Const, 0, ""},
    +		{"ST_BIND", Func, 0, "func(info uint8) SymBind"},
    +		{"ST_INFO", Func, 0, "func(bind SymBind, typ SymType) uint8"},
    +		{"ST_TYPE", Func, 0, "func(info uint8) SymType"},
    +		{"ST_VISIBILITY", Func, 0, "func(other uint8) SymVis"},
    +		{"Section", Type, 0, ""},
    +		{"Section.ReaderAt", Field, 0, ""},
    +		{"Section.SectionHeader", Field, 0, ""},
    +		{"Section32", Type, 0, ""},
    +		{"Section32.Addr", Field, 0, ""},
    +		{"Section32.Addralign", Field, 0, ""},
    +		{"Section32.Entsize", Field, 0, ""},
    +		{"Section32.Flags", Field, 0, ""},
    +		{"Section32.Info", Field, 0, ""},
    +		{"Section32.Link", Field, 0, ""},
    +		{"Section32.Name", Field, 0, ""},
    +		{"Section32.Off", Field, 0, ""},
    +		{"Section32.Size", Field, 0, ""},
    +		{"Section32.Type", Field, 0, ""},
    +		{"Section64", Type, 0, ""},
    +		{"Section64.Addr", Field, 0, ""},
    +		{"Section64.Addralign", Field, 0, ""},
    +		{"Section64.Entsize", Field, 0, ""},
    +		{"Section64.Flags", Field, 0, ""},
    +		{"Section64.Info", Field, 0, ""},
    +		{"Section64.Link", Field, 0, ""},
    +		{"Section64.Name", Field, 0, ""},
    +		{"Section64.Off", Field, 0, ""},
    +		{"Section64.Size", Field, 0, ""},
    +		{"Section64.Type", Field, 0, ""},
    +		{"SectionFlag", Type, 0, ""},
    +		{"SectionHeader", Type, 0, ""},
    +		{"SectionHeader.Addr", Field, 0, ""},
    +		{"SectionHeader.Addralign", Field, 0, ""},
    +		{"SectionHeader.Entsize", Field, 0, ""},
    +		{"SectionHeader.FileSize", Field, 6, ""},
    +		{"SectionHeader.Flags", Field, 0, ""},
    +		{"SectionHeader.Info", Field, 0, ""},
    +		{"SectionHeader.Link", Field, 0, ""},
    +		{"SectionHeader.Name", Field, 0, ""},
    +		{"SectionHeader.Offset", Field, 0, ""},
    +		{"SectionHeader.Size", Field, 0, ""},
    +		{"SectionHeader.Type", Field, 0, ""},
    +		{"SectionIndex", Type, 0, ""},
    +		{"SectionType", Type, 0, ""},
    +		{"Sym32", Type, 0, ""},
    +		{"Sym32.Info", Field, 0, ""},
    +		{"Sym32.Name", Field, 0, ""},
    +		{"Sym32.Other", Field, 0, ""},
    +		{"Sym32.Shndx", Field, 0, ""},
    +		{"Sym32.Size", Field, 0, ""},
    +		{"Sym32.Value", Field, 0, ""},
    +		{"Sym32Size", Const, 0, ""},
    +		{"Sym64", Type, 0, ""},
    +		{"Sym64.Info", Field, 0, ""},
    +		{"Sym64.Name", Field, 0, ""},
    +		{"Sym64.Other", Field, 0, ""},
    +		{"Sym64.Shndx", Field, 0, ""},
    +		{"Sym64.Size", Field, 0, ""},
    +		{"Sym64.Value", Field, 0, ""},
    +		{"Sym64Size", Const, 0, ""},
    +		{"SymBind", Type, 0, ""},
    +		{"SymType", Type, 0, ""},
    +		{"SymVis", Type, 0, ""},
    +		{"Symbol", Type, 0, ""},
    +		{"Symbol.HasVersion", Field, 24, ""},
    +		{"Symbol.Info", Field, 0, ""},
    +		{"Symbol.Library", Field, 13, ""},
    +		{"Symbol.Name", Field, 0, ""},
    +		{"Symbol.Other", Field, 0, ""},
    +		{"Symbol.Section", Field, 0, ""},
    +		{"Symbol.Size", Field, 0, ""},
    +		{"Symbol.Value", Field, 0, ""},
    +		{"Symbol.Version", Field, 13, ""},
    +		{"Symbol.VersionIndex", Field, 24, ""},
    +		{"Type", Type, 0, ""},
    +		{"VER_FLG_BASE", Const, 24, ""},
    +		{"VER_FLG_INFO", Const, 24, ""},
    +		{"VER_FLG_WEAK", Const, 24, ""},
    +		{"Version", Type, 0, ""},
    +		{"VersionIndex", Type, 24, ""},
     	},
     	"debug/gosym": {
    -		{"(*DecodingError).Error", Method, 0},
    -		{"(*LineTable).LineToPC", Method, 0},
    -		{"(*LineTable).PCToLine", Method, 0},
    -		{"(*Sym).BaseName", Method, 0},
    -		{"(*Sym).PackageName", Method, 0},
    -		{"(*Sym).ReceiverName", Method, 0},
    -		{"(*Sym).Static", Method, 0},
    -		{"(*Table).LineToPC", Method, 0},
    -		{"(*Table).LookupFunc", Method, 0},
    -		{"(*Table).LookupSym", Method, 0},
    -		{"(*Table).PCToFunc", Method, 0},
    -		{"(*Table).PCToLine", Method, 0},
    -		{"(*Table).SymByAddr", Method, 0},
    -		{"(*UnknownLineError).Error", Method, 0},
    -		{"(Func).BaseName", Method, 0},
    -		{"(Func).PackageName", Method, 0},
    -		{"(Func).ReceiverName", Method, 0},
    -		{"(Func).Static", Method, 0},
    -		{"(UnknownFileError).Error", Method, 0},
    -		{"DecodingError", Type, 0},
    -		{"Func", Type, 0},
    -		{"Func.End", Field, 0},
    -		{"Func.Entry", Field, 0},
    -		{"Func.FrameSize", Field, 0},
    -		{"Func.LineTable", Field, 0},
    -		{"Func.Locals", Field, 0},
    -		{"Func.Obj", Field, 0},
    -		{"Func.Params", Field, 0},
    -		{"Func.Sym", Field, 0},
    -		{"LineTable", Type, 0},
    -		{"LineTable.Data", Field, 0},
    -		{"LineTable.Line", Field, 0},
    -		{"LineTable.PC", Field, 0},
    -		{"NewLineTable", Func, 0},
    -		{"NewTable", Func, 0},
    -		{"Obj", Type, 0},
    -		{"Obj.Funcs", Field, 0},
    -		{"Obj.Paths", Field, 0},
    -		{"Sym", Type, 0},
    -		{"Sym.Func", Field, 0},
    -		{"Sym.GoType", Field, 0},
    -		{"Sym.Name", Field, 0},
    -		{"Sym.Type", Field, 0},
    -		{"Sym.Value", Field, 0},
    -		{"Table", Type, 0},
    -		{"Table.Files", Field, 0},
    -		{"Table.Funcs", Field, 0},
    -		{"Table.Objs", Field, 0},
    -		{"Table.Syms", Field, 0},
    -		{"UnknownFileError", Type, 0},
    -		{"UnknownLineError", Type, 0},
    -		{"UnknownLineError.File", Field, 0},
    -		{"UnknownLineError.Line", Field, 0},
    +		{"(*DecodingError).Error", Method, 0, ""},
    +		{"(*LineTable).LineToPC", Method, 0, ""},
    +		{"(*LineTable).PCToLine", Method, 0, ""},
    +		{"(*Sym).BaseName", Method, 0, ""},
    +		{"(*Sym).PackageName", Method, 0, ""},
    +		{"(*Sym).ReceiverName", Method, 0, ""},
    +		{"(*Sym).Static", Method, 0, ""},
    +		{"(*Table).LineToPC", Method, 0, ""},
    +		{"(*Table).LookupFunc", Method, 0, ""},
    +		{"(*Table).LookupSym", Method, 0, ""},
    +		{"(*Table).PCToFunc", Method, 0, ""},
    +		{"(*Table).PCToLine", Method, 0, ""},
    +		{"(*Table).SymByAddr", Method, 0, ""},
    +		{"(*UnknownLineError).Error", Method, 0, ""},
    +		{"(Func).BaseName", Method, 0, ""},
    +		{"(Func).PackageName", Method, 0, ""},
    +		{"(Func).ReceiverName", Method, 0, ""},
    +		{"(Func).Static", Method, 0, ""},
    +		{"(UnknownFileError).Error", Method, 0, ""},
    +		{"DecodingError", Type, 0, ""},
    +		{"Func", Type, 0, ""},
    +		{"Func.End", Field, 0, ""},
    +		{"Func.Entry", Field, 0, ""},
    +		{"Func.FrameSize", Field, 0, ""},
    +		{"Func.LineTable", Field, 0, ""},
    +		{"Func.Locals", Field, 0, ""},
    +		{"Func.Obj", Field, 0, ""},
    +		{"Func.Params", Field, 0, ""},
    +		{"Func.Sym", Field, 0, ""},
    +		{"LineTable", Type, 0, ""},
    +		{"LineTable.Data", Field, 0, ""},
    +		{"LineTable.Line", Field, 0, ""},
    +		{"LineTable.PC", Field, 0, ""},
    +		{"NewLineTable", Func, 0, "func(data []byte, text uint64) *LineTable"},
    +		{"NewTable", Func, 0, "func(symtab []byte, pcln *LineTable) (*Table, error)"},
    +		{"Obj", Type, 0, ""},
    +		{"Obj.Funcs", Field, 0, ""},
    +		{"Obj.Paths", Field, 0, ""},
    +		{"Sym", Type, 0, ""},
    +		{"Sym.Func", Field, 0, ""},
    +		{"Sym.GoType", Field, 0, ""},
    +		{"Sym.Name", Field, 0, ""},
    +		{"Sym.Type", Field, 0, ""},
    +		{"Sym.Value", Field, 0, ""},
    +		{"Table", Type, 0, ""},
    +		{"Table.Files", Field, 0, ""},
    +		{"Table.Funcs", Field, 0, ""},
    +		{"Table.Objs", Field, 0, ""},
    +		{"Table.Syms", Field, 0, ""},
    +		{"UnknownFileError", Type, 0, ""},
    +		{"UnknownLineError", Type, 0, ""},
    +		{"UnknownLineError.File", Field, 0, ""},
    +		{"UnknownLineError.Line", Field, 0, ""},
     	},
     	"debug/macho": {
    -		{"(*FatFile).Close", Method, 3},
    -		{"(*File).Close", Method, 0},
    -		{"(*File).DWARF", Method, 0},
    -		{"(*File).ImportedLibraries", Method, 0},
    -		{"(*File).ImportedSymbols", Method, 0},
    -		{"(*File).Section", Method, 0},
    -		{"(*File).Segment", Method, 0},
    -		{"(*FormatError).Error", Method, 0},
    -		{"(*Section).Data", Method, 0},
    -		{"(*Section).Open", Method, 0},
    -		{"(*Segment).Data", Method, 0},
    -		{"(*Segment).Open", Method, 0},
    -		{"(Cpu).GoString", Method, 0},
    -		{"(Cpu).String", Method, 0},
    -		{"(Dylib).Raw", Method, 0},
    -		{"(Dysymtab).Raw", Method, 0},
    -		{"(FatArch).Close", Method, 3},
    -		{"(FatArch).DWARF", Method, 3},
    -		{"(FatArch).ImportedLibraries", Method, 3},
    -		{"(FatArch).ImportedSymbols", Method, 3},
    -		{"(FatArch).Section", Method, 3},
    -		{"(FatArch).Segment", Method, 3},
    -		{"(LoadBytes).Raw", Method, 0},
    -		{"(LoadCmd).GoString", Method, 0},
    -		{"(LoadCmd).String", Method, 0},
    -		{"(RelocTypeARM).GoString", Method, 10},
    -		{"(RelocTypeARM).String", Method, 10},
    -		{"(RelocTypeARM64).GoString", Method, 10},
    -		{"(RelocTypeARM64).String", Method, 10},
    -		{"(RelocTypeGeneric).GoString", Method, 10},
    -		{"(RelocTypeGeneric).String", Method, 10},
    -		{"(RelocTypeX86_64).GoString", Method, 10},
    -		{"(RelocTypeX86_64).String", Method, 10},
    -		{"(Rpath).Raw", Method, 10},
    -		{"(Section).ReadAt", Method, 0},
    -		{"(Segment).Raw", Method, 0},
    -		{"(Segment).ReadAt", Method, 0},
    -		{"(Symtab).Raw", Method, 0},
    -		{"(Type).GoString", Method, 10},
    -		{"(Type).String", Method, 10},
    -		{"ARM64_RELOC_ADDEND", Const, 10},
    -		{"ARM64_RELOC_BRANCH26", Const, 10},
    -		{"ARM64_RELOC_GOT_LOAD_PAGE21", Const, 10},
    -		{"ARM64_RELOC_GOT_LOAD_PAGEOFF12", Const, 10},
    -		{"ARM64_RELOC_PAGE21", Const, 10},
    -		{"ARM64_RELOC_PAGEOFF12", Const, 10},
    -		{"ARM64_RELOC_POINTER_TO_GOT", Const, 10},
    -		{"ARM64_RELOC_SUBTRACTOR", Const, 10},
    -		{"ARM64_RELOC_TLVP_LOAD_PAGE21", Const, 10},
    -		{"ARM64_RELOC_TLVP_LOAD_PAGEOFF12", Const, 10},
    -		{"ARM64_RELOC_UNSIGNED", Const, 10},
    -		{"ARM_RELOC_BR24", Const, 10},
    -		{"ARM_RELOC_HALF", Const, 10},
    -		{"ARM_RELOC_HALF_SECTDIFF", Const, 10},
    -		{"ARM_RELOC_LOCAL_SECTDIFF", Const, 10},
    -		{"ARM_RELOC_PAIR", Const, 10},
    -		{"ARM_RELOC_PB_LA_PTR", Const, 10},
    -		{"ARM_RELOC_SECTDIFF", Const, 10},
    -		{"ARM_RELOC_VANILLA", Const, 10},
    -		{"ARM_THUMB_32BIT_BRANCH", Const, 10},
    -		{"ARM_THUMB_RELOC_BR22", Const, 10},
    -		{"Cpu", Type, 0},
    -		{"Cpu386", Const, 0},
    -		{"CpuAmd64", Const, 0},
    -		{"CpuArm", Const, 3},
    -		{"CpuArm64", Const, 11},
    -		{"CpuPpc", Const, 3},
    -		{"CpuPpc64", Const, 3},
    -		{"Dylib", Type, 0},
    -		{"Dylib.CompatVersion", Field, 0},
    -		{"Dylib.CurrentVersion", Field, 0},
    -		{"Dylib.LoadBytes", Field, 0},
    -		{"Dylib.Name", Field, 0},
    -		{"Dylib.Time", Field, 0},
    -		{"DylibCmd", Type, 0},
    -		{"DylibCmd.Cmd", Field, 0},
    -		{"DylibCmd.CompatVersion", Field, 0},
    -		{"DylibCmd.CurrentVersion", Field, 0},
    -		{"DylibCmd.Len", Field, 0},
    -		{"DylibCmd.Name", Field, 0},
    -		{"DylibCmd.Time", Field, 0},
    -		{"Dysymtab", Type, 0},
    -		{"Dysymtab.DysymtabCmd", Field, 0},
    -		{"Dysymtab.IndirectSyms", Field, 0},
    -		{"Dysymtab.LoadBytes", Field, 0},
    -		{"DysymtabCmd", Type, 0},
    -		{"DysymtabCmd.Cmd", Field, 0},
    -		{"DysymtabCmd.Extrefsymoff", Field, 0},
    -		{"DysymtabCmd.Extreloff", Field, 0},
    -		{"DysymtabCmd.Iextdefsym", Field, 0},
    -		{"DysymtabCmd.Ilocalsym", Field, 0},
    -		{"DysymtabCmd.Indirectsymoff", Field, 0},
    -		{"DysymtabCmd.Iundefsym", Field, 0},
    -		{"DysymtabCmd.Len", Field, 0},
    -		{"DysymtabCmd.Locreloff", Field, 0},
    -		{"DysymtabCmd.Modtaboff", Field, 0},
    -		{"DysymtabCmd.Nextdefsym", Field, 0},
    -		{"DysymtabCmd.Nextrefsyms", Field, 0},
    -		{"DysymtabCmd.Nextrel", Field, 0},
    -		{"DysymtabCmd.Nindirectsyms", Field, 0},
    -		{"DysymtabCmd.Nlocalsym", Field, 0},
    -		{"DysymtabCmd.Nlocrel", Field, 0},
    -		{"DysymtabCmd.Nmodtab", Field, 0},
    -		{"DysymtabCmd.Ntoc", Field, 0},
    -		{"DysymtabCmd.Nundefsym", Field, 0},
    -		{"DysymtabCmd.Tocoffset", Field, 0},
    -		{"ErrNotFat", Var, 3},
    -		{"FatArch", Type, 3},
    -		{"FatArch.FatArchHeader", Field, 3},
    -		{"FatArch.File", Field, 3},
    -		{"FatArchHeader", Type, 3},
    -		{"FatArchHeader.Align", Field, 3},
    -		{"FatArchHeader.Cpu", Field, 3},
    -		{"FatArchHeader.Offset", Field, 3},
    -		{"FatArchHeader.Size", Field, 3},
    -		{"FatArchHeader.SubCpu", Field, 3},
    -		{"FatFile", Type, 3},
    -		{"FatFile.Arches", Field, 3},
    -		{"FatFile.Magic", Field, 3},
    -		{"File", Type, 0},
    -		{"File.ByteOrder", Field, 0},
    -		{"File.Dysymtab", Field, 0},
    -		{"File.FileHeader", Field, 0},
    -		{"File.Loads", Field, 0},
    -		{"File.Sections", Field, 0},
    -		{"File.Symtab", Field, 0},
    -		{"FileHeader", Type, 0},
    -		{"FileHeader.Cmdsz", Field, 0},
    -		{"FileHeader.Cpu", Field, 0},
    -		{"FileHeader.Flags", Field, 0},
    -		{"FileHeader.Magic", Field, 0},
    -		{"FileHeader.Ncmd", Field, 0},
    -		{"FileHeader.SubCpu", Field, 0},
    -		{"FileHeader.Type", Field, 0},
    -		{"FlagAllModsBound", Const, 10},
    -		{"FlagAllowStackExecution", Const, 10},
    -		{"FlagAppExtensionSafe", Const, 10},
    -		{"FlagBindAtLoad", Const, 10},
    -		{"FlagBindsToWeak", Const, 10},
    -		{"FlagCanonical", Const, 10},
    -		{"FlagDeadStrippableDylib", Const, 10},
    -		{"FlagDyldLink", Const, 10},
    -		{"FlagForceFlat", Const, 10},
    -		{"FlagHasTLVDescriptors", Const, 10},
    -		{"FlagIncrLink", Const, 10},
    -		{"FlagLazyInit", Const, 10},
    -		{"FlagNoFixPrebinding", Const, 10},
    -		{"FlagNoHeapExecution", Const, 10},
    -		{"FlagNoMultiDefs", Const, 10},
    -		{"FlagNoReexportedDylibs", Const, 10},
    -		{"FlagNoUndefs", Const, 10},
    -		{"FlagPIE", Const, 10},
    -		{"FlagPrebindable", Const, 10},
    -		{"FlagPrebound", Const, 10},
    -		{"FlagRootSafe", Const, 10},
    -		{"FlagSetuidSafe", Const, 10},
    -		{"FlagSplitSegs", Const, 10},
    -		{"FlagSubsectionsViaSymbols", Const, 10},
    -		{"FlagTwoLevel", Const, 10},
    -		{"FlagWeakDefines", Const, 10},
    -		{"FormatError", Type, 0},
    -		{"GENERIC_RELOC_LOCAL_SECTDIFF", Const, 10},
    -		{"GENERIC_RELOC_PAIR", Const, 10},
    -		{"GENERIC_RELOC_PB_LA_PTR", Const, 10},
    -		{"GENERIC_RELOC_SECTDIFF", Const, 10},
    -		{"GENERIC_RELOC_TLV", Const, 10},
    -		{"GENERIC_RELOC_VANILLA", Const, 10},
    -		{"Load", Type, 0},
    -		{"LoadBytes", Type, 0},
    -		{"LoadCmd", Type, 0},
    -		{"LoadCmdDylib", Const, 0},
    -		{"LoadCmdDylinker", Const, 0},
    -		{"LoadCmdDysymtab", Const, 0},
    -		{"LoadCmdRpath", Const, 10},
    -		{"LoadCmdSegment", Const, 0},
    -		{"LoadCmdSegment64", Const, 0},
    -		{"LoadCmdSymtab", Const, 0},
    -		{"LoadCmdThread", Const, 0},
    -		{"LoadCmdUnixThread", Const, 0},
    -		{"Magic32", Const, 0},
    -		{"Magic64", Const, 0},
    -		{"MagicFat", Const, 3},
    -		{"NewFatFile", Func, 3},
    -		{"NewFile", Func, 0},
    -		{"Nlist32", Type, 0},
    -		{"Nlist32.Desc", Field, 0},
    -		{"Nlist32.Name", Field, 0},
    -		{"Nlist32.Sect", Field, 0},
    -		{"Nlist32.Type", Field, 0},
    -		{"Nlist32.Value", Field, 0},
    -		{"Nlist64", Type, 0},
    -		{"Nlist64.Desc", Field, 0},
    -		{"Nlist64.Name", Field, 0},
    -		{"Nlist64.Sect", Field, 0},
    -		{"Nlist64.Type", Field, 0},
    -		{"Nlist64.Value", Field, 0},
    -		{"Open", Func, 0},
    -		{"OpenFat", Func, 3},
    -		{"Regs386", Type, 0},
    -		{"Regs386.AX", Field, 0},
    -		{"Regs386.BP", Field, 0},
    -		{"Regs386.BX", Field, 0},
    -		{"Regs386.CS", Field, 0},
    -		{"Regs386.CX", Field, 0},
    -		{"Regs386.DI", Field, 0},
    -		{"Regs386.DS", Field, 0},
    -		{"Regs386.DX", Field, 0},
    -		{"Regs386.ES", Field, 0},
    -		{"Regs386.FLAGS", Field, 0},
    -		{"Regs386.FS", Field, 0},
    -		{"Regs386.GS", Field, 0},
    -		{"Regs386.IP", Field, 0},
    -		{"Regs386.SI", Field, 0},
    -		{"Regs386.SP", Field, 0},
    -		{"Regs386.SS", Field, 0},
    -		{"RegsAMD64", Type, 0},
    -		{"RegsAMD64.AX", Field, 0},
    -		{"RegsAMD64.BP", Field, 0},
    -		{"RegsAMD64.BX", Field, 0},
    -		{"RegsAMD64.CS", Field, 0},
    -		{"RegsAMD64.CX", Field, 0},
    -		{"RegsAMD64.DI", Field, 0},
    -		{"RegsAMD64.DX", Field, 0},
    -		{"RegsAMD64.FLAGS", Field, 0},
    -		{"RegsAMD64.FS", Field, 0},
    -		{"RegsAMD64.GS", Field, 0},
    -		{"RegsAMD64.IP", Field, 0},
    -		{"RegsAMD64.R10", Field, 0},
    -		{"RegsAMD64.R11", Field, 0},
    -		{"RegsAMD64.R12", Field, 0},
    -		{"RegsAMD64.R13", Field, 0},
    -		{"RegsAMD64.R14", Field, 0},
    -		{"RegsAMD64.R15", Field, 0},
    -		{"RegsAMD64.R8", Field, 0},
    -		{"RegsAMD64.R9", Field, 0},
    -		{"RegsAMD64.SI", Field, 0},
    -		{"RegsAMD64.SP", Field, 0},
    -		{"Reloc", Type, 10},
    -		{"Reloc.Addr", Field, 10},
    -		{"Reloc.Extern", Field, 10},
    -		{"Reloc.Len", Field, 10},
    -		{"Reloc.Pcrel", Field, 10},
    -		{"Reloc.Scattered", Field, 10},
    -		{"Reloc.Type", Field, 10},
    -		{"Reloc.Value", Field, 10},
    -		{"RelocTypeARM", Type, 10},
    -		{"RelocTypeARM64", Type, 10},
    -		{"RelocTypeGeneric", Type, 10},
    -		{"RelocTypeX86_64", Type, 10},
    -		{"Rpath", Type, 10},
    -		{"Rpath.LoadBytes", Field, 10},
    -		{"Rpath.Path", Field, 10},
    -		{"RpathCmd", Type, 10},
    -		{"RpathCmd.Cmd", Field, 10},
    -		{"RpathCmd.Len", Field, 10},
    -		{"RpathCmd.Path", Field, 10},
    -		{"Section", Type, 0},
    -		{"Section.ReaderAt", Field, 0},
    -		{"Section.Relocs", Field, 10},
    -		{"Section.SectionHeader", Field, 0},
    -		{"Section32", Type, 0},
    -		{"Section32.Addr", Field, 0},
    -		{"Section32.Align", Field, 0},
    -		{"Section32.Flags", Field, 0},
    -		{"Section32.Name", Field, 0},
    -		{"Section32.Nreloc", Field, 0},
    -		{"Section32.Offset", Field, 0},
    -		{"Section32.Reloff", Field, 0},
    -		{"Section32.Reserve1", Field, 0},
    -		{"Section32.Reserve2", Field, 0},
    -		{"Section32.Seg", Field, 0},
    -		{"Section32.Size", Field, 0},
    -		{"Section64", Type, 0},
    -		{"Section64.Addr", Field, 0},
    -		{"Section64.Align", Field, 0},
    -		{"Section64.Flags", Field, 0},
    -		{"Section64.Name", Field, 0},
    -		{"Section64.Nreloc", Field, 0},
    -		{"Section64.Offset", Field, 0},
    -		{"Section64.Reloff", Field, 0},
    -		{"Section64.Reserve1", Field, 0},
    -		{"Section64.Reserve2", Field, 0},
    -		{"Section64.Reserve3", Field, 0},
    -		{"Section64.Seg", Field, 0},
    -		{"Section64.Size", Field, 0},
    -		{"SectionHeader", Type, 0},
    -		{"SectionHeader.Addr", Field, 0},
    -		{"SectionHeader.Align", Field, 0},
    -		{"SectionHeader.Flags", Field, 0},
    -		{"SectionHeader.Name", Field, 0},
    -		{"SectionHeader.Nreloc", Field, 0},
    -		{"SectionHeader.Offset", Field, 0},
    -		{"SectionHeader.Reloff", Field, 0},
    -		{"SectionHeader.Seg", Field, 0},
    -		{"SectionHeader.Size", Field, 0},
    -		{"Segment", Type, 0},
    -		{"Segment.LoadBytes", Field, 0},
    -		{"Segment.ReaderAt", Field, 0},
    -		{"Segment.SegmentHeader", Field, 0},
    -		{"Segment32", Type, 0},
    -		{"Segment32.Addr", Field, 0},
    -		{"Segment32.Cmd", Field, 0},
    -		{"Segment32.Filesz", Field, 0},
    -		{"Segment32.Flag", Field, 0},
    -		{"Segment32.Len", Field, 0},
    -		{"Segment32.Maxprot", Field, 0},
    -		{"Segment32.Memsz", Field, 0},
    -		{"Segment32.Name", Field, 0},
    -		{"Segment32.Nsect", Field, 0},
    -		{"Segment32.Offset", Field, 0},
    -		{"Segment32.Prot", Field, 0},
    -		{"Segment64", Type, 0},
    -		{"Segment64.Addr", Field, 0},
    -		{"Segment64.Cmd", Field, 0},
    -		{"Segment64.Filesz", Field, 0},
    -		{"Segment64.Flag", Field, 0},
    -		{"Segment64.Len", Field, 0},
    -		{"Segment64.Maxprot", Field, 0},
    -		{"Segment64.Memsz", Field, 0},
    -		{"Segment64.Name", Field, 0},
    -		{"Segment64.Nsect", Field, 0},
    -		{"Segment64.Offset", Field, 0},
    -		{"Segment64.Prot", Field, 0},
    -		{"SegmentHeader", Type, 0},
    -		{"SegmentHeader.Addr", Field, 0},
    -		{"SegmentHeader.Cmd", Field, 0},
    -		{"SegmentHeader.Filesz", Field, 0},
    -		{"SegmentHeader.Flag", Field, 0},
    -		{"SegmentHeader.Len", Field, 0},
    -		{"SegmentHeader.Maxprot", Field, 0},
    -		{"SegmentHeader.Memsz", Field, 0},
    -		{"SegmentHeader.Name", Field, 0},
    -		{"SegmentHeader.Nsect", Field, 0},
    -		{"SegmentHeader.Offset", Field, 0},
    -		{"SegmentHeader.Prot", Field, 0},
    -		{"Symbol", Type, 0},
    -		{"Symbol.Desc", Field, 0},
    -		{"Symbol.Name", Field, 0},
    -		{"Symbol.Sect", Field, 0},
    -		{"Symbol.Type", Field, 0},
    -		{"Symbol.Value", Field, 0},
    -		{"Symtab", Type, 0},
    -		{"Symtab.LoadBytes", Field, 0},
    -		{"Symtab.Syms", Field, 0},
    -		{"Symtab.SymtabCmd", Field, 0},
    -		{"SymtabCmd", Type, 0},
    -		{"SymtabCmd.Cmd", Field, 0},
    -		{"SymtabCmd.Len", Field, 0},
    -		{"SymtabCmd.Nsyms", Field, 0},
    -		{"SymtabCmd.Stroff", Field, 0},
    -		{"SymtabCmd.Strsize", Field, 0},
    -		{"SymtabCmd.Symoff", Field, 0},
    -		{"Thread", Type, 0},
    -		{"Thread.Cmd", Field, 0},
    -		{"Thread.Data", Field, 0},
    -		{"Thread.Len", Field, 0},
    -		{"Thread.Type", Field, 0},
    -		{"Type", Type, 0},
    -		{"TypeBundle", Const, 3},
    -		{"TypeDylib", Const, 3},
    -		{"TypeExec", Const, 0},
    -		{"TypeObj", Const, 0},
    -		{"X86_64_RELOC_BRANCH", Const, 10},
    -		{"X86_64_RELOC_GOT", Const, 10},
    -		{"X86_64_RELOC_GOT_LOAD", Const, 10},
    -		{"X86_64_RELOC_SIGNED", Const, 10},
    -		{"X86_64_RELOC_SIGNED_1", Const, 10},
    -		{"X86_64_RELOC_SIGNED_2", Const, 10},
    -		{"X86_64_RELOC_SIGNED_4", Const, 10},
    -		{"X86_64_RELOC_SUBTRACTOR", Const, 10},
    -		{"X86_64_RELOC_TLV", Const, 10},
    -		{"X86_64_RELOC_UNSIGNED", Const, 10},
    +		{"(*FatFile).Close", Method, 3, ""},
    +		{"(*File).Close", Method, 0, ""},
    +		{"(*File).DWARF", Method, 0, ""},
    +		{"(*File).ImportedLibraries", Method, 0, ""},
    +		{"(*File).ImportedSymbols", Method, 0, ""},
    +		{"(*File).Section", Method, 0, ""},
    +		{"(*File).Segment", Method, 0, ""},
    +		{"(*FormatError).Error", Method, 0, ""},
    +		{"(*Section).Data", Method, 0, ""},
    +		{"(*Section).Open", Method, 0, ""},
    +		{"(*Segment).Data", Method, 0, ""},
    +		{"(*Segment).Open", Method, 0, ""},
    +		{"(Cpu).GoString", Method, 0, ""},
    +		{"(Cpu).String", Method, 0, ""},
    +		{"(Dylib).Raw", Method, 0, ""},
    +		{"(Dysymtab).Raw", Method, 0, ""},
    +		{"(FatArch).Close", Method, 3, ""},
    +		{"(FatArch).DWARF", Method, 3, ""},
    +		{"(FatArch).ImportedLibraries", Method, 3, ""},
    +		{"(FatArch).ImportedSymbols", Method, 3, ""},
    +		{"(FatArch).Section", Method, 3, ""},
    +		{"(FatArch).Segment", Method, 3, ""},
    +		{"(LoadBytes).Raw", Method, 0, ""},
    +		{"(LoadCmd).GoString", Method, 0, ""},
    +		{"(LoadCmd).String", Method, 0, ""},
    +		{"(RelocTypeARM).GoString", Method, 10, ""},
    +		{"(RelocTypeARM).String", Method, 10, ""},
    +		{"(RelocTypeARM64).GoString", Method, 10, ""},
    +		{"(RelocTypeARM64).String", Method, 10, ""},
    +		{"(RelocTypeGeneric).GoString", Method, 10, ""},
    +		{"(RelocTypeGeneric).String", Method, 10, ""},
    +		{"(RelocTypeX86_64).GoString", Method, 10, ""},
    +		{"(RelocTypeX86_64).String", Method, 10, ""},
    +		{"(Rpath).Raw", Method, 10, ""},
    +		{"(Section).ReadAt", Method, 0, ""},
    +		{"(Segment).Raw", Method, 0, ""},
    +		{"(Segment).ReadAt", Method, 0, ""},
    +		{"(Symtab).Raw", Method, 0, ""},
    +		{"(Type).GoString", Method, 10, ""},
    +		{"(Type).String", Method, 10, ""},
    +		{"ARM64_RELOC_ADDEND", Const, 10, ""},
    +		{"ARM64_RELOC_BRANCH26", Const, 10, ""},
    +		{"ARM64_RELOC_GOT_LOAD_PAGE21", Const, 10, ""},
    +		{"ARM64_RELOC_GOT_LOAD_PAGEOFF12", Const, 10, ""},
    +		{"ARM64_RELOC_PAGE21", Const, 10, ""},
    +		{"ARM64_RELOC_PAGEOFF12", Const, 10, ""},
    +		{"ARM64_RELOC_POINTER_TO_GOT", Const, 10, ""},
    +		{"ARM64_RELOC_SUBTRACTOR", Const, 10, ""},
    +		{"ARM64_RELOC_TLVP_LOAD_PAGE21", Const, 10, ""},
    +		{"ARM64_RELOC_TLVP_LOAD_PAGEOFF12", Const, 10, ""},
    +		{"ARM64_RELOC_UNSIGNED", Const, 10, ""},
    +		{"ARM_RELOC_BR24", Const, 10, ""},
    +		{"ARM_RELOC_HALF", Const, 10, ""},
    +		{"ARM_RELOC_HALF_SECTDIFF", Const, 10, ""},
    +		{"ARM_RELOC_LOCAL_SECTDIFF", Const, 10, ""},
    +		{"ARM_RELOC_PAIR", Const, 10, ""},
    +		{"ARM_RELOC_PB_LA_PTR", Const, 10, ""},
    +		{"ARM_RELOC_SECTDIFF", Const, 10, ""},
    +		{"ARM_RELOC_VANILLA", Const, 10, ""},
    +		{"ARM_THUMB_32BIT_BRANCH", Const, 10, ""},
    +		{"ARM_THUMB_RELOC_BR22", Const, 10, ""},
    +		{"Cpu", Type, 0, ""},
    +		{"Cpu386", Const, 0, ""},
    +		{"CpuAmd64", Const, 0, ""},
    +		{"CpuArm", Const, 3, ""},
    +		{"CpuArm64", Const, 11, ""},
    +		{"CpuPpc", Const, 3, ""},
    +		{"CpuPpc64", Const, 3, ""},
    +		{"Dylib", Type, 0, ""},
    +		{"Dylib.CompatVersion", Field, 0, ""},
    +		{"Dylib.CurrentVersion", Field, 0, ""},
    +		{"Dylib.LoadBytes", Field, 0, ""},
    +		{"Dylib.Name", Field, 0, ""},
    +		{"Dylib.Time", Field, 0, ""},
    +		{"DylibCmd", Type, 0, ""},
    +		{"DylibCmd.Cmd", Field, 0, ""},
    +		{"DylibCmd.CompatVersion", Field, 0, ""},
    +		{"DylibCmd.CurrentVersion", Field, 0, ""},
    +		{"DylibCmd.Len", Field, 0, ""},
    +		{"DylibCmd.Name", Field, 0, ""},
    +		{"DylibCmd.Time", Field, 0, ""},
    +		{"Dysymtab", Type, 0, ""},
    +		{"Dysymtab.DysymtabCmd", Field, 0, ""},
    +		{"Dysymtab.IndirectSyms", Field, 0, ""},
    +		{"Dysymtab.LoadBytes", Field, 0, ""},
    +		{"DysymtabCmd", Type, 0, ""},
    +		{"DysymtabCmd.Cmd", Field, 0, ""},
    +		{"DysymtabCmd.Extrefsymoff", Field, 0, ""},
    +		{"DysymtabCmd.Extreloff", Field, 0, ""},
    +		{"DysymtabCmd.Iextdefsym", Field, 0, ""},
    +		{"DysymtabCmd.Ilocalsym", Field, 0, ""},
    +		{"DysymtabCmd.Indirectsymoff", Field, 0, ""},
    +		{"DysymtabCmd.Iundefsym", Field, 0, ""},
    +		{"DysymtabCmd.Len", Field, 0, ""},
    +		{"DysymtabCmd.Locreloff", Field, 0, ""},
    +		{"DysymtabCmd.Modtaboff", Field, 0, ""},
    +		{"DysymtabCmd.Nextdefsym", Field, 0, ""},
    +		{"DysymtabCmd.Nextrefsyms", Field, 0, ""},
    +		{"DysymtabCmd.Nextrel", Field, 0, ""},
    +		{"DysymtabCmd.Nindirectsyms", Field, 0, ""},
    +		{"DysymtabCmd.Nlocalsym", Field, 0, ""},
    +		{"DysymtabCmd.Nlocrel", Field, 0, ""},
    +		{"DysymtabCmd.Nmodtab", Field, 0, ""},
    +		{"DysymtabCmd.Ntoc", Field, 0, ""},
    +		{"DysymtabCmd.Nundefsym", Field, 0, ""},
    +		{"DysymtabCmd.Tocoffset", Field, 0, ""},
    +		{"ErrNotFat", Var, 3, ""},
    +		{"FatArch", Type, 3, ""},
    +		{"FatArch.FatArchHeader", Field, 3, ""},
    +		{"FatArch.File", Field, 3, ""},
    +		{"FatArchHeader", Type, 3, ""},
    +		{"FatArchHeader.Align", Field, 3, ""},
    +		{"FatArchHeader.Cpu", Field, 3, ""},
    +		{"FatArchHeader.Offset", Field, 3, ""},
    +		{"FatArchHeader.Size", Field, 3, ""},
    +		{"FatArchHeader.SubCpu", Field, 3, ""},
    +		{"FatFile", Type, 3, ""},
    +		{"FatFile.Arches", Field, 3, ""},
    +		{"FatFile.Magic", Field, 3, ""},
    +		{"File", Type, 0, ""},
    +		{"File.ByteOrder", Field, 0, ""},
    +		{"File.Dysymtab", Field, 0, ""},
    +		{"File.FileHeader", Field, 0, ""},
    +		{"File.Loads", Field, 0, ""},
    +		{"File.Sections", Field, 0, ""},
    +		{"File.Symtab", Field, 0, ""},
    +		{"FileHeader", Type, 0, ""},
    +		{"FileHeader.Cmdsz", Field, 0, ""},
    +		{"FileHeader.Cpu", Field, 0, ""},
    +		{"FileHeader.Flags", Field, 0, ""},
    +		{"FileHeader.Magic", Field, 0, ""},
    +		{"FileHeader.Ncmd", Field, 0, ""},
    +		{"FileHeader.SubCpu", Field, 0, ""},
    +		{"FileHeader.Type", Field, 0, ""},
    +		{"FlagAllModsBound", Const, 10, ""},
    +		{"FlagAllowStackExecution", Const, 10, ""},
    +		{"FlagAppExtensionSafe", Const, 10, ""},
    +		{"FlagBindAtLoad", Const, 10, ""},
    +		{"FlagBindsToWeak", Const, 10, ""},
    +		{"FlagCanonical", Const, 10, ""},
    +		{"FlagDeadStrippableDylib", Const, 10, ""},
    +		{"FlagDyldLink", Const, 10, ""},
    +		{"FlagForceFlat", Const, 10, ""},
    +		{"FlagHasTLVDescriptors", Const, 10, ""},
    +		{"FlagIncrLink", Const, 10, ""},
    +		{"FlagLazyInit", Const, 10, ""},
    +		{"FlagNoFixPrebinding", Const, 10, ""},
    +		{"FlagNoHeapExecution", Const, 10, ""},
    +		{"FlagNoMultiDefs", Const, 10, ""},
    +		{"FlagNoReexportedDylibs", Const, 10, ""},
    +		{"FlagNoUndefs", Const, 10, ""},
    +		{"FlagPIE", Const, 10, ""},
    +		{"FlagPrebindable", Const, 10, ""},
    +		{"FlagPrebound", Const, 10, ""},
    +		{"FlagRootSafe", Const, 10, ""},
    +		{"FlagSetuidSafe", Const, 10, ""},
    +		{"FlagSplitSegs", Const, 10, ""},
    +		{"FlagSubsectionsViaSymbols", Const, 10, ""},
    +		{"FlagTwoLevel", Const, 10, ""},
    +		{"FlagWeakDefines", Const, 10, ""},
    +		{"FormatError", Type, 0, ""},
    +		{"GENERIC_RELOC_LOCAL_SECTDIFF", Const, 10, ""},
    +		{"GENERIC_RELOC_PAIR", Const, 10, ""},
    +		{"GENERIC_RELOC_PB_LA_PTR", Const, 10, ""},
    +		{"GENERIC_RELOC_SECTDIFF", Const, 10, ""},
    +		{"GENERIC_RELOC_TLV", Const, 10, ""},
    +		{"GENERIC_RELOC_VANILLA", Const, 10, ""},
    +		{"Load", Type, 0, ""},
    +		{"LoadBytes", Type, 0, ""},
    +		{"LoadCmd", Type, 0, ""},
    +		{"LoadCmdDylib", Const, 0, ""},
    +		{"LoadCmdDylinker", Const, 0, ""},
    +		{"LoadCmdDysymtab", Const, 0, ""},
    +		{"LoadCmdRpath", Const, 10, ""},
    +		{"LoadCmdSegment", Const, 0, ""},
    +		{"LoadCmdSegment64", Const, 0, ""},
    +		{"LoadCmdSymtab", Const, 0, ""},
    +		{"LoadCmdThread", Const, 0, ""},
    +		{"LoadCmdUnixThread", Const, 0, ""},
    +		{"Magic32", Const, 0, ""},
    +		{"Magic64", Const, 0, ""},
    +		{"MagicFat", Const, 3, ""},
    +		{"NewFatFile", Func, 3, "func(r io.ReaderAt) (*FatFile, error)"},
    +		{"NewFile", Func, 0, "func(r io.ReaderAt) (*File, error)"},
    +		{"Nlist32", Type, 0, ""},
    +		{"Nlist32.Desc", Field, 0, ""},
    +		{"Nlist32.Name", Field, 0, ""},
    +		{"Nlist32.Sect", Field, 0, ""},
    +		{"Nlist32.Type", Field, 0, ""},
    +		{"Nlist32.Value", Field, 0, ""},
    +		{"Nlist64", Type, 0, ""},
    +		{"Nlist64.Desc", Field, 0, ""},
    +		{"Nlist64.Name", Field, 0, ""},
    +		{"Nlist64.Sect", Field, 0, ""},
    +		{"Nlist64.Type", Field, 0, ""},
    +		{"Nlist64.Value", Field, 0, ""},
    +		{"Open", Func, 0, "func(name string) (*File, error)"},
    +		{"OpenFat", Func, 3, "func(name string) (*FatFile, error)"},
    +		{"Regs386", Type, 0, ""},
    +		{"Regs386.AX", Field, 0, ""},
    +		{"Regs386.BP", Field, 0, ""},
    +		{"Regs386.BX", Field, 0, ""},
    +		{"Regs386.CS", Field, 0, ""},
    +		{"Regs386.CX", Field, 0, ""},
    +		{"Regs386.DI", Field, 0, ""},
    +		{"Regs386.DS", Field, 0, ""},
    +		{"Regs386.DX", Field, 0, ""},
    +		{"Regs386.ES", Field, 0, ""},
    +		{"Regs386.FLAGS", Field, 0, ""},
    +		{"Regs386.FS", Field, 0, ""},
    +		{"Regs386.GS", Field, 0, ""},
    +		{"Regs386.IP", Field, 0, ""},
    +		{"Regs386.SI", Field, 0, ""},
    +		{"Regs386.SP", Field, 0, ""},
    +		{"Regs386.SS", Field, 0, ""},
    +		{"RegsAMD64", Type, 0, ""},
    +		{"RegsAMD64.AX", Field, 0, ""},
    +		{"RegsAMD64.BP", Field, 0, ""},
    +		{"RegsAMD64.BX", Field, 0, ""},
    +		{"RegsAMD64.CS", Field, 0, ""},
    +		{"RegsAMD64.CX", Field, 0, ""},
    +		{"RegsAMD64.DI", Field, 0, ""},
    +		{"RegsAMD64.DX", Field, 0, ""},
    +		{"RegsAMD64.FLAGS", Field, 0, ""},
    +		{"RegsAMD64.FS", Field, 0, ""},
    +		{"RegsAMD64.GS", Field, 0, ""},
    +		{"RegsAMD64.IP", Field, 0, ""},
    +		{"RegsAMD64.R10", Field, 0, ""},
    +		{"RegsAMD64.R11", Field, 0, ""},
    +		{"RegsAMD64.R12", Field, 0, ""},
    +		{"RegsAMD64.R13", Field, 0, ""},
    +		{"RegsAMD64.R14", Field, 0, ""},
    +		{"RegsAMD64.R15", Field, 0, ""},
    +		{"RegsAMD64.R8", Field, 0, ""},
    +		{"RegsAMD64.R9", Field, 0, ""},
    +		{"RegsAMD64.SI", Field, 0, ""},
    +		{"RegsAMD64.SP", Field, 0, ""},
    +		{"Reloc", Type, 10, ""},
    +		{"Reloc.Addr", Field, 10, ""},
    +		{"Reloc.Extern", Field, 10, ""},
    +		{"Reloc.Len", Field, 10, ""},
    +		{"Reloc.Pcrel", Field, 10, ""},
    +		{"Reloc.Scattered", Field, 10, ""},
    +		{"Reloc.Type", Field, 10, ""},
    +		{"Reloc.Value", Field, 10, ""},
    +		{"RelocTypeARM", Type, 10, ""},
    +		{"RelocTypeARM64", Type, 10, ""},
    +		{"RelocTypeGeneric", Type, 10, ""},
    +		{"RelocTypeX86_64", Type, 10, ""},
    +		{"Rpath", Type, 10, ""},
    +		{"Rpath.LoadBytes", Field, 10, ""},
    +		{"Rpath.Path", Field, 10, ""},
    +		{"RpathCmd", Type, 10, ""},
    +		{"RpathCmd.Cmd", Field, 10, ""},
    +		{"RpathCmd.Len", Field, 10, ""},
    +		{"RpathCmd.Path", Field, 10, ""},
    +		{"Section", Type, 0, ""},
    +		{"Section.ReaderAt", Field, 0, ""},
    +		{"Section.Relocs", Field, 10, ""},
    +		{"Section.SectionHeader", Field, 0, ""},
    +		{"Section32", Type, 0, ""},
    +		{"Section32.Addr", Field, 0, ""},
    +		{"Section32.Align", Field, 0, ""},
    +		{"Section32.Flags", Field, 0, ""},
    +		{"Section32.Name", Field, 0, ""},
    +		{"Section32.Nreloc", Field, 0, ""},
    +		{"Section32.Offset", Field, 0, ""},
    +		{"Section32.Reloff", Field, 0, ""},
    +		{"Section32.Reserve1", Field, 0, ""},
    +		{"Section32.Reserve2", Field, 0, ""},
    +		{"Section32.Seg", Field, 0, ""},
    +		{"Section32.Size", Field, 0, ""},
    +		{"Section64", Type, 0, ""},
    +		{"Section64.Addr", Field, 0, ""},
    +		{"Section64.Align", Field, 0, ""},
    +		{"Section64.Flags", Field, 0, ""},
    +		{"Section64.Name", Field, 0, ""},
    +		{"Section64.Nreloc", Field, 0, ""},
    +		{"Section64.Offset", Field, 0, ""},
    +		{"Section64.Reloff", Field, 0, ""},
    +		{"Section64.Reserve1", Field, 0, ""},
    +		{"Section64.Reserve2", Field, 0, ""},
    +		{"Section64.Reserve3", Field, 0, ""},
    +		{"Section64.Seg", Field, 0, ""},
    +		{"Section64.Size", Field, 0, ""},
    +		{"SectionHeader", Type, 0, ""},
    +		{"SectionHeader.Addr", Field, 0, ""},
    +		{"SectionHeader.Align", Field, 0, ""},
    +		{"SectionHeader.Flags", Field, 0, ""},
    +		{"SectionHeader.Name", Field, 0, ""},
    +		{"SectionHeader.Nreloc", Field, 0, ""},
    +		{"SectionHeader.Offset", Field, 0, ""},
    +		{"SectionHeader.Reloff", Field, 0, ""},
    +		{"SectionHeader.Seg", Field, 0, ""},
    +		{"SectionHeader.Size", Field, 0, ""},
    +		{"Segment", Type, 0, ""},
    +		{"Segment.LoadBytes", Field, 0, ""},
    +		{"Segment.ReaderAt", Field, 0, ""},
    +		{"Segment.SegmentHeader", Field, 0, ""},
    +		{"Segment32", Type, 0, ""},
    +		{"Segment32.Addr", Field, 0, ""},
    +		{"Segment32.Cmd", Field, 0, ""},
    +		{"Segment32.Filesz", Field, 0, ""},
    +		{"Segment32.Flag", Field, 0, ""},
    +		{"Segment32.Len", Field, 0, ""},
    +		{"Segment32.Maxprot", Field, 0, ""},
    +		{"Segment32.Memsz", Field, 0, ""},
    +		{"Segment32.Name", Field, 0, ""},
    +		{"Segment32.Nsect", Field, 0, ""},
    +		{"Segment32.Offset", Field, 0, ""},
    +		{"Segment32.Prot", Field, 0, ""},
    +		{"Segment64", Type, 0, ""},
    +		{"Segment64.Addr", Field, 0, ""},
    +		{"Segment64.Cmd", Field, 0, ""},
    +		{"Segment64.Filesz", Field, 0, ""},
    +		{"Segment64.Flag", Field, 0, ""},
    +		{"Segment64.Len", Field, 0, ""},
    +		{"Segment64.Maxprot", Field, 0, ""},
    +		{"Segment64.Memsz", Field, 0, ""},
    +		{"Segment64.Name", Field, 0, ""},
    +		{"Segment64.Nsect", Field, 0, ""},
    +		{"Segment64.Offset", Field, 0, ""},
    +		{"Segment64.Prot", Field, 0, ""},
    +		{"SegmentHeader", Type, 0, ""},
    +		{"SegmentHeader.Addr", Field, 0, ""},
    +		{"SegmentHeader.Cmd", Field, 0, ""},
    +		{"SegmentHeader.Filesz", Field, 0, ""},
    +		{"SegmentHeader.Flag", Field, 0, ""},
    +		{"SegmentHeader.Len", Field, 0, ""},
    +		{"SegmentHeader.Maxprot", Field, 0, ""},
    +		{"SegmentHeader.Memsz", Field, 0, ""},
    +		{"SegmentHeader.Name", Field, 0, ""},
    +		{"SegmentHeader.Nsect", Field, 0, ""},
    +		{"SegmentHeader.Offset", Field, 0, ""},
    +		{"SegmentHeader.Prot", Field, 0, ""},
    +		{"Symbol", Type, 0, ""},
    +		{"Symbol.Desc", Field, 0, ""},
    +		{"Symbol.Name", Field, 0, ""},
    +		{"Symbol.Sect", Field, 0, ""},
    +		{"Symbol.Type", Field, 0, ""},
    +		{"Symbol.Value", Field, 0, ""},
    +		{"Symtab", Type, 0, ""},
    +		{"Symtab.LoadBytes", Field, 0, ""},
    +		{"Symtab.Syms", Field, 0, ""},
    +		{"Symtab.SymtabCmd", Field, 0, ""},
    +		{"SymtabCmd", Type, 0, ""},
    +		{"SymtabCmd.Cmd", Field, 0, ""},
    +		{"SymtabCmd.Len", Field, 0, ""},
    +		{"SymtabCmd.Nsyms", Field, 0, ""},
    +		{"SymtabCmd.Stroff", Field, 0, ""},
    +		{"SymtabCmd.Strsize", Field, 0, ""},
    +		{"SymtabCmd.Symoff", Field, 0, ""},
    +		{"Thread", Type, 0, ""},
    +		{"Thread.Cmd", Field, 0, ""},
    +		{"Thread.Data", Field, 0, ""},
    +		{"Thread.Len", Field, 0, ""},
    +		{"Thread.Type", Field, 0, ""},
    +		{"Type", Type, 0, ""},
    +		{"TypeBundle", Const, 3, ""},
    +		{"TypeDylib", Const, 3, ""},
    +		{"TypeExec", Const, 0, ""},
    +		{"TypeObj", Const, 0, ""},
    +		{"X86_64_RELOC_BRANCH", Const, 10, ""},
    +		{"X86_64_RELOC_GOT", Const, 10, ""},
    +		{"X86_64_RELOC_GOT_LOAD", Const, 10, ""},
    +		{"X86_64_RELOC_SIGNED", Const, 10, ""},
    +		{"X86_64_RELOC_SIGNED_1", Const, 10, ""},
    +		{"X86_64_RELOC_SIGNED_2", Const, 10, ""},
    +		{"X86_64_RELOC_SIGNED_4", Const, 10, ""},
    +		{"X86_64_RELOC_SUBTRACTOR", Const, 10, ""},
    +		{"X86_64_RELOC_TLV", Const, 10, ""},
    +		{"X86_64_RELOC_UNSIGNED", Const, 10, ""},
     	},
     	"debug/pe": {
    -		{"(*COFFSymbol).FullName", Method, 8},
    -		{"(*File).COFFSymbolReadSectionDefAux", Method, 19},
    -		{"(*File).Close", Method, 0},
    -		{"(*File).DWARF", Method, 0},
    -		{"(*File).ImportedLibraries", Method, 0},
    -		{"(*File).ImportedSymbols", Method, 0},
    -		{"(*File).Section", Method, 0},
    -		{"(*FormatError).Error", Method, 0},
    -		{"(*Section).Data", Method, 0},
    -		{"(*Section).Open", Method, 0},
    -		{"(Section).ReadAt", Method, 0},
    -		{"(StringTable).String", Method, 8},
    -		{"COFFSymbol", Type, 1},
    -		{"COFFSymbol.Name", Field, 1},
    -		{"COFFSymbol.NumberOfAuxSymbols", Field, 1},
    -		{"COFFSymbol.SectionNumber", Field, 1},
    -		{"COFFSymbol.StorageClass", Field, 1},
    -		{"COFFSymbol.Type", Field, 1},
    -		{"COFFSymbol.Value", Field, 1},
    -		{"COFFSymbolAuxFormat5", Type, 19},
    -		{"COFFSymbolAuxFormat5.Checksum", Field, 19},
    -		{"COFFSymbolAuxFormat5.NumLineNumbers", Field, 19},
    -		{"COFFSymbolAuxFormat5.NumRelocs", Field, 19},
    -		{"COFFSymbolAuxFormat5.SecNum", Field, 19},
    -		{"COFFSymbolAuxFormat5.Selection", Field, 19},
    -		{"COFFSymbolAuxFormat5.Size", Field, 19},
    -		{"COFFSymbolSize", Const, 1},
    -		{"DataDirectory", Type, 3},
    -		{"DataDirectory.Size", Field, 3},
    -		{"DataDirectory.VirtualAddress", Field, 3},
    -		{"File", Type, 0},
    -		{"File.COFFSymbols", Field, 8},
    -		{"File.FileHeader", Field, 0},
    -		{"File.OptionalHeader", Field, 3},
    -		{"File.Sections", Field, 0},
    -		{"File.StringTable", Field, 8},
    -		{"File.Symbols", Field, 1},
    -		{"FileHeader", Type, 0},
    -		{"FileHeader.Characteristics", Field, 0},
    -		{"FileHeader.Machine", Field, 0},
    -		{"FileHeader.NumberOfSections", Field, 0},
    -		{"FileHeader.NumberOfSymbols", Field, 0},
    -		{"FileHeader.PointerToSymbolTable", Field, 0},
    -		{"FileHeader.SizeOfOptionalHeader", Field, 0},
    -		{"FileHeader.TimeDateStamp", Field, 0},
    -		{"FormatError", Type, 0},
    -		{"IMAGE_COMDAT_SELECT_ANY", Const, 19},
    -		{"IMAGE_COMDAT_SELECT_ASSOCIATIVE", Const, 19},
    -		{"IMAGE_COMDAT_SELECT_EXACT_MATCH", Const, 19},
    -		{"IMAGE_COMDAT_SELECT_LARGEST", Const, 19},
    -		{"IMAGE_COMDAT_SELECT_NODUPLICATES", Const, 19},
    -		{"IMAGE_COMDAT_SELECT_SAME_SIZE", Const, 19},
    -		{"IMAGE_DIRECTORY_ENTRY_ARCHITECTURE", Const, 11},
    -		{"IMAGE_DIRECTORY_ENTRY_BASERELOC", Const, 11},
    -		{"IMAGE_DIRECTORY_ENTRY_BOUND_IMPORT", Const, 11},
    -		{"IMAGE_DIRECTORY_ENTRY_COM_DESCRIPTOR", Const, 11},
    -		{"IMAGE_DIRECTORY_ENTRY_DEBUG", Const, 11},
    -		{"IMAGE_DIRECTORY_ENTRY_DELAY_IMPORT", Const, 11},
    -		{"IMAGE_DIRECTORY_ENTRY_EXCEPTION", Const, 11},
    -		{"IMAGE_DIRECTORY_ENTRY_EXPORT", Const, 11},
    -		{"IMAGE_DIRECTORY_ENTRY_GLOBALPTR", Const, 11},
    -		{"IMAGE_DIRECTORY_ENTRY_IAT", Const, 11},
    -		{"IMAGE_DIRECTORY_ENTRY_IMPORT", Const, 11},
    -		{"IMAGE_DIRECTORY_ENTRY_LOAD_CONFIG", Const, 11},
    -		{"IMAGE_DIRECTORY_ENTRY_RESOURCE", Const, 11},
    -		{"IMAGE_DIRECTORY_ENTRY_SECURITY", Const, 11},
    -		{"IMAGE_DIRECTORY_ENTRY_TLS", Const, 11},
    -		{"IMAGE_DLLCHARACTERISTICS_APPCONTAINER", Const, 15},
    -		{"IMAGE_DLLCHARACTERISTICS_DYNAMIC_BASE", Const, 15},
    -		{"IMAGE_DLLCHARACTERISTICS_FORCE_INTEGRITY", Const, 15},
    -		{"IMAGE_DLLCHARACTERISTICS_GUARD_CF", Const, 15},
    -		{"IMAGE_DLLCHARACTERISTICS_HIGH_ENTROPY_VA", Const, 15},
    -		{"IMAGE_DLLCHARACTERISTICS_NO_BIND", Const, 15},
    -		{"IMAGE_DLLCHARACTERISTICS_NO_ISOLATION", Const, 15},
    -		{"IMAGE_DLLCHARACTERISTICS_NO_SEH", Const, 15},
    -		{"IMAGE_DLLCHARACTERISTICS_NX_COMPAT", Const, 15},
    -		{"IMAGE_DLLCHARACTERISTICS_TERMINAL_SERVER_AWARE", Const, 15},
    -		{"IMAGE_DLLCHARACTERISTICS_WDM_DRIVER", Const, 15},
    -		{"IMAGE_FILE_32BIT_MACHINE", Const, 15},
    -		{"IMAGE_FILE_AGGRESIVE_WS_TRIM", Const, 15},
    -		{"IMAGE_FILE_BYTES_REVERSED_HI", Const, 15},
    -		{"IMAGE_FILE_BYTES_REVERSED_LO", Const, 15},
    -		{"IMAGE_FILE_DEBUG_STRIPPED", Const, 15},
    -		{"IMAGE_FILE_DLL", Const, 15},
    -		{"IMAGE_FILE_EXECUTABLE_IMAGE", Const, 15},
    -		{"IMAGE_FILE_LARGE_ADDRESS_AWARE", Const, 15},
    -		{"IMAGE_FILE_LINE_NUMS_STRIPPED", Const, 15},
    -		{"IMAGE_FILE_LOCAL_SYMS_STRIPPED", Const, 15},
    -		{"IMAGE_FILE_MACHINE_AM33", Const, 0},
    -		{"IMAGE_FILE_MACHINE_AMD64", Const, 0},
    -		{"IMAGE_FILE_MACHINE_ARM", Const, 0},
    -		{"IMAGE_FILE_MACHINE_ARM64", Const, 11},
    -		{"IMAGE_FILE_MACHINE_ARMNT", Const, 12},
    -		{"IMAGE_FILE_MACHINE_EBC", Const, 0},
    -		{"IMAGE_FILE_MACHINE_I386", Const, 0},
    -		{"IMAGE_FILE_MACHINE_IA64", Const, 0},
    -		{"IMAGE_FILE_MACHINE_LOONGARCH32", Const, 19},
    -		{"IMAGE_FILE_MACHINE_LOONGARCH64", Const, 19},
    -		{"IMAGE_FILE_MACHINE_M32R", Const, 0},
    -		{"IMAGE_FILE_MACHINE_MIPS16", Const, 0},
    -		{"IMAGE_FILE_MACHINE_MIPSFPU", Const, 0},
    -		{"IMAGE_FILE_MACHINE_MIPSFPU16", Const, 0},
    -		{"IMAGE_FILE_MACHINE_POWERPC", Const, 0},
    -		{"IMAGE_FILE_MACHINE_POWERPCFP", Const, 0},
    -		{"IMAGE_FILE_MACHINE_R4000", Const, 0},
    -		{"IMAGE_FILE_MACHINE_RISCV128", Const, 20},
    -		{"IMAGE_FILE_MACHINE_RISCV32", Const, 20},
    -		{"IMAGE_FILE_MACHINE_RISCV64", Const, 20},
    -		{"IMAGE_FILE_MACHINE_SH3", Const, 0},
    -		{"IMAGE_FILE_MACHINE_SH3DSP", Const, 0},
    -		{"IMAGE_FILE_MACHINE_SH4", Const, 0},
    -		{"IMAGE_FILE_MACHINE_SH5", Const, 0},
    -		{"IMAGE_FILE_MACHINE_THUMB", Const, 0},
    -		{"IMAGE_FILE_MACHINE_UNKNOWN", Const, 0},
    -		{"IMAGE_FILE_MACHINE_WCEMIPSV2", Const, 0},
    -		{"IMAGE_FILE_NET_RUN_FROM_SWAP", Const, 15},
    -		{"IMAGE_FILE_RELOCS_STRIPPED", Const, 15},
    -		{"IMAGE_FILE_REMOVABLE_RUN_FROM_SWAP", Const, 15},
    -		{"IMAGE_FILE_SYSTEM", Const, 15},
    -		{"IMAGE_FILE_UP_SYSTEM_ONLY", Const, 15},
    -		{"IMAGE_SCN_CNT_CODE", Const, 19},
    -		{"IMAGE_SCN_CNT_INITIALIZED_DATA", Const, 19},
    -		{"IMAGE_SCN_CNT_UNINITIALIZED_DATA", Const, 19},
    -		{"IMAGE_SCN_LNK_COMDAT", Const, 19},
    -		{"IMAGE_SCN_MEM_DISCARDABLE", Const, 19},
    -		{"IMAGE_SCN_MEM_EXECUTE", Const, 19},
    -		{"IMAGE_SCN_MEM_READ", Const, 19},
    -		{"IMAGE_SCN_MEM_WRITE", Const, 19},
    -		{"IMAGE_SUBSYSTEM_EFI_APPLICATION", Const, 15},
    -		{"IMAGE_SUBSYSTEM_EFI_BOOT_SERVICE_DRIVER", Const, 15},
    -		{"IMAGE_SUBSYSTEM_EFI_ROM", Const, 15},
    -		{"IMAGE_SUBSYSTEM_EFI_RUNTIME_DRIVER", Const, 15},
    -		{"IMAGE_SUBSYSTEM_NATIVE", Const, 15},
    -		{"IMAGE_SUBSYSTEM_NATIVE_WINDOWS", Const, 15},
    -		{"IMAGE_SUBSYSTEM_OS2_CUI", Const, 15},
    -		{"IMAGE_SUBSYSTEM_POSIX_CUI", Const, 15},
    -		{"IMAGE_SUBSYSTEM_UNKNOWN", Const, 15},
    -		{"IMAGE_SUBSYSTEM_WINDOWS_BOOT_APPLICATION", Const, 15},
    -		{"IMAGE_SUBSYSTEM_WINDOWS_CE_GUI", Const, 15},
    -		{"IMAGE_SUBSYSTEM_WINDOWS_CUI", Const, 15},
    -		{"IMAGE_SUBSYSTEM_WINDOWS_GUI", Const, 15},
    -		{"IMAGE_SUBSYSTEM_XBOX", Const, 15},
    -		{"ImportDirectory", Type, 0},
    -		{"ImportDirectory.FirstThunk", Field, 0},
    -		{"ImportDirectory.ForwarderChain", Field, 0},
    -		{"ImportDirectory.Name", Field, 0},
    -		{"ImportDirectory.OriginalFirstThunk", Field, 0},
    -		{"ImportDirectory.TimeDateStamp", Field, 0},
    -		{"NewFile", Func, 0},
    -		{"Open", Func, 0},
    -		{"OptionalHeader32", Type, 3},
    -		{"OptionalHeader32.AddressOfEntryPoint", Field, 3},
    -		{"OptionalHeader32.BaseOfCode", Field, 3},
    -		{"OptionalHeader32.BaseOfData", Field, 3},
    -		{"OptionalHeader32.CheckSum", Field, 3},
    -		{"OptionalHeader32.DataDirectory", Field, 3},
    -		{"OptionalHeader32.DllCharacteristics", Field, 3},
    -		{"OptionalHeader32.FileAlignment", Field, 3},
    -		{"OptionalHeader32.ImageBase", Field, 3},
    -		{"OptionalHeader32.LoaderFlags", Field, 3},
    -		{"OptionalHeader32.Magic", Field, 3},
    -		{"OptionalHeader32.MajorImageVersion", Field, 3},
    -		{"OptionalHeader32.MajorLinkerVersion", Field, 3},
    -		{"OptionalHeader32.MajorOperatingSystemVersion", Field, 3},
    -		{"OptionalHeader32.MajorSubsystemVersion", Field, 3},
    -		{"OptionalHeader32.MinorImageVersion", Field, 3},
    -		{"OptionalHeader32.MinorLinkerVersion", Field, 3},
    -		{"OptionalHeader32.MinorOperatingSystemVersion", Field, 3},
    -		{"OptionalHeader32.MinorSubsystemVersion", Field, 3},
    -		{"OptionalHeader32.NumberOfRvaAndSizes", Field, 3},
    -		{"OptionalHeader32.SectionAlignment", Field, 3},
    -		{"OptionalHeader32.SizeOfCode", Field, 3},
    -		{"OptionalHeader32.SizeOfHeaders", Field, 3},
    -		{"OptionalHeader32.SizeOfHeapCommit", Field, 3},
    -		{"OptionalHeader32.SizeOfHeapReserve", Field, 3},
    -		{"OptionalHeader32.SizeOfImage", Field, 3},
    -		{"OptionalHeader32.SizeOfInitializedData", Field, 3},
    -		{"OptionalHeader32.SizeOfStackCommit", Field, 3},
    -		{"OptionalHeader32.SizeOfStackReserve", Field, 3},
    -		{"OptionalHeader32.SizeOfUninitializedData", Field, 3},
    -		{"OptionalHeader32.Subsystem", Field, 3},
    -		{"OptionalHeader32.Win32VersionValue", Field, 3},
    -		{"OptionalHeader64", Type, 3},
    -		{"OptionalHeader64.AddressOfEntryPoint", Field, 3},
    -		{"OptionalHeader64.BaseOfCode", Field, 3},
    -		{"OptionalHeader64.CheckSum", Field, 3},
    -		{"OptionalHeader64.DataDirectory", Field, 3},
    -		{"OptionalHeader64.DllCharacteristics", Field, 3},
    -		{"OptionalHeader64.FileAlignment", Field, 3},
    -		{"OptionalHeader64.ImageBase", Field, 3},
    -		{"OptionalHeader64.LoaderFlags", Field, 3},
    -		{"OptionalHeader64.Magic", Field, 3},
    -		{"OptionalHeader64.MajorImageVersion", Field, 3},
    -		{"OptionalHeader64.MajorLinkerVersion", Field, 3},
    -		{"OptionalHeader64.MajorOperatingSystemVersion", Field, 3},
    -		{"OptionalHeader64.MajorSubsystemVersion", Field, 3},
    -		{"OptionalHeader64.MinorImageVersion", Field, 3},
    -		{"OptionalHeader64.MinorLinkerVersion", Field, 3},
    -		{"OptionalHeader64.MinorOperatingSystemVersion", Field, 3},
    -		{"OptionalHeader64.MinorSubsystemVersion", Field, 3},
    -		{"OptionalHeader64.NumberOfRvaAndSizes", Field, 3},
    -		{"OptionalHeader64.SectionAlignment", Field, 3},
    -		{"OptionalHeader64.SizeOfCode", Field, 3},
    -		{"OptionalHeader64.SizeOfHeaders", Field, 3},
    -		{"OptionalHeader64.SizeOfHeapCommit", Field, 3},
    -		{"OptionalHeader64.SizeOfHeapReserve", Field, 3},
    -		{"OptionalHeader64.SizeOfImage", Field, 3},
    -		{"OptionalHeader64.SizeOfInitializedData", Field, 3},
    -		{"OptionalHeader64.SizeOfStackCommit", Field, 3},
    -		{"OptionalHeader64.SizeOfStackReserve", Field, 3},
    -		{"OptionalHeader64.SizeOfUninitializedData", Field, 3},
    -		{"OptionalHeader64.Subsystem", Field, 3},
    -		{"OptionalHeader64.Win32VersionValue", Field, 3},
    -		{"Reloc", Type, 8},
    -		{"Reloc.SymbolTableIndex", Field, 8},
    -		{"Reloc.Type", Field, 8},
    -		{"Reloc.VirtualAddress", Field, 8},
    -		{"Section", Type, 0},
    -		{"Section.ReaderAt", Field, 0},
    -		{"Section.Relocs", Field, 8},
    -		{"Section.SectionHeader", Field, 0},
    -		{"SectionHeader", Type, 0},
    -		{"SectionHeader.Characteristics", Field, 0},
    -		{"SectionHeader.Name", Field, 0},
    -		{"SectionHeader.NumberOfLineNumbers", Field, 0},
    -		{"SectionHeader.NumberOfRelocations", Field, 0},
    -		{"SectionHeader.Offset", Field, 0},
    -		{"SectionHeader.PointerToLineNumbers", Field, 0},
    -		{"SectionHeader.PointerToRelocations", Field, 0},
    -		{"SectionHeader.Size", Field, 0},
    -		{"SectionHeader.VirtualAddress", Field, 0},
    -		{"SectionHeader.VirtualSize", Field, 0},
    -		{"SectionHeader32", Type, 0},
    -		{"SectionHeader32.Characteristics", Field, 0},
    -		{"SectionHeader32.Name", Field, 0},
    -		{"SectionHeader32.NumberOfLineNumbers", Field, 0},
    -		{"SectionHeader32.NumberOfRelocations", Field, 0},
    -		{"SectionHeader32.PointerToLineNumbers", Field, 0},
    -		{"SectionHeader32.PointerToRawData", Field, 0},
    -		{"SectionHeader32.PointerToRelocations", Field, 0},
    -		{"SectionHeader32.SizeOfRawData", Field, 0},
    -		{"SectionHeader32.VirtualAddress", Field, 0},
    -		{"SectionHeader32.VirtualSize", Field, 0},
    -		{"StringTable", Type, 8},
    -		{"Symbol", Type, 1},
    -		{"Symbol.Name", Field, 1},
    -		{"Symbol.SectionNumber", Field, 1},
    -		{"Symbol.StorageClass", Field, 1},
    -		{"Symbol.Type", Field, 1},
    -		{"Symbol.Value", Field, 1},
    +		{"(*COFFSymbol).FullName", Method, 8, ""},
    +		{"(*File).COFFSymbolReadSectionDefAux", Method, 19, ""},
    +		{"(*File).Close", Method, 0, ""},
    +		{"(*File).DWARF", Method, 0, ""},
    +		{"(*File).ImportedLibraries", Method, 0, ""},
    +		{"(*File).ImportedSymbols", Method, 0, ""},
    +		{"(*File).Section", Method, 0, ""},
    +		{"(*FormatError).Error", Method, 0, ""},
    +		{"(*Section).Data", Method, 0, ""},
    +		{"(*Section).Open", Method, 0, ""},
    +		{"(Section).ReadAt", Method, 0, ""},
    +		{"(StringTable).String", Method, 8, ""},
    +		{"COFFSymbol", Type, 1, ""},
    +		{"COFFSymbol.Name", Field, 1, ""},
    +		{"COFFSymbol.NumberOfAuxSymbols", Field, 1, ""},
    +		{"COFFSymbol.SectionNumber", Field, 1, ""},
    +		{"COFFSymbol.StorageClass", Field, 1, ""},
    +		{"COFFSymbol.Type", Field, 1, ""},
    +		{"COFFSymbol.Value", Field, 1, ""},
    +		{"COFFSymbolAuxFormat5", Type, 19, ""},
    +		{"COFFSymbolAuxFormat5.Checksum", Field, 19, ""},
    +		{"COFFSymbolAuxFormat5.NumLineNumbers", Field, 19, ""},
    +		{"COFFSymbolAuxFormat5.NumRelocs", Field, 19, ""},
    +		{"COFFSymbolAuxFormat5.SecNum", Field, 19, ""},
    +		{"COFFSymbolAuxFormat5.Selection", Field, 19, ""},
    +		{"COFFSymbolAuxFormat5.Size", Field, 19, ""},
    +		{"COFFSymbolSize", Const, 1, ""},
    +		{"DataDirectory", Type, 3, ""},
    +		{"DataDirectory.Size", Field, 3, ""},
    +		{"DataDirectory.VirtualAddress", Field, 3, ""},
    +		{"File", Type, 0, ""},
    +		{"File.COFFSymbols", Field, 8, ""},
    +		{"File.FileHeader", Field, 0, ""},
    +		{"File.OptionalHeader", Field, 3, ""},
    +		{"File.Sections", Field, 0, ""},
    +		{"File.StringTable", Field, 8, ""},
    +		{"File.Symbols", Field, 1, ""},
    +		{"FileHeader", Type, 0, ""},
    +		{"FileHeader.Characteristics", Field, 0, ""},
    +		{"FileHeader.Machine", Field, 0, ""},
    +		{"FileHeader.NumberOfSections", Field, 0, ""},
    +		{"FileHeader.NumberOfSymbols", Field, 0, ""},
    +		{"FileHeader.PointerToSymbolTable", Field, 0, ""},
    +		{"FileHeader.SizeOfOptionalHeader", Field, 0, ""},
    +		{"FileHeader.TimeDateStamp", Field, 0, ""},
    +		{"FormatError", Type, 0, ""},
    +		{"IMAGE_COMDAT_SELECT_ANY", Const, 19, ""},
    +		{"IMAGE_COMDAT_SELECT_ASSOCIATIVE", Const, 19, ""},
    +		{"IMAGE_COMDAT_SELECT_EXACT_MATCH", Const, 19, ""},
    +		{"IMAGE_COMDAT_SELECT_LARGEST", Const, 19, ""},
    +		{"IMAGE_COMDAT_SELECT_NODUPLICATES", Const, 19, ""},
    +		{"IMAGE_COMDAT_SELECT_SAME_SIZE", Const, 19, ""},
    +		{"IMAGE_DIRECTORY_ENTRY_ARCHITECTURE", Const, 11, ""},
    +		{"IMAGE_DIRECTORY_ENTRY_BASERELOC", Const, 11, ""},
    +		{"IMAGE_DIRECTORY_ENTRY_BOUND_IMPORT", Const, 11, ""},
    +		{"IMAGE_DIRECTORY_ENTRY_COM_DESCRIPTOR", Const, 11, ""},
    +		{"IMAGE_DIRECTORY_ENTRY_DEBUG", Const, 11, ""},
    +		{"IMAGE_DIRECTORY_ENTRY_DELAY_IMPORT", Const, 11, ""},
    +		{"IMAGE_DIRECTORY_ENTRY_EXCEPTION", Const, 11, ""},
    +		{"IMAGE_DIRECTORY_ENTRY_EXPORT", Const, 11, ""},
    +		{"IMAGE_DIRECTORY_ENTRY_GLOBALPTR", Const, 11, ""},
    +		{"IMAGE_DIRECTORY_ENTRY_IAT", Const, 11, ""},
    +		{"IMAGE_DIRECTORY_ENTRY_IMPORT", Const, 11, ""},
    +		{"IMAGE_DIRECTORY_ENTRY_LOAD_CONFIG", Const, 11, ""},
    +		{"IMAGE_DIRECTORY_ENTRY_RESOURCE", Const, 11, ""},
    +		{"IMAGE_DIRECTORY_ENTRY_SECURITY", Const, 11, ""},
    +		{"IMAGE_DIRECTORY_ENTRY_TLS", Const, 11, ""},
    +		{"IMAGE_DLLCHARACTERISTICS_APPCONTAINER", Const, 15, ""},
    +		{"IMAGE_DLLCHARACTERISTICS_DYNAMIC_BASE", Const, 15, ""},
    +		{"IMAGE_DLLCHARACTERISTICS_FORCE_INTEGRITY", Const, 15, ""},
    +		{"IMAGE_DLLCHARACTERISTICS_GUARD_CF", Const, 15, ""},
    +		{"IMAGE_DLLCHARACTERISTICS_HIGH_ENTROPY_VA", Const, 15, ""},
    +		{"IMAGE_DLLCHARACTERISTICS_NO_BIND", Const, 15, ""},
    +		{"IMAGE_DLLCHARACTERISTICS_NO_ISOLATION", Const, 15, ""},
    +		{"IMAGE_DLLCHARACTERISTICS_NO_SEH", Const, 15, ""},
    +		{"IMAGE_DLLCHARACTERISTICS_NX_COMPAT", Const, 15, ""},
    +		{"IMAGE_DLLCHARACTERISTICS_TERMINAL_SERVER_AWARE", Const, 15, ""},
    +		{"IMAGE_DLLCHARACTERISTICS_WDM_DRIVER", Const, 15, ""},
    +		{"IMAGE_FILE_32BIT_MACHINE", Const, 15, ""},
    +		{"IMAGE_FILE_AGGRESIVE_WS_TRIM", Const, 15, ""},
    +		{"IMAGE_FILE_BYTES_REVERSED_HI", Const, 15, ""},
    +		{"IMAGE_FILE_BYTES_REVERSED_LO", Const, 15, ""},
    +		{"IMAGE_FILE_DEBUG_STRIPPED", Const, 15, ""},
    +		{"IMAGE_FILE_DLL", Const, 15, ""},
    +		{"IMAGE_FILE_EXECUTABLE_IMAGE", Const, 15, ""},
    +		{"IMAGE_FILE_LARGE_ADDRESS_AWARE", Const, 15, ""},
    +		{"IMAGE_FILE_LINE_NUMS_STRIPPED", Const, 15, ""},
    +		{"IMAGE_FILE_LOCAL_SYMS_STRIPPED", Const, 15, ""},
    +		{"IMAGE_FILE_MACHINE_AM33", Const, 0, ""},
    +		{"IMAGE_FILE_MACHINE_AMD64", Const, 0, ""},
    +		{"IMAGE_FILE_MACHINE_ARM", Const, 0, ""},
    +		{"IMAGE_FILE_MACHINE_ARM64", Const, 11, ""},
    +		{"IMAGE_FILE_MACHINE_ARMNT", Const, 12, ""},
    +		{"IMAGE_FILE_MACHINE_EBC", Const, 0, ""},
    +		{"IMAGE_FILE_MACHINE_I386", Const, 0, ""},
    +		{"IMAGE_FILE_MACHINE_IA64", Const, 0, ""},
    +		{"IMAGE_FILE_MACHINE_LOONGARCH32", Const, 19, ""},
    +		{"IMAGE_FILE_MACHINE_LOONGARCH64", Const, 19, ""},
    +		{"IMAGE_FILE_MACHINE_M32R", Const, 0, ""},
    +		{"IMAGE_FILE_MACHINE_MIPS16", Const, 0, ""},
    +		{"IMAGE_FILE_MACHINE_MIPSFPU", Const, 0, ""},
    +		{"IMAGE_FILE_MACHINE_MIPSFPU16", Const, 0, ""},
    +		{"IMAGE_FILE_MACHINE_POWERPC", Const, 0, ""},
    +		{"IMAGE_FILE_MACHINE_POWERPCFP", Const, 0, ""},
    +		{"IMAGE_FILE_MACHINE_R4000", Const, 0, ""},
    +		{"IMAGE_FILE_MACHINE_RISCV128", Const, 20, ""},
    +		{"IMAGE_FILE_MACHINE_RISCV32", Const, 20, ""},
    +		{"IMAGE_FILE_MACHINE_RISCV64", Const, 20, ""},
    +		{"IMAGE_FILE_MACHINE_SH3", Const, 0, ""},
    +		{"IMAGE_FILE_MACHINE_SH3DSP", Const, 0, ""},
    +		{"IMAGE_FILE_MACHINE_SH4", Const, 0, ""},
    +		{"IMAGE_FILE_MACHINE_SH5", Const, 0, ""},
    +		{"IMAGE_FILE_MACHINE_THUMB", Const, 0, ""},
    +		{"IMAGE_FILE_MACHINE_UNKNOWN", Const, 0, ""},
    +		{"IMAGE_FILE_MACHINE_WCEMIPSV2", Const, 0, ""},
    +		{"IMAGE_FILE_NET_RUN_FROM_SWAP", Const, 15, ""},
    +		{"IMAGE_FILE_RELOCS_STRIPPED", Const, 15, ""},
    +		{"IMAGE_FILE_REMOVABLE_RUN_FROM_SWAP", Const, 15, ""},
    +		{"IMAGE_FILE_SYSTEM", Const, 15, ""},
    +		{"IMAGE_FILE_UP_SYSTEM_ONLY", Const, 15, ""},
    +		{"IMAGE_SCN_CNT_CODE", Const, 19, ""},
    +		{"IMAGE_SCN_CNT_INITIALIZED_DATA", Const, 19, ""},
    +		{"IMAGE_SCN_CNT_UNINITIALIZED_DATA", Const, 19, ""},
    +		{"IMAGE_SCN_LNK_COMDAT", Const, 19, ""},
    +		{"IMAGE_SCN_MEM_DISCARDABLE", Const, 19, ""},
    +		{"IMAGE_SCN_MEM_EXECUTE", Const, 19, ""},
    +		{"IMAGE_SCN_MEM_READ", Const, 19, ""},
    +		{"IMAGE_SCN_MEM_WRITE", Const, 19, ""},
    +		{"IMAGE_SUBSYSTEM_EFI_APPLICATION", Const, 15, ""},
    +		{"IMAGE_SUBSYSTEM_EFI_BOOT_SERVICE_DRIVER", Const, 15, ""},
    +		{"IMAGE_SUBSYSTEM_EFI_ROM", Const, 15, ""},
    +		{"IMAGE_SUBSYSTEM_EFI_RUNTIME_DRIVER", Const, 15, ""},
    +		{"IMAGE_SUBSYSTEM_NATIVE", Const, 15, ""},
    +		{"IMAGE_SUBSYSTEM_NATIVE_WINDOWS", Const, 15, ""},
    +		{"IMAGE_SUBSYSTEM_OS2_CUI", Const, 15, ""},
    +		{"IMAGE_SUBSYSTEM_POSIX_CUI", Const, 15, ""},
    +		{"IMAGE_SUBSYSTEM_UNKNOWN", Const, 15, ""},
    +		{"IMAGE_SUBSYSTEM_WINDOWS_BOOT_APPLICATION", Const, 15, ""},
    +		{"IMAGE_SUBSYSTEM_WINDOWS_CE_GUI", Const, 15, ""},
    +		{"IMAGE_SUBSYSTEM_WINDOWS_CUI", Const, 15, ""},
    +		{"IMAGE_SUBSYSTEM_WINDOWS_GUI", Const, 15, ""},
    +		{"IMAGE_SUBSYSTEM_XBOX", Const, 15, ""},
    +		{"ImportDirectory", Type, 0, ""},
    +		{"ImportDirectory.FirstThunk", Field, 0, ""},
    +		{"ImportDirectory.ForwarderChain", Field, 0, ""},
    +		{"ImportDirectory.Name", Field, 0, ""},
    +		{"ImportDirectory.OriginalFirstThunk", Field, 0, ""},
    +		{"ImportDirectory.TimeDateStamp", Field, 0, ""},
    +		{"NewFile", Func, 0, "func(r io.ReaderAt) (*File, error)"},
    +		{"Open", Func, 0, "func(name string) (*File, error)"},
    +		{"OptionalHeader32", Type, 3, ""},
    +		{"OptionalHeader32.AddressOfEntryPoint", Field, 3, ""},
    +		{"OptionalHeader32.BaseOfCode", Field, 3, ""},
    +		{"OptionalHeader32.BaseOfData", Field, 3, ""},
    +		{"OptionalHeader32.CheckSum", Field, 3, ""},
    +		{"OptionalHeader32.DataDirectory", Field, 3, ""},
    +		{"OptionalHeader32.DllCharacteristics", Field, 3, ""},
    +		{"OptionalHeader32.FileAlignment", Field, 3, ""},
    +		{"OptionalHeader32.ImageBase", Field, 3, ""},
    +		{"OptionalHeader32.LoaderFlags", Field, 3, ""},
    +		{"OptionalHeader32.Magic", Field, 3, ""},
    +		{"OptionalHeader32.MajorImageVersion", Field, 3, ""},
    +		{"OptionalHeader32.MajorLinkerVersion", Field, 3, ""},
    +		{"OptionalHeader32.MajorOperatingSystemVersion", Field, 3, ""},
    +		{"OptionalHeader32.MajorSubsystemVersion", Field, 3, ""},
    +		{"OptionalHeader32.MinorImageVersion", Field, 3, ""},
    +		{"OptionalHeader32.MinorLinkerVersion", Field, 3, ""},
    +		{"OptionalHeader32.MinorOperatingSystemVersion", Field, 3, ""},
    +		{"OptionalHeader32.MinorSubsystemVersion", Field, 3, ""},
    +		{"OptionalHeader32.NumberOfRvaAndSizes", Field, 3, ""},
    +		{"OptionalHeader32.SectionAlignment", Field, 3, ""},
    +		{"OptionalHeader32.SizeOfCode", Field, 3, ""},
    +		{"OptionalHeader32.SizeOfHeaders", Field, 3, ""},
    +		{"OptionalHeader32.SizeOfHeapCommit", Field, 3, ""},
    +		{"OptionalHeader32.SizeOfHeapReserve", Field, 3, ""},
    +		{"OptionalHeader32.SizeOfImage", Field, 3, ""},
    +		{"OptionalHeader32.SizeOfInitializedData", Field, 3, ""},
    +		{"OptionalHeader32.SizeOfStackCommit", Field, 3, ""},
    +		{"OptionalHeader32.SizeOfStackReserve", Field, 3, ""},
    +		{"OptionalHeader32.SizeOfUninitializedData", Field, 3, ""},
    +		{"OptionalHeader32.Subsystem", Field, 3, ""},
    +		{"OptionalHeader32.Win32VersionValue", Field, 3, ""},
    +		{"OptionalHeader64", Type, 3, ""},
    +		{"OptionalHeader64.AddressOfEntryPoint", Field, 3, ""},
    +		{"OptionalHeader64.BaseOfCode", Field, 3, ""},
    +		{"OptionalHeader64.CheckSum", Field, 3, ""},
    +		{"OptionalHeader64.DataDirectory", Field, 3, ""},
    +		{"OptionalHeader64.DllCharacteristics", Field, 3, ""},
    +		{"OptionalHeader64.FileAlignment", Field, 3, ""},
    +		{"OptionalHeader64.ImageBase", Field, 3, ""},
    +		{"OptionalHeader64.LoaderFlags", Field, 3, ""},
    +		{"OptionalHeader64.Magic", Field, 3, ""},
    +		{"OptionalHeader64.MajorImageVersion", Field, 3, ""},
    +		{"OptionalHeader64.MajorLinkerVersion", Field, 3, ""},
    +		{"OptionalHeader64.MajorOperatingSystemVersion", Field, 3, ""},
    +		{"OptionalHeader64.MajorSubsystemVersion", Field, 3, ""},
    +		{"OptionalHeader64.MinorImageVersion", Field, 3, ""},
    +		{"OptionalHeader64.MinorLinkerVersion", Field, 3, ""},
    +		{"OptionalHeader64.MinorOperatingSystemVersion", Field, 3, ""},
    +		{"OptionalHeader64.MinorSubsystemVersion", Field, 3, ""},
    +		{"OptionalHeader64.NumberOfRvaAndSizes", Field, 3, ""},
    +		{"OptionalHeader64.SectionAlignment", Field, 3, ""},
    +		{"OptionalHeader64.SizeOfCode", Field, 3, ""},
    +		{"OptionalHeader64.SizeOfHeaders", Field, 3, ""},
    +		{"OptionalHeader64.SizeOfHeapCommit", Field, 3, ""},
    +		{"OptionalHeader64.SizeOfHeapReserve", Field, 3, ""},
    +		{"OptionalHeader64.SizeOfImage", Field, 3, ""},
    +		{"OptionalHeader64.SizeOfInitializedData", Field, 3, ""},
    +		{"OptionalHeader64.SizeOfStackCommit", Field, 3, ""},
    +		{"OptionalHeader64.SizeOfStackReserve", Field, 3, ""},
    +		{"OptionalHeader64.SizeOfUninitializedData", Field, 3, ""},
    +		{"OptionalHeader64.Subsystem", Field, 3, ""},
    +		{"OptionalHeader64.Win32VersionValue", Field, 3, ""},
    +		{"Reloc", Type, 8, ""},
    +		{"Reloc.SymbolTableIndex", Field, 8, ""},
    +		{"Reloc.Type", Field, 8, ""},
    +		{"Reloc.VirtualAddress", Field, 8, ""},
    +		{"Section", Type, 0, ""},
    +		{"Section.ReaderAt", Field, 0, ""},
    +		{"Section.Relocs", Field, 8, ""},
    +		{"Section.SectionHeader", Field, 0, ""},
    +		{"SectionHeader", Type, 0, ""},
    +		{"SectionHeader.Characteristics", Field, 0, ""},
    +		{"SectionHeader.Name", Field, 0, ""},
    +		{"SectionHeader.NumberOfLineNumbers", Field, 0, ""},
    +		{"SectionHeader.NumberOfRelocations", Field, 0, ""},
    +		{"SectionHeader.Offset", Field, 0, ""},
    +		{"SectionHeader.PointerToLineNumbers", Field, 0, ""},
    +		{"SectionHeader.PointerToRelocations", Field, 0, ""},
    +		{"SectionHeader.Size", Field, 0, ""},
    +		{"SectionHeader.VirtualAddress", Field, 0, ""},
    +		{"SectionHeader.VirtualSize", Field, 0, ""},
    +		{"SectionHeader32", Type, 0, ""},
    +		{"SectionHeader32.Characteristics", Field, 0, ""},
    +		{"SectionHeader32.Name", Field, 0, ""},
    +		{"SectionHeader32.NumberOfLineNumbers", Field, 0, ""},
    +		{"SectionHeader32.NumberOfRelocations", Field, 0, ""},
    +		{"SectionHeader32.PointerToLineNumbers", Field, 0, ""},
    +		{"SectionHeader32.PointerToRawData", Field, 0, ""},
    +		{"SectionHeader32.PointerToRelocations", Field, 0, ""},
    +		{"SectionHeader32.SizeOfRawData", Field, 0, ""},
    +		{"SectionHeader32.VirtualAddress", Field, 0, ""},
    +		{"SectionHeader32.VirtualSize", Field, 0, ""},
    +		{"StringTable", Type, 8, ""},
    +		{"Symbol", Type, 1, ""},
    +		{"Symbol.Name", Field, 1, ""},
    +		{"Symbol.SectionNumber", Field, 1, ""},
    +		{"Symbol.StorageClass", Field, 1, ""},
    +		{"Symbol.Type", Field, 1, ""},
    +		{"Symbol.Value", Field, 1, ""},
     	},
     	"debug/plan9obj": {
    -		{"(*File).Close", Method, 3},
    -		{"(*File).Section", Method, 3},
    -		{"(*File).Symbols", Method, 3},
    -		{"(*Section).Data", Method, 3},
    -		{"(*Section).Open", Method, 3},
    -		{"(Section).ReadAt", Method, 3},
    -		{"ErrNoSymbols", Var, 18},
    -		{"File", Type, 3},
    -		{"File.FileHeader", Field, 3},
    -		{"File.Sections", Field, 3},
    -		{"FileHeader", Type, 3},
    -		{"FileHeader.Bss", Field, 3},
    -		{"FileHeader.Entry", Field, 3},
    -		{"FileHeader.HdrSize", Field, 4},
    -		{"FileHeader.LoadAddress", Field, 4},
    -		{"FileHeader.Magic", Field, 3},
    -		{"FileHeader.PtrSize", Field, 3},
    -		{"Magic386", Const, 3},
    -		{"Magic64", Const, 3},
    -		{"MagicAMD64", Const, 3},
    -		{"MagicARM", Const, 3},
    -		{"NewFile", Func, 3},
    -		{"Open", Func, 3},
    -		{"Section", Type, 3},
    -		{"Section.ReaderAt", Field, 3},
    -		{"Section.SectionHeader", Field, 3},
    -		{"SectionHeader", Type, 3},
    -		{"SectionHeader.Name", Field, 3},
    -		{"SectionHeader.Offset", Field, 3},
    -		{"SectionHeader.Size", Field, 3},
    -		{"Sym", Type, 3},
    -		{"Sym.Name", Field, 3},
    -		{"Sym.Type", Field, 3},
    -		{"Sym.Value", Field, 3},
    +		{"(*File).Close", Method, 3, ""},
    +		{"(*File).Section", Method, 3, ""},
    +		{"(*File).Symbols", Method, 3, ""},
    +		{"(*Section).Data", Method, 3, ""},
    +		{"(*Section).Open", Method, 3, ""},
    +		{"(Section).ReadAt", Method, 3, ""},
    +		{"ErrNoSymbols", Var, 18, ""},
    +		{"File", Type, 3, ""},
    +		{"File.FileHeader", Field, 3, ""},
    +		{"File.Sections", Field, 3, ""},
    +		{"FileHeader", Type, 3, ""},
    +		{"FileHeader.Bss", Field, 3, ""},
    +		{"FileHeader.Entry", Field, 3, ""},
    +		{"FileHeader.HdrSize", Field, 4, ""},
    +		{"FileHeader.LoadAddress", Field, 4, ""},
    +		{"FileHeader.Magic", Field, 3, ""},
    +		{"FileHeader.PtrSize", Field, 3, ""},
    +		{"Magic386", Const, 3, ""},
    +		{"Magic64", Const, 3, ""},
    +		{"MagicAMD64", Const, 3, ""},
    +		{"MagicARM", Const, 3, ""},
    +		{"NewFile", Func, 3, "func(r io.ReaderAt) (*File, error)"},
    +		{"Open", Func, 3, "func(name string) (*File, error)"},
    +		{"Section", Type, 3, ""},
    +		{"Section.ReaderAt", Field, 3, ""},
    +		{"Section.SectionHeader", Field, 3, ""},
    +		{"SectionHeader", Type, 3, ""},
    +		{"SectionHeader.Name", Field, 3, ""},
    +		{"SectionHeader.Offset", Field, 3, ""},
    +		{"SectionHeader.Size", Field, 3, ""},
    +		{"Sym", Type, 3, ""},
    +		{"Sym.Name", Field, 3, ""},
    +		{"Sym.Type", Field, 3, ""},
    +		{"Sym.Value", Field, 3, ""},
     	},
     	"embed": {
    -		{"(FS).Open", Method, 16},
    -		{"(FS).ReadDir", Method, 16},
    -		{"(FS).ReadFile", Method, 16},
    -		{"FS", Type, 16},
    +		{"(FS).Open", Method, 16, ""},
    +		{"(FS).ReadDir", Method, 16, ""},
    +		{"(FS).ReadFile", Method, 16, ""},
    +		{"FS", Type, 16, ""},
     	},
     	"encoding": {
    -		{"BinaryMarshaler", Type, 2},
    -		{"BinaryUnmarshaler", Type, 2},
    -		{"TextMarshaler", Type, 2},
    -		{"TextUnmarshaler", Type, 2},
    +		{"BinaryAppender", Type, 24, ""},
    +		{"BinaryMarshaler", Type, 2, ""},
    +		{"BinaryUnmarshaler", Type, 2, ""},
    +		{"TextAppender", Type, 24, ""},
    +		{"TextMarshaler", Type, 2, ""},
    +		{"TextUnmarshaler", Type, 2, ""},
     	},
     	"encoding/ascii85": {
    -		{"(CorruptInputError).Error", Method, 0},
    -		{"CorruptInputError", Type, 0},
    -		{"Decode", Func, 0},
    -		{"Encode", Func, 0},
    -		{"MaxEncodedLen", Func, 0},
    -		{"NewDecoder", Func, 0},
    -		{"NewEncoder", Func, 0},
    +		{"(CorruptInputError).Error", Method, 0, ""},
    +		{"CorruptInputError", Type, 0, ""},
    +		{"Decode", Func, 0, "func(dst []byte, src []byte, flush bool) (ndst int, nsrc int, err error)"},
    +		{"Encode", Func, 0, "func(dst []byte, src []byte) int"},
    +		{"MaxEncodedLen", Func, 0, "func(n int) int"},
    +		{"NewDecoder", Func, 0, "func(r io.Reader) io.Reader"},
    +		{"NewEncoder", Func, 0, "func(w io.Writer) io.WriteCloser"},
     	},
     	"encoding/asn1": {
    -		{"(BitString).At", Method, 0},
    -		{"(BitString).RightAlign", Method, 0},
    -		{"(ObjectIdentifier).Equal", Method, 0},
    -		{"(ObjectIdentifier).String", Method, 3},
    -		{"(StructuralError).Error", Method, 0},
    -		{"(SyntaxError).Error", Method, 0},
    -		{"BitString", Type, 0},
    -		{"BitString.BitLength", Field, 0},
    -		{"BitString.Bytes", Field, 0},
    -		{"ClassApplication", Const, 6},
    -		{"ClassContextSpecific", Const, 6},
    -		{"ClassPrivate", Const, 6},
    -		{"ClassUniversal", Const, 6},
    -		{"Enumerated", Type, 0},
    -		{"Flag", Type, 0},
    -		{"Marshal", Func, 0},
    -		{"MarshalWithParams", Func, 10},
    -		{"NullBytes", Var, 9},
    -		{"NullRawValue", Var, 9},
    -		{"ObjectIdentifier", Type, 0},
    -		{"RawContent", Type, 0},
    -		{"RawValue", Type, 0},
    -		{"RawValue.Bytes", Field, 0},
    -		{"RawValue.Class", Field, 0},
    -		{"RawValue.FullBytes", Field, 0},
    -		{"RawValue.IsCompound", Field, 0},
    -		{"RawValue.Tag", Field, 0},
    -		{"StructuralError", Type, 0},
    -		{"StructuralError.Msg", Field, 0},
    -		{"SyntaxError", Type, 0},
    -		{"SyntaxError.Msg", Field, 0},
    -		{"TagBMPString", Const, 14},
    -		{"TagBitString", Const, 6},
    -		{"TagBoolean", Const, 6},
    -		{"TagEnum", Const, 6},
    -		{"TagGeneralString", Const, 6},
    -		{"TagGeneralizedTime", Const, 6},
    -		{"TagIA5String", Const, 6},
    -		{"TagInteger", Const, 6},
    -		{"TagNull", Const, 9},
    -		{"TagNumericString", Const, 10},
    -		{"TagOID", Const, 6},
    -		{"TagOctetString", Const, 6},
    -		{"TagPrintableString", Const, 6},
    -		{"TagSequence", Const, 6},
    -		{"TagSet", Const, 6},
    -		{"TagT61String", Const, 6},
    -		{"TagUTCTime", Const, 6},
    -		{"TagUTF8String", Const, 6},
    -		{"Unmarshal", Func, 0},
    -		{"UnmarshalWithParams", Func, 0},
    +		{"(BitString).At", Method, 0, ""},
    +		{"(BitString).RightAlign", Method, 0, ""},
    +		{"(ObjectIdentifier).Equal", Method, 0, ""},
    +		{"(ObjectIdentifier).String", Method, 3, ""},
    +		{"(StructuralError).Error", Method, 0, ""},
    +		{"(SyntaxError).Error", Method, 0, ""},
    +		{"BitString", Type, 0, ""},
    +		{"BitString.BitLength", Field, 0, ""},
    +		{"BitString.Bytes", Field, 0, ""},
    +		{"ClassApplication", Const, 6, ""},
    +		{"ClassContextSpecific", Const, 6, ""},
    +		{"ClassPrivate", Const, 6, ""},
    +		{"ClassUniversal", Const, 6, ""},
    +		{"Enumerated", Type, 0, ""},
    +		{"Flag", Type, 0, ""},
    +		{"Marshal", Func, 0, "func(val any) ([]byte, error)"},
    +		{"MarshalWithParams", Func, 10, "func(val any, params string) ([]byte, error)"},
    +		{"NullBytes", Var, 9, ""},
    +		{"NullRawValue", Var, 9, ""},
    +		{"ObjectIdentifier", Type, 0, ""},
    +		{"RawContent", Type, 0, ""},
    +		{"RawValue", Type, 0, ""},
    +		{"RawValue.Bytes", Field, 0, ""},
    +		{"RawValue.Class", Field, 0, ""},
    +		{"RawValue.FullBytes", Field, 0, ""},
    +		{"RawValue.IsCompound", Field, 0, ""},
    +		{"RawValue.Tag", Field, 0, ""},
    +		{"StructuralError", Type, 0, ""},
    +		{"StructuralError.Msg", Field, 0, ""},
    +		{"SyntaxError", Type, 0, ""},
    +		{"SyntaxError.Msg", Field, 0, ""},
    +		{"TagBMPString", Const, 14, ""},
    +		{"TagBitString", Const, 6, ""},
    +		{"TagBoolean", Const, 6, ""},
    +		{"TagEnum", Const, 6, ""},
    +		{"TagGeneralString", Const, 6, ""},
    +		{"TagGeneralizedTime", Const, 6, ""},
    +		{"TagIA5String", Const, 6, ""},
    +		{"TagInteger", Const, 6, ""},
    +		{"TagNull", Const, 9, ""},
    +		{"TagNumericString", Const, 10, ""},
    +		{"TagOID", Const, 6, ""},
    +		{"TagOctetString", Const, 6, ""},
    +		{"TagPrintableString", Const, 6, ""},
    +		{"TagSequence", Const, 6, ""},
    +		{"TagSet", Const, 6, ""},
    +		{"TagT61String", Const, 6, ""},
    +		{"TagUTCTime", Const, 6, ""},
    +		{"TagUTF8String", Const, 6, ""},
    +		{"Unmarshal", Func, 0, "func(b []byte, val any) (rest []byte, err error)"},
    +		{"UnmarshalWithParams", Func, 0, "func(b []byte, val any, params string) (rest []byte, err error)"},
     	},
     	"encoding/base32": {
    -		{"(*Encoding).AppendDecode", Method, 22},
    -		{"(*Encoding).AppendEncode", Method, 22},
    -		{"(*Encoding).Decode", Method, 0},
    -		{"(*Encoding).DecodeString", Method, 0},
    -		{"(*Encoding).DecodedLen", Method, 0},
    -		{"(*Encoding).Encode", Method, 0},
    -		{"(*Encoding).EncodeToString", Method, 0},
    -		{"(*Encoding).EncodedLen", Method, 0},
    -		{"(CorruptInputError).Error", Method, 0},
    -		{"(Encoding).WithPadding", Method, 9},
    -		{"CorruptInputError", Type, 0},
    -		{"Encoding", Type, 0},
    -		{"HexEncoding", Var, 0},
    -		{"NewDecoder", Func, 0},
    -		{"NewEncoder", Func, 0},
    -		{"NewEncoding", Func, 0},
    -		{"NoPadding", Const, 9},
    -		{"StdEncoding", Var, 0},
    -		{"StdPadding", Const, 9},
    +		{"(*Encoding).AppendDecode", Method, 22, ""},
    +		{"(*Encoding).AppendEncode", Method, 22, ""},
    +		{"(*Encoding).Decode", Method, 0, ""},
    +		{"(*Encoding).DecodeString", Method, 0, ""},
    +		{"(*Encoding).DecodedLen", Method, 0, ""},
    +		{"(*Encoding).Encode", Method, 0, ""},
    +		{"(*Encoding).EncodeToString", Method, 0, ""},
    +		{"(*Encoding).EncodedLen", Method, 0, ""},
    +		{"(CorruptInputError).Error", Method, 0, ""},
    +		{"(Encoding).WithPadding", Method, 9, ""},
    +		{"CorruptInputError", Type, 0, ""},
    +		{"Encoding", Type, 0, ""},
    +		{"HexEncoding", Var, 0, ""},
    +		{"NewDecoder", Func, 0, "func(enc *Encoding, r io.Reader) io.Reader"},
    +		{"NewEncoder", Func, 0, "func(enc *Encoding, w io.Writer) io.WriteCloser"},
    +		{"NewEncoding", Func, 0, "func(encoder string) *Encoding"},
    +		{"NoPadding", Const, 9, ""},
    +		{"StdEncoding", Var, 0, ""},
    +		{"StdPadding", Const, 9, ""},
     	},
     	"encoding/base64": {
    -		{"(*Encoding).AppendDecode", Method, 22},
    -		{"(*Encoding).AppendEncode", Method, 22},
    -		{"(*Encoding).Decode", Method, 0},
    -		{"(*Encoding).DecodeString", Method, 0},
    -		{"(*Encoding).DecodedLen", Method, 0},
    -		{"(*Encoding).Encode", Method, 0},
    -		{"(*Encoding).EncodeToString", Method, 0},
    -		{"(*Encoding).EncodedLen", Method, 0},
    -		{"(CorruptInputError).Error", Method, 0},
    -		{"(Encoding).Strict", Method, 8},
    -		{"(Encoding).WithPadding", Method, 5},
    -		{"CorruptInputError", Type, 0},
    -		{"Encoding", Type, 0},
    -		{"NewDecoder", Func, 0},
    -		{"NewEncoder", Func, 0},
    -		{"NewEncoding", Func, 0},
    -		{"NoPadding", Const, 5},
    -		{"RawStdEncoding", Var, 5},
    -		{"RawURLEncoding", Var, 5},
    -		{"StdEncoding", Var, 0},
    -		{"StdPadding", Const, 5},
    -		{"URLEncoding", Var, 0},
    +		{"(*Encoding).AppendDecode", Method, 22, ""},
    +		{"(*Encoding).AppendEncode", Method, 22, ""},
    +		{"(*Encoding).Decode", Method, 0, ""},
    +		{"(*Encoding).DecodeString", Method, 0, ""},
    +		{"(*Encoding).DecodedLen", Method, 0, ""},
    +		{"(*Encoding).Encode", Method, 0, ""},
    +		{"(*Encoding).EncodeToString", Method, 0, ""},
    +		{"(*Encoding).EncodedLen", Method, 0, ""},
    +		{"(CorruptInputError).Error", Method, 0, ""},
    +		{"(Encoding).Strict", Method, 8, ""},
    +		{"(Encoding).WithPadding", Method, 5, ""},
    +		{"CorruptInputError", Type, 0, ""},
    +		{"Encoding", Type, 0, ""},
    +		{"NewDecoder", Func, 0, "func(enc *Encoding, r io.Reader) io.Reader"},
    +		{"NewEncoder", Func, 0, "func(enc *Encoding, w io.Writer) io.WriteCloser"},
    +		{"NewEncoding", Func, 0, "func(encoder string) *Encoding"},
    +		{"NoPadding", Const, 5, ""},
    +		{"RawStdEncoding", Var, 5, ""},
    +		{"RawURLEncoding", Var, 5, ""},
    +		{"StdEncoding", Var, 0, ""},
    +		{"StdPadding", Const, 5, ""},
    +		{"URLEncoding", Var, 0, ""},
     	},
     	"encoding/binary": {
    -		{"Append", Func, 23},
    -		{"AppendByteOrder", Type, 19},
    -		{"AppendUvarint", Func, 19},
    -		{"AppendVarint", Func, 19},
    -		{"BigEndian", Var, 0},
    -		{"ByteOrder", Type, 0},
    -		{"Decode", Func, 23},
    -		{"Encode", Func, 23},
    -		{"LittleEndian", Var, 0},
    -		{"MaxVarintLen16", Const, 0},
    -		{"MaxVarintLen32", Const, 0},
    -		{"MaxVarintLen64", Const, 0},
    -		{"NativeEndian", Var, 21},
    -		{"PutUvarint", Func, 0},
    -		{"PutVarint", Func, 0},
    -		{"Read", Func, 0},
    -		{"ReadUvarint", Func, 0},
    -		{"ReadVarint", Func, 0},
    -		{"Size", Func, 0},
    -		{"Uvarint", Func, 0},
    -		{"Varint", Func, 0},
    -		{"Write", Func, 0},
    +		{"Append", Func, 23, "func(buf []byte, order ByteOrder, data any) ([]byte, error)"},
    +		{"AppendByteOrder", Type, 19, ""},
    +		{"AppendUvarint", Func, 19, "func(buf []byte, x uint64) []byte"},
    +		{"AppendVarint", Func, 19, "func(buf []byte, x int64) []byte"},
    +		{"BigEndian", Var, 0, ""},
    +		{"ByteOrder", Type, 0, ""},
    +		{"Decode", Func, 23, "func(buf []byte, order ByteOrder, data any) (int, error)"},
    +		{"Encode", Func, 23, "func(buf []byte, order ByteOrder, data any) (int, error)"},
    +		{"LittleEndian", Var, 0, ""},
    +		{"MaxVarintLen16", Const, 0, ""},
    +		{"MaxVarintLen32", Const, 0, ""},
    +		{"MaxVarintLen64", Const, 0, ""},
    +		{"NativeEndian", Var, 21, ""},
    +		{"PutUvarint", Func, 0, "func(buf []byte, x uint64) int"},
    +		{"PutVarint", Func, 0, "func(buf []byte, x int64) int"},
    +		{"Read", Func, 0, "func(r io.Reader, order ByteOrder, data any) error"},
    +		{"ReadUvarint", Func, 0, "func(r io.ByteReader) (uint64, error)"},
    +		{"ReadVarint", Func, 0, "func(r io.ByteReader) (int64, error)"},
    +		{"Size", Func, 0, "func(v any) int"},
    +		{"Uvarint", Func, 0, "func(buf []byte) (uint64, int)"},
    +		{"Varint", Func, 0, "func(buf []byte) (int64, int)"},
    +		{"Write", Func, 0, "func(w io.Writer, order ByteOrder, data any) error"},
     	},
     	"encoding/csv": {
    -		{"(*ParseError).Error", Method, 0},
    -		{"(*ParseError).Unwrap", Method, 13},
    -		{"(*Reader).FieldPos", Method, 17},
    -		{"(*Reader).InputOffset", Method, 19},
    -		{"(*Reader).Read", Method, 0},
    -		{"(*Reader).ReadAll", Method, 0},
    -		{"(*Writer).Error", Method, 1},
    -		{"(*Writer).Flush", Method, 0},
    -		{"(*Writer).Write", Method, 0},
    -		{"(*Writer).WriteAll", Method, 0},
    -		{"ErrBareQuote", Var, 0},
    -		{"ErrFieldCount", Var, 0},
    -		{"ErrQuote", Var, 0},
    -		{"ErrTrailingComma", Var, 0},
    -		{"NewReader", Func, 0},
    -		{"NewWriter", Func, 0},
    -		{"ParseError", Type, 0},
    -		{"ParseError.Column", Field, 0},
    -		{"ParseError.Err", Field, 0},
    -		{"ParseError.Line", Field, 0},
    -		{"ParseError.StartLine", Field, 10},
    -		{"Reader", Type, 0},
    -		{"Reader.Comma", Field, 0},
    -		{"Reader.Comment", Field, 0},
    -		{"Reader.FieldsPerRecord", Field, 0},
    -		{"Reader.LazyQuotes", Field, 0},
    -		{"Reader.ReuseRecord", Field, 9},
    -		{"Reader.TrailingComma", Field, 0},
    -		{"Reader.TrimLeadingSpace", Field, 0},
    -		{"Writer", Type, 0},
    -		{"Writer.Comma", Field, 0},
    -		{"Writer.UseCRLF", Field, 0},
    +		{"(*ParseError).Error", Method, 0, ""},
    +		{"(*ParseError).Unwrap", Method, 13, ""},
    +		{"(*Reader).FieldPos", Method, 17, ""},
    +		{"(*Reader).InputOffset", Method, 19, ""},
    +		{"(*Reader).Read", Method, 0, ""},
    +		{"(*Reader).ReadAll", Method, 0, ""},
    +		{"(*Writer).Error", Method, 1, ""},
    +		{"(*Writer).Flush", Method, 0, ""},
    +		{"(*Writer).Write", Method, 0, ""},
    +		{"(*Writer).WriteAll", Method, 0, ""},
    +		{"ErrBareQuote", Var, 0, ""},
    +		{"ErrFieldCount", Var, 0, ""},
    +		{"ErrQuote", Var, 0, ""},
    +		{"ErrTrailingComma", Var, 0, ""},
    +		{"NewReader", Func, 0, "func(r io.Reader) *Reader"},
    +		{"NewWriter", Func, 0, "func(w io.Writer) *Writer"},
    +		{"ParseError", Type, 0, ""},
    +		{"ParseError.Column", Field, 0, ""},
    +		{"ParseError.Err", Field, 0, ""},
    +		{"ParseError.Line", Field, 0, ""},
    +		{"ParseError.StartLine", Field, 10, ""},
    +		{"Reader", Type, 0, ""},
    +		{"Reader.Comma", Field, 0, ""},
    +		{"Reader.Comment", Field, 0, ""},
    +		{"Reader.FieldsPerRecord", Field, 0, ""},
    +		{"Reader.LazyQuotes", Field, 0, ""},
    +		{"Reader.ReuseRecord", Field, 9, ""},
    +		{"Reader.TrailingComma", Field, 0, ""},
    +		{"Reader.TrimLeadingSpace", Field, 0, ""},
    +		{"Writer", Type, 0, ""},
    +		{"Writer.Comma", Field, 0, ""},
    +		{"Writer.UseCRLF", Field, 0, ""},
     	},
     	"encoding/gob": {
    -		{"(*Decoder).Decode", Method, 0},
    -		{"(*Decoder).DecodeValue", Method, 0},
    -		{"(*Encoder).Encode", Method, 0},
    -		{"(*Encoder).EncodeValue", Method, 0},
    -		{"CommonType", Type, 0},
    -		{"CommonType.Id", Field, 0},
    -		{"CommonType.Name", Field, 0},
    -		{"Decoder", Type, 0},
    -		{"Encoder", Type, 0},
    -		{"GobDecoder", Type, 0},
    -		{"GobEncoder", Type, 0},
    -		{"NewDecoder", Func, 0},
    -		{"NewEncoder", Func, 0},
    -		{"Register", Func, 0},
    -		{"RegisterName", Func, 0},
    +		{"(*Decoder).Decode", Method, 0, ""},
    +		{"(*Decoder).DecodeValue", Method, 0, ""},
    +		{"(*Encoder).Encode", Method, 0, ""},
    +		{"(*Encoder).EncodeValue", Method, 0, ""},
    +		{"CommonType", Type, 0, ""},
    +		{"CommonType.Id", Field, 0, ""},
    +		{"CommonType.Name", Field, 0, ""},
    +		{"Decoder", Type, 0, ""},
    +		{"Encoder", Type, 0, ""},
    +		{"GobDecoder", Type, 0, ""},
    +		{"GobEncoder", Type, 0, ""},
    +		{"NewDecoder", Func, 0, "func(r io.Reader) *Decoder"},
    +		{"NewEncoder", Func, 0, "func(w io.Writer) *Encoder"},
    +		{"Register", Func, 0, "func(value any)"},
    +		{"RegisterName", Func, 0, "func(name string, value any)"},
     	},
     	"encoding/hex": {
    -		{"(InvalidByteError).Error", Method, 0},
    -		{"AppendDecode", Func, 22},
    -		{"AppendEncode", Func, 22},
    -		{"Decode", Func, 0},
    -		{"DecodeString", Func, 0},
    -		{"DecodedLen", Func, 0},
    -		{"Dump", Func, 0},
    -		{"Dumper", Func, 0},
    -		{"Encode", Func, 0},
    -		{"EncodeToString", Func, 0},
    -		{"EncodedLen", Func, 0},
    -		{"ErrLength", Var, 0},
    -		{"InvalidByteError", Type, 0},
    -		{"NewDecoder", Func, 10},
    -		{"NewEncoder", Func, 10},
    +		{"(InvalidByteError).Error", Method, 0, ""},
    +		{"AppendDecode", Func, 22, "func(dst []byte, src []byte) ([]byte, error)"},
    +		{"AppendEncode", Func, 22, "func(dst []byte, src []byte) []byte"},
    +		{"Decode", Func, 0, "func(dst []byte, src []byte) (int, error)"},
    +		{"DecodeString", Func, 0, "func(s string) ([]byte, error)"},
    +		{"DecodedLen", Func, 0, "func(x int) int"},
    +		{"Dump", Func, 0, "func(data []byte) string"},
    +		{"Dumper", Func, 0, "func(w io.Writer) io.WriteCloser"},
    +		{"Encode", Func, 0, "func(dst []byte, src []byte) int"},
    +		{"EncodeToString", Func, 0, "func(src []byte) string"},
    +		{"EncodedLen", Func, 0, "func(n int) int"},
    +		{"ErrLength", Var, 0, ""},
    +		{"InvalidByteError", Type, 0, ""},
    +		{"NewDecoder", Func, 10, "func(r io.Reader) io.Reader"},
    +		{"NewEncoder", Func, 10, "func(w io.Writer) io.Writer"},
     	},
     	"encoding/json": {
    -		{"(*Decoder).Buffered", Method, 1},
    -		{"(*Decoder).Decode", Method, 0},
    -		{"(*Decoder).DisallowUnknownFields", Method, 10},
    -		{"(*Decoder).InputOffset", Method, 14},
    -		{"(*Decoder).More", Method, 5},
    -		{"(*Decoder).Token", Method, 5},
    -		{"(*Decoder).UseNumber", Method, 1},
    -		{"(*Encoder).Encode", Method, 0},
    -		{"(*Encoder).SetEscapeHTML", Method, 7},
    -		{"(*Encoder).SetIndent", Method, 7},
    -		{"(*InvalidUTF8Error).Error", Method, 0},
    -		{"(*InvalidUnmarshalError).Error", Method, 0},
    -		{"(*MarshalerError).Error", Method, 0},
    -		{"(*MarshalerError).Unwrap", Method, 13},
    -		{"(*RawMessage).MarshalJSON", Method, 0},
    -		{"(*RawMessage).UnmarshalJSON", Method, 0},
    -		{"(*SyntaxError).Error", Method, 0},
    -		{"(*UnmarshalFieldError).Error", Method, 0},
    -		{"(*UnmarshalTypeError).Error", Method, 0},
    -		{"(*UnsupportedTypeError).Error", Method, 0},
    -		{"(*UnsupportedValueError).Error", Method, 0},
    -		{"(Delim).String", Method, 5},
    -		{"(Number).Float64", Method, 1},
    -		{"(Number).Int64", Method, 1},
    -		{"(Number).String", Method, 1},
    -		{"(RawMessage).MarshalJSON", Method, 8},
    -		{"Compact", Func, 0},
    -		{"Decoder", Type, 0},
    -		{"Delim", Type, 5},
    -		{"Encoder", Type, 0},
    -		{"HTMLEscape", Func, 0},
    -		{"Indent", Func, 0},
    -		{"InvalidUTF8Error", Type, 0},
    -		{"InvalidUTF8Error.S", Field, 0},
    -		{"InvalidUnmarshalError", Type, 0},
    -		{"InvalidUnmarshalError.Type", Field, 0},
    -		{"Marshal", Func, 0},
    -		{"MarshalIndent", Func, 0},
    -		{"Marshaler", Type, 0},
    -		{"MarshalerError", Type, 0},
    -		{"MarshalerError.Err", Field, 0},
    -		{"MarshalerError.Type", Field, 0},
    -		{"NewDecoder", Func, 0},
    -		{"NewEncoder", Func, 0},
    -		{"Number", Type, 1},
    -		{"RawMessage", Type, 0},
    -		{"SyntaxError", Type, 0},
    -		{"SyntaxError.Offset", Field, 0},
    -		{"Token", Type, 5},
    -		{"Unmarshal", Func, 0},
    -		{"UnmarshalFieldError", Type, 0},
    -		{"UnmarshalFieldError.Field", Field, 0},
    -		{"UnmarshalFieldError.Key", Field, 0},
    -		{"UnmarshalFieldError.Type", Field, 0},
    -		{"UnmarshalTypeError", Type, 0},
    -		{"UnmarshalTypeError.Field", Field, 8},
    -		{"UnmarshalTypeError.Offset", Field, 5},
    -		{"UnmarshalTypeError.Struct", Field, 8},
    -		{"UnmarshalTypeError.Type", Field, 0},
    -		{"UnmarshalTypeError.Value", Field, 0},
    -		{"Unmarshaler", Type, 0},
    -		{"UnsupportedTypeError", Type, 0},
    -		{"UnsupportedTypeError.Type", Field, 0},
    -		{"UnsupportedValueError", Type, 0},
    -		{"UnsupportedValueError.Str", Field, 0},
    -		{"UnsupportedValueError.Value", Field, 0},
    -		{"Valid", Func, 9},
    +		{"(*Decoder).Buffered", Method, 1, ""},
    +		{"(*Decoder).Decode", Method, 0, ""},
    +		{"(*Decoder).DisallowUnknownFields", Method, 10, ""},
    +		{"(*Decoder).InputOffset", Method, 14, ""},
    +		{"(*Decoder).More", Method, 5, ""},
    +		{"(*Decoder).Token", Method, 5, ""},
    +		{"(*Decoder).UseNumber", Method, 1, ""},
    +		{"(*Encoder).Encode", Method, 0, ""},
    +		{"(*Encoder).SetEscapeHTML", Method, 7, ""},
    +		{"(*Encoder).SetIndent", Method, 7, ""},
    +		{"(*InvalidUTF8Error).Error", Method, 0, ""},
    +		{"(*InvalidUnmarshalError).Error", Method, 0, ""},
    +		{"(*MarshalerError).Error", Method, 0, ""},
    +		{"(*MarshalerError).Unwrap", Method, 13, ""},
    +		{"(*RawMessage).MarshalJSON", Method, 0, ""},
    +		{"(*RawMessage).UnmarshalJSON", Method, 0, ""},
    +		{"(*SyntaxError).Error", Method, 0, ""},
    +		{"(*UnmarshalFieldError).Error", Method, 0, ""},
    +		{"(*UnmarshalTypeError).Error", Method, 0, ""},
    +		{"(*UnsupportedTypeError).Error", Method, 0, ""},
    +		{"(*UnsupportedValueError).Error", Method, 0, ""},
    +		{"(Delim).String", Method, 5, ""},
    +		{"(Number).Float64", Method, 1, ""},
    +		{"(Number).Int64", Method, 1, ""},
    +		{"(Number).String", Method, 1, ""},
    +		{"(RawMessage).MarshalJSON", Method, 8, ""},
    +		{"Compact", Func, 0, "func(dst *bytes.Buffer, src []byte) error"},
    +		{"Decoder", Type, 0, ""},
    +		{"Delim", Type, 5, ""},
    +		{"Encoder", Type, 0, ""},
    +		{"HTMLEscape", Func, 0, "func(dst *bytes.Buffer, src []byte)"},
    +		{"Indent", Func, 0, "func(dst *bytes.Buffer, src []byte, prefix string, indent string) error"},
    +		{"InvalidUTF8Error", Type, 0, ""},
    +		{"InvalidUTF8Error.S", Field, 0, ""},
    +		{"InvalidUnmarshalError", Type, 0, ""},
    +		{"InvalidUnmarshalError.Type", Field, 0, ""},
    +		{"Marshal", Func, 0, "func(v any) ([]byte, error)"},
    +		{"MarshalIndent", Func, 0, "func(v any, prefix string, indent string) ([]byte, error)"},
    +		{"Marshaler", Type, 0, ""},
    +		{"MarshalerError", Type, 0, ""},
    +		{"MarshalerError.Err", Field, 0, ""},
    +		{"MarshalerError.Type", Field, 0, ""},
    +		{"NewDecoder", Func, 0, "func(r io.Reader) *Decoder"},
    +		{"NewEncoder", Func, 0, "func(w io.Writer) *Encoder"},
    +		{"Number", Type, 1, ""},
    +		{"RawMessage", Type, 0, ""},
    +		{"SyntaxError", Type, 0, ""},
    +		{"SyntaxError.Offset", Field, 0, ""},
    +		{"Token", Type, 5, ""},
    +		{"Unmarshal", Func, 0, "func(data []byte, v any) error"},
    +		{"UnmarshalFieldError", Type, 0, ""},
    +		{"UnmarshalFieldError.Field", Field, 0, ""},
    +		{"UnmarshalFieldError.Key", Field, 0, ""},
    +		{"UnmarshalFieldError.Type", Field, 0, ""},
    +		{"UnmarshalTypeError", Type, 0, ""},
    +		{"UnmarshalTypeError.Field", Field, 8, ""},
    +		{"UnmarshalTypeError.Offset", Field, 5, ""},
    +		{"UnmarshalTypeError.Struct", Field, 8, ""},
    +		{"UnmarshalTypeError.Type", Field, 0, ""},
    +		{"UnmarshalTypeError.Value", Field, 0, ""},
    +		{"Unmarshaler", Type, 0, ""},
    +		{"UnsupportedTypeError", Type, 0, ""},
    +		{"UnsupportedTypeError.Type", Field, 0, ""},
    +		{"UnsupportedValueError", Type, 0, ""},
    +		{"UnsupportedValueError.Str", Field, 0, ""},
    +		{"UnsupportedValueError.Value", Field, 0, ""},
    +		{"Valid", Func, 9, "func(data []byte) bool"},
     	},
     	"encoding/pem": {
    -		{"Block", Type, 0},
    -		{"Block.Bytes", Field, 0},
    -		{"Block.Headers", Field, 0},
    -		{"Block.Type", Field, 0},
    -		{"Decode", Func, 0},
    -		{"Encode", Func, 0},
    -		{"EncodeToMemory", Func, 0},
    +		{"Block", Type, 0, ""},
    +		{"Block.Bytes", Field, 0, ""},
    +		{"Block.Headers", Field, 0, ""},
    +		{"Block.Type", Field, 0, ""},
    +		{"Decode", Func, 0, "func(data []byte) (p *Block, rest []byte)"},
    +		{"Encode", Func, 0, "func(out io.Writer, b *Block) error"},
    +		{"EncodeToMemory", Func, 0, "func(b *Block) []byte"},
     	},
     	"encoding/xml": {
    -		{"(*Decoder).Decode", Method, 0},
    -		{"(*Decoder).DecodeElement", Method, 0},
    -		{"(*Decoder).InputOffset", Method, 4},
    -		{"(*Decoder).InputPos", Method, 19},
    -		{"(*Decoder).RawToken", Method, 0},
    -		{"(*Decoder).Skip", Method, 0},
    -		{"(*Decoder).Token", Method, 0},
    -		{"(*Encoder).Close", Method, 20},
    -		{"(*Encoder).Encode", Method, 0},
    -		{"(*Encoder).EncodeElement", Method, 2},
    -		{"(*Encoder).EncodeToken", Method, 2},
    -		{"(*Encoder).Flush", Method, 2},
    -		{"(*Encoder).Indent", Method, 1},
    -		{"(*SyntaxError).Error", Method, 0},
    -		{"(*TagPathError).Error", Method, 0},
    -		{"(*UnsupportedTypeError).Error", Method, 0},
    -		{"(CharData).Copy", Method, 0},
    -		{"(Comment).Copy", Method, 0},
    -		{"(Directive).Copy", Method, 0},
    -		{"(ProcInst).Copy", Method, 0},
    -		{"(StartElement).Copy", Method, 0},
    -		{"(StartElement).End", Method, 2},
    -		{"(UnmarshalError).Error", Method, 0},
    -		{"Attr", Type, 0},
    -		{"Attr.Name", Field, 0},
    -		{"Attr.Value", Field, 0},
    -		{"CharData", Type, 0},
    -		{"Comment", Type, 0},
    -		{"CopyToken", Func, 0},
    -		{"Decoder", Type, 0},
    -		{"Decoder.AutoClose", Field, 0},
    -		{"Decoder.CharsetReader", Field, 0},
    -		{"Decoder.DefaultSpace", Field, 1},
    -		{"Decoder.Entity", Field, 0},
    -		{"Decoder.Strict", Field, 0},
    -		{"Directive", Type, 0},
    -		{"Encoder", Type, 0},
    -		{"EndElement", Type, 0},
    -		{"EndElement.Name", Field, 0},
    -		{"Escape", Func, 0},
    -		{"EscapeText", Func, 1},
    -		{"HTMLAutoClose", Var, 0},
    -		{"HTMLEntity", Var, 0},
    -		{"Header", Const, 0},
    -		{"Marshal", Func, 0},
    -		{"MarshalIndent", Func, 0},
    -		{"Marshaler", Type, 2},
    -		{"MarshalerAttr", Type, 2},
    -		{"Name", Type, 0},
    -		{"Name.Local", Field, 0},
    -		{"Name.Space", Field, 0},
    -		{"NewDecoder", Func, 0},
    -		{"NewEncoder", Func, 0},
    -		{"NewTokenDecoder", Func, 10},
    -		{"ProcInst", Type, 0},
    -		{"ProcInst.Inst", Field, 0},
    -		{"ProcInst.Target", Field, 0},
    -		{"StartElement", Type, 0},
    -		{"StartElement.Attr", Field, 0},
    -		{"StartElement.Name", Field, 0},
    -		{"SyntaxError", Type, 0},
    -		{"SyntaxError.Line", Field, 0},
    -		{"SyntaxError.Msg", Field, 0},
    -		{"TagPathError", Type, 0},
    -		{"TagPathError.Field1", Field, 0},
    -		{"TagPathError.Field2", Field, 0},
    -		{"TagPathError.Struct", Field, 0},
    -		{"TagPathError.Tag1", Field, 0},
    -		{"TagPathError.Tag2", Field, 0},
    -		{"Token", Type, 0},
    -		{"TokenReader", Type, 10},
    -		{"Unmarshal", Func, 0},
    -		{"UnmarshalError", Type, 0},
    -		{"Unmarshaler", Type, 2},
    -		{"UnmarshalerAttr", Type, 2},
    -		{"UnsupportedTypeError", Type, 0},
    -		{"UnsupportedTypeError.Type", Field, 0},
    +		{"(*Decoder).Decode", Method, 0, ""},
    +		{"(*Decoder).DecodeElement", Method, 0, ""},
    +		{"(*Decoder).InputOffset", Method, 4, ""},
    +		{"(*Decoder).InputPos", Method, 19, ""},
    +		{"(*Decoder).RawToken", Method, 0, ""},
    +		{"(*Decoder).Skip", Method, 0, ""},
    +		{"(*Decoder).Token", Method, 0, ""},
    +		{"(*Encoder).Close", Method, 20, ""},
    +		{"(*Encoder).Encode", Method, 0, ""},
    +		{"(*Encoder).EncodeElement", Method, 2, ""},
    +		{"(*Encoder).EncodeToken", Method, 2, ""},
    +		{"(*Encoder).Flush", Method, 2, ""},
    +		{"(*Encoder).Indent", Method, 1, ""},
    +		{"(*SyntaxError).Error", Method, 0, ""},
    +		{"(*TagPathError).Error", Method, 0, ""},
    +		{"(*UnsupportedTypeError).Error", Method, 0, ""},
    +		{"(CharData).Copy", Method, 0, ""},
    +		{"(Comment).Copy", Method, 0, ""},
    +		{"(Directive).Copy", Method, 0, ""},
    +		{"(ProcInst).Copy", Method, 0, ""},
    +		{"(StartElement).Copy", Method, 0, ""},
    +		{"(StartElement).End", Method, 2, ""},
    +		{"(UnmarshalError).Error", Method, 0, ""},
    +		{"Attr", Type, 0, ""},
    +		{"Attr.Name", Field, 0, ""},
    +		{"Attr.Value", Field, 0, ""},
    +		{"CharData", Type, 0, ""},
    +		{"Comment", Type, 0, ""},
    +		{"CopyToken", Func, 0, "func(t Token) Token"},
    +		{"Decoder", Type, 0, ""},
    +		{"Decoder.AutoClose", Field, 0, ""},
    +		{"Decoder.CharsetReader", Field, 0, ""},
    +		{"Decoder.DefaultSpace", Field, 1, ""},
    +		{"Decoder.Entity", Field, 0, ""},
    +		{"Decoder.Strict", Field, 0, ""},
    +		{"Directive", Type, 0, ""},
    +		{"Encoder", Type, 0, ""},
    +		{"EndElement", Type, 0, ""},
    +		{"EndElement.Name", Field, 0, ""},
    +		{"Escape", Func, 0, "func(w io.Writer, s []byte)"},
    +		{"EscapeText", Func, 1, "func(w io.Writer, s []byte) error"},
    +		{"HTMLAutoClose", Var, 0, ""},
    +		{"HTMLEntity", Var, 0, ""},
    +		{"Header", Const, 0, ""},
    +		{"Marshal", Func, 0, "func(v any) ([]byte, error)"},
    +		{"MarshalIndent", Func, 0, "func(v any, prefix string, indent string) ([]byte, error)"},
    +		{"Marshaler", Type, 2, ""},
    +		{"MarshalerAttr", Type, 2, ""},
    +		{"Name", Type, 0, ""},
    +		{"Name.Local", Field, 0, ""},
    +		{"Name.Space", Field, 0, ""},
    +		{"NewDecoder", Func, 0, "func(r io.Reader) *Decoder"},
    +		{"NewEncoder", Func, 0, "func(w io.Writer) *Encoder"},
    +		{"NewTokenDecoder", Func, 10, "func(t TokenReader) *Decoder"},
    +		{"ProcInst", Type, 0, ""},
    +		{"ProcInst.Inst", Field, 0, ""},
    +		{"ProcInst.Target", Field, 0, ""},
    +		{"StartElement", Type, 0, ""},
    +		{"StartElement.Attr", Field, 0, ""},
    +		{"StartElement.Name", Field, 0, ""},
    +		{"SyntaxError", Type, 0, ""},
    +		{"SyntaxError.Line", Field, 0, ""},
    +		{"SyntaxError.Msg", Field, 0, ""},
    +		{"TagPathError", Type, 0, ""},
    +		{"TagPathError.Field1", Field, 0, ""},
    +		{"TagPathError.Field2", Field, 0, ""},
    +		{"TagPathError.Struct", Field, 0, ""},
    +		{"TagPathError.Tag1", Field, 0, ""},
    +		{"TagPathError.Tag2", Field, 0, ""},
    +		{"Token", Type, 0, ""},
    +		{"TokenReader", Type, 10, ""},
    +		{"Unmarshal", Func, 0, "func(data []byte, v any) error"},
    +		{"UnmarshalError", Type, 0, ""},
    +		{"Unmarshaler", Type, 2, ""},
    +		{"UnmarshalerAttr", Type, 2, ""},
    +		{"UnsupportedTypeError", Type, 0, ""},
    +		{"UnsupportedTypeError.Type", Field, 0, ""},
     	},
     	"errors": {
    -		{"As", Func, 13},
    -		{"ErrUnsupported", Var, 21},
    -		{"Is", Func, 13},
    -		{"Join", Func, 20},
    -		{"New", Func, 0},
    -		{"Unwrap", Func, 13},
    +		{"As", Func, 13, "func(err error, target any) bool"},
    +		{"ErrUnsupported", Var, 21, ""},
    +		{"Is", Func, 13, "func(err error, target error) bool"},
    +		{"Join", Func, 20, "func(errs ...error) error"},
    +		{"New", Func, 0, "func(text string) error"},
    +		{"Unwrap", Func, 13, "func(err error) error"},
     	},
     	"expvar": {
    -		{"(*Float).Add", Method, 0},
    -		{"(*Float).Set", Method, 0},
    -		{"(*Float).String", Method, 0},
    -		{"(*Float).Value", Method, 8},
    -		{"(*Int).Add", Method, 0},
    -		{"(*Int).Set", Method, 0},
    -		{"(*Int).String", Method, 0},
    -		{"(*Int).Value", Method, 8},
    -		{"(*Map).Add", Method, 0},
    -		{"(*Map).AddFloat", Method, 0},
    -		{"(*Map).Delete", Method, 12},
    -		{"(*Map).Do", Method, 0},
    -		{"(*Map).Get", Method, 0},
    -		{"(*Map).Init", Method, 0},
    -		{"(*Map).Set", Method, 0},
    -		{"(*Map).String", Method, 0},
    -		{"(*String).Set", Method, 0},
    -		{"(*String).String", Method, 0},
    -		{"(*String).Value", Method, 8},
    -		{"(Func).String", Method, 0},
    -		{"(Func).Value", Method, 8},
    -		{"Do", Func, 0},
    -		{"Float", Type, 0},
    -		{"Func", Type, 0},
    -		{"Get", Func, 0},
    -		{"Handler", Func, 8},
    -		{"Int", Type, 0},
    -		{"KeyValue", Type, 0},
    -		{"KeyValue.Key", Field, 0},
    -		{"KeyValue.Value", Field, 0},
    -		{"Map", Type, 0},
    -		{"NewFloat", Func, 0},
    -		{"NewInt", Func, 0},
    -		{"NewMap", Func, 0},
    -		{"NewString", Func, 0},
    -		{"Publish", Func, 0},
    -		{"String", Type, 0},
    -		{"Var", Type, 0},
    +		{"(*Float).Add", Method, 0, ""},
    +		{"(*Float).Set", Method, 0, ""},
    +		{"(*Float).String", Method, 0, ""},
    +		{"(*Float).Value", Method, 8, ""},
    +		{"(*Int).Add", Method, 0, ""},
    +		{"(*Int).Set", Method, 0, ""},
    +		{"(*Int).String", Method, 0, ""},
    +		{"(*Int).Value", Method, 8, ""},
    +		{"(*Map).Add", Method, 0, ""},
    +		{"(*Map).AddFloat", Method, 0, ""},
    +		{"(*Map).Delete", Method, 12, ""},
    +		{"(*Map).Do", Method, 0, ""},
    +		{"(*Map).Get", Method, 0, ""},
    +		{"(*Map).Init", Method, 0, ""},
    +		{"(*Map).Set", Method, 0, ""},
    +		{"(*Map).String", Method, 0, ""},
    +		{"(*String).Set", Method, 0, ""},
    +		{"(*String).String", Method, 0, ""},
    +		{"(*String).Value", Method, 8, ""},
    +		{"(Func).String", Method, 0, ""},
    +		{"(Func).Value", Method, 8, ""},
    +		{"Do", Func, 0, "func(f func(KeyValue))"},
    +		{"Float", Type, 0, ""},
    +		{"Func", Type, 0, ""},
    +		{"Get", Func, 0, "func(name string) Var"},
    +		{"Handler", Func, 8, "func() http.Handler"},
    +		{"Int", Type, 0, ""},
    +		{"KeyValue", Type, 0, ""},
    +		{"KeyValue.Key", Field, 0, ""},
    +		{"KeyValue.Value", Field, 0, ""},
    +		{"Map", Type, 0, ""},
    +		{"NewFloat", Func, 0, "func(name string) *Float"},
    +		{"NewInt", Func, 0, "func(name string) *Int"},
    +		{"NewMap", Func, 0, "func(name string) *Map"},
    +		{"NewString", Func, 0, "func(name string) *String"},
    +		{"Publish", Func, 0, "func(name string, v Var)"},
    +		{"String", Type, 0, ""},
    +		{"Var", Type, 0, ""},
     	},
     	"flag": {
    -		{"(*FlagSet).Arg", Method, 0},
    -		{"(*FlagSet).Args", Method, 0},
    -		{"(*FlagSet).Bool", Method, 0},
    -		{"(*FlagSet).BoolFunc", Method, 21},
    -		{"(*FlagSet).BoolVar", Method, 0},
    -		{"(*FlagSet).Duration", Method, 0},
    -		{"(*FlagSet).DurationVar", Method, 0},
    -		{"(*FlagSet).ErrorHandling", Method, 10},
    -		{"(*FlagSet).Float64", Method, 0},
    -		{"(*FlagSet).Float64Var", Method, 0},
    -		{"(*FlagSet).Func", Method, 16},
    -		{"(*FlagSet).Init", Method, 0},
    -		{"(*FlagSet).Int", Method, 0},
    -		{"(*FlagSet).Int64", Method, 0},
    -		{"(*FlagSet).Int64Var", Method, 0},
    -		{"(*FlagSet).IntVar", Method, 0},
    -		{"(*FlagSet).Lookup", Method, 0},
    -		{"(*FlagSet).NArg", Method, 0},
    -		{"(*FlagSet).NFlag", Method, 0},
    -		{"(*FlagSet).Name", Method, 10},
    -		{"(*FlagSet).Output", Method, 10},
    -		{"(*FlagSet).Parse", Method, 0},
    -		{"(*FlagSet).Parsed", Method, 0},
    -		{"(*FlagSet).PrintDefaults", Method, 0},
    -		{"(*FlagSet).Set", Method, 0},
    -		{"(*FlagSet).SetOutput", Method, 0},
    -		{"(*FlagSet).String", Method, 0},
    -		{"(*FlagSet).StringVar", Method, 0},
    -		{"(*FlagSet).TextVar", Method, 19},
    -		{"(*FlagSet).Uint", Method, 0},
    -		{"(*FlagSet).Uint64", Method, 0},
    -		{"(*FlagSet).Uint64Var", Method, 0},
    -		{"(*FlagSet).UintVar", Method, 0},
    -		{"(*FlagSet).Var", Method, 0},
    -		{"(*FlagSet).Visit", Method, 0},
    -		{"(*FlagSet).VisitAll", Method, 0},
    -		{"Arg", Func, 0},
    -		{"Args", Func, 0},
    -		{"Bool", Func, 0},
    -		{"BoolFunc", Func, 21},
    -		{"BoolVar", Func, 0},
    -		{"CommandLine", Var, 2},
    -		{"ContinueOnError", Const, 0},
    -		{"Duration", Func, 0},
    -		{"DurationVar", Func, 0},
    -		{"ErrHelp", Var, 0},
    -		{"ErrorHandling", Type, 0},
    -		{"ExitOnError", Const, 0},
    -		{"Flag", Type, 0},
    -		{"Flag.DefValue", Field, 0},
    -		{"Flag.Name", Field, 0},
    -		{"Flag.Usage", Field, 0},
    -		{"Flag.Value", Field, 0},
    -		{"FlagSet", Type, 0},
    -		{"FlagSet.Usage", Field, 0},
    -		{"Float64", Func, 0},
    -		{"Float64Var", Func, 0},
    -		{"Func", Func, 16},
    -		{"Getter", Type, 2},
    -		{"Int", Func, 0},
    -		{"Int64", Func, 0},
    -		{"Int64Var", Func, 0},
    -		{"IntVar", Func, 0},
    -		{"Lookup", Func, 0},
    -		{"NArg", Func, 0},
    -		{"NFlag", Func, 0},
    -		{"NewFlagSet", Func, 0},
    -		{"PanicOnError", Const, 0},
    -		{"Parse", Func, 0},
    -		{"Parsed", Func, 0},
    -		{"PrintDefaults", Func, 0},
    -		{"Set", Func, 0},
    -		{"String", Func, 0},
    -		{"StringVar", Func, 0},
    -		{"TextVar", Func, 19},
    -		{"Uint", Func, 0},
    -		{"Uint64", Func, 0},
    -		{"Uint64Var", Func, 0},
    -		{"UintVar", Func, 0},
    -		{"UnquoteUsage", Func, 5},
    -		{"Usage", Var, 0},
    -		{"Value", Type, 0},
    -		{"Var", Func, 0},
    -		{"Visit", Func, 0},
    -		{"VisitAll", Func, 0},
    +		{"(*FlagSet).Arg", Method, 0, ""},
    +		{"(*FlagSet).Args", Method, 0, ""},
    +		{"(*FlagSet).Bool", Method, 0, ""},
    +		{"(*FlagSet).BoolFunc", Method, 21, ""},
    +		{"(*FlagSet).BoolVar", Method, 0, ""},
    +		{"(*FlagSet).Duration", Method, 0, ""},
    +		{"(*FlagSet).DurationVar", Method, 0, ""},
    +		{"(*FlagSet).ErrorHandling", Method, 10, ""},
    +		{"(*FlagSet).Float64", Method, 0, ""},
    +		{"(*FlagSet).Float64Var", Method, 0, ""},
    +		{"(*FlagSet).Func", Method, 16, ""},
    +		{"(*FlagSet).Init", Method, 0, ""},
    +		{"(*FlagSet).Int", Method, 0, ""},
    +		{"(*FlagSet).Int64", Method, 0, ""},
    +		{"(*FlagSet).Int64Var", Method, 0, ""},
    +		{"(*FlagSet).IntVar", Method, 0, ""},
    +		{"(*FlagSet).Lookup", Method, 0, ""},
    +		{"(*FlagSet).NArg", Method, 0, ""},
    +		{"(*FlagSet).NFlag", Method, 0, ""},
    +		{"(*FlagSet).Name", Method, 10, ""},
    +		{"(*FlagSet).Output", Method, 10, ""},
    +		{"(*FlagSet).Parse", Method, 0, ""},
    +		{"(*FlagSet).Parsed", Method, 0, ""},
    +		{"(*FlagSet).PrintDefaults", Method, 0, ""},
    +		{"(*FlagSet).Set", Method, 0, ""},
    +		{"(*FlagSet).SetOutput", Method, 0, ""},
    +		{"(*FlagSet).String", Method, 0, ""},
    +		{"(*FlagSet).StringVar", Method, 0, ""},
    +		{"(*FlagSet).TextVar", Method, 19, ""},
    +		{"(*FlagSet).Uint", Method, 0, ""},
    +		{"(*FlagSet).Uint64", Method, 0, ""},
    +		{"(*FlagSet).Uint64Var", Method, 0, ""},
    +		{"(*FlagSet).UintVar", Method, 0, ""},
    +		{"(*FlagSet).Var", Method, 0, ""},
    +		{"(*FlagSet).Visit", Method, 0, ""},
    +		{"(*FlagSet).VisitAll", Method, 0, ""},
    +		{"Arg", Func, 0, "func(i int) string"},
    +		{"Args", Func, 0, "func() []string"},
    +		{"Bool", Func, 0, "func(name string, value bool, usage string) *bool"},
    +		{"BoolFunc", Func, 21, "func(name string, usage string, fn func(string) error)"},
    +		{"BoolVar", Func, 0, "func(p *bool, name string, value bool, usage string)"},
    +		{"CommandLine", Var, 2, ""},
    +		{"ContinueOnError", Const, 0, ""},
    +		{"Duration", Func, 0, "func(name string, value time.Duration, usage string) *time.Duration"},
    +		{"DurationVar", Func, 0, "func(p *time.Duration, name string, value time.Duration, usage string)"},
    +		{"ErrHelp", Var, 0, ""},
    +		{"ErrorHandling", Type, 0, ""},
    +		{"ExitOnError", Const, 0, ""},
    +		{"Flag", Type, 0, ""},
    +		{"Flag.DefValue", Field, 0, ""},
    +		{"Flag.Name", Field, 0, ""},
    +		{"Flag.Usage", Field, 0, ""},
    +		{"Flag.Value", Field, 0, ""},
    +		{"FlagSet", Type, 0, ""},
    +		{"FlagSet.Usage", Field, 0, ""},
    +		{"Float64", Func, 0, "func(name string, value float64, usage string) *float64"},
    +		{"Float64Var", Func, 0, "func(p *float64, name string, value float64, usage string)"},
    +		{"Func", Func, 16, "func(name string, usage string, fn func(string) error)"},
    +		{"Getter", Type, 2, ""},
    +		{"Int", Func, 0, "func(name string, value int, usage string) *int"},
    +		{"Int64", Func, 0, "func(name string, value int64, usage string) *int64"},
    +		{"Int64Var", Func, 0, "func(p *int64, name string, value int64, usage string)"},
    +		{"IntVar", Func, 0, "func(p *int, name string, value int, usage string)"},
    +		{"Lookup", Func, 0, "func(name string) *Flag"},
    +		{"NArg", Func, 0, "func() int"},
    +		{"NFlag", Func, 0, "func() int"},
    +		{"NewFlagSet", Func, 0, "func(name string, errorHandling ErrorHandling) *FlagSet"},
    +		{"PanicOnError", Const, 0, ""},
    +		{"Parse", Func, 0, "func()"},
    +		{"Parsed", Func, 0, "func() bool"},
    +		{"PrintDefaults", Func, 0, "func()"},
    +		{"Set", Func, 0, "func(name string, value string) error"},
    +		{"String", Func, 0, "func(name string, value string, usage string) *string"},
    +		{"StringVar", Func, 0, "func(p *string, name string, value string, usage string)"},
    +		{"TextVar", Func, 19, "func(p encoding.TextUnmarshaler, name string, value encoding.TextMarshaler, usage string)"},
    +		{"Uint", Func, 0, "func(name string, value uint, usage string) *uint"},
    +		{"Uint64", Func, 0, "func(name string, value uint64, usage string) *uint64"},
    +		{"Uint64Var", Func, 0, "func(p *uint64, name string, value uint64, usage string)"},
    +		{"UintVar", Func, 0, "func(p *uint, name string, value uint, usage string)"},
    +		{"UnquoteUsage", Func, 5, "func(flag *Flag) (name string, usage string)"},
    +		{"Usage", Var, 0, ""},
    +		{"Value", Type, 0, ""},
    +		{"Var", Func, 0, "func(value Value, name string, usage string)"},
    +		{"Visit", Func, 0, "func(fn func(*Flag))"},
    +		{"VisitAll", Func, 0, "func(fn func(*Flag))"},
     	},
     	"fmt": {
    -		{"Append", Func, 19},
    -		{"Appendf", Func, 19},
    -		{"Appendln", Func, 19},
    -		{"Errorf", Func, 0},
    -		{"FormatString", Func, 20},
    -		{"Formatter", Type, 0},
    -		{"Fprint", Func, 0},
    -		{"Fprintf", Func, 0},
    -		{"Fprintln", Func, 0},
    -		{"Fscan", Func, 0},
    -		{"Fscanf", Func, 0},
    -		{"Fscanln", Func, 0},
    -		{"GoStringer", Type, 0},
    -		{"Print", Func, 0},
    -		{"Printf", Func, 0},
    -		{"Println", Func, 0},
    -		{"Scan", Func, 0},
    -		{"ScanState", Type, 0},
    -		{"Scanf", Func, 0},
    -		{"Scanln", Func, 0},
    -		{"Scanner", Type, 0},
    -		{"Sprint", Func, 0},
    -		{"Sprintf", Func, 0},
    -		{"Sprintln", Func, 0},
    -		{"Sscan", Func, 0},
    -		{"Sscanf", Func, 0},
    -		{"Sscanln", Func, 0},
    -		{"State", Type, 0},
    -		{"Stringer", Type, 0},
    +		{"Append", Func, 19, "func(b []byte, a ...any) []byte"},
    +		{"Appendf", Func, 19, "func(b []byte, format string, a ...any) []byte"},
    +		{"Appendln", Func, 19, "func(b []byte, a ...any) []byte"},
    +		{"Errorf", Func, 0, "func(format string, a ...any) error"},
    +		{"FormatString", Func, 20, "func(state State, verb rune) string"},
    +		{"Formatter", Type, 0, ""},
    +		{"Fprint", Func, 0, "func(w io.Writer, a ...any) (n int, err error)"},
    +		{"Fprintf", Func, 0, "func(w io.Writer, format string, a ...any) (n int, err error)"},
    +		{"Fprintln", Func, 0, "func(w io.Writer, a ...any) (n int, err error)"},
    +		{"Fscan", Func, 0, "func(r io.Reader, a ...any) (n int, err error)"},
    +		{"Fscanf", Func, 0, "func(r io.Reader, format string, a ...any) (n int, err error)"},
    +		{"Fscanln", Func, 0, "func(r io.Reader, a ...any) (n int, err error)"},
    +		{"GoStringer", Type, 0, ""},
    +		{"Print", Func, 0, "func(a ...any) (n int, err error)"},
    +		{"Printf", Func, 0, "func(format string, a ...any) (n int, err error)"},
    +		{"Println", Func, 0, "func(a ...any) (n int, err error)"},
    +		{"Scan", Func, 0, "func(a ...any) (n int, err error)"},
    +		{"ScanState", Type, 0, ""},
    +		{"Scanf", Func, 0, "func(format string, a ...any) (n int, err error)"},
    +		{"Scanln", Func, 0, "func(a ...any) (n int, err error)"},
    +		{"Scanner", Type, 0, ""},
    +		{"Sprint", Func, 0, "func(a ...any) string"},
    +		{"Sprintf", Func, 0, "func(format string, a ...any) string"},
    +		{"Sprintln", Func, 0, "func(a ...any) string"},
    +		{"Sscan", Func, 0, "func(str string, a ...any) (n int, err error)"},
    +		{"Sscanf", Func, 0, "func(str string, format string, a ...any) (n int, err error)"},
    +		{"Sscanln", Func, 0, "func(str string, a ...any) (n int, err error)"},
    +		{"State", Type, 0, ""},
    +		{"Stringer", Type, 0, ""},
     	},
     	"go/ast": {
    -		{"(*ArrayType).End", Method, 0},
    -		{"(*ArrayType).Pos", Method, 0},
    -		{"(*AssignStmt).End", Method, 0},
    -		{"(*AssignStmt).Pos", Method, 0},
    -		{"(*BadDecl).End", Method, 0},
    -		{"(*BadDecl).Pos", Method, 0},
    -		{"(*BadExpr).End", Method, 0},
    -		{"(*BadExpr).Pos", Method, 0},
    -		{"(*BadStmt).End", Method, 0},
    -		{"(*BadStmt).Pos", Method, 0},
    -		{"(*BasicLit).End", Method, 0},
    -		{"(*BasicLit).Pos", Method, 0},
    -		{"(*BinaryExpr).End", Method, 0},
    -		{"(*BinaryExpr).Pos", Method, 0},
    -		{"(*BlockStmt).End", Method, 0},
    -		{"(*BlockStmt).Pos", Method, 0},
    -		{"(*BranchStmt).End", Method, 0},
    -		{"(*BranchStmt).Pos", Method, 0},
    -		{"(*CallExpr).End", Method, 0},
    -		{"(*CallExpr).Pos", Method, 0},
    -		{"(*CaseClause).End", Method, 0},
    -		{"(*CaseClause).Pos", Method, 0},
    -		{"(*ChanType).End", Method, 0},
    -		{"(*ChanType).Pos", Method, 0},
    -		{"(*CommClause).End", Method, 0},
    -		{"(*CommClause).Pos", Method, 0},
    -		{"(*Comment).End", Method, 0},
    -		{"(*Comment).Pos", Method, 0},
    -		{"(*CommentGroup).End", Method, 0},
    -		{"(*CommentGroup).Pos", Method, 0},
    -		{"(*CommentGroup).Text", Method, 0},
    -		{"(*CompositeLit).End", Method, 0},
    -		{"(*CompositeLit).Pos", Method, 0},
    -		{"(*DeclStmt).End", Method, 0},
    -		{"(*DeclStmt).Pos", Method, 0},
    -		{"(*DeferStmt).End", Method, 0},
    -		{"(*DeferStmt).Pos", Method, 0},
    -		{"(*Ellipsis).End", Method, 0},
    -		{"(*Ellipsis).Pos", Method, 0},
    -		{"(*EmptyStmt).End", Method, 0},
    -		{"(*EmptyStmt).Pos", Method, 0},
    -		{"(*ExprStmt).End", Method, 0},
    -		{"(*ExprStmt).Pos", Method, 0},
    -		{"(*Field).End", Method, 0},
    -		{"(*Field).Pos", Method, 0},
    -		{"(*FieldList).End", Method, 0},
    -		{"(*FieldList).NumFields", Method, 0},
    -		{"(*FieldList).Pos", Method, 0},
    -		{"(*File).End", Method, 0},
    -		{"(*File).Pos", Method, 0},
    -		{"(*ForStmt).End", Method, 0},
    -		{"(*ForStmt).Pos", Method, 0},
    -		{"(*FuncDecl).End", Method, 0},
    -		{"(*FuncDecl).Pos", Method, 0},
    -		{"(*FuncLit).End", Method, 0},
    -		{"(*FuncLit).Pos", Method, 0},
    -		{"(*FuncType).End", Method, 0},
    -		{"(*FuncType).Pos", Method, 0},
    -		{"(*GenDecl).End", Method, 0},
    -		{"(*GenDecl).Pos", Method, 0},
    -		{"(*GoStmt).End", Method, 0},
    -		{"(*GoStmt).Pos", Method, 0},
    -		{"(*Ident).End", Method, 0},
    -		{"(*Ident).IsExported", Method, 0},
    -		{"(*Ident).Pos", Method, 0},
    -		{"(*Ident).String", Method, 0},
    -		{"(*IfStmt).End", Method, 0},
    -		{"(*IfStmt).Pos", Method, 0},
    -		{"(*ImportSpec).End", Method, 0},
    -		{"(*ImportSpec).Pos", Method, 0},
    -		{"(*IncDecStmt).End", Method, 0},
    -		{"(*IncDecStmt).Pos", Method, 0},
    -		{"(*IndexExpr).End", Method, 0},
    -		{"(*IndexExpr).Pos", Method, 0},
    -		{"(*IndexListExpr).End", Method, 18},
    -		{"(*IndexListExpr).Pos", Method, 18},
    -		{"(*InterfaceType).End", Method, 0},
    -		{"(*InterfaceType).Pos", Method, 0},
    -		{"(*KeyValueExpr).End", Method, 0},
    -		{"(*KeyValueExpr).Pos", Method, 0},
    -		{"(*LabeledStmt).End", Method, 0},
    -		{"(*LabeledStmt).Pos", Method, 0},
    -		{"(*MapType).End", Method, 0},
    -		{"(*MapType).Pos", Method, 0},
    -		{"(*Object).Pos", Method, 0},
    -		{"(*Package).End", Method, 0},
    -		{"(*Package).Pos", Method, 0},
    -		{"(*ParenExpr).End", Method, 0},
    -		{"(*ParenExpr).Pos", Method, 0},
    -		{"(*RangeStmt).End", Method, 0},
    -		{"(*RangeStmt).Pos", Method, 0},
    -		{"(*ReturnStmt).End", Method, 0},
    -		{"(*ReturnStmt).Pos", Method, 0},
    -		{"(*Scope).Insert", Method, 0},
    -		{"(*Scope).Lookup", Method, 0},
    -		{"(*Scope).String", Method, 0},
    -		{"(*SelectStmt).End", Method, 0},
    -		{"(*SelectStmt).Pos", Method, 0},
    -		{"(*SelectorExpr).End", Method, 0},
    -		{"(*SelectorExpr).Pos", Method, 0},
    -		{"(*SendStmt).End", Method, 0},
    -		{"(*SendStmt).Pos", Method, 0},
    -		{"(*SliceExpr).End", Method, 0},
    -		{"(*SliceExpr).Pos", Method, 0},
    -		{"(*StarExpr).End", Method, 0},
    -		{"(*StarExpr).Pos", Method, 0},
    -		{"(*StructType).End", Method, 0},
    -		{"(*StructType).Pos", Method, 0},
    -		{"(*SwitchStmt).End", Method, 0},
    -		{"(*SwitchStmt).Pos", Method, 0},
    -		{"(*TypeAssertExpr).End", Method, 0},
    -		{"(*TypeAssertExpr).Pos", Method, 0},
    -		{"(*TypeSpec).End", Method, 0},
    -		{"(*TypeSpec).Pos", Method, 0},
    -		{"(*TypeSwitchStmt).End", Method, 0},
    -		{"(*TypeSwitchStmt).Pos", Method, 0},
    -		{"(*UnaryExpr).End", Method, 0},
    -		{"(*UnaryExpr).Pos", Method, 0},
    -		{"(*ValueSpec).End", Method, 0},
    -		{"(*ValueSpec).Pos", Method, 0},
    -		{"(CommentMap).Comments", Method, 1},
    -		{"(CommentMap).Filter", Method, 1},
    -		{"(CommentMap).String", Method, 1},
    -		{"(CommentMap).Update", Method, 1},
    -		{"(ObjKind).String", Method, 0},
    -		{"ArrayType", Type, 0},
    -		{"ArrayType.Elt", Field, 0},
    -		{"ArrayType.Lbrack", Field, 0},
    -		{"ArrayType.Len", Field, 0},
    -		{"AssignStmt", Type, 0},
    -		{"AssignStmt.Lhs", Field, 0},
    -		{"AssignStmt.Rhs", Field, 0},
    -		{"AssignStmt.Tok", Field, 0},
    -		{"AssignStmt.TokPos", Field, 0},
    -		{"Bad", Const, 0},
    -		{"BadDecl", Type, 0},
    -		{"BadDecl.From", Field, 0},
    -		{"BadDecl.To", Field, 0},
    -		{"BadExpr", Type, 0},
    -		{"BadExpr.From", Field, 0},
    -		{"BadExpr.To", Field, 0},
    -		{"BadStmt", Type, 0},
    -		{"BadStmt.From", Field, 0},
    -		{"BadStmt.To", Field, 0},
    -		{"BasicLit", Type, 0},
    -		{"BasicLit.Kind", Field, 0},
    -		{"BasicLit.Value", Field, 0},
    -		{"BasicLit.ValuePos", Field, 0},
    -		{"BinaryExpr", Type, 0},
    -		{"BinaryExpr.Op", Field, 0},
    -		{"BinaryExpr.OpPos", Field, 0},
    -		{"BinaryExpr.X", Field, 0},
    -		{"BinaryExpr.Y", Field, 0},
    -		{"BlockStmt", Type, 0},
    -		{"BlockStmt.Lbrace", Field, 0},
    -		{"BlockStmt.List", Field, 0},
    -		{"BlockStmt.Rbrace", Field, 0},
    -		{"BranchStmt", Type, 0},
    -		{"BranchStmt.Label", Field, 0},
    -		{"BranchStmt.Tok", Field, 0},
    -		{"BranchStmt.TokPos", Field, 0},
    -		{"CallExpr", Type, 0},
    -		{"CallExpr.Args", Field, 0},
    -		{"CallExpr.Ellipsis", Field, 0},
    -		{"CallExpr.Fun", Field, 0},
    -		{"CallExpr.Lparen", Field, 0},
    -		{"CallExpr.Rparen", Field, 0},
    -		{"CaseClause", Type, 0},
    -		{"CaseClause.Body", Field, 0},
    -		{"CaseClause.Case", Field, 0},
    -		{"CaseClause.Colon", Field, 0},
    -		{"CaseClause.List", Field, 0},
    -		{"ChanDir", Type, 0},
    -		{"ChanType", Type, 0},
    -		{"ChanType.Arrow", Field, 1},
    -		{"ChanType.Begin", Field, 0},
    -		{"ChanType.Dir", Field, 0},
    -		{"ChanType.Value", Field, 0},
    -		{"CommClause", Type, 0},
    -		{"CommClause.Body", Field, 0},
    -		{"CommClause.Case", Field, 0},
    -		{"CommClause.Colon", Field, 0},
    -		{"CommClause.Comm", Field, 0},
    -		{"Comment", Type, 0},
    -		{"Comment.Slash", Field, 0},
    -		{"Comment.Text", Field, 0},
    -		{"CommentGroup", Type, 0},
    -		{"CommentGroup.List", Field, 0},
    -		{"CommentMap", Type, 1},
    -		{"CompositeLit", Type, 0},
    -		{"CompositeLit.Elts", Field, 0},
    -		{"CompositeLit.Incomplete", Field, 11},
    -		{"CompositeLit.Lbrace", Field, 0},
    -		{"CompositeLit.Rbrace", Field, 0},
    -		{"CompositeLit.Type", Field, 0},
    -		{"Con", Const, 0},
    -		{"Decl", Type, 0},
    -		{"DeclStmt", Type, 0},
    -		{"DeclStmt.Decl", Field, 0},
    -		{"DeferStmt", Type, 0},
    -		{"DeferStmt.Call", Field, 0},
    -		{"DeferStmt.Defer", Field, 0},
    -		{"Ellipsis", Type, 0},
    -		{"Ellipsis.Ellipsis", Field, 0},
    -		{"Ellipsis.Elt", Field, 0},
    -		{"EmptyStmt", Type, 0},
    -		{"EmptyStmt.Implicit", Field, 5},
    -		{"EmptyStmt.Semicolon", Field, 0},
    -		{"Expr", Type, 0},
    -		{"ExprStmt", Type, 0},
    -		{"ExprStmt.X", Field, 0},
    -		{"Field", Type, 0},
    -		{"Field.Comment", Field, 0},
    -		{"Field.Doc", Field, 0},
    -		{"Field.Names", Field, 0},
    -		{"Field.Tag", Field, 0},
    -		{"Field.Type", Field, 0},
    -		{"FieldFilter", Type, 0},
    -		{"FieldList", Type, 0},
    -		{"FieldList.Closing", Field, 0},
    -		{"FieldList.List", Field, 0},
    -		{"FieldList.Opening", Field, 0},
    -		{"File", Type, 0},
    -		{"File.Comments", Field, 0},
    -		{"File.Decls", Field, 0},
    -		{"File.Doc", Field, 0},
    -		{"File.FileEnd", Field, 20},
    -		{"File.FileStart", Field, 20},
    -		{"File.GoVersion", Field, 21},
    -		{"File.Imports", Field, 0},
    -		{"File.Name", Field, 0},
    -		{"File.Package", Field, 0},
    -		{"File.Scope", Field, 0},
    -		{"File.Unresolved", Field, 0},
    -		{"FileExports", Func, 0},
    -		{"Filter", Type, 0},
    -		{"FilterDecl", Func, 0},
    -		{"FilterFile", Func, 0},
    -		{"FilterFuncDuplicates", Const, 0},
    -		{"FilterImportDuplicates", Const, 0},
    -		{"FilterPackage", Func, 0},
    -		{"FilterUnassociatedComments", Const, 0},
    -		{"ForStmt", Type, 0},
    -		{"ForStmt.Body", Field, 0},
    -		{"ForStmt.Cond", Field, 0},
    -		{"ForStmt.For", Field, 0},
    -		{"ForStmt.Init", Field, 0},
    -		{"ForStmt.Post", Field, 0},
    -		{"Fprint", Func, 0},
    -		{"Fun", Const, 0},
    -		{"FuncDecl", Type, 0},
    -		{"FuncDecl.Body", Field, 0},
    -		{"FuncDecl.Doc", Field, 0},
    -		{"FuncDecl.Name", Field, 0},
    -		{"FuncDecl.Recv", Field, 0},
    -		{"FuncDecl.Type", Field, 0},
    -		{"FuncLit", Type, 0},
    -		{"FuncLit.Body", Field, 0},
    -		{"FuncLit.Type", Field, 0},
    -		{"FuncType", Type, 0},
    -		{"FuncType.Func", Field, 0},
    -		{"FuncType.Params", Field, 0},
    -		{"FuncType.Results", Field, 0},
    -		{"FuncType.TypeParams", Field, 18},
    -		{"GenDecl", Type, 0},
    -		{"GenDecl.Doc", Field, 0},
    -		{"GenDecl.Lparen", Field, 0},
    -		{"GenDecl.Rparen", Field, 0},
    -		{"GenDecl.Specs", Field, 0},
    -		{"GenDecl.Tok", Field, 0},
    -		{"GenDecl.TokPos", Field, 0},
    -		{"GoStmt", Type, 0},
    -		{"GoStmt.Call", Field, 0},
    -		{"GoStmt.Go", Field, 0},
    -		{"Ident", Type, 0},
    -		{"Ident.Name", Field, 0},
    -		{"Ident.NamePos", Field, 0},
    -		{"Ident.Obj", Field, 0},
    -		{"IfStmt", Type, 0},
    -		{"IfStmt.Body", Field, 0},
    -		{"IfStmt.Cond", Field, 0},
    -		{"IfStmt.Else", Field, 0},
    -		{"IfStmt.If", Field, 0},
    -		{"IfStmt.Init", Field, 0},
    -		{"ImportSpec", Type, 0},
    -		{"ImportSpec.Comment", Field, 0},
    -		{"ImportSpec.Doc", Field, 0},
    -		{"ImportSpec.EndPos", Field, 0},
    -		{"ImportSpec.Name", Field, 0},
    -		{"ImportSpec.Path", Field, 0},
    -		{"Importer", Type, 0},
    -		{"IncDecStmt", Type, 0},
    -		{"IncDecStmt.Tok", Field, 0},
    -		{"IncDecStmt.TokPos", Field, 0},
    -		{"IncDecStmt.X", Field, 0},
    -		{"IndexExpr", Type, 0},
    -		{"IndexExpr.Index", Field, 0},
    -		{"IndexExpr.Lbrack", Field, 0},
    -		{"IndexExpr.Rbrack", Field, 0},
    -		{"IndexExpr.X", Field, 0},
    -		{"IndexListExpr", Type, 18},
    -		{"IndexListExpr.Indices", Field, 18},
    -		{"IndexListExpr.Lbrack", Field, 18},
    -		{"IndexListExpr.Rbrack", Field, 18},
    -		{"IndexListExpr.X", Field, 18},
    -		{"Inspect", Func, 0},
    -		{"InterfaceType", Type, 0},
    -		{"InterfaceType.Incomplete", Field, 0},
    -		{"InterfaceType.Interface", Field, 0},
    -		{"InterfaceType.Methods", Field, 0},
    -		{"IsExported", Func, 0},
    -		{"IsGenerated", Func, 21},
    -		{"KeyValueExpr", Type, 0},
    -		{"KeyValueExpr.Colon", Field, 0},
    -		{"KeyValueExpr.Key", Field, 0},
    -		{"KeyValueExpr.Value", Field, 0},
    -		{"LabeledStmt", Type, 0},
    -		{"LabeledStmt.Colon", Field, 0},
    -		{"LabeledStmt.Label", Field, 0},
    -		{"LabeledStmt.Stmt", Field, 0},
    -		{"Lbl", Const, 0},
    -		{"MapType", Type, 0},
    -		{"MapType.Key", Field, 0},
    -		{"MapType.Map", Field, 0},
    -		{"MapType.Value", Field, 0},
    -		{"MergeMode", Type, 0},
    -		{"MergePackageFiles", Func, 0},
    -		{"NewCommentMap", Func, 1},
    -		{"NewIdent", Func, 0},
    -		{"NewObj", Func, 0},
    -		{"NewPackage", Func, 0},
    -		{"NewScope", Func, 0},
    -		{"Node", Type, 0},
    -		{"NotNilFilter", Func, 0},
    -		{"ObjKind", Type, 0},
    -		{"Object", Type, 0},
    -		{"Object.Data", Field, 0},
    -		{"Object.Decl", Field, 0},
    -		{"Object.Kind", Field, 0},
    -		{"Object.Name", Field, 0},
    -		{"Object.Type", Field, 0},
    -		{"Package", Type, 0},
    -		{"Package.Files", Field, 0},
    -		{"Package.Imports", Field, 0},
    -		{"Package.Name", Field, 0},
    -		{"Package.Scope", Field, 0},
    -		{"PackageExports", Func, 0},
    -		{"ParenExpr", Type, 0},
    -		{"ParenExpr.Lparen", Field, 0},
    -		{"ParenExpr.Rparen", Field, 0},
    -		{"ParenExpr.X", Field, 0},
    -		{"Pkg", Const, 0},
    -		{"Preorder", Func, 23},
    -		{"Print", Func, 0},
    -		{"RECV", Const, 0},
    -		{"RangeStmt", Type, 0},
    -		{"RangeStmt.Body", Field, 0},
    -		{"RangeStmt.For", Field, 0},
    -		{"RangeStmt.Key", Field, 0},
    -		{"RangeStmt.Range", Field, 20},
    -		{"RangeStmt.Tok", Field, 0},
    -		{"RangeStmt.TokPos", Field, 0},
    -		{"RangeStmt.Value", Field, 0},
    -		{"RangeStmt.X", Field, 0},
    -		{"ReturnStmt", Type, 0},
    -		{"ReturnStmt.Results", Field, 0},
    -		{"ReturnStmt.Return", Field, 0},
    -		{"SEND", Const, 0},
    -		{"Scope", Type, 0},
    -		{"Scope.Objects", Field, 0},
    -		{"Scope.Outer", Field, 0},
    -		{"SelectStmt", Type, 0},
    -		{"SelectStmt.Body", Field, 0},
    -		{"SelectStmt.Select", Field, 0},
    -		{"SelectorExpr", Type, 0},
    -		{"SelectorExpr.Sel", Field, 0},
    -		{"SelectorExpr.X", Field, 0},
    -		{"SendStmt", Type, 0},
    -		{"SendStmt.Arrow", Field, 0},
    -		{"SendStmt.Chan", Field, 0},
    -		{"SendStmt.Value", Field, 0},
    -		{"SliceExpr", Type, 0},
    -		{"SliceExpr.High", Field, 0},
    -		{"SliceExpr.Lbrack", Field, 0},
    -		{"SliceExpr.Low", Field, 0},
    -		{"SliceExpr.Max", Field, 2},
    -		{"SliceExpr.Rbrack", Field, 0},
    -		{"SliceExpr.Slice3", Field, 2},
    -		{"SliceExpr.X", Field, 0},
    -		{"SortImports", Func, 0},
    -		{"Spec", Type, 0},
    -		{"StarExpr", Type, 0},
    -		{"StarExpr.Star", Field, 0},
    -		{"StarExpr.X", Field, 0},
    -		{"Stmt", Type, 0},
    -		{"StructType", Type, 0},
    -		{"StructType.Fields", Field, 0},
    -		{"StructType.Incomplete", Field, 0},
    -		{"StructType.Struct", Field, 0},
    -		{"SwitchStmt", Type, 0},
    -		{"SwitchStmt.Body", Field, 0},
    -		{"SwitchStmt.Init", Field, 0},
    -		{"SwitchStmt.Switch", Field, 0},
    -		{"SwitchStmt.Tag", Field, 0},
    -		{"Typ", Const, 0},
    -		{"TypeAssertExpr", Type, 0},
    -		{"TypeAssertExpr.Lparen", Field, 2},
    -		{"TypeAssertExpr.Rparen", Field, 2},
    -		{"TypeAssertExpr.Type", Field, 0},
    -		{"TypeAssertExpr.X", Field, 0},
    -		{"TypeSpec", Type, 0},
    -		{"TypeSpec.Assign", Field, 9},
    -		{"TypeSpec.Comment", Field, 0},
    -		{"TypeSpec.Doc", Field, 0},
    -		{"TypeSpec.Name", Field, 0},
    -		{"TypeSpec.Type", Field, 0},
    -		{"TypeSpec.TypeParams", Field, 18},
    -		{"TypeSwitchStmt", Type, 0},
    -		{"TypeSwitchStmt.Assign", Field, 0},
    -		{"TypeSwitchStmt.Body", Field, 0},
    -		{"TypeSwitchStmt.Init", Field, 0},
    -		{"TypeSwitchStmt.Switch", Field, 0},
    -		{"UnaryExpr", Type, 0},
    -		{"UnaryExpr.Op", Field, 0},
    -		{"UnaryExpr.OpPos", Field, 0},
    -		{"UnaryExpr.X", Field, 0},
    -		{"Unparen", Func, 22},
    -		{"ValueSpec", Type, 0},
    -		{"ValueSpec.Comment", Field, 0},
    -		{"ValueSpec.Doc", Field, 0},
    -		{"ValueSpec.Names", Field, 0},
    -		{"ValueSpec.Type", Field, 0},
    -		{"ValueSpec.Values", Field, 0},
    -		{"Var", Const, 0},
    -		{"Visitor", Type, 0},
    -		{"Walk", Func, 0},
    +		{"(*ArrayType).End", Method, 0, ""},
    +		{"(*ArrayType).Pos", Method, 0, ""},
    +		{"(*AssignStmt).End", Method, 0, ""},
    +		{"(*AssignStmt).Pos", Method, 0, ""},
    +		{"(*BadDecl).End", Method, 0, ""},
    +		{"(*BadDecl).Pos", Method, 0, ""},
    +		{"(*BadExpr).End", Method, 0, ""},
    +		{"(*BadExpr).Pos", Method, 0, ""},
    +		{"(*BadStmt).End", Method, 0, ""},
    +		{"(*BadStmt).Pos", Method, 0, ""},
    +		{"(*BasicLit).End", Method, 0, ""},
    +		{"(*BasicLit).Pos", Method, 0, ""},
    +		{"(*BinaryExpr).End", Method, 0, ""},
    +		{"(*BinaryExpr).Pos", Method, 0, ""},
    +		{"(*BlockStmt).End", Method, 0, ""},
    +		{"(*BlockStmt).Pos", Method, 0, ""},
    +		{"(*BranchStmt).End", Method, 0, ""},
    +		{"(*BranchStmt).Pos", Method, 0, ""},
    +		{"(*CallExpr).End", Method, 0, ""},
    +		{"(*CallExpr).Pos", Method, 0, ""},
    +		{"(*CaseClause).End", Method, 0, ""},
    +		{"(*CaseClause).Pos", Method, 0, ""},
    +		{"(*ChanType).End", Method, 0, ""},
    +		{"(*ChanType).Pos", Method, 0, ""},
    +		{"(*CommClause).End", Method, 0, ""},
    +		{"(*CommClause).Pos", Method, 0, ""},
    +		{"(*Comment).End", Method, 0, ""},
    +		{"(*Comment).Pos", Method, 0, ""},
    +		{"(*CommentGroup).End", Method, 0, ""},
    +		{"(*CommentGroup).Pos", Method, 0, ""},
    +		{"(*CommentGroup).Text", Method, 0, ""},
    +		{"(*CompositeLit).End", Method, 0, ""},
    +		{"(*CompositeLit).Pos", Method, 0, ""},
    +		{"(*DeclStmt).End", Method, 0, ""},
    +		{"(*DeclStmt).Pos", Method, 0, ""},
    +		{"(*DeferStmt).End", Method, 0, ""},
    +		{"(*DeferStmt).Pos", Method, 0, ""},
    +		{"(*Ellipsis).End", Method, 0, ""},
    +		{"(*Ellipsis).Pos", Method, 0, ""},
    +		{"(*EmptyStmt).End", Method, 0, ""},
    +		{"(*EmptyStmt).Pos", Method, 0, ""},
    +		{"(*ExprStmt).End", Method, 0, ""},
    +		{"(*ExprStmt).Pos", Method, 0, ""},
    +		{"(*Field).End", Method, 0, ""},
    +		{"(*Field).Pos", Method, 0, ""},
    +		{"(*FieldList).End", Method, 0, ""},
    +		{"(*FieldList).NumFields", Method, 0, ""},
    +		{"(*FieldList).Pos", Method, 0, ""},
    +		{"(*File).End", Method, 0, ""},
    +		{"(*File).Pos", Method, 0, ""},
    +		{"(*ForStmt).End", Method, 0, ""},
    +		{"(*ForStmt).Pos", Method, 0, ""},
    +		{"(*FuncDecl).End", Method, 0, ""},
    +		{"(*FuncDecl).Pos", Method, 0, ""},
    +		{"(*FuncLit).End", Method, 0, ""},
    +		{"(*FuncLit).Pos", Method, 0, ""},
    +		{"(*FuncType).End", Method, 0, ""},
    +		{"(*FuncType).Pos", Method, 0, ""},
    +		{"(*GenDecl).End", Method, 0, ""},
    +		{"(*GenDecl).Pos", Method, 0, ""},
    +		{"(*GoStmt).End", Method, 0, ""},
    +		{"(*GoStmt).Pos", Method, 0, ""},
    +		{"(*Ident).End", Method, 0, ""},
    +		{"(*Ident).IsExported", Method, 0, ""},
    +		{"(*Ident).Pos", Method, 0, ""},
    +		{"(*Ident).String", Method, 0, ""},
    +		{"(*IfStmt).End", Method, 0, ""},
    +		{"(*IfStmt).Pos", Method, 0, ""},
    +		{"(*ImportSpec).End", Method, 0, ""},
    +		{"(*ImportSpec).Pos", Method, 0, ""},
    +		{"(*IncDecStmt).End", Method, 0, ""},
    +		{"(*IncDecStmt).Pos", Method, 0, ""},
    +		{"(*IndexExpr).End", Method, 0, ""},
    +		{"(*IndexExpr).Pos", Method, 0, ""},
    +		{"(*IndexListExpr).End", Method, 18, ""},
    +		{"(*IndexListExpr).Pos", Method, 18, ""},
    +		{"(*InterfaceType).End", Method, 0, ""},
    +		{"(*InterfaceType).Pos", Method, 0, ""},
    +		{"(*KeyValueExpr).End", Method, 0, ""},
    +		{"(*KeyValueExpr).Pos", Method, 0, ""},
    +		{"(*LabeledStmt).End", Method, 0, ""},
    +		{"(*LabeledStmt).Pos", Method, 0, ""},
    +		{"(*MapType).End", Method, 0, ""},
    +		{"(*MapType).Pos", Method, 0, ""},
    +		{"(*Object).Pos", Method, 0, ""},
    +		{"(*Package).End", Method, 0, ""},
    +		{"(*Package).Pos", Method, 0, ""},
    +		{"(*ParenExpr).End", Method, 0, ""},
    +		{"(*ParenExpr).Pos", Method, 0, ""},
    +		{"(*RangeStmt).End", Method, 0, ""},
    +		{"(*RangeStmt).Pos", Method, 0, ""},
    +		{"(*ReturnStmt).End", Method, 0, ""},
    +		{"(*ReturnStmt).Pos", Method, 0, ""},
    +		{"(*Scope).Insert", Method, 0, ""},
    +		{"(*Scope).Lookup", Method, 0, ""},
    +		{"(*Scope).String", Method, 0, ""},
    +		{"(*SelectStmt).End", Method, 0, ""},
    +		{"(*SelectStmt).Pos", Method, 0, ""},
    +		{"(*SelectorExpr).End", Method, 0, ""},
    +		{"(*SelectorExpr).Pos", Method, 0, ""},
    +		{"(*SendStmt).End", Method, 0, ""},
    +		{"(*SendStmt).Pos", Method, 0, ""},
    +		{"(*SliceExpr).End", Method, 0, ""},
    +		{"(*SliceExpr).Pos", Method, 0, ""},
    +		{"(*StarExpr).End", Method, 0, ""},
    +		{"(*StarExpr).Pos", Method, 0, ""},
    +		{"(*StructType).End", Method, 0, ""},
    +		{"(*StructType).Pos", Method, 0, ""},
    +		{"(*SwitchStmt).End", Method, 0, ""},
    +		{"(*SwitchStmt).Pos", Method, 0, ""},
    +		{"(*TypeAssertExpr).End", Method, 0, ""},
    +		{"(*TypeAssertExpr).Pos", Method, 0, ""},
    +		{"(*TypeSpec).End", Method, 0, ""},
    +		{"(*TypeSpec).Pos", Method, 0, ""},
    +		{"(*TypeSwitchStmt).End", Method, 0, ""},
    +		{"(*TypeSwitchStmt).Pos", Method, 0, ""},
    +		{"(*UnaryExpr).End", Method, 0, ""},
    +		{"(*UnaryExpr).Pos", Method, 0, ""},
    +		{"(*ValueSpec).End", Method, 0, ""},
    +		{"(*ValueSpec).Pos", Method, 0, ""},
    +		{"(CommentMap).Comments", Method, 1, ""},
    +		{"(CommentMap).Filter", Method, 1, ""},
    +		{"(CommentMap).String", Method, 1, ""},
    +		{"(CommentMap).Update", Method, 1, ""},
    +		{"(ObjKind).String", Method, 0, ""},
    +		{"ArrayType", Type, 0, ""},
    +		{"ArrayType.Elt", Field, 0, ""},
    +		{"ArrayType.Lbrack", Field, 0, ""},
    +		{"ArrayType.Len", Field, 0, ""},
    +		{"AssignStmt", Type, 0, ""},
    +		{"AssignStmt.Lhs", Field, 0, ""},
    +		{"AssignStmt.Rhs", Field, 0, ""},
    +		{"AssignStmt.Tok", Field, 0, ""},
    +		{"AssignStmt.TokPos", Field, 0, ""},
    +		{"Bad", Const, 0, ""},
    +		{"BadDecl", Type, 0, ""},
    +		{"BadDecl.From", Field, 0, ""},
    +		{"BadDecl.To", Field, 0, ""},
    +		{"BadExpr", Type, 0, ""},
    +		{"BadExpr.From", Field, 0, ""},
    +		{"BadExpr.To", Field, 0, ""},
    +		{"BadStmt", Type, 0, ""},
    +		{"BadStmt.From", Field, 0, ""},
    +		{"BadStmt.To", Field, 0, ""},
    +		{"BasicLit", Type, 0, ""},
    +		{"BasicLit.Kind", Field, 0, ""},
    +		{"BasicLit.Value", Field, 0, ""},
    +		{"BasicLit.ValuePos", Field, 0, ""},
    +		{"BinaryExpr", Type, 0, ""},
    +		{"BinaryExpr.Op", Field, 0, ""},
    +		{"BinaryExpr.OpPos", Field, 0, ""},
    +		{"BinaryExpr.X", Field, 0, ""},
    +		{"BinaryExpr.Y", Field, 0, ""},
    +		{"BlockStmt", Type, 0, ""},
    +		{"BlockStmt.Lbrace", Field, 0, ""},
    +		{"BlockStmt.List", Field, 0, ""},
    +		{"BlockStmt.Rbrace", Field, 0, ""},
    +		{"BranchStmt", Type, 0, ""},
    +		{"BranchStmt.Label", Field, 0, ""},
    +		{"BranchStmt.Tok", Field, 0, ""},
    +		{"BranchStmt.TokPos", Field, 0, ""},
    +		{"CallExpr", Type, 0, ""},
    +		{"CallExpr.Args", Field, 0, ""},
    +		{"CallExpr.Ellipsis", Field, 0, ""},
    +		{"CallExpr.Fun", Field, 0, ""},
    +		{"CallExpr.Lparen", Field, 0, ""},
    +		{"CallExpr.Rparen", Field, 0, ""},
    +		{"CaseClause", Type, 0, ""},
    +		{"CaseClause.Body", Field, 0, ""},
    +		{"CaseClause.Case", Field, 0, ""},
    +		{"CaseClause.Colon", Field, 0, ""},
    +		{"CaseClause.List", Field, 0, ""},
    +		{"ChanDir", Type, 0, ""},
    +		{"ChanType", Type, 0, ""},
    +		{"ChanType.Arrow", Field, 1, ""},
    +		{"ChanType.Begin", Field, 0, ""},
    +		{"ChanType.Dir", Field, 0, ""},
    +		{"ChanType.Value", Field, 0, ""},
    +		{"CommClause", Type, 0, ""},
    +		{"CommClause.Body", Field, 0, ""},
    +		{"CommClause.Case", Field, 0, ""},
    +		{"CommClause.Colon", Field, 0, ""},
    +		{"CommClause.Comm", Field, 0, ""},
    +		{"Comment", Type, 0, ""},
    +		{"Comment.Slash", Field, 0, ""},
    +		{"Comment.Text", Field, 0, ""},
    +		{"CommentGroup", Type, 0, ""},
    +		{"CommentGroup.List", Field, 0, ""},
    +		{"CommentMap", Type, 1, ""},
    +		{"CompositeLit", Type, 0, ""},
    +		{"CompositeLit.Elts", Field, 0, ""},
    +		{"CompositeLit.Incomplete", Field, 11, ""},
    +		{"CompositeLit.Lbrace", Field, 0, ""},
    +		{"CompositeLit.Rbrace", Field, 0, ""},
    +		{"CompositeLit.Type", Field, 0, ""},
    +		{"Con", Const, 0, ""},
    +		{"Decl", Type, 0, ""},
    +		{"DeclStmt", Type, 0, ""},
    +		{"DeclStmt.Decl", Field, 0, ""},
    +		{"DeferStmt", Type, 0, ""},
    +		{"DeferStmt.Call", Field, 0, ""},
    +		{"DeferStmt.Defer", Field, 0, ""},
    +		{"Ellipsis", Type, 0, ""},
    +		{"Ellipsis.Ellipsis", Field, 0, ""},
    +		{"Ellipsis.Elt", Field, 0, ""},
    +		{"EmptyStmt", Type, 0, ""},
    +		{"EmptyStmt.Implicit", Field, 5, ""},
    +		{"EmptyStmt.Semicolon", Field, 0, ""},
    +		{"Expr", Type, 0, ""},
    +		{"ExprStmt", Type, 0, ""},
    +		{"ExprStmt.X", Field, 0, ""},
    +		{"Field", Type, 0, ""},
    +		{"Field.Comment", Field, 0, ""},
    +		{"Field.Doc", Field, 0, ""},
    +		{"Field.Names", Field, 0, ""},
    +		{"Field.Tag", Field, 0, ""},
    +		{"Field.Type", Field, 0, ""},
    +		{"FieldFilter", Type, 0, ""},
    +		{"FieldList", Type, 0, ""},
    +		{"FieldList.Closing", Field, 0, ""},
    +		{"FieldList.List", Field, 0, ""},
    +		{"FieldList.Opening", Field, 0, ""},
    +		{"File", Type, 0, ""},
    +		{"File.Comments", Field, 0, ""},
    +		{"File.Decls", Field, 0, ""},
    +		{"File.Doc", Field, 0, ""},
    +		{"File.FileEnd", Field, 20, ""},
    +		{"File.FileStart", Field, 20, ""},
    +		{"File.GoVersion", Field, 21, ""},
    +		{"File.Imports", Field, 0, ""},
    +		{"File.Name", Field, 0, ""},
    +		{"File.Package", Field, 0, ""},
    +		{"File.Scope", Field, 0, ""},
    +		{"File.Unresolved", Field, 0, ""},
    +		{"FileExports", Func, 0, "func(src *File) bool"},
    +		{"Filter", Type, 0, ""},
    +		{"FilterDecl", Func, 0, "func(decl Decl, f Filter) bool"},
    +		{"FilterFile", Func, 0, "func(src *File, f Filter) bool"},
    +		{"FilterFuncDuplicates", Const, 0, ""},
    +		{"FilterImportDuplicates", Const, 0, ""},
    +		{"FilterPackage", Func, 0, "func(pkg *Package, f Filter) bool"},
    +		{"FilterUnassociatedComments", Const, 0, ""},
    +		{"ForStmt", Type, 0, ""},
    +		{"ForStmt.Body", Field, 0, ""},
    +		{"ForStmt.Cond", Field, 0, ""},
    +		{"ForStmt.For", Field, 0, ""},
    +		{"ForStmt.Init", Field, 0, ""},
    +		{"ForStmt.Post", Field, 0, ""},
    +		{"Fprint", Func, 0, "func(w io.Writer, fset *token.FileSet, x any, f FieldFilter) error"},
    +		{"Fun", Const, 0, ""},
    +		{"FuncDecl", Type, 0, ""},
    +		{"FuncDecl.Body", Field, 0, ""},
    +		{"FuncDecl.Doc", Field, 0, ""},
    +		{"FuncDecl.Name", Field, 0, ""},
    +		{"FuncDecl.Recv", Field, 0, ""},
    +		{"FuncDecl.Type", Field, 0, ""},
    +		{"FuncLit", Type, 0, ""},
    +		{"FuncLit.Body", Field, 0, ""},
    +		{"FuncLit.Type", Field, 0, ""},
    +		{"FuncType", Type, 0, ""},
    +		{"FuncType.Func", Field, 0, ""},
    +		{"FuncType.Params", Field, 0, ""},
    +		{"FuncType.Results", Field, 0, ""},
    +		{"FuncType.TypeParams", Field, 18, ""},
    +		{"GenDecl", Type, 0, ""},
    +		{"GenDecl.Doc", Field, 0, ""},
    +		{"GenDecl.Lparen", Field, 0, ""},
    +		{"GenDecl.Rparen", Field, 0, ""},
    +		{"GenDecl.Specs", Field, 0, ""},
    +		{"GenDecl.Tok", Field, 0, ""},
    +		{"GenDecl.TokPos", Field, 0, ""},
    +		{"GoStmt", Type, 0, ""},
    +		{"GoStmt.Call", Field, 0, ""},
    +		{"GoStmt.Go", Field, 0, ""},
    +		{"Ident", Type, 0, ""},
    +		{"Ident.Name", Field, 0, ""},
    +		{"Ident.NamePos", Field, 0, ""},
    +		{"Ident.Obj", Field, 0, ""},
    +		{"IfStmt", Type, 0, ""},
    +		{"IfStmt.Body", Field, 0, ""},
    +		{"IfStmt.Cond", Field, 0, ""},
    +		{"IfStmt.Else", Field, 0, ""},
    +		{"IfStmt.If", Field, 0, ""},
    +		{"IfStmt.Init", Field, 0, ""},
    +		{"ImportSpec", Type, 0, ""},
    +		{"ImportSpec.Comment", Field, 0, ""},
    +		{"ImportSpec.Doc", Field, 0, ""},
    +		{"ImportSpec.EndPos", Field, 0, ""},
    +		{"ImportSpec.Name", Field, 0, ""},
    +		{"ImportSpec.Path", Field, 0, ""},
    +		{"Importer", Type, 0, ""},
    +		{"IncDecStmt", Type, 0, ""},
    +		{"IncDecStmt.Tok", Field, 0, ""},
    +		{"IncDecStmt.TokPos", Field, 0, ""},
    +		{"IncDecStmt.X", Field, 0, ""},
    +		{"IndexExpr", Type, 0, ""},
    +		{"IndexExpr.Index", Field, 0, ""},
    +		{"IndexExpr.Lbrack", Field, 0, ""},
    +		{"IndexExpr.Rbrack", Field, 0, ""},
    +		{"IndexExpr.X", Field, 0, ""},
    +		{"IndexListExpr", Type, 18, ""},
    +		{"IndexListExpr.Indices", Field, 18, ""},
    +		{"IndexListExpr.Lbrack", Field, 18, ""},
    +		{"IndexListExpr.Rbrack", Field, 18, ""},
    +		{"IndexListExpr.X", Field, 18, ""},
    +		{"Inspect", Func, 0, "func(node Node, f func(Node) bool)"},
    +		{"InterfaceType", Type, 0, ""},
    +		{"InterfaceType.Incomplete", Field, 0, ""},
    +		{"InterfaceType.Interface", Field, 0, ""},
    +		{"InterfaceType.Methods", Field, 0, ""},
    +		{"IsExported", Func, 0, "func(name string) bool"},
    +		{"IsGenerated", Func, 21, "func(file *File) bool"},
    +		{"KeyValueExpr", Type, 0, ""},
    +		{"KeyValueExpr.Colon", Field, 0, ""},
    +		{"KeyValueExpr.Key", Field, 0, ""},
    +		{"KeyValueExpr.Value", Field, 0, ""},
    +		{"LabeledStmt", Type, 0, ""},
    +		{"LabeledStmt.Colon", Field, 0, ""},
    +		{"LabeledStmt.Label", Field, 0, ""},
    +		{"LabeledStmt.Stmt", Field, 0, ""},
    +		{"Lbl", Const, 0, ""},
    +		{"MapType", Type, 0, ""},
    +		{"MapType.Key", Field, 0, ""},
    +		{"MapType.Map", Field, 0, ""},
    +		{"MapType.Value", Field, 0, ""},
    +		{"MergeMode", Type, 0, ""},
    +		{"MergePackageFiles", Func, 0, "func(pkg *Package, mode MergeMode) *File"},
    +		{"NewCommentMap", Func, 1, "func(fset *token.FileSet, node Node, comments []*CommentGroup) CommentMap"},
    +		{"NewIdent", Func, 0, "func(name string) *Ident"},
    +		{"NewObj", Func, 0, "func(kind ObjKind, name string) *Object"},
    +		{"NewPackage", Func, 0, "func(fset *token.FileSet, files map[string]*File, importer Importer, universe *Scope) (*Package, error)"},
    +		{"NewScope", Func, 0, "func(outer *Scope) *Scope"},
    +		{"Node", Type, 0, ""},
    +		{"NotNilFilter", Func, 0, "func(_ string, v reflect.Value) bool"},
    +		{"ObjKind", Type, 0, ""},
    +		{"Object", Type, 0, ""},
    +		{"Object.Data", Field, 0, ""},
    +		{"Object.Decl", Field, 0, ""},
    +		{"Object.Kind", Field, 0, ""},
    +		{"Object.Name", Field, 0, ""},
    +		{"Object.Type", Field, 0, ""},
    +		{"Package", Type, 0, ""},
    +		{"Package.Files", Field, 0, ""},
    +		{"Package.Imports", Field, 0, ""},
    +		{"Package.Name", Field, 0, ""},
    +		{"Package.Scope", Field, 0, ""},
    +		{"PackageExports", Func, 0, "func(pkg *Package) bool"},
    +		{"ParenExpr", Type, 0, ""},
    +		{"ParenExpr.Lparen", Field, 0, ""},
    +		{"ParenExpr.Rparen", Field, 0, ""},
    +		{"ParenExpr.X", Field, 0, ""},
    +		{"Pkg", Const, 0, ""},
    +		{"Preorder", Func, 23, "func(root Node) iter.Seq[Node]"},
    +		{"PreorderStack", Func, 25, "func(root Node, stack []Node, f func(n Node, stack []Node) bool)"},
    +		{"Print", Func, 0, "func(fset *token.FileSet, x any) error"},
    +		{"RECV", Const, 0, ""},
    +		{"RangeStmt", Type, 0, ""},
    +		{"RangeStmt.Body", Field, 0, ""},
    +		{"RangeStmt.For", Field, 0, ""},
    +		{"RangeStmt.Key", Field, 0, ""},
    +		{"RangeStmt.Range", Field, 20, ""},
    +		{"RangeStmt.Tok", Field, 0, ""},
    +		{"RangeStmt.TokPos", Field, 0, ""},
    +		{"RangeStmt.Value", Field, 0, ""},
    +		{"RangeStmt.X", Field, 0, ""},
    +		{"ReturnStmt", Type, 0, ""},
    +		{"ReturnStmt.Results", Field, 0, ""},
    +		{"ReturnStmt.Return", Field, 0, ""},
    +		{"SEND", Const, 0, ""},
    +		{"Scope", Type, 0, ""},
    +		{"Scope.Objects", Field, 0, ""},
    +		{"Scope.Outer", Field, 0, ""},
    +		{"SelectStmt", Type, 0, ""},
    +		{"SelectStmt.Body", Field, 0, ""},
    +		{"SelectStmt.Select", Field, 0, ""},
    +		{"SelectorExpr", Type, 0, ""},
    +		{"SelectorExpr.Sel", Field, 0, ""},
    +		{"SelectorExpr.X", Field, 0, ""},
    +		{"SendStmt", Type, 0, ""},
    +		{"SendStmt.Arrow", Field, 0, ""},
    +		{"SendStmt.Chan", Field, 0, ""},
    +		{"SendStmt.Value", Field, 0, ""},
    +		{"SliceExpr", Type, 0, ""},
    +		{"SliceExpr.High", Field, 0, ""},
    +		{"SliceExpr.Lbrack", Field, 0, ""},
    +		{"SliceExpr.Low", Field, 0, ""},
    +		{"SliceExpr.Max", Field, 2, ""},
    +		{"SliceExpr.Rbrack", Field, 0, ""},
    +		{"SliceExpr.Slice3", Field, 2, ""},
    +		{"SliceExpr.X", Field, 0, ""},
    +		{"SortImports", Func, 0, "func(fset *token.FileSet, f *File)"},
    +		{"Spec", Type, 0, ""},
    +		{"StarExpr", Type, 0, ""},
    +		{"StarExpr.Star", Field, 0, ""},
    +		{"StarExpr.X", Field, 0, ""},
    +		{"Stmt", Type, 0, ""},
    +		{"StructType", Type, 0, ""},
    +		{"StructType.Fields", Field, 0, ""},
    +		{"StructType.Incomplete", Field, 0, ""},
    +		{"StructType.Struct", Field, 0, ""},
    +		{"SwitchStmt", Type, 0, ""},
    +		{"SwitchStmt.Body", Field, 0, ""},
    +		{"SwitchStmt.Init", Field, 0, ""},
    +		{"SwitchStmt.Switch", Field, 0, ""},
    +		{"SwitchStmt.Tag", Field, 0, ""},
    +		{"Typ", Const, 0, ""},
    +		{"TypeAssertExpr", Type, 0, ""},
    +		{"TypeAssertExpr.Lparen", Field, 2, ""},
    +		{"TypeAssertExpr.Rparen", Field, 2, ""},
    +		{"TypeAssertExpr.Type", Field, 0, ""},
    +		{"TypeAssertExpr.X", Field, 0, ""},
    +		{"TypeSpec", Type, 0, ""},
    +		{"TypeSpec.Assign", Field, 9, ""},
    +		{"TypeSpec.Comment", Field, 0, ""},
    +		{"TypeSpec.Doc", Field, 0, ""},
    +		{"TypeSpec.Name", Field, 0, ""},
    +		{"TypeSpec.Type", Field, 0, ""},
    +		{"TypeSpec.TypeParams", Field, 18, ""},
    +		{"TypeSwitchStmt", Type, 0, ""},
    +		{"TypeSwitchStmt.Assign", Field, 0, ""},
    +		{"TypeSwitchStmt.Body", Field, 0, ""},
    +		{"TypeSwitchStmt.Init", Field, 0, ""},
    +		{"TypeSwitchStmt.Switch", Field, 0, ""},
    +		{"UnaryExpr", Type, 0, ""},
    +		{"UnaryExpr.Op", Field, 0, ""},
    +		{"UnaryExpr.OpPos", Field, 0, ""},
    +		{"UnaryExpr.X", Field, 0, ""},
    +		{"Unparen", Func, 22, "func(e Expr) Expr"},
    +		{"ValueSpec", Type, 0, ""},
    +		{"ValueSpec.Comment", Field, 0, ""},
    +		{"ValueSpec.Doc", Field, 0, ""},
    +		{"ValueSpec.Names", Field, 0, ""},
    +		{"ValueSpec.Type", Field, 0, ""},
    +		{"ValueSpec.Values", Field, 0, ""},
    +		{"Var", Const, 0, ""},
    +		{"Visitor", Type, 0, ""},
    +		{"Walk", Func, 0, "func(v Visitor, node Node)"},
     	},
     	"go/build": {
    -		{"(*Context).Import", Method, 0},
    -		{"(*Context).ImportDir", Method, 0},
    -		{"(*Context).MatchFile", Method, 2},
    -		{"(*Context).SrcDirs", Method, 0},
    -		{"(*MultiplePackageError).Error", Method, 4},
    -		{"(*NoGoError).Error", Method, 0},
    -		{"(*Package).IsCommand", Method, 0},
    -		{"AllowBinary", Const, 0},
    -		{"ArchChar", Func, 0},
    -		{"Context", Type, 0},
    -		{"Context.BuildTags", Field, 0},
    -		{"Context.CgoEnabled", Field, 0},
    -		{"Context.Compiler", Field, 0},
    -		{"Context.Dir", Field, 14},
    -		{"Context.GOARCH", Field, 0},
    -		{"Context.GOOS", Field, 0},
    -		{"Context.GOPATH", Field, 0},
    -		{"Context.GOROOT", Field, 0},
    -		{"Context.HasSubdir", Field, 0},
    -		{"Context.InstallSuffix", Field, 1},
    -		{"Context.IsAbsPath", Field, 0},
    -		{"Context.IsDir", Field, 0},
    -		{"Context.JoinPath", Field, 0},
    -		{"Context.OpenFile", Field, 0},
    -		{"Context.ReadDir", Field, 0},
    -		{"Context.ReleaseTags", Field, 1},
    -		{"Context.SplitPathList", Field, 0},
    -		{"Context.ToolTags", Field, 17},
    -		{"Context.UseAllFiles", Field, 0},
    -		{"Default", Var, 0},
    -		{"Directive", Type, 21},
    -		{"Directive.Pos", Field, 21},
    -		{"Directive.Text", Field, 21},
    -		{"FindOnly", Const, 0},
    -		{"IgnoreVendor", Const, 6},
    -		{"Import", Func, 0},
    -		{"ImportComment", Const, 4},
    -		{"ImportDir", Func, 0},
    -		{"ImportMode", Type, 0},
    -		{"IsLocalImport", Func, 0},
    -		{"MultiplePackageError", Type, 4},
    -		{"MultiplePackageError.Dir", Field, 4},
    -		{"MultiplePackageError.Files", Field, 4},
    -		{"MultiplePackageError.Packages", Field, 4},
    -		{"NoGoError", Type, 0},
    -		{"NoGoError.Dir", Field, 0},
    -		{"Package", Type, 0},
    -		{"Package.AllTags", Field, 2},
    -		{"Package.BinDir", Field, 0},
    -		{"Package.BinaryOnly", Field, 7},
    -		{"Package.CFiles", Field, 0},
    -		{"Package.CXXFiles", Field, 2},
    -		{"Package.CgoCFLAGS", Field, 0},
    -		{"Package.CgoCPPFLAGS", Field, 2},
    -		{"Package.CgoCXXFLAGS", Field, 2},
    -		{"Package.CgoFFLAGS", Field, 7},
    -		{"Package.CgoFiles", Field, 0},
    -		{"Package.CgoLDFLAGS", Field, 0},
    -		{"Package.CgoPkgConfig", Field, 0},
    -		{"Package.ConflictDir", Field, 2},
    -		{"Package.Dir", Field, 0},
    -		{"Package.Directives", Field, 21},
    -		{"Package.Doc", Field, 0},
    -		{"Package.EmbedPatternPos", Field, 16},
    -		{"Package.EmbedPatterns", Field, 16},
    -		{"Package.FFiles", Field, 7},
    -		{"Package.GoFiles", Field, 0},
    -		{"Package.Goroot", Field, 0},
    -		{"Package.HFiles", Field, 0},
    -		{"Package.IgnoredGoFiles", Field, 1},
    -		{"Package.IgnoredOtherFiles", Field, 16},
    -		{"Package.ImportComment", Field, 4},
    -		{"Package.ImportPath", Field, 0},
    -		{"Package.ImportPos", Field, 0},
    -		{"Package.Imports", Field, 0},
    -		{"Package.InvalidGoFiles", Field, 6},
    -		{"Package.MFiles", Field, 3},
    -		{"Package.Name", Field, 0},
    -		{"Package.PkgObj", Field, 0},
    -		{"Package.PkgRoot", Field, 0},
    -		{"Package.PkgTargetRoot", Field, 5},
    -		{"Package.Root", Field, 0},
    -		{"Package.SFiles", Field, 0},
    -		{"Package.SrcRoot", Field, 0},
    -		{"Package.SwigCXXFiles", Field, 1},
    -		{"Package.SwigFiles", Field, 1},
    -		{"Package.SysoFiles", Field, 0},
    -		{"Package.TestDirectives", Field, 21},
    -		{"Package.TestEmbedPatternPos", Field, 16},
    -		{"Package.TestEmbedPatterns", Field, 16},
    -		{"Package.TestGoFiles", Field, 0},
    -		{"Package.TestImportPos", Field, 0},
    -		{"Package.TestImports", Field, 0},
    -		{"Package.XTestDirectives", Field, 21},
    -		{"Package.XTestEmbedPatternPos", Field, 16},
    -		{"Package.XTestEmbedPatterns", Field, 16},
    -		{"Package.XTestGoFiles", Field, 0},
    -		{"Package.XTestImportPos", Field, 0},
    -		{"Package.XTestImports", Field, 0},
    -		{"ToolDir", Var, 0},
    +		{"(*Context).Import", Method, 0, ""},
    +		{"(*Context).ImportDir", Method, 0, ""},
    +		{"(*Context).MatchFile", Method, 2, ""},
    +		{"(*Context).SrcDirs", Method, 0, ""},
    +		{"(*MultiplePackageError).Error", Method, 4, ""},
    +		{"(*NoGoError).Error", Method, 0, ""},
    +		{"(*Package).IsCommand", Method, 0, ""},
    +		{"AllowBinary", Const, 0, ""},
    +		{"ArchChar", Func, 0, "func(goarch string) (string, error)"},
    +		{"Context", Type, 0, ""},
    +		{"Context.BuildTags", Field, 0, ""},
    +		{"Context.CgoEnabled", Field, 0, ""},
    +		{"Context.Compiler", Field, 0, ""},
    +		{"Context.Dir", Field, 14, ""},
    +		{"Context.GOARCH", Field, 0, ""},
    +		{"Context.GOOS", Field, 0, ""},
    +		{"Context.GOPATH", Field, 0, ""},
    +		{"Context.GOROOT", Field, 0, ""},
    +		{"Context.HasSubdir", Field, 0, ""},
    +		{"Context.InstallSuffix", Field, 1, ""},
    +		{"Context.IsAbsPath", Field, 0, ""},
    +		{"Context.IsDir", Field, 0, ""},
    +		{"Context.JoinPath", Field, 0, ""},
    +		{"Context.OpenFile", Field, 0, ""},
    +		{"Context.ReadDir", Field, 0, ""},
    +		{"Context.ReleaseTags", Field, 1, ""},
    +		{"Context.SplitPathList", Field, 0, ""},
    +		{"Context.ToolTags", Field, 17, ""},
    +		{"Context.UseAllFiles", Field, 0, ""},
    +		{"Default", Var, 0, ""},
    +		{"Directive", Type, 21, ""},
    +		{"Directive.Pos", Field, 21, ""},
    +		{"Directive.Text", Field, 21, ""},
    +		{"FindOnly", Const, 0, ""},
    +		{"IgnoreVendor", Const, 6, ""},
    +		{"Import", Func, 0, "func(path string, srcDir string, mode ImportMode) (*Package, error)"},
    +		{"ImportComment", Const, 4, ""},
    +		{"ImportDir", Func, 0, "func(dir string, mode ImportMode) (*Package, error)"},
    +		{"ImportMode", Type, 0, ""},
    +		{"IsLocalImport", Func, 0, "func(path string) bool"},
    +		{"MultiplePackageError", Type, 4, ""},
    +		{"MultiplePackageError.Dir", Field, 4, ""},
    +		{"MultiplePackageError.Files", Field, 4, ""},
    +		{"MultiplePackageError.Packages", Field, 4, ""},
    +		{"NoGoError", Type, 0, ""},
    +		{"NoGoError.Dir", Field, 0, ""},
    +		{"Package", Type, 0, ""},
    +		{"Package.AllTags", Field, 2, ""},
    +		{"Package.BinDir", Field, 0, ""},
    +		{"Package.BinaryOnly", Field, 7, ""},
    +		{"Package.CFiles", Field, 0, ""},
    +		{"Package.CXXFiles", Field, 2, ""},
    +		{"Package.CgoCFLAGS", Field, 0, ""},
    +		{"Package.CgoCPPFLAGS", Field, 2, ""},
    +		{"Package.CgoCXXFLAGS", Field, 2, ""},
    +		{"Package.CgoFFLAGS", Field, 7, ""},
    +		{"Package.CgoFiles", Field, 0, ""},
    +		{"Package.CgoLDFLAGS", Field, 0, ""},
    +		{"Package.CgoPkgConfig", Field, 0, ""},
    +		{"Package.ConflictDir", Field, 2, ""},
    +		{"Package.Dir", Field, 0, ""},
    +		{"Package.Directives", Field, 21, ""},
    +		{"Package.Doc", Field, 0, ""},
    +		{"Package.EmbedPatternPos", Field, 16, ""},
    +		{"Package.EmbedPatterns", Field, 16, ""},
    +		{"Package.FFiles", Field, 7, ""},
    +		{"Package.GoFiles", Field, 0, ""},
    +		{"Package.Goroot", Field, 0, ""},
    +		{"Package.HFiles", Field, 0, ""},
    +		{"Package.IgnoredGoFiles", Field, 1, ""},
    +		{"Package.IgnoredOtherFiles", Field, 16, ""},
    +		{"Package.ImportComment", Field, 4, ""},
    +		{"Package.ImportPath", Field, 0, ""},
    +		{"Package.ImportPos", Field, 0, ""},
    +		{"Package.Imports", Field, 0, ""},
    +		{"Package.InvalidGoFiles", Field, 6, ""},
    +		{"Package.MFiles", Field, 3, ""},
    +		{"Package.Name", Field, 0, ""},
    +		{"Package.PkgObj", Field, 0, ""},
    +		{"Package.PkgRoot", Field, 0, ""},
    +		{"Package.PkgTargetRoot", Field, 5, ""},
    +		{"Package.Root", Field, 0, ""},
    +		{"Package.SFiles", Field, 0, ""},
    +		{"Package.SrcRoot", Field, 0, ""},
    +		{"Package.SwigCXXFiles", Field, 1, ""},
    +		{"Package.SwigFiles", Field, 1, ""},
    +		{"Package.SysoFiles", Field, 0, ""},
    +		{"Package.TestDirectives", Field, 21, ""},
    +		{"Package.TestEmbedPatternPos", Field, 16, ""},
    +		{"Package.TestEmbedPatterns", Field, 16, ""},
    +		{"Package.TestGoFiles", Field, 0, ""},
    +		{"Package.TestImportPos", Field, 0, ""},
    +		{"Package.TestImports", Field, 0, ""},
    +		{"Package.XTestDirectives", Field, 21, ""},
    +		{"Package.XTestEmbedPatternPos", Field, 16, ""},
    +		{"Package.XTestEmbedPatterns", Field, 16, ""},
    +		{"Package.XTestGoFiles", Field, 0, ""},
    +		{"Package.XTestImportPos", Field, 0, ""},
    +		{"Package.XTestImports", Field, 0, ""},
    +		{"ToolDir", Var, 0, ""},
     	},
     	"go/build/constraint": {
    -		{"(*AndExpr).Eval", Method, 16},
    -		{"(*AndExpr).String", Method, 16},
    -		{"(*NotExpr).Eval", Method, 16},
    -		{"(*NotExpr).String", Method, 16},
    -		{"(*OrExpr).Eval", Method, 16},
    -		{"(*OrExpr).String", Method, 16},
    -		{"(*SyntaxError).Error", Method, 16},
    -		{"(*TagExpr).Eval", Method, 16},
    -		{"(*TagExpr).String", Method, 16},
    -		{"AndExpr", Type, 16},
    -		{"AndExpr.X", Field, 16},
    -		{"AndExpr.Y", Field, 16},
    -		{"Expr", Type, 16},
    -		{"GoVersion", Func, 21},
    -		{"IsGoBuild", Func, 16},
    -		{"IsPlusBuild", Func, 16},
    -		{"NotExpr", Type, 16},
    -		{"NotExpr.X", Field, 16},
    -		{"OrExpr", Type, 16},
    -		{"OrExpr.X", Field, 16},
    -		{"OrExpr.Y", Field, 16},
    -		{"Parse", Func, 16},
    -		{"PlusBuildLines", Func, 16},
    -		{"SyntaxError", Type, 16},
    -		{"SyntaxError.Err", Field, 16},
    -		{"SyntaxError.Offset", Field, 16},
    -		{"TagExpr", Type, 16},
    -		{"TagExpr.Tag", Field, 16},
    +		{"(*AndExpr).Eval", Method, 16, ""},
    +		{"(*AndExpr).String", Method, 16, ""},
    +		{"(*NotExpr).Eval", Method, 16, ""},
    +		{"(*NotExpr).String", Method, 16, ""},
    +		{"(*OrExpr).Eval", Method, 16, ""},
    +		{"(*OrExpr).String", Method, 16, ""},
    +		{"(*SyntaxError).Error", Method, 16, ""},
    +		{"(*TagExpr).Eval", Method, 16, ""},
    +		{"(*TagExpr).String", Method, 16, ""},
    +		{"AndExpr", Type, 16, ""},
    +		{"AndExpr.X", Field, 16, ""},
    +		{"AndExpr.Y", Field, 16, ""},
    +		{"Expr", Type, 16, ""},
    +		{"GoVersion", Func, 21, "func(x Expr) string"},
    +		{"IsGoBuild", Func, 16, "func(line string) bool"},
    +		{"IsPlusBuild", Func, 16, "func(line string) bool"},
    +		{"NotExpr", Type, 16, ""},
    +		{"NotExpr.X", Field, 16, ""},
    +		{"OrExpr", Type, 16, ""},
    +		{"OrExpr.X", Field, 16, ""},
    +		{"OrExpr.Y", Field, 16, ""},
    +		{"Parse", Func, 16, "func(line string) (Expr, error)"},
    +		{"PlusBuildLines", Func, 16, "func(x Expr) ([]string, error)"},
    +		{"SyntaxError", Type, 16, ""},
    +		{"SyntaxError.Err", Field, 16, ""},
    +		{"SyntaxError.Offset", Field, 16, ""},
    +		{"TagExpr", Type, 16, ""},
    +		{"TagExpr.Tag", Field, 16, ""},
     	},
     	"go/constant": {
    -		{"(Kind).String", Method, 18},
    -		{"BinaryOp", Func, 5},
    -		{"BitLen", Func, 5},
    -		{"Bool", Const, 5},
    -		{"BoolVal", Func, 5},
    -		{"Bytes", Func, 5},
    -		{"Compare", Func, 5},
    -		{"Complex", Const, 5},
    -		{"Denom", Func, 5},
    -		{"Float", Const, 5},
    -		{"Float32Val", Func, 5},
    -		{"Float64Val", Func, 5},
    -		{"Imag", Func, 5},
    -		{"Int", Const, 5},
    -		{"Int64Val", Func, 5},
    -		{"Kind", Type, 5},
    -		{"Make", Func, 13},
    -		{"MakeBool", Func, 5},
    -		{"MakeFloat64", Func, 5},
    -		{"MakeFromBytes", Func, 5},
    -		{"MakeFromLiteral", Func, 5},
    -		{"MakeImag", Func, 5},
    -		{"MakeInt64", Func, 5},
    -		{"MakeString", Func, 5},
    -		{"MakeUint64", Func, 5},
    -		{"MakeUnknown", Func, 5},
    -		{"Num", Func, 5},
    -		{"Real", Func, 5},
    -		{"Shift", Func, 5},
    -		{"Sign", Func, 5},
    -		{"String", Const, 5},
    -		{"StringVal", Func, 5},
    -		{"ToComplex", Func, 6},
    -		{"ToFloat", Func, 6},
    -		{"ToInt", Func, 6},
    -		{"Uint64Val", Func, 5},
    -		{"UnaryOp", Func, 5},
    -		{"Unknown", Const, 5},
    -		{"Val", Func, 13},
    -		{"Value", Type, 5},
    +		{"(Kind).String", Method, 18, ""},
    +		{"BinaryOp", Func, 5, "func(x_ Value, op token.Token, y_ Value) Value"},
    +		{"BitLen", Func, 5, "func(x Value) int"},
    +		{"Bool", Const, 5, ""},
    +		{"BoolVal", Func, 5, "func(x Value) bool"},
    +		{"Bytes", Func, 5, "func(x Value) []byte"},
    +		{"Compare", Func, 5, "func(x_ Value, op token.Token, y_ Value) bool"},
    +		{"Complex", Const, 5, ""},
    +		{"Denom", Func, 5, "func(x Value) Value"},
    +		{"Float", Const, 5, ""},
    +		{"Float32Val", Func, 5, "func(x Value) (float32, bool)"},
    +		{"Float64Val", Func, 5, "func(x Value) (float64, bool)"},
    +		{"Imag", Func, 5, "func(x Value) Value"},
    +		{"Int", Const, 5, ""},
    +		{"Int64Val", Func, 5, "func(x Value) (int64, bool)"},
    +		{"Kind", Type, 5, ""},
    +		{"Make", Func, 13, "func(x any) Value"},
    +		{"MakeBool", Func, 5, "func(b bool) Value"},
    +		{"MakeFloat64", Func, 5, "func(x float64) Value"},
    +		{"MakeFromBytes", Func, 5, "func(bytes []byte) Value"},
    +		{"MakeFromLiteral", Func, 5, "func(lit string, tok token.Token, zero uint) Value"},
    +		{"MakeImag", Func, 5, "func(x Value) Value"},
    +		{"MakeInt64", Func, 5, "func(x int64) Value"},
    +		{"MakeString", Func, 5, "func(s string) Value"},
    +		{"MakeUint64", Func, 5, "func(x uint64) Value"},
    +		{"MakeUnknown", Func, 5, "func() Value"},
    +		{"Num", Func, 5, "func(x Value) Value"},
    +		{"Real", Func, 5, "func(x Value) Value"},
    +		{"Shift", Func, 5, "func(x Value, op token.Token, s uint) Value"},
    +		{"Sign", Func, 5, "func(x Value) int"},
    +		{"String", Const, 5, ""},
    +		{"StringVal", Func, 5, "func(x Value) string"},
    +		{"ToComplex", Func, 6, "func(x Value) Value"},
    +		{"ToFloat", Func, 6, "func(x Value) Value"},
    +		{"ToInt", Func, 6, "func(x Value) Value"},
    +		{"Uint64Val", Func, 5, "func(x Value) (uint64, bool)"},
    +		{"UnaryOp", Func, 5, "func(op token.Token, y Value, prec uint) Value"},
    +		{"Unknown", Const, 5, ""},
    +		{"Val", Func, 13, "func(x Value) any"},
    +		{"Value", Type, 5, ""},
     	},
     	"go/doc": {
    -		{"(*Package).Filter", Method, 0},
    -		{"(*Package).HTML", Method, 19},
    -		{"(*Package).Markdown", Method, 19},
    -		{"(*Package).Parser", Method, 19},
    -		{"(*Package).Printer", Method, 19},
    -		{"(*Package).Synopsis", Method, 19},
    -		{"(*Package).Text", Method, 19},
    -		{"AllDecls", Const, 0},
    -		{"AllMethods", Const, 0},
    -		{"Example", Type, 0},
    -		{"Example.Code", Field, 0},
    -		{"Example.Comments", Field, 0},
    -		{"Example.Doc", Field, 0},
    -		{"Example.EmptyOutput", Field, 1},
    -		{"Example.Name", Field, 0},
    -		{"Example.Order", Field, 1},
    -		{"Example.Output", Field, 0},
    -		{"Example.Play", Field, 1},
    -		{"Example.Suffix", Field, 14},
    -		{"Example.Unordered", Field, 7},
    -		{"Examples", Func, 0},
    -		{"Filter", Type, 0},
    -		{"Func", Type, 0},
    -		{"Func.Decl", Field, 0},
    -		{"Func.Doc", Field, 0},
    -		{"Func.Examples", Field, 14},
    -		{"Func.Level", Field, 0},
    -		{"Func.Name", Field, 0},
    -		{"Func.Orig", Field, 0},
    -		{"Func.Recv", Field, 0},
    -		{"IllegalPrefixes", Var, 1},
    -		{"IsPredeclared", Func, 8},
    -		{"Mode", Type, 0},
    -		{"New", Func, 0},
    -		{"NewFromFiles", Func, 14},
    -		{"Note", Type, 1},
    -		{"Note.Body", Field, 1},
    -		{"Note.End", Field, 1},
    -		{"Note.Pos", Field, 1},
    -		{"Note.UID", Field, 1},
    -		{"Package", Type, 0},
    -		{"Package.Bugs", Field, 0},
    -		{"Package.Consts", Field, 0},
    -		{"Package.Doc", Field, 0},
    -		{"Package.Examples", Field, 14},
    -		{"Package.Filenames", Field, 0},
    -		{"Package.Funcs", Field, 0},
    -		{"Package.ImportPath", Field, 0},
    -		{"Package.Imports", Field, 0},
    -		{"Package.Name", Field, 0},
    -		{"Package.Notes", Field, 1},
    -		{"Package.Types", Field, 0},
    -		{"Package.Vars", Field, 0},
    -		{"PreserveAST", Const, 12},
    -		{"Synopsis", Func, 0},
    -		{"ToHTML", Func, 0},
    -		{"ToText", Func, 0},
    -		{"Type", Type, 0},
    -		{"Type.Consts", Field, 0},
    -		{"Type.Decl", Field, 0},
    -		{"Type.Doc", Field, 0},
    -		{"Type.Examples", Field, 14},
    -		{"Type.Funcs", Field, 0},
    -		{"Type.Methods", Field, 0},
    -		{"Type.Name", Field, 0},
    -		{"Type.Vars", Field, 0},
    -		{"Value", Type, 0},
    -		{"Value.Decl", Field, 0},
    -		{"Value.Doc", Field, 0},
    -		{"Value.Names", Field, 0},
    +		{"(*Package).Filter", Method, 0, ""},
    +		{"(*Package).HTML", Method, 19, ""},
    +		{"(*Package).Markdown", Method, 19, ""},
    +		{"(*Package).Parser", Method, 19, ""},
    +		{"(*Package).Printer", Method, 19, ""},
    +		{"(*Package).Synopsis", Method, 19, ""},
    +		{"(*Package).Text", Method, 19, ""},
    +		{"AllDecls", Const, 0, ""},
    +		{"AllMethods", Const, 0, ""},
    +		{"Example", Type, 0, ""},
    +		{"Example.Code", Field, 0, ""},
    +		{"Example.Comments", Field, 0, ""},
    +		{"Example.Doc", Field, 0, ""},
    +		{"Example.EmptyOutput", Field, 1, ""},
    +		{"Example.Name", Field, 0, ""},
    +		{"Example.Order", Field, 1, ""},
    +		{"Example.Output", Field, 0, ""},
    +		{"Example.Play", Field, 1, ""},
    +		{"Example.Suffix", Field, 14, ""},
    +		{"Example.Unordered", Field, 7, ""},
    +		{"Examples", Func, 0, "func(testFiles ...*ast.File) []*Example"},
    +		{"Filter", Type, 0, ""},
    +		{"Func", Type, 0, ""},
    +		{"Func.Decl", Field, 0, ""},
    +		{"Func.Doc", Field, 0, ""},
    +		{"Func.Examples", Field, 14, ""},
    +		{"Func.Level", Field, 0, ""},
    +		{"Func.Name", Field, 0, ""},
    +		{"Func.Orig", Field, 0, ""},
    +		{"Func.Recv", Field, 0, ""},
    +		{"IllegalPrefixes", Var, 1, ""},
    +		{"IsPredeclared", Func, 8, "func(s string) bool"},
    +		{"Mode", Type, 0, ""},
    +		{"New", Func, 0, "func(pkg *ast.Package, importPath string, mode Mode) *Package"},
    +		{"NewFromFiles", Func, 14, "func(fset *token.FileSet, files []*ast.File, importPath string, opts ...any) (*Package, error)"},
    +		{"Note", Type, 1, ""},
    +		{"Note.Body", Field, 1, ""},
    +		{"Note.End", Field, 1, ""},
    +		{"Note.Pos", Field, 1, ""},
    +		{"Note.UID", Field, 1, ""},
    +		{"Package", Type, 0, ""},
    +		{"Package.Bugs", Field, 0, ""},
    +		{"Package.Consts", Field, 0, ""},
    +		{"Package.Doc", Field, 0, ""},
    +		{"Package.Examples", Field, 14, ""},
    +		{"Package.Filenames", Field, 0, ""},
    +		{"Package.Funcs", Field, 0, ""},
    +		{"Package.ImportPath", Field, 0, ""},
    +		{"Package.Imports", Field, 0, ""},
    +		{"Package.Name", Field, 0, ""},
    +		{"Package.Notes", Field, 1, ""},
    +		{"Package.Types", Field, 0, ""},
    +		{"Package.Vars", Field, 0, ""},
    +		{"PreserveAST", Const, 12, ""},
    +		{"Synopsis", Func, 0, "func(text string) string"},
    +		{"ToHTML", Func, 0, "func(w io.Writer, text string, words map[string]string)"},
    +		{"ToText", Func, 0, "func(w io.Writer, text string, prefix string, codePrefix string, width int)"},
    +		{"Type", Type, 0, ""},
    +		{"Type.Consts", Field, 0, ""},
    +		{"Type.Decl", Field, 0, ""},
    +		{"Type.Doc", Field, 0, ""},
    +		{"Type.Examples", Field, 14, ""},
    +		{"Type.Funcs", Field, 0, ""},
    +		{"Type.Methods", Field, 0, ""},
    +		{"Type.Name", Field, 0, ""},
    +		{"Type.Vars", Field, 0, ""},
    +		{"Value", Type, 0, ""},
    +		{"Value.Decl", Field, 0, ""},
    +		{"Value.Doc", Field, 0, ""},
    +		{"Value.Names", Field, 0, ""},
     	},
     	"go/doc/comment": {
    -		{"(*DocLink).DefaultURL", Method, 19},
    -		{"(*Heading).DefaultID", Method, 19},
    -		{"(*List).BlankBefore", Method, 19},
    -		{"(*List).BlankBetween", Method, 19},
    -		{"(*Parser).Parse", Method, 19},
    -		{"(*Printer).Comment", Method, 19},
    -		{"(*Printer).HTML", Method, 19},
    -		{"(*Printer).Markdown", Method, 19},
    -		{"(*Printer).Text", Method, 19},
    -		{"Block", Type, 19},
    -		{"Code", Type, 19},
    -		{"Code.Text", Field, 19},
    -		{"DefaultLookupPackage", Func, 19},
    -		{"Doc", Type, 19},
    -		{"Doc.Content", Field, 19},
    -		{"Doc.Links", Field, 19},
    -		{"DocLink", Type, 19},
    -		{"DocLink.ImportPath", Field, 19},
    -		{"DocLink.Name", Field, 19},
    -		{"DocLink.Recv", Field, 19},
    -		{"DocLink.Text", Field, 19},
    -		{"Heading", Type, 19},
    -		{"Heading.Text", Field, 19},
    -		{"Italic", Type, 19},
    -		{"Link", Type, 19},
    -		{"Link.Auto", Field, 19},
    -		{"Link.Text", Field, 19},
    -		{"Link.URL", Field, 19},
    -		{"LinkDef", Type, 19},
    -		{"LinkDef.Text", Field, 19},
    -		{"LinkDef.URL", Field, 19},
    -		{"LinkDef.Used", Field, 19},
    -		{"List", Type, 19},
    -		{"List.ForceBlankBefore", Field, 19},
    -		{"List.ForceBlankBetween", Field, 19},
    -		{"List.Items", Field, 19},
    -		{"ListItem", Type, 19},
    -		{"ListItem.Content", Field, 19},
    -		{"ListItem.Number", Field, 19},
    -		{"Paragraph", Type, 19},
    -		{"Paragraph.Text", Field, 19},
    -		{"Parser", Type, 19},
    -		{"Parser.LookupPackage", Field, 19},
    -		{"Parser.LookupSym", Field, 19},
    -		{"Parser.Words", Field, 19},
    -		{"Plain", Type, 19},
    -		{"Printer", Type, 19},
    -		{"Printer.DocLinkBaseURL", Field, 19},
    -		{"Printer.DocLinkURL", Field, 19},
    -		{"Printer.HeadingID", Field, 19},
    -		{"Printer.HeadingLevel", Field, 19},
    -		{"Printer.TextCodePrefix", Field, 19},
    -		{"Printer.TextPrefix", Field, 19},
    -		{"Printer.TextWidth", Field, 19},
    -		{"Text", Type, 19},
    +		{"(*DocLink).DefaultURL", Method, 19, ""},
    +		{"(*Heading).DefaultID", Method, 19, ""},
    +		{"(*List).BlankBefore", Method, 19, ""},
    +		{"(*List).BlankBetween", Method, 19, ""},
    +		{"(*Parser).Parse", Method, 19, ""},
    +		{"(*Printer).Comment", Method, 19, ""},
    +		{"(*Printer).HTML", Method, 19, ""},
    +		{"(*Printer).Markdown", Method, 19, ""},
    +		{"(*Printer).Text", Method, 19, ""},
    +		{"Block", Type, 19, ""},
    +		{"Code", Type, 19, ""},
    +		{"Code.Text", Field, 19, ""},
    +		{"DefaultLookupPackage", Func, 19, "func(name string) (importPath string, ok bool)"},
    +		{"Doc", Type, 19, ""},
    +		{"Doc.Content", Field, 19, ""},
    +		{"Doc.Links", Field, 19, ""},
    +		{"DocLink", Type, 19, ""},
    +		{"DocLink.ImportPath", Field, 19, ""},
    +		{"DocLink.Name", Field, 19, ""},
    +		{"DocLink.Recv", Field, 19, ""},
    +		{"DocLink.Text", Field, 19, ""},
    +		{"Heading", Type, 19, ""},
    +		{"Heading.Text", Field, 19, ""},
    +		{"Italic", Type, 19, ""},
    +		{"Link", Type, 19, ""},
    +		{"Link.Auto", Field, 19, ""},
    +		{"Link.Text", Field, 19, ""},
    +		{"Link.URL", Field, 19, ""},
    +		{"LinkDef", Type, 19, ""},
    +		{"LinkDef.Text", Field, 19, ""},
    +		{"LinkDef.URL", Field, 19, ""},
    +		{"LinkDef.Used", Field, 19, ""},
    +		{"List", Type, 19, ""},
    +		{"List.ForceBlankBefore", Field, 19, ""},
    +		{"List.ForceBlankBetween", Field, 19, ""},
    +		{"List.Items", Field, 19, ""},
    +		{"ListItem", Type, 19, ""},
    +		{"ListItem.Content", Field, 19, ""},
    +		{"ListItem.Number", Field, 19, ""},
    +		{"Paragraph", Type, 19, ""},
    +		{"Paragraph.Text", Field, 19, ""},
    +		{"Parser", Type, 19, ""},
    +		{"Parser.LookupPackage", Field, 19, ""},
    +		{"Parser.LookupSym", Field, 19, ""},
    +		{"Parser.Words", Field, 19, ""},
    +		{"Plain", Type, 19, ""},
    +		{"Printer", Type, 19, ""},
    +		{"Printer.DocLinkBaseURL", Field, 19, ""},
    +		{"Printer.DocLinkURL", Field, 19, ""},
    +		{"Printer.HeadingID", Field, 19, ""},
    +		{"Printer.HeadingLevel", Field, 19, ""},
    +		{"Printer.TextCodePrefix", Field, 19, ""},
    +		{"Printer.TextPrefix", Field, 19, ""},
    +		{"Printer.TextWidth", Field, 19, ""},
    +		{"Text", Type, 19, ""},
     	},
     	"go/format": {
    -		{"Node", Func, 1},
    -		{"Source", Func, 1},
    +		{"Node", Func, 1, "func(dst io.Writer, fset *token.FileSet, node any) error"},
    +		{"Source", Func, 1, "func(src []byte) ([]byte, error)"},
     	},
     	"go/importer": {
    -		{"Default", Func, 5},
    -		{"For", Func, 5},
    -		{"ForCompiler", Func, 12},
    -		{"Lookup", Type, 5},
    +		{"Default", Func, 5, "func() types.Importer"},
    +		{"For", Func, 5, "func(compiler string, lookup Lookup) types.Importer"},
    +		{"ForCompiler", Func, 12, "func(fset *token.FileSet, compiler string, lookup Lookup) types.Importer"},
    +		{"Lookup", Type, 5, ""},
     	},
     	"go/parser": {
    -		{"AllErrors", Const, 1},
    -		{"DeclarationErrors", Const, 0},
    -		{"ImportsOnly", Const, 0},
    -		{"Mode", Type, 0},
    -		{"PackageClauseOnly", Const, 0},
    -		{"ParseComments", Const, 0},
    -		{"ParseDir", Func, 0},
    -		{"ParseExpr", Func, 0},
    -		{"ParseExprFrom", Func, 5},
    -		{"ParseFile", Func, 0},
    -		{"SkipObjectResolution", Const, 17},
    -		{"SpuriousErrors", Const, 0},
    -		{"Trace", Const, 0},
    +		{"AllErrors", Const, 1, ""},
    +		{"DeclarationErrors", Const, 0, ""},
    +		{"ImportsOnly", Const, 0, ""},
    +		{"Mode", Type, 0, ""},
    +		{"PackageClauseOnly", Const, 0, ""},
    +		{"ParseComments", Const, 0, ""},
    +		{"ParseDir", Func, 0, "func(fset *token.FileSet, path string, filter func(fs.FileInfo) bool, mode Mode) (pkgs map[string]*ast.Package, first error)"},
    +		{"ParseExpr", Func, 0, "func(x string) (ast.Expr, error)"},
    +		{"ParseExprFrom", Func, 5, "func(fset *token.FileSet, filename string, src any, mode Mode) (expr ast.Expr, err error)"},
    +		{"ParseFile", Func, 0, "func(fset *token.FileSet, filename string, src any, mode Mode) (f *ast.File, err error)"},
    +		{"SkipObjectResolution", Const, 17, ""},
    +		{"SpuriousErrors", Const, 0, ""},
    +		{"Trace", Const, 0, ""},
     	},
     	"go/printer": {
    -		{"(*Config).Fprint", Method, 0},
    -		{"CommentedNode", Type, 0},
    -		{"CommentedNode.Comments", Field, 0},
    -		{"CommentedNode.Node", Field, 0},
    -		{"Config", Type, 0},
    -		{"Config.Indent", Field, 1},
    -		{"Config.Mode", Field, 0},
    -		{"Config.Tabwidth", Field, 0},
    -		{"Fprint", Func, 0},
    -		{"Mode", Type, 0},
    -		{"RawFormat", Const, 0},
    -		{"SourcePos", Const, 0},
    -		{"TabIndent", Const, 0},
    -		{"UseSpaces", Const, 0},
    +		{"(*Config).Fprint", Method, 0, ""},
    +		{"CommentedNode", Type, 0, ""},
    +		{"CommentedNode.Comments", Field, 0, ""},
    +		{"CommentedNode.Node", Field, 0, ""},
    +		{"Config", Type, 0, ""},
    +		{"Config.Indent", Field, 1, ""},
    +		{"Config.Mode", Field, 0, ""},
    +		{"Config.Tabwidth", Field, 0, ""},
    +		{"Fprint", Func, 0, "func(output io.Writer, fset *token.FileSet, node any) error"},
    +		{"Mode", Type, 0, ""},
    +		{"RawFormat", Const, 0, ""},
    +		{"SourcePos", Const, 0, ""},
    +		{"TabIndent", Const, 0, ""},
    +		{"UseSpaces", Const, 0, ""},
     	},
     	"go/scanner": {
    -		{"(*ErrorList).Add", Method, 0},
    -		{"(*ErrorList).RemoveMultiples", Method, 0},
    -		{"(*ErrorList).Reset", Method, 0},
    -		{"(*Scanner).Init", Method, 0},
    -		{"(*Scanner).Scan", Method, 0},
    -		{"(Error).Error", Method, 0},
    -		{"(ErrorList).Err", Method, 0},
    -		{"(ErrorList).Error", Method, 0},
    -		{"(ErrorList).Len", Method, 0},
    -		{"(ErrorList).Less", Method, 0},
    -		{"(ErrorList).Sort", Method, 0},
    -		{"(ErrorList).Swap", Method, 0},
    -		{"Error", Type, 0},
    -		{"Error.Msg", Field, 0},
    -		{"Error.Pos", Field, 0},
    -		{"ErrorHandler", Type, 0},
    -		{"ErrorList", Type, 0},
    -		{"Mode", Type, 0},
    -		{"PrintError", Func, 0},
    -		{"ScanComments", Const, 0},
    -		{"Scanner", Type, 0},
    -		{"Scanner.ErrorCount", Field, 0},
    +		{"(*ErrorList).Add", Method, 0, ""},
    +		{"(*ErrorList).RemoveMultiples", Method, 0, ""},
    +		{"(*ErrorList).Reset", Method, 0, ""},
    +		{"(*Scanner).Init", Method, 0, ""},
    +		{"(*Scanner).Scan", Method, 0, ""},
    +		{"(Error).Error", Method, 0, ""},
    +		{"(ErrorList).Err", Method, 0, ""},
    +		{"(ErrorList).Error", Method, 0, ""},
    +		{"(ErrorList).Len", Method, 0, ""},
    +		{"(ErrorList).Less", Method, 0, ""},
    +		{"(ErrorList).Sort", Method, 0, ""},
    +		{"(ErrorList).Swap", Method, 0, ""},
    +		{"Error", Type, 0, ""},
    +		{"Error.Msg", Field, 0, ""},
    +		{"Error.Pos", Field, 0, ""},
    +		{"ErrorHandler", Type, 0, ""},
    +		{"ErrorList", Type, 0, ""},
    +		{"Mode", Type, 0, ""},
    +		{"PrintError", Func, 0, "func(w io.Writer, err error)"},
    +		{"ScanComments", Const, 0, ""},
    +		{"Scanner", Type, 0, ""},
    +		{"Scanner.ErrorCount", Field, 0, ""},
     	},
     	"go/token": {
    -		{"(*File).AddLine", Method, 0},
    -		{"(*File).AddLineColumnInfo", Method, 11},
    -		{"(*File).AddLineInfo", Method, 0},
    -		{"(*File).Base", Method, 0},
    -		{"(*File).Line", Method, 0},
    -		{"(*File).LineCount", Method, 0},
    -		{"(*File).LineStart", Method, 12},
    -		{"(*File).Lines", Method, 21},
    -		{"(*File).MergeLine", Method, 2},
    -		{"(*File).Name", Method, 0},
    -		{"(*File).Offset", Method, 0},
    -		{"(*File).Pos", Method, 0},
    -		{"(*File).Position", Method, 0},
    -		{"(*File).PositionFor", Method, 4},
    -		{"(*File).SetLines", Method, 0},
    -		{"(*File).SetLinesForContent", Method, 0},
    -		{"(*File).Size", Method, 0},
    -		{"(*FileSet).AddFile", Method, 0},
    -		{"(*FileSet).Base", Method, 0},
    -		{"(*FileSet).File", Method, 0},
    -		{"(*FileSet).Iterate", Method, 0},
    -		{"(*FileSet).Position", Method, 0},
    -		{"(*FileSet).PositionFor", Method, 4},
    -		{"(*FileSet).Read", Method, 0},
    -		{"(*FileSet).RemoveFile", Method, 20},
    -		{"(*FileSet).Write", Method, 0},
    -		{"(*Position).IsValid", Method, 0},
    -		{"(Pos).IsValid", Method, 0},
    -		{"(Position).String", Method, 0},
    -		{"(Token).IsKeyword", Method, 0},
    -		{"(Token).IsLiteral", Method, 0},
    -		{"(Token).IsOperator", Method, 0},
    -		{"(Token).Precedence", Method, 0},
    -		{"(Token).String", Method, 0},
    -		{"ADD", Const, 0},
    -		{"ADD_ASSIGN", Const, 0},
    -		{"AND", Const, 0},
    -		{"AND_ASSIGN", Const, 0},
    -		{"AND_NOT", Const, 0},
    -		{"AND_NOT_ASSIGN", Const, 0},
    -		{"ARROW", Const, 0},
    -		{"ASSIGN", Const, 0},
    -		{"BREAK", Const, 0},
    -		{"CASE", Const, 0},
    -		{"CHAN", Const, 0},
    -		{"CHAR", Const, 0},
    -		{"COLON", Const, 0},
    -		{"COMMA", Const, 0},
    -		{"COMMENT", Const, 0},
    -		{"CONST", Const, 0},
    -		{"CONTINUE", Const, 0},
    -		{"DEC", Const, 0},
    -		{"DEFAULT", Const, 0},
    -		{"DEFER", Const, 0},
    -		{"DEFINE", Const, 0},
    -		{"ELLIPSIS", Const, 0},
    -		{"ELSE", Const, 0},
    -		{"EOF", Const, 0},
    -		{"EQL", Const, 0},
    -		{"FALLTHROUGH", Const, 0},
    -		{"FLOAT", Const, 0},
    -		{"FOR", Const, 0},
    -		{"FUNC", Const, 0},
    -		{"File", Type, 0},
    -		{"FileSet", Type, 0},
    -		{"GEQ", Const, 0},
    -		{"GO", Const, 0},
    -		{"GOTO", Const, 0},
    -		{"GTR", Const, 0},
    -		{"HighestPrec", Const, 0},
    -		{"IDENT", Const, 0},
    -		{"IF", Const, 0},
    -		{"ILLEGAL", Const, 0},
    -		{"IMAG", Const, 0},
    -		{"IMPORT", Const, 0},
    -		{"INC", Const, 0},
    -		{"INT", Const, 0},
    -		{"INTERFACE", Const, 0},
    -		{"IsExported", Func, 13},
    -		{"IsIdentifier", Func, 13},
    -		{"IsKeyword", Func, 13},
    -		{"LAND", Const, 0},
    -		{"LBRACE", Const, 0},
    -		{"LBRACK", Const, 0},
    -		{"LEQ", Const, 0},
    -		{"LOR", Const, 0},
    -		{"LPAREN", Const, 0},
    -		{"LSS", Const, 0},
    -		{"Lookup", Func, 0},
    -		{"LowestPrec", Const, 0},
    -		{"MAP", Const, 0},
    -		{"MUL", Const, 0},
    -		{"MUL_ASSIGN", Const, 0},
    -		{"NEQ", Const, 0},
    -		{"NOT", Const, 0},
    -		{"NewFileSet", Func, 0},
    -		{"NoPos", Const, 0},
    -		{"OR", Const, 0},
    -		{"OR_ASSIGN", Const, 0},
    -		{"PACKAGE", Const, 0},
    -		{"PERIOD", Const, 0},
    -		{"Pos", Type, 0},
    -		{"Position", Type, 0},
    -		{"Position.Column", Field, 0},
    -		{"Position.Filename", Field, 0},
    -		{"Position.Line", Field, 0},
    -		{"Position.Offset", Field, 0},
    -		{"QUO", Const, 0},
    -		{"QUO_ASSIGN", Const, 0},
    -		{"RANGE", Const, 0},
    -		{"RBRACE", Const, 0},
    -		{"RBRACK", Const, 0},
    -		{"REM", Const, 0},
    -		{"REM_ASSIGN", Const, 0},
    -		{"RETURN", Const, 0},
    -		{"RPAREN", Const, 0},
    -		{"SELECT", Const, 0},
    -		{"SEMICOLON", Const, 0},
    -		{"SHL", Const, 0},
    -		{"SHL_ASSIGN", Const, 0},
    -		{"SHR", Const, 0},
    -		{"SHR_ASSIGN", Const, 0},
    -		{"STRING", Const, 0},
    -		{"STRUCT", Const, 0},
    -		{"SUB", Const, 0},
    -		{"SUB_ASSIGN", Const, 0},
    -		{"SWITCH", Const, 0},
    -		{"TILDE", Const, 18},
    -		{"TYPE", Const, 0},
    -		{"Token", Type, 0},
    -		{"UnaryPrec", Const, 0},
    -		{"VAR", Const, 0},
    -		{"XOR", Const, 0},
    -		{"XOR_ASSIGN", Const, 0},
    +		{"(*File).AddLine", Method, 0, ""},
    +		{"(*File).AddLineColumnInfo", Method, 11, ""},
    +		{"(*File).AddLineInfo", Method, 0, ""},
    +		{"(*File).Base", Method, 0, ""},
    +		{"(*File).Line", Method, 0, ""},
    +		{"(*File).LineCount", Method, 0, ""},
    +		{"(*File).LineStart", Method, 12, ""},
    +		{"(*File).Lines", Method, 21, ""},
    +		{"(*File).MergeLine", Method, 2, ""},
    +		{"(*File).Name", Method, 0, ""},
    +		{"(*File).Offset", Method, 0, ""},
    +		{"(*File).Pos", Method, 0, ""},
    +		{"(*File).Position", Method, 0, ""},
    +		{"(*File).PositionFor", Method, 4, ""},
    +		{"(*File).SetLines", Method, 0, ""},
    +		{"(*File).SetLinesForContent", Method, 0, ""},
    +		{"(*File).Size", Method, 0, ""},
    +		{"(*FileSet).AddExistingFiles", Method, 25, ""},
    +		{"(*FileSet).AddFile", Method, 0, ""},
    +		{"(*FileSet).Base", Method, 0, ""},
    +		{"(*FileSet).File", Method, 0, ""},
    +		{"(*FileSet).Iterate", Method, 0, ""},
    +		{"(*FileSet).Position", Method, 0, ""},
    +		{"(*FileSet).PositionFor", Method, 4, ""},
    +		{"(*FileSet).Read", Method, 0, ""},
    +		{"(*FileSet).RemoveFile", Method, 20, ""},
    +		{"(*FileSet).Write", Method, 0, ""},
    +		{"(*Position).IsValid", Method, 0, ""},
    +		{"(Pos).IsValid", Method, 0, ""},
    +		{"(Position).String", Method, 0, ""},
    +		{"(Token).IsKeyword", Method, 0, ""},
    +		{"(Token).IsLiteral", Method, 0, ""},
    +		{"(Token).IsOperator", Method, 0, ""},
    +		{"(Token).Precedence", Method, 0, ""},
    +		{"(Token).String", Method, 0, ""},
    +		{"ADD", Const, 0, ""},
    +		{"ADD_ASSIGN", Const, 0, ""},
    +		{"AND", Const, 0, ""},
    +		{"AND_ASSIGN", Const, 0, ""},
    +		{"AND_NOT", Const, 0, ""},
    +		{"AND_NOT_ASSIGN", Const, 0, ""},
    +		{"ARROW", Const, 0, ""},
    +		{"ASSIGN", Const, 0, ""},
    +		{"BREAK", Const, 0, ""},
    +		{"CASE", Const, 0, ""},
    +		{"CHAN", Const, 0, ""},
    +		{"CHAR", Const, 0, ""},
    +		{"COLON", Const, 0, ""},
    +		{"COMMA", Const, 0, ""},
    +		{"COMMENT", Const, 0, ""},
    +		{"CONST", Const, 0, ""},
    +		{"CONTINUE", Const, 0, ""},
    +		{"DEC", Const, 0, ""},
    +		{"DEFAULT", Const, 0, ""},
    +		{"DEFER", Const, 0, ""},
    +		{"DEFINE", Const, 0, ""},
    +		{"ELLIPSIS", Const, 0, ""},
    +		{"ELSE", Const, 0, ""},
    +		{"EOF", Const, 0, ""},
    +		{"EQL", Const, 0, ""},
    +		{"FALLTHROUGH", Const, 0, ""},
    +		{"FLOAT", Const, 0, ""},
    +		{"FOR", Const, 0, ""},
    +		{"FUNC", Const, 0, ""},
    +		{"File", Type, 0, ""},
    +		{"FileSet", Type, 0, ""},
    +		{"GEQ", Const, 0, ""},
    +		{"GO", Const, 0, ""},
    +		{"GOTO", Const, 0, ""},
    +		{"GTR", Const, 0, ""},
    +		{"HighestPrec", Const, 0, ""},
    +		{"IDENT", Const, 0, ""},
    +		{"IF", Const, 0, ""},
    +		{"ILLEGAL", Const, 0, ""},
    +		{"IMAG", Const, 0, ""},
    +		{"IMPORT", Const, 0, ""},
    +		{"INC", Const, 0, ""},
    +		{"INT", Const, 0, ""},
    +		{"INTERFACE", Const, 0, ""},
    +		{"IsExported", Func, 13, "func(name string) bool"},
    +		{"IsIdentifier", Func, 13, "func(name string) bool"},
    +		{"IsKeyword", Func, 13, "func(name string) bool"},
    +		{"LAND", Const, 0, ""},
    +		{"LBRACE", Const, 0, ""},
    +		{"LBRACK", Const, 0, ""},
    +		{"LEQ", Const, 0, ""},
    +		{"LOR", Const, 0, ""},
    +		{"LPAREN", Const, 0, ""},
    +		{"LSS", Const, 0, ""},
    +		{"Lookup", Func, 0, "func(ident string) Token"},
    +		{"LowestPrec", Const, 0, ""},
    +		{"MAP", Const, 0, ""},
    +		{"MUL", Const, 0, ""},
    +		{"MUL_ASSIGN", Const, 0, ""},
    +		{"NEQ", Const, 0, ""},
    +		{"NOT", Const, 0, ""},
    +		{"NewFileSet", Func, 0, "func() *FileSet"},
    +		{"NoPos", Const, 0, ""},
    +		{"OR", Const, 0, ""},
    +		{"OR_ASSIGN", Const, 0, ""},
    +		{"PACKAGE", Const, 0, ""},
    +		{"PERIOD", Const, 0, ""},
    +		{"Pos", Type, 0, ""},
    +		{"Position", Type, 0, ""},
    +		{"Position.Column", Field, 0, ""},
    +		{"Position.Filename", Field, 0, ""},
    +		{"Position.Line", Field, 0, ""},
    +		{"Position.Offset", Field, 0, ""},
    +		{"QUO", Const, 0, ""},
    +		{"QUO_ASSIGN", Const, 0, ""},
    +		{"RANGE", Const, 0, ""},
    +		{"RBRACE", Const, 0, ""},
    +		{"RBRACK", Const, 0, ""},
    +		{"REM", Const, 0, ""},
    +		{"REM_ASSIGN", Const, 0, ""},
    +		{"RETURN", Const, 0, ""},
    +		{"RPAREN", Const, 0, ""},
    +		{"SELECT", Const, 0, ""},
    +		{"SEMICOLON", Const, 0, ""},
    +		{"SHL", Const, 0, ""},
    +		{"SHL_ASSIGN", Const, 0, ""},
    +		{"SHR", Const, 0, ""},
    +		{"SHR_ASSIGN", Const, 0, ""},
    +		{"STRING", Const, 0, ""},
    +		{"STRUCT", Const, 0, ""},
    +		{"SUB", Const, 0, ""},
    +		{"SUB_ASSIGN", Const, 0, ""},
    +		{"SWITCH", Const, 0, ""},
    +		{"TILDE", Const, 18, ""},
    +		{"TYPE", Const, 0, ""},
    +		{"Token", Type, 0, ""},
    +		{"UnaryPrec", Const, 0, ""},
    +		{"VAR", Const, 0, ""},
    +		{"XOR", Const, 0, ""},
    +		{"XOR_ASSIGN", Const, 0, ""},
     	},
     	"go/types": {
    -		{"(*Alias).Obj", Method, 22},
    -		{"(*Alias).Origin", Method, 23},
    -		{"(*Alias).Rhs", Method, 23},
    -		{"(*Alias).SetTypeParams", Method, 23},
    -		{"(*Alias).String", Method, 22},
    -		{"(*Alias).TypeArgs", Method, 23},
    -		{"(*Alias).TypeParams", Method, 23},
    -		{"(*Alias).Underlying", Method, 22},
    -		{"(*ArgumentError).Error", Method, 18},
    -		{"(*ArgumentError).Unwrap", Method, 18},
    -		{"(*Array).Elem", Method, 5},
    -		{"(*Array).Len", Method, 5},
    -		{"(*Array).String", Method, 5},
    -		{"(*Array).Underlying", Method, 5},
    -		{"(*Basic).Info", Method, 5},
    -		{"(*Basic).Kind", Method, 5},
    -		{"(*Basic).Name", Method, 5},
    -		{"(*Basic).String", Method, 5},
    -		{"(*Basic).Underlying", Method, 5},
    -		{"(*Builtin).Exported", Method, 5},
    -		{"(*Builtin).Id", Method, 5},
    -		{"(*Builtin).Name", Method, 5},
    -		{"(*Builtin).Parent", Method, 5},
    -		{"(*Builtin).Pkg", Method, 5},
    -		{"(*Builtin).Pos", Method, 5},
    -		{"(*Builtin).String", Method, 5},
    -		{"(*Builtin).Type", Method, 5},
    -		{"(*Chan).Dir", Method, 5},
    -		{"(*Chan).Elem", Method, 5},
    -		{"(*Chan).String", Method, 5},
    -		{"(*Chan).Underlying", Method, 5},
    -		{"(*Checker).Files", Method, 5},
    -		{"(*Config).Check", Method, 5},
    -		{"(*Const).Exported", Method, 5},
    -		{"(*Const).Id", Method, 5},
    -		{"(*Const).Name", Method, 5},
    -		{"(*Const).Parent", Method, 5},
    -		{"(*Const).Pkg", Method, 5},
    -		{"(*Const).Pos", Method, 5},
    -		{"(*Const).String", Method, 5},
    -		{"(*Const).Type", Method, 5},
    -		{"(*Const).Val", Method, 5},
    -		{"(*Func).Exported", Method, 5},
    -		{"(*Func).FullName", Method, 5},
    -		{"(*Func).Id", Method, 5},
    -		{"(*Func).Name", Method, 5},
    -		{"(*Func).Origin", Method, 19},
    -		{"(*Func).Parent", Method, 5},
    -		{"(*Func).Pkg", Method, 5},
    -		{"(*Func).Pos", Method, 5},
    -		{"(*Func).Scope", Method, 5},
    -		{"(*Func).Signature", Method, 23},
    -		{"(*Func).String", Method, 5},
    -		{"(*Func).Type", Method, 5},
    -		{"(*Info).ObjectOf", Method, 5},
    -		{"(*Info).PkgNameOf", Method, 22},
    -		{"(*Info).TypeOf", Method, 5},
    -		{"(*Initializer).String", Method, 5},
    -		{"(*Interface).Complete", Method, 5},
    -		{"(*Interface).Embedded", Method, 5},
    -		{"(*Interface).EmbeddedType", Method, 11},
    -		{"(*Interface).Empty", Method, 5},
    -		{"(*Interface).ExplicitMethod", Method, 5},
    -		{"(*Interface).IsComparable", Method, 18},
    -		{"(*Interface).IsImplicit", Method, 18},
    -		{"(*Interface).IsMethodSet", Method, 18},
    -		{"(*Interface).MarkImplicit", Method, 18},
    -		{"(*Interface).Method", Method, 5},
    -		{"(*Interface).NumEmbeddeds", Method, 5},
    -		{"(*Interface).NumExplicitMethods", Method, 5},
    -		{"(*Interface).NumMethods", Method, 5},
    -		{"(*Interface).String", Method, 5},
    -		{"(*Interface).Underlying", Method, 5},
    -		{"(*Label).Exported", Method, 5},
    -		{"(*Label).Id", Method, 5},
    -		{"(*Label).Name", Method, 5},
    -		{"(*Label).Parent", Method, 5},
    -		{"(*Label).Pkg", Method, 5},
    -		{"(*Label).Pos", Method, 5},
    -		{"(*Label).String", Method, 5},
    -		{"(*Label).Type", Method, 5},
    -		{"(*Map).Elem", Method, 5},
    -		{"(*Map).Key", Method, 5},
    -		{"(*Map).String", Method, 5},
    -		{"(*Map).Underlying", Method, 5},
    -		{"(*MethodSet).At", Method, 5},
    -		{"(*MethodSet).Len", Method, 5},
    -		{"(*MethodSet).Lookup", Method, 5},
    -		{"(*MethodSet).String", Method, 5},
    -		{"(*Named).AddMethod", Method, 5},
    -		{"(*Named).Method", Method, 5},
    -		{"(*Named).NumMethods", Method, 5},
    -		{"(*Named).Obj", Method, 5},
    -		{"(*Named).Origin", Method, 18},
    -		{"(*Named).SetTypeParams", Method, 18},
    -		{"(*Named).SetUnderlying", Method, 5},
    -		{"(*Named).String", Method, 5},
    -		{"(*Named).TypeArgs", Method, 18},
    -		{"(*Named).TypeParams", Method, 18},
    -		{"(*Named).Underlying", Method, 5},
    -		{"(*Nil).Exported", Method, 5},
    -		{"(*Nil).Id", Method, 5},
    -		{"(*Nil).Name", Method, 5},
    -		{"(*Nil).Parent", Method, 5},
    -		{"(*Nil).Pkg", Method, 5},
    -		{"(*Nil).Pos", Method, 5},
    -		{"(*Nil).String", Method, 5},
    -		{"(*Nil).Type", Method, 5},
    -		{"(*Package).Complete", Method, 5},
    -		{"(*Package).GoVersion", Method, 21},
    -		{"(*Package).Imports", Method, 5},
    -		{"(*Package).MarkComplete", Method, 5},
    -		{"(*Package).Name", Method, 5},
    -		{"(*Package).Path", Method, 5},
    -		{"(*Package).Scope", Method, 5},
    -		{"(*Package).SetImports", Method, 5},
    -		{"(*Package).SetName", Method, 6},
    -		{"(*Package).String", Method, 5},
    -		{"(*PkgName).Exported", Method, 5},
    -		{"(*PkgName).Id", Method, 5},
    -		{"(*PkgName).Imported", Method, 5},
    -		{"(*PkgName).Name", Method, 5},
    -		{"(*PkgName).Parent", Method, 5},
    -		{"(*PkgName).Pkg", Method, 5},
    -		{"(*PkgName).Pos", Method, 5},
    -		{"(*PkgName).String", Method, 5},
    -		{"(*PkgName).Type", Method, 5},
    -		{"(*Pointer).Elem", Method, 5},
    -		{"(*Pointer).String", Method, 5},
    -		{"(*Pointer).Underlying", Method, 5},
    -		{"(*Scope).Child", Method, 5},
    -		{"(*Scope).Contains", Method, 5},
    -		{"(*Scope).End", Method, 5},
    -		{"(*Scope).Innermost", Method, 5},
    -		{"(*Scope).Insert", Method, 5},
    -		{"(*Scope).Len", Method, 5},
    -		{"(*Scope).Lookup", Method, 5},
    -		{"(*Scope).LookupParent", Method, 5},
    -		{"(*Scope).Names", Method, 5},
    -		{"(*Scope).NumChildren", Method, 5},
    -		{"(*Scope).Parent", Method, 5},
    -		{"(*Scope).Pos", Method, 5},
    -		{"(*Scope).String", Method, 5},
    -		{"(*Scope).WriteTo", Method, 5},
    -		{"(*Selection).Index", Method, 5},
    -		{"(*Selection).Indirect", Method, 5},
    -		{"(*Selection).Kind", Method, 5},
    -		{"(*Selection).Obj", Method, 5},
    -		{"(*Selection).Recv", Method, 5},
    -		{"(*Selection).String", Method, 5},
    -		{"(*Selection).Type", Method, 5},
    -		{"(*Signature).Params", Method, 5},
    -		{"(*Signature).Recv", Method, 5},
    -		{"(*Signature).RecvTypeParams", Method, 18},
    -		{"(*Signature).Results", Method, 5},
    -		{"(*Signature).String", Method, 5},
    -		{"(*Signature).TypeParams", Method, 18},
    -		{"(*Signature).Underlying", Method, 5},
    -		{"(*Signature).Variadic", Method, 5},
    -		{"(*Slice).Elem", Method, 5},
    -		{"(*Slice).String", Method, 5},
    -		{"(*Slice).Underlying", Method, 5},
    -		{"(*StdSizes).Alignof", Method, 5},
    -		{"(*StdSizes).Offsetsof", Method, 5},
    -		{"(*StdSizes).Sizeof", Method, 5},
    -		{"(*Struct).Field", Method, 5},
    -		{"(*Struct).NumFields", Method, 5},
    -		{"(*Struct).String", Method, 5},
    -		{"(*Struct).Tag", Method, 5},
    -		{"(*Struct).Underlying", Method, 5},
    -		{"(*Term).String", Method, 18},
    -		{"(*Term).Tilde", Method, 18},
    -		{"(*Term).Type", Method, 18},
    -		{"(*Tuple).At", Method, 5},
    -		{"(*Tuple).Len", Method, 5},
    -		{"(*Tuple).String", Method, 5},
    -		{"(*Tuple).Underlying", Method, 5},
    -		{"(*TypeList).At", Method, 18},
    -		{"(*TypeList).Len", Method, 18},
    -		{"(*TypeName).Exported", Method, 5},
    -		{"(*TypeName).Id", Method, 5},
    -		{"(*TypeName).IsAlias", Method, 9},
    -		{"(*TypeName).Name", Method, 5},
    -		{"(*TypeName).Parent", Method, 5},
    -		{"(*TypeName).Pkg", Method, 5},
    -		{"(*TypeName).Pos", Method, 5},
    -		{"(*TypeName).String", Method, 5},
    -		{"(*TypeName).Type", Method, 5},
    -		{"(*TypeParam).Constraint", Method, 18},
    -		{"(*TypeParam).Index", Method, 18},
    -		{"(*TypeParam).Obj", Method, 18},
    -		{"(*TypeParam).SetConstraint", Method, 18},
    -		{"(*TypeParam).String", Method, 18},
    -		{"(*TypeParam).Underlying", Method, 18},
    -		{"(*TypeParamList).At", Method, 18},
    -		{"(*TypeParamList).Len", Method, 18},
    -		{"(*Union).Len", Method, 18},
    -		{"(*Union).String", Method, 18},
    -		{"(*Union).Term", Method, 18},
    -		{"(*Union).Underlying", Method, 18},
    -		{"(*Var).Anonymous", Method, 5},
    -		{"(*Var).Embedded", Method, 11},
    -		{"(*Var).Exported", Method, 5},
    -		{"(*Var).Id", Method, 5},
    -		{"(*Var).IsField", Method, 5},
    -		{"(*Var).Name", Method, 5},
    -		{"(*Var).Origin", Method, 19},
    -		{"(*Var).Parent", Method, 5},
    -		{"(*Var).Pkg", Method, 5},
    -		{"(*Var).Pos", Method, 5},
    -		{"(*Var).String", Method, 5},
    -		{"(*Var).Type", Method, 5},
    -		{"(Checker).ObjectOf", Method, 5},
    -		{"(Checker).PkgNameOf", Method, 22},
    -		{"(Checker).TypeOf", Method, 5},
    -		{"(Error).Error", Method, 5},
    -		{"(TypeAndValue).Addressable", Method, 5},
    -		{"(TypeAndValue).Assignable", Method, 5},
    -		{"(TypeAndValue).HasOk", Method, 5},
    -		{"(TypeAndValue).IsBuiltin", Method, 5},
    -		{"(TypeAndValue).IsNil", Method, 5},
    -		{"(TypeAndValue).IsType", Method, 5},
    -		{"(TypeAndValue).IsValue", Method, 5},
    -		{"(TypeAndValue).IsVoid", Method, 5},
    -		{"Alias", Type, 22},
    -		{"ArgumentError", Type, 18},
    -		{"ArgumentError.Err", Field, 18},
    -		{"ArgumentError.Index", Field, 18},
    -		{"Array", Type, 5},
    -		{"AssertableTo", Func, 5},
    -		{"AssignableTo", Func, 5},
    -		{"Basic", Type, 5},
    -		{"BasicInfo", Type, 5},
    -		{"BasicKind", Type, 5},
    -		{"Bool", Const, 5},
    -		{"Builtin", Type, 5},
    -		{"Byte", Const, 5},
    -		{"Chan", Type, 5},
    -		{"ChanDir", Type, 5},
    -		{"CheckExpr", Func, 13},
    -		{"Checker", Type, 5},
    -		{"Checker.Info", Field, 5},
    -		{"Comparable", Func, 5},
    -		{"Complex128", Const, 5},
    -		{"Complex64", Const, 5},
    -		{"Config", Type, 5},
    -		{"Config.Context", Field, 18},
    -		{"Config.DisableUnusedImportCheck", Field, 5},
    -		{"Config.Error", Field, 5},
    -		{"Config.FakeImportC", Field, 5},
    -		{"Config.GoVersion", Field, 18},
    -		{"Config.IgnoreFuncBodies", Field, 5},
    -		{"Config.Importer", Field, 5},
    -		{"Config.Sizes", Field, 5},
    -		{"Const", Type, 5},
    -		{"Context", Type, 18},
    -		{"ConvertibleTo", Func, 5},
    -		{"DefPredeclaredTestFuncs", Func, 5},
    -		{"Default", Func, 8},
    -		{"Error", Type, 5},
    -		{"Error.Fset", Field, 5},
    -		{"Error.Msg", Field, 5},
    -		{"Error.Pos", Field, 5},
    -		{"Error.Soft", Field, 5},
    -		{"Eval", Func, 5},
    -		{"ExprString", Func, 5},
    -		{"FieldVal", Const, 5},
    -		{"Float32", Const, 5},
    -		{"Float64", Const, 5},
    -		{"Func", Type, 5},
    -		{"Id", Func, 5},
    -		{"Identical", Func, 5},
    -		{"IdenticalIgnoreTags", Func, 8},
    -		{"Implements", Func, 5},
    -		{"ImportMode", Type, 6},
    -		{"Importer", Type, 5},
    -		{"ImporterFrom", Type, 6},
    -		{"Info", Type, 5},
    -		{"Info.Defs", Field, 5},
    -		{"Info.FileVersions", Field, 22},
    -		{"Info.Implicits", Field, 5},
    -		{"Info.InitOrder", Field, 5},
    -		{"Info.Instances", Field, 18},
    -		{"Info.Scopes", Field, 5},
    -		{"Info.Selections", Field, 5},
    -		{"Info.Types", Field, 5},
    -		{"Info.Uses", Field, 5},
    -		{"Initializer", Type, 5},
    -		{"Initializer.Lhs", Field, 5},
    -		{"Initializer.Rhs", Field, 5},
    -		{"Instance", Type, 18},
    -		{"Instance.Type", Field, 18},
    -		{"Instance.TypeArgs", Field, 18},
    -		{"Instantiate", Func, 18},
    -		{"Int", Const, 5},
    -		{"Int16", Const, 5},
    -		{"Int32", Const, 5},
    -		{"Int64", Const, 5},
    -		{"Int8", Const, 5},
    -		{"Interface", Type, 5},
    -		{"Invalid", Const, 5},
    -		{"IsBoolean", Const, 5},
    -		{"IsComplex", Const, 5},
    -		{"IsConstType", Const, 5},
    -		{"IsFloat", Const, 5},
    -		{"IsInteger", Const, 5},
    -		{"IsInterface", Func, 5},
    -		{"IsNumeric", Const, 5},
    -		{"IsOrdered", Const, 5},
    -		{"IsString", Const, 5},
    -		{"IsUnsigned", Const, 5},
    -		{"IsUntyped", Const, 5},
    -		{"Label", Type, 5},
    -		{"LookupFieldOrMethod", Func, 5},
    -		{"Map", Type, 5},
    -		{"MethodExpr", Const, 5},
    -		{"MethodSet", Type, 5},
    -		{"MethodVal", Const, 5},
    -		{"MissingMethod", Func, 5},
    -		{"Named", Type, 5},
    -		{"NewAlias", Func, 22},
    -		{"NewArray", Func, 5},
    -		{"NewChan", Func, 5},
    -		{"NewChecker", Func, 5},
    -		{"NewConst", Func, 5},
    -		{"NewContext", Func, 18},
    -		{"NewField", Func, 5},
    -		{"NewFunc", Func, 5},
    -		{"NewInterface", Func, 5},
    -		{"NewInterfaceType", Func, 11},
    -		{"NewLabel", Func, 5},
    -		{"NewMap", Func, 5},
    -		{"NewMethodSet", Func, 5},
    -		{"NewNamed", Func, 5},
    -		{"NewPackage", Func, 5},
    -		{"NewParam", Func, 5},
    -		{"NewPkgName", Func, 5},
    -		{"NewPointer", Func, 5},
    -		{"NewScope", Func, 5},
    -		{"NewSignature", Func, 5},
    -		{"NewSignatureType", Func, 18},
    -		{"NewSlice", Func, 5},
    -		{"NewStruct", Func, 5},
    -		{"NewTerm", Func, 18},
    -		{"NewTuple", Func, 5},
    -		{"NewTypeName", Func, 5},
    -		{"NewTypeParam", Func, 18},
    -		{"NewUnion", Func, 18},
    -		{"NewVar", Func, 5},
    -		{"Nil", Type, 5},
    -		{"Object", Type, 5},
    -		{"ObjectString", Func, 5},
    -		{"Package", Type, 5},
    -		{"PkgName", Type, 5},
    -		{"Pointer", Type, 5},
    -		{"Qualifier", Type, 5},
    -		{"RecvOnly", Const, 5},
    -		{"RelativeTo", Func, 5},
    -		{"Rune", Const, 5},
    -		{"Satisfies", Func, 20},
    -		{"Scope", Type, 5},
    -		{"Selection", Type, 5},
    -		{"SelectionKind", Type, 5},
    -		{"SelectionString", Func, 5},
    -		{"SendOnly", Const, 5},
    -		{"SendRecv", Const, 5},
    -		{"Signature", Type, 5},
    -		{"Sizes", Type, 5},
    -		{"SizesFor", Func, 9},
    -		{"Slice", Type, 5},
    -		{"StdSizes", Type, 5},
    -		{"StdSizes.MaxAlign", Field, 5},
    -		{"StdSizes.WordSize", Field, 5},
    -		{"String", Const, 5},
    -		{"Struct", Type, 5},
    -		{"Term", Type, 18},
    -		{"Tuple", Type, 5},
    -		{"Typ", Var, 5},
    -		{"Type", Type, 5},
    -		{"TypeAndValue", Type, 5},
    -		{"TypeAndValue.Type", Field, 5},
    -		{"TypeAndValue.Value", Field, 5},
    -		{"TypeList", Type, 18},
    -		{"TypeName", Type, 5},
    -		{"TypeParam", Type, 18},
    -		{"TypeParamList", Type, 18},
    -		{"TypeString", Func, 5},
    -		{"Uint", Const, 5},
    -		{"Uint16", Const, 5},
    -		{"Uint32", Const, 5},
    -		{"Uint64", Const, 5},
    -		{"Uint8", Const, 5},
    -		{"Uintptr", Const, 5},
    -		{"Unalias", Func, 22},
    -		{"Union", Type, 18},
    -		{"Universe", Var, 5},
    -		{"Unsafe", Var, 5},
    -		{"UnsafePointer", Const, 5},
    -		{"UntypedBool", Const, 5},
    -		{"UntypedComplex", Const, 5},
    -		{"UntypedFloat", Const, 5},
    -		{"UntypedInt", Const, 5},
    -		{"UntypedNil", Const, 5},
    -		{"UntypedRune", Const, 5},
    -		{"UntypedString", Const, 5},
    -		{"Var", Type, 5},
    -		{"WriteExpr", Func, 5},
    -		{"WriteSignature", Func, 5},
    -		{"WriteType", Func, 5},
    +		{"(*Alias).Obj", Method, 22, ""},
    +		{"(*Alias).Origin", Method, 23, ""},
    +		{"(*Alias).Rhs", Method, 23, ""},
    +		{"(*Alias).SetTypeParams", Method, 23, ""},
    +		{"(*Alias).String", Method, 22, ""},
    +		{"(*Alias).TypeArgs", Method, 23, ""},
    +		{"(*Alias).TypeParams", Method, 23, ""},
    +		{"(*Alias).Underlying", Method, 22, ""},
    +		{"(*ArgumentError).Error", Method, 18, ""},
    +		{"(*ArgumentError).Unwrap", Method, 18, ""},
    +		{"(*Array).Elem", Method, 5, ""},
    +		{"(*Array).Len", Method, 5, ""},
    +		{"(*Array).String", Method, 5, ""},
    +		{"(*Array).Underlying", Method, 5, ""},
    +		{"(*Basic).Info", Method, 5, ""},
    +		{"(*Basic).Kind", Method, 5, ""},
    +		{"(*Basic).Name", Method, 5, ""},
    +		{"(*Basic).String", Method, 5, ""},
    +		{"(*Basic).Underlying", Method, 5, ""},
    +		{"(*Builtin).Exported", Method, 5, ""},
    +		{"(*Builtin).Id", Method, 5, ""},
    +		{"(*Builtin).Name", Method, 5, ""},
    +		{"(*Builtin).Parent", Method, 5, ""},
    +		{"(*Builtin).Pkg", Method, 5, ""},
    +		{"(*Builtin).Pos", Method, 5, ""},
    +		{"(*Builtin).String", Method, 5, ""},
    +		{"(*Builtin).Type", Method, 5, ""},
    +		{"(*Chan).Dir", Method, 5, ""},
    +		{"(*Chan).Elem", Method, 5, ""},
    +		{"(*Chan).String", Method, 5, ""},
    +		{"(*Chan).Underlying", Method, 5, ""},
    +		{"(*Checker).Files", Method, 5, ""},
    +		{"(*Config).Check", Method, 5, ""},
    +		{"(*Const).Exported", Method, 5, ""},
    +		{"(*Const).Id", Method, 5, ""},
    +		{"(*Const).Name", Method, 5, ""},
    +		{"(*Const).Parent", Method, 5, ""},
    +		{"(*Const).Pkg", Method, 5, ""},
    +		{"(*Const).Pos", Method, 5, ""},
    +		{"(*Const).String", Method, 5, ""},
    +		{"(*Const).Type", Method, 5, ""},
    +		{"(*Const).Val", Method, 5, ""},
    +		{"(*Func).Exported", Method, 5, ""},
    +		{"(*Func).FullName", Method, 5, ""},
    +		{"(*Func).Id", Method, 5, ""},
    +		{"(*Func).Name", Method, 5, ""},
    +		{"(*Func).Origin", Method, 19, ""},
    +		{"(*Func).Parent", Method, 5, ""},
    +		{"(*Func).Pkg", Method, 5, ""},
    +		{"(*Func).Pos", Method, 5, ""},
    +		{"(*Func).Scope", Method, 5, ""},
    +		{"(*Func).Signature", Method, 23, ""},
    +		{"(*Func).String", Method, 5, ""},
    +		{"(*Func).Type", Method, 5, ""},
    +		{"(*Info).ObjectOf", Method, 5, ""},
    +		{"(*Info).PkgNameOf", Method, 22, ""},
    +		{"(*Info).TypeOf", Method, 5, ""},
    +		{"(*Initializer).String", Method, 5, ""},
    +		{"(*Interface).Complete", Method, 5, ""},
    +		{"(*Interface).Embedded", Method, 5, ""},
    +		{"(*Interface).EmbeddedType", Method, 11, ""},
    +		{"(*Interface).EmbeddedTypes", Method, 24, ""},
    +		{"(*Interface).Empty", Method, 5, ""},
    +		{"(*Interface).ExplicitMethod", Method, 5, ""},
    +		{"(*Interface).ExplicitMethods", Method, 24, ""},
    +		{"(*Interface).IsComparable", Method, 18, ""},
    +		{"(*Interface).IsImplicit", Method, 18, ""},
    +		{"(*Interface).IsMethodSet", Method, 18, ""},
    +		{"(*Interface).MarkImplicit", Method, 18, ""},
    +		{"(*Interface).Method", Method, 5, ""},
    +		{"(*Interface).Methods", Method, 24, ""},
    +		{"(*Interface).NumEmbeddeds", Method, 5, ""},
    +		{"(*Interface).NumExplicitMethods", Method, 5, ""},
    +		{"(*Interface).NumMethods", Method, 5, ""},
    +		{"(*Interface).String", Method, 5, ""},
    +		{"(*Interface).Underlying", Method, 5, ""},
    +		{"(*Label).Exported", Method, 5, ""},
    +		{"(*Label).Id", Method, 5, ""},
    +		{"(*Label).Name", Method, 5, ""},
    +		{"(*Label).Parent", Method, 5, ""},
    +		{"(*Label).Pkg", Method, 5, ""},
    +		{"(*Label).Pos", Method, 5, ""},
    +		{"(*Label).String", Method, 5, ""},
    +		{"(*Label).Type", Method, 5, ""},
    +		{"(*Map).Elem", Method, 5, ""},
    +		{"(*Map).Key", Method, 5, ""},
    +		{"(*Map).String", Method, 5, ""},
    +		{"(*Map).Underlying", Method, 5, ""},
    +		{"(*MethodSet).At", Method, 5, ""},
    +		{"(*MethodSet).Len", Method, 5, ""},
    +		{"(*MethodSet).Lookup", Method, 5, ""},
    +		{"(*MethodSet).Methods", Method, 24, ""},
    +		{"(*MethodSet).String", Method, 5, ""},
    +		{"(*Named).AddMethod", Method, 5, ""},
    +		{"(*Named).Method", Method, 5, ""},
    +		{"(*Named).Methods", Method, 24, ""},
    +		{"(*Named).NumMethods", Method, 5, ""},
    +		{"(*Named).Obj", Method, 5, ""},
    +		{"(*Named).Origin", Method, 18, ""},
    +		{"(*Named).SetTypeParams", Method, 18, ""},
    +		{"(*Named).SetUnderlying", Method, 5, ""},
    +		{"(*Named).String", Method, 5, ""},
    +		{"(*Named).TypeArgs", Method, 18, ""},
    +		{"(*Named).TypeParams", Method, 18, ""},
    +		{"(*Named).Underlying", Method, 5, ""},
    +		{"(*Nil).Exported", Method, 5, ""},
    +		{"(*Nil).Id", Method, 5, ""},
    +		{"(*Nil).Name", Method, 5, ""},
    +		{"(*Nil).Parent", Method, 5, ""},
    +		{"(*Nil).Pkg", Method, 5, ""},
    +		{"(*Nil).Pos", Method, 5, ""},
    +		{"(*Nil).String", Method, 5, ""},
    +		{"(*Nil).Type", Method, 5, ""},
    +		{"(*Package).Complete", Method, 5, ""},
    +		{"(*Package).GoVersion", Method, 21, ""},
    +		{"(*Package).Imports", Method, 5, ""},
    +		{"(*Package).MarkComplete", Method, 5, ""},
    +		{"(*Package).Name", Method, 5, ""},
    +		{"(*Package).Path", Method, 5, ""},
    +		{"(*Package).Scope", Method, 5, ""},
    +		{"(*Package).SetImports", Method, 5, ""},
    +		{"(*Package).SetName", Method, 6, ""},
    +		{"(*Package).String", Method, 5, ""},
    +		{"(*PkgName).Exported", Method, 5, ""},
    +		{"(*PkgName).Id", Method, 5, ""},
    +		{"(*PkgName).Imported", Method, 5, ""},
    +		{"(*PkgName).Name", Method, 5, ""},
    +		{"(*PkgName).Parent", Method, 5, ""},
    +		{"(*PkgName).Pkg", Method, 5, ""},
    +		{"(*PkgName).Pos", Method, 5, ""},
    +		{"(*PkgName).String", Method, 5, ""},
    +		{"(*PkgName).Type", Method, 5, ""},
    +		{"(*Pointer).Elem", Method, 5, ""},
    +		{"(*Pointer).String", Method, 5, ""},
    +		{"(*Pointer).Underlying", Method, 5, ""},
    +		{"(*Scope).Child", Method, 5, ""},
    +		{"(*Scope).Children", Method, 24, ""},
    +		{"(*Scope).Contains", Method, 5, ""},
    +		{"(*Scope).End", Method, 5, ""},
    +		{"(*Scope).Innermost", Method, 5, ""},
    +		{"(*Scope).Insert", Method, 5, ""},
    +		{"(*Scope).Len", Method, 5, ""},
    +		{"(*Scope).Lookup", Method, 5, ""},
    +		{"(*Scope).LookupParent", Method, 5, ""},
    +		{"(*Scope).Names", Method, 5, ""},
    +		{"(*Scope).NumChildren", Method, 5, ""},
    +		{"(*Scope).Parent", Method, 5, ""},
    +		{"(*Scope).Pos", Method, 5, ""},
    +		{"(*Scope).String", Method, 5, ""},
    +		{"(*Scope).WriteTo", Method, 5, ""},
    +		{"(*Selection).Index", Method, 5, ""},
    +		{"(*Selection).Indirect", Method, 5, ""},
    +		{"(*Selection).Kind", Method, 5, ""},
    +		{"(*Selection).Obj", Method, 5, ""},
    +		{"(*Selection).Recv", Method, 5, ""},
    +		{"(*Selection).String", Method, 5, ""},
    +		{"(*Selection).Type", Method, 5, ""},
    +		{"(*Signature).Params", Method, 5, ""},
    +		{"(*Signature).Recv", Method, 5, ""},
    +		{"(*Signature).RecvTypeParams", Method, 18, ""},
    +		{"(*Signature).Results", Method, 5, ""},
    +		{"(*Signature).String", Method, 5, ""},
    +		{"(*Signature).TypeParams", Method, 18, ""},
    +		{"(*Signature).Underlying", Method, 5, ""},
    +		{"(*Signature).Variadic", Method, 5, ""},
    +		{"(*Slice).Elem", Method, 5, ""},
    +		{"(*Slice).String", Method, 5, ""},
    +		{"(*Slice).Underlying", Method, 5, ""},
    +		{"(*StdSizes).Alignof", Method, 5, ""},
    +		{"(*StdSizes).Offsetsof", Method, 5, ""},
    +		{"(*StdSizes).Sizeof", Method, 5, ""},
    +		{"(*Struct).Field", Method, 5, ""},
    +		{"(*Struct).Fields", Method, 24, ""},
    +		{"(*Struct).NumFields", Method, 5, ""},
    +		{"(*Struct).String", Method, 5, ""},
    +		{"(*Struct).Tag", Method, 5, ""},
    +		{"(*Struct).Underlying", Method, 5, ""},
    +		{"(*Term).String", Method, 18, ""},
    +		{"(*Term).Tilde", Method, 18, ""},
    +		{"(*Term).Type", Method, 18, ""},
    +		{"(*Tuple).At", Method, 5, ""},
    +		{"(*Tuple).Len", Method, 5, ""},
    +		{"(*Tuple).String", Method, 5, ""},
    +		{"(*Tuple).Underlying", Method, 5, ""},
    +		{"(*Tuple).Variables", Method, 24, ""},
    +		{"(*TypeList).At", Method, 18, ""},
    +		{"(*TypeList).Len", Method, 18, ""},
    +		{"(*TypeList).Types", Method, 24, ""},
    +		{"(*TypeName).Exported", Method, 5, ""},
    +		{"(*TypeName).Id", Method, 5, ""},
    +		{"(*TypeName).IsAlias", Method, 9, ""},
    +		{"(*TypeName).Name", Method, 5, ""},
    +		{"(*TypeName).Parent", Method, 5, ""},
    +		{"(*TypeName).Pkg", Method, 5, ""},
    +		{"(*TypeName).Pos", Method, 5, ""},
    +		{"(*TypeName).String", Method, 5, ""},
    +		{"(*TypeName).Type", Method, 5, ""},
    +		{"(*TypeParam).Constraint", Method, 18, ""},
    +		{"(*TypeParam).Index", Method, 18, ""},
    +		{"(*TypeParam).Obj", Method, 18, ""},
    +		{"(*TypeParam).SetConstraint", Method, 18, ""},
    +		{"(*TypeParam).String", Method, 18, ""},
    +		{"(*TypeParam).Underlying", Method, 18, ""},
    +		{"(*TypeParamList).At", Method, 18, ""},
    +		{"(*TypeParamList).Len", Method, 18, ""},
    +		{"(*TypeParamList).TypeParams", Method, 24, ""},
    +		{"(*Union).Len", Method, 18, ""},
    +		{"(*Union).String", Method, 18, ""},
    +		{"(*Union).Term", Method, 18, ""},
    +		{"(*Union).Terms", Method, 24, ""},
    +		{"(*Union).Underlying", Method, 18, ""},
    +		{"(*Var).Anonymous", Method, 5, ""},
    +		{"(*Var).Embedded", Method, 11, ""},
    +		{"(*Var).Exported", Method, 5, ""},
    +		{"(*Var).Id", Method, 5, ""},
    +		{"(*Var).IsField", Method, 5, ""},
    +		{"(*Var).Kind", Method, 25, ""},
    +		{"(*Var).Name", Method, 5, ""},
    +		{"(*Var).Origin", Method, 19, ""},
    +		{"(*Var).Parent", Method, 5, ""},
    +		{"(*Var).Pkg", Method, 5, ""},
    +		{"(*Var).Pos", Method, 5, ""},
    +		{"(*Var).SetKind", Method, 25, ""},
    +		{"(*Var).String", Method, 5, ""},
    +		{"(*Var).Type", Method, 5, ""},
    +		{"(Checker).ObjectOf", Method, 5, ""},
    +		{"(Checker).PkgNameOf", Method, 22, ""},
    +		{"(Checker).TypeOf", Method, 5, ""},
    +		{"(Error).Error", Method, 5, ""},
    +		{"(TypeAndValue).Addressable", Method, 5, ""},
    +		{"(TypeAndValue).Assignable", Method, 5, ""},
    +		{"(TypeAndValue).HasOk", Method, 5, ""},
    +		{"(TypeAndValue).IsBuiltin", Method, 5, ""},
    +		{"(TypeAndValue).IsNil", Method, 5, ""},
    +		{"(TypeAndValue).IsType", Method, 5, ""},
    +		{"(TypeAndValue).IsValue", Method, 5, ""},
    +		{"(TypeAndValue).IsVoid", Method, 5, ""},
    +		{"(VarKind).String", Method, 25, ""},
    +		{"Alias", Type, 22, ""},
    +		{"ArgumentError", Type, 18, ""},
    +		{"ArgumentError.Err", Field, 18, ""},
    +		{"ArgumentError.Index", Field, 18, ""},
    +		{"Array", Type, 5, ""},
    +		{"AssertableTo", Func, 5, "func(V *Interface, T Type) bool"},
    +		{"AssignableTo", Func, 5, "func(V Type, T Type) bool"},
    +		{"Basic", Type, 5, ""},
    +		{"BasicInfo", Type, 5, ""},
    +		{"BasicKind", Type, 5, ""},
    +		{"Bool", Const, 5, ""},
    +		{"Builtin", Type, 5, ""},
    +		{"Byte", Const, 5, ""},
    +		{"Chan", Type, 5, ""},
    +		{"ChanDir", Type, 5, ""},
    +		{"CheckExpr", Func, 13, "func(fset *token.FileSet, pkg *Package, pos token.Pos, expr ast.Expr, info *Info) (err error)"},
    +		{"Checker", Type, 5, ""},
    +		{"Checker.Info", Field, 5, ""},
    +		{"Comparable", Func, 5, "func(T Type) bool"},
    +		{"Complex128", Const, 5, ""},
    +		{"Complex64", Const, 5, ""},
    +		{"Config", Type, 5, ""},
    +		{"Config.Context", Field, 18, ""},
    +		{"Config.DisableUnusedImportCheck", Field, 5, ""},
    +		{"Config.Error", Field, 5, ""},
    +		{"Config.FakeImportC", Field, 5, ""},
    +		{"Config.GoVersion", Field, 18, ""},
    +		{"Config.IgnoreFuncBodies", Field, 5, ""},
    +		{"Config.Importer", Field, 5, ""},
    +		{"Config.Sizes", Field, 5, ""},
    +		{"Const", Type, 5, ""},
    +		{"Context", Type, 18, ""},
    +		{"ConvertibleTo", Func, 5, "func(V Type, T Type) bool"},
    +		{"DefPredeclaredTestFuncs", Func, 5, "func()"},
    +		{"Default", Func, 8, "func(t Type) Type"},
    +		{"Error", Type, 5, ""},
    +		{"Error.Fset", Field, 5, ""},
    +		{"Error.Msg", Field, 5, ""},
    +		{"Error.Pos", Field, 5, ""},
    +		{"Error.Soft", Field, 5, ""},
    +		{"Eval", Func, 5, "func(fset *token.FileSet, pkg *Package, pos token.Pos, expr string) (_ TypeAndValue, err error)"},
    +		{"ExprString", Func, 5, "func(x ast.Expr) string"},
    +		{"FieldVal", Const, 5, ""},
    +		{"FieldVar", Const, 25, ""},
    +		{"Float32", Const, 5, ""},
    +		{"Float64", Const, 5, ""},
    +		{"Func", Type, 5, ""},
    +		{"Id", Func, 5, "func(pkg *Package, name string) string"},
    +		{"Identical", Func, 5, "func(x Type, y Type) bool"},
    +		{"IdenticalIgnoreTags", Func, 8, "func(x Type, y Type) bool"},
    +		{"Implements", Func, 5, "func(V Type, T *Interface) bool"},
    +		{"ImportMode", Type, 6, ""},
    +		{"Importer", Type, 5, ""},
    +		{"ImporterFrom", Type, 6, ""},
    +		{"Info", Type, 5, ""},
    +		{"Info.Defs", Field, 5, ""},
    +		{"Info.FileVersions", Field, 22, ""},
    +		{"Info.Implicits", Field, 5, ""},
    +		{"Info.InitOrder", Field, 5, ""},
    +		{"Info.Instances", Field, 18, ""},
    +		{"Info.Scopes", Field, 5, ""},
    +		{"Info.Selections", Field, 5, ""},
    +		{"Info.Types", Field, 5, ""},
    +		{"Info.Uses", Field, 5, ""},
    +		{"Initializer", Type, 5, ""},
    +		{"Initializer.Lhs", Field, 5, ""},
    +		{"Initializer.Rhs", Field, 5, ""},
    +		{"Instance", Type, 18, ""},
    +		{"Instance.Type", Field, 18, ""},
    +		{"Instance.TypeArgs", Field, 18, ""},
    +		{"Instantiate", Func, 18, "func(ctxt *Context, orig Type, targs []Type, validate bool) (Type, error)"},
    +		{"Int", Const, 5, ""},
    +		{"Int16", Const, 5, ""},
    +		{"Int32", Const, 5, ""},
    +		{"Int64", Const, 5, ""},
    +		{"Int8", Const, 5, ""},
    +		{"Interface", Type, 5, ""},
    +		{"Invalid", Const, 5, ""},
    +		{"IsBoolean", Const, 5, ""},
    +		{"IsComplex", Const, 5, ""},
    +		{"IsConstType", Const, 5, ""},
    +		{"IsFloat", Const, 5, ""},
    +		{"IsInteger", Const, 5, ""},
    +		{"IsInterface", Func, 5, "func(t Type) bool"},
    +		{"IsNumeric", Const, 5, ""},
    +		{"IsOrdered", Const, 5, ""},
    +		{"IsString", Const, 5, ""},
    +		{"IsUnsigned", Const, 5, ""},
    +		{"IsUntyped", Const, 5, ""},
    +		{"Label", Type, 5, ""},
    +		{"LocalVar", Const, 25, ""},
    +		{"LookupFieldOrMethod", Func, 5, "func(T Type, addressable bool, pkg *Package, name string) (obj Object, index []int, indirect bool)"},
    +		{"LookupSelection", Func, 25, "func(T Type, addressable bool, pkg *Package, name string) (Selection, bool)"},
    +		{"Map", Type, 5, ""},
    +		{"MethodExpr", Const, 5, ""},
    +		{"MethodSet", Type, 5, ""},
    +		{"MethodVal", Const, 5, ""},
    +		{"MissingMethod", Func, 5, "func(V Type, T *Interface, static bool) (method *Func, wrongType bool)"},
    +		{"Named", Type, 5, ""},
    +		{"NewAlias", Func, 22, "func(obj *TypeName, rhs Type) *Alias"},
    +		{"NewArray", Func, 5, "func(elem Type, len int64) *Array"},
    +		{"NewChan", Func, 5, "func(dir ChanDir, elem Type) *Chan"},
    +		{"NewChecker", Func, 5, "func(conf *Config, fset *token.FileSet, pkg *Package, info *Info) *Checker"},
    +		{"NewConst", Func, 5, "func(pos token.Pos, pkg *Package, name string, typ Type, val constant.Value) *Const"},
    +		{"NewContext", Func, 18, "func() *Context"},
    +		{"NewField", Func, 5, "func(pos token.Pos, pkg *Package, name string, typ Type, embedded bool) *Var"},
    +		{"NewFunc", Func, 5, "func(pos token.Pos, pkg *Package, name string, sig *Signature) *Func"},
    +		{"NewInterface", Func, 5, "func(methods []*Func, embeddeds []*Named) *Interface"},
    +		{"NewInterfaceType", Func, 11, "func(methods []*Func, embeddeds []Type) *Interface"},
    +		{"NewLabel", Func, 5, "func(pos token.Pos, pkg *Package, name string) *Label"},
    +		{"NewMap", Func, 5, "func(key Type, elem Type) *Map"},
    +		{"NewMethodSet", Func, 5, "func(T Type) *MethodSet"},
    +		{"NewNamed", Func, 5, "func(obj *TypeName, underlying Type, methods []*Func) *Named"},
    +		{"NewPackage", Func, 5, "func(path string, name string) *Package"},
    +		{"NewParam", Func, 5, "func(pos token.Pos, pkg *Package, name string, typ Type) *Var"},
    +		{"NewPkgName", Func, 5, "func(pos token.Pos, pkg *Package, name string, imported *Package) *PkgName"},
    +		{"NewPointer", Func, 5, "func(elem Type) *Pointer"},
    +		{"NewScope", Func, 5, "func(parent *Scope, pos token.Pos, end token.Pos, comment string) *Scope"},
    +		{"NewSignature", Func, 5, "func(recv *Var, params *Tuple, results *Tuple, variadic bool) *Signature"},
    +		{"NewSignatureType", Func, 18, "func(recv *Var, recvTypeParams []*TypeParam, typeParams []*TypeParam, params *Tuple, results *Tuple, variadic bool) *Signature"},
    +		{"NewSlice", Func, 5, "func(elem Type) *Slice"},
    +		{"NewStruct", Func, 5, "func(fields []*Var, tags []string) *Struct"},
    +		{"NewTerm", Func, 18, "func(tilde bool, typ Type) *Term"},
    +		{"NewTuple", Func, 5, "func(x ...*Var) *Tuple"},
    +		{"NewTypeName", Func, 5, "func(pos token.Pos, pkg *Package, name string, typ Type) *TypeName"},
    +		{"NewTypeParam", Func, 18, "func(obj *TypeName, constraint Type) *TypeParam"},
    +		{"NewUnion", Func, 18, "func(terms []*Term) *Union"},
    +		{"NewVar", Func, 5, "func(pos token.Pos, pkg *Package, name string, typ Type) *Var"},
    +		{"Nil", Type, 5, ""},
    +		{"Object", Type, 5, ""},
    +		{"ObjectString", Func, 5, "func(obj Object, qf Qualifier) string"},
    +		{"Package", Type, 5, ""},
    +		{"PackageVar", Const, 25, ""},
    +		{"ParamVar", Const, 25, ""},
    +		{"PkgName", Type, 5, ""},
    +		{"Pointer", Type, 5, ""},
    +		{"Qualifier", Type, 5, ""},
    +		{"RecvOnly", Const, 5, ""},
    +		{"RecvVar", Const, 25, ""},
    +		{"RelativeTo", Func, 5, "func(pkg *Package) Qualifier"},
    +		{"ResultVar", Const, 25, ""},
    +		{"Rune", Const, 5, ""},
    +		{"Satisfies", Func, 20, "func(V Type, T *Interface) bool"},
    +		{"Scope", Type, 5, ""},
    +		{"Selection", Type, 5, ""},
    +		{"SelectionKind", Type, 5, ""},
    +		{"SelectionString", Func, 5, "func(s *Selection, qf Qualifier) string"},
    +		{"SendOnly", Const, 5, ""},
    +		{"SendRecv", Const, 5, ""},
    +		{"Signature", Type, 5, ""},
    +		{"Sizes", Type, 5, ""},
    +		{"SizesFor", Func, 9, "func(compiler string, arch string) Sizes"},
    +		{"Slice", Type, 5, ""},
    +		{"StdSizes", Type, 5, ""},
    +		{"StdSizes.MaxAlign", Field, 5, ""},
    +		{"StdSizes.WordSize", Field, 5, ""},
    +		{"String", Const, 5, ""},
    +		{"Struct", Type, 5, ""},
    +		{"Term", Type, 18, ""},
    +		{"Tuple", Type, 5, ""},
    +		{"Typ", Var, 5, ""},
    +		{"Type", Type, 5, ""},
    +		{"TypeAndValue", Type, 5, ""},
    +		{"TypeAndValue.Type", Field, 5, ""},
    +		{"TypeAndValue.Value", Field, 5, ""},
    +		{"TypeList", Type, 18, ""},
    +		{"TypeName", Type, 5, ""},
    +		{"TypeParam", Type, 18, ""},
    +		{"TypeParamList", Type, 18, ""},
    +		{"TypeString", Func, 5, "func(typ Type, qf Qualifier) string"},
    +		{"Uint", Const, 5, ""},
    +		{"Uint16", Const, 5, ""},
    +		{"Uint32", Const, 5, ""},
    +		{"Uint64", Const, 5, ""},
    +		{"Uint8", Const, 5, ""},
    +		{"Uintptr", Const, 5, ""},
    +		{"Unalias", Func, 22, "func(t Type) Type"},
    +		{"Union", Type, 18, ""},
    +		{"Universe", Var, 5, ""},
    +		{"Unsafe", Var, 5, ""},
    +		{"UnsafePointer", Const, 5, ""},
    +		{"UntypedBool", Const, 5, ""},
    +		{"UntypedComplex", Const, 5, ""},
    +		{"UntypedFloat", Const, 5, ""},
    +		{"UntypedInt", Const, 5, ""},
    +		{"UntypedNil", Const, 5, ""},
    +		{"UntypedRune", Const, 5, ""},
    +		{"UntypedString", Const, 5, ""},
    +		{"Var", Type, 5, ""},
    +		{"VarKind", Type, 25, ""},
    +		{"WriteExpr", Func, 5, "func(buf *bytes.Buffer, x ast.Expr)"},
    +		{"WriteSignature", Func, 5, "func(buf *bytes.Buffer, sig *Signature, qf Qualifier)"},
    +		{"WriteType", Func, 5, "func(buf *bytes.Buffer, typ Type, qf Qualifier)"},
     	},
     	"go/version": {
    -		{"Compare", Func, 22},
    -		{"IsValid", Func, 22},
    -		{"Lang", Func, 22},
    +		{"Compare", Func, 22, "func(x string, y string) int"},
    +		{"IsValid", Func, 22, "func(x string) bool"},
    +		{"Lang", Func, 22, "func(x string) string"},
     	},
     	"hash": {
    -		{"Hash", Type, 0},
    -		{"Hash32", Type, 0},
    -		{"Hash64", Type, 0},
    +		{"Cloner", Type, 25, ""},
    +		{"Hash", Type, 0, ""},
    +		{"Hash32", Type, 0, ""},
    +		{"Hash64", Type, 0, ""},
    +		{"XOF", Type, 25, ""},
     	},
     	"hash/adler32": {
    -		{"Checksum", Func, 0},
    -		{"New", Func, 0},
    -		{"Size", Const, 0},
    +		{"Checksum", Func, 0, "func(data []byte) uint32"},
    +		{"New", Func, 0, "func() hash.Hash32"},
    +		{"Size", Const, 0, ""},
     	},
     	"hash/crc32": {
    -		{"Castagnoli", Const, 0},
    -		{"Checksum", Func, 0},
    -		{"ChecksumIEEE", Func, 0},
    -		{"IEEE", Const, 0},
    -		{"IEEETable", Var, 0},
    -		{"Koopman", Const, 0},
    -		{"MakeTable", Func, 0},
    -		{"New", Func, 0},
    -		{"NewIEEE", Func, 0},
    -		{"Size", Const, 0},
    -		{"Table", Type, 0},
    -		{"Update", Func, 0},
    +		{"Castagnoli", Const, 0, ""},
    +		{"Checksum", Func, 0, "func(data []byte, tab *Table) uint32"},
    +		{"ChecksumIEEE", Func, 0, "func(data []byte) uint32"},
    +		{"IEEE", Const, 0, ""},
    +		{"IEEETable", Var, 0, ""},
    +		{"Koopman", Const, 0, ""},
    +		{"MakeTable", Func, 0, "func(poly uint32) *Table"},
    +		{"New", Func, 0, "func(tab *Table) hash.Hash32"},
    +		{"NewIEEE", Func, 0, "func() hash.Hash32"},
    +		{"Size", Const, 0, ""},
    +		{"Table", Type, 0, ""},
    +		{"Update", Func, 0, "func(crc uint32, tab *Table, p []byte) uint32"},
     	},
     	"hash/crc64": {
    -		{"Checksum", Func, 0},
    -		{"ECMA", Const, 0},
    -		{"ISO", Const, 0},
    -		{"MakeTable", Func, 0},
    -		{"New", Func, 0},
    -		{"Size", Const, 0},
    -		{"Table", Type, 0},
    -		{"Update", Func, 0},
    +		{"Checksum", Func, 0, "func(data []byte, tab *Table) uint64"},
    +		{"ECMA", Const, 0, ""},
    +		{"ISO", Const, 0, ""},
    +		{"MakeTable", Func, 0, "func(poly uint64) *Table"},
    +		{"New", Func, 0, "func(tab *Table) hash.Hash64"},
    +		{"Size", Const, 0, ""},
    +		{"Table", Type, 0, ""},
    +		{"Update", Func, 0, "func(crc uint64, tab *Table, p []byte) uint64"},
     	},
     	"hash/fnv": {
    -		{"New128", Func, 9},
    -		{"New128a", Func, 9},
    -		{"New32", Func, 0},
    -		{"New32a", Func, 0},
    -		{"New64", Func, 0},
    -		{"New64a", Func, 0},
    +		{"New128", Func, 9, "func() hash.Hash"},
    +		{"New128a", Func, 9, "func() hash.Hash"},
    +		{"New32", Func, 0, "func() hash.Hash32"},
    +		{"New32a", Func, 0, "func() hash.Hash32"},
    +		{"New64", Func, 0, "func() hash.Hash64"},
    +		{"New64a", Func, 0, "func() hash.Hash64"},
     	},
     	"hash/maphash": {
    -		{"(*Hash).BlockSize", Method, 14},
    -		{"(*Hash).Reset", Method, 14},
    -		{"(*Hash).Seed", Method, 14},
    -		{"(*Hash).SetSeed", Method, 14},
    -		{"(*Hash).Size", Method, 14},
    -		{"(*Hash).Sum", Method, 14},
    -		{"(*Hash).Sum64", Method, 14},
    -		{"(*Hash).Write", Method, 14},
    -		{"(*Hash).WriteByte", Method, 14},
    -		{"(*Hash).WriteString", Method, 14},
    -		{"Bytes", Func, 19},
    -		{"Hash", Type, 14},
    -		{"MakeSeed", Func, 14},
    -		{"Seed", Type, 14},
    -		{"String", Func, 19},
    +		{"(*Hash).BlockSize", Method, 14, ""},
    +		{"(*Hash).Clone", Method, 25, ""},
    +		{"(*Hash).Reset", Method, 14, ""},
    +		{"(*Hash).Seed", Method, 14, ""},
    +		{"(*Hash).SetSeed", Method, 14, ""},
    +		{"(*Hash).Size", Method, 14, ""},
    +		{"(*Hash).Sum", Method, 14, ""},
    +		{"(*Hash).Sum64", Method, 14, ""},
    +		{"(*Hash).Write", Method, 14, ""},
    +		{"(*Hash).WriteByte", Method, 14, ""},
    +		{"(*Hash).WriteString", Method, 14, ""},
    +		{"Bytes", Func, 19, "func(seed Seed, b []byte) uint64"},
    +		{"Comparable", Func, 24, "func[T comparable](seed Seed, v T) uint64"},
    +		{"Hash", Type, 14, ""},
    +		{"MakeSeed", Func, 14, "func() Seed"},
    +		{"Seed", Type, 14, ""},
    +		{"String", Func, 19, "func(seed Seed, s string) uint64"},
    +		{"WriteComparable", Func, 24, "func[T comparable](h *Hash, x T)"},
     	},
     	"html": {
    -		{"EscapeString", Func, 0},
    -		{"UnescapeString", Func, 0},
    +		{"EscapeString", Func, 0, "func(s string) string"},
    +		{"UnescapeString", Func, 0, "func(s string) string"},
     	},
     	"html/template": {
    -		{"(*Error).Error", Method, 0},
    -		{"(*Template).AddParseTree", Method, 0},
    -		{"(*Template).Clone", Method, 0},
    -		{"(*Template).DefinedTemplates", Method, 6},
    -		{"(*Template).Delims", Method, 0},
    -		{"(*Template).Execute", Method, 0},
    -		{"(*Template).ExecuteTemplate", Method, 0},
    -		{"(*Template).Funcs", Method, 0},
    -		{"(*Template).Lookup", Method, 0},
    -		{"(*Template).Name", Method, 0},
    -		{"(*Template).New", Method, 0},
    -		{"(*Template).Option", Method, 5},
    -		{"(*Template).Parse", Method, 0},
    -		{"(*Template).ParseFS", Method, 16},
    -		{"(*Template).ParseFiles", Method, 0},
    -		{"(*Template).ParseGlob", Method, 0},
    -		{"(*Template).Templates", Method, 0},
    -		{"CSS", Type, 0},
    -		{"ErrAmbigContext", Const, 0},
    -		{"ErrBadHTML", Const, 0},
    -		{"ErrBranchEnd", Const, 0},
    -		{"ErrEndContext", Const, 0},
    -		{"ErrJSTemplate", Const, 21},
    -		{"ErrNoSuchTemplate", Const, 0},
    -		{"ErrOutputContext", Const, 0},
    -		{"ErrPartialCharset", Const, 0},
    -		{"ErrPartialEscape", Const, 0},
    -		{"ErrPredefinedEscaper", Const, 9},
    -		{"ErrRangeLoopReentry", Const, 0},
    -		{"ErrSlashAmbig", Const, 0},
    -		{"Error", Type, 0},
    -		{"Error.Description", Field, 0},
    -		{"Error.ErrorCode", Field, 0},
    -		{"Error.Line", Field, 0},
    -		{"Error.Name", Field, 0},
    -		{"Error.Node", Field, 4},
    -		{"ErrorCode", Type, 0},
    -		{"FuncMap", Type, 0},
    -		{"HTML", Type, 0},
    -		{"HTMLAttr", Type, 0},
    -		{"HTMLEscape", Func, 0},
    -		{"HTMLEscapeString", Func, 0},
    -		{"HTMLEscaper", Func, 0},
    -		{"IsTrue", Func, 6},
    -		{"JS", Type, 0},
    -		{"JSEscape", Func, 0},
    -		{"JSEscapeString", Func, 0},
    -		{"JSEscaper", Func, 0},
    -		{"JSStr", Type, 0},
    -		{"Must", Func, 0},
    -		{"New", Func, 0},
    -		{"OK", Const, 0},
    -		{"ParseFS", Func, 16},
    -		{"ParseFiles", Func, 0},
    -		{"ParseGlob", Func, 0},
    -		{"Srcset", Type, 10},
    -		{"Template", Type, 0},
    -		{"Template.Tree", Field, 2},
    -		{"URL", Type, 0},
    -		{"URLQueryEscaper", Func, 0},
    +		{"(*Error).Error", Method, 0, ""},
    +		{"(*Template).AddParseTree", Method, 0, ""},
    +		{"(*Template).Clone", Method, 0, ""},
    +		{"(*Template).DefinedTemplates", Method, 6, ""},
    +		{"(*Template).Delims", Method, 0, ""},
    +		{"(*Template).Execute", Method, 0, ""},
    +		{"(*Template).ExecuteTemplate", Method, 0, ""},
    +		{"(*Template).Funcs", Method, 0, ""},
    +		{"(*Template).Lookup", Method, 0, ""},
    +		{"(*Template).Name", Method, 0, ""},
    +		{"(*Template).New", Method, 0, ""},
    +		{"(*Template).Option", Method, 5, ""},
    +		{"(*Template).Parse", Method, 0, ""},
    +		{"(*Template).ParseFS", Method, 16, ""},
    +		{"(*Template).ParseFiles", Method, 0, ""},
    +		{"(*Template).ParseGlob", Method, 0, ""},
    +		{"(*Template).Templates", Method, 0, ""},
    +		{"CSS", Type, 0, ""},
    +		{"ErrAmbigContext", Const, 0, ""},
    +		{"ErrBadHTML", Const, 0, ""},
    +		{"ErrBranchEnd", Const, 0, ""},
    +		{"ErrEndContext", Const, 0, ""},
    +		{"ErrJSTemplate", Const, 21, ""},
    +		{"ErrNoSuchTemplate", Const, 0, ""},
    +		{"ErrOutputContext", Const, 0, ""},
    +		{"ErrPartialCharset", Const, 0, ""},
    +		{"ErrPartialEscape", Const, 0, ""},
    +		{"ErrPredefinedEscaper", Const, 9, ""},
    +		{"ErrRangeLoopReentry", Const, 0, ""},
    +		{"ErrSlashAmbig", Const, 0, ""},
    +		{"Error", Type, 0, ""},
    +		{"Error.Description", Field, 0, ""},
    +		{"Error.ErrorCode", Field, 0, ""},
    +		{"Error.Line", Field, 0, ""},
    +		{"Error.Name", Field, 0, ""},
    +		{"Error.Node", Field, 4, ""},
    +		{"ErrorCode", Type, 0, ""},
    +		{"FuncMap", Type, 0, ""},
    +		{"HTML", Type, 0, ""},
    +		{"HTMLAttr", Type, 0, ""},
    +		{"HTMLEscape", Func, 0, "func(w io.Writer, b []byte)"},
    +		{"HTMLEscapeString", Func, 0, "func(s string) string"},
    +		{"HTMLEscaper", Func, 0, "func(args ...any) string"},
    +		{"IsTrue", Func, 6, "func(val any) (truth bool, ok bool)"},
    +		{"JS", Type, 0, ""},
    +		{"JSEscape", Func, 0, "func(w io.Writer, b []byte)"},
    +		{"JSEscapeString", Func, 0, "func(s string) string"},
    +		{"JSEscaper", Func, 0, "func(args ...any) string"},
    +		{"JSStr", Type, 0, ""},
    +		{"Must", Func, 0, "func(t *Template, err error) *Template"},
    +		{"New", Func, 0, "func(name string) *Template"},
    +		{"OK", Const, 0, ""},
    +		{"ParseFS", Func, 16, "func(fs fs.FS, patterns ...string) (*Template, error)"},
    +		{"ParseFiles", Func, 0, "func(filenames ...string) (*Template, error)"},
    +		{"ParseGlob", Func, 0, "func(pattern string) (*Template, error)"},
    +		{"Srcset", Type, 10, ""},
    +		{"Template", Type, 0, ""},
    +		{"Template.Tree", Field, 2, ""},
    +		{"URL", Type, 0, ""},
    +		{"URLQueryEscaper", Func, 0, "func(args ...any) string"},
     	},
     	"image": {
    -		{"(*Alpha).AlphaAt", Method, 4},
    -		{"(*Alpha).At", Method, 0},
    -		{"(*Alpha).Bounds", Method, 0},
    -		{"(*Alpha).ColorModel", Method, 0},
    -		{"(*Alpha).Opaque", Method, 0},
    -		{"(*Alpha).PixOffset", Method, 0},
    -		{"(*Alpha).RGBA64At", Method, 17},
    -		{"(*Alpha).Set", Method, 0},
    -		{"(*Alpha).SetAlpha", Method, 0},
    -		{"(*Alpha).SetRGBA64", Method, 17},
    -		{"(*Alpha).SubImage", Method, 0},
    -		{"(*Alpha16).Alpha16At", Method, 4},
    -		{"(*Alpha16).At", Method, 0},
    -		{"(*Alpha16).Bounds", Method, 0},
    -		{"(*Alpha16).ColorModel", Method, 0},
    -		{"(*Alpha16).Opaque", Method, 0},
    -		{"(*Alpha16).PixOffset", Method, 0},
    -		{"(*Alpha16).RGBA64At", Method, 17},
    -		{"(*Alpha16).Set", Method, 0},
    -		{"(*Alpha16).SetAlpha16", Method, 0},
    -		{"(*Alpha16).SetRGBA64", Method, 17},
    -		{"(*Alpha16).SubImage", Method, 0},
    -		{"(*CMYK).At", Method, 5},
    -		{"(*CMYK).Bounds", Method, 5},
    -		{"(*CMYK).CMYKAt", Method, 5},
    -		{"(*CMYK).ColorModel", Method, 5},
    -		{"(*CMYK).Opaque", Method, 5},
    -		{"(*CMYK).PixOffset", Method, 5},
    -		{"(*CMYK).RGBA64At", Method, 17},
    -		{"(*CMYK).Set", Method, 5},
    -		{"(*CMYK).SetCMYK", Method, 5},
    -		{"(*CMYK).SetRGBA64", Method, 17},
    -		{"(*CMYK).SubImage", Method, 5},
    -		{"(*Gray).At", Method, 0},
    -		{"(*Gray).Bounds", Method, 0},
    -		{"(*Gray).ColorModel", Method, 0},
    -		{"(*Gray).GrayAt", Method, 4},
    -		{"(*Gray).Opaque", Method, 0},
    -		{"(*Gray).PixOffset", Method, 0},
    -		{"(*Gray).RGBA64At", Method, 17},
    -		{"(*Gray).Set", Method, 0},
    -		{"(*Gray).SetGray", Method, 0},
    -		{"(*Gray).SetRGBA64", Method, 17},
    -		{"(*Gray).SubImage", Method, 0},
    -		{"(*Gray16).At", Method, 0},
    -		{"(*Gray16).Bounds", Method, 0},
    -		{"(*Gray16).ColorModel", Method, 0},
    -		{"(*Gray16).Gray16At", Method, 4},
    -		{"(*Gray16).Opaque", Method, 0},
    -		{"(*Gray16).PixOffset", Method, 0},
    -		{"(*Gray16).RGBA64At", Method, 17},
    -		{"(*Gray16).Set", Method, 0},
    -		{"(*Gray16).SetGray16", Method, 0},
    -		{"(*Gray16).SetRGBA64", Method, 17},
    -		{"(*Gray16).SubImage", Method, 0},
    -		{"(*NRGBA).At", Method, 0},
    -		{"(*NRGBA).Bounds", Method, 0},
    -		{"(*NRGBA).ColorModel", Method, 0},
    -		{"(*NRGBA).NRGBAAt", Method, 4},
    -		{"(*NRGBA).Opaque", Method, 0},
    -		{"(*NRGBA).PixOffset", Method, 0},
    -		{"(*NRGBA).RGBA64At", Method, 17},
    -		{"(*NRGBA).Set", Method, 0},
    -		{"(*NRGBA).SetNRGBA", Method, 0},
    -		{"(*NRGBA).SetRGBA64", Method, 17},
    -		{"(*NRGBA).SubImage", Method, 0},
    -		{"(*NRGBA64).At", Method, 0},
    -		{"(*NRGBA64).Bounds", Method, 0},
    -		{"(*NRGBA64).ColorModel", Method, 0},
    -		{"(*NRGBA64).NRGBA64At", Method, 4},
    -		{"(*NRGBA64).Opaque", Method, 0},
    -		{"(*NRGBA64).PixOffset", Method, 0},
    -		{"(*NRGBA64).RGBA64At", Method, 17},
    -		{"(*NRGBA64).Set", Method, 0},
    -		{"(*NRGBA64).SetNRGBA64", Method, 0},
    -		{"(*NRGBA64).SetRGBA64", Method, 17},
    -		{"(*NRGBA64).SubImage", Method, 0},
    -		{"(*NYCbCrA).AOffset", Method, 6},
    -		{"(*NYCbCrA).At", Method, 6},
    -		{"(*NYCbCrA).Bounds", Method, 6},
    -		{"(*NYCbCrA).COffset", Method, 6},
    -		{"(*NYCbCrA).ColorModel", Method, 6},
    -		{"(*NYCbCrA).NYCbCrAAt", Method, 6},
    -		{"(*NYCbCrA).Opaque", Method, 6},
    -		{"(*NYCbCrA).RGBA64At", Method, 17},
    -		{"(*NYCbCrA).SubImage", Method, 6},
    -		{"(*NYCbCrA).YCbCrAt", Method, 6},
    -		{"(*NYCbCrA).YOffset", Method, 6},
    -		{"(*Paletted).At", Method, 0},
    -		{"(*Paletted).Bounds", Method, 0},
    -		{"(*Paletted).ColorIndexAt", Method, 0},
    -		{"(*Paletted).ColorModel", Method, 0},
    -		{"(*Paletted).Opaque", Method, 0},
    -		{"(*Paletted).PixOffset", Method, 0},
    -		{"(*Paletted).RGBA64At", Method, 17},
    -		{"(*Paletted).Set", Method, 0},
    -		{"(*Paletted).SetColorIndex", Method, 0},
    -		{"(*Paletted).SetRGBA64", Method, 17},
    -		{"(*Paletted).SubImage", Method, 0},
    -		{"(*RGBA).At", Method, 0},
    -		{"(*RGBA).Bounds", Method, 0},
    -		{"(*RGBA).ColorModel", Method, 0},
    -		{"(*RGBA).Opaque", Method, 0},
    -		{"(*RGBA).PixOffset", Method, 0},
    -		{"(*RGBA).RGBA64At", Method, 17},
    -		{"(*RGBA).RGBAAt", Method, 4},
    -		{"(*RGBA).Set", Method, 0},
    -		{"(*RGBA).SetRGBA", Method, 0},
    -		{"(*RGBA).SetRGBA64", Method, 17},
    -		{"(*RGBA).SubImage", Method, 0},
    -		{"(*RGBA64).At", Method, 0},
    -		{"(*RGBA64).Bounds", Method, 0},
    -		{"(*RGBA64).ColorModel", Method, 0},
    -		{"(*RGBA64).Opaque", Method, 0},
    -		{"(*RGBA64).PixOffset", Method, 0},
    -		{"(*RGBA64).RGBA64At", Method, 4},
    -		{"(*RGBA64).Set", Method, 0},
    -		{"(*RGBA64).SetRGBA64", Method, 0},
    -		{"(*RGBA64).SubImage", Method, 0},
    -		{"(*Uniform).At", Method, 0},
    -		{"(*Uniform).Bounds", Method, 0},
    -		{"(*Uniform).ColorModel", Method, 0},
    -		{"(*Uniform).Convert", Method, 0},
    -		{"(*Uniform).Opaque", Method, 0},
    -		{"(*Uniform).RGBA", Method, 0},
    -		{"(*Uniform).RGBA64At", Method, 17},
    -		{"(*YCbCr).At", Method, 0},
    -		{"(*YCbCr).Bounds", Method, 0},
    -		{"(*YCbCr).COffset", Method, 0},
    -		{"(*YCbCr).ColorModel", Method, 0},
    -		{"(*YCbCr).Opaque", Method, 0},
    -		{"(*YCbCr).RGBA64At", Method, 17},
    -		{"(*YCbCr).SubImage", Method, 0},
    -		{"(*YCbCr).YCbCrAt", Method, 4},
    -		{"(*YCbCr).YOffset", Method, 0},
    -		{"(Point).Add", Method, 0},
    -		{"(Point).Div", Method, 0},
    -		{"(Point).Eq", Method, 0},
    -		{"(Point).In", Method, 0},
    -		{"(Point).Mod", Method, 0},
    -		{"(Point).Mul", Method, 0},
    -		{"(Point).String", Method, 0},
    -		{"(Point).Sub", Method, 0},
    -		{"(Rectangle).Add", Method, 0},
    -		{"(Rectangle).At", Method, 5},
    -		{"(Rectangle).Bounds", Method, 5},
    -		{"(Rectangle).Canon", Method, 0},
    -		{"(Rectangle).ColorModel", Method, 5},
    -		{"(Rectangle).Dx", Method, 0},
    -		{"(Rectangle).Dy", Method, 0},
    -		{"(Rectangle).Empty", Method, 0},
    -		{"(Rectangle).Eq", Method, 0},
    -		{"(Rectangle).In", Method, 0},
    -		{"(Rectangle).Inset", Method, 0},
    -		{"(Rectangle).Intersect", Method, 0},
    -		{"(Rectangle).Overlaps", Method, 0},
    -		{"(Rectangle).RGBA64At", Method, 17},
    -		{"(Rectangle).Size", Method, 0},
    -		{"(Rectangle).String", Method, 0},
    -		{"(Rectangle).Sub", Method, 0},
    -		{"(Rectangle).Union", Method, 0},
    -		{"(YCbCrSubsampleRatio).String", Method, 0},
    -		{"Alpha", Type, 0},
    -		{"Alpha.Pix", Field, 0},
    -		{"Alpha.Rect", Field, 0},
    -		{"Alpha.Stride", Field, 0},
    -		{"Alpha16", Type, 0},
    -		{"Alpha16.Pix", Field, 0},
    -		{"Alpha16.Rect", Field, 0},
    -		{"Alpha16.Stride", Field, 0},
    -		{"Black", Var, 0},
    -		{"CMYK", Type, 5},
    -		{"CMYK.Pix", Field, 5},
    -		{"CMYK.Rect", Field, 5},
    -		{"CMYK.Stride", Field, 5},
    -		{"Config", Type, 0},
    -		{"Config.ColorModel", Field, 0},
    -		{"Config.Height", Field, 0},
    -		{"Config.Width", Field, 0},
    -		{"Decode", Func, 0},
    -		{"DecodeConfig", Func, 0},
    -		{"ErrFormat", Var, 0},
    -		{"Gray", Type, 0},
    -		{"Gray.Pix", Field, 0},
    -		{"Gray.Rect", Field, 0},
    -		{"Gray.Stride", Field, 0},
    -		{"Gray16", Type, 0},
    -		{"Gray16.Pix", Field, 0},
    -		{"Gray16.Rect", Field, 0},
    -		{"Gray16.Stride", Field, 0},
    -		{"Image", Type, 0},
    -		{"NRGBA", Type, 0},
    -		{"NRGBA.Pix", Field, 0},
    -		{"NRGBA.Rect", Field, 0},
    -		{"NRGBA.Stride", Field, 0},
    -		{"NRGBA64", Type, 0},
    -		{"NRGBA64.Pix", Field, 0},
    -		{"NRGBA64.Rect", Field, 0},
    -		{"NRGBA64.Stride", Field, 0},
    -		{"NYCbCrA", Type, 6},
    -		{"NYCbCrA.A", Field, 6},
    -		{"NYCbCrA.AStride", Field, 6},
    -		{"NYCbCrA.YCbCr", Field, 6},
    -		{"NewAlpha", Func, 0},
    -		{"NewAlpha16", Func, 0},
    -		{"NewCMYK", Func, 5},
    -		{"NewGray", Func, 0},
    -		{"NewGray16", Func, 0},
    -		{"NewNRGBA", Func, 0},
    -		{"NewNRGBA64", Func, 0},
    -		{"NewNYCbCrA", Func, 6},
    -		{"NewPaletted", Func, 0},
    -		{"NewRGBA", Func, 0},
    -		{"NewRGBA64", Func, 0},
    -		{"NewUniform", Func, 0},
    -		{"NewYCbCr", Func, 0},
    -		{"Opaque", Var, 0},
    -		{"Paletted", Type, 0},
    -		{"Paletted.Palette", Field, 0},
    -		{"Paletted.Pix", Field, 0},
    -		{"Paletted.Rect", Field, 0},
    -		{"Paletted.Stride", Field, 0},
    -		{"PalettedImage", Type, 0},
    -		{"Point", Type, 0},
    -		{"Point.X", Field, 0},
    -		{"Point.Y", Field, 0},
    -		{"Pt", Func, 0},
    -		{"RGBA", Type, 0},
    -		{"RGBA.Pix", Field, 0},
    -		{"RGBA.Rect", Field, 0},
    -		{"RGBA.Stride", Field, 0},
    -		{"RGBA64", Type, 0},
    -		{"RGBA64.Pix", Field, 0},
    -		{"RGBA64.Rect", Field, 0},
    -		{"RGBA64.Stride", Field, 0},
    -		{"RGBA64Image", Type, 17},
    -		{"Rect", Func, 0},
    -		{"Rectangle", Type, 0},
    -		{"Rectangle.Max", Field, 0},
    -		{"Rectangle.Min", Field, 0},
    -		{"RegisterFormat", Func, 0},
    -		{"Transparent", Var, 0},
    -		{"Uniform", Type, 0},
    -		{"Uniform.C", Field, 0},
    -		{"White", Var, 0},
    -		{"YCbCr", Type, 0},
    -		{"YCbCr.CStride", Field, 0},
    -		{"YCbCr.Cb", Field, 0},
    -		{"YCbCr.Cr", Field, 0},
    -		{"YCbCr.Rect", Field, 0},
    -		{"YCbCr.SubsampleRatio", Field, 0},
    -		{"YCbCr.Y", Field, 0},
    -		{"YCbCr.YStride", Field, 0},
    -		{"YCbCrSubsampleRatio", Type, 0},
    -		{"YCbCrSubsampleRatio410", Const, 5},
    -		{"YCbCrSubsampleRatio411", Const, 5},
    -		{"YCbCrSubsampleRatio420", Const, 0},
    -		{"YCbCrSubsampleRatio422", Const, 0},
    -		{"YCbCrSubsampleRatio440", Const, 1},
    -		{"YCbCrSubsampleRatio444", Const, 0},
    -		{"ZP", Var, 0},
    -		{"ZR", Var, 0},
    +		{"(*Alpha).AlphaAt", Method, 4, ""},
    +		{"(*Alpha).At", Method, 0, ""},
    +		{"(*Alpha).Bounds", Method, 0, ""},
    +		{"(*Alpha).ColorModel", Method, 0, ""},
    +		{"(*Alpha).Opaque", Method, 0, ""},
    +		{"(*Alpha).PixOffset", Method, 0, ""},
    +		{"(*Alpha).RGBA64At", Method, 17, ""},
    +		{"(*Alpha).Set", Method, 0, ""},
    +		{"(*Alpha).SetAlpha", Method, 0, ""},
    +		{"(*Alpha).SetRGBA64", Method, 17, ""},
    +		{"(*Alpha).SubImage", Method, 0, ""},
    +		{"(*Alpha16).Alpha16At", Method, 4, ""},
    +		{"(*Alpha16).At", Method, 0, ""},
    +		{"(*Alpha16).Bounds", Method, 0, ""},
    +		{"(*Alpha16).ColorModel", Method, 0, ""},
    +		{"(*Alpha16).Opaque", Method, 0, ""},
    +		{"(*Alpha16).PixOffset", Method, 0, ""},
    +		{"(*Alpha16).RGBA64At", Method, 17, ""},
    +		{"(*Alpha16).Set", Method, 0, ""},
    +		{"(*Alpha16).SetAlpha16", Method, 0, ""},
    +		{"(*Alpha16).SetRGBA64", Method, 17, ""},
    +		{"(*Alpha16).SubImage", Method, 0, ""},
    +		{"(*CMYK).At", Method, 5, ""},
    +		{"(*CMYK).Bounds", Method, 5, ""},
    +		{"(*CMYK).CMYKAt", Method, 5, ""},
    +		{"(*CMYK).ColorModel", Method, 5, ""},
    +		{"(*CMYK).Opaque", Method, 5, ""},
    +		{"(*CMYK).PixOffset", Method, 5, ""},
    +		{"(*CMYK).RGBA64At", Method, 17, ""},
    +		{"(*CMYK).Set", Method, 5, ""},
    +		{"(*CMYK).SetCMYK", Method, 5, ""},
    +		{"(*CMYK).SetRGBA64", Method, 17, ""},
    +		{"(*CMYK).SubImage", Method, 5, ""},
    +		{"(*Gray).At", Method, 0, ""},
    +		{"(*Gray).Bounds", Method, 0, ""},
    +		{"(*Gray).ColorModel", Method, 0, ""},
    +		{"(*Gray).GrayAt", Method, 4, ""},
    +		{"(*Gray).Opaque", Method, 0, ""},
    +		{"(*Gray).PixOffset", Method, 0, ""},
    +		{"(*Gray).RGBA64At", Method, 17, ""},
    +		{"(*Gray).Set", Method, 0, ""},
    +		{"(*Gray).SetGray", Method, 0, ""},
    +		{"(*Gray).SetRGBA64", Method, 17, ""},
    +		{"(*Gray).SubImage", Method, 0, ""},
    +		{"(*Gray16).At", Method, 0, ""},
    +		{"(*Gray16).Bounds", Method, 0, ""},
    +		{"(*Gray16).ColorModel", Method, 0, ""},
    +		{"(*Gray16).Gray16At", Method, 4, ""},
    +		{"(*Gray16).Opaque", Method, 0, ""},
    +		{"(*Gray16).PixOffset", Method, 0, ""},
    +		{"(*Gray16).RGBA64At", Method, 17, ""},
    +		{"(*Gray16).Set", Method, 0, ""},
    +		{"(*Gray16).SetGray16", Method, 0, ""},
    +		{"(*Gray16).SetRGBA64", Method, 17, ""},
    +		{"(*Gray16).SubImage", Method, 0, ""},
    +		{"(*NRGBA).At", Method, 0, ""},
    +		{"(*NRGBA).Bounds", Method, 0, ""},
    +		{"(*NRGBA).ColorModel", Method, 0, ""},
    +		{"(*NRGBA).NRGBAAt", Method, 4, ""},
    +		{"(*NRGBA).Opaque", Method, 0, ""},
    +		{"(*NRGBA).PixOffset", Method, 0, ""},
    +		{"(*NRGBA).RGBA64At", Method, 17, ""},
    +		{"(*NRGBA).Set", Method, 0, ""},
    +		{"(*NRGBA).SetNRGBA", Method, 0, ""},
    +		{"(*NRGBA).SetRGBA64", Method, 17, ""},
    +		{"(*NRGBA).SubImage", Method, 0, ""},
    +		{"(*NRGBA64).At", Method, 0, ""},
    +		{"(*NRGBA64).Bounds", Method, 0, ""},
    +		{"(*NRGBA64).ColorModel", Method, 0, ""},
    +		{"(*NRGBA64).NRGBA64At", Method, 4, ""},
    +		{"(*NRGBA64).Opaque", Method, 0, ""},
    +		{"(*NRGBA64).PixOffset", Method, 0, ""},
    +		{"(*NRGBA64).RGBA64At", Method, 17, ""},
    +		{"(*NRGBA64).Set", Method, 0, ""},
    +		{"(*NRGBA64).SetNRGBA64", Method, 0, ""},
    +		{"(*NRGBA64).SetRGBA64", Method, 17, ""},
    +		{"(*NRGBA64).SubImage", Method, 0, ""},
    +		{"(*NYCbCrA).AOffset", Method, 6, ""},
    +		{"(*NYCbCrA).At", Method, 6, ""},
    +		{"(*NYCbCrA).Bounds", Method, 6, ""},
    +		{"(*NYCbCrA).COffset", Method, 6, ""},
    +		{"(*NYCbCrA).ColorModel", Method, 6, ""},
    +		{"(*NYCbCrA).NYCbCrAAt", Method, 6, ""},
    +		{"(*NYCbCrA).Opaque", Method, 6, ""},
    +		{"(*NYCbCrA).RGBA64At", Method, 17, ""},
    +		{"(*NYCbCrA).SubImage", Method, 6, ""},
    +		{"(*NYCbCrA).YCbCrAt", Method, 6, ""},
    +		{"(*NYCbCrA).YOffset", Method, 6, ""},
    +		{"(*Paletted).At", Method, 0, ""},
    +		{"(*Paletted).Bounds", Method, 0, ""},
    +		{"(*Paletted).ColorIndexAt", Method, 0, ""},
    +		{"(*Paletted).ColorModel", Method, 0, ""},
    +		{"(*Paletted).Opaque", Method, 0, ""},
    +		{"(*Paletted).PixOffset", Method, 0, ""},
    +		{"(*Paletted).RGBA64At", Method, 17, ""},
    +		{"(*Paletted).Set", Method, 0, ""},
    +		{"(*Paletted).SetColorIndex", Method, 0, ""},
    +		{"(*Paletted).SetRGBA64", Method, 17, ""},
    +		{"(*Paletted).SubImage", Method, 0, ""},
    +		{"(*RGBA).At", Method, 0, ""},
    +		{"(*RGBA).Bounds", Method, 0, ""},
    +		{"(*RGBA).ColorModel", Method, 0, ""},
    +		{"(*RGBA).Opaque", Method, 0, ""},
    +		{"(*RGBA).PixOffset", Method, 0, ""},
    +		{"(*RGBA).RGBA64At", Method, 17, ""},
    +		{"(*RGBA).RGBAAt", Method, 4, ""},
    +		{"(*RGBA).Set", Method, 0, ""},
    +		{"(*RGBA).SetRGBA", Method, 0, ""},
    +		{"(*RGBA).SetRGBA64", Method, 17, ""},
    +		{"(*RGBA).SubImage", Method, 0, ""},
    +		{"(*RGBA64).At", Method, 0, ""},
    +		{"(*RGBA64).Bounds", Method, 0, ""},
    +		{"(*RGBA64).ColorModel", Method, 0, ""},
    +		{"(*RGBA64).Opaque", Method, 0, ""},
    +		{"(*RGBA64).PixOffset", Method, 0, ""},
    +		{"(*RGBA64).RGBA64At", Method, 4, ""},
    +		{"(*RGBA64).Set", Method, 0, ""},
    +		{"(*RGBA64).SetRGBA64", Method, 0, ""},
    +		{"(*RGBA64).SubImage", Method, 0, ""},
    +		{"(*Uniform).At", Method, 0, ""},
    +		{"(*Uniform).Bounds", Method, 0, ""},
    +		{"(*Uniform).ColorModel", Method, 0, ""},
    +		{"(*Uniform).Convert", Method, 0, ""},
    +		{"(*Uniform).Opaque", Method, 0, ""},
    +		{"(*Uniform).RGBA", Method, 0, ""},
    +		{"(*Uniform).RGBA64At", Method, 17, ""},
    +		{"(*YCbCr).At", Method, 0, ""},
    +		{"(*YCbCr).Bounds", Method, 0, ""},
    +		{"(*YCbCr).COffset", Method, 0, ""},
    +		{"(*YCbCr).ColorModel", Method, 0, ""},
    +		{"(*YCbCr).Opaque", Method, 0, ""},
    +		{"(*YCbCr).RGBA64At", Method, 17, ""},
    +		{"(*YCbCr).SubImage", Method, 0, ""},
    +		{"(*YCbCr).YCbCrAt", Method, 4, ""},
    +		{"(*YCbCr).YOffset", Method, 0, ""},
    +		{"(Point).Add", Method, 0, ""},
    +		{"(Point).Div", Method, 0, ""},
    +		{"(Point).Eq", Method, 0, ""},
    +		{"(Point).In", Method, 0, ""},
    +		{"(Point).Mod", Method, 0, ""},
    +		{"(Point).Mul", Method, 0, ""},
    +		{"(Point).String", Method, 0, ""},
    +		{"(Point).Sub", Method, 0, ""},
    +		{"(Rectangle).Add", Method, 0, ""},
    +		{"(Rectangle).At", Method, 5, ""},
    +		{"(Rectangle).Bounds", Method, 5, ""},
    +		{"(Rectangle).Canon", Method, 0, ""},
    +		{"(Rectangle).ColorModel", Method, 5, ""},
    +		{"(Rectangle).Dx", Method, 0, ""},
    +		{"(Rectangle).Dy", Method, 0, ""},
    +		{"(Rectangle).Empty", Method, 0, ""},
    +		{"(Rectangle).Eq", Method, 0, ""},
    +		{"(Rectangle).In", Method, 0, ""},
    +		{"(Rectangle).Inset", Method, 0, ""},
    +		{"(Rectangle).Intersect", Method, 0, ""},
    +		{"(Rectangle).Overlaps", Method, 0, ""},
    +		{"(Rectangle).RGBA64At", Method, 17, ""},
    +		{"(Rectangle).Size", Method, 0, ""},
    +		{"(Rectangle).String", Method, 0, ""},
    +		{"(Rectangle).Sub", Method, 0, ""},
    +		{"(Rectangle).Union", Method, 0, ""},
    +		{"(YCbCrSubsampleRatio).String", Method, 0, ""},
    +		{"Alpha", Type, 0, ""},
    +		{"Alpha.Pix", Field, 0, ""},
    +		{"Alpha.Rect", Field, 0, ""},
    +		{"Alpha.Stride", Field, 0, ""},
    +		{"Alpha16", Type, 0, ""},
    +		{"Alpha16.Pix", Field, 0, ""},
    +		{"Alpha16.Rect", Field, 0, ""},
    +		{"Alpha16.Stride", Field, 0, ""},
    +		{"Black", Var, 0, ""},
    +		{"CMYK", Type, 5, ""},
    +		{"CMYK.Pix", Field, 5, ""},
    +		{"CMYK.Rect", Field, 5, ""},
    +		{"CMYK.Stride", Field, 5, ""},
    +		{"Config", Type, 0, ""},
    +		{"Config.ColorModel", Field, 0, ""},
    +		{"Config.Height", Field, 0, ""},
    +		{"Config.Width", Field, 0, ""},
    +		{"Decode", Func, 0, "func(r io.Reader) (Image, string, error)"},
    +		{"DecodeConfig", Func, 0, "func(r io.Reader) (Config, string, error)"},
    +		{"ErrFormat", Var, 0, ""},
    +		{"Gray", Type, 0, ""},
    +		{"Gray.Pix", Field, 0, ""},
    +		{"Gray.Rect", Field, 0, ""},
    +		{"Gray.Stride", Field, 0, ""},
    +		{"Gray16", Type, 0, ""},
    +		{"Gray16.Pix", Field, 0, ""},
    +		{"Gray16.Rect", Field, 0, ""},
    +		{"Gray16.Stride", Field, 0, ""},
    +		{"Image", Type, 0, ""},
    +		{"NRGBA", Type, 0, ""},
    +		{"NRGBA.Pix", Field, 0, ""},
    +		{"NRGBA.Rect", Field, 0, ""},
    +		{"NRGBA.Stride", Field, 0, ""},
    +		{"NRGBA64", Type, 0, ""},
    +		{"NRGBA64.Pix", Field, 0, ""},
    +		{"NRGBA64.Rect", Field, 0, ""},
    +		{"NRGBA64.Stride", Field, 0, ""},
    +		{"NYCbCrA", Type, 6, ""},
    +		{"NYCbCrA.A", Field, 6, ""},
    +		{"NYCbCrA.AStride", Field, 6, ""},
    +		{"NYCbCrA.YCbCr", Field, 6, ""},
    +		{"NewAlpha", Func, 0, "func(r Rectangle) *Alpha"},
    +		{"NewAlpha16", Func, 0, "func(r Rectangle) *Alpha16"},
    +		{"NewCMYK", Func, 5, "func(r Rectangle) *CMYK"},
    +		{"NewGray", Func, 0, "func(r Rectangle) *Gray"},
    +		{"NewGray16", Func, 0, "func(r Rectangle) *Gray16"},
    +		{"NewNRGBA", Func, 0, "func(r Rectangle) *NRGBA"},
    +		{"NewNRGBA64", Func, 0, "func(r Rectangle) *NRGBA64"},
    +		{"NewNYCbCrA", Func, 6, "func(r Rectangle, subsampleRatio YCbCrSubsampleRatio) *NYCbCrA"},
    +		{"NewPaletted", Func, 0, "func(r Rectangle, p color.Palette) *Paletted"},
    +		{"NewRGBA", Func, 0, "func(r Rectangle) *RGBA"},
    +		{"NewRGBA64", Func, 0, "func(r Rectangle) *RGBA64"},
    +		{"NewUniform", Func, 0, "func(c color.Color) *Uniform"},
    +		{"NewYCbCr", Func, 0, "func(r Rectangle, subsampleRatio YCbCrSubsampleRatio) *YCbCr"},
    +		{"Opaque", Var, 0, ""},
    +		{"Paletted", Type, 0, ""},
    +		{"Paletted.Palette", Field, 0, ""},
    +		{"Paletted.Pix", Field, 0, ""},
    +		{"Paletted.Rect", Field, 0, ""},
    +		{"Paletted.Stride", Field, 0, ""},
    +		{"PalettedImage", Type, 0, ""},
    +		{"Point", Type, 0, ""},
    +		{"Point.X", Field, 0, ""},
    +		{"Point.Y", Field, 0, ""},
    +		{"Pt", Func, 0, "func(X int, Y int) Point"},
    +		{"RGBA", Type, 0, ""},
    +		{"RGBA.Pix", Field, 0, ""},
    +		{"RGBA.Rect", Field, 0, ""},
    +		{"RGBA.Stride", Field, 0, ""},
    +		{"RGBA64", Type, 0, ""},
    +		{"RGBA64.Pix", Field, 0, ""},
    +		{"RGBA64.Rect", Field, 0, ""},
    +		{"RGBA64.Stride", Field, 0, ""},
    +		{"RGBA64Image", Type, 17, ""},
    +		{"Rect", Func, 0, "func(x0 int, y0 int, x1 int, y1 int) Rectangle"},
    +		{"Rectangle", Type, 0, ""},
    +		{"Rectangle.Max", Field, 0, ""},
    +		{"Rectangle.Min", Field, 0, ""},
    +		{"RegisterFormat", Func, 0, "func(name string, magic string, decode func(io.Reader) (Image, error), decodeConfig func(io.Reader) (Config, error))"},
    +		{"Transparent", Var, 0, ""},
    +		{"Uniform", Type, 0, ""},
    +		{"Uniform.C", Field, 0, ""},
    +		{"White", Var, 0, ""},
    +		{"YCbCr", Type, 0, ""},
    +		{"YCbCr.CStride", Field, 0, ""},
    +		{"YCbCr.Cb", Field, 0, ""},
    +		{"YCbCr.Cr", Field, 0, ""},
    +		{"YCbCr.Rect", Field, 0, ""},
    +		{"YCbCr.SubsampleRatio", Field, 0, ""},
    +		{"YCbCr.Y", Field, 0, ""},
    +		{"YCbCr.YStride", Field, 0, ""},
    +		{"YCbCrSubsampleRatio", Type, 0, ""},
    +		{"YCbCrSubsampleRatio410", Const, 5, ""},
    +		{"YCbCrSubsampleRatio411", Const, 5, ""},
    +		{"YCbCrSubsampleRatio420", Const, 0, ""},
    +		{"YCbCrSubsampleRatio422", Const, 0, ""},
    +		{"YCbCrSubsampleRatio440", Const, 1, ""},
    +		{"YCbCrSubsampleRatio444", Const, 0, ""},
    +		{"ZP", Var, 0, ""},
    +		{"ZR", Var, 0, ""},
     	},
     	"image/color": {
    -		{"(Alpha).RGBA", Method, 0},
    -		{"(Alpha16).RGBA", Method, 0},
    -		{"(CMYK).RGBA", Method, 5},
    -		{"(Gray).RGBA", Method, 0},
    -		{"(Gray16).RGBA", Method, 0},
    -		{"(NRGBA).RGBA", Method, 0},
    -		{"(NRGBA64).RGBA", Method, 0},
    -		{"(NYCbCrA).RGBA", Method, 6},
    -		{"(Palette).Convert", Method, 0},
    -		{"(Palette).Index", Method, 0},
    -		{"(RGBA).RGBA", Method, 0},
    -		{"(RGBA64).RGBA", Method, 0},
    -		{"(YCbCr).RGBA", Method, 0},
    -		{"Alpha", Type, 0},
    -		{"Alpha.A", Field, 0},
    -		{"Alpha16", Type, 0},
    -		{"Alpha16.A", Field, 0},
    -		{"Alpha16Model", Var, 0},
    -		{"AlphaModel", Var, 0},
    -		{"Black", Var, 0},
    -		{"CMYK", Type, 5},
    -		{"CMYK.C", Field, 5},
    -		{"CMYK.K", Field, 5},
    -		{"CMYK.M", Field, 5},
    -		{"CMYK.Y", Field, 5},
    -		{"CMYKModel", Var, 5},
    -		{"CMYKToRGB", Func, 5},
    -		{"Color", Type, 0},
    -		{"Gray", Type, 0},
    -		{"Gray.Y", Field, 0},
    -		{"Gray16", Type, 0},
    -		{"Gray16.Y", Field, 0},
    -		{"Gray16Model", Var, 0},
    -		{"GrayModel", Var, 0},
    -		{"Model", Type, 0},
    -		{"ModelFunc", Func, 0},
    -		{"NRGBA", Type, 0},
    -		{"NRGBA.A", Field, 0},
    -		{"NRGBA.B", Field, 0},
    -		{"NRGBA.G", Field, 0},
    -		{"NRGBA.R", Field, 0},
    -		{"NRGBA64", Type, 0},
    -		{"NRGBA64.A", Field, 0},
    -		{"NRGBA64.B", Field, 0},
    -		{"NRGBA64.G", Field, 0},
    -		{"NRGBA64.R", Field, 0},
    -		{"NRGBA64Model", Var, 0},
    -		{"NRGBAModel", Var, 0},
    -		{"NYCbCrA", Type, 6},
    -		{"NYCbCrA.A", Field, 6},
    -		{"NYCbCrA.YCbCr", Field, 6},
    -		{"NYCbCrAModel", Var, 6},
    -		{"Opaque", Var, 0},
    -		{"Palette", Type, 0},
    -		{"RGBA", Type, 0},
    -		{"RGBA.A", Field, 0},
    -		{"RGBA.B", Field, 0},
    -		{"RGBA.G", Field, 0},
    -		{"RGBA.R", Field, 0},
    -		{"RGBA64", Type, 0},
    -		{"RGBA64.A", Field, 0},
    -		{"RGBA64.B", Field, 0},
    -		{"RGBA64.G", Field, 0},
    -		{"RGBA64.R", Field, 0},
    -		{"RGBA64Model", Var, 0},
    -		{"RGBAModel", Var, 0},
    -		{"RGBToCMYK", Func, 5},
    -		{"RGBToYCbCr", Func, 0},
    -		{"Transparent", Var, 0},
    -		{"White", Var, 0},
    -		{"YCbCr", Type, 0},
    -		{"YCbCr.Cb", Field, 0},
    -		{"YCbCr.Cr", Field, 0},
    -		{"YCbCr.Y", Field, 0},
    -		{"YCbCrModel", Var, 0},
    -		{"YCbCrToRGB", Func, 0},
    +		{"(Alpha).RGBA", Method, 0, ""},
    +		{"(Alpha16).RGBA", Method, 0, ""},
    +		{"(CMYK).RGBA", Method, 5, ""},
    +		{"(Gray).RGBA", Method, 0, ""},
    +		{"(Gray16).RGBA", Method, 0, ""},
    +		{"(NRGBA).RGBA", Method, 0, ""},
    +		{"(NRGBA64).RGBA", Method, 0, ""},
    +		{"(NYCbCrA).RGBA", Method, 6, ""},
    +		{"(Palette).Convert", Method, 0, ""},
    +		{"(Palette).Index", Method, 0, ""},
    +		{"(RGBA).RGBA", Method, 0, ""},
    +		{"(RGBA64).RGBA", Method, 0, ""},
    +		{"(YCbCr).RGBA", Method, 0, ""},
    +		{"Alpha", Type, 0, ""},
    +		{"Alpha.A", Field, 0, ""},
    +		{"Alpha16", Type, 0, ""},
    +		{"Alpha16.A", Field, 0, ""},
    +		{"Alpha16Model", Var, 0, ""},
    +		{"AlphaModel", Var, 0, ""},
    +		{"Black", Var, 0, ""},
    +		{"CMYK", Type, 5, ""},
    +		{"CMYK.C", Field, 5, ""},
    +		{"CMYK.K", Field, 5, ""},
    +		{"CMYK.M", Field, 5, ""},
    +		{"CMYK.Y", Field, 5, ""},
    +		{"CMYKModel", Var, 5, ""},
    +		{"CMYKToRGB", Func, 5, "func(c uint8, m uint8, y uint8, k uint8) (uint8, uint8, uint8)"},
    +		{"Color", Type, 0, ""},
    +		{"Gray", Type, 0, ""},
    +		{"Gray.Y", Field, 0, ""},
    +		{"Gray16", Type, 0, ""},
    +		{"Gray16.Y", Field, 0, ""},
    +		{"Gray16Model", Var, 0, ""},
    +		{"GrayModel", Var, 0, ""},
    +		{"Model", Type, 0, ""},
    +		{"ModelFunc", Func, 0, "func(f func(Color) Color) Model"},
    +		{"NRGBA", Type, 0, ""},
    +		{"NRGBA.A", Field, 0, ""},
    +		{"NRGBA.B", Field, 0, ""},
    +		{"NRGBA.G", Field, 0, ""},
    +		{"NRGBA.R", Field, 0, ""},
    +		{"NRGBA64", Type, 0, ""},
    +		{"NRGBA64.A", Field, 0, ""},
    +		{"NRGBA64.B", Field, 0, ""},
    +		{"NRGBA64.G", Field, 0, ""},
    +		{"NRGBA64.R", Field, 0, ""},
    +		{"NRGBA64Model", Var, 0, ""},
    +		{"NRGBAModel", Var, 0, ""},
    +		{"NYCbCrA", Type, 6, ""},
    +		{"NYCbCrA.A", Field, 6, ""},
    +		{"NYCbCrA.YCbCr", Field, 6, ""},
    +		{"NYCbCrAModel", Var, 6, ""},
    +		{"Opaque", Var, 0, ""},
    +		{"Palette", Type, 0, ""},
    +		{"RGBA", Type, 0, ""},
    +		{"RGBA.A", Field, 0, ""},
    +		{"RGBA.B", Field, 0, ""},
    +		{"RGBA.G", Field, 0, ""},
    +		{"RGBA.R", Field, 0, ""},
    +		{"RGBA64", Type, 0, ""},
    +		{"RGBA64.A", Field, 0, ""},
    +		{"RGBA64.B", Field, 0, ""},
    +		{"RGBA64.G", Field, 0, ""},
    +		{"RGBA64.R", Field, 0, ""},
    +		{"RGBA64Model", Var, 0, ""},
    +		{"RGBAModel", Var, 0, ""},
    +		{"RGBToCMYK", Func, 5, "func(r uint8, g uint8, b uint8) (uint8, uint8, uint8, uint8)"},
    +		{"RGBToYCbCr", Func, 0, "func(r uint8, g uint8, b uint8) (uint8, uint8, uint8)"},
    +		{"Transparent", Var, 0, ""},
    +		{"White", Var, 0, ""},
    +		{"YCbCr", Type, 0, ""},
    +		{"YCbCr.Cb", Field, 0, ""},
    +		{"YCbCr.Cr", Field, 0, ""},
    +		{"YCbCr.Y", Field, 0, ""},
    +		{"YCbCrModel", Var, 0, ""},
    +		{"YCbCrToRGB", Func, 0, "func(y uint8, cb uint8, cr uint8) (uint8, uint8, uint8)"},
     	},
     	"image/color/palette": {
    -		{"Plan9", Var, 2},
    -		{"WebSafe", Var, 2},
    +		{"Plan9", Var, 2, ""},
    +		{"WebSafe", Var, 2, ""},
     	},
     	"image/draw": {
    -		{"(Op).Draw", Method, 2},
    -		{"Draw", Func, 0},
    -		{"DrawMask", Func, 0},
    -		{"Drawer", Type, 2},
    -		{"FloydSteinberg", Var, 2},
    -		{"Image", Type, 0},
    -		{"Op", Type, 0},
    -		{"Over", Const, 0},
    -		{"Quantizer", Type, 2},
    -		{"RGBA64Image", Type, 17},
    -		{"Src", Const, 0},
    +		{"(Op).Draw", Method, 2, ""},
    +		{"Draw", Func, 0, "func(dst Image, r image.Rectangle, src image.Image, sp image.Point, op Op)"},
    +		{"DrawMask", Func, 0, "func(dst Image, r image.Rectangle, src image.Image, sp image.Point, mask image.Image, mp image.Point, op Op)"},
    +		{"Drawer", Type, 2, ""},
    +		{"FloydSteinberg", Var, 2, ""},
    +		{"Image", Type, 0, ""},
    +		{"Op", Type, 0, ""},
    +		{"Over", Const, 0, ""},
    +		{"Quantizer", Type, 2, ""},
    +		{"RGBA64Image", Type, 17, ""},
    +		{"Src", Const, 0, ""},
     	},
     	"image/gif": {
    -		{"Decode", Func, 0},
    -		{"DecodeAll", Func, 0},
    -		{"DecodeConfig", Func, 0},
    -		{"DisposalBackground", Const, 5},
    -		{"DisposalNone", Const, 5},
    -		{"DisposalPrevious", Const, 5},
    -		{"Encode", Func, 2},
    -		{"EncodeAll", Func, 2},
    -		{"GIF", Type, 0},
    -		{"GIF.BackgroundIndex", Field, 5},
    -		{"GIF.Config", Field, 5},
    -		{"GIF.Delay", Field, 0},
    -		{"GIF.Disposal", Field, 5},
    -		{"GIF.Image", Field, 0},
    -		{"GIF.LoopCount", Field, 0},
    -		{"Options", Type, 2},
    -		{"Options.Drawer", Field, 2},
    -		{"Options.NumColors", Field, 2},
    -		{"Options.Quantizer", Field, 2},
    +		{"Decode", Func, 0, "func(r io.Reader) (image.Image, error)"},
    +		{"DecodeAll", Func, 0, "func(r io.Reader) (*GIF, error)"},
    +		{"DecodeConfig", Func, 0, "func(r io.Reader) (image.Config, error)"},
    +		{"DisposalBackground", Const, 5, ""},
    +		{"DisposalNone", Const, 5, ""},
    +		{"DisposalPrevious", Const, 5, ""},
    +		{"Encode", Func, 2, "func(w io.Writer, m image.Image, o *Options) error"},
    +		{"EncodeAll", Func, 2, "func(w io.Writer, g *GIF) error"},
    +		{"GIF", Type, 0, ""},
    +		{"GIF.BackgroundIndex", Field, 5, ""},
    +		{"GIF.Config", Field, 5, ""},
    +		{"GIF.Delay", Field, 0, ""},
    +		{"GIF.Disposal", Field, 5, ""},
    +		{"GIF.Image", Field, 0, ""},
    +		{"GIF.LoopCount", Field, 0, ""},
    +		{"Options", Type, 2, ""},
    +		{"Options.Drawer", Field, 2, ""},
    +		{"Options.NumColors", Field, 2, ""},
    +		{"Options.Quantizer", Field, 2, ""},
     	},
     	"image/jpeg": {
    -		{"(FormatError).Error", Method, 0},
    -		{"(UnsupportedError).Error", Method, 0},
    -		{"Decode", Func, 0},
    -		{"DecodeConfig", Func, 0},
    -		{"DefaultQuality", Const, 0},
    -		{"Encode", Func, 0},
    -		{"FormatError", Type, 0},
    -		{"Options", Type, 0},
    -		{"Options.Quality", Field, 0},
    -		{"Reader", Type, 0},
    -		{"UnsupportedError", Type, 0},
    +		{"(FormatError).Error", Method, 0, ""},
    +		{"(UnsupportedError).Error", Method, 0, ""},
    +		{"Decode", Func, 0, "func(r io.Reader) (image.Image, error)"},
    +		{"DecodeConfig", Func, 0, "func(r io.Reader) (image.Config, error)"},
    +		{"DefaultQuality", Const, 0, ""},
    +		{"Encode", Func, 0, "func(w io.Writer, m image.Image, o *Options) error"},
    +		{"FormatError", Type, 0, ""},
    +		{"Options", Type, 0, ""},
    +		{"Options.Quality", Field, 0, ""},
    +		{"Reader", Type, 0, ""},
    +		{"UnsupportedError", Type, 0, ""},
     	},
     	"image/png": {
    -		{"(*Encoder).Encode", Method, 4},
    -		{"(FormatError).Error", Method, 0},
    -		{"(UnsupportedError).Error", Method, 0},
    -		{"BestCompression", Const, 4},
    -		{"BestSpeed", Const, 4},
    -		{"CompressionLevel", Type, 4},
    -		{"Decode", Func, 0},
    -		{"DecodeConfig", Func, 0},
    -		{"DefaultCompression", Const, 4},
    -		{"Encode", Func, 0},
    -		{"Encoder", Type, 4},
    -		{"Encoder.BufferPool", Field, 9},
    -		{"Encoder.CompressionLevel", Field, 4},
    -		{"EncoderBuffer", Type, 9},
    -		{"EncoderBufferPool", Type, 9},
    -		{"FormatError", Type, 0},
    -		{"NoCompression", Const, 4},
    -		{"UnsupportedError", Type, 0},
    +		{"(*Encoder).Encode", Method, 4, ""},
    +		{"(FormatError).Error", Method, 0, ""},
    +		{"(UnsupportedError).Error", Method, 0, ""},
    +		{"BestCompression", Const, 4, ""},
    +		{"BestSpeed", Const, 4, ""},
    +		{"CompressionLevel", Type, 4, ""},
    +		{"Decode", Func, 0, "func(r io.Reader) (image.Image, error)"},
    +		{"DecodeConfig", Func, 0, "func(r io.Reader) (image.Config, error)"},
    +		{"DefaultCompression", Const, 4, ""},
    +		{"Encode", Func, 0, "func(w io.Writer, m image.Image) error"},
    +		{"Encoder", Type, 4, ""},
    +		{"Encoder.BufferPool", Field, 9, ""},
    +		{"Encoder.CompressionLevel", Field, 4, ""},
    +		{"EncoderBuffer", Type, 9, ""},
    +		{"EncoderBufferPool", Type, 9, ""},
    +		{"FormatError", Type, 0, ""},
    +		{"NoCompression", Const, 4, ""},
    +		{"UnsupportedError", Type, 0, ""},
     	},
     	"index/suffixarray": {
    -		{"(*Index).Bytes", Method, 0},
    -		{"(*Index).FindAllIndex", Method, 0},
    -		{"(*Index).Lookup", Method, 0},
    -		{"(*Index).Read", Method, 0},
    -		{"(*Index).Write", Method, 0},
    -		{"Index", Type, 0},
    -		{"New", Func, 0},
    +		{"(*Index).Bytes", Method, 0, ""},
    +		{"(*Index).FindAllIndex", Method, 0, ""},
    +		{"(*Index).Lookup", Method, 0, ""},
    +		{"(*Index).Read", Method, 0, ""},
    +		{"(*Index).Write", Method, 0, ""},
    +		{"Index", Type, 0, ""},
    +		{"New", Func, 0, "func(data []byte) *Index"},
     	},
     	"io": {
    -		{"(*LimitedReader).Read", Method, 0},
    -		{"(*OffsetWriter).Seek", Method, 20},
    -		{"(*OffsetWriter).Write", Method, 20},
    -		{"(*OffsetWriter).WriteAt", Method, 20},
    -		{"(*PipeReader).Close", Method, 0},
    -		{"(*PipeReader).CloseWithError", Method, 0},
    -		{"(*PipeReader).Read", Method, 0},
    -		{"(*PipeWriter).Close", Method, 0},
    -		{"(*PipeWriter).CloseWithError", Method, 0},
    -		{"(*PipeWriter).Write", Method, 0},
    -		{"(*SectionReader).Outer", Method, 22},
    -		{"(*SectionReader).Read", Method, 0},
    -		{"(*SectionReader).ReadAt", Method, 0},
    -		{"(*SectionReader).Seek", Method, 0},
    -		{"(*SectionReader).Size", Method, 0},
    -		{"ByteReader", Type, 0},
    -		{"ByteScanner", Type, 0},
    -		{"ByteWriter", Type, 1},
    -		{"Closer", Type, 0},
    -		{"Copy", Func, 0},
    -		{"CopyBuffer", Func, 5},
    -		{"CopyN", Func, 0},
    -		{"Discard", Var, 16},
    -		{"EOF", Var, 0},
    -		{"ErrClosedPipe", Var, 0},
    -		{"ErrNoProgress", Var, 1},
    -		{"ErrShortBuffer", Var, 0},
    -		{"ErrShortWrite", Var, 0},
    -		{"ErrUnexpectedEOF", Var, 0},
    -		{"LimitReader", Func, 0},
    -		{"LimitedReader", Type, 0},
    -		{"LimitedReader.N", Field, 0},
    -		{"LimitedReader.R", Field, 0},
    -		{"MultiReader", Func, 0},
    -		{"MultiWriter", Func, 0},
    -		{"NewOffsetWriter", Func, 20},
    -		{"NewSectionReader", Func, 0},
    -		{"NopCloser", Func, 16},
    -		{"OffsetWriter", Type, 20},
    -		{"Pipe", Func, 0},
    -		{"PipeReader", Type, 0},
    -		{"PipeWriter", Type, 0},
    -		{"ReadAll", Func, 16},
    -		{"ReadAtLeast", Func, 0},
    -		{"ReadCloser", Type, 0},
    -		{"ReadFull", Func, 0},
    -		{"ReadSeekCloser", Type, 16},
    -		{"ReadSeeker", Type, 0},
    -		{"ReadWriteCloser", Type, 0},
    -		{"ReadWriteSeeker", Type, 0},
    -		{"ReadWriter", Type, 0},
    -		{"Reader", Type, 0},
    -		{"ReaderAt", Type, 0},
    -		{"ReaderFrom", Type, 0},
    -		{"RuneReader", Type, 0},
    -		{"RuneScanner", Type, 0},
    -		{"SectionReader", Type, 0},
    -		{"SeekCurrent", Const, 7},
    -		{"SeekEnd", Const, 7},
    -		{"SeekStart", Const, 7},
    -		{"Seeker", Type, 0},
    -		{"StringWriter", Type, 12},
    -		{"TeeReader", Func, 0},
    -		{"WriteCloser", Type, 0},
    -		{"WriteSeeker", Type, 0},
    -		{"WriteString", Func, 0},
    -		{"Writer", Type, 0},
    -		{"WriterAt", Type, 0},
    -		{"WriterTo", Type, 0},
    +		{"(*LimitedReader).Read", Method, 0, ""},
    +		{"(*OffsetWriter).Seek", Method, 20, ""},
    +		{"(*OffsetWriter).Write", Method, 20, ""},
    +		{"(*OffsetWriter).WriteAt", Method, 20, ""},
    +		{"(*PipeReader).Close", Method, 0, ""},
    +		{"(*PipeReader).CloseWithError", Method, 0, ""},
    +		{"(*PipeReader).Read", Method, 0, ""},
    +		{"(*PipeWriter).Close", Method, 0, ""},
    +		{"(*PipeWriter).CloseWithError", Method, 0, ""},
    +		{"(*PipeWriter).Write", Method, 0, ""},
    +		{"(*SectionReader).Outer", Method, 22, ""},
    +		{"(*SectionReader).Read", Method, 0, ""},
    +		{"(*SectionReader).ReadAt", Method, 0, ""},
    +		{"(*SectionReader).Seek", Method, 0, ""},
    +		{"(*SectionReader).Size", Method, 0, ""},
    +		{"ByteReader", Type, 0, ""},
    +		{"ByteScanner", Type, 0, ""},
    +		{"ByteWriter", Type, 1, ""},
    +		{"Closer", Type, 0, ""},
    +		{"Copy", Func, 0, "func(dst Writer, src Reader) (written int64, err error)"},
    +		{"CopyBuffer", Func, 5, "func(dst Writer, src Reader, buf []byte) (written int64, err error)"},
    +		{"CopyN", Func, 0, "func(dst Writer, src Reader, n int64) (written int64, err error)"},
    +		{"Discard", Var, 16, ""},
    +		{"EOF", Var, 0, ""},
    +		{"ErrClosedPipe", Var, 0, ""},
    +		{"ErrNoProgress", Var, 1, ""},
    +		{"ErrShortBuffer", Var, 0, ""},
    +		{"ErrShortWrite", Var, 0, ""},
    +		{"ErrUnexpectedEOF", Var, 0, ""},
    +		{"LimitReader", Func, 0, "func(r Reader, n int64) Reader"},
    +		{"LimitedReader", Type, 0, ""},
    +		{"LimitedReader.N", Field, 0, ""},
    +		{"LimitedReader.R", Field, 0, ""},
    +		{"MultiReader", Func, 0, "func(readers ...Reader) Reader"},
    +		{"MultiWriter", Func, 0, "func(writers ...Writer) Writer"},
    +		{"NewOffsetWriter", Func, 20, "func(w WriterAt, off int64) *OffsetWriter"},
    +		{"NewSectionReader", Func, 0, "func(r ReaderAt, off int64, n int64) *SectionReader"},
    +		{"NopCloser", Func, 16, "func(r Reader) ReadCloser"},
    +		{"OffsetWriter", Type, 20, ""},
    +		{"Pipe", Func, 0, "func() (*PipeReader, *PipeWriter)"},
    +		{"PipeReader", Type, 0, ""},
    +		{"PipeWriter", Type, 0, ""},
    +		{"ReadAll", Func, 16, "func(r Reader) ([]byte, error)"},
    +		{"ReadAtLeast", Func, 0, "func(r Reader, buf []byte, min int) (n int, err error)"},
    +		{"ReadCloser", Type, 0, ""},
    +		{"ReadFull", Func, 0, "func(r Reader, buf []byte) (n int, err error)"},
    +		{"ReadSeekCloser", Type, 16, ""},
    +		{"ReadSeeker", Type, 0, ""},
    +		{"ReadWriteCloser", Type, 0, ""},
    +		{"ReadWriteSeeker", Type, 0, ""},
    +		{"ReadWriter", Type, 0, ""},
    +		{"Reader", Type, 0, ""},
    +		{"ReaderAt", Type, 0, ""},
    +		{"ReaderFrom", Type, 0, ""},
    +		{"RuneReader", Type, 0, ""},
    +		{"RuneScanner", Type, 0, ""},
    +		{"SectionReader", Type, 0, ""},
    +		{"SeekCurrent", Const, 7, ""},
    +		{"SeekEnd", Const, 7, ""},
    +		{"SeekStart", Const, 7, ""},
    +		{"Seeker", Type, 0, ""},
    +		{"StringWriter", Type, 12, ""},
    +		{"TeeReader", Func, 0, "func(r Reader, w Writer) Reader"},
    +		{"WriteCloser", Type, 0, ""},
    +		{"WriteSeeker", Type, 0, ""},
    +		{"WriteString", Func, 0, "func(w Writer, s string) (n int, err error)"},
    +		{"Writer", Type, 0, ""},
    +		{"WriterAt", Type, 0, ""},
    +		{"WriterTo", Type, 0, ""},
     	},
     	"io/fs": {
    -		{"(*PathError).Error", Method, 16},
    -		{"(*PathError).Timeout", Method, 16},
    -		{"(*PathError).Unwrap", Method, 16},
    -		{"(FileMode).IsDir", Method, 16},
    -		{"(FileMode).IsRegular", Method, 16},
    -		{"(FileMode).Perm", Method, 16},
    -		{"(FileMode).String", Method, 16},
    -		{"(FileMode).Type", Method, 16},
    -		{"DirEntry", Type, 16},
    -		{"ErrClosed", Var, 16},
    -		{"ErrExist", Var, 16},
    -		{"ErrInvalid", Var, 16},
    -		{"ErrNotExist", Var, 16},
    -		{"ErrPermission", Var, 16},
    -		{"FS", Type, 16},
    -		{"File", Type, 16},
    -		{"FileInfo", Type, 16},
    -		{"FileInfoToDirEntry", Func, 17},
    -		{"FileMode", Type, 16},
    -		{"FormatDirEntry", Func, 21},
    -		{"FormatFileInfo", Func, 21},
    -		{"Glob", Func, 16},
    -		{"GlobFS", Type, 16},
    -		{"ModeAppend", Const, 16},
    -		{"ModeCharDevice", Const, 16},
    -		{"ModeDevice", Const, 16},
    -		{"ModeDir", Const, 16},
    -		{"ModeExclusive", Const, 16},
    -		{"ModeIrregular", Const, 16},
    -		{"ModeNamedPipe", Const, 16},
    -		{"ModePerm", Const, 16},
    -		{"ModeSetgid", Const, 16},
    -		{"ModeSetuid", Const, 16},
    -		{"ModeSocket", Const, 16},
    -		{"ModeSticky", Const, 16},
    -		{"ModeSymlink", Const, 16},
    -		{"ModeTemporary", Const, 16},
    -		{"ModeType", Const, 16},
    -		{"PathError", Type, 16},
    -		{"PathError.Err", Field, 16},
    -		{"PathError.Op", Field, 16},
    -		{"PathError.Path", Field, 16},
    -		{"ReadDir", Func, 16},
    -		{"ReadDirFS", Type, 16},
    -		{"ReadDirFile", Type, 16},
    -		{"ReadFile", Func, 16},
    -		{"ReadFileFS", Type, 16},
    -		{"SkipAll", Var, 20},
    -		{"SkipDir", Var, 16},
    -		{"Stat", Func, 16},
    -		{"StatFS", Type, 16},
    -		{"Sub", Func, 16},
    -		{"SubFS", Type, 16},
    -		{"ValidPath", Func, 16},
    -		{"WalkDir", Func, 16},
    -		{"WalkDirFunc", Type, 16},
    +		{"(*PathError).Error", Method, 16, ""},
    +		{"(*PathError).Timeout", Method, 16, ""},
    +		{"(*PathError).Unwrap", Method, 16, ""},
    +		{"(FileMode).IsDir", Method, 16, ""},
    +		{"(FileMode).IsRegular", Method, 16, ""},
    +		{"(FileMode).Perm", Method, 16, ""},
    +		{"(FileMode).String", Method, 16, ""},
    +		{"(FileMode).Type", Method, 16, ""},
    +		{"DirEntry", Type, 16, ""},
    +		{"ErrClosed", Var, 16, ""},
    +		{"ErrExist", Var, 16, ""},
    +		{"ErrInvalid", Var, 16, ""},
    +		{"ErrNotExist", Var, 16, ""},
    +		{"ErrPermission", Var, 16, ""},
    +		{"FS", Type, 16, ""},
    +		{"File", Type, 16, ""},
    +		{"FileInfo", Type, 16, ""},
    +		{"FileInfoToDirEntry", Func, 17, "func(info FileInfo) DirEntry"},
    +		{"FileMode", Type, 16, ""},
    +		{"FormatDirEntry", Func, 21, "func(dir DirEntry) string"},
    +		{"FormatFileInfo", Func, 21, "func(info FileInfo) string"},
    +		{"Glob", Func, 16, "func(fsys FS, pattern string) (matches []string, err error)"},
    +		{"GlobFS", Type, 16, ""},
    +		{"Lstat", Func, 25, "func(fsys FS, name string) (FileInfo, error)"},
    +		{"ModeAppend", Const, 16, ""},
    +		{"ModeCharDevice", Const, 16, ""},
    +		{"ModeDevice", Const, 16, ""},
    +		{"ModeDir", Const, 16, ""},
    +		{"ModeExclusive", Const, 16, ""},
    +		{"ModeIrregular", Const, 16, ""},
    +		{"ModeNamedPipe", Const, 16, ""},
    +		{"ModePerm", Const, 16, ""},
    +		{"ModeSetgid", Const, 16, ""},
    +		{"ModeSetuid", Const, 16, ""},
    +		{"ModeSocket", Const, 16, ""},
    +		{"ModeSticky", Const, 16, ""},
    +		{"ModeSymlink", Const, 16, ""},
    +		{"ModeTemporary", Const, 16, ""},
    +		{"ModeType", Const, 16, ""},
    +		{"PathError", Type, 16, ""},
    +		{"PathError.Err", Field, 16, ""},
    +		{"PathError.Op", Field, 16, ""},
    +		{"PathError.Path", Field, 16, ""},
    +		{"ReadDir", Func, 16, "func(fsys FS, name string) ([]DirEntry, error)"},
    +		{"ReadDirFS", Type, 16, ""},
    +		{"ReadDirFile", Type, 16, ""},
    +		{"ReadFile", Func, 16, "func(fsys FS, name string) ([]byte, error)"},
    +		{"ReadFileFS", Type, 16, ""},
    +		{"ReadLink", Func, 25, "func(fsys FS, name string) (string, error)"},
    +		{"ReadLinkFS", Type, 25, ""},
    +		{"SkipAll", Var, 20, ""},
    +		{"SkipDir", Var, 16, ""},
    +		{"Stat", Func, 16, "func(fsys FS, name string) (FileInfo, error)"},
    +		{"StatFS", Type, 16, ""},
    +		{"Sub", Func, 16, "func(fsys FS, dir string) (FS, error)"},
    +		{"SubFS", Type, 16, ""},
    +		{"ValidPath", Func, 16, "func(name string) bool"},
    +		{"WalkDir", Func, 16, "func(fsys FS, root string, fn WalkDirFunc) error"},
    +		{"WalkDirFunc", Type, 16, ""},
     	},
     	"io/ioutil": {
    -		{"Discard", Var, 0},
    -		{"NopCloser", Func, 0},
    -		{"ReadAll", Func, 0},
    -		{"ReadDir", Func, 0},
    -		{"ReadFile", Func, 0},
    -		{"TempDir", Func, 0},
    -		{"TempFile", Func, 0},
    -		{"WriteFile", Func, 0},
    +		{"Discard", Var, 0, ""},
    +		{"NopCloser", Func, 0, "func(r io.Reader) io.ReadCloser"},
    +		{"ReadAll", Func, 0, "func(r io.Reader) ([]byte, error)"},
    +		{"ReadDir", Func, 0, "func(dirname string) ([]fs.FileInfo, error)"},
    +		{"ReadFile", Func, 0, "func(filename string) ([]byte, error)"},
    +		{"TempDir", Func, 0, "func(dir string, pattern string) (name string, err error)"},
    +		{"TempFile", Func, 0, "func(dir string, pattern string) (f *os.File, err error)"},
    +		{"WriteFile", Func, 0, "func(filename string, data []byte, perm fs.FileMode) error"},
     	},
     	"iter": {
    -		{"Pull", Func, 23},
    -		{"Pull2", Func, 23},
    -		{"Seq", Type, 23},
    -		{"Seq2", Type, 23},
    +		{"Pull", Func, 23, "func[V any](seq Seq[V]) (next func() (V, bool), stop func())"},
    +		{"Pull2", Func, 23, "func[K, V any](seq Seq2[K, V]) (next func() (K, V, bool), stop func())"},
    +		{"Seq", Type, 23, ""},
    +		{"Seq2", Type, 23, ""},
     	},
     	"log": {
    -		{"(*Logger).Fatal", Method, 0},
    -		{"(*Logger).Fatalf", Method, 0},
    -		{"(*Logger).Fatalln", Method, 0},
    -		{"(*Logger).Flags", Method, 0},
    -		{"(*Logger).Output", Method, 0},
    -		{"(*Logger).Panic", Method, 0},
    -		{"(*Logger).Panicf", Method, 0},
    -		{"(*Logger).Panicln", Method, 0},
    -		{"(*Logger).Prefix", Method, 0},
    -		{"(*Logger).Print", Method, 0},
    -		{"(*Logger).Printf", Method, 0},
    -		{"(*Logger).Println", Method, 0},
    -		{"(*Logger).SetFlags", Method, 0},
    -		{"(*Logger).SetOutput", Method, 5},
    -		{"(*Logger).SetPrefix", Method, 0},
    -		{"(*Logger).Writer", Method, 12},
    -		{"Default", Func, 16},
    -		{"Fatal", Func, 0},
    -		{"Fatalf", Func, 0},
    -		{"Fatalln", Func, 0},
    -		{"Flags", Func, 0},
    -		{"LUTC", Const, 5},
    -		{"Ldate", Const, 0},
    -		{"Llongfile", Const, 0},
    -		{"Lmicroseconds", Const, 0},
    -		{"Lmsgprefix", Const, 14},
    -		{"Logger", Type, 0},
    -		{"Lshortfile", Const, 0},
    -		{"LstdFlags", Const, 0},
    -		{"Ltime", Const, 0},
    -		{"New", Func, 0},
    -		{"Output", Func, 5},
    -		{"Panic", Func, 0},
    -		{"Panicf", Func, 0},
    -		{"Panicln", Func, 0},
    -		{"Prefix", Func, 0},
    -		{"Print", Func, 0},
    -		{"Printf", Func, 0},
    -		{"Println", Func, 0},
    -		{"SetFlags", Func, 0},
    -		{"SetOutput", Func, 0},
    -		{"SetPrefix", Func, 0},
    -		{"Writer", Func, 13},
    +		{"(*Logger).Fatal", Method, 0, ""},
    +		{"(*Logger).Fatalf", Method, 0, ""},
    +		{"(*Logger).Fatalln", Method, 0, ""},
    +		{"(*Logger).Flags", Method, 0, ""},
    +		{"(*Logger).Output", Method, 0, ""},
    +		{"(*Logger).Panic", Method, 0, ""},
    +		{"(*Logger).Panicf", Method, 0, ""},
    +		{"(*Logger).Panicln", Method, 0, ""},
    +		{"(*Logger).Prefix", Method, 0, ""},
    +		{"(*Logger).Print", Method, 0, ""},
    +		{"(*Logger).Printf", Method, 0, ""},
    +		{"(*Logger).Println", Method, 0, ""},
    +		{"(*Logger).SetFlags", Method, 0, ""},
    +		{"(*Logger).SetOutput", Method, 5, ""},
    +		{"(*Logger).SetPrefix", Method, 0, ""},
    +		{"(*Logger).Writer", Method, 12, ""},
    +		{"Default", Func, 16, "func() *Logger"},
    +		{"Fatal", Func, 0, "func(v ...any)"},
    +		{"Fatalf", Func, 0, "func(format string, v ...any)"},
    +		{"Fatalln", Func, 0, "func(v ...any)"},
    +		{"Flags", Func, 0, "func() int"},
    +		{"LUTC", Const, 5, ""},
    +		{"Ldate", Const, 0, ""},
    +		{"Llongfile", Const, 0, ""},
    +		{"Lmicroseconds", Const, 0, ""},
    +		{"Lmsgprefix", Const, 14, ""},
    +		{"Logger", Type, 0, ""},
    +		{"Lshortfile", Const, 0, ""},
    +		{"LstdFlags", Const, 0, ""},
    +		{"Ltime", Const, 0, ""},
    +		{"New", Func, 0, "func(out io.Writer, prefix string, flag int) *Logger"},
    +		{"Output", Func, 5, "func(calldepth int, s string) error"},
    +		{"Panic", Func, 0, "func(v ...any)"},
    +		{"Panicf", Func, 0, "func(format string, v ...any)"},
    +		{"Panicln", Func, 0, "func(v ...any)"},
    +		{"Prefix", Func, 0, "func() string"},
    +		{"Print", Func, 0, "func(v ...any)"},
    +		{"Printf", Func, 0, "func(format string, v ...any)"},
    +		{"Println", Func, 0, "func(v ...any)"},
    +		{"SetFlags", Func, 0, "func(flag int)"},
    +		{"SetOutput", Func, 0, "func(w io.Writer)"},
    +		{"SetPrefix", Func, 0, "func(prefix string)"},
    +		{"Writer", Func, 13, "func() io.Writer"},
     	},
     	"log/slog": {
    -		{"(*JSONHandler).Enabled", Method, 21},
    -		{"(*JSONHandler).Handle", Method, 21},
    -		{"(*JSONHandler).WithAttrs", Method, 21},
    -		{"(*JSONHandler).WithGroup", Method, 21},
    -		{"(*Level).UnmarshalJSON", Method, 21},
    -		{"(*Level).UnmarshalText", Method, 21},
    -		{"(*LevelVar).Level", Method, 21},
    -		{"(*LevelVar).MarshalText", Method, 21},
    -		{"(*LevelVar).Set", Method, 21},
    -		{"(*LevelVar).String", Method, 21},
    -		{"(*LevelVar).UnmarshalText", Method, 21},
    -		{"(*Logger).Debug", Method, 21},
    -		{"(*Logger).DebugContext", Method, 21},
    -		{"(*Logger).Enabled", Method, 21},
    -		{"(*Logger).Error", Method, 21},
    -		{"(*Logger).ErrorContext", Method, 21},
    -		{"(*Logger).Handler", Method, 21},
    -		{"(*Logger).Info", Method, 21},
    -		{"(*Logger).InfoContext", Method, 21},
    -		{"(*Logger).Log", Method, 21},
    -		{"(*Logger).LogAttrs", Method, 21},
    -		{"(*Logger).Warn", Method, 21},
    -		{"(*Logger).WarnContext", Method, 21},
    -		{"(*Logger).With", Method, 21},
    -		{"(*Logger).WithGroup", Method, 21},
    -		{"(*Record).Add", Method, 21},
    -		{"(*Record).AddAttrs", Method, 21},
    -		{"(*TextHandler).Enabled", Method, 21},
    -		{"(*TextHandler).Handle", Method, 21},
    -		{"(*TextHandler).WithAttrs", Method, 21},
    -		{"(*TextHandler).WithGroup", Method, 21},
    -		{"(Attr).Equal", Method, 21},
    -		{"(Attr).String", Method, 21},
    -		{"(Kind).String", Method, 21},
    -		{"(Level).Level", Method, 21},
    -		{"(Level).MarshalJSON", Method, 21},
    -		{"(Level).MarshalText", Method, 21},
    -		{"(Level).String", Method, 21},
    -		{"(Record).Attrs", Method, 21},
    -		{"(Record).Clone", Method, 21},
    -		{"(Record).NumAttrs", Method, 21},
    -		{"(Value).Any", Method, 21},
    -		{"(Value).Bool", Method, 21},
    -		{"(Value).Duration", Method, 21},
    -		{"(Value).Equal", Method, 21},
    -		{"(Value).Float64", Method, 21},
    -		{"(Value).Group", Method, 21},
    -		{"(Value).Int64", Method, 21},
    -		{"(Value).Kind", Method, 21},
    -		{"(Value).LogValuer", Method, 21},
    -		{"(Value).Resolve", Method, 21},
    -		{"(Value).String", Method, 21},
    -		{"(Value).Time", Method, 21},
    -		{"(Value).Uint64", Method, 21},
    -		{"Any", Func, 21},
    -		{"AnyValue", Func, 21},
    -		{"Attr", Type, 21},
    -		{"Attr.Key", Field, 21},
    -		{"Attr.Value", Field, 21},
    -		{"Bool", Func, 21},
    -		{"BoolValue", Func, 21},
    -		{"Debug", Func, 21},
    -		{"DebugContext", Func, 21},
    -		{"Default", Func, 21},
    -		{"Duration", Func, 21},
    -		{"DurationValue", Func, 21},
    -		{"Error", Func, 21},
    -		{"ErrorContext", Func, 21},
    -		{"Float64", Func, 21},
    -		{"Float64Value", Func, 21},
    -		{"Group", Func, 21},
    -		{"GroupValue", Func, 21},
    -		{"Handler", Type, 21},
    -		{"HandlerOptions", Type, 21},
    -		{"HandlerOptions.AddSource", Field, 21},
    -		{"HandlerOptions.Level", Field, 21},
    -		{"HandlerOptions.ReplaceAttr", Field, 21},
    -		{"Info", Func, 21},
    -		{"InfoContext", Func, 21},
    -		{"Int", Func, 21},
    -		{"Int64", Func, 21},
    -		{"Int64Value", Func, 21},
    -		{"IntValue", Func, 21},
    -		{"JSONHandler", Type, 21},
    -		{"Kind", Type, 21},
    -		{"KindAny", Const, 21},
    -		{"KindBool", Const, 21},
    -		{"KindDuration", Const, 21},
    -		{"KindFloat64", Const, 21},
    -		{"KindGroup", Const, 21},
    -		{"KindInt64", Const, 21},
    -		{"KindLogValuer", Const, 21},
    -		{"KindString", Const, 21},
    -		{"KindTime", Const, 21},
    -		{"KindUint64", Const, 21},
    -		{"Level", Type, 21},
    -		{"LevelDebug", Const, 21},
    -		{"LevelError", Const, 21},
    -		{"LevelInfo", Const, 21},
    -		{"LevelKey", Const, 21},
    -		{"LevelVar", Type, 21},
    -		{"LevelWarn", Const, 21},
    -		{"Leveler", Type, 21},
    -		{"Log", Func, 21},
    -		{"LogAttrs", Func, 21},
    -		{"LogValuer", Type, 21},
    -		{"Logger", Type, 21},
    -		{"MessageKey", Const, 21},
    -		{"New", Func, 21},
    -		{"NewJSONHandler", Func, 21},
    -		{"NewLogLogger", Func, 21},
    -		{"NewRecord", Func, 21},
    -		{"NewTextHandler", Func, 21},
    -		{"Record", Type, 21},
    -		{"Record.Level", Field, 21},
    -		{"Record.Message", Field, 21},
    -		{"Record.PC", Field, 21},
    -		{"Record.Time", Field, 21},
    -		{"SetDefault", Func, 21},
    -		{"SetLogLoggerLevel", Func, 22},
    -		{"Source", Type, 21},
    -		{"Source.File", Field, 21},
    -		{"Source.Function", Field, 21},
    -		{"Source.Line", Field, 21},
    -		{"SourceKey", Const, 21},
    -		{"String", Func, 21},
    -		{"StringValue", Func, 21},
    -		{"TextHandler", Type, 21},
    -		{"Time", Func, 21},
    -		{"TimeKey", Const, 21},
    -		{"TimeValue", Func, 21},
    -		{"Uint64", Func, 21},
    -		{"Uint64Value", Func, 21},
    -		{"Value", Type, 21},
    -		{"Warn", Func, 21},
    -		{"WarnContext", Func, 21},
    -		{"With", Func, 21},
    +		{"(*JSONHandler).Enabled", Method, 21, ""},
    +		{"(*JSONHandler).Handle", Method, 21, ""},
    +		{"(*JSONHandler).WithAttrs", Method, 21, ""},
    +		{"(*JSONHandler).WithGroup", Method, 21, ""},
    +		{"(*Level).UnmarshalJSON", Method, 21, ""},
    +		{"(*Level).UnmarshalText", Method, 21, ""},
    +		{"(*LevelVar).AppendText", Method, 24, ""},
    +		{"(*LevelVar).Level", Method, 21, ""},
    +		{"(*LevelVar).MarshalText", Method, 21, ""},
    +		{"(*LevelVar).Set", Method, 21, ""},
    +		{"(*LevelVar).String", Method, 21, ""},
    +		{"(*LevelVar).UnmarshalText", Method, 21, ""},
    +		{"(*Logger).Debug", Method, 21, ""},
    +		{"(*Logger).DebugContext", Method, 21, ""},
    +		{"(*Logger).Enabled", Method, 21, ""},
    +		{"(*Logger).Error", Method, 21, ""},
    +		{"(*Logger).ErrorContext", Method, 21, ""},
    +		{"(*Logger).Handler", Method, 21, ""},
    +		{"(*Logger).Info", Method, 21, ""},
    +		{"(*Logger).InfoContext", Method, 21, ""},
    +		{"(*Logger).Log", Method, 21, ""},
    +		{"(*Logger).LogAttrs", Method, 21, ""},
    +		{"(*Logger).Warn", Method, 21, ""},
    +		{"(*Logger).WarnContext", Method, 21, ""},
    +		{"(*Logger).With", Method, 21, ""},
    +		{"(*Logger).WithGroup", Method, 21, ""},
    +		{"(*Record).Add", Method, 21, ""},
    +		{"(*Record).AddAttrs", Method, 21, ""},
    +		{"(*TextHandler).Enabled", Method, 21, ""},
    +		{"(*TextHandler).Handle", Method, 21, ""},
    +		{"(*TextHandler).WithAttrs", Method, 21, ""},
    +		{"(*TextHandler).WithGroup", Method, 21, ""},
    +		{"(Attr).Equal", Method, 21, ""},
    +		{"(Attr).String", Method, 21, ""},
    +		{"(Kind).String", Method, 21, ""},
    +		{"(Level).AppendText", Method, 24, ""},
    +		{"(Level).Level", Method, 21, ""},
    +		{"(Level).MarshalJSON", Method, 21, ""},
    +		{"(Level).MarshalText", Method, 21, ""},
    +		{"(Level).String", Method, 21, ""},
    +		{"(Record).Attrs", Method, 21, ""},
    +		{"(Record).Clone", Method, 21, ""},
    +		{"(Record).NumAttrs", Method, 21, ""},
    +		{"(Record).Source", Method, 25, ""},
    +		{"(Value).Any", Method, 21, ""},
    +		{"(Value).Bool", Method, 21, ""},
    +		{"(Value).Duration", Method, 21, ""},
    +		{"(Value).Equal", Method, 21, ""},
    +		{"(Value).Float64", Method, 21, ""},
    +		{"(Value).Group", Method, 21, ""},
    +		{"(Value).Int64", Method, 21, ""},
    +		{"(Value).Kind", Method, 21, ""},
    +		{"(Value).LogValuer", Method, 21, ""},
    +		{"(Value).Resolve", Method, 21, ""},
    +		{"(Value).String", Method, 21, ""},
    +		{"(Value).Time", Method, 21, ""},
    +		{"(Value).Uint64", Method, 21, ""},
    +		{"Any", Func, 21, "func(key string, value any) Attr"},
    +		{"AnyValue", Func, 21, "func(v any) Value"},
    +		{"Attr", Type, 21, ""},
    +		{"Attr.Key", Field, 21, ""},
    +		{"Attr.Value", Field, 21, ""},
    +		{"Bool", Func, 21, "func(key string, v bool) Attr"},
    +		{"BoolValue", Func, 21, "func(v bool) Value"},
    +		{"Debug", Func, 21, "func(msg string, args ...any)"},
    +		{"DebugContext", Func, 21, "func(ctx context.Context, msg string, args ...any)"},
    +		{"Default", Func, 21, "func() *Logger"},
    +		{"DiscardHandler", Var, 24, ""},
    +		{"Duration", Func, 21, "func(key string, v time.Duration) Attr"},
    +		{"DurationValue", Func, 21, "func(v time.Duration) Value"},
    +		{"Error", Func, 21, "func(msg string, args ...any)"},
    +		{"ErrorContext", Func, 21, "func(ctx context.Context, msg string, args ...any)"},
    +		{"Float64", Func, 21, "func(key string, v float64) Attr"},
    +		{"Float64Value", Func, 21, "func(v float64) Value"},
    +		{"Group", Func, 21, "func(key string, args ...any) Attr"},
    +		{"GroupAttrs", Func, 25, "func(key string, attrs ...Attr) Attr"},
    +		{"GroupValue", Func, 21, "func(as ...Attr) Value"},
    +		{"Handler", Type, 21, ""},
    +		{"HandlerOptions", Type, 21, ""},
    +		{"HandlerOptions.AddSource", Field, 21, ""},
    +		{"HandlerOptions.Level", Field, 21, ""},
    +		{"HandlerOptions.ReplaceAttr", Field, 21, ""},
    +		{"Info", Func, 21, "func(msg string, args ...any)"},
    +		{"InfoContext", Func, 21, "func(ctx context.Context, msg string, args ...any)"},
    +		{"Int", Func, 21, "func(key string, value int) Attr"},
    +		{"Int64", Func, 21, "func(key string, value int64) Attr"},
    +		{"Int64Value", Func, 21, "func(v int64) Value"},
    +		{"IntValue", Func, 21, "func(v int) Value"},
    +		{"JSONHandler", Type, 21, ""},
    +		{"Kind", Type, 21, ""},
    +		{"KindAny", Const, 21, ""},
    +		{"KindBool", Const, 21, ""},
    +		{"KindDuration", Const, 21, ""},
    +		{"KindFloat64", Const, 21, ""},
    +		{"KindGroup", Const, 21, ""},
    +		{"KindInt64", Const, 21, ""},
    +		{"KindLogValuer", Const, 21, ""},
    +		{"KindString", Const, 21, ""},
    +		{"KindTime", Const, 21, ""},
    +		{"KindUint64", Const, 21, ""},
    +		{"Level", Type, 21, ""},
    +		{"LevelDebug", Const, 21, ""},
    +		{"LevelError", Const, 21, ""},
    +		{"LevelInfo", Const, 21, ""},
    +		{"LevelKey", Const, 21, ""},
    +		{"LevelVar", Type, 21, ""},
    +		{"LevelWarn", Const, 21, ""},
    +		{"Leveler", Type, 21, ""},
    +		{"Log", Func, 21, "func(ctx context.Context, level Level, msg string, args ...any)"},
    +		{"LogAttrs", Func, 21, "func(ctx context.Context, level Level, msg string, attrs ...Attr)"},
    +		{"LogValuer", Type, 21, ""},
    +		{"Logger", Type, 21, ""},
    +		{"MessageKey", Const, 21, ""},
    +		{"New", Func, 21, "func(h Handler) *Logger"},
    +		{"NewJSONHandler", Func, 21, "func(w io.Writer, opts *HandlerOptions) *JSONHandler"},
    +		{"NewLogLogger", Func, 21, "func(h Handler, level Level) *log.Logger"},
    +		{"NewRecord", Func, 21, "func(t time.Time, level Level, msg string, pc uintptr) Record"},
    +		{"NewTextHandler", Func, 21, "func(w io.Writer, opts *HandlerOptions) *TextHandler"},
    +		{"Record", Type, 21, ""},
    +		{"Record.Level", Field, 21, ""},
    +		{"Record.Message", Field, 21, ""},
    +		{"Record.PC", Field, 21, ""},
    +		{"Record.Time", Field, 21, ""},
    +		{"SetDefault", Func, 21, "func(l *Logger)"},
    +		{"SetLogLoggerLevel", Func, 22, "func(level Level) (oldLevel Level)"},
    +		{"Source", Type, 21, ""},
    +		{"Source.File", Field, 21, ""},
    +		{"Source.Function", Field, 21, ""},
    +		{"Source.Line", Field, 21, ""},
    +		{"SourceKey", Const, 21, ""},
    +		{"String", Func, 21, "func(key string, value string) Attr"},
    +		{"StringValue", Func, 21, "func(value string) Value"},
    +		{"TextHandler", Type, 21, ""},
    +		{"Time", Func, 21, "func(key string, v time.Time) Attr"},
    +		{"TimeKey", Const, 21, ""},
    +		{"TimeValue", Func, 21, "func(v time.Time) Value"},
    +		{"Uint64", Func, 21, "func(key string, v uint64) Attr"},
    +		{"Uint64Value", Func, 21, "func(v uint64) Value"},
    +		{"Value", Type, 21, ""},
    +		{"Warn", Func, 21, "func(msg string, args ...any)"},
    +		{"WarnContext", Func, 21, "func(ctx context.Context, msg string, args ...any)"},
    +		{"With", Func, 21, "func(args ...any) *Logger"},
     	},
     	"log/syslog": {
    -		{"(*Writer).Alert", Method, 0},
    -		{"(*Writer).Close", Method, 0},
    -		{"(*Writer).Crit", Method, 0},
    -		{"(*Writer).Debug", Method, 0},
    -		{"(*Writer).Emerg", Method, 0},
    -		{"(*Writer).Err", Method, 0},
    -		{"(*Writer).Info", Method, 0},
    -		{"(*Writer).Notice", Method, 0},
    -		{"(*Writer).Warning", Method, 0},
    -		{"(*Writer).Write", Method, 0},
    -		{"Dial", Func, 0},
    -		{"LOG_ALERT", Const, 0},
    -		{"LOG_AUTH", Const, 1},
    -		{"LOG_AUTHPRIV", Const, 1},
    -		{"LOG_CRIT", Const, 0},
    -		{"LOG_CRON", Const, 1},
    -		{"LOG_DAEMON", Const, 1},
    -		{"LOG_DEBUG", Const, 0},
    -		{"LOG_EMERG", Const, 0},
    -		{"LOG_ERR", Const, 0},
    -		{"LOG_FTP", Const, 1},
    -		{"LOG_INFO", Const, 0},
    -		{"LOG_KERN", Const, 1},
    -		{"LOG_LOCAL0", Const, 1},
    -		{"LOG_LOCAL1", Const, 1},
    -		{"LOG_LOCAL2", Const, 1},
    -		{"LOG_LOCAL3", Const, 1},
    -		{"LOG_LOCAL4", Const, 1},
    -		{"LOG_LOCAL5", Const, 1},
    -		{"LOG_LOCAL6", Const, 1},
    -		{"LOG_LOCAL7", Const, 1},
    -		{"LOG_LPR", Const, 1},
    -		{"LOG_MAIL", Const, 1},
    -		{"LOG_NEWS", Const, 1},
    -		{"LOG_NOTICE", Const, 0},
    -		{"LOG_SYSLOG", Const, 1},
    -		{"LOG_USER", Const, 1},
    -		{"LOG_UUCP", Const, 1},
    -		{"LOG_WARNING", Const, 0},
    -		{"New", Func, 0},
    -		{"NewLogger", Func, 0},
    -		{"Priority", Type, 0},
    -		{"Writer", Type, 0},
    +		{"(*Writer).Alert", Method, 0, ""},
    +		{"(*Writer).Close", Method, 0, ""},
    +		{"(*Writer).Crit", Method, 0, ""},
    +		{"(*Writer).Debug", Method, 0, ""},
    +		{"(*Writer).Emerg", Method, 0, ""},
    +		{"(*Writer).Err", Method, 0, ""},
    +		{"(*Writer).Info", Method, 0, ""},
    +		{"(*Writer).Notice", Method, 0, ""},
    +		{"(*Writer).Warning", Method, 0, ""},
    +		{"(*Writer).Write", Method, 0, ""},
    +		{"Dial", Func, 0, "func(network string, raddr string, priority Priority, tag string) (*Writer, error)"},
    +		{"LOG_ALERT", Const, 0, ""},
    +		{"LOG_AUTH", Const, 1, ""},
    +		{"LOG_AUTHPRIV", Const, 1, ""},
    +		{"LOG_CRIT", Const, 0, ""},
    +		{"LOG_CRON", Const, 1, ""},
    +		{"LOG_DAEMON", Const, 1, ""},
    +		{"LOG_DEBUG", Const, 0, ""},
    +		{"LOG_EMERG", Const, 0, ""},
    +		{"LOG_ERR", Const, 0, ""},
    +		{"LOG_FTP", Const, 1, ""},
    +		{"LOG_INFO", Const, 0, ""},
    +		{"LOG_KERN", Const, 1, ""},
    +		{"LOG_LOCAL0", Const, 1, ""},
    +		{"LOG_LOCAL1", Const, 1, ""},
    +		{"LOG_LOCAL2", Const, 1, ""},
    +		{"LOG_LOCAL3", Const, 1, ""},
    +		{"LOG_LOCAL4", Const, 1, ""},
    +		{"LOG_LOCAL5", Const, 1, ""},
    +		{"LOG_LOCAL6", Const, 1, ""},
    +		{"LOG_LOCAL7", Const, 1, ""},
    +		{"LOG_LPR", Const, 1, ""},
    +		{"LOG_MAIL", Const, 1, ""},
    +		{"LOG_NEWS", Const, 1, ""},
    +		{"LOG_NOTICE", Const, 0, ""},
    +		{"LOG_SYSLOG", Const, 1, ""},
    +		{"LOG_USER", Const, 1, ""},
    +		{"LOG_UUCP", Const, 1, ""},
    +		{"LOG_WARNING", Const, 0, ""},
    +		{"New", Func, 0, "func(priority Priority, tag string) (*Writer, error)"},
    +		{"NewLogger", Func, 0, "func(p Priority, logFlag int) (*log.Logger, error)"},
    +		{"Priority", Type, 0, ""},
    +		{"Writer", Type, 0, ""},
     	},
     	"maps": {
    -		{"All", Func, 23},
    -		{"Clone", Func, 21},
    -		{"Collect", Func, 23},
    -		{"Copy", Func, 21},
    -		{"DeleteFunc", Func, 21},
    -		{"Equal", Func, 21},
    -		{"EqualFunc", Func, 21},
    -		{"Insert", Func, 23},
    -		{"Keys", Func, 23},
    -		{"Values", Func, 23},
    +		{"All", Func, 23, "func[Map ~map[K]V, K comparable, V any](m Map) iter.Seq2[K, V]"},
    +		{"Clone", Func, 21, "func[M ~map[K]V, K comparable, V any](m M) M"},
    +		{"Collect", Func, 23, "func[K comparable, V any](seq iter.Seq2[K, V]) map[K]V"},
    +		{"Copy", Func, 21, "func[M1 ~map[K]V, M2 ~map[K]V, K comparable, V any](dst M1, src M2)"},
    +		{"DeleteFunc", Func, 21, "func[M ~map[K]V, K comparable, V any](m M, del func(K, V) bool)"},
    +		{"Equal", Func, 21, "func[M1, M2 ~map[K]V, K, V comparable](m1 M1, m2 M2) bool"},
    +		{"EqualFunc", Func, 21, "func[M1 ~map[K]V1, M2 ~map[K]V2, K comparable, V1, V2 any](m1 M1, m2 M2, eq func(V1, V2) bool) bool"},
    +		{"Insert", Func, 23, "func[Map ~map[K]V, K comparable, V any](m Map, seq iter.Seq2[K, V])"},
    +		{"Keys", Func, 23, "func[Map ~map[K]V, K comparable, V any](m Map) iter.Seq[K]"},
    +		{"Values", Func, 23, "func[Map ~map[K]V, K comparable, V any](m Map) iter.Seq[V]"},
     	},
     	"math": {
    -		{"Abs", Func, 0},
    -		{"Acos", Func, 0},
    -		{"Acosh", Func, 0},
    -		{"Asin", Func, 0},
    -		{"Asinh", Func, 0},
    -		{"Atan", Func, 0},
    -		{"Atan2", Func, 0},
    -		{"Atanh", Func, 0},
    -		{"Cbrt", Func, 0},
    -		{"Ceil", Func, 0},
    -		{"Copysign", Func, 0},
    -		{"Cos", Func, 0},
    -		{"Cosh", Func, 0},
    -		{"Dim", Func, 0},
    -		{"E", Const, 0},
    -		{"Erf", Func, 0},
    -		{"Erfc", Func, 0},
    -		{"Erfcinv", Func, 10},
    -		{"Erfinv", Func, 10},
    -		{"Exp", Func, 0},
    -		{"Exp2", Func, 0},
    -		{"Expm1", Func, 0},
    -		{"FMA", Func, 14},
    -		{"Float32bits", Func, 0},
    -		{"Float32frombits", Func, 0},
    -		{"Float64bits", Func, 0},
    -		{"Float64frombits", Func, 0},
    -		{"Floor", Func, 0},
    -		{"Frexp", Func, 0},
    -		{"Gamma", Func, 0},
    -		{"Hypot", Func, 0},
    -		{"Ilogb", Func, 0},
    -		{"Inf", Func, 0},
    -		{"IsInf", Func, 0},
    -		{"IsNaN", Func, 0},
    -		{"J0", Func, 0},
    -		{"J1", Func, 0},
    -		{"Jn", Func, 0},
    -		{"Ldexp", Func, 0},
    -		{"Lgamma", Func, 0},
    -		{"Ln10", Const, 0},
    -		{"Ln2", Const, 0},
    -		{"Log", Func, 0},
    -		{"Log10", Func, 0},
    -		{"Log10E", Const, 0},
    -		{"Log1p", Func, 0},
    -		{"Log2", Func, 0},
    -		{"Log2E", Const, 0},
    -		{"Logb", Func, 0},
    -		{"Max", Func, 0},
    -		{"MaxFloat32", Const, 0},
    -		{"MaxFloat64", Const, 0},
    -		{"MaxInt", Const, 17},
    -		{"MaxInt16", Const, 0},
    -		{"MaxInt32", Const, 0},
    -		{"MaxInt64", Const, 0},
    -		{"MaxInt8", Const, 0},
    -		{"MaxUint", Const, 17},
    -		{"MaxUint16", Const, 0},
    -		{"MaxUint32", Const, 0},
    -		{"MaxUint64", Const, 0},
    -		{"MaxUint8", Const, 0},
    -		{"Min", Func, 0},
    -		{"MinInt", Const, 17},
    -		{"MinInt16", Const, 0},
    -		{"MinInt32", Const, 0},
    -		{"MinInt64", Const, 0},
    -		{"MinInt8", Const, 0},
    -		{"Mod", Func, 0},
    -		{"Modf", Func, 0},
    -		{"NaN", Func, 0},
    -		{"Nextafter", Func, 0},
    -		{"Nextafter32", Func, 4},
    -		{"Phi", Const, 0},
    -		{"Pi", Const, 0},
    -		{"Pow", Func, 0},
    -		{"Pow10", Func, 0},
    -		{"Remainder", Func, 0},
    -		{"Round", Func, 10},
    -		{"RoundToEven", Func, 10},
    -		{"Signbit", Func, 0},
    -		{"Sin", Func, 0},
    -		{"Sincos", Func, 0},
    -		{"Sinh", Func, 0},
    -		{"SmallestNonzeroFloat32", Const, 0},
    -		{"SmallestNonzeroFloat64", Const, 0},
    -		{"Sqrt", Func, 0},
    -		{"Sqrt2", Const, 0},
    -		{"SqrtE", Const, 0},
    -		{"SqrtPhi", Const, 0},
    -		{"SqrtPi", Const, 0},
    -		{"Tan", Func, 0},
    -		{"Tanh", Func, 0},
    -		{"Trunc", Func, 0},
    -		{"Y0", Func, 0},
    -		{"Y1", Func, 0},
    -		{"Yn", Func, 0},
    +		{"Abs", Func, 0, "func(x float64) float64"},
    +		{"Acos", Func, 0, "func(x float64) float64"},
    +		{"Acosh", Func, 0, "func(x float64) float64"},
    +		{"Asin", Func, 0, "func(x float64) float64"},
    +		{"Asinh", Func, 0, "func(x float64) float64"},
    +		{"Atan", Func, 0, "func(x float64) float64"},
    +		{"Atan2", Func, 0, "func(y float64, x float64) float64"},
    +		{"Atanh", Func, 0, "func(x float64) float64"},
    +		{"Cbrt", Func, 0, "func(x float64) float64"},
    +		{"Ceil", Func, 0, "func(x float64) float64"},
    +		{"Copysign", Func, 0, "func(f float64, sign float64) float64"},
    +		{"Cos", Func, 0, "func(x float64) float64"},
    +		{"Cosh", Func, 0, "func(x float64) float64"},
    +		{"Dim", Func, 0, "func(x float64, y float64) float64"},
    +		{"E", Const, 0, ""},
    +		{"Erf", Func, 0, "func(x float64) float64"},
    +		{"Erfc", Func, 0, "func(x float64) float64"},
    +		{"Erfcinv", Func, 10, "func(x float64) float64"},
    +		{"Erfinv", Func, 10, "func(x float64) float64"},
    +		{"Exp", Func, 0, "func(x float64) float64"},
    +		{"Exp2", Func, 0, "func(x float64) float64"},
    +		{"Expm1", Func, 0, "func(x float64) float64"},
    +		{"FMA", Func, 14, "func(x float64, y float64, z float64) float64"},
    +		{"Float32bits", Func, 0, "func(f float32) uint32"},
    +		{"Float32frombits", Func, 0, "func(b uint32) float32"},
    +		{"Float64bits", Func, 0, "func(f float64) uint64"},
    +		{"Float64frombits", Func, 0, "func(b uint64) float64"},
    +		{"Floor", Func, 0, "func(x float64) float64"},
    +		{"Frexp", Func, 0, "func(f float64) (frac float64, exp int)"},
    +		{"Gamma", Func, 0, "func(x float64) float64"},
    +		{"Hypot", Func, 0, "func(p float64, q float64) float64"},
    +		{"Ilogb", Func, 0, "func(x float64) int"},
    +		{"Inf", Func, 0, "func(sign int) float64"},
    +		{"IsInf", Func, 0, "func(f float64, sign int) bool"},
    +		{"IsNaN", Func, 0, "func(f float64) (is bool)"},
    +		{"J0", Func, 0, "func(x float64) float64"},
    +		{"J1", Func, 0, "func(x float64) float64"},
    +		{"Jn", Func, 0, "func(n int, x float64) float64"},
    +		{"Ldexp", Func, 0, "func(frac float64, exp int) float64"},
    +		{"Lgamma", Func, 0, "func(x float64) (lgamma float64, sign int)"},
    +		{"Ln10", Const, 0, ""},
    +		{"Ln2", Const, 0, ""},
    +		{"Log", Func, 0, "func(x float64) float64"},
    +		{"Log10", Func, 0, "func(x float64) float64"},
    +		{"Log10E", Const, 0, ""},
    +		{"Log1p", Func, 0, "func(x float64) float64"},
    +		{"Log2", Func, 0, "func(x float64) float64"},
    +		{"Log2E", Const, 0, ""},
    +		{"Logb", Func, 0, "func(x float64) float64"},
    +		{"Max", Func, 0, "func(x float64, y float64) float64"},
    +		{"MaxFloat32", Const, 0, ""},
    +		{"MaxFloat64", Const, 0, ""},
    +		{"MaxInt", Const, 17, ""},
    +		{"MaxInt16", Const, 0, ""},
    +		{"MaxInt32", Const, 0, ""},
    +		{"MaxInt64", Const, 0, ""},
    +		{"MaxInt8", Const, 0, ""},
    +		{"MaxUint", Const, 17, ""},
    +		{"MaxUint16", Const, 0, ""},
    +		{"MaxUint32", Const, 0, ""},
    +		{"MaxUint64", Const, 0, ""},
    +		{"MaxUint8", Const, 0, ""},
    +		{"Min", Func, 0, "func(x float64, y float64) float64"},
    +		{"MinInt", Const, 17, ""},
    +		{"MinInt16", Const, 0, ""},
    +		{"MinInt32", Const, 0, ""},
    +		{"MinInt64", Const, 0, ""},
    +		{"MinInt8", Const, 0, ""},
    +		{"Mod", Func, 0, "func(x float64, y float64) float64"},
    +		{"Modf", Func, 0, "func(f float64) (int float64, frac float64)"},
    +		{"NaN", Func, 0, "func() float64"},
    +		{"Nextafter", Func, 0, "func(x float64, y float64) (r float64)"},
    +		{"Nextafter32", Func, 4, "func(x float32, y float32) (r float32)"},
    +		{"Phi", Const, 0, ""},
    +		{"Pi", Const, 0, ""},
    +		{"Pow", Func, 0, "func(x float64, y float64) float64"},
    +		{"Pow10", Func, 0, "func(n int) float64"},
    +		{"Remainder", Func, 0, "func(x float64, y float64) float64"},
    +		{"Round", Func, 10, "func(x float64) float64"},
    +		{"RoundToEven", Func, 10, "func(x float64) float64"},
    +		{"Signbit", Func, 0, "func(x float64) bool"},
    +		{"Sin", Func, 0, "func(x float64) float64"},
    +		{"Sincos", Func, 0, "func(x float64) (sin float64, cos float64)"},
    +		{"Sinh", Func, 0, "func(x float64) float64"},
    +		{"SmallestNonzeroFloat32", Const, 0, ""},
    +		{"SmallestNonzeroFloat64", Const, 0, ""},
    +		{"Sqrt", Func, 0, "func(x float64) float64"},
    +		{"Sqrt2", Const, 0, ""},
    +		{"SqrtE", Const, 0, ""},
    +		{"SqrtPhi", Const, 0, ""},
    +		{"SqrtPi", Const, 0, ""},
    +		{"Tan", Func, 0, "func(x float64) float64"},
    +		{"Tanh", Func, 0, "func(x float64) float64"},
    +		{"Trunc", Func, 0, "func(x float64) float64"},
    +		{"Y0", Func, 0, "func(x float64) float64"},
    +		{"Y1", Func, 0, "func(x float64) float64"},
    +		{"Yn", Func, 0, "func(n int, x float64) float64"},
     	},
     	"math/big": {
    -		{"(*Float).Abs", Method, 5},
    -		{"(*Float).Acc", Method, 5},
    -		{"(*Float).Add", Method, 5},
    -		{"(*Float).Append", Method, 5},
    -		{"(*Float).Cmp", Method, 5},
    -		{"(*Float).Copy", Method, 5},
    -		{"(*Float).Float32", Method, 5},
    -		{"(*Float).Float64", Method, 5},
    -		{"(*Float).Format", Method, 5},
    -		{"(*Float).GobDecode", Method, 7},
    -		{"(*Float).GobEncode", Method, 7},
    -		{"(*Float).Int", Method, 5},
    -		{"(*Float).Int64", Method, 5},
    -		{"(*Float).IsInf", Method, 5},
    -		{"(*Float).IsInt", Method, 5},
    -		{"(*Float).MantExp", Method, 5},
    -		{"(*Float).MarshalText", Method, 6},
    -		{"(*Float).MinPrec", Method, 5},
    -		{"(*Float).Mode", Method, 5},
    -		{"(*Float).Mul", Method, 5},
    -		{"(*Float).Neg", Method, 5},
    -		{"(*Float).Parse", Method, 5},
    -		{"(*Float).Prec", Method, 5},
    -		{"(*Float).Quo", Method, 5},
    -		{"(*Float).Rat", Method, 5},
    -		{"(*Float).Scan", Method, 8},
    -		{"(*Float).Set", Method, 5},
    -		{"(*Float).SetFloat64", Method, 5},
    -		{"(*Float).SetInf", Method, 5},
    -		{"(*Float).SetInt", Method, 5},
    -		{"(*Float).SetInt64", Method, 5},
    -		{"(*Float).SetMantExp", Method, 5},
    -		{"(*Float).SetMode", Method, 5},
    -		{"(*Float).SetPrec", Method, 5},
    -		{"(*Float).SetRat", Method, 5},
    -		{"(*Float).SetString", Method, 5},
    -		{"(*Float).SetUint64", Method, 5},
    -		{"(*Float).Sign", Method, 5},
    -		{"(*Float).Signbit", Method, 5},
    -		{"(*Float).Sqrt", Method, 10},
    -		{"(*Float).String", Method, 5},
    -		{"(*Float).Sub", Method, 5},
    -		{"(*Float).Text", Method, 5},
    -		{"(*Float).Uint64", Method, 5},
    -		{"(*Float).UnmarshalText", Method, 6},
    -		{"(*Int).Abs", Method, 0},
    -		{"(*Int).Add", Method, 0},
    -		{"(*Int).And", Method, 0},
    -		{"(*Int).AndNot", Method, 0},
    -		{"(*Int).Append", Method, 6},
    -		{"(*Int).Binomial", Method, 0},
    -		{"(*Int).Bit", Method, 0},
    -		{"(*Int).BitLen", Method, 0},
    -		{"(*Int).Bits", Method, 0},
    -		{"(*Int).Bytes", Method, 0},
    -		{"(*Int).Cmp", Method, 0},
    -		{"(*Int).CmpAbs", Method, 10},
    -		{"(*Int).Div", Method, 0},
    -		{"(*Int).DivMod", Method, 0},
    -		{"(*Int).Exp", Method, 0},
    -		{"(*Int).FillBytes", Method, 15},
    -		{"(*Int).Float64", Method, 21},
    -		{"(*Int).Format", Method, 0},
    -		{"(*Int).GCD", Method, 0},
    -		{"(*Int).GobDecode", Method, 0},
    -		{"(*Int).GobEncode", Method, 0},
    -		{"(*Int).Int64", Method, 0},
    -		{"(*Int).IsInt64", Method, 9},
    -		{"(*Int).IsUint64", Method, 9},
    -		{"(*Int).Lsh", Method, 0},
    -		{"(*Int).MarshalJSON", Method, 1},
    -		{"(*Int).MarshalText", Method, 3},
    -		{"(*Int).Mod", Method, 0},
    -		{"(*Int).ModInverse", Method, 0},
    -		{"(*Int).ModSqrt", Method, 5},
    -		{"(*Int).Mul", Method, 0},
    -		{"(*Int).MulRange", Method, 0},
    -		{"(*Int).Neg", Method, 0},
    -		{"(*Int).Not", Method, 0},
    -		{"(*Int).Or", Method, 0},
    -		{"(*Int).ProbablyPrime", Method, 0},
    -		{"(*Int).Quo", Method, 0},
    -		{"(*Int).QuoRem", Method, 0},
    -		{"(*Int).Rand", Method, 0},
    -		{"(*Int).Rem", Method, 0},
    -		{"(*Int).Rsh", Method, 0},
    -		{"(*Int).Scan", Method, 0},
    -		{"(*Int).Set", Method, 0},
    -		{"(*Int).SetBit", Method, 0},
    -		{"(*Int).SetBits", Method, 0},
    -		{"(*Int).SetBytes", Method, 0},
    -		{"(*Int).SetInt64", Method, 0},
    -		{"(*Int).SetString", Method, 0},
    -		{"(*Int).SetUint64", Method, 1},
    -		{"(*Int).Sign", Method, 0},
    -		{"(*Int).Sqrt", Method, 8},
    -		{"(*Int).String", Method, 0},
    -		{"(*Int).Sub", Method, 0},
    -		{"(*Int).Text", Method, 6},
    -		{"(*Int).TrailingZeroBits", Method, 13},
    -		{"(*Int).Uint64", Method, 1},
    -		{"(*Int).UnmarshalJSON", Method, 1},
    -		{"(*Int).UnmarshalText", Method, 3},
    -		{"(*Int).Xor", Method, 0},
    -		{"(*Rat).Abs", Method, 0},
    -		{"(*Rat).Add", Method, 0},
    -		{"(*Rat).Cmp", Method, 0},
    -		{"(*Rat).Denom", Method, 0},
    -		{"(*Rat).Float32", Method, 4},
    -		{"(*Rat).Float64", Method, 1},
    -		{"(*Rat).FloatPrec", Method, 22},
    -		{"(*Rat).FloatString", Method, 0},
    -		{"(*Rat).GobDecode", Method, 0},
    -		{"(*Rat).GobEncode", Method, 0},
    -		{"(*Rat).Inv", Method, 0},
    -		{"(*Rat).IsInt", Method, 0},
    -		{"(*Rat).MarshalText", Method, 3},
    -		{"(*Rat).Mul", Method, 0},
    -		{"(*Rat).Neg", Method, 0},
    -		{"(*Rat).Num", Method, 0},
    -		{"(*Rat).Quo", Method, 0},
    -		{"(*Rat).RatString", Method, 0},
    -		{"(*Rat).Scan", Method, 0},
    -		{"(*Rat).Set", Method, 0},
    -		{"(*Rat).SetFloat64", Method, 1},
    -		{"(*Rat).SetFrac", Method, 0},
    -		{"(*Rat).SetFrac64", Method, 0},
    -		{"(*Rat).SetInt", Method, 0},
    -		{"(*Rat).SetInt64", Method, 0},
    -		{"(*Rat).SetString", Method, 0},
    -		{"(*Rat).SetUint64", Method, 13},
    -		{"(*Rat).Sign", Method, 0},
    -		{"(*Rat).String", Method, 0},
    -		{"(*Rat).Sub", Method, 0},
    -		{"(*Rat).UnmarshalText", Method, 3},
    -		{"(Accuracy).String", Method, 5},
    -		{"(ErrNaN).Error", Method, 5},
    -		{"(RoundingMode).String", Method, 5},
    -		{"Above", Const, 5},
    -		{"Accuracy", Type, 5},
    -		{"AwayFromZero", Const, 5},
    -		{"Below", Const, 5},
    -		{"ErrNaN", Type, 5},
    -		{"Exact", Const, 5},
    -		{"Float", Type, 5},
    -		{"Int", Type, 0},
    -		{"Jacobi", Func, 5},
    -		{"MaxBase", Const, 0},
    -		{"MaxExp", Const, 5},
    -		{"MaxPrec", Const, 5},
    -		{"MinExp", Const, 5},
    -		{"NewFloat", Func, 5},
    -		{"NewInt", Func, 0},
    -		{"NewRat", Func, 0},
    -		{"ParseFloat", Func, 5},
    -		{"Rat", Type, 0},
    -		{"RoundingMode", Type, 5},
    -		{"ToNearestAway", Const, 5},
    -		{"ToNearestEven", Const, 5},
    -		{"ToNegativeInf", Const, 5},
    -		{"ToPositiveInf", Const, 5},
    -		{"ToZero", Const, 5},
    -		{"Word", Type, 0},
    +		{"(*Float).Abs", Method, 5, ""},
    +		{"(*Float).Acc", Method, 5, ""},
    +		{"(*Float).Add", Method, 5, ""},
    +		{"(*Float).Append", Method, 5, ""},
    +		{"(*Float).AppendText", Method, 24, ""},
    +		{"(*Float).Cmp", Method, 5, ""},
    +		{"(*Float).Copy", Method, 5, ""},
    +		{"(*Float).Float32", Method, 5, ""},
    +		{"(*Float).Float64", Method, 5, ""},
    +		{"(*Float).Format", Method, 5, ""},
    +		{"(*Float).GobDecode", Method, 7, ""},
    +		{"(*Float).GobEncode", Method, 7, ""},
    +		{"(*Float).Int", Method, 5, ""},
    +		{"(*Float).Int64", Method, 5, ""},
    +		{"(*Float).IsInf", Method, 5, ""},
    +		{"(*Float).IsInt", Method, 5, ""},
    +		{"(*Float).MantExp", Method, 5, ""},
    +		{"(*Float).MarshalText", Method, 6, ""},
    +		{"(*Float).MinPrec", Method, 5, ""},
    +		{"(*Float).Mode", Method, 5, ""},
    +		{"(*Float).Mul", Method, 5, ""},
    +		{"(*Float).Neg", Method, 5, ""},
    +		{"(*Float).Parse", Method, 5, ""},
    +		{"(*Float).Prec", Method, 5, ""},
    +		{"(*Float).Quo", Method, 5, ""},
    +		{"(*Float).Rat", Method, 5, ""},
    +		{"(*Float).Scan", Method, 8, ""},
    +		{"(*Float).Set", Method, 5, ""},
    +		{"(*Float).SetFloat64", Method, 5, ""},
    +		{"(*Float).SetInf", Method, 5, ""},
    +		{"(*Float).SetInt", Method, 5, ""},
    +		{"(*Float).SetInt64", Method, 5, ""},
    +		{"(*Float).SetMantExp", Method, 5, ""},
    +		{"(*Float).SetMode", Method, 5, ""},
    +		{"(*Float).SetPrec", Method, 5, ""},
    +		{"(*Float).SetRat", Method, 5, ""},
    +		{"(*Float).SetString", Method, 5, ""},
    +		{"(*Float).SetUint64", Method, 5, ""},
    +		{"(*Float).Sign", Method, 5, ""},
    +		{"(*Float).Signbit", Method, 5, ""},
    +		{"(*Float).Sqrt", Method, 10, ""},
    +		{"(*Float).String", Method, 5, ""},
    +		{"(*Float).Sub", Method, 5, ""},
    +		{"(*Float).Text", Method, 5, ""},
    +		{"(*Float).Uint64", Method, 5, ""},
    +		{"(*Float).UnmarshalText", Method, 6, ""},
    +		{"(*Int).Abs", Method, 0, ""},
    +		{"(*Int).Add", Method, 0, ""},
    +		{"(*Int).And", Method, 0, ""},
    +		{"(*Int).AndNot", Method, 0, ""},
    +		{"(*Int).Append", Method, 6, ""},
    +		{"(*Int).AppendText", Method, 24, ""},
    +		{"(*Int).Binomial", Method, 0, ""},
    +		{"(*Int).Bit", Method, 0, ""},
    +		{"(*Int).BitLen", Method, 0, ""},
    +		{"(*Int).Bits", Method, 0, ""},
    +		{"(*Int).Bytes", Method, 0, ""},
    +		{"(*Int).Cmp", Method, 0, ""},
    +		{"(*Int).CmpAbs", Method, 10, ""},
    +		{"(*Int).Div", Method, 0, ""},
    +		{"(*Int).DivMod", Method, 0, ""},
    +		{"(*Int).Exp", Method, 0, ""},
    +		{"(*Int).FillBytes", Method, 15, ""},
    +		{"(*Int).Float64", Method, 21, ""},
    +		{"(*Int).Format", Method, 0, ""},
    +		{"(*Int).GCD", Method, 0, ""},
    +		{"(*Int).GobDecode", Method, 0, ""},
    +		{"(*Int).GobEncode", Method, 0, ""},
    +		{"(*Int).Int64", Method, 0, ""},
    +		{"(*Int).IsInt64", Method, 9, ""},
    +		{"(*Int).IsUint64", Method, 9, ""},
    +		{"(*Int).Lsh", Method, 0, ""},
    +		{"(*Int).MarshalJSON", Method, 1, ""},
    +		{"(*Int).MarshalText", Method, 3, ""},
    +		{"(*Int).Mod", Method, 0, ""},
    +		{"(*Int).ModInverse", Method, 0, ""},
    +		{"(*Int).ModSqrt", Method, 5, ""},
    +		{"(*Int).Mul", Method, 0, ""},
    +		{"(*Int).MulRange", Method, 0, ""},
    +		{"(*Int).Neg", Method, 0, ""},
    +		{"(*Int).Not", Method, 0, ""},
    +		{"(*Int).Or", Method, 0, ""},
    +		{"(*Int).ProbablyPrime", Method, 0, ""},
    +		{"(*Int).Quo", Method, 0, ""},
    +		{"(*Int).QuoRem", Method, 0, ""},
    +		{"(*Int).Rand", Method, 0, ""},
    +		{"(*Int).Rem", Method, 0, ""},
    +		{"(*Int).Rsh", Method, 0, ""},
    +		{"(*Int).Scan", Method, 0, ""},
    +		{"(*Int).Set", Method, 0, ""},
    +		{"(*Int).SetBit", Method, 0, ""},
    +		{"(*Int).SetBits", Method, 0, ""},
    +		{"(*Int).SetBytes", Method, 0, ""},
    +		{"(*Int).SetInt64", Method, 0, ""},
    +		{"(*Int).SetString", Method, 0, ""},
    +		{"(*Int).SetUint64", Method, 1, ""},
    +		{"(*Int).Sign", Method, 0, ""},
    +		{"(*Int).Sqrt", Method, 8, ""},
    +		{"(*Int).String", Method, 0, ""},
    +		{"(*Int).Sub", Method, 0, ""},
    +		{"(*Int).Text", Method, 6, ""},
    +		{"(*Int).TrailingZeroBits", Method, 13, ""},
    +		{"(*Int).Uint64", Method, 1, ""},
    +		{"(*Int).UnmarshalJSON", Method, 1, ""},
    +		{"(*Int).UnmarshalText", Method, 3, ""},
    +		{"(*Int).Xor", Method, 0, ""},
    +		{"(*Rat).Abs", Method, 0, ""},
    +		{"(*Rat).Add", Method, 0, ""},
    +		{"(*Rat).AppendText", Method, 24, ""},
    +		{"(*Rat).Cmp", Method, 0, ""},
    +		{"(*Rat).Denom", Method, 0, ""},
    +		{"(*Rat).Float32", Method, 4, ""},
    +		{"(*Rat).Float64", Method, 1, ""},
    +		{"(*Rat).FloatPrec", Method, 22, ""},
    +		{"(*Rat).FloatString", Method, 0, ""},
    +		{"(*Rat).GobDecode", Method, 0, ""},
    +		{"(*Rat).GobEncode", Method, 0, ""},
    +		{"(*Rat).Inv", Method, 0, ""},
    +		{"(*Rat).IsInt", Method, 0, ""},
    +		{"(*Rat).MarshalText", Method, 3, ""},
    +		{"(*Rat).Mul", Method, 0, ""},
    +		{"(*Rat).Neg", Method, 0, ""},
    +		{"(*Rat).Num", Method, 0, ""},
    +		{"(*Rat).Quo", Method, 0, ""},
    +		{"(*Rat).RatString", Method, 0, ""},
    +		{"(*Rat).Scan", Method, 0, ""},
    +		{"(*Rat).Set", Method, 0, ""},
    +		{"(*Rat).SetFloat64", Method, 1, ""},
    +		{"(*Rat).SetFrac", Method, 0, ""},
    +		{"(*Rat).SetFrac64", Method, 0, ""},
    +		{"(*Rat).SetInt", Method, 0, ""},
    +		{"(*Rat).SetInt64", Method, 0, ""},
    +		{"(*Rat).SetString", Method, 0, ""},
    +		{"(*Rat).SetUint64", Method, 13, ""},
    +		{"(*Rat).Sign", Method, 0, ""},
    +		{"(*Rat).String", Method, 0, ""},
    +		{"(*Rat).Sub", Method, 0, ""},
    +		{"(*Rat).UnmarshalText", Method, 3, ""},
    +		{"(Accuracy).String", Method, 5, ""},
    +		{"(ErrNaN).Error", Method, 5, ""},
    +		{"(RoundingMode).String", Method, 5, ""},
    +		{"Above", Const, 5, ""},
    +		{"Accuracy", Type, 5, ""},
    +		{"AwayFromZero", Const, 5, ""},
    +		{"Below", Const, 5, ""},
    +		{"ErrNaN", Type, 5, ""},
    +		{"Exact", Const, 5, ""},
    +		{"Float", Type, 5, ""},
    +		{"Int", Type, 0, ""},
    +		{"Jacobi", Func, 5, "func(x *Int, y *Int) int"},
    +		{"MaxBase", Const, 0, ""},
    +		{"MaxExp", Const, 5, ""},
    +		{"MaxPrec", Const, 5, ""},
    +		{"MinExp", Const, 5, ""},
    +		{"NewFloat", Func, 5, "func(x float64) *Float"},
    +		{"NewInt", Func, 0, "func(x int64) *Int"},
    +		{"NewRat", Func, 0, "func(a int64, b int64) *Rat"},
    +		{"ParseFloat", Func, 5, "func(s string, base int, prec uint, mode RoundingMode) (f *Float, b int, err error)"},
    +		{"Rat", Type, 0, ""},
    +		{"RoundingMode", Type, 5, ""},
    +		{"ToNearestAway", Const, 5, ""},
    +		{"ToNearestEven", Const, 5, ""},
    +		{"ToNegativeInf", Const, 5, ""},
    +		{"ToPositiveInf", Const, 5, ""},
    +		{"ToZero", Const, 5, ""},
    +		{"Word", Type, 0, ""},
     	},
     	"math/bits": {
    -		{"Add", Func, 12},
    -		{"Add32", Func, 12},
    -		{"Add64", Func, 12},
    -		{"Div", Func, 12},
    -		{"Div32", Func, 12},
    -		{"Div64", Func, 12},
    -		{"LeadingZeros", Func, 9},
    -		{"LeadingZeros16", Func, 9},
    -		{"LeadingZeros32", Func, 9},
    -		{"LeadingZeros64", Func, 9},
    -		{"LeadingZeros8", Func, 9},
    -		{"Len", Func, 9},
    -		{"Len16", Func, 9},
    -		{"Len32", Func, 9},
    -		{"Len64", Func, 9},
    -		{"Len8", Func, 9},
    -		{"Mul", Func, 12},
    -		{"Mul32", Func, 12},
    -		{"Mul64", Func, 12},
    -		{"OnesCount", Func, 9},
    -		{"OnesCount16", Func, 9},
    -		{"OnesCount32", Func, 9},
    -		{"OnesCount64", Func, 9},
    -		{"OnesCount8", Func, 9},
    -		{"Rem", Func, 14},
    -		{"Rem32", Func, 14},
    -		{"Rem64", Func, 14},
    -		{"Reverse", Func, 9},
    -		{"Reverse16", Func, 9},
    -		{"Reverse32", Func, 9},
    -		{"Reverse64", Func, 9},
    -		{"Reverse8", Func, 9},
    -		{"ReverseBytes", Func, 9},
    -		{"ReverseBytes16", Func, 9},
    -		{"ReverseBytes32", Func, 9},
    -		{"ReverseBytes64", Func, 9},
    -		{"RotateLeft", Func, 9},
    -		{"RotateLeft16", Func, 9},
    -		{"RotateLeft32", Func, 9},
    -		{"RotateLeft64", Func, 9},
    -		{"RotateLeft8", Func, 9},
    -		{"Sub", Func, 12},
    -		{"Sub32", Func, 12},
    -		{"Sub64", Func, 12},
    -		{"TrailingZeros", Func, 9},
    -		{"TrailingZeros16", Func, 9},
    -		{"TrailingZeros32", Func, 9},
    -		{"TrailingZeros64", Func, 9},
    -		{"TrailingZeros8", Func, 9},
    -		{"UintSize", Const, 9},
    +		{"Add", Func, 12, "func(x uint, y uint, carry uint) (sum uint, carryOut uint)"},
    +		{"Add32", Func, 12, "func(x uint32, y uint32, carry uint32) (sum uint32, carryOut uint32)"},
    +		{"Add64", Func, 12, "func(x uint64, y uint64, carry uint64) (sum uint64, carryOut uint64)"},
    +		{"Div", Func, 12, "func(hi uint, lo uint, y uint) (quo uint, rem uint)"},
    +		{"Div32", Func, 12, "func(hi uint32, lo uint32, y uint32) (quo uint32, rem uint32)"},
    +		{"Div64", Func, 12, "func(hi uint64, lo uint64, y uint64) (quo uint64, rem uint64)"},
    +		{"LeadingZeros", Func, 9, "func(x uint) int"},
    +		{"LeadingZeros16", Func, 9, "func(x uint16) int"},
    +		{"LeadingZeros32", Func, 9, "func(x uint32) int"},
    +		{"LeadingZeros64", Func, 9, "func(x uint64) int"},
    +		{"LeadingZeros8", Func, 9, "func(x uint8) int"},
    +		{"Len", Func, 9, "func(x uint) int"},
    +		{"Len16", Func, 9, "func(x uint16) (n int)"},
    +		{"Len32", Func, 9, "func(x uint32) (n int)"},
    +		{"Len64", Func, 9, "func(x uint64) (n int)"},
    +		{"Len8", Func, 9, "func(x uint8) int"},
    +		{"Mul", Func, 12, "func(x uint, y uint) (hi uint, lo uint)"},
    +		{"Mul32", Func, 12, "func(x uint32, y uint32) (hi uint32, lo uint32)"},
    +		{"Mul64", Func, 12, "func(x uint64, y uint64) (hi uint64, lo uint64)"},
    +		{"OnesCount", Func, 9, "func(x uint) int"},
    +		{"OnesCount16", Func, 9, "func(x uint16) int"},
    +		{"OnesCount32", Func, 9, "func(x uint32) int"},
    +		{"OnesCount64", Func, 9, "func(x uint64) int"},
    +		{"OnesCount8", Func, 9, "func(x uint8) int"},
    +		{"Rem", Func, 14, "func(hi uint, lo uint, y uint) uint"},
    +		{"Rem32", Func, 14, "func(hi uint32, lo uint32, y uint32) uint32"},
    +		{"Rem64", Func, 14, "func(hi uint64, lo uint64, y uint64) uint64"},
    +		{"Reverse", Func, 9, "func(x uint) uint"},
    +		{"Reverse16", Func, 9, "func(x uint16) uint16"},
    +		{"Reverse32", Func, 9, "func(x uint32) uint32"},
    +		{"Reverse64", Func, 9, "func(x uint64) uint64"},
    +		{"Reverse8", Func, 9, "func(x uint8) uint8"},
    +		{"ReverseBytes", Func, 9, "func(x uint) uint"},
    +		{"ReverseBytes16", Func, 9, "func(x uint16) uint16"},
    +		{"ReverseBytes32", Func, 9, "func(x uint32) uint32"},
    +		{"ReverseBytes64", Func, 9, "func(x uint64) uint64"},
    +		{"RotateLeft", Func, 9, "func(x uint, k int) uint"},
    +		{"RotateLeft16", Func, 9, "func(x uint16, k int) uint16"},
    +		{"RotateLeft32", Func, 9, "func(x uint32, k int) uint32"},
    +		{"RotateLeft64", Func, 9, "func(x uint64, k int) uint64"},
    +		{"RotateLeft8", Func, 9, "func(x uint8, k int) uint8"},
    +		{"Sub", Func, 12, "func(x uint, y uint, borrow uint) (diff uint, borrowOut uint)"},
    +		{"Sub32", Func, 12, "func(x uint32, y uint32, borrow uint32) (diff uint32, borrowOut uint32)"},
    +		{"Sub64", Func, 12, "func(x uint64, y uint64, borrow uint64) (diff uint64, borrowOut uint64)"},
    +		{"TrailingZeros", Func, 9, "func(x uint) int"},
    +		{"TrailingZeros16", Func, 9, "func(x uint16) int"},
    +		{"TrailingZeros32", Func, 9, "func(x uint32) int"},
    +		{"TrailingZeros64", Func, 9, "func(x uint64) int"},
    +		{"TrailingZeros8", Func, 9, "func(x uint8) int"},
    +		{"UintSize", Const, 9, ""},
     	},
     	"math/cmplx": {
    -		{"Abs", Func, 0},
    -		{"Acos", Func, 0},
    -		{"Acosh", Func, 0},
    -		{"Asin", Func, 0},
    -		{"Asinh", Func, 0},
    -		{"Atan", Func, 0},
    -		{"Atanh", Func, 0},
    -		{"Conj", Func, 0},
    -		{"Cos", Func, 0},
    -		{"Cosh", Func, 0},
    -		{"Cot", Func, 0},
    -		{"Exp", Func, 0},
    -		{"Inf", Func, 0},
    -		{"IsInf", Func, 0},
    -		{"IsNaN", Func, 0},
    -		{"Log", Func, 0},
    -		{"Log10", Func, 0},
    -		{"NaN", Func, 0},
    -		{"Phase", Func, 0},
    -		{"Polar", Func, 0},
    -		{"Pow", Func, 0},
    -		{"Rect", Func, 0},
    -		{"Sin", Func, 0},
    -		{"Sinh", Func, 0},
    -		{"Sqrt", Func, 0},
    -		{"Tan", Func, 0},
    -		{"Tanh", Func, 0},
    +		{"Abs", Func, 0, "func(x complex128) float64"},
    +		{"Acos", Func, 0, "func(x complex128) complex128"},
    +		{"Acosh", Func, 0, "func(x complex128) complex128"},
    +		{"Asin", Func, 0, "func(x complex128) complex128"},
    +		{"Asinh", Func, 0, "func(x complex128) complex128"},
    +		{"Atan", Func, 0, "func(x complex128) complex128"},
    +		{"Atanh", Func, 0, "func(x complex128) complex128"},
    +		{"Conj", Func, 0, "func(x complex128) complex128"},
    +		{"Cos", Func, 0, "func(x complex128) complex128"},
    +		{"Cosh", Func, 0, "func(x complex128) complex128"},
    +		{"Cot", Func, 0, "func(x complex128) complex128"},
    +		{"Exp", Func, 0, "func(x complex128) complex128"},
    +		{"Inf", Func, 0, "func() complex128"},
    +		{"IsInf", Func, 0, "func(x complex128) bool"},
    +		{"IsNaN", Func, 0, "func(x complex128) bool"},
    +		{"Log", Func, 0, "func(x complex128) complex128"},
    +		{"Log10", Func, 0, "func(x complex128) complex128"},
    +		{"NaN", Func, 0, "func() complex128"},
    +		{"Phase", Func, 0, "func(x complex128) float64"},
    +		{"Polar", Func, 0, "func(x complex128) (r float64, θ float64)"},
    +		{"Pow", Func, 0, "func(x complex128, y complex128) complex128"},
    +		{"Rect", Func, 0, "func(r float64, θ float64) complex128"},
    +		{"Sin", Func, 0, "func(x complex128) complex128"},
    +		{"Sinh", Func, 0, "func(x complex128) complex128"},
    +		{"Sqrt", Func, 0, "func(x complex128) complex128"},
    +		{"Tan", Func, 0, "func(x complex128) complex128"},
    +		{"Tanh", Func, 0, "func(x complex128) complex128"},
     	},
     	"math/rand": {
    -		{"(*Rand).ExpFloat64", Method, 0},
    -		{"(*Rand).Float32", Method, 0},
    -		{"(*Rand).Float64", Method, 0},
    -		{"(*Rand).Int", Method, 0},
    -		{"(*Rand).Int31", Method, 0},
    -		{"(*Rand).Int31n", Method, 0},
    -		{"(*Rand).Int63", Method, 0},
    -		{"(*Rand).Int63n", Method, 0},
    -		{"(*Rand).Intn", Method, 0},
    -		{"(*Rand).NormFloat64", Method, 0},
    -		{"(*Rand).Perm", Method, 0},
    -		{"(*Rand).Read", Method, 6},
    -		{"(*Rand).Seed", Method, 0},
    -		{"(*Rand).Shuffle", Method, 10},
    -		{"(*Rand).Uint32", Method, 0},
    -		{"(*Rand).Uint64", Method, 8},
    -		{"(*Zipf).Uint64", Method, 0},
    -		{"ExpFloat64", Func, 0},
    -		{"Float32", Func, 0},
    -		{"Float64", Func, 0},
    -		{"Int", Func, 0},
    -		{"Int31", Func, 0},
    -		{"Int31n", Func, 0},
    -		{"Int63", Func, 0},
    -		{"Int63n", Func, 0},
    -		{"Intn", Func, 0},
    -		{"New", Func, 0},
    -		{"NewSource", Func, 0},
    -		{"NewZipf", Func, 0},
    -		{"NormFloat64", Func, 0},
    -		{"Perm", Func, 0},
    -		{"Rand", Type, 0},
    -		{"Read", Func, 6},
    -		{"Seed", Func, 0},
    -		{"Shuffle", Func, 10},
    -		{"Source", Type, 0},
    -		{"Source64", Type, 8},
    -		{"Uint32", Func, 0},
    -		{"Uint64", Func, 8},
    -		{"Zipf", Type, 0},
    +		{"(*Rand).ExpFloat64", Method, 0, ""},
    +		{"(*Rand).Float32", Method, 0, ""},
    +		{"(*Rand).Float64", Method, 0, ""},
    +		{"(*Rand).Int", Method, 0, ""},
    +		{"(*Rand).Int31", Method, 0, ""},
    +		{"(*Rand).Int31n", Method, 0, ""},
    +		{"(*Rand).Int63", Method, 0, ""},
    +		{"(*Rand).Int63n", Method, 0, ""},
    +		{"(*Rand).Intn", Method, 0, ""},
    +		{"(*Rand).NormFloat64", Method, 0, ""},
    +		{"(*Rand).Perm", Method, 0, ""},
    +		{"(*Rand).Read", Method, 6, ""},
    +		{"(*Rand).Seed", Method, 0, ""},
    +		{"(*Rand).Shuffle", Method, 10, ""},
    +		{"(*Rand).Uint32", Method, 0, ""},
    +		{"(*Rand).Uint64", Method, 8, ""},
    +		{"(*Zipf).Uint64", Method, 0, ""},
    +		{"ExpFloat64", Func, 0, "func() float64"},
    +		{"Float32", Func, 0, "func() float32"},
    +		{"Float64", Func, 0, "func() float64"},
    +		{"Int", Func, 0, "func() int"},
    +		{"Int31", Func, 0, "func() int32"},
    +		{"Int31n", Func, 0, "func(n int32) int32"},
    +		{"Int63", Func, 0, "func() int64"},
    +		{"Int63n", Func, 0, "func(n int64) int64"},
    +		{"Intn", Func, 0, "func(n int) int"},
    +		{"New", Func, 0, "func(src Source) *Rand"},
    +		{"NewSource", Func, 0, "func(seed int64) Source"},
    +		{"NewZipf", Func, 0, "func(r *Rand, s float64, v float64, imax uint64) *Zipf"},
    +		{"NormFloat64", Func, 0, "func() float64"},
    +		{"Perm", Func, 0, "func(n int) []int"},
    +		{"Rand", Type, 0, ""},
    +		{"Read", Func, 6, "func(p []byte) (n int, err error)"},
    +		{"Seed", Func, 0, "func(seed int64)"},
    +		{"Shuffle", Func, 10, "func(n int, swap func(i int, j int))"},
    +		{"Source", Type, 0, ""},
    +		{"Source64", Type, 8, ""},
    +		{"Uint32", Func, 0, "func() uint32"},
    +		{"Uint64", Func, 8, "func() uint64"},
    +		{"Zipf", Type, 0, ""},
     	},
     	"math/rand/v2": {
    -		{"(*ChaCha8).MarshalBinary", Method, 22},
    -		{"(*ChaCha8).Read", Method, 23},
    -		{"(*ChaCha8).Seed", Method, 22},
    -		{"(*ChaCha8).Uint64", Method, 22},
    -		{"(*ChaCha8).UnmarshalBinary", Method, 22},
    -		{"(*PCG).MarshalBinary", Method, 22},
    -		{"(*PCG).Seed", Method, 22},
    -		{"(*PCG).Uint64", Method, 22},
    -		{"(*PCG).UnmarshalBinary", Method, 22},
    -		{"(*Rand).ExpFloat64", Method, 22},
    -		{"(*Rand).Float32", Method, 22},
    -		{"(*Rand).Float64", Method, 22},
    -		{"(*Rand).Int", Method, 22},
    -		{"(*Rand).Int32", Method, 22},
    -		{"(*Rand).Int32N", Method, 22},
    -		{"(*Rand).Int64", Method, 22},
    -		{"(*Rand).Int64N", Method, 22},
    -		{"(*Rand).IntN", Method, 22},
    -		{"(*Rand).NormFloat64", Method, 22},
    -		{"(*Rand).Perm", Method, 22},
    -		{"(*Rand).Shuffle", Method, 22},
    -		{"(*Rand).Uint", Method, 23},
    -		{"(*Rand).Uint32", Method, 22},
    -		{"(*Rand).Uint32N", Method, 22},
    -		{"(*Rand).Uint64", Method, 22},
    -		{"(*Rand).Uint64N", Method, 22},
    -		{"(*Rand).UintN", Method, 22},
    -		{"(*Zipf).Uint64", Method, 22},
    -		{"ChaCha8", Type, 22},
    -		{"ExpFloat64", Func, 22},
    -		{"Float32", Func, 22},
    -		{"Float64", Func, 22},
    -		{"Int", Func, 22},
    -		{"Int32", Func, 22},
    -		{"Int32N", Func, 22},
    -		{"Int64", Func, 22},
    -		{"Int64N", Func, 22},
    -		{"IntN", Func, 22},
    -		{"N", Func, 22},
    -		{"New", Func, 22},
    -		{"NewChaCha8", Func, 22},
    -		{"NewPCG", Func, 22},
    -		{"NewZipf", Func, 22},
    -		{"NormFloat64", Func, 22},
    -		{"PCG", Type, 22},
    -		{"Perm", Func, 22},
    -		{"Rand", Type, 22},
    -		{"Shuffle", Func, 22},
    -		{"Source", Type, 22},
    -		{"Uint", Func, 23},
    -		{"Uint32", Func, 22},
    -		{"Uint32N", Func, 22},
    -		{"Uint64", Func, 22},
    -		{"Uint64N", Func, 22},
    -		{"UintN", Func, 22},
    -		{"Zipf", Type, 22},
    +		{"(*ChaCha8).AppendBinary", Method, 24, ""},
    +		{"(*ChaCha8).MarshalBinary", Method, 22, ""},
    +		{"(*ChaCha8).Read", Method, 23, ""},
    +		{"(*ChaCha8).Seed", Method, 22, ""},
    +		{"(*ChaCha8).Uint64", Method, 22, ""},
    +		{"(*ChaCha8).UnmarshalBinary", Method, 22, ""},
    +		{"(*PCG).AppendBinary", Method, 24, ""},
    +		{"(*PCG).MarshalBinary", Method, 22, ""},
    +		{"(*PCG).Seed", Method, 22, ""},
    +		{"(*PCG).Uint64", Method, 22, ""},
    +		{"(*PCG).UnmarshalBinary", Method, 22, ""},
    +		{"(*Rand).ExpFloat64", Method, 22, ""},
    +		{"(*Rand).Float32", Method, 22, ""},
    +		{"(*Rand).Float64", Method, 22, ""},
    +		{"(*Rand).Int", Method, 22, ""},
    +		{"(*Rand).Int32", Method, 22, ""},
    +		{"(*Rand).Int32N", Method, 22, ""},
    +		{"(*Rand).Int64", Method, 22, ""},
    +		{"(*Rand).Int64N", Method, 22, ""},
    +		{"(*Rand).IntN", Method, 22, ""},
    +		{"(*Rand).NormFloat64", Method, 22, ""},
    +		{"(*Rand).Perm", Method, 22, ""},
    +		{"(*Rand).Shuffle", Method, 22, ""},
    +		{"(*Rand).Uint", Method, 23, ""},
    +		{"(*Rand).Uint32", Method, 22, ""},
    +		{"(*Rand).Uint32N", Method, 22, ""},
    +		{"(*Rand).Uint64", Method, 22, ""},
    +		{"(*Rand).Uint64N", Method, 22, ""},
    +		{"(*Rand).UintN", Method, 22, ""},
    +		{"(*Zipf).Uint64", Method, 22, ""},
    +		{"ChaCha8", Type, 22, ""},
    +		{"ExpFloat64", Func, 22, "func() float64"},
    +		{"Float32", Func, 22, "func() float32"},
    +		{"Float64", Func, 22, "func() float64"},
    +		{"Int", Func, 22, "func() int"},
    +		{"Int32", Func, 22, "func() int32"},
    +		{"Int32N", Func, 22, "func(n int32) int32"},
    +		{"Int64", Func, 22, "func() int64"},
    +		{"Int64N", Func, 22, "func(n int64) int64"},
    +		{"IntN", Func, 22, "func(n int) int"},
    +		{"N", Func, 22, "func[Int intType](n Int) Int"},
    +		{"New", Func, 22, "func(src Source) *Rand"},
    +		{"NewChaCha8", Func, 22, "func(seed [32]byte) *ChaCha8"},
    +		{"NewPCG", Func, 22, "func(seed1 uint64, seed2 uint64) *PCG"},
    +		{"NewZipf", Func, 22, "func(r *Rand, s float64, v float64, imax uint64) *Zipf"},
    +		{"NormFloat64", Func, 22, "func() float64"},
    +		{"PCG", Type, 22, ""},
    +		{"Perm", Func, 22, "func(n int) []int"},
    +		{"Rand", Type, 22, ""},
    +		{"Shuffle", Func, 22, "func(n int, swap func(i int, j int))"},
    +		{"Source", Type, 22, ""},
    +		{"Uint", Func, 23, "func() uint"},
    +		{"Uint32", Func, 22, "func() uint32"},
    +		{"Uint32N", Func, 22, "func(n uint32) uint32"},
    +		{"Uint64", Func, 22, "func() uint64"},
    +		{"Uint64N", Func, 22, "func(n uint64) uint64"},
    +		{"UintN", Func, 22, "func(n uint) uint"},
    +		{"Zipf", Type, 22, ""},
     	},
     	"mime": {
    -		{"(*WordDecoder).Decode", Method, 5},
    -		{"(*WordDecoder).DecodeHeader", Method, 5},
    -		{"(WordEncoder).Encode", Method, 5},
    -		{"AddExtensionType", Func, 0},
    -		{"BEncoding", Const, 5},
    -		{"ErrInvalidMediaParameter", Var, 9},
    -		{"ExtensionsByType", Func, 5},
    -		{"FormatMediaType", Func, 0},
    -		{"ParseMediaType", Func, 0},
    -		{"QEncoding", Const, 5},
    -		{"TypeByExtension", Func, 0},
    -		{"WordDecoder", Type, 5},
    -		{"WordDecoder.CharsetReader", Field, 5},
    -		{"WordEncoder", Type, 5},
    +		{"(*WordDecoder).Decode", Method, 5, ""},
    +		{"(*WordDecoder).DecodeHeader", Method, 5, ""},
    +		{"(WordEncoder).Encode", Method, 5, ""},
    +		{"AddExtensionType", Func, 0, "func(ext string, typ string) error"},
    +		{"BEncoding", Const, 5, ""},
    +		{"ErrInvalidMediaParameter", Var, 9, ""},
    +		{"ExtensionsByType", Func, 5, "func(typ string) ([]string, error)"},
    +		{"FormatMediaType", Func, 0, "func(t string, param map[string]string) string"},
    +		{"ParseMediaType", Func, 0, "func(v string) (mediatype string, params map[string]string, err error)"},
    +		{"QEncoding", Const, 5, ""},
    +		{"TypeByExtension", Func, 0, "func(ext string) string"},
    +		{"WordDecoder", Type, 5, ""},
    +		{"WordDecoder.CharsetReader", Field, 5, ""},
    +		{"WordEncoder", Type, 5, ""},
     	},
     	"mime/multipart": {
    -		{"(*FileHeader).Open", Method, 0},
    -		{"(*Form).RemoveAll", Method, 0},
    -		{"(*Part).Close", Method, 0},
    -		{"(*Part).FileName", Method, 0},
    -		{"(*Part).FormName", Method, 0},
    -		{"(*Part).Read", Method, 0},
    -		{"(*Reader).NextPart", Method, 0},
    -		{"(*Reader).NextRawPart", Method, 14},
    -		{"(*Reader).ReadForm", Method, 0},
    -		{"(*Writer).Boundary", Method, 0},
    -		{"(*Writer).Close", Method, 0},
    -		{"(*Writer).CreateFormField", Method, 0},
    -		{"(*Writer).CreateFormFile", Method, 0},
    -		{"(*Writer).CreatePart", Method, 0},
    -		{"(*Writer).FormDataContentType", Method, 0},
    -		{"(*Writer).SetBoundary", Method, 1},
    -		{"(*Writer).WriteField", Method, 0},
    -		{"ErrMessageTooLarge", Var, 9},
    -		{"File", Type, 0},
    -		{"FileHeader", Type, 0},
    -		{"FileHeader.Filename", Field, 0},
    -		{"FileHeader.Header", Field, 0},
    -		{"FileHeader.Size", Field, 9},
    -		{"Form", Type, 0},
    -		{"Form.File", Field, 0},
    -		{"Form.Value", Field, 0},
    -		{"NewReader", Func, 0},
    -		{"NewWriter", Func, 0},
    -		{"Part", Type, 0},
    -		{"Part.Header", Field, 0},
    -		{"Reader", Type, 0},
    -		{"Writer", Type, 0},
    +		{"(*FileHeader).Open", Method, 0, ""},
    +		{"(*Form).RemoveAll", Method, 0, ""},
    +		{"(*Part).Close", Method, 0, ""},
    +		{"(*Part).FileName", Method, 0, ""},
    +		{"(*Part).FormName", Method, 0, ""},
    +		{"(*Part).Read", Method, 0, ""},
    +		{"(*Reader).NextPart", Method, 0, ""},
    +		{"(*Reader).NextRawPart", Method, 14, ""},
    +		{"(*Reader).ReadForm", Method, 0, ""},
    +		{"(*Writer).Boundary", Method, 0, ""},
    +		{"(*Writer).Close", Method, 0, ""},
    +		{"(*Writer).CreateFormField", Method, 0, ""},
    +		{"(*Writer).CreateFormFile", Method, 0, ""},
    +		{"(*Writer).CreatePart", Method, 0, ""},
    +		{"(*Writer).FormDataContentType", Method, 0, ""},
    +		{"(*Writer).SetBoundary", Method, 1, ""},
    +		{"(*Writer).WriteField", Method, 0, ""},
    +		{"ErrMessageTooLarge", Var, 9, ""},
    +		{"File", Type, 0, ""},
    +		{"FileContentDisposition", Func, 25, "func(fieldname string, filename string) string"},
    +		{"FileHeader", Type, 0, ""},
    +		{"FileHeader.Filename", Field, 0, ""},
    +		{"FileHeader.Header", Field, 0, ""},
    +		{"FileHeader.Size", Field, 9, ""},
    +		{"Form", Type, 0, ""},
    +		{"Form.File", Field, 0, ""},
    +		{"Form.Value", Field, 0, ""},
    +		{"NewReader", Func, 0, "func(r io.Reader, boundary string) *Reader"},
    +		{"NewWriter", Func, 0, "func(w io.Writer) *Writer"},
    +		{"Part", Type, 0, ""},
    +		{"Part.Header", Field, 0, ""},
    +		{"Reader", Type, 0, ""},
    +		{"Writer", Type, 0, ""},
     	},
     	"mime/quotedprintable": {
    -		{"(*Reader).Read", Method, 5},
    -		{"(*Writer).Close", Method, 5},
    -		{"(*Writer).Write", Method, 5},
    -		{"NewReader", Func, 5},
    -		{"NewWriter", Func, 5},
    -		{"Reader", Type, 5},
    -		{"Writer", Type, 5},
    -		{"Writer.Binary", Field, 5},
    +		{"(*Reader).Read", Method, 5, ""},
    +		{"(*Writer).Close", Method, 5, ""},
    +		{"(*Writer).Write", Method, 5, ""},
    +		{"NewReader", Func, 5, "func(r io.Reader) *Reader"},
    +		{"NewWriter", Func, 5, "func(w io.Writer) *Writer"},
    +		{"Reader", Type, 5, ""},
    +		{"Writer", Type, 5, ""},
    +		{"Writer.Binary", Field, 5, ""},
     	},
     	"net": {
    -		{"(*AddrError).Error", Method, 0},
    -		{"(*AddrError).Temporary", Method, 0},
    -		{"(*AddrError).Timeout", Method, 0},
    -		{"(*Buffers).Read", Method, 8},
    -		{"(*Buffers).WriteTo", Method, 8},
    -		{"(*DNSConfigError).Error", Method, 0},
    -		{"(*DNSConfigError).Temporary", Method, 0},
    -		{"(*DNSConfigError).Timeout", Method, 0},
    -		{"(*DNSConfigError).Unwrap", Method, 13},
    -		{"(*DNSError).Error", Method, 0},
    -		{"(*DNSError).Temporary", Method, 0},
    -		{"(*DNSError).Timeout", Method, 0},
    -		{"(*DNSError).Unwrap", Method, 23},
    -		{"(*Dialer).Dial", Method, 1},
    -		{"(*Dialer).DialContext", Method, 7},
    -		{"(*Dialer).MultipathTCP", Method, 21},
    -		{"(*Dialer).SetMultipathTCP", Method, 21},
    -		{"(*IP).UnmarshalText", Method, 2},
    -		{"(*IPAddr).Network", Method, 0},
    -		{"(*IPAddr).String", Method, 0},
    -		{"(*IPConn).Close", Method, 0},
    -		{"(*IPConn).File", Method, 0},
    -		{"(*IPConn).LocalAddr", Method, 0},
    -		{"(*IPConn).Read", Method, 0},
    -		{"(*IPConn).ReadFrom", Method, 0},
    -		{"(*IPConn).ReadFromIP", Method, 0},
    -		{"(*IPConn).ReadMsgIP", Method, 1},
    -		{"(*IPConn).RemoteAddr", Method, 0},
    -		{"(*IPConn).SetDeadline", Method, 0},
    -		{"(*IPConn).SetReadBuffer", Method, 0},
    -		{"(*IPConn).SetReadDeadline", Method, 0},
    -		{"(*IPConn).SetWriteBuffer", Method, 0},
    -		{"(*IPConn).SetWriteDeadline", Method, 0},
    -		{"(*IPConn).SyscallConn", Method, 9},
    -		{"(*IPConn).Write", Method, 0},
    -		{"(*IPConn).WriteMsgIP", Method, 1},
    -		{"(*IPConn).WriteTo", Method, 0},
    -		{"(*IPConn).WriteToIP", Method, 0},
    -		{"(*IPNet).Contains", Method, 0},
    -		{"(*IPNet).Network", Method, 0},
    -		{"(*IPNet).String", Method, 0},
    -		{"(*Interface).Addrs", Method, 0},
    -		{"(*Interface).MulticastAddrs", Method, 0},
    -		{"(*ListenConfig).Listen", Method, 11},
    -		{"(*ListenConfig).ListenPacket", Method, 11},
    -		{"(*ListenConfig).MultipathTCP", Method, 21},
    -		{"(*ListenConfig).SetMultipathTCP", Method, 21},
    -		{"(*OpError).Error", Method, 0},
    -		{"(*OpError).Temporary", Method, 0},
    -		{"(*OpError).Timeout", Method, 0},
    -		{"(*OpError).Unwrap", Method, 13},
    -		{"(*ParseError).Error", Method, 0},
    -		{"(*ParseError).Temporary", Method, 17},
    -		{"(*ParseError).Timeout", Method, 17},
    -		{"(*Resolver).LookupAddr", Method, 8},
    -		{"(*Resolver).LookupCNAME", Method, 8},
    -		{"(*Resolver).LookupHost", Method, 8},
    -		{"(*Resolver).LookupIP", Method, 15},
    -		{"(*Resolver).LookupIPAddr", Method, 8},
    -		{"(*Resolver).LookupMX", Method, 8},
    -		{"(*Resolver).LookupNS", Method, 8},
    -		{"(*Resolver).LookupNetIP", Method, 18},
    -		{"(*Resolver).LookupPort", Method, 8},
    -		{"(*Resolver).LookupSRV", Method, 8},
    -		{"(*Resolver).LookupTXT", Method, 8},
    -		{"(*TCPAddr).AddrPort", Method, 18},
    -		{"(*TCPAddr).Network", Method, 0},
    -		{"(*TCPAddr).String", Method, 0},
    -		{"(*TCPConn).Close", Method, 0},
    -		{"(*TCPConn).CloseRead", Method, 0},
    -		{"(*TCPConn).CloseWrite", Method, 0},
    -		{"(*TCPConn).File", Method, 0},
    -		{"(*TCPConn).LocalAddr", Method, 0},
    -		{"(*TCPConn).MultipathTCP", Method, 21},
    -		{"(*TCPConn).Read", Method, 0},
    -		{"(*TCPConn).ReadFrom", Method, 0},
    -		{"(*TCPConn).RemoteAddr", Method, 0},
    -		{"(*TCPConn).SetDeadline", Method, 0},
    -		{"(*TCPConn).SetKeepAlive", Method, 0},
    -		{"(*TCPConn).SetKeepAliveConfig", Method, 23},
    -		{"(*TCPConn).SetKeepAlivePeriod", Method, 2},
    -		{"(*TCPConn).SetLinger", Method, 0},
    -		{"(*TCPConn).SetNoDelay", Method, 0},
    -		{"(*TCPConn).SetReadBuffer", Method, 0},
    -		{"(*TCPConn).SetReadDeadline", Method, 0},
    -		{"(*TCPConn).SetWriteBuffer", Method, 0},
    -		{"(*TCPConn).SetWriteDeadline", Method, 0},
    -		{"(*TCPConn).SyscallConn", Method, 9},
    -		{"(*TCPConn).Write", Method, 0},
    -		{"(*TCPConn).WriteTo", Method, 22},
    -		{"(*TCPListener).Accept", Method, 0},
    -		{"(*TCPListener).AcceptTCP", Method, 0},
    -		{"(*TCPListener).Addr", Method, 0},
    -		{"(*TCPListener).Close", Method, 0},
    -		{"(*TCPListener).File", Method, 0},
    -		{"(*TCPListener).SetDeadline", Method, 0},
    -		{"(*TCPListener).SyscallConn", Method, 10},
    -		{"(*UDPAddr).AddrPort", Method, 18},
    -		{"(*UDPAddr).Network", Method, 0},
    -		{"(*UDPAddr).String", Method, 0},
    -		{"(*UDPConn).Close", Method, 0},
    -		{"(*UDPConn).File", Method, 0},
    -		{"(*UDPConn).LocalAddr", Method, 0},
    -		{"(*UDPConn).Read", Method, 0},
    -		{"(*UDPConn).ReadFrom", Method, 0},
    -		{"(*UDPConn).ReadFromUDP", Method, 0},
    -		{"(*UDPConn).ReadFromUDPAddrPort", Method, 18},
    -		{"(*UDPConn).ReadMsgUDP", Method, 1},
    -		{"(*UDPConn).ReadMsgUDPAddrPort", Method, 18},
    -		{"(*UDPConn).RemoteAddr", Method, 0},
    -		{"(*UDPConn).SetDeadline", Method, 0},
    -		{"(*UDPConn).SetReadBuffer", Method, 0},
    -		{"(*UDPConn).SetReadDeadline", Method, 0},
    -		{"(*UDPConn).SetWriteBuffer", Method, 0},
    -		{"(*UDPConn).SetWriteDeadline", Method, 0},
    -		{"(*UDPConn).SyscallConn", Method, 9},
    -		{"(*UDPConn).Write", Method, 0},
    -		{"(*UDPConn).WriteMsgUDP", Method, 1},
    -		{"(*UDPConn).WriteMsgUDPAddrPort", Method, 18},
    -		{"(*UDPConn).WriteTo", Method, 0},
    -		{"(*UDPConn).WriteToUDP", Method, 0},
    -		{"(*UDPConn).WriteToUDPAddrPort", Method, 18},
    -		{"(*UnixAddr).Network", Method, 0},
    -		{"(*UnixAddr).String", Method, 0},
    -		{"(*UnixConn).Close", Method, 0},
    -		{"(*UnixConn).CloseRead", Method, 1},
    -		{"(*UnixConn).CloseWrite", Method, 1},
    -		{"(*UnixConn).File", Method, 0},
    -		{"(*UnixConn).LocalAddr", Method, 0},
    -		{"(*UnixConn).Read", Method, 0},
    -		{"(*UnixConn).ReadFrom", Method, 0},
    -		{"(*UnixConn).ReadFromUnix", Method, 0},
    -		{"(*UnixConn).ReadMsgUnix", Method, 0},
    -		{"(*UnixConn).RemoteAddr", Method, 0},
    -		{"(*UnixConn).SetDeadline", Method, 0},
    -		{"(*UnixConn).SetReadBuffer", Method, 0},
    -		{"(*UnixConn).SetReadDeadline", Method, 0},
    -		{"(*UnixConn).SetWriteBuffer", Method, 0},
    -		{"(*UnixConn).SetWriteDeadline", Method, 0},
    -		{"(*UnixConn).SyscallConn", Method, 9},
    -		{"(*UnixConn).Write", Method, 0},
    -		{"(*UnixConn).WriteMsgUnix", Method, 0},
    -		{"(*UnixConn).WriteTo", Method, 0},
    -		{"(*UnixConn).WriteToUnix", Method, 0},
    -		{"(*UnixListener).Accept", Method, 0},
    -		{"(*UnixListener).AcceptUnix", Method, 0},
    -		{"(*UnixListener).Addr", Method, 0},
    -		{"(*UnixListener).Close", Method, 0},
    -		{"(*UnixListener).File", Method, 0},
    -		{"(*UnixListener).SetDeadline", Method, 0},
    -		{"(*UnixListener).SetUnlinkOnClose", Method, 8},
    -		{"(*UnixListener).SyscallConn", Method, 10},
    -		{"(Flags).String", Method, 0},
    -		{"(HardwareAddr).String", Method, 0},
    -		{"(IP).DefaultMask", Method, 0},
    -		{"(IP).Equal", Method, 0},
    -		{"(IP).IsGlobalUnicast", Method, 0},
    -		{"(IP).IsInterfaceLocalMulticast", Method, 0},
    -		{"(IP).IsLinkLocalMulticast", Method, 0},
    -		{"(IP).IsLinkLocalUnicast", Method, 0},
    -		{"(IP).IsLoopback", Method, 0},
    -		{"(IP).IsMulticast", Method, 0},
    -		{"(IP).IsPrivate", Method, 17},
    -		{"(IP).IsUnspecified", Method, 0},
    -		{"(IP).MarshalText", Method, 2},
    -		{"(IP).Mask", Method, 0},
    -		{"(IP).String", Method, 0},
    -		{"(IP).To16", Method, 0},
    -		{"(IP).To4", Method, 0},
    -		{"(IPMask).Size", Method, 0},
    -		{"(IPMask).String", Method, 0},
    -		{"(InvalidAddrError).Error", Method, 0},
    -		{"(InvalidAddrError).Temporary", Method, 0},
    -		{"(InvalidAddrError).Timeout", Method, 0},
    -		{"(UnknownNetworkError).Error", Method, 0},
    -		{"(UnknownNetworkError).Temporary", Method, 0},
    -		{"(UnknownNetworkError).Timeout", Method, 0},
    -		{"Addr", Type, 0},
    -		{"AddrError", Type, 0},
    -		{"AddrError.Addr", Field, 0},
    -		{"AddrError.Err", Field, 0},
    -		{"Buffers", Type, 8},
    -		{"CIDRMask", Func, 0},
    -		{"Conn", Type, 0},
    -		{"DNSConfigError", Type, 0},
    -		{"DNSConfigError.Err", Field, 0},
    -		{"DNSError", Type, 0},
    -		{"DNSError.Err", Field, 0},
    -		{"DNSError.IsNotFound", Field, 13},
    -		{"DNSError.IsTemporary", Field, 6},
    -		{"DNSError.IsTimeout", Field, 0},
    -		{"DNSError.Name", Field, 0},
    -		{"DNSError.Server", Field, 0},
    -		{"DNSError.UnwrapErr", Field, 23},
    -		{"DefaultResolver", Var, 8},
    -		{"Dial", Func, 0},
    -		{"DialIP", Func, 0},
    -		{"DialTCP", Func, 0},
    -		{"DialTimeout", Func, 0},
    -		{"DialUDP", Func, 0},
    -		{"DialUnix", Func, 0},
    -		{"Dialer", Type, 1},
    -		{"Dialer.Cancel", Field, 6},
    -		{"Dialer.Control", Field, 11},
    -		{"Dialer.ControlContext", Field, 20},
    -		{"Dialer.Deadline", Field, 1},
    -		{"Dialer.DualStack", Field, 2},
    -		{"Dialer.FallbackDelay", Field, 5},
    -		{"Dialer.KeepAlive", Field, 3},
    -		{"Dialer.KeepAliveConfig", Field, 23},
    -		{"Dialer.LocalAddr", Field, 1},
    -		{"Dialer.Resolver", Field, 8},
    -		{"Dialer.Timeout", Field, 1},
    -		{"ErrClosed", Var, 16},
    -		{"ErrWriteToConnected", Var, 0},
    -		{"Error", Type, 0},
    -		{"FileConn", Func, 0},
    -		{"FileListener", Func, 0},
    -		{"FilePacketConn", Func, 0},
    -		{"FlagBroadcast", Const, 0},
    -		{"FlagLoopback", Const, 0},
    -		{"FlagMulticast", Const, 0},
    -		{"FlagPointToPoint", Const, 0},
    -		{"FlagRunning", Const, 20},
    -		{"FlagUp", Const, 0},
    -		{"Flags", Type, 0},
    -		{"HardwareAddr", Type, 0},
    -		{"IP", Type, 0},
    -		{"IPAddr", Type, 0},
    -		{"IPAddr.IP", Field, 0},
    -		{"IPAddr.Zone", Field, 1},
    -		{"IPConn", Type, 0},
    -		{"IPMask", Type, 0},
    -		{"IPNet", Type, 0},
    -		{"IPNet.IP", Field, 0},
    -		{"IPNet.Mask", Field, 0},
    -		{"IPv4", Func, 0},
    -		{"IPv4Mask", Func, 0},
    -		{"IPv4allrouter", Var, 0},
    -		{"IPv4allsys", Var, 0},
    -		{"IPv4bcast", Var, 0},
    -		{"IPv4len", Const, 0},
    -		{"IPv4zero", Var, 0},
    -		{"IPv6interfacelocalallnodes", Var, 0},
    -		{"IPv6len", Const, 0},
    -		{"IPv6linklocalallnodes", Var, 0},
    -		{"IPv6linklocalallrouters", Var, 0},
    -		{"IPv6loopback", Var, 0},
    -		{"IPv6unspecified", Var, 0},
    -		{"IPv6zero", Var, 0},
    -		{"Interface", Type, 0},
    -		{"Interface.Flags", Field, 0},
    -		{"Interface.HardwareAddr", Field, 0},
    -		{"Interface.Index", Field, 0},
    -		{"Interface.MTU", Field, 0},
    -		{"Interface.Name", Field, 0},
    -		{"InterfaceAddrs", Func, 0},
    -		{"InterfaceByIndex", Func, 0},
    -		{"InterfaceByName", Func, 0},
    -		{"Interfaces", Func, 0},
    -		{"InvalidAddrError", Type, 0},
    -		{"JoinHostPort", Func, 0},
    -		{"KeepAliveConfig", Type, 23},
    -		{"KeepAliveConfig.Count", Field, 23},
    -		{"KeepAliveConfig.Enable", Field, 23},
    -		{"KeepAliveConfig.Idle", Field, 23},
    -		{"KeepAliveConfig.Interval", Field, 23},
    -		{"Listen", Func, 0},
    -		{"ListenConfig", Type, 11},
    -		{"ListenConfig.Control", Field, 11},
    -		{"ListenConfig.KeepAlive", Field, 13},
    -		{"ListenConfig.KeepAliveConfig", Field, 23},
    -		{"ListenIP", Func, 0},
    -		{"ListenMulticastUDP", Func, 0},
    -		{"ListenPacket", Func, 0},
    -		{"ListenTCP", Func, 0},
    -		{"ListenUDP", Func, 0},
    -		{"ListenUnix", Func, 0},
    -		{"ListenUnixgram", Func, 0},
    -		{"Listener", Type, 0},
    -		{"LookupAddr", Func, 0},
    -		{"LookupCNAME", Func, 0},
    -		{"LookupHost", Func, 0},
    -		{"LookupIP", Func, 0},
    -		{"LookupMX", Func, 0},
    -		{"LookupNS", Func, 1},
    -		{"LookupPort", Func, 0},
    -		{"LookupSRV", Func, 0},
    -		{"LookupTXT", Func, 0},
    -		{"MX", Type, 0},
    -		{"MX.Host", Field, 0},
    -		{"MX.Pref", Field, 0},
    -		{"NS", Type, 1},
    -		{"NS.Host", Field, 1},
    -		{"OpError", Type, 0},
    -		{"OpError.Addr", Field, 0},
    -		{"OpError.Err", Field, 0},
    -		{"OpError.Net", Field, 0},
    -		{"OpError.Op", Field, 0},
    -		{"OpError.Source", Field, 5},
    -		{"PacketConn", Type, 0},
    -		{"ParseCIDR", Func, 0},
    -		{"ParseError", Type, 0},
    -		{"ParseError.Text", Field, 0},
    -		{"ParseError.Type", Field, 0},
    -		{"ParseIP", Func, 0},
    -		{"ParseMAC", Func, 0},
    -		{"Pipe", Func, 0},
    -		{"ResolveIPAddr", Func, 0},
    -		{"ResolveTCPAddr", Func, 0},
    -		{"ResolveUDPAddr", Func, 0},
    -		{"ResolveUnixAddr", Func, 0},
    -		{"Resolver", Type, 8},
    -		{"Resolver.Dial", Field, 9},
    -		{"Resolver.PreferGo", Field, 8},
    -		{"Resolver.StrictErrors", Field, 9},
    -		{"SRV", Type, 0},
    -		{"SRV.Port", Field, 0},
    -		{"SRV.Priority", Field, 0},
    -		{"SRV.Target", Field, 0},
    -		{"SRV.Weight", Field, 0},
    -		{"SplitHostPort", Func, 0},
    -		{"TCPAddr", Type, 0},
    -		{"TCPAddr.IP", Field, 0},
    -		{"TCPAddr.Port", Field, 0},
    -		{"TCPAddr.Zone", Field, 1},
    -		{"TCPAddrFromAddrPort", Func, 18},
    -		{"TCPConn", Type, 0},
    -		{"TCPListener", Type, 0},
    -		{"UDPAddr", Type, 0},
    -		{"UDPAddr.IP", Field, 0},
    -		{"UDPAddr.Port", Field, 0},
    -		{"UDPAddr.Zone", Field, 1},
    -		{"UDPAddrFromAddrPort", Func, 18},
    -		{"UDPConn", Type, 0},
    -		{"UnixAddr", Type, 0},
    -		{"UnixAddr.Name", Field, 0},
    -		{"UnixAddr.Net", Field, 0},
    -		{"UnixConn", Type, 0},
    -		{"UnixListener", Type, 0},
    -		{"UnknownNetworkError", Type, 0},
    +		{"(*AddrError).Error", Method, 0, ""},
    +		{"(*AddrError).Temporary", Method, 0, ""},
    +		{"(*AddrError).Timeout", Method, 0, ""},
    +		{"(*Buffers).Read", Method, 8, ""},
    +		{"(*Buffers).WriteTo", Method, 8, ""},
    +		{"(*DNSConfigError).Error", Method, 0, ""},
    +		{"(*DNSConfigError).Temporary", Method, 0, ""},
    +		{"(*DNSConfigError).Timeout", Method, 0, ""},
    +		{"(*DNSConfigError).Unwrap", Method, 13, ""},
    +		{"(*DNSError).Error", Method, 0, ""},
    +		{"(*DNSError).Temporary", Method, 0, ""},
    +		{"(*DNSError).Timeout", Method, 0, ""},
    +		{"(*DNSError).Unwrap", Method, 23, ""},
    +		{"(*Dialer).Dial", Method, 1, ""},
    +		{"(*Dialer).DialContext", Method, 7, ""},
    +		{"(*Dialer).MultipathTCP", Method, 21, ""},
    +		{"(*Dialer).SetMultipathTCP", Method, 21, ""},
    +		{"(*IP).UnmarshalText", Method, 2, ""},
    +		{"(*IPAddr).Network", Method, 0, ""},
    +		{"(*IPAddr).String", Method, 0, ""},
    +		{"(*IPConn).Close", Method, 0, ""},
    +		{"(*IPConn).File", Method, 0, ""},
    +		{"(*IPConn).LocalAddr", Method, 0, ""},
    +		{"(*IPConn).Read", Method, 0, ""},
    +		{"(*IPConn).ReadFrom", Method, 0, ""},
    +		{"(*IPConn).ReadFromIP", Method, 0, ""},
    +		{"(*IPConn).ReadMsgIP", Method, 1, ""},
    +		{"(*IPConn).RemoteAddr", Method, 0, ""},
    +		{"(*IPConn).SetDeadline", Method, 0, ""},
    +		{"(*IPConn).SetReadBuffer", Method, 0, ""},
    +		{"(*IPConn).SetReadDeadline", Method, 0, ""},
    +		{"(*IPConn).SetWriteBuffer", Method, 0, ""},
    +		{"(*IPConn).SetWriteDeadline", Method, 0, ""},
    +		{"(*IPConn).SyscallConn", Method, 9, ""},
    +		{"(*IPConn).Write", Method, 0, ""},
    +		{"(*IPConn).WriteMsgIP", Method, 1, ""},
    +		{"(*IPConn).WriteTo", Method, 0, ""},
    +		{"(*IPConn).WriteToIP", Method, 0, ""},
    +		{"(*IPNet).Contains", Method, 0, ""},
    +		{"(*IPNet).Network", Method, 0, ""},
    +		{"(*IPNet).String", Method, 0, ""},
    +		{"(*Interface).Addrs", Method, 0, ""},
    +		{"(*Interface).MulticastAddrs", Method, 0, ""},
    +		{"(*ListenConfig).Listen", Method, 11, ""},
    +		{"(*ListenConfig).ListenPacket", Method, 11, ""},
    +		{"(*ListenConfig).MultipathTCP", Method, 21, ""},
    +		{"(*ListenConfig).SetMultipathTCP", Method, 21, ""},
    +		{"(*OpError).Error", Method, 0, ""},
    +		{"(*OpError).Temporary", Method, 0, ""},
    +		{"(*OpError).Timeout", Method, 0, ""},
    +		{"(*OpError).Unwrap", Method, 13, ""},
    +		{"(*ParseError).Error", Method, 0, ""},
    +		{"(*ParseError).Temporary", Method, 17, ""},
    +		{"(*ParseError).Timeout", Method, 17, ""},
    +		{"(*Resolver).LookupAddr", Method, 8, ""},
    +		{"(*Resolver).LookupCNAME", Method, 8, ""},
    +		{"(*Resolver).LookupHost", Method, 8, ""},
    +		{"(*Resolver).LookupIP", Method, 15, ""},
    +		{"(*Resolver).LookupIPAddr", Method, 8, ""},
    +		{"(*Resolver).LookupMX", Method, 8, ""},
    +		{"(*Resolver).LookupNS", Method, 8, ""},
    +		{"(*Resolver).LookupNetIP", Method, 18, ""},
    +		{"(*Resolver).LookupPort", Method, 8, ""},
    +		{"(*Resolver).LookupSRV", Method, 8, ""},
    +		{"(*Resolver).LookupTXT", Method, 8, ""},
    +		{"(*TCPAddr).AddrPort", Method, 18, ""},
    +		{"(*TCPAddr).Network", Method, 0, ""},
    +		{"(*TCPAddr).String", Method, 0, ""},
    +		{"(*TCPConn).Close", Method, 0, ""},
    +		{"(*TCPConn).CloseRead", Method, 0, ""},
    +		{"(*TCPConn).CloseWrite", Method, 0, ""},
    +		{"(*TCPConn).File", Method, 0, ""},
    +		{"(*TCPConn).LocalAddr", Method, 0, ""},
    +		{"(*TCPConn).MultipathTCP", Method, 21, ""},
    +		{"(*TCPConn).Read", Method, 0, ""},
    +		{"(*TCPConn).ReadFrom", Method, 0, ""},
    +		{"(*TCPConn).RemoteAddr", Method, 0, ""},
    +		{"(*TCPConn).SetDeadline", Method, 0, ""},
    +		{"(*TCPConn).SetKeepAlive", Method, 0, ""},
    +		{"(*TCPConn).SetKeepAliveConfig", Method, 23, ""},
    +		{"(*TCPConn).SetKeepAlivePeriod", Method, 2, ""},
    +		{"(*TCPConn).SetLinger", Method, 0, ""},
    +		{"(*TCPConn).SetNoDelay", Method, 0, ""},
    +		{"(*TCPConn).SetReadBuffer", Method, 0, ""},
    +		{"(*TCPConn).SetReadDeadline", Method, 0, ""},
    +		{"(*TCPConn).SetWriteBuffer", Method, 0, ""},
    +		{"(*TCPConn).SetWriteDeadline", Method, 0, ""},
    +		{"(*TCPConn).SyscallConn", Method, 9, ""},
    +		{"(*TCPConn).Write", Method, 0, ""},
    +		{"(*TCPConn).WriteTo", Method, 22, ""},
    +		{"(*TCPListener).Accept", Method, 0, ""},
    +		{"(*TCPListener).AcceptTCP", Method, 0, ""},
    +		{"(*TCPListener).Addr", Method, 0, ""},
    +		{"(*TCPListener).Close", Method, 0, ""},
    +		{"(*TCPListener).File", Method, 0, ""},
    +		{"(*TCPListener).SetDeadline", Method, 0, ""},
    +		{"(*TCPListener).SyscallConn", Method, 10, ""},
    +		{"(*UDPAddr).AddrPort", Method, 18, ""},
    +		{"(*UDPAddr).Network", Method, 0, ""},
    +		{"(*UDPAddr).String", Method, 0, ""},
    +		{"(*UDPConn).Close", Method, 0, ""},
    +		{"(*UDPConn).File", Method, 0, ""},
    +		{"(*UDPConn).LocalAddr", Method, 0, ""},
    +		{"(*UDPConn).Read", Method, 0, ""},
    +		{"(*UDPConn).ReadFrom", Method, 0, ""},
    +		{"(*UDPConn).ReadFromUDP", Method, 0, ""},
    +		{"(*UDPConn).ReadFromUDPAddrPort", Method, 18, ""},
    +		{"(*UDPConn).ReadMsgUDP", Method, 1, ""},
    +		{"(*UDPConn).ReadMsgUDPAddrPort", Method, 18, ""},
    +		{"(*UDPConn).RemoteAddr", Method, 0, ""},
    +		{"(*UDPConn).SetDeadline", Method, 0, ""},
    +		{"(*UDPConn).SetReadBuffer", Method, 0, ""},
    +		{"(*UDPConn).SetReadDeadline", Method, 0, ""},
    +		{"(*UDPConn).SetWriteBuffer", Method, 0, ""},
    +		{"(*UDPConn).SetWriteDeadline", Method, 0, ""},
    +		{"(*UDPConn).SyscallConn", Method, 9, ""},
    +		{"(*UDPConn).Write", Method, 0, ""},
    +		{"(*UDPConn).WriteMsgUDP", Method, 1, ""},
    +		{"(*UDPConn).WriteMsgUDPAddrPort", Method, 18, ""},
    +		{"(*UDPConn).WriteTo", Method, 0, ""},
    +		{"(*UDPConn).WriteToUDP", Method, 0, ""},
    +		{"(*UDPConn).WriteToUDPAddrPort", Method, 18, ""},
    +		{"(*UnixAddr).Network", Method, 0, ""},
    +		{"(*UnixAddr).String", Method, 0, ""},
    +		{"(*UnixConn).Close", Method, 0, ""},
    +		{"(*UnixConn).CloseRead", Method, 1, ""},
    +		{"(*UnixConn).CloseWrite", Method, 1, ""},
    +		{"(*UnixConn).File", Method, 0, ""},
    +		{"(*UnixConn).LocalAddr", Method, 0, ""},
    +		{"(*UnixConn).Read", Method, 0, ""},
    +		{"(*UnixConn).ReadFrom", Method, 0, ""},
    +		{"(*UnixConn).ReadFromUnix", Method, 0, ""},
    +		{"(*UnixConn).ReadMsgUnix", Method, 0, ""},
    +		{"(*UnixConn).RemoteAddr", Method, 0, ""},
    +		{"(*UnixConn).SetDeadline", Method, 0, ""},
    +		{"(*UnixConn).SetReadBuffer", Method, 0, ""},
    +		{"(*UnixConn).SetReadDeadline", Method, 0, ""},
    +		{"(*UnixConn).SetWriteBuffer", Method, 0, ""},
    +		{"(*UnixConn).SetWriteDeadline", Method, 0, ""},
    +		{"(*UnixConn).SyscallConn", Method, 9, ""},
    +		{"(*UnixConn).Write", Method, 0, ""},
    +		{"(*UnixConn).WriteMsgUnix", Method, 0, ""},
    +		{"(*UnixConn).WriteTo", Method, 0, ""},
    +		{"(*UnixConn).WriteToUnix", Method, 0, ""},
    +		{"(*UnixListener).Accept", Method, 0, ""},
    +		{"(*UnixListener).AcceptUnix", Method, 0, ""},
    +		{"(*UnixListener).Addr", Method, 0, ""},
    +		{"(*UnixListener).Close", Method, 0, ""},
    +		{"(*UnixListener).File", Method, 0, ""},
    +		{"(*UnixListener).SetDeadline", Method, 0, ""},
    +		{"(*UnixListener).SetUnlinkOnClose", Method, 8, ""},
    +		{"(*UnixListener).SyscallConn", Method, 10, ""},
    +		{"(Flags).String", Method, 0, ""},
    +		{"(HardwareAddr).String", Method, 0, ""},
    +		{"(IP).AppendText", Method, 24, ""},
    +		{"(IP).DefaultMask", Method, 0, ""},
    +		{"(IP).Equal", Method, 0, ""},
    +		{"(IP).IsGlobalUnicast", Method, 0, ""},
    +		{"(IP).IsInterfaceLocalMulticast", Method, 0, ""},
    +		{"(IP).IsLinkLocalMulticast", Method, 0, ""},
    +		{"(IP).IsLinkLocalUnicast", Method, 0, ""},
    +		{"(IP).IsLoopback", Method, 0, ""},
    +		{"(IP).IsMulticast", Method, 0, ""},
    +		{"(IP).IsPrivate", Method, 17, ""},
    +		{"(IP).IsUnspecified", Method, 0, ""},
    +		{"(IP).MarshalText", Method, 2, ""},
    +		{"(IP).Mask", Method, 0, ""},
    +		{"(IP).String", Method, 0, ""},
    +		{"(IP).To16", Method, 0, ""},
    +		{"(IP).To4", Method, 0, ""},
    +		{"(IPMask).Size", Method, 0, ""},
    +		{"(IPMask).String", Method, 0, ""},
    +		{"(InvalidAddrError).Error", Method, 0, ""},
    +		{"(InvalidAddrError).Temporary", Method, 0, ""},
    +		{"(InvalidAddrError).Timeout", Method, 0, ""},
    +		{"(UnknownNetworkError).Error", Method, 0, ""},
    +		{"(UnknownNetworkError).Temporary", Method, 0, ""},
    +		{"(UnknownNetworkError).Timeout", Method, 0, ""},
    +		{"Addr", Type, 0, ""},
    +		{"AddrError", Type, 0, ""},
    +		{"AddrError.Addr", Field, 0, ""},
    +		{"AddrError.Err", Field, 0, ""},
    +		{"Buffers", Type, 8, ""},
    +		{"CIDRMask", Func, 0, "func(ones int, bits int) IPMask"},
    +		{"Conn", Type, 0, ""},
    +		{"DNSConfigError", Type, 0, ""},
    +		{"DNSConfigError.Err", Field, 0, ""},
    +		{"DNSError", Type, 0, ""},
    +		{"DNSError.Err", Field, 0, ""},
    +		{"DNSError.IsNotFound", Field, 13, ""},
    +		{"DNSError.IsTemporary", Field, 6, ""},
    +		{"DNSError.IsTimeout", Field, 0, ""},
    +		{"DNSError.Name", Field, 0, ""},
    +		{"DNSError.Server", Field, 0, ""},
    +		{"DNSError.UnwrapErr", Field, 23, ""},
    +		{"DefaultResolver", Var, 8, ""},
    +		{"Dial", Func, 0, "func(network string, address string) (Conn, error)"},
    +		{"DialIP", Func, 0, "func(network string, laddr *IPAddr, raddr *IPAddr) (*IPConn, error)"},
    +		{"DialTCP", Func, 0, "func(network string, laddr *TCPAddr, raddr *TCPAddr) (*TCPConn, error)"},
    +		{"DialTimeout", Func, 0, "func(network string, address string, timeout time.Duration) (Conn, error)"},
    +		{"DialUDP", Func, 0, "func(network string, laddr *UDPAddr, raddr *UDPAddr) (*UDPConn, error)"},
    +		{"DialUnix", Func, 0, "func(network string, laddr *UnixAddr, raddr *UnixAddr) (*UnixConn, error)"},
    +		{"Dialer", Type, 1, ""},
    +		{"Dialer.Cancel", Field, 6, ""},
    +		{"Dialer.Control", Field, 11, ""},
    +		{"Dialer.ControlContext", Field, 20, ""},
    +		{"Dialer.Deadline", Field, 1, ""},
    +		{"Dialer.DualStack", Field, 2, ""},
    +		{"Dialer.FallbackDelay", Field, 5, ""},
    +		{"Dialer.KeepAlive", Field, 3, ""},
    +		{"Dialer.KeepAliveConfig", Field, 23, ""},
    +		{"Dialer.LocalAddr", Field, 1, ""},
    +		{"Dialer.Resolver", Field, 8, ""},
    +		{"Dialer.Timeout", Field, 1, ""},
    +		{"ErrClosed", Var, 16, ""},
    +		{"ErrWriteToConnected", Var, 0, ""},
    +		{"Error", Type, 0, ""},
    +		{"FileConn", Func, 0, "func(f *os.File) (c Conn, err error)"},
    +		{"FileListener", Func, 0, "func(f *os.File) (ln Listener, err error)"},
    +		{"FilePacketConn", Func, 0, "func(f *os.File) (c PacketConn, err error)"},
    +		{"FlagBroadcast", Const, 0, ""},
    +		{"FlagLoopback", Const, 0, ""},
    +		{"FlagMulticast", Const, 0, ""},
    +		{"FlagPointToPoint", Const, 0, ""},
    +		{"FlagRunning", Const, 20, ""},
    +		{"FlagUp", Const, 0, ""},
    +		{"Flags", Type, 0, ""},
    +		{"HardwareAddr", Type, 0, ""},
    +		{"IP", Type, 0, ""},
    +		{"IPAddr", Type, 0, ""},
    +		{"IPAddr.IP", Field, 0, ""},
    +		{"IPAddr.Zone", Field, 1, ""},
    +		{"IPConn", Type, 0, ""},
    +		{"IPMask", Type, 0, ""},
    +		{"IPNet", Type, 0, ""},
    +		{"IPNet.IP", Field, 0, ""},
    +		{"IPNet.Mask", Field, 0, ""},
    +		{"IPv4", Func, 0, "func(a byte, b byte, c byte, d byte) IP"},
    +		{"IPv4Mask", Func, 0, "func(a byte, b byte, c byte, d byte) IPMask"},
    +		{"IPv4allrouter", Var, 0, ""},
    +		{"IPv4allsys", Var, 0, ""},
    +		{"IPv4bcast", Var, 0, ""},
    +		{"IPv4len", Const, 0, ""},
    +		{"IPv4zero", Var, 0, ""},
    +		{"IPv6interfacelocalallnodes", Var, 0, ""},
    +		{"IPv6len", Const, 0, ""},
    +		{"IPv6linklocalallnodes", Var, 0, ""},
    +		{"IPv6linklocalallrouters", Var, 0, ""},
    +		{"IPv6loopback", Var, 0, ""},
    +		{"IPv6unspecified", Var, 0, ""},
    +		{"IPv6zero", Var, 0, ""},
    +		{"Interface", Type, 0, ""},
    +		{"Interface.Flags", Field, 0, ""},
    +		{"Interface.HardwareAddr", Field, 0, ""},
    +		{"Interface.Index", Field, 0, ""},
    +		{"Interface.MTU", Field, 0, ""},
    +		{"Interface.Name", Field, 0, ""},
    +		{"InterfaceAddrs", Func, 0, "func() ([]Addr, error)"},
    +		{"InterfaceByIndex", Func, 0, "func(index int) (*Interface, error)"},
    +		{"InterfaceByName", Func, 0, "func(name string) (*Interface, error)"},
    +		{"Interfaces", Func, 0, "func() ([]Interface, error)"},
    +		{"InvalidAddrError", Type, 0, ""},
    +		{"JoinHostPort", Func, 0, "func(host string, port string) string"},
    +		{"KeepAliveConfig", Type, 23, ""},
    +		{"KeepAliveConfig.Count", Field, 23, ""},
    +		{"KeepAliveConfig.Enable", Field, 23, ""},
    +		{"KeepAliveConfig.Idle", Field, 23, ""},
    +		{"KeepAliveConfig.Interval", Field, 23, ""},
    +		{"Listen", Func, 0, "func(network string, address string) (Listener, error)"},
    +		{"ListenConfig", Type, 11, ""},
    +		{"ListenConfig.Control", Field, 11, ""},
    +		{"ListenConfig.KeepAlive", Field, 13, ""},
    +		{"ListenConfig.KeepAliveConfig", Field, 23, ""},
    +		{"ListenIP", Func, 0, "func(network string, laddr *IPAddr) (*IPConn, error)"},
    +		{"ListenMulticastUDP", Func, 0, "func(network string, ifi *Interface, gaddr *UDPAddr) (*UDPConn, error)"},
    +		{"ListenPacket", Func, 0, "func(network string, address string) (PacketConn, error)"},
    +		{"ListenTCP", Func, 0, "func(network string, laddr *TCPAddr) (*TCPListener, error)"},
    +		{"ListenUDP", Func, 0, "func(network string, laddr *UDPAddr) (*UDPConn, error)"},
    +		{"ListenUnix", Func, 0, "func(network string, laddr *UnixAddr) (*UnixListener, error)"},
    +		{"ListenUnixgram", Func, 0, "func(network string, laddr *UnixAddr) (*UnixConn, error)"},
    +		{"Listener", Type, 0, ""},
    +		{"LookupAddr", Func, 0, "func(addr string) (names []string, err error)"},
    +		{"LookupCNAME", Func, 0, "func(host string) (cname string, err error)"},
    +		{"LookupHost", Func, 0, "func(host string) (addrs []string, err error)"},
    +		{"LookupIP", Func, 0, "func(host string) ([]IP, error)"},
    +		{"LookupMX", Func, 0, "func(name string) ([]*MX, error)"},
    +		{"LookupNS", Func, 1, "func(name string) ([]*NS, error)"},
    +		{"LookupPort", Func, 0, "func(network string, service string) (port int, err error)"},
    +		{"LookupSRV", Func, 0, "func(service string, proto string, name string) (cname string, addrs []*SRV, err error)"},
    +		{"LookupTXT", Func, 0, "func(name string) ([]string, error)"},
    +		{"MX", Type, 0, ""},
    +		{"MX.Host", Field, 0, ""},
    +		{"MX.Pref", Field, 0, ""},
    +		{"NS", Type, 1, ""},
    +		{"NS.Host", Field, 1, ""},
    +		{"OpError", Type, 0, ""},
    +		{"OpError.Addr", Field, 0, ""},
    +		{"OpError.Err", Field, 0, ""},
    +		{"OpError.Net", Field, 0, ""},
    +		{"OpError.Op", Field, 0, ""},
    +		{"OpError.Source", Field, 5, ""},
    +		{"PacketConn", Type, 0, ""},
    +		{"ParseCIDR", Func, 0, "func(s string) (IP, *IPNet, error)"},
    +		{"ParseError", Type, 0, ""},
    +		{"ParseError.Text", Field, 0, ""},
    +		{"ParseError.Type", Field, 0, ""},
    +		{"ParseIP", Func, 0, "func(s string) IP"},
    +		{"ParseMAC", Func, 0, "func(s string) (hw HardwareAddr, err error)"},
    +		{"Pipe", Func, 0, "func() (Conn, Conn)"},
    +		{"ResolveIPAddr", Func, 0, "func(network string, address string) (*IPAddr, error)"},
    +		{"ResolveTCPAddr", Func, 0, "func(network string, address string) (*TCPAddr, error)"},
    +		{"ResolveUDPAddr", Func, 0, "func(network string, address string) (*UDPAddr, error)"},
    +		{"ResolveUnixAddr", Func, 0, "func(network string, address string) (*UnixAddr, error)"},
    +		{"Resolver", Type, 8, ""},
    +		{"Resolver.Dial", Field, 9, ""},
    +		{"Resolver.PreferGo", Field, 8, ""},
    +		{"Resolver.StrictErrors", Field, 9, ""},
    +		{"SRV", Type, 0, ""},
    +		{"SRV.Port", Field, 0, ""},
    +		{"SRV.Priority", Field, 0, ""},
    +		{"SRV.Target", Field, 0, ""},
    +		{"SRV.Weight", Field, 0, ""},
    +		{"SplitHostPort", Func, 0, "func(hostport string) (host string, port string, err error)"},
    +		{"TCPAddr", Type, 0, ""},
    +		{"TCPAddr.IP", Field, 0, ""},
    +		{"TCPAddr.Port", Field, 0, ""},
    +		{"TCPAddr.Zone", Field, 1, ""},
    +		{"TCPAddrFromAddrPort", Func, 18, "func(addr netip.AddrPort) *TCPAddr"},
    +		{"TCPConn", Type, 0, ""},
    +		{"TCPListener", Type, 0, ""},
    +		{"UDPAddr", Type, 0, ""},
    +		{"UDPAddr.IP", Field, 0, ""},
    +		{"UDPAddr.Port", Field, 0, ""},
    +		{"UDPAddr.Zone", Field, 1, ""},
    +		{"UDPAddrFromAddrPort", Func, 18, "func(addr netip.AddrPort) *UDPAddr"},
    +		{"UDPConn", Type, 0, ""},
    +		{"UnixAddr", Type, 0, ""},
    +		{"UnixAddr.Name", Field, 0, ""},
    +		{"UnixAddr.Net", Field, 0, ""},
    +		{"UnixConn", Type, 0, ""},
    +		{"UnixListener", Type, 0, ""},
    +		{"UnknownNetworkError", Type, 0, ""},
     	},
     	"net/http": {
    -		{"(*Client).CloseIdleConnections", Method, 12},
    -		{"(*Client).Do", Method, 0},
    -		{"(*Client).Get", Method, 0},
    -		{"(*Client).Head", Method, 0},
    -		{"(*Client).Post", Method, 0},
    -		{"(*Client).PostForm", Method, 0},
    -		{"(*Cookie).String", Method, 0},
    -		{"(*Cookie).Valid", Method, 18},
    -		{"(*MaxBytesError).Error", Method, 19},
    -		{"(*ProtocolError).Error", Method, 0},
    -		{"(*ProtocolError).Is", Method, 21},
    -		{"(*Request).AddCookie", Method, 0},
    -		{"(*Request).BasicAuth", Method, 4},
    -		{"(*Request).Clone", Method, 13},
    -		{"(*Request).Context", Method, 7},
    -		{"(*Request).Cookie", Method, 0},
    -		{"(*Request).Cookies", Method, 0},
    -		{"(*Request).CookiesNamed", Method, 23},
    -		{"(*Request).FormFile", Method, 0},
    -		{"(*Request).FormValue", Method, 0},
    -		{"(*Request).MultipartReader", Method, 0},
    -		{"(*Request).ParseForm", Method, 0},
    -		{"(*Request).ParseMultipartForm", Method, 0},
    -		{"(*Request).PathValue", Method, 22},
    -		{"(*Request).PostFormValue", Method, 1},
    -		{"(*Request).ProtoAtLeast", Method, 0},
    -		{"(*Request).Referer", Method, 0},
    -		{"(*Request).SetBasicAuth", Method, 0},
    -		{"(*Request).SetPathValue", Method, 22},
    -		{"(*Request).UserAgent", Method, 0},
    -		{"(*Request).WithContext", Method, 7},
    -		{"(*Request).Write", Method, 0},
    -		{"(*Request).WriteProxy", Method, 0},
    -		{"(*Response).Cookies", Method, 0},
    -		{"(*Response).Location", Method, 0},
    -		{"(*Response).ProtoAtLeast", Method, 0},
    -		{"(*Response).Write", Method, 0},
    -		{"(*ResponseController).EnableFullDuplex", Method, 21},
    -		{"(*ResponseController).Flush", Method, 20},
    -		{"(*ResponseController).Hijack", Method, 20},
    -		{"(*ResponseController).SetReadDeadline", Method, 20},
    -		{"(*ResponseController).SetWriteDeadline", Method, 20},
    -		{"(*ServeMux).Handle", Method, 0},
    -		{"(*ServeMux).HandleFunc", Method, 0},
    -		{"(*ServeMux).Handler", Method, 1},
    -		{"(*ServeMux).ServeHTTP", Method, 0},
    -		{"(*Server).Close", Method, 8},
    -		{"(*Server).ListenAndServe", Method, 0},
    -		{"(*Server).ListenAndServeTLS", Method, 0},
    -		{"(*Server).RegisterOnShutdown", Method, 9},
    -		{"(*Server).Serve", Method, 0},
    -		{"(*Server).ServeTLS", Method, 9},
    -		{"(*Server).SetKeepAlivesEnabled", Method, 3},
    -		{"(*Server).Shutdown", Method, 8},
    -		{"(*Transport).CancelRequest", Method, 1},
    -		{"(*Transport).Clone", Method, 13},
    -		{"(*Transport).CloseIdleConnections", Method, 0},
    -		{"(*Transport).RegisterProtocol", Method, 0},
    -		{"(*Transport).RoundTrip", Method, 0},
    -		{"(ConnState).String", Method, 3},
    -		{"(Dir).Open", Method, 0},
    -		{"(HandlerFunc).ServeHTTP", Method, 0},
    -		{"(Header).Add", Method, 0},
    -		{"(Header).Clone", Method, 13},
    -		{"(Header).Del", Method, 0},
    -		{"(Header).Get", Method, 0},
    -		{"(Header).Set", Method, 0},
    -		{"(Header).Values", Method, 14},
    -		{"(Header).Write", Method, 0},
    -		{"(Header).WriteSubset", Method, 0},
    -		{"AllowQuerySemicolons", Func, 17},
    -		{"CanonicalHeaderKey", Func, 0},
    -		{"Client", Type, 0},
    -		{"Client.CheckRedirect", Field, 0},
    -		{"Client.Jar", Field, 0},
    -		{"Client.Timeout", Field, 3},
    -		{"Client.Transport", Field, 0},
    -		{"CloseNotifier", Type, 1},
    -		{"ConnState", Type, 3},
    -		{"Cookie", Type, 0},
    -		{"Cookie.Domain", Field, 0},
    -		{"Cookie.Expires", Field, 0},
    -		{"Cookie.HttpOnly", Field, 0},
    -		{"Cookie.MaxAge", Field, 0},
    -		{"Cookie.Name", Field, 0},
    -		{"Cookie.Partitioned", Field, 23},
    -		{"Cookie.Path", Field, 0},
    -		{"Cookie.Quoted", Field, 23},
    -		{"Cookie.Raw", Field, 0},
    -		{"Cookie.RawExpires", Field, 0},
    -		{"Cookie.SameSite", Field, 11},
    -		{"Cookie.Secure", Field, 0},
    -		{"Cookie.Unparsed", Field, 0},
    -		{"Cookie.Value", Field, 0},
    -		{"CookieJar", Type, 0},
    -		{"DefaultClient", Var, 0},
    -		{"DefaultMaxHeaderBytes", Const, 0},
    -		{"DefaultMaxIdleConnsPerHost", Const, 0},
    -		{"DefaultServeMux", Var, 0},
    -		{"DefaultTransport", Var, 0},
    -		{"DetectContentType", Func, 0},
    -		{"Dir", Type, 0},
    -		{"ErrAbortHandler", Var, 8},
    -		{"ErrBodyNotAllowed", Var, 0},
    -		{"ErrBodyReadAfterClose", Var, 0},
    -		{"ErrContentLength", Var, 0},
    -		{"ErrHandlerTimeout", Var, 0},
    -		{"ErrHeaderTooLong", Var, 0},
    -		{"ErrHijacked", Var, 0},
    -		{"ErrLineTooLong", Var, 0},
    -		{"ErrMissingBoundary", Var, 0},
    -		{"ErrMissingContentLength", Var, 0},
    -		{"ErrMissingFile", Var, 0},
    -		{"ErrNoCookie", Var, 0},
    -		{"ErrNoLocation", Var, 0},
    -		{"ErrNotMultipart", Var, 0},
    -		{"ErrNotSupported", Var, 0},
    -		{"ErrSchemeMismatch", Var, 21},
    -		{"ErrServerClosed", Var, 8},
    -		{"ErrShortBody", Var, 0},
    -		{"ErrSkipAltProtocol", Var, 6},
    -		{"ErrUnexpectedTrailer", Var, 0},
    -		{"ErrUseLastResponse", Var, 7},
    -		{"ErrWriteAfterFlush", Var, 0},
    -		{"Error", Func, 0},
    -		{"FS", Func, 16},
    -		{"File", Type, 0},
    -		{"FileServer", Func, 0},
    -		{"FileServerFS", Func, 22},
    -		{"FileSystem", Type, 0},
    -		{"Flusher", Type, 0},
    -		{"Get", Func, 0},
    -		{"Handle", Func, 0},
    -		{"HandleFunc", Func, 0},
    -		{"Handler", Type, 0},
    -		{"HandlerFunc", Type, 0},
    -		{"Head", Func, 0},
    -		{"Header", Type, 0},
    -		{"Hijacker", Type, 0},
    -		{"ListenAndServe", Func, 0},
    -		{"ListenAndServeTLS", Func, 0},
    -		{"LocalAddrContextKey", Var, 7},
    -		{"MaxBytesError", Type, 19},
    -		{"MaxBytesError.Limit", Field, 19},
    -		{"MaxBytesHandler", Func, 18},
    -		{"MaxBytesReader", Func, 0},
    -		{"MethodConnect", Const, 6},
    -		{"MethodDelete", Const, 6},
    -		{"MethodGet", Const, 6},
    -		{"MethodHead", Const, 6},
    -		{"MethodOptions", Const, 6},
    -		{"MethodPatch", Const, 6},
    -		{"MethodPost", Const, 6},
    -		{"MethodPut", Const, 6},
    -		{"MethodTrace", Const, 6},
    -		{"NewFileTransport", Func, 0},
    -		{"NewFileTransportFS", Func, 22},
    -		{"NewRequest", Func, 0},
    -		{"NewRequestWithContext", Func, 13},
    -		{"NewResponseController", Func, 20},
    -		{"NewServeMux", Func, 0},
    -		{"NoBody", Var, 8},
    -		{"NotFound", Func, 0},
    -		{"NotFoundHandler", Func, 0},
    -		{"ParseCookie", Func, 23},
    -		{"ParseHTTPVersion", Func, 0},
    -		{"ParseSetCookie", Func, 23},
    -		{"ParseTime", Func, 1},
    -		{"Post", Func, 0},
    -		{"PostForm", Func, 0},
    -		{"ProtocolError", Type, 0},
    -		{"ProtocolError.ErrorString", Field, 0},
    -		{"ProxyFromEnvironment", Func, 0},
    -		{"ProxyURL", Func, 0},
    -		{"PushOptions", Type, 8},
    -		{"PushOptions.Header", Field, 8},
    -		{"PushOptions.Method", Field, 8},
    -		{"Pusher", Type, 8},
    -		{"ReadRequest", Func, 0},
    -		{"ReadResponse", Func, 0},
    -		{"Redirect", Func, 0},
    -		{"RedirectHandler", Func, 0},
    -		{"Request", Type, 0},
    -		{"Request.Body", Field, 0},
    -		{"Request.Cancel", Field, 5},
    -		{"Request.Close", Field, 0},
    -		{"Request.ContentLength", Field, 0},
    -		{"Request.Form", Field, 0},
    -		{"Request.GetBody", Field, 8},
    -		{"Request.Header", Field, 0},
    -		{"Request.Host", Field, 0},
    -		{"Request.Method", Field, 0},
    -		{"Request.MultipartForm", Field, 0},
    -		{"Request.Pattern", Field, 23},
    -		{"Request.PostForm", Field, 1},
    -		{"Request.Proto", Field, 0},
    -		{"Request.ProtoMajor", Field, 0},
    -		{"Request.ProtoMinor", Field, 0},
    -		{"Request.RemoteAddr", Field, 0},
    -		{"Request.RequestURI", Field, 0},
    -		{"Request.Response", Field, 7},
    -		{"Request.TLS", Field, 0},
    -		{"Request.Trailer", Field, 0},
    -		{"Request.TransferEncoding", Field, 0},
    -		{"Request.URL", Field, 0},
    -		{"Response", Type, 0},
    -		{"Response.Body", Field, 0},
    -		{"Response.Close", Field, 0},
    -		{"Response.ContentLength", Field, 0},
    -		{"Response.Header", Field, 0},
    -		{"Response.Proto", Field, 0},
    -		{"Response.ProtoMajor", Field, 0},
    -		{"Response.ProtoMinor", Field, 0},
    -		{"Response.Request", Field, 0},
    -		{"Response.Status", Field, 0},
    -		{"Response.StatusCode", Field, 0},
    -		{"Response.TLS", Field, 3},
    -		{"Response.Trailer", Field, 0},
    -		{"Response.TransferEncoding", Field, 0},
    -		{"Response.Uncompressed", Field, 7},
    -		{"ResponseController", Type, 20},
    -		{"ResponseWriter", Type, 0},
    -		{"RoundTripper", Type, 0},
    -		{"SameSite", Type, 11},
    -		{"SameSiteDefaultMode", Const, 11},
    -		{"SameSiteLaxMode", Const, 11},
    -		{"SameSiteNoneMode", Const, 13},
    -		{"SameSiteStrictMode", Const, 11},
    -		{"Serve", Func, 0},
    -		{"ServeContent", Func, 0},
    -		{"ServeFile", Func, 0},
    -		{"ServeFileFS", Func, 22},
    -		{"ServeMux", Type, 0},
    -		{"ServeTLS", Func, 9},
    -		{"Server", Type, 0},
    -		{"Server.Addr", Field, 0},
    -		{"Server.BaseContext", Field, 13},
    -		{"Server.ConnContext", Field, 13},
    -		{"Server.ConnState", Field, 3},
    -		{"Server.DisableGeneralOptionsHandler", Field, 20},
    -		{"Server.ErrorLog", Field, 3},
    -		{"Server.Handler", Field, 0},
    -		{"Server.IdleTimeout", Field, 8},
    -		{"Server.MaxHeaderBytes", Field, 0},
    -		{"Server.ReadHeaderTimeout", Field, 8},
    -		{"Server.ReadTimeout", Field, 0},
    -		{"Server.TLSConfig", Field, 0},
    -		{"Server.TLSNextProto", Field, 1},
    -		{"Server.WriteTimeout", Field, 0},
    -		{"ServerContextKey", Var, 7},
    -		{"SetCookie", Func, 0},
    -		{"StateActive", Const, 3},
    -		{"StateClosed", Const, 3},
    -		{"StateHijacked", Const, 3},
    -		{"StateIdle", Const, 3},
    -		{"StateNew", Const, 3},
    -		{"StatusAccepted", Const, 0},
    -		{"StatusAlreadyReported", Const, 7},
    -		{"StatusBadGateway", Const, 0},
    -		{"StatusBadRequest", Const, 0},
    -		{"StatusConflict", Const, 0},
    -		{"StatusContinue", Const, 0},
    -		{"StatusCreated", Const, 0},
    -		{"StatusEarlyHints", Const, 13},
    -		{"StatusExpectationFailed", Const, 0},
    -		{"StatusFailedDependency", Const, 7},
    -		{"StatusForbidden", Const, 0},
    -		{"StatusFound", Const, 0},
    -		{"StatusGatewayTimeout", Const, 0},
    -		{"StatusGone", Const, 0},
    -		{"StatusHTTPVersionNotSupported", Const, 0},
    -		{"StatusIMUsed", Const, 7},
    -		{"StatusInsufficientStorage", Const, 7},
    -		{"StatusInternalServerError", Const, 0},
    -		{"StatusLengthRequired", Const, 0},
    -		{"StatusLocked", Const, 7},
    -		{"StatusLoopDetected", Const, 7},
    -		{"StatusMethodNotAllowed", Const, 0},
    -		{"StatusMisdirectedRequest", Const, 11},
    -		{"StatusMovedPermanently", Const, 0},
    -		{"StatusMultiStatus", Const, 7},
    -		{"StatusMultipleChoices", Const, 0},
    -		{"StatusNetworkAuthenticationRequired", Const, 6},
    -		{"StatusNoContent", Const, 0},
    -		{"StatusNonAuthoritativeInfo", Const, 0},
    -		{"StatusNotAcceptable", Const, 0},
    -		{"StatusNotExtended", Const, 7},
    -		{"StatusNotFound", Const, 0},
    -		{"StatusNotImplemented", Const, 0},
    -		{"StatusNotModified", Const, 0},
    -		{"StatusOK", Const, 0},
    -		{"StatusPartialContent", Const, 0},
    -		{"StatusPaymentRequired", Const, 0},
    -		{"StatusPermanentRedirect", Const, 7},
    -		{"StatusPreconditionFailed", Const, 0},
    -		{"StatusPreconditionRequired", Const, 6},
    -		{"StatusProcessing", Const, 7},
    -		{"StatusProxyAuthRequired", Const, 0},
    -		{"StatusRequestEntityTooLarge", Const, 0},
    -		{"StatusRequestHeaderFieldsTooLarge", Const, 6},
    -		{"StatusRequestTimeout", Const, 0},
    -		{"StatusRequestURITooLong", Const, 0},
    -		{"StatusRequestedRangeNotSatisfiable", Const, 0},
    -		{"StatusResetContent", Const, 0},
    -		{"StatusSeeOther", Const, 0},
    -		{"StatusServiceUnavailable", Const, 0},
    -		{"StatusSwitchingProtocols", Const, 0},
    -		{"StatusTeapot", Const, 0},
    -		{"StatusTemporaryRedirect", Const, 0},
    -		{"StatusText", Func, 0},
    -		{"StatusTooEarly", Const, 12},
    -		{"StatusTooManyRequests", Const, 6},
    -		{"StatusUnauthorized", Const, 0},
    -		{"StatusUnavailableForLegalReasons", Const, 6},
    -		{"StatusUnprocessableEntity", Const, 7},
    -		{"StatusUnsupportedMediaType", Const, 0},
    -		{"StatusUpgradeRequired", Const, 7},
    -		{"StatusUseProxy", Const, 0},
    -		{"StatusVariantAlsoNegotiates", Const, 7},
    -		{"StripPrefix", Func, 0},
    -		{"TimeFormat", Const, 0},
    -		{"TimeoutHandler", Func, 0},
    -		{"TrailerPrefix", Const, 8},
    -		{"Transport", Type, 0},
    -		{"Transport.Dial", Field, 0},
    -		{"Transport.DialContext", Field, 7},
    -		{"Transport.DialTLS", Field, 4},
    -		{"Transport.DialTLSContext", Field, 14},
    -		{"Transport.DisableCompression", Field, 0},
    -		{"Transport.DisableKeepAlives", Field, 0},
    -		{"Transport.ExpectContinueTimeout", Field, 6},
    -		{"Transport.ForceAttemptHTTP2", Field, 13},
    -		{"Transport.GetProxyConnectHeader", Field, 16},
    -		{"Transport.IdleConnTimeout", Field, 7},
    -		{"Transport.MaxConnsPerHost", Field, 11},
    -		{"Transport.MaxIdleConns", Field, 7},
    -		{"Transport.MaxIdleConnsPerHost", Field, 0},
    -		{"Transport.MaxResponseHeaderBytes", Field, 7},
    -		{"Transport.OnProxyConnectResponse", Field, 20},
    -		{"Transport.Proxy", Field, 0},
    -		{"Transport.ProxyConnectHeader", Field, 8},
    -		{"Transport.ReadBufferSize", Field, 13},
    -		{"Transport.ResponseHeaderTimeout", Field, 1},
    -		{"Transport.TLSClientConfig", Field, 0},
    -		{"Transport.TLSHandshakeTimeout", Field, 3},
    -		{"Transport.TLSNextProto", Field, 6},
    -		{"Transport.WriteBufferSize", Field, 13},
    +		{"(*Client).CloseIdleConnections", Method, 12, ""},
    +		{"(*Client).Do", Method, 0, ""},
    +		{"(*Client).Get", Method, 0, ""},
    +		{"(*Client).Head", Method, 0, ""},
    +		{"(*Client).Post", Method, 0, ""},
    +		{"(*Client).PostForm", Method, 0, ""},
    +		{"(*Cookie).String", Method, 0, ""},
    +		{"(*Cookie).Valid", Method, 18, ""},
    +		{"(*CrossOriginProtection).AddInsecureBypassPattern", Method, 25, ""},
    +		{"(*CrossOriginProtection).AddTrustedOrigin", Method, 25, ""},
    +		{"(*CrossOriginProtection).Check", Method, 25, ""},
    +		{"(*CrossOriginProtection).Handler", Method, 25, ""},
    +		{"(*CrossOriginProtection).SetDenyHandler", Method, 25, ""},
    +		{"(*MaxBytesError).Error", Method, 19, ""},
    +		{"(*ProtocolError).Error", Method, 0, ""},
    +		{"(*ProtocolError).Is", Method, 21, ""},
    +		{"(*Protocols).SetHTTP1", Method, 24, ""},
    +		{"(*Protocols).SetHTTP2", Method, 24, ""},
    +		{"(*Protocols).SetUnencryptedHTTP2", Method, 24, ""},
    +		{"(*Request).AddCookie", Method, 0, ""},
    +		{"(*Request).BasicAuth", Method, 4, ""},
    +		{"(*Request).Clone", Method, 13, ""},
    +		{"(*Request).Context", Method, 7, ""},
    +		{"(*Request).Cookie", Method, 0, ""},
    +		{"(*Request).Cookies", Method, 0, ""},
    +		{"(*Request).CookiesNamed", Method, 23, ""},
    +		{"(*Request).FormFile", Method, 0, ""},
    +		{"(*Request).FormValue", Method, 0, ""},
    +		{"(*Request).MultipartReader", Method, 0, ""},
    +		{"(*Request).ParseForm", Method, 0, ""},
    +		{"(*Request).ParseMultipartForm", Method, 0, ""},
    +		{"(*Request).PathValue", Method, 22, ""},
    +		{"(*Request).PostFormValue", Method, 1, ""},
    +		{"(*Request).ProtoAtLeast", Method, 0, ""},
    +		{"(*Request).Referer", Method, 0, ""},
    +		{"(*Request).SetBasicAuth", Method, 0, ""},
    +		{"(*Request).SetPathValue", Method, 22, ""},
    +		{"(*Request).UserAgent", Method, 0, ""},
    +		{"(*Request).WithContext", Method, 7, ""},
    +		{"(*Request).Write", Method, 0, ""},
    +		{"(*Request).WriteProxy", Method, 0, ""},
    +		{"(*Response).Cookies", Method, 0, ""},
    +		{"(*Response).Location", Method, 0, ""},
    +		{"(*Response).ProtoAtLeast", Method, 0, ""},
    +		{"(*Response).Write", Method, 0, ""},
    +		{"(*ResponseController).EnableFullDuplex", Method, 21, ""},
    +		{"(*ResponseController).Flush", Method, 20, ""},
    +		{"(*ResponseController).Hijack", Method, 20, ""},
    +		{"(*ResponseController).SetReadDeadline", Method, 20, ""},
    +		{"(*ResponseController).SetWriteDeadline", Method, 20, ""},
    +		{"(*ServeMux).Handle", Method, 0, ""},
    +		{"(*ServeMux).HandleFunc", Method, 0, ""},
    +		{"(*ServeMux).Handler", Method, 1, ""},
    +		{"(*ServeMux).ServeHTTP", Method, 0, ""},
    +		{"(*Server).Close", Method, 8, ""},
    +		{"(*Server).ListenAndServe", Method, 0, ""},
    +		{"(*Server).ListenAndServeTLS", Method, 0, ""},
    +		{"(*Server).RegisterOnShutdown", Method, 9, ""},
    +		{"(*Server).Serve", Method, 0, ""},
    +		{"(*Server).ServeTLS", Method, 9, ""},
    +		{"(*Server).SetKeepAlivesEnabled", Method, 3, ""},
    +		{"(*Server).Shutdown", Method, 8, ""},
    +		{"(*Transport).CancelRequest", Method, 1, ""},
    +		{"(*Transport).Clone", Method, 13, ""},
    +		{"(*Transport).CloseIdleConnections", Method, 0, ""},
    +		{"(*Transport).RegisterProtocol", Method, 0, ""},
    +		{"(*Transport).RoundTrip", Method, 0, ""},
    +		{"(ConnState).String", Method, 3, ""},
    +		{"(Dir).Open", Method, 0, ""},
    +		{"(HandlerFunc).ServeHTTP", Method, 0, ""},
    +		{"(Header).Add", Method, 0, ""},
    +		{"(Header).Clone", Method, 13, ""},
    +		{"(Header).Del", Method, 0, ""},
    +		{"(Header).Get", Method, 0, ""},
    +		{"(Header).Set", Method, 0, ""},
    +		{"(Header).Values", Method, 14, ""},
    +		{"(Header).Write", Method, 0, ""},
    +		{"(Header).WriteSubset", Method, 0, ""},
    +		{"(Protocols).HTTP1", Method, 24, ""},
    +		{"(Protocols).HTTP2", Method, 24, ""},
    +		{"(Protocols).String", Method, 24, ""},
    +		{"(Protocols).UnencryptedHTTP2", Method, 24, ""},
    +		{"AllowQuerySemicolons", Func, 17, "func(h Handler) Handler"},
    +		{"CanonicalHeaderKey", Func, 0, "func(s string) string"},
    +		{"Client", Type, 0, ""},
    +		{"Client.CheckRedirect", Field, 0, ""},
    +		{"Client.Jar", Field, 0, ""},
    +		{"Client.Timeout", Field, 3, ""},
    +		{"Client.Transport", Field, 0, ""},
    +		{"CloseNotifier", Type, 1, ""},
    +		{"ConnState", Type, 3, ""},
    +		{"Cookie", Type, 0, ""},
    +		{"Cookie.Domain", Field, 0, ""},
    +		{"Cookie.Expires", Field, 0, ""},
    +		{"Cookie.HttpOnly", Field, 0, ""},
    +		{"Cookie.MaxAge", Field, 0, ""},
    +		{"Cookie.Name", Field, 0, ""},
    +		{"Cookie.Partitioned", Field, 23, ""},
    +		{"Cookie.Path", Field, 0, ""},
    +		{"Cookie.Quoted", Field, 23, ""},
    +		{"Cookie.Raw", Field, 0, ""},
    +		{"Cookie.RawExpires", Field, 0, ""},
    +		{"Cookie.SameSite", Field, 11, ""},
    +		{"Cookie.Secure", Field, 0, ""},
    +		{"Cookie.Unparsed", Field, 0, ""},
    +		{"Cookie.Value", Field, 0, ""},
    +		{"CookieJar", Type, 0, ""},
    +		{"CrossOriginProtection", Type, 25, ""},
    +		{"DefaultClient", Var, 0, ""},
    +		{"DefaultMaxHeaderBytes", Const, 0, ""},
    +		{"DefaultMaxIdleConnsPerHost", Const, 0, ""},
    +		{"DefaultServeMux", Var, 0, ""},
    +		{"DefaultTransport", Var, 0, ""},
    +		{"DetectContentType", Func, 0, "func(data []byte) string"},
    +		{"Dir", Type, 0, ""},
    +		{"ErrAbortHandler", Var, 8, ""},
    +		{"ErrBodyNotAllowed", Var, 0, ""},
    +		{"ErrBodyReadAfterClose", Var, 0, ""},
    +		{"ErrContentLength", Var, 0, ""},
    +		{"ErrHandlerTimeout", Var, 0, ""},
    +		{"ErrHeaderTooLong", Var, 0, ""},
    +		{"ErrHijacked", Var, 0, ""},
    +		{"ErrLineTooLong", Var, 0, ""},
    +		{"ErrMissingBoundary", Var, 0, ""},
    +		{"ErrMissingContentLength", Var, 0, ""},
    +		{"ErrMissingFile", Var, 0, ""},
    +		{"ErrNoCookie", Var, 0, ""},
    +		{"ErrNoLocation", Var, 0, ""},
    +		{"ErrNotMultipart", Var, 0, ""},
    +		{"ErrNotSupported", Var, 0, ""},
    +		{"ErrSchemeMismatch", Var, 21, ""},
    +		{"ErrServerClosed", Var, 8, ""},
    +		{"ErrShortBody", Var, 0, ""},
    +		{"ErrSkipAltProtocol", Var, 6, ""},
    +		{"ErrUnexpectedTrailer", Var, 0, ""},
    +		{"ErrUseLastResponse", Var, 7, ""},
    +		{"ErrWriteAfterFlush", Var, 0, ""},
    +		{"Error", Func, 0, "func(w ResponseWriter, error string, code int)"},
    +		{"FS", Func, 16, "func(fsys fs.FS) FileSystem"},
    +		{"File", Type, 0, ""},
    +		{"FileServer", Func, 0, "func(root FileSystem) Handler"},
    +		{"FileServerFS", Func, 22, "func(root fs.FS) Handler"},
    +		{"FileSystem", Type, 0, ""},
    +		{"Flusher", Type, 0, ""},
    +		{"Get", Func, 0, "func(url string) (resp *Response, err error)"},
    +		{"HTTP2Config", Type, 24, ""},
    +		{"HTTP2Config.CountError", Field, 24, ""},
    +		{"HTTP2Config.MaxConcurrentStreams", Field, 24, ""},
    +		{"HTTP2Config.MaxDecoderHeaderTableSize", Field, 24, ""},
    +		{"HTTP2Config.MaxEncoderHeaderTableSize", Field, 24, ""},
    +		{"HTTP2Config.MaxReadFrameSize", Field, 24, ""},
    +		{"HTTP2Config.MaxReceiveBufferPerConnection", Field, 24, ""},
    +		{"HTTP2Config.MaxReceiveBufferPerStream", Field, 24, ""},
    +		{"HTTP2Config.PermitProhibitedCipherSuites", Field, 24, ""},
    +		{"HTTP2Config.PingTimeout", Field, 24, ""},
    +		{"HTTP2Config.SendPingTimeout", Field, 24, ""},
    +		{"HTTP2Config.WriteByteTimeout", Field, 24, ""},
    +		{"Handle", Func, 0, "func(pattern string, handler Handler)"},
    +		{"HandleFunc", Func, 0, "func(pattern string, handler func(ResponseWriter, *Request))"},
    +		{"Handler", Type, 0, ""},
    +		{"HandlerFunc", Type, 0, ""},
    +		{"Head", Func, 0, "func(url string) (resp *Response, err error)"},
    +		{"Header", Type, 0, ""},
    +		{"Hijacker", Type, 0, ""},
    +		{"ListenAndServe", Func, 0, "func(addr string, handler Handler) error"},
    +		{"ListenAndServeTLS", Func, 0, "func(addr string, certFile string, keyFile string, handler Handler) error"},
    +		{"LocalAddrContextKey", Var, 7, ""},
    +		{"MaxBytesError", Type, 19, ""},
    +		{"MaxBytesError.Limit", Field, 19, ""},
    +		{"MaxBytesHandler", Func, 18, "func(h Handler, n int64) Handler"},
    +		{"MaxBytesReader", Func, 0, "func(w ResponseWriter, r io.ReadCloser, n int64) io.ReadCloser"},
    +		{"MethodConnect", Const, 6, ""},
    +		{"MethodDelete", Const, 6, ""},
    +		{"MethodGet", Const, 6, ""},
    +		{"MethodHead", Const, 6, ""},
    +		{"MethodOptions", Const, 6, ""},
    +		{"MethodPatch", Const, 6, ""},
    +		{"MethodPost", Const, 6, ""},
    +		{"MethodPut", Const, 6, ""},
    +		{"MethodTrace", Const, 6, ""},
    +		{"NewCrossOriginProtection", Func, 25, "func() *CrossOriginProtection"},
    +		{"NewFileTransport", Func, 0, "func(fs FileSystem) RoundTripper"},
    +		{"NewFileTransportFS", Func, 22, "func(fsys fs.FS) RoundTripper"},
    +		{"NewRequest", Func, 0, "func(method string, url string, body io.Reader) (*Request, error)"},
    +		{"NewRequestWithContext", Func, 13, "func(ctx context.Context, method string, url string, body io.Reader) (*Request, error)"},
    +		{"NewResponseController", Func, 20, "func(rw ResponseWriter) *ResponseController"},
    +		{"NewServeMux", Func, 0, "func() *ServeMux"},
    +		{"NoBody", Var, 8, ""},
    +		{"NotFound", Func, 0, "func(w ResponseWriter, r *Request)"},
    +		{"NotFoundHandler", Func, 0, "func() Handler"},
    +		{"ParseCookie", Func, 23, "func(line string) ([]*Cookie, error)"},
    +		{"ParseHTTPVersion", Func, 0, "func(vers string) (major int, minor int, ok bool)"},
    +		{"ParseSetCookie", Func, 23, "func(line string) (*Cookie, error)"},
    +		{"ParseTime", Func, 1, "func(text string) (t time.Time, err error)"},
    +		{"Post", Func, 0, "func(url string, contentType string, body io.Reader) (resp *Response, err error)"},
    +		{"PostForm", Func, 0, "func(url string, data url.Values) (resp *Response, err error)"},
    +		{"ProtocolError", Type, 0, ""},
    +		{"ProtocolError.ErrorString", Field, 0, ""},
    +		{"Protocols", Type, 24, ""},
    +		{"ProxyFromEnvironment", Func, 0, "func(req *Request) (*url.URL, error)"},
    +		{"ProxyURL", Func, 0, "func(fixedURL *url.URL) func(*Request) (*url.URL, error)"},
    +		{"PushOptions", Type, 8, ""},
    +		{"PushOptions.Header", Field, 8, ""},
    +		{"PushOptions.Method", Field, 8, ""},
    +		{"Pusher", Type, 8, ""},
    +		{"ReadRequest", Func, 0, "func(b *bufio.Reader) (*Request, error)"},
    +		{"ReadResponse", Func, 0, "func(r *bufio.Reader, req *Request) (*Response, error)"},
    +		{"Redirect", Func, 0, "func(w ResponseWriter, r *Request, url string, code int)"},
    +		{"RedirectHandler", Func, 0, "func(url string, code int) Handler"},
    +		{"Request", Type, 0, ""},
    +		{"Request.Body", Field, 0, ""},
    +		{"Request.Cancel", Field, 5, ""},
    +		{"Request.Close", Field, 0, ""},
    +		{"Request.ContentLength", Field, 0, ""},
    +		{"Request.Form", Field, 0, ""},
    +		{"Request.GetBody", Field, 8, ""},
    +		{"Request.Header", Field, 0, ""},
    +		{"Request.Host", Field, 0, ""},
    +		{"Request.Method", Field, 0, ""},
    +		{"Request.MultipartForm", Field, 0, ""},
    +		{"Request.Pattern", Field, 23, ""},
    +		{"Request.PostForm", Field, 1, ""},
    +		{"Request.Proto", Field, 0, ""},
    +		{"Request.ProtoMajor", Field, 0, ""},
    +		{"Request.ProtoMinor", Field, 0, ""},
    +		{"Request.RemoteAddr", Field, 0, ""},
    +		{"Request.RequestURI", Field, 0, ""},
    +		{"Request.Response", Field, 7, ""},
    +		{"Request.TLS", Field, 0, ""},
    +		{"Request.Trailer", Field, 0, ""},
    +		{"Request.TransferEncoding", Field, 0, ""},
    +		{"Request.URL", Field, 0, ""},
    +		{"Response", Type, 0, ""},
    +		{"Response.Body", Field, 0, ""},
    +		{"Response.Close", Field, 0, ""},
    +		{"Response.ContentLength", Field, 0, ""},
    +		{"Response.Header", Field, 0, ""},
    +		{"Response.Proto", Field, 0, ""},
    +		{"Response.ProtoMajor", Field, 0, ""},
    +		{"Response.ProtoMinor", Field, 0, ""},
    +		{"Response.Request", Field, 0, ""},
    +		{"Response.Status", Field, 0, ""},
    +		{"Response.StatusCode", Field, 0, ""},
    +		{"Response.TLS", Field, 3, ""},
    +		{"Response.Trailer", Field, 0, ""},
    +		{"Response.TransferEncoding", Field, 0, ""},
    +		{"Response.Uncompressed", Field, 7, ""},
    +		{"ResponseController", Type, 20, ""},
    +		{"ResponseWriter", Type, 0, ""},
    +		{"RoundTripper", Type, 0, ""},
    +		{"SameSite", Type, 11, ""},
    +		{"SameSiteDefaultMode", Const, 11, ""},
    +		{"SameSiteLaxMode", Const, 11, ""},
    +		{"SameSiteNoneMode", Const, 13, ""},
    +		{"SameSiteStrictMode", Const, 11, ""},
    +		{"Serve", Func, 0, "func(l net.Listener, handler Handler) error"},
    +		{"ServeContent", Func, 0, "func(w ResponseWriter, req *Request, name string, modtime time.Time, content io.ReadSeeker)"},
    +		{"ServeFile", Func, 0, "func(w ResponseWriter, r *Request, name string)"},
    +		{"ServeFileFS", Func, 22, "func(w ResponseWriter, r *Request, fsys fs.FS, name string)"},
    +		{"ServeMux", Type, 0, ""},
    +		{"ServeTLS", Func, 9, "func(l net.Listener, handler Handler, certFile string, keyFile string) error"},
    +		{"Server", Type, 0, ""},
    +		{"Server.Addr", Field, 0, ""},
    +		{"Server.BaseContext", Field, 13, ""},
    +		{"Server.ConnContext", Field, 13, ""},
    +		{"Server.ConnState", Field, 3, ""},
    +		{"Server.DisableGeneralOptionsHandler", Field, 20, ""},
    +		{"Server.ErrorLog", Field, 3, ""},
    +		{"Server.HTTP2", Field, 24, ""},
    +		{"Server.Handler", Field, 0, ""},
    +		{"Server.IdleTimeout", Field, 8, ""},
    +		{"Server.MaxHeaderBytes", Field, 0, ""},
    +		{"Server.Protocols", Field, 24, ""},
    +		{"Server.ReadHeaderTimeout", Field, 8, ""},
    +		{"Server.ReadTimeout", Field, 0, ""},
    +		{"Server.TLSConfig", Field, 0, ""},
    +		{"Server.TLSNextProto", Field, 1, ""},
    +		{"Server.WriteTimeout", Field, 0, ""},
    +		{"ServerContextKey", Var, 7, ""},
    +		{"SetCookie", Func, 0, "func(w ResponseWriter, cookie *Cookie)"},
    +		{"StateActive", Const, 3, ""},
    +		{"StateClosed", Const, 3, ""},
    +		{"StateHijacked", Const, 3, ""},
    +		{"StateIdle", Const, 3, ""},
    +		{"StateNew", Const, 3, ""},
    +		{"StatusAccepted", Const, 0, ""},
    +		{"StatusAlreadyReported", Const, 7, ""},
    +		{"StatusBadGateway", Const, 0, ""},
    +		{"StatusBadRequest", Const, 0, ""},
    +		{"StatusConflict", Const, 0, ""},
    +		{"StatusContinue", Const, 0, ""},
    +		{"StatusCreated", Const, 0, ""},
    +		{"StatusEarlyHints", Const, 13, ""},
    +		{"StatusExpectationFailed", Const, 0, ""},
    +		{"StatusFailedDependency", Const, 7, ""},
    +		{"StatusForbidden", Const, 0, ""},
    +		{"StatusFound", Const, 0, ""},
    +		{"StatusGatewayTimeout", Const, 0, ""},
    +		{"StatusGone", Const, 0, ""},
    +		{"StatusHTTPVersionNotSupported", Const, 0, ""},
    +		{"StatusIMUsed", Const, 7, ""},
    +		{"StatusInsufficientStorage", Const, 7, ""},
    +		{"StatusInternalServerError", Const, 0, ""},
    +		{"StatusLengthRequired", Const, 0, ""},
    +		{"StatusLocked", Const, 7, ""},
    +		{"StatusLoopDetected", Const, 7, ""},
    +		{"StatusMethodNotAllowed", Const, 0, ""},
    +		{"StatusMisdirectedRequest", Const, 11, ""},
    +		{"StatusMovedPermanently", Const, 0, ""},
    +		{"StatusMultiStatus", Const, 7, ""},
    +		{"StatusMultipleChoices", Const, 0, ""},
    +		{"StatusNetworkAuthenticationRequired", Const, 6, ""},
    +		{"StatusNoContent", Const, 0, ""},
    +		{"StatusNonAuthoritativeInfo", Const, 0, ""},
    +		{"StatusNotAcceptable", Const, 0, ""},
    +		{"StatusNotExtended", Const, 7, ""},
    +		{"StatusNotFound", Const, 0, ""},
    +		{"StatusNotImplemented", Const, 0, ""},
    +		{"StatusNotModified", Const, 0, ""},
    +		{"StatusOK", Const, 0, ""},
    +		{"StatusPartialContent", Const, 0, ""},
    +		{"StatusPaymentRequired", Const, 0, ""},
    +		{"StatusPermanentRedirect", Const, 7, ""},
    +		{"StatusPreconditionFailed", Const, 0, ""},
    +		{"StatusPreconditionRequired", Const, 6, ""},
    +		{"StatusProcessing", Const, 7, ""},
    +		{"StatusProxyAuthRequired", Const, 0, ""},
    +		{"StatusRequestEntityTooLarge", Const, 0, ""},
    +		{"StatusRequestHeaderFieldsTooLarge", Const, 6, ""},
    +		{"StatusRequestTimeout", Const, 0, ""},
    +		{"StatusRequestURITooLong", Const, 0, ""},
    +		{"StatusRequestedRangeNotSatisfiable", Const, 0, ""},
    +		{"StatusResetContent", Const, 0, ""},
    +		{"StatusSeeOther", Const, 0, ""},
    +		{"StatusServiceUnavailable", Const, 0, ""},
    +		{"StatusSwitchingProtocols", Const, 0, ""},
    +		{"StatusTeapot", Const, 0, ""},
    +		{"StatusTemporaryRedirect", Const, 0, ""},
    +		{"StatusText", Func, 0, "func(code int) string"},
    +		{"StatusTooEarly", Const, 12, ""},
    +		{"StatusTooManyRequests", Const, 6, ""},
    +		{"StatusUnauthorized", Const, 0, ""},
    +		{"StatusUnavailableForLegalReasons", Const, 6, ""},
    +		{"StatusUnprocessableEntity", Const, 7, ""},
    +		{"StatusUnsupportedMediaType", Const, 0, ""},
    +		{"StatusUpgradeRequired", Const, 7, ""},
    +		{"StatusUseProxy", Const, 0, ""},
    +		{"StatusVariantAlsoNegotiates", Const, 7, ""},
    +		{"StripPrefix", Func, 0, "func(prefix string, h Handler) Handler"},
    +		{"TimeFormat", Const, 0, ""},
    +		{"TimeoutHandler", Func, 0, "func(h Handler, dt time.Duration, msg string) Handler"},
    +		{"TrailerPrefix", Const, 8, ""},
    +		{"Transport", Type, 0, ""},
    +		{"Transport.Dial", Field, 0, ""},
    +		{"Transport.DialContext", Field, 7, ""},
    +		{"Transport.DialTLS", Field, 4, ""},
    +		{"Transport.DialTLSContext", Field, 14, ""},
    +		{"Transport.DisableCompression", Field, 0, ""},
    +		{"Transport.DisableKeepAlives", Field, 0, ""},
    +		{"Transport.ExpectContinueTimeout", Field, 6, ""},
    +		{"Transport.ForceAttemptHTTP2", Field, 13, ""},
    +		{"Transport.GetProxyConnectHeader", Field, 16, ""},
    +		{"Transport.HTTP2", Field, 24, ""},
    +		{"Transport.IdleConnTimeout", Field, 7, ""},
    +		{"Transport.MaxConnsPerHost", Field, 11, ""},
    +		{"Transport.MaxIdleConns", Field, 7, ""},
    +		{"Transport.MaxIdleConnsPerHost", Field, 0, ""},
    +		{"Transport.MaxResponseHeaderBytes", Field, 7, ""},
    +		{"Transport.OnProxyConnectResponse", Field, 20, ""},
    +		{"Transport.Protocols", Field, 24, ""},
    +		{"Transport.Proxy", Field, 0, ""},
    +		{"Transport.ProxyConnectHeader", Field, 8, ""},
    +		{"Transport.ReadBufferSize", Field, 13, ""},
    +		{"Transport.ResponseHeaderTimeout", Field, 1, ""},
    +		{"Transport.TLSClientConfig", Field, 0, ""},
    +		{"Transport.TLSHandshakeTimeout", Field, 3, ""},
    +		{"Transport.TLSNextProto", Field, 6, ""},
    +		{"Transport.WriteBufferSize", Field, 13, ""},
     	},
     	"net/http/cgi": {
    -		{"(*Handler).ServeHTTP", Method, 0},
    -		{"Handler", Type, 0},
    -		{"Handler.Args", Field, 0},
    -		{"Handler.Dir", Field, 0},
    -		{"Handler.Env", Field, 0},
    -		{"Handler.InheritEnv", Field, 0},
    -		{"Handler.Logger", Field, 0},
    -		{"Handler.Path", Field, 0},
    -		{"Handler.PathLocationHandler", Field, 0},
    -		{"Handler.Root", Field, 0},
    -		{"Handler.Stderr", Field, 7},
    -		{"Request", Func, 0},
    -		{"RequestFromMap", Func, 0},
    -		{"Serve", Func, 0},
    +		{"(*Handler).ServeHTTP", Method, 0, ""},
    +		{"Handler", Type, 0, ""},
    +		{"Handler.Args", Field, 0, ""},
    +		{"Handler.Dir", Field, 0, ""},
    +		{"Handler.Env", Field, 0, ""},
    +		{"Handler.InheritEnv", Field, 0, ""},
    +		{"Handler.Logger", Field, 0, ""},
    +		{"Handler.Path", Field, 0, ""},
    +		{"Handler.PathLocationHandler", Field, 0, ""},
    +		{"Handler.Root", Field, 0, ""},
    +		{"Handler.Stderr", Field, 7, ""},
    +		{"Request", Func, 0, "func() (*http.Request, error)"},
    +		{"RequestFromMap", Func, 0, "func(params map[string]string) (*http.Request, error)"},
    +		{"Serve", Func, 0, "func(handler http.Handler) error"},
     	},
     	"net/http/cookiejar": {
    -		{"(*Jar).Cookies", Method, 1},
    -		{"(*Jar).SetCookies", Method, 1},
    -		{"Jar", Type, 1},
    -		{"New", Func, 1},
    -		{"Options", Type, 1},
    -		{"Options.PublicSuffixList", Field, 1},
    -		{"PublicSuffixList", Type, 1},
    +		{"(*Jar).Cookies", Method, 1, ""},
    +		{"(*Jar).SetCookies", Method, 1, ""},
    +		{"Jar", Type, 1, ""},
    +		{"New", Func, 1, "func(o *Options) (*Jar, error)"},
    +		{"Options", Type, 1, ""},
    +		{"Options.PublicSuffixList", Field, 1, ""},
    +		{"PublicSuffixList", Type, 1, ""},
     	},
     	"net/http/fcgi": {
    -		{"ErrConnClosed", Var, 5},
    -		{"ErrRequestAborted", Var, 5},
    -		{"ProcessEnv", Func, 9},
    -		{"Serve", Func, 0},
    +		{"ErrConnClosed", Var, 5, ""},
    +		{"ErrRequestAborted", Var, 5, ""},
    +		{"ProcessEnv", Func, 9, "func(r *http.Request) map[string]string"},
    +		{"Serve", Func, 0, "func(l net.Listener, handler http.Handler) error"},
     	},
     	"net/http/httptest": {
    -		{"(*ResponseRecorder).Flush", Method, 0},
    -		{"(*ResponseRecorder).Header", Method, 0},
    -		{"(*ResponseRecorder).Result", Method, 7},
    -		{"(*ResponseRecorder).Write", Method, 0},
    -		{"(*ResponseRecorder).WriteHeader", Method, 0},
    -		{"(*ResponseRecorder).WriteString", Method, 6},
    -		{"(*Server).Certificate", Method, 9},
    -		{"(*Server).Client", Method, 9},
    -		{"(*Server).Close", Method, 0},
    -		{"(*Server).CloseClientConnections", Method, 0},
    -		{"(*Server).Start", Method, 0},
    -		{"(*Server).StartTLS", Method, 0},
    -		{"DefaultRemoteAddr", Const, 0},
    -		{"NewRecorder", Func, 0},
    -		{"NewRequest", Func, 7},
    -		{"NewRequestWithContext", Func, 23},
    -		{"NewServer", Func, 0},
    -		{"NewTLSServer", Func, 0},
    -		{"NewUnstartedServer", Func, 0},
    -		{"ResponseRecorder", Type, 0},
    -		{"ResponseRecorder.Body", Field, 0},
    -		{"ResponseRecorder.Code", Field, 0},
    -		{"ResponseRecorder.Flushed", Field, 0},
    -		{"ResponseRecorder.HeaderMap", Field, 0},
    -		{"Server", Type, 0},
    -		{"Server.Config", Field, 0},
    -		{"Server.EnableHTTP2", Field, 14},
    -		{"Server.Listener", Field, 0},
    -		{"Server.TLS", Field, 0},
    -		{"Server.URL", Field, 0},
    +		{"(*ResponseRecorder).Flush", Method, 0, ""},
    +		{"(*ResponseRecorder).Header", Method, 0, ""},
    +		{"(*ResponseRecorder).Result", Method, 7, ""},
    +		{"(*ResponseRecorder).Write", Method, 0, ""},
    +		{"(*ResponseRecorder).WriteHeader", Method, 0, ""},
    +		{"(*ResponseRecorder).WriteString", Method, 6, ""},
    +		{"(*Server).Certificate", Method, 9, ""},
    +		{"(*Server).Client", Method, 9, ""},
    +		{"(*Server).Close", Method, 0, ""},
    +		{"(*Server).CloseClientConnections", Method, 0, ""},
    +		{"(*Server).Start", Method, 0, ""},
    +		{"(*Server).StartTLS", Method, 0, ""},
    +		{"DefaultRemoteAddr", Const, 0, ""},
    +		{"NewRecorder", Func, 0, "func() *ResponseRecorder"},
    +		{"NewRequest", Func, 7, "func(method string, target string, body io.Reader) *http.Request"},
    +		{"NewRequestWithContext", Func, 23, "func(ctx context.Context, method string, target string, body io.Reader) *http.Request"},
    +		{"NewServer", Func, 0, "func(handler http.Handler) *Server"},
    +		{"NewTLSServer", Func, 0, "func(handler http.Handler) *Server"},
    +		{"NewUnstartedServer", Func, 0, "func(handler http.Handler) *Server"},
    +		{"ResponseRecorder", Type, 0, ""},
    +		{"ResponseRecorder.Body", Field, 0, ""},
    +		{"ResponseRecorder.Code", Field, 0, ""},
    +		{"ResponseRecorder.Flushed", Field, 0, ""},
    +		{"ResponseRecorder.HeaderMap", Field, 0, ""},
    +		{"Server", Type, 0, ""},
    +		{"Server.Config", Field, 0, ""},
    +		{"Server.EnableHTTP2", Field, 14, ""},
    +		{"Server.Listener", Field, 0, ""},
    +		{"Server.TLS", Field, 0, ""},
    +		{"Server.URL", Field, 0, ""},
     	},
     	"net/http/httptrace": {
    -		{"ClientTrace", Type, 7},
    -		{"ClientTrace.ConnectDone", Field, 7},
    -		{"ClientTrace.ConnectStart", Field, 7},
    -		{"ClientTrace.DNSDone", Field, 7},
    -		{"ClientTrace.DNSStart", Field, 7},
    -		{"ClientTrace.GetConn", Field, 7},
    -		{"ClientTrace.Got100Continue", Field, 7},
    -		{"ClientTrace.Got1xxResponse", Field, 11},
    -		{"ClientTrace.GotConn", Field, 7},
    -		{"ClientTrace.GotFirstResponseByte", Field, 7},
    -		{"ClientTrace.PutIdleConn", Field, 7},
    -		{"ClientTrace.TLSHandshakeDone", Field, 8},
    -		{"ClientTrace.TLSHandshakeStart", Field, 8},
    -		{"ClientTrace.Wait100Continue", Field, 7},
    -		{"ClientTrace.WroteHeaderField", Field, 11},
    -		{"ClientTrace.WroteHeaders", Field, 7},
    -		{"ClientTrace.WroteRequest", Field, 7},
    -		{"ContextClientTrace", Func, 7},
    -		{"DNSDoneInfo", Type, 7},
    -		{"DNSDoneInfo.Addrs", Field, 7},
    -		{"DNSDoneInfo.Coalesced", Field, 7},
    -		{"DNSDoneInfo.Err", Field, 7},
    -		{"DNSStartInfo", Type, 7},
    -		{"DNSStartInfo.Host", Field, 7},
    -		{"GotConnInfo", Type, 7},
    -		{"GotConnInfo.Conn", Field, 7},
    -		{"GotConnInfo.IdleTime", Field, 7},
    -		{"GotConnInfo.Reused", Field, 7},
    -		{"GotConnInfo.WasIdle", Field, 7},
    -		{"WithClientTrace", Func, 7},
    -		{"WroteRequestInfo", Type, 7},
    -		{"WroteRequestInfo.Err", Field, 7},
    +		{"ClientTrace", Type, 7, ""},
    +		{"ClientTrace.ConnectDone", Field, 7, ""},
    +		{"ClientTrace.ConnectStart", Field, 7, ""},
    +		{"ClientTrace.DNSDone", Field, 7, ""},
    +		{"ClientTrace.DNSStart", Field, 7, ""},
    +		{"ClientTrace.GetConn", Field, 7, ""},
    +		{"ClientTrace.Got100Continue", Field, 7, ""},
    +		{"ClientTrace.Got1xxResponse", Field, 11, ""},
    +		{"ClientTrace.GotConn", Field, 7, ""},
    +		{"ClientTrace.GotFirstResponseByte", Field, 7, ""},
    +		{"ClientTrace.PutIdleConn", Field, 7, ""},
    +		{"ClientTrace.TLSHandshakeDone", Field, 8, ""},
    +		{"ClientTrace.TLSHandshakeStart", Field, 8, ""},
    +		{"ClientTrace.Wait100Continue", Field, 7, ""},
    +		{"ClientTrace.WroteHeaderField", Field, 11, ""},
    +		{"ClientTrace.WroteHeaders", Field, 7, ""},
    +		{"ClientTrace.WroteRequest", Field, 7, ""},
    +		{"ContextClientTrace", Func, 7, "func(ctx context.Context) *ClientTrace"},
    +		{"DNSDoneInfo", Type, 7, ""},
    +		{"DNSDoneInfo.Addrs", Field, 7, ""},
    +		{"DNSDoneInfo.Coalesced", Field, 7, ""},
    +		{"DNSDoneInfo.Err", Field, 7, ""},
    +		{"DNSStartInfo", Type, 7, ""},
    +		{"DNSStartInfo.Host", Field, 7, ""},
    +		{"GotConnInfo", Type, 7, ""},
    +		{"GotConnInfo.Conn", Field, 7, ""},
    +		{"GotConnInfo.IdleTime", Field, 7, ""},
    +		{"GotConnInfo.Reused", Field, 7, ""},
    +		{"GotConnInfo.WasIdle", Field, 7, ""},
    +		{"WithClientTrace", Func, 7, "func(ctx context.Context, trace *ClientTrace) context.Context"},
    +		{"WroteRequestInfo", Type, 7, ""},
    +		{"WroteRequestInfo.Err", Field, 7, ""},
     	},
     	"net/http/httputil": {
    -		{"(*ClientConn).Close", Method, 0},
    -		{"(*ClientConn).Do", Method, 0},
    -		{"(*ClientConn).Hijack", Method, 0},
    -		{"(*ClientConn).Pending", Method, 0},
    -		{"(*ClientConn).Read", Method, 0},
    -		{"(*ClientConn).Write", Method, 0},
    -		{"(*ProxyRequest).SetURL", Method, 20},
    -		{"(*ProxyRequest).SetXForwarded", Method, 20},
    -		{"(*ReverseProxy).ServeHTTP", Method, 0},
    -		{"(*ServerConn).Close", Method, 0},
    -		{"(*ServerConn).Hijack", Method, 0},
    -		{"(*ServerConn).Pending", Method, 0},
    -		{"(*ServerConn).Read", Method, 0},
    -		{"(*ServerConn).Write", Method, 0},
    -		{"BufferPool", Type, 6},
    -		{"ClientConn", Type, 0},
    -		{"DumpRequest", Func, 0},
    -		{"DumpRequestOut", Func, 0},
    -		{"DumpResponse", Func, 0},
    -		{"ErrClosed", Var, 0},
    -		{"ErrLineTooLong", Var, 0},
    -		{"ErrPersistEOF", Var, 0},
    -		{"ErrPipeline", Var, 0},
    -		{"NewChunkedReader", Func, 0},
    -		{"NewChunkedWriter", Func, 0},
    -		{"NewClientConn", Func, 0},
    -		{"NewProxyClientConn", Func, 0},
    -		{"NewServerConn", Func, 0},
    -		{"NewSingleHostReverseProxy", Func, 0},
    -		{"ProxyRequest", Type, 20},
    -		{"ProxyRequest.In", Field, 20},
    -		{"ProxyRequest.Out", Field, 20},
    -		{"ReverseProxy", Type, 0},
    -		{"ReverseProxy.BufferPool", Field, 6},
    -		{"ReverseProxy.Director", Field, 0},
    -		{"ReverseProxy.ErrorHandler", Field, 11},
    -		{"ReverseProxy.ErrorLog", Field, 4},
    -		{"ReverseProxy.FlushInterval", Field, 0},
    -		{"ReverseProxy.ModifyResponse", Field, 8},
    -		{"ReverseProxy.Rewrite", Field, 20},
    -		{"ReverseProxy.Transport", Field, 0},
    -		{"ServerConn", Type, 0},
    +		{"(*ClientConn).Close", Method, 0, ""},
    +		{"(*ClientConn).Do", Method, 0, ""},
    +		{"(*ClientConn).Hijack", Method, 0, ""},
    +		{"(*ClientConn).Pending", Method, 0, ""},
    +		{"(*ClientConn).Read", Method, 0, ""},
    +		{"(*ClientConn).Write", Method, 0, ""},
    +		{"(*ProxyRequest).SetURL", Method, 20, ""},
    +		{"(*ProxyRequest).SetXForwarded", Method, 20, ""},
    +		{"(*ReverseProxy).ServeHTTP", Method, 0, ""},
    +		{"(*ServerConn).Close", Method, 0, ""},
    +		{"(*ServerConn).Hijack", Method, 0, ""},
    +		{"(*ServerConn).Pending", Method, 0, ""},
    +		{"(*ServerConn).Read", Method, 0, ""},
    +		{"(*ServerConn).Write", Method, 0, ""},
    +		{"BufferPool", Type, 6, ""},
    +		{"ClientConn", Type, 0, ""},
    +		{"DumpRequest", Func, 0, "func(req *http.Request, body bool) ([]byte, error)"},
    +		{"DumpRequestOut", Func, 0, "func(req *http.Request, body bool) ([]byte, error)"},
    +		{"DumpResponse", Func, 0, "func(resp *http.Response, body bool) ([]byte, error)"},
    +		{"ErrClosed", Var, 0, ""},
    +		{"ErrLineTooLong", Var, 0, ""},
    +		{"ErrPersistEOF", Var, 0, ""},
    +		{"ErrPipeline", Var, 0, ""},
    +		{"NewChunkedReader", Func, 0, "func(r io.Reader) io.Reader"},
    +		{"NewChunkedWriter", Func, 0, "func(w io.Writer) io.WriteCloser"},
    +		{"NewClientConn", Func, 0, "func(c net.Conn, r *bufio.Reader) *ClientConn"},
    +		{"NewProxyClientConn", Func, 0, "func(c net.Conn, r *bufio.Reader) *ClientConn"},
    +		{"NewServerConn", Func, 0, "func(c net.Conn, r *bufio.Reader) *ServerConn"},
    +		{"NewSingleHostReverseProxy", Func, 0, "func(target *url.URL) *ReverseProxy"},
    +		{"ProxyRequest", Type, 20, ""},
    +		{"ProxyRequest.In", Field, 20, ""},
    +		{"ProxyRequest.Out", Field, 20, ""},
    +		{"ReverseProxy", Type, 0, ""},
    +		{"ReverseProxy.BufferPool", Field, 6, ""},
    +		{"ReverseProxy.Director", Field, 0, ""},
    +		{"ReverseProxy.ErrorHandler", Field, 11, ""},
    +		{"ReverseProxy.ErrorLog", Field, 4, ""},
    +		{"ReverseProxy.FlushInterval", Field, 0, ""},
    +		{"ReverseProxy.ModifyResponse", Field, 8, ""},
    +		{"ReverseProxy.Rewrite", Field, 20, ""},
    +		{"ReverseProxy.Transport", Field, 0, ""},
    +		{"ServerConn", Type, 0, ""},
     	},
     	"net/http/pprof": {
    -		{"Cmdline", Func, 0},
    -		{"Handler", Func, 0},
    -		{"Index", Func, 0},
    -		{"Profile", Func, 0},
    -		{"Symbol", Func, 0},
    -		{"Trace", Func, 5},
    +		{"Cmdline", Func, 0, "func(w http.ResponseWriter, r *http.Request)"},
    +		{"Handler", Func, 0, "func(name string) http.Handler"},
    +		{"Index", Func, 0, "func(w http.ResponseWriter, r *http.Request)"},
    +		{"Profile", Func, 0, "func(w http.ResponseWriter, r *http.Request)"},
    +		{"Symbol", Func, 0, "func(w http.ResponseWriter, r *http.Request)"},
    +		{"Trace", Func, 5, "func(w http.ResponseWriter, r *http.Request)"},
     	},
     	"net/mail": {
    -		{"(*Address).String", Method, 0},
    -		{"(*AddressParser).Parse", Method, 5},
    -		{"(*AddressParser).ParseList", Method, 5},
    -		{"(Header).AddressList", Method, 0},
    -		{"(Header).Date", Method, 0},
    -		{"(Header).Get", Method, 0},
    -		{"Address", Type, 0},
    -		{"Address.Address", Field, 0},
    -		{"Address.Name", Field, 0},
    -		{"AddressParser", Type, 5},
    -		{"AddressParser.WordDecoder", Field, 5},
    -		{"ErrHeaderNotPresent", Var, 0},
    -		{"Header", Type, 0},
    -		{"Message", Type, 0},
    -		{"Message.Body", Field, 0},
    -		{"Message.Header", Field, 0},
    -		{"ParseAddress", Func, 1},
    -		{"ParseAddressList", Func, 1},
    -		{"ParseDate", Func, 8},
    -		{"ReadMessage", Func, 0},
    +		{"(*Address).String", Method, 0, ""},
    +		{"(*AddressParser).Parse", Method, 5, ""},
    +		{"(*AddressParser).ParseList", Method, 5, ""},
    +		{"(Header).AddressList", Method, 0, ""},
    +		{"(Header).Date", Method, 0, ""},
    +		{"(Header).Get", Method, 0, ""},
    +		{"Address", Type, 0, ""},
    +		{"Address.Address", Field, 0, ""},
    +		{"Address.Name", Field, 0, ""},
    +		{"AddressParser", Type, 5, ""},
    +		{"AddressParser.WordDecoder", Field, 5, ""},
    +		{"ErrHeaderNotPresent", Var, 0, ""},
    +		{"Header", Type, 0, ""},
    +		{"Message", Type, 0, ""},
    +		{"Message.Body", Field, 0, ""},
    +		{"Message.Header", Field, 0, ""},
    +		{"ParseAddress", Func, 1, "func(address string) (*Address, error)"},
    +		{"ParseAddressList", Func, 1, "func(list string) ([]*Address, error)"},
    +		{"ParseDate", Func, 8, "func(date string) (time.Time, error)"},
    +		{"ReadMessage", Func, 0, "func(r io.Reader) (msg *Message, err error)"},
     	},
     	"net/netip": {
    -		{"(*Addr).UnmarshalBinary", Method, 18},
    -		{"(*Addr).UnmarshalText", Method, 18},
    -		{"(*AddrPort).UnmarshalBinary", Method, 18},
    -		{"(*AddrPort).UnmarshalText", Method, 18},
    -		{"(*Prefix).UnmarshalBinary", Method, 18},
    -		{"(*Prefix).UnmarshalText", Method, 18},
    -		{"(Addr).AppendTo", Method, 18},
    -		{"(Addr).As16", Method, 18},
    -		{"(Addr).As4", Method, 18},
    -		{"(Addr).AsSlice", Method, 18},
    -		{"(Addr).BitLen", Method, 18},
    -		{"(Addr).Compare", Method, 18},
    -		{"(Addr).Is4", Method, 18},
    -		{"(Addr).Is4In6", Method, 18},
    -		{"(Addr).Is6", Method, 18},
    -		{"(Addr).IsGlobalUnicast", Method, 18},
    -		{"(Addr).IsInterfaceLocalMulticast", Method, 18},
    -		{"(Addr).IsLinkLocalMulticast", Method, 18},
    -		{"(Addr).IsLinkLocalUnicast", Method, 18},
    -		{"(Addr).IsLoopback", Method, 18},
    -		{"(Addr).IsMulticast", Method, 18},
    -		{"(Addr).IsPrivate", Method, 18},
    -		{"(Addr).IsUnspecified", Method, 18},
    -		{"(Addr).IsValid", Method, 18},
    -		{"(Addr).Less", Method, 18},
    -		{"(Addr).MarshalBinary", Method, 18},
    -		{"(Addr).MarshalText", Method, 18},
    -		{"(Addr).Next", Method, 18},
    -		{"(Addr).Prefix", Method, 18},
    -		{"(Addr).Prev", Method, 18},
    -		{"(Addr).String", Method, 18},
    -		{"(Addr).StringExpanded", Method, 18},
    -		{"(Addr).Unmap", Method, 18},
    -		{"(Addr).WithZone", Method, 18},
    -		{"(Addr).Zone", Method, 18},
    -		{"(AddrPort).Addr", Method, 18},
    -		{"(AddrPort).AppendTo", Method, 18},
    -		{"(AddrPort).Compare", Method, 22},
    -		{"(AddrPort).IsValid", Method, 18},
    -		{"(AddrPort).MarshalBinary", Method, 18},
    -		{"(AddrPort).MarshalText", Method, 18},
    -		{"(AddrPort).Port", Method, 18},
    -		{"(AddrPort).String", Method, 18},
    -		{"(Prefix).Addr", Method, 18},
    -		{"(Prefix).AppendTo", Method, 18},
    -		{"(Prefix).Bits", Method, 18},
    -		{"(Prefix).Contains", Method, 18},
    -		{"(Prefix).IsSingleIP", Method, 18},
    -		{"(Prefix).IsValid", Method, 18},
    -		{"(Prefix).MarshalBinary", Method, 18},
    -		{"(Prefix).MarshalText", Method, 18},
    -		{"(Prefix).Masked", Method, 18},
    -		{"(Prefix).Overlaps", Method, 18},
    -		{"(Prefix).String", Method, 18},
    -		{"Addr", Type, 18},
    -		{"AddrFrom16", Func, 18},
    -		{"AddrFrom4", Func, 18},
    -		{"AddrFromSlice", Func, 18},
    -		{"AddrPort", Type, 18},
    -		{"AddrPortFrom", Func, 18},
    -		{"IPv4Unspecified", Func, 18},
    -		{"IPv6LinkLocalAllNodes", Func, 18},
    -		{"IPv6LinkLocalAllRouters", Func, 20},
    -		{"IPv6Loopback", Func, 20},
    -		{"IPv6Unspecified", Func, 18},
    -		{"MustParseAddr", Func, 18},
    -		{"MustParseAddrPort", Func, 18},
    -		{"MustParsePrefix", Func, 18},
    -		{"ParseAddr", Func, 18},
    -		{"ParseAddrPort", Func, 18},
    -		{"ParsePrefix", Func, 18},
    -		{"Prefix", Type, 18},
    -		{"PrefixFrom", Func, 18},
    +		{"(*Addr).UnmarshalBinary", Method, 18, ""},
    +		{"(*Addr).UnmarshalText", Method, 18, ""},
    +		{"(*AddrPort).UnmarshalBinary", Method, 18, ""},
    +		{"(*AddrPort).UnmarshalText", Method, 18, ""},
    +		{"(*Prefix).UnmarshalBinary", Method, 18, ""},
    +		{"(*Prefix).UnmarshalText", Method, 18, ""},
    +		{"(Addr).AppendBinary", Method, 24, ""},
    +		{"(Addr).AppendText", Method, 24, ""},
    +		{"(Addr).AppendTo", Method, 18, ""},
    +		{"(Addr).As16", Method, 18, ""},
    +		{"(Addr).As4", Method, 18, ""},
    +		{"(Addr).AsSlice", Method, 18, ""},
    +		{"(Addr).BitLen", Method, 18, ""},
    +		{"(Addr).Compare", Method, 18, ""},
    +		{"(Addr).Is4", Method, 18, ""},
    +		{"(Addr).Is4In6", Method, 18, ""},
    +		{"(Addr).Is6", Method, 18, ""},
    +		{"(Addr).IsGlobalUnicast", Method, 18, ""},
    +		{"(Addr).IsInterfaceLocalMulticast", Method, 18, ""},
    +		{"(Addr).IsLinkLocalMulticast", Method, 18, ""},
    +		{"(Addr).IsLinkLocalUnicast", Method, 18, ""},
    +		{"(Addr).IsLoopback", Method, 18, ""},
    +		{"(Addr).IsMulticast", Method, 18, ""},
    +		{"(Addr).IsPrivate", Method, 18, ""},
    +		{"(Addr).IsUnspecified", Method, 18, ""},
    +		{"(Addr).IsValid", Method, 18, ""},
    +		{"(Addr).Less", Method, 18, ""},
    +		{"(Addr).MarshalBinary", Method, 18, ""},
    +		{"(Addr).MarshalText", Method, 18, ""},
    +		{"(Addr).Next", Method, 18, ""},
    +		{"(Addr).Prefix", Method, 18, ""},
    +		{"(Addr).Prev", Method, 18, ""},
    +		{"(Addr).String", Method, 18, ""},
    +		{"(Addr).StringExpanded", Method, 18, ""},
    +		{"(Addr).Unmap", Method, 18, ""},
    +		{"(Addr).WithZone", Method, 18, ""},
    +		{"(Addr).Zone", Method, 18, ""},
    +		{"(AddrPort).Addr", Method, 18, ""},
    +		{"(AddrPort).AppendBinary", Method, 24, ""},
    +		{"(AddrPort).AppendText", Method, 24, ""},
    +		{"(AddrPort).AppendTo", Method, 18, ""},
    +		{"(AddrPort).Compare", Method, 22, ""},
    +		{"(AddrPort).IsValid", Method, 18, ""},
    +		{"(AddrPort).MarshalBinary", Method, 18, ""},
    +		{"(AddrPort).MarshalText", Method, 18, ""},
    +		{"(AddrPort).Port", Method, 18, ""},
    +		{"(AddrPort).String", Method, 18, ""},
    +		{"(Prefix).Addr", Method, 18, ""},
    +		{"(Prefix).AppendBinary", Method, 24, ""},
    +		{"(Prefix).AppendText", Method, 24, ""},
    +		{"(Prefix).AppendTo", Method, 18, ""},
    +		{"(Prefix).Bits", Method, 18, ""},
    +		{"(Prefix).Contains", Method, 18, ""},
    +		{"(Prefix).IsSingleIP", Method, 18, ""},
    +		{"(Prefix).IsValid", Method, 18, ""},
    +		{"(Prefix).MarshalBinary", Method, 18, ""},
    +		{"(Prefix).MarshalText", Method, 18, ""},
    +		{"(Prefix).Masked", Method, 18, ""},
    +		{"(Prefix).Overlaps", Method, 18, ""},
    +		{"(Prefix).String", Method, 18, ""},
    +		{"Addr", Type, 18, ""},
    +		{"AddrFrom16", Func, 18, "func(addr [16]byte) Addr"},
    +		{"AddrFrom4", Func, 18, "func(addr [4]byte) Addr"},
    +		{"AddrFromSlice", Func, 18, "func(slice []byte) (ip Addr, ok bool)"},
    +		{"AddrPort", Type, 18, ""},
    +		{"AddrPortFrom", Func, 18, "func(ip Addr, port uint16) AddrPort"},
    +		{"IPv4Unspecified", Func, 18, "func() Addr"},
    +		{"IPv6LinkLocalAllNodes", Func, 18, "func() Addr"},
    +		{"IPv6LinkLocalAllRouters", Func, 20, "func() Addr"},
    +		{"IPv6Loopback", Func, 20, "func() Addr"},
    +		{"IPv6Unspecified", Func, 18, "func() Addr"},
    +		{"MustParseAddr", Func, 18, "func(s string) Addr"},
    +		{"MustParseAddrPort", Func, 18, "func(s string) AddrPort"},
    +		{"MustParsePrefix", Func, 18, "func(s string) Prefix"},
    +		{"ParseAddr", Func, 18, "func(s string) (Addr, error)"},
    +		{"ParseAddrPort", Func, 18, "func(s string) (AddrPort, error)"},
    +		{"ParsePrefix", Func, 18, "func(s string) (Prefix, error)"},
    +		{"Prefix", Type, 18, ""},
    +		{"PrefixFrom", Func, 18, "func(ip Addr, bits int) Prefix"},
     	},
     	"net/rpc": {
    -		{"(*Client).Call", Method, 0},
    -		{"(*Client).Close", Method, 0},
    -		{"(*Client).Go", Method, 0},
    -		{"(*Server).Accept", Method, 0},
    -		{"(*Server).HandleHTTP", Method, 0},
    -		{"(*Server).Register", Method, 0},
    -		{"(*Server).RegisterName", Method, 0},
    -		{"(*Server).ServeCodec", Method, 0},
    -		{"(*Server).ServeConn", Method, 0},
    -		{"(*Server).ServeHTTP", Method, 0},
    -		{"(*Server).ServeRequest", Method, 0},
    -		{"(ServerError).Error", Method, 0},
    -		{"Accept", Func, 0},
    -		{"Call", Type, 0},
    -		{"Call.Args", Field, 0},
    -		{"Call.Done", Field, 0},
    -		{"Call.Error", Field, 0},
    -		{"Call.Reply", Field, 0},
    -		{"Call.ServiceMethod", Field, 0},
    -		{"Client", Type, 0},
    -		{"ClientCodec", Type, 0},
    -		{"DefaultDebugPath", Const, 0},
    -		{"DefaultRPCPath", Const, 0},
    -		{"DefaultServer", Var, 0},
    -		{"Dial", Func, 0},
    -		{"DialHTTP", Func, 0},
    -		{"DialHTTPPath", Func, 0},
    -		{"ErrShutdown", Var, 0},
    -		{"HandleHTTP", Func, 0},
    -		{"NewClient", Func, 0},
    -		{"NewClientWithCodec", Func, 0},
    -		{"NewServer", Func, 0},
    -		{"Register", Func, 0},
    -		{"RegisterName", Func, 0},
    -		{"Request", Type, 0},
    -		{"Request.Seq", Field, 0},
    -		{"Request.ServiceMethod", Field, 0},
    -		{"Response", Type, 0},
    -		{"Response.Error", Field, 0},
    -		{"Response.Seq", Field, 0},
    -		{"Response.ServiceMethod", Field, 0},
    -		{"ServeCodec", Func, 0},
    -		{"ServeConn", Func, 0},
    -		{"ServeRequest", Func, 0},
    -		{"Server", Type, 0},
    -		{"ServerCodec", Type, 0},
    -		{"ServerError", Type, 0},
    +		{"(*Client).Call", Method, 0, ""},
    +		{"(*Client).Close", Method, 0, ""},
    +		{"(*Client).Go", Method, 0, ""},
    +		{"(*Server).Accept", Method, 0, ""},
    +		{"(*Server).HandleHTTP", Method, 0, ""},
    +		{"(*Server).Register", Method, 0, ""},
    +		{"(*Server).RegisterName", Method, 0, ""},
    +		{"(*Server).ServeCodec", Method, 0, ""},
    +		{"(*Server).ServeConn", Method, 0, ""},
    +		{"(*Server).ServeHTTP", Method, 0, ""},
    +		{"(*Server).ServeRequest", Method, 0, ""},
    +		{"(ServerError).Error", Method, 0, ""},
    +		{"Accept", Func, 0, "func(lis net.Listener)"},
    +		{"Call", Type, 0, ""},
    +		{"Call.Args", Field, 0, ""},
    +		{"Call.Done", Field, 0, ""},
    +		{"Call.Error", Field, 0, ""},
    +		{"Call.Reply", Field, 0, ""},
    +		{"Call.ServiceMethod", Field, 0, ""},
    +		{"Client", Type, 0, ""},
    +		{"ClientCodec", Type, 0, ""},
    +		{"DefaultDebugPath", Const, 0, ""},
    +		{"DefaultRPCPath", Const, 0, ""},
    +		{"DefaultServer", Var, 0, ""},
    +		{"Dial", Func, 0, "func(network string, address string) (*Client, error)"},
    +		{"DialHTTP", Func, 0, "func(network string, address string) (*Client, error)"},
    +		{"DialHTTPPath", Func, 0, "func(network string, address string, path string) (*Client, error)"},
    +		{"ErrShutdown", Var, 0, ""},
    +		{"HandleHTTP", Func, 0, "func()"},
    +		{"NewClient", Func, 0, "func(conn io.ReadWriteCloser) *Client"},
    +		{"NewClientWithCodec", Func, 0, "func(codec ClientCodec) *Client"},
    +		{"NewServer", Func, 0, "func() *Server"},
    +		{"Register", Func, 0, "func(rcvr any) error"},
    +		{"RegisterName", Func, 0, "func(name string, rcvr any) error"},
    +		{"Request", Type, 0, ""},
    +		{"Request.Seq", Field, 0, ""},
    +		{"Request.ServiceMethod", Field, 0, ""},
    +		{"Response", Type, 0, ""},
    +		{"Response.Error", Field, 0, ""},
    +		{"Response.Seq", Field, 0, ""},
    +		{"Response.ServiceMethod", Field, 0, ""},
    +		{"ServeCodec", Func, 0, "func(codec ServerCodec)"},
    +		{"ServeConn", Func, 0, "func(conn io.ReadWriteCloser)"},
    +		{"ServeRequest", Func, 0, "func(codec ServerCodec) error"},
    +		{"Server", Type, 0, ""},
    +		{"ServerCodec", Type, 0, ""},
    +		{"ServerError", Type, 0, ""},
     	},
     	"net/rpc/jsonrpc": {
    -		{"Dial", Func, 0},
    -		{"NewClient", Func, 0},
    -		{"NewClientCodec", Func, 0},
    -		{"NewServerCodec", Func, 0},
    -		{"ServeConn", Func, 0},
    +		{"Dial", Func, 0, "func(network string, address string) (*rpc.Client, error)"},
    +		{"NewClient", Func, 0, "func(conn io.ReadWriteCloser) *rpc.Client"},
    +		{"NewClientCodec", Func, 0, "func(conn io.ReadWriteCloser) rpc.ClientCodec"},
    +		{"NewServerCodec", Func, 0, "func(conn io.ReadWriteCloser) rpc.ServerCodec"},
    +		{"ServeConn", Func, 0, "func(conn io.ReadWriteCloser)"},
     	},
     	"net/smtp": {
    -		{"(*Client).Auth", Method, 0},
    -		{"(*Client).Close", Method, 2},
    -		{"(*Client).Data", Method, 0},
    -		{"(*Client).Extension", Method, 0},
    -		{"(*Client).Hello", Method, 1},
    -		{"(*Client).Mail", Method, 0},
    -		{"(*Client).Noop", Method, 10},
    -		{"(*Client).Quit", Method, 0},
    -		{"(*Client).Rcpt", Method, 0},
    -		{"(*Client).Reset", Method, 0},
    -		{"(*Client).StartTLS", Method, 0},
    -		{"(*Client).TLSConnectionState", Method, 5},
    -		{"(*Client).Verify", Method, 0},
    -		{"Auth", Type, 0},
    -		{"CRAMMD5Auth", Func, 0},
    -		{"Client", Type, 0},
    -		{"Client.Text", Field, 0},
    -		{"Dial", Func, 0},
    -		{"NewClient", Func, 0},
    -		{"PlainAuth", Func, 0},
    -		{"SendMail", Func, 0},
    -		{"ServerInfo", Type, 0},
    -		{"ServerInfo.Auth", Field, 0},
    -		{"ServerInfo.Name", Field, 0},
    -		{"ServerInfo.TLS", Field, 0},
    +		{"(*Client).Auth", Method, 0, ""},
    +		{"(*Client).Close", Method, 2, ""},
    +		{"(*Client).Data", Method, 0, ""},
    +		{"(*Client).Extension", Method, 0, ""},
    +		{"(*Client).Hello", Method, 1, ""},
    +		{"(*Client).Mail", Method, 0, ""},
    +		{"(*Client).Noop", Method, 10, ""},
    +		{"(*Client).Quit", Method, 0, ""},
    +		{"(*Client).Rcpt", Method, 0, ""},
    +		{"(*Client).Reset", Method, 0, ""},
    +		{"(*Client).StartTLS", Method, 0, ""},
    +		{"(*Client).TLSConnectionState", Method, 5, ""},
    +		{"(*Client).Verify", Method, 0, ""},
    +		{"Auth", Type, 0, ""},
    +		{"CRAMMD5Auth", Func, 0, "func(username string, secret string) Auth"},
    +		{"Client", Type, 0, ""},
    +		{"Client.Text", Field, 0, ""},
    +		{"Dial", Func, 0, "func(addr string) (*Client, error)"},
    +		{"NewClient", Func, 0, "func(conn net.Conn, host string) (*Client, error)"},
    +		{"PlainAuth", Func, 0, "func(identity string, username string, password string, host string) Auth"},
    +		{"SendMail", Func, 0, "func(addr string, a Auth, from string, to []string, msg []byte) error"},
    +		{"ServerInfo", Type, 0, ""},
    +		{"ServerInfo.Auth", Field, 0, ""},
    +		{"ServerInfo.Name", Field, 0, ""},
    +		{"ServerInfo.TLS", Field, 0, ""},
     	},
     	"net/textproto": {
    -		{"(*Conn).Close", Method, 0},
    -		{"(*Conn).Cmd", Method, 0},
    -		{"(*Conn).DotReader", Method, 0},
    -		{"(*Conn).DotWriter", Method, 0},
    -		{"(*Conn).EndRequest", Method, 0},
    -		{"(*Conn).EndResponse", Method, 0},
    -		{"(*Conn).Next", Method, 0},
    -		{"(*Conn).PrintfLine", Method, 0},
    -		{"(*Conn).ReadCodeLine", Method, 0},
    -		{"(*Conn).ReadContinuedLine", Method, 0},
    -		{"(*Conn).ReadContinuedLineBytes", Method, 0},
    -		{"(*Conn).ReadDotBytes", Method, 0},
    -		{"(*Conn).ReadDotLines", Method, 0},
    -		{"(*Conn).ReadLine", Method, 0},
    -		{"(*Conn).ReadLineBytes", Method, 0},
    -		{"(*Conn).ReadMIMEHeader", Method, 0},
    -		{"(*Conn).ReadResponse", Method, 0},
    -		{"(*Conn).StartRequest", Method, 0},
    -		{"(*Conn).StartResponse", Method, 0},
    -		{"(*Error).Error", Method, 0},
    -		{"(*Pipeline).EndRequest", Method, 0},
    -		{"(*Pipeline).EndResponse", Method, 0},
    -		{"(*Pipeline).Next", Method, 0},
    -		{"(*Pipeline).StartRequest", Method, 0},
    -		{"(*Pipeline).StartResponse", Method, 0},
    -		{"(*Reader).DotReader", Method, 0},
    -		{"(*Reader).ReadCodeLine", Method, 0},
    -		{"(*Reader).ReadContinuedLine", Method, 0},
    -		{"(*Reader).ReadContinuedLineBytes", Method, 0},
    -		{"(*Reader).ReadDotBytes", Method, 0},
    -		{"(*Reader).ReadDotLines", Method, 0},
    -		{"(*Reader).ReadLine", Method, 0},
    -		{"(*Reader).ReadLineBytes", Method, 0},
    -		{"(*Reader).ReadMIMEHeader", Method, 0},
    -		{"(*Reader).ReadResponse", Method, 0},
    -		{"(*Writer).DotWriter", Method, 0},
    -		{"(*Writer).PrintfLine", Method, 0},
    -		{"(MIMEHeader).Add", Method, 0},
    -		{"(MIMEHeader).Del", Method, 0},
    -		{"(MIMEHeader).Get", Method, 0},
    -		{"(MIMEHeader).Set", Method, 0},
    -		{"(MIMEHeader).Values", Method, 14},
    -		{"(ProtocolError).Error", Method, 0},
    -		{"CanonicalMIMEHeaderKey", Func, 0},
    -		{"Conn", Type, 0},
    -		{"Conn.Pipeline", Field, 0},
    -		{"Conn.Reader", Field, 0},
    -		{"Conn.Writer", Field, 0},
    -		{"Dial", Func, 0},
    -		{"Error", Type, 0},
    -		{"Error.Code", Field, 0},
    -		{"Error.Msg", Field, 0},
    -		{"MIMEHeader", Type, 0},
    -		{"NewConn", Func, 0},
    -		{"NewReader", Func, 0},
    -		{"NewWriter", Func, 0},
    -		{"Pipeline", Type, 0},
    -		{"ProtocolError", Type, 0},
    -		{"Reader", Type, 0},
    -		{"Reader.R", Field, 0},
    -		{"TrimBytes", Func, 1},
    -		{"TrimString", Func, 1},
    -		{"Writer", Type, 0},
    -		{"Writer.W", Field, 0},
    +		{"(*Conn).Close", Method, 0, ""},
    +		{"(*Conn).Cmd", Method, 0, ""},
    +		{"(*Conn).DotReader", Method, 0, ""},
    +		{"(*Conn).DotWriter", Method, 0, ""},
    +		{"(*Conn).EndRequest", Method, 0, ""},
    +		{"(*Conn).EndResponse", Method, 0, ""},
    +		{"(*Conn).Next", Method, 0, ""},
    +		{"(*Conn).PrintfLine", Method, 0, ""},
    +		{"(*Conn).ReadCodeLine", Method, 0, ""},
    +		{"(*Conn).ReadContinuedLine", Method, 0, ""},
    +		{"(*Conn).ReadContinuedLineBytes", Method, 0, ""},
    +		{"(*Conn).ReadDotBytes", Method, 0, ""},
    +		{"(*Conn).ReadDotLines", Method, 0, ""},
    +		{"(*Conn).ReadLine", Method, 0, ""},
    +		{"(*Conn).ReadLineBytes", Method, 0, ""},
    +		{"(*Conn).ReadMIMEHeader", Method, 0, ""},
    +		{"(*Conn).ReadResponse", Method, 0, ""},
    +		{"(*Conn).StartRequest", Method, 0, ""},
    +		{"(*Conn).StartResponse", Method, 0, ""},
    +		{"(*Error).Error", Method, 0, ""},
    +		{"(*Pipeline).EndRequest", Method, 0, ""},
    +		{"(*Pipeline).EndResponse", Method, 0, ""},
    +		{"(*Pipeline).Next", Method, 0, ""},
    +		{"(*Pipeline).StartRequest", Method, 0, ""},
    +		{"(*Pipeline).StartResponse", Method, 0, ""},
    +		{"(*Reader).DotReader", Method, 0, ""},
    +		{"(*Reader).ReadCodeLine", Method, 0, ""},
    +		{"(*Reader).ReadContinuedLine", Method, 0, ""},
    +		{"(*Reader).ReadContinuedLineBytes", Method, 0, ""},
    +		{"(*Reader).ReadDotBytes", Method, 0, ""},
    +		{"(*Reader).ReadDotLines", Method, 0, ""},
    +		{"(*Reader).ReadLine", Method, 0, ""},
    +		{"(*Reader).ReadLineBytes", Method, 0, ""},
    +		{"(*Reader).ReadMIMEHeader", Method, 0, ""},
    +		{"(*Reader).ReadResponse", Method, 0, ""},
    +		{"(*Writer).DotWriter", Method, 0, ""},
    +		{"(*Writer).PrintfLine", Method, 0, ""},
    +		{"(MIMEHeader).Add", Method, 0, ""},
    +		{"(MIMEHeader).Del", Method, 0, ""},
    +		{"(MIMEHeader).Get", Method, 0, ""},
    +		{"(MIMEHeader).Set", Method, 0, ""},
    +		{"(MIMEHeader).Values", Method, 14, ""},
    +		{"(ProtocolError).Error", Method, 0, ""},
    +		{"CanonicalMIMEHeaderKey", Func, 0, "func(s string) string"},
    +		{"Conn", Type, 0, ""},
    +		{"Conn.Pipeline", Field, 0, ""},
    +		{"Conn.Reader", Field, 0, ""},
    +		{"Conn.Writer", Field, 0, ""},
    +		{"Dial", Func, 0, "func(network string, addr string) (*Conn, error)"},
    +		{"Error", Type, 0, ""},
    +		{"Error.Code", Field, 0, ""},
    +		{"Error.Msg", Field, 0, ""},
    +		{"MIMEHeader", Type, 0, ""},
    +		{"NewConn", Func, 0, "func(conn io.ReadWriteCloser) *Conn"},
    +		{"NewReader", Func, 0, "func(r *bufio.Reader) *Reader"},
    +		{"NewWriter", Func, 0, "func(w *bufio.Writer) *Writer"},
    +		{"Pipeline", Type, 0, ""},
    +		{"ProtocolError", Type, 0, ""},
    +		{"Reader", Type, 0, ""},
    +		{"Reader.R", Field, 0, ""},
    +		{"TrimBytes", Func, 1, "func(b []byte) []byte"},
    +		{"TrimString", Func, 1, "func(s string) string"},
    +		{"Writer", Type, 0, ""},
    +		{"Writer.W", Field, 0, ""},
     	},
     	"net/url": {
    -		{"(*Error).Error", Method, 0},
    -		{"(*Error).Temporary", Method, 6},
    -		{"(*Error).Timeout", Method, 6},
    -		{"(*Error).Unwrap", Method, 13},
    -		{"(*URL).EscapedFragment", Method, 15},
    -		{"(*URL).EscapedPath", Method, 5},
    -		{"(*URL).Hostname", Method, 8},
    -		{"(*URL).IsAbs", Method, 0},
    -		{"(*URL).JoinPath", Method, 19},
    -		{"(*URL).MarshalBinary", Method, 8},
    -		{"(*URL).Parse", Method, 0},
    -		{"(*URL).Port", Method, 8},
    -		{"(*URL).Query", Method, 0},
    -		{"(*URL).Redacted", Method, 15},
    -		{"(*URL).RequestURI", Method, 0},
    -		{"(*URL).ResolveReference", Method, 0},
    -		{"(*URL).String", Method, 0},
    -		{"(*URL).UnmarshalBinary", Method, 8},
    -		{"(*Userinfo).Password", Method, 0},
    -		{"(*Userinfo).String", Method, 0},
    -		{"(*Userinfo).Username", Method, 0},
    -		{"(EscapeError).Error", Method, 0},
    -		{"(InvalidHostError).Error", Method, 6},
    -		{"(Values).Add", Method, 0},
    -		{"(Values).Del", Method, 0},
    -		{"(Values).Encode", Method, 0},
    -		{"(Values).Get", Method, 0},
    -		{"(Values).Has", Method, 17},
    -		{"(Values).Set", Method, 0},
    -		{"Error", Type, 0},
    -		{"Error.Err", Field, 0},
    -		{"Error.Op", Field, 0},
    -		{"Error.URL", Field, 0},
    -		{"EscapeError", Type, 0},
    -		{"InvalidHostError", Type, 6},
    -		{"JoinPath", Func, 19},
    -		{"Parse", Func, 0},
    -		{"ParseQuery", Func, 0},
    -		{"ParseRequestURI", Func, 0},
    -		{"PathEscape", Func, 8},
    -		{"PathUnescape", Func, 8},
    -		{"QueryEscape", Func, 0},
    -		{"QueryUnescape", Func, 0},
    -		{"URL", Type, 0},
    -		{"URL.ForceQuery", Field, 7},
    -		{"URL.Fragment", Field, 0},
    -		{"URL.Host", Field, 0},
    -		{"URL.OmitHost", Field, 19},
    -		{"URL.Opaque", Field, 0},
    -		{"URL.Path", Field, 0},
    -		{"URL.RawFragment", Field, 15},
    -		{"URL.RawPath", Field, 5},
    -		{"URL.RawQuery", Field, 0},
    -		{"URL.Scheme", Field, 0},
    -		{"URL.User", Field, 0},
    -		{"User", Func, 0},
    -		{"UserPassword", Func, 0},
    -		{"Userinfo", Type, 0},
    -		{"Values", Type, 0},
    +		{"(*Error).Error", Method, 0, ""},
    +		{"(*Error).Temporary", Method, 6, ""},
    +		{"(*Error).Timeout", Method, 6, ""},
    +		{"(*Error).Unwrap", Method, 13, ""},
    +		{"(*URL).AppendBinary", Method, 24, ""},
    +		{"(*URL).EscapedFragment", Method, 15, ""},
    +		{"(*URL).EscapedPath", Method, 5, ""},
    +		{"(*URL).Hostname", Method, 8, ""},
    +		{"(*URL).IsAbs", Method, 0, ""},
    +		{"(*URL).JoinPath", Method, 19, ""},
    +		{"(*URL).MarshalBinary", Method, 8, ""},
    +		{"(*URL).Parse", Method, 0, ""},
    +		{"(*URL).Port", Method, 8, ""},
    +		{"(*URL).Query", Method, 0, ""},
    +		{"(*URL).Redacted", Method, 15, ""},
    +		{"(*URL).RequestURI", Method, 0, ""},
    +		{"(*URL).ResolveReference", Method, 0, ""},
    +		{"(*URL).String", Method, 0, ""},
    +		{"(*URL).UnmarshalBinary", Method, 8, ""},
    +		{"(*Userinfo).Password", Method, 0, ""},
    +		{"(*Userinfo).String", Method, 0, ""},
    +		{"(*Userinfo).Username", Method, 0, ""},
    +		{"(EscapeError).Error", Method, 0, ""},
    +		{"(InvalidHostError).Error", Method, 6, ""},
    +		{"(Values).Add", Method, 0, ""},
    +		{"(Values).Del", Method, 0, ""},
    +		{"(Values).Encode", Method, 0, ""},
    +		{"(Values).Get", Method, 0, ""},
    +		{"(Values).Has", Method, 17, ""},
    +		{"(Values).Set", Method, 0, ""},
    +		{"Error", Type, 0, ""},
    +		{"Error.Err", Field, 0, ""},
    +		{"Error.Op", Field, 0, ""},
    +		{"Error.URL", Field, 0, ""},
    +		{"EscapeError", Type, 0, ""},
    +		{"InvalidHostError", Type, 6, ""},
    +		{"JoinPath", Func, 19, "func(base string, elem ...string) (result string, err error)"},
    +		{"Parse", Func, 0, "func(rawURL string) (*URL, error)"},
    +		{"ParseQuery", Func, 0, "func(query string) (Values, error)"},
    +		{"ParseRequestURI", Func, 0, "func(rawURL string) (*URL, error)"},
    +		{"PathEscape", Func, 8, "func(s string) string"},
    +		{"PathUnescape", Func, 8, "func(s string) (string, error)"},
    +		{"QueryEscape", Func, 0, "func(s string) string"},
    +		{"QueryUnescape", Func, 0, "func(s string) (string, error)"},
    +		{"URL", Type, 0, ""},
    +		{"URL.ForceQuery", Field, 7, ""},
    +		{"URL.Fragment", Field, 0, ""},
    +		{"URL.Host", Field, 0, ""},
    +		{"URL.OmitHost", Field, 19, ""},
    +		{"URL.Opaque", Field, 0, ""},
    +		{"URL.Path", Field, 0, ""},
    +		{"URL.RawFragment", Field, 15, ""},
    +		{"URL.RawPath", Field, 5, ""},
    +		{"URL.RawQuery", Field, 0, ""},
    +		{"URL.Scheme", Field, 0, ""},
    +		{"URL.User", Field, 0, ""},
    +		{"User", Func, 0, "func(username string) *Userinfo"},
    +		{"UserPassword", Func, 0, "func(username string, password string) *Userinfo"},
    +		{"Userinfo", Type, 0, ""},
    +		{"Values", Type, 0, ""},
     	},
     	"os": {
    -		{"(*File).Chdir", Method, 0},
    -		{"(*File).Chmod", Method, 0},
    -		{"(*File).Chown", Method, 0},
    -		{"(*File).Close", Method, 0},
    -		{"(*File).Fd", Method, 0},
    -		{"(*File).Name", Method, 0},
    -		{"(*File).Read", Method, 0},
    -		{"(*File).ReadAt", Method, 0},
    -		{"(*File).ReadDir", Method, 16},
    -		{"(*File).ReadFrom", Method, 15},
    -		{"(*File).Readdir", Method, 0},
    -		{"(*File).Readdirnames", Method, 0},
    -		{"(*File).Seek", Method, 0},
    -		{"(*File).SetDeadline", Method, 10},
    -		{"(*File).SetReadDeadline", Method, 10},
    -		{"(*File).SetWriteDeadline", Method, 10},
    -		{"(*File).Stat", Method, 0},
    -		{"(*File).Sync", Method, 0},
    -		{"(*File).SyscallConn", Method, 12},
    -		{"(*File).Truncate", Method, 0},
    -		{"(*File).Write", Method, 0},
    -		{"(*File).WriteAt", Method, 0},
    -		{"(*File).WriteString", Method, 0},
    -		{"(*File).WriteTo", Method, 22},
    -		{"(*LinkError).Error", Method, 0},
    -		{"(*LinkError).Unwrap", Method, 13},
    -		{"(*PathError).Error", Method, 0},
    -		{"(*PathError).Timeout", Method, 10},
    -		{"(*PathError).Unwrap", Method, 13},
    -		{"(*Process).Kill", Method, 0},
    -		{"(*Process).Release", Method, 0},
    -		{"(*Process).Signal", Method, 0},
    -		{"(*Process).Wait", Method, 0},
    -		{"(*ProcessState).ExitCode", Method, 12},
    -		{"(*ProcessState).Exited", Method, 0},
    -		{"(*ProcessState).Pid", Method, 0},
    -		{"(*ProcessState).String", Method, 0},
    -		{"(*ProcessState).Success", Method, 0},
    -		{"(*ProcessState).Sys", Method, 0},
    -		{"(*ProcessState).SysUsage", Method, 0},
    -		{"(*ProcessState).SystemTime", Method, 0},
    -		{"(*ProcessState).UserTime", Method, 0},
    -		{"(*SyscallError).Error", Method, 0},
    -		{"(*SyscallError).Timeout", Method, 10},
    -		{"(*SyscallError).Unwrap", Method, 13},
    -		{"(FileMode).IsDir", Method, 0},
    -		{"(FileMode).IsRegular", Method, 1},
    -		{"(FileMode).Perm", Method, 0},
    -		{"(FileMode).String", Method, 0},
    -		{"Args", Var, 0},
    -		{"Chdir", Func, 0},
    -		{"Chmod", Func, 0},
    -		{"Chown", Func, 0},
    -		{"Chtimes", Func, 0},
    -		{"Clearenv", Func, 0},
    -		{"CopyFS", Func, 23},
    -		{"Create", Func, 0},
    -		{"CreateTemp", Func, 16},
    -		{"DevNull", Const, 0},
    -		{"DirEntry", Type, 16},
    -		{"DirFS", Func, 16},
    -		{"Environ", Func, 0},
    -		{"ErrClosed", Var, 8},
    -		{"ErrDeadlineExceeded", Var, 15},
    -		{"ErrExist", Var, 0},
    -		{"ErrInvalid", Var, 0},
    -		{"ErrNoDeadline", Var, 10},
    -		{"ErrNotExist", Var, 0},
    -		{"ErrPermission", Var, 0},
    -		{"ErrProcessDone", Var, 16},
    -		{"Executable", Func, 8},
    -		{"Exit", Func, 0},
    -		{"Expand", Func, 0},
    -		{"ExpandEnv", Func, 0},
    -		{"File", Type, 0},
    -		{"FileInfo", Type, 0},
    -		{"FileMode", Type, 0},
    -		{"FindProcess", Func, 0},
    -		{"Getegid", Func, 0},
    -		{"Getenv", Func, 0},
    -		{"Geteuid", Func, 0},
    -		{"Getgid", Func, 0},
    -		{"Getgroups", Func, 0},
    -		{"Getpagesize", Func, 0},
    -		{"Getpid", Func, 0},
    -		{"Getppid", Func, 0},
    -		{"Getuid", Func, 0},
    -		{"Getwd", Func, 0},
    -		{"Hostname", Func, 0},
    -		{"Interrupt", Var, 0},
    -		{"IsExist", Func, 0},
    -		{"IsNotExist", Func, 0},
    -		{"IsPathSeparator", Func, 0},
    -		{"IsPermission", Func, 0},
    -		{"IsTimeout", Func, 10},
    -		{"Kill", Var, 0},
    -		{"Lchown", Func, 0},
    -		{"Link", Func, 0},
    -		{"LinkError", Type, 0},
    -		{"LinkError.Err", Field, 0},
    -		{"LinkError.New", Field, 0},
    -		{"LinkError.Old", Field, 0},
    -		{"LinkError.Op", Field, 0},
    -		{"LookupEnv", Func, 5},
    -		{"Lstat", Func, 0},
    -		{"Mkdir", Func, 0},
    -		{"MkdirAll", Func, 0},
    -		{"MkdirTemp", Func, 16},
    -		{"ModeAppend", Const, 0},
    -		{"ModeCharDevice", Const, 0},
    -		{"ModeDevice", Const, 0},
    -		{"ModeDir", Const, 0},
    -		{"ModeExclusive", Const, 0},
    -		{"ModeIrregular", Const, 11},
    -		{"ModeNamedPipe", Const, 0},
    -		{"ModePerm", Const, 0},
    -		{"ModeSetgid", Const, 0},
    -		{"ModeSetuid", Const, 0},
    -		{"ModeSocket", Const, 0},
    -		{"ModeSticky", Const, 0},
    -		{"ModeSymlink", Const, 0},
    -		{"ModeTemporary", Const, 0},
    -		{"ModeType", Const, 0},
    -		{"NewFile", Func, 0},
    -		{"NewSyscallError", Func, 0},
    -		{"O_APPEND", Const, 0},
    -		{"O_CREATE", Const, 0},
    -		{"O_EXCL", Const, 0},
    -		{"O_RDONLY", Const, 0},
    -		{"O_RDWR", Const, 0},
    -		{"O_SYNC", Const, 0},
    -		{"O_TRUNC", Const, 0},
    -		{"O_WRONLY", Const, 0},
    -		{"Open", Func, 0},
    -		{"OpenFile", Func, 0},
    -		{"PathError", Type, 0},
    -		{"PathError.Err", Field, 0},
    -		{"PathError.Op", Field, 0},
    -		{"PathError.Path", Field, 0},
    -		{"PathListSeparator", Const, 0},
    -		{"PathSeparator", Const, 0},
    -		{"Pipe", Func, 0},
    -		{"ProcAttr", Type, 0},
    -		{"ProcAttr.Dir", Field, 0},
    -		{"ProcAttr.Env", Field, 0},
    -		{"ProcAttr.Files", Field, 0},
    -		{"ProcAttr.Sys", Field, 0},
    -		{"Process", Type, 0},
    -		{"Process.Pid", Field, 0},
    -		{"ProcessState", Type, 0},
    -		{"ReadDir", Func, 16},
    -		{"ReadFile", Func, 16},
    -		{"Readlink", Func, 0},
    -		{"Remove", Func, 0},
    -		{"RemoveAll", Func, 0},
    -		{"Rename", Func, 0},
    -		{"SEEK_CUR", Const, 0},
    -		{"SEEK_END", Const, 0},
    -		{"SEEK_SET", Const, 0},
    -		{"SameFile", Func, 0},
    -		{"Setenv", Func, 0},
    -		{"Signal", Type, 0},
    -		{"StartProcess", Func, 0},
    -		{"Stat", Func, 0},
    -		{"Stderr", Var, 0},
    -		{"Stdin", Var, 0},
    -		{"Stdout", Var, 0},
    -		{"Symlink", Func, 0},
    -		{"SyscallError", Type, 0},
    -		{"SyscallError.Err", Field, 0},
    -		{"SyscallError.Syscall", Field, 0},
    -		{"TempDir", Func, 0},
    -		{"Truncate", Func, 0},
    -		{"Unsetenv", Func, 4},
    -		{"UserCacheDir", Func, 11},
    -		{"UserConfigDir", Func, 13},
    -		{"UserHomeDir", Func, 12},
    -		{"WriteFile", Func, 16},
    +		{"(*File).Chdir", Method, 0, ""},
    +		{"(*File).Chmod", Method, 0, ""},
    +		{"(*File).Chown", Method, 0, ""},
    +		{"(*File).Close", Method, 0, ""},
    +		{"(*File).Fd", Method, 0, ""},
    +		{"(*File).Name", Method, 0, ""},
    +		{"(*File).Read", Method, 0, ""},
    +		{"(*File).ReadAt", Method, 0, ""},
    +		{"(*File).ReadDir", Method, 16, ""},
    +		{"(*File).ReadFrom", Method, 15, ""},
    +		{"(*File).Readdir", Method, 0, ""},
    +		{"(*File).Readdirnames", Method, 0, ""},
    +		{"(*File).Seek", Method, 0, ""},
    +		{"(*File).SetDeadline", Method, 10, ""},
    +		{"(*File).SetReadDeadline", Method, 10, ""},
    +		{"(*File).SetWriteDeadline", Method, 10, ""},
    +		{"(*File).Stat", Method, 0, ""},
    +		{"(*File).Sync", Method, 0, ""},
    +		{"(*File).SyscallConn", Method, 12, ""},
    +		{"(*File).Truncate", Method, 0, ""},
    +		{"(*File).Write", Method, 0, ""},
    +		{"(*File).WriteAt", Method, 0, ""},
    +		{"(*File).WriteString", Method, 0, ""},
    +		{"(*File).WriteTo", Method, 22, ""},
    +		{"(*LinkError).Error", Method, 0, ""},
    +		{"(*LinkError).Unwrap", Method, 13, ""},
    +		{"(*PathError).Error", Method, 0, ""},
    +		{"(*PathError).Timeout", Method, 10, ""},
    +		{"(*PathError).Unwrap", Method, 13, ""},
    +		{"(*Process).Kill", Method, 0, ""},
    +		{"(*Process).Release", Method, 0, ""},
    +		{"(*Process).Signal", Method, 0, ""},
    +		{"(*Process).Wait", Method, 0, ""},
    +		{"(*ProcessState).ExitCode", Method, 12, ""},
    +		{"(*ProcessState).Exited", Method, 0, ""},
    +		{"(*ProcessState).Pid", Method, 0, ""},
    +		{"(*ProcessState).String", Method, 0, ""},
    +		{"(*ProcessState).Success", Method, 0, ""},
    +		{"(*ProcessState).Sys", Method, 0, ""},
    +		{"(*ProcessState).SysUsage", Method, 0, ""},
    +		{"(*ProcessState).SystemTime", Method, 0, ""},
    +		{"(*ProcessState).UserTime", Method, 0, ""},
    +		{"(*Root).Chmod", Method, 25, ""},
    +		{"(*Root).Chown", Method, 25, ""},
    +		{"(*Root).Chtimes", Method, 25, ""},
    +		{"(*Root).Close", Method, 24, ""},
    +		{"(*Root).Create", Method, 24, ""},
    +		{"(*Root).FS", Method, 24, ""},
    +		{"(*Root).Lchown", Method, 25, ""},
    +		{"(*Root).Link", Method, 25, ""},
    +		{"(*Root).Lstat", Method, 24, ""},
    +		{"(*Root).Mkdir", Method, 24, ""},
    +		{"(*Root).MkdirAll", Method, 25, ""},
    +		{"(*Root).Name", Method, 24, ""},
    +		{"(*Root).Open", Method, 24, ""},
    +		{"(*Root).OpenFile", Method, 24, ""},
    +		{"(*Root).OpenRoot", Method, 24, ""},
    +		{"(*Root).ReadFile", Method, 25, ""},
    +		{"(*Root).Readlink", Method, 25, ""},
    +		{"(*Root).Remove", Method, 24, ""},
    +		{"(*Root).RemoveAll", Method, 25, ""},
    +		{"(*Root).Rename", Method, 25, ""},
    +		{"(*Root).Stat", Method, 24, ""},
    +		{"(*Root).Symlink", Method, 25, ""},
    +		{"(*Root).WriteFile", Method, 25, ""},
    +		{"(*SyscallError).Error", Method, 0, ""},
    +		{"(*SyscallError).Timeout", Method, 10, ""},
    +		{"(*SyscallError).Unwrap", Method, 13, ""},
    +		{"(FileMode).IsDir", Method, 0, ""},
    +		{"(FileMode).IsRegular", Method, 1, ""},
    +		{"(FileMode).Perm", Method, 0, ""},
    +		{"(FileMode).String", Method, 0, ""},
    +		{"Args", Var, 0, ""},
    +		{"Chdir", Func, 0, "func(dir string) error"},
    +		{"Chmod", Func, 0, "func(name string, mode FileMode) error"},
    +		{"Chown", Func, 0, "func(name string, uid int, gid int) error"},
    +		{"Chtimes", Func, 0, "func(name string, atime time.Time, mtime time.Time) error"},
    +		{"Clearenv", Func, 0, "func()"},
    +		{"CopyFS", Func, 23, "func(dir string, fsys fs.FS) error"},
    +		{"Create", Func, 0, "func(name string) (*File, error)"},
    +		{"CreateTemp", Func, 16, "func(dir string, pattern string) (*File, error)"},
    +		{"DevNull", Const, 0, ""},
    +		{"DirEntry", Type, 16, ""},
    +		{"DirFS", Func, 16, "func(dir string) fs.FS"},
    +		{"Environ", Func, 0, "func() []string"},
    +		{"ErrClosed", Var, 8, ""},
    +		{"ErrDeadlineExceeded", Var, 15, ""},
    +		{"ErrExist", Var, 0, ""},
    +		{"ErrInvalid", Var, 0, ""},
    +		{"ErrNoDeadline", Var, 10, ""},
    +		{"ErrNotExist", Var, 0, ""},
    +		{"ErrPermission", Var, 0, ""},
    +		{"ErrProcessDone", Var, 16, ""},
    +		{"Executable", Func, 8, "func() (string, error)"},
    +		{"Exit", Func, 0, "func(code int)"},
    +		{"Expand", Func, 0, "func(s string, mapping func(string) string) string"},
    +		{"ExpandEnv", Func, 0, "func(s string) string"},
    +		{"File", Type, 0, ""},
    +		{"FileInfo", Type, 0, ""},
    +		{"FileMode", Type, 0, ""},
    +		{"FindProcess", Func, 0, "func(pid int) (*Process, error)"},
    +		{"Getegid", Func, 0, "func() int"},
    +		{"Getenv", Func, 0, "func(key string) string"},
    +		{"Geteuid", Func, 0, "func() int"},
    +		{"Getgid", Func, 0, "func() int"},
    +		{"Getgroups", Func, 0, "func() ([]int, error)"},
    +		{"Getpagesize", Func, 0, "func() int"},
    +		{"Getpid", Func, 0, "func() int"},
    +		{"Getppid", Func, 0, "func() int"},
    +		{"Getuid", Func, 0, "func() int"},
    +		{"Getwd", Func, 0, "func() (dir string, err error)"},
    +		{"Hostname", Func, 0, "func() (name string, err error)"},
    +		{"Interrupt", Var, 0, ""},
    +		{"IsExist", Func, 0, "func(err error) bool"},
    +		{"IsNotExist", Func, 0, "func(err error) bool"},
    +		{"IsPathSeparator", Func, 0, "func(c uint8) bool"},
    +		{"IsPermission", Func, 0, "func(err error) bool"},
    +		{"IsTimeout", Func, 10, "func(err error) bool"},
    +		{"Kill", Var, 0, ""},
    +		{"Lchown", Func, 0, "func(name string, uid int, gid int) error"},
    +		{"Link", Func, 0, "func(oldname string, newname string) error"},
    +		{"LinkError", Type, 0, ""},
    +		{"LinkError.Err", Field, 0, ""},
    +		{"LinkError.New", Field, 0, ""},
    +		{"LinkError.Old", Field, 0, ""},
    +		{"LinkError.Op", Field, 0, ""},
    +		{"LookupEnv", Func, 5, "func(key string) (string, bool)"},
    +		{"Lstat", Func, 0, "func(name string) (FileInfo, error)"},
    +		{"Mkdir", Func, 0, "func(name string, perm FileMode) error"},
    +		{"MkdirAll", Func, 0, "func(path string, perm FileMode) error"},
    +		{"MkdirTemp", Func, 16, "func(dir string, pattern string) (string, error)"},
    +		{"ModeAppend", Const, 0, ""},
    +		{"ModeCharDevice", Const, 0, ""},
    +		{"ModeDevice", Const, 0, ""},
    +		{"ModeDir", Const, 0, ""},
    +		{"ModeExclusive", Const, 0, ""},
    +		{"ModeIrregular", Const, 11, ""},
    +		{"ModeNamedPipe", Const, 0, ""},
    +		{"ModePerm", Const, 0, ""},
    +		{"ModeSetgid", Const, 0, ""},
    +		{"ModeSetuid", Const, 0, ""},
    +		{"ModeSocket", Const, 0, ""},
    +		{"ModeSticky", Const, 0, ""},
    +		{"ModeSymlink", Const, 0, ""},
    +		{"ModeTemporary", Const, 0, ""},
    +		{"ModeType", Const, 0, ""},
    +		{"NewFile", Func, 0, "func(fd uintptr, name string) *File"},
    +		{"NewSyscallError", Func, 0, "func(syscall string, err error) error"},
    +		{"O_APPEND", Const, 0, ""},
    +		{"O_CREATE", Const, 0, ""},
    +		{"O_EXCL", Const, 0, ""},
    +		{"O_RDONLY", Const, 0, ""},
    +		{"O_RDWR", Const, 0, ""},
    +		{"O_SYNC", Const, 0, ""},
    +		{"O_TRUNC", Const, 0, ""},
    +		{"O_WRONLY", Const, 0, ""},
    +		{"Open", Func, 0, "func(name string) (*File, error)"},
    +		{"OpenFile", Func, 0, "func(name string, flag int, perm FileMode) (*File, error)"},
    +		{"OpenInRoot", Func, 24, "func(dir string, name string) (*File, error)"},
    +		{"OpenRoot", Func, 24, "func(name string) (*Root, error)"},
    +		{"PathError", Type, 0, ""},
    +		{"PathError.Err", Field, 0, ""},
    +		{"PathError.Op", Field, 0, ""},
    +		{"PathError.Path", Field, 0, ""},
    +		{"PathListSeparator", Const, 0, ""},
    +		{"PathSeparator", Const, 0, ""},
    +		{"Pipe", Func, 0, "func() (r *File, w *File, err error)"},
    +		{"ProcAttr", Type, 0, ""},
    +		{"ProcAttr.Dir", Field, 0, ""},
    +		{"ProcAttr.Env", Field, 0, ""},
    +		{"ProcAttr.Files", Field, 0, ""},
    +		{"ProcAttr.Sys", Field, 0, ""},
    +		{"Process", Type, 0, ""},
    +		{"Process.Pid", Field, 0, ""},
    +		{"ProcessState", Type, 0, ""},
    +		{"ReadDir", Func, 16, "func(name string) ([]DirEntry, error)"},
    +		{"ReadFile", Func, 16, "func(name string) ([]byte, error)"},
    +		{"Readlink", Func, 0, "func(name string) (string, error)"},
    +		{"Remove", Func, 0, "func(name string) error"},
    +		{"RemoveAll", Func, 0, "func(path string) error"},
    +		{"Rename", Func, 0, "func(oldpath string, newpath string) error"},
    +		{"Root", Type, 24, ""},
    +		{"SEEK_CUR", Const, 0, ""},
    +		{"SEEK_END", Const, 0, ""},
    +		{"SEEK_SET", Const, 0, ""},
    +		{"SameFile", Func, 0, "func(fi1 FileInfo, fi2 FileInfo) bool"},
    +		{"Setenv", Func, 0, "func(key string, value string) error"},
    +		{"Signal", Type, 0, ""},
    +		{"StartProcess", Func, 0, "func(name string, argv []string, attr *ProcAttr) (*Process, error)"},
    +		{"Stat", Func, 0, "func(name string) (FileInfo, error)"},
    +		{"Stderr", Var, 0, ""},
    +		{"Stdin", Var, 0, ""},
    +		{"Stdout", Var, 0, ""},
    +		{"Symlink", Func, 0, "func(oldname string, newname string) error"},
    +		{"SyscallError", Type, 0, ""},
    +		{"SyscallError.Err", Field, 0, ""},
    +		{"SyscallError.Syscall", Field, 0, ""},
    +		{"TempDir", Func, 0, "func() string"},
    +		{"Truncate", Func, 0, "func(name string, size int64) error"},
    +		{"Unsetenv", Func, 4, "func(key string) error"},
    +		{"UserCacheDir", Func, 11, "func() (string, error)"},
    +		{"UserConfigDir", Func, 13, "func() (string, error)"},
    +		{"UserHomeDir", Func, 12, "func() (string, error)"},
    +		{"WriteFile", Func, 16, "func(name string, data []byte, perm FileMode) error"},
     	},
     	"os/exec": {
    -		{"(*Cmd).CombinedOutput", Method, 0},
    -		{"(*Cmd).Environ", Method, 19},
    -		{"(*Cmd).Output", Method, 0},
    -		{"(*Cmd).Run", Method, 0},
    -		{"(*Cmd).Start", Method, 0},
    -		{"(*Cmd).StderrPipe", Method, 0},
    -		{"(*Cmd).StdinPipe", Method, 0},
    -		{"(*Cmd).StdoutPipe", Method, 0},
    -		{"(*Cmd).String", Method, 13},
    -		{"(*Cmd).Wait", Method, 0},
    -		{"(*Error).Error", Method, 0},
    -		{"(*Error).Unwrap", Method, 13},
    -		{"(*ExitError).Error", Method, 0},
    -		{"(ExitError).ExitCode", Method, 12},
    -		{"(ExitError).Exited", Method, 0},
    -		{"(ExitError).Pid", Method, 0},
    -		{"(ExitError).String", Method, 0},
    -		{"(ExitError).Success", Method, 0},
    -		{"(ExitError).Sys", Method, 0},
    -		{"(ExitError).SysUsage", Method, 0},
    -		{"(ExitError).SystemTime", Method, 0},
    -		{"(ExitError).UserTime", Method, 0},
    -		{"Cmd", Type, 0},
    -		{"Cmd.Args", Field, 0},
    -		{"Cmd.Cancel", Field, 20},
    -		{"Cmd.Dir", Field, 0},
    -		{"Cmd.Env", Field, 0},
    -		{"Cmd.Err", Field, 19},
    -		{"Cmd.ExtraFiles", Field, 0},
    -		{"Cmd.Path", Field, 0},
    -		{"Cmd.Process", Field, 0},
    -		{"Cmd.ProcessState", Field, 0},
    -		{"Cmd.Stderr", Field, 0},
    -		{"Cmd.Stdin", Field, 0},
    -		{"Cmd.Stdout", Field, 0},
    -		{"Cmd.SysProcAttr", Field, 0},
    -		{"Cmd.WaitDelay", Field, 20},
    -		{"Command", Func, 0},
    -		{"CommandContext", Func, 7},
    -		{"ErrDot", Var, 19},
    -		{"ErrNotFound", Var, 0},
    -		{"ErrWaitDelay", Var, 20},
    -		{"Error", Type, 0},
    -		{"Error.Err", Field, 0},
    -		{"Error.Name", Field, 0},
    -		{"ExitError", Type, 0},
    -		{"ExitError.ProcessState", Field, 0},
    -		{"ExitError.Stderr", Field, 6},
    -		{"LookPath", Func, 0},
    +		{"(*Cmd).CombinedOutput", Method, 0, ""},
    +		{"(*Cmd).Environ", Method, 19, ""},
    +		{"(*Cmd).Output", Method, 0, ""},
    +		{"(*Cmd).Run", Method, 0, ""},
    +		{"(*Cmd).Start", Method, 0, ""},
    +		{"(*Cmd).StderrPipe", Method, 0, ""},
    +		{"(*Cmd).StdinPipe", Method, 0, ""},
    +		{"(*Cmd).StdoutPipe", Method, 0, ""},
    +		{"(*Cmd).String", Method, 13, ""},
    +		{"(*Cmd).Wait", Method, 0, ""},
    +		{"(*Error).Error", Method, 0, ""},
    +		{"(*Error).Unwrap", Method, 13, ""},
    +		{"(*ExitError).Error", Method, 0, ""},
    +		{"(ExitError).ExitCode", Method, 12, ""},
    +		{"(ExitError).Exited", Method, 0, ""},
    +		{"(ExitError).Pid", Method, 0, ""},
    +		{"(ExitError).String", Method, 0, ""},
    +		{"(ExitError).Success", Method, 0, ""},
    +		{"(ExitError).Sys", Method, 0, ""},
    +		{"(ExitError).SysUsage", Method, 0, ""},
    +		{"(ExitError).SystemTime", Method, 0, ""},
    +		{"(ExitError).UserTime", Method, 0, ""},
    +		{"Cmd", Type, 0, ""},
    +		{"Cmd.Args", Field, 0, ""},
    +		{"Cmd.Cancel", Field, 20, ""},
    +		{"Cmd.Dir", Field, 0, ""},
    +		{"Cmd.Env", Field, 0, ""},
    +		{"Cmd.Err", Field, 19, ""},
    +		{"Cmd.ExtraFiles", Field, 0, ""},
    +		{"Cmd.Path", Field, 0, ""},
    +		{"Cmd.Process", Field, 0, ""},
    +		{"Cmd.ProcessState", Field, 0, ""},
    +		{"Cmd.Stderr", Field, 0, ""},
    +		{"Cmd.Stdin", Field, 0, ""},
    +		{"Cmd.Stdout", Field, 0, ""},
    +		{"Cmd.SysProcAttr", Field, 0, ""},
    +		{"Cmd.WaitDelay", Field, 20, ""},
    +		{"Command", Func, 0, "func(name string, arg ...string) *Cmd"},
    +		{"CommandContext", Func, 7, "func(ctx context.Context, name string, arg ...string) *Cmd"},
    +		{"ErrDot", Var, 19, ""},
    +		{"ErrNotFound", Var, 0, ""},
    +		{"ErrWaitDelay", Var, 20, ""},
    +		{"Error", Type, 0, ""},
    +		{"Error.Err", Field, 0, ""},
    +		{"Error.Name", Field, 0, ""},
    +		{"ExitError", Type, 0, ""},
    +		{"ExitError.ProcessState", Field, 0, ""},
    +		{"ExitError.Stderr", Field, 6, ""},
    +		{"LookPath", Func, 0, "func(file string) (string, error)"},
     	},
     	"os/signal": {
    -		{"Ignore", Func, 5},
    -		{"Ignored", Func, 11},
    -		{"Notify", Func, 0},
    -		{"NotifyContext", Func, 16},
    -		{"Reset", Func, 5},
    -		{"Stop", Func, 1},
    +		{"Ignore", Func, 5, "func(sig ...os.Signal)"},
    +		{"Ignored", Func, 11, "func(sig os.Signal) bool"},
    +		{"Notify", Func, 0, "func(c chan<- os.Signal, sig ...os.Signal)"},
    +		{"NotifyContext", Func, 16, "func(parent context.Context, signals ...os.Signal) (ctx context.Context, stop context.CancelFunc)"},
    +		{"Reset", Func, 5, "func(sig ...os.Signal)"},
    +		{"Stop", Func, 1, "func(c chan<- os.Signal)"},
     	},
     	"os/user": {
    -		{"(*User).GroupIds", Method, 7},
    -		{"(UnknownGroupError).Error", Method, 7},
    -		{"(UnknownGroupIdError).Error", Method, 7},
    -		{"(UnknownUserError).Error", Method, 0},
    -		{"(UnknownUserIdError).Error", Method, 0},
    -		{"Current", Func, 0},
    -		{"Group", Type, 7},
    -		{"Group.Gid", Field, 7},
    -		{"Group.Name", Field, 7},
    -		{"Lookup", Func, 0},
    -		{"LookupGroup", Func, 7},
    -		{"LookupGroupId", Func, 7},
    -		{"LookupId", Func, 0},
    -		{"UnknownGroupError", Type, 7},
    -		{"UnknownGroupIdError", Type, 7},
    -		{"UnknownUserError", Type, 0},
    -		{"UnknownUserIdError", Type, 0},
    -		{"User", Type, 0},
    -		{"User.Gid", Field, 0},
    -		{"User.HomeDir", Field, 0},
    -		{"User.Name", Field, 0},
    -		{"User.Uid", Field, 0},
    -		{"User.Username", Field, 0},
    +		{"(*User).GroupIds", Method, 7, ""},
    +		{"(UnknownGroupError).Error", Method, 7, ""},
    +		{"(UnknownGroupIdError).Error", Method, 7, ""},
    +		{"(UnknownUserError).Error", Method, 0, ""},
    +		{"(UnknownUserIdError).Error", Method, 0, ""},
    +		{"Current", Func, 0, "func() (*User, error)"},
    +		{"Group", Type, 7, ""},
    +		{"Group.Gid", Field, 7, ""},
    +		{"Group.Name", Field, 7, ""},
    +		{"Lookup", Func, 0, "func(username string) (*User, error)"},
    +		{"LookupGroup", Func, 7, "func(name string) (*Group, error)"},
    +		{"LookupGroupId", Func, 7, "func(gid string) (*Group, error)"},
    +		{"LookupId", Func, 0, "func(uid string) (*User, error)"},
    +		{"UnknownGroupError", Type, 7, ""},
    +		{"UnknownGroupIdError", Type, 7, ""},
    +		{"UnknownUserError", Type, 0, ""},
    +		{"UnknownUserIdError", Type, 0, ""},
    +		{"User", Type, 0, ""},
    +		{"User.Gid", Field, 0, ""},
    +		{"User.HomeDir", Field, 0, ""},
    +		{"User.Name", Field, 0, ""},
    +		{"User.Uid", Field, 0, ""},
    +		{"User.Username", Field, 0, ""},
     	},
     	"path": {
    -		{"Base", Func, 0},
    -		{"Clean", Func, 0},
    -		{"Dir", Func, 0},
    -		{"ErrBadPattern", Var, 0},
    -		{"Ext", Func, 0},
    -		{"IsAbs", Func, 0},
    -		{"Join", Func, 0},
    -		{"Match", Func, 0},
    -		{"Split", Func, 0},
    +		{"Base", Func, 0, "func(path string) string"},
    +		{"Clean", Func, 0, "func(path string) string"},
    +		{"Dir", Func, 0, "func(path string) string"},
    +		{"ErrBadPattern", Var, 0, ""},
    +		{"Ext", Func, 0, "func(path string) string"},
    +		{"IsAbs", Func, 0, "func(path string) bool"},
    +		{"Join", Func, 0, "func(elem ...string) string"},
    +		{"Match", Func, 0, "func(pattern string, name string) (matched bool, err error)"},
    +		{"Split", Func, 0, "func(path string) (dir string, file string)"},
     	},
     	"path/filepath": {
    -		{"Abs", Func, 0},
    -		{"Base", Func, 0},
    -		{"Clean", Func, 0},
    -		{"Dir", Func, 0},
    -		{"ErrBadPattern", Var, 0},
    -		{"EvalSymlinks", Func, 0},
    -		{"Ext", Func, 0},
    -		{"FromSlash", Func, 0},
    -		{"Glob", Func, 0},
    -		{"HasPrefix", Func, 0},
    -		{"IsAbs", Func, 0},
    -		{"IsLocal", Func, 20},
    -		{"Join", Func, 0},
    -		{"ListSeparator", Const, 0},
    -		{"Localize", Func, 23},
    -		{"Match", Func, 0},
    -		{"Rel", Func, 0},
    -		{"Separator", Const, 0},
    -		{"SkipAll", Var, 20},
    -		{"SkipDir", Var, 0},
    -		{"Split", Func, 0},
    -		{"SplitList", Func, 0},
    -		{"ToSlash", Func, 0},
    -		{"VolumeName", Func, 0},
    -		{"Walk", Func, 0},
    -		{"WalkDir", Func, 16},
    -		{"WalkFunc", Type, 0},
    +		{"Abs", Func, 0, "func(path string) (string, error)"},
    +		{"Base", Func, 0, "func(path string) string"},
    +		{"Clean", Func, 0, "func(path string) string"},
    +		{"Dir", Func, 0, "func(path string) string"},
    +		{"ErrBadPattern", Var, 0, ""},
    +		{"EvalSymlinks", Func, 0, "func(path string) (string, error)"},
    +		{"Ext", Func, 0, "func(path string) string"},
    +		{"FromSlash", Func, 0, "func(path string) string"},
    +		{"Glob", Func, 0, "func(pattern string) (matches []string, err error)"},
    +		{"HasPrefix", Func, 0, "func(p string, prefix string) bool"},
    +		{"IsAbs", Func, 0, "func(path string) bool"},
    +		{"IsLocal", Func, 20, "func(path string) bool"},
    +		{"Join", Func, 0, "func(elem ...string) string"},
    +		{"ListSeparator", Const, 0, ""},
    +		{"Localize", Func, 23, "func(path string) (string, error)"},
    +		{"Match", Func, 0, "func(pattern string, name string) (matched bool, err error)"},
    +		{"Rel", Func, 0, "func(basepath string, targpath string) (string, error)"},
    +		{"Separator", Const, 0, ""},
    +		{"SkipAll", Var, 20, ""},
    +		{"SkipDir", Var, 0, ""},
    +		{"Split", Func, 0, "func(path string) (dir string, file string)"},
    +		{"SplitList", Func, 0, "func(path string) []string"},
    +		{"ToSlash", Func, 0, "func(path string) string"},
    +		{"VolumeName", Func, 0, "func(path string) string"},
    +		{"Walk", Func, 0, "func(root string, fn WalkFunc) error"},
    +		{"WalkDir", Func, 16, "func(root string, fn fs.WalkDirFunc) error"},
    +		{"WalkFunc", Type, 0, ""},
     	},
     	"plugin": {
    -		{"(*Plugin).Lookup", Method, 8},
    -		{"Open", Func, 8},
    -		{"Plugin", Type, 8},
    -		{"Symbol", Type, 8},
    +		{"(*Plugin).Lookup", Method, 8, ""},
    +		{"Open", Func, 8, "func(path string) (*Plugin, error)"},
    +		{"Plugin", Type, 8, ""},
    +		{"Symbol", Type, 8, ""},
     	},
     	"reflect": {
    -		{"(*MapIter).Key", Method, 12},
    -		{"(*MapIter).Next", Method, 12},
    -		{"(*MapIter).Reset", Method, 18},
    -		{"(*MapIter).Value", Method, 12},
    -		{"(*ValueError).Error", Method, 0},
    -		{"(ChanDir).String", Method, 0},
    -		{"(Kind).String", Method, 0},
    -		{"(Method).IsExported", Method, 17},
    -		{"(StructField).IsExported", Method, 17},
    -		{"(StructTag).Get", Method, 0},
    -		{"(StructTag).Lookup", Method, 7},
    -		{"(Value).Addr", Method, 0},
    -		{"(Value).Bool", Method, 0},
    -		{"(Value).Bytes", Method, 0},
    -		{"(Value).Call", Method, 0},
    -		{"(Value).CallSlice", Method, 0},
    -		{"(Value).CanAddr", Method, 0},
    -		{"(Value).CanComplex", Method, 18},
    -		{"(Value).CanConvert", Method, 17},
    -		{"(Value).CanFloat", Method, 18},
    -		{"(Value).CanInt", Method, 18},
    -		{"(Value).CanInterface", Method, 0},
    -		{"(Value).CanSet", Method, 0},
    -		{"(Value).CanUint", Method, 18},
    -		{"(Value).Cap", Method, 0},
    -		{"(Value).Clear", Method, 21},
    -		{"(Value).Close", Method, 0},
    -		{"(Value).Comparable", Method, 20},
    -		{"(Value).Complex", Method, 0},
    -		{"(Value).Convert", Method, 1},
    -		{"(Value).Elem", Method, 0},
    -		{"(Value).Equal", Method, 20},
    -		{"(Value).Field", Method, 0},
    -		{"(Value).FieldByIndex", Method, 0},
    -		{"(Value).FieldByIndexErr", Method, 18},
    -		{"(Value).FieldByName", Method, 0},
    -		{"(Value).FieldByNameFunc", Method, 0},
    -		{"(Value).Float", Method, 0},
    -		{"(Value).Grow", Method, 20},
    -		{"(Value).Index", Method, 0},
    -		{"(Value).Int", Method, 0},
    -		{"(Value).Interface", Method, 0},
    -		{"(Value).InterfaceData", Method, 0},
    -		{"(Value).IsNil", Method, 0},
    -		{"(Value).IsValid", Method, 0},
    -		{"(Value).IsZero", Method, 13},
    -		{"(Value).Kind", Method, 0},
    -		{"(Value).Len", Method, 0},
    -		{"(Value).MapIndex", Method, 0},
    -		{"(Value).MapKeys", Method, 0},
    -		{"(Value).MapRange", Method, 12},
    -		{"(Value).Method", Method, 0},
    -		{"(Value).MethodByName", Method, 0},
    -		{"(Value).NumField", Method, 0},
    -		{"(Value).NumMethod", Method, 0},
    -		{"(Value).OverflowComplex", Method, 0},
    -		{"(Value).OverflowFloat", Method, 0},
    -		{"(Value).OverflowInt", Method, 0},
    -		{"(Value).OverflowUint", Method, 0},
    -		{"(Value).Pointer", Method, 0},
    -		{"(Value).Recv", Method, 0},
    -		{"(Value).Send", Method, 0},
    -		{"(Value).Seq", Method, 23},
    -		{"(Value).Seq2", Method, 23},
    -		{"(Value).Set", Method, 0},
    -		{"(Value).SetBool", Method, 0},
    -		{"(Value).SetBytes", Method, 0},
    -		{"(Value).SetCap", Method, 2},
    -		{"(Value).SetComplex", Method, 0},
    -		{"(Value).SetFloat", Method, 0},
    -		{"(Value).SetInt", Method, 0},
    -		{"(Value).SetIterKey", Method, 18},
    -		{"(Value).SetIterValue", Method, 18},
    -		{"(Value).SetLen", Method, 0},
    -		{"(Value).SetMapIndex", Method, 0},
    -		{"(Value).SetPointer", Method, 0},
    -		{"(Value).SetString", Method, 0},
    -		{"(Value).SetUint", Method, 0},
    -		{"(Value).SetZero", Method, 20},
    -		{"(Value).Slice", Method, 0},
    -		{"(Value).Slice3", Method, 2},
    -		{"(Value).String", Method, 0},
    -		{"(Value).TryRecv", Method, 0},
    -		{"(Value).TrySend", Method, 0},
    -		{"(Value).Type", Method, 0},
    -		{"(Value).Uint", Method, 0},
    -		{"(Value).UnsafeAddr", Method, 0},
    -		{"(Value).UnsafePointer", Method, 18},
    -		{"Append", Func, 0},
    -		{"AppendSlice", Func, 0},
    -		{"Array", Const, 0},
    -		{"ArrayOf", Func, 5},
    -		{"Bool", Const, 0},
    -		{"BothDir", Const, 0},
    -		{"Chan", Const, 0},
    -		{"ChanDir", Type, 0},
    -		{"ChanOf", Func, 1},
    -		{"Complex128", Const, 0},
    -		{"Complex64", Const, 0},
    -		{"Copy", Func, 0},
    -		{"DeepEqual", Func, 0},
    -		{"Float32", Const, 0},
    -		{"Float64", Const, 0},
    -		{"Func", Const, 0},
    -		{"FuncOf", Func, 5},
    -		{"Indirect", Func, 0},
    -		{"Int", Const, 0},
    -		{"Int16", Const, 0},
    -		{"Int32", Const, 0},
    -		{"Int64", Const, 0},
    -		{"Int8", Const, 0},
    -		{"Interface", Const, 0},
    -		{"Invalid", Const, 0},
    -		{"Kind", Type, 0},
    -		{"MakeChan", Func, 0},
    -		{"MakeFunc", Func, 1},
    -		{"MakeMap", Func, 0},
    -		{"MakeMapWithSize", Func, 9},
    -		{"MakeSlice", Func, 0},
    -		{"Map", Const, 0},
    -		{"MapIter", Type, 12},
    -		{"MapOf", Func, 1},
    -		{"Method", Type, 0},
    -		{"Method.Func", Field, 0},
    -		{"Method.Index", Field, 0},
    -		{"Method.Name", Field, 0},
    -		{"Method.PkgPath", Field, 0},
    -		{"Method.Type", Field, 0},
    -		{"New", Func, 0},
    -		{"NewAt", Func, 0},
    -		{"Pointer", Const, 18},
    -		{"PointerTo", Func, 18},
    -		{"Ptr", Const, 0},
    -		{"PtrTo", Func, 0},
    -		{"RecvDir", Const, 0},
    -		{"Select", Func, 1},
    -		{"SelectCase", Type, 1},
    -		{"SelectCase.Chan", Field, 1},
    -		{"SelectCase.Dir", Field, 1},
    -		{"SelectCase.Send", Field, 1},
    -		{"SelectDefault", Const, 1},
    -		{"SelectDir", Type, 1},
    -		{"SelectRecv", Const, 1},
    -		{"SelectSend", Const, 1},
    -		{"SendDir", Const, 0},
    -		{"Slice", Const, 0},
    -		{"SliceAt", Func, 23},
    -		{"SliceHeader", Type, 0},
    -		{"SliceHeader.Cap", Field, 0},
    -		{"SliceHeader.Data", Field, 0},
    -		{"SliceHeader.Len", Field, 0},
    -		{"SliceOf", Func, 1},
    -		{"String", Const, 0},
    -		{"StringHeader", Type, 0},
    -		{"StringHeader.Data", Field, 0},
    -		{"StringHeader.Len", Field, 0},
    -		{"Struct", Const, 0},
    -		{"StructField", Type, 0},
    -		{"StructField.Anonymous", Field, 0},
    -		{"StructField.Index", Field, 0},
    -		{"StructField.Name", Field, 0},
    -		{"StructField.Offset", Field, 0},
    -		{"StructField.PkgPath", Field, 0},
    -		{"StructField.Tag", Field, 0},
    -		{"StructField.Type", Field, 0},
    -		{"StructOf", Func, 7},
    -		{"StructTag", Type, 0},
    -		{"Swapper", Func, 8},
    -		{"Type", Type, 0},
    -		{"TypeFor", Func, 22},
    -		{"TypeOf", Func, 0},
    -		{"Uint", Const, 0},
    -		{"Uint16", Const, 0},
    -		{"Uint32", Const, 0},
    -		{"Uint64", Const, 0},
    -		{"Uint8", Const, 0},
    -		{"Uintptr", Const, 0},
    -		{"UnsafePointer", Const, 0},
    -		{"Value", Type, 0},
    -		{"ValueError", Type, 0},
    -		{"ValueError.Kind", Field, 0},
    -		{"ValueError.Method", Field, 0},
    -		{"ValueOf", Func, 0},
    -		{"VisibleFields", Func, 17},
    -		{"Zero", Func, 0},
    +		{"(*MapIter).Key", Method, 12, ""},
    +		{"(*MapIter).Next", Method, 12, ""},
    +		{"(*MapIter).Reset", Method, 18, ""},
    +		{"(*MapIter).Value", Method, 12, ""},
    +		{"(*ValueError).Error", Method, 0, ""},
    +		{"(ChanDir).String", Method, 0, ""},
    +		{"(Kind).String", Method, 0, ""},
    +		{"(Method).IsExported", Method, 17, ""},
    +		{"(StructField).IsExported", Method, 17, ""},
    +		{"(StructTag).Get", Method, 0, ""},
    +		{"(StructTag).Lookup", Method, 7, ""},
    +		{"(Value).Addr", Method, 0, ""},
    +		{"(Value).Bool", Method, 0, ""},
    +		{"(Value).Bytes", Method, 0, ""},
    +		{"(Value).Call", Method, 0, ""},
    +		{"(Value).CallSlice", Method, 0, ""},
    +		{"(Value).CanAddr", Method, 0, ""},
    +		{"(Value).CanComplex", Method, 18, ""},
    +		{"(Value).CanConvert", Method, 17, ""},
    +		{"(Value).CanFloat", Method, 18, ""},
    +		{"(Value).CanInt", Method, 18, ""},
    +		{"(Value).CanInterface", Method, 0, ""},
    +		{"(Value).CanSet", Method, 0, ""},
    +		{"(Value).CanUint", Method, 18, ""},
    +		{"(Value).Cap", Method, 0, ""},
    +		{"(Value).Clear", Method, 21, ""},
    +		{"(Value).Close", Method, 0, ""},
    +		{"(Value).Comparable", Method, 20, ""},
    +		{"(Value).Complex", Method, 0, ""},
    +		{"(Value).Convert", Method, 1, ""},
    +		{"(Value).Elem", Method, 0, ""},
    +		{"(Value).Equal", Method, 20, ""},
    +		{"(Value).Field", Method, 0, ""},
    +		{"(Value).FieldByIndex", Method, 0, ""},
    +		{"(Value).FieldByIndexErr", Method, 18, ""},
    +		{"(Value).FieldByName", Method, 0, ""},
    +		{"(Value).FieldByNameFunc", Method, 0, ""},
    +		{"(Value).Float", Method, 0, ""},
    +		{"(Value).Grow", Method, 20, ""},
    +		{"(Value).Index", Method, 0, ""},
    +		{"(Value).Int", Method, 0, ""},
    +		{"(Value).Interface", Method, 0, ""},
    +		{"(Value).InterfaceData", Method, 0, ""},
    +		{"(Value).IsNil", Method, 0, ""},
    +		{"(Value).IsValid", Method, 0, ""},
    +		{"(Value).IsZero", Method, 13, ""},
    +		{"(Value).Kind", Method, 0, ""},
    +		{"(Value).Len", Method, 0, ""},
    +		{"(Value).MapIndex", Method, 0, ""},
    +		{"(Value).MapKeys", Method, 0, ""},
    +		{"(Value).MapRange", Method, 12, ""},
    +		{"(Value).Method", Method, 0, ""},
    +		{"(Value).MethodByName", Method, 0, ""},
    +		{"(Value).NumField", Method, 0, ""},
    +		{"(Value).NumMethod", Method, 0, ""},
    +		{"(Value).OverflowComplex", Method, 0, ""},
    +		{"(Value).OverflowFloat", Method, 0, ""},
    +		{"(Value).OverflowInt", Method, 0, ""},
    +		{"(Value).OverflowUint", Method, 0, ""},
    +		{"(Value).Pointer", Method, 0, ""},
    +		{"(Value).Recv", Method, 0, ""},
    +		{"(Value).Send", Method, 0, ""},
    +		{"(Value).Seq", Method, 23, ""},
    +		{"(Value).Seq2", Method, 23, ""},
    +		{"(Value).Set", Method, 0, ""},
    +		{"(Value).SetBool", Method, 0, ""},
    +		{"(Value).SetBytes", Method, 0, ""},
    +		{"(Value).SetCap", Method, 2, ""},
    +		{"(Value).SetComplex", Method, 0, ""},
    +		{"(Value).SetFloat", Method, 0, ""},
    +		{"(Value).SetInt", Method, 0, ""},
    +		{"(Value).SetIterKey", Method, 18, ""},
    +		{"(Value).SetIterValue", Method, 18, ""},
    +		{"(Value).SetLen", Method, 0, ""},
    +		{"(Value).SetMapIndex", Method, 0, ""},
    +		{"(Value).SetPointer", Method, 0, ""},
    +		{"(Value).SetString", Method, 0, ""},
    +		{"(Value).SetUint", Method, 0, ""},
    +		{"(Value).SetZero", Method, 20, ""},
    +		{"(Value).Slice", Method, 0, ""},
    +		{"(Value).Slice3", Method, 2, ""},
    +		{"(Value).String", Method, 0, ""},
    +		{"(Value).TryRecv", Method, 0, ""},
    +		{"(Value).TrySend", Method, 0, ""},
    +		{"(Value).Type", Method, 0, ""},
    +		{"(Value).Uint", Method, 0, ""},
    +		{"(Value).UnsafeAddr", Method, 0, ""},
    +		{"(Value).UnsafePointer", Method, 18, ""},
    +		{"Append", Func, 0, "func(s Value, x ...Value) Value"},
    +		{"AppendSlice", Func, 0, "func(s Value, t Value) Value"},
    +		{"Array", Const, 0, ""},
    +		{"ArrayOf", Func, 5, "func(length int, elem Type) Type"},
    +		{"Bool", Const, 0, ""},
    +		{"BothDir", Const, 0, ""},
    +		{"Chan", Const, 0, ""},
    +		{"ChanDir", Type, 0, ""},
    +		{"ChanOf", Func, 1, "func(dir ChanDir, t Type) Type"},
    +		{"Complex128", Const, 0, ""},
    +		{"Complex64", Const, 0, ""},
    +		{"Copy", Func, 0, "func(dst Value, src Value) int"},
    +		{"DeepEqual", Func, 0, "func(x any, y any) bool"},
    +		{"Float32", Const, 0, ""},
    +		{"Float64", Const, 0, ""},
    +		{"Func", Const, 0, ""},
    +		{"FuncOf", Func, 5, "func(in []Type, out []Type, variadic bool) Type"},
    +		{"Indirect", Func, 0, "func(v Value) Value"},
    +		{"Int", Const, 0, ""},
    +		{"Int16", Const, 0, ""},
    +		{"Int32", Const, 0, ""},
    +		{"Int64", Const, 0, ""},
    +		{"Int8", Const, 0, ""},
    +		{"Interface", Const, 0, ""},
    +		{"Invalid", Const, 0, ""},
    +		{"Kind", Type, 0, ""},
    +		{"MakeChan", Func, 0, "func(typ Type, buffer int) Value"},
    +		{"MakeFunc", Func, 1, "func(typ Type, fn func(args []Value) (results []Value)) Value"},
    +		{"MakeMap", Func, 0, "func(typ Type) Value"},
    +		{"MakeMapWithSize", Func, 9, "func(typ Type, n int) Value"},
    +		{"MakeSlice", Func, 0, "func(typ Type, len int, cap int) Value"},
    +		{"Map", Const, 0, ""},
    +		{"MapIter", Type, 12, ""},
    +		{"MapOf", Func, 1, "func(key Type, elem Type) Type"},
    +		{"Method", Type, 0, ""},
    +		{"Method.Func", Field, 0, ""},
    +		{"Method.Index", Field, 0, ""},
    +		{"Method.Name", Field, 0, ""},
    +		{"Method.PkgPath", Field, 0, ""},
    +		{"Method.Type", Field, 0, ""},
    +		{"New", Func, 0, "func(typ Type) Value"},
    +		{"NewAt", Func, 0, "func(typ Type, p unsafe.Pointer) Value"},
    +		{"Pointer", Const, 18, ""},
    +		{"PointerTo", Func, 18, "func(t Type) Type"},
    +		{"Ptr", Const, 0, ""},
    +		{"PtrTo", Func, 0, "func(t Type) Type"},
    +		{"RecvDir", Const, 0, ""},
    +		{"Select", Func, 1, "func(cases []SelectCase) (chosen int, recv Value, recvOK bool)"},
    +		{"SelectCase", Type, 1, ""},
    +		{"SelectCase.Chan", Field, 1, ""},
    +		{"SelectCase.Dir", Field, 1, ""},
    +		{"SelectCase.Send", Field, 1, ""},
    +		{"SelectDefault", Const, 1, ""},
    +		{"SelectDir", Type, 1, ""},
    +		{"SelectRecv", Const, 1, ""},
    +		{"SelectSend", Const, 1, ""},
    +		{"SendDir", Const, 0, ""},
    +		{"Slice", Const, 0, ""},
    +		{"SliceAt", Func, 23, "func(typ Type, p unsafe.Pointer, n int) Value"},
    +		{"SliceHeader", Type, 0, ""},
    +		{"SliceHeader.Cap", Field, 0, ""},
    +		{"SliceHeader.Data", Field, 0, ""},
    +		{"SliceHeader.Len", Field, 0, ""},
    +		{"SliceOf", Func, 1, "func(t Type) Type"},
    +		{"String", Const, 0, ""},
    +		{"StringHeader", Type, 0, ""},
    +		{"StringHeader.Data", Field, 0, ""},
    +		{"StringHeader.Len", Field, 0, ""},
    +		{"Struct", Const, 0, ""},
    +		{"StructField", Type, 0, ""},
    +		{"StructField.Anonymous", Field, 0, ""},
    +		{"StructField.Index", Field, 0, ""},
    +		{"StructField.Name", Field, 0, ""},
    +		{"StructField.Offset", Field, 0, ""},
    +		{"StructField.PkgPath", Field, 0, ""},
    +		{"StructField.Tag", Field, 0, ""},
    +		{"StructField.Type", Field, 0, ""},
    +		{"StructOf", Func, 7, "func(fields []StructField) Type"},
    +		{"StructTag", Type, 0, ""},
    +		{"Swapper", Func, 8, "func(slice any) func(i int, j int)"},
    +		{"Type", Type, 0, ""},
    +		{"TypeAssert", Func, 25, "func[T any](v Value) (T, bool)"},
    +		{"TypeFor", Func, 22, "func[T any]() Type"},
    +		{"TypeOf", Func, 0, "func(i any) Type"},
    +		{"Uint", Const, 0, ""},
    +		{"Uint16", Const, 0, ""},
    +		{"Uint32", Const, 0, ""},
    +		{"Uint64", Const, 0, ""},
    +		{"Uint8", Const, 0, ""},
    +		{"Uintptr", Const, 0, ""},
    +		{"UnsafePointer", Const, 0, ""},
    +		{"Value", Type, 0, ""},
    +		{"ValueError", Type, 0, ""},
    +		{"ValueError.Kind", Field, 0, ""},
    +		{"ValueError.Method", Field, 0, ""},
    +		{"ValueOf", Func, 0, "func(i any) Value"},
    +		{"VisibleFields", Func, 17, "func(t Type) []StructField"},
    +		{"Zero", Func, 0, "func(typ Type) Value"},
     	},
     	"regexp": {
    -		{"(*Regexp).Copy", Method, 6},
    -		{"(*Regexp).Expand", Method, 0},
    -		{"(*Regexp).ExpandString", Method, 0},
    -		{"(*Regexp).Find", Method, 0},
    -		{"(*Regexp).FindAll", Method, 0},
    -		{"(*Regexp).FindAllIndex", Method, 0},
    -		{"(*Regexp).FindAllString", Method, 0},
    -		{"(*Regexp).FindAllStringIndex", Method, 0},
    -		{"(*Regexp).FindAllStringSubmatch", Method, 0},
    -		{"(*Regexp).FindAllStringSubmatchIndex", Method, 0},
    -		{"(*Regexp).FindAllSubmatch", Method, 0},
    -		{"(*Regexp).FindAllSubmatchIndex", Method, 0},
    -		{"(*Regexp).FindIndex", Method, 0},
    -		{"(*Regexp).FindReaderIndex", Method, 0},
    -		{"(*Regexp).FindReaderSubmatchIndex", Method, 0},
    -		{"(*Regexp).FindString", Method, 0},
    -		{"(*Regexp).FindStringIndex", Method, 0},
    -		{"(*Regexp).FindStringSubmatch", Method, 0},
    -		{"(*Regexp).FindStringSubmatchIndex", Method, 0},
    -		{"(*Regexp).FindSubmatch", Method, 0},
    -		{"(*Regexp).FindSubmatchIndex", Method, 0},
    -		{"(*Regexp).LiteralPrefix", Method, 0},
    -		{"(*Regexp).Longest", Method, 1},
    -		{"(*Regexp).MarshalText", Method, 21},
    -		{"(*Regexp).Match", Method, 0},
    -		{"(*Regexp).MatchReader", Method, 0},
    -		{"(*Regexp).MatchString", Method, 0},
    -		{"(*Regexp).NumSubexp", Method, 0},
    -		{"(*Regexp).ReplaceAll", Method, 0},
    -		{"(*Regexp).ReplaceAllFunc", Method, 0},
    -		{"(*Regexp).ReplaceAllLiteral", Method, 0},
    -		{"(*Regexp).ReplaceAllLiteralString", Method, 0},
    -		{"(*Regexp).ReplaceAllString", Method, 0},
    -		{"(*Regexp).ReplaceAllStringFunc", Method, 0},
    -		{"(*Regexp).Split", Method, 1},
    -		{"(*Regexp).String", Method, 0},
    -		{"(*Regexp).SubexpIndex", Method, 15},
    -		{"(*Regexp).SubexpNames", Method, 0},
    -		{"(*Regexp).UnmarshalText", Method, 21},
    -		{"Compile", Func, 0},
    -		{"CompilePOSIX", Func, 0},
    -		{"Match", Func, 0},
    -		{"MatchReader", Func, 0},
    -		{"MatchString", Func, 0},
    -		{"MustCompile", Func, 0},
    -		{"MustCompilePOSIX", Func, 0},
    -		{"QuoteMeta", Func, 0},
    -		{"Regexp", Type, 0},
    +		{"(*Regexp).AppendText", Method, 24, ""},
    +		{"(*Regexp).Copy", Method, 6, ""},
    +		{"(*Regexp).Expand", Method, 0, ""},
    +		{"(*Regexp).ExpandString", Method, 0, ""},
    +		{"(*Regexp).Find", Method, 0, ""},
    +		{"(*Regexp).FindAll", Method, 0, ""},
    +		{"(*Regexp).FindAllIndex", Method, 0, ""},
    +		{"(*Regexp).FindAllString", Method, 0, ""},
    +		{"(*Regexp).FindAllStringIndex", Method, 0, ""},
    +		{"(*Regexp).FindAllStringSubmatch", Method, 0, ""},
    +		{"(*Regexp).FindAllStringSubmatchIndex", Method, 0, ""},
    +		{"(*Regexp).FindAllSubmatch", Method, 0, ""},
    +		{"(*Regexp).FindAllSubmatchIndex", Method, 0, ""},
    +		{"(*Regexp).FindIndex", Method, 0, ""},
    +		{"(*Regexp).FindReaderIndex", Method, 0, ""},
    +		{"(*Regexp).FindReaderSubmatchIndex", Method, 0, ""},
    +		{"(*Regexp).FindString", Method, 0, ""},
    +		{"(*Regexp).FindStringIndex", Method, 0, ""},
    +		{"(*Regexp).FindStringSubmatch", Method, 0, ""},
    +		{"(*Regexp).FindStringSubmatchIndex", Method, 0, ""},
    +		{"(*Regexp).FindSubmatch", Method, 0, ""},
    +		{"(*Regexp).FindSubmatchIndex", Method, 0, ""},
    +		{"(*Regexp).LiteralPrefix", Method, 0, ""},
    +		{"(*Regexp).Longest", Method, 1, ""},
    +		{"(*Regexp).MarshalText", Method, 21, ""},
    +		{"(*Regexp).Match", Method, 0, ""},
    +		{"(*Regexp).MatchReader", Method, 0, ""},
    +		{"(*Regexp).MatchString", Method, 0, ""},
    +		{"(*Regexp).NumSubexp", Method, 0, ""},
    +		{"(*Regexp).ReplaceAll", Method, 0, ""},
    +		{"(*Regexp).ReplaceAllFunc", Method, 0, ""},
    +		{"(*Regexp).ReplaceAllLiteral", Method, 0, ""},
    +		{"(*Regexp).ReplaceAllLiteralString", Method, 0, ""},
    +		{"(*Regexp).ReplaceAllString", Method, 0, ""},
    +		{"(*Regexp).ReplaceAllStringFunc", Method, 0, ""},
    +		{"(*Regexp).Split", Method, 1, ""},
    +		{"(*Regexp).String", Method, 0, ""},
    +		{"(*Regexp).SubexpIndex", Method, 15, ""},
    +		{"(*Regexp).SubexpNames", Method, 0, ""},
    +		{"(*Regexp).UnmarshalText", Method, 21, ""},
    +		{"Compile", Func, 0, "func(expr string) (*Regexp, error)"},
    +		{"CompilePOSIX", Func, 0, "func(expr string) (*Regexp, error)"},
    +		{"Match", Func, 0, "func(pattern string, b []byte) (matched bool, err error)"},
    +		{"MatchReader", Func, 0, "func(pattern string, r io.RuneReader) (matched bool, err error)"},
    +		{"MatchString", Func, 0, "func(pattern string, s string) (matched bool, err error)"},
    +		{"MustCompile", Func, 0, "func(str string) *Regexp"},
    +		{"MustCompilePOSIX", Func, 0, "func(str string) *Regexp"},
    +		{"QuoteMeta", Func, 0, "func(s string) string"},
    +		{"Regexp", Type, 0, ""},
     	},
     	"regexp/syntax": {
    -		{"(*Error).Error", Method, 0},
    -		{"(*Inst).MatchEmptyWidth", Method, 0},
    -		{"(*Inst).MatchRune", Method, 0},
    -		{"(*Inst).MatchRunePos", Method, 3},
    -		{"(*Inst).String", Method, 0},
    -		{"(*Prog).Prefix", Method, 0},
    -		{"(*Prog).StartCond", Method, 0},
    -		{"(*Prog).String", Method, 0},
    -		{"(*Regexp).CapNames", Method, 0},
    -		{"(*Regexp).Equal", Method, 0},
    -		{"(*Regexp).MaxCap", Method, 0},
    -		{"(*Regexp).Simplify", Method, 0},
    -		{"(*Regexp).String", Method, 0},
    -		{"(ErrorCode).String", Method, 0},
    -		{"(InstOp).String", Method, 3},
    -		{"(Op).String", Method, 11},
    -		{"ClassNL", Const, 0},
    -		{"Compile", Func, 0},
    -		{"DotNL", Const, 0},
    -		{"EmptyBeginLine", Const, 0},
    -		{"EmptyBeginText", Const, 0},
    -		{"EmptyEndLine", Const, 0},
    -		{"EmptyEndText", Const, 0},
    -		{"EmptyNoWordBoundary", Const, 0},
    -		{"EmptyOp", Type, 0},
    -		{"EmptyOpContext", Func, 0},
    -		{"EmptyWordBoundary", Const, 0},
    -		{"ErrInternalError", Const, 0},
    -		{"ErrInvalidCharClass", Const, 0},
    -		{"ErrInvalidCharRange", Const, 0},
    -		{"ErrInvalidEscape", Const, 0},
    -		{"ErrInvalidNamedCapture", Const, 0},
    -		{"ErrInvalidPerlOp", Const, 0},
    -		{"ErrInvalidRepeatOp", Const, 0},
    -		{"ErrInvalidRepeatSize", Const, 0},
    -		{"ErrInvalidUTF8", Const, 0},
    -		{"ErrLarge", Const, 20},
    -		{"ErrMissingBracket", Const, 0},
    -		{"ErrMissingParen", Const, 0},
    -		{"ErrMissingRepeatArgument", Const, 0},
    -		{"ErrNestingDepth", Const, 19},
    -		{"ErrTrailingBackslash", Const, 0},
    -		{"ErrUnexpectedParen", Const, 1},
    -		{"Error", Type, 0},
    -		{"Error.Code", Field, 0},
    -		{"Error.Expr", Field, 0},
    -		{"ErrorCode", Type, 0},
    -		{"Flags", Type, 0},
    -		{"FoldCase", Const, 0},
    -		{"Inst", Type, 0},
    -		{"Inst.Arg", Field, 0},
    -		{"Inst.Op", Field, 0},
    -		{"Inst.Out", Field, 0},
    -		{"Inst.Rune", Field, 0},
    -		{"InstAlt", Const, 0},
    -		{"InstAltMatch", Const, 0},
    -		{"InstCapture", Const, 0},
    -		{"InstEmptyWidth", Const, 0},
    -		{"InstFail", Const, 0},
    -		{"InstMatch", Const, 0},
    -		{"InstNop", Const, 0},
    -		{"InstOp", Type, 0},
    -		{"InstRune", Const, 0},
    -		{"InstRune1", Const, 0},
    -		{"InstRuneAny", Const, 0},
    -		{"InstRuneAnyNotNL", Const, 0},
    -		{"IsWordChar", Func, 0},
    -		{"Literal", Const, 0},
    -		{"MatchNL", Const, 0},
    -		{"NonGreedy", Const, 0},
    -		{"OneLine", Const, 0},
    -		{"Op", Type, 0},
    -		{"OpAlternate", Const, 0},
    -		{"OpAnyChar", Const, 0},
    -		{"OpAnyCharNotNL", Const, 0},
    -		{"OpBeginLine", Const, 0},
    -		{"OpBeginText", Const, 0},
    -		{"OpCapture", Const, 0},
    -		{"OpCharClass", Const, 0},
    -		{"OpConcat", Const, 0},
    -		{"OpEmptyMatch", Const, 0},
    -		{"OpEndLine", Const, 0},
    -		{"OpEndText", Const, 0},
    -		{"OpLiteral", Const, 0},
    -		{"OpNoMatch", Const, 0},
    -		{"OpNoWordBoundary", Const, 0},
    -		{"OpPlus", Const, 0},
    -		{"OpQuest", Const, 0},
    -		{"OpRepeat", Const, 0},
    -		{"OpStar", Const, 0},
    -		{"OpWordBoundary", Const, 0},
    -		{"POSIX", Const, 0},
    -		{"Parse", Func, 0},
    -		{"Perl", Const, 0},
    -		{"PerlX", Const, 0},
    -		{"Prog", Type, 0},
    -		{"Prog.Inst", Field, 0},
    -		{"Prog.NumCap", Field, 0},
    -		{"Prog.Start", Field, 0},
    -		{"Regexp", Type, 0},
    -		{"Regexp.Cap", Field, 0},
    -		{"Regexp.Flags", Field, 0},
    -		{"Regexp.Max", Field, 0},
    -		{"Regexp.Min", Field, 0},
    -		{"Regexp.Name", Field, 0},
    -		{"Regexp.Op", Field, 0},
    -		{"Regexp.Rune", Field, 0},
    -		{"Regexp.Rune0", Field, 0},
    -		{"Regexp.Sub", Field, 0},
    -		{"Regexp.Sub0", Field, 0},
    -		{"Simple", Const, 0},
    -		{"UnicodeGroups", Const, 0},
    -		{"WasDollar", Const, 0},
    +		{"(*Error).Error", Method, 0, ""},
    +		{"(*Inst).MatchEmptyWidth", Method, 0, ""},
    +		{"(*Inst).MatchRune", Method, 0, ""},
    +		{"(*Inst).MatchRunePos", Method, 3, ""},
    +		{"(*Inst).String", Method, 0, ""},
    +		{"(*Prog).Prefix", Method, 0, ""},
    +		{"(*Prog).StartCond", Method, 0, ""},
    +		{"(*Prog).String", Method, 0, ""},
    +		{"(*Regexp).CapNames", Method, 0, ""},
    +		{"(*Regexp).Equal", Method, 0, ""},
    +		{"(*Regexp).MaxCap", Method, 0, ""},
    +		{"(*Regexp).Simplify", Method, 0, ""},
    +		{"(*Regexp).String", Method, 0, ""},
    +		{"(ErrorCode).String", Method, 0, ""},
    +		{"(InstOp).String", Method, 3, ""},
    +		{"(Op).String", Method, 11, ""},
    +		{"ClassNL", Const, 0, ""},
    +		{"Compile", Func, 0, "func(re *Regexp) (*Prog, error)"},
    +		{"DotNL", Const, 0, ""},
    +		{"EmptyBeginLine", Const, 0, ""},
    +		{"EmptyBeginText", Const, 0, ""},
    +		{"EmptyEndLine", Const, 0, ""},
    +		{"EmptyEndText", Const, 0, ""},
    +		{"EmptyNoWordBoundary", Const, 0, ""},
    +		{"EmptyOp", Type, 0, ""},
    +		{"EmptyOpContext", Func, 0, "func(r1 rune, r2 rune) EmptyOp"},
    +		{"EmptyWordBoundary", Const, 0, ""},
    +		{"ErrInternalError", Const, 0, ""},
    +		{"ErrInvalidCharClass", Const, 0, ""},
    +		{"ErrInvalidCharRange", Const, 0, ""},
    +		{"ErrInvalidEscape", Const, 0, ""},
    +		{"ErrInvalidNamedCapture", Const, 0, ""},
    +		{"ErrInvalidPerlOp", Const, 0, ""},
    +		{"ErrInvalidRepeatOp", Const, 0, ""},
    +		{"ErrInvalidRepeatSize", Const, 0, ""},
    +		{"ErrInvalidUTF8", Const, 0, ""},
    +		{"ErrLarge", Const, 20, ""},
    +		{"ErrMissingBracket", Const, 0, ""},
    +		{"ErrMissingParen", Const, 0, ""},
    +		{"ErrMissingRepeatArgument", Const, 0, ""},
    +		{"ErrNestingDepth", Const, 19, ""},
    +		{"ErrTrailingBackslash", Const, 0, ""},
    +		{"ErrUnexpectedParen", Const, 1, ""},
    +		{"Error", Type, 0, ""},
    +		{"Error.Code", Field, 0, ""},
    +		{"Error.Expr", Field, 0, ""},
    +		{"ErrorCode", Type, 0, ""},
    +		{"Flags", Type, 0, ""},
    +		{"FoldCase", Const, 0, ""},
    +		{"Inst", Type, 0, ""},
    +		{"Inst.Arg", Field, 0, ""},
    +		{"Inst.Op", Field, 0, ""},
    +		{"Inst.Out", Field, 0, ""},
    +		{"Inst.Rune", Field, 0, ""},
    +		{"InstAlt", Const, 0, ""},
    +		{"InstAltMatch", Const, 0, ""},
    +		{"InstCapture", Const, 0, ""},
    +		{"InstEmptyWidth", Const, 0, ""},
    +		{"InstFail", Const, 0, ""},
    +		{"InstMatch", Const, 0, ""},
    +		{"InstNop", Const, 0, ""},
    +		{"InstOp", Type, 0, ""},
    +		{"InstRune", Const, 0, ""},
    +		{"InstRune1", Const, 0, ""},
    +		{"InstRuneAny", Const, 0, ""},
    +		{"InstRuneAnyNotNL", Const, 0, ""},
    +		{"IsWordChar", Func, 0, "func(r rune) bool"},
    +		{"Literal", Const, 0, ""},
    +		{"MatchNL", Const, 0, ""},
    +		{"NonGreedy", Const, 0, ""},
    +		{"OneLine", Const, 0, ""},
    +		{"Op", Type, 0, ""},
    +		{"OpAlternate", Const, 0, ""},
    +		{"OpAnyChar", Const, 0, ""},
    +		{"OpAnyCharNotNL", Const, 0, ""},
    +		{"OpBeginLine", Const, 0, ""},
    +		{"OpBeginText", Const, 0, ""},
    +		{"OpCapture", Const, 0, ""},
    +		{"OpCharClass", Const, 0, ""},
    +		{"OpConcat", Const, 0, ""},
    +		{"OpEmptyMatch", Const, 0, ""},
    +		{"OpEndLine", Const, 0, ""},
    +		{"OpEndText", Const, 0, ""},
    +		{"OpLiteral", Const, 0, ""},
    +		{"OpNoMatch", Const, 0, ""},
    +		{"OpNoWordBoundary", Const, 0, ""},
    +		{"OpPlus", Const, 0, ""},
    +		{"OpQuest", Const, 0, ""},
    +		{"OpRepeat", Const, 0, ""},
    +		{"OpStar", Const, 0, ""},
    +		{"OpWordBoundary", Const, 0, ""},
    +		{"POSIX", Const, 0, ""},
    +		{"Parse", Func, 0, "func(s string, flags Flags) (*Regexp, error)"},
    +		{"Perl", Const, 0, ""},
    +		{"PerlX", Const, 0, ""},
    +		{"Prog", Type, 0, ""},
    +		{"Prog.Inst", Field, 0, ""},
    +		{"Prog.NumCap", Field, 0, ""},
    +		{"Prog.Start", Field, 0, ""},
    +		{"Regexp", Type, 0, ""},
    +		{"Regexp.Cap", Field, 0, ""},
    +		{"Regexp.Flags", Field, 0, ""},
    +		{"Regexp.Max", Field, 0, ""},
    +		{"Regexp.Min", Field, 0, ""},
    +		{"Regexp.Name", Field, 0, ""},
    +		{"Regexp.Op", Field, 0, ""},
    +		{"Regexp.Rune", Field, 0, ""},
    +		{"Regexp.Rune0", Field, 0, ""},
    +		{"Regexp.Sub", Field, 0, ""},
    +		{"Regexp.Sub0", Field, 0, ""},
    +		{"Simple", Const, 0, ""},
    +		{"UnicodeGroups", Const, 0, ""},
    +		{"WasDollar", Const, 0, ""},
     	},
     	"runtime": {
    -		{"(*BlockProfileRecord).Stack", Method, 1},
    -		{"(*Frames).Next", Method, 7},
    -		{"(*Func).Entry", Method, 0},
    -		{"(*Func).FileLine", Method, 0},
    -		{"(*Func).Name", Method, 0},
    -		{"(*MemProfileRecord).InUseBytes", Method, 0},
    -		{"(*MemProfileRecord).InUseObjects", Method, 0},
    -		{"(*MemProfileRecord).Stack", Method, 0},
    -		{"(*PanicNilError).Error", Method, 21},
    -		{"(*PanicNilError).RuntimeError", Method, 21},
    -		{"(*Pinner).Pin", Method, 21},
    -		{"(*Pinner).Unpin", Method, 21},
    -		{"(*StackRecord).Stack", Method, 0},
    -		{"(*TypeAssertionError).Error", Method, 0},
    -		{"(*TypeAssertionError).RuntimeError", Method, 0},
    -		{"BlockProfile", Func, 1},
    -		{"BlockProfileRecord", Type, 1},
    -		{"BlockProfileRecord.Count", Field, 1},
    -		{"BlockProfileRecord.Cycles", Field, 1},
    -		{"BlockProfileRecord.StackRecord", Field, 1},
    -		{"Breakpoint", Func, 0},
    -		{"CPUProfile", Func, 0},
    -		{"Caller", Func, 0},
    -		{"Callers", Func, 0},
    -		{"CallersFrames", Func, 7},
    -		{"Compiler", Const, 0},
    -		{"Error", Type, 0},
    -		{"Frame", Type, 7},
    -		{"Frame.Entry", Field, 7},
    -		{"Frame.File", Field, 7},
    -		{"Frame.Func", Field, 7},
    -		{"Frame.Function", Field, 7},
    -		{"Frame.Line", Field, 7},
    -		{"Frame.PC", Field, 7},
    -		{"Frames", Type, 7},
    -		{"Func", Type, 0},
    -		{"FuncForPC", Func, 0},
    -		{"GC", Func, 0},
    -		{"GOARCH", Const, 0},
    -		{"GOMAXPROCS", Func, 0},
    -		{"GOOS", Const, 0},
    -		{"GOROOT", Func, 0},
    -		{"Goexit", Func, 0},
    -		{"GoroutineProfile", Func, 0},
    -		{"Gosched", Func, 0},
    -		{"KeepAlive", Func, 7},
    -		{"LockOSThread", Func, 0},
    -		{"MemProfile", Func, 0},
    -		{"MemProfileRate", Var, 0},
    -		{"MemProfileRecord", Type, 0},
    -		{"MemProfileRecord.AllocBytes", Field, 0},
    -		{"MemProfileRecord.AllocObjects", Field, 0},
    -		{"MemProfileRecord.FreeBytes", Field, 0},
    -		{"MemProfileRecord.FreeObjects", Field, 0},
    -		{"MemProfileRecord.Stack0", Field, 0},
    -		{"MemStats", Type, 0},
    -		{"MemStats.Alloc", Field, 0},
    -		{"MemStats.BuckHashSys", Field, 0},
    -		{"MemStats.BySize", Field, 0},
    -		{"MemStats.DebugGC", Field, 0},
    -		{"MemStats.EnableGC", Field, 0},
    -		{"MemStats.Frees", Field, 0},
    -		{"MemStats.GCCPUFraction", Field, 5},
    -		{"MemStats.GCSys", Field, 2},
    -		{"MemStats.HeapAlloc", Field, 0},
    -		{"MemStats.HeapIdle", Field, 0},
    -		{"MemStats.HeapInuse", Field, 0},
    -		{"MemStats.HeapObjects", Field, 0},
    -		{"MemStats.HeapReleased", Field, 0},
    -		{"MemStats.HeapSys", Field, 0},
    -		{"MemStats.LastGC", Field, 0},
    -		{"MemStats.Lookups", Field, 0},
    -		{"MemStats.MCacheInuse", Field, 0},
    -		{"MemStats.MCacheSys", Field, 0},
    -		{"MemStats.MSpanInuse", Field, 0},
    -		{"MemStats.MSpanSys", Field, 0},
    -		{"MemStats.Mallocs", Field, 0},
    -		{"MemStats.NextGC", Field, 0},
    -		{"MemStats.NumForcedGC", Field, 8},
    -		{"MemStats.NumGC", Field, 0},
    -		{"MemStats.OtherSys", Field, 2},
    -		{"MemStats.PauseEnd", Field, 4},
    -		{"MemStats.PauseNs", Field, 0},
    -		{"MemStats.PauseTotalNs", Field, 0},
    -		{"MemStats.StackInuse", Field, 0},
    -		{"MemStats.StackSys", Field, 0},
    -		{"MemStats.Sys", Field, 0},
    -		{"MemStats.TotalAlloc", Field, 0},
    -		{"MutexProfile", Func, 8},
    -		{"NumCPU", Func, 0},
    -		{"NumCgoCall", Func, 0},
    -		{"NumGoroutine", Func, 0},
    -		{"PanicNilError", Type, 21},
    -		{"Pinner", Type, 21},
    -		{"ReadMemStats", Func, 0},
    -		{"ReadTrace", Func, 5},
    -		{"SetBlockProfileRate", Func, 1},
    -		{"SetCPUProfileRate", Func, 0},
    -		{"SetCgoTraceback", Func, 7},
    -		{"SetFinalizer", Func, 0},
    -		{"SetMutexProfileFraction", Func, 8},
    -		{"Stack", Func, 0},
    -		{"StackRecord", Type, 0},
    -		{"StackRecord.Stack0", Field, 0},
    -		{"StartTrace", Func, 5},
    -		{"StopTrace", Func, 5},
    -		{"ThreadCreateProfile", Func, 0},
    -		{"TypeAssertionError", Type, 0},
    -		{"UnlockOSThread", Func, 0},
    -		{"Version", Func, 0},
    +		{"(*BlockProfileRecord).Stack", Method, 1, ""},
    +		{"(*Frames).Next", Method, 7, ""},
    +		{"(*Func).Entry", Method, 0, ""},
    +		{"(*Func).FileLine", Method, 0, ""},
    +		{"(*Func).Name", Method, 0, ""},
    +		{"(*MemProfileRecord).InUseBytes", Method, 0, ""},
    +		{"(*MemProfileRecord).InUseObjects", Method, 0, ""},
    +		{"(*MemProfileRecord).Stack", Method, 0, ""},
    +		{"(*PanicNilError).Error", Method, 21, ""},
    +		{"(*PanicNilError).RuntimeError", Method, 21, ""},
    +		{"(*Pinner).Pin", Method, 21, ""},
    +		{"(*Pinner).Unpin", Method, 21, ""},
    +		{"(*StackRecord).Stack", Method, 0, ""},
    +		{"(*TypeAssertionError).Error", Method, 0, ""},
    +		{"(*TypeAssertionError).RuntimeError", Method, 0, ""},
    +		{"(Cleanup).Stop", Method, 24, ""},
    +		{"AddCleanup", Func, 24, "func[T, S any](ptr *T, cleanup func(S), arg S) Cleanup"},
    +		{"BlockProfile", Func, 1, "func(p []BlockProfileRecord) (n int, ok bool)"},
    +		{"BlockProfileRecord", Type, 1, ""},
    +		{"BlockProfileRecord.Count", Field, 1, ""},
    +		{"BlockProfileRecord.Cycles", Field, 1, ""},
    +		{"BlockProfileRecord.StackRecord", Field, 1, ""},
    +		{"Breakpoint", Func, 0, "func()"},
    +		{"CPUProfile", Func, 0, "func() []byte"},
    +		{"Caller", Func, 0, "func(skip int) (pc uintptr, file string, line int, ok bool)"},
    +		{"Callers", Func, 0, "func(skip int, pc []uintptr) int"},
    +		{"CallersFrames", Func, 7, "func(callers []uintptr) *Frames"},
    +		{"Cleanup", Type, 24, ""},
    +		{"Compiler", Const, 0, ""},
    +		{"Error", Type, 0, ""},
    +		{"Frame", Type, 7, ""},
    +		{"Frame.Entry", Field, 7, ""},
    +		{"Frame.File", Field, 7, ""},
    +		{"Frame.Func", Field, 7, ""},
    +		{"Frame.Function", Field, 7, ""},
    +		{"Frame.Line", Field, 7, ""},
    +		{"Frame.PC", Field, 7, ""},
    +		{"Frames", Type, 7, ""},
    +		{"Func", Type, 0, ""},
    +		{"FuncForPC", Func, 0, "func(pc uintptr) *Func"},
    +		{"GC", Func, 0, "func()"},
    +		{"GOARCH", Const, 0, ""},
    +		{"GOMAXPROCS", Func, 0, "func(n int) int"},
    +		{"GOOS", Const, 0, ""},
    +		{"GOROOT", Func, 0, "func() string"},
    +		{"Goexit", Func, 0, "func()"},
    +		{"GoroutineProfile", Func, 0, "func(p []StackRecord) (n int, ok bool)"},
    +		{"Gosched", Func, 0, "func()"},
    +		{"KeepAlive", Func, 7, "func(x any)"},
    +		{"LockOSThread", Func, 0, "func()"},
    +		{"MemProfile", Func, 0, "func(p []MemProfileRecord, inuseZero bool) (n int, ok bool)"},
    +		{"MemProfileRate", Var, 0, ""},
    +		{"MemProfileRecord", Type, 0, ""},
    +		{"MemProfileRecord.AllocBytes", Field, 0, ""},
    +		{"MemProfileRecord.AllocObjects", Field, 0, ""},
    +		{"MemProfileRecord.FreeBytes", Field, 0, ""},
    +		{"MemProfileRecord.FreeObjects", Field, 0, ""},
    +		{"MemProfileRecord.Stack0", Field, 0, ""},
    +		{"MemStats", Type, 0, ""},
    +		{"MemStats.Alloc", Field, 0, ""},
    +		{"MemStats.BuckHashSys", Field, 0, ""},
    +		{"MemStats.BySize", Field, 0, ""},
    +		{"MemStats.DebugGC", Field, 0, ""},
    +		{"MemStats.EnableGC", Field, 0, ""},
    +		{"MemStats.Frees", Field, 0, ""},
    +		{"MemStats.GCCPUFraction", Field, 5, ""},
    +		{"MemStats.GCSys", Field, 2, ""},
    +		{"MemStats.HeapAlloc", Field, 0, ""},
    +		{"MemStats.HeapIdle", Field, 0, ""},
    +		{"MemStats.HeapInuse", Field, 0, ""},
    +		{"MemStats.HeapObjects", Field, 0, ""},
    +		{"MemStats.HeapReleased", Field, 0, ""},
    +		{"MemStats.HeapSys", Field, 0, ""},
    +		{"MemStats.LastGC", Field, 0, ""},
    +		{"MemStats.Lookups", Field, 0, ""},
    +		{"MemStats.MCacheInuse", Field, 0, ""},
    +		{"MemStats.MCacheSys", Field, 0, ""},
    +		{"MemStats.MSpanInuse", Field, 0, ""},
    +		{"MemStats.MSpanSys", Field, 0, ""},
    +		{"MemStats.Mallocs", Field, 0, ""},
    +		{"MemStats.NextGC", Field, 0, ""},
    +		{"MemStats.NumForcedGC", Field, 8, ""},
    +		{"MemStats.NumGC", Field, 0, ""},
    +		{"MemStats.OtherSys", Field, 2, ""},
    +		{"MemStats.PauseEnd", Field, 4, ""},
    +		{"MemStats.PauseNs", Field, 0, ""},
    +		{"MemStats.PauseTotalNs", Field, 0, ""},
    +		{"MemStats.StackInuse", Field, 0, ""},
    +		{"MemStats.StackSys", Field, 0, ""},
    +		{"MemStats.Sys", Field, 0, ""},
    +		{"MemStats.TotalAlloc", Field, 0, ""},
    +		{"MutexProfile", Func, 8, "func(p []BlockProfileRecord) (n int, ok bool)"},
    +		{"NumCPU", Func, 0, "func() int"},
    +		{"NumCgoCall", Func, 0, "func() int64"},
    +		{"NumGoroutine", Func, 0, "func() int"},
    +		{"PanicNilError", Type, 21, ""},
    +		{"Pinner", Type, 21, ""},
    +		{"ReadMemStats", Func, 0, "func(m *MemStats)"},
    +		{"ReadTrace", Func, 5, "func() []byte"},
    +		{"SetBlockProfileRate", Func, 1, "func(rate int)"},
    +		{"SetCPUProfileRate", Func, 0, "func(hz int)"},
    +		{"SetCgoTraceback", Func, 7, "func(version int, traceback unsafe.Pointer, context unsafe.Pointer, symbolizer unsafe.Pointer)"},
    +		{"SetDefaultGOMAXPROCS", Func, 25, "func()"},
    +		{"SetFinalizer", Func, 0, "func(obj any, finalizer any)"},
    +		{"SetMutexProfileFraction", Func, 8, "func(rate int) int"},
    +		{"Stack", Func, 0, "func(buf []byte, all bool) int"},
    +		{"StackRecord", Type, 0, ""},
    +		{"StackRecord.Stack0", Field, 0, ""},
    +		{"StartTrace", Func, 5, "func() error"},
    +		{"StopTrace", Func, 5, "func()"},
    +		{"ThreadCreateProfile", Func, 0, "func(p []StackRecord) (n int, ok bool)"},
    +		{"TypeAssertionError", Type, 0, ""},
    +		{"UnlockOSThread", Func, 0, "func()"},
    +		{"Version", Func, 0, "func() string"},
     	},
     	"runtime/cgo": {
    -		{"(Handle).Delete", Method, 17},
    -		{"(Handle).Value", Method, 17},
    -		{"Handle", Type, 17},
    -		{"Incomplete", Type, 20},
    -		{"NewHandle", Func, 17},
    +		{"(Handle).Delete", Method, 17, ""},
    +		{"(Handle).Value", Method, 17, ""},
    +		{"Handle", Type, 17, ""},
    +		{"Incomplete", Type, 20, ""},
    +		{"NewHandle", Func, 17, ""},
     	},
     	"runtime/coverage": {
    -		{"ClearCounters", Func, 20},
    -		{"WriteCounters", Func, 20},
    -		{"WriteCountersDir", Func, 20},
    -		{"WriteMeta", Func, 20},
    -		{"WriteMetaDir", Func, 20},
    +		{"ClearCounters", Func, 20, "func() error"},
    +		{"WriteCounters", Func, 20, "func(w io.Writer) error"},
    +		{"WriteCountersDir", Func, 20, "func(dir string) error"},
    +		{"WriteMeta", Func, 20, "func(w io.Writer) error"},
    +		{"WriteMetaDir", Func, 20, "func(dir string) error"},
     	},
     	"runtime/debug": {
    -		{"(*BuildInfo).String", Method, 18},
    -		{"BuildInfo", Type, 12},
    -		{"BuildInfo.Deps", Field, 12},
    -		{"BuildInfo.GoVersion", Field, 18},
    -		{"BuildInfo.Main", Field, 12},
    -		{"BuildInfo.Path", Field, 12},
    -		{"BuildInfo.Settings", Field, 18},
    -		{"BuildSetting", Type, 18},
    -		{"BuildSetting.Key", Field, 18},
    -		{"BuildSetting.Value", Field, 18},
    -		{"CrashOptions", Type, 23},
    -		{"FreeOSMemory", Func, 1},
    -		{"GCStats", Type, 1},
    -		{"GCStats.LastGC", Field, 1},
    -		{"GCStats.NumGC", Field, 1},
    -		{"GCStats.Pause", Field, 1},
    -		{"GCStats.PauseEnd", Field, 4},
    -		{"GCStats.PauseQuantiles", Field, 1},
    -		{"GCStats.PauseTotal", Field, 1},
    -		{"Module", Type, 12},
    -		{"Module.Path", Field, 12},
    -		{"Module.Replace", Field, 12},
    -		{"Module.Sum", Field, 12},
    -		{"Module.Version", Field, 12},
    -		{"ParseBuildInfo", Func, 18},
    -		{"PrintStack", Func, 0},
    -		{"ReadBuildInfo", Func, 12},
    -		{"ReadGCStats", Func, 1},
    -		{"SetCrashOutput", Func, 23},
    -		{"SetGCPercent", Func, 1},
    -		{"SetMaxStack", Func, 2},
    -		{"SetMaxThreads", Func, 2},
    -		{"SetMemoryLimit", Func, 19},
    -		{"SetPanicOnFault", Func, 3},
    -		{"SetTraceback", Func, 6},
    -		{"Stack", Func, 0},
    -		{"WriteHeapDump", Func, 3},
    +		{"(*BuildInfo).String", Method, 18, ""},
    +		{"BuildInfo", Type, 12, ""},
    +		{"BuildInfo.Deps", Field, 12, ""},
    +		{"BuildInfo.GoVersion", Field, 18, ""},
    +		{"BuildInfo.Main", Field, 12, ""},
    +		{"BuildInfo.Path", Field, 12, ""},
    +		{"BuildInfo.Settings", Field, 18, ""},
    +		{"BuildSetting", Type, 18, ""},
    +		{"BuildSetting.Key", Field, 18, ""},
    +		{"BuildSetting.Value", Field, 18, ""},
    +		{"CrashOptions", Type, 23, ""},
    +		{"FreeOSMemory", Func, 1, "func()"},
    +		{"GCStats", Type, 1, ""},
    +		{"GCStats.LastGC", Field, 1, ""},
    +		{"GCStats.NumGC", Field, 1, ""},
    +		{"GCStats.Pause", Field, 1, ""},
    +		{"GCStats.PauseEnd", Field, 4, ""},
    +		{"GCStats.PauseQuantiles", Field, 1, ""},
    +		{"GCStats.PauseTotal", Field, 1, ""},
    +		{"Module", Type, 12, ""},
    +		{"Module.Path", Field, 12, ""},
    +		{"Module.Replace", Field, 12, ""},
    +		{"Module.Sum", Field, 12, ""},
    +		{"Module.Version", Field, 12, ""},
    +		{"ParseBuildInfo", Func, 18, "func(data string) (bi *BuildInfo, err error)"},
    +		{"PrintStack", Func, 0, "func()"},
    +		{"ReadBuildInfo", Func, 12, "func() (info *BuildInfo, ok bool)"},
    +		{"ReadGCStats", Func, 1, "func(stats *GCStats)"},
    +		{"SetCrashOutput", Func, 23, "func(f *os.File, opts CrashOptions) error"},
    +		{"SetGCPercent", Func, 1, "func(percent int) int"},
    +		{"SetMaxStack", Func, 2, "func(bytes int) int"},
    +		{"SetMaxThreads", Func, 2, "func(threads int) int"},
    +		{"SetMemoryLimit", Func, 19, "func(limit int64) int64"},
    +		{"SetPanicOnFault", Func, 3, "func(enabled bool) bool"},
    +		{"SetTraceback", Func, 6, "func(level string)"},
    +		{"Stack", Func, 0, "func() []byte"},
    +		{"WriteHeapDump", Func, 3, "func(fd uintptr)"},
     	},
     	"runtime/metrics": {
    -		{"(Value).Float64", Method, 16},
    -		{"(Value).Float64Histogram", Method, 16},
    -		{"(Value).Kind", Method, 16},
    -		{"(Value).Uint64", Method, 16},
    -		{"All", Func, 16},
    -		{"Description", Type, 16},
    -		{"Description.Cumulative", Field, 16},
    -		{"Description.Description", Field, 16},
    -		{"Description.Kind", Field, 16},
    -		{"Description.Name", Field, 16},
    -		{"Float64Histogram", Type, 16},
    -		{"Float64Histogram.Buckets", Field, 16},
    -		{"Float64Histogram.Counts", Field, 16},
    -		{"KindBad", Const, 16},
    -		{"KindFloat64", Const, 16},
    -		{"KindFloat64Histogram", Const, 16},
    -		{"KindUint64", Const, 16},
    -		{"Read", Func, 16},
    -		{"Sample", Type, 16},
    -		{"Sample.Name", Field, 16},
    -		{"Sample.Value", Field, 16},
    -		{"Value", Type, 16},
    -		{"ValueKind", Type, 16},
    +		{"(Value).Float64", Method, 16, ""},
    +		{"(Value).Float64Histogram", Method, 16, ""},
    +		{"(Value).Kind", Method, 16, ""},
    +		{"(Value).Uint64", Method, 16, ""},
    +		{"All", Func, 16, "func() []Description"},
    +		{"Description", Type, 16, ""},
    +		{"Description.Cumulative", Field, 16, ""},
    +		{"Description.Description", Field, 16, ""},
    +		{"Description.Kind", Field, 16, ""},
    +		{"Description.Name", Field, 16, ""},
    +		{"Float64Histogram", Type, 16, ""},
    +		{"Float64Histogram.Buckets", Field, 16, ""},
    +		{"Float64Histogram.Counts", Field, 16, ""},
    +		{"KindBad", Const, 16, ""},
    +		{"KindFloat64", Const, 16, ""},
    +		{"KindFloat64Histogram", Const, 16, ""},
    +		{"KindUint64", Const, 16, ""},
    +		{"Read", Func, 16, "func(m []Sample)"},
    +		{"Sample", Type, 16, ""},
    +		{"Sample.Name", Field, 16, ""},
    +		{"Sample.Value", Field, 16, ""},
    +		{"Value", Type, 16, ""},
    +		{"ValueKind", Type, 16, ""},
     	},
     	"runtime/pprof": {
    -		{"(*Profile).Add", Method, 0},
    -		{"(*Profile).Count", Method, 0},
    -		{"(*Profile).Name", Method, 0},
    -		{"(*Profile).Remove", Method, 0},
    -		{"(*Profile).WriteTo", Method, 0},
    -		{"Do", Func, 9},
    -		{"ForLabels", Func, 9},
    -		{"Label", Func, 9},
    -		{"LabelSet", Type, 9},
    -		{"Labels", Func, 9},
    -		{"Lookup", Func, 0},
    -		{"NewProfile", Func, 0},
    -		{"Profile", Type, 0},
    -		{"Profiles", Func, 0},
    -		{"SetGoroutineLabels", Func, 9},
    -		{"StartCPUProfile", Func, 0},
    -		{"StopCPUProfile", Func, 0},
    -		{"WithLabels", Func, 9},
    -		{"WriteHeapProfile", Func, 0},
    +		{"(*Profile).Add", Method, 0, ""},
    +		{"(*Profile).Count", Method, 0, ""},
    +		{"(*Profile).Name", Method, 0, ""},
    +		{"(*Profile).Remove", Method, 0, ""},
    +		{"(*Profile).WriteTo", Method, 0, ""},
    +		{"Do", Func, 9, "func(ctx context.Context, labels LabelSet, f func(context.Context))"},
    +		{"ForLabels", Func, 9, "func(ctx context.Context, f func(key string, value string) bool)"},
    +		{"Label", Func, 9, "func(ctx context.Context, key string) (string, bool)"},
    +		{"LabelSet", Type, 9, ""},
    +		{"Labels", Func, 9, "func(args ...string) LabelSet"},
    +		{"Lookup", Func, 0, "func(name string) *Profile"},
    +		{"NewProfile", Func, 0, "func(name string) *Profile"},
    +		{"Profile", Type, 0, ""},
    +		{"Profiles", Func, 0, "func() []*Profile"},
    +		{"SetGoroutineLabels", Func, 9, "func(ctx context.Context)"},
    +		{"StartCPUProfile", Func, 0, "func(w io.Writer) error"},
    +		{"StopCPUProfile", Func, 0, "func()"},
    +		{"WithLabels", Func, 9, "func(ctx context.Context, labels LabelSet) context.Context"},
    +		{"WriteHeapProfile", Func, 0, "func(w io.Writer) error"},
     	},
     	"runtime/trace": {
    -		{"(*Region).End", Method, 11},
    -		{"(*Task).End", Method, 11},
    -		{"IsEnabled", Func, 11},
    -		{"Log", Func, 11},
    -		{"Logf", Func, 11},
    -		{"NewTask", Func, 11},
    -		{"Region", Type, 11},
    -		{"Start", Func, 5},
    -		{"StartRegion", Func, 11},
    -		{"Stop", Func, 5},
    -		{"Task", Type, 11},
    -		{"WithRegion", Func, 11},
    +		{"(*FlightRecorder).Enabled", Method, 25, ""},
    +		{"(*FlightRecorder).Start", Method, 25, ""},
    +		{"(*FlightRecorder).Stop", Method, 25, ""},
    +		{"(*FlightRecorder).WriteTo", Method, 25, ""},
    +		{"(*Region).End", Method, 11, ""},
    +		{"(*Task).End", Method, 11, ""},
    +		{"FlightRecorder", Type, 25, ""},
    +		{"FlightRecorderConfig", Type, 25, ""},
    +		{"FlightRecorderConfig.MaxBytes", Field, 25, ""},
    +		{"FlightRecorderConfig.MinAge", Field, 25, ""},
    +		{"IsEnabled", Func, 11, "func() bool"},
    +		{"Log", Func, 11, "func(ctx context.Context, category string, message string)"},
    +		{"Logf", Func, 11, "func(ctx context.Context, category string, format string, args ...any)"},
    +		{"NewFlightRecorder", Func, 25, "func(cfg FlightRecorderConfig) *FlightRecorder"},
    +		{"NewTask", Func, 11, "func(pctx context.Context, taskType string) (ctx context.Context, task *Task)"},
    +		{"Region", Type, 11, ""},
    +		{"Start", Func, 5, "func(w io.Writer) error"},
    +		{"StartRegion", Func, 11, "func(ctx context.Context, regionType string) *Region"},
    +		{"Stop", Func, 5, "func()"},
    +		{"Task", Type, 11, ""},
    +		{"WithRegion", Func, 11, "func(ctx context.Context, regionType string, fn func())"},
     	},
     	"slices": {
    -		{"All", Func, 23},
    -		{"AppendSeq", Func, 23},
    -		{"Backward", Func, 23},
    -		{"BinarySearch", Func, 21},
    -		{"BinarySearchFunc", Func, 21},
    -		{"Chunk", Func, 23},
    -		{"Clip", Func, 21},
    -		{"Clone", Func, 21},
    -		{"Collect", Func, 23},
    -		{"Compact", Func, 21},
    -		{"CompactFunc", Func, 21},
    -		{"Compare", Func, 21},
    -		{"CompareFunc", Func, 21},
    -		{"Concat", Func, 22},
    -		{"Contains", Func, 21},
    -		{"ContainsFunc", Func, 21},
    -		{"Delete", Func, 21},
    -		{"DeleteFunc", Func, 21},
    -		{"Equal", Func, 21},
    -		{"EqualFunc", Func, 21},
    -		{"Grow", Func, 21},
    -		{"Index", Func, 21},
    -		{"IndexFunc", Func, 21},
    -		{"Insert", Func, 21},
    -		{"IsSorted", Func, 21},
    -		{"IsSortedFunc", Func, 21},
    -		{"Max", Func, 21},
    -		{"MaxFunc", Func, 21},
    -		{"Min", Func, 21},
    -		{"MinFunc", Func, 21},
    -		{"Repeat", Func, 23},
    -		{"Replace", Func, 21},
    -		{"Reverse", Func, 21},
    -		{"Sort", Func, 21},
    -		{"SortFunc", Func, 21},
    -		{"SortStableFunc", Func, 21},
    -		{"Sorted", Func, 23},
    -		{"SortedFunc", Func, 23},
    -		{"SortedStableFunc", Func, 23},
    -		{"Values", Func, 23},
    +		{"All", Func, 23, "func[Slice ~[]E, E any](s Slice) iter.Seq2[int, E]"},
    +		{"AppendSeq", Func, 23, "func[Slice ~[]E, E any](s Slice, seq iter.Seq[E]) Slice"},
    +		{"Backward", Func, 23, "func[Slice ~[]E, E any](s Slice) iter.Seq2[int, E]"},
    +		{"BinarySearch", Func, 21, "func[S ~[]E, E cmp.Ordered](x S, target E) (int, bool)"},
    +		{"BinarySearchFunc", Func, 21, "func[S ~[]E, E, T any](x S, target T, cmp func(E, T) int) (int, bool)"},
    +		{"Chunk", Func, 23, "func[Slice ~[]E, E any](s Slice, n int) iter.Seq[Slice]"},
    +		{"Clip", Func, 21, "func[S ~[]E, E any](s S) S"},
    +		{"Clone", Func, 21, "func[S ~[]E, E any](s S) S"},
    +		{"Collect", Func, 23, "func[E any](seq iter.Seq[E]) []E"},
    +		{"Compact", Func, 21, "func[S ~[]E, E comparable](s S) S"},
    +		{"CompactFunc", Func, 21, "func[S ~[]E, E any](s S, eq func(E, E) bool) S"},
    +		{"Compare", Func, 21, "func[S ~[]E, E cmp.Ordered](s1 S, s2 S) int"},
    +		{"CompareFunc", Func, 21, "func[S1 ~[]E1, S2 ~[]E2, E1, E2 any](s1 S1, s2 S2, cmp func(E1, E2) int) int"},
    +		{"Concat", Func, 22, "func[S ~[]E, E any](slices ...S) S"},
    +		{"Contains", Func, 21, "func[S ~[]E, E comparable](s S, v E) bool"},
    +		{"ContainsFunc", Func, 21, "func[S ~[]E, E any](s S, f func(E) bool) bool"},
    +		{"Delete", Func, 21, "func[S ~[]E, E any](s S, i int, j int) S"},
    +		{"DeleteFunc", Func, 21, "func[S ~[]E, E any](s S, del func(E) bool) S"},
    +		{"Equal", Func, 21, "func[S ~[]E, E comparable](s1 S, s2 S) bool"},
    +		{"EqualFunc", Func, 21, "func[S1 ~[]E1, S2 ~[]E2, E1, E2 any](s1 S1, s2 S2, eq func(E1, E2) bool) bool"},
    +		{"Grow", Func, 21, "func[S ~[]E, E any](s S, n int) S"},
    +		{"Index", Func, 21, "func[S ~[]E, E comparable](s S, v E) int"},
    +		{"IndexFunc", Func, 21, "func[S ~[]E, E any](s S, f func(E) bool) int"},
    +		{"Insert", Func, 21, "func[S ~[]E, E any](s S, i int, v ...E) S"},
    +		{"IsSorted", Func, 21, "func[S ~[]E, E cmp.Ordered](x S) bool"},
    +		{"IsSortedFunc", Func, 21, "func[S ~[]E, E any](x S, cmp func(a E, b E) int) bool"},
    +		{"Max", Func, 21, "func[S ~[]E, E cmp.Ordered](x S) E"},
    +		{"MaxFunc", Func, 21, "func[S ~[]E, E any](x S, cmp func(a E, b E) int) E"},
    +		{"Min", Func, 21, "func[S ~[]E, E cmp.Ordered](x S) E"},
    +		{"MinFunc", Func, 21, "func[S ~[]E, E any](x S, cmp func(a E, b E) int) E"},
    +		{"Repeat", Func, 23, "func[S ~[]E, E any](x S, count int) S"},
    +		{"Replace", Func, 21, "func[S ~[]E, E any](s S, i int, j int, v ...E) S"},
    +		{"Reverse", Func, 21, "func[S ~[]E, E any](s S)"},
    +		{"Sort", Func, 21, "func[S ~[]E, E cmp.Ordered](x S)"},
    +		{"SortFunc", Func, 21, "func[S ~[]E, E any](x S, cmp func(a E, b E) int)"},
    +		{"SortStableFunc", Func, 21, "func[S ~[]E, E any](x S, cmp func(a E, b E) int)"},
    +		{"Sorted", Func, 23, "func[E cmp.Ordered](seq iter.Seq[E]) []E"},
    +		{"SortedFunc", Func, 23, "func[E any](seq iter.Seq[E], cmp func(E, E) int) []E"},
    +		{"SortedStableFunc", Func, 23, "func[E any](seq iter.Seq[E], cmp func(E, E) int) []E"},
    +		{"Values", Func, 23, "func[Slice ~[]E, E any](s Slice) iter.Seq[E]"},
     	},
     	"sort": {
    -		{"(Float64Slice).Len", Method, 0},
    -		{"(Float64Slice).Less", Method, 0},
    -		{"(Float64Slice).Search", Method, 0},
    -		{"(Float64Slice).Sort", Method, 0},
    -		{"(Float64Slice).Swap", Method, 0},
    -		{"(IntSlice).Len", Method, 0},
    -		{"(IntSlice).Less", Method, 0},
    -		{"(IntSlice).Search", Method, 0},
    -		{"(IntSlice).Sort", Method, 0},
    -		{"(IntSlice).Swap", Method, 0},
    -		{"(StringSlice).Len", Method, 0},
    -		{"(StringSlice).Less", Method, 0},
    -		{"(StringSlice).Search", Method, 0},
    -		{"(StringSlice).Sort", Method, 0},
    -		{"(StringSlice).Swap", Method, 0},
    -		{"Find", Func, 19},
    -		{"Float64Slice", Type, 0},
    -		{"Float64s", Func, 0},
    -		{"Float64sAreSorted", Func, 0},
    -		{"IntSlice", Type, 0},
    -		{"Interface", Type, 0},
    -		{"Ints", Func, 0},
    -		{"IntsAreSorted", Func, 0},
    -		{"IsSorted", Func, 0},
    -		{"Reverse", Func, 1},
    -		{"Search", Func, 0},
    -		{"SearchFloat64s", Func, 0},
    -		{"SearchInts", Func, 0},
    -		{"SearchStrings", Func, 0},
    -		{"Slice", Func, 8},
    -		{"SliceIsSorted", Func, 8},
    -		{"SliceStable", Func, 8},
    -		{"Sort", Func, 0},
    -		{"Stable", Func, 2},
    -		{"StringSlice", Type, 0},
    -		{"Strings", Func, 0},
    -		{"StringsAreSorted", Func, 0},
    +		{"(Float64Slice).Len", Method, 0, ""},
    +		{"(Float64Slice).Less", Method, 0, ""},
    +		{"(Float64Slice).Search", Method, 0, ""},
    +		{"(Float64Slice).Sort", Method, 0, ""},
    +		{"(Float64Slice).Swap", Method, 0, ""},
    +		{"(IntSlice).Len", Method, 0, ""},
    +		{"(IntSlice).Less", Method, 0, ""},
    +		{"(IntSlice).Search", Method, 0, ""},
    +		{"(IntSlice).Sort", Method, 0, ""},
    +		{"(IntSlice).Swap", Method, 0, ""},
    +		{"(StringSlice).Len", Method, 0, ""},
    +		{"(StringSlice).Less", Method, 0, ""},
    +		{"(StringSlice).Search", Method, 0, ""},
    +		{"(StringSlice).Sort", Method, 0, ""},
    +		{"(StringSlice).Swap", Method, 0, ""},
    +		{"Find", Func, 19, "func(n int, cmp func(int) int) (i int, found bool)"},
    +		{"Float64Slice", Type, 0, ""},
    +		{"Float64s", Func, 0, "func(x []float64)"},
    +		{"Float64sAreSorted", Func, 0, "func(x []float64) bool"},
    +		{"IntSlice", Type, 0, ""},
    +		{"Interface", Type, 0, ""},
    +		{"Ints", Func, 0, "func(x []int)"},
    +		{"IntsAreSorted", Func, 0, "func(x []int) bool"},
    +		{"IsSorted", Func, 0, "func(data Interface) bool"},
    +		{"Reverse", Func, 1, "func(data Interface) Interface"},
    +		{"Search", Func, 0, "func(n int, f func(int) bool) int"},
    +		{"SearchFloat64s", Func, 0, "func(a []float64, x float64) int"},
    +		{"SearchInts", Func, 0, "func(a []int, x int) int"},
    +		{"SearchStrings", Func, 0, "func(a []string, x string) int"},
    +		{"Slice", Func, 8, "func(x any, less func(i int, j int) bool)"},
    +		{"SliceIsSorted", Func, 8, "func(x any, less func(i int, j int) bool) bool"},
    +		{"SliceStable", Func, 8, "func(x any, less func(i int, j int) bool)"},
    +		{"Sort", Func, 0, "func(data Interface)"},
    +		{"Stable", Func, 2, "func(data Interface)"},
    +		{"StringSlice", Type, 0, ""},
    +		{"Strings", Func, 0, "func(x []string)"},
    +		{"StringsAreSorted", Func, 0, "func(x []string) bool"},
     	},
     	"strconv": {
    -		{"(*NumError).Error", Method, 0},
    -		{"(*NumError).Unwrap", Method, 14},
    -		{"AppendBool", Func, 0},
    -		{"AppendFloat", Func, 0},
    -		{"AppendInt", Func, 0},
    -		{"AppendQuote", Func, 0},
    -		{"AppendQuoteRune", Func, 0},
    -		{"AppendQuoteRuneToASCII", Func, 0},
    -		{"AppendQuoteRuneToGraphic", Func, 6},
    -		{"AppendQuoteToASCII", Func, 0},
    -		{"AppendQuoteToGraphic", Func, 6},
    -		{"AppendUint", Func, 0},
    -		{"Atoi", Func, 0},
    -		{"CanBackquote", Func, 0},
    -		{"ErrRange", Var, 0},
    -		{"ErrSyntax", Var, 0},
    -		{"FormatBool", Func, 0},
    -		{"FormatComplex", Func, 15},
    -		{"FormatFloat", Func, 0},
    -		{"FormatInt", Func, 0},
    -		{"FormatUint", Func, 0},
    -		{"IntSize", Const, 0},
    -		{"IsGraphic", Func, 6},
    -		{"IsPrint", Func, 0},
    -		{"Itoa", Func, 0},
    -		{"NumError", Type, 0},
    -		{"NumError.Err", Field, 0},
    -		{"NumError.Func", Field, 0},
    -		{"NumError.Num", Field, 0},
    -		{"ParseBool", Func, 0},
    -		{"ParseComplex", Func, 15},
    -		{"ParseFloat", Func, 0},
    -		{"ParseInt", Func, 0},
    -		{"ParseUint", Func, 0},
    -		{"Quote", Func, 0},
    -		{"QuoteRune", Func, 0},
    -		{"QuoteRuneToASCII", Func, 0},
    -		{"QuoteRuneToGraphic", Func, 6},
    -		{"QuoteToASCII", Func, 0},
    -		{"QuoteToGraphic", Func, 6},
    -		{"QuotedPrefix", Func, 17},
    -		{"Unquote", Func, 0},
    -		{"UnquoteChar", Func, 0},
    +		{"(*NumError).Error", Method, 0, ""},
    +		{"(*NumError).Unwrap", Method, 14, ""},
    +		{"AppendBool", Func, 0, "func(dst []byte, b bool) []byte"},
    +		{"AppendFloat", Func, 0, "func(dst []byte, f float64, fmt byte, prec int, bitSize int) []byte"},
    +		{"AppendInt", Func, 0, "func(dst []byte, i int64, base int) []byte"},
    +		{"AppendQuote", Func, 0, "func(dst []byte, s string) []byte"},
    +		{"AppendQuoteRune", Func, 0, "func(dst []byte, r rune) []byte"},
    +		{"AppendQuoteRuneToASCII", Func, 0, "func(dst []byte, r rune) []byte"},
    +		{"AppendQuoteRuneToGraphic", Func, 6, "func(dst []byte, r rune) []byte"},
    +		{"AppendQuoteToASCII", Func, 0, "func(dst []byte, s string) []byte"},
    +		{"AppendQuoteToGraphic", Func, 6, "func(dst []byte, s string) []byte"},
    +		{"AppendUint", Func, 0, "func(dst []byte, i uint64, base int) []byte"},
    +		{"Atoi", Func, 0, "func(s string) (int, error)"},
    +		{"CanBackquote", Func, 0, "func(s string) bool"},
    +		{"ErrRange", Var, 0, ""},
    +		{"ErrSyntax", Var, 0, ""},
    +		{"FormatBool", Func, 0, "func(b bool) string"},
    +		{"FormatComplex", Func, 15, "func(c complex128, fmt byte, prec int, bitSize int) string"},
    +		{"FormatFloat", Func, 0, "func(f float64, fmt byte, prec int, bitSize int) string"},
    +		{"FormatInt", Func, 0, "func(i int64, base int) string"},
    +		{"FormatUint", Func, 0, "func(i uint64, base int) string"},
    +		{"IntSize", Const, 0, ""},
    +		{"IsGraphic", Func, 6, "func(r rune) bool"},
    +		{"IsPrint", Func, 0, "func(r rune) bool"},
    +		{"Itoa", Func, 0, "func(i int) string"},
    +		{"NumError", Type, 0, ""},
    +		{"NumError.Err", Field, 0, ""},
    +		{"NumError.Func", Field, 0, ""},
    +		{"NumError.Num", Field, 0, ""},
    +		{"ParseBool", Func, 0, "func(str string) (bool, error)"},
    +		{"ParseComplex", Func, 15, "func(s string, bitSize int) (complex128, error)"},
    +		{"ParseFloat", Func, 0, "func(s string, bitSize int) (float64, error)"},
    +		{"ParseInt", Func, 0, "func(s string, base int, bitSize int) (i int64, err error)"},
    +		{"ParseUint", Func, 0, "func(s string, base int, bitSize int) (uint64, error)"},
    +		{"Quote", Func, 0, "func(s string) string"},
    +		{"QuoteRune", Func, 0, "func(r rune) string"},
    +		{"QuoteRuneToASCII", Func, 0, "func(r rune) string"},
    +		{"QuoteRuneToGraphic", Func, 6, "func(r rune) string"},
    +		{"QuoteToASCII", Func, 0, "func(s string) string"},
    +		{"QuoteToGraphic", Func, 6, "func(s string) string"},
    +		{"QuotedPrefix", Func, 17, "func(s string) (string, error)"},
    +		{"Unquote", Func, 0, "func(s string) (string, error)"},
    +		{"UnquoteChar", Func, 0, "func(s string, quote byte) (value rune, multibyte bool, tail string, err error)"},
     	},
     	"strings": {
    -		{"(*Builder).Cap", Method, 12},
    -		{"(*Builder).Grow", Method, 10},
    -		{"(*Builder).Len", Method, 10},
    -		{"(*Builder).Reset", Method, 10},
    -		{"(*Builder).String", Method, 10},
    -		{"(*Builder).Write", Method, 10},
    -		{"(*Builder).WriteByte", Method, 10},
    -		{"(*Builder).WriteRune", Method, 10},
    -		{"(*Builder).WriteString", Method, 10},
    -		{"(*Reader).Len", Method, 0},
    -		{"(*Reader).Read", Method, 0},
    -		{"(*Reader).ReadAt", Method, 0},
    -		{"(*Reader).ReadByte", Method, 0},
    -		{"(*Reader).ReadRune", Method, 0},
    -		{"(*Reader).Reset", Method, 7},
    -		{"(*Reader).Seek", Method, 0},
    -		{"(*Reader).Size", Method, 5},
    -		{"(*Reader).UnreadByte", Method, 0},
    -		{"(*Reader).UnreadRune", Method, 0},
    -		{"(*Reader).WriteTo", Method, 1},
    -		{"(*Replacer).Replace", Method, 0},
    -		{"(*Replacer).WriteString", Method, 0},
    -		{"Builder", Type, 10},
    -		{"Clone", Func, 18},
    -		{"Compare", Func, 5},
    -		{"Contains", Func, 0},
    -		{"ContainsAny", Func, 0},
    -		{"ContainsFunc", Func, 21},
    -		{"ContainsRune", Func, 0},
    -		{"Count", Func, 0},
    -		{"Cut", Func, 18},
    -		{"CutPrefix", Func, 20},
    -		{"CutSuffix", Func, 20},
    -		{"EqualFold", Func, 0},
    -		{"Fields", Func, 0},
    -		{"FieldsFunc", Func, 0},
    -		{"HasPrefix", Func, 0},
    -		{"HasSuffix", Func, 0},
    -		{"Index", Func, 0},
    -		{"IndexAny", Func, 0},
    -		{"IndexByte", Func, 2},
    -		{"IndexFunc", Func, 0},
    -		{"IndexRune", Func, 0},
    -		{"Join", Func, 0},
    -		{"LastIndex", Func, 0},
    -		{"LastIndexAny", Func, 0},
    -		{"LastIndexByte", Func, 5},
    -		{"LastIndexFunc", Func, 0},
    -		{"Map", Func, 0},
    -		{"NewReader", Func, 0},
    -		{"NewReplacer", Func, 0},
    -		{"Reader", Type, 0},
    -		{"Repeat", Func, 0},
    -		{"Replace", Func, 0},
    -		{"ReplaceAll", Func, 12},
    -		{"Replacer", Type, 0},
    -		{"Split", Func, 0},
    -		{"SplitAfter", Func, 0},
    -		{"SplitAfterN", Func, 0},
    -		{"SplitN", Func, 0},
    -		{"Title", Func, 0},
    -		{"ToLower", Func, 0},
    -		{"ToLowerSpecial", Func, 0},
    -		{"ToTitle", Func, 0},
    -		{"ToTitleSpecial", Func, 0},
    -		{"ToUpper", Func, 0},
    -		{"ToUpperSpecial", Func, 0},
    -		{"ToValidUTF8", Func, 13},
    -		{"Trim", Func, 0},
    -		{"TrimFunc", Func, 0},
    -		{"TrimLeft", Func, 0},
    -		{"TrimLeftFunc", Func, 0},
    -		{"TrimPrefix", Func, 1},
    -		{"TrimRight", Func, 0},
    -		{"TrimRightFunc", Func, 0},
    -		{"TrimSpace", Func, 0},
    -		{"TrimSuffix", Func, 1},
    +		{"(*Builder).Cap", Method, 12, ""},
    +		{"(*Builder).Grow", Method, 10, ""},
    +		{"(*Builder).Len", Method, 10, ""},
    +		{"(*Builder).Reset", Method, 10, ""},
    +		{"(*Builder).String", Method, 10, ""},
    +		{"(*Builder).Write", Method, 10, ""},
    +		{"(*Builder).WriteByte", Method, 10, ""},
    +		{"(*Builder).WriteRune", Method, 10, ""},
    +		{"(*Builder).WriteString", Method, 10, ""},
    +		{"(*Reader).Len", Method, 0, ""},
    +		{"(*Reader).Read", Method, 0, ""},
    +		{"(*Reader).ReadAt", Method, 0, ""},
    +		{"(*Reader).ReadByte", Method, 0, ""},
    +		{"(*Reader).ReadRune", Method, 0, ""},
    +		{"(*Reader).Reset", Method, 7, ""},
    +		{"(*Reader).Seek", Method, 0, ""},
    +		{"(*Reader).Size", Method, 5, ""},
    +		{"(*Reader).UnreadByte", Method, 0, ""},
    +		{"(*Reader).UnreadRune", Method, 0, ""},
    +		{"(*Reader).WriteTo", Method, 1, ""},
    +		{"(*Replacer).Replace", Method, 0, ""},
    +		{"(*Replacer).WriteString", Method, 0, ""},
    +		{"Builder", Type, 10, ""},
    +		{"Clone", Func, 18, "func(s string) string"},
    +		{"Compare", Func, 5, "func(a string, b string) int"},
    +		{"Contains", Func, 0, "func(s string, substr string) bool"},
    +		{"ContainsAny", Func, 0, "func(s string, chars string) bool"},
    +		{"ContainsFunc", Func, 21, "func(s string, f func(rune) bool) bool"},
    +		{"ContainsRune", Func, 0, "func(s string, r rune) bool"},
    +		{"Count", Func, 0, "func(s string, substr string) int"},
    +		{"Cut", Func, 18, "func(s string, sep string) (before string, after string, found bool)"},
    +		{"CutPrefix", Func, 20, "func(s string, prefix string) (after string, found bool)"},
    +		{"CutSuffix", Func, 20, "func(s string, suffix string) (before string, found bool)"},
    +		{"EqualFold", Func, 0, "func(s string, t string) bool"},
    +		{"Fields", Func, 0, "func(s string) []string"},
    +		{"FieldsFunc", Func, 0, "func(s string, f func(rune) bool) []string"},
    +		{"FieldsFuncSeq", Func, 24, "func(s string, f func(rune) bool) iter.Seq[string]"},
    +		{"FieldsSeq", Func, 24, "func(s string) iter.Seq[string]"},
    +		{"HasPrefix", Func, 0, "func(s string, prefix string) bool"},
    +		{"HasSuffix", Func, 0, "func(s string, suffix string) bool"},
    +		{"Index", Func, 0, "func(s string, substr string) int"},
    +		{"IndexAny", Func, 0, "func(s string, chars string) int"},
    +		{"IndexByte", Func, 2, "func(s string, c byte) int"},
    +		{"IndexFunc", Func, 0, "func(s string, f func(rune) bool) int"},
    +		{"IndexRune", Func, 0, "func(s string, r rune) int"},
    +		{"Join", Func, 0, "func(elems []string, sep string) string"},
    +		{"LastIndex", Func, 0, "func(s string, substr string) int"},
    +		{"LastIndexAny", Func, 0, "func(s string, chars string) int"},
    +		{"LastIndexByte", Func, 5, "func(s string, c byte) int"},
    +		{"LastIndexFunc", Func, 0, "func(s string, f func(rune) bool) int"},
    +		{"Lines", Func, 24, "func(s string) iter.Seq[string]"},
    +		{"Map", Func, 0, "func(mapping func(rune) rune, s string) string"},
    +		{"NewReader", Func, 0, "func(s string) *Reader"},
    +		{"NewReplacer", Func, 0, "func(oldnew ...string) *Replacer"},
    +		{"Reader", Type, 0, ""},
    +		{"Repeat", Func, 0, "func(s string, count int) string"},
    +		{"Replace", Func, 0, "func(s string, old string, new string, n int) string"},
    +		{"ReplaceAll", Func, 12, "func(s string, old string, new string) string"},
    +		{"Replacer", Type, 0, ""},
    +		{"Split", Func, 0, "func(s string, sep string) []string"},
    +		{"SplitAfter", Func, 0, "func(s string, sep string) []string"},
    +		{"SplitAfterN", Func, 0, "func(s string, sep string, n int) []string"},
    +		{"SplitAfterSeq", Func, 24, "func(s string, sep string) iter.Seq[string]"},
    +		{"SplitN", Func, 0, "func(s string, sep string, n int) []string"},
    +		{"SplitSeq", Func, 24, "func(s string, sep string) iter.Seq[string]"},
    +		{"Title", Func, 0, "func(s string) string"},
    +		{"ToLower", Func, 0, "func(s string) string"},
    +		{"ToLowerSpecial", Func, 0, "func(c unicode.SpecialCase, s string) string"},
    +		{"ToTitle", Func, 0, "func(s string) string"},
    +		{"ToTitleSpecial", Func, 0, "func(c unicode.SpecialCase, s string) string"},
    +		{"ToUpper", Func, 0, "func(s string) string"},
    +		{"ToUpperSpecial", Func, 0, "func(c unicode.SpecialCase, s string) string"},
    +		{"ToValidUTF8", Func, 13, "func(s string, replacement string) string"},
    +		{"Trim", Func, 0, "func(s string, cutset string) string"},
    +		{"TrimFunc", Func, 0, "func(s string, f func(rune) bool) string"},
    +		{"TrimLeft", Func, 0, "func(s string, cutset string) string"},
    +		{"TrimLeftFunc", Func, 0, "func(s string, f func(rune) bool) string"},
    +		{"TrimPrefix", Func, 1, "func(s string, prefix string) string"},
    +		{"TrimRight", Func, 0, "func(s string, cutset string) string"},
    +		{"TrimRightFunc", Func, 0, "func(s string, f func(rune) bool) string"},
    +		{"TrimSpace", Func, 0, "func(s string) string"},
    +		{"TrimSuffix", Func, 1, "func(s string, suffix string) string"},
     	},
     	"structs": {
    -		{"HostLayout", Type, 23},
    +		{"HostLayout", Type, 23, ""},
     	},
     	"sync": {
    -		{"(*Cond).Broadcast", Method, 0},
    -		{"(*Cond).Signal", Method, 0},
    -		{"(*Cond).Wait", Method, 0},
    -		{"(*Map).Clear", Method, 23},
    -		{"(*Map).CompareAndDelete", Method, 20},
    -		{"(*Map).CompareAndSwap", Method, 20},
    -		{"(*Map).Delete", Method, 9},
    -		{"(*Map).Load", Method, 9},
    -		{"(*Map).LoadAndDelete", Method, 15},
    -		{"(*Map).LoadOrStore", Method, 9},
    -		{"(*Map).Range", Method, 9},
    -		{"(*Map).Store", Method, 9},
    -		{"(*Map).Swap", Method, 20},
    -		{"(*Mutex).Lock", Method, 0},
    -		{"(*Mutex).TryLock", Method, 18},
    -		{"(*Mutex).Unlock", Method, 0},
    -		{"(*Once).Do", Method, 0},
    -		{"(*Pool).Get", Method, 3},
    -		{"(*Pool).Put", Method, 3},
    -		{"(*RWMutex).Lock", Method, 0},
    -		{"(*RWMutex).RLock", Method, 0},
    -		{"(*RWMutex).RLocker", Method, 0},
    -		{"(*RWMutex).RUnlock", Method, 0},
    -		{"(*RWMutex).TryLock", Method, 18},
    -		{"(*RWMutex).TryRLock", Method, 18},
    -		{"(*RWMutex).Unlock", Method, 0},
    -		{"(*WaitGroup).Add", Method, 0},
    -		{"(*WaitGroup).Done", Method, 0},
    -		{"(*WaitGroup).Wait", Method, 0},
    -		{"Cond", Type, 0},
    -		{"Cond.L", Field, 0},
    -		{"Locker", Type, 0},
    -		{"Map", Type, 9},
    -		{"Mutex", Type, 0},
    -		{"NewCond", Func, 0},
    -		{"Once", Type, 0},
    -		{"OnceFunc", Func, 21},
    -		{"OnceValue", Func, 21},
    -		{"OnceValues", Func, 21},
    -		{"Pool", Type, 3},
    -		{"Pool.New", Field, 3},
    -		{"RWMutex", Type, 0},
    -		{"WaitGroup", Type, 0},
    +		{"(*Cond).Broadcast", Method, 0, ""},
    +		{"(*Cond).Signal", Method, 0, ""},
    +		{"(*Cond).Wait", Method, 0, ""},
    +		{"(*Map).Clear", Method, 23, ""},
    +		{"(*Map).CompareAndDelete", Method, 20, ""},
    +		{"(*Map).CompareAndSwap", Method, 20, ""},
    +		{"(*Map).Delete", Method, 9, ""},
    +		{"(*Map).Load", Method, 9, ""},
    +		{"(*Map).LoadAndDelete", Method, 15, ""},
    +		{"(*Map).LoadOrStore", Method, 9, ""},
    +		{"(*Map).Range", Method, 9, ""},
    +		{"(*Map).Store", Method, 9, ""},
    +		{"(*Map).Swap", Method, 20, ""},
    +		{"(*Mutex).Lock", Method, 0, ""},
    +		{"(*Mutex).TryLock", Method, 18, ""},
    +		{"(*Mutex).Unlock", Method, 0, ""},
    +		{"(*Once).Do", Method, 0, ""},
    +		{"(*Pool).Get", Method, 3, ""},
    +		{"(*Pool).Put", Method, 3, ""},
    +		{"(*RWMutex).Lock", Method, 0, ""},
    +		{"(*RWMutex).RLock", Method, 0, ""},
    +		{"(*RWMutex).RLocker", Method, 0, ""},
    +		{"(*RWMutex).RUnlock", Method, 0, ""},
    +		{"(*RWMutex).TryLock", Method, 18, ""},
    +		{"(*RWMutex).TryRLock", Method, 18, ""},
    +		{"(*RWMutex).Unlock", Method, 0, ""},
    +		{"(*WaitGroup).Add", Method, 0, ""},
    +		{"(*WaitGroup).Done", Method, 0, ""},
    +		{"(*WaitGroup).Go", Method, 25, ""},
    +		{"(*WaitGroup).Wait", Method, 0, ""},
    +		{"Cond", Type, 0, ""},
    +		{"Cond.L", Field, 0, ""},
    +		{"Locker", Type, 0, ""},
    +		{"Map", Type, 9, ""},
    +		{"Mutex", Type, 0, ""},
    +		{"NewCond", Func, 0, "func(l Locker) *Cond"},
    +		{"Once", Type, 0, ""},
    +		{"OnceFunc", Func, 21, "func(f func()) func()"},
    +		{"OnceValue", Func, 21, "func[T any](f func() T) func() T"},
    +		{"OnceValues", Func, 21, "func[T1, T2 any](f func() (T1, T2)) func() (T1, T2)"},
    +		{"Pool", Type, 3, ""},
    +		{"Pool.New", Field, 3, ""},
    +		{"RWMutex", Type, 0, ""},
    +		{"WaitGroup", Type, 0, ""},
     	},
     	"sync/atomic": {
    -		{"(*Bool).CompareAndSwap", Method, 19},
    -		{"(*Bool).Load", Method, 19},
    -		{"(*Bool).Store", Method, 19},
    -		{"(*Bool).Swap", Method, 19},
    -		{"(*Int32).Add", Method, 19},
    -		{"(*Int32).And", Method, 23},
    -		{"(*Int32).CompareAndSwap", Method, 19},
    -		{"(*Int32).Load", Method, 19},
    -		{"(*Int32).Or", Method, 23},
    -		{"(*Int32).Store", Method, 19},
    -		{"(*Int32).Swap", Method, 19},
    -		{"(*Int64).Add", Method, 19},
    -		{"(*Int64).And", Method, 23},
    -		{"(*Int64).CompareAndSwap", Method, 19},
    -		{"(*Int64).Load", Method, 19},
    -		{"(*Int64).Or", Method, 23},
    -		{"(*Int64).Store", Method, 19},
    -		{"(*Int64).Swap", Method, 19},
    -		{"(*Pointer).CompareAndSwap", Method, 19},
    -		{"(*Pointer).Load", Method, 19},
    -		{"(*Pointer).Store", Method, 19},
    -		{"(*Pointer).Swap", Method, 19},
    -		{"(*Uint32).Add", Method, 19},
    -		{"(*Uint32).And", Method, 23},
    -		{"(*Uint32).CompareAndSwap", Method, 19},
    -		{"(*Uint32).Load", Method, 19},
    -		{"(*Uint32).Or", Method, 23},
    -		{"(*Uint32).Store", Method, 19},
    -		{"(*Uint32).Swap", Method, 19},
    -		{"(*Uint64).Add", Method, 19},
    -		{"(*Uint64).And", Method, 23},
    -		{"(*Uint64).CompareAndSwap", Method, 19},
    -		{"(*Uint64).Load", Method, 19},
    -		{"(*Uint64).Or", Method, 23},
    -		{"(*Uint64).Store", Method, 19},
    -		{"(*Uint64).Swap", Method, 19},
    -		{"(*Uintptr).Add", Method, 19},
    -		{"(*Uintptr).And", Method, 23},
    -		{"(*Uintptr).CompareAndSwap", Method, 19},
    -		{"(*Uintptr).Load", Method, 19},
    -		{"(*Uintptr).Or", Method, 23},
    -		{"(*Uintptr).Store", Method, 19},
    -		{"(*Uintptr).Swap", Method, 19},
    -		{"(*Value).CompareAndSwap", Method, 17},
    -		{"(*Value).Load", Method, 4},
    -		{"(*Value).Store", Method, 4},
    -		{"(*Value).Swap", Method, 17},
    -		{"AddInt32", Func, 0},
    -		{"AddInt64", Func, 0},
    -		{"AddUint32", Func, 0},
    -		{"AddUint64", Func, 0},
    -		{"AddUintptr", Func, 0},
    -		{"AndInt32", Func, 23},
    -		{"AndInt64", Func, 23},
    -		{"AndUint32", Func, 23},
    -		{"AndUint64", Func, 23},
    -		{"AndUintptr", Func, 23},
    -		{"Bool", Type, 19},
    -		{"CompareAndSwapInt32", Func, 0},
    -		{"CompareAndSwapInt64", Func, 0},
    -		{"CompareAndSwapPointer", Func, 0},
    -		{"CompareAndSwapUint32", Func, 0},
    -		{"CompareAndSwapUint64", Func, 0},
    -		{"CompareAndSwapUintptr", Func, 0},
    -		{"Int32", Type, 19},
    -		{"Int64", Type, 19},
    -		{"LoadInt32", Func, 0},
    -		{"LoadInt64", Func, 0},
    -		{"LoadPointer", Func, 0},
    -		{"LoadUint32", Func, 0},
    -		{"LoadUint64", Func, 0},
    -		{"LoadUintptr", Func, 0},
    -		{"OrInt32", Func, 23},
    -		{"OrInt64", Func, 23},
    -		{"OrUint32", Func, 23},
    -		{"OrUint64", Func, 23},
    -		{"OrUintptr", Func, 23},
    -		{"Pointer", Type, 19},
    -		{"StoreInt32", Func, 0},
    -		{"StoreInt64", Func, 0},
    -		{"StorePointer", Func, 0},
    -		{"StoreUint32", Func, 0},
    -		{"StoreUint64", Func, 0},
    -		{"StoreUintptr", Func, 0},
    -		{"SwapInt32", Func, 2},
    -		{"SwapInt64", Func, 2},
    -		{"SwapPointer", Func, 2},
    -		{"SwapUint32", Func, 2},
    -		{"SwapUint64", Func, 2},
    -		{"SwapUintptr", Func, 2},
    -		{"Uint32", Type, 19},
    -		{"Uint64", Type, 19},
    -		{"Uintptr", Type, 19},
    -		{"Value", Type, 4},
    +		{"(*Bool).CompareAndSwap", Method, 19, ""},
    +		{"(*Bool).Load", Method, 19, ""},
    +		{"(*Bool).Store", Method, 19, ""},
    +		{"(*Bool).Swap", Method, 19, ""},
    +		{"(*Int32).Add", Method, 19, ""},
    +		{"(*Int32).And", Method, 23, ""},
    +		{"(*Int32).CompareAndSwap", Method, 19, ""},
    +		{"(*Int32).Load", Method, 19, ""},
    +		{"(*Int32).Or", Method, 23, ""},
    +		{"(*Int32).Store", Method, 19, ""},
    +		{"(*Int32).Swap", Method, 19, ""},
    +		{"(*Int64).Add", Method, 19, ""},
    +		{"(*Int64).And", Method, 23, ""},
    +		{"(*Int64).CompareAndSwap", Method, 19, ""},
    +		{"(*Int64).Load", Method, 19, ""},
    +		{"(*Int64).Or", Method, 23, ""},
    +		{"(*Int64).Store", Method, 19, ""},
    +		{"(*Int64).Swap", Method, 19, ""},
    +		{"(*Pointer).CompareAndSwap", Method, 19, ""},
    +		{"(*Pointer).Load", Method, 19, ""},
    +		{"(*Pointer).Store", Method, 19, ""},
    +		{"(*Pointer).Swap", Method, 19, ""},
    +		{"(*Uint32).Add", Method, 19, ""},
    +		{"(*Uint32).And", Method, 23, ""},
    +		{"(*Uint32).CompareAndSwap", Method, 19, ""},
    +		{"(*Uint32).Load", Method, 19, ""},
    +		{"(*Uint32).Or", Method, 23, ""},
    +		{"(*Uint32).Store", Method, 19, ""},
    +		{"(*Uint32).Swap", Method, 19, ""},
    +		{"(*Uint64).Add", Method, 19, ""},
    +		{"(*Uint64).And", Method, 23, ""},
    +		{"(*Uint64).CompareAndSwap", Method, 19, ""},
    +		{"(*Uint64).Load", Method, 19, ""},
    +		{"(*Uint64).Or", Method, 23, ""},
    +		{"(*Uint64).Store", Method, 19, ""},
    +		{"(*Uint64).Swap", Method, 19, ""},
    +		{"(*Uintptr).Add", Method, 19, ""},
    +		{"(*Uintptr).And", Method, 23, ""},
    +		{"(*Uintptr).CompareAndSwap", Method, 19, ""},
    +		{"(*Uintptr).Load", Method, 19, ""},
    +		{"(*Uintptr).Or", Method, 23, ""},
    +		{"(*Uintptr).Store", Method, 19, ""},
    +		{"(*Uintptr).Swap", Method, 19, ""},
    +		{"(*Value).CompareAndSwap", Method, 17, ""},
    +		{"(*Value).Load", Method, 4, ""},
    +		{"(*Value).Store", Method, 4, ""},
    +		{"(*Value).Swap", Method, 17, ""},
    +		{"AddInt32", Func, 0, "func(addr *int32, delta int32) (new int32)"},
    +		{"AddInt64", Func, 0, "func(addr *int64, delta int64) (new int64)"},
    +		{"AddUint32", Func, 0, "func(addr *uint32, delta uint32) (new uint32)"},
    +		{"AddUint64", Func, 0, "func(addr *uint64, delta uint64) (new uint64)"},
    +		{"AddUintptr", Func, 0, "func(addr *uintptr, delta uintptr) (new uintptr)"},
    +		{"AndInt32", Func, 23, "func(addr *int32, mask int32) (old int32)"},
    +		{"AndInt64", Func, 23, "func(addr *int64, mask int64) (old int64)"},
    +		{"AndUint32", Func, 23, "func(addr *uint32, mask uint32) (old uint32)"},
    +		{"AndUint64", Func, 23, "func(addr *uint64, mask uint64) (old uint64)"},
    +		{"AndUintptr", Func, 23, "func(addr *uintptr, mask uintptr) (old uintptr)"},
    +		{"Bool", Type, 19, ""},
    +		{"CompareAndSwapInt32", Func, 0, "func(addr *int32, old int32, new int32) (swapped bool)"},
    +		{"CompareAndSwapInt64", Func, 0, "func(addr *int64, old int64, new int64) (swapped bool)"},
    +		{"CompareAndSwapPointer", Func, 0, "func(addr *unsafe.Pointer, old unsafe.Pointer, new unsafe.Pointer) (swapped bool)"},
    +		{"CompareAndSwapUint32", Func, 0, "func(addr *uint32, old uint32, new uint32) (swapped bool)"},
    +		{"CompareAndSwapUint64", Func, 0, "func(addr *uint64, old uint64, new uint64) (swapped bool)"},
    +		{"CompareAndSwapUintptr", Func, 0, "func(addr *uintptr, old uintptr, new uintptr) (swapped bool)"},
    +		{"Int32", Type, 19, ""},
    +		{"Int64", Type, 19, ""},
    +		{"LoadInt32", Func, 0, "func(addr *int32) (val int32)"},
    +		{"LoadInt64", Func, 0, "func(addr *int64) (val int64)"},
    +		{"LoadPointer", Func, 0, "func(addr *unsafe.Pointer) (val unsafe.Pointer)"},
    +		{"LoadUint32", Func, 0, "func(addr *uint32) (val uint32)"},
    +		{"LoadUint64", Func, 0, "func(addr *uint64) (val uint64)"},
    +		{"LoadUintptr", Func, 0, "func(addr *uintptr) (val uintptr)"},
    +		{"OrInt32", Func, 23, "func(addr *int32, mask int32) (old int32)"},
    +		{"OrInt64", Func, 23, "func(addr *int64, mask int64) (old int64)"},
    +		{"OrUint32", Func, 23, "func(addr *uint32, mask uint32) (old uint32)"},
    +		{"OrUint64", Func, 23, "func(addr *uint64, mask uint64) (old uint64)"},
    +		{"OrUintptr", Func, 23, "func(addr *uintptr, mask uintptr) (old uintptr)"},
    +		{"Pointer", Type, 19, ""},
    +		{"StoreInt32", Func, 0, "func(addr *int32, val int32)"},
    +		{"StoreInt64", Func, 0, "func(addr *int64, val int64)"},
    +		{"StorePointer", Func, 0, "func(addr *unsafe.Pointer, val unsafe.Pointer)"},
    +		{"StoreUint32", Func, 0, "func(addr *uint32, val uint32)"},
    +		{"StoreUint64", Func, 0, "func(addr *uint64, val uint64)"},
    +		{"StoreUintptr", Func, 0, "func(addr *uintptr, val uintptr)"},
    +		{"SwapInt32", Func, 2, "func(addr *int32, new int32) (old int32)"},
    +		{"SwapInt64", Func, 2, "func(addr *int64, new int64) (old int64)"},
    +		{"SwapPointer", Func, 2, "func(addr *unsafe.Pointer, new unsafe.Pointer) (old unsafe.Pointer)"},
    +		{"SwapUint32", Func, 2, "func(addr *uint32, new uint32) (old uint32)"},
    +		{"SwapUint64", Func, 2, "func(addr *uint64, new uint64) (old uint64)"},
    +		{"SwapUintptr", Func, 2, "func(addr *uintptr, new uintptr) (old uintptr)"},
    +		{"Uint32", Type, 19, ""},
    +		{"Uint64", Type, 19, ""},
    +		{"Uintptr", Type, 19, ""},
    +		{"Value", Type, 4, ""},
     	},
     	"syscall": {
    -		{"(*Cmsghdr).SetLen", Method, 0},
    -		{"(*DLL).FindProc", Method, 0},
    -		{"(*DLL).MustFindProc", Method, 0},
    -		{"(*DLL).Release", Method, 0},
    -		{"(*DLLError).Error", Method, 0},
    -		{"(*DLLError).Unwrap", Method, 16},
    -		{"(*Filetime).Nanoseconds", Method, 0},
    -		{"(*Iovec).SetLen", Method, 0},
    -		{"(*LazyDLL).Handle", Method, 0},
    -		{"(*LazyDLL).Load", Method, 0},
    -		{"(*LazyDLL).NewProc", Method, 0},
    -		{"(*LazyProc).Addr", Method, 0},
    -		{"(*LazyProc).Call", Method, 0},
    -		{"(*LazyProc).Find", Method, 0},
    -		{"(*Msghdr).SetControllen", Method, 0},
    -		{"(*Proc).Addr", Method, 0},
    -		{"(*Proc).Call", Method, 0},
    -		{"(*PtraceRegs).PC", Method, 0},
    -		{"(*PtraceRegs).SetPC", Method, 0},
    -		{"(*RawSockaddrAny).Sockaddr", Method, 0},
    -		{"(*SID).Copy", Method, 0},
    -		{"(*SID).Len", Method, 0},
    -		{"(*SID).LookupAccount", Method, 0},
    -		{"(*SID).String", Method, 0},
    -		{"(*Timespec).Nano", Method, 0},
    -		{"(*Timespec).Unix", Method, 0},
    -		{"(*Timeval).Nano", Method, 0},
    -		{"(*Timeval).Nanoseconds", Method, 0},
    -		{"(*Timeval).Unix", Method, 0},
    -		{"(Errno).Error", Method, 0},
    -		{"(Errno).Is", Method, 13},
    -		{"(Errno).Temporary", Method, 0},
    -		{"(Errno).Timeout", Method, 0},
    -		{"(Signal).Signal", Method, 0},
    -		{"(Signal).String", Method, 0},
    -		{"(Token).Close", Method, 0},
    -		{"(Token).GetTokenPrimaryGroup", Method, 0},
    -		{"(Token).GetTokenUser", Method, 0},
    -		{"(Token).GetUserProfileDirectory", Method, 0},
    -		{"(WaitStatus).Continued", Method, 0},
    -		{"(WaitStatus).CoreDump", Method, 0},
    -		{"(WaitStatus).ExitStatus", Method, 0},
    -		{"(WaitStatus).Exited", Method, 0},
    -		{"(WaitStatus).Signal", Method, 0},
    -		{"(WaitStatus).Signaled", Method, 0},
    -		{"(WaitStatus).StopSignal", Method, 0},
    -		{"(WaitStatus).Stopped", Method, 0},
    -		{"(WaitStatus).TrapCause", Method, 0},
    -		{"AF_ALG", Const, 0},
    -		{"AF_APPLETALK", Const, 0},
    -		{"AF_ARP", Const, 0},
    -		{"AF_ASH", Const, 0},
    -		{"AF_ATM", Const, 0},
    -		{"AF_ATMPVC", Const, 0},
    -		{"AF_ATMSVC", Const, 0},
    -		{"AF_AX25", Const, 0},
    -		{"AF_BLUETOOTH", Const, 0},
    -		{"AF_BRIDGE", Const, 0},
    -		{"AF_CAIF", Const, 0},
    -		{"AF_CAN", Const, 0},
    -		{"AF_CCITT", Const, 0},
    -		{"AF_CHAOS", Const, 0},
    -		{"AF_CNT", Const, 0},
    -		{"AF_COIP", Const, 0},
    -		{"AF_DATAKIT", Const, 0},
    -		{"AF_DECnet", Const, 0},
    -		{"AF_DLI", Const, 0},
    -		{"AF_E164", Const, 0},
    -		{"AF_ECMA", Const, 0},
    -		{"AF_ECONET", Const, 0},
    -		{"AF_ENCAP", Const, 1},
    -		{"AF_FILE", Const, 0},
    -		{"AF_HYLINK", Const, 0},
    -		{"AF_IEEE80211", Const, 0},
    -		{"AF_IEEE802154", Const, 0},
    -		{"AF_IMPLINK", Const, 0},
    -		{"AF_INET", Const, 0},
    -		{"AF_INET6", Const, 0},
    -		{"AF_INET6_SDP", Const, 3},
    -		{"AF_INET_SDP", Const, 3},
    -		{"AF_IPX", Const, 0},
    -		{"AF_IRDA", Const, 0},
    -		{"AF_ISDN", Const, 0},
    -		{"AF_ISO", Const, 0},
    -		{"AF_IUCV", Const, 0},
    -		{"AF_KEY", Const, 0},
    -		{"AF_LAT", Const, 0},
    -		{"AF_LINK", Const, 0},
    -		{"AF_LLC", Const, 0},
    -		{"AF_LOCAL", Const, 0},
    -		{"AF_MAX", Const, 0},
    -		{"AF_MPLS", Const, 1},
    -		{"AF_NATM", Const, 0},
    -		{"AF_NDRV", Const, 0},
    -		{"AF_NETBEUI", Const, 0},
    -		{"AF_NETBIOS", Const, 0},
    -		{"AF_NETGRAPH", Const, 0},
    -		{"AF_NETLINK", Const, 0},
    -		{"AF_NETROM", Const, 0},
    -		{"AF_NS", Const, 0},
    -		{"AF_OROUTE", Const, 1},
    -		{"AF_OSI", Const, 0},
    -		{"AF_PACKET", Const, 0},
    -		{"AF_PHONET", Const, 0},
    -		{"AF_PPP", Const, 0},
    -		{"AF_PPPOX", Const, 0},
    -		{"AF_PUP", Const, 0},
    -		{"AF_RDS", Const, 0},
    -		{"AF_RESERVED_36", Const, 0},
    -		{"AF_ROSE", Const, 0},
    -		{"AF_ROUTE", Const, 0},
    -		{"AF_RXRPC", Const, 0},
    -		{"AF_SCLUSTER", Const, 0},
    -		{"AF_SECURITY", Const, 0},
    -		{"AF_SIP", Const, 0},
    -		{"AF_SLOW", Const, 0},
    -		{"AF_SNA", Const, 0},
    -		{"AF_SYSTEM", Const, 0},
    -		{"AF_TIPC", Const, 0},
    -		{"AF_UNIX", Const, 0},
    -		{"AF_UNSPEC", Const, 0},
    -		{"AF_UTUN", Const, 16},
    -		{"AF_VENDOR00", Const, 0},
    -		{"AF_VENDOR01", Const, 0},
    -		{"AF_VENDOR02", Const, 0},
    -		{"AF_VENDOR03", Const, 0},
    -		{"AF_VENDOR04", Const, 0},
    -		{"AF_VENDOR05", Const, 0},
    -		{"AF_VENDOR06", Const, 0},
    -		{"AF_VENDOR07", Const, 0},
    -		{"AF_VENDOR08", Const, 0},
    -		{"AF_VENDOR09", Const, 0},
    -		{"AF_VENDOR10", Const, 0},
    -		{"AF_VENDOR11", Const, 0},
    -		{"AF_VENDOR12", Const, 0},
    -		{"AF_VENDOR13", Const, 0},
    -		{"AF_VENDOR14", Const, 0},
    -		{"AF_VENDOR15", Const, 0},
    -		{"AF_VENDOR16", Const, 0},
    -		{"AF_VENDOR17", Const, 0},
    -		{"AF_VENDOR18", Const, 0},
    -		{"AF_VENDOR19", Const, 0},
    -		{"AF_VENDOR20", Const, 0},
    -		{"AF_VENDOR21", Const, 0},
    -		{"AF_VENDOR22", Const, 0},
    -		{"AF_VENDOR23", Const, 0},
    -		{"AF_VENDOR24", Const, 0},
    -		{"AF_VENDOR25", Const, 0},
    -		{"AF_VENDOR26", Const, 0},
    -		{"AF_VENDOR27", Const, 0},
    -		{"AF_VENDOR28", Const, 0},
    -		{"AF_VENDOR29", Const, 0},
    -		{"AF_VENDOR30", Const, 0},
    -		{"AF_VENDOR31", Const, 0},
    -		{"AF_VENDOR32", Const, 0},
    -		{"AF_VENDOR33", Const, 0},
    -		{"AF_VENDOR34", Const, 0},
    -		{"AF_VENDOR35", Const, 0},
    -		{"AF_VENDOR36", Const, 0},
    -		{"AF_VENDOR37", Const, 0},
    -		{"AF_VENDOR38", Const, 0},
    -		{"AF_VENDOR39", Const, 0},
    -		{"AF_VENDOR40", Const, 0},
    -		{"AF_VENDOR41", Const, 0},
    -		{"AF_VENDOR42", Const, 0},
    -		{"AF_VENDOR43", Const, 0},
    -		{"AF_VENDOR44", Const, 0},
    -		{"AF_VENDOR45", Const, 0},
    -		{"AF_VENDOR46", Const, 0},
    -		{"AF_VENDOR47", Const, 0},
    -		{"AF_WANPIPE", Const, 0},
    -		{"AF_X25", Const, 0},
    -		{"AI_CANONNAME", Const, 1},
    -		{"AI_NUMERICHOST", Const, 1},
    -		{"AI_PASSIVE", Const, 1},
    -		{"APPLICATION_ERROR", Const, 0},
    -		{"ARPHRD_ADAPT", Const, 0},
    -		{"ARPHRD_APPLETLK", Const, 0},
    -		{"ARPHRD_ARCNET", Const, 0},
    -		{"ARPHRD_ASH", Const, 0},
    -		{"ARPHRD_ATM", Const, 0},
    -		{"ARPHRD_AX25", Const, 0},
    -		{"ARPHRD_BIF", Const, 0},
    -		{"ARPHRD_CHAOS", Const, 0},
    -		{"ARPHRD_CISCO", Const, 0},
    -		{"ARPHRD_CSLIP", Const, 0},
    -		{"ARPHRD_CSLIP6", Const, 0},
    -		{"ARPHRD_DDCMP", Const, 0},
    -		{"ARPHRD_DLCI", Const, 0},
    -		{"ARPHRD_ECONET", Const, 0},
    -		{"ARPHRD_EETHER", Const, 0},
    -		{"ARPHRD_ETHER", Const, 0},
    -		{"ARPHRD_EUI64", Const, 0},
    -		{"ARPHRD_FCAL", Const, 0},
    -		{"ARPHRD_FCFABRIC", Const, 0},
    -		{"ARPHRD_FCPL", Const, 0},
    -		{"ARPHRD_FCPP", Const, 0},
    -		{"ARPHRD_FDDI", Const, 0},
    -		{"ARPHRD_FRAD", Const, 0},
    -		{"ARPHRD_FRELAY", Const, 1},
    -		{"ARPHRD_HDLC", Const, 0},
    -		{"ARPHRD_HIPPI", Const, 0},
    -		{"ARPHRD_HWX25", Const, 0},
    -		{"ARPHRD_IEEE1394", Const, 0},
    -		{"ARPHRD_IEEE802", Const, 0},
    -		{"ARPHRD_IEEE80211", Const, 0},
    -		{"ARPHRD_IEEE80211_PRISM", Const, 0},
    -		{"ARPHRD_IEEE80211_RADIOTAP", Const, 0},
    -		{"ARPHRD_IEEE802154", Const, 0},
    -		{"ARPHRD_IEEE802154_PHY", Const, 0},
    -		{"ARPHRD_IEEE802_TR", Const, 0},
    -		{"ARPHRD_INFINIBAND", Const, 0},
    -		{"ARPHRD_IPDDP", Const, 0},
    -		{"ARPHRD_IPGRE", Const, 0},
    -		{"ARPHRD_IRDA", Const, 0},
    -		{"ARPHRD_LAPB", Const, 0},
    -		{"ARPHRD_LOCALTLK", Const, 0},
    -		{"ARPHRD_LOOPBACK", Const, 0},
    -		{"ARPHRD_METRICOM", Const, 0},
    -		{"ARPHRD_NETROM", Const, 0},
    -		{"ARPHRD_NONE", Const, 0},
    -		{"ARPHRD_PIMREG", Const, 0},
    -		{"ARPHRD_PPP", Const, 0},
    -		{"ARPHRD_PRONET", Const, 0},
    -		{"ARPHRD_RAWHDLC", Const, 0},
    -		{"ARPHRD_ROSE", Const, 0},
    -		{"ARPHRD_RSRVD", Const, 0},
    -		{"ARPHRD_SIT", Const, 0},
    -		{"ARPHRD_SKIP", Const, 0},
    -		{"ARPHRD_SLIP", Const, 0},
    -		{"ARPHRD_SLIP6", Const, 0},
    -		{"ARPHRD_STRIP", Const, 1},
    -		{"ARPHRD_TUNNEL", Const, 0},
    -		{"ARPHRD_TUNNEL6", Const, 0},
    -		{"ARPHRD_VOID", Const, 0},
    -		{"ARPHRD_X25", Const, 0},
    -		{"AUTHTYPE_CLIENT", Const, 0},
    -		{"AUTHTYPE_SERVER", Const, 0},
    -		{"Accept", Func, 0},
    -		{"Accept4", Func, 1},
    -		{"AcceptEx", Func, 0},
    -		{"Access", Func, 0},
    -		{"Acct", Func, 0},
    -		{"AddrinfoW", Type, 1},
    -		{"AddrinfoW.Addr", Field, 1},
    -		{"AddrinfoW.Addrlen", Field, 1},
    -		{"AddrinfoW.Canonname", Field, 1},
    -		{"AddrinfoW.Family", Field, 1},
    -		{"AddrinfoW.Flags", Field, 1},
    -		{"AddrinfoW.Next", Field, 1},
    -		{"AddrinfoW.Protocol", Field, 1},
    -		{"AddrinfoW.Socktype", Field, 1},
    -		{"Adjtime", Func, 0},
    -		{"Adjtimex", Func, 0},
    -		{"AllThreadsSyscall", Func, 16},
    -		{"AllThreadsSyscall6", Func, 16},
    -		{"AttachLsf", Func, 0},
    -		{"B0", Const, 0},
    -		{"B1000000", Const, 0},
    -		{"B110", Const, 0},
    -		{"B115200", Const, 0},
    -		{"B1152000", Const, 0},
    -		{"B1200", Const, 0},
    -		{"B134", Const, 0},
    -		{"B14400", Const, 1},
    -		{"B150", Const, 0},
    -		{"B1500000", Const, 0},
    -		{"B1800", Const, 0},
    -		{"B19200", Const, 0},
    -		{"B200", Const, 0},
    -		{"B2000000", Const, 0},
    -		{"B230400", Const, 0},
    -		{"B2400", Const, 0},
    -		{"B2500000", Const, 0},
    -		{"B28800", Const, 1},
    -		{"B300", Const, 0},
    -		{"B3000000", Const, 0},
    -		{"B3500000", Const, 0},
    -		{"B38400", Const, 0},
    -		{"B4000000", Const, 0},
    -		{"B460800", Const, 0},
    -		{"B4800", Const, 0},
    -		{"B50", Const, 0},
    -		{"B500000", Const, 0},
    -		{"B57600", Const, 0},
    -		{"B576000", Const, 0},
    -		{"B600", Const, 0},
    -		{"B7200", Const, 1},
    -		{"B75", Const, 0},
    -		{"B76800", Const, 1},
    -		{"B921600", Const, 0},
    -		{"B9600", Const, 0},
    -		{"BASE_PROTOCOL", Const, 2},
    -		{"BIOCFEEDBACK", Const, 0},
    -		{"BIOCFLUSH", Const, 0},
    -		{"BIOCGBLEN", Const, 0},
    -		{"BIOCGDIRECTION", Const, 0},
    -		{"BIOCGDIRFILT", Const, 1},
    -		{"BIOCGDLT", Const, 0},
    -		{"BIOCGDLTLIST", Const, 0},
    -		{"BIOCGETBUFMODE", Const, 0},
    -		{"BIOCGETIF", Const, 0},
    -		{"BIOCGETZMAX", Const, 0},
    -		{"BIOCGFEEDBACK", Const, 1},
    -		{"BIOCGFILDROP", Const, 1},
    -		{"BIOCGHDRCMPLT", Const, 0},
    -		{"BIOCGRSIG", Const, 0},
    -		{"BIOCGRTIMEOUT", Const, 0},
    -		{"BIOCGSEESENT", Const, 0},
    -		{"BIOCGSTATS", Const, 0},
    -		{"BIOCGSTATSOLD", Const, 1},
    -		{"BIOCGTSTAMP", Const, 1},
    -		{"BIOCIMMEDIATE", Const, 0},
    -		{"BIOCLOCK", Const, 0},
    -		{"BIOCPROMISC", Const, 0},
    -		{"BIOCROTZBUF", Const, 0},
    -		{"BIOCSBLEN", Const, 0},
    -		{"BIOCSDIRECTION", Const, 0},
    -		{"BIOCSDIRFILT", Const, 1},
    -		{"BIOCSDLT", Const, 0},
    -		{"BIOCSETBUFMODE", Const, 0},
    -		{"BIOCSETF", Const, 0},
    -		{"BIOCSETFNR", Const, 0},
    -		{"BIOCSETIF", Const, 0},
    -		{"BIOCSETWF", Const, 0},
    -		{"BIOCSETZBUF", Const, 0},
    -		{"BIOCSFEEDBACK", Const, 1},
    -		{"BIOCSFILDROP", Const, 1},
    -		{"BIOCSHDRCMPLT", Const, 0},
    -		{"BIOCSRSIG", Const, 0},
    -		{"BIOCSRTIMEOUT", Const, 0},
    -		{"BIOCSSEESENT", Const, 0},
    -		{"BIOCSTCPF", Const, 1},
    -		{"BIOCSTSTAMP", Const, 1},
    -		{"BIOCSUDPF", Const, 1},
    -		{"BIOCVERSION", Const, 0},
    -		{"BPF_A", Const, 0},
    -		{"BPF_ABS", Const, 0},
    -		{"BPF_ADD", Const, 0},
    -		{"BPF_ALIGNMENT", Const, 0},
    -		{"BPF_ALIGNMENT32", Const, 1},
    -		{"BPF_ALU", Const, 0},
    -		{"BPF_AND", Const, 0},
    -		{"BPF_B", Const, 0},
    -		{"BPF_BUFMODE_BUFFER", Const, 0},
    -		{"BPF_BUFMODE_ZBUF", Const, 0},
    -		{"BPF_DFLTBUFSIZE", Const, 1},
    -		{"BPF_DIRECTION_IN", Const, 1},
    -		{"BPF_DIRECTION_OUT", Const, 1},
    -		{"BPF_DIV", Const, 0},
    -		{"BPF_H", Const, 0},
    -		{"BPF_IMM", Const, 0},
    -		{"BPF_IND", Const, 0},
    -		{"BPF_JA", Const, 0},
    -		{"BPF_JEQ", Const, 0},
    -		{"BPF_JGE", Const, 0},
    -		{"BPF_JGT", Const, 0},
    -		{"BPF_JMP", Const, 0},
    -		{"BPF_JSET", Const, 0},
    -		{"BPF_K", Const, 0},
    -		{"BPF_LD", Const, 0},
    -		{"BPF_LDX", Const, 0},
    -		{"BPF_LEN", Const, 0},
    -		{"BPF_LSH", Const, 0},
    -		{"BPF_MAJOR_VERSION", Const, 0},
    -		{"BPF_MAXBUFSIZE", Const, 0},
    -		{"BPF_MAXINSNS", Const, 0},
    -		{"BPF_MEM", Const, 0},
    -		{"BPF_MEMWORDS", Const, 0},
    -		{"BPF_MINBUFSIZE", Const, 0},
    -		{"BPF_MINOR_VERSION", Const, 0},
    -		{"BPF_MISC", Const, 0},
    -		{"BPF_MSH", Const, 0},
    -		{"BPF_MUL", Const, 0},
    -		{"BPF_NEG", Const, 0},
    -		{"BPF_OR", Const, 0},
    -		{"BPF_RELEASE", Const, 0},
    -		{"BPF_RET", Const, 0},
    -		{"BPF_RSH", Const, 0},
    -		{"BPF_ST", Const, 0},
    -		{"BPF_STX", Const, 0},
    -		{"BPF_SUB", Const, 0},
    -		{"BPF_TAX", Const, 0},
    -		{"BPF_TXA", Const, 0},
    -		{"BPF_T_BINTIME", Const, 1},
    -		{"BPF_T_BINTIME_FAST", Const, 1},
    -		{"BPF_T_BINTIME_MONOTONIC", Const, 1},
    -		{"BPF_T_BINTIME_MONOTONIC_FAST", Const, 1},
    -		{"BPF_T_FAST", Const, 1},
    -		{"BPF_T_FLAG_MASK", Const, 1},
    -		{"BPF_T_FORMAT_MASK", Const, 1},
    -		{"BPF_T_MICROTIME", Const, 1},
    -		{"BPF_T_MICROTIME_FAST", Const, 1},
    -		{"BPF_T_MICROTIME_MONOTONIC", Const, 1},
    -		{"BPF_T_MICROTIME_MONOTONIC_FAST", Const, 1},
    -		{"BPF_T_MONOTONIC", Const, 1},
    -		{"BPF_T_MONOTONIC_FAST", Const, 1},
    -		{"BPF_T_NANOTIME", Const, 1},
    -		{"BPF_T_NANOTIME_FAST", Const, 1},
    -		{"BPF_T_NANOTIME_MONOTONIC", Const, 1},
    -		{"BPF_T_NANOTIME_MONOTONIC_FAST", Const, 1},
    -		{"BPF_T_NONE", Const, 1},
    -		{"BPF_T_NORMAL", Const, 1},
    -		{"BPF_W", Const, 0},
    -		{"BPF_X", Const, 0},
    -		{"BRKINT", Const, 0},
    -		{"Bind", Func, 0},
    -		{"BindToDevice", Func, 0},
    -		{"BpfBuflen", Func, 0},
    -		{"BpfDatalink", Func, 0},
    -		{"BpfHdr", Type, 0},
    -		{"BpfHdr.Caplen", Field, 0},
    -		{"BpfHdr.Datalen", Field, 0},
    -		{"BpfHdr.Hdrlen", Field, 0},
    -		{"BpfHdr.Pad_cgo_0", Field, 0},
    -		{"BpfHdr.Tstamp", Field, 0},
    -		{"BpfHeadercmpl", Func, 0},
    -		{"BpfInsn", Type, 0},
    -		{"BpfInsn.Code", Field, 0},
    -		{"BpfInsn.Jf", Field, 0},
    -		{"BpfInsn.Jt", Field, 0},
    -		{"BpfInsn.K", Field, 0},
    -		{"BpfInterface", Func, 0},
    -		{"BpfJump", Func, 0},
    -		{"BpfProgram", Type, 0},
    -		{"BpfProgram.Insns", Field, 0},
    -		{"BpfProgram.Len", Field, 0},
    -		{"BpfProgram.Pad_cgo_0", Field, 0},
    -		{"BpfStat", Type, 0},
    -		{"BpfStat.Capt", Field, 2},
    -		{"BpfStat.Drop", Field, 0},
    -		{"BpfStat.Padding", Field, 2},
    -		{"BpfStat.Recv", Field, 0},
    -		{"BpfStats", Func, 0},
    -		{"BpfStmt", Func, 0},
    -		{"BpfTimeout", Func, 0},
    -		{"BpfTimeval", Type, 2},
    -		{"BpfTimeval.Sec", Field, 2},
    -		{"BpfTimeval.Usec", Field, 2},
    -		{"BpfVersion", Type, 0},
    -		{"BpfVersion.Major", Field, 0},
    -		{"BpfVersion.Minor", Field, 0},
    -		{"BpfZbuf", Type, 0},
    -		{"BpfZbuf.Bufa", Field, 0},
    -		{"BpfZbuf.Bufb", Field, 0},
    -		{"BpfZbuf.Buflen", Field, 0},
    -		{"BpfZbufHeader", Type, 0},
    -		{"BpfZbufHeader.Kernel_gen", Field, 0},
    -		{"BpfZbufHeader.Kernel_len", Field, 0},
    -		{"BpfZbufHeader.User_gen", Field, 0},
    -		{"BpfZbufHeader.X_bzh_pad", Field, 0},
    -		{"ByHandleFileInformation", Type, 0},
    -		{"ByHandleFileInformation.CreationTime", Field, 0},
    -		{"ByHandleFileInformation.FileAttributes", Field, 0},
    -		{"ByHandleFileInformation.FileIndexHigh", Field, 0},
    -		{"ByHandleFileInformation.FileIndexLow", Field, 0},
    -		{"ByHandleFileInformation.FileSizeHigh", Field, 0},
    -		{"ByHandleFileInformation.FileSizeLow", Field, 0},
    -		{"ByHandleFileInformation.LastAccessTime", Field, 0},
    -		{"ByHandleFileInformation.LastWriteTime", Field, 0},
    -		{"ByHandleFileInformation.NumberOfLinks", Field, 0},
    -		{"ByHandleFileInformation.VolumeSerialNumber", Field, 0},
    -		{"BytePtrFromString", Func, 1},
    -		{"ByteSliceFromString", Func, 1},
    -		{"CCR0_FLUSH", Const, 1},
    -		{"CERT_CHAIN_POLICY_AUTHENTICODE", Const, 0},
    -		{"CERT_CHAIN_POLICY_AUTHENTICODE_TS", Const, 0},
    -		{"CERT_CHAIN_POLICY_BASE", Const, 0},
    -		{"CERT_CHAIN_POLICY_BASIC_CONSTRAINTS", Const, 0},
    -		{"CERT_CHAIN_POLICY_EV", Const, 0},
    -		{"CERT_CHAIN_POLICY_MICROSOFT_ROOT", Const, 0},
    -		{"CERT_CHAIN_POLICY_NT_AUTH", Const, 0},
    -		{"CERT_CHAIN_POLICY_SSL", Const, 0},
    -		{"CERT_E_CN_NO_MATCH", Const, 0},
    -		{"CERT_E_EXPIRED", Const, 0},
    -		{"CERT_E_PURPOSE", Const, 0},
    -		{"CERT_E_ROLE", Const, 0},
    -		{"CERT_E_UNTRUSTEDROOT", Const, 0},
    -		{"CERT_STORE_ADD_ALWAYS", Const, 0},
    -		{"CERT_STORE_DEFER_CLOSE_UNTIL_LAST_FREE_FLAG", Const, 0},
    -		{"CERT_STORE_PROV_MEMORY", Const, 0},
    -		{"CERT_TRUST_HAS_EXCLUDED_NAME_CONSTRAINT", Const, 0},
    -		{"CERT_TRUST_HAS_NOT_DEFINED_NAME_CONSTRAINT", Const, 0},
    -		{"CERT_TRUST_HAS_NOT_PERMITTED_NAME_CONSTRAINT", Const, 0},
    -		{"CERT_TRUST_HAS_NOT_SUPPORTED_CRITICAL_EXT", Const, 0},
    -		{"CERT_TRUST_HAS_NOT_SUPPORTED_NAME_CONSTRAINT", Const, 0},
    -		{"CERT_TRUST_INVALID_BASIC_CONSTRAINTS", Const, 0},
    -		{"CERT_TRUST_INVALID_EXTENSION", Const, 0},
    -		{"CERT_TRUST_INVALID_NAME_CONSTRAINTS", Const, 0},
    -		{"CERT_TRUST_INVALID_POLICY_CONSTRAINTS", Const, 0},
    -		{"CERT_TRUST_IS_CYCLIC", Const, 0},
    -		{"CERT_TRUST_IS_EXPLICIT_DISTRUST", Const, 0},
    -		{"CERT_TRUST_IS_NOT_SIGNATURE_VALID", Const, 0},
    -		{"CERT_TRUST_IS_NOT_TIME_VALID", Const, 0},
    -		{"CERT_TRUST_IS_NOT_VALID_FOR_USAGE", Const, 0},
    -		{"CERT_TRUST_IS_OFFLINE_REVOCATION", Const, 0},
    -		{"CERT_TRUST_IS_REVOKED", Const, 0},
    -		{"CERT_TRUST_IS_UNTRUSTED_ROOT", Const, 0},
    -		{"CERT_TRUST_NO_ERROR", Const, 0},
    -		{"CERT_TRUST_NO_ISSUANCE_CHAIN_POLICY", Const, 0},
    -		{"CERT_TRUST_REVOCATION_STATUS_UNKNOWN", Const, 0},
    -		{"CFLUSH", Const, 1},
    -		{"CLOCAL", Const, 0},
    -		{"CLONE_CHILD_CLEARTID", Const, 2},
    -		{"CLONE_CHILD_SETTID", Const, 2},
    -		{"CLONE_CLEAR_SIGHAND", Const, 20},
    -		{"CLONE_CSIGNAL", Const, 3},
    -		{"CLONE_DETACHED", Const, 2},
    -		{"CLONE_FILES", Const, 2},
    -		{"CLONE_FS", Const, 2},
    -		{"CLONE_INTO_CGROUP", Const, 20},
    -		{"CLONE_IO", Const, 2},
    -		{"CLONE_NEWCGROUP", Const, 20},
    -		{"CLONE_NEWIPC", Const, 2},
    -		{"CLONE_NEWNET", Const, 2},
    -		{"CLONE_NEWNS", Const, 2},
    -		{"CLONE_NEWPID", Const, 2},
    -		{"CLONE_NEWTIME", Const, 20},
    -		{"CLONE_NEWUSER", Const, 2},
    -		{"CLONE_NEWUTS", Const, 2},
    -		{"CLONE_PARENT", Const, 2},
    -		{"CLONE_PARENT_SETTID", Const, 2},
    -		{"CLONE_PID", Const, 3},
    -		{"CLONE_PIDFD", Const, 20},
    -		{"CLONE_PTRACE", Const, 2},
    -		{"CLONE_SETTLS", Const, 2},
    -		{"CLONE_SIGHAND", Const, 2},
    -		{"CLONE_SYSVSEM", Const, 2},
    -		{"CLONE_THREAD", Const, 2},
    -		{"CLONE_UNTRACED", Const, 2},
    -		{"CLONE_VFORK", Const, 2},
    -		{"CLONE_VM", Const, 2},
    -		{"CPUID_CFLUSH", Const, 1},
    -		{"CREAD", Const, 0},
    -		{"CREATE_ALWAYS", Const, 0},
    -		{"CREATE_NEW", Const, 0},
    -		{"CREATE_NEW_PROCESS_GROUP", Const, 1},
    -		{"CREATE_UNICODE_ENVIRONMENT", Const, 0},
    -		{"CRYPT_DEFAULT_CONTAINER_OPTIONAL", Const, 0},
    -		{"CRYPT_DELETEKEYSET", Const, 0},
    -		{"CRYPT_MACHINE_KEYSET", Const, 0},
    -		{"CRYPT_NEWKEYSET", Const, 0},
    -		{"CRYPT_SILENT", Const, 0},
    -		{"CRYPT_VERIFYCONTEXT", Const, 0},
    -		{"CS5", Const, 0},
    -		{"CS6", Const, 0},
    -		{"CS7", Const, 0},
    -		{"CS8", Const, 0},
    -		{"CSIZE", Const, 0},
    -		{"CSTART", Const, 1},
    -		{"CSTATUS", Const, 1},
    -		{"CSTOP", Const, 1},
    -		{"CSTOPB", Const, 0},
    -		{"CSUSP", Const, 1},
    -		{"CTL_MAXNAME", Const, 0},
    -		{"CTL_NET", Const, 0},
    -		{"CTL_QUERY", Const, 1},
    -		{"CTRL_BREAK_EVENT", Const, 1},
    -		{"CTRL_CLOSE_EVENT", Const, 14},
    -		{"CTRL_C_EVENT", Const, 1},
    -		{"CTRL_LOGOFF_EVENT", Const, 14},
    -		{"CTRL_SHUTDOWN_EVENT", Const, 14},
    -		{"CancelIo", Func, 0},
    -		{"CancelIoEx", Func, 1},
    -		{"CertAddCertificateContextToStore", Func, 0},
    -		{"CertChainContext", Type, 0},
    -		{"CertChainContext.ChainCount", Field, 0},
    -		{"CertChainContext.Chains", Field, 0},
    -		{"CertChainContext.HasRevocationFreshnessTime", Field, 0},
    -		{"CertChainContext.LowerQualityChainCount", Field, 0},
    -		{"CertChainContext.LowerQualityChains", Field, 0},
    -		{"CertChainContext.RevocationFreshnessTime", Field, 0},
    -		{"CertChainContext.Size", Field, 0},
    -		{"CertChainContext.TrustStatus", Field, 0},
    -		{"CertChainElement", Type, 0},
    -		{"CertChainElement.ApplicationUsage", Field, 0},
    -		{"CertChainElement.CertContext", Field, 0},
    -		{"CertChainElement.ExtendedErrorInfo", Field, 0},
    -		{"CertChainElement.IssuanceUsage", Field, 0},
    -		{"CertChainElement.RevocationInfo", Field, 0},
    -		{"CertChainElement.Size", Field, 0},
    -		{"CertChainElement.TrustStatus", Field, 0},
    -		{"CertChainPara", Type, 0},
    -		{"CertChainPara.CacheResync", Field, 0},
    -		{"CertChainPara.CheckRevocationFreshnessTime", Field, 0},
    -		{"CertChainPara.RequestedUsage", Field, 0},
    -		{"CertChainPara.RequstedIssuancePolicy", Field, 0},
    -		{"CertChainPara.RevocationFreshnessTime", Field, 0},
    -		{"CertChainPara.Size", Field, 0},
    -		{"CertChainPara.URLRetrievalTimeout", Field, 0},
    -		{"CertChainPolicyPara", Type, 0},
    -		{"CertChainPolicyPara.ExtraPolicyPara", Field, 0},
    -		{"CertChainPolicyPara.Flags", Field, 0},
    -		{"CertChainPolicyPara.Size", Field, 0},
    -		{"CertChainPolicyStatus", Type, 0},
    -		{"CertChainPolicyStatus.ChainIndex", Field, 0},
    -		{"CertChainPolicyStatus.ElementIndex", Field, 0},
    -		{"CertChainPolicyStatus.Error", Field, 0},
    -		{"CertChainPolicyStatus.ExtraPolicyStatus", Field, 0},
    -		{"CertChainPolicyStatus.Size", Field, 0},
    -		{"CertCloseStore", Func, 0},
    -		{"CertContext", Type, 0},
    -		{"CertContext.CertInfo", Field, 0},
    -		{"CertContext.EncodedCert", Field, 0},
    -		{"CertContext.EncodingType", Field, 0},
    -		{"CertContext.Length", Field, 0},
    -		{"CertContext.Store", Field, 0},
    -		{"CertCreateCertificateContext", Func, 0},
    -		{"CertEnhKeyUsage", Type, 0},
    -		{"CertEnhKeyUsage.Length", Field, 0},
    -		{"CertEnhKeyUsage.UsageIdentifiers", Field, 0},
    -		{"CertEnumCertificatesInStore", Func, 0},
    -		{"CertFreeCertificateChain", Func, 0},
    -		{"CertFreeCertificateContext", Func, 0},
    -		{"CertGetCertificateChain", Func, 0},
    -		{"CertInfo", Type, 11},
    -		{"CertOpenStore", Func, 0},
    -		{"CertOpenSystemStore", Func, 0},
    -		{"CertRevocationCrlInfo", Type, 11},
    -		{"CertRevocationInfo", Type, 0},
    -		{"CertRevocationInfo.CrlInfo", Field, 0},
    -		{"CertRevocationInfo.FreshnessTime", Field, 0},
    -		{"CertRevocationInfo.HasFreshnessTime", Field, 0},
    -		{"CertRevocationInfo.OidSpecificInfo", Field, 0},
    -		{"CertRevocationInfo.RevocationOid", Field, 0},
    -		{"CertRevocationInfo.RevocationResult", Field, 0},
    -		{"CertRevocationInfo.Size", Field, 0},
    -		{"CertSimpleChain", Type, 0},
    -		{"CertSimpleChain.Elements", Field, 0},
    -		{"CertSimpleChain.HasRevocationFreshnessTime", Field, 0},
    -		{"CertSimpleChain.NumElements", Field, 0},
    -		{"CertSimpleChain.RevocationFreshnessTime", Field, 0},
    -		{"CertSimpleChain.Size", Field, 0},
    -		{"CertSimpleChain.TrustListInfo", Field, 0},
    -		{"CertSimpleChain.TrustStatus", Field, 0},
    -		{"CertTrustListInfo", Type, 11},
    -		{"CertTrustStatus", Type, 0},
    -		{"CertTrustStatus.ErrorStatus", Field, 0},
    -		{"CertTrustStatus.InfoStatus", Field, 0},
    -		{"CertUsageMatch", Type, 0},
    -		{"CertUsageMatch.Type", Field, 0},
    -		{"CertUsageMatch.Usage", Field, 0},
    -		{"CertVerifyCertificateChainPolicy", Func, 0},
    -		{"Chdir", Func, 0},
    -		{"CheckBpfVersion", Func, 0},
    -		{"Chflags", Func, 0},
    -		{"Chmod", Func, 0},
    -		{"Chown", Func, 0},
    -		{"Chroot", Func, 0},
    -		{"Clearenv", Func, 0},
    -		{"Close", Func, 0},
    -		{"CloseHandle", Func, 0},
    -		{"CloseOnExec", Func, 0},
    -		{"Closesocket", Func, 0},
    -		{"CmsgLen", Func, 0},
    -		{"CmsgSpace", Func, 0},
    -		{"Cmsghdr", Type, 0},
    -		{"Cmsghdr.Len", Field, 0},
    -		{"Cmsghdr.Level", Field, 0},
    -		{"Cmsghdr.Type", Field, 0},
    -		{"Cmsghdr.X__cmsg_data", Field, 0},
    -		{"CommandLineToArgv", Func, 0},
    -		{"ComputerName", Func, 0},
    -		{"Conn", Type, 9},
    -		{"Connect", Func, 0},
    -		{"ConnectEx", Func, 1},
    -		{"ConvertSidToStringSid", Func, 0},
    -		{"ConvertStringSidToSid", Func, 0},
    -		{"CopySid", Func, 0},
    -		{"Creat", Func, 0},
    -		{"CreateDirectory", Func, 0},
    -		{"CreateFile", Func, 0},
    -		{"CreateFileMapping", Func, 0},
    -		{"CreateHardLink", Func, 4},
    -		{"CreateIoCompletionPort", Func, 0},
    -		{"CreatePipe", Func, 0},
    -		{"CreateProcess", Func, 0},
    -		{"CreateProcessAsUser", Func, 10},
    -		{"CreateSymbolicLink", Func, 4},
    -		{"CreateToolhelp32Snapshot", Func, 4},
    -		{"Credential", Type, 0},
    -		{"Credential.Gid", Field, 0},
    -		{"Credential.Groups", Field, 0},
    -		{"Credential.NoSetGroups", Field, 9},
    -		{"Credential.Uid", Field, 0},
    -		{"CryptAcquireContext", Func, 0},
    -		{"CryptGenRandom", Func, 0},
    -		{"CryptReleaseContext", Func, 0},
    -		{"DIOCBSFLUSH", Const, 1},
    -		{"DIOCOSFPFLUSH", Const, 1},
    -		{"DLL", Type, 0},
    -		{"DLL.Handle", Field, 0},
    -		{"DLL.Name", Field, 0},
    -		{"DLLError", Type, 0},
    -		{"DLLError.Err", Field, 0},
    -		{"DLLError.Msg", Field, 0},
    -		{"DLLError.ObjName", Field, 0},
    -		{"DLT_A429", Const, 0},
    -		{"DLT_A653_ICM", Const, 0},
    -		{"DLT_AIRONET_HEADER", Const, 0},
    -		{"DLT_AOS", Const, 1},
    -		{"DLT_APPLE_IP_OVER_IEEE1394", Const, 0},
    -		{"DLT_ARCNET", Const, 0},
    -		{"DLT_ARCNET_LINUX", Const, 0},
    -		{"DLT_ATM_CLIP", Const, 0},
    -		{"DLT_ATM_RFC1483", Const, 0},
    -		{"DLT_AURORA", Const, 0},
    -		{"DLT_AX25", Const, 0},
    -		{"DLT_AX25_KISS", Const, 0},
    -		{"DLT_BACNET_MS_TP", Const, 0},
    -		{"DLT_BLUETOOTH_HCI_H4", Const, 0},
    -		{"DLT_BLUETOOTH_HCI_H4_WITH_PHDR", Const, 0},
    -		{"DLT_CAN20B", Const, 0},
    -		{"DLT_CAN_SOCKETCAN", Const, 1},
    -		{"DLT_CHAOS", Const, 0},
    -		{"DLT_CHDLC", Const, 0},
    -		{"DLT_CISCO_IOS", Const, 0},
    -		{"DLT_C_HDLC", Const, 0},
    -		{"DLT_C_HDLC_WITH_DIR", Const, 0},
    -		{"DLT_DBUS", Const, 1},
    -		{"DLT_DECT", Const, 1},
    -		{"DLT_DOCSIS", Const, 0},
    -		{"DLT_DVB_CI", Const, 1},
    -		{"DLT_ECONET", Const, 0},
    -		{"DLT_EN10MB", Const, 0},
    -		{"DLT_EN3MB", Const, 0},
    -		{"DLT_ENC", Const, 0},
    -		{"DLT_ERF", Const, 0},
    -		{"DLT_ERF_ETH", Const, 0},
    -		{"DLT_ERF_POS", Const, 0},
    -		{"DLT_FC_2", Const, 1},
    -		{"DLT_FC_2_WITH_FRAME_DELIMS", Const, 1},
    -		{"DLT_FDDI", Const, 0},
    -		{"DLT_FLEXRAY", Const, 0},
    -		{"DLT_FRELAY", Const, 0},
    -		{"DLT_FRELAY_WITH_DIR", Const, 0},
    -		{"DLT_GCOM_SERIAL", Const, 0},
    -		{"DLT_GCOM_T1E1", Const, 0},
    -		{"DLT_GPF_F", Const, 0},
    -		{"DLT_GPF_T", Const, 0},
    -		{"DLT_GPRS_LLC", Const, 0},
    -		{"DLT_GSMTAP_ABIS", Const, 1},
    -		{"DLT_GSMTAP_UM", Const, 1},
    -		{"DLT_HDLC", Const, 1},
    -		{"DLT_HHDLC", Const, 0},
    -		{"DLT_HIPPI", Const, 1},
    -		{"DLT_IBM_SN", Const, 0},
    -		{"DLT_IBM_SP", Const, 0},
    -		{"DLT_IEEE802", Const, 0},
    -		{"DLT_IEEE802_11", Const, 0},
    -		{"DLT_IEEE802_11_RADIO", Const, 0},
    -		{"DLT_IEEE802_11_RADIO_AVS", Const, 0},
    -		{"DLT_IEEE802_15_4", Const, 0},
    -		{"DLT_IEEE802_15_4_LINUX", Const, 0},
    -		{"DLT_IEEE802_15_4_NOFCS", Const, 1},
    -		{"DLT_IEEE802_15_4_NONASK_PHY", Const, 0},
    -		{"DLT_IEEE802_16_MAC_CPS", Const, 0},
    -		{"DLT_IEEE802_16_MAC_CPS_RADIO", Const, 0},
    -		{"DLT_IPFILTER", Const, 0},
    -		{"DLT_IPMB", Const, 0},
    -		{"DLT_IPMB_LINUX", Const, 0},
    -		{"DLT_IPNET", Const, 1},
    -		{"DLT_IPOIB", Const, 1},
    -		{"DLT_IPV4", Const, 1},
    -		{"DLT_IPV6", Const, 1},
    -		{"DLT_IP_OVER_FC", Const, 0},
    -		{"DLT_JUNIPER_ATM1", Const, 0},
    -		{"DLT_JUNIPER_ATM2", Const, 0},
    -		{"DLT_JUNIPER_ATM_CEMIC", Const, 1},
    -		{"DLT_JUNIPER_CHDLC", Const, 0},
    -		{"DLT_JUNIPER_ES", Const, 0},
    -		{"DLT_JUNIPER_ETHER", Const, 0},
    -		{"DLT_JUNIPER_FIBRECHANNEL", Const, 1},
    -		{"DLT_JUNIPER_FRELAY", Const, 0},
    -		{"DLT_JUNIPER_GGSN", Const, 0},
    -		{"DLT_JUNIPER_ISM", Const, 0},
    -		{"DLT_JUNIPER_MFR", Const, 0},
    -		{"DLT_JUNIPER_MLFR", Const, 0},
    -		{"DLT_JUNIPER_MLPPP", Const, 0},
    -		{"DLT_JUNIPER_MONITOR", Const, 0},
    -		{"DLT_JUNIPER_PIC_PEER", Const, 0},
    -		{"DLT_JUNIPER_PPP", Const, 0},
    -		{"DLT_JUNIPER_PPPOE", Const, 0},
    -		{"DLT_JUNIPER_PPPOE_ATM", Const, 0},
    -		{"DLT_JUNIPER_SERVICES", Const, 0},
    -		{"DLT_JUNIPER_SRX_E2E", Const, 1},
    -		{"DLT_JUNIPER_ST", Const, 0},
    -		{"DLT_JUNIPER_VP", Const, 0},
    -		{"DLT_JUNIPER_VS", Const, 1},
    -		{"DLT_LAPB_WITH_DIR", Const, 0},
    -		{"DLT_LAPD", Const, 0},
    -		{"DLT_LIN", Const, 0},
    -		{"DLT_LINUX_EVDEV", Const, 1},
    -		{"DLT_LINUX_IRDA", Const, 0},
    -		{"DLT_LINUX_LAPD", Const, 0},
    -		{"DLT_LINUX_PPP_WITHDIRECTION", Const, 0},
    -		{"DLT_LINUX_SLL", Const, 0},
    -		{"DLT_LOOP", Const, 0},
    -		{"DLT_LTALK", Const, 0},
    -		{"DLT_MATCHING_MAX", Const, 1},
    -		{"DLT_MATCHING_MIN", Const, 1},
    -		{"DLT_MFR", Const, 0},
    -		{"DLT_MOST", Const, 0},
    -		{"DLT_MPEG_2_TS", Const, 1},
    -		{"DLT_MPLS", Const, 1},
    -		{"DLT_MTP2", Const, 0},
    -		{"DLT_MTP2_WITH_PHDR", Const, 0},
    -		{"DLT_MTP3", Const, 0},
    -		{"DLT_MUX27010", Const, 1},
    -		{"DLT_NETANALYZER", Const, 1},
    -		{"DLT_NETANALYZER_TRANSPARENT", Const, 1},
    -		{"DLT_NFC_LLCP", Const, 1},
    -		{"DLT_NFLOG", Const, 1},
    -		{"DLT_NG40", Const, 1},
    -		{"DLT_NULL", Const, 0},
    -		{"DLT_PCI_EXP", Const, 0},
    -		{"DLT_PFLOG", Const, 0},
    -		{"DLT_PFSYNC", Const, 0},
    -		{"DLT_PPI", Const, 0},
    -		{"DLT_PPP", Const, 0},
    -		{"DLT_PPP_BSDOS", Const, 0},
    -		{"DLT_PPP_ETHER", Const, 0},
    -		{"DLT_PPP_PPPD", Const, 0},
    -		{"DLT_PPP_SERIAL", Const, 0},
    -		{"DLT_PPP_WITH_DIR", Const, 0},
    -		{"DLT_PPP_WITH_DIRECTION", Const, 0},
    -		{"DLT_PRISM_HEADER", Const, 0},
    -		{"DLT_PRONET", Const, 0},
    -		{"DLT_RAIF1", Const, 0},
    -		{"DLT_RAW", Const, 0},
    -		{"DLT_RAWAF_MASK", Const, 1},
    -		{"DLT_RIO", Const, 0},
    -		{"DLT_SCCP", Const, 0},
    -		{"DLT_SITA", Const, 0},
    -		{"DLT_SLIP", Const, 0},
    -		{"DLT_SLIP_BSDOS", Const, 0},
    -		{"DLT_STANAG_5066_D_PDU", Const, 1},
    -		{"DLT_SUNATM", Const, 0},
    -		{"DLT_SYMANTEC_FIREWALL", Const, 0},
    -		{"DLT_TZSP", Const, 0},
    -		{"DLT_USB", Const, 0},
    -		{"DLT_USB_LINUX", Const, 0},
    -		{"DLT_USB_LINUX_MMAPPED", Const, 1},
    -		{"DLT_USER0", Const, 0},
    -		{"DLT_USER1", Const, 0},
    -		{"DLT_USER10", Const, 0},
    -		{"DLT_USER11", Const, 0},
    -		{"DLT_USER12", Const, 0},
    -		{"DLT_USER13", Const, 0},
    -		{"DLT_USER14", Const, 0},
    -		{"DLT_USER15", Const, 0},
    -		{"DLT_USER2", Const, 0},
    -		{"DLT_USER3", Const, 0},
    -		{"DLT_USER4", Const, 0},
    -		{"DLT_USER5", Const, 0},
    -		{"DLT_USER6", Const, 0},
    -		{"DLT_USER7", Const, 0},
    -		{"DLT_USER8", Const, 0},
    -		{"DLT_USER9", Const, 0},
    -		{"DLT_WIHART", Const, 1},
    -		{"DLT_X2E_SERIAL", Const, 0},
    -		{"DLT_X2E_XORAYA", Const, 0},
    -		{"DNSMXData", Type, 0},
    -		{"DNSMXData.NameExchange", Field, 0},
    -		{"DNSMXData.Pad", Field, 0},
    -		{"DNSMXData.Preference", Field, 0},
    -		{"DNSPTRData", Type, 0},
    -		{"DNSPTRData.Host", Field, 0},
    -		{"DNSRecord", Type, 0},
    -		{"DNSRecord.Data", Field, 0},
    -		{"DNSRecord.Dw", Field, 0},
    -		{"DNSRecord.Length", Field, 0},
    -		{"DNSRecord.Name", Field, 0},
    -		{"DNSRecord.Next", Field, 0},
    -		{"DNSRecord.Reserved", Field, 0},
    -		{"DNSRecord.Ttl", Field, 0},
    -		{"DNSRecord.Type", Field, 0},
    -		{"DNSSRVData", Type, 0},
    -		{"DNSSRVData.Pad", Field, 0},
    -		{"DNSSRVData.Port", Field, 0},
    -		{"DNSSRVData.Priority", Field, 0},
    -		{"DNSSRVData.Target", Field, 0},
    -		{"DNSSRVData.Weight", Field, 0},
    -		{"DNSTXTData", Type, 0},
    -		{"DNSTXTData.StringArray", Field, 0},
    -		{"DNSTXTData.StringCount", Field, 0},
    -		{"DNS_INFO_NO_RECORDS", Const, 4},
    -		{"DNS_TYPE_A", Const, 0},
    -		{"DNS_TYPE_A6", Const, 0},
    -		{"DNS_TYPE_AAAA", Const, 0},
    -		{"DNS_TYPE_ADDRS", Const, 0},
    -		{"DNS_TYPE_AFSDB", Const, 0},
    -		{"DNS_TYPE_ALL", Const, 0},
    -		{"DNS_TYPE_ANY", Const, 0},
    -		{"DNS_TYPE_ATMA", Const, 0},
    -		{"DNS_TYPE_AXFR", Const, 0},
    -		{"DNS_TYPE_CERT", Const, 0},
    -		{"DNS_TYPE_CNAME", Const, 0},
    -		{"DNS_TYPE_DHCID", Const, 0},
    -		{"DNS_TYPE_DNAME", Const, 0},
    -		{"DNS_TYPE_DNSKEY", Const, 0},
    -		{"DNS_TYPE_DS", Const, 0},
    -		{"DNS_TYPE_EID", Const, 0},
    -		{"DNS_TYPE_GID", Const, 0},
    -		{"DNS_TYPE_GPOS", Const, 0},
    -		{"DNS_TYPE_HINFO", Const, 0},
    -		{"DNS_TYPE_ISDN", Const, 0},
    -		{"DNS_TYPE_IXFR", Const, 0},
    -		{"DNS_TYPE_KEY", Const, 0},
    -		{"DNS_TYPE_KX", Const, 0},
    -		{"DNS_TYPE_LOC", Const, 0},
    -		{"DNS_TYPE_MAILA", Const, 0},
    -		{"DNS_TYPE_MAILB", Const, 0},
    -		{"DNS_TYPE_MB", Const, 0},
    -		{"DNS_TYPE_MD", Const, 0},
    -		{"DNS_TYPE_MF", Const, 0},
    -		{"DNS_TYPE_MG", Const, 0},
    -		{"DNS_TYPE_MINFO", Const, 0},
    -		{"DNS_TYPE_MR", Const, 0},
    -		{"DNS_TYPE_MX", Const, 0},
    -		{"DNS_TYPE_NAPTR", Const, 0},
    -		{"DNS_TYPE_NBSTAT", Const, 0},
    -		{"DNS_TYPE_NIMLOC", Const, 0},
    -		{"DNS_TYPE_NS", Const, 0},
    -		{"DNS_TYPE_NSAP", Const, 0},
    -		{"DNS_TYPE_NSAPPTR", Const, 0},
    -		{"DNS_TYPE_NSEC", Const, 0},
    -		{"DNS_TYPE_NULL", Const, 0},
    -		{"DNS_TYPE_NXT", Const, 0},
    -		{"DNS_TYPE_OPT", Const, 0},
    -		{"DNS_TYPE_PTR", Const, 0},
    -		{"DNS_TYPE_PX", Const, 0},
    -		{"DNS_TYPE_RP", Const, 0},
    -		{"DNS_TYPE_RRSIG", Const, 0},
    -		{"DNS_TYPE_RT", Const, 0},
    -		{"DNS_TYPE_SIG", Const, 0},
    -		{"DNS_TYPE_SINK", Const, 0},
    -		{"DNS_TYPE_SOA", Const, 0},
    -		{"DNS_TYPE_SRV", Const, 0},
    -		{"DNS_TYPE_TEXT", Const, 0},
    -		{"DNS_TYPE_TKEY", Const, 0},
    -		{"DNS_TYPE_TSIG", Const, 0},
    -		{"DNS_TYPE_UID", Const, 0},
    -		{"DNS_TYPE_UINFO", Const, 0},
    -		{"DNS_TYPE_UNSPEC", Const, 0},
    -		{"DNS_TYPE_WINS", Const, 0},
    -		{"DNS_TYPE_WINSR", Const, 0},
    -		{"DNS_TYPE_WKS", Const, 0},
    -		{"DNS_TYPE_X25", Const, 0},
    -		{"DT_BLK", Const, 0},
    -		{"DT_CHR", Const, 0},
    -		{"DT_DIR", Const, 0},
    -		{"DT_FIFO", Const, 0},
    -		{"DT_LNK", Const, 0},
    -		{"DT_REG", Const, 0},
    -		{"DT_SOCK", Const, 0},
    -		{"DT_UNKNOWN", Const, 0},
    -		{"DT_WHT", Const, 0},
    -		{"DUPLICATE_CLOSE_SOURCE", Const, 0},
    -		{"DUPLICATE_SAME_ACCESS", Const, 0},
    -		{"DeleteFile", Func, 0},
    -		{"DetachLsf", Func, 0},
    -		{"DeviceIoControl", Func, 4},
    -		{"Dirent", Type, 0},
    -		{"Dirent.Fileno", Field, 0},
    -		{"Dirent.Ino", Field, 0},
    -		{"Dirent.Name", Field, 0},
    -		{"Dirent.Namlen", Field, 0},
    -		{"Dirent.Off", Field, 0},
    -		{"Dirent.Pad0", Field, 12},
    -		{"Dirent.Pad1", Field, 12},
    -		{"Dirent.Pad_cgo_0", Field, 0},
    -		{"Dirent.Reclen", Field, 0},
    -		{"Dirent.Seekoff", Field, 0},
    -		{"Dirent.Type", Field, 0},
    -		{"Dirent.X__d_padding", Field, 3},
    -		{"DnsNameCompare", Func, 4},
    -		{"DnsQuery", Func, 0},
    -		{"DnsRecordListFree", Func, 0},
    -		{"DnsSectionAdditional", Const, 4},
    -		{"DnsSectionAnswer", Const, 4},
    -		{"DnsSectionAuthority", Const, 4},
    -		{"DnsSectionQuestion", Const, 4},
    -		{"Dup", Func, 0},
    -		{"Dup2", Func, 0},
    -		{"Dup3", Func, 2},
    -		{"DuplicateHandle", Func, 0},
    -		{"E2BIG", Const, 0},
    -		{"EACCES", Const, 0},
    -		{"EADDRINUSE", Const, 0},
    -		{"EADDRNOTAVAIL", Const, 0},
    -		{"EADV", Const, 0},
    -		{"EAFNOSUPPORT", Const, 0},
    -		{"EAGAIN", Const, 0},
    -		{"EALREADY", Const, 0},
    -		{"EAUTH", Const, 0},
    -		{"EBADARCH", Const, 0},
    -		{"EBADE", Const, 0},
    -		{"EBADEXEC", Const, 0},
    -		{"EBADF", Const, 0},
    -		{"EBADFD", Const, 0},
    -		{"EBADMACHO", Const, 0},
    -		{"EBADMSG", Const, 0},
    -		{"EBADR", Const, 0},
    -		{"EBADRPC", Const, 0},
    -		{"EBADRQC", Const, 0},
    -		{"EBADSLT", Const, 0},
    -		{"EBFONT", Const, 0},
    -		{"EBUSY", Const, 0},
    -		{"ECANCELED", Const, 0},
    -		{"ECAPMODE", Const, 1},
    -		{"ECHILD", Const, 0},
    -		{"ECHO", Const, 0},
    -		{"ECHOCTL", Const, 0},
    -		{"ECHOE", Const, 0},
    -		{"ECHOK", Const, 0},
    -		{"ECHOKE", Const, 0},
    -		{"ECHONL", Const, 0},
    -		{"ECHOPRT", Const, 0},
    -		{"ECHRNG", Const, 0},
    -		{"ECOMM", Const, 0},
    -		{"ECONNABORTED", Const, 0},
    -		{"ECONNREFUSED", Const, 0},
    -		{"ECONNRESET", Const, 0},
    -		{"EDEADLK", Const, 0},
    -		{"EDEADLOCK", Const, 0},
    -		{"EDESTADDRREQ", Const, 0},
    -		{"EDEVERR", Const, 0},
    -		{"EDOM", Const, 0},
    -		{"EDOOFUS", Const, 0},
    -		{"EDOTDOT", Const, 0},
    -		{"EDQUOT", Const, 0},
    -		{"EEXIST", Const, 0},
    -		{"EFAULT", Const, 0},
    -		{"EFBIG", Const, 0},
    -		{"EFER_LMA", Const, 1},
    -		{"EFER_LME", Const, 1},
    -		{"EFER_NXE", Const, 1},
    -		{"EFER_SCE", Const, 1},
    -		{"EFTYPE", Const, 0},
    -		{"EHOSTDOWN", Const, 0},
    -		{"EHOSTUNREACH", Const, 0},
    -		{"EHWPOISON", Const, 0},
    -		{"EIDRM", Const, 0},
    -		{"EILSEQ", Const, 0},
    -		{"EINPROGRESS", Const, 0},
    -		{"EINTR", Const, 0},
    -		{"EINVAL", Const, 0},
    -		{"EIO", Const, 0},
    -		{"EIPSEC", Const, 1},
    -		{"EISCONN", Const, 0},
    -		{"EISDIR", Const, 0},
    -		{"EISNAM", Const, 0},
    -		{"EKEYEXPIRED", Const, 0},
    -		{"EKEYREJECTED", Const, 0},
    -		{"EKEYREVOKED", Const, 0},
    -		{"EL2HLT", Const, 0},
    -		{"EL2NSYNC", Const, 0},
    -		{"EL3HLT", Const, 0},
    -		{"EL3RST", Const, 0},
    -		{"ELAST", Const, 0},
    -		{"ELF_NGREG", Const, 0},
    -		{"ELF_PRARGSZ", Const, 0},
    -		{"ELIBACC", Const, 0},
    -		{"ELIBBAD", Const, 0},
    -		{"ELIBEXEC", Const, 0},
    -		{"ELIBMAX", Const, 0},
    -		{"ELIBSCN", Const, 0},
    -		{"ELNRNG", Const, 0},
    -		{"ELOOP", Const, 0},
    -		{"EMEDIUMTYPE", Const, 0},
    -		{"EMFILE", Const, 0},
    -		{"EMLINK", Const, 0},
    -		{"EMSGSIZE", Const, 0},
    -		{"EMT_TAGOVF", Const, 1},
    -		{"EMULTIHOP", Const, 0},
    -		{"EMUL_ENABLED", Const, 1},
    -		{"EMUL_LINUX", Const, 1},
    -		{"EMUL_LINUX32", Const, 1},
    -		{"EMUL_MAXID", Const, 1},
    -		{"EMUL_NATIVE", Const, 1},
    -		{"ENAMETOOLONG", Const, 0},
    -		{"ENAVAIL", Const, 0},
    -		{"ENDRUNDISC", Const, 1},
    -		{"ENEEDAUTH", Const, 0},
    -		{"ENETDOWN", Const, 0},
    -		{"ENETRESET", Const, 0},
    -		{"ENETUNREACH", Const, 0},
    -		{"ENFILE", Const, 0},
    -		{"ENOANO", Const, 0},
    -		{"ENOATTR", Const, 0},
    -		{"ENOBUFS", Const, 0},
    -		{"ENOCSI", Const, 0},
    -		{"ENODATA", Const, 0},
    -		{"ENODEV", Const, 0},
    -		{"ENOENT", Const, 0},
    -		{"ENOEXEC", Const, 0},
    -		{"ENOKEY", Const, 0},
    -		{"ENOLCK", Const, 0},
    -		{"ENOLINK", Const, 0},
    -		{"ENOMEDIUM", Const, 0},
    -		{"ENOMEM", Const, 0},
    -		{"ENOMSG", Const, 0},
    -		{"ENONET", Const, 0},
    -		{"ENOPKG", Const, 0},
    -		{"ENOPOLICY", Const, 0},
    -		{"ENOPROTOOPT", Const, 0},
    -		{"ENOSPC", Const, 0},
    -		{"ENOSR", Const, 0},
    -		{"ENOSTR", Const, 0},
    -		{"ENOSYS", Const, 0},
    -		{"ENOTBLK", Const, 0},
    -		{"ENOTCAPABLE", Const, 0},
    -		{"ENOTCONN", Const, 0},
    -		{"ENOTDIR", Const, 0},
    -		{"ENOTEMPTY", Const, 0},
    -		{"ENOTNAM", Const, 0},
    -		{"ENOTRECOVERABLE", Const, 0},
    -		{"ENOTSOCK", Const, 0},
    -		{"ENOTSUP", Const, 0},
    -		{"ENOTTY", Const, 0},
    -		{"ENOTUNIQ", Const, 0},
    -		{"ENXIO", Const, 0},
    -		{"EN_SW_CTL_INF", Const, 1},
    -		{"EN_SW_CTL_PREC", Const, 1},
    -		{"EN_SW_CTL_ROUND", Const, 1},
    -		{"EN_SW_DATACHAIN", Const, 1},
    -		{"EN_SW_DENORM", Const, 1},
    -		{"EN_SW_INVOP", Const, 1},
    -		{"EN_SW_OVERFLOW", Const, 1},
    -		{"EN_SW_PRECLOSS", Const, 1},
    -		{"EN_SW_UNDERFLOW", Const, 1},
    -		{"EN_SW_ZERODIV", Const, 1},
    -		{"EOPNOTSUPP", Const, 0},
    -		{"EOVERFLOW", Const, 0},
    -		{"EOWNERDEAD", Const, 0},
    -		{"EPERM", Const, 0},
    -		{"EPFNOSUPPORT", Const, 0},
    -		{"EPIPE", Const, 0},
    -		{"EPOLLERR", Const, 0},
    -		{"EPOLLET", Const, 0},
    -		{"EPOLLHUP", Const, 0},
    -		{"EPOLLIN", Const, 0},
    -		{"EPOLLMSG", Const, 0},
    -		{"EPOLLONESHOT", Const, 0},
    -		{"EPOLLOUT", Const, 0},
    -		{"EPOLLPRI", Const, 0},
    -		{"EPOLLRDBAND", Const, 0},
    -		{"EPOLLRDHUP", Const, 0},
    -		{"EPOLLRDNORM", Const, 0},
    -		{"EPOLLWRBAND", Const, 0},
    -		{"EPOLLWRNORM", Const, 0},
    -		{"EPOLL_CLOEXEC", Const, 0},
    -		{"EPOLL_CTL_ADD", Const, 0},
    -		{"EPOLL_CTL_DEL", Const, 0},
    -		{"EPOLL_CTL_MOD", Const, 0},
    -		{"EPOLL_NONBLOCK", Const, 0},
    -		{"EPROCLIM", Const, 0},
    -		{"EPROCUNAVAIL", Const, 0},
    -		{"EPROGMISMATCH", Const, 0},
    -		{"EPROGUNAVAIL", Const, 0},
    -		{"EPROTO", Const, 0},
    -		{"EPROTONOSUPPORT", Const, 0},
    -		{"EPROTOTYPE", Const, 0},
    -		{"EPWROFF", Const, 0},
    -		{"EQFULL", Const, 16},
    -		{"ERANGE", Const, 0},
    -		{"EREMCHG", Const, 0},
    -		{"EREMOTE", Const, 0},
    -		{"EREMOTEIO", Const, 0},
    -		{"ERESTART", Const, 0},
    -		{"ERFKILL", Const, 0},
    -		{"EROFS", Const, 0},
    -		{"ERPCMISMATCH", Const, 0},
    -		{"ERROR_ACCESS_DENIED", Const, 0},
    -		{"ERROR_ALREADY_EXISTS", Const, 0},
    -		{"ERROR_BROKEN_PIPE", Const, 0},
    -		{"ERROR_BUFFER_OVERFLOW", Const, 0},
    -		{"ERROR_DIR_NOT_EMPTY", Const, 8},
    -		{"ERROR_ENVVAR_NOT_FOUND", Const, 0},
    -		{"ERROR_FILE_EXISTS", Const, 0},
    -		{"ERROR_FILE_NOT_FOUND", Const, 0},
    -		{"ERROR_HANDLE_EOF", Const, 2},
    -		{"ERROR_INSUFFICIENT_BUFFER", Const, 0},
    -		{"ERROR_IO_PENDING", Const, 0},
    -		{"ERROR_MOD_NOT_FOUND", Const, 0},
    -		{"ERROR_MORE_DATA", Const, 3},
    -		{"ERROR_NETNAME_DELETED", Const, 3},
    -		{"ERROR_NOT_FOUND", Const, 1},
    -		{"ERROR_NO_MORE_FILES", Const, 0},
    -		{"ERROR_OPERATION_ABORTED", Const, 0},
    -		{"ERROR_PATH_NOT_FOUND", Const, 0},
    -		{"ERROR_PRIVILEGE_NOT_HELD", Const, 4},
    -		{"ERROR_PROC_NOT_FOUND", Const, 0},
    -		{"ESHLIBVERS", Const, 0},
    -		{"ESHUTDOWN", Const, 0},
    -		{"ESOCKTNOSUPPORT", Const, 0},
    -		{"ESPIPE", Const, 0},
    -		{"ESRCH", Const, 0},
    -		{"ESRMNT", Const, 0},
    -		{"ESTALE", Const, 0},
    -		{"ESTRPIPE", Const, 0},
    -		{"ETHERCAP_JUMBO_MTU", Const, 1},
    -		{"ETHERCAP_VLAN_HWTAGGING", Const, 1},
    -		{"ETHERCAP_VLAN_MTU", Const, 1},
    -		{"ETHERMIN", Const, 1},
    -		{"ETHERMTU", Const, 1},
    -		{"ETHERMTU_JUMBO", Const, 1},
    -		{"ETHERTYPE_8023", Const, 1},
    -		{"ETHERTYPE_AARP", Const, 1},
    -		{"ETHERTYPE_ACCTON", Const, 1},
    -		{"ETHERTYPE_AEONIC", Const, 1},
    -		{"ETHERTYPE_ALPHA", Const, 1},
    -		{"ETHERTYPE_AMBER", Const, 1},
    -		{"ETHERTYPE_AMOEBA", Const, 1},
    -		{"ETHERTYPE_AOE", Const, 1},
    -		{"ETHERTYPE_APOLLO", Const, 1},
    -		{"ETHERTYPE_APOLLODOMAIN", Const, 1},
    -		{"ETHERTYPE_APPLETALK", Const, 1},
    -		{"ETHERTYPE_APPLITEK", Const, 1},
    -		{"ETHERTYPE_ARGONAUT", Const, 1},
    -		{"ETHERTYPE_ARP", Const, 1},
    -		{"ETHERTYPE_AT", Const, 1},
    -		{"ETHERTYPE_ATALK", Const, 1},
    -		{"ETHERTYPE_ATOMIC", Const, 1},
    -		{"ETHERTYPE_ATT", Const, 1},
    -		{"ETHERTYPE_ATTSTANFORD", Const, 1},
    -		{"ETHERTYPE_AUTOPHON", Const, 1},
    -		{"ETHERTYPE_AXIS", Const, 1},
    -		{"ETHERTYPE_BCLOOP", Const, 1},
    -		{"ETHERTYPE_BOFL", Const, 1},
    -		{"ETHERTYPE_CABLETRON", Const, 1},
    -		{"ETHERTYPE_CHAOS", Const, 1},
    -		{"ETHERTYPE_COMDESIGN", Const, 1},
    -		{"ETHERTYPE_COMPUGRAPHIC", Const, 1},
    -		{"ETHERTYPE_COUNTERPOINT", Const, 1},
    -		{"ETHERTYPE_CRONUS", Const, 1},
    -		{"ETHERTYPE_CRONUSVLN", Const, 1},
    -		{"ETHERTYPE_DCA", Const, 1},
    -		{"ETHERTYPE_DDE", Const, 1},
    -		{"ETHERTYPE_DEBNI", Const, 1},
    -		{"ETHERTYPE_DECAM", Const, 1},
    -		{"ETHERTYPE_DECCUST", Const, 1},
    -		{"ETHERTYPE_DECDIAG", Const, 1},
    -		{"ETHERTYPE_DECDNS", Const, 1},
    -		{"ETHERTYPE_DECDTS", Const, 1},
    -		{"ETHERTYPE_DECEXPER", Const, 1},
    -		{"ETHERTYPE_DECLAST", Const, 1},
    -		{"ETHERTYPE_DECLTM", Const, 1},
    -		{"ETHERTYPE_DECMUMPS", Const, 1},
    -		{"ETHERTYPE_DECNETBIOS", Const, 1},
    -		{"ETHERTYPE_DELTACON", Const, 1},
    -		{"ETHERTYPE_DIDDLE", Const, 1},
    -		{"ETHERTYPE_DLOG1", Const, 1},
    -		{"ETHERTYPE_DLOG2", Const, 1},
    -		{"ETHERTYPE_DN", Const, 1},
    -		{"ETHERTYPE_DOGFIGHT", Const, 1},
    -		{"ETHERTYPE_DSMD", Const, 1},
    -		{"ETHERTYPE_ECMA", Const, 1},
    -		{"ETHERTYPE_ENCRYPT", Const, 1},
    -		{"ETHERTYPE_ES", Const, 1},
    -		{"ETHERTYPE_EXCELAN", Const, 1},
    -		{"ETHERTYPE_EXPERDATA", Const, 1},
    -		{"ETHERTYPE_FLIP", Const, 1},
    -		{"ETHERTYPE_FLOWCONTROL", Const, 1},
    -		{"ETHERTYPE_FRARP", Const, 1},
    -		{"ETHERTYPE_GENDYN", Const, 1},
    -		{"ETHERTYPE_HAYES", Const, 1},
    -		{"ETHERTYPE_HIPPI_FP", Const, 1},
    -		{"ETHERTYPE_HITACHI", Const, 1},
    -		{"ETHERTYPE_HP", Const, 1},
    -		{"ETHERTYPE_IEEEPUP", Const, 1},
    -		{"ETHERTYPE_IEEEPUPAT", Const, 1},
    -		{"ETHERTYPE_IMLBL", Const, 1},
    -		{"ETHERTYPE_IMLBLDIAG", Const, 1},
    -		{"ETHERTYPE_IP", Const, 1},
    -		{"ETHERTYPE_IPAS", Const, 1},
    -		{"ETHERTYPE_IPV6", Const, 1},
    -		{"ETHERTYPE_IPX", Const, 1},
    -		{"ETHERTYPE_IPXNEW", Const, 1},
    -		{"ETHERTYPE_KALPANA", Const, 1},
    -		{"ETHERTYPE_LANBRIDGE", Const, 1},
    -		{"ETHERTYPE_LANPROBE", Const, 1},
    -		{"ETHERTYPE_LAT", Const, 1},
    -		{"ETHERTYPE_LBACK", Const, 1},
    -		{"ETHERTYPE_LITTLE", Const, 1},
    -		{"ETHERTYPE_LLDP", Const, 1},
    -		{"ETHERTYPE_LOGICRAFT", Const, 1},
    -		{"ETHERTYPE_LOOPBACK", Const, 1},
    -		{"ETHERTYPE_MATRA", Const, 1},
    -		{"ETHERTYPE_MAX", Const, 1},
    -		{"ETHERTYPE_MERIT", Const, 1},
    -		{"ETHERTYPE_MICP", Const, 1},
    -		{"ETHERTYPE_MOPDL", Const, 1},
    -		{"ETHERTYPE_MOPRC", Const, 1},
    -		{"ETHERTYPE_MOTOROLA", Const, 1},
    -		{"ETHERTYPE_MPLS", Const, 1},
    -		{"ETHERTYPE_MPLS_MCAST", Const, 1},
    -		{"ETHERTYPE_MUMPS", Const, 1},
    -		{"ETHERTYPE_NBPCC", Const, 1},
    -		{"ETHERTYPE_NBPCLAIM", Const, 1},
    -		{"ETHERTYPE_NBPCLREQ", Const, 1},
    -		{"ETHERTYPE_NBPCLRSP", Const, 1},
    -		{"ETHERTYPE_NBPCREQ", Const, 1},
    -		{"ETHERTYPE_NBPCRSP", Const, 1},
    -		{"ETHERTYPE_NBPDG", Const, 1},
    -		{"ETHERTYPE_NBPDGB", Const, 1},
    -		{"ETHERTYPE_NBPDLTE", Const, 1},
    -		{"ETHERTYPE_NBPRAR", Const, 1},
    -		{"ETHERTYPE_NBPRAS", Const, 1},
    -		{"ETHERTYPE_NBPRST", Const, 1},
    -		{"ETHERTYPE_NBPSCD", Const, 1},
    -		{"ETHERTYPE_NBPVCD", Const, 1},
    -		{"ETHERTYPE_NBS", Const, 1},
    -		{"ETHERTYPE_NCD", Const, 1},
    -		{"ETHERTYPE_NESTAR", Const, 1},
    -		{"ETHERTYPE_NETBEUI", Const, 1},
    -		{"ETHERTYPE_NOVELL", Const, 1},
    -		{"ETHERTYPE_NS", Const, 1},
    -		{"ETHERTYPE_NSAT", Const, 1},
    -		{"ETHERTYPE_NSCOMPAT", Const, 1},
    -		{"ETHERTYPE_NTRAILER", Const, 1},
    -		{"ETHERTYPE_OS9", Const, 1},
    -		{"ETHERTYPE_OS9NET", Const, 1},
    -		{"ETHERTYPE_PACER", Const, 1},
    -		{"ETHERTYPE_PAE", Const, 1},
    -		{"ETHERTYPE_PCS", Const, 1},
    -		{"ETHERTYPE_PLANNING", Const, 1},
    -		{"ETHERTYPE_PPP", Const, 1},
    -		{"ETHERTYPE_PPPOE", Const, 1},
    -		{"ETHERTYPE_PPPOEDISC", Const, 1},
    -		{"ETHERTYPE_PRIMENTS", Const, 1},
    -		{"ETHERTYPE_PUP", Const, 1},
    -		{"ETHERTYPE_PUPAT", Const, 1},
    -		{"ETHERTYPE_QINQ", Const, 1},
    -		{"ETHERTYPE_RACAL", Const, 1},
    -		{"ETHERTYPE_RATIONAL", Const, 1},
    -		{"ETHERTYPE_RAWFR", Const, 1},
    -		{"ETHERTYPE_RCL", Const, 1},
    -		{"ETHERTYPE_RDP", Const, 1},
    -		{"ETHERTYPE_RETIX", Const, 1},
    -		{"ETHERTYPE_REVARP", Const, 1},
    -		{"ETHERTYPE_SCA", Const, 1},
    -		{"ETHERTYPE_SECTRA", Const, 1},
    -		{"ETHERTYPE_SECUREDATA", Const, 1},
    -		{"ETHERTYPE_SGITW", Const, 1},
    -		{"ETHERTYPE_SG_BOUNCE", Const, 1},
    -		{"ETHERTYPE_SG_DIAG", Const, 1},
    -		{"ETHERTYPE_SG_NETGAMES", Const, 1},
    -		{"ETHERTYPE_SG_RESV", Const, 1},
    -		{"ETHERTYPE_SIMNET", Const, 1},
    -		{"ETHERTYPE_SLOW", Const, 1},
    -		{"ETHERTYPE_SLOWPROTOCOLS", Const, 1},
    -		{"ETHERTYPE_SNA", Const, 1},
    -		{"ETHERTYPE_SNMP", Const, 1},
    -		{"ETHERTYPE_SONIX", Const, 1},
    -		{"ETHERTYPE_SPIDER", Const, 1},
    -		{"ETHERTYPE_SPRITE", Const, 1},
    -		{"ETHERTYPE_STP", Const, 1},
    -		{"ETHERTYPE_TALARIS", Const, 1},
    -		{"ETHERTYPE_TALARISMC", Const, 1},
    -		{"ETHERTYPE_TCPCOMP", Const, 1},
    -		{"ETHERTYPE_TCPSM", Const, 1},
    -		{"ETHERTYPE_TEC", Const, 1},
    -		{"ETHERTYPE_TIGAN", Const, 1},
    -		{"ETHERTYPE_TRAIL", Const, 1},
    -		{"ETHERTYPE_TRANSETHER", Const, 1},
    -		{"ETHERTYPE_TYMSHARE", Const, 1},
    -		{"ETHERTYPE_UBBST", Const, 1},
    -		{"ETHERTYPE_UBDEBUG", Const, 1},
    -		{"ETHERTYPE_UBDIAGLOOP", Const, 1},
    -		{"ETHERTYPE_UBDL", Const, 1},
    -		{"ETHERTYPE_UBNIU", Const, 1},
    -		{"ETHERTYPE_UBNMC", Const, 1},
    -		{"ETHERTYPE_VALID", Const, 1},
    -		{"ETHERTYPE_VARIAN", Const, 1},
    -		{"ETHERTYPE_VAXELN", Const, 1},
    -		{"ETHERTYPE_VEECO", Const, 1},
    -		{"ETHERTYPE_VEXP", Const, 1},
    -		{"ETHERTYPE_VGLAB", Const, 1},
    -		{"ETHERTYPE_VINES", Const, 1},
    -		{"ETHERTYPE_VINESECHO", Const, 1},
    -		{"ETHERTYPE_VINESLOOP", Const, 1},
    -		{"ETHERTYPE_VITAL", Const, 1},
    -		{"ETHERTYPE_VLAN", Const, 1},
    -		{"ETHERTYPE_VLTLMAN", Const, 1},
    -		{"ETHERTYPE_VPROD", Const, 1},
    -		{"ETHERTYPE_VURESERVED", Const, 1},
    -		{"ETHERTYPE_WATERLOO", Const, 1},
    -		{"ETHERTYPE_WELLFLEET", Const, 1},
    -		{"ETHERTYPE_X25", Const, 1},
    -		{"ETHERTYPE_X75", Const, 1},
    -		{"ETHERTYPE_XNSSM", Const, 1},
    -		{"ETHERTYPE_XTP", Const, 1},
    -		{"ETHER_ADDR_LEN", Const, 1},
    -		{"ETHER_ALIGN", Const, 1},
    -		{"ETHER_CRC_LEN", Const, 1},
    -		{"ETHER_CRC_POLY_BE", Const, 1},
    -		{"ETHER_CRC_POLY_LE", Const, 1},
    -		{"ETHER_HDR_LEN", Const, 1},
    -		{"ETHER_MAX_DIX_LEN", Const, 1},
    -		{"ETHER_MAX_LEN", Const, 1},
    -		{"ETHER_MAX_LEN_JUMBO", Const, 1},
    -		{"ETHER_MIN_LEN", Const, 1},
    -		{"ETHER_PPPOE_ENCAP_LEN", Const, 1},
    -		{"ETHER_TYPE_LEN", Const, 1},
    -		{"ETHER_VLAN_ENCAP_LEN", Const, 1},
    -		{"ETH_P_1588", Const, 0},
    -		{"ETH_P_8021Q", Const, 0},
    -		{"ETH_P_802_2", Const, 0},
    -		{"ETH_P_802_3", Const, 0},
    -		{"ETH_P_AARP", Const, 0},
    -		{"ETH_P_ALL", Const, 0},
    -		{"ETH_P_AOE", Const, 0},
    -		{"ETH_P_ARCNET", Const, 0},
    -		{"ETH_P_ARP", Const, 0},
    -		{"ETH_P_ATALK", Const, 0},
    -		{"ETH_P_ATMFATE", Const, 0},
    -		{"ETH_P_ATMMPOA", Const, 0},
    -		{"ETH_P_AX25", Const, 0},
    -		{"ETH_P_BPQ", Const, 0},
    -		{"ETH_P_CAIF", Const, 0},
    -		{"ETH_P_CAN", Const, 0},
    -		{"ETH_P_CONTROL", Const, 0},
    -		{"ETH_P_CUST", Const, 0},
    -		{"ETH_P_DDCMP", Const, 0},
    -		{"ETH_P_DEC", Const, 0},
    -		{"ETH_P_DIAG", Const, 0},
    -		{"ETH_P_DNA_DL", Const, 0},
    -		{"ETH_P_DNA_RC", Const, 0},
    -		{"ETH_P_DNA_RT", Const, 0},
    -		{"ETH_P_DSA", Const, 0},
    -		{"ETH_P_ECONET", Const, 0},
    -		{"ETH_P_EDSA", Const, 0},
    -		{"ETH_P_FCOE", Const, 0},
    -		{"ETH_P_FIP", Const, 0},
    -		{"ETH_P_HDLC", Const, 0},
    -		{"ETH_P_IEEE802154", Const, 0},
    -		{"ETH_P_IEEEPUP", Const, 0},
    -		{"ETH_P_IEEEPUPAT", Const, 0},
    -		{"ETH_P_IP", Const, 0},
    -		{"ETH_P_IPV6", Const, 0},
    -		{"ETH_P_IPX", Const, 0},
    -		{"ETH_P_IRDA", Const, 0},
    -		{"ETH_P_LAT", Const, 0},
    -		{"ETH_P_LINK_CTL", Const, 0},
    -		{"ETH_P_LOCALTALK", Const, 0},
    -		{"ETH_P_LOOP", Const, 0},
    -		{"ETH_P_MOBITEX", Const, 0},
    -		{"ETH_P_MPLS_MC", Const, 0},
    -		{"ETH_P_MPLS_UC", Const, 0},
    -		{"ETH_P_PAE", Const, 0},
    -		{"ETH_P_PAUSE", Const, 0},
    -		{"ETH_P_PHONET", Const, 0},
    -		{"ETH_P_PPPTALK", Const, 0},
    -		{"ETH_P_PPP_DISC", Const, 0},
    -		{"ETH_P_PPP_MP", Const, 0},
    -		{"ETH_P_PPP_SES", Const, 0},
    -		{"ETH_P_PUP", Const, 0},
    -		{"ETH_P_PUPAT", Const, 0},
    -		{"ETH_P_RARP", Const, 0},
    -		{"ETH_P_SCA", Const, 0},
    -		{"ETH_P_SLOW", Const, 0},
    -		{"ETH_P_SNAP", Const, 0},
    -		{"ETH_P_TEB", Const, 0},
    -		{"ETH_P_TIPC", Const, 0},
    -		{"ETH_P_TRAILER", Const, 0},
    -		{"ETH_P_TR_802_2", Const, 0},
    -		{"ETH_P_WAN_PPP", Const, 0},
    -		{"ETH_P_WCCP", Const, 0},
    -		{"ETH_P_X25", Const, 0},
    -		{"ETIME", Const, 0},
    -		{"ETIMEDOUT", Const, 0},
    -		{"ETOOMANYREFS", Const, 0},
    -		{"ETXTBSY", Const, 0},
    -		{"EUCLEAN", Const, 0},
    -		{"EUNATCH", Const, 0},
    -		{"EUSERS", Const, 0},
    -		{"EVFILT_AIO", Const, 0},
    -		{"EVFILT_FS", Const, 0},
    -		{"EVFILT_LIO", Const, 0},
    -		{"EVFILT_MACHPORT", Const, 0},
    -		{"EVFILT_PROC", Const, 0},
    -		{"EVFILT_READ", Const, 0},
    -		{"EVFILT_SIGNAL", Const, 0},
    -		{"EVFILT_SYSCOUNT", Const, 0},
    -		{"EVFILT_THREADMARKER", Const, 0},
    -		{"EVFILT_TIMER", Const, 0},
    -		{"EVFILT_USER", Const, 0},
    -		{"EVFILT_VM", Const, 0},
    -		{"EVFILT_VNODE", Const, 0},
    -		{"EVFILT_WRITE", Const, 0},
    -		{"EV_ADD", Const, 0},
    -		{"EV_CLEAR", Const, 0},
    -		{"EV_DELETE", Const, 0},
    -		{"EV_DISABLE", Const, 0},
    -		{"EV_DISPATCH", Const, 0},
    -		{"EV_DROP", Const, 3},
    -		{"EV_ENABLE", Const, 0},
    -		{"EV_EOF", Const, 0},
    -		{"EV_ERROR", Const, 0},
    -		{"EV_FLAG0", Const, 0},
    -		{"EV_FLAG1", Const, 0},
    -		{"EV_ONESHOT", Const, 0},
    -		{"EV_OOBAND", Const, 0},
    -		{"EV_POLL", Const, 0},
    -		{"EV_RECEIPT", Const, 0},
    -		{"EV_SYSFLAGS", Const, 0},
    -		{"EWINDOWS", Const, 0},
    -		{"EWOULDBLOCK", Const, 0},
    -		{"EXDEV", Const, 0},
    -		{"EXFULL", Const, 0},
    -		{"EXTA", Const, 0},
    -		{"EXTB", Const, 0},
    -		{"EXTPROC", Const, 0},
    -		{"Environ", Func, 0},
    -		{"EpollCreate", Func, 0},
    -		{"EpollCreate1", Func, 0},
    -		{"EpollCtl", Func, 0},
    -		{"EpollEvent", Type, 0},
    -		{"EpollEvent.Events", Field, 0},
    -		{"EpollEvent.Fd", Field, 0},
    -		{"EpollEvent.Pad", Field, 0},
    -		{"EpollEvent.PadFd", Field, 0},
    -		{"EpollWait", Func, 0},
    -		{"Errno", Type, 0},
    -		{"EscapeArg", Func, 0},
    -		{"Exchangedata", Func, 0},
    -		{"Exec", Func, 0},
    -		{"Exit", Func, 0},
    -		{"ExitProcess", Func, 0},
    -		{"FD_CLOEXEC", Const, 0},
    -		{"FD_SETSIZE", Const, 0},
    -		{"FILE_ACTION_ADDED", Const, 0},
    -		{"FILE_ACTION_MODIFIED", Const, 0},
    -		{"FILE_ACTION_REMOVED", Const, 0},
    -		{"FILE_ACTION_RENAMED_NEW_NAME", Const, 0},
    -		{"FILE_ACTION_RENAMED_OLD_NAME", Const, 0},
    -		{"FILE_APPEND_DATA", Const, 0},
    -		{"FILE_ATTRIBUTE_ARCHIVE", Const, 0},
    -		{"FILE_ATTRIBUTE_DIRECTORY", Const, 0},
    -		{"FILE_ATTRIBUTE_HIDDEN", Const, 0},
    -		{"FILE_ATTRIBUTE_NORMAL", Const, 0},
    -		{"FILE_ATTRIBUTE_READONLY", Const, 0},
    -		{"FILE_ATTRIBUTE_REPARSE_POINT", Const, 4},
    -		{"FILE_ATTRIBUTE_SYSTEM", Const, 0},
    -		{"FILE_BEGIN", Const, 0},
    -		{"FILE_CURRENT", Const, 0},
    -		{"FILE_END", Const, 0},
    -		{"FILE_FLAG_BACKUP_SEMANTICS", Const, 0},
    -		{"FILE_FLAG_OPEN_REPARSE_POINT", Const, 4},
    -		{"FILE_FLAG_OVERLAPPED", Const, 0},
    -		{"FILE_LIST_DIRECTORY", Const, 0},
    -		{"FILE_MAP_COPY", Const, 0},
    -		{"FILE_MAP_EXECUTE", Const, 0},
    -		{"FILE_MAP_READ", Const, 0},
    -		{"FILE_MAP_WRITE", Const, 0},
    -		{"FILE_NOTIFY_CHANGE_ATTRIBUTES", Const, 0},
    -		{"FILE_NOTIFY_CHANGE_CREATION", Const, 0},
    -		{"FILE_NOTIFY_CHANGE_DIR_NAME", Const, 0},
    -		{"FILE_NOTIFY_CHANGE_FILE_NAME", Const, 0},
    -		{"FILE_NOTIFY_CHANGE_LAST_ACCESS", Const, 0},
    -		{"FILE_NOTIFY_CHANGE_LAST_WRITE", Const, 0},
    -		{"FILE_NOTIFY_CHANGE_SIZE", Const, 0},
    -		{"FILE_SHARE_DELETE", Const, 0},
    -		{"FILE_SHARE_READ", Const, 0},
    -		{"FILE_SHARE_WRITE", Const, 0},
    -		{"FILE_SKIP_COMPLETION_PORT_ON_SUCCESS", Const, 2},
    -		{"FILE_SKIP_SET_EVENT_ON_HANDLE", Const, 2},
    -		{"FILE_TYPE_CHAR", Const, 0},
    -		{"FILE_TYPE_DISK", Const, 0},
    -		{"FILE_TYPE_PIPE", Const, 0},
    -		{"FILE_TYPE_REMOTE", Const, 0},
    -		{"FILE_TYPE_UNKNOWN", Const, 0},
    -		{"FILE_WRITE_ATTRIBUTES", Const, 0},
    -		{"FLUSHO", Const, 0},
    -		{"FORMAT_MESSAGE_ALLOCATE_BUFFER", Const, 0},
    -		{"FORMAT_MESSAGE_ARGUMENT_ARRAY", Const, 0},
    -		{"FORMAT_MESSAGE_FROM_HMODULE", Const, 0},
    -		{"FORMAT_MESSAGE_FROM_STRING", Const, 0},
    -		{"FORMAT_MESSAGE_FROM_SYSTEM", Const, 0},
    -		{"FORMAT_MESSAGE_IGNORE_INSERTS", Const, 0},
    -		{"FORMAT_MESSAGE_MAX_WIDTH_MASK", Const, 0},
    -		{"FSCTL_GET_REPARSE_POINT", Const, 4},
    -		{"F_ADDFILESIGS", Const, 0},
    -		{"F_ADDSIGS", Const, 0},
    -		{"F_ALLOCATEALL", Const, 0},
    -		{"F_ALLOCATECONTIG", Const, 0},
    -		{"F_CANCEL", Const, 0},
    -		{"F_CHKCLEAN", Const, 0},
    -		{"F_CLOSEM", Const, 1},
    -		{"F_DUP2FD", Const, 0},
    -		{"F_DUP2FD_CLOEXEC", Const, 1},
    -		{"F_DUPFD", Const, 0},
    -		{"F_DUPFD_CLOEXEC", Const, 0},
    -		{"F_EXLCK", Const, 0},
    -		{"F_FINDSIGS", Const, 16},
    -		{"F_FLUSH_DATA", Const, 0},
    -		{"F_FREEZE_FS", Const, 0},
    -		{"F_FSCTL", Const, 1},
    -		{"F_FSDIRMASK", Const, 1},
    -		{"F_FSIN", Const, 1},
    -		{"F_FSINOUT", Const, 1},
    -		{"F_FSOUT", Const, 1},
    -		{"F_FSPRIV", Const, 1},
    -		{"F_FSVOID", Const, 1},
    -		{"F_FULLFSYNC", Const, 0},
    -		{"F_GETCODEDIR", Const, 16},
    -		{"F_GETFD", Const, 0},
    -		{"F_GETFL", Const, 0},
    -		{"F_GETLEASE", Const, 0},
    -		{"F_GETLK", Const, 0},
    -		{"F_GETLK64", Const, 0},
    -		{"F_GETLKPID", Const, 0},
    -		{"F_GETNOSIGPIPE", Const, 0},
    -		{"F_GETOWN", Const, 0},
    -		{"F_GETOWN_EX", Const, 0},
    -		{"F_GETPATH", Const, 0},
    -		{"F_GETPATH_MTMINFO", Const, 0},
    -		{"F_GETPIPE_SZ", Const, 0},
    -		{"F_GETPROTECTIONCLASS", Const, 0},
    -		{"F_GETPROTECTIONLEVEL", Const, 16},
    -		{"F_GETSIG", Const, 0},
    -		{"F_GLOBAL_NOCACHE", Const, 0},
    -		{"F_LOCK", Const, 0},
    -		{"F_LOG2PHYS", Const, 0},
    -		{"F_LOG2PHYS_EXT", Const, 0},
    -		{"F_MARKDEPENDENCY", Const, 0},
    -		{"F_MAXFD", Const, 1},
    -		{"F_NOCACHE", Const, 0},
    -		{"F_NODIRECT", Const, 0},
    -		{"F_NOTIFY", Const, 0},
    -		{"F_OGETLK", Const, 0},
    -		{"F_OK", Const, 0},
    -		{"F_OSETLK", Const, 0},
    -		{"F_OSETLKW", Const, 0},
    -		{"F_PARAM_MASK", Const, 1},
    -		{"F_PARAM_MAX", Const, 1},
    -		{"F_PATHPKG_CHECK", Const, 0},
    -		{"F_PEOFPOSMODE", Const, 0},
    -		{"F_PREALLOCATE", Const, 0},
    -		{"F_RDADVISE", Const, 0},
    -		{"F_RDAHEAD", Const, 0},
    -		{"F_RDLCK", Const, 0},
    -		{"F_READAHEAD", Const, 0},
    -		{"F_READBOOTSTRAP", Const, 0},
    -		{"F_SETBACKINGSTORE", Const, 0},
    -		{"F_SETFD", Const, 0},
    -		{"F_SETFL", Const, 0},
    -		{"F_SETLEASE", Const, 0},
    -		{"F_SETLK", Const, 0},
    -		{"F_SETLK64", Const, 0},
    -		{"F_SETLKW", Const, 0},
    -		{"F_SETLKW64", Const, 0},
    -		{"F_SETLKWTIMEOUT", Const, 16},
    -		{"F_SETLK_REMOTE", Const, 0},
    -		{"F_SETNOSIGPIPE", Const, 0},
    -		{"F_SETOWN", Const, 0},
    -		{"F_SETOWN_EX", Const, 0},
    -		{"F_SETPIPE_SZ", Const, 0},
    -		{"F_SETPROTECTIONCLASS", Const, 0},
    -		{"F_SETSIG", Const, 0},
    -		{"F_SETSIZE", Const, 0},
    -		{"F_SHLCK", Const, 0},
    -		{"F_SINGLE_WRITER", Const, 16},
    -		{"F_TEST", Const, 0},
    -		{"F_THAW_FS", Const, 0},
    -		{"F_TLOCK", Const, 0},
    -		{"F_TRANSCODEKEY", Const, 16},
    -		{"F_ULOCK", Const, 0},
    -		{"F_UNLCK", Const, 0},
    -		{"F_UNLCKSYS", Const, 0},
    -		{"F_VOLPOSMODE", Const, 0},
    -		{"F_WRITEBOOTSTRAP", Const, 0},
    -		{"F_WRLCK", Const, 0},
    -		{"Faccessat", Func, 0},
    -		{"Fallocate", Func, 0},
    -		{"Fbootstraptransfer_t", Type, 0},
    -		{"Fbootstraptransfer_t.Buffer", Field, 0},
    -		{"Fbootstraptransfer_t.Length", Field, 0},
    -		{"Fbootstraptransfer_t.Offset", Field, 0},
    -		{"Fchdir", Func, 0},
    -		{"Fchflags", Func, 0},
    -		{"Fchmod", Func, 0},
    -		{"Fchmodat", Func, 0},
    -		{"Fchown", Func, 0},
    -		{"Fchownat", Func, 0},
    -		{"FcntlFlock", Func, 3},
    -		{"FdSet", Type, 0},
    -		{"FdSet.Bits", Field, 0},
    -		{"FdSet.X__fds_bits", Field, 0},
    -		{"Fdatasync", Func, 0},
    -		{"FileNotifyInformation", Type, 0},
    -		{"FileNotifyInformation.Action", Field, 0},
    -		{"FileNotifyInformation.FileName", Field, 0},
    -		{"FileNotifyInformation.FileNameLength", Field, 0},
    -		{"FileNotifyInformation.NextEntryOffset", Field, 0},
    -		{"Filetime", Type, 0},
    -		{"Filetime.HighDateTime", Field, 0},
    -		{"Filetime.LowDateTime", Field, 0},
    -		{"FindClose", Func, 0},
    -		{"FindFirstFile", Func, 0},
    -		{"FindNextFile", Func, 0},
    -		{"Flock", Func, 0},
    -		{"Flock_t", Type, 0},
    -		{"Flock_t.Len", Field, 0},
    -		{"Flock_t.Pad_cgo_0", Field, 0},
    -		{"Flock_t.Pad_cgo_1", Field, 3},
    -		{"Flock_t.Pid", Field, 0},
    -		{"Flock_t.Start", Field, 0},
    -		{"Flock_t.Sysid", Field, 0},
    -		{"Flock_t.Type", Field, 0},
    -		{"Flock_t.Whence", Field, 0},
    -		{"FlushBpf", Func, 0},
    -		{"FlushFileBuffers", Func, 0},
    -		{"FlushViewOfFile", Func, 0},
    -		{"ForkExec", Func, 0},
    -		{"ForkLock", Var, 0},
    -		{"FormatMessage", Func, 0},
    -		{"Fpathconf", Func, 0},
    -		{"FreeAddrInfoW", Func, 1},
    -		{"FreeEnvironmentStrings", Func, 0},
    -		{"FreeLibrary", Func, 0},
    -		{"Fsid", Type, 0},
    -		{"Fsid.Val", Field, 0},
    -		{"Fsid.X__fsid_val", Field, 2},
    -		{"Fsid.X__val", Field, 0},
    -		{"Fstat", Func, 0},
    -		{"Fstatat", Func, 12},
    -		{"Fstatfs", Func, 0},
    -		{"Fstore_t", Type, 0},
    -		{"Fstore_t.Bytesalloc", Field, 0},
    -		{"Fstore_t.Flags", Field, 0},
    -		{"Fstore_t.Length", Field, 0},
    -		{"Fstore_t.Offset", Field, 0},
    -		{"Fstore_t.Posmode", Field, 0},
    -		{"Fsync", Func, 0},
    -		{"Ftruncate", Func, 0},
    -		{"FullPath", Func, 4},
    -		{"Futimes", Func, 0},
    -		{"Futimesat", Func, 0},
    -		{"GENERIC_ALL", Const, 0},
    -		{"GENERIC_EXECUTE", Const, 0},
    -		{"GENERIC_READ", Const, 0},
    -		{"GENERIC_WRITE", Const, 0},
    -		{"GUID", Type, 1},
    -		{"GUID.Data1", Field, 1},
    -		{"GUID.Data2", Field, 1},
    -		{"GUID.Data3", Field, 1},
    -		{"GUID.Data4", Field, 1},
    -		{"GetAcceptExSockaddrs", Func, 0},
    -		{"GetAdaptersInfo", Func, 0},
    -		{"GetAddrInfoW", Func, 1},
    -		{"GetCommandLine", Func, 0},
    -		{"GetComputerName", Func, 0},
    -		{"GetConsoleMode", Func, 1},
    -		{"GetCurrentDirectory", Func, 0},
    -		{"GetCurrentProcess", Func, 0},
    -		{"GetEnvironmentStrings", Func, 0},
    -		{"GetEnvironmentVariable", Func, 0},
    -		{"GetExitCodeProcess", Func, 0},
    -		{"GetFileAttributes", Func, 0},
    -		{"GetFileAttributesEx", Func, 0},
    -		{"GetFileExInfoStandard", Const, 0},
    -		{"GetFileExMaxInfoLevel", Const, 0},
    -		{"GetFileInformationByHandle", Func, 0},
    -		{"GetFileType", Func, 0},
    -		{"GetFullPathName", Func, 0},
    -		{"GetHostByName", Func, 0},
    -		{"GetIfEntry", Func, 0},
    -		{"GetLastError", Func, 0},
    -		{"GetLengthSid", Func, 0},
    -		{"GetLongPathName", Func, 0},
    -		{"GetProcAddress", Func, 0},
    -		{"GetProcessTimes", Func, 0},
    -		{"GetProtoByName", Func, 0},
    -		{"GetQueuedCompletionStatus", Func, 0},
    -		{"GetServByName", Func, 0},
    -		{"GetShortPathName", Func, 0},
    -		{"GetStartupInfo", Func, 0},
    -		{"GetStdHandle", Func, 0},
    -		{"GetSystemTimeAsFileTime", Func, 0},
    -		{"GetTempPath", Func, 0},
    -		{"GetTimeZoneInformation", Func, 0},
    -		{"GetTokenInformation", Func, 0},
    -		{"GetUserNameEx", Func, 0},
    -		{"GetUserProfileDirectory", Func, 0},
    -		{"GetVersion", Func, 0},
    -		{"Getcwd", Func, 0},
    -		{"Getdents", Func, 0},
    -		{"Getdirentries", Func, 0},
    -		{"Getdtablesize", Func, 0},
    -		{"Getegid", Func, 0},
    -		{"Getenv", Func, 0},
    -		{"Geteuid", Func, 0},
    -		{"Getfsstat", Func, 0},
    -		{"Getgid", Func, 0},
    -		{"Getgroups", Func, 0},
    -		{"Getpagesize", Func, 0},
    -		{"Getpeername", Func, 0},
    -		{"Getpgid", Func, 0},
    -		{"Getpgrp", Func, 0},
    -		{"Getpid", Func, 0},
    -		{"Getppid", Func, 0},
    -		{"Getpriority", Func, 0},
    -		{"Getrlimit", Func, 0},
    -		{"Getrusage", Func, 0},
    -		{"Getsid", Func, 0},
    -		{"Getsockname", Func, 0},
    -		{"Getsockopt", Func, 1},
    -		{"GetsockoptByte", Func, 0},
    -		{"GetsockoptICMPv6Filter", Func, 2},
    -		{"GetsockoptIPMreq", Func, 0},
    -		{"GetsockoptIPMreqn", Func, 0},
    -		{"GetsockoptIPv6MTUInfo", Func, 2},
    -		{"GetsockoptIPv6Mreq", Func, 0},
    -		{"GetsockoptInet4Addr", Func, 0},
    -		{"GetsockoptInt", Func, 0},
    -		{"GetsockoptUcred", Func, 1},
    -		{"Gettid", Func, 0},
    -		{"Gettimeofday", Func, 0},
    -		{"Getuid", Func, 0},
    -		{"Getwd", Func, 0},
    -		{"Getxattr", Func, 1},
    -		{"HANDLE_FLAG_INHERIT", Const, 0},
    -		{"HKEY_CLASSES_ROOT", Const, 0},
    -		{"HKEY_CURRENT_CONFIG", Const, 0},
    -		{"HKEY_CURRENT_USER", Const, 0},
    -		{"HKEY_DYN_DATA", Const, 0},
    -		{"HKEY_LOCAL_MACHINE", Const, 0},
    -		{"HKEY_PERFORMANCE_DATA", Const, 0},
    -		{"HKEY_USERS", Const, 0},
    -		{"HUPCL", Const, 0},
    -		{"Handle", Type, 0},
    -		{"Hostent", Type, 0},
    -		{"Hostent.AddrList", Field, 0},
    -		{"Hostent.AddrType", Field, 0},
    -		{"Hostent.Aliases", Field, 0},
    -		{"Hostent.Length", Field, 0},
    -		{"Hostent.Name", Field, 0},
    -		{"ICANON", Const, 0},
    -		{"ICMP6_FILTER", Const, 2},
    -		{"ICMPV6_FILTER", Const, 2},
    -		{"ICMPv6Filter", Type, 2},
    -		{"ICMPv6Filter.Data", Field, 2},
    -		{"ICMPv6Filter.Filt", Field, 2},
    -		{"ICRNL", Const, 0},
    -		{"IEXTEN", Const, 0},
    -		{"IFAN_ARRIVAL", Const, 1},
    -		{"IFAN_DEPARTURE", Const, 1},
    -		{"IFA_ADDRESS", Const, 0},
    -		{"IFA_ANYCAST", Const, 0},
    -		{"IFA_BROADCAST", Const, 0},
    -		{"IFA_CACHEINFO", Const, 0},
    -		{"IFA_F_DADFAILED", Const, 0},
    -		{"IFA_F_DEPRECATED", Const, 0},
    -		{"IFA_F_HOMEADDRESS", Const, 0},
    -		{"IFA_F_NODAD", Const, 0},
    -		{"IFA_F_OPTIMISTIC", Const, 0},
    -		{"IFA_F_PERMANENT", Const, 0},
    -		{"IFA_F_SECONDARY", Const, 0},
    -		{"IFA_F_TEMPORARY", Const, 0},
    -		{"IFA_F_TENTATIVE", Const, 0},
    -		{"IFA_LABEL", Const, 0},
    -		{"IFA_LOCAL", Const, 0},
    -		{"IFA_MAX", Const, 0},
    -		{"IFA_MULTICAST", Const, 0},
    -		{"IFA_ROUTE", Const, 1},
    -		{"IFA_UNSPEC", Const, 0},
    -		{"IFF_ALLMULTI", Const, 0},
    -		{"IFF_ALTPHYS", Const, 0},
    -		{"IFF_AUTOMEDIA", Const, 0},
    -		{"IFF_BROADCAST", Const, 0},
    -		{"IFF_CANTCHANGE", Const, 0},
    -		{"IFF_CANTCONFIG", Const, 1},
    -		{"IFF_DEBUG", Const, 0},
    -		{"IFF_DRV_OACTIVE", Const, 0},
    -		{"IFF_DRV_RUNNING", Const, 0},
    -		{"IFF_DYING", Const, 0},
    -		{"IFF_DYNAMIC", Const, 0},
    -		{"IFF_LINK0", Const, 0},
    -		{"IFF_LINK1", Const, 0},
    -		{"IFF_LINK2", Const, 0},
    -		{"IFF_LOOPBACK", Const, 0},
    -		{"IFF_MASTER", Const, 0},
    -		{"IFF_MONITOR", Const, 0},
    -		{"IFF_MULTICAST", Const, 0},
    -		{"IFF_NOARP", Const, 0},
    -		{"IFF_NOTRAILERS", Const, 0},
    -		{"IFF_NO_PI", Const, 0},
    -		{"IFF_OACTIVE", Const, 0},
    -		{"IFF_ONE_QUEUE", Const, 0},
    -		{"IFF_POINTOPOINT", Const, 0},
    -		{"IFF_POINTTOPOINT", Const, 0},
    -		{"IFF_PORTSEL", Const, 0},
    -		{"IFF_PPROMISC", Const, 0},
    -		{"IFF_PROMISC", Const, 0},
    -		{"IFF_RENAMING", Const, 0},
    -		{"IFF_RUNNING", Const, 0},
    -		{"IFF_SIMPLEX", Const, 0},
    -		{"IFF_SLAVE", Const, 0},
    -		{"IFF_SMART", Const, 0},
    -		{"IFF_STATICARP", Const, 0},
    -		{"IFF_TAP", Const, 0},
    -		{"IFF_TUN", Const, 0},
    -		{"IFF_TUN_EXCL", Const, 0},
    -		{"IFF_UP", Const, 0},
    -		{"IFF_VNET_HDR", Const, 0},
    -		{"IFLA_ADDRESS", Const, 0},
    -		{"IFLA_BROADCAST", Const, 0},
    -		{"IFLA_COST", Const, 0},
    -		{"IFLA_IFALIAS", Const, 0},
    -		{"IFLA_IFNAME", Const, 0},
    -		{"IFLA_LINK", Const, 0},
    -		{"IFLA_LINKINFO", Const, 0},
    -		{"IFLA_LINKMODE", Const, 0},
    -		{"IFLA_MAP", Const, 0},
    -		{"IFLA_MASTER", Const, 0},
    -		{"IFLA_MAX", Const, 0},
    -		{"IFLA_MTU", Const, 0},
    -		{"IFLA_NET_NS_PID", Const, 0},
    -		{"IFLA_OPERSTATE", Const, 0},
    -		{"IFLA_PRIORITY", Const, 0},
    -		{"IFLA_PROTINFO", Const, 0},
    -		{"IFLA_QDISC", Const, 0},
    -		{"IFLA_STATS", Const, 0},
    -		{"IFLA_TXQLEN", Const, 0},
    -		{"IFLA_UNSPEC", Const, 0},
    -		{"IFLA_WEIGHT", Const, 0},
    -		{"IFLA_WIRELESS", Const, 0},
    -		{"IFNAMSIZ", Const, 0},
    -		{"IFT_1822", Const, 0},
    -		{"IFT_A12MPPSWITCH", Const, 0},
    -		{"IFT_AAL2", Const, 0},
    -		{"IFT_AAL5", Const, 0},
    -		{"IFT_ADSL", Const, 0},
    -		{"IFT_AFLANE8023", Const, 0},
    -		{"IFT_AFLANE8025", Const, 0},
    -		{"IFT_ARAP", Const, 0},
    -		{"IFT_ARCNET", Const, 0},
    -		{"IFT_ARCNETPLUS", Const, 0},
    -		{"IFT_ASYNC", Const, 0},
    -		{"IFT_ATM", Const, 0},
    -		{"IFT_ATMDXI", Const, 0},
    -		{"IFT_ATMFUNI", Const, 0},
    -		{"IFT_ATMIMA", Const, 0},
    -		{"IFT_ATMLOGICAL", Const, 0},
    -		{"IFT_ATMRADIO", Const, 0},
    -		{"IFT_ATMSUBINTERFACE", Const, 0},
    -		{"IFT_ATMVCIENDPT", Const, 0},
    -		{"IFT_ATMVIRTUAL", Const, 0},
    -		{"IFT_BGPPOLICYACCOUNTING", Const, 0},
    -		{"IFT_BLUETOOTH", Const, 1},
    -		{"IFT_BRIDGE", Const, 0},
    -		{"IFT_BSC", Const, 0},
    -		{"IFT_CARP", Const, 0},
    -		{"IFT_CCTEMUL", Const, 0},
    -		{"IFT_CELLULAR", Const, 0},
    -		{"IFT_CEPT", Const, 0},
    -		{"IFT_CES", Const, 0},
    -		{"IFT_CHANNEL", Const, 0},
    -		{"IFT_CNR", Const, 0},
    -		{"IFT_COFFEE", Const, 0},
    -		{"IFT_COMPOSITELINK", Const, 0},
    -		{"IFT_DCN", Const, 0},
    -		{"IFT_DIGITALPOWERLINE", Const, 0},
    -		{"IFT_DIGITALWRAPPEROVERHEADCHANNEL", Const, 0},
    -		{"IFT_DLSW", Const, 0},
    -		{"IFT_DOCSCABLEDOWNSTREAM", Const, 0},
    -		{"IFT_DOCSCABLEMACLAYER", Const, 0},
    -		{"IFT_DOCSCABLEUPSTREAM", Const, 0},
    -		{"IFT_DOCSCABLEUPSTREAMCHANNEL", Const, 1},
    -		{"IFT_DS0", Const, 0},
    -		{"IFT_DS0BUNDLE", Const, 0},
    -		{"IFT_DS1FDL", Const, 0},
    -		{"IFT_DS3", Const, 0},
    -		{"IFT_DTM", Const, 0},
    -		{"IFT_DUMMY", Const, 1},
    -		{"IFT_DVBASILN", Const, 0},
    -		{"IFT_DVBASIOUT", Const, 0},
    -		{"IFT_DVBRCCDOWNSTREAM", Const, 0},
    -		{"IFT_DVBRCCMACLAYER", Const, 0},
    -		{"IFT_DVBRCCUPSTREAM", Const, 0},
    -		{"IFT_ECONET", Const, 1},
    -		{"IFT_ENC", Const, 0},
    -		{"IFT_EON", Const, 0},
    -		{"IFT_EPLRS", Const, 0},
    -		{"IFT_ESCON", Const, 0},
    -		{"IFT_ETHER", Const, 0},
    -		{"IFT_FAITH", Const, 0},
    -		{"IFT_FAST", Const, 0},
    -		{"IFT_FASTETHER", Const, 0},
    -		{"IFT_FASTETHERFX", Const, 0},
    -		{"IFT_FDDI", Const, 0},
    -		{"IFT_FIBRECHANNEL", Const, 0},
    -		{"IFT_FRAMERELAYINTERCONNECT", Const, 0},
    -		{"IFT_FRAMERELAYMPI", Const, 0},
    -		{"IFT_FRDLCIENDPT", Const, 0},
    -		{"IFT_FRELAY", Const, 0},
    -		{"IFT_FRELAYDCE", Const, 0},
    -		{"IFT_FRF16MFRBUNDLE", Const, 0},
    -		{"IFT_FRFORWARD", Const, 0},
    -		{"IFT_G703AT2MB", Const, 0},
    -		{"IFT_G703AT64K", Const, 0},
    -		{"IFT_GIF", Const, 0},
    -		{"IFT_GIGABITETHERNET", Const, 0},
    -		{"IFT_GR303IDT", Const, 0},
    -		{"IFT_GR303RDT", Const, 0},
    -		{"IFT_H323GATEKEEPER", Const, 0},
    -		{"IFT_H323PROXY", Const, 0},
    -		{"IFT_HDH1822", Const, 0},
    -		{"IFT_HDLC", Const, 0},
    -		{"IFT_HDSL2", Const, 0},
    -		{"IFT_HIPERLAN2", Const, 0},
    -		{"IFT_HIPPI", Const, 0},
    -		{"IFT_HIPPIINTERFACE", Const, 0},
    -		{"IFT_HOSTPAD", Const, 0},
    -		{"IFT_HSSI", Const, 0},
    -		{"IFT_HY", Const, 0},
    -		{"IFT_IBM370PARCHAN", Const, 0},
    -		{"IFT_IDSL", Const, 0},
    -		{"IFT_IEEE1394", Const, 0},
    -		{"IFT_IEEE80211", Const, 0},
    -		{"IFT_IEEE80212", Const, 0},
    -		{"IFT_IEEE8023ADLAG", Const, 0},
    -		{"IFT_IFGSN", Const, 0},
    -		{"IFT_IMT", Const, 0},
    -		{"IFT_INFINIBAND", Const, 1},
    -		{"IFT_INTERLEAVE", Const, 0},
    -		{"IFT_IP", Const, 0},
    -		{"IFT_IPFORWARD", Const, 0},
    -		{"IFT_IPOVERATM", Const, 0},
    -		{"IFT_IPOVERCDLC", Const, 0},
    -		{"IFT_IPOVERCLAW", Const, 0},
    -		{"IFT_IPSWITCH", Const, 0},
    -		{"IFT_IPXIP", Const, 0},
    -		{"IFT_ISDN", Const, 0},
    -		{"IFT_ISDNBASIC", Const, 0},
    -		{"IFT_ISDNPRIMARY", Const, 0},
    -		{"IFT_ISDNS", Const, 0},
    -		{"IFT_ISDNU", Const, 0},
    -		{"IFT_ISO88022LLC", Const, 0},
    -		{"IFT_ISO88023", Const, 0},
    -		{"IFT_ISO88024", Const, 0},
    -		{"IFT_ISO88025", Const, 0},
    -		{"IFT_ISO88025CRFPINT", Const, 0},
    -		{"IFT_ISO88025DTR", Const, 0},
    -		{"IFT_ISO88025FIBER", Const, 0},
    -		{"IFT_ISO88026", Const, 0},
    -		{"IFT_ISUP", Const, 0},
    -		{"IFT_L2VLAN", Const, 0},
    -		{"IFT_L3IPVLAN", Const, 0},
    -		{"IFT_L3IPXVLAN", Const, 0},
    -		{"IFT_LAPB", Const, 0},
    -		{"IFT_LAPD", Const, 0},
    -		{"IFT_LAPF", Const, 0},
    -		{"IFT_LINEGROUP", Const, 1},
    -		{"IFT_LOCALTALK", Const, 0},
    -		{"IFT_LOOP", Const, 0},
    -		{"IFT_MEDIAMAILOVERIP", Const, 0},
    -		{"IFT_MFSIGLINK", Const, 0},
    -		{"IFT_MIOX25", Const, 0},
    -		{"IFT_MODEM", Const, 0},
    -		{"IFT_MPC", Const, 0},
    -		{"IFT_MPLS", Const, 0},
    -		{"IFT_MPLSTUNNEL", Const, 0},
    -		{"IFT_MSDSL", Const, 0},
    -		{"IFT_MVL", Const, 0},
    -		{"IFT_MYRINET", Const, 0},
    -		{"IFT_NFAS", Const, 0},
    -		{"IFT_NSIP", Const, 0},
    -		{"IFT_OPTICALCHANNEL", Const, 0},
    -		{"IFT_OPTICALTRANSPORT", Const, 0},
    -		{"IFT_OTHER", Const, 0},
    -		{"IFT_P10", Const, 0},
    -		{"IFT_P80", Const, 0},
    -		{"IFT_PARA", Const, 0},
    -		{"IFT_PDP", Const, 0},
    -		{"IFT_PFLOG", Const, 0},
    -		{"IFT_PFLOW", Const, 1},
    -		{"IFT_PFSYNC", Const, 0},
    -		{"IFT_PLC", Const, 0},
    -		{"IFT_PON155", Const, 1},
    -		{"IFT_PON622", Const, 1},
    -		{"IFT_POS", Const, 0},
    -		{"IFT_PPP", Const, 0},
    -		{"IFT_PPPMULTILINKBUNDLE", Const, 0},
    -		{"IFT_PROPATM", Const, 1},
    -		{"IFT_PROPBWAP2MP", Const, 0},
    -		{"IFT_PROPCNLS", Const, 0},
    -		{"IFT_PROPDOCSWIRELESSDOWNSTREAM", Const, 0},
    -		{"IFT_PROPDOCSWIRELESSMACLAYER", Const, 0},
    -		{"IFT_PROPDOCSWIRELESSUPSTREAM", Const, 0},
    -		{"IFT_PROPMUX", Const, 0},
    -		{"IFT_PROPVIRTUAL", Const, 0},
    -		{"IFT_PROPWIRELESSP2P", Const, 0},
    -		{"IFT_PTPSERIAL", Const, 0},
    -		{"IFT_PVC", Const, 0},
    -		{"IFT_Q2931", Const, 1},
    -		{"IFT_QLLC", Const, 0},
    -		{"IFT_RADIOMAC", Const, 0},
    -		{"IFT_RADSL", Const, 0},
    -		{"IFT_REACHDSL", Const, 0},
    -		{"IFT_RFC1483", Const, 0},
    -		{"IFT_RS232", Const, 0},
    -		{"IFT_RSRB", Const, 0},
    -		{"IFT_SDLC", Const, 0},
    -		{"IFT_SDSL", Const, 0},
    -		{"IFT_SHDSL", Const, 0},
    -		{"IFT_SIP", Const, 0},
    -		{"IFT_SIPSIG", Const, 1},
    -		{"IFT_SIPTG", Const, 1},
    -		{"IFT_SLIP", Const, 0},
    -		{"IFT_SMDSDXI", Const, 0},
    -		{"IFT_SMDSICIP", Const, 0},
    -		{"IFT_SONET", Const, 0},
    -		{"IFT_SONETOVERHEADCHANNEL", Const, 0},
    -		{"IFT_SONETPATH", Const, 0},
    -		{"IFT_SONETVT", Const, 0},
    -		{"IFT_SRP", Const, 0},
    -		{"IFT_SS7SIGLINK", Const, 0},
    -		{"IFT_STACKTOSTACK", Const, 0},
    -		{"IFT_STARLAN", Const, 0},
    -		{"IFT_STF", Const, 0},
    -		{"IFT_T1", Const, 0},
    -		{"IFT_TDLC", Const, 0},
    -		{"IFT_TELINK", Const, 1},
    -		{"IFT_TERMPAD", Const, 0},
    -		{"IFT_TR008", Const, 0},
    -		{"IFT_TRANSPHDLC", Const, 0},
    -		{"IFT_TUNNEL", Const, 0},
    -		{"IFT_ULTRA", Const, 0},
    -		{"IFT_USB", Const, 0},
    -		{"IFT_V11", Const, 0},
    -		{"IFT_V35", Const, 0},
    -		{"IFT_V36", Const, 0},
    -		{"IFT_V37", Const, 0},
    -		{"IFT_VDSL", Const, 0},
    -		{"IFT_VIRTUALIPADDRESS", Const, 0},
    -		{"IFT_VIRTUALTG", Const, 1},
    -		{"IFT_VOICEDID", Const, 1},
    -		{"IFT_VOICEEM", Const, 0},
    -		{"IFT_VOICEEMFGD", Const, 1},
    -		{"IFT_VOICEENCAP", Const, 0},
    -		{"IFT_VOICEFGDEANA", Const, 1},
    -		{"IFT_VOICEFXO", Const, 0},
    -		{"IFT_VOICEFXS", Const, 0},
    -		{"IFT_VOICEOVERATM", Const, 0},
    -		{"IFT_VOICEOVERCABLE", Const, 1},
    -		{"IFT_VOICEOVERFRAMERELAY", Const, 0},
    -		{"IFT_VOICEOVERIP", Const, 0},
    -		{"IFT_X213", Const, 0},
    -		{"IFT_X25", Const, 0},
    -		{"IFT_X25DDN", Const, 0},
    -		{"IFT_X25HUNTGROUP", Const, 0},
    -		{"IFT_X25MLP", Const, 0},
    -		{"IFT_X25PLE", Const, 0},
    -		{"IFT_XETHER", Const, 0},
    -		{"IGNBRK", Const, 0},
    -		{"IGNCR", Const, 0},
    -		{"IGNORE", Const, 0},
    -		{"IGNPAR", Const, 0},
    -		{"IMAXBEL", Const, 0},
    -		{"INFINITE", Const, 0},
    -		{"INLCR", Const, 0},
    -		{"INPCK", Const, 0},
    -		{"INVALID_FILE_ATTRIBUTES", Const, 0},
    -		{"IN_ACCESS", Const, 0},
    -		{"IN_ALL_EVENTS", Const, 0},
    -		{"IN_ATTRIB", Const, 0},
    -		{"IN_CLASSA_HOST", Const, 0},
    -		{"IN_CLASSA_MAX", Const, 0},
    -		{"IN_CLASSA_NET", Const, 0},
    -		{"IN_CLASSA_NSHIFT", Const, 0},
    -		{"IN_CLASSB_HOST", Const, 0},
    -		{"IN_CLASSB_MAX", Const, 0},
    -		{"IN_CLASSB_NET", Const, 0},
    -		{"IN_CLASSB_NSHIFT", Const, 0},
    -		{"IN_CLASSC_HOST", Const, 0},
    -		{"IN_CLASSC_NET", Const, 0},
    -		{"IN_CLASSC_NSHIFT", Const, 0},
    -		{"IN_CLASSD_HOST", Const, 0},
    -		{"IN_CLASSD_NET", Const, 0},
    -		{"IN_CLASSD_NSHIFT", Const, 0},
    -		{"IN_CLOEXEC", Const, 0},
    -		{"IN_CLOSE", Const, 0},
    -		{"IN_CLOSE_NOWRITE", Const, 0},
    -		{"IN_CLOSE_WRITE", Const, 0},
    -		{"IN_CREATE", Const, 0},
    -		{"IN_DELETE", Const, 0},
    -		{"IN_DELETE_SELF", Const, 0},
    -		{"IN_DONT_FOLLOW", Const, 0},
    -		{"IN_EXCL_UNLINK", Const, 0},
    -		{"IN_IGNORED", Const, 0},
    -		{"IN_ISDIR", Const, 0},
    -		{"IN_LINKLOCALNETNUM", Const, 0},
    -		{"IN_LOOPBACKNET", Const, 0},
    -		{"IN_MASK_ADD", Const, 0},
    -		{"IN_MODIFY", Const, 0},
    -		{"IN_MOVE", Const, 0},
    -		{"IN_MOVED_FROM", Const, 0},
    -		{"IN_MOVED_TO", Const, 0},
    -		{"IN_MOVE_SELF", Const, 0},
    -		{"IN_NONBLOCK", Const, 0},
    -		{"IN_ONESHOT", Const, 0},
    -		{"IN_ONLYDIR", Const, 0},
    -		{"IN_OPEN", Const, 0},
    -		{"IN_Q_OVERFLOW", Const, 0},
    -		{"IN_RFC3021_HOST", Const, 1},
    -		{"IN_RFC3021_MASK", Const, 1},
    -		{"IN_RFC3021_NET", Const, 1},
    -		{"IN_RFC3021_NSHIFT", Const, 1},
    -		{"IN_UNMOUNT", Const, 0},
    -		{"IOC_IN", Const, 1},
    -		{"IOC_INOUT", Const, 1},
    -		{"IOC_OUT", Const, 1},
    -		{"IOC_VENDOR", Const, 3},
    -		{"IOC_WS2", Const, 1},
    -		{"IO_REPARSE_TAG_SYMLINK", Const, 4},
    -		{"IPMreq", Type, 0},
    -		{"IPMreq.Interface", Field, 0},
    -		{"IPMreq.Multiaddr", Field, 0},
    -		{"IPMreqn", Type, 0},
    -		{"IPMreqn.Address", Field, 0},
    -		{"IPMreqn.Ifindex", Field, 0},
    -		{"IPMreqn.Multiaddr", Field, 0},
    -		{"IPPROTO_3PC", Const, 0},
    -		{"IPPROTO_ADFS", Const, 0},
    -		{"IPPROTO_AH", Const, 0},
    -		{"IPPROTO_AHIP", Const, 0},
    -		{"IPPROTO_APES", Const, 0},
    -		{"IPPROTO_ARGUS", Const, 0},
    -		{"IPPROTO_AX25", Const, 0},
    -		{"IPPROTO_BHA", Const, 0},
    -		{"IPPROTO_BLT", Const, 0},
    -		{"IPPROTO_BRSATMON", Const, 0},
    -		{"IPPROTO_CARP", Const, 0},
    -		{"IPPROTO_CFTP", Const, 0},
    -		{"IPPROTO_CHAOS", Const, 0},
    -		{"IPPROTO_CMTP", Const, 0},
    -		{"IPPROTO_COMP", Const, 0},
    -		{"IPPROTO_CPHB", Const, 0},
    -		{"IPPROTO_CPNX", Const, 0},
    -		{"IPPROTO_DCCP", Const, 0},
    -		{"IPPROTO_DDP", Const, 0},
    -		{"IPPROTO_DGP", Const, 0},
    -		{"IPPROTO_DIVERT", Const, 0},
    -		{"IPPROTO_DIVERT_INIT", Const, 3},
    -		{"IPPROTO_DIVERT_RESP", Const, 3},
    -		{"IPPROTO_DONE", Const, 0},
    -		{"IPPROTO_DSTOPTS", Const, 0},
    -		{"IPPROTO_EGP", Const, 0},
    -		{"IPPROTO_EMCON", Const, 0},
    -		{"IPPROTO_ENCAP", Const, 0},
    -		{"IPPROTO_EON", Const, 0},
    -		{"IPPROTO_ESP", Const, 0},
    -		{"IPPROTO_ETHERIP", Const, 0},
    -		{"IPPROTO_FRAGMENT", Const, 0},
    -		{"IPPROTO_GGP", Const, 0},
    -		{"IPPROTO_GMTP", Const, 0},
    -		{"IPPROTO_GRE", Const, 0},
    -		{"IPPROTO_HELLO", Const, 0},
    -		{"IPPROTO_HMP", Const, 0},
    -		{"IPPROTO_HOPOPTS", Const, 0},
    -		{"IPPROTO_ICMP", Const, 0},
    -		{"IPPROTO_ICMPV6", Const, 0},
    -		{"IPPROTO_IDP", Const, 0},
    -		{"IPPROTO_IDPR", Const, 0},
    -		{"IPPROTO_IDRP", Const, 0},
    -		{"IPPROTO_IGMP", Const, 0},
    -		{"IPPROTO_IGP", Const, 0},
    -		{"IPPROTO_IGRP", Const, 0},
    -		{"IPPROTO_IL", Const, 0},
    -		{"IPPROTO_INLSP", Const, 0},
    -		{"IPPROTO_INP", Const, 0},
    -		{"IPPROTO_IP", Const, 0},
    -		{"IPPROTO_IPCOMP", Const, 0},
    -		{"IPPROTO_IPCV", Const, 0},
    -		{"IPPROTO_IPEIP", Const, 0},
    -		{"IPPROTO_IPIP", Const, 0},
    -		{"IPPROTO_IPPC", Const, 0},
    -		{"IPPROTO_IPV4", Const, 0},
    -		{"IPPROTO_IPV6", Const, 0},
    -		{"IPPROTO_IPV6_ICMP", Const, 1},
    -		{"IPPROTO_IRTP", Const, 0},
    -		{"IPPROTO_KRYPTOLAN", Const, 0},
    -		{"IPPROTO_LARP", Const, 0},
    -		{"IPPROTO_LEAF1", Const, 0},
    -		{"IPPROTO_LEAF2", Const, 0},
    -		{"IPPROTO_MAX", Const, 0},
    -		{"IPPROTO_MAXID", Const, 0},
    -		{"IPPROTO_MEAS", Const, 0},
    -		{"IPPROTO_MH", Const, 1},
    -		{"IPPROTO_MHRP", Const, 0},
    -		{"IPPROTO_MICP", Const, 0},
    -		{"IPPROTO_MOBILE", Const, 0},
    -		{"IPPROTO_MPLS", Const, 1},
    -		{"IPPROTO_MTP", Const, 0},
    -		{"IPPROTO_MUX", Const, 0},
    -		{"IPPROTO_ND", Const, 0},
    -		{"IPPROTO_NHRP", Const, 0},
    -		{"IPPROTO_NONE", Const, 0},
    -		{"IPPROTO_NSP", Const, 0},
    -		{"IPPROTO_NVPII", Const, 0},
    -		{"IPPROTO_OLD_DIVERT", Const, 0},
    -		{"IPPROTO_OSPFIGP", Const, 0},
    -		{"IPPROTO_PFSYNC", Const, 0},
    -		{"IPPROTO_PGM", Const, 0},
    -		{"IPPROTO_PIGP", Const, 0},
    -		{"IPPROTO_PIM", Const, 0},
    -		{"IPPROTO_PRM", Const, 0},
    -		{"IPPROTO_PUP", Const, 0},
    -		{"IPPROTO_PVP", Const, 0},
    -		{"IPPROTO_RAW", Const, 0},
    -		{"IPPROTO_RCCMON", Const, 0},
    -		{"IPPROTO_RDP", Const, 0},
    -		{"IPPROTO_ROUTING", Const, 0},
    -		{"IPPROTO_RSVP", Const, 0},
    -		{"IPPROTO_RVD", Const, 0},
    -		{"IPPROTO_SATEXPAK", Const, 0},
    -		{"IPPROTO_SATMON", Const, 0},
    -		{"IPPROTO_SCCSP", Const, 0},
    -		{"IPPROTO_SCTP", Const, 0},
    -		{"IPPROTO_SDRP", Const, 0},
    -		{"IPPROTO_SEND", Const, 1},
    -		{"IPPROTO_SEP", Const, 0},
    -		{"IPPROTO_SKIP", Const, 0},
    -		{"IPPROTO_SPACER", Const, 0},
    -		{"IPPROTO_SRPC", Const, 0},
    -		{"IPPROTO_ST", Const, 0},
    -		{"IPPROTO_SVMTP", Const, 0},
    -		{"IPPROTO_SWIPE", Const, 0},
    -		{"IPPROTO_TCF", Const, 0},
    -		{"IPPROTO_TCP", Const, 0},
    -		{"IPPROTO_TLSP", Const, 0},
    -		{"IPPROTO_TP", Const, 0},
    -		{"IPPROTO_TPXX", Const, 0},
    -		{"IPPROTO_TRUNK1", Const, 0},
    -		{"IPPROTO_TRUNK2", Const, 0},
    -		{"IPPROTO_TTP", Const, 0},
    -		{"IPPROTO_UDP", Const, 0},
    -		{"IPPROTO_UDPLITE", Const, 0},
    -		{"IPPROTO_VINES", Const, 0},
    -		{"IPPROTO_VISA", Const, 0},
    -		{"IPPROTO_VMTP", Const, 0},
    -		{"IPPROTO_VRRP", Const, 1},
    -		{"IPPROTO_WBEXPAK", Const, 0},
    -		{"IPPROTO_WBMON", Const, 0},
    -		{"IPPROTO_WSN", Const, 0},
    -		{"IPPROTO_XNET", Const, 0},
    -		{"IPPROTO_XTP", Const, 0},
    -		{"IPV6_2292DSTOPTS", Const, 0},
    -		{"IPV6_2292HOPLIMIT", Const, 0},
    -		{"IPV6_2292HOPOPTS", Const, 0},
    -		{"IPV6_2292NEXTHOP", Const, 0},
    -		{"IPV6_2292PKTINFO", Const, 0},
    -		{"IPV6_2292PKTOPTIONS", Const, 0},
    -		{"IPV6_2292RTHDR", Const, 0},
    -		{"IPV6_ADDRFORM", Const, 0},
    -		{"IPV6_ADD_MEMBERSHIP", Const, 0},
    -		{"IPV6_AUTHHDR", Const, 0},
    -		{"IPV6_AUTH_LEVEL", Const, 1},
    -		{"IPV6_AUTOFLOWLABEL", Const, 0},
    -		{"IPV6_BINDANY", Const, 0},
    -		{"IPV6_BINDV6ONLY", Const, 0},
    -		{"IPV6_BOUND_IF", Const, 0},
    -		{"IPV6_CHECKSUM", Const, 0},
    -		{"IPV6_DEFAULT_MULTICAST_HOPS", Const, 0},
    -		{"IPV6_DEFAULT_MULTICAST_LOOP", Const, 0},
    -		{"IPV6_DEFHLIM", Const, 0},
    -		{"IPV6_DONTFRAG", Const, 0},
    -		{"IPV6_DROP_MEMBERSHIP", Const, 0},
    -		{"IPV6_DSTOPTS", Const, 0},
    -		{"IPV6_ESP_NETWORK_LEVEL", Const, 1},
    -		{"IPV6_ESP_TRANS_LEVEL", Const, 1},
    -		{"IPV6_FAITH", Const, 0},
    -		{"IPV6_FLOWINFO_MASK", Const, 0},
    -		{"IPV6_FLOWLABEL_MASK", Const, 0},
    -		{"IPV6_FRAGTTL", Const, 0},
    -		{"IPV6_FW_ADD", Const, 0},
    -		{"IPV6_FW_DEL", Const, 0},
    -		{"IPV6_FW_FLUSH", Const, 0},
    -		{"IPV6_FW_GET", Const, 0},
    -		{"IPV6_FW_ZERO", Const, 0},
    -		{"IPV6_HLIMDEC", Const, 0},
    -		{"IPV6_HOPLIMIT", Const, 0},
    -		{"IPV6_HOPOPTS", Const, 0},
    -		{"IPV6_IPCOMP_LEVEL", Const, 1},
    -		{"IPV6_IPSEC_POLICY", Const, 0},
    -		{"IPV6_JOIN_ANYCAST", Const, 0},
    -		{"IPV6_JOIN_GROUP", Const, 0},
    -		{"IPV6_LEAVE_ANYCAST", Const, 0},
    -		{"IPV6_LEAVE_GROUP", Const, 0},
    -		{"IPV6_MAXHLIM", Const, 0},
    -		{"IPV6_MAXOPTHDR", Const, 0},
    -		{"IPV6_MAXPACKET", Const, 0},
    -		{"IPV6_MAX_GROUP_SRC_FILTER", Const, 0},
    -		{"IPV6_MAX_MEMBERSHIPS", Const, 0},
    -		{"IPV6_MAX_SOCK_SRC_FILTER", Const, 0},
    -		{"IPV6_MIN_MEMBERSHIPS", Const, 0},
    -		{"IPV6_MMTU", Const, 0},
    -		{"IPV6_MSFILTER", Const, 0},
    -		{"IPV6_MTU", Const, 0},
    -		{"IPV6_MTU_DISCOVER", Const, 0},
    -		{"IPV6_MULTICAST_HOPS", Const, 0},
    -		{"IPV6_MULTICAST_IF", Const, 0},
    -		{"IPV6_MULTICAST_LOOP", Const, 0},
    -		{"IPV6_NEXTHOP", Const, 0},
    -		{"IPV6_OPTIONS", Const, 1},
    -		{"IPV6_PATHMTU", Const, 0},
    -		{"IPV6_PIPEX", Const, 1},
    -		{"IPV6_PKTINFO", Const, 0},
    -		{"IPV6_PMTUDISC_DO", Const, 0},
    -		{"IPV6_PMTUDISC_DONT", Const, 0},
    -		{"IPV6_PMTUDISC_PROBE", Const, 0},
    -		{"IPV6_PMTUDISC_WANT", Const, 0},
    -		{"IPV6_PORTRANGE", Const, 0},
    -		{"IPV6_PORTRANGE_DEFAULT", Const, 0},
    -		{"IPV6_PORTRANGE_HIGH", Const, 0},
    -		{"IPV6_PORTRANGE_LOW", Const, 0},
    -		{"IPV6_PREFER_TEMPADDR", Const, 0},
    -		{"IPV6_RECVDSTOPTS", Const, 0},
    -		{"IPV6_RECVDSTPORT", Const, 3},
    -		{"IPV6_RECVERR", Const, 0},
    -		{"IPV6_RECVHOPLIMIT", Const, 0},
    -		{"IPV6_RECVHOPOPTS", Const, 0},
    -		{"IPV6_RECVPATHMTU", Const, 0},
    -		{"IPV6_RECVPKTINFO", Const, 0},
    -		{"IPV6_RECVRTHDR", Const, 0},
    -		{"IPV6_RECVTCLASS", Const, 0},
    -		{"IPV6_ROUTER_ALERT", Const, 0},
    -		{"IPV6_RTABLE", Const, 1},
    -		{"IPV6_RTHDR", Const, 0},
    -		{"IPV6_RTHDRDSTOPTS", Const, 0},
    -		{"IPV6_RTHDR_LOOSE", Const, 0},
    -		{"IPV6_RTHDR_STRICT", Const, 0},
    -		{"IPV6_RTHDR_TYPE_0", Const, 0},
    -		{"IPV6_RXDSTOPTS", Const, 0},
    -		{"IPV6_RXHOPOPTS", Const, 0},
    -		{"IPV6_SOCKOPT_RESERVED1", Const, 0},
    -		{"IPV6_TCLASS", Const, 0},
    -		{"IPV6_UNICAST_HOPS", Const, 0},
    -		{"IPV6_USE_MIN_MTU", Const, 0},
    -		{"IPV6_V6ONLY", Const, 0},
    -		{"IPV6_VERSION", Const, 0},
    -		{"IPV6_VERSION_MASK", Const, 0},
    -		{"IPV6_XFRM_POLICY", Const, 0},
    -		{"IP_ADD_MEMBERSHIP", Const, 0},
    -		{"IP_ADD_SOURCE_MEMBERSHIP", Const, 0},
    -		{"IP_AUTH_LEVEL", Const, 1},
    -		{"IP_BINDANY", Const, 0},
    -		{"IP_BLOCK_SOURCE", Const, 0},
    -		{"IP_BOUND_IF", Const, 0},
    -		{"IP_DEFAULT_MULTICAST_LOOP", Const, 0},
    -		{"IP_DEFAULT_MULTICAST_TTL", Const, 0},
    -		{"IP_DF", Const, 0},
    -		{"IP_DIVERTFL", Const, 3},
    -		{"IP_DONTFRAG", Const, 0},
    -		{"IP_DROP_MEMBERSHIP", Const, 0},
    -		{"IP_DROP_SOURCE_MEMBERSHIP", Const, 0},
    -		{"IP_DUMMYNET3", Const, 0},
    -		{"IP_DUMMYNET_CONFIGURE", Const, 0},
    -		{"IP_DUMMYNET_DEL", Const, 0},
    -		{"IP_DUMMYNET_FLUSH", Const, 0},
    -		{"IP_DUMMYNET_GET", Const, 0},
    -		{"IP_EF", Const, 1},
    -		{"IP_ERRORMTU", Const, 1},
    -		{"IP_ESP_NETWORK_LEVEL", Const, 1},
    -		{"IP_ESP_TRANS_LEVEL", Const, 1},
    -		{"IP_FAITH", Const, 0},
    -		{"IP_FREEBIND", Const, 0},
    -		{"IP_FW3", Const, 0},
    -		{"IP_FW_ADD", Const, 0},
    -		{"IP_FW_DEL", Const, 0},
    -		{"IP_FW_FLUSH", Const, 0},
    -		{"IP_FW_GET", Const, 0},
    -		{"IP_FW_NAT_CFG", Const, 0},
    -		{"IP_FW_NAT_DEL", Const, 0},
    -		{"IP_FW_NAT_GET_CONFIG", Const, 0},
    -		{"IP_FW_NAT_GET_LOG", Const, 0},
    -		{"IP_FW_RESETLOG", Const, 0},
    -		{"IP_FW_TABLE_ADD", Const, 0},
    -		{"IP_FW_TABLE_DEL", Const, 0},
    -		{"IP_FW_TABLE_FLUSH", Const, 0},
    -		{"IP_FW_TABLE_GETSIZE", Const, 0},
    -		{"IP_FW_TABLE_LIST", Const, 0},
    -		{"IP_FW_ZERO", Const, 0},
    -		{"IP_HDRINCL", Const, 0},
    -		{"IP_IPCOMP_LEVEL", Const, 1},
    -		{"IP_IPSECFLOWINFO", Const, 1},
    -		{"IP_IPSEC_LOCAL_AUTH", Const, 1},
    -		{"IP_IPSEC_LOCAL_CRED", Const, 1},
    -		{"IP_IPSEC_LOCAL_ID", Const, 1},
    -		{"IP_IPSEC_POLICY", Const, 0},
    -		{"IP_IPSEC_REMOTE_AUTH", Const, 1},
    -		{"IP_IPSEC_REMOTE_CRED", Const, 1},
    -		{"IP_IPSEC_REMOTE_ID", Const, 1},
    -		{"IP_MAXPACKET", Const, 0},
    -		{"IP_MAX_GROUP_SRC_FILTER", Const, 0},
    -		{"IP_MAX_MEMBERSHIPS", Const, 0},
    -		{"IP_MAX_SOCK_MUTE_FILTER", Const, 0},
    -		{"IP_MAX_SOCK_SRC_FILTER", Const, 0},
    -		{"IP_MAX_SOURCE_FILTER", Const, 0},
    -		{"IP_MF", Const, 0},
    -		{"IP_MINFRAGSIZE", Const, 1},
    -		{"IP_MINTTL", Const, 0},
    -		{"IP_MIN_MEMBERSHIPS", Const, 0},
    -		{"IP_MSFILTER", Const, 0},
    -		{"IP_MSS", Const, 0},
    -		{"IP_MTU", Const, 0},
    -		{"IP_MTU_DISCOVER", Const, 0},
    -		{"IP_MULTICAST_IF", Const, 0},
    -		{"IP_MULTICAST_IFINDEX", Const, 0},
    -		{"IP_MULTICAST_LOOP", Const, 0},
    -		{"IP_MULTICAST_TTL", Const, 0},
    -		{"IP_MULTICAST_VIF", Const, 0},
    -		{"IP_NAT__XXX", Const, 0},
    -		{"IP_OFFMASK", Const, 0},
    -		{"IP_OLD_FW_ADD", Const, 0},
    -		{"IP_OLD_FW_DEL", Const, 0},
    -		{"IP_OLD_FW_FLUSH", Const, 0},
    -		{"IP_OLD_FW_GET", Const, 0},
    -		{"IP_OLD_FW_RESETLOG", Const, 0},
    -		{"IP_OLD_FW_ZERO", Const, 0},
    -		{"IP_ONESBCAST", Const, 0},
    -		{"IP_OPTIONS", Const, 0},
    -		{"IP_ORIGDSTADDR", Const, 0},
    -		{"IP_PASSSEC", Const, 0},
    -		{"IP_PIPEX", Const, 1},
    -		{"IP_PKTINFO", Const, 0},
    -		{"IP_PKTOPTIONS", Const, 0},
    -		{"IP_PMTUDISC", Const, 0},
    -		{"IP_PMTUDISC_DO", Const, 0},
    -		{"IP_PMTUDISC_DONT", Const, 0},
    -		{"IP_PMTUDISC_PROBE", Const, 0},
    -		{"IP_PMTUDISC_WANT", Const, 0},
    -		{"IP_PORTRANGE", Const, 0},
    -		{"IP_PORTRANGE_DEFAULT", Const, 0},
    -		{"IP_PORTRANGE_HIGH", Const, 0},
    -		{"IP_PORTRANGE_LOW", Const, 0},
    -		{"IP_RECVDSTADDR", Const, 0},
    -		{"IP_RECVDSTPORT", Const, 1},
    -		{"IP_RECVERR", Const, 0},
    -		{"IP_RECVIF", Const, 0},
    -		{"IP_RECVOPTS", Const, 0},
    -		{"IP_RECVORIGDSTADDR", Const, 0},
    -		{"IP_RECVPKTINFO", Const, 0},
    -		{"IP_RECVRETOPTS", Const, 0},
    -		{"IP_RECVRTABLE", Const, 1},
    -		{"IP_RECVTOS", Const, 0},
    -		{"IP_RECVTTL", Const, 0},
    -		{"IP_RETOPTS", Const, 0},
    -		{"IP_RF", Const, 0},
    -		{"IP_ROUTER_ALERT", Const, 0},
    -		{"IP_RSVP_OFF", Const, 0},
    -		{"IP_RSVP_ON", Const, 0},
    -		{"IP_RSVP_VIF_OFF", Const, 0},
    -		{"IP_RSVP_VIF_ON", Const, 0},
    -		{"IP_RTABLE", Const, 1},
    -		{"IP_SENDSRCADDR", Const, 0},
    -		{"IP_STRIPHDR", Const, 0},
    -		{"IP_TOS", Const, 0},
    -		{"IP_TRAFFIC_MGT_BACKGROUND", Const, 0},
    -		{"IP_TRANSPARENT", Const, 0},
    -		{"IP_TTL", Const, 0},
    -		{"IP_UNBLOCK_SOURCE", Const, 0},
    -		{"IP_XFRM_POLICY", Const, 0},
    -		{"IPv6MTUInfo", Type, 2},
    -		{"IPv6MTUInfo.Addr", Field, 2},
    -		{"IPv6MTUInfo.Mtu", Field, 2},
    -		{"IPv6Mreq", Type, 0},
    -		{"IPv6Mreq.Interface", Field, 0},
    -		{"IPv6Mreq.Multiaddr", Field, 0},
    -		{"ISIG", Const, 0},
    -		{"ISTRIP", Const, 0},
    -		{"IUCLC", Const, 0},
    -		{"IUTF8", Const, 0},
    -		{"IXANY", Const, 0},
    -		{"IXOFF", Const, 0},
    -		{"IXON", Const, 0},
    -		{"IfAddrmsg", Type, 0},
    -		{"IfAddrmsg.Family", Field, 0},
    -		{"IfAddrmsg.Flags", Field, 0},
    -		{"IfAddrmsg.Index", Field, 0},
    -		{"IfAddrmsg.Prefixlen", Field, 0},
    -		{"IfAddrmsg.Scope", Field, 0},
    -		{"IfAnnounceMsghdr", Type, 1},
    -		{"IfAnnounceMsghdr.Hdrlen", Field, 2},
    -		{"IfAnnounceMsghdr.Index", Field, 1},
    -		{"IfAnnounceMsghdr.Msglen", Field, 1},
    -		{"IfAnnounceMsghdr.Name", Field, 1},
    -		{"IfAnnounceMsghdr.Type", Field, 1},
    -		{"IfAnnounceMsghdr.Version", Field, 1},
    -		{"IfAnnounceMsghdr.What", Field, 1},
    -		{"IfData", Type, 0},
    -		{"IfData.Addrlen", Field, 0},
    -		{"IfData.Baudrate", Field, 0},
    -		{"IfData.Capabilities", Field, 2},
    -		{"IfData.Collisions", Field, 0},
    -		{"IfData.Datalen", Field, 0},
    -		{"IfData.Epoch", Field, 0},
    -		{"IfData.Hdrlen", Field, 0},
    -		{"IfData.Hwassist", Field, 0},
    -		{"IfData.Ibytes", Field, 0},
    -		{"IfData.Ierrors", Field, 0},
    -		{"IfData.Imcasts", Field, 0},
    -		{"IfData.Ipackets", Field, 0},
    -		{"IfData.Iqdrops", Field, 0},
    -		{"IfData.Lastchange", Field, 0},
    -		{"IfData.Link_state", Field, 0},
    -		{"IfData.Mclpool", Field, 2},
    -		{"IfData.Metric", Field, 0},
    -		{"IfData.Mtu", Field, 0},
    -		{"IfData.Noproto", Field, 0},
    -		{"IfData.Obytes", Field, 0},
    -		{"IfData.Oerrors", Field, 0},
    -		{"IfData.Omcasts", Field, 0},
    -		{"IfData.Opackets", Field, 0},
    -		{"IfData.Pad", Field, 2},
    -		{"IfData.Pad_cgo_0", Field, 2},
    -		{"IfData.Pad_cgo_1", Field, 2},
    -		{"IfData.Physical", Field, 0},
    -		{"IfData.Recvquota", Field, 0},
    -		{"IfData.Recvtiming", Field, 0},
    -		{"IfData.Reserved1", Field, 0},
    -		{"IfData.Reserved2", Field, 0},
    -		{"IfData.Spare_char1", Field, 0},
    -		{"IfData.Spare_char2", Field, 0},
    -		{"IfData.Type", Field, 0},
    -		{"IfData.Typelen", Field, 0},
    -		{"IfData.Unused1", Field, 0},
    -		{"IfData.Unused2", Field, 0},
    -		{"IfData.Xmitquota", Field, 0},
    -		{"IfData.Xmittiming", Field, 0},
    -		{"IfInfomsg", Type, 0},
    -		{"IfInfomsg.Change", Field, 0},
    -		{"IfInfomsg.Family", Field, 0},
    -		{"IfInfomsg.Flags", Field, 0},
    -		{"IfInfomsg.Index", Field, 0},
    -		{"IfInfomsg.Type", Field, 0},
    -		{"IfInfomsg.X__ifi_pad", Field, 0},
    -		{"IfMsghdr", Type, 0},
    -		{"IfMsghdr.Addrs", Field, 0},
    -		{"IfMsghdr.Data", Field, 0},
    -		{"IfMsghdr.Flags", Field, 0},
    -		{"IfMsghdr.Hdrlen", Field, 2},
    -		{"IfMsghdr.Index", Field, 0},
    -		{"IfMsghdr.Msglen", Field, 0},
    -		{"IfMsghdr.Pad1", Field, 2},
    -		{"IfMsghdr.Pad2", Field, 2},
    -		{"IfMsghdr.Pad_cgo_0", Field, 0},
    -		{"IfMsghdr.Pad_cgo_1", Field, 2},
    -		{"IfMsghdr.Tableid", Field, 2},
    -		{"IfMsghdr.Type", Field, 0},
    -		{"IfMsghdr.Version", Field, 0},
    -		{"IfMsghdr.Xflags", Field, 2},
    -		{"IfaMsghdr", Type, 0},
    -		{"IfaMsghdr.Addrs", Field, 0},
    -		{"IfaMsghdr.Flags", Field, 0},
    -		{"IfaMsghdr.Hdrlen", Field, 2},
    -		{"IfaMsghdr.Index", Field, 0},
    -		{"IfaMsghdr.Metric", Field, 0},
    -		{"IfaMsghdr.Msglen", Field, 0},
    -		{"IfaMsghdr.Pad1", Field, 2},
    -		{"IfaMsghdr.Pad2", Field, 2},
    -		{"IfaMsghdr.Pad_cgo_0", Field, 0},
    -		{"IfaMsghdr.Tableid", Field, 2},
    -		{"IfaMsghdr.Type", Field, 0},
    -		{"IfaMsghdr.Version", Field, 0},
    -		{"IfmaMsghdr", Type, 0},
    -		{"IfmaMsghdr.Addrs", Field, 0},
    -		{"IfmaMsghdr.Flags", Field, 0},
    -		{"IfmaMsghdr.Index", Field, 0},
    -		{"IfmaMsghdr.Msglen", Field, 0},
    -		{"IfmaMsghdr.Pad_cgo_0", Field, 0},
    -		{"IfmaMsghdr.Type", Field, 0},
    -		{"IfmaMsghdr.Version", Field, 0},
    -		{"IfmaMsghdr2", Type, 0},
    -		{"IfmaMsghdr2.Addrs", Field, 0},
    -		{"IfmaMsghdr2.Flags", Field, 0},
    -		{"IfmaMsghdr2.Index", Field, 0},
    -		{"IfmaMsghdr2.Msglen", Field, 0},
    -		{"IfmaMsghdr2.Pad_cgo_0", Field, 0},
    -		{"IfmaMsghdr2.Refcount", Field, 0},
    -		{"IfmaMsghdr2.Type", Field, 0},
    -		{"IfmaMsghdr2.Version", Field, 0},
    -		{"ImplementsGetwd", Const, 0},
    -		{"Inet4Pktinfo", Type, 0},
    -		{"Inet4Pktinfo.Addr", Field, 0},
    -		{"Inet4Pktinfo.Ifindex", Field, 0},
    -		{"Inet4Pktinfo.Spec_dst", Field, 0},
    -		{"Inet6Pktinfo", Type, 0},
    -		{"Inet6Pktinfo.Addr", Field, 0},
    -		{"Inet6Pktinfo.Ifindex", Field, 0},
    -		{"InotifyAddWatch", Func, 0},
    -		{"InotifyEvent", Type, 0},
    -		{"InotifyEvent.Cookie", Field, 0},
    -		{"InotifyEvent.Len", Field, 0},
    -		{"InotifyEvent.Mask", Field, 0},
    -		{"InotifyEvent.Name", Field, 0},
    -		{"InotifyEvent.Wd", Field, 0},
    -		{"InotifyInit", Func, 0},
    -		{"InotifyInit1", Func, 0},
    -		{"InotifyRmWatch", Func, 0},
    -		{"InterfaceAddrMessage", Type, 0},
    -		{"InterfaceAddrMessage.Data", Field, 0},
    -		{"InterfaceAddrMessage.Header", Field, 0},
    -		{"InterfaceAnnounceMessage", Type, 1},
    -		{"InterfaceAnnounceMessage.Header", Field, 1},
    -		{"InterfaceInfo", Type, 0},
    -		{"InterfaceInfo.Address", Field, 0},
    -		{"InterfaceInfo.BroadcastAddress", Field, 0},
    -		{"InterfaceInfo.Flags", Field, 0},
    -		{"InterfaceInfo.Netmask", Field, 0},
    -		{"InterfaceMessage", Type, 0},
    -		{"InterfaceMessage.Data", Field, 0},
    -		{"InterfaceMessage.Header", Field, 0},
    -		{"InterfaceMulticastAddrMessage", Type, 0},
    -		{"InterfaceMulticastAddrMessage.Data", Field, 0},
    -		{"InterfaceMulticastAddrMessage.Header", Field, 0},
    -		{"InvalidHandle", Const, 0},
    -		{"Ioperm", Func, 0},
    -		{"Iopl", Func, 0},
    -		{"Iovec", Type, 0},
    -		{"Iovec.Base", Field, 0},
    -		{"Iovec.Len", Field, 0},
    -		{"IpAdapterInfo", Type, 0},
    -		{"IpAdapterInfo.AdapterName", Field, 0},
    -		{"IpAdapterInfo.Address", Field, 0},
    -		{"IpAdapterInfo.AddressLength", Field, 0},
    -		{"IpAdapterInfo.ComboIndex", Field, 0},
    -		{"IpAdapterInfo.CurrentIpAddress", Field, 0},
    -		{"IpAdapterInfo.Description", Field, 0},
    -		{"IpAdapterInfo.DhcpEnabled", Field, 0},
    -		{"IpAdapterInfo.DhcpServer", Field, 0},
    -		{"IpAdapterInfo.GatewayList", Field, 0},
    -		{"IpAdapterInfo.HaveWins", Field, 0},
    -		{"IpAdapterInfo.Index", Field, 0},
    -		{"IpAdapterInfo.IpAddressList", Field, 0},
    -		{"IpAdapterInfo.LeaseExpires", Field, 0},
    -		{"IpAdapterInfo.LeaseObtained", Field, 0},
    -		{"IpAdapterInfo.Next", Field, 0},
    -		{"IpAdapterInfo.PrimaryWinsServer", Field, 0},
    -		{"IpAdapterInfo.SecondaryWinsServer", Field, 0},
    -		{"IpAdapterInfo.Type", Field, 0},
    -		{"IpAddrString", Type, 0},
    -		{"IpAddrString.Context", Field, 0},
    -		{"IpAddrString.IpAddress", Field, 0},
    -		{"IpAddrString.IpMask", Field, 0},
    -		{"IpAddrString.Next", Field, 0},
    -		{"IpAddressString", Type, 0},
    -		{"IpAddressString.String", Field, 0},
    -		{"IpMaskString", Type, 0},
    -		{"IpMaskString.String", Field, 2},
    -		{"Issetugid", Func, 0},
    -		{"KEY_ALL_ACCESS", Const, 0},
    -		{"KEY_CREATE_LINK", Const, 0},
    -		{"KEY_CREATE_SUB_KEY", Const, 0},
    -		{"KEY_ENUMERATE_SUB_KEYS", Const, 0},
    -		{"KEY_EXECUTE", Const, 0},
    -		{"KEY_NOTIFY", Const, 0},
    -		{"KEY_QUERY_VALUE", Const, 0},
    -		{"KEY_READ", Const, 0},
    -		{"KEY_SET_VALUE", Const, 0},
    -		{"KEY_WOW64_32KEY", Const, 0},
    -		{"KEY_WOW64_64KEY", Const, 0},
    -		{"KEY_WRITE", Const, 0},
    -		{"Kevent", Func, 0},
    -		{"Kevent_t", Type, 0},
    -		{"Kevent_t.Data", Field, 0},
    -		{"Kevent_t.Fflags", Field, 0},
    -		{"Kevent_t.Filter", Field, 0},
    -		{"Kevent_t.Flags", Field, 0},
    -		{"Kevent_t.Ident", Field, 0},
    -		{"Kevent_t.Pad_cgo_0", Field, 2},
    -		{"Kevent_t.Udata", Field, 0},
    -		{"Kill", Func, 0},
    -		{"Klogctl", Func, 0},
    -		{"Kqueue", Func, 0},
    -		{"LANG_ENGLISH", Const, 0},
    -		{"LAYERED_PROTOCOL", Const, 2},
    -		{"LCNT_OVERLOAD_FLUSH", Const, 1},
    -		{"LINUX_REBOOT_CMD_CAD_OFF", Const, 0},
    -		{"LINUX_REBOOT_CMD_CAD_ON", Const, 0},
    -		{"LINUX_REBOOT_CMD_HALT", Const, 0},
    -		{"LINUX_REBOOT_CMD_KEXEC", Const, 0},
    -		{"LINUX_REBOOT_CMD_POWER_OFF", Const, 0},
    -		{"LINUX_REBOOT_CMD_RESTART", Const, 0},
    -		{"LINUX_REBOOT_CMD_RESTART2", Const, 0},
    -		{"LINUX_REBOOT_CMD_SW_SUSPEND", Const, 0},
    -		{"LINUX_REBOOT_MAGIC1", Const, 0},
    -		{"LINUX_REBOOT_MAGIC2", Const, 0},
    -		{"LOCK_EX", Const, 0},
    -		{"LOCK_NB", Const, 0},
    -		{"LOCK_SH", Const, 0},
    -		{"LOCK_UN", Const, 0},
    -		{"LazyDLL", Type, 0},
    -		{"LazyDLL.Name", Field, 0},
    -		{"LazyProc", Type, 0},
    -		{"LazyProc.Name", Field, 0},
    -		{"Lchown", Func, 0},
    -		{"Linger", Type, 0},
    -		{"Linger.Linger", Field, 0},
    -		{"Linger.Onoff", Field, 0},
    -		{"Link", Func, 0},
    -		{"Listen", Func, 0},
    -		{"Listxattr", Func, 1},
    -		{"LoadCancelIoEx", Func, 1},
    -		{"LoadConnectEx", Func, 1},
    -		{"LoadCreateSymbolicLink", Func, 4},
    -		{"LoadDLL", Func, 0},
    -		{"LoadGetAddrInfo", Func, 1},
    -		{"LoadLibrary", Func, 0},
    -		{"LoadSetFileCompletionNotificationModes", Func, 2},
    -		{"LocalFree", Func, 0},
    -		{"Log2phys_t", Type, 0},
    -		{"Log2phys_t.Contigbytes", Field, 0},
    -		{"Log2phys_t.Devoffset", Field, 0},
    -		{"Log2phys_t.Flags", Field, 0},
    -		{"LookupAccountName", Func, 0},
    -		{"LookupAccountSid", Func, 0},
    -		{"LookupSID", Func, 0},
    -		{"LsfJump", Func, 0},
    -		{"LsfSocket", Func, 0},
    -		{"LsfStmt", Func, 0},
    -		{"Lstat", Func, 0},
    -		{"MADV_AUTOSYNC", Const, 1},
    -		{"MADV_CAN_REUSE", Const, 0},
    -		{"MADV_CORE", Const, 1},
    -		{"MADV_DOFORK", Const, 0},
    -		{"MADV_DONTFORK", Const, 0},
    -		{"MADV_DONTNEED", Const, 0},
    -		{"MADV_FREE", Const, 0},
    -		{"MADV_FREE_REUSABLE", Const, 0},
    -		{"MADV_FREE_REUSE", Const, 0},
    -		{"MADV_HUGEPAGE", Const, 0},
    -		{"MADV_HWPOISON", Const, 0},
    -		{"MADV_MERGEABLE", Const, 0},
    -		{"MADV_NOCORE", Const, 1},
    -		{"MADV_NOHUGEPAGE", Const, 0},
    -		{"MADV_NORMAL", Const, 0},
    -		{"MADV_NOSYNC", Const, 1},
    -		{"MADV_PROTECT", Const, 1},
    -		{"MADV_RANDOM", Const, 0},
    -		{"MADV_REMOVE", Const, 0},
    -		{"MADV_SEQUENTIAL", Const, 0},
    -		{"MADV_SPACEAVAIL", Const, 3},
    -		{"MADV_UNMERGEABLE", Const, 0},
    -		{"MADV_WILLNEED", Const, 0},
    -		{"MADV_ZERO_WIRED_PAGES", Const, 0},
    -		{"MAP_32BIT", Const, 0},
    -		{"MAP_ALIGNED_SUPER", Const, 3},
    -		{"MAP_ALIGNMENT_16MB", Const, 3},
    -		{"MAP_ALIGNMENT_1TB", Const, 3},
    -		{"MAP_ALIGNMENT_256TB", Const, 3},
    -		{"MAP_ALIGNMENT_4GB", Const, 3},
    -		{"MAP_ALIGNMENT_64KB", Const, 3},
    -		{"MAP_ALIGNMENT_64PB", Const, 3},
    -		{"MAP_ALIGNMENT_MASK", Const, 3},
    -		{"MAP_ALIGNMENT_SHIFT", Const, 3},
    -		{"MAP_ANON", Const, 0},
    -		{"MAP_ANONYMOUS", Const, 0},
    -		{"MAP_COPY", Const, 0},
    -		{"MAP_DENYWRITE", Const, 0},
    -		{"MAP_EXECUTABLE", Const, 0},
    -		{"MAP_FILE", Const, 0},
    -		{"MAP_FIXED", Const, 0},
    -		{"MAP_FLAGMASK", Const, 3},
    -		{"MAP_GROWSDOWN", Const, 0},
    -		{"MAP_HASSEMAPHORE", Const, 0},
    -		{"MAP_HUGETLB", Const, 0},
    -		{"MAP_INHERIT", Const, 3},
    -		{"MAP_INHERIT_COPY", Const, 3},
    -		{"MAP_INHERIT_DEFAULT", Const, 3},
    -		{"MAP_INHERIT_DONATE_COPY", Const, 3},
    -		{"MAP_INHERIT_NONE", Const, 3},
    -		{"MAP_INHERIT_SHARE", Const, 3},
    -		{"MAP_JIT", Const, 0},
    -		{"MAP_LOCKED", Const, 0},
    -		{"MAP_NOCACHE", Const, 0},
    -		{"MAP_NOCORE", Const, 1},
    -		{"MAP_NOEXTEND", Const, 0},
    -		{"MAP_NONBLOCK", Const, 0},
    -		{"MAP_NORESERVE", Const, 0},
    -		{"MAP_NOSYNC", Const, 1},
    -		{"MAP_POPULATE", Const, 0},
    -		{"MAP_PREFAULT_READ", Const, 1},
    -		{"MAP_PRIVATE", Const, 0},
    -		{"MAP_RENAME", Const, 0},
    -		{"MAP_RESERVED0080", Const, 0},
    -		{"MAP_RESERVED0100", Const, 1},
    -		{"MAP_SHARED", Const, 0},
    -		{"MAP_STACK", Const, 0},
    -		{"MAP_TRYFIXED", Const, 3},
    -		{"MAP_TYPE", Const, 0},
    -		{"MAP_WIRED", Const, 3},
    -		{"MAXIMUM_REPARSE_DATA_BUFFER_SIZE", Const, 4},
    -		{"MAXLEN_IFDESCR", Const, 0},
    -		{"MAXLEN_PHYSADDR", Const, 0},
    -		{"MAX_ADAPTER_ADDRESS_LENGTH", Const, 0},
    -		{"MAX_ADAPTER_DESCRIPTION_LENGTH", Const, 0},
    -		{"MAX_ADAPTER_NAME_LENGTH", Const, 0},
    -		{"MAX_COMPUTERNAME_LENGTH", Const, 0},
    -		{"MAX_INTERFACE_NAME_LEN", Const, 0},
    -		{"MAX_LONG_PATH", Const, 0},
    -		{"MAX_PATH", Const, 0},
    -		{"MAX_PROTOCOL_CHAIN", Const, 2},
    -		{"MCL_CURRENT", Const, 0},
    -		{"MCL_FUTURE", Const, 0},
    -		{"MNT_DETACH", Const, 0},
    -		{"MNT_EXPIRE", Const, 0},
    -		{"MNT_FORCE", Const, 0},
    -		{"MSG_BCAST", Const, 1},
    -		{"MSG_CMSG_CLOEXEC", Const, 0},
    -		{"MSG_COMPAT", Const, 0},
    -		{"MSG_CONFIRM", Const, 0},
    -		{"MSG_CONTROLMBUF", Const, 1},
    -		{"MSG_CTRUNC", Const, 0},
    -		{"MSG_DONTROUTE", Const, 0},
    -		{"MSG_DONTWAIT", Const, 0},
    -		{"MSG_EOF", Const, 0},
    -		{"MSG_EOR", Const, 0},
    -		{"MSG_ERRQUEUE", Const, 0},
    -		{"MSG_FASTOPEN", Const, 1},
    -		{"MSG_FIN", Const, 0},
    -		{"MSG_FLUSH", Const, 0},
    -		{"MSG_HAVEMORE", Const, 0},
    -		{"MSG_HOLD", Const, 0},
    -		{"MSG_IOVUSRSPACE", Const, 1},
    -		{"MSG_LENUSRSPACE", Const, 1},
    -		{"MSG_MCAST", Const, 1},
    -		{"MSG_MORE", Const, 0},
    -		{"MSG_NAMEMBUF", Const, 1},
    -		{"MSG_NBIO", Const, 0},
    -		{"MSG_NEEDSA", Const, 0},
    -		{"MSG_NOSIGNAL", Const, 0},
    -		{"MSG_NOTIFICATION", Const, 0},
    -		{"MSG_OOB", Const, 0},
    -		{"MSG_PEEK", Const, 0},
    -		{"MSG_PROXY", Const, 0},
    -		{"MSG_RCVMORE", Const, 0},
    -		{"MSG_RST", Const, 0},
    -		{"MSG_SEND", Const, 0},
    -		{"MSG_SYN", Const, 0},
    -		{"MSG_TRUNC", Const, 0},
    -		{"MSG_TRYHARD", Const, 0},
    -		{"MSG_USERFLAGS", Const, 1},
    -		{"MSG_WAITALL", Const, 0},
    -		{"MSG_WAITFORONE", Const, 0},
    -		{"MSG_WAITSTREAM", Const, 0},
    -		{"MS_ACTIVE", Const, 0},
    -		{"MS_ASYNC", Const, 0},
    -		{"MS_BIND", Const, 0},
    -		{"MS_DEACTIVATE", Const, 0},
    -		{"MS_DIRSYNC", Const, 0},
    -		{"MS_INVALIDATE", Const, 0},
    -		{"MS_I_VERSION", Const, 0},
    -		{"MS_KERNMOUNT", Const, 0},
    -		{"MS_KILLPAGES", Const, 0},
    -		{"MS_MANDLOCK", Const, 0},
    -		{"MS_MGC_MSK", Const, 0},
    -		{"MS_MGC_VAL", Const, 0},
    -		{"MS_MOVE", Const, 0},
    -		{"MS_NOATIME", Const, 0},
    -		{"MS_NODEV", Const, 0},
    -		{"MS_NODIRATIME", Const, 0},
    -		{"MS_NOEXEC", Const, 0},
    -		{"MS_NOSUID", Const, 0},
    -		{"MS_NOUSER", Const, 0},
    -		{"MS_POSIXACL", Const, 0},
    -		{"MS_PRIVATE", Const, 0},
    -		{"MS_RDONLY", Const, 0},
    -		{"MS_REC", Const, 0},
    -		{"MS_RELATIME", Const, 0},
    -		{"MS_REMOUNT", Const, 0},
    -		{"MS_RMT_MASK", Const, 0},
    -		{"MS_SHARED", Const, 0},
    -		{"MS_SILENT", Const, 0},
    -		{"MS_SLAVE", Const, 0},
    -		{"MS_STRICTATIME", Const, 0},
    -		{"MS_SYNC", Const, 0},
    -		{"MS_SYNCHRONOUS", Const, 0},
    -		{"MS_UNBINDABLE", Const, 0},
    -		{"Madvise", Func, 0},
    -		{"MapViewOfFile", Func, 0},
    -		{"MaxTokenInfoClass", Const, 0},
    -		{"Mclpool", Type, 2},
    -		{"Mclpool.Alive", Field, 2},
    -		{"Mclpool.Cwm", Field, 2},
    -		{"Mclpool.Grown", Field, 2},
    -		{"Mclpool.Hwm", Field, 2},
    -		{"Mclpool.Lwm", Field, 2},
    -		{"MibIfRow", Type, 0},
    -		{"MibIfRow.AdminStatus", Field, 0},
    -		{"MibIfRow.Descr", Field, 0},
    -		{"MibIfRow.DescrLen", Field, 0},
    -		{"MibIfRow.InDiscards", Field, 0},
    -		{"MibIfRow.InErrors", Field, 0},
    -		{"MibIfRow.InNUcastPkts", Field, 0},
    -		{"MibIfRow.InOctets", Field, 0},
    -		{"MibIfRow.InUcastPkts", Field, 0},
    -		{"MibIfRow.InUnknownProtos", Field, 0},
    -		{"MibIfRow.Index", Field, 0},
    -		{"MibIfRow.LastChange", Field, 0},
    -		{"MibIfRow.Mtu", Field, 0},
    -		{"MibIfRow.Name", Field, 0},
    -		{"MibIfRow.OperStatus", Field, 0},
    -		{"MibIfRow.OutDiscards", Field, 0},
    -		{"MibIfRow.OutErrors", Field, 0},
    -		{"MibIfRow.OutNUcastPkts", Field, 0},
    -		{"MibIfRow.OutOctets", Field, 0},
    -		{"MibIfRow.OutQLen", Field, 0},
    -		{"MibIfRow.OutUcastPkts", Field, 0},
    -		{"MibIfRow.PhysAddr", Field, 0},
    -		{"MibIfRow.PhysAddrLen", Field, 0},
    -		{"MibIfRow.Speed", Field, 0},
    -		{"MibIfRow.Type", Field, 0},
    -		{"Mkdir", Func, 0},
    -		{"Mkdirat", Func, 0},
    -		{"Mkfifo", Func, 0},
    -		{"Mknod", Func, 0},
    -		{"Mknodat", Func, 0},
    -		{"Mlock", Func, 0},
    -		{"Mlockall", Func, 0},
    -		{"Mmap", Func, 0},
    -		{"Mount", Func, 0},
    -		{"MoveFile", Func, 0},
    -		{"Mprotect", Func, 0},
    -		{"Msghdr", Type, 0},
    -		{"Msghdr.Control", Field, 0},
    -		{"Msghdr.Controllen", Field, 0},
    -		{"Msghdr.Flags", Field, 0},
    -		{"Msghdr.Iov", Field, 0},
    -		{"Msghdr.Iovlen", Field, 0},
    -		{"Msghdr.Name", Field, 0},
    -		{"Msghdr.Namelen", Field, 0},
    -		{"Msghdr.Pad_cgo_0", Field, 0},
    -		{"Msghdr.Pad_cgo_1", Field, 0},
    -		{"Munlock", Func, 0},
    -		{"Munlockall", Func, 0},
    -		{"Munmap", Func, 0},
    -		{"MustLoadDLL", Func, 0},
    -		{"NAME_MAX", Const, 0},
    -		{"NETLINK_ADD_MEMBERSHIP", Const, 0},
    -		{"NETLINK_AUDIT", Const, 0},
    -		{"NETLINK_BROADCAST_ERROR", Const, 0},
    -		{"NETLINK_CONNECTOR", Const, 0},
    -		{"NETLINK_DNRTMSG", Const, 0},
    -		{"NETLINK_DROP_MEMBERSHIP", Const, 0},
    -		{"NETLINK_ECRYPTFS", Const, 0},
    -		{"NETLINK_FIB_LOOKUP", Const, 0},
    -		{"NETLINK_FIREWALL", Const, 0},
    -		{"NETLINK_GENERIC", Const, 0},
    -		{"NETLINK_INET_DIAG", Const, 0},
    -		{"NETLINK_IP6_FW", Const, 0},
    -		{"NETLINK_ISCSI", Const, 0},
    -		{"NETLINK_KOBJECT_UEVENT", Const, 0},
    -		{"NETLINK_NETFILTER", Const, 0},
    -		{"NETLINK_NFLOG", Const, 0},
    -		{"NETLINK_NO_ENOBUFS", Const, 0},
    -		{"NETLINK_PKTINFO", Const, 0},
    -		{"NETLINK_RDMA", Const, 0},
    -		{"NETLINK_ROUTE", Const, 0},
    -		{"NETLINK_SCSITRANSPORT", Const, 0},
    -		{"NETLINK_SELINUX", Const, 0},
    -		{"NETLINK_UNUSED", Const, 0},
    -		{"NETLINK_USERSOCK", Const, 0},
    -		{"NETLINK_XFRM", Const, 0},
    -		{"NET_RT_DUMP", Const, 0},
    -		{"NET_RT_DUMP2", Const, 0},
    -		{"NET_RT_FLAGS", Const, 0},
    -		{"NET_RT_IFLIST", Const, 0},
    -		{"NET_RT_IFLIST2", Const, 0},
    -		{"NET_RT_IFLISTL", Const, 1},
    -		{"NET_RT_IFMALIST", Const, 0},
    -		{"NET_RT_MAXID", Const, 0},
    -		{"NET_RT_OIFLIST", Const, 1},
    -		{"NET_RT_OOIFLIST", Const, 1},
    -		{"NET_RT_STAT", Const, 0},
    -		{"NET_RT_STATS", Const, 1},
    -		{"NET_RT_TABLE", Const, 1},
    -		{"NET_RT_TRASH", Const, 0},
    -		{"NLA_ALIGNTO", Const, 0},
    -		{"NLA_F_NESTED", Const, 0},
    -		{"NLA_F_NET_BYTEORDER", Const, 0},
    -		{"NLA_HDRLEN", Const, 0},
    -		{"NLMSG_ALIGNTO", Const, 0},
    -		{"NLMSG_DONE", Const, 0},
    -		{"NLMSG_ERROR", Const, 0},
    -		{"NLMSG_HDRLEN", Const, 0},
    -		{"NLMSG_MIN_TYPE", Const, 0},
    -		{"NLMSG_NOOP", Const, 0},
    -		{"NLMSG_OVERRUN", Const, 0},
    -		{"NLM_F_ACK", Const, 0},
    -		{"NLM_F_APPEND", Const, 0},
    -		{"NLM_F_ATOMIC", Const, 0},
    -		{"NLM_F_CREATE", Const, 0},
    -		{"NLM_F_DUMP", Const, 0},
    -		{"NLM_F_ECHO", Const, 0},
    -		{"NLM_F_EXCL", Const, 0},
    -		{"NLM_F_MATCH", Const, 0},
    -		{"NLM_F_MULTI", Const, 0},
    -		{"NLM_F_REPLACE", Const, 0},
    -		{"NLM_F_REQUEST", Const, 0},
    -		{"NLM_F_ROOT", Const, 0},
    -		{"NOFLSH", Const, 0},
    -		{"NOTE_ABSOLUTE", Const, 0},
    -		{"NOTE_ATTRIB", Const, 0},
    -		{"NOTE_BACKGROUND", Const, 16},
    -		{"NOTE_CHILD", Const, 0},
    -		{"NOTE_CRITICAL", Const, 16},
    -		{"NOTE_DELETE", Const, 0},
    -		{"NOTE_EOF", Const, 1},
    -		{"NOTE_EXEC", Const, 0},
    -		{"NOTE_EXIT", Const, 0},
    -		{"NOTE_EXITSTATUS", Const, 0},
    -		{"NOTE_EXIT_CSERROR", Const, 16},
    -		{"NOTE_EXIT_DECRYPTFAIL", Const, 16},
    -		{"NOTE_EXIT_DETAIL", Const, 16},
    -		{"NOTE_EXIT_DETAIL_MASK", Const, 16},
    -		{"NOTE_EXIT_MEMORY", Const, 16},
    -		{"NOTE_EXIT_REPARENTED", Const, 16},
    -		{"NOTE_EXTEND", Const, 0},
    -		{"NOTE_FFAND", Const, 0},
    -		{"NOTE_FFCOPY", Const, 0},
    -		{"NOTE_FFCTRLMASK", Const, 0},
    -		{"NOTE_FFLAGSMASK", Const, 0},
    -		{"NOTE_FFNOP", Const, 0},
    -		{"NOTE_FFOR", Const, 0},
    -		{"NOTE_FORK", Const, 0},
    -		{"NOTE_LEEWAY", Const, 16},
    -		{"NOTE_LINK", Const, 0},
    -		{"NOTE_LOWAT", Const, 0},
    -		{"NOTE_NONE", Const, 0},
    -		{"NOTE_NSECONDS", Const, 0},
    -		{"NOTE_PCTRLMASK", Const, 0},
    -		{"NOTE_PDATAMASK", Const, 0},
    -		{"NOTE_REAP", Const, 0},
    -		{"NOTE_RENAME", Const, 0},
    -		{"NOTE_RESOURCEEND", Const, 0},
    -		{"NOTE_REVOKE", Const, 0},
    -		{"NOTE_SECONDS", Const, 0},
    -		{"NOTE_SIGNAL", Const, 0},
    -		{"NOTE_TRACK", Const, 0},
    -		{"NOTE_TRACKERR", Const, 0},
    -		{"NOTE_TRIGGER", Const, 0},
    -		{"NOTE_TRUNCATE", Const, 1},
    -		{"NOTE_USECONDS", Const, 0},
    -		{"NOTE_VM_ERROR", Const, 0},
    -		{"NOTE_VM_PRESSURE", Const, 0},
    -		{"NOTE_VM_PRESSURE_SUDDEN_TERMINATE", Const, 0},
    -		{"NOTE_VM_PRESSURE_TERMINATE", Const, 0},
    -		{"NOTE_WRITE", Const, 0},
    -		{"NameCanonical", Const, 0},
    -		{"NameCanonicalEx", Const, 0},
    -		{"NameDisplay", Const, 0},
    -		{"NameDnsDomain", Const, 0},
    -		{"NameFullyQualifiedDN", Const, 0},
    -		{"NameSamCompatible", Const, 0},
    -		{"NameServicePrincipal", Const, 0},
    -		{"NameUniqueId", Const, 0},
    -		{"NameUnknown", Const, 0},
    -		{"NameUserPrincipal", Const, 0},
    -		{"Nanosleep", Func, 0},
    -		{"NetApiBufferFree", Func, 0},
    -		{"NetGetJoinInformation", Func, 2},
    -		{"NetSetupDomainName", Const, 2},
    -		{"NetSetupUnjoined", Const, 2},
    -		{"NetSetupUnknownStatus", Const, 2},
    -		{"NetSetupWorkgroupName", Const, 2},
    -		{"NetUserGetInfo", Func, 0},
    -		{"NetlinkMessage", Type, 0},
    -		{"NetlinkMessage.Data", Field, 0},
    -		{"NetlinkMessage.Header", Field, 0},
    -		{"NetlinkRIB", Func, 0},
    -		{"NetlinkRouteAttr", Type, 0},
    -		{"NetlinkRouteAttr.Attr", Field, 0},
    -		{"NetlinkRouteAttr.Value", Field, 0},
    -		{"NetlinkRouteRequest", Type, 0},
    -		{"NetlinkRouteRequest.Data", Field, 0},
    -		{"NetlinkRouteRequest.Header", Field, 0},
    -		{"NewCallback", Func, 0},
    -		{"NewCallbackCDecl", Func, 3},
    -		{"NewLazyDLL", Func, 0},
    -		{"NlAttr", Type, 0},
    -		{"NlAttr.Len", Field, 0},
    -		{"NlAttr.Type", Field, 0},
    -		{"NlMsgerr", Type, 0},
    -		{"NlMsgerr.Error", Field, 0},
    -		{"NlMsgerr.Msg", Field, 0},
    -		{"NlMsghdr", Type, 0},
    -		{"NlMsghdr.Flags", Field, 0},
    -		{"NlMsghdr.Len", Field, 0},
    -		{"NlMsghdr.Pid", Field, 0},
    -		{"NlMsghdr.Seq", Field, 0},
    -		{"NlMsghdr.Type", Field, 0},
    -		{"NsecToFiletime", Func, 0},
    -		{"NsecToTimespec", Func, 0},
    -		{"NsecToTimeval", Func, 0},
    -		{"Ntohs", Func, 0},
    -		{"OCRNL", Const, 0},
    -		{"OFDEL", Const, 0},
    -		{"OFILL", Const, 0},
    -		{"OFIOGETBMAP", Const, 1},
    -		{"OID_PKIX_KP_SERVER_AUTH", Var, 0},
    -		{"OID_SERVER_GATED_CRYPTO", Var, 0},
    -		{"OID_SGC_NETSCAPE", Var, 0},
    -		{"OLCUC", Const, 0},
    -		{"ONLCR", Const, 0},
    -		{"ONLRET", Const, 0},
    -		{"ONOCR", Const, 0},
    -		{"ONOEOT", Const, 1},
    -		{"OPEN_ALWAYS", Const, 0},
    -		{"OPEN_EXISTING", Const, 0},
    -		{"OPOST", Const, 0},
    -		{"O_ACCMODE", Const, 0},
    -		{"O_ALERT", Const, 0},
    -		{"O_ALT_IO", Const, 1},
    -		{"O_APPEND", Const, 0},
    -		{"O_ASYNC", Const, 0},
    -		{"O_CLOEXEC", Const, 0},
    -		{"O_CREAT", Const, 0},
    -		{"O_DIRECT", Const, 0},
    -		{"O_DIRECTORY", Const, 0},
    -		{"O_DP_GETRAWENCRYPTED", Const, 16},
    -		{"O_DSYNC", Const, 0},
    -		{"O_EVTONLY", Const, 0},
    -		{"O_EXCL", Const, 0},
    -		{"O_EXEC", Const, 0},
    -		{"O_EXLOCK", Const, 0},
    -		{"O_FSYNC", Const, 0},
    -		{"O_LARGEFILE", Const, 0},
    -		{"O_NDELAY", Const, 0},
    -		{"O_NOATIME", Const, 0},
    -		{"O_NOCTTY", Const, 0},
    -		{"O_NOFOLLOW", Const, 0},
    -		{"O_NONBLOCK", Const, 0},
    -		{"O_NOSIGPIPE", Const, 1},
    -		{"O_POPUP", Const, 0},
    -		{"O_RDONLY", Const, 0},
    -		{"O_RDWR", Const, 0},
    -		{"O_RSYNC", Const, 0},
    -		{"O_SHLOCK", Const, 0},
    -		{"O_SYMLINK", Const, 0},
    -		{"O_SYNC", Const, 0},
    -		{"O_TRUNC", Const, 0},
    -		{"O_TTY_INIT", Const, 0},
    -		{"O_WRONLY", Const, 0},
    -		{"Open", Func, 0},
    -		{"OpenCurrentProcessToken", Func, 0},
    -		{"OpenProcess", Func, 0},
    -		{"OpenProcessToken", Func, 0},
    -		{"Openat", Func, 0},
    -		{"Overlapped", Type, 0},
    -		{"Overlapped.HEvent", Field, 0},
    -		{"Overlapped.Internal", Field, 0},
    -		{"Overlapped.InternalHigh", Field, 0},
    -		{"Overlapped.Offset", Field, 0},
    -		{"Overlapped.OffsetHigh", Field, 0},
    -		{"PACKET_ADD_MEMBERSHIP", Const, 0},
    -		{"PACKET_BROADCAST", Const, 0},
    -		{"PACKET_DROP_MEMBERSHIP", Const, 0},
    -		{"PACKET_FASTROUTE", Const, 0},
    -		{"PACKET_HOST", Const, 0},
    -		{"PACKET_LOOPBACK", Const, 0},
    -		{"PACKET_MR_ALLMULTI", Const, 0},
    -		{"PACKET_MR_MULTICAST", Const, 0},
    -		{"PACKET_MR_PROMISC", Const, 0},
    -		{"PACKET_MULTICAST", Const, 0},
    -		{"PACKET_OTHERHOST", Const, 0},
    -		{"PACKET_OUTGOING", Const, 0},
    -		{"PACKET_RECV_OUTPUT", Const, 0},
    -		{"PACKET_RX_RING", Const, 0},
    -		{"PACKET_STATISTICS", Const, 0},
    -		{"PAGE_EXECUTE_READ", Const, 0},
    -		{"PAGE_EXECUTE_READWRITE", Const, 0},
    -		{"PAGE_EXECUTE_WRITECOPY", Const, 0},
    -		{"PAGE_READONLY", Const, 0},
    -		{"PAGE_READWRITE", Const, 0},
    -		{"PAGE_WRITECOPY", Const, 0},
    -		{"PARENB", Const, 0},
    -		{"PARMRK", Const, 0},
    -		{"PARODD", Const, 0},
    -		{"PENDIN", Const, 0},
    -		{"PFL_HIDDEN", Const, 2},
    -		{"PFL_MATCHES_PROTOCOL_ZERO", Const, 2},
    -		{"PFL_MULTIPLE_PROTO_ENTRIES", Const, 2},
    -		{"PFL_NETWORKDIRECT_PROVIDER", Const, 2},
    -		{"PFL_RECOMMENDED_PROTO_ENTRY", Const, 2},
    -		{"PF_FLUSH", Const, 1},
    -		{"PKCS_7_ASN_ENCODING", Const, 0},
    -		{"PMC5_PIPELINE_FLUSH", Const, 1},
    -		{"PRIO_PGRP", Const, 2},
    -		{"PRIO_PROCESS", Const, 2},
    -		{"PRIO_USER", Const, 2},
    -		{"PRI_IOFLUSH", Const, 1},
    -		{"PROCESS_QUERY_INFORMATION", Const, 0},
    -		{"PROCESS_TERMINATE", Const, 2},
    -		{"PROT_EXEC", Const, 0},
    -		{"PROT_GROWSDOWN", Const, 0},
    -		{"PROT_GROWSUP", Const, 0},
    -		{"PROT_NONE", Const, 0},
    -		{"PROT_READ", Const, 0},
    -		{"PROT_WRITE", Const, 0},
    -		{"PROV_DH_SCHANNEL", Const, 0},
    -		{"PROV_DSS", Const, 0},
    -		{"PROV_DSS_DH", Const, 0},
    -		{"PROV_EC_ECDSA_FULL", Const, 0},
    -		{"PROV_EC_ECDSA_SIG", Const, 0},
    -		{"PROV_EC_ECNRA_FULL", Const, 0},
    -		{"PROV_EC_ECNRA_SIG", Const, 0},
    -		{"PROV_FORTEZZA", Const, 0},
    -		{"PROV_INTEL_SEC", Const, 0},
    -		{"PROV_MS_EXCHANGE", Const, 0},
    -		{"PROV_REPLACE_OWF", Const, 0},
    -		{"PROV_RNG", Const, 0},
    -		{"PROV_RSA_AES", Const, 0},
    -		{"PROV_RSA_FULL", Const, 0},
    -		{"PROV_RSA_SCHANNEL", Const, 0},
    -		{"PROV_RSA_SIG", Const, 0},
    -		{"PROV_SPYRUS_LYNKS", Const, 0},
    -		{"PROV_SSL", Const, 0},
    -		{"PR_CAPBSET_DROP", Const, 0},
    -		{"PR_CAPBSET_READ", Const, 0},
    -		{"PR_CLEAR_SECCOMP_FILTER", Const, 0},
    -		{"PR_ENDIAN_BIG", Const, 0},
    -		{"PR_ENDIAN_LITTLE", Const, 0},
    -		{"PR_ENDIAN_PPC_LITTLE", Const, 0},
    -		{"PR_FPEMU_NOPRINT", Const, 0},
    -		{"PR_FPEMU_SIGFPE", Const, 0},
    -		{"PR_FP_EXC_ASYNC", Const, 0},
    -		{"PR_FP_EXC_DISABLED", Const, 0},
    -		{"PR_FP_EXC_DIV", Const, 0},
    -		{"PR_FP_EXC_INV", Const, 0},
    -		{"PR_FP_EXC_NONRECOV", Const, 0},
    -		{"PR_FP_EXC_OVF", Const, 0},
    -		{"PR_FP_EXC_PRECISE", Const, 0},
    -		{"PR_FP_EXC_RES", Const, 0},
    -		{"PR_FP_EXC_SW_ENABLE", Const, 0},
    -		{"PR_FP_EXC_UND", Const, 0},
    -		{"PR_GET_DUMPABLE", Const, 0},
    -		{"PR_GET_ENDIAN", Const, 0},
    -		{"PR_GET_FPEMU", Const, 0},
    -		{"PR_GET_FPEXC", Const, 0},
    -		{"PR_GET_KEEPCAPS", Const, 0},
    -		{"PR_GET_NAME", Const, 0},
    -		{"PR_GET_PDEATHSIG", Const, 0},
    -		{"PR_GET_SECCOMP", Const, 0},
    -		{"PR_GET_SECCOMP_FILTER", Const, 0},
    -		{"PR_GET_SECUREBITS", Const, 0},
    -		{"PR_GET_TIMERSLACK", Const, 0},
    -		{"PR_GET_TIMING", Const, 0},
    -		{"PR_GET_TSC", Const, 0},
    -		{"PR_GET_UNALIGN", Const, 0},
    -		{"PR_MCE_KILL", Const, 0},
    -		{"PR_MCE_KILL_CLEAR", Const, 0},
    -		{"PR_MCE_KILL_DEFAULT", Const, 0},
    -		{"PR_MCE_KILL_EARLY", Const, 0},
    -		{"PR_MCE_KILL_GET", Const, 0},
    -		{"PR_MCE_KILL_LATE", Const, 0},
    -		{"PR_MCE_KILL_SET", Const, 0},
    -		{"PR_SECCOMP_FILTER_EVENT", Const, 0},
    -		{"PR_SECCOMP_FILTER_SYSCALL", Const, 0},
    -		{"PR_SET_DUMPABLE", Const, 0},
    -		{"PR_SET_ENDIAN", Const, 0},
    -		{"PR_SET_FPEMU", Const, 0},
    -		{"PR_SET_FPEXC", Const, 0},
    -		{"PR_SET_KEEPCAPS", Const, 0},
    -		{"PR_SET_NAME", Const, 0},
    -		{"PR_SET_PDEATHSIG", Const, 0},
    -		{"PR_SET_PTRACER", Const, 0},
    -		{"PR_SET_SECCOMP", Const, 0},
    -		{"PR_SET_SECCOMP_FILTER", Const, 0},
    -		{"PR_SET_SECUREBITS", Const, 0},
    -		{"PR_SET_TIMERSLACK", Const, 0},
    -		{"PR_SET_TIMING", Const, 0},
    -		{"PR_SET_TSC", Const, 0},
    -		{"PR_SET_UNALIGN", Const, 0},
    -		{"PR_TASK_PERF_EVENTS_DISABLE", Const, 0},
    -		{"PR_TASK_PERF_EVENTS_ENABLE", Const, 0},
    -		{"PR_TIMING_STATISTICAL", Const, 0},
    -		{"PR_TIMING_TIMESTAMP", Const, 0},
    -		{"PR_TSC_ENABLE", Const, 0},
    -		{"PR_TSC_SIGSEGV", Const, 0},
    -		{"PR_UNALIGN_NOPRINT", Const, 0},
    -		{"PR_UNALIGN_SIGBUS", Const, 0},
    -		{"PTRACE_ARCH_PRCTL", Const, 0},
    -		{"PTRACE_ATTACH", Const, 0},
    -		{"PTRACE_CONT", Const, 0},
    -		{"PTRACE_DETACH", Const, 0},
    -		{"PTRACE_EVENT_CLONE", Const, 0},
    -		{"PTRACE_EVENT_EXEC", Const, 0},
    -		{"PTRACE_EVENT_EXIT", Const, 0},
    -		{"PTRACE_EVENT_FORK", Const, 0},
    -		{"PTRACE_EVENT_VFORK", Const, 0},
    -		{"PTRACE_EVENT_VFORK_DONE", Const, 0},
    -		{"PTRACE_GETCRUNCHREGS", Const, 0},
    -		{"PTRACE_GETEVENTMSG", Const, 0},
    -		{"PTRACE_GETFPREGS", Const, 0},
    -		{"PTRACE_GETFPXREGS", Const, 0},
    -		{"PTRACE_GETHBPREGS", Const, 0},
    -		{"PTRACE_GETREGS", Const, 0},
    -		{"PTRACE_GETREGSET", Const, 0},
    -		{"PTRACE_GETSIGINFO", Const, 0},
    -		{"PTRACE_GETVFPREGS", Const, 0},
    -		{"PTRACE_GETWMMXREGS", Const, 0},
    -		{"PTRACE_GET_THREAD_AREA", Const, 0},
    -		{"PTRACE_KILL", Const, 0},
    -		{"PTRACE_OLDSETOPTIONS", Const, 0},
    -		{"PTRACE_O_MASK", Const, 0},
    -		{"PTRACE_O_TRACECLONE", Const, 0},
    -		{"PTRACE_O_TRACEEXEC", Const, 0},
    -		{"PTRACE_O_TRACEEXIT", Const, 0},
    -		{"PTRACE_O_TRACEFORK", Const, 0},
    -		{"PTRACE_O_TRACESYSGOOD", Const, 0},
    -		{"PTRACE_O_TRACEVFORK", Const, 0},
    -		{"PTRACE_O_TRACEVFORKDONE", Const, 0},
    -		{"PTRACE_PEEKDATA", Const, 0},
    -		{"PTRACE_PEEKTEXT", Const, 0},
    -		{"PTRACE_PEEKUSR", Const, 0},
    -		{"PTRACE_POKEDATA", Const, 0},
    -		{"PTRACE_POKETEXT", Const, 0},
    -		{"PTRACE_POKEUSR", Const, 0},
    -		{"PTRACE_SETCRUNCHREGS", Const, 0},
    -		{"PTRACE_SETFPREGS", Const, 0},
    -		{"PTRACE_SETFPXREGS", Const, 0},
    -		{"PTRACE_SETHBPREGS", Const, 0},
    -		{"PTRACE_SETOPTIONS", Const, 0},
    -		{"PTRACE_SETREGS", Const, 0},
    -		{"PTRACE_SETREGSET", Const, 0},
    -		{"PTRACE_SETSIGINFO", Const, 0},
    -		{"PTRACE_SETVFPREGS", Const, 0},
    -		{"PTRACE_SETWMMXREGS", Const, 0},
    -		{"PTRACE_SET_SYSCALL", Const, 0},
    -		{"PTRACE_SET_THREAD_AREA", Const, 0},
    -		{"PTRACE_SINGLEBLOCK", Const, 0},
    -		{"PTRACE_SINGLESTEP", Const, 0},
    -		{"PTRACE_SYSCALL", Const, 0},
    -		{"PTRACE_SYSEMU", Const, 0},
    -		{"PTRACE_SYSEMU_SINGLESTEP", Const, 0},
    -		{"PTRACE_TRACEME", Const, 0},
    -		{"PT_ATTACH", Const, 0},
    -		{"PT_ATTACHEXC", Const, 0},
    -		{"PT_CONTINUE", Const, 0},
    -		{"PT_DATA_ADDR", Const, 0},
    -		{"PT_DENY_ATTACH", Const, 0},
    -		{"PT_DETACH", Const, 0},
    -		{"PT_FIRSTMACH", Const, 0},
    -		{"PT_FORCEQUOTA", Const, 0},
    -		{"PT_KILL", Const, 0},
    -		{"PT_MASK", Const, 1},
    -		{"PT_READ_D", Const, 0},
    -		{"PT_READ_I", Const, 0},
    -		{"PT_READ_U", Const, 0},
    -		{"PT_SIGEXC", Const, 0},
    -		{"PT_STEP", Const, 0},
    -		{"PT_TEXT_ADDR", Const, 0},
    -		{"PT_TEXT_END_ADDR", Const, 0},
    -		{"PT_THUPDATE", Const, 0},
    -		{"PT_TRACE_ME", Const, 0},
    -		{"PT_WRITE_D", Const, 0},
    -		{"PT_WRITE_I", Const, 0},
    -		{"PT_WRITE_U", Const, 0},
    -		{"ParseDirent", Func, 0},
    -		{"ParseNetlinkMessage", Func, 0},
    -		{"ParseNetlinkRouteAttr", Func, 0},
    -		{"ParseRoutingMessage", Func, 0},
    -		{"ParseRoutingSockaddr", Func, 0},
    -		{"ParseSocketControlMessage", Func, 0},
    -		{"ParseUnixCredentials", Func, 0},
    -		{"ParseUnixRights", Func, 0},
    -		{"PathMax", Const, 0},
    -		{"Pathconf", Func, 0},
    -		{"Pause", Func, 0},
    -		{"Pipe", Func, 0},
    -		{"Pipe2", Func, 1},
    -		{"PivotRoot", Func, 0},
    -		{"Pointer", Type, 11},
    -		{"PostQueuedCompletionStatus", Func, 0},
    -		{"Pread", Func, 0},
    -		{"Proc", Type, 0},
    -		{"Proc.Dll", Field, 0},
    -		{"Proc.Name", Field, 0},
    -		{"ProcAttr", Type, 0},
    -		{"ProcAttr.Dir", Field, 0},
    -		{"ProcAttr.Env", Field, 0},
    -		{"ProcAttr.Files", Field, 0},
    -		{"ProcAttr.Sys", Field, 0},
    -		{"Process32First", Func, 4},
    -		{"Process32Next", Func, 4},
    -		{"ProcessEntry32", Type, 4},
    -		{"ProcessEntry32.DefaultHeapID", Field, 4},
    -		{"ProcessEntry32.ExeFile", Field, 4},
    -		{"ProcessEntry32.Flags", Field, 4},
    -		{"ProcessEntry32.ModuleID", Field, 4},
    -		{"ProcessEntry32.ParentProcessID", Field, 4},
    -		{"ProcessEntry32.PriClassBase", Field, 4},
    -		{"ProcessEntry32.ProcessID", Field, 4},
    -		{"ProcessEntry32.Size", Field, 4},
    -		{"ProcessEntry32.Threads", Field, 4},
    -		{"ProcessEntry32.Usage", Field, 4},
    -		{"ProcessInformation", Type, 0},
    -		{"ProcessInformation.Process", Field, 0},
    -		{"ProcessInformation.ProcessId", Field, 0},
    -		{"ProcessInformation.Thread", Field, 0},
    -		{"ProcessInformation.ThreadId", Field, 0},
    -		{"Protoent", Type, 0},
    -		{"Protoent.Aliases", Field, 0},
    -		{"Protoent.Name", Field, 0},
    -		{"Protoent.Proto", Field, 0},
    -		{"PtraceAttach", Func, 0},
    -		{"PtraceCont", Func, 0},
    -		{"PtraceDetach", Func, 0},
    -		{"PtraceGetEventMsg", Func, 0},
    -		{"PtraceGetRegs", Func, 0},
    -		{"PtracePeekData", Func, 0},
    -		{"PtracePeekText", Func, 0},
    -		{"PtracePokeData", Func, 0},
    -		{"PtracePokeText", Func, 0},
    -		{"PtraceRegs", Type, 0},
    -		{"PtraceRegs.Cs", Field, 0},
    -		{"PtraceRegs.Ds", Field, 0},
    -		{"PtraceRegs.Eax", Field, 0},
    -		{"PtraceRegs.Ebp", Field, 0},
    -		{"PtraceRegs.Ebx", Field, 0},
    -		{"PtraceRegs.Ecx", Field, 0},
    -		{"PtraceRegs.Edi", Field, 0},
    -		{"PtraceRegs.Edx", Field, 0},
    -		{"PtraceRegs.Eflags", Field, 0},
    -		{"PtraceRegs.Eip", Field, 0},
    -		{"PtraceRegs.Es", Field, 0},
    -		{"PtraceRegs.Esi", Field, 0},
    -		{"PtraceRegs.Esp", Field, 0},
    -		{"PtraceRegs.Fs", Field, 0},
    -		{"PtraceRegs.Fs_base", Field, 0},
    -		{"PtraceRegs.Gs", Field, 0},
    -		{"PtraceRegs.Gs_base", Field, 0},
    -		{"PtraceRegs.Orig_eax", Field, 0},
    -		{"PtraceRegs.Orig_rax", Field, 0},
    -		{"PtraceRegs.R10", Field, 0},
    -		{"PtraceRegs.R11", Field, 0},
    -		{"PtraceRegs.R12", Field, 0},
    -		{"PtraceRegs.R13", Field, 0},
    -		{"PtraceRegs.R14", Field, 0},
    -		{"PtraceRegs.R15", Field, 0},
    -		{"PtraceRegs.R8", Field, 0},
    -		{"PtraceRegs.R9", Field, 0},
    -		{"PtraceRegs.Rax", Field, 0},
    -		{"PtraceRegs.Rbp", Field, 0},
    -		{"PtraceRegs.Rbx", Field, 0},
    -		{"PtraceRegs.Rcx", Field, 0},
    -		{"PtraceRegs.Rdi", Field, 0},
    -		{"PtraceRegs.Rdx", Field, 0},
    -		{"PtraceRegs.Rip", Field, 0},
    -		{"PtraceRegs.Rsi", Field, 0},
    -		{"PtraceRegs.Rsp", Field, 0},
    -		{"PtraceRegs.Ss", Field, 0},
    -		{"PtraceRegs.Uregs", Field, 0},
    -		{"PtraceRegs.Xcs", Field, 0},
    -		{"PtraceRegs.Xds", Field, 0},
    -		{"PtraceRegs.Xes", Field, 0},
    -		{"PtraceRegs.Xfs", Field, 0},
    -		{"PtraceRegs.Xgs", Field, 0},
    -		{"PtraceRegs.Xss", Field, 0},
    -		{"PtraceSetOptions", Func, 0},
    -		{"PtraceSetRegs", Func, 0},
    -		{"PtraceSingleStep", Func, 0},
    -		{"PtraceSyscall", Func, 1},
    -		{"Pwrite", Func, 0},
    -		{"REG_BINARY", Const, 0},
    -		{"REG_DWORD", Const, 0},
    -		{"REG_DWORD_BIG_ENDIAN", Const, 0},
    -		{"REG_DWORD_LITTLE_ENDIAN", Const, 0},
    -		{"REG_EXPAND_SZ", Const, 0},
    -		{"REG_FULL_RESOURCE_DESCRIPTOR", Const, 0},
    -		{"REG_LINK", Const, 0},
    -		{"REG_MULTI_SZ", Const, 0},
    -		{"REG_NONE", Const, 0},
    -		{"REG_QWORD", Const, 0},
    -		{"REG_QWORD_LITTLE_ENDIAN", Const, 0},
    -		{"REG_RESOURCE_LIST", Const, 0},
    -		{"REG_RESOURCE_REQUIREMENTS_LIST", Const, 0},
    -		{"REG_SZ", Const, 0},
    -		{"RLIMIT_AS", Const, 0},
    -		{"RLIMIT_CORE", Const, 0},
    -		{"RLIMIT_CPU", Const, 0},
    -		{"RLIMIT_CPU_USAGE_MONITOR", Const, 16},
    -		{"RLIMIT_DATA", Const, 0},
    -		{"RLIMIT_FSIZE", Const, 0},
    -		{"RLIMIT_NOFILE", Const, 0},
    -		{"RLIMIT_STACK", Const, 0},
    -		{"RLIM_INFINITY", Const, 0},
    -		{"RTAX_ADVMSS", Const, 0},
    -		{"RTAX_AUTHOR", Const, 0},
    -		{"RTAX_BRD", Const, 0},
    -		{"RTAX_CWND", Const, 0},
    -		{"RTAX_DST", Const, 0},
    -		{"RTAX_FEATURES", Const, 0},
    -		{"RTAX_FEATURE_ALLFRAG", Const, 0},
    -		{"RTAX_FEATURE_ECN", Const, 0},
    -		{"RTAX_FEATURE_SACK", Const, 0},
    -		{"RTAX_FEATURE_TIMESTAMP", Const, 0},
    -		{"RTAX_GATEWAY", Const, 0},
    -		{"RTAX_GENMASK", Const, 0},
    -		{"RTAX_HOPLIMIT", Const, 0},
    -		{"RTAX_IFA", Const, 0},
    -		{"RTAX_IFP", Const, 0},
    -		{"RTAX_INITCWND", Const, 0},
    -		{"RTAX_INITRWND", Const, 0},
    -		{"RTAX_LABEL", Const, 1},
    -		{"RTAX_LOCK", Const, 0},
    -		{"RTAX_MAX", Const, 0},
    -		{"RTAX_MTU", Const, 0},
    -		{"RTAX_NETMASK", Const, 0},
    -		{"RTAX_REORDERING", Const, 0},
    -		{"RTAX_RTO_MIN", Const, 0},
    -		{"RTAX_RTT", Const, 0},
    -		{"RTAX_RTTVAR", Const, 0},
    -		{"RTAX_SRC", Const, 1},
    -		{"RTAX_SRCMASK", Const, 1},
    -		{"RTAX_SSTHRESH", Const, 0},
    -		{"RTAX_TAG", Const, 1},
    -		{"RTAX_UNSPEC", Const, 0},
    -		{"RTAX_WINDOW", Const, 0},
    -		{"RTA_ALIGNTO", Const, 0},
    -		{"RTA_AUTHOR", Const, 0},
    -		{"RTA_BRD", Const, 0},
    -		{"RTA_CACHEINFO", Const, 0},
    -		{"RTA_DST", Const, 0},
    -		{"RTA_FLOW", Const, 0},
    -		{"RTA_GATEWAY", Const, 0},
    -		{"RTA_GENMASK", Const, 0},
    -		{"RTA_IFA", Const, 0},
    -		{"RTA_IFP", Const, 0},
    -		{"RTA_IIF", Const, 0},
    -		{"RTA_LABEL", Const, 1},
    -		{"RTA_MAX", Const, 0},
    -		{"RTA_METRICS", Const, 0},
    -		{"RTA_MULTIPATH", Const, 0},
    -		{"RTA_NETMASK", Const, 0},
    -		{"RTA_OIF", Const, 0},
    -		{"RTA_PREFSRC", Const, 0},
    -		{"RTA_PRIORITY", Const, 0},
    -		{"RTA_SRC", Const, 0},
    -		{"RTA_SRCMASK", Const, 1},
    -		{"RTA_TABLE", Const, 0},
    -		{"RTA_TAG", Const, 1},
    -		{"RTA_UNSPEC", Const, 0},
    -		{"RTCF_DIRECTSRC", Const, 0},
    -		{"RTCF_DOREDIRECT", Const, 0},
    -		{"RTCF_LOG", Const, 0},
    -		{"RTCF_MASQ", Const, 0},
    -		{"RTCF_NAT", Const, 0},
    -		{"RTCF_VALVE", Const, 0},
    -		{"RTF_ADDRCLASSMASK", Const, 0},
    -		{"RTF_ADDRCONF", Const, 0},
    -		{"RTF_ALLONLINK", Const, 0},
    -		{"RTF_ANNOUNCE", Const, 1},
    -		{"RTF_BLACKHOLE", Const, 0},
    -		{"RTF_BROADCAST", Const, 0},
    -		{"RTF_CACHE", Const, 0},
    -		{"RTF_CLONED", Const, 1},
    -		{"RTF_CLONING", Const, 0},
    -		{"RTF_CONDEMNED", Const, 0},
    -		{"RTF_DEFAULT", Const, 0},
    -		{"RTF_DELCLONE", Const, 0},
    -		{"RTF_DONE", Const, 0},
    -		{"RTF_DYNAMIC", Const, 0},
    -		{"RTF_FLOW", Const, 0},
    -		{"RTF_FMASK", Const, 0},
    -		{"RTF_GATEWAY", Const, 0},
    -		{"RTF_GWFLAG_COMPAT", Const, 3},
    -		{"RTF_HOST", Const, 0},
    -		{"RTF_IFREF", Const, 0},
    -		{"RTF_IFSCOPE", Const, 0},
    -		{"RTF_INTERFACE", Const, 0},
    -		{"RTF_IRTT", Const, 0},
    -		{"RTF_LINKRT", Const, 0},
    -		{"RTF_LLDATA", Const, 0},
    -		{"RTF_LLINFO", Const, 0},
    -		{"RTF_LOCAL", Const, 0},
    -		{"RTF_MASK", Const, 1},
    -		{"RTF_MODIFIED", Const, 0},
    -		{"RTF_MPATH", Const, 1},
    -		{"RTF_MPLS", Const, 1},
    -		{"RTF_MSS", Const, 0},
    -		{"RTF_MTU", Const, 0},
    -		{"RTF_MULTICAST", Const, 0},
    -		{"RTF_NAT", Const, 0},
    -		{"RTF_NOFORWARD", Const, 0},
    -		{"RTF_NONEXTHOP", Const, 0},
    -		{"RTF_NOPMTUDISC", Const, 0},
    -		{"RTF_PERMANENT_ARP", Const, 1},
    -		{"RTF_PINNED", Const, 0},
    -		{"RTF_POLICY", Const, 0},
    -		{"RTF_PRCLONING", Const, 0},
    -		{"RTF_PROTO1", Const, 0},
    -		{"RTF_PROTO2", Const, 0},
    -		{"RTF_PROTO3", Const, 0},
    -		{"RTF_PROXY", Const, 16},
    -		{"RTF_REINSTATE", Const, 0},
    -		{"RTF_REJECT", Const, 0},
    -		{"RTF_RNH_LOCKED", Const, 0},
    -		{"RTF_ROUTER", Const, 16},
    -		{"RTF_SOURCE", Const, 1},
    -		{"RTF_SRC", Const, 1},
    -		{"RTF_STATIC", Const, 0},
    -		{"RTF_STICKY", Const, 0},
    -		{"RTF_THROW", Const, 0},
    -		{"RTF_TUNNEL", Const, 1},
    -		{"RTF_UP", Const, 0},
    -		{"RTF_USETRAILERS", Const, 1},
    -		{"RTF_WASCLONED", Const, 0},
    -		{"RTF_WINDOW", Const, 0},
    -		{"RTF_XRESOLVE", Const, 0},
    -		{"RTM_ADD", Const, 0},
    -		{"RTM_BASE", Const, 0},
    -		{"RTM_CHANGE", Const, 0},
    -		{"RTM_CHGADDR", Const, 1},
    -		{"RTM_DELACTION", Const, 0},
    -		{"RTM_DELADDR", Const, 0},
    -		{"RTM_DELADDRLABEL", Const, 0},
    -		{"RTM_DELETE", Const, 0},
    -		{"RTM_DELLINK", Const, 0},
    -		{"RTM_DELMADDR", Const, 0},
    -		{"RTM_DELNEIGH", Const, 0},
    -		{"RTM_DELQDISC", Const, 0},
    -		{"RTM_DELROUTE", Const, 0},
    -		{"RTM_DELRULE", Const, 0},
    -		{"RTM_DELTCLASS", Const, 0},
    -		{"RTM_DELTFILTER", Const, 0},
    -		{"RTM_DESYNC", Const, 1},
    -		{"RTM_F_CLONED", Const, 0},
    -		{"RTM_F_EQUALIZE", Const, 0},
    -		{"RTM_F_NOTIFY", Const, 0},
    -		{"RTM_F_PREFIX", Const, 0},
    -		{"RTM_GET", Const, 0},
    -		{"RTM_GET2", Const, 0},
    -		{"RTM_GETACTION", Const, 0},
    -		{"RTM_GETADDR", Const, 0},
    -		{"RTM_GETADDRLABEL", Const, 0},
    -		{"RTM_GETANYCAST", Const, 0},
    -		{"RTM_GETDCB", Const, 0},
    -		{"RTM_GETLINK", Const, 0},
    -		{"RTM_GETMULTICAST", Const, 0},
    -		{"RTM_GETNEIGH", Const, 0},
    -		{"RTM_GETNEIGHTBL", Const, 0},
    -		{"RTM_GETQDISC", Const, 0},
    -		{"RTM_GETROUTE", Const, 0},
    -		{"RTM_GETRULE", Const, 0},
    -		{"RTM_GETTCLASS", Const, 0},
    -		{"RTM_GETTFILTER", Const, 0},
    -		{"RTM_IEEE80211", Const, 0},
    -		{"RTM_IFANNOUNCE", Const, 0},
    -		{"RTM_IFINFO", Const, 0},
    -		{"RTM_IFINFO2", Const, 0},
    -		{"RTM_LLINFO_UPD", Const, 1},
    -		{"RTM_LOCK", Const, 0},
    -		{"RTM_LOSING", Const, 0},
    -		{"RTM_MAX", Const, 0},
    -		{"RTM_MAXSIZE", Const, 1},
    -		{"RTM_MISS", Const, 0},
    -		{"RTM_NEWACTION", Const, 0},
    -		{"RTM_NEWADDR", Const, 0},
    -		{"RTM_NEWADDRLABEL", Const, 0},
    -		{"RTM_NEWLINK", Const, 0},
    -		{"RTM_NEWMADDR", Const, 0},
    -		{"RTM_NEWMADDR2", Const, 0},
    -		{"RTM_NEWNDUSEROPT", Const, 0},
    -		{"RTM_NEWNEIGH", Const, 0},
    -		{"RTM_NEWNEIGHTBL", Const, 0},
    -		{"RTM_NEWPREFIX", Const, 0},
    -		{"RTM_NEWQDISC", Const, 0},
    -		{"RTM_NEWROUTE", Const, 0},
    -		{"RTM_NEWRULE", Const, 0},
    -		{"RTM_NEWTCLASS", Const, 0},
    -		{"RTM_NEWTFILTER", Const, 0},
    -		{"RTM_NR_FAMILIES", Const, 0},
    -		{"RTM_NR_MSGTYPES", Const, 0},
    -		{"RTM_OIFINFO", Const, 1},
    -		{"RTM_OLDADD", Const, 0},
    -		{"RTM_OLDDEL", Const, 0},
    -		{"RTM_OOIFINFO", Const, 1},
    -		{"RTM_REDIRECT", Const, 0},
    -		{"RTM_RESOLVE", Const, 0},
    -		{"RTM_RTTUNIT", Const, 0},
    -		{"RTM_SETDCB", Const, 0},
    -		{"RTM_SETGATE", Const, 1},
    -		{"RTM_SETLINK", Const, 0},
    -		{"RTM_SETNEIGHTBL", Const, 0},
    -		{"RTM_VERSION", Const, 0},
    -		{"RTNH_ALIGNTO", Const, 0},
    -		{"RTNH_F_DEAD", Const, 0},
    -		{"RTNH_F_ONLINK", Const, 0},
    -		{"RTNH_F_PERVASIVE", Const, 0},
    -		{"RTNLGRP_IPV4_IFADDR", Const, 1},
    -		{"RTNLGRP_IPV4_MROUTE", Const, 1},
    -		{"RTNLGRP_IPV4_ROUTE", Const, 1},
    -		{"RTNLGRP_IPV4_RULE", Const, 1},
    -		{"RTNLGRP_IPV6_IFADDR", Const, 1},
    -		{"RTNLGRP_IPV6_IFINFO", Const, 1},
    -		{"RTNLGRP_IPV6_MROUTE", Const, 1},
    -		{"RTNLGRP_IPV6_PREFIX", Const, 1},
    -		{"RTNLGRP_IPV6_ROUTE", Const, 1},
    -		{"RTNLGRP_IPV6_RULE", Const, 1},
    -		{"RTNLGRP_LINK", Const, 1},
    -		{"RTNLGRP_ND_USEROPT", Const, 1},
    -		{"RTNLGRP_NEIGH", Const, 1},
    -		{"RTNLGRP_NONE", Const, 1},
    -		{"RTNLGRP_NOTIFY", Const, 1},
    -		{"RTNLGRP_TC", Const, 1},
    -		{"RTN_ANYCAST", Const, 0},
    -		{"RTN_BLACKHOLE", Const, 0},
    -		{"RTN_BROADCAST", Const, 0},
    -		{"RTN_LOCAL", Const, 0},
    -		{"RTN_MAX", Const, 0},
    -		{"RTN_MULTICAST", Const, 0},
    -		{"RTN_NAT", Const, 0},
    -		{"RTN_PROHIBIT", Const, 0},
    -		{"RTN_THROW", Const, 0},
    -		{"RTN_UNICAST", Const, 0},
    -		{"RTN_UNREACHABLE", Const, 0},
    -		{"RTN_UNSPEC", Const, 0},
    -		{"RTN_XRESOLVE", Const, 0},
    -		{"RTPROT_BIRD", Const, 0},
    -		{"RTPROT_BOOT", Const, 0},
    -		{"RTPROT_DHCP", Const, 0},
    -		{"RTPROT_DNROUTED", Const, 0},
    -		{"RTPROT_GATED", Const, 0},
    -		{"RTPROT_KERNEL", Const, 0},
    -		{"RTPROT_MRT", Const, 0},
    -		{"RTPROT_NTK", Const, 0},
    -		{"RTPROT_RA", Const, 0},
    -		{"RTPROT_REDIRECT", Const, 0},
    -		{"RTPROT_STATIC", Const, 0},
    -		{"RTPROT_UNSPEC", Const, 0},
    -		{"RTPROT_XORP", Const, 0},
    -		{"RTPROT_ZEBRA", Const, 0},
    -		{"RTV_EXPIRE", Const, 0},
    -		{"RTV_HOPCOUNT", Const, 0},
    -		{"RTV_MTU", Const, 0},
    -		{"RTV_RPIPE", Const, 0},
    -		{"RTV_RTT", Const, 0},
    -		{"RTV_RTTVAR", Const, 0},
    -		{"RTV_SPIPE", Const, 0},
    -		{"RTV_SSTHRESH", Const, 0},
    -		{"RTV_WEIGHT", Const, 0},
    -		{"RT_CACHING_CONTEXT", Const, 1},
    -		{"RT_CLASS_DEFAULT", Const, 0},
    -		{"RT_CLASS_LOCAL", Const, 0},
    -		{"RT_CLASS_MAIN", Const, 0},
    -		{"RT_CLASS_MAX", Const, 0},
    -		{"RT_CLASS_UNSPEC", Const, 0},
    -		{"RT_DEFAULT_FIB", Const, 1},
    -		{"RT_NORTREF", Const, 1},
    -		{"RT_SCOPE_HOST", Const, 0},
    -		{"RT_SCOPE_LINK", Const, 0},
    -		{"RT_SCOPE_NOWHERE", Const, 0},
    -		{"RT_SCOPE_SITE", Const, 0},
    -		{"RT_SCOPE_UNIVERSE", Const, 0},
    -		{"RT_TABLEID_MAX", Const, 1},
    -		{"RT_TABLE_COMPAT", Const, 0},
    -		{"RT_TABLE_DEFAULT", Const, 0},
    -		{"RT_TABLE_LOCAL", Const, 0},
    -		{"RT_TABLE_MAIN", Const, 0},
    -		{"RT_TABLE_MAX", Const, 0},
    -		{"RT_TABLE_UNSPEC", Const, 0},
    -		{"RUSAGE_CHILDREN", Const, 0},
    -		{"RUSAGE_SELF", Const, 0},
    -		{"RUSAGE_THREAD", Const, 0},
    -		{"Radvisory_t", Type, 0},
    -		{"Radvisory_t.Count", Field, 0},
    -		{"Radvisory_t.Offset", Field, 0},
    -		{"Radvisory_t.Pad_cgo_0", Field, 0},
    -		{"RawConn", Type, 9},
    -		{"RawSockaddr", Type, 0},
    -		{"RawSockaddr.Data", Field, 0},
    -		{"RawSockaddr.Family", Field, 0},
    -		{"RawSockaddr.Len", Field, 0},
    -		{"RawSockaddrAny", Type, 0},
    -		{"RawSockaddrAny.Addr", Field, 0},
    -		{"RawSockaddrAny.Pad", Field, 0},
    -		{"RawSockaddrDatalink", Type, 0},
    -		{"RawSockaddrDatalink.Alen", Field, 0},
    -		{"RawSockaddrDatalink.Data", Field, 0},
    -		{"RawSockaddrDatalink.Family", Field, 0},
    -		{"RawSockaddrDatalink.Index", Field, 0},
    -		{"RawSockaddrDatalink.Len", Field, 0},
    -		{"RawSockaddrDatalink.Nlen", Field, 0},
    -		{"RawSockaddrDatalink.Pad_cgo_0", Field, 2},
    -		{"RawSockaddrDatalink.Slen", Field, 0},
    -		{"RawSockaddrDatalink.Type", Field, 0},
    -		{"RawSockaddrInet4", Type, 0},
    -		{"RawSockaddrInet4.Addr", Field, 0},
    -		{"RawSockaddrInet4.Family", Field, 0},
    -		{"RawSockaddrInet4.Len", Field, 0},
    -		{"RawSockaddrInet4.Port", Field, 0},
    -		{"RawSockaddrInet4.Zero", Field, 0},
    -		{"RawSockaddrInet6", Type, 0},
    -		{"RawSockaddrInet6.Addr", Field, 0},
    -		{"RawSockaddrInet6.Family", Field, 0},
    -		{"RawSockaddrInet6.Flowinfo", Field, 0},
    -		{"RawSockaddrInet6.Len", Field, 0},
    -		{"RawSockaddrInet6.Port", Field, 0},
    -		{"RawSockaddrInet6.Scope_id", Field, 0},
    -		{"RawSockaddrLinklayer", Type, 0},
    -		{"RawSockaddrLinklayer.Addr", Field, 0},
    -		{"RawSockaddrLinklayer.Family", Field, 0},
    -		{"RawSockaddrLinklayer.Halen", Field, 0},
    -		{"RawSockaddrLinklayer.Hatype", Field, 0},
    -		{"RawSockaddrLinklayer.Ifindex", Field, 0},
    -		{"RawSockaddrLinklayer.Pkttype", Field, 0},
    -		{"RawSockaddrLinklayer.Protocol", Field, 0},
    -		{"RawSockaddrNetlink", Type, 0},
    -		{"RawSockaddrNetlink.Family", Field, 0},
    -		{"RawSockaddrNetlink.Groups", Field, 0},
    -		{"RawSockaddrNetlink.Pad", Field, 0},
    -		{"RawSockaddrNetlink.Pid", Field, 0},
    -		{"RawSockaddrUnix", Type, 0},
    -		{"RawSockaddrUnix.Family", Field, 0},
    -		{"RawSockaddrUnix.Len", Field, 0},
    -		{"RawSockaddrUnix.Pad_cgo_0", Field, 2},
    -		{"RawSockaddrUnix.Path", Field, 0},
    -		{"RawSyscall", Func, 0},
    -		{"RawSyscall6", Func, 0},
    -		{"Read", Func, 0},
    -		{"ReadConsole", Func, 1},
    -		{"ReadDirectoryChanges", Func, 0},
    -		{"ReadDirent", Func, 0},
    -		{"ReadFile", Func, 0},
    -		{"Readlink", Func, 0},
    -		{"Reboot", Func, 0},
    -		{"Recvfrom", Func, 0},
    -		{"Recvmsg", Func, 0},
    -		{"RegCloseKey", Func, 0},
    -		{"RegEnumKeyEx", Func, 0},
    -		{"RegOpenKeyEx", Func, 0},
    -		{"RegQueryInfoKey", Func, 0},
    -		{"RegQueryValueEx", Func, 0},
    -		{"RemoveDirectory", Func, 0},
    -		{"Removexattr", Func, 1},
    -		{"Rename", Func, 0},
    -		{"Renameat", Func, 0},
    -		{"Revoke", Func, 0},
    -		{"Rlimit", Type, 0},
    -		{"Rlimit.Cur", Field, 0},
    -		{"Rlimit.Max", Field, 0},
    -		{"Rmdir", Func, 0},
    -		{"RouteMessage", Type, 0},
    -		{"RouteMessage.Data", Field, 0},
    -		{"RouteMessage.Header", Field, 0},
    -		{"RouteRIB", Func, 0},
    -		{"RoutingMessage", Type, 0},
    -		{"RtAttr", Type, 0},
    -		{"RtAttr.Len", Field, 0},
    -		{"RtAttr.Type", Field, 0},
    -		{"RtGenmsg", Type, 0},
    -		{"RtGenmsg.Family", Field, 0},
    -		{"RtMetrics", Type, 0},
    -		{"RtMetrics.Expire", Field, 0},
    -		{"RtMetrics.Filler", Field, 0},
    -		{"RtMetrics.Hopcount", Field, 0},
    -		{"RtMetrics.Locks", Field, 0},
    -		{"RtMetrics.Mtu", Field, 0},
    -		{"RtMetrics.Pad", Field, 3},
    -		{"RtMetrics.Pksent", Field, 0},
    -		{"RtMetrics.Recvpipe", Field, 0},
    -		{"RtMetrics.Refcnt", Field, 2},
    -		{"RtMetrics.Rtt", Field, 0},
    -		{"RtMetrics.Rttvar", Field, 0},
    -		{"RtMetrics.Sendpipe", Field, 0},
    -		{"RtMetrics.Ssthresh", Field, 0},
    -		{"RtMetrics.Weight", Field, 0},
    -		{"RtMsg", Type, 0},
    -		{"RtMsg.Dst_len", Field, 0},
    -		{"RtMsg.Family", Field, 0},
    -		{"RtMsg.Flags", Field, 0},
    -		{"RtMsg.Protocol", Field, 0},
    -		{"RtMsg.Scope", Field, 0},
    -		{"RtMsg.Src_len", Field, 0},
    -		{"RtMsg.Table", Field, 0},
    -		{"RtMsg.Tos", Field, 0},
    -		{"RtMsg.Type", Field, 0},
    -		{"RtMsghdr", Type, 0},
    -		{"RtMsghdr.Addrs", Field, 0},
    -		{"RtMsghdr.Errno", Field, 0},
    -		{"RtMsghdr.Flags", Field, 0},
    -		{"RtMsghdr.Fmask", Field, 0},
    -		{"RtMsghdr.Hdrlen", Field, 2},
    -		{"RtMsghdr.Index", Field, 0},
    -		{"RtMsghdr.Inits", Field, 0},
    -		{"RtMsghdr.Mpls", Field, 2},
    -		{"RtMsghdr.Msglen", Field, 0},
    -		{"RtMsghdr.Pad_cgo_0", Field, 0},
    -		{"RtMsghdr.Pad_cgo_1", Field, 2},
    -		{"RtMsghdr.Pid", Field, 0},
    -		{"RtMsghdr.Priority", Field, 2},
    -		{"RtMsghdr.Rmx", Field, 0},
    -		{"RtMsghdr.Seq", Field, 0},
    -		{"RtMsghdr.Tableid", Field, 2},
    -		{"RtMsghdr.Type", Field, 0},
    -		{"RtMsghdr.Use", Field, 0},
    -		{"RtMsghdr.Version", Field, 0},
    -		{"RtNexthop", Type, 0},
    -		{"RtNexthop.Flags", Field, 0},
    -		{"RtNexthop.Hops", Field, 0},
    -		{"RtNexthop.Ifindex", Field, 0},
    -		{"RtNexthop.Len", Field, 0},
    -		{"Rusage", Type, 0},
    -		{"Rusage.CreationTime", Field, 0},
    -		{"Rusage.ExitTime", Field, 0},
    -		{"Rusage.Idrss", Field, 0},
    -		{"Rusage.Inblock", Field, 0},
    -		{"Rusage.Isrss", Field, 0},
    -		{"Rusage.Ixrss", Field, 0},
    -		{"Rusage.KernelTime", Field, 0},
    -		{"Rusage.Majflt", Field, 0},
    -		{"Rusage.Maxrss", Field, 0},
    -		{"Rusage.Minflt", Field, 0},
    -		{"Rusage.Msgrcv", Field, 0},
    -		{"Rusage.Msgsnd", Field, 0},
    -		{"Rusage.Nivcsw", Field, 0},
    -		{"Rusage.Nsignals", Field, 0},
    -		{"Rusage.Nswap", Field, 0},
    -		{"Rusage.Nvcsw", Field, 0},
    -		{"Rusage.Oublock", Field, 0},
    -		{"Rusage.Stime", Field, 0},
    -		{"Rusage.UserTime", Field, 0},
    -		{"Rusage.Utime", Field, 0},
    -		{"SCM_BINTIME", Const, 0},
    -		{"SCM_CREDENTIALS", Const, 0},
    -		{"SCM_CREDS", Const, 0},
    -		{"SCM_RIGHTS", Const, 0},
    -		{"SCM_TIMESTAMP", Const, 0},
    -		{"SCM_TIMESTAMPING", Const, 0},
    -		{"SCM_TIMESTAMPNS", Const, 0},
    -		{"SCM_TIMESTAMP_MONOTONIC", Const, 0},
    -		{"SHUT_RD", Const, 0},
    -		{"SHUT_RDWR", Const, 0},
    -		{"SHUT_WR", Const, 0},
    -		{"SID", Type, 0},
    -		{"SIDAndAttributes", Type, 0},
    -		{"SIDAndAttributes.Attributes", Field, 0},
    -		{"SIDAndAttributes.Sid", Field, 0},
    -		{"SIGABRT", Const, 0},
    -		{"SIGALRM", Const, 0},
    -		{"SIGBUS", Const, 0},
    -		{"SIGCHLD", Const, 0},
    -		{"SIGCLD", Const, 0},
    -		{"SIGCONT", Const, 0},
    -		{"SIGEMT", Const, 0},
    -		{"SIGFPE", Const, 0},
    -		{"SIGHUP", Const, 0},
    -		{"SIGILL", Const, 0},
    -		{"SIGINFO", Const, 0},
    -		{"SIGINT", Const, 0},
    -		{"SIGIO", Const, 0},
    -		{"SIGIOT", Const, 0},
    -		{"SIGKILL", Const, 0},
    -		{"SIGLIBRT", Const, 1},
    -		{"SIGLWP", Const, 0},
    -		{"SIGPIPE", Const, 0},
    -		{"SIGPOLL", Const, 0},
    -		{"SIGPROF", Const, 0},
    -		{"SIGPWR", Const, 0},
    -		{"SIGQUIT", Const, 0},
    -		{"SIGSEGV", Const, 0},
    -		{"SIGSTKFLT", Const, 0},
    -		{"SIGSTOP", Const, 0},
    -		{"SIGSYS", Const, 0},
    -		{"SIGTERM", Const, 0},
    -		{"SIGTHR", Const, 0},
    -		{"SIGTRAP", Const, 0},
    -		{"SIGTSTP", Const, 0},
    -		{"SIGTTIN", Const, 0},
    -		{"SIGTTOU", Const, 0},
    -		{"SIGUNUSED", Const, 0},
    -		{"SIGURG", Const, 0},
    -		{"SIGUSR1", Const, 0},
    -		{"SIGUSR2", Const, 0},
    -		{"SIGVTALRM", Const, 0},
    -		{"SIGWINCH", Const, 0},
    -		{"SIGXCPU", Const, 0},
    -		{"SIGXFSZ", Const, 0},
    -		{"SIOCADDDLCI", Const, 0},
    -		{"SIOCADDMULTI", Const, 0},
    -		{"SIOCADDRT", Const, 0},
    -		{"SIOCAIFADDR", Const, 0},
    -		{"SIOCAIFGROUP", Const, 0},
    -		{"SIOCALIFADDR", Const, 0},
    -		{"SIOCARPIPLL", Const, 0},
    -		{"SIOCATMARK", Const, 0},
    -		{"SIOCAUTOADDR", Const, 0},
    -		{"SIOCAUTONETMASK", Const, 0},
    -		{"SIOCBRDGADD", Const, 1},
    -		{"SIOCBRDGADDS", Const, 1},
    -		{"SIOCBRDGARL", Const, 1},
    -		{"SIOCBRDGDADDR", Const, 1},
    -		{"SIOCBRDGDEL", Const, 1},
    -		{"SIOCBRDGDELS", Const, 1},
    -		{"SIOCBRDGFLUSH", Const, 1},
    -		{"SIOCBRDGFRL", Const, 1},
    -		{"SIOCBRDGGCACHE", Const, 1},
    -		{"SIOCBRDGGFD", Const, 1},
    -		{"SIOCBRDGGHT", Const, 1},
    -		{"SIOCBRDGGIFFLGS", Const, 1},
    -		{"SIOCBRDGGMA", Const, 1},
    -		{"SIOCBRDGGPARAM", Const, 1},
    -		{"SIOCBRDGGPRI", Const, 1},
    -		{"SIOCBRDGGRL", Const, 1},
    -		{"SIOCBRDGGSIFS", Const, 1},
    -		{"SIOCBRDGGTO", Const, 1},
    -		{"SIOCBRDGIFS", Const, 1},
    -		{"SIOCBRDGRTS", Const, 1},
    -		{"SIOCBRDGSADDR", Const, 1},
    -		{"SIOCBRDGSCACHE", Const, 1},
    -		{"SIOCBRDGSFD", Const, 1},
    -		{"SIOCBRDGSHT", Const, 1},
    -		{"SIOCBRDGSIFCOST", Const, 1},
    -		{"SIOCBRDGSIFFLGS", Const, 1},
    -		{"SIOCBRDGSIFPRIO", Const, 1},
    -		{"SIOCBRDGSMA", Const, 1},
    -		{"SIOCBRDGSPRI", Const, 1},
    -		{"SIOCBRDGSPROTO", Const, 1},
    -		{"SIOCBRDGSTO", Const, 1},
    -		{"SIOCBRDGSTXHC", Const, 1},
    -		{"SIOCDARP", Const, 0},
    -		{"SIOCDELDLCI", Const, 0},
    -		{"SIOCDELMULTI", Const, 0},
    -		{"SIOCDELRT", Const, 0},
    -		{"SIOCDEVPRIVATE", Const, 0},
    -		{"SIOCDIFADDR", Const, 0},
    -		{"SIOCDIFGROUP", Const, 0},
    -		{"SIOCDIFPHYADDR", Const, 0},
    -		{"SIOCDLIFADDR", Const, 0},
    -		{"SIOCDRARP", Const, 0},
    -		{"SIOCGARP", Const, 0},
    -		{"SIOCGDRVSPEC", Const, 0},
    -		{"SIOCGETKALIVE", Const, 1},
    -		{"SIOCGETLABEL", Const, 1},
    -		{"SIOCGETPFLOW", Const, 1},
    -		{"SIOCGETPFSYNC", Const, 1},
    -		{"SIOCGETSGCNT", Const, 0},
    -		{"SIOCGETVIFCNT", Const, 0},
    -		{"SIOCGETVLAN", Const, 0},
    -		{"SIOCGHIWAT", Const, 0},
    -		{"SIOCGIFADDR", Const, 0},
    -		{"SIOCGIFADDRPREF", Const, 1},
    -		{"SIOCGIFALIAS", Const, 1},
    -		{"SIOCGIFALTMTU", Const, 0},
    -		{"SIOCGIFASYNCMAP", Const, 0},
    -		{"SIOCGIFBOND", Const, 0},
    -		{"SIOCGIFBR", Const, 0},
    -		{"SIOCGIFBRDADDR", Const, 0},
    -		{"SIOCGIFCAP", Const, 0},
    -		{"SIOCGIFCONF", Const, 0},
    -		{"SIOCGIFCOUNT", Const, 0},
    -		{"SIOCGIFDATA", Const, 1},
    -		{"SIOCGIFDESCR", Const, 0},
    -		{"SIOCGIFDEVMTU", Const, 0},
    -		{"SIOCGIFDLT", Const, 1},
    -		{"SIOCGIFDSTADDR", Const, 0},
    -		{"SIOCGIFENCAP", Const, 0},
    -		{"SIOCGIFFIB", Const, 1},
    -		{"SIOCGIFFLAGS", Const, 0},
    -		{"SIOCGIFGATTR", Const, 1},
    -		{"SIOCGIFGENERIC", Const, 0},
    -		{"SIOCGIFGMEMB", Const, 0},
    -		{"SIOCGIFGROUP", Const, 0},
    -		{"SIOCGIFHARDMTU", Const, 3},
    -		{"SIOCGIFHWADDR", Const, 0},
    -		{"SIOCGIFINDEX", Const, 0},
    -		{"SIOCGIFKPI", Const, 0},
    -		{"SIOCGIFMAC", Const, 0},
    -		{"SIOCGIFMAP", Const, 0},
    -		{"SIOCGIFMEDIA", Const, 0},
    -		{"SIOCGIFMEM", Const, 0},
    -		{"SIOCGIFMETRIC", Const, 0},
    -		{"SIOCGIFMTU", Const, 0},
    -		{"SIOCGIFNAME", Const, 0},
    -		{"SIOCGIFNETMASK", Const, 0},
    -		{"SIOCGIFPDSTADDR", Const, 0},
    -		{"SIOCGIFPFLAGS", Const, 0},
    -		{"SIOCGIFPHYS", Const, 0},
    -		{"SIOCGIFPRIORITY", Const, 1},
    -		{"SIOCGIFPSRCADDR", Const, 0},
    -		{"SIOCGIFRDOMAIN", Const, 1},
    -		{"SIOCGIFRTLABEL", Const, 1},
    -		{"SIOCGIFSLAVE", Const, 0},
    -		{"SIOCGIFSTATUS", Const, 0},
    -		{"SIOCGIFTIMESLOT", Const, 1},
    -		{"SIOCGIFTXQLEN", Const, 0},
    -		{"SIOCGIFVLAN", Const, 0},
    -		{"SIOCGIFWAKEFLAGS", Const, 0},
    -		{"SIOCGIFXFLAGS", Const, 1},
    -		{"SIOCGLIFADDR", Const, 0},
    -		{"SIOCGLIFPHYADDR", Const, 0},
    -		{"SIOCGLIFPHYRTABLE", Const, 1},
    -		{"SIOCGLIFPHYTTL", Const, 3},
    -		{"SIOCGLINKSTR", Const, 1},
    -		{"SIOCGLOWAT", Const, 0},
    -		{"SIOCGPGRP", Const, 0},
    -		{"SIOCGPRIVATE_0", Const, 0},
    -		{"SIOCGPRIVATE_1", Const, 0},
    -		{"SIOCGRARP", Const, 0},
    -		{"SIOCGSPPPPARAMS", Const, 3},
    -		{"SIOCGSTAMP", Const, 0},
    -		{"SIOCGSTAMPNS", Const, 0},
    -		{"SIOCGVH", Const, 1},
    -		{"SIOCGVNETID", Const, 3},
    -		{"SIOCIFCREATE", Const, 0},
    -		{"SIOCIFCREATE2", Const, 0},
    -		{"SIOCIFDESTROY", Const, 0},
    -		{"SIOCIFGCLONERS", Const, 0},
    -		{"SIOCINITIFADDR", Const, 1},
    -		{"SIOCPROTOPRIVATE", Const, 0},
    -		{"SIOCRSLVMULTI", Const, 0},
    -		{"SIOCRTMSG", Const, 0},
    -		{"SIOCSARP", Const, 0},
    -		{"SIOCSDRVSPEC", Const, 0},
    -		{"SIOCSETKALIVE", Const, 1},
    -		{"SIOCSETLABEL", Const, 1},
    -		{"SIOCSETPFLOW", Const, 1},
    -		{"SIOCSETPFSYNC", Const, 1},
    -		{"SIOCSETVLAN", Const, 0},
    -		{"SIOCSHIWAT", Const, 0},
    -		{"SIOCSIFADDR", Const, 0},
    -		{"SIOCSIFADDRPREF", Const, 1},
    -		{"SIOCSIFALTMTU", Const, 0},
    -		{"SIOCSIFASYNCMAP", Const, 0},
    -		{"SIOCSIFBOND", Const, 0},
    -		{"SIOCSIFBR", Const, 0},
    -		{"SIOCSIFBRDADDR", Const, 0},
    -		{"SIOCSIFCAP", Const, 0},
    -		{"SIOCSIFDESCR", Const, 0},
    -		{"SIOCSIFDSTADDR", Const, 0},
    -		{"SIOCSIFENCAP", Const, 0},
    -		{"SIOCSIFFIB", Const, 1},
    -		{"SIOCSIFFLAGS", Const, 0},
    -		{"SIOCSIFGATTR", Const, 1},
    -		{"SIOCSIFGENERIC", Const, 0},
    -		{"SIOCSIFHWADDR", Const, 0},
    -		{"SIOCSIFHWBROADCAST", Const, 0},
    -		{"SIOCSIFKPI", Const, 0},
    -		{"SIOCSIFLINK", Const, 0},
    -		{"SIOCSIFLLADDR", Const, 0},
    -		{"SIOCSIFMAC", Const, 0},
    -		{"SIOCSIFMAP", Const, 0},
    -		{"SIOCSIFMEDIA", Const, 0},
    -		{"SIOCSIFMEM", Const, 0},
    -		{"SIOCSIFMETRIC", Const, 0},
    -		{"SIOCSIFMTU", Const, 0},
    -		{"SIOCSIFNAME", Const, 0},
    -		{"SIOCSIFNETMASK", Const, 0},
    -		{"SIOCSIFPFLAGS", Const, 0},
    -		{"SIOCSIFPHYADDR", Const, 0},
    -		{"SIOCSIFPHYS", Const, 0},
    -		{"SIOCSIFPRIORITY", Const, 1},
    -		{"SIOCSIFRDOMAIN", Const, 1},
    -		{"SIOCSIFRTLABEL", Const, 1},
    -		{"SIOCSIFRVNET", Const, 0},
    -		{"SIOCSIFSLAVE", Const, 0},
    -		{"SIOCSIFTIMESLOT", Const, 1},
    -		{"SIOCSIFTXQLEN", Const, 0},
    -		{"SIOCSIFVLAN", Const, 0},
    -		{"SIOCSIFVNET", Const, 0},
    -		{"SIOCSIFXFLAGS", Const, 1},
    -		{"SIOCSLIFPHYADDR", Const, 0},
    -		{"SIOCSLIFPHYRTABLE", Const, 1},
    -		{"SIOCSLIFPHYTTL", Const, 3},
    -		{"SIOCSLINKSTR", Const, 1},
    -		{"SIOCSLOWAT", Const, 0},
    -		{"SIOCSPGRP", Const, 0},
    -		{"SIOCSRARP", Const, 0},
    -		{"SIOCSSPPPPARAMS", Const, 3},
    -		{"SIOCSVH", Const, 1},
    -		{"SIOCSVNETID", Const, 3},
    -		{"SIOCZIFDATA", Const, 1},
    -		{"SIO_GET_EXTENSION_FUNCTION_POINTER", Const, 1},
    -		{"SIO_GET_INTERFACE_LIST", Const, 0},
    -		{"SIO_KEEPALIVE_VALS", Const, 3},
    -		{"SIO_UDP_CONNRESET", Const, 4},
    -		{"SOCK_CLOEXEC", Const, 0},
    -		{"SOCK_DCCP", Const, 0},
    -		{"SOCK_DGRAM", Const, 0},
    -		{"SOCK_FLAGS_MASK", Const, 1},
    -		{"SOCK_MAXADDRLEN", Const, 0},
    -		{"SOCK_NONBLOCK", Const, 0},
    -		{"SOCK_NOSIGPIPE", Const, 1},
    -		{"SOCK_PACKET", Const, 0},
    -		{"SOCK_RAW", Const, 0},
    -		{"SOCK_RDM", Const, 0},
    -		{"SOCK_SEQPACKET", Const, 0},
    -		{"SOCK_STREAM", Const, 0},
    -		{"SOL_AAL", Const, 0},
    -		{"SOL_ATM", Const, 0},
    -		{"SOL_DECNET", Const, 0},
    -		{"SOL_ICMPV6", Const, 0},
    -		{"SOL_IP", Const, 0},
    -		{"SOL_IPV6", Const, 0},
    -		{"SOL_IRDA", Const, 0},
    -		{"SOL_PACKET", Const, 0},
    -		{"SOL_RAW", Const, 0},
    -		{"SOL_SOCKET", Const, 0},
    -		{"SOL_TCP", Const, 0},
    -		{"SOL_X25", Const, 0},
    -		{"SOMAXCONN", Const, 0},
    -		{"SO_ACCEPTCONN", Const, 0},
    -		{"SO_ACCEPTFILTER", Const, 0},
    -		{"SO_ATTACH_FILTER", Const, 0},
    -		{"SO_BINDANY", Const, 1},
    -		{"SO_BINDTODEVICE", Const, 0},
    -		{"SO_BINTIME", Const, 0},
    -		{"SO_BROADCAST", Const, 0},
    -		{"SO_BSDCOMPAT", Const, 0},
    -		{"SO_DEBUG", Const, 0},
    -		{"SO_DETACH_FILTER", Const, 0},
    -		{"SO_DOMAIN", Const, 0},
    -		{"SO_DONTROUTE", Const, 0},
    -		{"SO_DONTTRUNC", Const, 0},
    -		{"SO_ERROR", Const, 0},
    -		{"SO_KEEPALIVE", Const, 0},
    -		{"SO_LABEL", Const, 0},
    -		{"SO_LINGER", Const, 0},
    -		{"SO_LINGER_SEC", Const, 0},
    -		{"SO_LISTENINCQLEN", Const, 0},
    -		{"SO_LISTENQLEN", Const, 0},
    -		{"SO_LISTENQLIMIT", Const, 0},
    -		{"SO_MARK", Const, 0},
    -		{"SO_NETPROC", Const, 1},
    -		{"SO_NKE", Const, 0},
    -		{"SO_NOADDRERR", Const, 0},
    -		{"SO_NOHEADER", Const, 1},
    -		{"SO_NOSIGPIPE", Const, 0},
    -		{"SO_NOTIFYCONFLICT", Const, 0},
    -		{"SO_NO_CHECK", Const, 0},
    -		{"SO_NO_DDP", Const, 0},
    -		{"SO_NO_OFFLOAD", Const, 0},
    -		{"SO_NP_EXTENSIONS", Const, 0},
    -		{"SO_NREAD", Const, 0},
    -		{"SO_NUMRCVPKT", Const, 16},
    -		{"SO_NWRITE", Const, 0},
    -		{"SO_OOBINLINE", Const, 0},
    -		{"SO_OVERFLOWED", Const, 1},
    -		{"SO_PASSCRED", Const, 0},
    -		{"SO_PASSSEC", Const, 0},
    -		{"SO_PEERCRED", Const, 0},
    -		{"SO_PEERLABEL", Const, 0},
    -		{"SO_PEERNAME", Const, 0},
    -		{"SO_PEERSEC", Const, 0},
    -		{"SO_PRIORITY", Const, 0},
    -		{"SO_PROTOCOL", Const, 0},
    -		{"SO_PROTOTYPE", Const, 1},
    -		{"SO_RANDOMPORT", Const, 0},
    -		{"SO_RCVBUF", Const, 0},
    -		{"SO_RCVBUFFORCE", Const, 0},
    -		{"SO_RCVLOWAT", Const, 0},
    -		{"SO_RCVTIMEO", Const, 0},
    -		{"SO_RESTRICTIONS", Const, 0},
    -		{"SO_RESTRICT_DENYIN", Const, 0},
    -		{"SO_RESTRICT_DENYOUT", Const, 0},
    -		{"SO_RESTRICT_DENYSET", Const, 0},
    -		{"SO_REUSEADDR", Const, 0},
    -		{"SO_REUSEPORT", Const, 0},
    -		{"SO_REUSESHAREUID", Const, 0},
    -		{"SO_RTABLE", Const, 1},
    -		{"SO_RXQ_OVFL", Const, 0},
    -		{"SO_SECURITY_AUTHENTICATION", Const, 0},
    -		{"SO_SECURITY_ENCRYPTION_NETWORK", Const, 0},
    -		{"SO_SECURITY_ENCRYPTION_TRANSPORT", Const, 0},
    -		{"SO_SETFIB", Const, 0},
    -		{"SO_SNDBUF", Const, 0},
    -		{"SO_SNDBUFFORCE", Const, 0},
    -		{"SO_SNDLOWAT", Const, 0},
    -		{"SO_SNDTIMEO", Const, 0},
    -		{"SO_SPLICE", Const, 1},
    -		{"SO_TIMESTAMP", Const, 0},
    -		{"SO_TIMESTAMPING", Const, 0},
    -		{"SO_TIMESTAMPNS", Const, 0},
    -		{"SO_TIMESTAMP_MONOTONIC", Const, 0},
    -		{"SO_TYPE", Const, 0},
    -		{"SO_UPCALLCLOSEWAIT", Const, 0},
    -		{"SO_UPDATE_ACCEPT_CONTEXT", Const, 0},
    -		{"SO_UPDATE_CONNECT_CONTEXT", Const, 1},
    -		{"SO_USELOOPBACK", Const, 0},
    -		{"SO_USER_COOKIE", Const, 1},
    -		{"SO_VENDOR", Const, 3},
    -		{"SO_WANTMORE", Const, 0},
    -		{"SO_WANTOOBFLAG", Const, 0},
    -		{"SSLExtraCertChainPolicyPara", Type, 0},
    -		{"SSLExtraCertChainPolicyPara.AuthType", Field, 0},
    -		{"SSLExtraCertChainPolicyPara.Checks", Field, 0},
    -		{"SSLExtraCertChainPolicyPara.ServerName", Field, 0},
    -		{"SSLExtraCertChainPolicyPara.Size", Field, 0},
    -		{"STANDARD_RIGHTS_ALL", Const, 0},
    -		{"STANDARD_RIGHTS_EXECUTE", Const, 0},
    -		{"STANDARD_RIGHTS_READ", Const, 0},
    -		{"STANDARD_RIGHTS_REQUIRED", Const, 0},
    -		{"STANDARD_RIGHTS_WRITE", Const, 0},
    -		{"STARTF_USESHOWWINDOW", Const, 0},
    -		{"STARTF_USESTDHANDLES", Const, 0},
    -		{"STD_ERROR_HANDLE", Const, 0},
    -		{"STD_INPUT_HANDLE", Const, 0},
    -		{"STD_OUTPUT_HANDLE", Const, 0},
    -		{"SUBLANG_ENGLISH_US", Const, 0},
    -		{"SW_FORCEMINIMIZE", Const, 0},
    -		{"SW_HIDE", Const, 0},
    -		{"SW_MAXIMIZE", Const, 0},
    -		{"SW_MINIMIZE", Const, 0},
    -		{"SW_NORMAL", Const, 0},
    -		{"SW_RESTORE", Const, 0},
    -		{"SW_SHOW", Const, 0},
    -		{"SW_SHOWDEFAULT", Const, 0},
    -		{"SW_SHOWMAXIMIZED", Const, 0},
    -		{"SW_SHOWMINIMIZED", Const, 0},
    -		{"SW_SHOWMINNOACTIVE", Const, 0},
    -		{"SW_SHOWNA", Const, 0},
    -		{"SW_SHOWNOACTIVATE", Const, 0},
    -		{"SW_SHOWNORMAL", Const, 0},
    -		{"SYMBOLIC_LINK_FLAG_DIRECTORY", Const, 4},
    -		{"SYNCHRONIZE", Const, 0},
    -		{"SYSCTL_VERSION", Const, 1},
    -		{"SYSCTL_VERS_0", Const, 1},
    -		{"SYSCTL_VERS_1", Const, 1},
    -		{"SYSCTL_VERS_MASK", Const, 1},
    -		{"SYS_ABORT2", Const, 0},
    -		{"SYS_ACCEPT", Const, 0},
    -		{"SYS_ACCEPT4", Const, 0},
    -		{"SYS_ACCEPT_NOCANCEL", Const, 0},
    -		{"SYS_ACCESS", Const, 0},
    -		{"SYS_ACCESS_EXTENDED", Const, 0},
    -		{"SYS_ACCT", Const, 0},
    -		{"SYS_ADD_KEY", Const, 0},
    -		{"SYS_ADD_PROFIL", Const, 0},
    -		{"SYS_ADJFREQ", Const, 1},
    -		{"SYS_ADJTIME", Const, 0},
    -		{"SYS_ADJTIMEX", Const, 0},
    -		{"SYS_AFS_SYSCALL", Const, 0},
    -		{"SYS_AIO_CANCEL", Const, 0},
    -		{"SYS_AIO_ERROR", Const, 0},
    -		{"SYS_AIO_FSYNC", Const, 0},
    -		{"SYS_AIO_MLOCK", Const, 14},
    -		{"SYS_AIO_READ", Const, 0},
    -		{"SYS_AIO_RETURN", Const, 0},
    -		{"SYS_AIO_SUSPEND", Const, 0},
    -		{"SYS_AIO_SUSPEND_NOCANCEL", Const, 0},
    -		{"SYS_AIO_WAITCOMPLETE", Const, 14},
    -		{"SYS_AIO_WRITE", Const, 0},
    -		{"SYS_ALARM", Const, 0},
    -		{"SYS_ARCH_PRCTL", Const, 0},
    -		{"SYS_ARM_FADVISE64_64", Const, 0},
    -		{"SYS_ARM_SYNC_FILE_RANGE", Const, 0},
    -		{"SYS_ATGETMSG", Const, 0},
    -		{"SYS_ATPGETREQ", Const, 0},
    -		{"SYS_ATPGETRSP", Const, 0},
    -		{"SYS_ATPSNDREQ", Const, 0},
    -		{"SYS_ATPSNDRSP", Const, 0},
    -		{"SYS_ATPUTMSG", Const, 0},
    -		{"SYS_ATSOCKET", Const, 0},
    -		{"SYS_AUDIT", Const, 0},
    -		{"SYS_AUDITCTL", Const, 0},
    -		{"SYS_AUDITON", Const, 0},
    -		{"SYS_AUDIT_SESSION_JOIN", Const, 0},
    -		{"SYS_AUDIT_SESSION_PORT", Const, 0},
    -		{"SYS_AUDIT_SESSION_SELF", Const, 0},
    -		{"SYS_BDFLUSH", Const, 0},
    -		{"SYS_BIND", Const, 0},
    -		{"SYS_BINDAT", Const, 3},
    -		{"SYS_BREAK", Const, 0},
    -		{"SYS_BRK", Const, 0},
    -		{"SYS_BSDTHREAD_CREATE", Const, 0},
    -		{"SYS_BSDTHREAD_REGISTER", Const, 0},
    -		{"SYS_BSDTHREAD_TERMINATE", Const, 0},
    -		{"SYS_CAPGET", Const, 0},
    -		{"SYS_CAPSET", Const, 0},
    -		{"SYS_CAP_ENTER", Const, 0},
    -		{"SYS_CAP_FCNTLS_GET", Const, 1},
    -		{"SYS_CAP_FCNTLS_LIMIT", Const, 1},
    -		{"SYS_CAP_GETMODE", Const, 0},
    -		{"SYS_CAP_GETRIGHTS", Const, 0},
    -		{"SYS_CAP_IOCTLS_GET", Const, 1},
    -		{"SYS_CAP_IOCTLS_LIMIT", Const, 1},
    -		{"SYS_CAP_NEW", Const, 0},
    -		{"SYS_CAP_RIGHTS_GET", Const, 1},
    -		{"SYS_CAP_RIGHTS_LIMIT", Const, 1},
    -		{"SYS_CHDIR", Const, 0},
    -		{"SYS_CHFLAGS", Const, 0},
    -		{"SYS_CHFLAGSAT", Const, 3},
    -		{"SYS_CHMOD", Const, 0},
    -		{"SYS_CHMOD_EXTENDED", Const, 0},
    -		{"SYS_CHOWN", Const, 0},
    -		{"SYS_CHOWN32", Const, 0},
    -		{"SYS_CHROOT", Const, 0},
    -		{"SYS_CHUD", Const, 0},
    -		{"SYS_CLOCK_ADJTIME", Const, 0},
    -		{"SYS_CLOCK_GETCPUCLOCKID2", Const, 1},
    -		{"SYS_CLOCK_GETRES", Const, 0},
    -		{"SYS_CLOCK_GETTIME", Const, 0},
    -		{"SYS_CLOCK_NANOSLEEP", Const, 0},
    -		{"SYS_CLOCK_SETTIME", Const, 0},
    -		{"SYS_CLONE", Const, 0},
    -		{"SYS_CLOSE", Const, 0},
    -		{"SYS_CLOSEFROM", Const, 0},
    -		{"SYS_CLOSE_NOCANCEL", Const, 0},
    -		{"SYS_CONNECT", Const, 0},
    -		{"SYS_CONNECTAT", Const, 3},
    -		{"SYS_CONNECT_NOCANCEL", Const, 0},
    -		{"SYS_COPYFILE", Const, 0},
    -		{"SYS_CPUSET", Const, 0},
    -		{"SYS_CPUSET_GETAFFINITY", Const, 0},
    -		{"SYS_CPUSET_GETID", Const, 0},
    -		{"SYS_CPUSET_SETAFFINITY", Const, 0},
    -		{"SYS_CPUSET_SETID", Const, 0},
    -		{"SYS_CREAT", Const, 0},
    -		{"SYS_CREATE_MODULE", Const, 0},
    -		{"SYS_CSOPS", Const, 0},
    -		{"SYS_CSOPS_AUDITTOKEN", Const, 16},
    -		{"SYS_DELETE", Const, 0},
    -		{"SYS_DELETE_MODULE", Const, 0},
    -		{"SYS_DUP", Const, 0},
    -		{"SYS_DUP2", Const, 0},
    -		{"SYS_DUP3", Const, 0},
    -		{"SYS_EACCESS", Const, 0},
    -		{"SYS_EPOLL_CREATE", Const, 0},
    -		{"SYS_EPOLL_CREATE1", Const, 0},
    -		{"SYS_EPOLL_CTL", Const, 0},
    -		{"SYS_EPOLL_CTL_OLD", Const, 0},
    -		{"SYS_EPOLL_PWAIT", Const, 0},
    -		{"SYS_EPOLL_WAIT", Const, 0},
    -		{"SYS_EPOLL_WAIT_OLD", Const, 0},
    -		{"SYS_EVENTFD", Const, 0},
    -		{"SYS_EVENTFD2", Const, 0},
    -		{"SYS_EXCHANGEDATA", Const, 0},
    -		{"SYS_EXECVE", Const, 0},
    -		{"SYS_EXIT", Const, 0},
    -		{"SYS_EXIT_GROUP", Const, 0},
    -		{"SYS_EXTATTRCTL", Const, 0},
    -		{"SYS_EXTATTR_DELETE_FD", Const, 0},
    -		{"SYS_EXTATTR_DELETE_FILE", Const, 0},
    -		{"SYS_EXTATTR_DELETE_LINK", Const, 0},
    -		{"SYS_EXTATTR_GET_FD", Const, 0},
    -		{"SYS_EXTATTR_GET_FILE", Const, 0},
    -		{"SYS_EXTATTR_GET_LINK", Const, 0},
    -		{"SYS_EXTATTR_LIST_FD", Const, 0},
    -		{"SYS_EXTATTR_LIST_FILE", Const, 0},
    -		{"SYS_EXTATTR_LIST_LINK", Const, 0},
    -		{"SYS_EXTATTR_SET_FD", Const, 0},
    -		{"SYS_EXTATTR_SET_FILE", Const, 0},
    -		{"SYS_EXTATTR_SET_LINK", Const, 0},
    -		{"SYS_FACCESSAT", Const, 0},
    -		{"SYS_FADVISE64", Const, 0},
    -		{"SYS_FADVISE64_64", Const, 0},
    -		{"SYS_FALLOCATE", Const, 0},
    -		{"SYS_FANOTIFY_INIT", Const, 0},
    -		{"SYS_FANOTIFY_MARK", Const, 0},
    -		{"SYS_FCHDIR", Const, 0},
    -		{"SYS_FCHFLAGS", Const, 0},
    -		{"SYS_FCHMOD", Const, 0},
    -		{"SYS_FCHMODAT", Const, 0},
    -		{"SYS_FCHMOD_EXTENDED", Const, 0},
    -		{"SYS_FCHOWN", Const, 0},
    -		{"SYS_FCHOWN32", Const, 0},
    -		{"SYS_FCHOWNAT", Const, 0},
    -		{"SYS_FCHROOT", Const, 1},
    -		{"SYS_FCNTL", Const, 0},
    -		{"SYS_FCNTL64", Const, 0},
    -		{"SYS_FCNTL_NOCANCEL", Const, 0},
    -		{"SYS_FDATASYNC", Const, 0},
    -		{"SYS_FEXECVE", Const, 0},
    -		{"SYS_FFCLOCK_GETCOUNTER", Const, 0},
    -		{"SYS_FFCLOCK_GETESTIMATE", Const, 0},
    -		{"SYS_FFCLOCK_SETESTIMATE", Const, 0},
    -		{"SYS_FFSCTL", Const, 0},
    -		{"SYS_FGETATTRLIST", Const, 0},
    -		{"SYS_FGETXATTR", Const, 0},
    -		{"SYS_FHOPEN", Const, 0},
    -		{"SYS_FHSTAT", Const, 0},
    -		{"SYS_FHSTATFS", Const, 0},
    -		{"SYS_FILEPORT_MAKEFD", Const, 0},
    -		{"SYS_FILEPORT_MAKEPORT", Const, 0},
    -		{"SYS_FKTRACE", Const, 1},
    -		{"SYS_FLISTXATTR", Const, 0},
    -		{"SYS_FLOCK", Const, 0},
    -		{"SYS_FORK", Const, 0},
    -		{"SYS_FPATHCONF", Const, 0},
    -		{"SYS_FREEBSD6_FTRUNCATE", Const, 0},
    -		{"SYS_FREEBSD6_LSEEK", Const, 0},
    -		{"SYS_FREEBSD6_MMAP", Const, 0},
    -		{"SYS_FREEBSD6_PREAD", Const, 0},
    -		{"SYS_FREEBSD6_PWRITE", Const, 0},
    -		{"SYS_FREEBSD6_TRUNCATE", Const, 0},
    -		{"SYS_FREMOVEXATTR", Const, 0},
    -		{"SYS_FSCTL", Const, 0},
    -		{"SYS_FSETATTRLIST", Const, 0},
    -		{"SYS_FSETXATTR", Const, 0},
    -		{"SYS_FSGETPATH", Const, 0},
    -		{"SYS_FSTAT", Const, 0},
    -		{"SYS_FSTAT64", Const, 0},
    -		{"SYS_FSTAT64_EXTENDED", Const, 0},
    -		{"SYS_FSTATAT", Const, 0},
    -		{"SYS_FSTATAT64", Const, 0},
    -		{"SYS_FSTATFS", Const, 0},
    -		{"SYS_FSTATFS64", Const, 0},
    -		{"SYS_FSTATV", Const, 0},
    -		{"SYS_FSTATVFS1", Const, 1},
    -		{"SYS_FSTAT_EXTENDED", Const, 0},
    -		{"SYS_FSYNC", Const, 0},
    -		{"SYS_FSYNC_NOCANCEL", Const, 0},
    -		{"SYS_FSYNC_RANGE", Const, 1},
    -		{"SYS_FTIME", Const, 0},
    -		{"SYS_FTRUNCATE", Const, 0},
    -		{"SYS_FTRUNCATE64", Const, 0},
    -		{"SYS_FUTEX", Const, 0},
    -		{"SYS_FUTIMENS", Const, 1},
    -		{"SYS_FUTIMES", Const, 0},
    -		{"SYS_FUTIMESAT", Const, 0},
    -		{"SYS_GETATTRLIST", Const, 0},
    -		{"SYS_GETAUDIT", Const, 0},
    -		{"SYS_GETAUDIT_ADDR", Const, 0},
    -		{"SYS_GETAUID", Const, 0},
    -		{"SYS_GETCONTEXT", Const, 0},
    -		{"SYS_GETCPU", Const, 0},
    -		{"SYS_GETCWD", Const, 0},
    -		{"SYS_GETDENTS", Const, 0},
    -		{"SYS_GETDENTS64", Const, 0},
    -		{"SYS_GETDIRENTRIES", Const, 0},
    -		{"SYS_GETDIRENTRIES64", Const, 0},
    -		{"SYS_GETDIRENTRIESATTR", Const, 0},
    -		{"SYS_GETDTABLECOUNT", Const, 1},
    -		{"SYS_GETDTABLESIZE", Const, 0},
    -		{"SYS_GETEGID", Const, 0},
    -		{"SYS_GETEGID32", Const, 0},
    -		{"SYS_GETEUID", Const, 0},
    -		{"SYS_GETEUID32", Const, 0},
    -		{"SYS_GETFH", Const, 0},
    -		{"SYS_GETFSSTAT", Const, 0},
    -		{"SYS_GETFSSTAT64", Const, 0},
    -		{"SYS_GETGID", Const, 0},
    -		{"SYS_GETGID32", Const, 0},
    -		{"SYS_GETGROUPS", Const, 0},
    -		{"SYS_GETGROUPS32", Const, 0},
    -		{"SYS_GETHOSTUUID", Const, 0},
    -		{"SYS_GETITIMER", Const, 0},
    -		{"SYS_GETLCID", Const, 0},
    -		{"SYS_GETLOGIN", Const, 0},
    -		{"SYS_GETLOGINCLASS", Const, 0},
    -		{"SYS_GETPEERNAME", Const, 0},
    -		{"SYS_GETPGID", Const, 0},
    -		{"SYS_GETPGRP", Const, 0},
    -		{"SYS_GETPID", Const, 0},
    -		{"SYS_GETPMSG", Const, 0},
    -		{"SYS_GETPPID", Const, 0},
    -		{"SYS_GETPRIORITY", Const, 0},
    -		{"SYS_GETRESGID", Const, 0},
    -		{"SYS_GETRESGID32", Const, 0},
    -		{"SYS_GETRESUID", Const, 0},
    -		{"SYS_GETRESUID32", Const, 0},
    -		{"SYS_GETRLIMIT", Const, 0},
    -		{"SYS_GETRTABLE", Const, 1},
    -		{"SYS_GETRUSAGE", Const, 0},
    -		{"SYS_GETSGROUPS", Const, 0},
    -		{"SYS_GETSID", Const, 0},
    -		{"SYS_GETSOCKNAME", Const, 0},
    -		{"SYS_GETSOCKOPT", Const, 0},
    -		{"SYS_GETTHRID", Const, 1},
    -		{"SYS_GETTID", Const, 0},
    -		{"SYS_GETTIMEOFDAY", Const, 0},
    -		{"SYS_GETUID", Const, 0},
    -		{"SYS_GETUID32", Const, 0},
    -		{"SYS_GETVFSSTAT", Const, 1},
    -		{"SYS_GETWGROUPS", Const, 0},
    -		{"SYS_GETXATTR", Const, 0},
    -		{"SYS_GET_KERNEL_SYMS", Const, 0},
    -		{"SYS_GET_MEMPOLICY", Const, 0},
    -		{"SYS_GET_ROBUST_LIST", Const, 0},
    -		{"SYS_GET_THREAD_AREA", Const, 0},
    -		{"SYS_GSSD_SYSCALL", Const, 14},
    -		{"SYS_GTTY", Const, 0},
    -		{"SYS_IDENTITYSVC", Const, 0},
    -		{"SYS_IDLE", Const, 0},
    -		{"SYS_INITGROUPS", Const, 0},
    -		{"SYS_INIT_MODULE", Const, 0},
    -		{"SYS_INOTIFY_ADD_WATCH", Const, 0},
    -		{"SYS_INOTIFY_INIT", Const, 0},
    -		{"SYS_INOTIFY_INIT1", Const, 0},
    -		{"SYS_INOTIFY_RM_WATCH", Const, 0},
    -		{"SYS_IOCTL", Const, 0},
    -		{"SYS_IOPERM", Const, 0},
    -		{"SYS_IOPL", Const, 0},
    -		{"SYS_IOPOLICYSYS", Const, 0},
    -		{"SYS_IOPRIO_GET", Const, 0},
    -		{"SYS_IOPRIO_SET", Const, 0},
    -		{"SYS_IO_CANCEL", Const, 0},
    -		{"SYS_IO_DESTROY", Const, 0},
    -		{"SYS_IO_GETEVENTS", Const, 0},
    -		{"SYS_IO_SETUP", Const, 0},
    -		{"SYS_IO_SUBMIT", Const, 0},
    -		{"SYS_IPC", Const, 0},
    -		{"SYS_ISSETUGID", Const, 0},
    -		{"SYS_JAIL", Const, 0},
    -		{"SYS_JAIL_ATTACH", Const, 0},
    -		{"SYS_JAIL_GET", Const, 0},
    -		{"SYS_JAIL_REMOVE", Const, 0},
    -		{"SYS_JAIL_SET", Const, 0},
    -		{"SYS_KAS_INFO", Const, 16},
    -		{"SYS_KDEBUG_TRACE", Const, 0},
    -		{"SYS_KENV", Const, 0},
    -		{"SYS_KEVENT", Const, 0},
    -		{"SYS_KEVENT64", Const, 0},
    -		{"SYS_KEXEC_LOAD", Const, 0},
    -		{"SYS_KEYCTL", Const, 0},
    -		{"SYS_KILL", Const, 0},
    -		{"SYS_KLDFIND", Const, 0},
    -		{"SYS_KLDFIRSTMOD", Const, 0},
    -		{"SYS_KLDLOAD", Const, 0},
    -		{"SYS_KLDNEXT", Const, 0},
    -		{"SYS_KLDSTAT", Const, 0},
    -		{"SYS_KLDSYM", Const, 0},
    -		{"SYS_KLDUNLOAD", Const, 0},
    -		{"SYS_KLDUNLOADF", Const, 0},
    -		{"SYS_KMQ_NOTIFY", Const, 14},
    -		{"SYS_KMQ_OPEN", Const, 14},
    -		{"SYS_KMQ_SETATTR", Const, 14},
    -		{"SYS_KMQ_TIMEDRECEIVE", Const, 14},
    -		{"SYS_KMQ_TIMEDSEND", Const, 14},
    -		{"SYS_KMQ_UNLINK", Const, 14},
    -		{"SYS_KQUEUE", Const, 0},
    -		{"SYS_KQUEUE1", Const, 1},
    -		{"SYS_KSEM_CLOSE", Const, 14},
    -		{"SYS_KSEM_DESTROY", Const, 14},
    -		{"SYS_KSEM_GETVALUE", Const, 14},
    -		{"SYS_KSEM_INIT", Const, 14},
    -		{"SYS_KSEM_OPEN", Const, 14},
    -		{"SYS_KSEM_POST", Const, 14},
    -		{"SYS_KSEM_TIMEDWAIT", Const, 14},
    -		{"SYS_KSEM_TRYWAIT", Const, 14},
    -		{"SYS_KSEM_UNLINK", Const, 14},
    -		{"SYS_KSEM_WAIT", Const, 14},
    -		{"SYS_KTIMER_CREATE", Const, 0},
    -		{"SYS_KTIMER_DELETE", Const, 0},
    -		{"SYS_KTIMER_GETOVERRUN", Const, 0},
    -		{"SYS_KTIMER_GETTIME", Const, 0},
    -		{"SYS_KTIMER_SETTIME", Const, 0},
    -		{"SYS_KTRACE", Const, 0},
    -		{"SYS_LCHFLAGS", Const, 0},
    -		{"SYS_LCHMOD", Const, 0},
    -		{"SYS_LCHOWN", Const, 0},
    -		{"SYS_LCHOWN32", Const, 0},
    -		{"SYS_LEDGER", Const, 16},
    -		{"SYS_LGETFH", Const, 0},
    -		{"SYS_LGETXATTR", Const, 0},
    -		{"SYS_LINK", Const, 0},
    -		{"SYS_LINKAT", Const, 0},
    -		{"SYS_LIO_LISTIO", Const, 0},
    -		{"SYS_LISTEN", Const, 0},
    -		{"SYS_LISTXATTR", Const, 0},
    -		{"SYS_LLISTXATTR", Const, 0},
    -		{"SYS_LOCK", Const, 0},
    -		{"SYS_LOOKUP_DCOOKIE", Const, 0},
    -		{"SYS_LPATHCONF", Const, 0},
    -		{"SYS_LREMOVEXATTR", Const, 0},
    -		{"SYS_LSEEK", Const, 0},
    -		{"SYS_LSETXATTR", Const, 0},
    -		{"SYS_LSTAT", Const, 0},
    -		{"SYS_LSTAT64", Const, 0},
    -		{"SYS_LSTAT64_EXTENDED", Const, 0},
    -		{"SYS_LSTATV", Const, 0},
    -		{"SYS_LSTAT_EXTENDED", Const, 0},
    -		{"SYS_LUTIMES", Const, 0},
    -		{"SYS_MAC_SYSCALL", Const, 0},
    -		{"SYS_MADVISE", Const, 0},
    -		{"SYS_MADVISE1", Const, 0},
    -		{"SYS_MAXSYSCALL", Const, 0},
    -		{"SYS_MBIND", Const, 0},
    -		{"SYS_MIGRATE_PAGES", Const, 0},
    -		{"SYS_MINCORE", Const, 0},
    -		{"SYS_MINHERIT", Const, 0},
    -		{"SYS_MKCOMPLEX", Const, 0},
    -		{"SYS_MKDIR", Const, 0},
    -		{"SYS_MKDIRAT", Const, 0},
    -		{"SYS_MKDIR_EXTENDED", Const, 0},
    -		{"SYS_MKFIFO", Const, 0},
    -		{"SYS_MKFIFOAT", Const, 0},
    -		{"SYS_MKFIFO_EXTENDED", Const, 0},
    -		{"SYS_MKNOD", Const, 0},
    -		{"SYS_MKNODAT", Const, 0},
    -		{"SYS_MLOCK", Const, 0},
    -		{"SYS_MLOCKALL", Const, 0},
    -		{"SYS_MMAP", Const, 0},
    -		{"SYS_MMAP2", Const, 0},
    -		{"SYS_MODCTL", Const, 1},
    -		{"SYS_MODFIND", Const, 0},
    -		{"SYS_MODFNEXT", Const, 0},
    -		{"SYS_MODIFY_LDT", Const, 0},
    -		{"SYS_MODNEXT", Const, 0},
    -		{"SYS_MODSTAT", Const, 0},
    -		{"SYS_MODWATCH", Const, 0},
    -		{"SYS_MOUNT", Const, 0},
    -		{"SYS_MOVE_PAGES", Const, 0},
    -		{"SYS_MPROTECT", Const, 0},
    -		{"SYS_MPX", Const, 0},
    -		{"SYS_MQUERY", Const, 1},
    -		{"SYS_MQ_GETSETATTR", Const, 0},
    -		{"SYS_MQ_NOTIFY", Const, 0},
    -		{"SYS_MQ_OPEN", Const, 0},
    -		{"SYS_MQ_TIMEDRECEIVE", Const, 0},
    -		{"SYS_MQ_TIMEDSEND", Const, 0},
    -		{"SYS_MQ_UNLINK", Const, 0},
    -		{"SYS_MREMAP", Const, 0},
    -		{"SYS_MSGCTL", Const, 0},
    -		{"SYS_MSGGET", Const, 0},
    -		{"SYS_MSGRCV", Const, 0},
    -		{"SYS_MSGRCV_NOCANCEL", Const, 0},
    -		{"SYS_MSGSND", Const, 0},
    -		{"SYS_MSGSND_NOCANCEL", Const, 0},
    -		{"SYS_MSGSYS", Const, 0},
    -		{"SYS_MSYNC", Const, 0},
    -		{"SYS_MSYNC_NOCANCEL", Const, 0},
    -		{"SYS_MUNLOCK", Const, 0},
    -		{"SYS_MUNLOCKALL", Const, 0},
    -		{"SYS_MUNMAP", Const, 0},
    -		{"SYS_NAME_TO_HANDLE_AT", Const, 0},
    -		{"SYS_NANOSLEEP", Const, 0},
    -		{"SYS_NEWFSTATAT", Const, 0},
    -		{"SYS_NFSCLNT", Const, 0},
    -		{"SYS_NFSSERVCTL", Const, 0},
    -		{"SYS_NFSSVC", Const, 0},
    -		{"SYS_NFSTAT", Const, 0},
    -		{"SYS_NICE", Const, 0},
    -		{"SYS_NLM_SYSCALL", Const, 14},
    -		{"SYS_NLSTAT", Const, 0},
    -		{"SYS_NMOUNT", Const, 0},
    -		{"SYS_NSTAT", Const, 0},
    -		{"SYS_NTP_ADJTIME", Const, 0},
    -		{"SYS_NTP_GETTIME", Const, 0},
    -		{"SYS_NUMA_GETAFFINITY", Const, 14},
    -		{"SYS_NUMA_SETAFFINITY", Const, 14},
    -		{"SYS_OABI_SYSCALL_BASE", Const, 0},
    -		{"SYS_OBREAK", Const, 0},
    -		{"SYS_OLDFSTAT", Const, 0},
    -		{"SYS_OLDLSTAT", Const, 0},
    -		{"SYS_OLDOLDUNAME", Const, 0},
    -		{"SYS_OLDSTAT", Const, 0},
    -		{"SYS_OLDUNAME", Const, 0},
    -		{"SYS_OPEN", Const, 0},
    -		{"SYS_OPENAT", Const, 0},
    -		{"SYS_OPENBSD_POLL", Const, 0},
    -		{"SYS_OPEN_BY_HANDLE_AT", Const, 0},
    -		{"SYS_OPEN_DPROTECTED_NP", Const, 16},
    -		{"SYS_OPEN_EXTENDED", Const, 0},
    -		{"SYS_OPEN_NOCANCEL", Const, 0},
    -		{"SYS_OVADVISE", Const, 0},
    -		{"SYS_PACCEPT", Const, 1},
    -		{"SYS_PATHCONF", Const, 0},
    -		{"SYS_PAUSE", Const, 0},
    -		{"SYS_PCICONFIG_IOBASE", Const, 0},
    -		{"SYS_PCICONFIG_READ", Const, 0},
    -		{"SYS_PCICONFIG_WRITE", Const, 0},
    -		{"SYS_PDFORK", Const, 0},
    -		{"SYS_PDGETPID", Const, 0},
    -		{"SYS_PDKILL", Const, 0},
    -		{"SYS_PERF_EVENT_OPEN", Const, 0},
    -		{"SYS_PERSONALITY", Const, 0},
    -		{"SYS_PID_HIBERNATE", Const, 0},
    -		{"SYS_PID_RESUME", Const, 0},
    -		{"SYS_PID_SHUTDOWN_SOCKETS", Const, 0},
    -		{"SYS_PID_SUSPEND", Const, 0},
    -		{"SYS_PIPE", Const, 0},
    -		{"SYS_PIPE2", Const, 0},
    -		{"SYS_PIVOT_ROOT", Const, 0},
    -		{"SYS_PMC_CONTROL", Const, 1},
    -		{"SYS_PMC_GET_INFO", Const, 1},
    -		{"SYS_POLL", Const, 0},
    -		{"SYS_POLLTS", Const, 1},
    -		{"SYS_POLL_NOCANCEL", Const, 0},
    -		{"SYS_POSIX_FADVISE", Const, 0},
    -		{"SYS_POSIX_FALLOCATE", Const, 0},
    -		{"SYS_POSIX_OPENPT", Const, 0},
    -		{"SYS_POSIX_SPAWN", Const, 0},
    -		{"SYS_PPOLL", Const, 0},
    -		{"SYS_PRCTL", Const, 0},
    -		{"SYS_PREAD", Const, 0},
    -		{"SYS_PREAD64", Const, 0},
    -		{"SYS_PREADV", Const, 0},
    -		{"SYS_PREAD_NOCANCEL", Const, 0},
    -		{"SYS_PRLIMIT64", Const, 0},
    -		{"SYS_PROCCTL", Const, 3},
    -		{"SYS_PROCESS_POLICY", Const, 0},
    -		{"SYS_PROCESS_VM_READV", Const, 0},
    -		{"SYS_PROCESS_VM_WRITEV", Const, 0},
    -		{"SYS_PROC_INFO", Const, 0},
    -		{"SYS_PROF", Const, 0},
    -		{"SYS_PROFIL", Const, 0},
    -		{"SYS_PSELECT", Const, 0},
    -		{"SYS_PSELECT6", Const, 0},
    -		{"SYS_PSET_ASSIGN", Const, 1},
    -		{"SYS_PSET_CREATE", Const, 1},
    -		{"SYS_PSET_DESTROY", Const, 1},
    -		{"SYS_PSYNCH_CVBROAD", Const, 0},
    -		{"SYS_PSYNCH_CVCLRPREPOST", Const, 0},
    -		{"SYS_PSYNCH_CVSIGNAL", Const, 0},
    -		{"SYS_PSYNCH_CVWAIT", Const, 0},
    -		{"SYS_PSYNCH_MUTEXDROP", Const, 0},
    -		{"SYS_PSYNCH_MUTEXWAIT", Const, 0},
    -		{"SYS_PSYNCH_RW_DOWNGRADE", Const, 0},
    -		{"SYS_PSYNCH_RW_LONGRDLOCK", Const, 0},
    -		{"SYS_PSYNCH_RW_RDLOCK", Const, 0},
    -		{"SYS_PSYNCH_RW_UNLOCK", Const, 0},
    -		{"SYS_PSYNCH_RW_UNLOCK2", Const, 0},
    -		{"SYS_PSYNCH_RW_UPGRADE", Const, 0},
    -		{"SYS_PSYNCH_RW_WRLOCK", Const, 0},
    -		{"SYS_PSYNCH_RW_YIELDWRLOCK", Const, 0},
    -		{"SYS_PTRACE", Const, 0},
    -		{"SYS_PUTPMSG", Const, 0},
    -		{"SYS_PWRITE", Const, 0},
    -		{"SYS_PWRITE64", Const, 0},
    -		{"SYS_PWRITEV", Const, 0},
    -		{"SYS_PWRITE_NOCANCEL", Const, 0},
    -		{"SYS_QUERY_MODULE", Const, 0},
    -		{"SYS_QUOTACTL", Const, 0},
    -		{"SYS_RASCTL", Const, 1},
    -		{"SYS_RCTL_ADD_RULE", Const, 0},
    -		{"SYS_RCTL_GET_LIMITS", Const, 0},
    -		{"SYS_RCTL_GET_RACCT", Const, 0},
    -		{"SYS_RCTL_GET_RULES", Const, 0},
    -		{"SYS_RCTL_REMOVE_RULE", Const, 0},
    -		{"SYS_READ", Const, 0},
    -		{"SYS_READAHEAD", Const, 0},
    -		{"SYS_READDIR", Const, 0},
    -		{"SYS_READLINK", Const, 0},
    -		{"SYS_READLINKAT", Const, 0},
    -		{"SYS_READV", Const, 0},
    -		{"SYS_READV_NOCANCEL", Const, 0},
    -		{"SYS_READ_NOCANCEL", Const, 0},
    -		{"SYS_REBOOT", Const, 0},
    -		{"SYS_RECV", Const, 0},
    -		{"SYS_RECVFROM", Const, 0},
    -		{"SYS_RECVFROM_NOCANCEL", Const, 0},
    -		{"SYS_RECVMMSG", Const, 0},
    -		{"SYS_RECVMSG", Const, 0},
    -		{"SYS_RECVMSG_NOCANCEL", Const, 0},
    -		{"SYS_REMAP_FILE_PAGES", Const, 0},
    -		{"SYS_REMOVEXATTR", Const, 0},
    -		{"SYS_RENAME", Const, 0},
    -		{"SYS_RENAMEAT", Const, 0},
    -		{"SYS_REQUEST_KEY", Const, 0},
    -		{"SYS_RESTART_SYSCALL", Const, 0},
    -		{"SYS_REVOKE", Const, 0},
    -		{"SYS_RFORK", Const, 0},
    -		{"SYS_RMDIR", Const, 0},
    -		{"SYS_RTPRIO", Const, 0},
    -		{"SYS_RTPRIO_THREAD", Const, 0},
    -		{"SYS_RT_SIGACTION", Const, 0},
    -		{"SYS_RT_SIGPENDING", Const, 0},
    -		{"SYS_RT_SIGPROCMASK", Const, 0},
    -		{"SYS_RT_SIGQUEUEINFO", Const, 0},
    -		{"SYS_RT_SIGRETURN", Const, 0},
    -		{"SYS_RT_SIGSUSPEND", Const, 0},
    -		{"SYS_RT_SIGTIMEDWAIT", Const, 0},
    -		{"SYS_RT_TGSIGQUEUEINFO", Const, 0},
    -		{"SYS_SBRK", Const, 0},
    -		{"SYS_SCHED_GETAFFINITY", Const, 0},
    -		{"SYS_SCHED_GETPARAM", Const, 0},
    -		{"SYS_SCHED_GETSCHEDULER", Const, 0},
    -		{"SYS_SCHED_GET_PRIORITY_MAX", Const, 0},
    -		{"SYS_SCHED_GET_PRIORITY_MIN", Const, 0},
    -		{"SYS_SCHED_RR_GET_INTERVAL", Const, 0},
    -		{"SYS_SCHED_SETAFFINITY", Const, 0},
    -		{"SYS_SCHED_SETPARAM", Const, 0},
    -		{"SYS_SCHED_SETSCHEDULER", Const, 0},
    -		{"SYS_SCHED_YIELD", Const, 0},
    -		{"SYS_SCTP_GENERIC_RECVMSG", Const, 0},
    -		{"SYS_SCTP_GENERIC_SENDMSG", Const, 0},
    -		{"SYS_SCTP_GENERIC_SENDMSG_IOV", Const, 0},
    -		{"SYS_SCTP_PEELOFF", Const, 0},
    -		{"SYS_SEARCHFS", Const, 0},
    -		{"SYS_SECURITY", Const, 0},
    -		{"SYS_SELECT", Const, 0},
    -		{"SYS_SELECT_NOCANCEL", Const, 0},
    -		{"SYS_SEMCONFIG", Const, 1},
    -		{"SYS_SEMCTL", Const, 0},
    -		{"SYS_SEMGET", Const, 0},
    -		{"SYS_SEMOP", Const, 0},
    -		{"SYS_SEMSYS", Const, 0},
    -		{"SYS_SEMTIMEDOP", Const, 0},
    -		{"SYS_SEM_CLOSE", Const, 0},
    -		{"SYS_SEM_DESTROY", Const, 0},
    -		{"SYS_SEM_GETVALUE", Const, 0},
    -		{"SYS_SEM_INIT", Const, 0},
    -		{"SYS_SEM_OPEN", Const, 0},
    -		{"SYS_SEM_POST", Const, 0},
    -		{"SYS_SEM_TRYWAIT", Const, 0},
    -		{"SYS_SEM_UNLINK", Const, 0},
    -		{"SYS_SEM_WAIT", Const, 0},
    -		{"SYS_SEM_WAIT_NOCANCEL", Const, 0},
    -		{"SYS_SEND", Const, 0},
    -		{"SYS_SENDFILE", Const, 0},
    -		{"SYS_SENDFILE64", Const, 0},
    -		{"SYS_SENDMMSG", Const, 0},
    -		{"SYS_SENDMSG", Const, 0},
    -		{"SYS_SENDMSG_NOCANCEL", Const, 0},
    -		{"SYS_SENDTO", Const, 0},
    -		{"SYS_SENDTO_NOCANCEL", Const, 0},
    -		{"SYS_SETATTRLIST", Const, 0},
    -		{"SYS_SETAUDIT", Const, 0},
    -		{"SYS_SETAUDIT_ADDR", Const, 0},
    -		{"SYS_SETAUID", Const, 0},
    -		{"SYS_SETCONTEXT", Const, 0},
    -		{"SYS_SETDOMAINNAME", Const, 0},
    -		{"SYS_SETEGID", Const, 0},
    -		{"SYS_SETEUID", Const, 0},
    -		{"SYS_SETFIB", Const, 0},
    -		{"SYS_SETFSGID", Const, 0},
    -		{"SYS_SETFSGID32", Const, 0},
    -		{"SYS_SETFSUID", Const, 0},
    -		{"SYS_SETFSUID32", Const, 0},
    -		{"SYS_SETGID", Const, 0},
    -		{"SYS_SETGID32", Const, 0},
    -		{"SYS_SETGROUPS", Const, 0},
    -		{"SYS_SETGROUPS32", Const, 0},
    -		{"SYS_SETHOSTNAME", Const, 0},
    -		{"SYS_SETITIMER", Const, 0},
    -		{"SYS_SETLCID", Const, 0},
    -		{"SYS_SETLOGIN", Const, 0},
    -		{"SYS_SETLOGINCLASS", Const, 0},
    -		{"SYS_SETNS", Const, 0},
    -		{"SYS_SETPGID", Const, 0},
    -		{"SYS_SETPRIORITY", Const, 0},
    -		{"SYS_SETPRIVEXEC", Const, 0},
    -		{"SYS_SETREGID", Const, 0},
    -		{"SYS_SETREGID32", Const, 0},
    -		{"SYS_SETRESGID", Const, 0},
    -		{"SYS_SETRESGID32", Const, 0},
    -		{"SYS_SETRESUID", Const, 0},
    -		{"SYS_SETRESUID32", Const, 0},
    -		{"SYS_SETREUID", Const, 0},
    -		{"SYS_SETREUID32", Const, 0},
    -		{"SYS_SETRLIMIT", Const, 0},
    -		{"SYS_SETRTABLE", Const, 1},
    -		{"SYS_SETSGROUPS", Const, 0},
    -		{"SYS_SETSID", Const, 0},
    -		{"SYS_SETSOCKOPT", Const, 0},
    -		{"SYS_SETTID", Const, 0},
    -		{"SYS_SETTID_WITH_PID", Const, 0},
    -		{"SYS_SETTIMEOFDAY", Const, 0},
    -		{"SYS_SETUID", Const, 0},
    -		{"SYS_SETUID32", Const, 0},
    -		{"SYS_SETWGROUPS", Const, 0},
    -		{"SYS_SETXATTR", Const, 0},
    -		{"SYS_SET_MEMPOLICY", Const, 0},
    -		{"SYS_SET_ROBUST_LIST", Const, 0},
    -		{"SYS_SET_THREAD_AREA", Const, 0},
    -		{"SYS_SET_TID_ADDRESS", Const, 0},
    -		{"SYS_SGETMASK", Const, 0},
    -		{"SYS_SHARED_REGION_CHECK_NP", Const, 0},
    -		{"SYS_SHARED_REGION_MAP_AND_SLIDE_NP", Const, 0},
    -		{"SYS_SHMAT", Const, 0},
    -		{"SYS_SHMCTL", Const, 0},
    -		{"SYS_SHMDT", Const, 0},
    -		{"SYS_SHMGET", Const, 0},
    -		{"SYS_SHMSYS", Const, 0},
    -		{"SYS_SHM_OPEN", Const, 0},
    -		{"SYS_SHM_UNLINK", Const, 0},
    -		{"SYS_SHUTDOWN", Const, 0},
    -		{"SYS_SIGACTION", Const, 0},
    -		{"SYS_SIGALTSTACK", Const, 0},
    -		{"SYS_SIGNAL", Const, 0},
    -		{"SYS_SIGNALFD", Const, 0},
    -		{"SYS_SIGNALFD4", Const, 0},
    -		{"SYS_SIGPENDING", Const, 0},
    -		{"SYS_SIGPROCMASK", Const, 0},
    -		{"SYS_SIGQUEUE", Const, 0},
    -		{"SYS_SIGQUEUEINFO", Const, 1},
    -		{"SYS_SIGRETURN", Const, 0},
    -		{"SYS_SIGSUSPEND", Const, 0},
    -		{"SYS_SIGSUSPEND_NOCANCEL", Const, 0},
    -		{"SYS_SIGTIMEDWAIT", Const, 0},
    -		{"SYS_SIGWAIT", Const, 0},
    -		{"SYS_SIGWAITINFO", Const, 0},
    -		{"SYS_SOCKET", Const, 0},
    -		{"SYS_SOCKETCALL", Const, 0},
    -		{"SYS_SOCKETPAIR", Const, 0},
    -		{"SYS_SPLICE", Const, 0},
    -		{"SYS_SSETMASK", Const, 0},
    -		{"SYS_SSTK", Const, 0},
    -		{"SYS_STACK_SNAPSHOT", Const, 0},
    -		{"SYS_STAT", Const, 0},
    -		{"SYS_STAT64", Const, 0},
    -		{"SYS_STAT64_EXTENDED", Const, 0},
    -		{"SYS_STATFS", Const, 0},
    -		{"SYS_STATFS64", Const, 0},
    -		{"SYS_STATV", Const, 0},
    -		{"SYS_STATVFS1", Const, 1},
    -		{"SYS_STAT_EXTENDED", Const, 0},
    -		{"SYS_STIME", Const, 0},
    -		{"SYS_STTY", Const, 0},
    -		{"SYS_SWAPCONTEXT", Const, 0},
    -		{"SYS_SWAPCTL", Const, 1},
    -		{"SYS_SWAPOFF", Const, 0},
    -		{"SYS_SWAPON", Const, 0},
    -		{"SYS_SYMLINK", Const, 0},
    -		{"SYS_SYMLINKAT", Const, 0},
    -		{"SYS_SYNC", Const, 0},
    -		{"SYS_SYNCFS", Const, 0},
    -		{"SYS_SYNC_FILE_RANGE", Const, 0},
    -		{"SYS_SYSARCH", Const, 0},
    -		{"SYS_SYSCALL", Const, 0},
    -		{"SYS_SYSCALL_BASE", Const, 0},
    -		{"SYS_SYSFS", Const, 0},
    -		{"SYS_SYSINFO", Const, 0},
    -		{"SYS_SYSLOG", Const, 0},
    -		{"SYS_TEE", Const, 0},
    -		{"SYS_TGKILL", Const, 0},
    -		{"SYS_THREAD_SELFID", Const, 0},
    -		{"SYS_THR_CREATE", Const, 0},
    -		{"SYS_THR_EXIT", Const, 0},
    -		{"SYS_THR_KILL", Const, 0},
    -		{"SYS_THR_KILL2", Const, 0},
    -		{"SYS_THR_NEW", Const, 0},
    -		{"SYS_THR_SELF", Const, 0},
    -		{"SYS_THR_SET_NAME", Const, 0},
    -		{"SYS_THR_SUSPEND", Const, 0},
    -		{"SYS_THR_WAKE", Const, 0},
    -		{"SYS_TIME", Const, 0},
    -		{"SYS_TIMERFD_CREATE", Const, 0},
    -		{"SYS_TIMERFD_GETTIME", Const, 0},
    -		{"SYS_TIMERFD_SETTIME", Const, 0},
    -		{"SYS_TIMER_CREATE", Const, 0},
    -		{"SYS_TIMER_DELETE", Const, 0},
    -		{"SYS_TIMER_GETOVERRUN", Const, 0},
    -		{"SYS_TIMER_GETTIME", Const, 0},
    -		{"SYS_TIMER_SETTIME", Const, 0},
    -		{"SYS_TIMES", Const, 0},
    -		{"SYS_TKILL", Const, 0},
    -		{"SYS_TRUNCATE", Const, 0},
    -		{"SYS_TRUNCATE64", Const, 0},
    -		{"SYS_TUXCALL", Const, 0},
    -		{"SYS_UGETRLIMIT", Const, 0},
    -		{"SYS_ULIMIT", Const, 0},
    -		{"SYS_UMASK", Const, 0},
    -		{"SYS_UMASK_EXTENDED", Const, 0},
    -		{"SYS_UMOUNT", Const, 0},
    -		{"SYS_UMOUNT2", Const, 0},
    -		{"SYS_UNAME", Const, 0},
    -		{"SYS_UNDELETE", Const, 0},
    -		{"SYS_UNLINK", Const, 0},
    -		{"SYS_UNLINKAT", Const, 0},
    -		{"SYS_UNMOUNT", Const, 0},
    -		{"SYS_UNSHARE", Const, 0},
    -		{"SYS_USELIB", Const, 0},
    -		{"SYS_USTAT", Const, 0},
    -		{"SYS_UTIME", Const, 0},
    -		{"SYS_UTIMENSAT", Const, 0},
    -		{"SYS_UTIMES", Const, 0},
    -		{"SYS_UTRACE", Const, 0},
    -		{"SYS_UUIDGEN", Const, 0},
    -		{"SYS_VADVISE", Const, 1},
    -		{"SYS_VFORK", Const, 0},
    -		{"SYS_VHANGUP", Const, 0},
    -		{"SYS_VM86", Const, 0},
    -		{"SYS_VM86OLD", Const, 0},
    -		{"SYS_VMSPLICE", Const, 0},
    -		{"SYS_VM_PRESSURE_MONITOR", Const, 0},
    -		{"SYS_VSERVER", Const, 0},
    -		{"SYS_WAIT4", Const, 0},
    -		{"SYS_WAIT4_NOCANCEL", Const, 0},
    -		{"SYS_WAIT6", Const, 1},
    -		{"SYS_WAITEVENT", Const, 0},
    -		{"SYS_WAITID", Const, 0},
    -		{"SYS_WAITID_NOCANCEL", Const, 0},
    -		{"SYS_WAITPID", Const, 0},
    -		{"SYS_WATCHEVENT", Const, 0},
    -		{"SYS_WORKQ_KERNRETURN", Const, 0},
    -		{"SYS_WORKQ_OPEN", Const, 0},
    -		{"SYS_WRITE", Const, 0},
    -		{"SYS_WRITEV", Const, 0},
    -		{"SYS_WRITEV_NOCANCEL", Const, 0},
    -		{"SYS_WRITE_NOCANCEL", Const, 0},
    -		{"SYS_YIELD", Const, 0},
    -		{"SYS__LLSEEK", Const, 0},
    -		{"SYS__LWP_CONTINUE", Const, 1},
    -		{"SYS__LWP_CREATE", Const, 1},
    -		{"SYS__LWP_CTL", Const, 1},
    -		{"SYS__LWP_DETACH", Const, 1},
    -		{"SYS__LWP_EXIT", Const, 1},
    -		{"SYS__LWP_GETNAME", Const, 1},
    -		{"SYS__LWP_GETPRIVATE", Const, 1},
    -		{"SYS__LWP_KILL", Const, 1},
    -		{"SYS__LWP_PARK", Const, 1},
    -		{"SYS__LWP_SELF", Const, 1},
    -		{"SYS__LWP_SETNAME", Const, 1},
    -		{"SYS__LWP_SETPRIVATE", Const, 1},
    -		{"SYS__LWP_SUSPEND", Const, 1},
    -		{"SYS__LWP_UNPARK", Const, 1},
    -		{"SYS__LWP_UNPARK_ALL", Const, 1},
    -		{"SYS__LWP_WAIT", Const, 1},
    -		{"SYS__LWP_WAKEUP", Const, 1},
    -		{"SYS__NEWSELECT", Const, 0},
    -		{"SYS__PSET_BIND", Const, 1},
    -		{"SYS__SCHED_GETAFFINITY", Const, 1},
    -		{"SYS__SCHED_GETPARAM", Const, 1},
    -		{"SYS__SCHED_SETAFFINITY", Const, 1},
    -		{"SYS__SCHED_SETPARAM", Const, 1},
    -		{"SYS__SYSCTL", Const, 0},
    -		{"SYS__UMTX_LOCK", Const, 0},
    -		{"SYS__UMTX_OP", Const, 0},
    -		{"SYS__UMTX_UNLOCK", Const, 0},
    -		{"SYS___ACL_ACLCHECK_FD", Const, 0},
    -		{"SYS___ACL_ACLCHECK_FILE", Const, 0},
    -		{"SYS___ACL_ACLCHECK_LINK", Const, 0},
    -		{"SYS___ACL_DELETE_FD", Const, 0},
    -		{"SYS___ACL_DELETE_FILE", Const, 0},
    -		{"SYS___ACL_DELETE_LINK", Const, 0},
    -		{"SYS___ACL_GET_FD", Const, 0},
    -		{"SYS___ACL_GET_FILE", Const, 0},
    -		{"SYS___ACL_GET_LINK", Const, 0},
    -		{"SYS___ACL_SET_FD", Const, 0},
    -		{"SYS___ACL_SET_FILE", Const, 0},
    -		{"SYS___ACL_SET_LINK", Const, 0},
    -		{"SYS___CAP_RIGHTS_GET", Const, 14},
    -		{"SYS___CLONE", Const, 1},
    -		{"SYS___DISABLE_THREADSIGNAL", Const, 0},
    -		{"SYS___GETCWD", Const, 0},
    -		{"SYS___GETLOGIN", Const, 1},
    -		{"SYS___GET_TCB", Const, 1},
    -		{"SYS___MAC_EXECVE", Const, 0},
    -		{"SYS___MAC_GETFSSTAT", Const, 0},
    -		{"SYS___MAC_GET_FD", Const, 0},
    -		{"SYS___MAC_GET_FILE", Const, 0},
    -		{"SYS___MAC_GET_LCID", Const, 0},
    -		{"SYS___MAC_GET_LCTX", Const, 0},
    -		{"SYS___MAC_GET_LINK", Const, 0},
    -		{"SYS___MAC_GET_MOUNT", Const, 0},
    -		{"SYS___MAC_GET_PID", Const, 0},
    -		{"SYS___MAC_GET_PROC", Const, 0},
    -		{"SYS___MAC_MOUNT", Const, 0},
    -		{"SYS___MAC_SET_FD", Const, 0},
    -		{"SYS___MAC_SET_FILE", Const, 0},
    -		{"SYS___MAC_SET_LCTX", Const, 0},
    -		{"SYS___MAC_SET_LINK", Const, 0},
    -		{"SYS___MAC_SET_PROC", Const, 0},
    -		{"SYS___MAC_SYSCALL", Const, 0},
    -		{"SYS___OLD_SEMWAIT_SIGNAL", Const, 0},
    -		{"SYS___OLD_SEMWAIT_SIGNAL_NOCANCEL", Const, 0},
    -		{"SYS___POSIX_CHOWN", Const, 1},
    -		{"SYS___POSIX_FCHOWN", Const, 1},
    -		{"SYS___POSIX_LCHOWN", Const, 1},
    -		{"SYS___POSIX_RENAME", Const, 1},
    -		{"SYS___PTHREAD_CANCELED", Const, 0},
    -		{"SYS___PTHREAD_CHDIR", Const, 0},
    -		{"SYS___PTHREAD_FCHDIR", Const, 0},
    -		{"SYS___PTHREAD_KILL", Const, 0},
    -		{"SYS___PTHREAD_MARKCANCEL", Const, 0},
    -		{"SYS___PTHREAD_SIGMASK", Const, 0},
    -		{"SYS___QUOTACTL", Const, 1},
    -		{"SYS___SEMCTL", Const, 1},
    -		{"SYS___SEMWAIT_SIGNAL", Const, 0},
    -		{"SYS___SEMWAIT_SIGNAL_NOCANCEL", Const, 0},
    -		{"SYS___SETLOGIN", Const, 1},
    -		{"SYS___SETUGID", Const, 0},
    -		{"SYS___SET_TCB", Const, 1},
    -		{"SYS___SIGACTION_SIGTRAMP", Const, 1},
    -		{"SYS___SIGTIMEDWAIT", Const, 1},
    -		{"SYS___SIGWAIT", Const, 0},
    -		{"SYS___SIGWAIT_NOCANCEL", Const, 0},
    -		{"SYS___SYSCTL", Const, 0},
    -		{"SYS___TFORK", Const, 1},
    -		{"SYS___THREXIT", Const, 1},
    -		{"SYS___THRSIGDIVERT", Const, 1},
    -		{"SYS___THRSLEEP", Const, 1},
    -		{"SYS___THRWAKEUP", Const, 1},
    -		{"S_ARCH1", Const, 1},
    -		{"S_ARCH2", Const, 1},
    -		{"S_BLKSIZE", Const, 0},
    -		{"S_IEXEC", Const, 0},
    -		{"S_IFBLK", Const, 0},
    -		{"S_IFCHR", Const, 0},
    -		{"S_IFDIR", Const, 0},
    -		{"S_IFIFO", Const, 0},
    -		{"S_IFLNK", Const, 0},
    -		{"S_IFMT", Const, 0},
    -		{"S_IFREG", Const, 0},
    -		{"S_IFSOCK", Const, 0},
    -		{"S_IFWHT", Const, 0},
    -		{"S_IREAD", Const, 0},
    -		{"S_IRGRP", Const, 0},
    -		{"S_IROTH", Const, 0},
    -		{"S_IRUSR", Const, 0},
    -		{"S_IRWXG", Const, 0},
    -		{"S_IRWXO", Const, 0},
    -		{"S_IRWXU", Const, 0},
    -		{"S_ISGID", Const, 0},
    -		{"S_ISTXT", Const, 0},
    -		{"S_ISUID", Const, 0},
    -		{"S_ISVTX", Const, 0},
    -		{"S_IWGRP", Const, 0},
    -		{"S_IWOTH", Const, 0},
    -		{"S_IWRITE", Const, 0},
    -		{"S_IWUSR", Const, 0},
    -		{"S_IXGRP", Const, 0},
    -		{"S_IXOTH", Const, 0},
    -		{"S_IXUSR", Const, 0},
    -		{"S_LOGIN_SET", Const, 1},
    -		{"SecurityAttributes", Type, 0},
    -		{"SecurityAttributes.InheritHandle", Field, 0},
    -		{"SecurityAttributes.Length", Field, 0},
    -		{"SecurityAttributes.SecurityDescriptor", Field, 0},
    -		{"Seek", Func, 0},
    -		{"Select", Func, 0},
    -		{"Sendfile", Func, 0},
    -		{"Sendmsg", Func, 0},
    -		{"SendmsgN", Func, 3},
    -		{"Sendto", Func, 0},
    -		{"Servent", Type, 0},
    -		{"Servent.Aliases", Field, 0},
    -		{"Servent.Name", Field, 0},
    -		{"Servent.Port", Field, 0},
    -		{"Servent.Proto", Field, 0},
    -		{"SetBpf", Func, 0},
    -		{"SetBpfBuflen", Func, 0},
    -		{"SetBpfDatalink", Func, 0},
    -		{"SetBpfHeadercmpl", Func, 0},
    -		{"SetBpfImmediate", Func, 0},
    -		{"SetBpfInterface", Func, 0},
    -		{"SetBpfPromisc", Func, 0},
    -		{"SetBpfTimeout", Func, 0},
    -		{"SetCurrentDirectory", Func, 0},
    -		{"SetEndOfFile", Func, 0},
    -		{"SetEnvironmentVariable", Func, 0},
    -		{"SetFileAttributes", Func, 0},
    -		{"SetFileCompletionNotificationModes", Func, 2},
    -		{"SetFilePointer", Func, 0},
    -		{"SetFileTime", Func, 0},
    -		{"SetHandleInformation", Func, 0},
    -		{"SetKevent", Func, 0},
    -		{"SetLsfPromisc", Func, 0},
    -		{"SetNonblock", Func, 0},
    -		{"Setdomainname", Func, 0},
    -		{"Setegid", Func, 0},
    -		{"Setenv", Func, 0},
    -		{"Seteuid", Func, 0},
    -		{"Setfsgid", Func, 0},
    -		{"Setfsuid", Func, 0},
    -		{"Setgid", Func, 0},
    -		{"Setgroups", Func, 0},
    -		{"Sethostname", Func, 0},
    -		{"Setlogin", Func, 0},
    -		{"Setpgid", Func, 0},
    -		{"Setpriority", Func, 0},
    -		{"Setprivexec", Func, 0},
    -		{"Setregid", Func, 0},
    -		{"Setresgid", Func, 0},
    -		{"Setresuid", Func, 0},
    -		{"Setreuid", Func, 0},
    -		{"Setrlimit", Func, 0},
    -		{"Setsid", Func, 0},
    -		{"Setsockopt", Func, 0},
    -		{"SetsockoptByte", Func, 0},
    -		{"SetsockoptICMPv6Filter", Func, 2},
    -		{"SetsockoptIPMreq", Func, 0},
    -		{"SetsockoptIPMreqn", Func, 0},
    -		{"SetsockoptIPv6Mreq", Func, 0},
    -		{"SetsockoptInet4Addr", Func, 0},
    -		{"SetsockoptInt", Func, 0},
    -		{"SetsockoptLinger", Func, 0},
    -		{"SetsockoptString", Func, 0},
    -		{"SetsockoptTimeval", Func, 0},
    -		{"Settimeofday", Func, 0},
    -		{"Setuid", Func, 0},
    -		{"Setxattr", Func, 1},
    -		{"Shutdown", Func, 0},
    -		{"SidTypeAlias", Const, 0},
    -		{"SidTypeComputer", Const, 0},
    -		{"SidTypeDeletedAccount", Const, 0},
    -		{"SidTypeDomain", Const, 0},
    -		{"SidTypeGroup", Const, 0},
    -		{"SidTypeInvalid", Const, 0},
    -		{"SidTypeLabel", Const, 0},
    -		{"SidTypeUnknown", Const, 0},
    -		{"SidTypeUser", Const, 0},
    -		{"SidTypeWellKnownGroup", Const, 0},
    -		{"Signal", Type, 0},
    -		{"SizeofBpfHdr", Const, 0},
    -		{"SizeofBpfInsn", Const, 0},
    -		{"SizeofBpfProgram", Const, 0},
    -		{"SizeofBpfStat", Const, 0},
    -		{"SizeofBpfVersion", Const, 0},
    -		{"SizeofBpfZbuf", Const, 0},
    -		{"SizeofBpfZbufHeader", Const, 0},
    -		{"SizeofCmsghdr", Const, 0},
    -		{"SizeofICMPv6Filter", Const, 2},
    -		{"SizeofIPMreq", Const, 0},
    -		{"SizeofIPMreqn", Const, 0},
    -		{"SizeofIPv6MTUInfo", Const, 2},
    -		{"SizeofIPv6Mreq", Const, 0},
    -		{"SizeofIfAddrmsg", Const, 0},
    -		{"SizeofIfAnnounceMsghdr", Const, 1},
    -		{"SizeofIfData", Const, 0},
    -		{"SizeofIfInfomsg", Const, 0},
    -		{"SizeofIfMsghdr", Const, 0},
    -		{"SizeofIfaMsghdr", Const, 0},
    -		{"SizeofIfmaMsghdr", Const, 0},
    -		{"SizeofIfmaMsghdr2", Const, 0},
    -		{"SizeofInet4Pktinfo", Const, 0},
    -		{"SizeofInet6Pktinfo", Const, 0},
    -		{"SizeofInotifyEvent", Const, 0},
    -		{"SizeofLinger", Const, 0},
    -		{"SizeofMsghdr", Const, 0},
    -		{"SizeofNlAttr", Const, 0},
    -		{"SizeofNlMsgerr", Const, 0},
    -		{"SizeofNlMsghdr", Const, 0},
    -		{"SizeofRtAttr", Const, 0},
    -		{"SizeofRtGenmsg", Const, 0},
    -		{"SizeofRtMetrics", Const, 0},
    -		{"SizeofRtMsg", Const, 0},
    -		{"SizeofRtMsghdr", Const, 0},
    -		{"SizeofRtNexthop", Const, 0},
    -		{"SizeofSockFilter", Const, 0},
    -		{"SizeofSockFprog", Const, 0},
    -		{"SizeofSockaddrAny", Const, 0},
    -		{"SizeofSockaddrDatalink", Const, 0},
    -		{"SizeofSockaddrInet4", Const, 0},
    -		{"SizeofSockaddrInet6", Const, 0},
    -		{"SizeofSockaddrLinklayer", Const, 0},
    -		{"SizeofSockaddrNetlink", Const, 0},
    -		{"SizeofSockaddrUnix", Const, 0},
    -		{"SizeofTCPInfo", Const, 1},
    -		{"SizeofUcred", Const, 0},
    -		{"SlicePtrFromStrings", Func, 1},
    -		{"SockFilter", Type, 0},
    -		{"SockFilter.Code", Field, 0},
    -		{"SockFilter.Jf", Field, 0},
    -		{"SockFilter.Jt", Field, 0},
    -		{"SockFilter.K", Field, 0},
    -		{"SockFprog", Type, 0},
    -		{"SockFprog.Filter", Field, 0},
    -		{"SockFprog.Len", Field, 0},
    -		{"SockFprog.Pad_cgo_0", Field, 0},
    -		{"Sockaddr", Type, 0},
    -		{"SockaddrDatalink", Type, 0},
    -		{"SockaddrDatalink.Alen", Field, 0},
    -		{"SockaddrDatalink.Data", Field, 0},
    -		{"SockaddrDatalink.Family", Field, 0},
    -		{"SockaddrDatalink.Index", Field, 0},
    -		{"SockaddrDatalink.Len", Field, 0},
    -		{"SockaddrDatalink.Nlen", Field, 0},
    -		{"SockaddrDatalink.Slen", Field, 0},
    -		{"SockaddrDatalink.Type", Field, 0},
    -		{"SockaddrGen", Type, 0},
    -		{"SockaddrInet4", Type, 0},
    -		{"SockaddrInet4.Addr", Field, 0},
    -		{"SockaddrInet4.Port", Field, 0},
    -		{"SockaddrInet6", Type, 0},
    -		{"SockaddrInet6.Addr", Field, 0},
    -		{"SockaddrInet6.Port", Field, 0},
    -		{"SockaddrInet6.ZoneId", Field, 0},
    -		{"SockaddrLinklayer", Type, 0},
    -		{"SockaddrLinklayer.Addr", Field, 0},
    -		{"SockaddrLinklayer.Halen", Field, 0},
    -		{"SockaddrLinklayer.Hatype", Field, 0},
    -		{"SockaddrLinklayer.Ifindex", Field, 0},
    -		{"SockaddrLinklayer.Pkttype", Field, 0},
    -		{"SockaddrLinklayer.Protocol", Field, 0},
    -		{"SockaddrNetlink", Type, 0},
    -		{"SockaddrNetlink.Family", Field, 0},
    -		{"SockaddrNetlink.Groups", Field, 0},
    -		{"SockaddrNetlink.Pad", Field, 0},
    -		{"SockaddrNetlink.Pid", Field, 0},
    -		{"SockaddrUnix", Type, 0},
    -		{"SockaddrUnix.Name", Field, 0},
    -		{"Socket", Func, 0},
    -		{"SocketControlMessage", Type, 0},
    -		{"SocketControlMessage.Data", Field, 0},
    -		{"SocketControlMessage.Header", Field, 0},
    -		{"SocketDisableIPv6", Var, 0},
    -		{"Socketpair", Func, 0},
    -		{"Splice", Func, 0},
    -		{"StartProcess", Func, 0},
    -		{"StartupInfo", Type, 0},
    -		{"StartupInfo.Cb", Field, 0},
    -		{"StartupInfo.Desktop", Field, 0},
    -		{"StartupInfo.FillAttribute", Field, 0},
    -		{"StartupInfo.Flags", Field, 0},
    -		{"StartupInfo.ShowWindow", Field, 0},
    -		{"StartupInfo.StdErr", Field, 0},
    -		{"StartupInfo.StdInput", Field, 0},
    -		{"StartupInfo.StdOutput", Field, 0},
    -		{"StartupInfo.Title", Field, 0},
    -		{"StartupInfo.X", Field, 0},
    -		{"StartupInfo.XCountChars", Field, 0},
    -		{"StartupInfo.XSize", Field, 0},
    -		{"StartupInfo.Y", Field, 0},
    -		{"StartupInfo.YCountChars", Field, 0},
    -		{"StartupInfo.YSize", Field, 0},
    -		{"Stat", Func, 0},
    -		{"Stat_t", Type, 0},
    -		{"Stat_t.Atim", Field, 0},
    -		{"Stat_t.Atim_ext", Field, 12},
    -		{"Stat_t.Atimespec", Field, 0},
    -		{"Stat_t.Birthtimespec", Field, 0},
    -		{"Stat_t.Blksize", Field, 0},
    -		{"Stat_t.Blocks", Field, 0},
    -		{"Stat_t.Btim_ext", Field, 12},
    -		{"Stat_t.Ctim", Field, 0},
    -		{"Stat_t.Ctim_ext", Field, 12},
    -		{"Stat_t.Ctimespec", Field, 0},
    -		{"Stat_t.Dev", Field, 0},
    -		{"Stat_t.Flags", Field, 0},
    -		{"Stat_t.Gen", Field, 0},
    -		{"Stat_t.Gid", Field, 0},
    -		{"Stat_t.Ino", Field, 0},
    -		{"Stat_t.Lspare", Field, 0},
    -		{"Stat_t.Lspare0", Field, 2},
    -		{"Stat_t.Lspare1", Field, 2},
    -		{"Stat_t.Mode", Field, 0},
    -		{"Stat_t.Mtim", Field, 0},
    -		{"Stat_t.Mtim_ext", Field, 12},
    -		{"Stat_t.Mtimespec", Field, 0},
    -		{"Stat_t.Nlink", Field, 0},
    -		{"Stat_t.Pad_cgo_0", Field, 0},
    -		{"Stat_t.Pad_cgo_1", Field, 0},
    -		{"Stat_t.Pad_cgo_2", Field, 0},
    -		{"Stat_t.Padding0", Field, 12},
    -		{"Stat_t.Padding1", Field, 12},
    -		{"Stat_t.Qspare", Field, 0},
    -		{"Stat_t.Rdev", Field, 0},
    -		{"Stat_t.Size", Field, 0},
    -		{"Stat_t.Spare", Field, 2},
    -		{"Stat_t.Uid", Field, 0},
    -		{"Stat_t.X__pad0", Field, 0},
    -		{"Stat_t.X__pad1", Field, 0},
    -		{"Stat_t.X__pad2", Field, 0},
    -		{"Stat_t.X__st_birthtim", Field, 2},
    -		{"Stat_t.X__st_ino", Field, 0},
    -		{"Stat_t.X__unused", Field, 0},
    -		{"Statfs", Func, 0},
    -		{"Statfs_t", Type, 0},
    -		{"Statfs_t.Asyncreads", Field, 0},
    -		{"Statfs_t.Asyncwrites", Field, 0},
    -		{"Statfs_t.Bavail", Field, 0},
    -		{"Statfs_t.Bfree", Field, 0},
    -		{"Statfs_t.Blocks", Field, 0},
    -		{"Statfs_t.Bsize", Field, 0},
    -		{"Statfs_t.Charspare", Field, 0},
    -		{"Statfs_t.F_asyncreads", Field, 2},
    -		{"Statfs_t.F_asyncwrites", Field, 2},
    -		{"Statfs_t.F_bavail", Field, 2},
    -		{"Statfs_t.F_bfree", Field, 2},
    -		{"Statfs_t.F_blocks", Field, 2},
    -		{"Statfs_t.F_bsize", Field, 2},
    -		{"Statfs_t.F_ctime", Field, 2},
    -		{"Statfs_t.F_favail", Field, 2},
    -		{"Statfs_t.F_ffree", Field, 2},
    -		{"Statfs_t.F_files", Field, 2},
    -		{"Statfs_t.F_flags", Field, 2},
    -		{"Statfs_t.F_fsid", Field, 2},
    -		{"Statfs_t.F_fstypename", Field, 2},
    -		{"Statfs_t.F_iosize", Field, 2},
    -		{"Statfs_t.F_mntfromname", Field, 2},
    -		{"Statfs_t.F_mntfromspec", Field, 3},
    -		{"Statfs_t.F_mntonname", Field, 2},
    -		{"Statfs_t.F_namemax", Field, 2},
    -		{"Statfs_t.F_owner", Field, 2},
    -		{"Statfs_t.F_spare", Field, 2},
    -		{"Statfs_t.F_syncreads", Field, 2},
    -		{"Statfs_t.F_syncwrites", Field, 2},
    -		{"Statfs_t.Ffree", Field, 0},
    -		{"Statfs_t.Files", Field, 0},
    -		{"Statfs_t.Flags", Field, 0},
    -		{"Statfs_t.Frsize", Field, 0},
    -		{"Statfs_t.Fsid", Field, 0},
    -		{"Statfs_t.Fssubtype", Field, 0},
    -		{"Statfs_t.Fstypename", Field, 0},
    -		{"Statfs_t.Iosize", Field, 0},
    -		{"Statfs_t.Mntfromname", Field, 0},
    -		{"Statfs_t.Mntonname", Field, 0},
    -		{"Statfs_t.Mount_info", Field, 2},
    -		{"Statfs_t.Namelen", Field, 0},
    -		{"Statfs_t.Namemax", Field, 0},
    -		{"Statfs_t.Owner", Field, 0},
    -		{"Statfs_t.Pad_cgo_0", Field, 0},
    -		{"Statfs_t.Pad_cgo_1", Field, 2},
    -		{"Statfs_t.Reserved", Field, 0},
    -		{"Statfs_t.Spare", Field, 0},
    -		{"Statfs_t.Syncreads", Field, 0},
    -		{"Statfs_t.Syncwrites", Field, 0},
    -		{"Statfs_t.Type", Field, 0},
    -		{"Statfs_t.Version", Field, 0},
    -		{"Stderr", Var, 0},
    -		{"Stdin", Var, 0},
    -		{"Stdout", Var, 0},
    -		{"StringBytePtr", Func, 0},
    -		{"StringByteSlice", Func, 0},
    -		{"StringSlicePtr", Func, 0},
    -		{"StringToSid", Func, 0},
    -		{"StringToUTF16", Func, 0},
    -		{"StringToUTF16Ptr", Func, 0},
    -		{"Symlink", Func, 0},
    -		{"Sync", Func, 0},
    -		{"SyncFileRange", Func, 0},
    -		{"SysProcAttr", Type, 0},
    -		{"SysProcAttr.AdditionalInheritedHandles", Field, 17},
    -		{"SysProcAttr.AmbientCaps", Field, 9},
    -		{"SysProcAttr.CgroupFD", Field, 20},
    -		{"SysProcAttr.Chroot", Field, 0},
    -		{"SysProcAttr.Cloneflags", Field, 2},
    -		{"SysProcAttr.CmdLine", Field, 0},
    -		{"SysProcAttr.CreationFlags", Field, 1},
    -		{"SysProcAttr.Credential", Field, 0},
    -		{"SysProcAttr.Ctty", Field, 1},
    -		{"SysProcAttr.Foreground", Field, 5},
    -		{"SysProcAttr.GidMappings", Field, 4},
    -		{"SysProcAttr.GidMappingsEnableSetgroups", Field, 5},
    -		{"SysProcAttr.HideWindow", Field, 0},
    -		{"SysProcAttr.Jail", Field, 21},
    -		{"SysProcAttr.NoInheritHandles", Field, 16},
    -		{"SysProcAttr.Noctty", Field, 0},
    -		{"SysProcAttr.ParentProcess", Field, 17},
    -		{"SysProcAttr.Pdeathsig", Field, 0},
    -		{"SysProcAttr.Pgid", Field, 5},
    -		{"SysProcAttr.PidFD", Field, 22},
    -		{"SysProcAttr.ProcessAttributes", Field, 13},
    -		{"SysProcAttr.Ptrace", Field, 0},
    -		{"SysProcAttr.Setctty", Field, 0},
    -		{"SysProcAttr.Setpgid", Field, 0},
    -		{"SysProcAttr.Setsid", Field, 0},
    -		{"SysProcAttr.ThreadAttributes", Field, 13},
    -		{"SysProcAttr.Token", Field, 10},
    -		{"SysProcAttr.UidMappings", Field, 4},
    -		{"SysProcAttr.Unshareflags", Field, 7},
    -		{"SysProcAttr.UseCgroupFD", Field, 20},
    -		{"SysProcIDMap", Type, 4},
    -		{"SysProcIDMap.ContainerID", Field, 4},
    -		{"SysProcIDMap.HostID", Field, 4},
    -		{"SysProcIDMap.Size", Field, 4},
    -		{"Syscall", Func, 0},
    -		{"Syscall12", Func, 0},
    -		{"Syscall15", Func, 0},
    -		{"Syscall18", Func, 12},
    -		{"Syscall6", Func, 0},
    -		{"Syscall9", Func, 0},
    -		{"SyscallN", Func, 18},
    -		{"Sysctl", Func, 0},
    -		{"SysctlUint32", Func, 0},
    -		{"Sysctlnode", Type, 2},
    -		{"Sysctlnode.Flags", Field, 2},
    -		{"Sysctlnode.Name", Field, 2},
    -		{"Sysctlnode.Num", Field, 2},
    -		{"Sysctlnode.Un", Field, 2},
    -		{"Sysctlnode.Ver", Field, 2},
    -		{"Sysctlnode.X__rsvd", Field, 2},
    -		{"Sysctlnode.X_sysctl_desc", Field, 2},
    -		{"Sysctlnode.X_sysctl_func", Field, 2},
    -		{"Sysctlnode.X_sysctl_parent", Field, 2},
    -		{"Sysctlnode.X_sysctl_size", Field, 2},
    -		{"Sysinfo", Func, 0},
    -		{"Sysinfo_t", Type, 0},
    -		{"Sysinfo_t.Bufferram", Field, 0},
    -		{"Sysinfo_t.Freehigh", Field, 0},
    -		{"Sysinfo_t.Freeram", Field, 0},
    -		{"Sysinfo_t.Freeswap", Field, 0},
    -		{"Sysinfo_t.Loads", Field, 0},
    -		{"Sysinfo_t.Pad", Field, 0},
    -		{"Sysinfo_t.Pad_cgo_0", Field, 0},
    -		{"Sysinfo_t.Pad_cgo_1", Field, 0},
    -		{"Sysinfo_t.Procs", Field, 0},
    -		{"Sysinfo_t.Sharedram", Field, 0},
    -		{"Sysinfo_t.Totalhigh", Field, 0},
    -		{"Sysinfo_t.Totalram", Field, 0},
    -		{"Sysinfo_t.Totalswap", Field, 0},
    -		{"Sysinfo_t.Unit", Field, 0},
    -		{"Sysinfo_t.Uptime", Field, 0},
    -		{"Sysinfo_t.X_f", Field, 0},
    -		{"Systemtime", Type, 0},
    -		{"Systemtime.Day", Field, 0},
    -		{"Systemtime.DayOfWeek", Field, 0},
    -		{"Systemtime.Hour", Field, 0},
    -		{"Systemtime.Milliseconds", Field, 0},
    -		{"Systemtime.Minute", Field, 0},
    -		{"Systemtime.Month", Field, 0},
    -		{"Systemtime.Second", Field, 0},
    -		{"Systemtime.Year", Field, 0},
    -		{"TCGETS", Const, 0},
    -		{"TCIFLUSH", Const, 1},
    -		{"TCIOFLUSH", Const, 1},
    -		{"TCOFLUSH", Const, 1},
    -		{"TCPInfo", Type, 1},
    -		{"TCPInfo.Advmss", Field, 1},
    -		{"TCPInfo.Ato", Field, 1},
    -		{"TCPInfo.Backoff", Field, 1},
    -		{"TCPInfo.Ca_state", Field, 1},
    -		{"TCPInfo.Fackets", Field, 1},
    -		{"TCPInfo.Last_ack_recv", Field, 1},
    -		{"TCPInfo.Last_ack_sent", Field, 1},
    -		{"TCPInfo.Last_data_recv", Field, 1},
    -		{"TCPInfo.Last_data_sent", Field, 1},
    -		{"TCPInfo.Lost", Field, 1},
    -		{"TCPInfo.Options", Field, 1},
    -		{"TCPInfo.Pad_cgo_0", Field, 1},
    -		{"TCPInfo.Pmtu", Field, 1},
    -		{"TCPInfo.Probes", Field, 1},
    -		{"TCPInfo.Rcv_mss", Field, 1},
    -		{"TCPInfo.Rcv_rtt", Field, 1},
    -		{"TCPInfo.Rcv_space", Field, 1},
    -		{"TCPInfo.Rcv_ssthresh", Field, 1},
    -		{"TCPInfo.Reordering", Field, 1},
    -		{"TCPInfo.Retrans", Field, 1},
    -		{"TCPInfo.Retransmits", Field, 1},
    -		{"TCPInfo.Rto", Field, 1},
    -		{"TCPInfo.Rtt", Field, 1},
    -		{"TCPInfo.Rttvar", Field, 1},
    -		{"TCPInfo.Sacked", Field, 1},
    -		{"TCPInfo.Snd_cwnd", Field, 1},
    -		{"TCPInfo.Snd_mss", Field, 1},
    -		{"TCPInfo.Snd_ssthresh", Field, 1},
    -		{"TCPInfo.State", Field, 1},
    -		{"TCPInfo.Total_retrans", Field, 1},
    -		{"TCPInfo.Unacked", Field, 1},
    -		{"TCPKeepalive", Type, 3},
    -		{"TCPKeepalive.Interval", Field, 3},
    -		{"TCPKeepalive.OnOff", Field, 3},
    -		{"TCPKeepalive.Time", Field, 3},
    -		{"TCP_CA_NAME_MAX", Const, 0},
    -		{"TCP_CONGCTL", Const, 1},
    -		{"TCP_CONGESTION", Const, 0},
    -		{"TCP_CONNECTIONTIMEOUT", Const, 0},
    -		{"TCP_CORK", Const, 0},
    -		{"TCP_DEFER_ACCEPT", Const, 0},
    -		{"TCP_ENABLE_ECN", Const, 16},
    -		{"TCP_INFO", Const, 0},
    -		{"TCP_KEEPALIVE", Const, 0},
    -		{"TCP_KEEPCNT", Const, 0},
    -		{"TCP_KEEPIDLE", Const, 0},
    -		{"TCP_KEEPINIT", Const, 1},
    -		{"TCP_KEEPINTVL", Const, 0},
    -		{"TCP_LINGER2", Const, 0},
    -		{"TCP_MAXBURST", Const, 0},
    -		{"TCP_MAXHLEN", Const, 0},
    -		{"TCP_MAXOLEN", Const, 0},
    -		{"TCP_MAXSEG", Const, 0},
    -		{"TCP_MAXWIN", Const, 0},
    -		{"TCP_MAX_SACK", Const, 0},
    -		{"TCP_MAX_WINSHIFT", Const, 0},
    -		{"TCP_MD5SIG", Const, 0},
    -		{"TCP_MD5SIG_MAXKEYLEN", Const, 0},
    -		{"TCP_MINMSS", Const, 0},
    -		{"TCP_MINMSSOVERLOAD", Const, 0},
    -		{"TCP_MSS", Const, 0},
    -		{"TCP_NODELAY", Const, 0},
    -		{"TCP_NOOPT", Const, 0},
    -		{"TCP_NOPUSH", Const, 0},
    -		{"TCP_NOTSENT_LOWAT", Const, 16},
    -		{"TCP_NSTATES", Const, 1},
    -		{"TCP_QUICKACK", Const, 0},
    -		{"TCP_RXT_CONNDROPTIME", Const, 0},
    -		{"TCP_RXT_FINDROP", Const, 0},
    -		{"TCP_SACK_ENABLE", Const, 1},
    -		{"TCP_SENDMOREACKS", Const, 16},
    -		{"TCP_SYNCNT", Const, 0},
    -		{"TCP_VENDOR", Const, 3},
    -		{"TCP_WINDOW_CLAMP", Const, 0},
    -		{"TCSAFLUSH", Const, 1},
    -		{"TCSETS", Const, 0},
    -		{"TF_DISCONNECT", Const, 0},
    -		{"TF_REUSE_SOCKET", Const, 0},
    -		{"TF_USE_DEFAULT_WORKER", Const, 0},
    -		{"TF_USE_KERNEL_APC", Const, 0},
    -		{"TF_USE_SYSTEM_THREAD", Const, 0},
    -		{"TF_WRITE_BEHIND", Const, 0},
    -		{"TH32CS_INHERIT", Const, 4},
    -		{"TH32CS_SNAPALL", Const, 4},
    -		{"TH32CS_SNAPHEAPLIST", Const, 4},
    -		{"TH32CS_SNAPMODULE", Const, 4},
    -		{"TH32CS_SNAPMODULE32", Const, 4},
    -		{"TH32CS_SNAPPROCESS", Const, 4},
    -		{"TH32CS_SNAPTHREAD", Const, 4},
    -		{"TIME_ZONE_ID_DAYLIGHT", Const, 0},
    -		{"TIME_ZONE_ID_STANDARD", Const, 0},
    -		{"TIME_ZONE_ID_UNKNOWN", Const, 0},
    -		{"TIOCCBRK", Const, 0},
    -		{"TIOCCDTR", Const, 0},
    -		{"TIOCCONS", Const, 0},
    -		{"TIOCDCDTIMESTAMP", Const, 0},
    -		{"TIOCDRAIN", Const, 0},
    -		{"TIOCDSIMICROCODE", Const, 0},
    -		{"TIOCEXCL", Const, 0},
    -		{"TIOCEXT", Const, 0},
    -		{"TIOCFLAG_CDTRCTS", Const, 1},
    -		{"TIOCFLAG_CLOCAL", Const, 1},
    -		{"TIOCFLAG_CRTSCTS", Const, 1},
    -		{"TIOCFLAG_MDMBUF", Const, 1},
    -		{"TIOCFLAG_PPS", Const, 1},
    -		{"TIOCFLAG_SOFTCAR", Const, 1},
    -		{"TIOCFLUSH", Const, 0},
    -		{"TIOCGDEV", Const, 0},
    -		{"TIOCGDRAINWAIT", Const, 0},
    -		{"TIOCGETA", Const, 0},
    -		{"TIOCGETD", Const, 0},
    -		{"TIOCGFLAGS", Const, 1},
    -		{"TIOCGICOUNT", Const, 0},
    -		{"TIOCGLCKTRMIOS", Const, 0},
    -		{"TIOCGLINED", Const, 1},
    -		{"TIOCGPGRP", Const, 0},
    -		{"TIOCGPTN", Const, 0},
    -		{"TIOCGQSIZE", Const, 1},
    -		{"TIOCGRANTPT", Const, 1},
    -		{"TIOCGRS485", Const, 0},
    -		{"TIOCGSERIAL", Const, 0},
    -		{"TIOCGSID", Const, 0},
    -		{"TIOCGSIZE", Const, 1},
    -		{"TIOCGSOFTCAR", Const, 0},
    -		{"TIOCGTSTAMP", Const, 1},
    -		{"TIOCGWINSZ", Const, 0},
    -		{"TIOCINQ", Const, 0},
    -		{"TIOCIXOFF", Const, 0},
    -		{"TIOCIXON", Const, 0},
    -		{"TIOCLINUX", Const, 0},
    -		{"TIOCMBIC", Const, 0},
    -		{"TIOCMBIS", Const, 0},
    -		{"TIOCMGDTRWAIT", Const, 0},
    -		{"TIOCMGET", Const, 0},
    -		{"TIOCMIWAIT", Const, 0},
    -		{"TIOCMODG", Const, 0},
    -		{"TIOCMODS", Const, 0},
    -		{"TIOCMSDTRWAIT", Const, 0},
    -		{"TIOCMSET", Const, 0},
    -		{"TIOCM_CAR", Const, 0},
    -		{"TIOCM_CD", Const, 0},
    -		{"TIOCM_CTS", Const, 0},
    -		{"TIOCM_DCD", Const, 0},
    -		{"TIOCM_DSR", Const, 0},
    -		{"TIOCM_DTR", Const, 0},
    -		{"TIOCM_LE", Const, 0},
    -		{"TIOCM_RI", Const, 0},
    -		{"TIOCM_RNG", Const, 0},
    -		{"TIOCM_RTS", Const, 0},
    -		{"TIOCM_SR", Const, 0},
    -		{"TIOCM_ST", Const, 0},
    -		{"TIOCNOTTY", Const, 0},
    -		{"TIOCNXCL", Const, 0},
    -		{"TIOCOUTQ", Const, 0},
    -		{"TIOCPKT", Const, 0},
    -		{"TIOCPKT_DATA", Const, 0},
    -		{"TIOCPKT_DOSTOP", Const, 0},
    -		{"TIOCPKT_FLUSHREAD", Const, 0},
    -		{"TIOCPKT_FLUSHWRITE", Const, 0},
    -		{"TIOCPKT_IOCTL", Const, 0},
    -		{"TIOCPKT_NOSTOP", Const, 0},
    -		{"TIOCPKT_START", Const, 0},
    -		{"TIOCPKT_STOP", Const, 0},
    -		{"TIOCPTMASTER", Const, 0},
    -		{"TIOCPTMGET", Const, 1},
    -		{"TIOCPTSNAME", Const, 1},
    -		{"TIOCPTYGNAME", Const, 0},
    -		{"TIOCPTYGRANT", Const, 0},
    -		{"TIOCPTYUNLK", Const, 0},
    -		{"TIOCRCVFRAME", Const, 1},
    -		{"TIOCREMOTE", Const, 0},
    -		{"TIOCSBRK", Const, 0},
    -		{"TIOCSCONS", Const, 0},
    -		{"TIOCSCTTY", Const, 0},
    -		{"TIOCSDRAINWAIT", Const, 0},
    -		{"TIOCSDTR", Const, 0},
    -		{"TIOCSERCONFIG", Const, 0},
    -		{"TIOCSERGETLSR", Const, 0},
    -		{"TIOCSERGETMULTI", Const, 0},
    -		{"TIOCSERGSTRUCT", Const, 0},
    -		{"TIOCSERGWILD", Const, 0},
    -		{"TIOCSERSETMULTI", Const, 0},
    -		{"TIOCSERSWILD", Const, 0},
    -		{"TIOCSER_TEMT", Const, 0},
    -		{"TIOCSETA", Const, 0},
    -		{"TIOCSETAF", Const, 0},
    -		{"TIOCSETAW", Const, 0},
    -		{"TIOCSETD", Const, 0},
    -		{"TIOCSFLAGS", Const, 1},
    -		{"TIOCSIG", Const, 0},
    -		{"TIOCSLCKTRMIOS", Const, 0},
    -		{"TIOCSLINED", Const, 1},
    -		{"TIOCSPGRP", Const, 0},
    -		{"TIOCSPTLCK", Const, 0},
    -		{"TIOCSQSIZE", Const, 1},
    -		{"TIOCSRS485", Const, 0},
    -		{"TIOCSSERIAL", Const, 0},
    -		{"TIOCSSIZE", Const, 1},
    -		{"TIOCSSOFTCAR", Const, 0},
    -		{"TIOCSTART", Const, 0},
    -		{"TIOCSTAT", Const, 0},
    -		{"TIOCSTI", Const, 0},
    -		{"TIOCSTOP", Const, 0},
    -		{"TIOCSTSTAMP", Const, 1},
    -		{"TIOCSWINSZ", Const, 0},
    -		{"TIOCTIMESTAMP", Const, 0},
    -		{"TIOCUCNTL", Const, 0},
    -		{"TIOCVHANGUP", Const, 0},
    -		{"TIOCXMTFRAME", Const, 1},
    -		{"TOKEN_ADJUST_DEFAULT", Const, 0},
    -		{"TOKEN_ADJUST_GROUPS", Const, 0},
    -		{"TOKEN_ADJUST_PRIVILEGES", Const, 0},
    -		{"TOKEN_ADJUST_SESSIONID", Const, 11},
    -		{"TOKEN_ALL_ACCESS", Const, 0},
    -		{"TOKEN_ASSIGN_PRIMARY", Const, 0},
    -		{"TOKEN_DUPLICATE", Const, 0},
    -		{"TOKEN_EXECUTE", Const, 0},
    -		{"TOKEN_IMPERSONATE", Const, 0},
    -		{"TOKEN_QUERY", Const, 0},
    -		{"TOKEN_QUERY_SOURCE", Const, 0},
    -		{"TOKEN_READ", Const, 0},
    -		{"TOKEN_WRITE", Const, 0},
    -		{"TOSTOP", Const, 0},
    -		{"TRUNCATE_EXISTING", Const, 0},
    -		{"TUNATTACHFILTER", Const, 0},
    -		{"TUNDETACHFILTER", Const, 0},
    -		{"TUNGETFEATURES", Const, 0},
    -		{"TUNGETIFF", Const, 0},
    -		{"TUNGETSNDBUF", Const, 0},
    -		{"TUNGETVNETHDRSZ", Const, 0},
    -		{"TUNSETDEBUG", Const, 0},
    -		{"TUNSETGROUP", Const, 0},
    -		{"TUNSETIFF", Const, 0},
    -		{"TUNSETLINK", Const, 0},
    -		{"TUNSETNOCSUM", Const, 0},
    -		{"TUNSETOFFLOAD", Const, 0},
    -		{"TUNSETOWNER", Const, 0},
    -		{"TUNSETPERSIST", Const, 0},
    -		{"TUNSETSNDBUF", Const, 0},
    -		{"TUNSETTXFILTER", Const, 0},
    -		{"TUNSETVNETHDRSZ", Const, 0},
    -		{"Tee", Func, 0},
    -		{"TerminateProcess", Func, 0},
    -		{"Termios", Type, 0},
    -		{"Termios.Cc", Field, 0},
    -		{"Termios.Cflag", Field, 0},
    -		{"Termios.Iflag", Field, 0},
    -		{"Termios.Ispeed", Field, 0},
    -		{"Termios.Lflag", Field, 0},
    -		{"Termios.Line", Field, 0},
    -		{"Termios.Oflag", Field, 0},
    -		{"Termios.Ospeed", Field, 0},
    -		{"Termios.Pad_cgo_0", Field, 0},
    -		{"Tgkill", Func, 0},
    -		{"Time", Func, 0},
    -		{"Time_t", Type, 0},
    -		{"Times", Func, 0},
    -		{"Timespec", Type, 0},
    -		{"Timespec.Nsec", Field, 0},
    -		{"Timespec.Pad_cgo_0", Field, 2},
    -		{"Timespec.Sec", Field, 0},
    -		{"TimespecToNsec", Func, 0},
    -		{"Timeval", Type, 0},
    -		{"Timeval.Pad_cgo_0", Field, 0},
    -		{"Timeval.Sec", Field, 0},
    -		{"Timeval.Usec", Field, 0},
    -		{"Timeval32", Type, 0},
    -		{"Timeval32.Sec", Field, 0},
    -		{"Timeval32.Usec", Field, 0},
    -		{"TimevalToNsec", Func, 0},
    -		{"Timex", Type, 0},
    -		{"Timex.Calcnt", Field, 0},
    -		{"Timex.Constant", Field, 0},
    -		{"Timex.Errcnt", Field, 0},
    -		{"Timex.Esterror", Field, 0},
    -		{"Timex.Freq", Field, 0},
    -		{"Timex.Jitcnt", Field, 0},
    -		{"Timex.Jitter", Field, 0},
    -		{"Timex.Maxerror", Field, 0},
    -		{"Timex.Modes", Field, 0},
    -		{"Timex.Offset", Field, 0},
    -		{"Timex.Pad_cgo_0", Field, 0},
    -		{"Timex.Pad_cgo_1", Field, 0},
    -		{"Timex.Pad_cgo_2", Field, 0},
    -		{"Timex.Pad_cgo_3", Field, 0},
    -		{"Timex.Ppsfreq", Field, 0},
    -		{"Timex.Precision", Field, 0},
    -		{"Timex.Shift", Field, 0},
    -		{"Timex.Stabil", Field, 0},
    -		{"Timex.Status", Field, 0},
    -		{"Timex.Stbcnt", Field, 0},
    -		{"Timex.Tai", Field, 0},
    -		{"Timex.Tick", Field, 0},
    -		{"Timex.Time", Field, 0},
    -		{"Timex.Tolerance", Field, 0},
    -		{"Timezoneinformation", Type, 0},
    -		{"Timezoneinformation.Bias", Field, 0},
    -		{"Timezoneinformation.DaylightBias", Field, 0},
    -		{"Timezoneinformation.DaylightDate", Field, 0},
    -		{"Timezoneinformation.DaylightName", Field, 0},
    -		{"Timezoneinformation.StandardBias", Field, 0},
    -		{"Timezoneinformation.StandardDate", Field, 0},
    -		{"Timezoneinformation.StandardName", Field, 0},
    -		{"Tms", Type, 0},
    -		{"Tms.Cstime", Field, 0},
    -		{"Tms.Cutime", Field, 0},
    -		{"Tms.Stime", Field, 0},
    -		{"Tms.Utime", Field, 0},
    -		{"Token", Type, 0},
    -		{"TokenAccessInformation", Const, 0},
    -		{"TokenAuditPolicy", Const, 0},
    -		{"TokenDefaultDacl", Const, 0},
    -		{"TokenElevation", Const, 0},
    -		{"TokenElevationType", Const, 0},
    -		{"TokenGroups", Const, 0},
    -		{"TokenGroupsAndPrivileges", Const, 0},
    -		{"TokenHasRestrictions", Const, 0},
    -		{"TokenImpersonationLevel", Const, 0},
    -		{"TokenIntegrityLevel", Const, 0},
    -		{"TokenLinkedToken", Const, 0},
    -		{"TokenLogonSid", Const, 0},
    -		{"TokenMandatoryPolicy", Const, 0},
    -		{"TokenOrigin", Const, 0},
    -		{"TokenOwner", Const, 0},
    -		{"TokenPrimaryGroup", Const, 0},
    -		{"TokenPrivileges", Const, 0},
    -		{"TokenRestrictedSids", Const, 0},
    -		{"TokenSandBoxInert", Const, 0},
    -		{"TokenSessionId", Const, 0},
    -		{"TokenSessionReference", Const, 0},
    -		{"TokenSource", Const, 0},
    -		{"TokenStatistics", Const, 0},
    -		{"TokenType", Const, 0},
    -		{"TokenUIAccess", Const, 0},
    -		{"TokenUser", Const, 0},
    -		{"TokenVirtualizationAllowed", Const, 0},
    -		{"TokenVirtualizationEnabled", Const, 0},
    -		{"Tokenprimarygroup", Type, 0},
    -		{"Tokenprimarygroup.PrimaryGroup", Field, 0},
    -		{"Tokenuser", Type, 0},
    -		{"Tokenuser.User", Field, 0},
    -		{"TranslateAccountName", Func, 0},
    -		{"TranslateName", Func, 0},
    -		{"TransmitFile", Func, 0},
    -		{"TransmitFileBuffers", Type, 0},
    -		{"TransmitFileBuffers.Head", Field, 0},
    -		{"TransmitFileBuffers.HeadLength", Field, 0},
    -		{"TransmitFileBuffers.Tail", Field, 0},
    -		{"TransmitFileBuffers.TailLength", Field, 0},
    -		{"Truncate", Func, 0},
    -		{"UNIX_PATH_MAX", Const, 12},
    -		{"USAGE_MATCH_TYPE_AND", Const, 0},
    -		{"USAGE_MATCH_TYPE_OR", Const, 0},
    -		{"UTF16FromString", Func, 1},
    -		{"UTF16PtrFromString", Func, 1},
    -		{"UTF16ToString", Func, 0},
    -		{"Ucred", Type, 0},
    -		{"Ucred.Gid", Field, 0},
    -		{"Ucred.Pid", Field, 0},
    -		{"Ucred.Uid", Field, 0},
    -		{"Umask", Func, 0},
    -		{"Uname", Func, 0},
    -		{"Undelete", Func, 0},
    -		{"UnixCredentials", Func, 0},
    -		{"UnixRights", Func, 0},
    -		{"Unlink", Func, 0},
    -		{"Unlinkat", Func, 0},
    -		{"UnmapViewOfFile", Func, 0},
    -		{"Unmount", Func, 0},
    -		{"Unsetenv", Func, 4},
    -		{"Unshare", Func, 0},
    -		{"UserInfo10", Type, 0},
    -		{"UserInfo10.Comment", Field, 0},
    -		{"UserInfo10.FullName", Field, 0},
    -		{"UserInfo10.Name", Field, 0},
    -		{"UserInfo10.UsrComment", Field, 0},
    -		{"Ustat", Func, 0},
    -		{"Ustat_t", Type, 0},
    -		{"Ustat_t.Fname", Field, 0},
    -		{"Ustat_t.Fpack", Field, 0},
    -		{"Ustat_t.Pad_cgo_0", Field, 0},
    -		{"Ustat_t.Pad_cgo_1", Field, 0},
    -		{"Ustat_t.Tfree", Field, 0},
    -		{"Ustat_t.Tinode", Field, 0},
    -		{"Utimbuf", Type, 0},
    -		{"Utimbuf.Actime", Field, 0},
    -		{"Utimbuf.Modtime", Field, 0},
    -		{"Utime", Func, 0},
    -		{"Utimes", Func, 0},
    -		{"UtimesNano", Func, 1},
    -		{"Utsname", Type, 0},
    -		{"Utsname.Domainname", Field, 0},
    -		{"Utsname.Machine", Field, 0},
    -		{"Utsname.Nodename", Field, 0},
    -		{"Utsname.Release", Field, 0},
    -		{"Utsname.Sysname", Field, 0},
    -		{"Utsname.Version", Field, 0},
    -		{"VDISCARD", Const, 0},
    -		{"VDSUSP", Const, 1},
    -		{"VEOF", Const, 0},
    -		{"VEOL", Const, 0},
    -		{"VEOL2", Const, 0},
    -		{"VERASE", Const, 0},
    -		{"VERASE2", Const, 1},
    -		{"VINTR", Const, 0},
    -		{"VKILL", Const, 0},
    -		{"VLNEXT", Const, 0},
    -		{"VMIN", Const, 0},
    -		{"VQUIT", Const, 0},
    -		{"VREPRINT", Const, 0},
    -		{"VSTART", Const, 0},
    -		{"VSTATUS", Const, 1},
    -		{"VSTOP", Const, 0},
    -		{"VSUSP", Const, 0},
    -		{"VSWTC", Const, 0},
    -		{"VT0", Const, 1},
    -		{"VT1", Const, 1},
    -		{"VTDLY", Const, 1},
    -		{"VTIME", Const, 0},
    -		{"VWERASE", Const, 0},
    -		{"VirtualLock", Func, 0},
    -		{"VirtualUnlock", Func, 0},
    -		{"WAIT_ABANDONED", Const, 0},
    -		{"WAIT_FAILED", Const, 0},
    -		{"WAIT_OBJECT_0", Const, 0},
    -		{"WAIT_TIMEOUT", Const, 0},
    -		{"WALL", Const, 0},
    -		{"WALLSIG", Const, 1},
    -		{"WALTSIG", Const, 1},
    -		{"WCLONE", Const, 0},
    -		{"WCONTINUED", Const, 0},
    -		{"WCOREFLAG", Const, 0},
    -		{"WEXITED", Const, 0},
    -		{"WLINUXCLONE", Const, 0},
    -		{"WNOHANG", Const, 0},
    -		{"WNOTHREAD", Const, 0},
    -		{"WNOWAIT", Const, 0},
    -		{"WNOZOMBIE", Const, 1},
    -		{"WOPTSCHECKED", Const, 1},
    -		{"WORDSIZE", Const, 0},
    -		{"WSABuf", Type, 0},
    -		{"WSABuf.Buf", Field, 0},
    -		{"WSABuf.Len", Field, 0},
    -		{"WSACleanup", Func, 0},
    -		{"WSADESCRIPTION_LEN", Const, 0},
    -		{"WSAData", Type, 0},
    -		{"WSAData.Description", Field, 0},
    -		{"WSAData.HighVersion", Field, 0},
    -		{"WSAData.MaxSockets", Field, 0},
    -		{"WSAData.MaxUdpDg", Field, 0},
    -		{"WSAData.SystemStatus", Field, 0},
    -		{"WSAData.VendorInfo", Field, 0},
    -		{"WSAData.Version", Field, 0},
    -		{"WSAEACCES", Const, 2},
    -		{"WSAECONNABORTED", Const, 9},
    -		{"WSAECONNRESET", Const, 3},
    -		{"WSAENOPROTOOPT", Const, 23},
    -		{"WSAEnumProtocols", Func, 2},
    -		{"WSAID_CONNECTEX", Var, 1},
    -		{"WSAIoctl", Func, 0},
    -		{"WSAPROTOCOL_LEN", Const, 2},
    -		{"WSAProtocolChain", Type, 2},
    -		{"WSAProtocolChain.ChainEntries", Field, 2},
    -		{"WSAProtocolChain.ChainLen", Field, 2},
    -		{"WSAProtocolInfo", Type, 2},
    -		{"WSAProtocolInfo.AddressFamily", Field, 2},
    -		{"WSAProtocolInfo.CatalogEntryId", Field, 2},
    -		{"WSAProtocolInfo.MaxSockAddr", Field, 2},
    -		{"WSAProtocolInfo.MessageSize", Field, 2},
    -		{"WSAProtocolInfo.MinSockAddr", Field, 2},
    -		{"WSAProtocolInfo.NetworkByteOrder", Field, 2},
    -		{"WSAProtocolInfo.Protocol", Field, 2},
    -		{"WSAProtocolInfo.ProtocolChain", Field, 2},
    -		{"WSAProtocolInfo.ProtocolMaxOffset", Field, 2},
    -		{"WSAProtocolInfo.ProtocolName", Field, 2},
    -		{"WSAProtocolInfo.ProviderFlags", Field, 2},
    -		{"WSAProtocolInfo.ProviderId", Field, 2},
    -		{"WSAProtocolInfo.ProviderReserved", Field, 2},
    -		{"WSAProtocolInfo.SecurityScheme", Field, 2},
    -		{"WSAProtocolInfo.ServiceFlags1", Field, 2},
    -		{"WSAProtocolInfo.ServiceFlags2", Field, 2},
    -		{"WSAProtocolInfo.ServiceFlags3", Field, 2},
    -		{"WSAProtocolInfo.ServiceFlags4", Field, 2},
    -		{"WSAProtocolInfo.SocketType", Field, 2},
    -		{"WSAProtocolInfo.Version", Field, 2},
    -		{"WSARecv", Func, 0},
    -		{"WSARecvFrom", Func, 0},
    -		{"WSASYS_STATUS_LEN", Const, 0},
    -		{"WSASend", Func, 0},
    -		{"WSASendTo", Func, 0},
    -		{"WSASendto", Func, 0},
    -		{"WSAStartup", Func, 0},
    -		{"WSTOPPED", Const, 0},
    -		{"WTRAPPED", Const, 1},
    -		{"WUNTRACED", Const, 0},
    -		{"Wait4", Func, 0},
    -		{"WaitForSingleObject", Func, 0},
    -		{"WaitStatus", Type, 0},
    -		{"WaitStatus.ExitCode", Field, 0},
    -		{"Win32FileAttributeData", Type, 0},
    -		{"Win32FileAttributeData.CreationTime", Field, 0},
    -		{"Win32FileAttributeData.FileAttributes", Field, 0},
    -		{"Win32FileAttributeData.FileSizeHigh", Field, 0},
    -		{"Win32FileAttributeData.FileSizeLow", Field, 0},
    -		{"Win32FileAttributeData.LastAccessTime", Field, 0},
    -		{"Win32FileAttributeData.LastWriteTime", Field, 0},
    -		{"Win32finddata", Type, 0},
    -		{"Win32finddata.AlternateFileName", Field, 0},
    -		{"Win32finddata.CreationTime", Field, 0},
    -		{"Win32finddata.FileAttributes", Field, 0},
    -		{"Win32finddata.FileName", Field, 0},
    -		{"Win32finddata.FileSizeHigh", Field, 0},
    -		{"Win32finddata.FileSizeLow", Field, 0},
    -		{"Win32finddata.LastAccessTime", Field, 0},
    -		{"Win32finddata.LastWriteTime", Field, 0},
    -		{"Win32finddata.Reserved0", Field, 0},
    -		{"Win32finddata.Reserved1", Field, 0},
    -		{"Write", Func, 0},
    -		{"WriteConsole", Func, 1},
    -		{"WriteFile", Func, 0},
    -		{"X509_ASN_ENCODING", Const, 0},
    -		{"XCASE", Const, 0},
    -		{"XP1_CONNECTIONLESS", Const, 2},
    -		{"XP1_CONNECT_DATA", Const, 2},
    -		{"XP1_DISCONNECT_DATA", Const, 2},
    -		{"XP1_EXPEDITED_DATA", Const, 2},
    -		{"XP1_GRACEFUL_CLOSE", Const, 2},
    -		{"XP1_GUARANTEED_DELIVERY", Const, 2},
    -		{"XP1_GUARANTEED_ORDER", Const, 2},
    -		{"XP1_IFS_HANDLES", Const, 2},
    -		{"XP1_MESSAGE_ORIENTED", Const, 2},
    -		{"XP1_MULTIPOINT_CONTROL_PLANE", Const, 2},
    -		{"XP1_MULTIPOINT_DATA_PLANE", Const, 2},
    -		{"XP1_PARTIAL_MESSAGE", Const, 2},
    -		{"XP1_PSEUDO_STREAM", Const, 2},
    -		{"XP1_QOS_SUPPORTED", Const, 2},
    -		{"XP1_SAN_SUPPORT_SDP", Const, 2},
    -		{"XP1_SUPPORT_BROADCAST", Const, 2},
    -		{"XP1_SUPPORT_MULTIPOINT", Const, 2},
    -		{"XP1_UNI_RECV", Const, 2},
    -		{"XP1_UNI_SEND", Const, 2},
    +		{"(*Cmsghdr).SetLen", Method, 0, ""},
    +		{"(*DLL).FindProc", Method, 0, ""},
    +		{"(*DLL).MustFindProc", Method, 0, ""},
    +		{"(*DLL).Release", Method, 0, ""},
    +		{"(*DLLError).Error", Method, 0, ""},
    +		{"(*DLLError).Unwrap", Method, 16, ""},
    +		{"(*Filetime).Nanoseconds", Method, 0, ""},
    +		{"(*Iovec).SetLen", Method, 0, ""},
    +		{"(*LazyDLL).Handle", Method, 0, ""},
    +		{"(*LazyDLL).Load", Method, 0, ""},
    +		{"(*LazyDLL).NewProc", Method, 0, ""},
    +		{"(*LazyProc).Addr", Method, 0, ""},
    +		{"(*LazyProc).Call", Method, 0, ""},
    +		{"(*LazyProc).Find", Method, 0, ""},
    +		{"(*Msghdr).SetControllen", Method, 0, ""},
    +		{"(*Proc).Addr", Method, 0, ""},
    +		{"(*Proc).Call", Method, 0, ""},
    +		{"(*PtraceRegs).PC", Method, 0, ""},
    +		{"(*PtraceRegs).SetPC", Method, 0, ""},
    +		{"(*RawSockaddrAny).Sockaddr", Method, 0, ""},
    +		{"(*SID).Copy", Method, 0, ""},
    +		{"(*SID).Len", Method, 0, ""},
    +		{"(*SID).LookupAccount", Method, 0, ""},
    +		{"(*SID).String", Method, 0, ""},
    +		{"(*Timespec).Nano", Method, 0, ""},
    +		{"(*Timespec).Unix", Method, 0, ""},
    +		{"(*Timeval).Nano", Method, 0, ""},
    +		{"(*Timeval).Nanoseconds", Method, 0, ""},
    +		{"(*Timeval).Unix", Method, 0, ""},
    +		{"(Errno).Error", Method, 0, ""},
    +		{"(Errno).Is", Method, 13, ""},
    +		{"(Errno).Temporary", Method, 0, ""},
    +		{"(Errno).Timeout", Method, 0, ""},
    +		{"(Signal).Signal", Method, 0, ""},
    +		{"(Signal).String", Method, 0, ""},
    +		{"(Token).Close", Method, 0, ""},
    +		{"(Token).GetTokenPrimaryGroup", Method, 0, ""},
    +		{"(Token).GetTokenUser", Method, 0, ""},
    +		{"(Token).GetUserProfileDirectory", Method, 0, ""},
    +		{"(WaitStatus).Continued", Method, 0, ""},
    +		{"(WaitStatus).CoreDump", Method, 0, ""},
    +		{"(WaitStatus).ExitStatus", Method, 0, ""},
    +		{"(WaitStatus).Exited", Method, 0, ""},
    +		{"(WaitStatus).Signal", Method, 0, ""},
    +		{"(WaitStatus).Signaled", Method, 0, ""},
    +		{"(WaitStatus).StopSignal", Method, 0, ""},
    +		{"(WaitStatus).Stopped", Method, 0, ""},
    +		{"(WaitStatus).TrapCause", Method, 0, ""},
    +		{"AF_ALG", Const, 0, ""},
    +		{"AF_APPLETALK", Const, 0, ""},
    +		{"AF_ARP", Const, 0, ""},
    +		{"AF_ASH", Const, 0, ""},
    +		{"AF_ATM", Const, 0, ""},
    +		{"AF_ATMPVC", Const, 0, ""},
    +		{"AF_ATMSVC", Const, 0, ""},
    +		{"AF_AX25", Const, 0, ""},
    +		{"AF_BLUETOOTH", Const, 0, ""},
    +		{"AF_BRIDGE", Const, 0, ""},
    +		{"AF_CAIF", Const, 0, ""},
    +		{"AF_CAN", Const, 0, ""},
    +		{"AF_CCITT", Const, 0, ""},
    +		{"AF_CHAOS", Const, 0, ""},
    +		{"AF_CNT", Const, 0, ""},
    +		{"AF_COIP", Const, 0, ""},
    +		{"AF_DATAKIT", Const, 0, ""},
    +		{"AF_DECnet", Const, 0, ""},
    +		{"AF_DLI", Const, 0, ""},
    +		{"AF_E164", Const, 0, ""},
    +		{"AF_ECMA", Const, 0, ""},
    +		{"AF_ECONET", Const, 0, ""},
    +		{"AF_ENCAP", Const, 1, ""},
    +		{"AF_FILE", Const, 0, ""},
    +		{"AF_HYLINK", Const, 0, ""},
    +		{"AF_IEEE80211", Const, 0, ""},
    +		{"AF_IEEE802154", Const, 0, ""},
    +		{"AF_IMPLINK", Const, 0, ""},
    +		{"AF_INET", Const, 0, ""},
    +		{"AF_INET6", Const, 0, ""},
    +		{"AF_INET6_SDP", Const, 3, ""},
    +		{"AF_INET_SDP", Const, 3, ""},
    +		{"AF_IPX", Const, 0, ""},
    +		{"AF_IRDA", Const, 0, ""},
    +		{"AF_ISDN", Const, 0, ""},
    +		{"AF_ISO", Const, 0, ""},
    +		{"AF_IUCV", Const, 0, ""},
    +		{"AF_KEY", Const, 0, ""},
    +		{"AF_LAT", Const, 0, ""},
    +		{"AF_LINK", Const, 0, ""},
    +		{"AF_LLC", Const, 0, ""},
    +		{"AF_LOCAL", Const, 0, ""},
    +		{"AF_MAX", Const, 0, ""},
    +		{"AF_MPLS", Const, 1, ""},
    +		{"AF_NATM", Const, 0, ""},
    +		{"AF_NDRV", Const, 0, ""},
    +		{"AF_NETBEUI", Const, 0, ""},
    +		{"AF_NETBIOS", Const, 0, ""},
    +		{"AF_NETGRAPH", Const, 0, ""},
    +		{"AF_NETLINK", Const, 0, ""},
    +		{"AF_NETROM", Const, 0, ""},
    +		{"AF_NS", Const, 0, ""},
    +		{"AF_OROUTE", Const, 1, ""},
    +		{"AF_OSI", Const, 0, ""},
    +		{"AF_PACKET", Const, 0, ""},
    +		{"AF_PHONET", Const, 0, ""},
    +		{"AF_PPP", Const, 0, ""},
    +		{"AF_PPPOX", Const, 0, ""},
    +		{"AF_PUP", Const, 0, ""},
    +		{"AF_RDS", Const, 0, ""},
    +		{"AF_RESERVED_36", Const, 0, ""},
    +		{"AF_ROSE", Const, 0, ""},
    +		{"AF_ROUTE", Const, 0, ""},
    +		{"AF_RXRPC", Const, 0, ""},
    +		{"AF_SCLUSTER", Const, 0, ""},
    +		{"AF_SECURITY", Const, 0, ""},
    +		{"AF_SIP", Const, 0, ""},
    +		{"AF_SLOW", Const, 0, ""},
    +		{"AF_SNA", Const, 0, ""},
    +		{"AF_SYSTEM", Const, 0, ""},
    +		{"AF_TIPC", Const, 0, ""},
    +		{"AF_UNIX", Const, 0, ""},
    +		{"AF_UNSPEC", Const, 0, ""},
    +		{"AF_UTUN", Const, 16, ""},
    +		{"AF_VENDOR00", Const, 0, ""},
    +		{"AF_VENDOR01", Const, 0, ""},
    +		{"AF_VENDOR02", Const, 0, ""},
    +		{"AF_VENDOR03", Const, 0, ""},
    +		{"AF_VENDOR04", Const, 0, ""},
    +		{"AF_VENDOR05", Const, 0, ""},
    +		{"AF_VENDOR06", Const, 0, ""},
    +		{"AF_VENDOR07", Const, 0, ""},
    +		{"AF_VENDOR08", Const, 0, ""},
    +		{"AF_VENDOR09", Const, 0, ""},
    +		{"AF_VENDOR10", Const, 0, ""},
    +		{"AF_VENDOR11", Const, 0, ""},
    +		{"AF_VENDOR12", Const, 0, ""},
    +		{"AF_VENDOR13", Const, 0, ""},
    +		{"AF_VENDOR14", Const, 0, ""},
    +		{"AF_VENDOR15", Const, 0, ""},
    +		{"AF_VENDOR16", Const, 0, ""},
    +		{"AF_VENDOR17", Const, 0, ""},
    +		{"AF_VENDOR18", Const, 0, ""},
    +		{"AF_VENDOR19", Const, 0, ""},
    +		{"AF_VENDOR20", Const, 0, ""},
    +		{"AF_VENDOR21", Const, 0, ""},
    +		{"AF_VENDOR22", Const, 0, ""},
    +		{"AF_VENDOR23", Const, 0, ""},
    +		{"AF_VENDOR24", Const, 0, ""},
    +		{"AF_VENDOR25", Const, 0, ""},
    +		{"AF_VENDOR26", Const, 0, ""},
    +		{"AF_VENDOR27", Const, 0, ""},
    +		{"AF_VENDOR28", Const, 0, ""},
    +		{"AF_VENDOR29", Const, 0, ""},
    +		{"AF_VENDOR30", Const, 0, ""},
    +		{"AF_VENDOR31", Const, 0, ""},
    +		{"AF_VENDOR32", Const, 0, ""},
    +		{"AF_VENDOR33", Const, 0, ""},
    +		{"AF_VENDOR34", Const, 0, ""},
    +		{"AF_VENDOR35", Const, 0, ""},
    +		{"AF_VENDOR36", Const, 0, ""},
    +		{"AF_VENDOR37", Const, 0, ""},
    +		{"AF_VENDOR38", Const, 0, ""},
    +		{"AF_VENDOR39", Const, 0, ""},
    +		{"AF_VENDOR40", Const, 0, ""},
    +		{"AF_VENDOR41", Const, 0, ""},
    +		{"AF_VENDOR42", Const, 0, ""},
    +		{"AF_VENDOR43", Const, 0, ""},
    +		{"AF_VENDOR44", Const, 0, ""},
    +		{"AF_VENDOR45", Const, 0, ""},
    +		{"AF_VENDOR46", Const, 0, ""},
    +		{"AF_VENDOR47", Const, 0, ""},
    +		{"AF_WANPIPE", Const, 0, ""},
    +		{"AF_X25", Const, 0, ""},
    +		{"AI_CANONNAME", Const, 1, ""},
    +		{"AI_NUMERICHOST", Const, 1, ""},
    +		{"AI_PASSIVE", Const, 1, ""},
    +		{"APPLICATION_ERROR", Const, 0, ""},
    +		{"ARPHRD_ADAPT", Const, 0, ""},
    +		{"ARPHRD_APPLETLK", Const, 0, ""},
    +		{"ARPHRD_ARCNET", Const, 0, ""},
    +		{"ARPHRD_ASH", Const, 0, ""},
    +		{"ARPHRD_ATM", Const, 0, ""},
    +		{"ARPHRD_AX25", Const, 0, ""},
    +		{"ARPHRD_BIF", Const, 0, ""},
    +		{"ARPHRD_CHAOS", Const, 0, ""},
    +		{"ARPHRD_CISCO", Const, 0, ""},
    +		{"ARPHRD_CSLIP", Const, 0, ""},
    +		{"ARPHRD_CSLIP6", Const, 0, ""},
    +		{"ARPHRD_DDCMP", Const, 0, ""},
    +		{"ARPHRD_DLCI", Const, 0, ""},
    +		{"ARPHRD_ECONET", Const, 0, ""},
    +		{"ARPHRD_EETHER", Const, 0, ""},
    +		{"ARPHRD_ETHER", Const, 0, ""},
    +		{"ARPHRD_EUI64", Const, 0, ""},
    +		{"ARPHRD_FCAL", Const, 0, ""},
    +		{"ARPHRD_FCFABRIC", Const, 0, ""},
    +		{"ARPHRD_FCPL", Const, 0, ""},
    +		{"ARPHRD_FCPP", Const, 0, ""},
    +		{"ARPHRD_FDDI", Const, 0, ""},
    +		{"ARPHRD_FRAD", Const, 0, ""},
    +		{"ARPHRD_FRELAY", Const, 1, ""},
    +		{"ARPHRD_HDLC", Const, 0, ""},
    +		{"ARPHRD_HIPPI", Const, 0, ""},
    +		{"ARPHRD_HWX25", Const, 0, ""},
    +		{"ARPHRD_IEEE1394", Const, 0, ""},
    +		{"ARPHRD_IEEE802", Const, 0, ""},
    +		{"ARPHRD_IEEE80211", Const, 0, ""},
    +		{"ARPHRD_IEEE80211_PRISM", Const, 0, ""},
    +		{"ARPHRD_IEEE80211_RADIOTAP", Const, 0, ""},
    +		{"ARPHRD_IEEE802154", Const, 0, ""},
    +		{"ARPHRD_IEEE802154_PHY", Const, 0, ""},
    +		{"ARPHRD_IEEE802_TR", Const, 0, ""},
    +		{"ARPHRD_INFINIBAND", Const, 0, ""},
    +		{"ARPHRD_IPDDP", Const, 0, ""},
    +		{"ARPHRD_IPGRE", Const, 0, ""},
    +		{"ARPHRD_IRDA", Const, 0, ""},
    +		{"ARPHRD_LAPB", Const, 0, ""},
    +		{"ARPHRD_LOCALTLK", Const, 0, ""},
    +		{"ARPHRD_LOOPBACK", Const, 0, ""},
    +		{"ARPHRD_METRICOM", Const, 0, ""},
    +		{"ARPHRD_NETROM", Const, 0, ""},
    +		{"ARPHRD_NONE", Const, 0, ""},
    +		{"ARPHRD_PIMREG", Const, 0, ""},
    +		{"ARPHRD_PPP", Const, 0, ""},
    +		{"ARPHRD_PRONET", Const, 0, ""},
    +		{"ARPHRD_RAWHDLC", Const, 0, ""},
    +		{"ARPHRD_ROSE", Const, 0, ""},
    +		{"ARPHRD_RSRVD", Const, 0, ""},
    +		{"ARPHRD_SIT", Const, 0, ""},
    +		{"ARPHRD_SKIP", Const, 0, ""},
    +		{"ARPHRD_SLIP", Const, 0, ""},
    +		{"ARPHRD_SLIP6", Const, 0, ""},
    +		{"ARPHRD_STRIP", Const, 1, ""},
    +		{"ARPHRD_TUNNEL", Const, 0, ""},
    +		{"ARPHRD_TUNNEL6", Const, 0, ""},
    +		{"ARPHRD_VOID", Const, 0, ""},
    +		{"ARPHRD_X25", Const, 0, ""},
    +		{"AUTHTYPE_CLIENT", Const, 0, ""},
    +		{"AUTHTYPE_SERVER", Const, 0, ""},
    +		{"Accept", Func, 0, "func(fd int) (nfd int, sa Sockaddr, err error)"},
    +		{"Accept4", Func, 1, "func(fd int, flags int) (nfd int, sa Sockaddr, err error)"},
    +		{"AcceptEx", Func, 0, ""},
    +		{"Access", Func, 0, "func(path string, mode uint32) (err error)"},
    +		{"Acct", Func, 0, "func(path string) (err error)"},
    +		{"AddrinfoW", Type, 1, ""},
    +		{"AddrinfoW.Addr", Field, 1, ""},
    +		{"AddrinfoW.Addrlen", Field, 1, ""},
    +		{"AddrinfoW.Canonname", Field, 1, ""},
    +		{"AddrinfoW.Family", Field, 1, ""},
    +		{"AddrinfoW.Flags", Field, 1, ""},
    +		{"AddrinfoW.Next", Field, 1, ""},
    +		{"AddrinfoW.Protocol", Field, 1, ""},
    +		{"AddrinfoW.Socktype", Field, 1, ""},
    +		{"Adjtime", Func, 0, ""},
    +		{"Adjtimex", Func, 0, "func(buf *Timex) (state int, err error)"},
    +		{"AllThreadsSyscall", Func, 16, "func(trap uintptr, a1 uintptr, a2 uintptr, a3 uintptr) (r1 uintptr, r2 uintptr, err Errno)"},
    +		{"AllThreadsSyscall6", Func, 16, "func(trap uintptr, a1 uintptr, a2 uintptr, a3 uintptr, a4 uintptr, a5 uintptr, a6 uintptr) (r1 uintptr, r2 uintptr, err Errno)"},
    +		{"AttachLsf", Func, 0, "func(fd int, i []SockFilter) error"},
    +		{"B0", Const, 0, ""},
    +		{"B1000000", Const, 0, ""},
    +		{"B110", Const, 0, ""},
    +		{"B115200", Const, 0, ""},
    +		{"B1152000", Const, 0, ""},
    +		{"B1200", Const, 0, ""},
    +		{"B134", Const, 0, ""},
    +		{"B14400", Const, 1, ""},
    +		{"B150", Const, 0, ""},
    +		{"B1500000", Const, 0, ""},
    +		{"B1800", Const, 0, ""},
    +		{"B19200", Const, 0, ""},
    +		{"B200", Const, 0, ""},
    +		{"B2000000", Const, 0, ""},
    +		{"B230400", Const, 0, ""},
    +		{"B2400", Const, 0, ""},
    +		{"B2500000", Const, 0, ""},
    +		{"B28800", Const, 1, ""},
    +		{"B300", Const, 0, ""},
    +		{"B3000000", Const, 0, ""},
    +		{"B3500000", Const, 0, ""},
    +		{"B38400", Const, 0, ""},
    +		{"B4000000", Const, 0, ""},
    +		{"B460800", Const, 0, ""},
    +		{"B4800", Const, 0, ""},
    +		{"B50", Const, 0, ""},
    +		{"B500000", Const, 0, ""},
    +		{"B57600", Const, 0, ""},
    +		{"B576000", Const, 0, ""},
    +		{"B600", Const, 0, ""},
    +		{"B7200", Const, 1, ""},
    +		{"B75", Const, 0, ""},
    +		{"B76800", Const, 1, ""},
    +		{"B921600", Const, 0, ""},
    +		{"B9600", Const, 0, ""},
    +		{"BASE_PROTOCOL", Const, 2, ""},
    +		{"BIOCFEEDBACK", Const, 0, ""},
    +		{"BIOCFLUSH", Const, 0, ""},
    +		{"BIOCGBLEN", Const, 0, ""},
    +		{"BIOCGDIRECTION", Const, 0, ""},
    +		{"BIOCGDIRFILT", Const, 1, ""},
    +		{"BIOCGDLT", Const, 0, ""},
    +		{"BIOCGDLTLIST", Const, 0, ""},
    +		{"BIOCGETBUFMODE", Const, 0, ""},
    +		{"BIOCGETIF", Const, 0, ""},
    +		{"BIOCGETZMAX", Const, 0, ""},
    +		{"BIOCGFEEDBACK", Const, 1, ""},
    +		{"BIOCGFILDROP", Const, 1, ""},
    +		{"BIOCGHDRCMPLT", Const, 0, ""},
    +		{"BIOCGRSIG", Const, 0, ""},
    +		{"BIOCGRTIMEOUT", Const, 0, ""},
    +		{"BIOCGSEESENT", Const, 0, ""},
    +		{"BIOCGSTATS", Const, 0, ""},
    +		{"BIOCGSTATSOLD", Const, 1, ""},
    +		{"BIOCGTSTAMP", Const, 1, ""},
    +		{"BIOCIMMEDIATE", Const, 0, ""},
    +		{"BIOCLOCK", Const, 0, ""},
    +		{"BIOCPROMISC", Const, 0, ""},
    +		{"BIOCROTZBUF", Const, 0, ""},
    +		{"BIOCSBLEN", Const, 0, ""},
    +		{"BIOCSDIRECTION", Const, 0, ""},
    +		{"BIOCSDIRFILT", Const, 1, ""},
    +		{"BIOCSDLT", Const, 0, ""},
    +		{"BIOCSETBUFMODE", Const, 0, ""},
    +		{"BIOCSETF", Const, 0, ""},
    +		{"BIOCSETFNR", Const, 0, ""},
    +		{"BIOCSETIF", Const, 0, ""},
    +		{"BIOCSETWF", Const, 0, ""},
    +		{"BIOCSETZBUF", Const, 0, ""},
    +		{"BIOCSFEEDBACK", Const, 1, ""},
    +		{"BIOCSFILDROP", Const, 1, ""},
    +		{"BIOCSHDRCMPLT", Const, 0, ""},
    +		{"BIOCSRSIG", Const, 0, ""},
    +		{"BIOCSRTIMEOUT", Const, 0, ""},
    +		{"BIOCSSEESENT", Const, 0, ""},
    +		{"BIOCSTCPF", Const, 1, ""},
    +		{"BIOCSTSTAMP", Const, 1, ""},
    +		{"BIOCSUDPF", Const, 1, ""},
    +		{"BIOCVERSION", Const, 0, ""},
    +		{"BPF_A", Const, 0, ""},
    +		{"BPF_ABS", Const, 0, ""},
    +		{"BPF_ADD", Const, 0, ""},
    +		{"BPF_ALIGNMENT", Const, 0, ""},
    +		{"BPF_ALIGNMENT32", Const, 1, ""},
    +		{"BPF_ALU", Const, 0, ""},
    +		{"BPF_AND", Const, 0, ""},
    +		{"BPF_B", Const, 0, ""},
    +		{"BPF_BUFMODE_BUFFER", Const, 0, ""},
    +		{"BPF_BUFMODE_ZBUF", Const, 0, ""},
    +		{"BPF_DFLTBUFSIZE", Const, 1, ""},
    +		{"BPF_DIRECTION_IN", Const, 1, ""},
    +		{"BPF_DIRECTION_OUT", Const, 1, ""},
    +		{"BPF_DIV", Const, 0, ""},
    +		{"BPF_H", Const, 0, ""},
    +		{"BPF_IMM", Const, 0, ""},
    +		{"BPF_IND", Const, 0, ""},
    +		{"BPF_JA", Const, 0, ""},
    +		{"BPF_JEQ", Const, 0, ""},
    +		{"BPF_JGE", Const, 0, ""},
    +		{"BPF_JGT", Const, 0, ""},
    +		{"BPF_JMP", Const, 0, ""},
    +		{"BPF_JSET", Const, 0, ""},
    +		{"BPF_K", Const, 0, ""},
    +		{"BPF_LD", Const, 0, ""},
    +		{"BPF_LDX", Const, 0, ""},
    +		{"BPF_LEN", Const, 0, ""},
    +		{"BPF_LSH", Const, 0, ""},
    +		{"BPF_MAJOR_VERSION", Const, 0, ""},
    +		{"BPF_MAXBUFSIZE", Const, 0, ""},
    +		{"BPF_MAXINSNS", Const, 0, ""},
    +		{"BPF_MEM", Const, 0, ""},
    +		{"BPF_MEMWORDS", Const, 0, ""},
    +		{"BPF_MINBUFSIZE", Const, 0, ""},
    +		{"BPF_MINOR_VERSION", Const, 0, ""},
    +		{"BPF_MISC", Const, 0, ""},
    +		{"BPF_MSH", Const, 0, ""},
    +		{"BPF_MUL", Const, 0, ""},
    +		{"BPF_NEG", Const, 0, ""},
    +		{"BPF_OR", Const, 0, ""},
    +		{"BPF_RELEASE", Const, 0, ""},
    +		{"BPF_RET", Const, 0, ""},
    +		{"BPF_RSH", Const, 0, ""},
    +		{"BPF_ST", Const, 0, ""},
    +		{"BPF_STX", Const, 0, ""},
    +		{"BPF_SUB", Const, 0, ""},
    +		{"BPF_TAX", Const, 0, ""},
    +		{"BPF_TXA", Const, 0, ""},
    +		{"BPF_T_BINTIME", Const, 1, ""},
    +		{"BPF_T_BINTIME_FAST", Const, 1, ""},
    +		{"BPF_T_BINTIME_MONOTONIC", Const, 1, ""},
    +		{"BPF_T_BINTIME_MONOTONIC_FAST", Const, 1, ""},
    +		{"BPF_T_FAST", Const, 1, ""},
    +		{"BPF_T_FLAG_MASK", Const, 1, ""},
    +		{"BPF_T_FORMAT_MASK", Const, 1, ""},
    +		{"BPF_T_MICROTIME", Const, 1, ""},
    +		{"BPF_T_MICROTIME_FAST", Const, 1, ""},
    +		{"BPF_T_MICROTIME_MONOTONIC", Const, 1, ""},
    +		{"BPF_T_MICROTIME_MONOTONIC_FAST", Const, 1, ""},
    +		{"BPF_T_MONOTONIC", Const, 1, ""},
    +		{"BPF_T_MONOTONIC_FAST", Const, 1, ""},
    +		{"BPF_T_NANOTIME", Const, 1, ""},
    +		{"BPF_T_NANOTIME_FAST", Const, 1, ""},
    +		{"BPF_T_NANOTIME_MONOTONIC", Const, 1, ""},
    +		{"BPF_T_NANOTIME_MONOTONIC_FAST", Const, 1, ""},
    +		{"BPF_T_NONE", Const, 1, ""},
    +		{"BPF_T_NORMAL", Const, 1, ""},
    +		{"BPF_W", Const, 0, ""},
    +		{"BPF_X", Const, 0, ""},
    +		{"BRKINT", Const, 0, ""},
    +		{"Bind", Func, 0, "func(fd int, sa Sockaddr) (err error)"},
    +		{"BindToDevice", Func, 0, "func(fd int, device string) (err error)"},
    +		{"BpfBuflen", Func, 0, ""},
    +		{"BpfDatalink", Func, 0, ""},
    +		{"BpfHdr", Type, 0, ""},
    +		{"BpfHdr.Caplen", Field, 0, ""},
    +		{"BpfHdr.Datalen", Field, 0, ""},
    +		{"BpfHdr.Hdrlen", Field, 0, ""},
    +		{"BpfHdr.Pad_cgo_0", Field, 0, ""},
    +		{"BpfHdr.Tstamp", Field, 0, ""},
    +		{"BpfHeadercmpl", Func, 0, ""},
    +		{"BpfInsn", Type, 0, ""},
    +		{"BpfInsn.Code", Field, 0, ""},
    +		{"BpfInsn.Jf", Field, 0, ""},
    +		{"BpfInsn.Jt", Field, 0, ""},
    +		{"BpfInsn.K", Field, 0, ""},
    +		{"BpfInterface", Func, 0, ""},
    +		{"BpfJump", Func, 0, ""},
    +		{"BpfProgram", Type, 0, ""},
    +		{"BpfProgram.Insns", Field, 0, ""},
    +		{"BpfProgram.Len", Field, 0, ""},
    +		{"BpfProgram.Pad_cgo_0", Field, 0, ""},
    +		{"BpfStat", Type, 0, ""},
    +		{"BpfStat.Capt", Field, 2, ""},
    +		{"BpfStat.Drop", Field, 0, ""},
    +		{"BpfStat.Padding", Field, 2, ""},
    +		{"BpfStat.Recv", Field, 0, ""},
    +		{"BpfStats", Func, 0, ""},
    +		{"BpfStmt", Func, 0, ""},
    +		{"BpfTimeout", Func, 0, ""},
    +		{"BpfTimeval", Type, 2, ""},
    +		{"BpfTimeval.Sec", Field, 2, ""},
    +		{"BpfTimeval.Usec", Field, 2, ""},
    +		{"BpfVersion", Type, 0, ""},
    +		{"BpfVersion.Major", Field, 0, ""},
    +		{"BpfVersion.Minor", Field, 0, ""},
    +		{"BpfZbuf", Type, 0, ""},
    +		{"BpfZbuf.Bufa", Field, 0, ""},
    +		{"BpfZbuf.Bufb", Field, 0, ""},
    +		{"BpfZbuf.Buflen", Field, 0, ""},
    +		{"BpfZbufHeader", Type, 0, ""},
    +		{"BpfZbufHeader.Kernel_gen", Field, 0, ""},
    +		{"BpfZbufHeader.Kernel_len", Field, 0, ""},
    +		{"BpfZbufHeader.User_gen", Field, 0, ""},
    +		{"BpfZbufHeader.X_bzh_pad", Field, 0, ""},
    +		{"ByHandleFileInformation", Type, 0, ""},
    +		{"ByHandleFileInformation.CreationTime", Field, 0, ""},
    +		{"ByHandleFileInformation.FileAttributes", Field, 0, ""},
    +		{"ByHandleFileInformation.FileIndexHigh", Field, 0, ""},
    +		{"ByHandleFileInformation.FileIndexLow", Field, 0, ""},
    +		{"ByHandleFileInformation.FileSizeHigh", Field, 0, ""},
    +		{"ByHandleFileInformation.FileSizeLow", Field, 0, ""},
    +		{"ByHandleFileInformation.LastAccessTime", Field, 0, ""},
    +		{"ByHandleFileInformation.LastWriteTime", Field, 0, ""},
    +		{"ByHandleFileInformation.NumberOfLinks", Field, 0, ""},
    +		{"ByHandleFileInformation.VolumeSerialNumber", Field, 0, ""},
    +		{"BytePtrFromString", Func, 1, "func(s string) (*byte, error)"},
    +		{"ByteSliceFromString", Func, 1, "func(s string) ([]byte, error)"},
    +		{"CCR0_FLUSH", Const, 1, ""},
    +		{"CERT_CHAIN_POLICY_AUTHENTICODE", Const, 0, ""},
    +		{"CERT_CHAIN_POLICY_AUTHENTICODE_TS", Const, 0, ""},
    +		{"CERT_CHAIN_POLICY_BASE", Const, 0, ""},
    +		{"CERT_CHAIN_POLICY_BASIC_CONSTRAINTS", Const, 0, ""},
    +		{"CERT_CHAIN_POLICY_EV", Const, 0, ""},
    +		{"CERT_CHAIN_POLICY_MICROSOFT_ROOT", Const, 0, ""},
    +		{"CERT_CHAIN_POLICY_NT_AUTH", Const, 0, ""},
    +		{"CERT_CHAIN_POLICY_SSL", Const, 0, ""},
    +		{"CERT_E_CN_NO_MATCH", Const, 0, ""},
    +		{"CERT_E_EXPIRED", Const, 0, ""},
    +		{"CERT_E_PURPOSE", Const, 0, ""},
    +		{"CERT_E_ROLE", Const, 0, ""},
    +		{"CERT_E_UNTRUSTEDROOT", Const, 0, ""},
    +		{"CERT_STORE_ADD_ALWAYS", Const, 0, ""},
    +		{"CERT_STORE_DEFER_CLOSE_UNTIL_LAST_FREE_FLAG", Const, 0, ""},
    +		{"CERT_STORE_PROV_MEMORY", Const, 0, ""},
    +		{"CERT_TRUST_HAS_EXCLUDED_NAME_CONSTRAINT", Const, 0, ""},
    +		{"CERT_TRUST_HAS_NOT_DEFINED_NAME_CONSTRAINT", Const, 0, ""},
    +		{"CERT_TRUST_HAS_NOT_PERMITTED_NAME_CONSTRAINT", Const, 0, ""},
    +		{"CERT_TRUST_HAS_NOT_SUPPORTED_CRITICAL_EXT", Const, 0, ""},
    +		{"CERT_TRUST_HAS_NOT_SUPPORTED_NAME_CONSTRAINT", Const, 0, ""},
    +		{"CERT_TRUST_INVALID_BASIC_CONSTRAINTS", Const, 0, ""},
    +		{"CERT_TRUST_INVALID_EXTENSION", Const, 0, ""},
    +		{"CERT_TRUST_INVALID_NAME_CONSTRAINTS", Const, 0, ""},
    +		{"CERT_TRUST_INVALID_POLICY_CONSTRAINTS", Const, 0, ""},
    +		{"CERT_TRUST_IS_CYCLIC", Const, 0, ""},
    +		{"CERT_TRUST_IS_EXPLICIT_DISTRUST", Const, 0, ""},
    +		{"CERT_TRUST_IS_NOT_SIGNATURE_VALID", Const, 0, ""},
    +		{"CERT_TRUST_IS_NOT_TIME_VALID", Const, 0, ""},
    +		{"CERT_TRUST_IS_NOT_VALID_FOR_USAGE", Const, 0, ""},
    +		{"CERT_TRUST_IS_OFFLINE_REVOCATION", Const, 0, ""},
    +		{"CERT_TRUST_IS_REVOKED", Const, 0, ""},
    +		{"CERT_TRUST_IS_UNTRUSTED_ROOT", Const, 0, ""},
    +		{"CERT_TRUST_NO_ERROR", Const, 0, ""},
    +		{"CERT_TRUST_NO_ISSUANCE_CHAIN_POLICY", Const, 0, ""},
    +		{"CERT_TRUST_REVOCATION_STATUS_UNKNOWN", Const, 0, ""},
    +		{"CFLUSH", Const, 1, ""},
    +		{"CLOCAL", Const, 0, ""},
    +		{"CLONE_CHILD_CLEARTID", Const, 2, ""},
    +		{"CLONE_CHILD_SETTID", Const, 2, ""},
    +		{"CLONE_CLEAR_SIGHAND", Const, 20, ""},
    +		{"CLONE_CSIGNAL", Const, 3, ""},
    +		{"CLONE_DETACHED", Const, 2, ""},
    +		{"CLONE_FILES", Const, 2, ""},
    +		{"CLONE_FS", Const, 2, ""},
    +		{"CLONE_INTO_CGROUP", Const, 20, ""},
    +		{"CLONE_IO", Const, 2, ""},
    +		{"CLONE_NEWCGROUP", Const, 20, ""},
    +		{"CLONE_NEWIPC", Const, 2, ""},
    +		{"CLONE_NEWNET", Const, 2, ""},
    +		{"CLONE_NEWNS", Const, 2, ""},
    +		{"CLONE_NEWPID", Const, 2, ""},
    +		{"CLONE_NEWTIME", Const, 20, ""},
    +		{"CLONE_NEWUSER", Const, 2, ""},
    +		{"CLONE_NEWUTS", Const, 2, ""},
    +		{"CLONE_PARENT", Const, 2, ""},
    +		{"CLONE_PARENT_SETTID", Const, 2, ""},
    +		{"CLONE_PID", Const, 3, ""},
    +		{"CLONE_PIDFD", Const, 20, ""},
    +		{"CLONE_PTRACE", Const, 2, ""},
    +		{"CLONE_SETTLS", Const, 2, ""},
    +		{"CLONE_SIGHAND", Const, 2, ""},
    +		{"CLONE_SYSVSEM", Const, 2, ""},
    +		{"CLONE_THREAD", Const, 2, ""},
    +		{"CLONE_UNTRACED", Const, 2, ""},
    +		{"CLONE_VFORK", Const, 2, ""},
    +		{"CLONE_VM", Const, 2, ""},
    +		{"CPUID_CFLUSH", Const, 1, ""},
    +		{"CREAD", Const, 0, ""},
    +		{"CREATE_ALWAYS", Const, 0, ""},
    +		{"CREATE_NEW", Const, 0, ""},
    +		{"CREATE_NEW_PROCESS_GROUP", Const, 1, ""},
    +		{"CREATE_UNICODE_ENVIRONMENT", Const, 0, ""},
    +		{"CRYPT_DEFAULT_CONTAINER_OPTIONAL", Const, 0, ""},
    +		{"CRYPT_DELETEKEYSET", Const, 0, ""},
    +		{"CRYPT_MACHINE_KEYSET", Const, 0, ""},
    +		{"CRYPT_NEWKEYSET", Const, 0, ""},
    +		{"CRYPT_SILENT", Const, 0, ""},
    +		{"CRYPT_VERIFYCONTEXT", Const, 0, ""},
    +		{"CS5", Const, 0, ""},
    +		{"CS6", Const, 0, ""},
    +		{"CS7", Const, 0, ""},
    +		{"CS8", Const, 0, ""},
    +		{"CSIZE", Const, 0, ""},
    +		{"CSTART", Const, 1, ""},
    +		{"CSTATUS", Const, 1, ""},
    +		{"CSTOP", Const, 1, ""},
    +		{"CSTOPB", Const, 0, ""},
    +		{"CSUSP", Const, 1, ""},
    +		{"CTL_MAXNAME", Const, 0, ""},
    +		{"CTL_NET", Const, 0, ""},
    +		{"CTL_QUERY", Const, 1, ""},
    +		{"CTRL_BREAK_EVENT", Const, 1, ""},
    +		{"CTRL_CLOSE_EVENT", Const, 14, ""},
    +		{"CTRL_C_EVENT", Const, 1, ""},
    +		{"CTRL_LOGOFF_EVENT", Const, 14, ""},
    +		{"CTRL_SHUTDOWN_EVENT", Const, 14, ""},
    +		{"CancelIo", Func, 0, ""},
    +		{"CancelIoEx", Func, 1, ""},
    +		{"CertAddCertificateContextToStore", Func, 0, ""},
    +		{"CertChainContext", Type, 0, ""},
    +		{"CertChainContext.ChainCount", Field, 0, ""},
    +		{"CertChainContext.Chains", Field, 0, ""},
    +		{"CertChainContext.HasRevocationFreshnessTime", Field, 0, ""},
    +		{"CertChainContext.LowerQualityChainCount", Field, 0, ""},
    +		{"CertChainContext.LowerQualityChains", Field, 0, ""},
    +		{"CertChainContext.RevocationFreshnessTime", Field, 0, ""},
    +		{"CertChainContext.Size", Field, 0, ""},
    +		{"CertChainContext.TrustStatus", Field, 0, ""},
    +		{"CertChainElement", Type, 0, ""},
    +		{"CertChainElement.ApplicationUsage", Field, 0, ""},
    +		{"CertChainElement.CertContext", Field, 0, ""},
    +		{"CertChainElement.ExtendedErrorInfo", Field, 0, ""},
    +		{"CertChainElement.IssuanceUsage", Field, 0, ""},
    +		{"CertChainElement.RevocationInfo", Field, 0, ""},
    +		{"CertChainElement.Size", Field, 0, ""},
    +		{"CertChainElement.TrustStatus", Field, 0, ""},
    +		{"CertChainPara", Type, 0, ""},
    +		{"CertChainPara.CacheResync", Field, 0, ""},
    +		{"CertChainPara.CheckRevocationFreshnessTime", Field, 0, ""},
    +		{"CertChainPara.RequestedUsage", Field, 0, ""},
    +		{"CertChainPara.RequstedIssuancePolicy", Field, 0, ""},
    +		{"CertChainPara.RevocationFreshnessTime", Field, 0, ""},
    +		{"CertChainPara.Size", Field, 0, ""},
    +		{"CertChainPara.URLRetrievalTimeout", Field, 0, ""},
    +		{"CertChainPolicyPara", Type, 0, ""},
    +		{"CertChainPolicyPara.ExtraPolicyPara", Field, 0, ""},
    +		{"CertChainPolicyPara.Flags", Field, 0, ""},
    +		{"CertChainPolicyPara.Size", Field, 0, ""},
    +		{"CertChainPolicyStatus", Type, 0, ""},
    +		{"CertChainPolicyStatus.ChainIndex", Field, 0, ""},
    +		{"CertChainPolicyStatus.ElementIndex", Field, 0, ""},
    +		{"CertChainPolicyStatus.Error", Field, 0, ""},
    +		{"CertChainPolicyStatus.ExtraPolicyStatus", Field, 0, ""},
    +		{"CertChainPolicyStatus.Size", Field, 0, ""},
    +		{"CertCloseStore", Func, 0, ""},
    +		{"CertContext", Type, 0, ""},
    +		{"CertContext.CertInfo", Field, 0, ""},
    +		{"CertContext.EncodedCert", Field, 0, ""},
    +		{"CertContext.EncodingType", Field, 0, ""},
    +		{"CertContext.Length", Field, 0, ""},
    +		{"CertContext.Store", Field, 0, ""},
    +		{"CertCreateCertificateContext", Func, 0, ""},
    +		{"CertEnhKeyUsage", Type, 0, ""},
    +		{"CertEnhKeyUsage.Length", Field, 0, ""},
    +		{"CertEnhKeyUsage.UsageIdentifiers", Field, 0, ""},
    +		{"CertEnumCertificatesInStore", Func, 0, ""},
    +		{"CertFreeCertificateChain", Func, 0, ""},
    +		{"CertFreeCertificateContext", Func, 0, ""},
    +		{"CertGetCertificateChain", Func, 0, ""},
    +		{"CertInfo", Type, 11, ""},
    +		{"CertOpenStore", Func, 0, ""},
    +		{"CertOpenSystemStore", Func, 0, ""},
    +		{"CertRevocationCrlInfo", Type, 11, ""},
    +		{"CertRevocationInfo", Type, 0, ""},
    +		{"CertRevocationInfo.CrlInfo", Field, 0, ""},
    +		{"CertRevocationInfo.FreshnessTime", Field, 0, ""},
    +		{"CertRevocationInfo.HasFreshnessTime", Field, 0, ""},
    +		{"CertRevocationInfo.OidSpecificInfo", Field, 0, ""},
    +		{"CertRevocationInfo.RevocationOid", Field, 0, ""},
    +		{"CertRevocationInfo.RevocationResult", Field, 0, ""},
    +		{"CertRevocationInfo.Size", Field, 0, ""},
    +		{"CertSimpleChain", Type, 0, ""},
    +		{"CertSimpleChain.Elements", Field, 0, ""},
    +		{"CertSimpleChain.HasRevocationFreshnessTime", Field, 0, ""},
    +		{"CertSimpleChain.NumElements", Field, 0, ""},
    +		{"CertSimpleChain.RevocationFreshnessTime", Field, 0, ""},
    +		{"CertSimpleChain.Size", Field, 0, ""},
    +		{"CertSimpleChain.TrustListInfo", Field, 0, ""},
    +		{"CertSimpleChain.TrustStatus", Field, 0, ""},
    +		{"CertTrustListInfo", Type, 11, ""},
    +		{"CertTrustStatus", Type, 0, ""},
    +		{"CertTrustStatus.ErrorStatus", Field, 0, ""},
    +		{"CertTrustStatus.InfoStatus", Field, 0, ""},
    +		{"CertUsageMatch", Type, 0, ""},
    +		{"CertUsageMatch.Type", Field, 0, ""},
    +		{"CertUsageMatch.Usage", Field, 0, ""},
    +		{"CertVerifyCertificateChainPolicy", Func, 0, ""},
    +		{"Chdir", Func, 0, "func(path string) (err error)"},
    +		{"CheckBpfVersion", Func, 0, ""},
    +		{"Chflags", Func, 0, ""},
    +		{"Chmod", Func, 0, "func(path string, mode uint32) (err error)"},
    +		{"Chown", Func, 0, "func(path string, uid int, gid int) (err error)"},
    +		{"Chroot", Func, 0, "func(path string) (err error)"},
    +		{"Clearenv", Func, 0, "func()"},
    +		{"Close", Func, 0, "func(fd int) (err error)"},
    +		{"CloseHandle", Func, 0, ""},
    +		{"CloseOnExec", Func, 0, "func(fd int)"},
    +		{"Closesocket", Func, 0, ""},
    +		{"CmsgLen", Func, 0, "func(datalen int) int"},
    +		{"CmsgSpace", Func, 0, "func(datalen int) int"},
    +		{"Cmsghdr", Type, 0, ""},
    +		{"Cmsghdr.Len", Field, 0, ""},
    +		{"Cmsghdr.Level", Field, 0, ""},
    +		{"Cmsghdr.Type", Field, 0, ""},
    +		{"Cmsghdr.X__cmsg_data", Field, 0, ""},
    +		{"CommandLineToArgv", Func, 0, ""},
    +		{"ComputerName", Func, 0, ""},
    +		{"Conn", Type, 9, ""},
    +		{"Connect", Func, 0, "func(fd int, sa Sockaddr) (err error)"},
    +		{"ConnectEx", Func, 1, ""},
    +		{"ConvertSidToStringSid", Func, 0, ""},
    +		{"ConvertStringSidToSid", Func, 0, ""},
    +		{"CopySid", Func, 0, ""},
    +		{"Creat", Func, 0, "func(path string, mode uint32) (fd int, err error)"},
    +		{"CreateDirectory", Func, 0, ""},
    +		{"CreateFile", Func, 0, ""},
    +		{"CreateFileMapping", Func, 0, ""},
    +		{"CreateHardLink", Func, 4, ""},
    +		{"CreateIoCompletionPort", Func, 0, ""},
    +		{"CreatePipe", Func, 0, ""},
    +		{"CreateProcess", Func, 0, ""},
    +		{"CreateProcessAsUser", Func, 10, ""},
    +		{"CreateSymbolicLink", Func, 4, ""},
    +		{"CreateToolhelp32Snapshot", Func, 4, ""},
    +		{"Credential", Type, 0, ""},
    +		{"Credential.Gid", Field, 0, ""},
    +		{"Credential.Groups", Field, 0, ""},
    +		{"Credential.NoSetGroups", Field, 9, ""},
    +		{"Credential.Uid", Field, 0, ""},
    +		{"CryptAcquireContext", Func, 0, ""},
    +		{"CryptGenRandom", Func, 0, ""},
    +		{"CryptReleaseContext", Func, 0, ""},
    +		{"DIOCBSFLUSH", Const, 1, ""},
    +		{"DIOCOSFPFLUSH", Const, 1, ""},
    +		{"DLL", Type, 0, ""},
    +		{"DLL.Handle", Field, 0, ""},
    +		{"DLL.Name", Field, 0, ""},
    +		{"DLLError", Type, 0, ""},
    +		{"DLLError.Err", Field, 0, ""},
    +		{"DLLError.Msg", Field, 0, ""},
    +		{"DLLError.ObjName", Field, 0, ""},
    +		{"DLT_A429", Const, 0, ""},
    +		{"DLT_A653_ICM", Const, 0, ""},
    +		{"DLT_AIRONET_HEADER", Const, 0, ""},
    +		{"DLT_AOS", Const, 1, ""},
    +		{"DLT_APPLE_IP_OVER_IEEE1394", Const, 0, ""},
    +		{"DLT_ARCNET", Const, 0, ""},
    +		{"DLT_ARCNET_LINUX", Const, 0, ""},
    +		{"DLT_ATM_CLIP", Const, 0, ""},
    +		{"DLT_ATM_RFC1483", Const, 0, ""},
    +		{"DLT_AURORA", Const, 0, ""},
    +		{"DLT_AX25", Const, 0, ""},
    +		{"DLT_AX25_KISS", Const, 0, ""},
    +		{"DLT_BACNET_MS_TP", Const, 0, ""},
    +		{"DLT_BLUETOOTH_HCI_H4", Const, 0, ""},
    +		{"DLT_BLUETOOTH_HCI_H4_WITH_PHDR", Const, 0, ""},
    +		{"DLT_CAN20B", Const, 0, ""},
    +		{"DLT_CAN_SOCKETCAN", Const, 1, ""},
    +		{"DLT_CHAOS", Const, 0, ""},
    +		{"DLT_CHDLC", Const, 0, ""},
    +		{"DLT_CISCO_IOS", Const, 0, ""},
    +		{"DLT_C_HDLC", Const, 0, ""},
    +		{"DLT_C_HDLC_WITH_DIR", Const, 0, ""},
    +		{"DLT_DBUS", Const, 1, ""},
    +		{"DLT_DECT", Const, 1, ""},
    +		{"DLT_DOCSIS", Const, 0, ""},
    +		{"DLT_DVB_CI", Const, 1, ""},
    +		{"DLT_ECONET", Const, 0, ""},
    +		{"DLT_EN10MB", Const, 0, ""},
    +		{"DLT_EN3MB", Const, 0, ""},
    +		{"DLT_ENC", Const, 0, ""},
    +		{"DLT_ERF", Const, 0, ""},
    +		{"DLT_ERF_ETH", Const, 0, ""},
    +		{"DLT_ERF_POS", Const, 0, ""},
    +		{"DLT_FC_2", Const, 1, ""},
    +		{"DLT_FC_2_WITH_FRAME_DELIMS", Const, 1, ""},
    +		{"DLT_FDDI", Const, 0, ""},
    +		{"DLT_FLEXRAY", Const, 0, ""},
    +		{"DLT_FRELAY", Const, 0, ""},
    +		{"DLT_FRELAY_WITH_DIR", Const, 0, ""},
    +		{"DLT_GCOM_SERIAL", Const, 0, ""},
    +		{"DLT_GCOM_T1E1", Const, 0, ""},
    +		{"DLT_GPF_F", Const, 0, ""},
    +		{"DLT_GPF_T", Const, 0, ""},
    +		{"DLT_GPRS_LLC", Const, 0, ""},
    +		{"DLT_GSMTAP_ABIS", Const, 1, ""},
    +		{"DLT_GSMTAP_UM", Const, 1, ""},
    +		{"DLT_HDLC", Const, 1, ""},
    +		{"DLT_HHDLC", Const, 0, ""},
    +		{"DLT_HIPPI", Const, 1, ""},
    +		{"DLT_IBM_SN", Const, 0, ""},
    +		{"DLT_IBM_SP", Const, 0, ""},
    +		{"DLT_IEEE802", Const, 0, ""},
    +		{"DLT_IEEE802_11", Const, 0, ""},
    +		{"DLT_IEEE802_11_RADIO", Const, 0, ""},
    +		{"DLT_IEEE802_11_RADIO_AVS", Const, 0, ""},
    +		{"DLT_IEEE802_15_4", Const, 0, ""},
    +		{"DLT_IEEE802_15_4_LINUX", Const, 0, ""},
    +		{"DLT_IEEE802_15_4_NOFCS", Const, 1, ""},
    +		{"DLT_IEEE802_15_4_NONASK_PHY", Const, 0, ""},
    +		{"DLT_IEEE802_16_MAC_CPS", Const, 0, ""},
    +		{"DLT_IEEE802_16_MAC_CPS_RADIO", Const, 0, ""},
    +		{"DLT_IPFILTER", Const, 0, ""},
    +		{"DLT_IPMB", Const, 0, ""},
    +		{"DLT_IPMB_LINUX", Const, 0, ""},
    +		{"DLT_IPNET", Const, 1, ""},
    +		{"DLT_IPOIB", Const, 1, ""},
    +		{"DLT_IPV4", Const, 1, ""},
    +		{"DLT_IPV6", Const, 1, ""},
    +		{"DLT_IP_OVER_FC", Const, 0, ""},
    +		{"DLT_JUNIPER_ATM1", Const, 0, ""},
    +		{"DLT_JUNIPER_ATM2", Const, 0, ""},
    +		{"DLT_JUNIPER_ATM_CEMIC", Const, 1, ""},
    +		{"DLT_JUNIPER_CHDLC", Const, 0, ""},
    +		{"DLT_JUNIPER_ES", Const, 0, ""},
    +		{"DLT_JUNIPER_ETHER", Const, 0, ""},
    +		{"DLT_JUNIPER_FIBRECHANNEL", Const, 1, ""},
    +		{"DLT_JUNIPER_FRELAY", Const, 0, ""},
    +		{"DLT_JUNIPER_GGSN", Const, 0, ""},
    +		{"DLT_JUNIPER_ISM", Const, 0, ""},
    +		{"DLT_JUNIPER_MFR", Const, 0, ""},
    +		{"DLT_JUNIPER_MLFR", Const, 0, ""},
    +		{"DLT_JUNIPER_MLPPP", Const, 0, ""},
    +		{"DLT_JUNIPER_MONITOR", Const, 0, ""},
    +		{"DLT_JUNIPER_PIC_PEER", Const, 0, ""},
    +		{"DLT_JUNIPER_PPP", Const, 0, ""},
    +		{"DLT_JUNIPER_PPPOE", Const, 0, ""},
    +		{"DLT_JUNIPER_PPPOE_ATM", Const, 0, ""},
    +		{"DLT_JUNIPER_SERVICES", Const, 0, ""},
    +		{"DLT_JUNIPER_SRX_E2E", Const, 1, ""},
    +		{"DLT_JUNIPER_ST", Const, 0, ""},
    +		{"DLT_JUNIPER_VP", Const, 0, ""},
    +		{"DLT_JUNIPER_VS", Const, 1, ""},
    +		{"DLT_LAPB_WITH_DIR", Const, 0, ""},
    +		{"DLT_LAPD", Const, 0, ""},
    +		{"DLT_LIN", Const, 0, ""},
    +		{"DLT_LINUX_EVDEV", Const, 1, ""},
    +		{"DLT_LINUX_IRDA", Const, 0, ""},
    +		{"DLT_LINUX_LAPD", Const, 0, ""},
    +		{"DLT_LINUX_PPP_WITHDIRECTION", Const, 0, ""},
    +		{"DLT_LINUX_SLL", Const, 0, ""},
    +		{"DLT_LOOP", Const, 0, ""},
    +		{"DLT_LTALK", Const, 0, ""},
    +		{"DLT_MATCHING_MAX", Const, 1, ""},
    +		{"DLT_MATCHING_MIN", Const, 1, ""},
    +		{"DLT_MFR", Const, 0, ""},
    +		{"DLT_MOST", Const, 0, ""},
    +		{"DLT_MPEG_2_TS", Const, 1, ""},
    +		{"DLT_MPLS", Const, 1, ""},
    +		{"DLT_MTP2", Const, 0, ""},
    +		{"DLT_MTP2_WITH_PHDR", Const, 0, ""},
    +		{"DLT_MTP3", Const, 0, ""},
    +		{"DLT_MUX27010", Const, 1, ""},
    +		{"DLT_NETANALYZER", Const, 1, ""},
    +		{"DLT_NETANALYZER_TRANSPARENT", Const, 1, ""},
    +		{"DLT_NFC_LLCP", Const, 1, ""},
    +		{"DLT_NFLOG", Const, 1, ""},
    +		{"DLT_NG40", Const, 1, ""},
    +		{"DLT_NULL", Const, 0, ""},
    +		{"DLT_PCI_EXP", Const, 0, ""},
    +		{"DLT_PFLOG", Const, 0, ""},
    +		{"DLT_PFSYNC", Const, 0, ""},
    +		{"DLT_PPI", Const, 0, ""},
    +		{"DLT_PPP", Const, 0, ""},
    +		{"DLT_PPP_BSDOS", Const, 0, ""},
    +		{"DLT_PPP_ETHER", Const, 0, ""},
    +		{"DLT_PPP_PPPD", Const, 0, ""},
    +		{"DLT_PPP_SERIAL", Const, 0, ""},
    +		{"DLT_PPP_WITH_DIR", Const, 0, ""},
    +		{"DLT_PPP_WITH_DIRECTION", Const, 0, ""},
    +		{"DLT_PRISM_HEADER", Const, 0, ""},
    +		{"DLT_PRONET", Const, 0, ""},
    +		{"DLT_RAIF1", Const, 0, ""},
    +		{"DLT_RAW", Const, 0, ""},
    +		{"DLT_RAWAF_MASK", Const, 1, ""},
    +		{"DLT_RIO", Const, 0, ""},
    +		{"DLT_SCCP", Const, 0, ""},
    +		{"DLT_SITA", Const, 0, ""},
    +		{"DLT_SLIP", Const, 0, ""},
    +		{"DLT_SLIP_BSDOS", Const, 0, ""},
    +		{"DLT_STANAG_5066_D_PDU", Const, 1, ""},
    +		{"DLT_SUNATM", Const, 0, ""},
    +		{"DLT_SYMANTEC_FIREWALL", Const, 0, ""},
    +		{"DLT_TZSP", Const, 0, ""},
    +		{"DLT_USB", Const, 0, ""},
    +		{"DLT_USB_LINUX", Const, 0, ""},
    +		{"DLT_USB_LINUX_MMAPPED", Const, 1, ""},
    +		{"DLT_USER0", Const, 0, ""},
    +		{"DLT_USER1", Const, 0, ""},
    +		{"DLT_USER10", Const, 0, ""},
    +		{"DLT_USER11", Const, 0, ""},
    +		{"DLT_USER12", Const, 0, ""},
    +		{"DLT_USER13", Const, 0, ""},
    +		{"DLT_USER14", Const, 0, ""},
    +		{"DLT_USER15", Const, 0, ""},
    +		{"DLT_USER2", Const, 0, ""},
    +		{"DLT_USER3", Const, 0, ""},
    +		{"DLT_USER4", Const, 0, ""},
    +		{"DLT_USER5", Const, 0, ""},
    +		{"DLT_USER6", Const, 0, ""},
    +		{"DLT_USER7", Const, 0, ""},
    +		{"DLT_USER8", Const, 0, ""},
    +		{"DLT_USER9", Const, 0, ""},
    +		{"DLT_WIHART", Const, 1, ""},
    +		{"DLT_X2E_SERIAL", Const, 0, ""},
    +		{"DLT_X2E_XORAYA", Const, 0, ""},
    +		{"DNSMXData", Type, 0, ""},
    +		{"DNSMXData.NameExchange", Field, 0, ""},
    +		{"DNSMXData.Pad", Field, 0, ""},
    +		{"DNSMXData.Preference", Field, 0, ""},
    +		{"DNSPTRData", Type, 0, ""},
    +		{"DNSPTRData.Host", Field, 0, ""},
    +		{"DNSRecord", Type, 0, ""},
    +		{"DNSRecord.Data", Field, 0, ""},
    +		{"DNSRecord.Dw", Field, 0, ""},
    +		{"DNSRecord.Length", Field, 0, ""},
    +		{"DNSRecord.Name", Field, 0, ""},
    +		{"DNSRecord.Next", Field, 0, ""},
    +		{"DNSRecord.Reserved", Field, 0, ""},
    +		{"DNSRecord.Ttl", Field, 0, ""},
    +		{"DNSRecord.Type", Field, 0, ""},
    +		{"DNSSRVData", Type, 0, ""},
    +		{"DNSSRVData.Pad", Field, 0, ""},
    +		{"DNSSRVData.Port", Field, 0, ""},
    +		{"DNSSRVData.Priority", Field, 0, ""},
    +		{"DNSSRVData.Target", Field, 0, ""},
    +		{"DNSSRVData.Weight", Field, 0, ""},
    +		{"DNSTXTData", Type, 0, ""},
    +		{"DNSTXTData.StringArray", Field, 0, ""},
    +		{"DNSTXTData.StringCount", Field, 0, ""},
    +		{"DNS_INFO_NO_RECORDS", Const, 4, ""},
    +		{"DNS_TYPE_A", Const, 0, ""},
    +		{"DNS_TYPE_A6", Const, 0, ""},
    +		{"DNS_TYPE_AAAA", Const, 0, ""},
    +		{"DNS_TYPE_ADDRS", Const, 0, ""},
    +		{"DNS_TYPE_AFSDB", Const, 0, ""},
    +		{"DNS_TYPE_ALL", Const, 0, ""},
    +		{"DNS_TYPE_ANY", Const, 0, ""},
    +		{"DNS_TYPE_ATMA", Const, 0, ""},
    +		{"DNS_TYPE_AXFR", Const, 0, ""},
    +		{"DNS_TYPE_CERT", Const, 0, ""},
    +		{"DNS_TYPE_CNAME", Const, 0, ""},
    +		{"DNS_TYPE_DHCID", Const, 0, ""},
    +		{"DNS_TYPE_DNAME", Const, 0, ""},
    +		{"DNS_TYPE_DNSKEY", Const, 0, ""},
    +		{"DNS_TYPE_DS", Const, 0, ""},
    +		{"DNS_TYPE_EID", Const, 0, ""},
    +		{"DNS_TYPE_GID", Const, 0, ""},
    +		{"DNS_TYPE_GPOS", Const, 0, ""},
    +		{"DNS_TYPE_HINFO", Const, 0, ""},
    +		{"DNS_TYPE_ISDN", Const, 0, ""},
    +		{"DNS_TYPE_IXFR", Const, 0, ""},
    +		{"DNS_TYPE_KEY", Const, 0, ""},
    +		{"DNS_TYPE_KX", Const, 0, ""},
    +		{"DNS_TYPE_LOC", Const, 0, ""},
    +		{"DNS_TYPE_MAILA", Const, 0, ""},
    +		{"DNS_TYPE_MAILB", Const, 0, ""},
    +		{"DNS_TYPE_MB", Const, 0, ""},
    +		{"DNS_TYPE_MD", Const, 0, ""},
    +		{"DNS_TYPE_MF", Const, 0, ""},
    +		{"DNS_TYPE_MG", Const, 0, ""},
    +		{"DNS_TYPE_MINFO", Const, 0, ""},
    +		{"DNS_TYPE_MR", Const, 0, ""},
    +		{"DNS_TYPE_MX", Const, 0, ""},
    +		{"DNS_TYPE_NAPTR", Const, 0, ""},
    +		{"DNS_TYPE_NBSTAT", Const, 0, ""},
    +		{"DNS_TYPE_NIMLOC", Const, 0, ""},
    +		{"DNS_TYPE_NS", Const, 0, ""},
    +		{"DNS_TYPE_NSAP", Const, 0, ""},
    +		{"DNS_TYPE_NSAPPTR", Const, 0, ""},
    +		{"DNS_TYPE_NSEC", Const, 0, ""},
    +		{"DNS_TYPE_NULL", Const, 0, ""},
    +		{"DNS_TYPE_NXT", Const, 0, ""},
    +		{"DNS_TYPE_OPT", Const, 0, ""},
    +		{"DNS_TYPE_PTR", Const, 0, ""},
    +		{"DNS_TYPE_PX", Const, 0, ""},
    +		{"DNS_TYPE_RP", Const, 0, ""},
    +		{"DNS_TYPE_RRSIG", Const, 0, ""},
    +		{"DNS_TYPE_RT", Const, 0, ""},
    +		{"DNS_TYPE_SIG", Const, 0, ""},
    +		{"DNS_TYPE_SINK", Const, 0, ""},
    +		{"DNS_TYPE_SOA", Const, 0, ""},
    +		{"DNS_TYPE_SRV", Const, 0, ""},
    +		{"DNS_TYPE_TEXT", Const, 0, ""},
    +		{"DNS_TYPE_TKEY", Const, 0, ""},
    +		{"DNS_TYPE_TSIG", Const, 0, ""},
    +		{"DNS_TYPE_UID", Const, 0, ""},
    +		{"DNS_TYPE_UINFO", Const, 0, ""},
    +		{"DNS_TYPE_UNSPEC", Const, 0, ""},
    +		{"DNS_TYPE_WINS", Const, 0, ""},
    +		{"DNS_TYPE_WINSR", Const, 0, ""},
    +		{"DNS_TYPE_WKS", Const, 0, ""},
    +		{"DNS_TYPE_X25", Const, 0, ""},
    +		{"DT_BLK", Const, 0, ""},
    +		{"DT_CHR", Const, 0, ""},
    +		{"DT_DIR", Const, 0, ""},
    +		{"DT_FIFO", Const, 0, ""},
    +		{"DT_LNK", Const, 0, ""},
    +		{"DT_REG", Const, 0, ""},
    +		{"DT_SOCK", Const, 0, ""},
    +		{"DT_UNKNOWN", Const, 0, ""},
    +		{"DT_WHT", Const, 0, ""},
    +		{"DUPLICATE_CLOSE_SOURCE", Const, 0, ""},
    +		{"DUPLICATE_SAME_ACCESS", Const, 0, ""},
    +		{"DeleteFile", Func, 0, ""},
    +		{"DetachLsf", Func, 0, "func(fd int) error"},
    +		{"DeviceIoControl", Func, 4, ""},
    +		{"Dirent", Type, 0, ""},
    +		{"Dirent.Fileno", Field, 0, ""},
    +		{"Dirent.Ino", Field, 0, ""},
    +		{"Dirent.Name", Field, 0, ""},
    +		{"Dirent.Namlen", Field, 0, ""},
    +		{"Dirent.Off", Field, 0, ""},
    +		{"Dirent.Pad0", Field, 12, ""},
    +		{"Dirent.Pad1", Field, 12, ""},
    +		{"Dirent.Pad_cgo_0", Field, 0, ""},
    +		{"Dirent.Reclen", Field, 0, ""},
    +		{"Dirent.Seekoff", Field, 0, ""},
    +		{"Dirent.Type", Field, 0, ""},
    +		{"Dirent.X__d_padding", Field, 3, ""},
    +		{"DnsNameCompare", Func, 4, ""},
    +		{"DnsQuery", Func, 0, ""},
    +		{"DnsRecordListFree", Func, 0, ""},
    +		{"DnsSectionAdditional", Const, 4, ""},
    +		{"DnsSectionAnswer", Const, 4, ""},
    +		{"DnsSectionAuthority", Const, 4, ""},
    +		{"DnsSectionQuestion", Const, 4, ""},
    +		{"Dup", Func, 0, "func(oldfd int) (fd int, err error)"},
    +		{"Dup2", Func, 0, "func(oldfd int, newfd int) (err error)"},
    +		{"Dup3", Func, 2, "func(oldfd int, newfd int, flags int) (err error)"},
    +		{"DuplicateHandle", Func, 0, ""},
    +		{"E2BIG", Const, 0, ""},
    +		{"EACCES", Const, 0, ""},
    +		{"EADDRINUSE", Const, 0, ""},
    +		{"EADDRNOTAVAIL", Const, 0, ""},
    +		{"EADV", Const, 0, ""},
    +		{"EAFNOSUPPORT", Const, 0, ""},
    +		{"EAGAIN", Const, 0, ""},
    +		{"EALREADY", Const, 0, ""},
    +		{"EAUTH", Const, 0, ""},
    +		{"EBADARCH", Const, 0, ""},
    +		{"EBADE", Const, 0, ""},
    +		{"EBADEXEC", Const, 0, ""},
    +		{"EBADF", Const, 0, ""},
    +		{"EBADFD", Const, 0, ""},
    +		{"EBADMACHO", Const, 0, ""},
    +		{"EBADMSG", Const, 0, ""},
    +		{"EBADR", Const, 0, ""},
    +		{"EBADRPC", Const, 0, ""},
    +		{"EBADRQC", Const, 0, ""},
    +		{"EBADSLT", Const, 0, ""},
    +		{"EBFONT", Const, 0, ""},
    +		{"EBUSY", Const, 0, ""},
    +		{"ECANCELED", Const, 0, ""},
    +		{"ECAPMODE", Const, 1, ""},
    +		{"ECHILD", Const, 0, ""},
    +		{"ECHO", Const, 0, ""},
    +		{"ECHOCTL", Const, 0, ""},
    +		{"ECHOE", Const, 0, ""},
    +		{"ECHOK", Const, 0, ""},
    +		{"ECHOKE", Const, 0, ""},
    +		{"ECHONL", Const, 0, ""},
    +		{"ECHOPRT", Const, 0, ""},
    +		{"ECHRNG", Const, 0, ""},
    +		{"ECOMM", Const, 0, ""},
    +		{"ECONNABORTED", Const, 0, ""},
    +		{"ECONNREFUSED", Const, 0, ""},
    +		{"ECONNRESET", Const, 0, ""},
    +		{"EDEADLK", Const, 0, ""},
    +		{"EDEADLOCK", Const, 0, ""},
    +		{"EDESTADDRREQ", Const, 0, ""},
    +		{"EDEVERR", Const, 0, ""},
    +		{"EDOM", Const, 0, ""},
    +		{"EDOOFUS", Const, 0, ""},
    +		{"EDOTDOT", Const, 0, ""},
    +		{"EDQUOT", Const, 0, ""},
    +		{"EEXIST", Const, 0, ""},
    +		{"EFAULT", Const, 0, ""},
    +		{"EFBIG", Const, 0, ""},
    +		{"EFER_LMA", Const, 1, ""},
    +		{"EFER_LME", Const, 1, ""},
    +		{"EFER_NXE", Const, 1, ""},
    +		{"EFER_SCE", Const, 1, ""},
    +		{"EFTYPE", Const, 0, ""},
    +		{"EHOSTDOWN", Const, 0, ""},
    +		{"EHOSTUNREACH", Const, 0, ""},
    +		{"EHWPOISON", Const, 0, ""},
    +		{"EIDRM", Const, 0, ""},
    +		{"EILSEQ", Const, 0, ""},
    +		{"EINPROGRESS", Const, 0, ""},
    +		{"EINTR", Const, 0, ""},
    +		{"EINVAL", Const, 0, ""},
    +		{"EIO", Const, 0, ""},
    +		{"EIPSEC", Const, 1, ""},
    +		{"EISCONN", Const, 0, ""},
    +		{"EISDIR", Const, 0, ""},
    +		{"EISNAM", Const, 0, ""},
    +		{"EKEYEXPIRED", Const, 0, ""},
    +		{"EKEYREJECTED", Const, 0, ""},
    +		{"EKEYREVOKED", Const, 0, ""},
    +		{"EL2HLT", Const, 0, ""},
    +		{"EL2NSYNC", Const, 0, ""},
    +		{"EL3HLT", Const, 0, ""},
    +		{"EL3RST", Const, 0, ""},
    +		{"ELAST", Const, 0, ""},
    +		{"ELF_NGREG", Const, 0, ""},
    +		{"ELF_PRARGSZ", Const, 0, ""},
    +		{"ELIBACC", Const, 0, ""},
    +		{"ELIBBAD", Const, 0, ""},
    +		{"ELIBEXEC", Const, 0, ""},
    +		{"ELIBMAX", Const, 0, ""},
    +		{"ELIBSCN", Const, 0, ""},
    +		{"ELNRNG", Const, 0, ""},
    +		{"ELOOP", Const, 0, ""},
    +		{"EMEDIUMTYPE", Const, 0, ""},
    +		{"EMFILE", Const, 0, ""},
    +		{"EMLINK", Const, 0, ""},
    +		{"EMSGSIZE", Const, 0, ""},
    +		{"EMT_TAGOVF", Const, 1, ""},
    +		{"EMULTIHOP", Const, 0, ""},
    +		{"EMUL_ENABLED", Const, 1, ""},
    +		{"EMUL_LINUX", Const, 1, ""},
    +		{"EMUL_LINUX32", Const, 1, ""},
    +		{"EMUL_MAXID", Const, 1, ""},
    +		{"EMUL_NATIVE", Const, 1, ""},
    +		{"ENAMETOOLONG", Const, 0, ""},
    +		{"ENAVAIL", Const, 0, ""},
    +		{"ENDRUNDISC", Const, 1, ""},
    +		{"ENEEDAUTH", Const, 0, ""},
    +		{"ENETDOWN", Const, 0, ""},
    +		{"ENETRESET", Const, 0, ""},
    +		{"ENETUNREACH", Const, 0, ""},
    +		{"ENFILE", Const, 0, ""},
    +		{"ENOANO", Const, 0, ""},
    +		{"ENOATTR", Const, 0, ""},
    +		{"ENOBUFS", Const, 0, ""},
    +		{"ENOCSI", Const, 0, ""},
    +		{"ENODATA", Const, 0, ""},
    +		{"ENODEV", Const, 0, ""},
    +		{"ENOENT", Const, 0, ""},
    +		{"ENOEXEC", Const, 0, ""},
    +		{"ENOKEY", Const, 0, ""},
    +		{"ENOLCK", Const, 0, ""},
    +		{"ENOLINK", Const, 0, ""},
    +		{"ENOMEDIUM", Const, 0, ""},
    +		{"ENOMEM", Const, 0, ""},
    +		{"ENOMSG", Const, 0, ""},
    +		{"ENONET", Const, 0, ""},
    +		{"ENOPKG", Const, 0, ""},
    +		{"ENOPOLICY", Const, 0, ""},
    +		{"ENOPROTOOPT", Const, 0, ""},
    +		{"ENOSPC", Const, 0, ""},
    +		{"ENOSR", Const, 0, ""},
    +		{"ENOSTR", Const, 0, ""},
    +		{"ENOSYS", Const, 0, ""},
    +		{"ENOTBLK", Const, 0, ""},
    +		{"ENOTCAPABLE", Const, 0, ""},
    +		{"ENOTCONN", Const, 0, ""},
    +		{"ENOTDIR", Const, 0, ""},
    +		{"ENOTEMPTY", Const, 0, ""},
    +		{"ENOTNAM", Const, 0, ""},
    +		{"ENOTRECOVERABLE", Const, 0, ""},
    +		{"ENOTSOCK", Const, 0, ""},
    +		{"ENOTSUP", Const, 0, ""},
    +		{"ENOTTY", Const, 0, ""},
    +		{"ENOTUNIQ", Const, 0, ""},
    +		{"ENXIO", Const, 0, ""},
    +		{"EN_SW_CTL_INF", Const, 1, ""},
    +		{"EN_SW_CTL_PREC", Const, 1, ""},
    +		{"EN_SW_CTL_ROUND", Const, 1, ""},
    +		{"EN_SW_DATACHAIN", Const, 1, ""},
    +		{"EN_SW_DENORM", Const, 1, ""},
    +		{"EN_SW_INVOP", Const, 1, ""},
    +		{"EN_SW_OVERFLOW", Const, 1, ""},
    +		{"EN_SW_PRECLOSS", Const, 1, ""},
    +		{"EN_SW_UNDERFLOW", Const, 1, ""},
    +		{"EN_SW_ZERODIV", Const, 1, ""},
    +		{"EOPNOTSUPP", Const, 0, ""},
    +		{"EOVERFLOW", Const, 0, ""},
    +		{"EOWNERDEAD", Const, 0, ""},
    +		{"EPERM", Const, 0, ""},
    +		{"EPFNOSUPPORT", Const, 0, ""},
    +		{"EPIPE", Const, 0, ""},
    +		{"EPOLLERR", Const, 0, ""},
    +		{"EPOLLET", Const, 0, ""},
    +		{"EPOLLHUP", Const, 0, ""},
    +		{"EPOLLIN", Const, 0, ""},
    +		{"EPOLLMSG", Const, 0, ""},
    +		{"EPOLLONESHOT", Const, 0, ""},
    +		{"EPOLLOUT", Const, 0, ""},
    +		{"EPOLLPRI", Const, 0, ""},
    +		{"EPOLLRDBAND", Const, 0, ""},
    +		{"EPOLLRDHUP", Const, 0, ""},
    +		{"EPOLLRDNORM", Const, 0, ""},
    +		{"EPOLLWRBAND", Const, 0, ""},
    +		{"EPOLLWRNORM", Const, 0, ""},
    +		{"EPOLL_CLOEXEC", Const, 0, ""},
    +		{"EPOLL_CTL_ADD", Const, 0, ""},
    +		{"EPOLL_CTL_DEL", Const, 0, ""},
    +		{"EPOLL_CTL_MOD", Const, 0, ""},
    +		{"EPOLL_NONBLOCK", Const, 0, ""},
    +		{"EPROCLIM", Const, 0, ""},
    +		{"EPROCUNAVAIL", Const, 0, ""},
    +		{"EPROGMISMATCH", Const, 0, ""},
    +		{"EPROGUNAVAIL", Const, 0, ""},
    +		{"EPROTO", Const, 0, ""},
    +		{"EPROTONOSUPPORT", Const, 0, ""},
    +		{"EPROTOTYPE", Const, 0, ""},
    +		{"EPWROFF", Const, 0, ""},
    +		{"EQFULL", Const, 16, ""},
    +		{"ERANGE", Const, 0, ""},
    +		{"EREMCHG", Const, 0, ""},
    +		{"EREMOTE", Const, 0, ""},
    +		{"EREMOTEIO", Const, 0, ""},
    +		{"ERESTART", Const, 0, ""},
    +		{"ERFKILL", Const, 0, ""},
    +		{"EROFS", Const, 0, ""},
    +		{"ERPCMISMATCH", Const, 0, ""},
    +		{"ERROR_ACCESS_DENIED", Const, 0, ""},
    +		{"ERROR_ALREADY_EXISTS", Const, 0, ""},
    +		{"ERROR_BROKEN_PIPE", Const, 0, ""},
    +		{"ERROR_BUFFER_OVERFLOW", Const, 0, ""},
    +		{"ERROR_DIR_NOT_EMPTY", Const, 8, ""},
    +		{"ERROR_ENVVAR_NOT_FOUND", Const, 0, ""},
    +		{"ERROR_FILE_EXISTS", Const, 0, ""},
    +		{"ERROR_FILE_NOT_FOUND", Const, 0, ""},
    +		{"ERROR_HANDLE_EOF", Const, 2, ""},
    +		{"ERROR_INSUFFICIENT_BUFFER", Const, 0, ""},
    +		{"ERROR_IO_PENDING", Const, 0, ""},
    +		{"ERROR_MOD_NOT_FOUND", Const, 0, ""},
    +		{"ERROR_MORE_DATA", Const, 3, ""},
    +		{"ERROR_NETNAME_DELETED", Const, 3, ""},
    +		{"ERROR_NOT_FOUND", Const, 1, ""},
    +		{"ERROR_NO_MORE_FILES", Const, 0, ""},
    +		{"ERROR_OPERATION_ABORTED", Const, 0, ""},
    +		{"ERROR_PATH_NOT_FOUND", Const, 0, ""},
    +		{"ERROR_PRIVILEGE_NOT_HELD", Const, 4, ""},
    +		{"ERROR_PROC_NOT_FOUND", Const, 0, ""},
    +		{"ESHLIBVERS", Const, 0, ""},
    +		{"ESHUTDOWN", Const, 0, ""},
    +		{"ESOCKTNOSUPPORT", Const, 0, ""},
    +		{"ESPIPE", Const, 0, ""},
    +		{"ESRCH", Const, 0, ""},
    +		{"ESRMNT", Const, 0, ""},
    +		{"ESTALE", Const, 0, ""},
    +		{"ESTRPIPE", Const, 0, ""},
    +		{"ETHERCAP_JUMBO_MTU", Const, 1, ""},
    +		{"ETHERCAP_VLAN_HWTAGGING", Const, 1, ""},
    +		{"ETHERCAP_VLAN_MTU", Const, 1, ""},
    +		{"ETHERMIN", Const, 1, ""},
    +		{"ETHERMTU", Const, 1, ""},
    +		{"ETHERMTU_JUMBO", Const, 1, ""},
    +		{"ETHERTYPE_8023", Const, 1, ""},
    +		{"ETHERTYPE_AARP", Const, 1, ""},
    +		{"ETHERTYPE_ACCTON", Const, 1, ""},
    +		{"ETHERTYPE_AEONIC", Const, 1, ""},
    +		{"ETHERTYPE_ALPHA", Const, 1, ""},
    +		{"ETHERTYPE_AMBER", Const, 1, ""},
    +		{"ETHERTYPE_AMOEBA", Const, 1, ""},
    +		{"ETHERTYPE_AOE", Const, 1, ""},
    +		{"ETHERTYPE_APOLLO", Const, 1, ""},
    +		{"ETHERTYPE_APOLLODOMAIN", Const, 1, ""},
    +		{"ETHERTYPE_APPLETALK", Const, 1, ""},
    +		{"ETHERTYPE_APPLITEK", Const, 1, ""},
    +		{"ETHERTYPE_ARGONAUT", Const, 1, ""},
    +		{"ETHERTYPE_ARP", Const, 1, ""},
    +		{"ETHERTYPE_AT", Const, 1, ""},
    +		{"ETHERTYPE_ATALK", Const, 1, ""},
    +		{"ETHERTYPE_ATOMIC", Const, 1, ""},
    +		{"ETHERTYPE_ATT", Const, 1, ""},
    +		{"ETHERTYPE_ATTSTANFORD", Const, 1, ""},
    +		{"ETHERTYPE_AUTOPHON", Const, 1, ""},
    +		{"ETHERTYPE_AXIS", Const, 1, ""},
    +		{"ETHERTYPE_BCLOOP", Const, 1, ""},
    +		{"ETHERTYPE_BOFL", Const, 1, ""},
    +		{"ETHERTYPE_CABLETRON", Const, 1, ""},
    +		{"ETHERTYPE_CHAOS", Const, 1, ""},
    +		{"ETHERTYPE_COMDESIGN", Const, 1, ""},
    +		{"ETHERTYPE_COMPUGRAPHIC", Const, 1, ""},
    +		{"ETHERTYPE_COUNTERPOINT", Const, 1, ""},
    +		{"ETHERTYPE_CRONUS", Const, 1, ""},
    +		{"ETHERTYPE_CRONUSVLN", Const, 1, ""},
    +		{"ETHERTYPE_DCA", Const, 1, ""},
    +		{"ETHERTYPE_DDE", Const, 1, ""},
    +		{"ETHERTYPE_DEBNI", Const, 1, ""},
    +		{"ETHERTYPE_DECAM", Const, 1, ""},
    +		{"ETHERTYPE_DECCUST", Const, 1, ""},
    +		{"ETHERTYPE_DECDIAG", Const, 1, ""},
    +		{"ETHERTYPE_DECDNS", Const, 1, ""},
    +		{"ETHERTYPE_DECDTS", Const, 1, ""},
    +		{"ETHERTYPE_DECEXPER", Const, 1, ""},
    +		{"ETHERTYPE_DECLAST", Const, 1, ""},
    +		{"ETHERTYPE_DECLTM", Const, 1, ""},
    +		{"ETHERTYPE_DECMUMPS", Const, 1, ""},
    +		{"ETHERTYPE_DECNETBIOS", Const, 1, ""},
    +		{"ETHERTYPE_DELTACON", Const, 1, ""},
    +		{"ETHERTYPE_DIDDLE", Const, 1, ""},
    +		{"ETHERTYPE_DLOG1", Const, 1, ""},
    +		{"ETHERTYPE_DLOG2", Const, 1, ""},
    +		{"ETHERTYPE_DN", Const, 1, ""},
    +		{"ETHERTYPE_DOGFIGHT", Const, 1, ""},
    +		{"ETHERTYPE_DSMD", Const, 1, ""},
    +		{"ETHERTYPE_ECMA", Const, 1, ""},
    +		{"ETHERTYPE_ENCRYPT", Const, 1, ""},
    +		{"ETHERTYPE_ES", Const, 1, ""},
    +		{"ETHERTYPE_EXCELAN", Const, 1, ""},
    +		{"ETHERTYPE_EXPERDATA", Const, 1, ""},
    +		{"ETHERTYPE_FLIP", Const, 1, ""},
    +		{"ETHERTYPE_FLOWCONTROL", Const, 1, ""},
    +		{"ETHERTYPE_FRARP", Const, 1, ""},
    +		{"ETHERTYPE_GENDYN", Const, 1, ""},
    +		{"ETHERTYPE_HAYES", Const, 1, ""},
    +		{"ETHERTYPE_HIPPI_FP", Const, 1, ""},
    +		{"ETHERTYPE_HITACHI", Const, 1, ""},
    +		{"ETHERTYPE_HP", Const, 1, ""},
    +		{"ETHERTYPE_IEEEPUP", Const, 1, ""},
    +		{"ETHERTYPE_IEEEPUPAT", Const, 1, ""},
    +		{"ETHERTYPE_IMLBL", Const, 1, ""},
    +		{"ETHERTYPE_IMLBLDIAG", Const, 1, ""},
    +		{"ETHERTYPE_IP", Const, 1, ""},
    +		{"ETHERTYPE_IPAS", Const, 1, ""},
    +		{"ETHERTYPE_IPV6", Const, 1, ""},
    +		{"ETHERTYPE_IPX", Const, 1, ""},
    +		{"ETHERTYPE_IPXNEW", Const, 1, ""},
    +		{"ETHERTYPE_KALPANA", Const, 1, ""},
    +		{"ETHERTYPE_LANBRIDGE", Const, 1, ""},
    +		{"ETHERTYPE_LANPROBE", Const, 1, ""},
    +		{"ETHERTYPE_LAT", Const, 1, ""},
    +		{"ETHERTYPE_LBACK", Const, 1, ""},
    +		{"ETHERTYPE_LITTLE", Const, 1, ""},
    +		{"ETHERTYPE_LLDP", Const, 1, ""},
    +		{"ETHERTYPE_LOGICRAFT", Const, 1, ""},
    +		{"ETHERTYPE_LOOPBACK", Const, 1, ""},
    +		{"ETHERTYPE_MATRA", Const, 1, ""},
    +		{"ETHERTYPE_MAX", Const, 1, ""},
    +		{"ETHERTYPE_MERIT", Const, 1, ""},
    +		{"ETHERTYPE_MICP", Const, 1, ""},
    +		{"ETHERTYPE_MOPDL", Const, 1, ""},
    +		{"ETHERTYPE_MOPRC", Const, 1, ""},
    +		{"ETHERTYPE_MOTOROLA", Const, 1, ""},
    +		{"ETHERTYPE_MPLS", Const, 1, ""},
    +		{"ETHERTYPE_MPLS_MCAST", Const, 1, ""},
    +		{"ETHERTYPE_MUMPS", Const, 1, ""},
    +		{"ETHERTYPE_NBPCC", Const, 1, ""},
    +		{"ETHERTYPE_NBPCLAIM", Const, 1, ""},
    +		{"ETHERTYPE_NBPCLREQ", Const, 1, ""},
    +		{"ETHERTYPE_NBPCLRSP", Const, 1, ""},
    +		{"ETHERTYPE_NBPCREQ", Const, 1, ""},
    +		{"ETHERTYPE_NBPCRSP", Const, 1, ""},
    +		{"ETHERTYPE_NBPDG", Const, 1, ""},
    +		{"ETHERTYPE_NBPDGB", Const, 1, ""},
    +		{"ETHERTYPE_NBPDLTE", Const, 1, ""},
    +		{"ETHERTYPE_NBPRAR", Const, 1, ""},
    +		{"ETHERTYPE_NBPRAS", Const, 1, ""},
    +		{"ETHERTYPE_NBPRST", Const, 1, ""},
    +		{"ETHERTYPE_NBPSCD", Const, 1, ""},
    +		{"ETHERTYPE_NBPVCD", Const, 1, ""},
    +		{"ETHERTYPE_NBS", Const, 1, ""},
    +		{"ETHERTYPE_NCD", Const, 1, ""},
    +		{"ETHERTYPE_NESTAR", Const, 1, ""},
    +		{"ETHERTYPE_NETBEUI", Const, 1, ""},
    +		{"ETHERTYPE_NOVELL", Const, 1, ""},
    +		{"ETHERTYPE_NS", Const, 1, ""},
    +		{"ETHERTYPE_NSAT", Const, 1, ""},
    +		{"ETHERTYPE_NSCOMPAT", Const, 1, ""},
    +		{"ETHERTYPE_NTRAILER", Const, 1, ""},
    +		{"ETHERTYPE_OS9", Const, 1, ""},
    +		{"ETHERTYPE_OS9NET", Const, 1, ""},
    +		{"ETHERTYPE_PACER", Const, 1, ""},
    +		{"ETHERTYPE_PAE", Const, 1, ""},
    +		{"ETHERTYPE_PCS", Const, 1, ""},
    +		{"ETHERTYPE_PLANNING", Const, 1, ""},
    +		{"ETHERTYPE_PPP", Const, 1, ""},
    +		{"ETHERTYPE_PPPOE", Const, 1, ""},
    +		{"ETHERTYPE_PPPOEDISC", Const, 1, ""},
    +		{"ETHERTYPE_PRIMENTS", Const, 1, ""},
    +		{"ETHERTYPE_PUP", Const, 1, ""},
    +		{"ETHERTYPE_PUPAT", Const, 1, ""},
    +		{"ETHERTYPE_QINQ", Const, 1, ""},
    +		{"ETHERTYPE_RACAL", Const, 1, ""},
    +		{"ETHERTYPE_RATIONAL", Const, 1, ""},
    +		{"ETHERTYPE_RAWFR", Const, 1, ""},
    +		{"ETHERTYPE_RCL", Const, 1, ""},
    +		{"ETHERTYPE_RDP", Const, 1, ""},
    +		{"ETHERTYPE_RETIX", Const, 1, ""},
    +		{"ETHERTYPE_REVARP", Const, 1, ""},
    +		{"ETHERTYPE_SCA", Const, 1, ""},
    +		{"ETHERTYPE_SECTRA", Const, 1, ""},
    +		{"ETHERTYPE_SECUREDATA", Const, 1, ""},
    +		{"ETHERTYPE_SGITW", Const, 1, ""},
    +		{"ETHERTYPE_SG_BOUNCE", Const, 1, ""},
    +		{"ETHERTYPE_SG_DIAG", Const, 1, ""},
    +		{"ETHERTYPE_SG_NETGAMES", Const, 1, ""},
    +		{"ETHERTYPE_SG_RESV", Const, 1, ""},
    +		{"ETHERTYPE_SIMNET", Const, 1, ""},
    +		{"ETHERTYPE_SLOW", Const, 1, ""},
    +		{"ETHERTYPE_SLOWPROTOCOLS", Const, 1, ""},
    +		{"ETHERTYPE_SNA", Const, 1, ""},
    +		{"ETHERTYPE_SNMP", Const, 1, ""},
    +		{"ETHERTYPE_SONIX", Const, 1, ""},
    +		{"ETHERTYPE_SPIDER", Const, 1, ""},
    +		{"ETHERTYPE_SPRITE", Const, 1, ""},
    +		{"ETHERTYPE_STP", Const, 1, ""},
    +		{"ETHERTYPE_TALARIS", Const, 1, ""},
    +		{"ETHERTYPE_TALARISMC", Const, 1, ""},
    +		{"ETHERTYPE_TCPCOMP", Const, 1, ""},
    +		{"ETHERTYPE_TCPSM", Const, 1, ""},
    +		{"ETHERTYPE_TEC", Const, 1, ""},
    +		{"ETHERTYPE_TIGAN", Const, 1, ""},
    +		{"ETHERTYPE_TRAIL", Const, 1, ""},
    +		{"ETHERTYPE_TRANSETHER", Const, 1, ""},
    +		{"ETHERTYPE_TYMSHARE", Const, 1, ""},
    +		{"ETHERTYPE_UBBST", Const, 1, ""},
    +		{"ETHERTYPE_UBDEBUG", Const, 1, ""},
    +		{"ETHERTYPE_UBDIAGLOOP", Const, 1, ""},
    +		{"ETHERTYPE_UBDL", Const, 1, ""},
    +		{"ETHERTYPE_UBNIU", Const, 1, ""},
    +		{"ETHERTYPE_UBNMC", Const, 1, ""},
    +		{"ETHERTYPE_VALID", Const, 1, ""},
    +		{"ETHERTYPE_VARIAN", Const, 1, ""},
    +		{"ETHERTYPE_VAXELN", Const, 1, ""},
    +		{"ETHERTYPE_VEECO", Const, 1, ""},
    +		{"ETHERTYPE_VEXP", Const, 1, ""},
    +		{"ETHERTYPE_VGLAB", Const, 1, ""},
    +		{"ETHERTYPE_VINES", Const, 1, ""},
    +		{"ETHERTYPE_VINESECHO", Const, 1, ""},
    +		{"ETHERTYPE_VINESLOOP", Const, 1, ""},
    +		{"ETHERTYPE_VITAL", Const, 1, ""},
    +		{"ETHERTYPE_VLAN", Const, 1, ""},
    +		{"ETHERTYPE_VLTLMAN", Const, 1, ""},
    +		{"ETHERTYPE_VPROD", Const, 1, ""},
    +		{"ETHERTYPE_VURESERVED", Const, 1, ""},
    +		{"ETHERTYPE_WATERLOO", Const, 1, ""},
    +		{"ETHERTYPE_WELLFLEET", Const, 1, ""},
    +		{"ETHERTYPE_X25", Const, 1, ""},
    +		{"ETHERTYPE_X75", Const, 1, ""},
    +		{"ETHERTYPE_XNSSM", Const, 1, ""},
    +		{"ETHERTYPE_XTP", Const, 1, ""},
    +		{"ETHER_ADDR_LEN", Const, 1, ""},
    +		{"ETHER_ALIGN", Const, 1, ""},
    +		{"ETHER_CRC_LEN", Const, 1, ""},
    +		{"ETHER_CRC_POLY_BE", Const, 1, ""},
    +		{"ETHER_CRC_POLY_LE", Const, 1, ""},
    +		{"ETHER_HDR_LEN", Const, 1, ""},
    +		{"ETHER_MAX_DIX_LEN", Const, 1, ""},
    +		{"ETHER_MAX_LEN", Const, 1, ""},
    +		{"ETHER_MAX_LEN_JUMBO", Const, 1, ""},
    +		{"ETHER_MIN_LEN", Const, 1, ""},
    +		{"ETHER_PPPOE_ENCAP_LEN", Const, 1, ""},
    +		{"ETHER_TYPE_LEN", Const, 1, ""},
    +		{"ETHER_VLAN_ENCAP_LEN", Const, 1, ""},
    +		{"ETH_P_1588", Const, 0, ""},
    +		{"ETH_P_8021Q", Const, 0, ""},
    +		{"ETH_P_802_2", Const, 0, ""},
    +		{"ETH_P_802_3", Const, 0, ""},
    +		{"ETH_P_AARP", Const, 0, ""},
    +		{"ETH_P_ALL", Const, 0, ""},
    +		{"ETH_P_AOE", Const, 0, ""},
    +		{"ETH_P_ARCNET", Const, 0, ""},
    +		{"ETH_P_ARP", Const, 0, ""},
    +		{"ETH_P_ATALK", Const, 0, ""},
    +		{"ETH_P_ATMFATE", Const, 0, ""},
    +		{"ETH_P_ATMMPOA", Const, 0, ""},
    +		{"ETH_P_AX25", Const, 0, ""},
    +		{"ETH_P_BPQ", Const, 0, ""},
    +		{"ETH_P_CAIF", Const, 0, ""},
    +		{"ETH_P_CAN", Const, 0, ""},
    +		{"ETH_P_CONTROL", Const, 0, ""},
    +		{"ETH_P_CUST", Const, 0, ""},
    +		{"ETH_P_DDCMP", Const, 0, ""},
    +		{"ETH_P_DEC", Const, 0, ""},
    +		{"ETH_P_DIAG", Const, 0, ""},
    +		{"ETH_P_DNA_DL", Const, 0, ""},
    +		{"ETH_P_DNA_RC", Const, 0, ""},
    +		{"ETH_P_DNA_RT", Const, 0, ""},
    +		{"ETH_P_DSA", Const, 0, ""},
    +		{"ETH_P_ECONET", Const, 0, ""},
    +		{"ETH_P_EDSA", Const, 0, ""},
    +		{"ETH_P_FCOE", Const, 0, ""},
    +		{"ETH_P_FIP", Const, 0, ""},
    +		{"ETH_P_HDLC", Const, 0, ""},
    +		{"ETH_P_IEEE802154", Const, 0, ""},
    +		{"ETH_P_IEEEPUP", Const, 0, ""},
    +		{"ETH_P_IEEEPUPAT", Const, 0, ""},
    +		{"ETH_P_IP", Const, 0, ""},
    +		{"ETH_P_IPV6", Const, 0, ""},
    +		{"ETH_P_IPX", Const, 0, ""},
    +		{"ETH_P_IRDA", Const, 0, ""},
    +		{"ETH_P_LAT", Const, 0, ""},
    +		{"ETH_P_LINK_CTL", Const, 0, ""},
    +		{"ETH_P_LOCALTALK", Const, 0, ""},
    +		{"ETH_P_LOOP", Const, 0, ""},
    +		{"ETH_P_MOBITEX", Const, 0, ""},
    +		{"ETH_P_MPLS_MC", Const, 0, ""},
    +		{"ETH_P_MPLS_UC", Const, 0, ""},
    +		{"ETH_P_PAE", Const, 0, ""},
    +		{"ETH_P_PAUSE", Const, 0, ""},
    +		{"ETH_P_PHONET", Const, 0, ""},
    +		{"ETH_P_PPPTALK", Const, 0, ""},
    +		{"ETH_P_PPP_DISC", Const, 0, ""},
    +		{"ETH_P_PPP_MP", Const, 0, ""},
    +		{"ETH_P_PPP_SES", Const, 0, ""},
    +		{"ETH_P_PUP", Const, 0, ""},
    +		{"ETH_P_PUPAT", Const, 0, ""},
    +		{"ETH_P_RARP", Const, 0, ""},
    +		{"ETH_P_SCA", Const, 0, ""},
    +		{"ETH_P_SLOW", Const, 0, ""},
    +		{"ETH_P_SNAP", Const, 0, ""},
    +		{"ETH_P_TEB", Const, 0, ""},
    +		{"ETH_P_TIPC", Const, 0, ""},
    +		{"ETH_P_TRAILER", Const, 0, ""},
    +		{"ETH_P_TR_802_2", Const, 0, ""},
    +		{"ETH_P_WAN_PPP", Const, 0, ""},
    +		{"ETH_P_WCCP", Const, 0, ""},
    +		{"ETH_P_X25", Const, 0, ""},
    +		{"ETIME", Const, 0, ""},
    +		{"ETIMEDOUT", Const, 0, ""},
    +		{"ETOOMANYREFS", Const, 0, ""},
    +		{"ETXTBSY", Const, 0, ""},
    +		{"EUCLEAN", Const, 0, ""},
    +		{"EUNATCH", Const, 0, ""},
    +		{"EUSERS", Const, 0, ""},
    +		{"EVFILT_AIO", Const, 0, ""},
    +		{"EVFILT_FS", Const, 0, ""},
    +		{"EVFILT_LIO", Const, 0, ""},
    +		{"EVFILT_MACHPORT", Const, 0, ""},
    +		{"EVFILT_PROC", Const, 0, ""},
    +		{"EVFILT_READ", Const, 0, ""},
    +		{"EVFILT_SIGNAL", Const, 0, ""},
    +		{"EVFILT_SYSCOUNT", Const, 0, ""},
    +		{"EVFILT_THREADMARKER", Const, 0, ""},
    +		{"EVFILT_TIMER", Const, 0, ""},
    +		{"EVFILT_USER", Const, 0, ""},
    +		{"EVFILT_VM", Const, 0, ""},
    +		{"EVFILT_VNODE", Const, 0, ""},
    +		{"EVFILT_WRITE", Const, 0, ""},
    +		{"EV_ADD", Const, 0, ""},
    +		{"EV_CLEAR", Const, 0, ""},
    +		{"EV_DELETE", Const, 0, ""},
    +		{"EV_DISABLE", Const, 0, ""},
    +		{"EV_DISPATCH", Const, 0, ""},
    +		{"EV_DROP", Const, 3, ""},
    +		{"EV_ENABLE", Const, 0, ""},
    +		{"EV_EOF", Const, 0, ""},
    +		{"EV_ERROR", Const, 0, ""},
    +		{"EV_FLAG0", Const, 0, ""},
    +		{"EV_FLAG1", Const, 0, ""},
    +		{"EV_ONESHOT", Const, 0, ""},
    +		{"EV_OOBAND", Const, 0, ""},
    +		{"EV_POLL", Const, 0, ""},
    +		{"EV_RECEIPT", Const, 0, ""},
    +		{"EV_SYSFLAGS", Const, 0, ""},
    +		{"EWINDOWS", Const, 0, ""},
    +		{"EWOULDBLOCK", Const, 0, ""},
    +		{"EXDEV", Const, 0, ""},
    +		{"EXFULL", Const, 0, ""},
    +		{"EXTA", Const, 0, ""},
    +		{"EXTB", Const, 0, ""},
    +		{"EXTPROC", Const, 0, ""},
    +		{"Environ", Func, 0, "func() []string"},
    +		{"EpollCreate", Func, 0, "func(size int) (fd int, err error)"},
    +		{"EpollCreate1", Func, 0, "func(flag int) (fd int, err error)"},
    +		{"EpollCtl", Func, 0, "func(epfd int, op int, fd int, event *EpollEvent) (err error)"},
    +		{"EpollEvent", Type, 0, ""},
    +		{"EpollEvent.Events", Field, 0, ""},
    +		{"EpollEvent.Fd", Field, 0, ""},
    +		{"EpollEvent.Pad", Field, 0, ""},
    +		{"EpollEvent.PadFd", Field, 0, ""},
    +		{"EpollWait", Func, 0, "func(epfd int, events []EpollEvent, msec int) (n int, err error)"},
    +		{"Errno", Type, 0, ""},
    +		{"EscapeArg", Func, 0, ""},
    +		{"Exchangedata", Func, 0, ""},
    +		{"Exec", Func, 0, "func(argv0 string, argv []string, envv []string) (err error)"},
    +		{"Exit", Func, 0, "func(code int)"},
    +		{"ExitProcess", Func, 0, ""},
    +		{"FD_CLOEXEC", Const, 0, ""},
    +		{"FD_SETSIZE", Const, 0, ""},
    +		{"FILE_ACTION_ADDED", Const, 0, ""},
    +		{"FILE_ACTION_MODIFIED", Const, 0, ""},
    +		{"FILE_ACTION_REMOVED", Const, 0, ""},
    +		{"FILE_ACTION_RENAMED_NEW_NAME", Const, 0, ""},
    +		{"FILE_ACTION_RENAMED_OLD_NAME", Const, 0, ""},
    +		{"FILE_APPEND_DATA", Const, 0, ""},
    +		{"FILE_ATTRIBUTE_ARCHIVE", Const, 0, ""},
    +		{"FILE_ATTRIBUTE_DIRECTORY", Const, 0, ""},
    +		{"FILE_ATTRIBUTE_HIDDEN", Const, 0, ""},
    +		{"FILE_ATTRIBUTE_NORMAL", Const, 0, ""},
    +		{"FILE_ATTRIBUTE_READONLY", Const, 0, ""},
    +		{"FILE_ATTRIBUTE_REPARSE_POINT", Const, 4, ""},
    +		{"FILE_ATTRIBUTE_SYSTEM", Const, 0, ""},
    +		{"FILE_BEGIN", Const, 0, ""},
    +		{"FILE_CURRENT", Const, 0, ""},
    +		{"FILE_END", Const, 0, ""},
    +		{"FILE_FLAG_BACKUP_SEMANTICS", Const, 0, ""},
    +		{"FILE_FLAG_OPEN_REPARSE_POINT", Const, 4, ""},
    +		{"FILE_FLAG_OVERLAPPED", Const, 0, ""},
    +		{"FILE_LIST_DIRECTORY", Const, 0, ""},
    +		{"FILE_MAP_COPY", Const, 0, ""},
    +		{"FILE_MAP_EXECUTE", Const, 0, ""},
    +		{"FILE_MAP_READ", Const, 0, ""},
    +		{"FILE_MAP_WRITE", Const, 0, ""},
    +		{"FILE_NOTIFY_CHANGE_ATTRIBUTES", Const, 0, ""},
    +		{"FILE_NOTIFY_CHANGE_CREATION", Const, 0, ""},
    +		{"FILE_NOTIFY_CHANGE_DIR_NAME", Const, 0, ""},
    +		{"FILE_NOTIFY_CHANGE_FILE_NAME", Const, 0, ""},
    +		{"FILE_NOTIFY_CHANGE_LAST_ACCESS", Const, 0, ""},
    +		{"FILE_NOTIFY_CHANGE_LAST_WRITE", Const, 0, ""},
    +		{"FILE_NOTIFY_CHANGE_SIZE", Const, 0, ""},
    +		{"FILE_SHARE_DELETE", Const, 0, ""},
    +		{"FILE_SHARE_READ", Const, 0, ""},
    +		{"FILE_SHARE_WRITE", Const, 0, ""},
    +		{"FILE_SKIP_COMPLETION_PORT_ON_SUCCESS", Const, 2, ""},
    +		{"FILE_SKIP_SET_EVENT_ON_HANDLE", Const, 2, ""},
    +		{"FILE_TYPE_CHAR", Const, 0, ""},
    +		{"FILE_TYPE_DISK", Const, 0, ""},
    +		{"FILE_TYPE_PIPE", Const, 0, ""},
    +		{"FILE_TYPE_REMOTE", Const, 0, ""},
    +		{"FILE_TYPE_UNKNOWN", Const, 0, ""},
    +		{"FILE_WRITE_ATTRIBUTES", Const, 0, ""},
    +		{"FLUSHO", Const, 0, ""},
    +		{"FORMAT_MESSAGE_ALLOCATE_BUFFER", Const, 0, ""},
    +		{"FORMAT_MESSAGE_ARGUMENT_ARRAY", Const, 0, ""},
    +		{"FORMAT_MESSAGE_FROM_HMODULE", Const, 0, ""},
    +		{"FORMAT_MESSAGE_FROM_STRING", Const, 0, ""},
    +		{"FORMAT_MESSAGE_FROM_SYSTEM", Const, 0, ""},
    +		{"FORMAT_MESSAGE_IGNORE_INSERTS", Const, 0, ""},
    +		{"FORMAT_MESSAGE_MAX_WIDTH_MASK", Const, 0, ""},
    +		{"FSCTL_GET_REPARSE_POINT", Const, 4, ""},
    +		{"F_ADDFILESIGS", Const, 0, ""},
    +		{"F_ADDSIGS", Const, 0, ""},
    +		{"F_ALLOCATEALL", Const, 0, ""},
    +		{"F_ALLOCATECONTIG", Const, 0, ""},
    +		{"F_CANCEL", Const, 0, ""},
    +		{"F_CHKCLEAN", Const, 0, ""},
    +		{"F_CLOSEM", Const, 1, ""},
    +		{"F_DUP2FD", Const, 0, ""},
    +		{"F_DUP2FD_CLOEXEC", Const, 1, ""},
    +		{"F_DUPFD", Const, 0, ""},
    +		{"F_DUPFD_CLOEXEC", Const, 0, ""},
    +		{"F_EXLCK", Const, 0, ""},
    +		{"F_FINDSIGS", Const, 16, ""},
    +		{"F_FLUSH_DATA", Const, 0, ""},
    +		{"F_FREEZE_FS", Const, 0, ""},
    +		{"F_FSCTL", Const, 1, ""},
    +		{"F_FSDIRMASK", Const, 1, ""},
    +		{"F_FSIN", Const, 1, ""},
    +		{"F_FSINOUT", Const, 1, ""},
    +		{"F_FSOUT", Const, 1, ""},
    +		{"F_FSPRIV", Const, 1, ""},
    +		{"F_FSVOID", Const, 1, ""},
    +		{"F_FULLFSYNC", Const, 0, ""},
    +		{"F_GETCODEDIR", Const, 16, ""},
    +		{"F_GETFD", Const, 0, ""},
    +		{"F_GETFL", Const, 0, ""},
    +		{"F_GETLEASE", Const, 0, ""},
    +		{"F_GETLK", Const, 0, ""},
    +		{"F_GETLK64", Const, 0, ""},
    +		{"F_GETLKPID", Const, 0, ""},
    +		{"F_GETNOSIGPIPE", Const, 0, ""},
    +		{"F_GETOWN", Const, 0, ""},
    +		{"F_GETOWN_EX", Const, 0, ""},
    +		{"F_GETPATH", Const, 0, ""},
    +		{"F_GETPATH_MTMINFO", Const, 0, ""},
    +		{"F_GETPIPE_SZ", Const, 0, ""},
    +		{"F_GETPROTECTIONCLASS", Const, 0, ""},
    +		{"F_GETPROTECTIONLEVEL", Const, 16, ""},
    +		{"F_GETSIG", Const, 0, ""},
    +		{"F_GLOBAL_NOCACHE", Const, 0, ""},
    +		{"F_LOCK", Const, 0, ""},
    +		{"F_LOG2PHYS", Const, 0, ""},
    +		{"F_LOG2PHYS_EXT", Const, 0, ""},
    +		{"F_MARKDEPENDENCY", Const, 0, ""},
    +		{"F_MAXFD", Const, 1, ""},
    +		{"F_NOCACHE", Const, 0, ""},
    +		{"F_NODIRECT", Const, 0, ""},
    +		{"F_NOTIFY", Const, 0, ""},
    +		{"F_OGETLK", Const, 0, ""},
    +		{"F_OK", Const, 0, ""},
    +		{"F_OSETLK", Const, 0, ""},
    +		{"F_OSETLKW", Const, 0, ""},
    +		{"F_PARAM_MASK", Const, 1, ""},
    +		{"F_PARAM_MAX", Const, 1, ""},
    +		{"F_PATHPKG_CHECK", Const, 0, ""},
    +		{"F_PEOFPOSMODE", Const, 0, ""},
    +		{"F_PREALLOCATE", Const, 0, ""},
    +		{"F_RDADVISE", Const, 0, ""},
    +		{"F_RDAHEAD", Const, 0, ""},
    +		{"F_RDLCK", Const, 0, ""},
    +		{"F_READAHEAD", Const, 0, ""},
    +		{"F_READBOOTSTRAP", Const, 0, ""},
    +		{"F_SETBACKINGSTORE", Const, 0, ""},
    +		{"F_SETFD", Const, 0, ""},
    +		{"F_SETFL", Const, 0, ""},
    +		{"F_SETLEASE", Const, 0, ""},
    +		{"F_SETLK", Const, 0, ""},
    +		{"F_SETLK64", Const, 0, ""},
    +		{"F_SETLKW", Const, 0, ""},
    +		{"F_SETLKW64", Const, 0, ""},
    +		{"F_SETLKWTIMEOUT", Const, 16, ""},
    +		{"F_SETLK_REMOTE", Const, 0, ""},
    +		{"F_SETNOSIGPIPE", Const, 0, ""},
    +		{"F_SETOWN", Const, 0, ""},
    +		{"F_SETOWN_EX", Const, 0, ""},
    +		{"F_SETPIPE_SZ", Const, 0, ""},
    +		{"F_SETPROTECTIONCLASS", Const, 0, ""},
    +		{"F_SETSIG", Const, 0, ""},
    +		{"F_SETSIZE", Const, 0, ""},
    +		{"F_SHLCK", Const, 0, ""},
    +		{"F_SINGLE_WRITER", Const, 16, ""},
    +		{"F_TEST", Const, 0, ""},
    +		{"F_THAW_FS", Const, 0, ""},
    +		{"F_TLOCK", Const, 0, ""},
    +		{"F_TRANSCODEKEY", Const, 16, ""},
    +		{"F_ULOCK", Const, 0, ""},
    +		{"F_UNLCK", Const, 0, ""},
    +		{"F_UNLCKSYS", Const, 0, ""},
    +		{"F_VOLPOSMODE", Const, 0, ""},
    +		{"F_WRITEBOOTSTRAP", Const, 0, ""},
    +		{"F_WRLCK", Const, 0, ""},
    +		{"Faccessat", Func, 0, "func(dirfd int, path string, mode uint32, flags int) (err error)"},
    +		{"Fallocate", Func, 0, "func(fd int, mode uint32, off int64, len int64) (err error)"},
    +		{"Fbootstraptransfer_t", Type, 0, ""},
    +		{"Fbootstraptransfer_t.Buffer", Field, 0, ""},
    +		{"Fbootstraptransfer_t.Length", Field, 0, ""},
    +		{"Fbootstraptransfer_t.Offset", Field, 0, ""},
    +		{"Fchdir", Func, 0, "func(fd int) (err error)"},
    +		{"Fchflags", Func, 0, ""},
    +		{"Fchmod", Func, 0, "func(fd int, mode uint32) (err error)"},
    +		{"Fchmodat", Func, 0, "func(dirfd int, path string, mode uint32, flags int) error"},
    +		{"Fchown", Func, 0, "func(fd int, uid int, gid int) (err error)"},
    +		{"Fchownat", Func, 0, "func(dirfd int, path string, uid int, gid int, flags int) (err error)"},
    +		{"FcntlFlock", Func, 3, "func(fd uintptr, cmd int, lk *Flock_t) error"},
    +		{"FdSet", Type, 0, ""},
    +		{"FdSet.Bits", Field, 0, ""},
    +		{"FdSet.X__fds_bits", Field, 0, ""},
    +		{"Fdatasync", Func, 0, "func(fd int) (err error)"},
    +		{"FileNotifyInformation", Type, 0, ""},
    +		{"FileNotifyInformation.Action", Field, 0, ""},
    +		{"FileNotifyInformation.FileName", Field, 0, ""},
    +		{"FileNotifyInformation.FileNameLength", Field, 0, ""},
    +		{"FileNotifyInformation.NextEntryOffset", Field, 0, ""},
    +		{"Filetime", Type, 0, ""},
    +		{"Filetime.HighDateTime", Field, 0, ""},
    +		{"Filetime.LowDateTime", Field, 0, ""},
    +		{"FindClose", Func, 0, ""},
    +		{"FindFirstFile", Func, 0, ""},
    +		{"FindNextFile", Func, 0, ""},
    +		{"Flock", Func, 0, "func(fd int, how int) (err error)"},
    +		{"Flock_t", Type, 0, ""},
    +		{"Flock_t.Len", Field, 0, ""},
    +		{"Flock_t.Pad_cgo_0", Field, 0, ""},
    +		{"Flock_t.Pad_cgo_1", Field, 3, ""},
    +		{"Flock_t.Pid", Field, 0, ""},
    +		{"Flock_t.Start", Field, 0, ""},
    +		{"Flock_t.Sysid", Field, 0, ""},
    +		{"Flock_t.Type", Field, 0, ""},
    +		{"Flock_t.Whence", Field, 0, ""},
    +		{"FlushBpf", Func, 0, ""},
    +		{"FlushFileBuffers", Func, 0, ""},
    +		{"FlushViewOfFile", Func, 0, ""},
    +		{"ForkExec", Func, 0, "func(argv0 string, argv []string, attr *ProcAttr) (pid int, err error)"},
    +		{"ForkLock", Var, 0, ""},
    +		{"FormatMessage", Func, 0, ""},
    +		{"Fpathconf", Func, 0, ""},
    +		{"FreeAddrInfoW", Func, 1, ""},
    +		{"FreeEnvironmentStrings", Func, 0, ""},
    +		{"FreeLibrary", Func, 0, ""},
    +		{"Fsid", Type, 0, ""},
    +		{"Fsid.Val", Field, 0, ""},
    +		{"Fsid.X__fsid_val", Field, 2, ""},
    +		{"Fsid.X__val", Field, 0, ""},
    +		{"Fstat", Func, 0, "func(fd int, stat *Stat_t) (err error)"},
    +		{"Fstatat", Func, 12, ""},
    +		{"Fstatfs", Func, 0, "func(fd int, buf *Statfs_t) (err error)"},
    +		{"Fstore_t", Type, 0, ""},
    +		{"Fstore_t.Bytesalloc", Field, 0, ""},
    +		{"Fstore_t.Flags", Field, 0, ""},
    +		{"Fstore_t.Length", Field, 0, ""},
    +		{"Fstore_t.Offset", Field, 0, ""},
    +		{"Fstore_t.Posmode", Field, 0, ""},
    +		{"Fsync", Func, 0, "func(fd int) (err error)"},
    +		{"Ftruncate", Func, 0, "func(fd int, length int64) (err error)"},
    +		{"FullPath", Func, 4, ""},
    +		{"Futimes", Func, 0, "func(fd int, tv []Timeval) (err error)"},
    +		{"Futimesat", Func, 0, "func(dirfd int, path string, tv []Timeval) (err error)"},
    +		{"GENERIC_ALL", Const, 0, ""},
    +		{"GENERIC_EXECUTE", Const, 0, ""},
    +		{"GENERIC_READ", Const, 0, ""},
    +		{"GENERIC_WRITE", Const, 0, ""},
    +		{"GUID", Type, 1, ""},
    +		{"GUID.Data1", Field, 1, ""},
    +		{"GUID.Data2", Field, 1, ""},
    +		{"GUID.Data3", Field, 1, ""},
    +		{"GUID.Data4", Field, 1, ""},
    +		{"GetAcceptExSockaddrs", Func, 0, ""},
    +		{"GetAdaptersInfo", Func, 0, ""},
    +		{"GetAddrInfoW", Func, 1, ""},
    +		{"GetCommandLine", Func, 0, ""},
    +		{"GetComputerName", Func, 0, ""},
    +		{"GetConsoleMode", Func, 1, ""},
    +		{"GetCurrentDirectory", Func, 0, ""},
    +		{"GetCurrentProcess", Func, 0, ""},
    +		{"GetEnvironmentStrings", Func, 0, ""},
    +		{"GetEnvironmentVariable", Func, 0, ""},
    +		{"GetExitCodeProcess", Func, 0, ""},
    +		{"GetFileAttributes", Func, 0, ""},
    +		{"GetFileAttributesEx", Func, 0, ""},
    +		{"GetFileExInfoStandard", Const, 0, ""},
    +		{"GetFileExMaxInfoLevel", Const, 0, ""},
    +		{"GetFileInformationByHandle", Func, 0, ""},
    +		{"GetFileType", Func, 0, ""},
    +		{"GetFullPathName", Func, 0, ""},
    +		{"GetHostByName", Func, 0, ""},
    +		{"GetIfEntry", Func, 0, ""},
    +		{"GetLastError", Func, 0, ""},
    +		{"GetLengthSid", Func, 0, ""},
    +		{"GetLongPathName", Func, 0, ""},
    +		{"GetProcAddress", Func, 0, ""},
    +		{"GetProcessTimes", Func, 0, ""},
    +		{"GetProtoByName", Func, 0, ""},
    +		{"GetQueuedCompletionStatus", Func, 0, ""},
    +		{"GetServByName", Func, 0, ""},
    +		{"GetShortPathName", Func, 0, ""},
    +		{"GetStartupInfo", Func, 0, ""},
    +		{"GetStdHandle", Func, 0, ""},
    +		{"GetSystemTimeAsFileTime", Func, 0, ""},
    +		{"GetTempPath", Func, 0, ""},
    +		{"GetTimeZoneInformation", Func, 0, ""},
    +		{"GetTokenInformation", Func, 0, ""},
    +		{"GetUserNameEx", Func, 0, ""},
    +		{"GetUserProfileDirectory", Func, 0, ""},
    +		{"GetVersion", Func, 0, ""},
    +		{"Getcwd", Func, 0, "func(buf []byte) (n int, err error)"},
    +		{"Getdents", Func, 0, "func(fd int, buf []byte) (n int, err error)"},
    +		{"Getdirentries", Func, 0, ""},
    +		{"Getdtablesize", Func, 0, ""},
    +		{"Getegid", Func, 0, "func() (egid int)"},
    +		{"Getenv", Func, 0, "func(key string) (value string, found bool)"},
    +		{"Geteuid", Func, 0, "func() (euid int)"},
    +		{"Getfsstat", Func, 0, ""},
    +		{"Getgid", Func, 0, "func() (gid int)"},
    +		{"Getgroups", Func, 0, "func() (gids []int, err error)"},
    +		{"Getpagesize", Func, 0, "func() int"},
    +		{"Getpeername", Func, 0, "func(fd int) (sa Sockaddr, err error)"},
    +		{"Getpgid", Func, 0, "func(pid int) (pgid int, err error)"},
    +		{"Getpgrp", Func, 0, "func() (pid int)"},
    +		{"Getpid", Func, 0, "func() (pid int)"},
    +		{"Getppid", Func, 0, "func() (ppid int)"},
    +		{"Getpriority", Func, 0, "func(which int, who int) (prio int, err error)"},
    +		{"Getrlimit", Func, 0, "func(resource int, rlim *Rlimit) (err error)"},
    +		{"Getrusage", Func, 0, "func(who int, rusage *Rusage) (err error)"},
    +		{"Getsid", Func, 0, ""},
    +		{"Getsockname", Func, 0, "func(fd int) (sa Sockaddr, err error)"},
    +		{"Getsockopt", Func, 1, ""},
    +		{"GetsockoptByte", Func, 0, ""},
    +		{"GetsockoptICMPv6Filter", Func, 2, "func(fd int, level int, opt int) (*ICMPv6Filter, error)"},
    +		{"GetsockoptIPMreq", Func, 0, "func(fd int, level int, opt int) (*IPMreq, error)"},
    +		{"GetsockoptIPMreqn", Func, 0, "func(fd int, level int, opt int) (*IPMreqn, error)"},
    +		{"GetsockoptIPv6MTUInfo", Func, 2, "func(fd int, level int, opt int) (*IPv6MTUInfo, error)"},
    +		{"GetsockoptIPv6Mreq", Func, 0, "func(fd int, level int, opt int) (*IPv6Mreq, error)"},
    +		{"GetsockoptInet4Addr", Func, 0, "func(fd int, level int, opt int) (value [4]byte, err error)"},
    +		{"GetsockoptInt", Func, 0, "func(fd int, level int, opt int) (value int, err error)"},
    +		{"GetsockoptUcred", Func, 1, "func(fd int, level int, opt int) (*Ucred, error)"},
    +		{"Gettid", Func, 0, "func() (tid int)"},
    +		{"Gettimeofday", Func, 0, "func(tv *Timeval) (err error)"},
    +		{"Getuid", Func, 0, "func() (uid int)"},
    +		{"Getwd", Func, 0, "func() (wd string, err error)"},
    +		{"Getxattr", Func, 1, "func(path string, attr string, dest []byte) (sz int, err error)"},
    +		{"HANDLE_FLAG_INHERIT", Const, 0, ""},
    +		{"HKEY_CLASSES_ROOT", Const, 0, ""},
    +		{"HKEY_CURRENT_CONFIG", Const, 0, ""},
    +		{"HKEY_CURRENT_USER", Const, 0, ""},
    +		{"HKEY_DYN_DATA", Const, 0, ""},
    +		{"HKEY_LOCAL_MACHINE", Const, 0, ""},
    +		{"HKEY_PERFORMANCE_DATA", Const, 0, ""},
    +		{"HKEY_USERS", Const, 0, ""},
    +		{"HUPCL", Const, 0, ""},
    +		{"Handle", Type, 0, ""},
    +		{"Hostent", Type, 0, ""},
    +		{"Hostent.AddrList", Field, 0, ""},
    +		{"Hostent.AddrType", Field, 0, ""},
    +		{"Hostent.Aliases", Field, 0, ""},
    +		{"Hostent.Length", Field, 0, ""},
    +		{"Hostent.Name", Field, 0, ""},
    +		{"ICANON", Const, 0, ""},
    +		{"ICMP6_FILTER", Const, 2, ""},
    +		{"ICMPV6_FILTER", Const, 2, ""},
    +		{"ICMPv6Filter", Type, 2, ""},
    +		{"ICMPv6Filter.Data", Field, 2, ""},
    +		{"ICMPv6Filter.Filt", Field, 2, ""},
    +		{"ICRNL", Const, 0, ""},
    +		{"IEXTEN", Const, 0, ""},
    +		{"IFAN_ARRIVAL", Const, 1, ""},
    +		{"IFAN_DEPARTURE", Const, 1, ""},
    +		{"IFA_ADDRESS", Const, 0, ""},
    +		{"IFA_ANYCAST", Const, 0, ""},
    +		{"IFA_BROADCAST", Const, 0, ""},
    +		{"IFA_CACHEINFO", Const, 0, ""},
    +		{"IFA_F_DADFAILED", Const, 0, ""},
    +		{"IFA_F_DEPRECATED", Const, 0, ""},
    +		{"IFA_F_HOMEADDRESS", Const, 0, ""},
    +		{"IFA_F_NODAD", Const, 0, ""},
    +		{"IFA_F_OPTIMISTIC", Const, 0, ""},
    +		{"IFA_F_PERMANENT", Const, 0, ""},
    +		{"IFA_F_SECONDARY", Const, 0, ""},
    +		{"IFA_F_TEMPORARY", Const, 0, ""},
    +		{"IFA_F_TENTATIVE", Const, 0, ""},
    +		{"IFA_LABEL", Const, 0, ""},
    +		{"IFA_LOCAL", Const, 0, ""},
    +		{"IFA_MAX", Const, 0, ""},
    +		{"IFA_MULTICAST", Const, 0, ""},
    +		{"IFA_ROUTE", Const, 1, ""},
    +		{"IFA_UNSPEC", Const, 0, ""},
    +		{"IFF_ALLMULTI", Const, 0, ""},
    +		{"IFF_ALTPHYS", Const, 0, ""},
    +		{"IFF_AUTOMEDIA", Const, 0, ""},
    +		{"IFF_BROADCAST", Const, 0, ""},
    +		{"IFF_CANTCHANGE", Const, 0, ""},
    +		{"IFF_CANTCONFIG", Const, 1, ""},
    +		{"IFF_DEBUG", Const, 0, ""},
    +		{"IFF_DRV_OACTIVE", Const, 0, ""},
    +		{"IFF_DRV_RUNNING", Const, 0, ""},
    +		{"IFF_DYING", Const, 0, ""},
    +		{"IFF_DYNAMIC", Const, 0, ""},
    +		{"IFF_LINK0", Const, 0, ""},
    +		{"IFF_LINK1", Const, 0, ""},
    +		{"IFF_LINK2", Const, 0, ""},
    +		{"IFF_LOOPBACK", Const, 0, ""},
    +		{"IFF_MASTER", Const, 0, ""},
    +		{"IFF_MONITOR", Const, 0, ""},
    +		{"IFF_MULTICAST", Const, 0, ""},
    +		{"IFF_NOARP", Const, 0, ""},
    +		{"IFF_NOTRAILERS", Const, 0, ""},
    +		{"IFF_NO_PI", Const, 0, ""},
    +		{"IFF_OACTIVE", Const, 0, ""},
    +		{"IFF_ONE_QUEUE", Const, 0, ""},
    +		{"IFF_POINTOPOINT", Const, 0, ""},
    +		{"IFF_POINTTOPOINT", Const, 0, ""},
    +		{"IFF_PORTSEL", Const, 0, ""},
    +		{"IFF_PPROMISC", Const, 0, ""},
    +		{"IFF_PROMISC", Const, 0, ""},
    +		{"IFF_RENAMING", Const, 0, ""},
    +		{"IFF_RUNNING", Const, 0, ""},
    +		{"IFF_SIMPLEX", Const, 0, ""},
    +		{"IFF_SLAVE", Const, 0, ""},
    +		{"IFF_SMART", Const, 0, ""},
    +		{"IFF_STATICARP", Const, 0, ""},
    +		{"IFF_TAP", Const, 0, ""},
    +		{"IFF_TUN", Const, 0, ""},
    +		{"IFF_TUN_EXCL", Const, 0, ""},
    +		{"IFF_UP", Const, 0, ""},
    +		{"IFF_VNET_HDR", Const, 0, ""},
    +		{"IFLA_ADDRESS", Const, 0, ""},
    +		{"IFLA_BROADCAST", Const, 0, ""},
    +		{"IFLA_COST", Const, 0, ""},
    +		{"IFLA_IFALIAS", Const, 0, ""},
    +		{"IFLA_IFNAME", Const, 0, ""},
    +		{"IFLA_LINK", Const, 0, ""},
    +		{"IFLA_LINKINFO", Const, 0, ""},
    +		{"IFLA_LINKMODE", Const, 0, ""},
    +		{"IFLA_MAP", Const, 0, ""},
    +		{"IFLA_MASTER", Const, 0, ""},
    +		{"IFLA_MAX", Const, 0, ""},
    +		{"IFLA_MTU", Const, 0, ""},
    +		{"IFLA_NET_NS_PID", Const, 0, ""},
    +		{"IFLA_OPERSTATE", Const, 0, ""},
    +		{"IFLA_PRIORITY", Const, 0, ""},
    +		{"IFLA_PROTINFO", Const, 0, ""},
    +		{"IFLA_QDISC", Const, 0, ""},
    +		{"IFLA_STATS", Const, 0, ""},
    +		{"IFLA_TXQLEN", Const, 0, ""},
    +		{"IFLA_UNSPEC", Const, 0, ""},
    +		{"IFLA_WEIGHT", Const, 0, ""},
    +		{"IFLA_WIRELESS", Const, 0, ""},
    +		{"IFNAMSIZ", Const, 0, ""},
    +		{"IFT_1822", Const, 0, ""},
    +		{"IFT_A12MPPSWITCH", Const, 0, ""},
    +		{"IFT_AAL2", Const, 0, ""},
    +		{"IFT_AAL5", Const, 0, ""},
    +		{"IFT_ADSL", Const, 0, ""},
    +		{"IFT_AFLANE8023", Const, 0, ""},
    +		{"IFT_AFLANE8025", Const, 0, ""},
    +		{"IFT_ARAP", Const, 0, ""},
    +		{"IFT_ARCNET", Const, 0, ""},
    +		{"IFT_ARCNETPLUS", Const, 0, ""},
    +		{"IFT_ASYNC", Const, 0, ""},
    +		{"IFT_ATM", Const, 0, ""},
    +		{"IFT_ATMDXI", Const, 0, ""},
    +		{"IFT_ATMFUNI", Const, 0, ""},
    +		{"IFT_ATMIMA", Const, 0, ""},
    +		{"IFT_ATMLOGICAL", Const, 0, ""},
    +		{"IFT_ATMRADIO", Const, 0, ""},
    +		{"IFT_ATMSUBINTERFACE", Const, 0, ""},
    +		{"IFT_ATMVCIENDPT", Const, 0, ""},
    +		{"IFT_ATMVIRTUAL", Const, 0, ""},
    +		{"IFT_BGPPOLICYACCOUNTING", Const, 0, ""},
    +		{"IFT_BLUETOOTH", Const, 1, ""},
    +		{"IFT_BRIDGE", Const, 0, ""},
    +		{"IFT_BSC", Const, 0, ""},
    +		{"IFT_CARP", Const, 0, ""},
    +		{"IFT_CCTEMUL", Const, 0, ""},
    +		{"IFT_CELLULAR", Const, 0, ""},
    +		{"IFT_CEPT", Const, 0, ""},
    +		{"IFT_CES", Const, 0, ""},
    +		{"IFT_CHANNEL", Const, 0, ""},
    +		{"IFT_CNR", Const, 0, ""},
    +		{"IFT_COFFEE", Const, 0, ""},
    +		{"IFT_COMPOSITELINK", Const, 0, ""},
    +		{"IFT_DCN", Const, 0, ""},
    +		{"IFT_DIGITALPOWERLINE", Const, 0, ""},
    +		{"IFT_DIGITALWRAPPEROVERHEADCHANNEL", Const, 0, ""},
    +		{"IFT_DLSW", Const, 0, ""},
    +		{"IFT_DOCSCABLEDOWNSTREAM", Const, 0, ""},
    +		{"IFT_DOCSCABLEMACLAYER", Const, 0, ""},
    +		{"IFT_DOCSCABLEUPSTREAM", Const, 0, ""},
    +		{"IFT_DOCSCABLEUPSTREAMCHANNEL", Const, 1, ""},
    +		{"IFT_DS0", Const, 0, ""},
    +		{"IFT_DS0BUNDLE", Const, 0, ""},
    +		{"IFT_DS1FDL", Const, 0, ""},
    +		{"IFT_DS3", Const, 0, ""},
    +		{"IFT_DTM", Const, 0, ""},
    +		{"IFT_DUMMY", Const, 1, ""},
    +		{"IFT_DVBASILN", Const, 0, ""},
    +		{"IFT_DVBASIOUT", Const, 0, ""},
    +		{"IFT_DVBRCCDOWNSTREAM", Const, 0, ""},
    +		{"IFT_DVBRCCMACLAYER", Const, 0, ""},
    +		{"IFT_DVBRCCUPSTREAM", Const, 0, ""},
    +		{"IFT_ECONET", Const, 1, ""},
    +		{"IFT_ENC", Const, 0, ""},
    +		{"IFT_EON", Const, 0, ""},
    +		{"IFT_EPLRS", Const, 0, ""},
    +		{"IFT_ESCON", Const, 0, ""},
    +		{"IFT_ETHER", Const, 0, ""},
    +		{"IFT_FAITH", Const, 0, ""},
    +		{"IFT_FAST", Const, 0, ""},
    +		{"IFT_FASTETHER", Const, 0, ""},
    +		{"IFT_FASTETHERFX", Const, 0, ""},
    +		{"IFT_FDDI", Const, 0, ""},
    +		{"IFT_FIBRECHANNEL", Const, 0, ""},
    +		{"IFT_FRAMERELAYINTERCONNECT", Const, 0, ""},
    +		{"IFT_FRAMERELAYMPI", Const, 0, ""},
    +		{"IFT_FRDLCIENDPT", Const, 0, ""},
    +		{"IFT_FRELAY", Const, 0, ""},
    +		{"IFT_FRELAYDCE", Const, 0, ""},
    +		{"IFT_FRF16MFRBUNDLE", Const, 0, ""},
    +		{"IFT_FRFORWARD", Const, 0, ""},
    +		{"IFT_G703AT2MB", Const, 0, ""},
    +		{"IFT_G703AT64K", Const, 0, ""},
    +		{"IFT_GIF", Const, 0, ""},
    +		{"IFT_GIGABITETHERNET", Const, 0, ""},
    +		{"IFT_GR303IDT", Const, 0, ""},
    +		{"IFT_GR303RDT", Const, 0, ""},
    +		{"IFT_H323GATEKEEPER", Const, 0, ""},
    +		{"IFT_H323PROXY", Const, 0, ""},
    +		{"IFT_HDH1822", Const, 0, ""},
    +		{"IFT_HDLC", Const, 0, ""},
    +		{"IFT_HDSL2", Const, 0, ""},
    +		{"IFT_HIPERLAN2", Const, 0, ""},
    +		{"IFT_HIPPI", Const, 0, ""},
    +		{"IFT_HIPPIINTERFACE", Const, 0, ""},
    +		{"IFT_HOSTPAD", Const, 0, ""},
    +		{"IFT_HSSI", Const, 0, ""},
    +		{"IFT_HY", Const, 0, ""},
    +		{"IFT_IBM370PARCHAN", Const, 0, ""},
    +		{"IFT_IDSL", Const, 0, ""},
    +		{"IFT_IEEE1394", Const, 0, ""},
    +		{"IFT_IEEE80211", Const, 0, ""},
    +		{"IFT_IEEE80212", Const, 0, ""},
    +		{"IFT_IEEE8023ADLAG", Const, 0, ""},
    +		{"IFT_IFGSN", Const, 0, ""},
    +		{"IFT_IMT", Const, 0, ""},
    +		{"IFT_INFINIBAND", Const, 1, ""},
    +		{"IFT_INTERLEAVE", Const, 0, ""},
    +		{"IFT_IP", Const, 0, ""},
    +		{"IFT_IPFORWARD", Const, 0, ""},
    +		{"IFT_IPOVERATM", Const, 0, ""},
    +		{"IFT_IPOVERCDLC", Const, 0, ""},
    +		{"IFT_IPOVERCLAW", Const, 0, ""},
    +		{"IFT_IPSWITCH", Const, 0, ""},
    +		{"IFT_IPXIP", Const, 0, ""},
    +		{"IFT_ISDN", Const, 0, ""},
    +		{"IFT_ISDNBASIC", Const, 0, ""},
    +		{"IFT_ISDNPRIMARY", Const, 0, ""},
    +		{"IFT_ISDNS", Const, 0, ""},
    +		{"IFT_ISDNU", Const, 0, ""},
    +		{"IFT_ISO88022LLC", Const, 0, ""},
    +		{"IFT_ISO88023", Const, 0, ""},
    +		{"IFT_ISO88024", Const, 0, ""},
    +		{"IFT_ISO88025", Const, 0, ""},
    +		{"IFT_ISO88025CRFPINT", Const, 0, ""},
    +		{"IFT_ISO88025DTR", Const, 0, ""},
    +		{"IFT_ISO88025FIBER", Const, 0, ""},
    +		{"IFT_ISO88026", Const, 0, ""},
    +		{"IFT_ISUP", Const, 0, ""},
    +		{"IFT_L2VLAN", Const, 0, ""},
    +		{"IFT_L3IPVLAN", Const, 0, ""},
    +		{"IFT_L3IPXVLAN", Const, 0, ""},
    +		{"IFT_LAPB", Const, 0, ""},
    +		{"IFT_LAPD", Const, 0, ""},
    +		{"IFT_LAPF", Const, 0, ""},
    +		{"IFT_LINEGROUP", Const, 1, ""},
    +		{"IFT_LOCALTALK", Const, 0, ""},
    +		{"IFT_LOOP", Const, 0, ""},
    +		{"IFT_MEDIAMAILOVERIP", Const, 0, ""},
    +		{"IFT_MFSIGLINK", Const, 0, ""},
    +		{"IFT_MIOX25", Const, 0, ""},
    +		{"IFT_MODEM", Const, 0, ""},
    +		{"IFT_MPC", Const, 0, ""},
    +		{"IFT_MPLS", Const, 0, ""},
    +		{"IFT_MPLSTUNNEL", Const, 0, ""},
    +		{"IFT_MSDSL", Const, 0, ""},
    +		{"IFT_MVL", Const, 0, ""},
    +		{"IFT_MYRINET", Const, 0, ""},
    +		{"IFT_NFAS", Const, 0, ""},
    +		{"IFT_NSIP", Const, 0, ""},
    +		{"IFT_OPTICALCHANNEL", Const, 0, ""},
    +		{"IFT_OPTICALTRANSPORT", Const, 0, ""},
    +		{"IFT_OTHER", Const, 0, ""},
    +		{"IFT_P10", Const, 0, ""},
    +		{"IFT_P80", Const, 0, ""},
    +		{"IFT_PARA", Const, 0, ""},
    +		{"IFT_PDP", Const, 0, ""},
    +		{"IFT_PFLOG", Const, 0, ""},
    +		{"IFT_PFLOW", Const, 1, ""},
    +		{"IFT_PFSYNC", Const, 0, ""},
    +		{"IFT_PLC", Const, 0, ""},
    +		{"IFT_PON155", Const, 1, ""},
    +		{"IFT_PON622", Const, 1, ""},
    +		{"IFT_POS", Const, 0, ""},
    +		{"IFT_PPP", Const, 0, ""},
    +		{"IFT_PPPMULTILINKBUNDLE", Const, 0, ""},
    +		{"IFT_PROPATM", Const, 1, ""},
    +		{"IFT_PROPBWAP2MP", Const, 0, ""},
    +		{"IFT_PROPCNLS", Const, 0, ""},
    +		{"IFT_PROPDOCSWIRELESSDOWNSTREAM", Const, 0, ""},
    +		{"IFT_PROPDOCSWIRELESSMACLAYER", Const, 0, ""},
    +		{"IFT_PROPDOCSWIRELESSUPSTREAM", Const, 0, ""},
    +		{"IFT_PROPMUX", Const, 0, ""},
    +		{"IFT_PROPVIRTUAL", Const, 0, ""},
    +		{"IFT_PROPWIRELESSP2P", Const, 0, ""},
    +		{"IFT_PTPSERIAL", Const, 0, ""},
    +		{"IFT_PVC", Const, 0, ""},
    +		{"IFT_Q2931", Const, 1, ""},
    +		{"IFT_QLLC", Const, 0, ""},
    +		{"IFT_RADIOMAC", Const, 0, ""},
    +		{"IFT_RADSL", Const, 0, ""},
    +		{"IFT_REACHDSL", Const, 0, ""},
    +		{"IFT_RFC1483", Const, 0, ""},
    +		{"IFT_RS232", Const, 0, ""},
    +		{"IFT_RSRB", Const, 0, ""},
    +		{"IFT_SDLC", Const, 0, ""},
    +		{"IFT_SDSL", Const, 0, ""},
    +		{"IFT_SHDSL", Const, 0, ""},
    +		{"IFT_SIP", Const, 0, ""},
    +		{"IFT_SIPSIG", Const, 1, ""},
    +		{"IFT_SIPTG", Const, 1, ""},
    +		{"IFT_SLIP", Const, 0, ""},
    +		{"IFT_SMDSDXI", Const, 0, ""},
    +		{"IFT_SMDSICIP", Const, 0, ""},
    +		{"IFT_SONET", Const, 0, ""},
    +		{"IFT_SONETOVERHEADCHANNEL", Const, 0, ""},
    +		{"IFT_SONETPATH", Const, 0, ""},
    +		{"IFT_SONETVT", Const, 0, ""},
    +		{"IFT_SRP", Const, 0, ""},
    +		{"IFT_SS7SIGLINK", Const, 0, ""},
    +		{"IFT_STACKTOSTACK", Const, 0, ""},
    +		{"IFT_STARLAN", Const, 0, ""},
    +		{"IFT_STF", Const, 0, ""},
    +		{"IFT_T1", Const, 0, ""},
    +		{"IFT_TDLC", Const, 0, ""},
    +		{"IFT_TELINK", Const, 1, ""},
    +		{"IFT_TERMPAD", Const, 0, ""},
    +		{"IFT_TR008", Const, 0, ""},
    +		{"IFT_TRANSPHDLC", Const, 0, ""},
    +		{"IFT_TUNNEL", Const, 0, ""},
    +		{"IFT_ULTRA", Const, 0, ""},
    +		{"IFT_USB", Const, 0, ""},
    +		{"IFT_V11", Const, 0, ""},
    +		{"IFT_V35", Const, 0, ""},
    +		{"IFT_V36", Const, 0, ""},
    +		{"IFT_V37", Const, 0, ""},
    +		{"IFT_VDSL", Const, 0, ""},
    +		{"IFT_VIRTUALIPADDRESS", Const, 0, ""},
    +		{"IFT_VIRTUALTG", Const, 1, ""},
    +		{"IFT_VOICEDID", Const, 1, ""},
    +		{"IFT_VOICEEM", Const, 0, ""},
    +		{"IFT_VOICEEMFGD", Const, 1, ""},
    +		{"IFT_VOICEENCAP", Const, 0, ""},
    +		{"IFT_VOICEFGDEANA", Const, 1, ""},
    +		{"IFT_VOICEFXO", Const, 0, ""},
    +		{"IFT_VOICEFXS", Const, 0, ""},
    +		{"IFT_VOICEOVERATM", Const, 0, ""},
    +		{"IFT_VOICEOVERCABLE", Const, 1, ""},
    +		{"IFT_VOICEOVERFRAMERELAY", Const, 0, ""},
    +		{"IFT_VOICEOVERIP", Const, 0, ""},
    +		{"IFT_X213", Const, 0, ""},
    +		{"IFT_X25", Const, 0, ""},
    +		{"IFT_X25DDN", Const, 0, ""},
    +		{"IFT_X25HUNTGROUP", Const, 0, ""},
    +		{"IFT_X25MLP", Const, 0, ""},
    +		{"IFT_X25PLE", Const, 0, ""},
    +		{"IFT_XETHER", Const, 0, ""},
    +		{"IGNBRK", Const, 0, ""},
    +		{"IGNCR", Const, 0, ""},
    +		{"IGNORE", Const, 0, ""},
    +		{"IGNPAR", Const, 0, ""},
    +		{"IMAXBEL", Const, 0, ""},
    +		{"INFINITE", Const, 0, ""},
    +		{"INLCR", Const, 0, ""},
    +		{"INPCK", Const, 0, ""},
    +		{"INVALID_FILE_ATTRIBUTES", Const, 0, ""},
    +		{"IN_ACCESS", Const, 0, ""},
    +		{"IN_ALL_EVENTS", Const, 0, ""},
    +		{"IN_ATTRIB", Const, 0, ""},
    +		{"IN_CLASSA_HOST", Const, 0, ""},
    +		{"IN_CLASSA_MAX", Const, 0, ""},
    +		{"IN_CLASSA_NET", Const, 0, ""},
    +		{"IN_CLASSA_NSHIFT", Const, 0, ""},
    +		{"IN_CLASSB_HOST", Const, 0, ""},
    +		{"IN_CLASSB_MAX", Const, 0, ""},
    +		{"IN_CLASSB_NET", Const, 0, ""},
    +		{"IN_CLASSB_NSHIFT", Const, 0, ""},
    +		{"IN_CLASSC_HOST", Const, 0, ""},
    +		{"IN_CLASSC_NET", Const, 0, ""},
    +		{"IN_CLASSC_NSHIFT", Const, 0, ""},
    +		{"IN_CLASSD_HOST", Const, 0, ""},
    +		{"IN_CLASSD_NET", Const, 0, ""},
    +		{"IN_CLASSD_NSHIFT", Const, 0, ""},
    +		{"IN_CLOEXEC", Const, 0, ""},
    +		{"IN_CLOSE", Const, 0, ""},
    +		{"IN_CLOSE_NOWRITE", Const, 0, ""},
    +		{"IN_CLOSE_WRITE", Const, 0, ""},
    +		{"IN_CREATE", Const, 0, ""},
    +		{"IN_DELETE", Const, 0, ""},
    +		{"IN_DELETE_SELF", Const, 0, ""},
    +		{"IN_DONT_FOLLOW", Const, 0, ""},
    +		{"IN_EXCL_UNLINK", Const, 0, ""},
    +		{"IN_IGNORED", Const, 0, ""},
    +		{"IN_ISDIR", Const, 0, ""},
    +		{"IN_LINKLOCALNETNUM", Const, 0, ""},
    +		{"IN_LOOPBACKNET", Const, 0, ""},
    +		{"IN_MASK_ADD", Const, 0, ""},
    +		{"IN_MODIFY", Const, 0, ""},
    +		{"IN_MOVE", Const, 0, ""},
    +		{"IN_MOVED_FROM", Const, 0, ""},
    +		{"IN_MOVED_TO", Const, 0, ""},
    +		{"IN_MOVE_SELF", Const, 0, ""},
    +		{"IN_NONBLOCK", Const, 0, ""},
    +		{"IN_ONESHOT", Const, 0, ""},
    +		{"IN_ONLYDIR", Const, 0, ""},
    +		{"IN_OPEN", Const, 0, ""},
    +		{"IN_Q_OVERFLOW", Const, 0, ""},
    +		{"IN_RFC3021_HOST", Const, 1, ""},
    +		{"IN_RFC3021_MASK", Const, 1, ""},
    +		{"IN_RFC3021_NET", Const, 1, ""},
    +		{"IN_RFC3021_NSHIFT", Const, 1, ""},
    +		{"IN_UNMOUNT", Const, 0, ""},
    +		{"IOC_IN", Const, 1, ""},
    +		{"IOC_INOUT", Const, 1, ""},
    +		{"IOC_OUT", Const, 1, ""},
    +		{"IOC_VENDOR", Const, 3, ""},
    +		{"IOC_WS2", Const, 1, ""},
    +		{"IO_REPARSE_TAG_SYMLINK", Const, 4, ""},
    +		{"IPMreq", Type, 0, ""},
    +		{"IPMreq.Interface", Field, 0, ""},
    +		{"IPMreq.Multiaddr", Field, 0, ""},
    +		{"IPMreqn", Type, 0, ""},
    +		{"IPMreqn.Address", Field, 0, ""},
    +		{"IPMreqn.Ifindex", Field, 0, ""},
    +		{"IPMreqn.Multiaddr", Field, 0, ""},
    +		{"IPPROTO_3PC", Const, 0, ""},
    +		{"IPPROTO_ADFS", Const, 0, ""},
    +		{"IPPROTO_AH", Const, 0, ""},
    +		{"IPPROTO_AHIP", Const, 0, ""},
    +		{"IPPROTO_APES", Const, 0, ""},
    +		{"IPPROTO_ARGUS", Const, 0, ""},
    +		{"IPPROTO_AX25", Const, 0, ""},
    +		{"IPPROTO_BHA", Const, 0, ""},
    +		{"IPPROTO_BLT", Const, 0, ""},
    +		{"IPPROTO_BRSATMON", Const, 0, ""},
    +		{"IPPROTO_CARP", Const, 0, ""},
    +		{"IPPROTO_CFTP", Const, 0, ""},
    +		{"IPPROTO_CHAOS", Const, 0, ""},
    +		{"IPPROTO_CMTP", Const, 0, ""},
    +		{"IPPROTO_COMP", Const, 0, ""},
    +		{"IPPROTO_CPHB", Const, 0, ""},
    +		{"IPPROTO_CPNX", Const, 0, ""},
    +		{"IPPROTO_DCCP", Const, 0, ""},
    +		{"IPPROTO_DDP", Const, 0, ""},
    +		{"IPPROTO_DGP", Const, 0, ""},
    +		{"IPPROTO_DIVERT", Const, 0, ""},
    +		{"IPPROTO_DIVERT_INIT", Const, 3, ""},
    +		{"IPPROTO_DIVERT_RESP", Const, 3, ""},
    +		{"IPPROTO_DONE", Const, 0, ""},
    +		{"IPPROTO_DSTOPTS", Const, 0, ""},
    +		{"IPPROTO_EGP", Const, 0, ""},
    +		{"IPPROTO_EMCON", Const, 0, ""},
    +		{"IPPROTO_ENCAP", Const, 0, ""},
    +		{"IPPROTO_EON", Const, 0, ""},
    +		{"IPPROTO_ESP", Const, 0, ""},
    +		{"IPPROTO_ETHERIP", Const, 0, ""},
    +		{"IPPROTO_FRAGMENT", Const, 0, ""},
    +		{"IPPROTO_GGP", Const, 0, ""},
    +		{"IPPROTO_GMTP", Const, 0, ""},
    +		{"IPPROTO_GRE", Const, 0, ""},
    +		{"IPPROTO_HELLO", Const, 0, ""},
    +		{"IPPROTO_HMP", Const, 0, ""},
    +		{"IPPROTO_HOPOPTS", Const, 0, ""},
    +		{"IPPROTO_ICMP", Const, 0, ""},
    +		{"IPPROTO_ICMPV6", Const, 0, ""},
    +		{"IPPROTO_IDP", Const, 0, ""},
    +		{"IPPROTO_IDPR", Const, 0, ""},
    +		{"IPPROTO_IDRP", Const, 0, ""},
    +		{"IPPROTO_IGMP", Const, 0, ""},
    +		{"IPPROTO_IGP", Const, 0, ""},
    +		{"IPPROTO_IGRP", Const, 0, ""},
    +		{"IPPROTO_IL", Const, 0, ""},
    +		{"IPPROTO_INLSP", Const, 0, ""},
    +		{"IPPROTO_INP", Const, 0, ""},
    +		{"IPPROTO_IP", Const, 0, ""},
    +		{"IPPROTO_IPCOMP", Const, 0, ""},
    +		{"IPPROTO_IPCV", Const, 0, ""},
    +		{"IPPROTO_IPEIP", Const, 0, ""},
    +		{"IPPROTO_IPIP", Const, 0, ""},
    +		{"IPPROTO_IPPC", Const, 0, ""},
    +		{"IPPROTO_IPV4", Const, 0, ""},
    +		{"IPPROTO_IPV6", Const, 0, ""},
    +		{"IPPROTO_IPV6_ICMP", Const, 1, ""},
    +		{"IPPROTO_IRTP", Const, 0, ""},
    +		{"IPPROTO_KRYPTOLAN", Const, 0, ""},
    +		{"IPPROTO_LARP", Const, 0, ""},
    +		{"IPPROTO_LEAF1", Const, 0, ""},
    +		{"IPPROTO_LEAF2", Const, 0, ""},
    +		{"IPPROTO_MAX", Const, 0, ""},
    +		{"IPPROTO_MAXID", Const, 0, ""},
    +		{"IPPROTO_MEAS", Const, 0, ""},
    +		{"IPPROTO_MH", Const, 1, ""},
    +		{"IPPROTO_MHRP", Const, 0, ""},
    +		{"IPPROTO_MICP", Const, 0, ""},
    +		{"IPPROTO_MOBILE", Const, 0, ""},
    +		{"IPPROTO_MPLS", Const, 1, ""},
    +		{"IPPROTO_MTP", Const, 0, ""},
    +		{"IPPROTO_MUX", Const, 0, ""},
    +		{"IPPROTO_ND", Const, 0, ""},
    +		{"IPPROTO_NHRP", Const, 0, ""},
    +		{"IPPROTO_NONE", Const, 0, ""},
    +		{"IPPROTO_NSP", Const, 0, ""},
    +		{"IPPROTO_NVPII", Const, 0, ""},
    +		{"IPPROTO_OLD_DIVERT", Const, 0, ""},
    +		{"IPPROTO_OSPFIGP", Const, 0, ""},
    +		{"IPPROTO_PFSYNC", Const, 0, ""},
    +		{"IPPROTO_PGM", Const, 0, ""},
    +		{"IPPROTO_PIGP", Const, 0, ""},
    +		{"IPPROTO_PIM", Const, 0, ""},
    +		{"IPPROTO_PRM", Const, 0, ""},
    +		{"IPPROTO_PUP", Const, 0, ""},
    +		{"IPPROTO_PVP", Const, 0, ""},
    +		{"IPPROTO_RAW", Const, 0, ""},
    +		{"IPPROTO_RCCMON", Const, 0, ""},
    +		{"IPPROTO_RDP", Const, 0, ""},
    +		{"IPPROTO_ROUTING", Const, 0, ""},
    +		{"IPPROTO_RSVP", Const, 0, ""},
    +		{"IPPROTO_RVD", Const, 0, ""},
    +		{"IPPROTO_SATEXPAK", Const, 0, ""},
    +		{"IPPROTO_SATMON", Const, 0, ""},
    +		{"IPPROTO_SCCSP", Const, 0, ""},
    +		{"IPPROTO_SCTP", Const, 0, ""},
    +		{"IPPROTO_SDRP", Const, 0, ""},
    +		{"IPPROTO_SEND", Const, 1, ""},
    +		{"IPPROTO_SEP", Const, 0, ""},
    +		{"IPPROTO_SKIP", Const, 0, ""},
    +		{"IPPROTO_SPACER", Const, 0, ""},
    +		{"IPPROTO_SRPC", Const, 0, ""},
    +		{"IPPROTO_ST", Const, 0, ""},
    +		{"IPPROTO_SVMTP", Const, 0, ""},
    +		{"IPPROTO_SWIPE", Const, 0, ""},
    +		{"IPPROTO_TCF", Const, 0, ""},
    +		{"IPPROTO_TCP", Const, 0, ""},
    +		{"IPPROTO_TLSP", Const, 0, ""},
    +		{"IPPROTO_TP", Const, 0, ""},
    +		{"IPPROTO_TPXX", Const, 0, ""},
    +		{"IPPROTO_TRUNK1", Const, 0, ""},
    +		{"IPPROTO_TRUNK2", Const, 0, ""},
    +		{"IPPROTO_TTP", Const, 0, ""},
    +		{"IPPROTO_UDP", Const, 0, ""},
    +		{"IPPROTO_UDPLITE", Const, 0, ""},
    +		{"IPPROTO_VINES", Const, 0, ""},
    +		{"IPPROTO_VISA", Const, 0, ""},
    +		{"IPPROTO_VMTP", Const, 0, ""},
    +		{"IPPROTO_VRRP", Const, 1, ""},
    +		{"IPPROTO_WBEXPAK", Const, 0, ""},
    +		{"IPPROTO_WBMON", Const, 0, ""},
    +		{"IPPROTO_WSN", Const, 0, ""},
    +		{"IPPROTO_XNET", Const, 0, ""},
    +		{"IPPROTO_XTP", Const, 0, ""},
    +		{"IPV6_2292DSTOPTS", Const, 0, ""},
    +		{"IPV6_2292HOPLIMIT", Const, 0, ""},
    +		{"IPV6_2292HOPOPTS", Const, 0, ""},
    +		{"IPV6_2292NEXTHOP", Const, 0, ""},
    +		{"IPV6_2292PKTINFO", Const, 0, ""},
    +		{"IPV6_2292PKTOPTIONS", Const, 0, ""},
    +		{"IPV6_2292RTHDR", Const, 0, ""},
    +		{"IPV6_ADDRFORM", Const, 0, ""},
    +		{"IPV6_ADD_MEMBERSHIP", Const, 0, ""},
    +		{"IPV6_AUTHHDR", Const, 0, ""},
    +		{"IPV6_AUTH_LEVEL", Const, 1, ""},
    +		{"IPV6_AUTOFLOWLABEL", Const, 0, ""},
    +		{"IPV6_BINDANY", Const, 0, ""},
    +		{"IPV6_BINDV6ONLY", Const, 0, ""},
    +		{"IPV6_BOUND_IF", Const, 0, ""},
    +		{"IPV6_CHECKSUM", Const, 0, ""},
    +		{"IPV6_DEFAULT_MULTICAST_HOPS", Const, 0, ""},
    +		{"IPV6_DEFAULT_MULTICAST_LOOP", Const, 0, ""},
    +		{"IPV6_DEFHLIM", Const, 0, ""},
    +		{"IPV6_DONTFRAG", Const, 0, ""},
    +		{"IPV6_DROP_MEMBERSHIP", Const, 0, ""},
    +		{"IPV6_DSTOPTS", Const, 0, ""},
    +		{"IPV6_ESP_NETWORK_LEVEL", Const, 1, ""},
    +		{"IPV6_ESP_TRANS_LEVEL", Const, 1, ""},
    +		{"IPV6_FAITH", Const, 0, ""},
    +		{"IPV6_FLOWINFO_MASK", Const, 0, ""},
    +		{"IPV6_FLOWLABEL_MASK", Const, 0, ""},
    +		{"IPV6_FRAGTTL", Const, 0, ""},
    +		{"IPV6_FW_ADD", Const, 0, ""},
    +		{"IPV6_FW_DEL", Const, 0, ""},
    +		{"IPV6_FW_FLUSH", Const, 0, ""},
    +		{"IPV6_FW_GET", Const, 0, ""},
    +		{"IPV6_FW_ZERO", Const, 0, ""},
    +		{"IPV6_HLIMDEC", Const, 0, ""},
    +		{"IPV6_HOPLIMIT", Const, 0, ""},
    +		{"IPV6_HOPOPTS", Const, 0, ""},
    +		{"IPV6_IPCOMP_LEVEL", Const, 1, ""},
    +		{"IPV6_IPSEC_POLICY", Const, 0, ""},
    +		{"IPV6_JOIN_ANYCAST", Const, 0, ""},
    +		{"IPV6_JOIN_GROUP", Const, 0, ""},
    +		{"IPV6_LEAVE_ANYCAST", Const, 0, ""},
    +		{"IPV6_LEAVE_GROUP", Const, 0, ""},
    +		{"IPV6_MAXHLIM", Const, 0, ""},
    +		{"IPV6_MAXOPTHDR", Const, 0, ""},
    +		{"IPV6_MAXPACKET", Const, 0, ""},
    +		{"IPV6_MAX_GROUP_SRC_FILTER", Const, 0, ""},
    +		{"IPV6_MAX_MEMBERSHIPS", Const, 0, ""},
    +		{"IPV6_MAX_SOCK_SRC_FILTER", Const, 0, ""},
    +		{"IPV6_MIN_MEMBERSHIPS", Const, 0, ""},
    +		{"IPV6_MMTU", Const, 0, ""},
    +		{"IPV6_MSFILTER", Const, 0, ""},
    +		{"IPV6_MTU", Const, 0, ""},
    +		{"IPV6_MTU_DISCOVER", Const, 0, ""},
    +		{"IPV6_MULTICAST_HOPS", Const, 0, ""},
    +		{"IPV6_MULTICAST_IF", Const, 0, ""},
    +		{"IPV6_MULTICAST_LOOP", Const, 0, ""},
    +		{"IPV6_NEXTHOP", Const, 0, ""},
    +		{"IPV6_OPTIONS", Const, 1, ""},
    +		{"IPV6_PATHMTU", Const, 0, ""},
    +		{"IPV6_PIPEX", Const, 1, ""},
    +		{"IPV6_PKTINFO", Const, 0, ""},
    +		{"IPV6_PMTUDISC_DO", Const, 0, ""},
    +		{"IPV6_PMTUDISC_DONT", Const, 0, ""},
    +		{"IPV6_PMTUDISC_PROBE", Const, 0, ""},
    +		{"IPV6_PMTUDISC_WANT", Const, 0, ""},
    +		{"IPV6_PORTRANGE", Const, 0, ""},
    +		{"IPV6_PORTRANGE_DEFAULT", Const, 0, ""},
    +		{"IPV6_PORTRANGE_HIGH", Const, 0, ""},
    +		{"IPV6_PORTRANGE_LOW", Const, 0, ""},
    +		{"IPV6_PREFER_TEMPADDR", Const, 0, ""},
    +		{"IPV6_RECVDSTOPTS", Const, 0, ""},
    +		{"IPV6_RECVDSTPORT", Const, 3, ""},
    +		{"IPV6_RECVERR", Const, 0, ""},
    +		{"IPV6_RECVHOPLIMIT", Const, 0, ""},
    +		{"IPV6_RECVHOPOPTS", Const, 0, ""},
    +		{"IPV6_RECVPATHMTU", Const, 0, ""},
    +		{"IPV6_RECVPKTINFO", Const, 0, ""},
    +		{"IPV6_RECVRTHDR", Const, 0, ""},
    +		{"IPV6_RECVTCLASS", Const, 0, ""},
    +		{"IPV6_ROUTER_ALERT", Const, 0, ""},
    +		{"IPV6_RTABLE", Const, 1, ""},
    +		{"IPV6_RTHDR", Const, 0, ""},
    +		{"IPV6_RTHDRDSTOPTS", Const, 0, ""},
    +		{"IPV6_RTHDR_LOOSE", Const, 0, ""},
    +		{"IPV6_RTHDR_STRICT", Const, 0, ""},
    +		{"IPV6_RTHDR_TYPE_0", Const, 0, ""},
    +		{"IPV6_RXDSTOPTS", Const, 0, ""},
    +		{"IPV6_RXHOPOPTS", Const, 0, ""},
    +		{"IPV6_SOCKOPT_RESERVED1", Const, 0, ""},
    +		{"IPV6_TCLASS", Const, 0, ""},
    +		{"IPV6_UNICAST_HOPS", Const, 0, ""},
    +		{"IPV6_USE_MIN_MTU", Const, 0, ""},
    +		{"IPV6_V6ONLY", Const, 0, ""},
    +		{"IPV6_VERSION", Const, 0, ""},
    +		{"IPV6_VERSION_MASK", Const, 0, ""},
    +		{"IPV6_XFRM_POLICY", Const, 0, ""},
    +		{"IP_ADD_MEMBERSHIP", Const, 0, ""},
    +		{"IP_ADD_SOURCE_MEMBERSHIP", Const, 0, ""},
    +		{"IP_AUTH_LEVEL", Const, 1, ""},
    +		{"IP_BINDANY", Const, 0, ""},
    +		{"IP_BLOCK_SOURCE", Const, 0, ""},
    +		{"IP_BOUND_IF", Const, 0, ""},
    +		{"IP_DEFAULT_MULTICAST_LOOP", Const, 0, ""},
    +		{"IP_DEFAULT_MULTICAST_TTL", Const, 0, ""},
    +		{"IP_DF", Const, 0, ""},
    +		{"IP_DIVERTFL", Const, 3, ""},
    +		{"IP_DONTFRAG", Const, 0, ""},
    +		{"IP_DROP_MEMBERSHIP", Const, 0, ""},
    +		{"IP_DROP_SOURCE_MEMBERSHIP", Const, 0, ""},
    +		{"IP_DUMMYNET3", Const, 0, ""},
    +		{"IP_DUMMYNET_CONFIGURE", Const, 0, ""},
    +		{"IP_DUMMYNET_DEL", Const, 0, ""},
    +		{"IP_DUMMYNET_FLUSH", Const, 0, ""},
    +		{"IP_DUMMYNET_GET", Const, 0, ""},
    +		{"IP_EF", Const, 1, ""},
    +		{"IP_ERRORMTU", Const, 1, ""},
    +		{"IP_ESP_NETWORK_LEVEL", Const, 1, ""},
    +		{"IP_ESP_TRANS_LEVEL", Const, 1, ""},
    +		{"IP_FAITH", Const, 0, ""},
    +		{"IP_FREEBIND", Const, 0, ""},
    +		{"IP_FW3", Const, 0, ""},
    +		{"IP_FW_ADD", Const, 0, ""},
    +		{"IP_FW_DEL", Const, 0, ""},
    +		{"IP_FW_FLUSH", Const, 0, ""},
    +		{"IP_FW_GET", Const, 0, ""},
    +		{"IP_FW_NAT_CFG", Const, 0, ""},
    +		{"IP_FW_NAT_DEL", Const, 0, ""},
    +		{"IP_FW_NAT_GET_CONFIG", Const, 0, ""},
    +		{"IP_FW_NAT_GET_LOG", Const, 0, ""},
    +		{"IP_FW_RESETLOG", Const, 0, ""},
    +		{"IP_FW_TABLE_ADD", Const, 0, ""},
    +		{"IP_FW_TABLE_DEL", Const, 0, ""},
    +		{"IP_FW_TABLE_FLUSH", Const, 0, ""},
    +		{"IP_FW_TABLE_GETSIZE", Const, 0, ""},
    +		{"IP_FW_TABLE_LIST", Const, 0, ""},
    +		{"IP_FW_ZERO", Const, 0, ""},
    +		{"IP_HDRINCL", Const, 0, ""},
    +		{"IP_IPCOMP_LEVEL", Const, 1, ""},
    +		{"IP_IPSECFLOWINFO", Const, 1, ""},
    +		{"IP_IPSEC_LOCAL_AUTH", Const, 1, ""},
    +		{"IP_IPSEC_LOCAL_CRED", Const, 1, ""},
    +		{"IP_IPSEC_LOCAL_ID", Const, 1, ""},
    +		{"IP_IPSEC_POLICY", Const, 0, ""},
    +		{"IP_IPSEC_REMOTE_AUTH", Const, 1, ""},
    +		{"IP_IPSEC_REMOTE_CRED", Const, 1, ""},
    +		{"IP_IPSEC_REMOTE_ID", Const, 1, ""},
    +		{"IP_MAXPACKET", Const, 0, ""},
    +		{"IP_MAX_GROUP_SRC_FILTER", Const, 0, ""},
    +		{"IP_MAX_MEMBERSHIPS", Const, 0, ""},
    +		{"IP_MAX_SOCK_MUTE_FILTER", Const, 0, ""},
    +		{"IP_MAX_SOCK_SRC_FILTER", Const, 0, ""},
    +		{"IP_MAX_SOURCE_FILTER", Const, 0, ""},
    +		{"IP_MF", Const, 0, ""},
    +		{"IP_MINFRAGSIZE", Const, 1, ""},
    +		{"IP_MINTTL", Const, 0, ""},
    +		{"IP_MIN_MEMBERSHIPS", Const, 0, ""},
    +		{"IP_MSFILTER", Const, 0, ""},
    +		{"IP_MSS", Const, 0, ""},
    +		{"IP_MTU", Const, 0, ""},
    +		{"IP_MTU_DISCOVER", Const, 0, ""},
    +		{"IP_MULTICAST_IF", Const, 0, ""},
    +		{"IP_MULTICAST_IFINDEX", Const, 0, ""},
    +		{"IP_MULTICAST_LOOP", Const, 0, ""},
    +		{"IP_MULTICAST_TTL", Const, 0, ""},
    +		{"IP_MULTICAST_VIF", Const, 0, ""},
    +		{"IP_NAT__XXX", Const, 0, ""},
    +		{"IP_OFFMASK", Const, 0, ""},
    +		{"IP_OLD_FW_ADD", Const, 0, ""},
    +		{"IP_OLD_FW_DEL", Const, 0, ""},
    +		{"IP_OLD_FW_FLUSH", Const, 0, ""},
    +		{"IP_OLD_FW_GET", Const, 0, ""},
    +		{"IP_OLD_FW_RESETLOG", Const, 0, ""},
    +		{"IP_OLD_FW_ZERO", Const, 0, ""},
    +		{"IP_ONESBCAST", Const, 0, ""},
    +		{"IP_OPTIONS", Const, 0, ""},
    +		{"IP_ORIGDSTADDR", Const, 0, ""},
    +		{"IP_PASSSEC", Const, 0, ""},
    +		{"IP_PIPEX", Const, 1, ""},
    +		{"IP_PKTINFO", Const, 0, ""},
    +		{"IP_PKTOPTIONS", Const, 0, ""},
    +		{"IP_PMTUDISC", Const, 0, ""},
    +		{"IP_PMTUDISC_DO", Const, 0, ""},
    +		{"IP_PMTUDISC_DONT", Const, 0, ""},
    +		{"IP_PMTUDISC_PROBE", Const, 0, ""},
    +		{"IP_PMTUDISC_WANT", Const, 0, ""},
    +		{"IP_PORTRANGE", Const, 0, ""},
    +		{"IP_PORTRANGE_DEFAULT", Const, 0, ""},
    +		{"IP_PORTRANGE_HIGH", Const, 0, ""},
    +		{"IP_PORTRANGE_LOW", Const, 0, ""},
    +		{"IP_RECVDSTADDR", Const, 0, ""},
    +		{"IP_RECVDSTPORT", Const, 1, ""},
    +		{"IP_RECVERR", Const, 0, ""},
    +		{"IP_RECVIF", Const, 0, ""},
    +		{"IP_RECVOPTS", Const, 0, ""},
    +		{"IP_RECVORIGDSTADDR", Const, 0, ""},
    +		{"IP_RECVPKTINFO", Const, 0, ""},
    +		{"IP_RECVRETOPTS", Const, 0, ""},
    +		{"IP_RECVRTABLE", Const, 1, ""},
    +		{"IP_RECVTOS", Const, 0, ""},
    +		{"IP_RECVTTL", Const, 0, ""},
    +		{"IP_RETOPTS", Const, 0, ""},
    +		{"IP_RF", Const, 0, ""},
    +		{"IP_ROUTER_ALERT", Const, 0, ""},
    +		{"IP_RSVP_OFF", Const, 0, ""},
    +		{"IP_RSVP_ON", Const, 0, ""},
    +		{"IP_RSVP_VIF_OFF", Const, 0, ""},
    +		{"IP_RSVP_VIF_ON", Const, 0, ""},
    +		{"IP_RTABLE", Const, 1, ""},
    +		{"IP_SENDSRCADDR", Const, 0, ""},
    +		{"IP_STRIPHDR", Const, 0, ""},
    +		{"IP_TOS", Const, 0, ""},
    +		{"IP_TRAFFIC_MGT_BACKGROUND", Const, 0, ""},
    +		{"IP_TRANSPARENT", Const, 0, ""},
    +		{"IP_TTL", Const, 0, ""},
    +		{"IP_UNBLOCK_SOURCE", Const, 0, ""},
    +		{"IP_XFRM_POLICY", Const, 0, ""},
    +		{"IPv6MTUInfo", Type, 2, ""},
    +		{"IPv6MTUInfo.Addr", Field, 2, ""},
    +		{"IPv6MTUInfo.Mtu", Field, 2, ""},
    +		{"IPv6Mreq", Type, 0, ""},
    +		{"IPv6Mreq.Interface", Field, 0, ""},
    +		{"IPv6Mreq.Multiaddr", Field, 0, ""},
    +		{"ISIG", Const, 0, ""},
    +		{"ISTRIP", Const, 0, ""},
    +		{"IUCLC", Const, 0, ""},
    +		{"IUTF8", Const, 0, ""},
    +		{"IXANY", Const, 0, ""},
    +		{"IXOFF", Const, 0, ""},
    +		{"IXON", Const, 0, ""},
    +		{"IfAddrmsg", Type, 0, ""},
    +		{"IfAddrmsg.Family", Field, 0, ""},
    +		{"IfAddrmsg.Flags", Field, 0, ""},
    +		{"IfAddrmsg.Index", Field, 0, ""},
    +		{"IfAddrmsg.Prefixlen", Field, 0, ""},
    +		{"IfAddrmsg.Scope", Field, 0, ""},
    +		{"IfAnnounceMsghdr", Type, 1, ""},
    +		{"IfAnnounceMsghdr.Hdrlen", Field, 2, ""},
    +		{"IfAnnounceMsghdr.Index", Field, 1, ""},
    +		{"IfAnnounceMsghdr.Msglen", Field, 1, ""},
    +		{"IfAnnounceMsghdr.Name", Field, 1, ""},
    +		{"IfAnnounceMsghdr.Type", Field, 1, ""},
    +		{"IfAnnounceMsghdr.Version", Field, 1, ""},
    +		{"IfAnnounceMsghdr.What", Field, 1, ""},
    +		{"IfData", Type, 0, ""},
    +		{"IfData.Addrlen", Field, 0, ""},
    +		{"IfData.Baudrate", Field, 0, ""},
    +		{"IfData.Capabilities", Field, 2, ""},
    +		{"IfData.Collisions", Field, 0, ""},
    +		{"IfData.Datalen", Field, 0, ""},
    +		{"IfData.Epoch", Field, 0, ""},
    +		{"IfData.Hdrlen", Field, 0, ""},
    +		{"IfData.Hwassist", Field, 0, ""},
    +		{"IfData.Ibytes", Field, 0, ""},
    +		{"IfData.Ierrors", Field, 0, ""},
    +		{"IfData.Imcasts", Field, 0, ""},
    +		{"IfData.Ipackets", Field, 0, ""},
    +		{"IfData.Iqdrops", Field, 0, ""},
    +		{"IfData.Lastchange", Field, 0, ""},
    +		{"IfData.Link_state", Field, 0, ""},
    +		{"IfData.Mclpool", Field, 2, ""},
    +		{"IfData.Metric", Field, 0, ""},
    +		{"IfData.Mtu", Field, 0, ""},
    +		{"IfData.Noproto", Field, 0, ""},
    +		{"IfData.Obytes", Field, 0, ""},
    +		{"IfData.Oerrors", Field, 0, ""},
    +		{"IfData.Omcasts", Field, 0, ""},
    +		{"IfData.Opackets", Field, 0, ""},
    +		{"IfData.Pad", Field, 2, ""},
    +		{"IfData.Pad_cgo_0", Field, 2, ""},
    +		{"IfData.Pad_cgo_1", Field, 2, ""},
    +		{"IfData.Physical", Field, 0, ""},
    +		{"IfData.Recvquota", Field, 0, ""},
    +		{"IfData.Recvtiming", Field, 0, ""},
    +		{"IfData.Reserved1", Field, 0, ""},
    +		{"IfData.Reserved2", Field, 0, ""},
    +		{"IfData.Spare_char1", Field, 0, ""},
    +		{"IfData.Spare_char2", Field, 0, ""},
    +		{"IfData.Type", Field, 0, ""},
    +		{"IfData.Typelen", Field, 0, ""},
    +		{"IfData.Unused1", Field, 0, ""},
    +		{"IfData.Unused2", Field, 0, ""},
    +		{"IfData.Xmitquota", Field, 0, ""},
    +		{"IfData.Xmittiming", Field, 0, ""},
    +		{"IfInfomsg", Type, 0, ""},
    +		{"IfInfomsg.Change", Field, 0, ""},
    +		{"IfInfomsg.Family", Field, 0, ""},
    +		{"IfInfomsg.Flags", Field, 0, ""},
    +		{"IfInfomsg.Index", Field, 0, ""},
    +		{"IfInfomsg.Type", Field, 0, ""},
    +		{"IfInfomsg.X__ifi_pad", Field, 0, ""},
    +		{"IfMsghdr", Type, 0, ""},
    +		{"IfMsghdr.Addrs", Field, 0, ""},
    +		{"IfMsghdr.Data", Field, 0, ""},
    +		{"IfMsghdr.Flags", Field, 0, ""},
    +		{"IfMsghdr.Hdrlen", Field, 2, ""},
    +		{"IfMsghdr.Index", Field, 0, ""},
    +		{"IfMsghdr.Msglen", Field, 0, ""},
    +		{"IfMsghdr.Pad1", Field, 2, ""},
    +		{"IfMsghdr.Pad2", Field, 2, ""},
    +		{"IfMsghdr.Pad_cgo_0", Field, 0, ""},
    +		{"IfMsghdr.Pad_cgo_1", Field, 2, ""},
    +		{"IfMsghdr.Tableid", Field, 2, ""},
    +		{"IfMsghdr.Type", Field, 0, ""},
    +		{"IfMsghdr.Version", Field, 0, ""},
    +		{"IfMsghdr.Xflags", Field, 2, ""},
    +		{"IfaMsghdr", Type, 0, ""},
    +		{"IfaMsghdr.Addrs", Field, 0, ""},
    +		{"IfaMsghdr.Flags", Field, 0, ""},
    +		{"IfaMsghdr.Hdrlen", Field, 2, ""},
    +		{"IfaMsghdr.Index", Field, 0, ""},
    +		{"IfaMsghdr.Metric", Field, 0, ""},
    +		{"IfaMsghdr.Msglen", Field, 0, ""},
    +		{"IfaMsghdr.Pad1", Field, 2, ""},
    +		{"IfaMsghdr.Pad2", Field, 2, ""},
    +		{"IfaMsghdr.Pad_cgo_0", Field, 0, ""},
    +		{"IfaMsghdr.Tableid", Field, 2, ""},
    +		{"IfaMsghdr.Type", Field, 0, ""},
    +		{"IfaMsghdr.Version", Field, 0, ""},
    +		{"IfmaMsghdr", Type, 0, ""},
    +		{"IfmaMsghdr.Addrs", Field, 0, ""},
    +		{"IfmaMsghdr.Flags", Field, 0, ""},
    +		{"IfmaMsghdr.Index", Field, 0, ""},
    +		{"IfmaMsghdr.Msglen", Field, 0, ""},
    +		{"IfmaMsghdr.Pad_cgo_0", Field, 0, ""},
    +		{"IfmaMsghdr.Type", Field, 0, ""},
    +		{"IfmaMsghdr.Version", Field, 0, ""},
    +		{"IfmaMsghdr2", Type, 0, ""},
    +		{"IfmaMsghdr2.Addrs", Field, 0, ""},
    +		{"IfmaMsghdr2.Flags", Field, 0, ""},
    +		{"IfmaMsghdr2.Index", Field, 0, ""},
    +		{"IfmaMsghdr2.Msglen", Field, 0, ""},
    +		{"IfmaMsghdr2.Pad_cgo_0", Field, 0, ""},
    +		{"IfmaMsghdr2.Refcount", Field, 0, ""},
    +		{"IfmaMsghdr2.Type", Field, 0, ""},
    +		{"IfmaMsghdr2.Version", Field, 0, ""},
    +		{"ImplementsGetwd", Const, 0, ""},
    +		{"Inet4Pktinfo", Type, 0, ""},
    +		{"Inet4Pktinfo.Addr", Field, 0, ""},
    +		{"Inet4Pktinfo.Ifindex", Field, 0, ""},
    +		{"Inet4Pktinfo.Spec_dst", Field, 0, ""},
    +		{"Inet6Pktinfo", Type, 0, ""},
    +		{"Inet6Pktinfo.Addr", Field, 0, ""},
    +		{"Inet6Pktinfo.Ifindex", Field, 0, ""},
    +		{"InotifyAddWatch", Func, 0, "func(fd int, pathname string, mask uint32) (watchdesc int, err error)"},
    +		{"InotifyEvent", Type, 0, ""},
    +		{"InotifyEvent.Cookie", Field, 0, ""},
    +		{"InotifyEvent.Len", Field, 0, ""},
    +		{"InotifyEvent.Mask", Field, 0, ""},
    +		{"InotifyEvent.Name", Field, 0, ""},
    +		{"InotifyEvent.Wd", Field, 0, ""},
    +		{"InotifyInit", Func, 0, "func() (fd int, err error)"},
    +		{"InotifyInit1", Func, 0, "func(flags int) (fd int, err error)"},
    +		{"InotifyRmWatch", Func, 0, "func(fd int, watchdesc uint32) (success int, err error)"},
    +		{"InterfaceAddrMessage", Type, 0, ""},
    +		{"InterfaceAddrMessage.Data", Field, 0, ""},
    +		{"InterfaceAddrMessage.Header", Field, 0, ""},
    +		{"InterfaceAnnounceMessage", Type, 1, ""},
    +		{"InterfaceAnnounceMessage.Header", Field, 1, ""},
    +		{"InterfaceInfo", Type, 0, ""},
    +		{"InterfaceInfo.Address", Field, 0, ""},
    +		{"InterfaceInfo.BroadcastAddress", Field, 0, ""},
    +		{"InterfaceInfo.Flags", Field, 0, ""},
    +		{"InterfaceInfo.Netmask", Field, 0, ""},
    +		{"InterfaceMessage", Type, 0, ""},
    +		{"InterfaceMessage.Data", Field, 0, ""},
    +		{"InterfaceMessage.Header", Field, 0, ""},
    +		{"InterfaceMulticastAddrMessage", Type, 0, ""},
    +		{"InterfaceMulticastAddrMessage.Data", Field, 0, ""},
    +		{"InterfaceMulticastAddrMessage.Header", Field, 0, ""},
    +		{"InvalidHandle", Const, 0, ""},
    +		{"Ioperm", Func, 0, "func(from int, num int, on int) (err error)"},
    +		{"Iopl", Func, 0, "func(level int) (err error)"},
    +		{"Iovec", Type, 0, ""},
    +		{"Iovec.Base", Field, 0, ""},
    +		{"Iovec.Len", Field, 0, ""},
    +		{"IpAdapterInfo", Type, 0, ""},
    +		{"IpAdapterInfo.AdapterName", Field, 0, ""},
    +		{"IpAdapterInfo.Address", Field, 0, ""},
    +		{"IpAdapterInfo.AddressLength", Field, 0, ""},
    +		{"IpAdapterInfo.ComboIndex", Field, 0, ""},
    +		{"IpAdapterInfo.CurrentIpAddress", Field, 0, ""},
    +		{"IpAdapterInfo.Description", Field, 0, ""},
    +		{"IpAdapterInfo.DhcpEnabled", Field, 0, ""},
    +		{"IpAdapterInfo.DhcpServer", Field, 0, ""},
    +		{"IpAdapterInfo.GatewayList", Field, 0, ""},
    +		{"IpAdapterInfo.HaveWins", Field, 0, ""},
    +		{"IpAdapterInfo.Index", Field, 0, ""},
    +		{"IpAdapterInfo.IpAddressList", Field, 0, ""},
    +		{"IpAdapterInfo.LeaseExpires", Field, 0, ""},
    +		{"IpAdapterInfo.LeaseObtained", Field, 0, ""},
    +		{"IpAdapterInfo.Next", Field, 0, ""},
    +		{"IpAdapterInfo.PrimaryWinsServer", Field, 0, ""},
    +		{"IpAdapterInfo.SecondaryWinsServer", Field, 0, ""},
    +		{"IpAdapterInfo.Type", Field, 0, ""},
    +		{"IpAddrString", Type, 0, ""},
    +		{"IpAddrString.Context", Field, 0, ""},
    +		{"IpAddrString.IpAddress", Field, 0, ""},
    +		{"IpAddrString.IpMask", Field, 0, ""},
    +		{"IpAddrString.Next", Field, 0, ""},
    +		{"IpAddressString", Type, 0, ""},
    +		{"IpAddressString.String", Field, 0, ""},
    +		{"IpMaskString", Type, 0, ""},
    +		{"IpMaskString.String", Field, 2, ""},
    +		{"Issetugid", Func, 0, ""},
    +		{"KEY_ALL_ACCESS", Const, 0, ""},
    +		{"KEY_CREATE_LINK", Const, 0, ""},
    +		{"KEY_CREATE_SUB_KEY", Const, 0, ""},
    +		{"KEY_ENUMERATE_SUB_KEYS", Const, 0, ""},
    +		{"KEY_EXECUTE", Const, 0, ""},
    +		{"KEY_NOTIFY", Const, 0, ""},
    +		{"KEY_QUERY_VALUE", Const, 0, ""},
    +		{"KEY_READ", Const, 0, ""},
    +		{"KEY_SET_VALUE", Const, 0, ""},
    +		{"KEY_WOW64_32KEY", Const, 0, ""},
    +		{"KEY_WOW64_64KEY", Const, 0, ""},
    +		{"KEY_WRITE", Const, 0, ""},
    +		{"Kevent", Func, 0, ""},
    +		{"Kevent_t", Type, 0, ""},
    +		{"Kevent_t.Data", Field, 0, ""},
    +		{"Kevent_t.Fflags", Field, 0, ""},
    +		{"Kevent_t.Filter", Field, 0, ""},
    +		{"Kevent_t.Flags", Field, 0, ""},
    +		{"Kevent_t.Ident", Field, 0, ""},
    +		{"Kevent_t.Pad_cgo_0", Field, 2, ""},
    +		{"Kevent_t.Udata", Field, 0, ""},
    +		{"Kill", Func, 0, "func(pid int, sig Signal) (err error)"},
    +		{"Klogctl", Func, 0, "func(typ int, buf []byte) (n int, err error)"},
    +		{"Kqueue", Func, 0, ""},
    +		{"LANG_ENGLISH", Const, 0, ""},
    +		{"LAYERED_PROTOCOL", Const, 2, ""},
    +		{"LCNT_OVERLOAD_FLUSH", Const, 1, ""},
    +		{"LINUX_REBOOT_CMD_CAD_OFF", Const, 0, ""},
    +		{"LINUX_REBOOT_CMD_CAD_ON", Const, 0, ""},
    +		{"LINUX_REBOOT_CMD_HALT", Const, 0, ""},
    +		{"LINUX_REBOOT_CMD_KEXEC", Const, 0, ""},
    +		{"LINUX_REBOOT_CMD_POWER_OFF", Const, 0, ""},
    +		{"LINUX_REBOOT_CMD_RESTART", Const, 0, ""},
    +		{"LINUX_REBOOT_CMD_RESTART2", Const, 0, ""},
    +		{"LINUX_REBOOT_CMD_SW_SUSPEND", Const, 0, ""},
    +		{"LINUX_REBOOT_MAGIC1", Const, 0, ""},
    +		{"LINUX_REBOOT_MAGIC2", Const, 0, ""},
    +		{"LOCK_EX", Const, 0, ""},
    +		{"LOCK_NB", Const, 0, ""},
    +		{"LOCK_SH", Const, 0, ""},
    +		{"LOCK_UN", Const, 0, ""},
    +		{"LazyDLL", Type, 0, ""},
    +		{"LazyDLL.Name", Field, 0, ""},
    +		{"LazyProc", Type, 0, ""},
    +		{"LazyProc.Name", Field, 0, ""},
    +		{"Lchown", Func, 0, "func(path string, uid int, gid int) (err error)"},
    +		{"Linger", Type, 0, ""},
    +		{"Linger.Linger", Field, 0, ""},
    +		{"Linger.Onoff", Field, 0, ""},
    +		{"Link", Func, 0, "func(oldpath string, newpath string) (err error)"},
    +		{"Listen", Func, 0, "func(s int, n int) (err error)"},
    +		{"Listxattr", Func, 1, "func(path string, dest []byte) (sz int, err error)"},
    +		{"LoadCancelIoEx", Func, 1, ""},
    +		{"LoadConnectEx", Func, 1, ""},
    +		{"LoadCreateSymbolicLink", Func, 4, ""},
    +		{"LoadDLL", Func, 0, ""},
    +		{"LoadGetAddrInfo", Func, 1, ""},
    +		{"LoadLibrary", Func, 0, ""},
    +		{"LoadSetFileCompletionNotificationModes", Func, 2, ""},
    +		{"LocalFree", Func, 0, ""},
    +		{"Log2phys_t", Type, 0, ""},
    +		{"Log2phys_t.Contigbytes", Field, 0, ""},
    +		{"Log2phys_t.Devoffset", Field, 0, ""},
    +		{"Log2phys_t.Flags", Field, 0, ""},
    +		{"LookupAccountName", Func, 0, ""},
    +		{"LookupAccountSid", Func, 0, ""},
    +		{"LookupSID", Func, 0, ""},
    +		{"LsfJump", Func, 0, "func(code int, k int, jt int, jf int) *SockFilter"},
    +		{"LsfSocket", Func, 0, "func(ifindex int, proto int) (int, error)"},
    +		{"LsfStmt", Func, 0, "func(code int, k int) *SockFilter"},
    +		{"Lstat", Func, 0, "func(path string, stat *Stat_t) (err error)"},
    +		{"MADV_AUTOSYNC", Const, 1, ""},
    +		{"MADV_CAN_REUSE", Const, 0, ""},
    +		{"MADV_CORE", Const, 1, ""},
    +		{"MADV_DOFORK", Const, 0, ""},
    +		{"MADV_DONTFORK", Const, 0, ""},
    +		{"MADV_DONTNEED", Const, 0, ""},
    +		{"MADV_FREE", Const, 0, ""},
    +		{"MADV_FREE_REUSABLE", Const, 0, ""},
    +		{"MADV_FREE_REUSE", Const, 0, ""},
    +		{"MADV_HUGEPAGE", Const, 0, ""},
    +		{"MADV_HWPOISON", Const, 0, ""},
    +		{"MADV_MERGEABLE", Const, 0, ""},
    +		{"MADV_NOCORE", Const, 1, ""},
    +		{"MADV_NOHUGEPAGE", Const, 0, ""},
    +		{"MADV_NORMAL", Const, 0, ""},
    +		{"MADV_NOSYNC", Const, 1, ""},
    +		{"MADV_PROTECT", Const, 1, ""},
    +		{"MADV_RANDOM", Const, 0, ""},
    +		{"MADV_REMOVE", Const, 0, ""},
    +		{"MADV_SEQUENTIAL", Const, 0, ""},
    +		{"MADV_SPACEAVAIL", Const, 3, ""},
    +		{"MADV_UNMERGEABLE", Const, 0, ""},
    +		{"MADV_WILLNEED", Const, 0, ""},
    +		{"MADV_ZERO_WIRED_PAGES", Const, 0, ""},
    +		{"MAP_32BIT", Const, 0, ""},
    +		{"MAP_ALIGNED_SUPER", Const, 3, ""},
    +		{"MAP_ALIGNMENT_16MB", Const, 3, ""},
    +		{"MAP_ALIGNMENT_1TB", Const, 3, ""},
    +		{"MAP_ALIGNMENT_256TB", Const, 3, ""},
    +		{"MAP_ALIGNMENT_4GB", Const, 3, ""},
    +		{"MAP_ALIGNMENT_64KB", Const, 3, ""},
    +		{"MAP_ALIGNMENT_64PB", Const, 3, ""},
    +		{"MAP_ALIGNMENT_MASK", Const, 3, ""},
    +		{"MAP_ALIGNMENT_SHIFT", Const, 3, ""},
    +		{"MAP_ANON", Const, 0, ""},
    +		{"MAP_ANONYMOUS", Const, 0, ""},
    +		{"MAP_COPY", Const, 0, ""},
    +		{"MAP_DENYWRITE", Const, 0, ""},
    +		{"MAP_EXECUTABLE", Const, 0, ""},
    +		{"MAP_FILE", Const, 0, ""},
    +		{"MAP_FIXED", Const, 0, ""},
    +		{"MAP_FLAGMASK", Const, 3, ""},
    +		{"MAP_GROWSDOWN", Const, 0, ""},
    +		{"MAP_HASSEMAPHORE", Const, 0, ""},
    +		{"MAP_HUGETLB", Const, 0, ""},
    +		{"MAP_INHERIT", Const, 3, ""},
    +		{"MAP_INHERIT_COPY", Const, 3, ""},
    +		{"MAP_INHERIT_DEFAULT", Const, 3, ""},
    +		{"MAP_INHERIT_DONATE_COPY", Const, 3, ""},
    +		{"MAP_INHERIT_NONE", Const, 3, ""},
    +		{"MAP_INHERIT_SHARE", Const, 3, ""},
    +		{"MAP_JIT", Const, 0, ""},
    +		{"MAP_LOCKED", Const, 0, ""},
    +		{"MAP_NOCACHE", Const, 0, ""},
    +		{"MAP_NOCORE", Const, 1, ""},
    +		{"MAP_NOEXTEND", Const, 0, ""},
    +		{"MAP_NONBLOCK", Const, 0, ""},
    +		{"MAP_NORESERVE", Const, 0, ""},
    +		{"MAP_NOSYNC", Const, 1, ""},
    +		{"MAP_POPULATE", Const, 0, ""},
    +		{"MAP_PREFAULT_READ", Const, 1, ""},
    +		{"MAP_PRIVATE", Const, 0, ""},
    +		{"MAP_RENAME", Const, 0, ""},
    +		{"MAP_RESERVED0080", Const, 0, ""},
    +		{"MAP_RESERVED0100", Const, 1, ""},
    +		{"MAP_SHARED", Const, 0, ""},
    +		{"MAP_STACK", Const, 0, ""},
    +		{"MAP_TRYFIXED", Const, 3, ""},
    +		{"MAP_TYPE", Const, 0, ""},
    +		{"MAP_WIRED", Const, 3, ""},
    +		{"MAXIMUM_REPARSE_DATA_BUFFER_SIZE", Const, 4, ""},
    +		{"MAXLEN_IFDESCR", Const, 0, ""},
    +		{"MAXLEN_PHYSADDR", Const, 0, ""},
    +		{"MAX_ADAPTER_ADDRESS_LENGTH", Const, 0, ""},
    +		{"MAX_ADAPTER_DESCRIPTION_LENGTH", Const, 0, ""},
    +		{"MAX_ADAPTER_NAME_LENGTH", Const, 0, ""},
    +		{"MAX_COMPUTERNAME_LENGTH", Const, 0, ""},
    +		{"MAX_INTERFACE_NAME_LEN", Const, 0, ""},
    +		{"MAX_LONG_PATH", Const, 0, ""},
    +		{"MAX_PATH", Const, 0, ""},
    +		{"MAX_PROTOCOL_CHAIN", Const, 2, ""},
    +		{"MCL_CURRENT", Const, 0, ""},
    +		{"MCL_FUTURE", Const, 0, ""},
    +		{"MNT_DETACH", Const, 0, ""},
    +		{"MNT_EXPIRE", Const, 0, ""},
    +		{"MNT_FORCE", Const, 0, ""},
    +		{"MSG_BCAST", Const, 1, ""},
    +		{"MSG_CMSG_CLOEXEC", Const, 0, ""},
    +		{"MSG_COMPAT", Const, 0, ""},
    +		{"MSG_CONFIRM", Const, 0, ""},
    +		{"MSG_CONTROLMBUF", Const, 1, ""},
    +		{"MSG_CTRUNC", Const, 0, ""},
    +		{"MSG_DONTROUTE", Const, 0, ""},
    +		{"MSG_DONTWAIT", Const, 0, ""},
    +		{"MSG_EOF", Const, 0, ""},
    +		{"MSG_EOR", Const, 0, ""},
    +		{"MSG_ERRQUEUE", Const, 0, ""},
    +		{"MSG_FASTOPEN", Const, 1, ""},
    +		{"MSG_FIN", Const, 0, ""},
    +		{"MSG_FLUSH", Const, 0, ""},
    +		{"MSG_HAVEMORE", Const, 0, ""},
    +		{"MSG_HOLD", Const, 0, ""},
    +		{"MSG_IOVUSRSPACE", Const, 1, ""},
    +		{"MSG_LENUSRSPACE", Const, 1, ""},
    +		{"MSG_MCAST", Const, 1, ""},
    +		{"MSG_MORE", Const, 0, ""},
    +		{"MSG_NAMEMBUF", Const, 1, ""},
    +		{"MSG_NBIO", Const, 0, ""},
    +		{"MSG_NEEDSA", Const, 0, ""},
    +		{"MSG_NOSIGNAL", Const, 0, ""},
    +		{"MSG_NOTIFICATION", Const, 0, ""},
    +		{"MSG_OOB", Const, 0, ""},
    +		{"MSG_PEEK", Const, 0, ""},
    +		{"MSG_PROXY", Const, 0, ""},
    +		{"MSG_RCVMORE", Const, 0, ""},
    +		{"MSG_RST", Const, 0, ""},
    +		{"MSG_SEND", Const, 0, ""},
    +		{"MSG_SYN", Const, 0, ""},
    +		{"MSG_TRUNC", Const, 0, ""},
    +		{"MSG_TRYHARD", Const, 0, ""},
    +		{"MSG_USERFLAGS", Const, 1, ""},
    +		{"MSG_WAITALL", Const, 0, ""},
    +		{"MSG_WAITFORONE", Const, 0, ""},
    +		{"MSG_WAITSTREAM", Const, 0, ""},
    +		{"MS_ACTIVE", Const, 0, ""},
    +		{"MS_ASYNC", Const, 0, ""},
    +		{"MS_BIND", Const, 0, ""},
    +		{"MS_DEACTIVATE", Const, 0, ""},
    +		{"MS_DIRSYNC", Const, 0, ""},
    +		{"MS_INVALIDATE", Const, 0, ""},
    +		{"MS_I_VERSION", Const, 0, ""},
    +		{"MS_KERNMOUNT", Const, 0, ""},
    +		{"MS_KILLPAGES", Const, 0, ""},
    +		{"MS_MANDLOCK", Const, 0, ""},
    +		{"MS_MGC_MSK", Const, 0, ""},
    +		{"MS_MGC_VAL", Const, 0, ""},
    +		{"MS_MOVE", Const, 0, ""},
    +		{"MS_NOATIME", Const, 0, ""},
    +		{"MS_NODEV", Const, 0, ""},
    +		{"MS_NODIRATIME", Const, 0, ""},
    +		{"MS_NOEXEC", Const, 0, ""},
    +		{"MS_NOSUID", Const, 0, ""},
    +		{"MS_NOUSER", Const, 0, ""},
    +		{"MS_POSIXACL", Const, 0, ""},
    +		{"MS_PRIVATE", Const, 0, ""},
    +		{"MS_RDONLY", Const, 0, ""},
    +		{"MS_REC", Const, 0, ""},
    +		{"MS_RELATIME", Const, 0, ""},
    +		{"MS_REMOUNT", Const, 0, ""},
    +		{"MS_RMT_MASK", Const, 0, ""},
    +		{"MS_SHARED", Const, 0, ""},
    +		{"MS_SILENT", Const, 0, ""},
    +		{"MS_SLAVE", Const, 0, ""},
    +		{"MS_STRICTATIME", Const, 0, ""},
    +		{"MS_SYNC", Const, 0, ""},
    +		{"MS_SYNCHRONOUS", Const, 0, ""},
    +		{"MS_UNBINDABLE", Const, 0, ""},
    +		{"Madvise", Func, 0, "func(b []byte, advice int) (err error)"},
    +		{"MapViewOfFile", Func, 0, ""},
    +		{"MaxTokenInfoClass", Const, 0, ""},
    +		{"Mclpool", Type, 2, ""},
    +		{"Mclpool.Alive", Field, 2, ""},
    +		{"Mclpool.Cwm", Field, 2, ""},
    +		{"Mclpool.Grown", Field, 2, ""},
    +		{"Mclpool.Hwm", Field, 2, ""},
    +		{"Mclpool.Lwm", Field, 2, ""},
    +		{"MibIfRow", Type, 0, ""},
    +		{"MibIfRow.AdminStatus", Field, 0, ""},
    +		{"MibIfRow.Descr", Field, 0, ""},
    +		{"MibIfRow.DescrLen", Field, 0, ""},
    +		{"MibIfRow.InDiscards", Field, 0, ""},
    +		{"MibIfRow.InErrors", Field, 0, ""},
    +		{"MibIfRow.InNUcastPkts", Field, 0, ""},
    +		{"MibIfRow.InOctets", Field, 0, ""},
    +		{"MibIfRow.InUcastPkts", Field, 0, ""},
    +		{"MibIfRow.InUnknownProtos", Field, 0, ""},
    +		{"MibIfRow.Index", Field, 0, ""},
    +		{"MibIfRow.LastChange", Field, 0, ""},
    +		{"MibIfRow.Mtu", Field, 0, ""},
    +		{"MibIfRow.Name", Field, 0, ""},
    +		{"MibIfRow.OperStatus", Field, 0, ""},
    +		{"MibIfRow.OutDiscards", Field, 0, ""},
    +		{"MibIfRow.OutErrors", Field, 0, ""},
    +		{"MibIfRow.OutNUcastPkts", Field, 0, ""},
    +		{"MibIfRow.OutOctets", Field, 0, ""},
    +		{"MibIfRow.OutQLen", Field, 0, ""},
    +		{"MibIfRow.OutUcastPkts", Field, 0, ""},
    +		{"MibIfRow.PhysAddr", Field, 0, ""},
    +		{"MibIfRow.PhysAddrLen", Field, 0, ""},
    +		{"MibIfRow.Speed", Field, 0, ""},
    +		{"MibIfRow.Type", Field, 0, ""},
    +		{"Mkdir", Func, 0, "func(path string, mode uint32) (err error)"},
    +		{"Mkdirat", Func, 0, "func(dirfd int, path string, mode uint32) (err error)"},
    +		{"Mkfifo", Func, 0, "func(path string, mode uint32) (err error)"},
    +		{"Mknod", Func, 0, "func(path string, mode uint32, dev int) (err error)"},
    +		{"Mknodat", Func, 0, "func(dirfd int, path string, mode uint32, dev int) (err error)"},
    +		{"Mlock", Func, 0, "func(b []byte) (err error)"},
    +		{"Mlockall", Func, 0, "func(flags int) (err error)"},
    +		{"Mmap", Func, 0, "func(fd int, offset int64, length int, prot int, flags int) (data []byte, err error)"},
    +		{"Mount", Func, 0, "func(source string, target string, fstype string, flags uintptr, data string) (err error)"},
    +		{"MoveFile", Func, 0, ""},
    +		{"Mprotect", Func, 0, "func(b []byte, prot int) (err error)"},
    +		{"Msghdr", Type, 0, ""},
    +		{"Msghdr.Control", Field, 0, ""},
    +		{"Msghdr.Controllen", Field, 0, ""},
    +		{"Msghdr.Flags", Field, 0, ""},
    +		{"Msghdr.Iov", Field, 0, ""},
    +		{"Msghdr.Iovlen", Field, 0, ""},
    +		{"Msghdr.Name", Field, 0, ""},
    +		{"Msghdr.Namelen", Field, 0, ""},
    +		{"Msghdr.Pad_cgo_0", Field, 0, ""},
    +		{"Msghdr.Pad_cgo_1", Field, 0, ""},
    +		{"Munlock", Func, 0, "func(b []byte) (err error)"},
    +		{"Munlockall", Func, 0, "func() (err error)"},
    +		{"Munmap", Func, 0, "func(b []byte) (err error)"},
    +		{"MustLoadDLL", Func, 0, ""},
    +		{"NAME_MAX", Const, 0, ""},
    +		{"NETLINK_ADD_MEMBERSHIP", Const, 0, ""},
    +		{"NETLINK_AUDIT", Const, 0, ""},
    +		{"NETLINK_BROADCAST_ERROR", Const, 0, ""},
    +		{"NETLINK_CONNECTOR", Const, 0, ""},
    +		{"NETLINK_DNRTMSG", Const, 0, ""},
    +		{"NETLINK_DROP_MEMBERSHIP", Const, 0, ""},
    +		{"NETLINK_ECRYPTFS", Const, 0, ""},
    +		{"NETLINK_FIB_LOOKUP", Const, 0, ""},
    +		{"NETLINK_FIREWALL", Const, 0, ""},
    +		{"NETLINK_GENERIC", Const, 0, ""},
    +		{"NETLINK_INET_DIAG", Const, 0, ""},
    +		{"NETLINK_IP6_FW", Const, 0, ""},
    +		{"NETLINK_ISCSI", Const, 0, ""},
    +		{"NETLINK_KOBJECT_UEVENT", Const, 0, ""},
    +		{"NETLINK_NETFILTER", Const, 0, ""},
    +		{"NETLINK_NFLOG", Const, 0, ""},
    +		{"NETLINK_NO_ENOBUFS", Const, 0, ""},
    +		{"NETLINK_PKTINFO", Const, 0, ""},
    +		{"NETLINK_RDMA", Const, 0, ""},
    +		{"NETLINK_ROUTE", Const, 0, ""},
    +		{"NETLINK_SCSITRANSPORT", Const, 0, ""},
    +		{"NETLINK_SELINUX", Const, 0, ""},
    +		{"NETLINK_UNUSED", Const, 0, ""},
    +		{"NETLINK_USERSOCK", Const, 0, ""},
    +		{"NETLINK_XFRM", Const, 0, ""},
    +		{"NET_RT_DUMP", Const, 0, ""},
    +		{"NET_RT_DUMP2", Const, 0, ""},
    +		{"NET_RT_FLAGS", Const, 0, ""},
    +		{"NET_RT_IFLIST", Const, 0, ""},
    +		{"NET_RT_IFLIST2", Const, 0, ""},
    +		{"NET_RT_IFLISTL", Const, 1, ""},
    +		{"NET_RT_IFMALIST", Const, 0, ""},
    +		{"NET_RT_MAXID", Const, 0, ""},
    +		{"NET_RT_OIFLIST", Const, 1, ""},
    +		{"NET_RT_OOIFLIST", Const, 1, ""},
    +		{"NET_RT_STAT", Const, 0, ""},
    +		{"NET_RT_STATS", Const, 1, ""},
    +		{"NET_RT_TABLE", Const, 1, ""},
    +		{"NET_RT_TRASH", Const, 0, ""},
    +		{"NLA_ALIGNTO", Const, 0, ""},
    +		{"NLA_F_NESTED", Const, 0, ""},
    +		{"NLA_F_NET_BYTEORDER", Const, 0, ""},
    +		{"NLA_HDRLEN", Const, 0, ""},
    +		{"NLMSG_ALIGNTO", Const, 0, ""},
    +		{"NLMSG_DONE", Const, 0, ""},
    +		{"NLMSG_ERROR", Const, 0, ""},
    +		{"NLMSG_HDRLEN", Const, 0, ""},
    +		{"NLMSG_MIN_TYPE", Const, 0, ""},
    +		{"NLMSG_NOOP", Const, 0, ""},
    +		{"NLMSG_OVERRUN", Const, 0, ""},
    +		{"NLM_F_ACK", Const, 0, ""},
    +		{"NLM_F_APPEND", Const, 0, ""},
    +		{"NLM_F_ATOMIC", Const, 0, ""},
    +		{"NLM_F_CREATE", Const, 0, ""},
    +		{"NLM_F_DUMP", Const, 0, ""},
    +		{"NLM_F_ECHO", Const, 0, ""},
    +		{"NLM_F_EXCL", Const, 0, ""},
    +		{"NLM_F_MATCH", Const, 0, ""},
    +		{"NLM_F_MULTI", Const, 0, ""},
    +		{"NLM_F_REPLACE", Const, 0, ""},
    +		{"NLM_F_REQUEST", Const, 0, ""},
    +		{"NLM_F_ROOT", Const, 0, ""},
    +		{"NOFLSH", Const, 0, ""},
    +		{"NOTE_ABSOLUTE", Const, 0, ""},
    +		{"NOTE_ATTRIB", Const, 0, ""},
    +		{"NOTE_BACKGROUND", Const, 16, ""},
    +		{"NOTE_CHILD", Const, 0, ""},
    +		{"NOTE_CRITICAL", Const, 16, ""},
    +		{"NOTE_DELETE", Const, 0, ""},
    +		{"NOTE_EOF", Const, 1, ""},
    +		{"NOTE_EXEC", Const, 0, ""},
    +		{"NOTE_EXIT", Const, 0, ""},
    +		{"NOTE_EXITSTATUS", Const, 0, ""},
    +		{"NOTE_EXIT_CSERROR", Const, 16, ""},
    +		{"NOTE_EXIT_DECRYPTFAIL", Const, 16, ""},
    +		{"NOTE_EXIT_DETAIL", Const, 16, ""},
    +		{"NOTE_EXIT_DETAIL_MASK", Const, 16, ""},
    +		{"NOTE_EXIT_MEMORY", Const, 16, ""},
    +		{"NOTE_EXIT_REPARENTED", Const, 16, ""},
    +		{"NOTE_EXTEND", Const, 0, ""},
    +		{"NOTE_FFAND", Const, 0, ""},
    +		{"NOTE_FFCOPY", Const, 0, ""},
    +		{"NOTE_FFCTRLMASK", Const, 0, ""},
    +		{"NOTE_FFLAGSMASK", Const, 0, ""},
    +		{"NOTE_FFNOP", Const, 0, ""},
    +		{"NOTE_FFOR", Const, 0, ""},
    +		{"NOTE_FORK", Const, 0, ""},
    +		{"NOTE_LEEWAY", Const, 16, ""},
    +		{"NOTE_LINK", Const, 0, ""},
    +		{"NOTE_LOWAT", Const, 0, ""},
    +		{"NOTE_NONE", Const, 0, ""},
    +		{"NOTE_NSECONDS", Const, 0, ""},
    +		{"NOTE_PCTRLMASK", Const, 0, ""},
    +		{"NOTE_PDATAMASK", Const, 0, ""},
    +		{"NOTE_REAP", Const, 0, ""},
    +		{"NOTE_RENAME", Const, 0, ""},
    +		{"NOTE_RESOURCEEND", Const, 0, ""},
    +		{"NOTE_REVOKE", Const, 0, ""},
    +		{"NOTE_SECONDS", Const, 0, ""},
    +		{"NOTE_SIGNAL", Const, 0, ""},
    +		{"NOTE_TRACK", Const, 0, ""},
    +		{"NOTE_TRACKERR", Const, 0, ""},
    +		{"NOTE_TRIGGER", Const, 0, ""},
    +		{"NOTE_TRUNCATE", Const, 1, ""},
    +		{"NOTE_USECONDS", Const, 0, ""},
    +		{"NOTE_VM_ERROR", Const, 0, ""},
    +		{"NOTE_VM_PRESSURE", Const, 0, ""},
    +		{"NOTE_VM_PRESSURE_SUDDEN_TERMINATE", Const, 0, ""},
    +		{"NOTE_VM_PRESSURE_TERMINATE", Const, 0, ""},
    +		{"NOTE_WRITE", Const, 0, ""},
    +		{"NameCanonical", Const, 0, ""},
    +		{"NameCanonicalEx", Const, 0, ""},
    +		{"NameDisplay", Const, 0, ""},
    +		{"NameDnsDomain", Const, 0, ""},
    +		{"NameFullyQualifiedDN", Const, 0, ""},
    +		{"NameSamCompatible", Const, 0, ""},
    +		{"NameServicePrincipal", Const, 0, ""},
    +		{"NameUniqueId", Const, 0, ""},
    +		{"NameUnknown", Const, 0, ""},
    +		{"NameUserPrincipal", Const, 0, ""},
    +		{"Nanosleep", Func, 0, "func(time *Timespec, leftover *Timespec) (err error)"},
    +		{"NetApiBufferFree", Func, 0, ""},
    +		{"NetGetJoinInformation", Func, 2, ""},
    +		{"NetSetupDomainName", Const, 2, ""},
    +		{"NetSetupUnjoined", Const, 2, ""},
    +		{"NetSetupUnknownStatus", Const, 2, ""},
    +		{"NetSetupWorkgroupName", Const, 2, ""},
    +		{"NetUserGetInfo", Func, 0, ""},
    +		{"NetlinkMessage", Type, 0, ""},
    +		{"NetlinkMessage.Data", Field, 0, ""},
    +		{"NetlinkMessage.Header", Field, 0, ""},
    +		{"NetlinkRIB", Func, 0, "func(proto int, family int) ([]byte, error)"},
    +		{"NetlinkRouteAttr", Type, 0, ""},
    +		{"NetlinkRouteAttr.Attr", Field, 0, ""},
    +		{"NetlinkRouteAttr.Value", Field, 0, ""},
    +		{"NetlinkRouteRequest", Type, 0, ""},
    +		{"NetlinkRouteRequest.Data", Field, 0, ""},
    +		{"NetlinkRouteRequest.Header", Field, 0, ""},
    +		{"NewCallback", Func, 0, ""},
    +		{"NewCallbackCDecl", Func, 3, ""},
    +		{"NewLazyDLL", Func, 0, ""},
    +		{"NlAttr", Type, 0, ""},
    +		{"NlAttr.Len", Field, 0, ""},
    +		{"NlAttr.Type", Field, 0, ""},
    +		{"NlMsgerr", Type, 0, ""},
    +		{"NlMsgerr.Error", Field, 0, ""},
    +		{"NlMsgerr.Msg", Field, 0, ""},
    +		{"NlMsghdr", Type, 0, ""},
    +		{"NlMsghdr.Flags", Field, 0, ""},
    +		{"NlMsghdr.Len", Field, 0, ""},
    +		{"NlMsghdr.Pid", Field, 0, ""},
    +		{"NlMsghdr.Seq", Field, 0, ""},
    +		{"NlMsghdr.Type", Field, 0, ""},
    +		{"NsecToFiletime", Func, 0, ""},
    +		{"NsecToTimespec", Func, 0, "func(nsec int64) Timespec"},
    +		{"NsecToTimeval", Func, 0, "func(nsec int64) Timeval"},
    +		{"Ntohs", Func, 0, ""},
    +		{"OCRNL", Const, 0, ""},
    +		{"OFDEL", Const, 0, ""},
    +		{"OFILL", Const, 0, ""},
    +		{"OFIOGETBMAP", Const, 1, ""},
    +		{"OID_PKIX_KP_SERVER_AUTH", Var, 0, ""},
    +		{"OID_SERVER_GATED_CRYPTO", Var, 0, ""},
    +		{"OID_SGC_NETSCAPE", Var, 0, ""},
    +		{"OLCUC", Const, 0, ""},
    +		{"ONLCR", Const, 0, ""},
    +		{"ONLRET", Const, 0, ""},
    +		{"ONOCR", Const, 0, ""},
    +		{"ONOEOT", Const, 1, ""},
    +		{"OPEN_ALWAYS", Const, 0, ""},
    +		{"OPEN_EXISTING", Const, 0, ""},
    +		{"OPOST", Const, 0, ""},
    +		{"O_ACCMODE", Const, 0, ""},
    +		{"O_ALERT", Const, 0, ""},
    +		{"O_ALT_IO", Const, 1, ""},
    +		{"O_APPEND", Const, 0, ""},
    +		{"O_ASYNC", Const, 0, ""},
    +		{"O_CLOEXEC", Const, 0, ""},
    +		{"O_CREAT", Const, 0, ""},
    +		{"O_DIRECT", Const, 0, ""},
    +		{"O_DIRECTORY", Const, 0, ""},
    +		{"O_DP_GETRAWENCRYPTED", Const, 16, ""},
    +		{"O_DSYNC", Const, 0, ""},
    +		{"O_EVTONLY", Const, 0, ""},
    +		{"O_EXCL", Const, 0, ""},
    +		{"O_EXEC", Const, 0, ""},
    +		{"O_EXLOCK", Const, 0, ""},
    +		{"O_FSYNC", Const, 0, ""},
    +		{"O_LARGEFILE", Const, 0, ""},
    +		{"O_NDELAY", Const, 0, ""},
    +		{"O_NOATIME", Const, 0, ""},
    +		{"O_NOCTTY", Const, 0, ""},
    +		{"O_NOFOLLOW", Const, 0, ""},
    +		{"O_NONBLOCK", Const, 0, ""},
    +		{"O_NOSIGPIPE", Const, 1, ""},
    +		{"O_POPUP", Const, 0, ""},
    +		{"O_RDONLY", Const, 0, ""},
    +		{"O_RDWR", Const, 0, ""},
    +		{"O_RSYNC", Const, 0, ""},
    +		{"O_SHLOCK", Const, 0, ""},
    +		{"O_SYMLINK", Const, 0, ""},
    +		{"O_SYNC", Const, 0, ""},
    +		{"O_TRUNC", Const, 0, ""},
    +		{"O_TTY_INIT", Const, 0, ""},
    +		{"O_WRONLY", Const, 0, ""},
    +		{"Open", Func, 0, "func(path string, mode int, perm uint32) (fd int, err error)"},
    +		{"OpenCurrentProcessToken", Func, 0, ""},
    +		{"OpenProcess", Func, 0, ""},
    +		{"OpenProcessToken", Func, 0, ""},
    +		{"Openat", Func, 0, "func(dirfd int, path string, flags int, mode uint32) (fd int, err error)"},
    +		{"Overlapped", Type, 0, ""},
    +		{"Overlapped.HEvent", Field, 0, ""},
    +		{"Overlapped.Internal", Field, 0, ""},
    +		{"Overlapped.InternalHigh", Field, 0, ""},
    +		{"Overlapped.Offset", Field, 0, ""},
    +		{"Overlapped.OffsetHigh", Field, 0, ""},
    +		{"PACKET_ADD_MEMBERSHIP", Const, 0, ""},
    +		{"PACKET_BROADCAST", Const, 0, ""},
    +		{"PACKET_DROP_MEMBERSHIP", Const, 0, ""},
    +		{"PACKET_FASTROUTE", Const, 0, ""},
    +		{"PACKET_HOST", Const, 0, ""},
    +		{"PACKET_LOOPBACK", Const, 0, ""},
    +		{"PACKET_MR_ALLMULTI", Const, 0, ""},
    +		{"PACKET_MR_MULTICAST", Const, 0, ""},
    +		{"PACKET_MR_PROMISC", Const, 0, ""},
    +		{"PACKET_MULTICAST", Const, 0, ""},
    +		{"PACKET_OTHERHOST", Const, 0, ""},
    +		{"PACKET_OUTGOING", Const, 0, ""},
    +		{"PACKET_RECV_OUTPUT", Const, 0, ""},
    +		{"PACKET_RX_RING", Const, 0, ""},
    +		{"PACKET_STATISTICS", Const, 0, ""},
    +		{"PAGE_EXECUTE_READ", Const, 0, ""},
    +		{"PAGE_EXECUTE_READWRITE", Const, 0, ""},
    +		{"PAGE_EXECUTE_WRITECOPY", Const, 0, ""},
    +		{"PAGE_READONLY", Const, 0, ""},
    +		{"PAGE_READWRITE", Const, 0, ""},
    +		{"PAGE_WRITECOPY", Const, 0, ""},
    +		{"PARENB", Const, 0, ""},
    +		{"PARMRK", Const, 0, ""},
    +		{"PARODD", Const, 0, ""},
    +		{"PENDIN", Const, 0, ""},
    +		{"PFL_HIDDEN", Const, 2, ""},
    +		{"PFL_MATCHES_PROTOCOL_ZERO", Const, 2, ""},
    +		{"PFL_MULTIPLE_PROTO_ENTRIES", Const, 2, ""},
    +		{"PFL_NETWORKDIRECT_PROVIDER", Const, 2, ""},
    +		{"PFL_RECOMMENDED_PROTO_ENTRY", Const, 2, ""},
    +		{"PF_FLUSH", Const, 1, ""},
    +		{"PKCS_7_ASN_ENCODING", Const, 0, ""},
    +		{"PMC5_PIPELINE_FLUSH", Const, 1, ""},
    +		{"PRIO_PGRP", Const, 2, ""},
    +		{"PRIO_PROCESS", Const, 2, ""},
    +		{"PRIO_USER", Const, 2, ""},
    +		{"PRI_IOFLUSH", Const, 1, ""},
    +		{"PROCESS_QUERY_INFORMATION", Const, 0, ""},
    +		{"PROCESS_TERMINATE", Const, 2, ""},
    +		{"PROT_EXEC", Const, 0, ""},
    +		{"PROT_GROWSDOWN", Const, 0, ""},
    +		{"PROT_GROWSUP", Const, 0, ""},
    +		{"PROT_NONE", Const, 0, ""},
    +		{"PROT_READ", Const, 0, ""},
    +		{"PROT_WRITE", Const, 0, ""},
    +		{"PROV_DH_SCHANNEL", Const, 0, ""},
    +		{"PROV_DSS", Const, 0, ""},
    +		{"PROV_DSS_DH", Const, 0, ""},
    +		{"PROV_EC_ECDSA_FULL", Const, 0, ""},
    +		{"PROV_EC_ECDSA_SIG", Const, 0, ""},
    +		{"PROV_EC_ECNRA_FULL", Const, 0, ""},
    +		{"PROV_EC_ECNRA_SIG", Const, 0, ""},
    +		{"PROV_FORTEZZA", Const, 0, ""},
    +		{"PROV_INTEL_SEC", Const, 0, ""},
    +		{"PROV_MS_EXCHANGE", Const, 0, ""},
    +		{"PROV_REPLACE_OWF", Const, 0, ""},
    +		{"PROV_RNG", Const, 0, ""},
    +		{"PROV_RSA_AES", Const, 0, ""},
    +		{"PROV_RSA_FULL", Const, 0, ""},
    +		{"PROV_RSA_SCHANNEL", Const, 0, ""},
    +		{"PROV_RSA_SIG", Const, 0, ""},
    +		{"PROV_SPYRUS_LYNKS", Const, 0, ""},
    +		{"PROV_SSL", Const, 0, ""},
    +		{"PR_CAPBSET_DROP", Const, 0, ""},
    +		{"PR_CAPBSET_READ", Const, 0, ""},
    +		{"PR_CLEAR_SECCOMP_FILTER", Const, 0, ""},
    +		{"PR_ENDIAN_BIG", Const, 0, ""},
    +		{"PR_ENDIAN_LITTLE", Const, 0, ""},
    +		{"PR_ENDIAN_PPC_LITTLE", Const, 0, ""},
    +		{"PR_FPEMU_NOPRINT", Const, 0, ""},
    +		{"PR_FPEMU_SIGFPE", Const, 0, ""},
    +		{"PR_FP_EXC_ASYNC", Const, 0, ""},
    +		{"PR_FP_EXC_DISABLED", Const, 0, ""},
    +		{"PR_FP_EXC_DIV", Const, 0, ""},
    +		{"PR_FP_EXC_INV", Const, 0, ""},
    +		{"PR_FP_EXC_NONRECOV", Const, 0, ""},
    +		{"PR_FP_EXC_OVF", Const, 0, ""},
    +		{"PR_FP_EXC_PRECISE", Const, 0, ""},
    +		{"PR_FP_EXC_RES", Const, 0, ""},
    +		{"PR_FP_EXC_SW_ENABLE", Const, 0, ""},
    +		{"PR_FP_EXC_UND", Const, 0, ""},
    +		{"PR_GET_DUMPABLE", Const, 0, ""},
    +		{"PR_GET_ENDIAN", Const, 0, ""},
    +		{"PR_GET_FPEMU", Const, 0, ""},
    +		{"PR_GET_FPEXC", Const, 0, ""},
    +		{"PR_GET_KEEPCAPS", Const, 0, ""},
    +		{"PR_GET_NAME", Const, 0, ""},
    +		{"PR_GET_PDEATHSIG", Const, 0, ""},
    +		{"PR_GET_SECCOMP", Const, 0, ""},
    +		{"PR_GET_SECCOMP_FILTER", Const, 0, ""},
    +		{"PR_GET_SECUREBITS", Const, 0, ""},
    +		{"PR_GET_TIMERSLACK", Const, 0, ""},
    +		{"PR_GET_TIMING", Const, 0, ""},
    +		{"PR_GET_TSC", Const, 0, ""},
    +		{"PR_GET_UNALIGN", Const, 0, ""},
    +		{"PR_MCE_KILL", Const, 0, ""},
    +		{"PR_MCE_KILL_CLEAR", Const, 0, ""},
    +		{"PR_MCE_KILL_DEFAULT", Const, 0, ""},
    +		{"PR_MCE_KILL_EARLY", Const, 0, ""},
    +		{"PR_MCE_KILL_GET", Const, 0, ""},
    +		{"PR_MCE_KILL_LATE", Const, 0, ""},
    +		{"PR_MCE_KILL_SET", Const, 0, ""},
    +		{"PR_SECCOMP_FILTER_EVENT", Const, 0, ""},
    +		{"PR_SECCOMP_FILTER_SYSCALL", Const, 0, ""},
    +		{"PR_SET_DUMPABLE", Const, 0, ""},
    +		{"PR_SET_ENDIAN", Const, 0, ""},
    +		{"PR_SET_FPEMU", Const, 0, ""},
    +		{"PR_SET_FPEXC", Const, 0, ""},
    +		{"PR_SET_KEEPCAPS", Const, 0, ""},
    +		{"PR_SET_NAME", Const, 0, ""},
    +		{"PR_SET_PDEATHSIG", Const, 0, ""},
    +		{"PR_SET_PTRACER", Const, 0, ""},
    +		{"PR_SET_SECCOMP", Const, 0, ""},
    +		{"PR_SET_SECCOMP_FILTER", Const, 0, ""},
    +		{"PR_SET_SECUREBITS", Const, 0, ""},
    +		{"PR_SET_TIMERSLACK", Const, 0, ""},
    +		{"PR_SET_TIMING", Const, 0, ""},
    +		{"PR_SET_TSC", Const, 0, ""},
    +		{"PR_SET_UNALIGN", Const, 0, ""},
    +		{"PR_TASK_PERF_EVENTS_DISABLE", Const, 0, ""},
    +		{"PR_TASK_PERF_EVENTS_ENABLE", Const, 0, ""},
    +		{"PR_TIMING_STATISTICAL", Const, 0, ""},
    +		{"PR_TIMING_TIMESTAMP", Const, 0, ""},
    +		{"PR_TSC_ENABLE", Const, 0, ""},
    +		{"PR_TSC_SIGSEGV", Const, 0, ""},
    +		{"PR_UNALIGN_NOPRINT", Const, 0, ""},
    +		{"PR_UNALIGN_SIGBUS", Const, 0, ""},
    +		{"PTRACE_ARCH_PRCTL", Const, 0, ""},
    +		{"PTRACE_ATTACH", Const, 0, ""},
    +		{"PTRACE_CONT", Const, 0, ""},
    +		{"PTRACE_DETACH", Const, 0, ""},
    +		{"PTRACE_EVENT_CLONE", Const, 0, ""},
    +		{"PTRACE_EVENT_EXEC", Const, 0, ""},
    +		{"PTRACE_EVENT_EXIT", Const, 0, ""},
    +		{"PTRACE_EVENT_FORK", Const, 0, ""},
    +		{"PTRACE_EVENT_VFORK", Const, 0, ""},
    +		{"PTRACE_EVENT_VFORK_DONE", Const, 0, ""},
    +		{"PTRACE_GETCRUNCHREGS", Const, 0, ""},
    +		{"PTRACE_GETEVENTMSG", Const, 0, ""},
    +		{"PTRACE_GETFPREGS", Const, 0, ""},
    +		{"PTRACE_GETFPXREGS", Const, 0, ""},
    +		{"PTRACE_GETHBPREGS", Const, 0, ""},
    +		{"PTRACE_GETREGS", Const, 0, ""},
    +		{"PTRACE_GETREGSET", Const, 0, ""},
    +		{"PTRACE_GETSIGINFO", Const, 0, ""},
    +		{"PTRACE_GETVFPREGS", Const, 0, ""},
    +		{"PTRACE_GETWMMXREGS", Const, 0, ""},
    +		{"PTRACE_GET_THREAD_AREA", Const, 0, ""},
    +		{"PTRACE_KILL", Const, 0, ""},
    +		{"PTRACE_OLDSETOPTIONS", Const, 0, ""},
    +		{"PTRACE_O_MASK", Const, 0, ""},
    +		{"PTRACE_O_TRACECLONE", Const, 0, ""},
    +		{"PTRACE_O_TRACEEXEC", Const, 0, ""},
    +		{"PTRACE_O_TRACEEXIT", Const, 0, ""},
    +		{"PTRACE_O_TRACEFORK", Const, 0, ""},
    +		{"PTRACE_O_TRACESYSGOOD", Const, 0, ""},
    +		{"PTRACE_O_TRACEVFORK", Const, 0, ""},
    +		{"PTRACE_O_TRACEVFORKDONE", Const, 0, ""},
    +		{"PTRACE_PEEKDATA", Const, 0, ""},
    +		{"PTRACE_PEEKTEXT", Const, 0, ""},
    +		{"PTRACE_PEEKUSR", Const, 0, ""},
    +		{"PTRACE_POKEDATA", Const, 0, ""},
    +		{"PTRACE_POKETEXT", Const, 0, ""},
    +		{"PTRACE_POKEUSR", Const, 0, ""},
    +		{"PTRACE_SETCRUNCHREGS", Const, 0, ""},
    +		{"PTRACE_SETFPREGS", Const, 0, ""},
    +		{"PTRACE_SETFPXREGS", Const, 0, ""},
    +		{"PTRACE_SETHBPREGS", Const, 0, ""},
    +		{"PTRACE_SETOPTIONS", Const, 0, ""},
    +		{"PTRACE_SETREGS", Const, 0, ""},
    +		{"PTRACE_SETREGSET", Const, 0, ""},
    +		{"PTRACE_SETSIGINFO", Const, 0, ""},
    +		{"PTRACE_SETVFPREGS", Const, 0, ""},
    +		{"PTRACE_SETWMMXREGS", Const, 0, ""},
    +		{"PTRACE_SET_SYSCALL", Const, 0, ""},
    +		{"PTRACE_SET_THREAD_AREA", Const, 0, ""},
    +		{"PTRACE_SINGLEBLOCK", Const, 0, ""},
    +		{"PTRACE_SINGLESTEP", Const, 0, ""},
    +		{"PTRACE_SYSCALL", Const, 0, ""},
    +		{"PTRACE_SYSEMU", Const, 0, ""},
    +		{"PTRACE_SYSEMU_SINGLESTEP", Const, 0, ""},
    +		{"PTRACE_TRACEME", Const, 0, ""},
    +		{"PT_ATTACH", Const, 0, ""},
    +		{"PT_ATTACHEXC", Const, 0, ""},
    +		{"PT_CONTINUE", Const, 0, ""},
    +		{"PT_DATA_ADDR", Const, 0, ""},
    +		{"PT_DENY_ATTACH", Const, 0, ""},
    +		{"PT_DETACH", Const, 0, ""},
    +		{"PT_FIRSTMACH", Const, 0, ""},
    +		{"PT_FORCEQUOTA", Const, 0, ""},
    +		{"PT_KILL", Const, 0, ""},
    +		{"PT_MASK", Const, 1, ""},
    +		{"PT_READ_D", Const, 0, ""},
    +		{"PT_READ_I", Const, 0, ""},
    +		{"PT_READ_U", Const, 0, ""},
    +		{"PT_SIGEXC", Const, 0, ""},
    +		{"PT_STEP", Const, 0, ""},
    +		{"PT_TEXT_ADDR", Const, 0, ""},
    +		{"PT_TEXT_END_ADDR", Const, 0, ""},
    +		{"PT_THUPDATE", Const, 0, ""},
    +		{"PT_TRACE_ME", Const, 0, ""},
    +		{"PT_WRITE_D", Const, 0, ""},
    +		{"PT_WRITE_I", Const, 0, ""},
    +		{"PT_WRITE_U", Const, 0, ""},
    +		{"ParseDirent", Func, 0, "func(buf []byte, max int, names []string) (consumed int, count int, newnames []string)"},
    +		{"ParseNetlinkMessage", Func, 0, "func(b []byte) ([]NetlinkMessage, error)"},
    +		{"ParseNetlinkRouteAttr", Func, 0, "func(m *NetlinkMessage) ([]NetlinkRouteAttr, error)"},
    +		{"ParseRoutingMessage", Func, 0, ""},
    +		{"ParseRoutingSockaddr", Func, 0, ""},
    +		{"ParseSocketControlMessage", Func, 0, "func(b []byte) ([]SocketControlMessage, error)"},
    +		{"ParseUnixCredentials", Func, 0, "func(m *SocketControlMessage) (*Ucred, error)"},
    +		{"ParseUnixRights", Func, 0, "func(m *SocketControlMessage) ([]int, error)"},
    +		{"PathMax", Const, 0, ""},
    +		{"Pathconf", Func, 0, ""},
    +		{"Pause", Func, 0, "func() (err error)"},
    +		{"Pipe", Func, 0, "func(p []int) error"},
    +		{"Pipe2", Func, 1, "func(p []int, flags int) error"},
    +		{"PivotRoot", Func, 0, "func(newroot string, putold string) (err error)"},
    +		{"Pointer", Type, 11, ""},
    +		{"PostQueuedCompletionStatus", Func, 0, ""},
    +		{"Pread", Func, 0, "func(fd int, p []byte, offset int64) (n int, err error)"},
    +		{"Proc", Type, 0, ""},
    +		{"Proc.Dll", Field, 0, ""},
    +		{"Proc.Name", Field, 0, ""},
    +		{"ProcAttr", Type, 0, ""},
    +		{"ProcAttr.Dir", Field, 0, ""},
    +		{"ProcAttr.Env", Field, 0, ""},
    +		{"ProcAttr.Files", Field, 0, ""},
    +		{"ProcAttr.Sys", Field, 0, ""},
    +		{"Process32First", Func, 4, ""},
    +		{"Process32Next", Func, 4, ""},
    +		{"ProcessEntry32", Type, 4, ""},
    +		{"ProcessEntry32.DefaultHeapID", Field, 4, ""},
    +		{"ProcessEntry32.ExeFile", Field, 4, ""},
    +		{"ProcessEntry32.Flags", Field, 4, ""},
    +		{"ProcessEntry32.ModuleID", Field, 4, ""},
    +		{"ProcessEntry32.ParentProcessID", Field, 4, ""},
    +		{"ProcessEntry32.PriClassBase", Field, 4, ""},
    +		{"ProcessEntry32.ProcessID", Field, 4, ""},
    +		{"ProcessEntry32.Size", Field, 4, ""},
    +		{"ProcessEntry32.Threads", Field, 4, ""},
    +		{"ProcessEntry32.Usage", Field, 4, ""},
    +		{"ProcessInformation", Type, 0, ""},
    +		{"ProcessInformation.Process", Field, 0, ""},
    +		{"ProcessInformation.ProcessId", Field, 0, ""},
    +		{"ProcessInformation.Thread", Field, 0, ""},
    +		{"ProcessInformation.ThreadId", Field, 0, ""},
    +		{"Protoent", Type, 0, ""},
    +		{"Protoent.Aliases", Field, 0, ""},
    +		{"Protoent.Name", Field, 0, ""},
    +		{"Protoent.Proto", Field, 0, ""},
    +		{"PtraceAttach", Func, 0, "func(pid int) (err error)"},
    +		{"PtraceCont", Func, 0, "func(pid int, signal int) (err error)"},
    +		{"PtraceDetach", Func, 0, "func(pid int) (err error)"},
    +		{"PtraceGetEventMsg", Func, 0, "func(pid int) (msg uint, err error)"},
    +		{"PtraceGetRegs", Func, 0, "func(pid int, regsout *PtraceRegs) (err error)"},
    +		{"PtracePeekData", Func, 0, "func(pid int, addr uintptr, out []byte) (count int, err error)"},
    +		{"PtracePeekText", Func, 0, "func(pid int, addr uintptr, out []byte) (count int, err error)"},
    +		{"PtracePokeData", Func, 0, "func(pid int, addr uintptr, data []byte) (count int, err error)"},
    +		{"PtracePokeText", Func, 0, "func(pid int, addr uintptr, data []byte) (count int, err error)"},
    +		{"PtraceRegs", Type, 0, ""},
    +		{"PtraceRegs.Cs", Field, 0, ""},
    +		{"PtraceRegs.Ds", Field, 0, ""},
    +		{"PtraceRegs.Eax", Field, 0, ""},
    +		{"PtraceRegs.Ebp", Field, 0, ""},
    +		{"PtraceRegs.Ebx", Field, 0, ""},
    +		{"PtraceRegs.Ecx", Field, 0, ""},
    +		{"PtraceRegs.Edi", Field, 0, ""},
    +		{"PtraceRegs.Edx", Field, 0, ""},
    +		{"PtraceRegs.Eflags", Field, 0, ""},
    +		{"PtraceRegs.Eip", Field, 0, ""},
    +		{"PtraceRegs.Es", Field, 0, ""},
    +		{"PtraceRegs.Esi", Field, 0, ""},
    +		{"PtraceRegs.Esp", Field, 0, ""},
    +		{"PtraceRegs.Fs", Field, 0, ""},
    +		{"PtraceRegs.Fs_base", Field, 0, ""},
    +		{"PtraceRegs.Gs", Field, 0, ""},
    +		{"PtraceRegs.Gs_base", Field, 0, ""},
    +		{"PtraceRegs.Orig_eax", Field, 0, ""},
    +		{"PtraceRegs.Orig_rax", Field, 0, ""},
    +		{"PtraceRegs.R10", Field, 0, ""},
    +		{"PtraceRegs.R11", Field, 0, ""},
    +		{"PtraceRegs.R12", Field, 0, ""},
    +		{"PtraceRegs.R13", Field, 0, ""},
    +		{"PtraceRegs.R14", Field, 0, ""},
    +		{"PtraceRegs.R15", Field, 0, ""},
    +		{"PtraceRegs.R8", Field, 0, ""},
    +		{"PtraceRegs.R9", Field, 0, ""},
    +		{"PtraceRegs.Rax", Field, 0, ""},
    +		{"PtraceRegs.Rbp", Field, 0, ""},
    +		{"PtraceRegs.Rbx", Field, 0, ""},
    +		{"PtraceRegs.Rcx", Field, 0, ""},
    +		{"PtraceRegs.Rdi", Field, 0, ""},
    +		{"PtraceRegs.Rdx", Field, 0, ""},
    +		{"PtraceRegs.Rip", Field, 0, ""},
    +		{"PtraceRegs.Rsi", Field, 0, ""},
    +		{"PtraceRegs.Rsp", Field, 0, ""},
    +		{"PtraceRegs.Ss", Field, 0, ""},
    +		{"PtraceRegs.Uregs", Field, 0, ""},
    +		{"PtraceRegs.Xcs", Field, 0, ""},
    +		{"PtraceRegs.Xds", Field, 0, ""},
    +		{"PtraceRegs.Xes", Field, 0, ""},
    +		{"PtraceRegs.Xfs", Field, 0, ""},
    +		{"PtraceRegs.Xgs", Field, 0, ""},
    +		{"PtraceRegs.Xss", Field, 0, ""},
    +		{"PtraceSetOptions", Func, 0, "func(pid int, options int) (err error)"},
    +		{"PtraceSetRegs", Func, 0, "func(pid int, regs *PtraceRegs) (err error)"},
    +		{"PtraceSingleStep", Func, 0, "func(pid int) (err error)"},
    +		{"PtraceSyscall", Func, 1, "func(pid int, signal int) (err error)"},
    +		{"Pwrite", Func, 0, "func(fd int, p []byte, offset int64) (n int, err error)"},
    +		{"REG_BINARY", Const, 0, ""},
    +		{"REG_DWORD", Const, 0, ""},
    +		{"REG_DWORD_BIG_ENDIAN", Const, 0, ""},
    +		{"REG_DWORD_LITTLE_ENDIAN", Const, 0, ""},
    +		{"REG_EXPAND_SZ", Const, 0, ""},
    +		{"REG_FULL_RESOURCE_DESCRIPTOR", Const, 0, ""},
    +		{"REG_LINK", Const, 0, ""},
    +		{"REG_MULTI_SZ", Const, 0, ""},
    +		{"REG_NONE", Const, 0, ""},
    +		{"REG_QWORD", Const, 0, ""},
    +		{"REG_QWORD_LITTLE_ENDIAN", Const, 0, ""},
    +		{"REG_RESOURCE_LIST", Const, 0, ""},
    +		{"REG_RESOURCE_REQUIREMENTS_LIST", Const, 0, ""},
    +		{"REG_SZ", Const, 0, ""},
    +		{"RLIMIT_AS", Const, 0, ""},
    +		{"RLIMIT_CORE", Const, 0, ""},
    +		{"RLIMIT_CPU", Const, 0, ""},
    +		{"RLIMIT_CPU_USAGE_MONITOR", Const, 16, ""},
    +		{"RLIMIT_DATA", Const, 0, ""},
    +		{"RLIMIT_FSIZE", Const, 0, ""},
    +		{"RLIMIT_NOFILE", Const, 0, ""},
    +		{"RLIMIT_STACK", Const, 0, ""},
    +		{"RLIM_INFINITY", Const, 0, ""},
    +		{"RTAX_ADVMSS", Const, 0, ""},
    +		{"RTAX_AUTHOR", Const, 0, ""},
    +		{"RTAX_BRD", Const, 0, ""},
    +		{"RTAX_CWND", Const, 0, ""},
    +		{"RTAX_DST", Const, 0, ""},
    +		{"RTAX_FEATURES", Const, 0, ""},
    +		{"RTAX_FEATURE_ALLFRAG", Const, 0, ""},
    +		{"RTAX_FEATURE_ECN", Const, 0, ""},
    +		{"RTAX_FEATURE_SACK", Const, 0, ""},
    +		{"RTAX_FEATURE_TIMESTAMP", Const, 0, ""},
    +		{"RTAX_GATEWAY", Const, 0, ""},
    +		{"RTAX_GENMASK", Const, 0, ""},
    +		{"RTAX_HOPLIMIT", Const, 0, ""},
    +		{"RTAX_IFA", Const, 0, ""},
    +		{"RTAX_IFP", Const, 0, ""},
    +		{"RTAX_INITCWND", Const, 0, ""},
    +		{"RTAX_INITRWND", Const, 0, ""},
    +		{"RTAX_LABEL", Const, 1, ""},
    +		{"RTAX_LOCK", Const, 0, ""},
    +		{"RTAX_MAX", Const, 0, ""},
    +		{"RTAX_MTU", Const, 0, ""},
    +		{"RTAX_NETMASK", Const, 0, ""},
    +		{"RTAX_REORDERING", Const, 0, ""},
    +		{"RTAX_RTO_MIN", Const, 0, ""},
    +		{"RTAX_RTT", Const, 0, ""},
    +		{"RTAX_RTTVAR", Const, 0, ""},
    +		{"RTAX_SRC", Const, 1, ""},
    +		{"RTAX_SRCMASK", Const, 1, ""},
    +		{"RTAX_SSTHRESH", Const, 0, ""},
    +		{"RTAX_TAG", Const, 1, ""},
    +		{"RTAX_UNSPEC", Const, 0, ""},
    +		{"RTAX_WINDOW", Const, 0, ""},
    +		{"RTA_ALIGNTO", Const, 0, ""},
    +		{"RTA_AUTHOR", Const, 0, ""},
    +		{"RTA_BRD", Const, 0, ""},
    +		{"RTA_CACHEINFO", Const, 0, ""},
    +		{"RTA_DST", Const, 0, ""},
    +		{"RTA_FLOW", Const, 0, ""},
    +		{"RTA_GATEWAY", Const, 0, ""},
    +		{"RTA_GENMASK", Const, 0, ""},
    +		{"RTA_IFA", Const, 0, ""},
    +		{"RTA_IFP", Const, 0, ""},
    +		{"RTA_IIF", Const, 0, ""},
    +		{"RTA_LABEL", Const, 1, ""},
    +		{"RTA_MAX", Const, 0, ""},
    +		{"RTA_METRICS", Const, 0, ""},
    +		{"RTA_MULTIPATH", Const, 0, ""},
    +		{"RTA_NETMASK", Const, 0, ""},
    +		{"RTA_OIF", Const, 0, ""},
    +		{"RTA_PREFSRC", Const, 0, ""},
    +		{"RTA_PRIORITY", Const, 0, ""},
    +		{"RTA_SRC", Const, 0, ""},
    +		{"RTA_SRCMASK", Const, 1, ""},
    +		{"RTA_TABLE", Const, 0, ""},
    +		{"RTA_TAG", Const, 1, ""},
    +		{"RTA_UNSPEC", Const, 0, ""},
    +		{"RTCF_DIRECTSRC", Const, 0, ""},
    +		{"RTCF_DOREDIRECT", Const, 0, ""},
    +		{"RTCF_LOG", Const, 0, ""},
    +		{"RTCF_MASQ", Const, 0, ""},
    +		{"RTCF_NAT", Const, 0, ""},
    +		{"RTCF_VALVE", Const, 0, ""},
    +		{"RTF_ADDRCLASSMASK", Const, 0, ""},
    +		{"RTF_ADDRCONF", Const, 0, ""},
    +		{"RTF_ALLONLINK", Const, 0, ""},
    +		{"RTF_ANNOUNCE", Const, 1, ""},
    +		{"RTF_BLACKHOLE", Const, 0, ""},
    +		{"RTF_BROADCAST", Const, 0, ""},
    +		{"RTF_CACHE", Const, 0, ""},
    +		{"RTF_CLONED", Const, 1, ""},
    +		{"RTF_CLONING", Const, 0, ""},
    +		{"RTF_CONDEMNED", Const, 0, ""},
    +		{"RTF_DEFAULT", Const, 0, ""},
    +		{"RTF_DELCLONE", Const, 0, ""},
    +		{"RTF_DONE", Const, 0, ""},
    +		{"RTF_DYNAMIC", Const, 0, ""},
    +		{"RTF_FLOW", Const, 0, ""},
    +		{"RTF_FMASK", Const, 0, ""},
    +		{"RTF_GATEWAY", Const, 0, ""},
    +		{"RTF_GWFLAG_COMPAT", Const, 3, ""},
    +		{"RTF_HOST", Const, 0, ""},
    +		{"RTF_IFREF", Const, 0, ""},
    +		{"RTF_IFSCOPE", Const, 0, ""},
    +		{"RTF_INTERFACE", Const, 0, ""},
    +		{"RTF_IRTT", Const, 0, ""},
    +		{"RTF_LINKRT", Const, 0, ""},
    +		{"RTF_LLDATA", Const, 0, ""},
    +		{"RTF_LLINFO", Const, 0, ""},
    +		{"RTF_LOCAL", Const, 0, ""},
    +		{"RTF_MASK", Const, 1, ""},
    +		{"RTF_MODIFIED", Const, 0, ""},
    +		{"RTF_MPATH", Const, 1, ""},
    +		{"RTF_MPLS", Const, 1, ""},
    +		{"RTF_MSS", Const, 0, ""},
    +		{"RTF_MTU", Const, 0, ""},
    +		{"RTF_MULTICAST", Const, 0, ""},
    +		{"RTF_NAT", Const, 0, ""},
    +		{"RTF_NOFORWARD", Const, 0, ""},
    +		{"RTF_NONEXTHOP", Const, 0, ""},
    +		{"RTF_NOPMTUDISC", Const, 0, ""},
    +		{"RTF_PERMANENT_ARP", Const, 1, ""},
    +		{"RTF_PINNED", Const, 0, ""},
    +		{"RTF_POLICY", Const, 0, ""},
    +		{"RTF_PRCLONING", Const, 0, ""},
    +		{"RTF_PROTO1", Const, 0, ""},
    +		{"RTF_PROTO2", Const, 0, ""},
    +		{"RTF_PROTO3", Const, 0, ""},
    +		{"RTF_PROXY", Const, 16, ""},
    +		{"RTF_REINSTATE", Const, 0, ""},
    +		{"RTF_REJECT", Const, 0, ""},
    +		{"RTF_RNH_LOCKED", Const, 0, ""},
    +		{"RTF_ROUTER", Const, 16, ""},
    +		{"RTF_SOURCE", Const, 1, ""},
    +		{"RTF_SRC", Const, 1, ""},
    +		{"RTF_STATIC", Const, 0, ""},
    +		{"RTF_STICKY", Const, 0, ""},
    +		{"RTF_THROW", Const, 0, ""},
    +		{"RTF_TUNNEL", Const, 1, ""},
    +		{"RTF_UP", Const, 0, ""},
    +		{"RTF_USETRAILERS", Const, 1, ""},
    +		{"RTF_WASCLONED", Const, 0, ""},
    +		{"RTF_WINDOW", Const, 0, ""},
    +		{"RTF_XRESOLVE", Const, 0, ""},
    +		{"RTM_ADD", Const, 0, ""},
    +		{"RTM_BASE", Const, 0, ""},
    +		{"RTM_CHANGE", Const, 0, ""},
    +		{"RTM_CHGADDR", Const, 1, ""},
    +		{"RTM_DELACTION", Const, 0, ""},
    +		{"RTM_DELADDR", Const, 0, ""},
    +		{"RTM_DELADDRLABEL", Const, 0, ""},
    +		{"RTM_DELETE", Const, 0, ""},
    +		{"RTM_DELLINK", Const, 0, ""},
    +		{"RTM_DELMADDR", Const, 0, ""},
    +		{"RTM_DELNEIGH", Const, 0, ""},
    +		{"RTM_DELQDISC", Const, 0, ""},
    +		{"RTM_DELROUTE", Const, 0, ""},
    +		{"RTM_DELRULE", Const, 0, ""},
    +		{"RTM_DELTCLASS", Const, 0, ""},
    +		{"RTM_DELTFILTER", Const, 0, ""},
    +		{"RTM_DESYNC", Const, 1, ""},
    +		{"RTM_F_CLONED", Const, 0, ""},
    +		{"RTM_F_EQUALIZE", Const, 0, ""},
    +		{"RTM_F_NOTIFY", Const, 0, ""},
    +		{"RTM_F_PREFIX", Const, 0, ""},
    +		{"RTM_GET", Const, 0, ""},
    +		{"RTM_GET2", Const, 0, ""},
    +		{"RTM_GETACTION", Const, 0, ""},
    +		{"RTM_GETADDR", Const, 0, ""},
    +		{"RTM_GETADDRLABEL", Const, 0, ""},
    +		{"RTM_GETANYCAST", Const, 0, ""},
    +		{"RTM_GETDCB", Const, 0, ""},
    +		{"RTM_GETLINK", Const, 0, ""},
    +		{"RTM_GETMULTICAST", Const, 0, ""},
    +		{"RTM_GETNEIGH", Const, 0, ""},
    +		{"RTM_GETNEIGHTBL", Const, 0, ""},
    +		{"RTM_GETQDISC", Const, 0, ""},
    +		{"RTM_GETROUTE", Const, 0, ""},
    +		{"RTM_GETRULE", Const, 0, ""},
    +		{"RTM_GETTCLASS", Const, 0, ""},
    +		{"RTM_GETTFILTER", Const, 0, ""},
    +		{"RTM_IEEE80211", Const, 0, ""},
    +		{"RTM_IFANNOUNCE", Const, 0, ""},
    +		{"RTM_IFINFO", Const, 0, ""},
    +		{"RTM_IFINFO2", Const, 0, ""},
    +		{"RTM_LLINFO_UPD", Const, 1, ""},
    +		{"RTM_LOCK", Const, 0, ""},
    +		{"RTM_LOSING", Const, 0, ""},
    +		{"RTM_MAX", Const, 0, ""},
    +		{"RTM_MAXSIZE", Const, 1, ""},
    +		{"RTM_MISS", Const, 0, ""},
    +		{"RTM_NEWACTION", Const, 0, ""},
    +		{"RTM_NEWADDR", Const, 0, ""},
    +		{"RTM_NEWADDRLABEL", Const, 0, ""},
    +		{"RTM_NEWLINK", Const, 0, ""},
    +		{"RTM_NEWMADDR", Const, 0, ""},
    +		{"RTM_NEWMADDR2", Const, 0, ""},
    +		{"RTM_NEWNDUSEROPT", Const, 0, ""},
    +		{"RTM_NEWNEIGH", Const, 0, ""},
    +		{"RTM_NEWNEIGHTBL", Const, 0, ""},
    +		{"RTM_NEWPREFIX", Const, 0, ""},
    +		{"RTM_NEWQDISC", Const, 0, ""},
    +		{"RTM_NEWROUTE", Const, 0, ""},
    +		{"RTM_NEWRULE", Const, 0, ""},
    +		{"RTM_NEWTCLASS", Const, 0, ""},
    +		{"RTM_NEWTFILTER", Const, 0, ""},
    +		{"RTM_NR_FAMILIES", Const, 0, ""},
    +		{"RTM_NR_MSGTYPES", Const, 0, ""},
    +		{"RTM_OIFINFO", Const, 1, ""},
    +		{"RTM_OLDADD", Const, 0, ""},
    +		{"RTM_OLDDEL", Const, 0, ""},
    +		{"RTM_OOIFINFO", Const, 1, ""},
    +		{"RTM_REDIRECT", Const, 0, ""},
    +		{"RTM_RESOLVE", Const, 0, ""},
    +		{"RTM_RTTUNIT", Const, 0, ""},
    +		{"RTM_SETDCB", Const, 0, ""},
    +		{"RTM_SETGATE", Const, 1, ""},
    +		{"RTM_SETLINK", Const, 0, ""},
    +		{"RTM_SETNEIGHTBL", Const, 0, ""},
    +		{"RTM_VERSION", Const, 0, ""},
    +		{"RTNH_ALIGNTO", Const, 0, ""},
    +		{"RTNH_F_DEAD", Const, 0, ""},
    +		{"RTNH_F_ONLINK", Const, 0, ""},
    +		{"RTNH_F_PERVASIVE", Const, 0, ""},
    +		{"RTNLGRP_IPV4_IFADDR", Const, 1, ""},
    +		{"RTNLGRP_IPV4_MROUTE", Const, 1, ""},
    +		{"RTNLGRP_IPV4_ROUTE", Const, 1, ""},
    +		{"RTNLGRP_IPV4_RULE", Const, 1, ""},
    +		{"RTNLGRP_IPV6_IFADDR", Const, 1, ""},
    +		{"RTNLGRP_IPV6_IFINFO", Const, 1, ""},
    +		{"RTNLGRP_IPV6_MROUTE", Const, 1, ""},
    +		{"RTNLGRP_IPV6_PREFIX", Const, 1, ""},
    +		{"RTNLGRP_IPV6_ROUTE", Const, 1, ""},
    +		{"RTNLGRP_IPV6_RULE", Const, 1, ""},
    +		{"RTNLGRP_LINK", Const, 1, ""},
    +		{"RTNLGRP_ND_USEROPT", Const, 1, ""},
    +		{"RTNLGRP_NEIGH", Const, 1, ""},
    +		{"RTNLGRP_NONE", Const, 1, ""},
    +		{"RTNLGRP_NOTIFY", Const, 1, ""},
    +		{"RTNLGRP_TC", Const, 1, ""},
    +		{"RTN_ANYCAST", Const, 0, ""},
    +		{"RTN_BLACKHOLE", Const, 0, ""},
    +		{"RTN_BROADCAST", Const, 0, ""},
    +		{"RTN_LOCAL", Const, 0, ""},
    +		{"RTN_MAX", Const, 0, ""},
    +		{"RTN_MULTICAST", Const, 0, ""},
    +		{"RTN_NAT", Const, 0, ""},
    +		{"RTN_PROHIBIT", Const, 0, ""},
    +		{"RTN_THROW", Const, 0, ""},
    +		{"RTN_UNICAST", Const, 0, ""},
    +		{"RTN_UNREACHABLE", Const, 0, ""},
    +		{"RTN_UNSPEC", Const, 0, ""},
    +		{"RTN_XRESOLVE", Const, 0, ""},
    +		{"RTPROT_BIRD", Const, 0, ""},
    +		{"RTPROT_BOOT", Const, 0, ""},
    +		{"RTPROT_DHCP", Const, 0, ""},
    +		{"RTPROT_DNROUTED", Const, 0, ""},
    +		{"RTPROT_GATED", Const, 0, ""},
    +		{"RTPROT_KERNEL", Const, 0, ""},
    +		{"RTPROT_MRT", Const, 0, ""},
    +		{"RTPROT_NTK", Const, 0, ""},
    +		{"RTPROT_RA", Const, 0, ""},
    +		{"RTPROT_REDIRECT", Const, 0, ""},
    +		{"RTPROT_STATIC", Const, 0, ""},
    +		{"RTPROT_UNSPEC", Const, 0, ""},
    +		{"RTPROT_XORP", Const, 0, ""},
    +		{"RTPROT_ZEBRA", Const, 0, ""},
    +		{"RTV_EXPIRE", Const, 0, ""},
    +		{"RTV_HOPCOUNT", Const, 0, ""},
    +		{"RTV_MTU", Const, 0, ""},
    +		{"RTV_RPIPE", Const, 0, ""},
    +		{"RTV_RTT", Const, 0, ""},
    +		{"RTV_RTTVAR", Const, 0, ""},
    +		{"RTV_SPIPE", Const, 0, ""},
    +		{"RTV_SSTHRESH", Const, 0, ""},
    +		{"RTV_WEIGHT", Const, 0, ""},
    +		{"RT_CACHING_CONTEXT", Const, 1, ""},
    +		{"RT_CLASS_DEFAULT", Const, 0, ""},
    +		{"RT_CLASS_LOCAL", Const, 0, ""},
    +		{"RT_CLASS_MAIN", Const, 0, ""},
    +		{"RT_CLASS_MAX", Const, 0, ""},
    +		{"RT_CLASS_UNSPEC", Const, 0, ""},
    +		{"RT_DEFAULT_FIB", Const, 1, ""},
    +		{"RT_NORTREF", Const, 1, ""},
    +		{"RT_SCOPE_HOST", Const, 0, ""},
    +		{"RT_SCOPE_LINK", Const, 0, ""},
    +		{"RT_SCOPE_NOWHERE", Const, 0, ""},
    +		{"RT_SCOPE_SITE", Const, 0, ""},
    +		{"RT_SCOPE_UNIVERSE", Const, 0, ""},
    +		{"RT_TABLEID_MAX", Const, 1, ""},
    +		{"RT_TABLE_COMPAT", Const, 0, ""},
    +		{"RT_TABLE_DEFAULT", Const, 0, ""},
    +		{"RT_TABLE_LOCAL", Const, 0, ""},
    +		{"RT_TABLE_MAIN", Const, 0, ""},
    +		{"RT_TABLE_MAX", Const, 0, ""},
    +		{"RT_TABLE_UNSPEC", Const, 0, ""},
    +		{"RUSAGE_CHILDREN", Const, 0, ""},
    +		{"RUSAGE_SELF", Const, 0, ""},
    +		{"RUSAGE_THREAD", Const, 0, ""},
    +		{"Radvisory_t", Type, 0, ""},
    +		{"Radvisory_t.Count", Field, 0, ""},
    +		{"Radvisory_t.Offset", Field, 0, ""},
    +		{"Radvisory_t.Pad_cgo_0", Field, 0, ""},
    +		{"RawConn", Type, 9, ""},
    +		{"RawSockaddr", Type, 0, ""},
    +		{"RawSockaddr.Data", Field, 0, ""},
    +		{"RawSockaddr.Family", Field, 0, ""},
    +		{"RawSockaddr.Len", Field, 0, ""},
    +		{"RawSockaddrAny", Type, 0, ""},
    +		{"RawSockaddrAny.Addr", Field, 0, ""},
    +		{"RawSockaddrAny.Pad", Field, 0, ""},
    +		{"RawSockaddrDatalink", Type, 0, ""},
    +		{"RawSockaddrDatalink.Alen", Field, 0, ""},
    +		{"RawSockaddrDatalink.Data", Field, 0, ""},
    +		{"RawSockaddrDatalink.Family", Field, 0, ""},
    +		{"RawSockaddrDatalink.Index", Field, 0, ""},
    +		{"RawSockaddrDatalink.Len", Field, 0, ""},
    +		{"RawSockaddrDatalink.Nlen", Field, 0, ""},
    +		{"RawSockaddrDatalink.Pad_cgo_0", Field, 2, ""},
    +		{"RawSockaddrDatalink.Slen", Field, 0, ""},
    +		{"RawSockaddrDatalink.Type", Field, 0, ""},
    +		{"RawSockaddrInet4", Type, 0, ""},
    +		{"RawSockaddrInet4.Addr", Field, 0, ""},
    +		{"RawSockaddrInet4.Family", Field, 0, ""},
    +		{"RawSockaddrInet4.Len", Field, 0, ""},
    +		{"RawSockaddrInet4.Port", Field, 0, ""},
    +		{"RawSockaddrInet4.Zero", Field, 0, ""},
    +		{"RawSockaddrInet6", Type, 0, ""},
    +		{"RawSockaddrInet6.Addr", Field, 0, ""},
    +		{"RawSockaddrInet6.Family", Field, 0, ""},
    +		{"RawSockaddrInet6.Flowinfo", Field, 0, ""},
    +		{"RawSockaddrInet6.Len", Field, 0, ""},
    +		{"RawSockaddrInet6.Port", Field, 0, ""},
    +		{"RawSockaddrInet6.Scope_id", Field, 0, ""},
    +		{"RawSockaddrLinklayer", Type, 0, ""},
    +		{"RawSockaddrLinklayer.Addr", Field, 0, ""},
    +		{"RawSockaddrLinklayer.Family", Field, 0, ""},
    +		{"RawSockaddrLinklayer.Halen", Field, 0, ""},
    +		{"RawSockaddrLinklayer.Hatype", Field, 0, ""},
    +		{"RawSockaddrLinklayer.Ifindex", Field, 0, ""},
    +		{"RawSockaddrLinklayer.Pkttype", Field, 0, ""},
    +		{"RawSockaddrLinklayer.Protocol", Field, 0, ""},
    +		{"RawSockaddrNetlink", Type, 0, ""},
    +		{"RawSockaddrNetlink.Family", Field, 0, ""},
    +		{"RawSockaddrNetlink.Groups", Field, 0, ""},
    +		{"RawSockaddrNetlink.Pad", Field, 0, ""},
    +		{"RawSockaddrNetlink.Pid", Field, 0, ""},
    +		{"RawSockaddrUnix", Type, 0, ""},
    +		{"RawSockaddrUnix.Family", Field, 0, ""},
    +		{"RawSockaddrUnix.Len", Field, 0, ""},
    +		{"RawSockaddrUnix.Pad_cgo_0", Field, 2, ""},
    +		{"RawSockaddrUnix.Path", Field, 0, ""},
    +		{"RawSyscall", Func, 0, "func(trap uintptr, a1 uintptr, a2 uintptr, a3 uintptr) (r1 uintptr, r2 uintptr, err Errno)"},
    +		{"RawSyscall6", Func, 0, "func(trap uintptr, a1 uintptr, a2 uintptr, a3 uintptr, a4 uintptr, a5 uintptr, a6 uintptr) (r1 uintptr, r2 uintptr, err Errno)"},
    +		{"Read", Func, 0, "func(fd int, p []byte) (n int, err error)"},
    +		{"ReadConsole", Func, 1, ""},
    +		{"ReadDirectoryChanges", Func, 0, ""},
    +		{"ReadDirent", Func, 0, "func(fd int, buf []byte) (n int, err error)"},
    +		{"ReadFile", Func, 0, ""},
    +		{"Readlink", Func, 0, "func(path string, buf []byte) (n int, err error)"},
    +		{"Reboot", Func, 0, "func(cmd int) (err error)"},
    +		{"Recvfrom", Func, 0, "func(fd int, p []byte, flags int) (n int, from Sockaddr, err error)"},
    +		{"Recvmsg", Func, 0, "func(fd int, p []byte, oob []byte, flags int) (n int, oobn int, recvflags int, from Sockaddr, err error)"},
    +		{"RegCloseKey", Func, 0, ""},
    +		{"RegEnumKeyEx", Func, 0, ""},
    +		{"RegOpenKeyEx", Func, 0, ""},
    +		{"RegQueryInfoKey", Func, 0, ""},
    +		{"RegQueryValueEx", Func, 0, ""},
    +		{"RemoveDirectory", Func, 0, ""},
    +		{"Removexattr", Func, 1, "func(path string, attr string) (err error)"},
    +		{"Rename", Func, 0, "func(oldpath string, newpath string) (err error)"},
    +		{"Renameat", Func, 0, "func(olddirfd int, oldpath string, newdirfd int, newpath string) (err error)"},
    +		{"Revoke", Func, 0, ""},
    +		{"Rlimit", Type, 0, ""},
    +		{"Rlimit.Cur", Field, 0, ""},
    +		{"Rlimit.Max", Field, 0, ""},
    +		{"Rmdir", Func, 0, "func(path string) error"},
    +		{"RouteMessage", Type, 0, ""},
    +		{"RouteMessage.Data", Field, 0, ""},
    +		{"RouteMessage.Header", Field, 0, ""},
    +		{"RouteRIB", Func, 0, ""},
    +		{"RoutingMessage", Type, 0, ""},
    +		{"RtAttr", Type, 0, ""},
    +		{"RtAttr.Len", Field, 0, ""},
    +		{"RtAttr.Type", Field, 0, ""},
    +		{"RtGenmsg", Type, 0, ""},
    +		{"RtGenmsg.Family", Field, 0, ""},
    +		{"RtMetrics", Type, 0, ""},
    +		{"RtMetrics.Expire", Field, 0, ""},
    +		{"RtMetrics.Filler", Field, 0, ""},
    +		{"RtMetrics.Hopcount", Field, 0, ""},
    +		{"RtMetrics.Locks", Field, 0, ""},
    +		{"RtMetrics.Mtu", Field, 0, ""},
    +		{"RtMetrics.Pad", Field, 3, ""},
    +		{"RtMetrics.Pksent", Field, 0, ""},
    +		{"RtMetrics.Recvpipe", Field, 0, ""},
    +		{"RtMetrics.Refcnt", Field, 2, ""},
    +		{"RtMetrics.Rtt", Field, 0, ""},
    +		{"RtMetrics.Rttvar", Field, 0, ""},
    +		{"RtMetrics.Sendpipe", Field, 0, ""},
    +		{"RtMetrics.Ssthresh", Field, 0, ""},
    +		{"RtMetrics.Weight", Field, 0, ""},
    +		{"RtMsg", Type, 0, ""},
    +		{"RtMsg.Dst_len", Field, 0, ""},
    +		{"RtMsg.Family", Field, 0, ""},
    +		{"RtMsg.Flags", Field, 0, ""},
    +		{"RtMsg.Protocol", Field, 0, ""},
    +		{"RtMsg.Scope", Field, 0, ""},
    +		{"RtMsg.Src_len", Field, 0, ""},
    +		{"RtMsg.Table", Field, 0, ""},
    +		{"RtMsg.Tos", Field, 0, ""},
    +		{"RtMsg.Type", Field, 0, ""},
    +		{"RtMsghdr", Type, 0, ""},
    +		{"RtMsghdr.Addrs", Field, 0, ""},
    +		{"RtMsghdr.Errno", Field, 0, ""},
    +		{"RtMsghdr.Flags", Field, 0, ""},
    +		{"RtMsghdr.Fmask", Field, 0, ""},
    +		{"RtMsghdr.Hdrlen", Field, 2, ""},
    +		{"RtMsghdr.Index", Field, 0, ""},
    +		{"RtMsghdr.Inits", Field, 0, ""},
    +		{"RtMsghdr.Mpls", Field, 2, ""},
    +		{"RtMsghdr.Msglen", Field, 0, ""},
    +		{"RtMsghdr.Pad_cgo_0", Field, 0, ""},
    +		{"RtMsghdr.Pad_cgo_1", Field, 2, ""},
    +		{"RtMsghdr.Pid", Field, 0, ""},
    +		{"RtMsghdr.Priority", Field, 2, ""},
    +		{"RtMsghdr.Rmx", Field, 0, ""},
    +		{"RtMsghdr.Seq", Field, 0, ""},
    +		{"RtMsghdr.Tableid", Field, 2, ""},
    +		{"RtMsghdr.Type", Field, 0, ""},
    +		{"RtMsghdr.Use", Field, 0, ""},
    +		{"RtMsghdr.Version", Field, 0, ""},
    +		{"RtNexthop", Type, 0, ""},
    +		{"RtNexthop.Flags", Field, 0, ""},
    +		{"RtNexthop.Hops", Field, 0, ""},
    +		{"RtNexthop.Ifindex", Field, 0, ""},
    +		{"RtNexthop.Len", Field, 0, ""},
    +		{"Rusage", Type, 0, ""},
    +		{"Rusage.CreationTime", Field, 0, ""},
    +		{"Rusage.ExitTime", Field, 0, ""},
    +		{"Rusage.Idrss", Field, 0, ""},
    +		{"Rusage.Inblock", Field, 0, ""},
    +		{"Rusage.Isrss", Field, 0, ""},
    +		{"Rusage.Ixrss", Field, 0, ""},
    +		{"Rusage.KernelTime", Field, 0, ""},
    +		{"Rusage.Majflt", Field, 0, ""},
    +		{"Rusage.Maxrss", Field, 0, ""},
    +		{"Rusage.Minflt", Field, 0, ""},
    +		{"Rusage.Msgrcv", Field, 0, ""},
    +		{"Rusage.Msgsnd", Field, 0, ""},
    +		{"Rusage.Nivcsw", Field, 0, ""},
    +		{"Rusage.Nsignals", Field, 0, ""},
    +		{"Rusage.Nswap", Field, 0, ""},
    +		{"Rusage.Nvcsw", Field, 0, ""},
    +		{"Rusage.Oublock", Field, 0, ""},
    +		{"Rusage.Stime", Field, 0, ""},
    +		{"Rusage.UserTime", Field, 0, ""},
    +		{"Rusage.Utime", Field, 0, ""},
    +		{"SCM_BINTIME", Const, 0, ""},
    +		{"SCM_CREDENTIALS", Const, 0, ""},
    +		{"SCM_CREDS", Const, 0, ""},
    +		{"SCM_RIGHTS", Const, 0, ""},
    +		{"SCM_TIMESTAMP", Const, 0, ""},
    +		{"SCM_TIMESTAMPING", Const, 0, ""},
    +		{"SCM_TIMESTAMPNS", Const, 0, ""},
    +		{"SCM_TIMESTAMP_MONOTONIC", Const, 0, ""},
    +		{"SHUT_RD", Const, 0, ""},
    +		{"SHUT_RDWR", Const, 0, ""},
    +		{"SHUT_WR", Const, 0, ""},
    +		{"SID", Type, 0, ""},
    +		{"SIDAndAttributes", Type, 0, ""},
    +		{"SIDAndAttributes.Attributes", Field, 0, ""},
    +		{"SIDAndAttributes.Sid", Field, 0, ""},
    +		{"SIGABRT", Const, 0, ""},
    +		{"SIGALRM", Const, 0, ""},
    +		{"SIGBUS", Const, 0, ""},
    +		{"SIGCHLD", Const, 0, ""},
    +		{"SIGCLD", Const, 0, ""},
    +		{"SIGCONT", Const, 0, ""},
    +		{"SIGEMT", Const, 0, ""},
    +		{"SIGFPE", Const, 0, ""},
    +		{"SIGHUP", Const, 0, ""},
    +		{"SIGILL", Const, 0, ""},
    +		{"SIGINFO", Const, 0, ""},
    +		{"SIGINT", Const, 0, ""},
    +		{"SIGIO", Const, 0, ""},
    +		{"SIGIOT", Const, 0, ""},
    +		{"SIGKILL", Const, 0, ""},
    +		{"SIGLIBRT", Const, 1, ""},
    +		{"SIGLWP", Const, 0, ""},
    +		{"SIGPIPE", Const, 0, ""},
    +		{"SIGPOLL", Const, 0, ""},
    +		{"SIGPROF", Const, 0, ""},
    +		{"SIGPWR", Const, 0, ""},
    +		{"SIGQUIT", Const, 0, ""},
    +		{"SIGSEGV", Const, 0, ""},
    +		{"SIGSTKFLT", Const, 0, ""},
    +		{"SIGSTOP", Const, 0, ""},
    +		{"SIGSYS", Const, 0, ""},
    +		{"SIGTERM", Const, 0, ""},
    +		{"SIGTHR", Const, 0, ""},
    +		{"SIGTRAP", Const, 0, ""},
    +		{"SIGTSTP", Const, 0, ""},
    +		{"SIGTTIN", Const, 0, ""},
    +		{"SIGTTOU", Const, 0, ""},
    +		{"SIGUNUSED", Const, 0, ""},
    +		{"SIGURG", Const, 0, ""},
    +		{"SIGUSR1", Const, 0, ""},
    +		{"SIGUSR2", Const, 0, ""},
    +		{"SIGVTALRM", Const, 0, ""},
    +		{"SIGWINCH", Const, 0, ""},
    +		{"SIGXCPU", Const, 0, ""},
    +		{"SIGXFSZ", Const, 0, ""},
    +		{"SIOCADDDLCI", Const, 0, ""},
    +		{"SIOCADDMULTI", Const, 0, ""},
    +		{"SIOCADDRT", Const, 0, ""},
    +		{"SIOCAIFADDR", Const, 0, ""},
    +		{"SIOCAIFGROUP", Const, 0, ""},
    +		{"SIOCALIFADDR", Const, 0, ""},
    +		{"SIOCARPIPLL", Const, 0, ""},
    +		{"SIOCATMARK", Const, 0, ""},
    +		{"SIOCAUTOADDR", Const, 0, ""},
    +		{"SIOCAUTONETMASK", Const, 0, ""},
    +		{"SIOCBRDGADD", Const, 1, ""},
    +		{"SIOCBRDGADDS", Const, 1, ""},
    +		{"SIOCBRDGARL", Const, 1, ""},
    +		{"SIOCBRDGDADDR", Const, 1, ""},
    +		{"SIOCBRDGDEL", Const, 1, ""},
    +		{"SIOCBRDGDELS", Const, 1, ""},
    +		{"SIOCBRDGFLUSH", Const, 1, ""},
    +		{"SIOCBRDGFRL", Const, 1, ""},
    +		{"SIOCBRDGGCACHE", Const, 1, ""},
    +		{"SIOCBRDGGFD", Const, 1, ""},
    +		{"SIOCBRDGGHT", Const, 1, ""},
    +		{"SIOCBRDGGIFFLGS", Const, 1, ""},
    +		{"SIOCBRDGGMA", Const, 1, ""},
    +		{"SIOCBRDGGPARAM", Const, 1, ""},
    +		{"SIOCBRDGGPRI", Const, 1, ""},
    +		{"SIOCBRDGGRL", Const, 1, ""},
    +		{"SIOCBRDGGSIFS", Const, 1, ""},
    +		{"SIOCBRDGGTO", Const, 1, ""},
    +		{"SIOCBRDGIFS", Const, 1, ""},
    +		{"SIOCBRDGRTS", Const, 1, ""},
    +		{"SIOCBRDGSADDR", Const, 1, ""},
    +		{"SIOCBRDGSCACHE", Const, 1, ""},
    +		{"SIOCBRDGSFD", Const, 1, ""},
    +		{"SIOCBRDGSHT", Const, 1, ""},
    +		{"SIOCBRDGSIFCOST", Const, 1, ""},
    +		{"SIOCBRDGSIFFLGS", Const, 1, ""},
    +		{"SIOCBRDGSIFPRIO", Const, 1, ""},
    +		{"SIOCBRDGSMA", Const, 1, ""},
    +		{"SIOCBRDGSPRI", Const, 1, ""},
    +		{"SIOCBRDGSPROTO", Const, 1, ""},
    +		{"SIOCBRDGSTO", Const, 1, ""},
    +		{"SIOCBRDGSTXHC", Const, 1, ""},
    +		{"SIOCDARP", Const, 0, ""},
    +		{"SIOCDELDLCI", Const, 0, ""},
    +		{"SIOCDELMULTI", Const, 0, ""},
    +		{"SIOCDELRT", Const, 0, ""},
    +		{"SIOCDEVPRIVATE", Const, 0, ""},
    +		{"SIOCDIFADDR", Const, 0, ""},
    +		{"SIOCDIFGROUP", Const, 0, ""},
    +		{"SIOCDIFPHYADDR", Const, 0, ""},
    +		{"SIOCDLIFADDR", Const, 0, ""},
    +		{"SIOCDRARP", Const, 0, ""},
    +		{"SIOCGARP", Const, 0, ""},
    +		{"SIOCGDRVSPEC", Const, 0, ""},
    +		{"SIOCGETKALIVE", Const, 1, ""},
    +		{"SIOCGETLABEL", Const, 1, ""},
    +		{"SIOCGETPFLOW", Const, 1, ""},
    +		{"SIOCGETPFSYNC", Const, 1, ""},
    +		{"SIOCGETSGCNT", Const, 0, ""},
    +		{"SIOCGETVIFCNT", Const, 0, ""},
    +		{"SIOCGETVLAN", Const, 0, ""},
    +		{"SIOCGHIWAT", Const, 0, ""},
    +		{"SIOCGIFADDR", Const, 0, ""},
    +		{"SIOCGIFADDRPREF", Const, 1, ""},
    +		{"SIOCGIFALIAS", Const, 1, ""},
    +		{"SIOCGIFALTMTU", Const, 0, ""},
    +		{"SIOCGIFASYNCMAP", Const, 0, ""},
    +		{"SIOCGIFBOND", Const, 0, ""},
    +		{"SIOCGIFBR", Const, 0, ""},
    +		{"SIOCGIFBRDADDR", Const, 0, ""},
    +		{"SIOCGIFCAP", Const, 0, ""},
    +		{"SIOCGIFCONF", Const, 0, ""},
    +		{"SIOCGIFCOUNT", Const, 0, ""},
    +		{"SIOCGIFDATA", Const, 1, ""},
    +		{"SIOCGIFDESCR", Const, 0, ""},
    +		{"SIOCGIFDEVMTU", Const, 0, ""},
    +		{"SIOCGIFDLT", Const, 1, ""},
    +		{"SIOCGIFDSTADDR", Const, 0, ""},
    +		{"SIOCGIFENCAP", Const, 0, ""},
    +		{"SIOCGIFFIB", Const, 1, ""},
    +		{"SIOCGIFFLAGS", Const, 0, ""},
    +		{"SIOCGIFGATTR", Const, 1, ""},
    +		{"SIOCGIFGENERIC", Const, 0, ""},
    +		{"SIOCGIFGMEMB", Const, 0, ""},
    +		{"SIOCGIFGROUP", Const, 0, ""},
    +		{"SIOCGIFHARDMTU", Const, 3, ""},
    +		{"SIOCGIFHWADDR", Const, 0, ""},
    +		{"SIOCGIFINDEX", Const, 0, ""},
    +		{"SIOCGIFKPI", Const, 0, ""},
    +		{"SIOCGIFMAC", Const, 0, ""},
    +		{"SIOCGIFMAP", Const, 0, ""},
    +		{"SIOCGIFMEDIA", Const, 0, ""},
    +		{"SIOCGIFMEM", Const, 0, ""},
    +		{"SIOCGIFMETRIC", Const, 0, ""},
    +		{"SIOCGIFMTU", Const, 0, ""},
    +		{"SIOCGIFNAME", Const, 0, ""},
    +		{"SIOCGIFNETMASK", Const, 0, ""},
    +		{"SIOCGIFPDSTADDR", Const, 0, ""},
    +		{"SIOCGIFPFLAGS", Const, 0, ""},
    +		{"SIOCGIFPHYS", Const, 0, ""},
    +		{"SIOCGIFPRIORITY", Const, 1, ""},
    +		{"SIOCGIFPSRCADDR", Const, 0, ""},
    +		{"SIOCGIFRDOMAIN", Const, 1, ""},
    +		{"SIOCGIFRTLABEL", Const, 1, ""},
    +		{"SIOCGIFSLAVE", Const, 0, ""},
    +		{"SIOCGIFSTATUS", Const, 0, ""},
    +		{"SIOCGIFTIMESLOT", Const, 1, ""},
    +		{"SIOCGIFTXQLEN", Const, 0, ""},
    +		{"SIOCGIFVLAN", Const, 0, ""},
    +		{"SIOCGIFWAKEFLAGS", Const, 0, ""},
    +		{"SIOCGIFXFLAGS", Const, 1, ""},
    +		{"SIOCGLIFADDR", Const, 0, ""},
    +		{"SIOCGLIFPHYADDR", Const, 0, ""},
    +		{"SIOCGLIFPHYRTABLE", Const, 1, ""},
    +		{"SIOCGLIFPHYTTL", Const, 3, ""},
    +		{"SIOCGLINKSTR", Const, 1, ""},
    +		{"SIOCGLOWAT", Const, 0, ""},
    +		{"SIOCGPGRP", Const, 0, ""},
    +		{"SIOCGPRIVATE_0", Const, 0, ""},
    +		{"SIOCGPRIVATE_1", Const, 0, ""},
    +		{"SIOCGRARP", Const, 0, ""},
    +		{"SIOCGSPPPPARAMS", Const, 3, ""},
    +		{"SIOCGSTAMP", Const, 0, ""},
    +		{"SIOCGSTAMPNS", Const, 0, ""},
    +		{"SIOCGVH", Const, 1, ""},
    +		{"SIOCGVNETID", Const, 3, ""},
    +		{"SIOCIFCREATE", Const, 0, ""},
    +		{"SIOCIFCREATE2", Const, 0, ""},
    +		{"SIOCIFDESTROY", Const, 0, ""},
    +		{"SIOCIFGCLONERS", Const, 0, ""},
    +		{"SIOCINITIFADDR", Const, 1, ""},
    +		{"SIOCPROTOPRIVATE", Const, 0, ""},
    +		{"SIOCRSLVMULTI", Const, 0, ""},
    +		{"SIOCRTMSG", Const, 0, ""},
    +		{"SIOCSARP", Const, 0, ""},
    +		{"SIOCSDRVSPEC", Const, 0, ""},
    +		{"SIOCSETKALIVE", Const, 1, ""},
    +		{"SIOCSETLABEL", Const, 1, ""},
    +		{"SIOCSETPFLOW", Const, 1, ""},
    +		{"SIOCSETPFSYNC", Const, 1, ""},
    +		{"SIOCSETVLAN", Const, 0, ""},
    +		{"SIOCSHIWAT", Const, 0, ""},
    +		{"SIOCSIFADDR", Const, 0, ""},
    +		{"SIOCSIFADDRPREF", Const, 1, ""},
    +		{"SIOCSIFALTMTU", Const, 0, ""},
    +		{"SIOCSIFASYNCMAP", Const, 0, ""},
    +		{"SIOCSIFBOND", Const, 0, ""},
    +		{"SIOCSIFBR", Const, 0, ""},
    +		{"SIOCSIFBRDADDR", Const, 0, ""},
    +		{"SIOCSIFCAP", Const, 0, ""},
    +		{"SIOCSIFDESCR", Const, 0, ""},
    +		{"SIOCSIFDSTADDR", Const, 0, ""},
    +		{"SIOCSIFENCAP", Const, 0, ""},
    +		{"SIOCSIFFIB", Const, 1, ""},
    +		{"SIOCSIFFLAGS", Const, 0, ""},
    +		{"SIOCSIFGATTR", Const, 1, ""},
    +		{"SIOCSIFGENERIC", Const, 0, ""},
    +		{"SIOCSIFHWADDR", Const, 0, ""},
    +		{"SIOCSIFHWBROADCAST", Const, 0, ""},
    +		{"SIOCSIFKPI", Const, 0, ""},
    +		{"SIOCSIFLINK", Const, 0, ""},
    +		{"SIOCSIFLLADDR", Const, 0, ""},
    +		{"SIOCSIFMAC", Const, 0, ""},
    +		{"SIOCSIFMAP", Const, 0, ""},
    +		{"SIOCSIFMEDIA", Const, 0, ""},
    +		{"SIOCSIFMEM", Const, 0, ""},
    +		{"SIOCSIFMETRIC", Const, 0, ""},
    +		{"SIOCSIFMTU", Const, 0, ""},
    +		{"SIOCSIFNAME", Const, 0, ""},
    +		{"SIOCSIFNETMASK", Const, 0, ""},
    +		{"SIOCSIFPFLAGS", Const, 0, ""},
    +		{"SIOCSIFPHYADDR", Const, 0, ""},
    +		{"SIOCSIFPHYS", Const, 0, ""},
    +		{"SIOCSIFPRIORITY", Const, 1, ""},
    +		{"SIOCSIFRDOMAIN", Const, 1, ""},
    +		{"SIOCSIFRTLABEL", Const, 1, ""},
    +		{"SIOCSIFRVNET", Const, 0, ""},
    +		{"SIOCSIFSLAVE", Const, 0, ""},
    +		{"SIOCSIFTIMESLOT", Const, 1, ""},
    +		{"SIOCSIFTXQLEN", Const, 0, ""},
    +		{"SIOCSIFVLAN", Const, 0, ""},
    +		{"SIOCSIFVNET", Const, 0, ""},
    +		{"SIOCSIFXFLAGS", Const, 1, ""},
    +		{"SIOCSLIFPHYADDR", Const, 0, ""},
    +		{"SIOCSLIFPHYRTABLE", Const, 1, ""},
    +		{"SIOCSLIFPHYTTL", Const, 3, ""},
    +		{"SIOCSLINKSTR", Const, 1, ""},
    +		{"SIOCSLOWAT", Const, 0, ""},
    +		{"SIOCSPGRP", Const, 0, ""},
    +		{"SIOCSRARP", Const, 0, ""},
    +		{"SIOCSSPPPPARAMS", Const, 3, ""},
    +		{"SIOCSVH", Const, 1, ""},
    +		{"SIOCSVNETID", Const, 3, ""},
    +		{"SIOCZIFDATA", Const, 1, ""},
    +		{"SIO_GET_EXTENSION_FUNCTION_POINTER", Const, 1, ""},
    +		{"SIO_GET_INTERFACE_LIST", Const, 0, ""},
    +		{"SIO_KEEPALIVE_VALS", Const, 3, ""},
    +		{"SIO_UDP_CONNRESET", Const, 4, ""},
    +		{"SOCK_CLOEXEC", Const, 0, ""},
    +		{"SOCK_DCCP", Const, 0, ""},
    +		{"SOCK_DGRAM", Const, 0, ""},
    +		{"SOCK_FLAGS_MASK", Const, 1, ""},
    +		{"SOCK_MAXADDRLEN", Const, 0, ""},
    +		{"SOCK_NONBLOCK", Const, 0, ""},
    +		{"SOCK_NOSIGPIPE", Const, 1, ""},
    +		{"SOCK_PACKET", Const, 0, ""},
    +		{"SOCK_RAW", Const, 0, ""},
    +		{"SOCK_RDM", Const, 0, ""},
    +		{"SOCK_SEQPACKET", Const, 0, ""},
    +		{"SOCK_STREAM", Const, 0, ""},
    +		{"SOL_AAL", Const, 0, ""},
    +		{"SOL_ATM", Const, 0, ""},
    +		{"SOL_DECNET", Const, 0, ""},
    +		{"SOL_ICMPV6", Const, 0, ""},
    +		{"SOL_IP", Const, 0, ""},
    +		{"SOL_IPV6", Const, 0, ""},
    +		{"SOL_IRDA", Const, 0, ""},
    +		{"SOL_PACKET", Const, 0, ""},
    +		{"SOL_RAW", Const, 0, ""},
    +		{"SOL_SOCKET", Const, 0, ""},
    +		{"SOL_TCP", Const, 0, ""},
    +		{"SOL_X25", Const, 0, ""},
    +		{"SOMAXCONN", Const, 0, ""},
    +		{"SO_ACCEPTCONN", Const, 0, ""},
    +		{"SO_ACCEPTFILTER", Const, 0, ""},
    +		{"SO_ATTACH_FILTER", Const, 0, ""},
    +		{"SO_BINDANY", Const, 1, ""},
    +		{"SO_BINDTODEVICE", Const, 0, ""},
    +		{"SO_BINTIME", Const, 0, ""},
    +		{"SO_BROADCAST", Const, 0, ""},
    +		{"SO_BSDCOMPAT", Const, 0, ""},
    +		{"SO_DEBUG", Const, 0, ""},
    +		{"SO_DETACH_FILTER", Const, 0, ""},
    +		{"SO_DOMAIN", Const, 0, ""},
    +		{"SO_DONTROUTE", Const, 0, ""},
    +		{"SO_DONTTRUNC", Const, 0, ""},
    +		{"SO_ERROR", Const, 0, ""},
    +		{"SO_KEEPALIVE", Const, 0, ""},
    +		{"SO_LABEL", Const, 0, ""},
    +		{"SO_LINGER", Const, 0, ""},
    +		{"SO_LINGER_SEC", Const, 0, ""},
    +		{"SO_LISTENINCQLEN", Const, 0, ""},
    +		{"SO_LISTENQLEN", Const, 0, ""},
    +		{"SO_LISTENQLIMIT", Const, 0, ""},
    +		{"SO_MARK", Const, 0, ""},
    +		{"SO_NETPROC", Const, 1, ""},
    +		{"SO_NKE", Const, 0, ""},
    +		{"SO_NOADDRERR", Const, 0, ""},
    +		{"SO_NOHEADER", Const, 1, ""},
    +		{"SO_NOSIGPIPE", Const, 0, ""},
    +		{"SO_NOTIFYCONFLICT", Const, 0, ""},
    +		{"SO_NO_CHECK", Const, 0, ""},
    +		{"SO_NO_DDP", Const, 0, ""},
    +		{"SO_NO_OFFLOAD", Const, 0, ""},
    +		{"SO_NP_EXTENSIONS", Const, 0, ""},
    +		{"SO_NREAD", Const, 0, ""},
    +		{"SO_NUMRCVPKT", Const, 16, ""},
    +		{"SO_NWRITE", Const, 0, ""},
    +		{"SO_OOBINLINE", Const, 0, ""},
    +		{"SO_OVERFLOWED", Const, 1, ""},
    +		{"SO_PASSCRED", Const, 0, ""},
    +		{"SO_PASSSEC", Const, 0, ""},
    +		{"SO_PEERCRED", Const, 0, ""},
    +		{"SO_PEERLABEL", Const, 0, ""},
    +		{"SO_PEERNAME", Const, 0, ""},
    +		{"SO_PEERSEC", Const, 0, ""},
    +		{"SO_PRIORITY", Const, 0, ""},
    +		{"SO_PROTOCOL", Const, 0, ""},
    +		{"SO_PROTOTYPE", Const, 1, ""},
    +		{"SO_RANDOMPORT", Const, 0, ""},
    +		{"SO_RCVBUF", Const, 0, ""},
    +		{"SO_RCVBUFFORCE", Const, 0, ""},
    +		{"SO_RCVLOWAT", Const, 0, ""},
    +		{"SO_RCVTIMEO", Const, 0, ""},
    +		{"SO_RESTRICTIONS", Const, 0, ""},
    +		{"SO_RESTRICT_DENYIN", Const, 0, ""},
    +		{"SO_RESTRICT_DENYOUT", Const, 0, ""},
    +		{"SO_RESTRICT_DENYSET", Const, 0, ""},
    +		{"SO_REUSEADDR", Const, 0, ""},
    +		{"SO_REUSEPORT", Const, 0, ""},
    +		{"SO_REUSESHAREUID", Const, 0, ""},
    +		{"SO_RTABLE", Const, 1, ""},
    +		{"SO_RXQ_OVFL", Const, 0, ""},
    +		{"SO_SECURITY_AUTHENTICATION", Const, 0, ""},
    +		{"SO_SECURITY_ENCRYPTION_NETWORK", Const, 0, ""},
    +		{"SO_SECURITY_ENCRYPTION_TRANSPORT", Const, 0, ""},
    +		{"SO_SETFIB", Const, 0, ""},
    +		{"SO_SNDBUF", Const, 0, ""},
    +		{"SO_SNDBUFFORCE", Const, 0, ""},
    +		{"SO_SNDLOWAT", Const, 0, ""},
    +		{"SO_SNDTIMEO", Const, 0, ""},
    +		{"SO_SPLICE", Const, 1, ""},
    +		{"SO_TIMESTAMP", Const, 0, ""},
    +		{"SO_TIMESTAMPING", Const, 0, ""},
    +		{"SO_TIMESTAMPNS", Const, 0, ""},
    +		{"SO_TIMESTAMP_MONOTONIC", Const, 0, ""},
    +		{"SO_TYPE", Const, 0, ""},
    +		{"SO_UPCALLCLOSEWAIT", Const, 0, ""},
    +		{"SO_UPDATE_ACCEPT_CONTEXT", Const, 0, ""},
    +		{"SO_UPDATE_CONNECT_CONTEXT", Const, 1, ""},
    +		{"SO_USELOOPBACK", Const, 0, ""},
    +		{"SO_USER_COOKIE", Const, 1, ""},
    +		{"SO_VENDOR", Const, 3, ""},
    +		{"SO_WANTMORE", Const, 0, ""},
    +		{"SO_WANTOOBFLAG", Const, 0, ""},
    +		{"SSLExtraCertChainPolicyPara", Type, 0, ""},
    +		{"SSLExtraCertChainPolicyPara.AuthType", Field, 0, ""},
    +		{"SSLExtraCertChainPolicyPara.Checks", Field, 0, ""},
    +		{"SSLExtraCertChainPolicyPara.ServerName", Field, 0, ""},
    +		{"SSLExtraCertChainPolicyPara.Size", Field, 0, ""},
    +		{"STANDARD_RIGHTS_ALL", Const, 0, ""},
    +		{"STANDARD_RIGHTS_EXECUTE", Const, 0, ""},
    +		{"STANDARD_RIGHTS_READ", Const, 0, ""},
    +		{"STANDARD_RIGHTS_REQUIRED", Const, 0, ""},
    +		{"STANDARD_RIGHTS_WRITE", Const, 0, ""},
    +		{"STARTF_USESHOWWINDOW", Const, 0, ""},
    +		{"STARTF_USESTDHANDLES", Const, 0, ""},
    +		{"STD_ERROR_HANDLE", Const, 0, ""},
    +		{"STD_INPUT_HANDLE", Const, 0, ""},
    +		{"STD_OUTPUT_HANDLE", Const, 0, ""},
    +		{"SUBLANG_ENGLISH_US", Const, 0, ""},
    +		{"SW_FORCEMINIMIZE", Const, 0, ""},
    +		{"SW_HIDE", Const, 0, ""},
    +		{"SW_MAXIMIZE", Const, 0, ""},
    +		{"SW_MINIMIZE", Const, 0, ""},
    +		{"SW_NORMAL", Const, 0, ""},
    +		{"SW_RESTORE", Const, 0, ""},
    +		{"SW_SHOW", Const, 0, ""},
    +		{"SW_SHOWDEFAULT", Const, 0, ""},
    +		{"SW_SHOWMAXIMIZED", Const, 0, ""},
    +		{"SW_SHOWMINIMIZED", Const, 0, ""},
    +		{"SW_SHOWMINNOACTIVE", Const, 0, ""},
    +		{"SW_SHOWNA", Const, 0, ""},
    +		{"SW_SHOWNOACTIVATE", Const, 0, ""},
    +		{"SW_SHOWNORMAL", Const, 0, ""},
    +		{"SYMBOLIC_LINK_FLAG_DIRECTORY", Const, 4, ""},
    +		{"SYNCHRONIZE", Const, 0, ""},
    +		{"SYSCTL_VERSION", Const, 1, ""},
    +		{"SYSCTL_VERS_0", Const, 1, ""},
    +		{"SYSCTL_VERS_1", Const, 1, ""},
    +		{"SYSCTL_VERS_MASK", Const, 1, ""},
    +		{"SYS_ABORT2", Const, 0, ""},
    +		{"SYS_ACCEPT", Const, 0, ""},
    +		{"SYS_ACCEPT4", Const, 0, ""},
    +		{"SYS_ACCEPT_NOCANCEL", Const, 0, ""},
    +		{"SYS_ACCESS", Const, 0, ""},
    +		{"SYS_ACCESS_EXTENDED", Const, 0, ""},
    +		{"SYS_ACCT", Const, 0, ""},
    +		{"SYS_ADD_KEY", Const, 0, ""},
    +		{"SYS_ADD_PROFIL", Const, 0, ""},
    +		{"SYS_ADJFREQ", Const, 1, ""},
    +		{"SYS_ADJTIME", Const, 0, ""},
    +		{"SYS_ADJTIMEX", Const, 0, ""},
    +		{"SYS_AFS_SYSCALL", Const, 0, ""},
    +		{"SYS_AIO_CANCEL", Const, 0, ""},
    +		{"SYS_AIO_ERROR", Const, 0, ""},
    +		{"SYS_AIO_FSYNC", Const, 0, ""},
    +		{"SYS_AIO_MLOCK", Const, 14, ""},
    +		{"SYS_AIO_READ", Const, 0, ""},
    +		{"SYS_AIO_RETURN", Const, 0, ""},
    +		{"SYS_AIO_SUSPEND", Const, 0, ""},
    +		{"SYS_AIO_SUSPEND_NOCANCEL", Const, 0, ""},
    +		{"SYS_AIO_WAITCOMPLETE", Const, 14, ""},
    +		{"SYS_AIO_WRITE", Const, 0, ""},
    +		{"SYS_ALARM", Const, 0, ""},
    +		{"SYS_ARCH_PRCTL", Const, 0, ""},
    +		{"SYS_ARM_FADVISE64_64", Const, 0, ""},
    +		{"SYS_ARM_SYNC_FILE_RANGE", Const, 0, ""},
    +		{"SYS_ATGETMSG", Const, 0, ""},
    +		{"SYS_ATPGETREQ", Const, 0, ""},
    +		{"SYS_ATPGETRSP", Const, 0, ""},
    +		{"SYS_ATPSNDREQ", Const, 0, ""},
    +		{"SYS_ATPSNDRSP", Const, 0, ""},
    +		{"SYS_ATPUTMSG", Const, 0, ""},
    +		{"SYS_ATSOCKET", Const, 0, ""},
    +		{"SYS_AUDIT", Const, 0, ""},
    +		{"SYS_AUDITCTL", Const, 0, ""},
    +		{"SYS_AUDITON", Const, 0, ""},
    +		{"SYS_AUDIT_SESSION_JOIN", Const, 0, ""},
    +		{"SYS_AUDIT_SESSION_PORT", Const, 0, ""},
    +		{"SYS_AUDIT_SESSION_SELF", Const, 0, ""},
    +		{"SYS_BDFLUSH", Const, 0, ""},
    +		{"SYS_BIND", Const, 0, ""},
    +		{"SYS_BINDAT", Const, 3, ""},
    +		{"SYS_BREAK", Const, 0, ""},
    +		{"SYS_BRK", Const, 0, ""},
    +		{"SYS_BSDTHREAD_CREATE", Const, 0, ""},
    +		{"SYS_BSDTHREAD_REGISTER", Const, 0, ""},
    +		{"SYS_BSDTHREAD_TERMINATE", Const, 0, ""},
    +		{"SYS_CAPGET", Const, 0, ""},
    +		{"SYS_CAPSET", Const, 0, ""},
    +		{"SYS_CAP_ENTER", Const, 0, ""},
    +		{"SYS_CAP_FCNTLS_GET", Const, 1, ""},
    +		{"SYS_CAP_FCNTLS_LIMIT", Const, 1, ""},
    +		{"SYS_CAP_GETMODE", Const, 0, ""},
    +		{"SYS_CAP_GETRIGHTS", Const, 0, ""},
    +		{"SYS_CAP_IOCTLS_GET", Const, 1, ""},
    +		{"SYS_CAP_IOCTLS_LIMIT", Const, 1, ""},
    +		{"SYS_CAP_NEW", Const, 0, ""},
    +		{"SYS_CAP_RIGHTS_GET", Const, 1, ""},
    +		{"SYS_CAP_RIGHTS_LIMIT", Const, 1, ""},
    +		{"SYS_CHDIR", Const, 0, ""},
    +		{"SYS_CHFLAGS", Const, 0, ""},
    +		{"SYS_CHFLAGSAT", Const, 3, ""},
    +		{"SYS_CHMOD", Const, 0, ""},
    +		{"SYS_CHMOD_EXTENDED", Const, 0, ""},
    +		{"SYS_CHOWN", Const, 0, ""},
    +		{"SYS_CHOWN32", Const, 0, ""},
    +		{"SYS_CHROOT", Const, 0, ""},
    +		{"SYS_CHUD", Const, 0, ""},
    +		{"SYS_CLOCK_ADJTIME", Const, 0, ""},
    +		{"SYS_CLOCK_GETCPUCLOCKID2", Const, 1, ""},
    +		{"SYS_CLOCK_GETRES", Const, 0, ""},
    +		{"SYS_CLOCK_GETTIME", Const, 0, ""},
    +		{"SYS_CLOCK_NANOSLEEP", Const, 0, ""},
    +		{"SYS_CLOCK_SETTIME", Const, 0, ""},
    +		{"SYS_CLONE", Const, 0, ""},
    +		{"SYS_CLOSE", Const, 0, ""},
    +		{"SYS_CLOSEFROM", Const, 0, ""},
    +		{"SYS_CLOSE_NOCANCEL", Const, 0, ""},
    +		{"SYS_CONNECT", Const, 0, ""},
    +		{"SYS_CONNECTAT", Const, 3, ""},
    +		{"SYS_CONNECT_NOCANCEL", Const, 0, ""},
    +		{"SYS_COPYFILE", Const, 0, ""},
    +		{"SYS_CPUSET", Const, 0, ""},
    +		{"SYS_CPUSET_GETAFFINITY", Const, 0, ""},
    +		{"SYS_CPUSET_GETID", Const, 0, ""},
    +		{"SYS_CPUSET_SETAFFINITY", Const, 0, ""},
    +		{"SYS_CPUSET_SETID", Const, 0, ""},
    +		{"SYS_CREAT", Const, 0, ""},
    +		{"SYS_CREATE_MODULE", Const, 0, ""},
    +		{"SYS_CSOPS", Const, 0, ""},
    +		{"SYS_CSOPS_AUDITTOKEN", Const, 16, ""},
    +		{"SYS_DELETE", Const, 0, ""},
    +		{"SYS_DELETE_MODULE", Const, 0, ""},
    +		{"SYS_DUP", Const, 0, ""},
    +		{"SYS_DUP2", Const, 0, ""},
    +		{"SYS_DUP3", Const, 0, ""},
    +		{"SYS_EACCESS", Const, 0, ""},
    +		{"SYS_EPOLL_CREATE", Const, 0, ""},
    +		{"SYS_EPOLL_CREATE1", Const, 0, ""},
    +		{"SYS_EPOLL_CTL", Const, 0, ""},
    +		{"SYS_EPOLL_CTL_OLD", Const, 0, ""},
    +		{"SYS_EPOLL_PWAIT", Const, 0, ""},
    +		{"SYS_EPOLL_WAIT", Const, 0, ""},
    +		{"SYS_EPOLL_WAIT_OLD", Const, 0, ""},
    +		{"SYS_EVENTFD", Const, 0, ""},
    +		{"SYS_EVENTFD2", Const, 0, ""},
    +		{"SYS_EXCHANGEDATA", Const, 0, ""},
    +		{"SYS_EXECVE", Const, 0, ""},
    +		{"SYS_EXIT", Const, 0, ""},
    +		{"SYS_EXIT_GROUP", Const, 0, ""},
    +		{"SYS_EXTATTRCTL", Const, 0, ""},
    +		{"SYS_EXTATTR_DELETE_FD", Const, 0, ""},
    +		{"SYS_EXTATTR_DELETE_FILE", Const, 0, ""},
    +		{"SYS_EXTATTR_DELETE_LINK", Const, 0, ""},
    +		{"SYS_EXTATTR_GET_FD", Const, 0, ""},
    +		{"SYS_EXTATTR_GET_FILE", Const, 0, ""},
    +		{"SYS_EXTATTR_GET_LINK", Const, 0, ""},
    +		{"SYS_EXTATTR_LIST_FD", Const, 0, ""},
    +		{"SYS_EXTATTR_LIST_FILE", Const, 0, ""},
    +		{"SYS_EXTATTR_LIST_LINK", Const, 0, ""},
    +		{"SYS_EXTATTR_SET_FD", Const, 0, ""},
    +		{"SYS_EXTATTR_SET_FILE", Const, 0, ""},
    +		{"SYS_EXTATTR_SET_LINK", Const, 0, ""},
    +		{"SYS_FACCESSAT", Const, 0, ""},
    +		{"SYS_FADVISE64", Const, 0, ""},
    +		{"SYS_FADVISE64_64", Const, 0, ""},
    +		{"SYS_FALLOCATE", Const, 0, ""},
    +		{"SYS_FANOTIFY_INIT", Const, 0, ""},
    +		{"SYS_FANOTIFY_MARK", Const, 0, ""},
    +		{"SYS_FCHDIR", Const, 0, ""},
    +		{"SYS_FCHFLAGS", Const, 0, ""},
    +		{"SYS_FCHMOD", Const, 0, ""},
    +		{"SYS_FCHMODAT", Const, 0, ""},
    +		{"SYS_FCHMOD_EXTENDED", Const, 0, ""},
    +		{"SYS_FCHOWN", Const, 0, ""},
    +		{"SYS_FCHOWN32", Const, 0, ""},
    +		{"SYS_FCHOWNAT", Const, 0, ""},
    +		{"SYS_FCHROOT", Const, 1, ""},
    +		{"SYS_FCNTL", Const, 0, ""},
    +		{"SYS_FCNTL64", Const, 0, ""},
    +		{"SYS_FCNTL_NOCANCEL", Const, 0, ""},
    +		{"SYS_FDATASYNC", Const, 0, ""},
    +		{"SYS_FEXECVE", Const, 0, ""},
    +		{"SYS_FFCLOCK_GETCOUNTER", Const, 0, ""},
    +		{"SYS_FFCLOCK_GETESTIMATE", Const, 0, ""},
    +		{"SYS_FFCLOCK_SETESTIMATE", Const, 0, ""},
    +		{"SYS_FFSCTL", Const, 0, ""},
    +		{"SYS_FGETATTRLIST", Const, 0, ""},
    +		{"SYS_FGETXATTR", Const, 0, ""},
    +		{"SYS_FHOPEN", Const, 0, ""},
    +		{"SYS_FHSTAT", Const, 0, ""},
    +		{"SYS_FHSTATFS", Const, 0, ""},
    +		{"SYS_FILEPORT_MAKEFD", Const, 0, ""},
    +		{"SYS_FILEPORT_MAKEPORT", Const, 0, ""},
    +		{"SYS_FKTRACE", Const, 1, ""},
    +		{"SYS_FLISTXATTR", Const, 0, ""},
    +		{"SYS_FLOCK", Const, 0, ""},
    +		{"SYS_FORK", Const, 0, ""},
    +		{"SYS_FPATHCONF", Const, 0, ""},
    +		{"SYS_FREEBSD6_FTRUNCATE", Const, 0, ""},
    +		{"SYS_FREEBSD6_LSEEK", Const, 0, ""},
    +		{"SYS_FREEBSD6_MMAP", Const, 0, ""},
    +		{"SYS_FREEBSD6_PREAD", Const, 0, ""},
    +		{"SYS_FREEBSD6_PWRITE", Const, 0, ""},
    +		{"SYS_FREEBSD6_TRUNCATE", Const, 0, ""},
    +		{"SYS_FREMOVEXATTR", Const, 0, ""},
    +		{"SYS_FSCTL", Const, 0, ""},
    +		{"SYS_FSETATTRLIST", Const, 0, ""},
    +		{"SYS_FSETXATTR", Const, 0, ""},
    +		{"SYS_FSGETPATH", Const, 0, ""},
    +		{"SYS_FSTAT", Const, 0, ""},
    +		{"SYS_FSTAT64", Const, 0, ""},
    +		{"SYS_FSTAT64_EXTENDED", Const, 0, ""},
    +		{"SYS_FSTATAT", Const, 0, ""},
    +		{"SYS_FSTATAT64", Const, 0, ""},
    +		{"SYS_FSTATFS", Const, 0, ""},
    +		{"SYS_FSTATFS64", Const, 0, ""},
    +		{"SYS_FSTATV", Const, 0, ""},
    +		{"SYS_FSTATVFS1", Const, 1, ""},
    +		{"SYS_FSTAT_EXTENDED", Const, 0, ""},
    +		{"SYS_FSYNC", Const, 0, ""},
    +		{"SYS_FSYNC_NOCANCEL", Const, 0, ""},
    +		{"SYS_FSYNC_RANGE", Const, 1, ""},
    +		{"SYS_FTIME", Const, 0, ""},
    +		{"SYS_FTRUNCATE", Const, 0, ""},
    +		{"SYS_FTRUNCATE64", Const, 0, ""},
    +		{"SYS_FUTEX", Const, 0, ""},
    +		{"SYS_FUTIMENS", Const, 1, ""},
    +		{"SYS_FUTIMES", Const, 0, ""},
    +		{"SYS_FUTIMESAT", Const, 0, ""},
    +		{"SYS_GETATTRLIST", Const, 0, ""},
    +		{"SYS_GETAUDIT", Const, 0, ""},
    +		{"SYS_GETAUDIT_ADDR", Const, 0, ""},
    +		{"SYS_GETAUID", Const, 0, ""},
    +		{"SYS_GETCONTEXT", Const, 0, ""},
    +		{"SYS_GETCPU", Const, 0, ""},
    +		{"SYS_GETCWD", Const, 0, ""},
    +		{"SYS_GETDENTS", Const, 0, ""},
    +		{"SYS_GETDENTS64", Const, 0, ""},
    +		{"SYS_GETDIRENTRIES", Const, 0, ""},
    +		{"SYS_GETDIRENTRIES64", Const, 0, ""},
    +		{"SYS_GETDIRENTRIESATTR", Const, 0, ""},
    +		{"SYS_GETDTABLECOUNT", Const, 1, ""},
    +		{"SYS_GETDTABLESIZE", Const, 0, ""},
    +		{"SYS_GETEGID", Const, 0, ""},
    +		{"SYS_GETEGID32", Const, 0, ""},
    +		{"SYS_GETEUID", Const, 0, ""},
    +		{"SYS_GETEUID32", Const, 0, ""},
    +		{"SYS_GETFH", Const, 0, ""},
    +		{"SYS_GETFSSTAT", Const, 0, ""},
    +		{"SYS_GETFSSTAT64", Const, 0, ""},
    +		{"SYS_GETGID", Const, 0, ""},
    +		{"SYS_GETGID32", Const, 0, ""},
    +		{"SYS_GETGROUPS", Const, 0, ""},
    +		{"SYS_GETGROUPS32", Const, 0, ""},
    +		{"SYS_GETHOSTUUID", Const, 0, ""},
    +		{"SYS_GETITIMER", Const, 0, ""},
    +		{"SYS_GETLCID", Const, 0, ""},
    +		{"SYS_GETLOGIN", Const, 0, ""},
    +		{"SYS_GETLOGINCLASS", Const, 0, ""},
    +		{"SYS_GETPEERNAME", Const, 0, ""},
    +		{"SYS_GETPGID", Const, 0, ""},
    +		{"SYS_GETPGRP", Const, 0, ""},
    +		{"SYS_GETPID", Const, 0, ""},
    +		{"SYS_GETPMSG", Const, 0, ""},
    +		{"SYS_GETPPID", Const, 0, ""},
    +		{"SYS_GETPRIORITY", Const, 0, ""},
    +		{"SYS_GETRESGID", Const, 0, ""},
    +		{"SYS_GETRESGID32", Const, 0, ""},
    +		{"SYS_GETRESUID", Const, 0, ""},
    +		{"SYS_GETRESUID32", Const, 0, ""},
    +		{"SYS_GETRLIMIT", Const, 0, ""},
    +		{"SYS_GETRTABLE", Const, 1, ""},
    +		{"SYS_GETRUSAGE", Const, 0, ""},
    +		{"SYS_GETSGROUPS", Const, 0, ""},
    +		{"SYS_GETSID", Const, 0, ""},
    +		{"SYS_GETSOCKNAME", Const, 0, ""},
    +		{"SYS_GETSOCKOPT", Const, 0, ""},
    +		{"SYS_GETTHRID", Const, 1, ""},
    +		{"SYS_GETTID", Const, 0, ""},
    +		{"SYS_GETTIMEOFDAY", Const, 0, ""},
    +		{"SYS_GETUID", Const, 0, ""},
    +		{"SYS_GETUID32", Const, 0, ""},
    +		{"SYS_GETVFSSTAT", Const, 1, ""},
    +		{"SYS_GETWGROUPS", Const, 0, ""},
    +		{"SYS_GETXATTR", Const, 0, ""},
    +		{"SYS_GET_KERNEL_SYMS", Const, 0, ""},
    +		{"SYS_GET_MEMPOLICY", Const, 0, ""},
    +		{"SYS_GET_ROBUST_LIST", Const, 0, ""},
    +		{"SYS_GET_THREAD_AREA", Const, 0, ""},
    +		{"SYS_GSSD_SYSCALL", Const, 14, ""},
    +		{"SYS_GTTY", Const, 0, ""},
    +		{"SYS_IDENTITYSVC", Const, 0, ""},
    +		{"SYS_IDLE", Const, 0, ""},
    +		{"SYS_INITGROUPS", Const, 0, ""},
    +		{"SYS_INIT_MODULE", Const, 0, ""},
    +		{"SYS_INOTIFY_ADD_WATCH", Const, 0, ""},
    +		{"SYS_INOTIFY_INIT", Const, 0, ""},
    +		{"SYS_INOTIFY_INIT1", Const, 0, ""},
    +		{"SYS_INOTIFY_RM_WATCH", Const, 0, ""},
    +		{"SYS_IOCTL", Const, 0, ""},
    +		{"SYS_IOPERM", Const, 0, ""},
    +		{"SYS_IOPL", Const, 0, ""},
    +		{"SYS_IOPOLICYSYS", Const, 0, ""},
    +		{"SYS_IOPRIO_GET", Const, 0, ""},
    +		{"SYS_IOPRIO_SET", Const, 0, ""},
    +		{"SYS_IO_CANCEL", Const, 0, ""},
    +		{"SYS_IO_DESTROY", Const, 0, ""},
    +		{"SYS_IO_GETEVENTS", Const, 0, ""},
    +		{"SYS_IO_SETUP", Const, 0, ""},
    +		{"SYS_IO_SUBMIT", Const, 0, ""},
    +		{"SYS_IPC", Const, 0, ""},
    +		{"SYS_ISSETUGID", Const, 0, ""},
    +		{"SYS_JAIL", Const, 0, ""},
    +		{"SYS_JAIL_ATTACH", Const, 0, ""},
    +		{"SYS_JAIL_GET", Const, 0, ""},
    +		{"SYS_JAIL_REMOVE", Const, 0, ""},
    +		{"SYS_JAIL_SET", Const, 0, ""},
    +		{"SYS_KAS_INFO", Const, 16, ""},
    +		{"SYS_KDEBUG_TRACE", Const, 0, ""},
    +		{"SYS_KENV", Const, 0, ""},
    +		{"SYS_KEVENT", Const, 0, ""},
    +		{"SYS_KEVENT64", Const, 0, ""},
    +		{"SYS_KEXEC_LOAD", Const, 0, ""},
    +		{"SYS_KEYCTL", Const, 0, ""},
    +		{"SYS_KILL", Const, 0, ""},
    +		{"SYS_KLDFIND", Const, 0, ""},
    +		{"SYS_KLDFIRSTMOD", Const, 0, ""},
    +		{"SYS_KLDLOAD", Const, 0, ""},
    +		{"SYS_KLDNEXT", Const, 0, ""},
    +		{"SYS_KLDSTAT", Const, 0, ""},
    +		{"SYS_KLDSYM", Const, 0, ""},
    +		{"SYS_KLDUNLOAD", Const, 0, ""},
    +		{"SYS_KLDUNLOADF", Const, 0, ""},
    +		{"SYS_KMQ_NOTIFY", Const, 14, ""},
    +		{"SYS_KMQ_OPEN", Const, 14, ""},
    +		{"SYS_KMQ_SETATTR", Const, 14, ""},
    +		{"SYS_KMQ_TIMEDRECEIVE", Const, 14, ""},
    +		{"SYS_KMQ_TIMEDSEND", Const, 14, ""},
    +		{"SYS_KMQ_UNLINK", Const, 14, ""},
    +		{"SYS_KQUEUE", Const, 0, ""},
    +		{"SYS_KQUEUE1", Const, 1, ""},
    +		{"SYS_KSEM_CLOSE", Const, 14, ""},
    +		{"SYS_KSEM_DESTROY", Const, 14, ""},
    +		{"SYS_KSEM_GETVALUE", Const, 14, ""},
    +		{"SYS_KSEM_INIT", Const, 14, ""},
    +		{"SYS_KSEM_OPEN", Const, 14, ""},
    +		{"SYS_KSEM_POST", Const, 14, ""},
    +		{"SYS_KSEM_TIMEDWAIT", Const, 14, ""},
    +		{"SYS_KSEM_TRYWAIT", Const, 14, ""},
    +		{"SYS_KSEM_UNLINK", Const, 14, ""},
    +		{"SYS_KSEM_WAIT", Const, 14, ""},
    +		{"SYS_KTIMER_CREATE", Const, 0, ""},
    +		{"SYS_KTIMER_DELETE", Const, 0, ""},
    +		{"SYS_KTIMER_GETOVERRUN", Const, 0, ""},
    +		{"SYS_KTIMER_GETTIME", Const, 0, ""},
    +		{"SYS_KTIMER_SETTIME", Const, 0, ""},
    +		{"SYS_KTRACE", Const, 0, ""},
    +		{"SYS_LCHFLAGS", Const, 0, ""},
    +		{"SYS_LCHMOD", Const, 0, ""},
    +		{"SYS_LCHOWN", Const, 0, ""},
    +		{"SYS_LCHOWN32", Const, 0, ""},
    +		{"SYS_LEDGER", Const, 16, ""},
    +		{"SYS_LGETFH", Const, 0, ""},
    +		{"SYS_LGETXATTR", Const, 0, ""},
    +		{"SYS_LINK", Const, 0, ""},
    +		{"SYS_LINKAT", Const, 0, ""},
    +		{"SYS_LIO_LISTIO", Const, 0, ""},
    +		{"SYS_LISTEN", Const, 0, ""},
    +		{"SYS_LISTXATTR", Const, 0, ""},
    +		{"SYS_LLISTXATTR", Const, 0, ""},
    +		{"SYS_LOCK", Const, 0, ""},
    +		{"SYS_LOOKUP_DCOOKIE", Const, 0, ""},
    +		{"SYS_LPATHCONF", Const, 0, ""},
    +		{"SYS_LREMOVEXATTR", Const, 0, ""},
    +		{"SYS_LSEEK", Const, 0, ""},
    +		{"SYS_LSETXATTR", Const, 0, ""},
    +		{"SYS_LSTAT", Const, 0, ""},
    +		{"SYS_LSTAT64", Const, 0, ""},
    +		{"SYS_LSTAT64_EXTENDED", Const, 0, ""},
    +		{"SYS_LSTATV", Const, 0, ""},
    +		{"SYS_LSTAT_EXTENDED", Const, 0, ""},
    +		{"SYS_LUTIMES", Const, 0, ""},
    +		{"SYS_MAC_SYSCALL", Const, 0, ""},
    +		{"SYS_MADVISE", Const, 0, ""},
    +		{"SYS_MADVISE1", Const, 0, ""},
    +		{"SYS_MAXSYSCALL", Const, 0, ""},
    +		{"SYS_MBIND", Const, 0, ""},
    +		{"SYS_MIGRATE_PAGES", Const, 0, ""},
    +		{"SYS_MINCORE", Const, 0, ""},
    +		{"SYS_MINHERIT", Const, 0, ""},
    +		{"SYS_MKCOMPLEX", Const, 0, ""},
    +		{"SYS_MKDIR", Const, 0, ""},
    +		{"SYS_MKDIRAT", Const, 0, ""},
    +		{"SYS_MKDIR_EXTENDED", Const, 0, ""},
    +		{"SYS_MKFIFO", Const, 0, ""},
    +		{"SYS_MKFIFOAT", Const, 0, ""},
    +		{"SYS_MKFIFO_EXTENDED", Const, 0, ""},
    +		{"SYS_MKNOD", Const, 0, ""},
    +		{"SYS_MKNODAT", Const, 0, ""},
    +		{"SYS_MLOCK", Const, 0, ""},
    +		{"SYS_MLOCKALL", Const, 0, ""},
    +		{"SYS_MMAP", Const, 0, ""},
    +		{"SYS_MMAP2", Const, 0, ""},
    +		{"SYS_MODCTL", Const, 1, ""},
    +		{"SYS_MODFIND", Const, 0, ""},
    +		{"SYS_MODFNEXT", Const, 0, ""},
    +		{"SYS_MODIFY_LDT", Const, 0, ""},
    +		{"SYS_MODNEXT", Const, 0, ""},
    +		{"SYS_MODSTAT", Const, 0, ""},
    +		{"SYS_MODWATCH", Const, 0, ""},
    +		{"SYS_MOUNT", Const, 0, ""},
    +		{"SYS_MOVE_PAGES", Const, 0, ""},
    +		{"SYS_MPROTECT", Const, 0, ""},
    +		{"SYS_MPX", Const, 0, ""},
    +		{"SYS_MQUERY", Const, 1, ""},
    +		{"SYS_MQ_GETSETATTR", Const, 0, ""},
    +		{"SYS_MQ_NOTIFY", Const, 0, ""},
    +		{"SYS_MQ_OPEN", Const, 0, ""},
    +		{"SYS_MQ_TIMEDRECEIVE", Const, 0, ""},
    +		{"SYS_MQ_TIMEDSEND", Const, 0, ""},
    +		{"SYS_MQ_UNLINK", Const, 0, ""},
    +		{"SYS_MREMAP", Const, 0, ""},
    +		{"SYS_MSGCTL", Const, 0, ""},
    +		{"SYS_MSGGET", Const, 0, ""},
    +		{"SYS_MSGRCV", Const, 0, ""},
    +		{"SYS_MSGRCV_NOCANCEL", Const, 0, ""},
    +		{"SYS_MSGSND", Const, 0, ""},
    +		{"SYS_MSGSND_NOCANCEL", Const, 0, ""},
    +		{"SYS_MSGSYS", Const, 0, ""},
    +		{"SYS_MSYNC", Const, 0, ""},
    +		{"SYS_MSYNC_NOCANCEL", Const, 0, ""},
    +		{"SYS_MUNLOCK", Const, 0, ""},
    +		{"SYS_MUNLOCKALL", Const, 0, ""},
    +		{"SYS_MUNMAP", Const, 0, ""},
    +		{"SYS_NAME_TO_HANDLE_AT", Const, 0, ""},
    +		{"SYS_NANOSLEEP", Const, 0, ""},
    +		{"SYS_NEWFSTATAT", Const, 0, ""},
    +		{"SYS_NFSCLNT", Const, 0, ""},
    +		{"SYS_NFSSERVCTL", Const, 0, ""},
    +		{"SYS_NFSSVC", Const, 0, ""},
    +		{"SYS_NFSTAT", Const, 0, ""},
    +		{"SYS_NICE", Const, 0, ""},
    +		{"SYS_NLM_SYSCALL", Const, 14, ""},
    +		{"SYS_NLSTAT", Const, 0, ""},
    +		{"SYS_NMOUNT", Const, 0, ""},
    +		{"SYS_NSTAT", Const, 0, ""},
    +		{"SYS_NTP_ADJTIME", Const, 0, ""},
    +		{"SYS_NTP_GETTIME", Const, 0, ""},
    +		{"SYS_NUMA_GETAFFINITY", Const, 14, ""},
    +		{"SYS_NUMA_SETAFFINITY", Const, 14, ""},
    +		{"SYS_OABI_SYSCALL_BASE", Const, 0, ""},
    +		{"SYS_OBREAK", Const, 0, ""},
    +		{"SYS_OLDFSTAT", Const, 0, ""},
    +		{"SYS_OLDLSTAT", Const, 0, ""},
    +		{"SYS_OLDOLDUNAME", Const, 0, ""},
    +		{"SYS_OLDSTAT", Const, 0, ""},
    +		{"SYS_OLDUNAME", Const, 0, ""},
    +		{"SYS_OPEN", Const, 0, ""},
    +		{"SYS_OPENAT", Const, 0, ""},
    +		{"SYS_OPENBSD_POLL", Const, 0, ""},
    +		{"SYS_OPEN_BY_HANDLE_AT", Const, 0, ""},
    +		{"SYS_OPEN_DPROTECTED_NP", Const, 16, ""},
    +		{"SYS_OPEN_EXTENDED", Const, 0, ""},
    +		{"SYS_OPEN_NOCANCEL", Const, 0, ""},
    +		{"SYS_OVADVISE", Const, 0, ""},
    +		{"SYS_PACCEPT", Const, 1, ""},
    +		{"SYS_PATHCONF", Const, 0, ""},
    +		{"SYS_PAUSE", Const, 0, ""},
    +		{"SYS_PCICONFIG_IOBASE", Const, 0, ""},
    +		{"SYS_PCICONFIG_READ", Const, 0, ""},
    +		{"SYS_PCICONFIG_WRITE", Const, 0, ""},
    +		{"SYS_PDFORK", Const, 0, ""},
    +		{"SYS_PDGETPID", Const, 0, ""},
    +		{"SYS_PDKILL", Const, 0, ""},
    +		{"SYS_PERF_EVENT_OPEN", Const, 0, ""},
    +		{"SYS_PERSONALITY", Const, 0, ""},
    +		{"SYS_PID_HIBERNATE", Const, 0, ""},
    +		{"SYS_PID_RESUME", Const, 0, ""},
    +		{"SYS_PID_SHUTDOWN_SOCKETS", Const, 0, ""},
    +		{"SYS_PID_SUSPEND", Const, 0, ""},
    +		{"SYS_PIPE", Const, 0, ""},
    +		{"SYS_PIPE2", Const, 0, ""},
    +		{"SYS_PIVOT_ROOT", Const, 0, ""},
    +		{"SYS_PMC_CONTROL", Const, 1, ""},
    +		{"SYS_PMC_GET_INFO", Const, 1, ""},
    +		{"SYS_POLL", Const, 0, ""},
    +		{"SYS_POLLTS", Const, 1, ""},
    +		{"SYS_POLL_NOCANCEL", Const, 0, ""},
    +		{"SYS_POSIX_FADVISE", Const, 0, ""},
    +		{"SYS_POSIX_FALLOCATE", Const, 0, ""},
    +		{"SYS_POSIX_OPENPT", Const, 0, ""},
    +		{"SYS_POSIX_SPAWN", Const, 0, ""},
    +		{"SYS_PPOLL", Const, 0, ""},
    +		{"SYS_PRCTL", Const, 0, ""},
    +		{"SYS_PREAD", Const, 0, ""},
    +		{"SYS_PREAD64", Const, 0, ""},
    +		{"SYS_PREADV", Const, 0, ""},
    +		{"SYS_PREAD_NOCANCEL", Const, 0, ""},
    +		{"SYS_PRLIMIT64", Const, 0, ""},
    +		{"SYS_PROCCTL", Const, 3, ""},
    +		{"SYS_PROCESS_POLICY", Const, 0, ""},
    +		{"SYS_PROCESS_VM_READV", Const, 0, ""},
    +		{"SYS_PROCESS_VM_WRITEV", Const, 0, ""},
    +		{"SYS_PROC_INFO", Const, 0, ""},
    +		{"SYS_PROF", Const, 0, ""},
    +		{"SYS_PROFIL", Const, 0, ""},
    +		{"SYS_PSELECT", Const, 0, ""},
    +		{"SYS_PSELECT6", Const, 0, ""},
    +		{"SYS_PSET_ASSIGN", Const, 1, ""},
    +		{"SYS_PSET_CREATE", Const, 1, ""},
    +		{"SYS_PSET_DESTROY", Const, 1, ""},
    +		{"SYS_PSYNCH_CVBROAD", Const, 0, ""},
    +		{"SYS_PSYNCH_CVCLRPREPOST", Const, 0, ""},
    +		{"SYS_PSYNCH_CVSIGNAL", Const, 0, ""},
    +		{"SYS_PSYNCH_CVWAIT", Const, 0, ""},
    +		{"SYS_PSYNCH_MUTEXDROP", Const, 0, ""},
    +		{"SYS_PSYNCH_MUTEXWAIT", Const, 0, ""},
    +		{"SYS_PSYNCH_RW_DOWNGRADE", Const, 0, ""},
    +		{"SYS_PSYNCH_RW_LONGRDLOCK", Const, 0, ""},
    +		{"SYS_PSYNCH_RW_RDLOCK", Const, 0, ""},
    +		{"SYS_PSYNCH_RW_UNLOCK", Const, 0, ""},
    +		{"SYS_PSYNCH_RW_UNLOCK2", Const, 0, ""},
    +		{"SYS_PSYNCH_RW_UPGRADE", Const, 0, ""},
    +		{"SYS_PSYNCH_RW_WRLOCK", Const, 0, ""},
    +		{"SYS_PSYNCH_RW_YIELDWRLOCK", Const, 0, ""},
    +		{"SYS_PTRACE", Const, 0, ""},
    +		{"SYS_PUTPMSG", Const, 0, ""},
    +		{"SYS_PWRITE", Const, 0, ""},
    +		{"SYS_PWRITE64", Const, 0, ""},
    +		{"SYS_PWRITEV", Const, 0, ""},
    +		{"SYS_PWRITE_NOCANCEL", Const, 0, ""},
    +		{"SYS_QUERY_MODULE", Const, 0, ""},
    +		{"SYS_QUOTACTL", Const, 0, ""},
    +		{"SYS_RASCTL", Const, 1, ""},
    +		{"SYS_RCTL_ADD_RULE", Const, 0, ""},
    +		{"SYS_RCTL_GET_LIMITS", Const, 0, ""},
    +		{"SYS_RCTL_GET_RACCT", Const, 0, ""},
    +		{"SYS_RCTL_GET_RULES", Const, 0, ""},
    +		{"SYS_RCTL_REMOVE_RULE", Const, 0, ""},
    +		{"SYS_READ", Const, 0, ""},
    +		{"SYS_READAHEAD", Const, 0, ""},
    +		{"SYS_READDIR", Const, 0, ""},
    +		{"SYS_READLINK", Const, 0, ""},
    +		{"SYS_READLINKAT", Const, 0, ""},
    +		{"SYS_READV", Const, 0, ""},
    +		{"SYS_READV_NOCANCEL", Const, 0, ""},
    +		{"SYS_READ_NOCANCEL", Const, 0, ""},
    +		{"SYS_REBOOT", Const, 0, ""},
    +		{"SYS_RECV", Const, 0, ""},
    +		{"SYS_RECVFROM", Const, 0, ""},
    +		{"SYS_RECVFROM_NOCANCEL", Const, 0, ""},
    +		{"SYS_RECVMMSG", Const, 0, ""},
    +		{"SYS_RECVMSG", Const, 0, ""},
    +		{"SYS_RECVMSG_NOCANCEL", Const, 0, ""},
    +		{"SYS_REMAP_FILE_PAGES", Const, 0, ""},
    +		{"SYS_REMOVEXATTR", Const, 0, ""},
    +		{"SYS_RENAME", Const, 0, ""},
    +		{"SYS_RENAMEAT", Const, 0, ""},
    +		{"SYS_REQUEST_KEY", Const, 0, ""},
    +		{"SYS_RESTART_SYSCALL", Const, 0, ""},
    +		{"SYS_REVOKE", Const, 0, ""},
    +		{"SYS_RFORK", Const, 0, ""},
    +		{"SYS_RMDIR", Const, 0, ""},
    +		{"SYS_RTPRIO", Const, 0, ""},
    +		{"SYS_RTPRIO_THREAD", Const, 0, ""},
    +		{"SYS_RT_SIGACTION", Const, 0, ""},
    +		{"SYS_RT_SIGPENDING", Const, 0, ""},
    +		{"SYS_RT_SIGPROCMASK", Const, 0, ""},
    +		{"SYS_RT_SIGQUEUEINFO", Const, 0, ""},
    +		{"SYS_RT_SIGRETURN", Const, 0, ""},
    +		{"SYS_RT_SIGSUSPEND", Const, 0, ""},
    +		{"SYS_RT_SIGTIMEDWAIT", Const, 0, ""},
    +		{"SYS_RT_TGSIGQUEUEINFO", Const, 0, ""},
    +		{"SYS_SBRK", Const, 0, ""},
    +		{"SYS_SCHED_GETAFFINITY", Const, 0, ""},
    +		{"SYS_SCHED_GETPARAM", Const, 0, ""},
    +		{"SYS_SCHED_GETSCHEDULER", Const, 0, ""},
    +		{"SYS_SCHED_GET_PRIORITY_MAX", Const, 0, ""},
    +		{"SYS_SCHED_GET_PRIORITY_MIN", Const, 0, ""},
    +		{"SYS_SCHED_RR_GET_INTERVAL", Const, 0, ""},
    +		{"SYS_SCHED_SETAFFINITY", Const, 0, ""},
    +		{"SYS_SCHED_SETPARAM", Const, 0, ""},
    +		{"SYS_SCHED_SETSCHEDULER", Const, 0, ""},
    +		{"SYS_SCHED_YIELD", Const, 0, ""},
    +		{"SYS_SCTP_GENERIC_RECVMSG", Const, 0, ""},
    +		{"SYS_SCTP_GENERIC_SENDMSG", Const, 0, ""},
    +		{"SYS_SCTP_GENERIC_SENDMSG_IOV", Const, 0, ""},
    +		{"SYS_SCTP_PEELOFF", Const, 0, ""},
    +		{"SYS_SEARCHFS", Const, 0, ""},
    +		{"SYS_SECURITY", Const, 0, ""},
    +		{"SYS_SELECT", Const, 0, ""},
    +		{"SYS_SELECT_NOCANCEL", Const, 0, ""},
    +		{"SYS_SEMCONFIG", Const, 1, ""},
    +		{"SYS_SEMCTL", Const, 0, ""},
    +		{"SYS_SEMGET", Const, 0, ""},
    +		{"SYS_SEMOP", Const, 0, ""},
    +		{"SYS_SEMSYS", Const, 0, ""},
    +		{"SYS_SEMTIMEDOP", Const, 0, ""},
    +		{"SYS_SEM_CLOSE", Const, 0, ""},
    +		{"SYS_SEM_DESTROY", Const, 0, ""},
    +		{"SYS_SEM_GETVALUE", Const, 0, ""},
    +		{"SYS_SEM_INIT", Const, 0, ""},
    +		{"SYS_SEM_OPEN", Const, 0, ""},
    +		{"SYS_SEM_POST", Const, 0, ""},
    +		{"SYS_SEM_TRYWAIT", Const, 0, ""},
    +		{"SYS_SEM_UNLINK", Const, 0, ""},
    +		{"SYS_SEM_WAIT", Const, 0, ""},
    +		{"SYS_SEM_WAIT_NOCANCEL", Const, 0, ""},
    +		{"SYS_SEND", Const, 0, ""},
    +		{"SYS_SENDFILE", Const, 0, ""},
    +		{"SYS_SENDFILE64", Const, 0, ""},
    +		{"SYS_SENDMMSG", Const, 0, ""},
    +		{"SYS_SENDMSG", Const, 0, ""},
    +		{"SYS_SENDMSG_NOCANCEL", Const, 0, ""},
    +		{"SYS_SENDTO", Const, 0, ""},
    +		{"SYS_SENDTO_NOCANCEL", Const, 0, ""},
    +		{"SYS_SETATTRLIST", Const, 0, ""},
    +		{"SYS_SETAUDIT", Const, 0, ""},
    +		{"SYS_SETAUDIT_ADDR", Const, 0, ""},
    +		{"SYS_SETAUID", Const, 0, ""},
    +		{"SYS_SETCONTEXT", Const, 0, ""},
    +		{"SYS_SETDOMAINNAME", Const, 0, ""},
    +		{"SYS_SETEGID", Const, 0, ""},
    +		{"SYS_SETEUID", Const, 0, ""},
    +		{"SYS_SETFIB", Const, 0, ""},
    +		{"SYS_SETFSGID", Const, 0, ""},
    +		{"SYS_SETFSGID32", Const, 0, ""},
    +		{"SYS_SETFSUID", Const, 0, ""},
    +		{"SYS_SETFSUID32", Const, 0, ""},
    +		{"SYS_SETGID", Const, 0, ""},
    +		{"SYS_SETGID32", Const, 0, ""},
    +		{"SYS_SETGROUPS", Const, 0, ""},
    +		{"SYS_SETGROUPS32", Const, 0, ""},
    +		{"SYS_SETHOSTNAME", Const, 0, ""},
    +		{"SYS_SETITIMER", Const, 0, ""},
    +		{"SYS_SETLCID", Const, 0, ""},
    +		{"SYS_SETLOGIN", Const, 0, ""},
    +		{"SYS_SETLOGINCLASS", Const, 0, ""},
    +		{"SYS_SETNS", Const, 0, ""},
    +		{"SYS_SETPGID", Const, 0, ""},
    +		{"SYS_SETPRIORITY", Const, 0, ""},
    +		{"SYS_SETPRIVEXEC", Const, 0, ""},
    +		{"SYS_SETREGID", Const, 0, ""},
    +		{"SYS_SETREGID32", Const, 0, ""},
    +		{"SYS_SETRESGID", Const, 0, ""},
    +		{"SYS_SETRESGID32", Const, 0, ""},
    +		{"SYS_SETRESUID", Const, 0, ""},
    +		{"SYS_SETRESUID32", Const, 0, ""},
    +		{"SYS_SETREUID", Const, 0, ""},
    +		{"SYS_SETREUID32", Const, 0, ""},
    +		{"SYS_SETRLIMIT", Const, 0, ""},
    +		{"SYS_SETRTABLE", Const, 1, ""},
    +		{"SYS_SETSGROUPS", Const, 0, ""},
    +		{"SYS_SETSID", Const, 0, ""},
    +		{"SYS_SETSOCKOPT", Const, 0, ""},
    +		{"SYS_SETTID", Const, 0, ""},
    +		{"SYS_SETTID_WITH_PID", Const, 0, ""},
    +		{"SYS_SETTIMEOFDAY", Const, 0, ""},
    +		{"SYS_SETUID", Const, 0, ""},
    +		{"SYS_SETUID32", Const, 0, ""},
    +		{"SYS_SETWGROUPS", Const, 0, ""},
    +		{"SYS_SETXATTR", Const, 0, ""},
    +		{"SYS_SET_MEMPOLICY", Const, 0, ""},
    +		{"SYS_SET_ROBUST_LIST", Const, 0, ""},
    +		{"SYS_SET_THREAD_AREA", Const, 0, ""},
    +		{"SYS_SET_TID_ADDRESS", Const, 0, ""},
    +		{"SYS_SGETMASK", Const, 0, ""},
    +		{"SYS_SHARED_REGION_CHECK_NP", Const, 0, ""},
    +		{"SYS_SHARED_REGION_MAP_AND_SLIDE_NP", Const, 0, ""},
    +		{"SYS_SHMAT", Const, 0, ""},
    +		{"SYS_SHMCTL", Const, 0, ""},
    +		{"SYS_SHMDT", Const, 0, ""},
    +		{"SYS_SHMGET", Const, 0, ""},
    +		{"SYS_SHMSYS", Const, 0, ""},
    +		{"SYS_SHM_OPEN", Const, 0, ""},
    +		{"SYS_SHM_UNLINK", Const, 0, ""},
    +		{"SYS_SHUTDOWN", Const, 0, ""},
    +		{"SYS_SIGACTION", Const, 0, ""},
    +		{"SYS_SIGALTSTACK", Const, 0, ""},
    +		{"SYS_SIGNAL", Const, 0, ""},
    +		{"SYS_SIGNALFD", Const, 0, ""},
    +		{"SYS_SIGNALFD4", Const, 0, ""},
    +		{"SYS_SIGPENDING", Const, 0, ""},
    +		{"SYS_SIGPROCMASK", Const, 0, ""},
    +		{"SYS_SIGQUEUE", Const, 0, ""},
    +		{"SYS_SIGQUEUEINFO", Const, 1, ""},
    +		{"SYS_SIGRETURN", Const, 0, ""},
    +		{"SYS_SIGSUSPEND", Const, 0, ""},
    +		{"SYS_SIGSUSPEND_NOCANCEL", Const, 0, ""},
    +		{"SYS_SIGTIMEDWAIT", Const, 0, ""},
    +		{"SYS_SIGWAIT", Const, 0, ""},
    +		{"SYS_SIGWAITINFO", Const, 0, ""},
    +		{"SYS_SOCKET", Const, 0, ""},
    +		{"SYS_SOCKETCALL", Const, 0, ""},
    +		{"SYS_SOCKETPAIR", Const, 0, ""},
    +		{"SYS_SPLICE", Const, 0, ""},
    +		{"SYS_SSETMASK", Const, 0, ""},
    +		{"SYS_SSTK", Const, 0, ""},
    +		{"SYS_STACK_SNAPSHOT", Const, 0, ""},
    +		{"SYS_STAT", Const, 0, ""},
    +		{"SYS_STAT64", Const, 0, ""},
    +		{"SYS_STAT64_EXTENDED", Const, 0, ""},
    +		{"SYS_STATFS", Const, 0, ""},
    +		{"SYS_STATFS64", Const, 0, ""},
    +		{"SYS_STATV", Const, 0, ""},
    +		{"SYS_STATVFS1", Const, 1, ""},
    +		{"SYS_STAT_EXTENDED", Const, 0, ""},
    +		{"SYS_STIME", Const, 0, ""},
    +		{"SYS_STTY", Const, 0, ""},
    +		{"SYS_SWAPCONTEXT", Const, 0, ""},
    +		{"SYS_SWAPCTL", Const, 1, ""},
    +		{"SYS_SWAPOFF", Const, 0, ""},
    +		{"SYS_SWAPON", Const, 0, ""},
    +		{"SYS_SYMLINK", Const, 0, ""},
    +		{"SYS_SYMLINKAT", Const, 0, ""},
    +		{"SYS_SYNC", Const, 0, ""},
    +		{"SYS_SYNCFS", Const, 0, ""},
    +		{"SYS_SYNC_FILE_RANGE", Const, 0, ""},
    +		{"SYS_SYSARCH", Const, 0, ""},
    +		{"SYS_SYSCALL", Const, 0, ""},
    +		{"SYS_SYSCALL_BASE", Const, 0, ""},
    +		{"SYS_SYSFS", Const, 0, ""},
    +		{"SYS_SYSINFO", Const, 0, ""},
    +		{"SYS_SYSLOG", Const, 0, ""},
    +		{"SYS_TEE", Const, 0, ""},
    +		{"SYS_TGKILL", Const, 0, ""},
    +		{"SYS_THREAD_SELFID", Const, 0, ""},
    +		{"SYS_THR_CREATE", Const, 0, ""},
    +		{"SYS_THR_EXIT", Const, 0, ""},
    +		{"SYS_THR_KILL", Const, 0, ""},
    +		{"SYS_THR_KILL2", Const, 0, ""},
    +		{"SYS_THR_NEW", Const, 0, ""},
    +		{"SYS_THR_SELF", Const, 0, ""},
    +		{"SYS_THR_SET_NAME", Const, 0, ""},
    +		{"SYS_THR_SUSPEND", Const, 0, ""},
    +		{"SYS_THR_WAKE", Const, 0, ""},
    +		{"SYS_TIME", Const, 0, ""},
    +		{"SYS_TIMERFD_CREATE", Const, 0, ""},
    +		{"SYS_TIMERFD_GETTIME", Const, 0, ""},
    +		{"SYS_TIMERFD_SETTIME", Const, 0, ""},
    +		{"SYS_TIMER_CREATE", Const, 0, ""},
    +		{"SYS_TIMER_DELETE", Const, 0, ""},
    +		{"SYS_TIMER_GETOVERRUN", Const, 0, ""},
    +		{"SYS_TIMER_GETTIME", Const, 0, ""},
    +		{"SYS_TIMER_SETTIME", Const, 0, ""},
    +		{"SYS_TIMES", Const, 0, ""},
    +		{"SYS_TKILL", Const, 0, ""},
    +		{"SYS_TRUNCATE", Const, 0, ""},
    +		{"SYS_TRUNCATE64", Const, 0, ""},
    +		{"SYS_TUXCALL", Const, 0, ""},
    +		{"SYS_UGETRLIMIT", Const, 0, ""},
    +		{"SYS_ULIMIT", Const, 0, ""},
    +		{"SYS_UMASK", Const, 0, ""},
    +		{"SYS_UMASK_EXTENDED", Const, 0, ""},
    +		{"SYS_UMOUNT", Const, 0, ""},
    +		{"SYS_UMOUNT2", Const, 0, ""},
    +		{"SYS_UNAME", Const, 0, ""},
    +		{"SYS_UNDELETE", Const, 0, ""},
    +		{"SYS_UNLINK", Const, 0, ""},
    +		{"SYS_UNLINKAT", Const, 0, ""},
    +		{"SYS_UNMOUNT", Const, 0, ""},
    +		{"SYS_UNSHARE", Const, 0, ""},
    +		{"SYS_USELIB", Const, 0, ""},
    +		{"SYS_USTAT", Const, 0, ""},
    +		{"SYS_UTIME", Const, 0, ""},
    +		{"SYS_UTIMENSAT", Const, 0, ""},
    +		{"SYS_UTIMES", Const, 0, ""},
    +		{"SYS_UTRACE", Const, 0, ""},
    +		{"SYS_UUIDGEN", Const, 0, ""},
    +		{"SYS_VADVISE", Const, 1, ""},
    +		{"SYS_VFORK", Const, 0, ""},
    +		{"SYS_VHANGUP", Const, 0, ""},
    +		{"SYS_VM86", Const, 0, ""},
    +		{"SYS_VM86OLD", Const, 0, ""},
    +		{"SYS_VMSPLICE", Const, 0, ""},
    +		{"SYS_VM_PRESSURE_MONITOR", Const, 0, ""},
    +		{"SYS_VSERVER", Const, 0, ""},
    +		{"SYS_WAIT4", Const, 0, ""},
    +		{"SYS_WAIT4_NOCANCEL", Const, 0, ""},
    +		{"SYS_WAIT6", Const, 1, ""},
    +		{"SYS_WAITEVENT", Const, 0, ""},
    +		{"SYS_WAITID", Const, 0, ""},
    +		{"SYS_WAITID_NOCANCEL", Const, 0, ""},
    +		{"SYS_WAITPID", Const, 0, ""},
    +		{"SYS_WATCHEVENT", Const, 0, ""},
    +		{"SYS_WORKQ_KERNRETURN", Const, 0, ""},
    +		{"SYS_WORKQ_OPEN", Const, 0, ""},
    +		{"SYS_WRITE", Const, 0, ""},
    +		{"SYS_WRITEV", Const, 0, ""},
    +		{"SYS_WRITEV_NOCANCEL", Const, 0, ""},
    +		{"SYS_WRITE_NOCANCEL", Const, 0, ""},
    +		{"SYS_YIELD", Const, 0, ""},
    +		{"SYS__LLSEEK", Const, 0, ""},
    +		{"SYS__LWP_CONTINUE", Const, 1, ""},
    +		{"SYS__LWP_CREATE", Const, 1, ""},
    +		{"SYS__LWP_CTL", Const, 1, ""},
    +		{"SYS__LWP_DETACH", Const, 1, ""},
    +		{"SYS__LWP_EXIT", Const, 1, ""},
    +		{"SYS__LWP_GETNAME", Const, 1, ""},
    +		{"SYS__LWP_GETPRIVATE", Const, 1, ""},
    +		{"SYS__LWP_KILL", Const, 1, ""},
    +		{"SYS__LWP_PARK", Const, 1, ""},
    +		{"SYS__LWP_SELF", Const, 1, ""},
    +		{"SYS__LWP_SETNAME", Const, 1, ""},
    +		{"SYS__LWP_SETPRIVATE", Const, 1, ""},
    +		{"SYS__LWP_SUSPEND", Const, 1, ""},
    +		{"SYS__LWP_UNPARK", Const, 1, ""},
    +		{"SYS__LWP_UNPARK_ALL", Const, 1, ""},
    +		{"SYS__LWP_WAIT", Const, 1, ""},
    +		{"SYS__LWP_WAKEUP", Const, 1, ""},
    +		{"SYS__NEWSELECT", Const, 0, ""},
    +		{"SYS__PSET_BIND", Const, 1, ""},
    +		{"SYS__SCHED_GETAFFINITY", Const, 1, ""},
    +		{"SYS__SCHED_GETPARAM", Const, 1, ""},
    +		{"SYS__SCHED_SETAFFINITY", Const, 1, ""},
    +		{"SYS__SCHED_SETPARAM", Const, 1, ""},
    +		{"SYS__SYSCTL", Const, 0, ""},
    +		{"SYS__UMTX_LOCK", Const, 0, ""},
    +		{"SYS__UMTX_OP", Const, 0, ""},
    +		{"SYS__UMTX_UNLOCK", Const, 0, ""},
    +		{"SYS___ACL_ACLCHECK_FD", Const, 0, ""},
    +		{"SYS___ACL_ACLCHECK_FILE", Const, 0, ""},
    +		{"SYS___ACL_ACLCHECK_LINK", Const, 0, ""},
    +		{"SYS___ACL_DELETE_FD", Const, 0, ""},
    +		{"SYS___ACL_DELETE_FILE", Const, 0, ""},
    +		{"SYS___ACL_DELETE_LINK", Const, 0, ""},
    +		{"SYS___ACL_GET_FD", Const, 0, ""},
    +		{"SYS___ACL_GET_FILE", Const, 0, ""},
    +		{"SYS___ACL_GET_LINK", Const, 0, ""},
    +		{"SYS___ACL_SET_FD", Const, 0, ""},
    +		{"SYS___ACL_SET_FILE", Const, 0, ""},
    +		{"SYS___ACL_SET_LINK", Const, 0, ""},
    +		{"SYS___CAP_RIGHTS_GET", Const, 14, ""},
    +		{"SYS___CLONE", Const, 1, ""},
    +		{"SYS___DISABLE_THREADSIGNAL", Const, 0, ""},
    +		{"SYS___GETCWD", Const, 0, ""},
    +		{"SYS___GETLOGIN", Const, 1, ""},
    +		{"SYS___GET_TCB", Const, 1, ""},
    +		{"SYS___MAC_EXECVE", Const, 0, ""},
    +		{"SYS___MAC_GETFSSTAT", Const, 0, ""},
    +		{"SYS___MAC_GET_FD", Const, 0, ""},
    +		{"SYS___MAC_GET_FILE", Const, 0, ""},
    +		{"SYS___MAC_GET_LCID", Const, 0, ""},
    +		{"SYS___MAC_GET_LCTX", Const, 0, ""},
    +		{"SYS___MAC_GET_LINK", Const, 0, ""},
    +		{"SYS___MAC_GET_MOUNT", Const, 0, ""},
    +		{"SYS___MAC_GET_PID", Const, 0, ""},
    +		{"SYS___MAC_GET_PROC", Const, 0, ""},
    +		{"SYS___MAC_MOUNT", Const, 0, ""},
    +		{"SYS___MAC_SET_FD", Const, 0, ""},
    +		{"SYS___MAC_SET_FILE", Const, 0, ""},
    +		{"SYS___MAC_SET_LCTX", Const, 0, ""},
    +		{"SYS___MAC_SET_LINK", Const, 0, ""},
    +		{"SYS___MAC_SET_PROC", Const, 0, ""},
    +		{"SYS___MAC_SYSCALL", Const, 0, ""},
    +		{"SYS___OLD_SEMWAIT_SIGNAL", Const, 0, ""},
    +		{"SYS___OLD_SEMWAIT_SIGNAL_NOCANCEL", Const, 0, ""},
    +		{"SYS___POSIX_CHOWN", Const, 1, ""},
    +		{"SYS___POSIX_FCHOWN", Const, 1, ""},
    +		{"SYS___POSIX_LCHOWN", Const, 1, ""},
    +		{"SYS___POSIX_RENAME", Const, 1, ""},
    +		{"SYS___PTHREAD_CANCELED", Const, 0, ""},
    +		{"SYS___PTHREAD_CHDIR", Const, 0, ""},
    +		{"SYS___PTHREAD_FCHDIR", Const, 0, ""},
    +		{"SYS___PTHREAD_KILL", Const, 0, ""},
    +		{"SYS___PTHREAD_MARKCANCEL", Const, 0, ""},
    +		{"SYS___PTHREAD_SIGMASK", Const, 0, ""},
    +		{"SYS___QUOTACTL", Const, 1, ""},
    +		{"SYS___SEMCTL", Const, 1, ""},
    +		{"SYS___SEMWAIT_SIGNAL", Const, 0, ""},
    +		{"SYS___SEMWAIT_SIGNAL_NOCANCEL", Const, 0, ""},
    +		{"SYS___SETLOGIN", Const, 1, ""},
    +		{"SYS___SETUGID", Const, 0, ""},
    +		{"SYS___SET_TCB", Const, 1, ""},
    +		{"SYS___SIGACTION_SIGTRAMP", Const, 1, ""},
    +		{"SYS___SIGTIMEDWAIT", Const, 1, ""},
    +		{"SYS___SIGWAIT", Const, 0, ""},
    +		{"SYS___SIGWAIT_NOCANCEL", Const, 0, ""},
    +		{"SYS___SYSCTL", Const, 0, ""},
    +		{"SYS___TFORK", Const, 1, ""},
    +		{"SYS___THREXIT", Const, 1, ""},
    +		{"SYS___THRSIGDIVERT", Const, 1, ""},
    +		{"SYS___THRSLEEP", Const, 1, ""},
    +		{"SYS___THRWAKEUP", Const, 1, ""},
    +		{"S_ARCH1", Const, 1, ""},
    +		{"S_ARCH2", Const, 1, ""},
    +		{"S_BLKSIZE", Const, 0, ""},
    +		{"S_IEXEC", Const, 0, ""},
    +		{"S_IFBLK", Const, 0, ""},
    +		{"S_IFCHR", Const, 0, ""},
    +		{"S_IFDIR", Const, 0, ""},
    +		{"S_IFIFO", Const, 0, ""},
    +		{"S_IFLNK", Const, 0, ""},
    +		{"S_IFMT", Const, 0, ""},
    +		{"S_IFREG", Const, 0, ""},
    +		{"S_IFSOCK", Const, 0, ""},
    +		{"S_IFWHT", Const, 0, ""},
    +		{"S_IREAD", Const, 0, ""},
    +		{"S_IRGRP", Const, 0, ""},
    +		{"S_IROTH", Const, 0, ""},
    +		{"S_IRUSR", Const, 0, ""},
    +		{"S_IRWXG", Const, 0, ""},
    +		{"S_IRWXO", Const, 0, ""},
    +		{"S_IRWXU", Const, 0, ""},
    +		{"S_ISGID", Const, 0, ""},
    +		{"S_ISTXT", Const, 0, ""},
    +		{"S_ISUID", Const, 0, ""},
    +		{"S_ISVTX", Const, 0, ""},
    +		{"S_IWGRP", Const, 0, ""},
    +		{"S_IWOTH", Const, 0, ""},
    +		{"S_IWRITE", Const, 0, ""},
    +		{"S_IWUSR", Const, 0, ""},
    +		{"S_IXGRP", Const, 0, ""},
    +		{"S_IXOTH", Const, 0, ""},
    +		{"S_IXUSR", Const, 0, ""},
    +		{"S_LOGIN_SET", Const, 1, ""},
    +		{"SecurityAttributes", Type, 0, ""},
    +		{"SecurityAttributes.InheritHandle", Field, 0, ""},
    +		{"SecurityAttributes.Length", Field, 0, ""},
    +		{"SecurityAttributes.SecurityDescriptor", Field, 0, ""},
    +		{"Seek", Func, 0, "func(fd int, offset int64, whence int) (off int64, err error)"},
    +		{"Select", Func, 0, "func(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error)"},
    +		{"Sendfile", Func, 0, "func(outfd int, infd int, offset *int64, count int) (written int, err error)"},
    +		{"Sendmsg", Func, 0, "func(fd int, p []byte, oob []byte, to Sockaddr, flags int) (err error)"},
    +		{"SendmsgN", Func, 3, "func(fd int, p []byte, oob []byte, to Sockaddr, flags int) (n int, err error)"},
    +		{"Sendto", Func, 0, "func(fd int, p []byte, flags int, to Sockaddr) (err error)"},
    +		{"Servent", Type, 0, ""},
    +		{"Servent.Aliases", Field, 0, ""},
    +		{"Servent.Name", Field, 0, ""},
    +		{"Servent.Port", Field, 0, ""},
    +		{"Servent.Proto", Field, 0, ""},
    +		{"SetBpf", Func, 0, ""},
    +		{"SetBpfBuflen", Func, 0, ""},
    +		{"SetBpfDatalink", Func, 0, ""},
    +		{"SetBpfHeadercmpl", Func, 0, ""},
    +		{"SetBpfImmediate", Func, 0, ""},
    +		{"SetBpfInterface", Func, 0, ""},
    +		{"SetBpfPromisc", Func, 0, ""},
    +		{"SetBpfTimeout", Func, 0, ""},
    +		{"SetCurrentDirectory", Func, 0, ""},
    +		{"SetEndOfFile", Func, 0, ""},
    +		{"SetEnvironmentVariable", Func, 0, ""},
    +		{"SetFileAttributes", Func, 0, ""},
    +		{"SetFileCompletionNotificationModes", Func, 2, ""},
    +		{"SetFilePointer", Func, 0, ""},
    +		{"SetFileTime", Func, 0, ""},
    +		{"SetHandleInformation", Func, 0, ""},
    +		{"SetKevent", Func, 0, ""},
    +		{"SetLsfPromisc", Func, 0, "func(name string, m bool) error"},
    +		{"SetNonblock", Func, 0, "func(fd int, nonblocking bool) (err error)"},
    +		{"Setdomainname", Func, 0, "func(p []byte) (err error)"},
    +		{"Setegid", Func, 0, "func(egid int) (err error)"},
    +		{"Setenv", Func, 0, "func(key string, value string) error"},
    +		{"Seteuid", Func, 0, "func(euid int) (err error)"},
    +		{"Setfsgid", Func, 0, "func(gid int) (err error)"},
    +		{"Setfsuid", Func, 0, "func(uid int) (err error)"},
    +		{"Setgid", Func, 0, "func(gid int) (err error)"},
    +		{"Setgroups", Func, 0, "func(gids []int) (err error)"},
    +		{"Sethostname", Func, 0, "func(p []byte) (err error)"},
    +		{"Setlogin", Func, 0, ""},
    +		{"Setpgid", Func, 0, "func(pid int, pgid int) (err error)"},
    +		{"Setpriority", Func, 0, "func(which int, who int, prio int) (err error)"},
    +		{"Setprivexec", Func, 0, ""},
    +		{"Setregid", Func, 0, "func(rgid int, egid int) (err error)"},
    +		{"Setresgid", Func, 0, "func(rgid int, egid int, sgid int) (err error)"},
    +		{"Setresuid", Func, 0, "func(ruid int, euid int, suid int) (err error)"},
    +		{"Setreuid", Func, 0, "func(ruid int, euid int) (err error)"},
    +		{"Setrlimit", Func, 0, "func(resource int, rlim *Rlimit) error"},
    +		{"Setsid", Func, 0, "func() (pid int, err error)"},
    +		{"Setsockopt", Func, 0, ""},
    +		{"SetsockoptByte", Func, 0, "func(fd int, level int, opt int, value byte) (err error)"},
    +		{"SetsockoptICMPv6Filter", Func, 2, "func(fd int, level int, opt int, filter *ICMPv6Filter) error"},
    +		{"SetsockoptIPMreq", Func, 0, "func(fd int, level int, opt int, mreq *IPMreq) (err error)"},
    +		{"SetsockoptIPMreqn", Func, 0, "func(fd int, level int, opt int, mreq *IPMreqn) (err error)"},
    +		{"SetsockoptIPv6Mreq", Func, 0, "func(fd int, level int, opt int, mreq *IPv6Mreq) (err error)"},
    +		{"SetsockoptInet4Addr", Func, 0, "func(fd int, level int, opt int, value [4]byte) (err error)"},
    +		{"SetsockoptInt", Func, 0, "func(fd int, level int, opt int, value int) (err error)"},
    +		{"SetsockoptLinger", Func, 0, "func(fd int, level int, opt int, l *Linger) (err error)"},
    +		{"SetsockoptString", Func, 0, "func(fd int, level int, opt int, s string) (err error)"},
    +		{"SetsockoptTimeval", Func, 0, "func(fd int, level int, opt int, tv *Timeval) (err error)"},
    +		{"Settimeofday", Func, 0, "func(tv *Timeval) (err error)"},
    +		{"Setuid", Func, 0, "func(uid int) (err error)"},
    +		{"Setxattr", Func, 1, "func(path string, attr string, data []byte, flags int) (err error)"},
    +		{"Shutdown", Func, 0, "func(fd int, how int) (err error)"},
    +		{"SidTypeAlias", Const, 0, ""},
    +		{"SidTypeComputer", Const, 0, ""},
    +		{"SidTypeDeletedAccount", Const, 0, ""},
    +		{"SidTypeDomain", Const, 0, ""},
    +		{"SidTypeGroup", Const, 0, ""},
    +		{"SidTypeInvalid", Const, 0, ""},
    +		{"SidTypeLabel", Const, 0, ""},
    +		{"SidTypeUnknown", Const, 0, ""},
    +		{"SidTypeUser", Const, 0, ""},
    +		{"SidTypeWellKnownGroup", Const, 0, ""},
    +		{"Signal", Type, 0, ""},
    +		{"SizeofBpfHdr", Const, 0, ""},
    +		{"SizeofBpfInsn", Const, 0, ""},
    +		{"SizeofBpfProgram", Const, 0, ""},
    +		{"SizeofBpfStat", Const, 0, ""},
    +		{"SizeofBpfVersion", Const, 0, ""},
    +		{"SizeofBpfZbuf", Const, 0, ""},
    +		{"SizeofBpfZbufHeader", Const, 0, ""},
    +		{"SizeofCmsghdr", Const, 0, ""},
    +		{"SizeofICMPv6Filter", Const, 2, ""},
    +		{"SizeofIPMreq", Const, 0, ""},
    +		{"SizeofIPMreqn", Const, 0, ""},
    +		{"SizeofIPv6MTUInfo", Const, 2, ""},
    +		{"SizeofIPv6Mreq", Const, 0, ""},
    +		{"SizeofIfAddrmsg", Const, 0, ""},
    +		{"SizeofIfAnnounceMsghdr", Const, 1, ""},
    +		{"SizeofIfData", Const, 0, ""},
    +		{"SizeofIfInfomsg", Const, 0, ""},
    +		{"SizeofIfMsghdr", Const, 0, ""},
    +		{"SizeofIfaMsghdr", Const, 0, ""},
    +		{"SizeofIfmaMsghdr", Const, 0, ""},
    +		{"SizeofIfmaMsghdr2", Const, 0, ""},
    +		{"SizeofInet4Pktinfo", Const, 0, ""},
    +		{"SizeofInet6Pktinfo", Const, 0, ""},
    +		{"SizeofInotifyEvent", Const, 0, ""},
    +		{"SizeofLinger", Const, 0, ""},
    +		{"SizeofMsghdr", Const, 0, ""},
    +		{"SizeofNlAttr", Const, 0, ""},
    +		{"SizeofNlMsgerr", Const, 0, ""},
    +		{"SizeofNlMsghdr", Const, 0, ""},
    +		{"SizeofRtAttr", Const, 0, ""},
    +		{"SizeofRtGenmsg", Const, 0, ""},
    +		{"SizeofRtMetrics", Const, 0, ""},
    +		{"SizeofRtMsg", Const, 0, ""},
    +		{"SizeofRtMsghdr", Const, 0, ""},
    +		{"SizeofRtNexthop", Const, 0, ""},
    +		{"SizeofSockFilter", Const, 0, ""},
    +		{"SizeofSockFprog", Const, 0, ""},
    +		{"SizeofSockaddrAny", Const, 0, ""},
    +		{"SizeofSockaddrDatalink", Const, 0, ""},
    +		{"SizeofSockaddrInet4", Const, 0, ""},
    +		{"SizeofSockaddrInet6", Const, 0, ""},
    +		{"SizeofSockaddrLinklayer", Const, 0, ""},
    +		{"SizeofSockaddrNetlink", Const, 0, ""},
    +		{"SizeofSockaddrUnix", Const, 0, ""},
    +		{"SizeofTCPInfo", Const, 1, ""},
    +		{"SizeofUcred", Const, 0, ""},
    +		{"SlicePtrFromStrings", Func, 1, "func(ss []string) ([]*byte, error)"},
    +		{"SockFilter", Type, 0, ""},
    +		{"SockFilter.Code", Field, 0, ""},
    +		{"SockFilter.Jf", Field, 0, ""},
    +		{"SockFilter.Jt", Field, 0, ""},
    +		{"SockFilter.K", Field, 0, ""},
    +		{"SockFprog", Type, 0, ""},
    +		{"SockFprog.Filter", Field, 0, ""},
    +		{"SockFprog.Len", Field, 0, ""},
    +		{"SockFprog.Pad_cgo_0", Field, 0, ""},
    +		{"Sockaddr", Type, 0, ""},
    +		{"SockaddrDatalink", Type, 0, ""},
    +		{"SockaddrDatalink.Alen", Field, 0, ""},
    +		{"SockaddrDatalink.Data", Field, 0, ""},
    +		{"SockaddrDatalink.Family", Field, 0, ""},
    +		{"SockaddrDatalink.Index", Field, 0, ""},
    +		{"SockaddrDatalink.Len", Field, 0, ""},
    +		{"SockaddrDatalink.Nlen", Field, 0, ""},
    +		{"SockaddrDatalink.Slen", Field, 0, ""},
    +		{"SockaddrDatalink.Type", Field, 0, ""},
    +		{"SockaddrGen", Type, 0, ""},
    +		{"SockaddrInet4", Type, 0, ""},
    +		{"SockaddrInet4.Addr", Field, 0, ""},
    +		{"SockaddrInet4.Port", Field, 0, ""},
    +		{"SockaddrInet6", Type, 0, ""},
    +		{"SockaddrInet6.Addr", Field, 0, ""},
    +		{"SockaddrInet6.Port", Field, 0, ""},
    +		{"SockaddrInet6.ZoneId", Field, 0, ""},
    +		{"SockaddrLinklayer", Type, 0, ""},
    +		{"SockaddrLinklayer.Addr", Field, 0, ""},
    +		{"SockaddrLinklayer.Halen", Field, 0, ""},
    +		{"SockaddrLinklayer.Hatype", Field, 0, ""},
    +		{"SockaddrLinklayer.Ifindex", Field, 0, ""},
    +		{"SockaddrLinklayer.Pkttype", Field, 0, ""},
    +		{"SockaddrLinklayer.Protocol", Field, 0, ""},
    +		{"SockaddrNetlink", Type, 0, ""},
    +		{"SockaddrNetlink.Family", Field, 0, ""},
    +		{"SockaddrNetlink.Groups", Field, 0, ""},
    +		{"SockaddrNetlink.Pad", Field, 0, ""},
    +		{"SockaddrNetlink.Pid", Field, 0, ""},
    +		{"SockaddrUnix", Type, 0, ""},
    +		{"SockaddrUnix.Name", Field, 0, ""},
    +		{"Socket", Func, 0, "func(domain int, typ int, proto int) (fd int, err error)"},
    +		{"SocketControlMessage", Type, 0, ""},
    +		{"SocketControlMessage.Data", Field, 0, ""},
    +		{"SocketControlMessage.Header", Field, 0, ""},
    +		{"SocketDisableIPv6", Var, 0, ""},
    +		{"Socketpair", Func, 0, "func(domain int, typ int, proto int) (fd [2]int, err error)"},
    +		{"Splice", Func, 0, "func(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error)"},
    +		{"StartProcess", Func, 0, "func(argv0 string, argv []string, attr *ProcAttr) (pid int, handle uintptr, err error)"},
    +		{"StartupInfo", Type, 0, ""},
    +		{"StartupInfo.Cb", Field, 0, ""},
    +		{"StartupInfo.Desktop", Field, 0, ""},
    +		{"StartupInfo.FillAttribute", Field, 0, ""},
    +		{"StartupInfo.Flags", Field, 0, ""},
    +		{"StartupInfo.ShowWindow", Field, 0, ""},
    +		{"StartupInfo.StdErr", Field, 0, ""},
    +		{"StartupInfo.StdInput", Field, 0, ""},
    +		{"StartupInfo.StdOutput", Field, 0, ""},
    +		{"StartupInfo.Title", Field, 0, ""},
    +		{"StartupInfo.X", Field, 0, ""},
    +		{"StartupInfo.XCountChars", Field, 0, ""},
    +		{"StartupInfo.XSize", Field, 0, ""},
    +		{"StartupInfo.Y", Field, 0, ""},
    +		{"StartupInfo.YCountChars", Field, 0, ""},
    +		{"StartupInfo.YSize", Field, 0, ""},
    +		{"Stat", Func, 0, "func(path string, stat *Stat_t) (err error)"},
    +		{"Stat_t", Type, 0, ""},
    +		{"Stat_t.Atim", Field, 0, ""},
    +		{"Stat_t.Atim_ext", Field, 12, ""},
    +		{"Stat_t.Atimespec", Field, 0, ""},
    +		{"Stat_t.Birthtimespec", Field, 0, ""},
    +		{"Stat_t.Blksize", Field, 0, ""},
    +		{"Stat_t.Blocks", Field, 0, ""},
    +		{"Stat_t.Btim_ext", Field, 12, ""},
    +		{"Stat_t.Ctim", Field, 0, ""},
    +		{"Stat_t.Ctim_ext", Field, 12, ""},
    +		{"Stat_t.Ctimespec", Field, 0, ""},
    +		{"Stat_t.Dev", Field, 0, ""},
    +		{"Stat_t.Flags", Field, 0, ""},
    +		{"Stat_t.Gen", Field, 0, ""},
    +		{"Stat_t.Gid", Field, 0, ""},
    +		{"Stat_t.Ino", Field, 0, ""},
    +		{"Stat_t.Lspare", Field, 0, ""},
    +		{"Stat_t.Lspare0", Field, 2, ""},
    +		{"Stat_t.Lspare1", Field, 2, ""},
    +		{"Stat_t.Mode", Field, 0, ""},
    +		{"Stat_t.Mtim", Field, 0, ""},
    +		{"Stat_t.Mtim_ext", Field, 12, ""},
    +		{"Stat_t.Mtimespec", Field, 0, ""},
    +		{"Stat_t.Nlink", Field, 0, ""},
    +		{"Stat_t.Pad_cgo_0", Field, 0, ""},
    +		{"Stat_t.Pad_cgo_1", Field, 0, ""},
    +		{"Stat_t.Pad_cgo_2", Field, 0, ""},
    +		{"Stat_t.Padding0", Field, 12, ""},
    +		{"Stat_t.Padding1", Field, 12, ""},
    +		{"Stat_t.Qspare", Field, 0, ""},
    +		{"Stat_t.Rdev", Field, 0, ""},
    +		{"Stat_t.Size", Field, 0, ""},
    +		{"Stat_t.Spare", Field, 2, ""},
    +		{"Stat_t.Uid", Field, 0, ""},
    +		{"Stat_t.X__pad0", Field, 0, ""},
    +		{"Stat_t.X__pad1", Field, 0, ""},
    +		{"Stat_t.X__pad2", Field, 0, ""},
    +		{"Stat_t.X__st_birthtim", Field, 2, ""},
    +		{"Stat_t.X__st_ino", Field, 0, ""},
    +		{"Stat_t.X__unused", Field, 0, ""},
    +		{"Statfs", Func, 0, "func(path string, buf *Statfs_t) (err error)"},
    +		{"Statfs_t", Type, 0, ""},
    +		{"Statfs_t.Asyncreads", Field, 0, ""},
    +		{"Statfs_t.Asyncwrites", Field, 0, ""},
    +		{"Statfs_t.Bavail", Field, 0, ""},
    +		{"Statfs_t.Bfree", Field, 0, ""},
    +		{"Statfs_t.Blocks", Field, 0, ""},
    +		{"Statfs_t.Bsize", Field, 0, ""},
    +		{"Statfs_t.Charspare", Field, 0, ""},
    +		{"Statfs_t.F_asyncreads", Field, 2, ""},
    +		{"Statfs_t.F_asyncwrites", Field, 2, ""},
    +		{"Statfs_t.F_bavail", Field, 2, ""},
    +		{"Statfs_t.F_bfree", Field, 2, ""},
    +		{"Statfs_t.F_blocks", Field, 2, ""},
    +		{"Statfs_t.F_bsize", Field, 2, ""},
    +		{"Statfs_t.F_ctime", Field, 2, ""},
    +		{"Statfs_t.F_favail", Field, 2, ""},
    +		{"Statfs_t.F_ffree", Field, 2, ""},
    +		{"Statfs_t.F_files", Field, 2, ""},
    +		{"Statfs_t.F_flags", Field, 2, ""},
    +		{"Statfs_t.F_fsid", Field, 2, ""},
    +		{"Statfs_t.F_fstypename", Field, 2, ""},
    +		{"Statfs_t.F_iosize", Field, 2, ""},
    +		{"Statfs_t.F_mntfromname", Field, 2, ""},
    +		{"Statfs_t.F_mntfromspec", Field, 3, ""},
    +		{"Statfs_t.F_mntonname", Field, 2, ""},
    +		{"Statfs_t.F_namemax", Field, 2, ""},
    +		{"Statfs_t.F_owner", Field, 2, ""},
    +		{"Statfs_t.F_spare", Field, 2, ""},
    +		{"Statfs_t.F_syncreads", Field, 2, ""},
    +		{"Statfs_t.F_syncwrites", Field, 2, ""},
    +		{"Statfs_t.Ffree", Field, 0, ""},
    +		{"Statfs_t.Files", Field, 0, ""},
    +		{"Statfs_t.Flags", Field, 0, ""},
    +		{"Statfs_t.Frsize", Field, 0, ""},
    +		{"Statfs_t.Fsid", Field, 0, ""},
    +		{"Statfs_t.Fssubtype", Field, 0, ""},
    +		{"Statfs_t.Fstypename", Field, 0, ""},
    +		{"Statfs_t.Iosize", Field, 0, ""},
    +		{"Statfs_t.Mntfromname", Field, 0, ""},
    +		{"Statfs_t.Mntonname", Field, 0, ""},
    +		{"Statfs_t.Mount_info", Field, 2, ""},
    +		{"Statfs_t.Namelen", Field, 0, ""},
    +		{"Statfs_t.Namemax", Field, 0, ""},
    +		{"Statfs_t.Owner", Field, 0, ""},
    +		{"Statfs_t.Pad_cgo_0", Field, 0, ""},
    +		{"Statfs_t.Pad_cgo_1", Field, 2, ""},
    +		{"Statfs_t.Reserved", Field, 0, ""},
    +		{"Statfs_t.Spare", Field, 0, ""},
    +		{"Statfs_t.Syncreads", Field, 0, ""},
    +		{"Statfs_t.Syncwrites", Field, 0, ""},
    +		{"Statfs_t.Type", Field, 0, ""},
    +		{"Statfs_t.Version", Field, 0, ""},
    +		{"Stderr", Var, 0, ""},
    +		{"Stdin", Var, 0, ""},
    +		{"Stdout", Var, 0, ""},
    +		{"StringBytePtr", Func, 0, "func(s string) *byte"},
    +		{"StringByteSlice", Func, 0, "func(s string) []byte"},
    +		{"StringSlicePtr", Func, 0, "func(ss []string) []*byte"},
    +		{"StringToSid", Func, 0, ""},
    +		{"StringToUTF16", Func, 0, ""},
    +		{"StringToUTF16Ptr", Func, 0, ""},
    +		{"Symlink", Func, 0, "func(oldpath string, newpath string) (err error)"},
    +		{"Sync", Func, 0, "func()"},
    +		{"SyncFileRange", Func, 0, "func(fd int, off int64, n int64, flags int) (err error)"},
    +		{"SysProcAttr", Type, 0, ""},
    +		{"SysProcAttr.AdditionalInheritedHandles", Field, 17, ""},
    +		{"SysProcAttr.AmbientCaps", Field, 9, ""},
    +		{"SysProcAttr.CgroupFD", Field, 20, ""},
    +		{"SysProcAttr.Chroot", Field, 0, ""},
    +		{"SysProcAttr.Cloneflags", Field, 2, ""},
    +		{"SysProcAttr.CmdLine", Field, 0, ""},
    +		{"SysProcAttr.CreationFlags", Field, 1, ""},
    +		{"SysProcAttr.Credential", Field, 0, ""},
    +		{"SysProcAttr.Ctty", Field, 1, ""},
    +		{"SysProcAttr.Foreground", Field, 5, ""},
    +		{"SysProcAttr.GidMappings", Field, 4, ""},
    +		{"SysProcAttr.GidMappingsEnableSetgroups", Field, 5, ""},
    +		{"SysProcAttr.HideWindow", Field, 0, ""},
    +		{"SysProcAttr.Jail", Field, 21, ""},
    +		{"SysProcAttr.NoInheritHandles", Field, 16, ""},
    +		{"SysProcAttr.Noctty", Field, 0, ""},
    +		{"SysProcAttr.ParentProcess", Field, 17, ""},
    +		{"SysProcAttr.Pdeathsig", Field, 0, ""},
    +		{"SysProcAttr.Pgid", Field, 5, ""},
    +		{"SysProcAttr.PidFD", Field, 22, ""},
    +		{"SysProcAttr.ProcessAttributes", Field, 13, ""},
    +		{"SysProcAttr.Ptrace", Field, 0, ""},
    +		{"SysProcAttr.Setctty", Field, 0, ""},
    +		{"SysProcAttr.Setpgid", Field, 0, ""},
    +		{"SysProcAttr.Setsid", Field, 0, ""},
    +		{"SysProcAttr.ThreadAttributes", Field, 13, ""},
    +		{"SysProcAttr.Token", Field, 10, ""},
    +		{"SysProcAttr.UidMappings", Field, 4, ""},
    +		{"SysProcAttr.Unshareflags", Field, 7, ""},
    +		{"SysProcAttr.UseCgroupFD", Field, 20, ""},
    +		{"SysProcIDMap", Type, 4, ""},
    +		{"SysProcIDMap.ContainerID", Field, 4, ""},
    +		{"SysProcIDMap.HostID", Field, 4, ""},
    +		{"SysProcIDMap.Size", Field, 4, ""},
    +		{"Syscall", Func, 0, "func(trap uintptr, a1 uintptr, a2 uintptr, a3 uintptr) (r1 uintptr, r2 uintptr, err Errno)"},
    +		{"Syscall12", Func, 0, ""},
    +		{"Syscall15", Func, 0, ""},
    +		{"Syscall18", Func, 12, ""},
    +		{"Syscall6", Func, 0, "func(trap uintptr, a1 uintptr, a2 uintptr, a3 uintptr, a4 uintptr, a5 uintptr, a6 uintptr) (r1 uintptr, r2 uintptr, err Errno)"},
    +		{"Syscall9", Func, 0, ""},
    +		{"SyscallN", Func, 18, ""},
    +		{"Sysctl", Func, 0, ""},
    +		{"SysctlUint32", Func, 0, ""},
    +		{"Sysctlnode", Type, 2, ""},
    +		{"Sysctlnode.Flags", Field, 2, ""},
    +		{"Sysctlnode.Name", Field, 2, ""},
    +		{"Sysctlnode.Num", Field, 2, ""},
    +		{"Sysctlnode.Un", Field, 2, ""},
    +		{"Sysctlnode.Ver", Field, 2, ""},
    +		{"Sysctlnode.X__rsvd", Field, 2, ""},
    +		{"Sysctlnode.X_sysctl_desc", Field, 2, ""},
    +		{"Sysctlnode.X_sysctl_func", Field, 2, ""},
    +		{"Sysctlnode.X_sysctl_parent", Field, 2, ""},
    +		{"Sysctlnode.X_sysctl_size", Field, 2, ""},
    +		{"Sysinfo", Func, 0, "func(info *Sysinfo_t) (err error)"},
    +		{"Sysinfo_t", Type, 0, ""},
    +		{"Sysinfo_t.Bufferram", Field, 0, ""},
    +		{"Sysinfo_t.Freehigh", Field, 0, ""},
    +		{"Sysinfo_t.Freeram", Field, 0, ""},
    +		{"Sysinfo_t.Freeswap", Field, 0, ""},
    +		{"Sysinfo_t.Loads", Field, 0, ""},
    +		{"Sysinfo_t.Pad", Field, 0, ""},
    +		{"Sysinfo_t.Pad_cgo_0", Field, 0, ""},
    +		{"Sysinfo_t.Pad_cgo_1", Field, 0, ""},
    +		{"Sysinfo_t.Procs", Field, 0, ""},
    +		{"Sysinfo_t.Sharedram", Field, 0, ""},
    +		{"Sysinfo_t.Totalhigh", Field, 0, ""},
    +		{"Sysinfo_t.Totalram", Field, 0, ""},
    +		{"Sysinfo_t.Totalswap", Field, 0, ""},
    +		{"Sysinfo_t.Unit", Field, 0, ""},
    +		{"Sysinfo_t.Uptime", Field, 0, ""},
    +		{"Sysinfo_t.X_f", Field, 0, ""},
    +		{"Systemtime", Type, 0, ""},
    +		{"Systemtime.Day", Field, 0, ""},
    +		{"Systemtime.DayOfWeek", Field, 0, ""},
    +		{"Systemtime.Hour", Field, 0, ""},
    +		{"Systemtime.Milliseconds", Field, 0, ""},
    +		{"Systemtime.Minute", Field, 0, ""},
    +		{"Systemtime.Month", Field, 0, ""},
    +		{"Systemtime.Second", Field, 0, ""},
    +		{"Systemtime.Year", Field, 0, ""},
    +		{"TCGETS", Const, 0, ""},
    +		{"TCIFLUSH", Const, 1, ""},
    +		{"TCIOFLUSH", Const, 1, ""},
    +		{"TCOFLUSH", Const, 1, ""},
    +		{"TCPInfo", Type, 1, ""},
    +		{"TCPInfo.Advmss", Field, 1, ""},
    +		{"TCPInfo.Ato", Field, 1, ""},
    +		{"TCPInfo.Backoff", Field, 1, ""},
    +		{"TCPInfo.Ca_state", Field, 1, ""},
    +		{"TCPInfo.Fackets", Field, 1, ""},
    +		{"TCPInfo.Last_ack_recv", Field, 1, ""},
    +		{"TCPInfo.Last_ack_sent", Field, 1, ""},
    +		{"TCPInfo.Last_data_recv", Field, 1, ""},
    +		{"TCPInfo.Last_data_sent", Field, 1, ""},
    +		{"TCPInfo.Lost", Field, 1, ""},
    +		{"TCPInfo.Options", Field, 1, ""},
    +		{"TCPInfo.Pad_cgo_0", Field, 1, ""},
    +		{"TCPInfo.Pmtu", Field, 1, ""},
    +		{"TCPInfo.Probes", Field, 1, ""},
    +		{"TCPInfo.Rcv_mss", Field, 1, ""},
    +		{"TCPInfo.Rcv_rtt", Field, 1, ""},
    +		{"TCPInfo.Rcv_space", Field, 1, ""},
    +		{"TCPInfo.Rcv_ssthresh", Field, 1, ""},
    +		{"TCPInfo.Reordering", Field, 1, ""},
    +		{"TCPInfo.Retrans", Field, 1, ""},
    +		{"TCPInfo.Retransmits", Field, 1, ""},
    +		{"TCPInfo.Rto", Field, 1, ""},
    +		{"TCPInfo.Rtt", Field, 1, ""},
    +		{"TCPInfo.Rttvar", Field, 1, ""},
    +		{"TCPInfo.Sacked", Field, 1, ""},
    +		{"TCPInfo.Snd_cwnd", Field, 1, ""},
    +		{"TCPInfo.Snd_mss", Field, 1, ""},
    +		{"TCPInfo.Snd_ssthresh", Field, 1, ""},
    +		{"TCPInfo.State", Field, 1, ""},
    +		{"TCPInfo.Total_retrans", Field, 1, ""},
    +		{"TCPInfo.Unacked", Field, 1, ""},
    +		{"TCPKeepalive", Type, 3, ""},
    +		{"TCPKeepalive.Interval", Field, 3, ""},
    +		{"TCPKeepalive.OnOff", Field, 3, ""},
    +		{"TCPKeepalive.Time", Field, 3, ""},
    +		{"TCP_CA_NAME_MAX", Const, 0, ""},
    +		{"TCP_CONGCTL", Const, 1, ""},
    +		{"TCP_CONGESTION", Const, 0, ""},
    +		{"TCP_CONNECTIONTIMEOUT", Const, 0, ""},
    +		{"TCP_CORK", Const, 0, ""},
    +		{"TCP_DEFER_ACCEPT", Const, 0, ""},
    +		{"TCP_ENABLE_ECN", Const, 16, ""},
    +		{"TCP_INFO", Const, 0, ""},
    +		{"TCP_KEEPALIVE", Const, 0, ""},
    +		{"TCP_KEEPCNT", Const, 0, ""},
    +		{"TCP_KEEPIDLE", Const, 0, ""},
    +		{"TCP_KEEPINIT", Const, 1, ""},
    +		{"TCP_KEEPINTVL", Const, 0, ""},
    +		{"TCP_LINGER2", Const, 0, ""},
    +		{"TCP_MAXBURST", Const, 0, ""},
    +		{"TCP_MAXHLEN", Const, 0, ""},
    +		{"TCP_MAXOLEN", Const, 0, ""},
    +		{"TCP_MAXSEG", Const, 0, ""},
    +		{"TCP_MAXWIN", Const, 0, ""},
    +		{"TCP_MAX_SACK", Const, 0, ""},
    +		{"TCP_MAX_WINSHIFT", Const, 0, ""},
    +		{"TCP_MD5SIG", Const, 0, ""},
    +		{"TCP_MD5SIG_MAXKEYLEN", Const, 0, ""},
    +		{"TCP_MINMSS", Const, 0, ""},
    +		{"TCP_MINMSSOVERLOAD", Const, 0, ""},
    +		{"TCP_MSS", Const, 0, ""},
    +		{"TCP_NODELAY", Const, 0, ""},
    +		{"TCP_NOOPT", Const, 0, ""},
    +		{"TCP_NOPUSH", Const, 0, ""},
    +		{"TCP_NOTSENT_LOWAT", Const, 16, ""},
    +		{"TCP_NSTATES", Const, 1, ""},
    +		{"TCP_QUICKACK", Const, 0, ""},
    +		{"TCP_RXT_CONNDROPTIME", Const, 0, ""},
    +		{"TCP_RXT_FINDROP", Const, 0, ""},
    +		{"TCP_SACK_ENABLE", Const, 1, ""},
    +		{"TCP_SENDMOREACKS", Const, 16, ""},
    +		{"TCP_SYNCNT", Const, 0, ""},
    +		{"TCP_VENDOR", Const, 3, ""},
    +		{"TCP_WINDOW_CLAMP", Const, 0, ""},
    +		{"TCSAFLUSH", Const, 1, ""},
    +		{"TCSETS", Const, 0, ""},
    +		{"TF_DISCONNECT", Const, 0, ""},
    +		{"TF_REUSE_SOCKET", Const, 0, ""},
    +		{"TF_USE_DEFAULT_WORKER", Const, 0, ""},
    +		{"TF_USE_KERNEL_APC", Const, 0, ""},
    +		{"TF_USE_SYSTEM_THREAD", Const, 0, ""},
    +		{"TF_WRITE_BEHIND", Const, 0, ""},
    +		{"TH32CS_INHERIT", Const, 4, ""},
    +		{"TH32CS_SNAPALL", Const, 4, ""},
    +		{"TH32CS_SNAPHEAPLIST", Const, 4, ""},
    +		{"TH32CS_SNAPMODULE", Const, 4, ""},
    +		{"TH32CS_SNAPMODULE32", Const, 4, ""},
    +		{"TH32CS_SNAPPROCESS", Const, 4, ""},
    +		{"TH32CS_SNAPTHREAD", Const, 4, ""},
    +		{"TIME_ZONE_ID_DAYLIGHT", Const, 0, ""},
    +		{"TIME_ZONE_ID_STANDARD", Const, 0, ""},
    +		{"TIME_ZONE_ID_UNKNOWN", Const, 0, ""},
    +		{"TIOCCBRK", Const, 0, ""},
    +		{"TIOCCDTR", Const, 0, ""},
    +		{"TIOCCONS", Const, 0, ""},
    +		{"TIOCDCDTIMESTAMP", Const, 0, ""},
    +		{"TIOCDRAIN", Const, 0, ""},
    +		{"TIOCDSIMICROCODE", Const, 0, ""},
    +		{"TIOCEXCL", Const, 0, ""},
    +		{"TIOCEXT", Const, 0, ""},
    +		{"TIOCFLAG_CDTRCTS", Const, 1, ""},
    +		{"TIOCFLAG_CLOCAL", Const, 1, ""},
    +		{"TIOCFLAG_CRTSCTS", Const, 1, ""},
    +		{"TIOCFLAG_MDMBUF", Const, 1, ""},
    +		{"TIOCFLAG_PPS", Const, 1, ""},
    +		{"TIOCFLAG_SOFTCAR", Const, 1, ""},
    +		{"TIOCFLUSH", Const, 0, ""},
    +		{"TIOCGDEV", Const, 0, ""},
    +		{"TIOCGDRAINWAIT", Const, 0, ""},
    +		{"TIOCGETA", Const, 0, ""},
    +		{"TIOCGETD", Const, 0, ""},
    +		{"TIOCGFLAGS", Const, 1, ""},
    +		{"TIOCGICOUNT", Const, 0, ""},
    +		{"TIOCGLCKTRMIOS", Const, 0, ""},
    +		{"TIOCGLINED", Const, 1, ""},
    +		{"TIOCGPGRP", Const, 0, ""},
    +		{"TIOCGPTN", Const, 0, ""},
    +		{"TIOCGQSIZE", Const, 1, ""},
    +		{"TIOCGRANTPT", Const, 1, ""},
    +		{"TIOCGRS485", Const, 0, ""},
    +		{"TIOCGSERIAL", Const, 0, ""},
    +		{"TIOCGSID", Const, 0, ""},
    +		{"TIOCGSIZE", Const, 1, ""},
    +		{"TIOCGSOFTCAR", Const, 0, ""},
    +		{"TIOCGTSTAMP", Const, 1, ""},
    +		{"TIOCGWINSZ", Const, 0, ""},
    +		{"TIOCINQ", Const, 0, ""},
    +		{"TIOCIXOFF", Const, 0, ""},
    +		{"TIOCIXON", Const, 0, ""},
    +		{"TIOCLINUX", Const, 0, ""},
    +		{"TIOCMBIC", Const, 0, ""},
    +		{"TIOCMBIS", Const, 0, ""},
    +		{"TIOCMGDTRWAIT", Const, 0, ""},
    +		{"TIOCMGET", Const, 0, ""},
    +		{"TIOCMIWAIT", Const, 0, ""},
    +		{"TIOCMODG", Const, 0, ""},
    +		{"TIOCMODS", Const, 0, ""},
    +		{"TIOCMSDTRWAIT", Const, 0, ""},
    +		{"TIOCMSET", Const, 0, ""},
    +		{"TIOCM_CAR", Const, 0, ""},
    +		{"TIOCM_CD", Const, 0, ""},
    +		{"TIOCM_CTS", Const, 0, ""},
    +		{"TIOCM_DCD", Const, 0, ""},
    +		{"TIOCM_DSR", Const, 0, ""},
    +		{"TIOCM_DTR", Const, 0, ""},
    +		{"TIOCM_LE", Const, 0, ""},
    +		{"TIOCM_RI", Const, 0, ""},
    +		{"TIOCM_RNG", Const, 0, ""},
    +		{"TIOCM_RTS", Const, 0, ""},
    +		{"TIOCM_SR", Const, 0, ""},
    +		{"TIOCM_ST", Const, 0, ""},
    +		{"TIOCNOTTY", Const, 0, ""},
    +		{"TIOCNXCL", Const, 0, ""},
    +		{"TIOCOUTQ", Const, 0, ""},
    +		{"TIOCPKT", Const, 0, ""},
    +		{"TIOCPKT_DATA", Const, 0, ""},
    +		{"TIOCPKT_DOSTOP", Const, 0, ""},
    +		{"TIOCPKT_FLUSHREAD", Const, 0, ""},
    +		{"TIOCPKT_FLUSHWRITE", Const, 0, ""},
    +		{"TIOCPKT_IOCTL", Const, 0, ""},
    +		{"TIOCPKT_NOSTOP", Const, 0, ""},
    +		{"TIOCPKT_START", Const, 0, ""},
    +		{"TIOCPKT_STOP", Const, 0, ""},
    +		{"TIOCPTMASTER", Const, 0, ""},
    +		{"TIOCPTMGET", Const, 1, ""},
    +		{"TIOCPTSNAME", Const, 1, ""},
    +		{"TIOCPTYGNAME", Const, 0, ""},
    +		{"TIOCPTYGRANT", Const, 0, ""},
    +		{"TIOCPTYUNLK", Const, 0, ""},
    +		{"TIOCRCVFRAME", Const, 1, ""},
    +		{"TIOCREMOTE", Const, 0, ""},
    +		{"TIOCSBRK", Const, 0, ""},
    +		{"TIOCSCONS", Const, 0, ""},
    +		{"TIOCSCTTY", Const, 0, ""},
    +		{"TIOCSDRAINWAIT", Const, 0, ""},
    +		{"TIOCSDTR", Const, 0, ""},
    +		{"TIOCSERCONFIG", Const, 0, ""},
    +		{"TIOCSERGETLSR", Const, 0, ""},
    +		{"TIOCSERGETMULTI", Const, 0, ""},
    +		{"TIOCSERGSTRUCT", Const, 0, ""},
    +		{"TIOCSERGWILD", Const, 0, ""},
    +		{"TIOCSERSETMULTI", Const, 0, ""},
    +		{"TIOCSERSWILD", Const, 0, ""},
    +		{"TIOCSER_TEMT", Const, 0, ""},
    +		{"TIOCSETA", Const, 0, ""},
    +		{"TIOCSETAF", Const, 0, ""},
    +		{"TIOCSETAW", Const, 0, ""},
    +		{"TIOCSETD", Const, 0, ""},
    +		{"TIOCSFLAGS", Const, 1, ""},
    +		{"TIOCSIG", Const, 0, ""},
    +		{"TIOCSLCKTRMIOS", Const, 0, ""},
    +		{"TIOCSLINED", Const, 1, ""},
    +		{"TIOCSPGRP", Const, 0, ""},
    +		{"TIOCSPTLCK", Const, 0, ""},
    +		{"TIOCSQSIZE", Const, 1, ""},
    +		{"TIOCSRS485", Const, 0, ""},
    +		{"TIOCSSERIAL", Const, 0, ""},
    +		{"TIOCSSIZE", Const, 1, ""},
    +		{"TIOCSSOFTCAR", Const, 0, ""},
    +		{"TIOCSTART", Const, 0, ""},
    +		{"TIOCSTAT", Const, 0, ""},
    +		{"TIOCSTI", Const, 0, ""},
    +		{"TIOCSTOP", Const, 0, ""},
    +		{"TIOCSTSTAMP", Const, 1, ""},
    +		{"TIOCSWINSZ", Const, 0, ""},
    +		{"TIOCTIMESTAMP", Const, 0, ""},
    +		{"TIOCUCNTL", Const, 0, ""},
    +		{"TIOCVHANGUP", Const, 0, ""},
    +		{"TIOCXMTFRAME", Const, 1, ""},
    +		{"TOKEN_ADJUST_DEFAULT", Const, 0, ""},
    +		{"TOKEN_ADJUST_GROUPS", Const, 0, ""},
    +		{"TOKEN_ADJUST_PRIVILEGES", Const, 0, ""},
    +		{"TOKEN_ADJUST_SESSIONID", Const, 11, ""},
    +		{"TOKEN_ALL_ACCESS", Const, 0, ""},
    +		{"TOKEN_ASSIGN_PRIMARY", Const, 0, ""},
    +		{"TOKEN_DUPLICATE", Const, 0, ""},
    +		{"TOKEN_EXECUTE", Const, 0, ""},
    +		{"TOKEN_IMPERSONATE", Const, 0, ""},
    +		{"TOKEN_QUERY", Const, 0, ""},
    +		{"TOKEN_QUERY_SOURCE", Const, 0, ""},
    +		{"TOKEN_READ", Const, 0, ""},
    +		{"TOKEN_WRITE", Const, 0, ""},
    +		{"TOSTOP", Const, 0, ""},
    +		{"TRUNCATE_EXISTING", Const, 0, ""},
    +		{"TUNATTACHFILTER", Const, 0, ""},
    +		{"TUNDETACHFILTER", Const, 0, ""},
    +		{"TUNGETFEATURES", Const, 0, ""},
    +		{"TUNGETIFF", Const, 0, ""},
    +		{"TUNGETSNDBUF", Const, 0, ""},
    +		{"TUNGETVNETHDRSZ", Const, 0, ""},
    +		{"TUNSETDEBUG", Const, 0, ""},
    +		{"TUNSETGROUP", Const, 0, ""},
    +		{"TUNSETIFF", Const, 0, ""},
    +		{"TUNSETLINK", Const, 0, ""},
    +		{"TUNSETNOCSUM", Const, 0, ""},
    +		{"TUNSETOFFLOAD", Const, 0, ""},
    +		{"TUNSETOWNER", Const, 0, ""},
    +		{"TUNSETPERSIST", Const, 0, ""},
    +		{"TUNSETSNDBUF", Const, 0, ""},
    +		{"TUNSETTXFILTER", Const, 0, ""},
    +		{"TUNSETVNETHDRSZ", Const, 0, ""},
    +		{"Tee", Func, 0, "func(rfd int, wfd int, len int, flags int) (n int64, err error)"},
    +		{"TerminateProcess", Func, 0, ""},
    +		{"Termios", Type, 0, ""},
    +		{"Termios.Cc", Field, 0, ""},
    +		{"Termios.Cflag", Field, 0, ""},
    +		{"Termios.Iflag", Field, 0, ""},
    +		{"Termios.Ispeed", Field, 0, ""},
    +		{"Termios.Lflag", Field, 0, ""},
    +		{"Termios.Line", Field, 0, ""},
    +		{"Termios.Oflag", Field, 0, ""},
    +		{"Termios.Ospeed", Field, 0, ""},
    +		{"Termios.Pad_cgo_0", Field, 0, ""},
    +		{"Tgkill", Func, 0, "func(tgid int, tid int, sig Signal) (err error)"},
    +		{"Time", Func, 0, "func(t *Time_t) (tt Time_t, err error)"},
    +		{"Time_t", Type, 0, ""},
    +		{"Times", Func, 0, "func(tms *Tms) (ticks uintptr, err error)"},
    +		{"Timespec", Type, 0, ""},
    +		{"Timespec.Nsec", Field, 0, ""},
    +		{"Timespec.Pad_cgo_0", Field, 2, ""},
    +		{"Timespec.Sec", Field, 0, ""},
    +		{"TimespecToNsec", Func, 0, "func(ts Timespec) int64"},
    +		{"Timeval", Type, 0, ""},
    +		{"Timeval.Pad_cgo_0", Field, 0, ""},
    +		{"Timeval.Sec", Field, 0, ""},
    +		{"Timeval.Usec", Field, 0, ""},
    +		{"Timeval32", Type, 0, ""},
    +		{"Timeval32.Sec", Field, 0, ""},
    +		{"Timeval32.Usec", Field, 0, ""},
    +		{"TimevalToNsec", Func, 0, "func(tv Timeval) int64"},
    +		{"Timex", Type, 0, ""},
    +		{"Timex.Calcnt", Field, 0, ""},
    +		{"Timex.Constant", Field, 0, ""},
    +		{"Timex.Errcnt", Field, 0, ""},
    +		{"Timex.Esterror", Field, 0, ""},
    +		{"Timex.Freq", Field, 0, ""},
    +		{"Timex.Jitcnt", Field, 0, ""},
    +		{"Timex.Jitter", Field, 0, ""},
    +		{"Timex.Maxerror", Field, 0, ""},
    +		{"Timex.Modes", Field, 0, ""},
    +		{"Timex.Offset", Field, 0, ""},
    +		{"Timex.Pad_cgo_0", Field, 0, ""},
    +		{"Timex.Pad_cgo_1", Field, 0, ""},
    +		{"Timex.Pad_cgo_2", Field, 0, ""},
    +		{"Timex.Pad_cgo_3", Field, 0, ""},
    +		{"Timex.Ppsfreq", Field, 0, ""},
    +		{"Timex.Precision", Field, 0, ""},
    +		{"Timex.Shift", Field, 0, ""},
    +		{"Timex.Stabil", Field, 0, ""},
    +		{"Timex.Status", Field, 0, ""},
    +		{"Timex.Stbcnt", Field, 0, ""},
    +		{"Timex.Tai", Field, 0, ""},
    +		{"Timex.Tick", Field, 0, ""},
    +		{"Timex.Time", Field, 0, ""},
    +		{"Timex.Tolerance", Field, 0, ""},
    +		{"Timezoneinformation", Type, 0, ""},
    +		{"Timezoneinformation.Bias", Field, 0, ""},
    +		{"Timezoneinformation.DaylightBias", Field, 0, ""},
    +		{"Timezoneinformation.DaylightDate", Field, 0, ""},
    +		{"Timezoneinformation.DaylightName", Field, 0, ""},
    +		{"Timezoneinformation.StandardBias", Field, 0, ""},
    +		{"Timezoneinformation.StandardDate", Field, 0, ""},
    +		{"Timezoneinformation.StandardName", Field, 0, ""},
    +		{"Tms", Type, 0, ""},
    +		{"Tms.Cstime", Field, 0, ""},
    +		{"Tms.Cutime", Field, 0, ""},
    +		{"Tms.Stime", Field, 0, ""},
    +		{"Tms.Utime", Field, 0, ""},
    +		{"Token", Type, 0, ""},
    +		{"TokenAccessInformation", Const, 0, ""},
    +		{"TokenAuditPolicy", Const, 0, ""},
    +		{"TokenDefaultDacl", Const, 0, ""},
    +		{"TokenElevation", Const, 0, ""},
    +		{"TokenElevationType", Const, 0, ""},
    +		{"TokenGroups", Const, 0, ""},
    +		{"TokenGroupsAndPrivileges", Const, 0, ""},
    +		{"TokenHasRestrictions", Const, 0, ""},
    +		{"TokenImpersonationLevel", Const, 0, ""},
    +		{"TokenIntegrityLevel", Const, 0, ""},
    +		{"TokenLinkedToken", Const, 0, ""},
    +		{"TokenLogonSid", Const, 0, ""},
    +		{"TokenMandatoryPolicy", Const, 0, ""},
    +		{"TokenOrigin", Const, 0, ""},
    +		{"TokenOwner", Const, 0, ""},
    +		{"TokenPrimaryGroup", Const, 0, ""},
    +		{"TokenPrivileges", Const, 0, ""},
    +		{"TokenRestrictedSids", Const, 0, ""},
    +		{"TokenSandBoxInert", Const, 0, ""},
    +		{"TokenSessionId", Const, 0, ""},
    +		{"TokenSessionReference", Const, 0, ""},
    +		{"TokenSource", Const, 0, ""},
    +		{"TokenStatistics", Const, 0, ""},
    +		{"TokenType", Const, 0, ""},
    +		{"TokenUIAccess", Const, 0, ""},
    +		{"TokenUser", Const, 0, ""},
    +		{"TokenVirtualizationAllowed", Const, 0, ""},
    +		{"TokenVirtualizationEnabled", Const, 0, ""},
    +		{"Tokenprimarygroup", Type, 0, ""},
    +		{"Tokenprimarygroup.PrimaryGroup", Field, 0, ""},
    +		{"Tokenuser", Type, 0, ""},
    +		{"Tokenuser.User", Field, 0, ""},
    +		{"TranslateAccountName", Func, 0, ""},
    +		{"TranslateName", Func, 0, ""},
    +		{"TransmitFile", Func, 0, ""},
    +		{"TransmitFileBuffers", Type, 0, ""},
    +		{"TransmitFileBuffers.Head", Field, 0, ""},
    +		{"TransmitFileBuffers.HeadLength", Field, 0, ""},
    +		{"TransmitFileBuffers.Tail", Field, 0, ""},
    +		{"TransmitFileBuffers.TailLength", Field, 0, ""},
    +		{"Truncate", Func, 0, "func(path string, length int64) (err error)"},
    +		{"UNIX_PATH_MAX", Const, 12, ""},
    +		{"USAGE_MATCH_TYPE_AND", Const, 0, ""},
    +		{"USAGE_MATCH_TYPE_OR", Const, 0, ""},
    +		{"UTF16FromString", Func, 1, ""},
    +		{"UTF16PtrFromString", Func, 1, ""},
    +		{"UTF16ToString", Func, 0, ""},
    +		{"Ucred", Type, 0, ""},
    +		{"Ucred.Gid", Field, 0, ""},
    +		{"Ucred.Pid", Field, 0, ""},
    +		{"Ucred.Uid", Field, 0, ""},
    +		{"Umask", Func, 0, "func(mask int) (oldmask int)"},
    +		{"Uname", Func, 0, "func(buf *Utsname) (err error)"},
    +		{"Undelete", Func, 0, ""},
    +		{"UnixCredentials", Func, 0, "func(ucred *Ucred) []byte"},
    +		{"UnixRights", Func, 0, "func(fds ...int) []byte"},
    +		{"Unlink", Func, 0, "func(path string) error"},
    +		{"Unlinkat", Func, 0, "func(dirfd int, path string) error"},
    +		{"UnmapViewOfFile", Func, 0, ""},
    +		{"Unmount", Func, 0, "func(target string, flags int) (err error)"},
    +		{"Unsetenv", Func, 4, "func(key string) error"},
    +		{"Unshare", Func, 0, "func(flags int) (err error)"},
    +		{"UserInfo10", Type, 0, ""},
    +		{"UserInfo10.Comment", Field, 0, ""},
    +		{"UserInfo10.FullName", Field, 0, ""},
    +		{"UserInfo10.Name", Field, 0, ""},
    +		{"UserInfo10.UsrComment", Field, 0, ""},
    +		{"Ustat", Func, 0, "func(dev int, ubuf *Ustat_t) (err error)"},
    +		{"Ustat_t", Type, 0, ""},
    +		{"Ustat_t.Fname", Field, 0, ""},
    +		{"Ustat_t.Fpack", Field, 0, ""},
    +		{"Ustat_t.Pad_cgo_0", Field, 0, ""},
    +		{"Ustat_t.Pad_cgo_1", Field, 0, ""},
    +		{"Ustat_t.Tfree", Field, 0, ""},
    +		{"Ustat_t.Tinode", Field, 0, ""},
    +		{"Utimbuf", Type, 0, ""},
    +		{"Utimbuf.Actime", Field, 0, ""},
    +		{"Utimbuf.Modtime", Field, 0, ""},
    +		{"Utime", Func, 0, "func(path string, buf *Utimbuf) (err error)"},
    +		{"Utimes", Func, 0, "func(path string, tv []Timeval) (err error)"},
    +		{"UtimesNano", Func, 1, "func(path string, ts []Timespec) (err error)"},
    +		{"Utsname", Type, 0, ""},
    +		{"Utsname.Domainname", Field, 0, ""},
    +		{"Utsname.Machine", Field, 0, ""},
    +		{"Utsname.Nodename", Field, 0, ""},
    +		{"Utsname.Release", Field, 0, ""},
    +		{"Utsname.Sysname", Field, 0, ""},
    +		{"Utsname.Version", Field, 0, ""},
    +		{"VDISCARD", Const, 0, ""},
    +		{"VDSUSP", Const, 1, ""},
    +		{"VEOF", Const, 0, ""},
    +		{"VEOL", Const, 0, ""},
    +		{"VEOL2", Const, 0, ""},
    +		{"VERASE", Const, 0, ""},
    +		{"VERASE2", Const, 1, ""},
    +		{"VINTR", Const, 0, ""},
    +		{"VKILL", Const, 0, ""},
    +		{"VLNEXT", Const, 0, ""},
    +		{"VMIN", Const, 0, ""},
    +		{"VQUIT", Const, 0, ""},
    +		{"VREPRINT", Const, 0, ""},
    +		{"VSTART", Const, 0, ""},
    +		{"VSTATUS", Const, 1, ""},
    +		{"VSTOP", Const, 0, ""},
    +		{"VSUSP", Const, 0, ""},
    +		{"VSWTC", Const, 0, ""},
    +		{"VT0", Const, 1, ""},
    +		{"VT1", Const, 1, ""},
    +		{"VTDLY", Const, 1, ""},
    +		{"VTIME", Const, 0, ""},
    +		{"VWERASE", Const, 0, ""},
    +		{"VirtualLock", Func, 0, ""},
    +		{"VirtualUnlock", Func, 0, ""},
    +		{"WAIT_ABANDONED", Const, 0, ""},
    +		{"WAIT_FAILED", Const, 0, ""},
    +		{"WAIT_OBJECT_0", Const, 0, ""},
    +		{"WAIT_TIMEOUT", Const, 0, ""},
    +		{"WALL", Const, 0, ""},
    +		{"WALLSIG", Const, 1, ""},
    +		{"WALTSIG", Const, 1, ""},
    +		{"WCLONE", Const, 0, ""},
    +		{"WCONTINUED", Const, 0, ""},
    +		{"WCOREFLAG", Const, 0, ""},
    +		{"WEXITED", Const, 0, ""},
    +		{"WLINUXCLONE", Const, 0, ""},
    +		{"WNOHANG", Const, 0, ""},
    +		{"WNOTHREAD", Const, 0, ""},
    +		{"WNOWAIT", Const, 0, ""},
    +		{"WNOZOMBIE", Const, 1, ""},
    +		{"WOPTSCHECKED", Const, 1, ""},
    +		{"WORDSIZE", Const, 0, ""},
    +		{"WSABuf", Type, 0, ""},
    +		{"WSABuf.Buf", Field, 0, ""},
    +		{"WSABuf.Len", Field, 0, ""},
    +		{"WSACleanup", Func, 0, ""},
    +		{"WSADESCRIPTION_LEN", Const, 0, ""},
    +		{"WSAData", Type, 0, ""},
    +		{"WSAData.Description", Field, 0, ""},
    +		{"WSAData.HighVersion", Field, 0, ""},
    +		{"WSAData.MaxSockets", Field, 0, ""},
    +		{"WSAData.MaxUdpDg", Field, 0, ""},
    +		{"WSAData.SystemStatus", Field, 0, ""},
    +		{"WSAData.VendorInfo", Field, 0, ""},
    +		{"WSAData.Version", Field, 0, ""},
    +		{"WSAEACCES", Const, 2, ""},
    +		{"WSAECONNABORTED", Const, 9, ""},
    +		{"WSAECONNRESET", Const, 3, ""},
    +		{"WSAENOPROTOOPT", Const, 23, ""},
    +		{"WSAEnumProtocols", Func, 2, ""},
    +		{"WSAID_CONNECTEX", Var, 1, ""},
    +		{"WSAIoctl", Func, 0, ""},
    +		{"WSAPROTOCOL_LEN", Const, 2, ""},
    +		{"WSAProtocolChain", Type, 2, ""},
    +		{"WSAProtocolChain.ChainEntries", Field, 2, ""},
    +		{"WSAProtocolChain.ChainLen", Field, 2, ""},
    +		{"WSAProtocolInfo", Type, 2, ""},
    +		{"WSAProtocolInfo.AddressFamily", Field, 2, ""},
    +		{"WSAProtocolInfo.CatalogEntryId", Field, 2, ""},
    +		{"WSAProtocolInfo.MaxSockAddr", Field, 2, ""},
    +		{"WSAProtocolInfo.MessageSize", Field, 2, ""},
    +		{"WSAProtocolInfo.MinSockAddr", Field, 2, ""},
    +		{"WSAProtocolInfo.NetworkByteOrder", Field, 2, ""},
    +		{"WSAProtocolInfo.Protocol", Field, 2, ""},
    +		{"WSAProtocolInfo.ProtocolChain", Field, 2, ""},
    +		{"WSAProtocolInfo.ProtocolMaxOffset", Field, 2, ""},
    +		{"WSAProtocolInfo.ProtocolName", Field, 2, ""},
    +		{"WSAProtocolInfo.ProviderFlags", Field, 2, ""},
    +		{"WSAProtocolInfo.ProviderId", Field, 2, ""},
    +		{"WSAProtocolInfo.ProviderReserved", Field, 2, ""},
    +		{"WSAProtocolInfo.SecurityScheme", Field, 2, ""},
    +		{"WSAProtocolInfo.ServiceFlags1", Field, 2, ""},
    +		{"WSAProtocolInfo.ServiceFlags2", Field, 2, ""},
    +		{"WSAProtocolInfo.ServiceFlags3", Field, 2, ""},
    +		{"WSAProtocolInfo.ServiceFlags4", Field, 2, ""},
    +		{"WSAProtocolInfo.SocketType", Field, 2, ""},
    +		{"WSAProtocolInfo.Version", Field, 2, ""},
    +		{"WSARecv", Func, 0, ""},
    +		{"WSARecvFrom", Func, 0, ""},
    +		{"WSASYS_STATUS_LEN", Const, 0, ""},
    +		{"WSASend", Func, 0, ""},
    +		{"WSASendTo", Func, 0, ""},
    +		{"WSASendto", Func, 0, ""},
    +		{"WSAStartup", Func, 0, ""},
    +		{"WSTOPPED", Const, 0, ""},
    +		{"WTRAPPED", Const, 1, ""},
    +		{"WUNTRACED", Const, 0, ""},
    +		{"Wait4", Func, 0, "func(pid int, wstatus *WaitStatus, options int, rusage *Rusage) (wpid int, err error)"},
    +		{"WaitForSingleObject", Func, 0, ""},
    +		{"WaitStatus", Type, 0, ""},
    +		{"WaitStatus.ExitCode", Field, 0, ""},
    +		{"Win32FileAttributeData", Type, 0, ""},
    +		{"Win32FileAttributeData.CreationTime", Field, 0, ""},
    +		{"Win32FileAttributeData.FileAttributes", Field, 0, ""},
    +		{"Win32FileAttributeData.FileSizeHigh", Field, 0, ""},
    +		{"Win32FileAttributeData.FileSizeLow", Field, 0, ""},
    +		{"Win32FileAttributeData.LastAccessTime", Field, 0, ""},
    +		{"Win32FileAttributeData.LastWriteTime", Field, 0, ""},
    +		{"Win32finddata", Type, 0, ""},
    +		{"Win32finddata.AlternateFileName", Field, 0, ""},
    +		{"Win32finddata.CreationTime", Field, 0, ""},
    +		{"Win32finddata.FileAttributes", Field, 0, ""},
    +		{"Win32finddata.FileName", Field, 0, ""},
    +		{"Win32finddata.FileSizeHigh", Field, 0, ""},
    +		{"Win32finddata.FileSizeLow", Field, 0, ""},
    +		{"Win32finddata.LastAccessTime", Field, 0, ""},
    +		{"Win32finddata.LastWriteTime", Field, 0, ""},
    +		{"Win32finddata.Reserved0", Field, 0, ""},
    +		{"Win32finddata.Reserved1", Field, 0, ""},
    +		{"Write", Func, 0, "func(fd int, p []byte) (n int, err error)"},
    +		{"WriteConsole", Func, 1, ""},
    +		{"WriteFile", Func, 0, ""},
    +		{"X509_ASN_ENCODING", Const, 0, ""},
    +		{"XCASE", Const, 0, ""},
    +		{"XP1_CONNECTIONLESS", Const, 2, ""},
    +		{"XP1_CONNECT_DATA", Const, 2, ""},
    +		{"XP1_DISCONNECT_DATA", Const, 2, ""},
    +		{"XP1_EXPEDITED_DATA", Const, 2, ""},
    +		{"XP1_GRACEFUL_CLOSE", Const, 2, ""},
    +		{"XP1_GUARANTEED_DELIVERY", Const, 2, ""},
    +		{"XP1_GUARANTEED_ORDER", Const, 2, ""},
    +		{"XP1_IFS_HANDLES", Const, 2, ""},
    +		{"XP1_MESSAGE_ORIENTED", Const, 2, ""},
    +		{"XP1_MULTIPOINT_CONTROL_PLANE", Const, 2, ""},
    +		{"XP1_MULTIPOINT_DATA_PLANE", Const, 2, ""},
    +		{"XP1_PARTIAL_MESSAGE", Const, 2, ""},
    +		{"XP1_PSEUDO_STREAM", Const, 2, ""},
    +		{"XP1_QOS_SUPPORTED", Const, 2, ""},
    +		{"XP1_SAN_SUPPORT_SDP", Const, 2, ""},
    +		{"XP1_SUPPORT_BROADCAST", Const, 2, ""},
    +		{"XP1_SUPPORT_MULTIPOINT", Const, 2, ""},
    +		{"XP1_UNI_RECV", Const, 2, ""},
    +		{"XP1_UNI_SEND", Const, 2, ""},
     	},
     	"syscall/js": {
    -		{"CopyBytesToGo", Func, 0},
    -		{"CopyBytesToJS", Func, 0},
    -		{"Error", Type, 0},
    -		{"Func", Type, 0},
    -		{"FuncOf", Func, 0},
    -		{"Global", Func, 0},
    -		{"Null", Func, 0},
    -		{"Type", Type, 0},
    -		{"TypeBoolean", Const, 0},
    -		{"TypeFunction", Const, 0},
    -		{"TypeNull", Const, 0},
    -		{"TypeNumber", Const, 0},
    -		{"TypeObject", Const, 0},
    -		{"TypeString", Const, 0},
    -		{"TypeSymbol", Const, 0},
    -		{"TypeUndefined", Const, 0},
    -		{"Undefined", Func, 0},
    -		{"Value", Type, 0},
    -		{"ValueError", Type, 0},
    -		{"ValueOf", Func, 0},
    +		{"CopyBytesToGo", Func, 0, ""},
    +		{"CopyBytesToJS", Func, 0, ""},
    +		{"Error", Type, 0, ""},
    +		{"Func", Type, 0, ""},
    +		{"FuncOf", Func, 0, ""},
    +		{"Global", Func, 0, ""},
    +		{"Null", Func, 0, ""},
    +		{"Type", Type, 0, ""},
    +		{"TypeBoolean", Const, 0, ""},
    +		{"TypeFunction", Const, 0, ""},
    +		{"TypeNull", Const, 0, ""},
    +		{"TypeNumber", Const, 0, ""},
    +		{"TypeObject", Const, 0, ""},
    +		{"TypeString", Const, 0, ""},
    +		{"TypeSymbol", Const, 0, ""},
    +		{"TypeUndefined", Const, 0, ""},
    +		{"Undefined", Func, 0, ""},
    +		{"Value", Type, 0, ""},
    +		{"ValueError", Type, 0, ""},
    +		{"ValueOf", Func, 0, ""},
     	},
     	"testing": {
    -		{"(*B).Cleanup", Method, 14},
    -		{"(*B).Elapsed", Method, 20},
    -		{"(*B).Error", Method, 0},
    -		{"(*B).Errorf", Method, 0},
    -		{"(*B).Fail", Method, 0},
    -		{"(*B).FailNow", Method, 0},
    -		{"(*B).Failed", Method, 0},
    -		{"(*B).Fatal", Method, 0},
    -		{"(*B).Fatalf", Method, 0},
    -		{"(*B).Helper", Method, 9},
    -		{"(*B).Log", Method, 0},
    -		{"(*B).Logf", Method, 0},
    -		{"(*B).Name", Method, 8},
    -		{"(*B).ReportAllocs", Method, 1},
    -		{"(*B).ReportMetric", Method, 13},
    -		{"(*B).ResetTimer", Method, 0},
    -		{"(*B).Run", Method, 7},
    -		{"(*B).RunParallel", Method, 3},
    -		{"(*B).SetBytes", Method, 0},
    -		{"(*B).SetParallelism", Method, 3},
    -		{"(*B).Setenv", Method, 17},
    -		{"(*B).Skip", Method, 1},
    -		{"(*B).SkipNow", Method, 1},
    -		{"(*B).Skipf", Method, 1},
    -		{"(*B).Skipped", Method, 1},
    -		{"(*B).StartTimer", Method, 0},
    -		{"(*B).StopTimer", Method, 0},
    -		{"(*B).TempDir", Method, 15},
    -		{"(*F).Add", Method, 18},
    -		{"(*F).Cleanup", Method, 18},
    -		{"(*F).Error", Method, 18},
    -		{"(*F).Errorf", Method, 18},
    -		{"(*F).Fail", Method, 18},
    -		{"(*F).FailNow", Method, 18},
    -		{"(*F).Failed", Method, 18},
    -		{"(*F).Fatal", Method, 18},
    -		{"(*F).Fatalf", Method, 18},
    -		{"(*F).Fuzz", Method, 18},
    -		{"(*F).Helper", Method, 18},
    -		{"(*F).Log", Method, 18},
    -		{"(*F).Logf", Method, 18},
    -		{"(*F).Name", Method, 18},
    -		{"(*F).Setenv", Method, 18},
    -		{"(*F).Skip", Method, 18},
    -		{"(*F).SkipNow", Method, 18},
    -		{"(*F).Skipf", Method, 18},
    -		{"(*F).Skipped", Method, 18},
    -		{"(*F).TempDir", Method, 18},
    -		{"(*M).Run", Method, 4},
    -		{"(*PB).Next", Method, 3},
    -		{"(*T).Cleanup", Method, 14},
    -		{"(*T).Deadline", Method, 15},
    -		{"(*T).Error", Method, 0},
    -		{"(*T).Errorf", Method, 0},
    -		{"(*T).Fail", Method, 0},
    -		{"(*T).FailNow", Method, 0},
    -		{"(*T).Failed", Method, 0},
    -		{"(*T).Fatal", Method, 0},
    -		{"(*T).Fatalf", Method, 0},
    -		{"(*T).Helper", Method, 9},
    -		{"(*T).Log", Method, 0},
    -		{"(*T).Logf", Method, 0},
    -		{"(*T).Name", Method, 8},
    -		{"(*T).Parallel", Method, 0},
    -		{"(*T).Run", Method, 7},
    -		{"(*T).Setenv", Method, 17},
    -		{"(*T).Skip", Method, 1},
    -		{"(*T).SkipNow", Method, 1},
    -		{"(*T).Skipf", Method, 1},
    -		{"(*T).Skipped", Method, 1},
    -		{"(*T).TempDir", Method, 15},
    -		{"(BenchmarkResult).AllocedBytesPerOp", Method, 1},
    -		{"(BenchmarkResult).AllocsPerOp", Method, 1},
    -		{"(BenchmarkResult).MemString", Method, 1},
    -		{"(BenchmarkResult).NsPerOp", Method, 0},
    -		{"(BenchmarkResult).String", Method, 0},
    -		{"AllocsPerRun", Func, 1},
    -		{"B", Type, 0},
    -		{"B.N", Field, 0},
    -		{"Benchmark", Func, 0},
    -		{"BenchmarkResult", Type, 0},
    -		{"BenchmarkResult.Bytes", Field, 0},
    -		{"BenchmarkResult.Extra", Field, 13},
    -		{"BenchmarkResult.MemAllocs", Field, 1},
    -		{"BenchmarkResult.MemBytes", Field, 1},
    -		{"BenchmarkResult.N", Field, 0},
    -		{"BenchmarkResult.T", Field, 0},
    -		{"Cover", Type, 2},
    -		{"Cover.Blocks", Field, 2},
    -		{"Cover.Counters", Field, 2},
    -		{"Cover.CoveredPackages", Field, 2},
    -		{"Cover.Mode", Field, 2},
    -		{"CoverBlock", Type, 2},
    -		{"CoverBlock.Col0", Field, 2},
    -		{"CoverBlock.Col1", Field, 2},
    -		{"CoverBlock.Line0", Field, 2},
    -		{"CoverBlock.Line1", Field, 2},
    -		{"CoverBlock.Stmts", Field, 2},
    -		{"CoverMode", Func, 8},
    -		{"Coverage", Func, 4},
    -		{"F", Type, 18},
    -		{"Init", Func, 13},
    -		{"InternalBenchmark", Type, 0},
    -		{"InternalBenchmark.F", Field, 0},
    -		{"InternalBenchmark.Name", Field, 0},
    -		{"InternalExample", Type, 0},
    -		{"InternalExample.F", Field, 0},
    -		{"InternalExample.Name", Field, 0},
    -		{"InternalExample.Output", Field, 0},
    -		{"InternalExample.Unordered", Field, 7},
    -		{"InternalFuzzTarget", Type, 18},
    -		{"InternalFuzzTarget.Fn", Field, 18},
    -		{"InternalFuzzTarget.Name", Field, 18},
    -		{"InternalTest", Type, 0},
    -		{"InternalTest.F", Field, 0},
    -		{"InternalTest.Name", Field, 0},
    -		{"M", Type, 4},
    -		{"Main", Func, 0},
    -		{"MainStart", Func, 4},
    -		{"PB", Type, 3},
    -		{"RegisterCover", Func, 2},
    -		{"RunBenchmarks", Func, 0},
    -		{"RunExamples", Func, 0},
    -		{"RunTests", Func, 0},
    -		{"Short", Func, 0},
    -		{"T", Type, 0},
    -		{"TB", Type, 2},
    -		{"Testing", Func, 21},
    -		{"Verbose", Func, 1},
    +		{"(*B).Attr", Method, 25, ""},
    +		{"(*B).Chdir", Method, 24, ""},
    +		{"(*B).Cleanup", Method, 14, ""},
    +		{"(*B).Context", Method, 24, ""},
    +		{"(*B).Elapsed", Method, 20, ""},
    +		{"(*B).Error", Method, 0, ""},
    +		{"(*B).Errorf", Method, 0, ""},
    +		{"(*B).Fail", Method, 0, ""},
    +		{"(*B).FailNow", Method, 0, ""},
    +		{"(*B).Failed", Method, 0, ""},
    +		{"(*B).Fatal", Method, 0, ""},
    +		{"(*B).Fatalf", Method, 0, ""},
    +		{"(*B).Helper", Method, 9, ""},
    +		{"(*B).Log", Method, 0, ""},
    +		{"(*B).Logf", Method, 0, ""},
    +		{"(*B).Loop", Method, 24, ""},
    +		{"(*B).Name", Method, 8, ""},
    +		{"(*B).Output", Method, 25, ""},
    +		{"(*B).ReportAllocs", Method, 1, ""},
    +		{"(*B).ReportMetric", Method, 13, ""},
    +		{"(*B).ResetTimer", Method, 0, ""},
    +		{"(*B).Run", Method, 7, ""},
    +		{"(*B).RunParallel", Method, 3, ""},
    +		{"(*B).SetBytes", Method, 0, ""},
    +		{"(*B).SetParallelism", Method, 3, ""},
    +		{"(*B).Setenv", Method, 17, ""},
    +		{"(*B).Skip", Method, 1, ""},
    +		{"(*B).SkipNow", Method, 1, ""},
    +		{"(*B).Skipf", Method, 1, ""},
    +		{"(*B).Skipped", Method, 1, ""},
    +		{"(*B).StartTimer", Method, 0, ""},
    +		{"(*B).StopTimer", Method, 0, ""},
    +		{"(*B).TempDir", Method, 15, ""},
    +		{"(*F).Add", Method, 18, ""},
    +		{"(*F).Attr", Method, 25, ""},
    +		{"(*F).Chdir", Method, 24, ""},
    +		{"(*F).Cleanup", Method, 18, ""},
    +		{"(*F).Context", Method, 24, ""},
    +		{"(*F).Error", Method, 18, ""},
    +		{"(*F).Errorf", Method, 18, ""},
    +		{"(*F).Fail", Method, 18, ""},
    +		{"(*F).FailNow", Method, 18, ""},
    +		{"(*F).Failed", Method, 18, ""},
    +		{"(*F).Fatal", Method, 18, ""},
    +		{"(*F).Fatalf", Method, 18, ""},
    +		{"(*F).Fuzz", Method, 18, ""},
    +		{"(*F).Helper", Method, 18, ""},
    +		{"(*F).Log", Method, 18, ""},
    +		{"(*F).Logf", Method, 18, ""},
    +		{"(*F).Name", Method, 18, ""},
    +		{"(*F).Output", Method, 25, ""},
    +		{"(*F).Setenv", Method, 18, ""},
    +		{"(*F).Skip", Method, 18, ""},
    +		{"(*F).SkipNow", Method, 18, ""},
    +		{"(*F).Skipf", Method, 18, ""},
    +		{"(*F).Skipped", Method, 18, ""},
    +		{"(*F).TempDir", Method, 18, ""},
    +		{"(*M).Run", Method, 4, ""},
    +		{"(*PB).Next", Method, 3, ""},
    +		{"(*T).Attr", Method, 25, ""},
    +		{"(*T).Chdir", Method, 24, ""},
    +		{"(*T).Cleanup", Method, 14, ""},
    +		{"(*T).Context", Method, 24, ""},
    +		{"(*T).Deadline", Method, 15, ""},
    +		{"(*T).Error", Method, 0, ""},
    +		{"(*T).Errorf", Method, 0, ""},
    +		{"(*T).Fail", Method, 0, ""},
    +		{"(*T).FailNow", Method, 0, ""},
    +		{"(*T).Failed", Method, 0, ""},
    +		{"(*T).Fatal", Method, 0, ""},
    +		{"(*T).Fatalf", Method, 0, ""},
    +		{"(*T).Helper", Method, 9, ""},
    +		{"(*T).Log", Method, 0, ""},
    +		{"(*T).Logf", Method, 0, ""},
    +		{"(*T).Name", Method, 8, ""},
    +		{"(*T).Output", Method, 25, ""},
    +		{"(*T).Parallel", Method, 0, ""},
    +		{"(*T).Run", Method, 7, ""},
    +		{"(*T).Setenv", Method, 17, ""},
    +		{"(*T).Skip", Method, 1, ""},
    +		{"(*T).SkipNow", Method, 1, ""},
    +		{"(*T).Skipf", Method, 1, ""},
    +		{"(*T).Skipped", Method, 1, ""},
    +		{"(*T).TempDir", Method, 15, ""},
    +		{"(BenchmarkResult).AllocedBytesPerOp", Method, 1, ""},
    +		{"(BenchmarkResult).AllocsPerOp", Method, 1, ""},
    +		{"(BenchmarkResult).MemString", Method, 1, ""},
    +		{"(BenchmarkResult).NsPerOp", Method, 0, ""},
    +		{"(BenchmarkResult).String", Method, 0, ""},
    +		{"AllocsPerRun", Func, 1, "func(runs int, f func()) (avg float64)"},
    +		{"B", Type, 0, ""},
    +		{"B.N", Field, 0, ""},
    +		{"Benchmark", Func, 0, "func(f func(b *B)) BenchmarkResult"},
    +		{"BenchmarkResult", Type, 0, ""},
    +		{"BenchmarkResult.Bytes", Field, 0, ""},
    +		{"BenchmarkResult.Extra", Field, 13, ""},
    +		{"BenchmarkResult.MemAllocs", Field, 1, ""},
    +		{"BenchmarkResult.MemBytes", Field, 1, ""},
    +		{"BenchmarkResult.N", Field, 0, ""},
    +		{"BenchmarkResult.T", Field, 0, ""},
    +		{"Cover", Type, 2, ""},
    +		{"Cover.Blocks", Field, 2, ""},
    +		{"Cover.Counters", Field, 2, ""},
    +		{"Cover.CoveredPackages", Field, 2, ""},
    +		{"Cover.Mode", Field, 2, ""},
    +		{"CoverBlock", Type, 2, ""},
    +		{"CoverBlock.Col0", Field, 2, ""},
    +		{"CoverBlock.Col1", Field, 2, ""},
    +		{"CoverBlock.Line0", Field, 2, ""},
    +		{"CoverBlock.Line1", Field, 2, ""},
    +		{"CoverBlock.Stmts", Field, 2, ""},
    +		{"CoverMode", Func, 8, "func() string"},
    +		{"Coverage", Func, 4, "func() float64"},
    +		{"F", Type, 18, ""},
    +		{"Init", Func, 13, "func()"},
    +		{"InternalBenchmark", Type, 0, ""},
    +		{"InternalBenchmark.F", Field, 0, ""},
    +		{"InternalBenchmark.Name", Field, 0, ""},
    +		{"InternalExample", Type, 0, ""},
    +		{"InternalExample.F", Field, 0, ""},
    +		{"InternalExample.Name", Field, 0, ""},
    +		{"InternalExample.Output", Field, 0, ""},
    +		{"InternalExample.Unordered", Field, 7, ""},
    +		{"InternalFuzzTarget", Type, 18, ""},
    +		{"InternalFuzzTarget.Fn", Field, 18, ""},
    +		{"InternalFuzzTarget.Name", Field, 18, ""},
    +		{"InternalTest", Type, 0, ""},
    +		{"InternalTest.F", Field, 0, ""},
    +		{"InternalTest.Name", Field, 0, ""},
    +		{"M", Type, 4, ""},
    +		{"Main", Func, 0, "func(matchString func(pat string, str string) (bool, error), tests []InternalTest, benchmarks []InternalBenchmark, examples []InternalExample)"},
    +		{"MainStart", Func, 4, "func(deps testDeps, tests []InternalTest, benchmarks []InternalBenchmark, fuzzTargets []InternalFuzzTarget, examples []InternalExample) *M"},
    +		{"PB", Type, 3, ""},
    +		{"RegisterCover", Func, 2, "func(c Cover)"},
    +		{"RunBenchmarks", Func, 0, "func(matchString func(pat string, str string) (bool, error), benchmarks []InternalBenchmark)"},
    +		{"RunExamples", Func, 0, "func(matchString func(pat string, str string) (bool, error), examples []InternalExample) (ok bool)"},
    +		{"RunTests", Func, 0, "func(matchString func(pat string, str string) (bool, error), tests []InternalTest) (ok bool)"},
    +		{"Short", Func, 0, "func() bool"},
    +		{"T", Type, 0, ""},
    +		{"TB", Type, 2, ""},
    +		{"Testing", Func, 21, "func() bool"},
    +		{"Verbose", Func, 1, "func() bool"},
     	},
     	"testing/fstest": {
    -		{"(MapFS).Glob", Method, 16},
    -		{"(MapFS).Open", Method, 16},
    -		{"(MapFS).ReadDir", Method, 16},
    -		{"(MapFS).ReadFile", Method, 16},
    -		{"(MapFS).Stat", Method, 16},
    -		{"(MapFS).Sub", Method, 16},
    -		{"MapFS", Type, 16},
    -		{"MapFile", Type, 16},
    -		{"MapFile.Data", Field, 16},
    -		{"MapFile.ModTime", Field, 16},
    -		{"MapFile.Mode", Field, 16},
    -		{"MapFile.Sys", Field, 16},
    -		{"TestFS", Func, 16},
    +		{"(MapFS).Glob", Method, 16, ""},
    +		{"(MapFS).Lstat", Method, 25, ""},
    +		{"(MapFS).Open", Method, 16, ""},
    +		{"(MapFS).ReadDir", Method, 16, ""},
    +		{"(MapFS).ReadFile", Method, 16, ""},
    +		{"(MapFS).ReadLink", Method, 25, ""},
    +		{"(MapFS).Stat", Method, 16, ""},
    +		{"(MapFS).Sub", Method, 16, ""},
    +		{"MapFS", Type, 16, ""},
    +		{"MapFile", Type, 16, ""},
    +		{"MapFile.Data", Field, 16, ""},
    +		{"MapFile.ModTime", Field, 16, ""},
    +		{"MapFile.Mode", Field, 16, ""},
    +		{"MapFile.Sys", Field, 16, ""},
    +		{"TestFS", Func, 16, "func(fsys fs.FS, expected ...string) error"},
     	},
     	"testing/iotest": {
    -		{"DataErrReader", Func, 0},
    -		{"ErrReader", Func, 16},
    -		{"ErrTimeout", Var, 0},
    -		{"HalfReader", Func, 0},
    -		{"NewReadLogger", Func, 0},
    -		{"NewWriteLogger", Func, 0},
    -		{"OneByteReader", Func, 0},
    -		{"TestReader", Func, 16},
    -		{"TimeoutReader", Func, 0},
    -		{"TruncateWriter", Func, 0},
    +		{"DataErrReader", Func, 0, "func(r io.Reader) io.Reader"},
    +		{"ErrReader", Func, 16, "func(err error) io.Reader"},
    +		{"ErrTimeout", Var, 0, ""},
    +		{"HalfReader", Func, 0, "func(r io.Reader) io.Reader"},
    +		{"NewReadLogger", Func, 0, "func(prefix string, r io.Reader) io.Reader"},
    +		{"NewWriteLogger", Func, 0, "func(prefix string, w io.Writer) io.Writer"},
    +		{"OneByteReader", Func, 0, "func(r io.Reader) io.Reader"},
    +		{"TestReader", Func, 16, "func(r io.Reader, content []byte) error"},
    +		{"TimeoutReader", Func, 0, "func(r io.Reader) io.Reader"},
    +		{"TruncateWriter", Func, 0, "func(w io.Writer, n int64) io.Writer"},
     	},
     	"testing/quick": {
    -		{"(*CheckEqualError).Error", Method, 0},
    -		{"(*CheckError).Error", Method, 0},
    -		{"(SetupError).Error", Method, 0},
    -		{"Check", Func, 0},
    -		{"CheckEqual", Func, 0},
    -		{"CheckEqualError", Type, 0},
    -		{"CheckEqualError.CheckError", Field, 0},
    -		{"CheckEqualError.Out1", Field, 0},
    -		{"CheckEqualError.Out2", Field, 0},
    -		{"CheckError", Type, 0},
    -		{"CheckError.Count", Field, 0},
    -		{"CheckError.In", Field, 0},
    -		{"Config", Type, 0},
    -		{"Config.MaxCount", Field, 0},
    -		{"Config.MaxCountScale", Field, 0},
    -		{"Config.Rand", Field, 0},
    -		{"Config.Values", Field, 0},
    -		{"Generator", Type, 0},
    -		{"SetupError", Type, 0},
    -		{"Value", Func, 0},
    +		{"(*CheckEqualError).Error", Method, 0, ""},
    +		{"(*CheckError).Error", Method, 0, ""},
    +		{"(SetupError).Error", Method, 0, ""},
    +		{"Check", Func, 0, "func(f any, config *Config) error"},
    +		{"CheckEqual", Func, 0, "func(f any, g any, config *Config) error"},
    +		{"CheckEqualError", Type, 0, ""},
    +		{"CheckEqualError.CheckError", Field, 0, ""},
    +		{"CheckEqualError.Out1", Field, 0, ""},
    +		{"CheckEqualError.Out2", Field, 0, ""},
    +		{"CheckError", Type, 0, ""},
    +		{"CheckError.Count", Field, 0, ""},
    +		{"CheckError.In", Field, 0, ""},
    +		{"Config", Type, 0, ""},
    +		{"Config.MaxCount", Field, 0, ""},
    +		{"Config.MaxCountScale", Field, 0, ""},
    +		{"Config.Rand", Field, 0, ""},
    +		{"Config.Values", Field, 0, ""},
    +		{"Generator", Type, 0, ""},
    +		{"SetupError", Type, 0, ""},
    +		{"Value", Func, 0, "func(t reflect.Type, rand *rand.Rand) (value reflect.Value, ok bool)"},
     	},
     	"testing/slogtest": {
    -		{"Run", Func, 22},
    -		{"TestHandler", Func, 21},
    +		{"Run", Func, 22, "func(t *testing.T, newHandler func(*testing.T) slog.Handler, result func(*testing.T) map[string]any)"},
    +		{"TestHandler", Func, 21, "func(h slog.Handler, results func() []map[string]any) error"},
    +	},
    +	"testing/synctest": {
    +		{"Test", Func, 25, "func(t *testing.T, f func(*testing.T))"},
    +		{"Wait", Func, 25, "func()"},
     	},
     	"text/scanner": {
    -		{"(*Position).IsValid", Method, 0},
    -		{"(*Scanner).Init", Method, 0},
    -		{"(*Scanner).IsValid", Method, 0},
    -		{"(*Scanner).Next", Method, 0},
    -		{"(*Scanner).Peek", Method, 0},
    -		{"(*Scanner).Pos", Method, 0},
    -		{"(*Scanner).Scan", Method, 0},
    -		{"(*Scanner).TokenText", Method, 0},
    -		{"(Position).String", Method, 0},
    -		{"(Scanner).String", Method, 0},
    -		{"Char", Const, 0},
    -		{"Comment", Const, 0},
    -		{"EOF", Const, 0},
    -		{"Float", Const, 0},
    -		{"GoTokens", Const, 0},
    -		{"GoWhitespace", Const, 0},
    -		{"Ident", Const, 0},
    -		{"Int", Const, 0},
    -		{"Position", Type, 0},
    -		{"Position.Column", Field, 0},
    -		{"Position.Filename", Field, 0},
    -		{"Position.Line", Field, 0},
    -		{"Position.Offset", Field, 0},
    -		{"RawString", Const, 0},
    -		{"ScanChars", Const, 0},
    -		{"ScanComments", Const, 0},
    -		{"ScanFloats", Const, 0},
    -		{"ScanIdents", Const, 0},
    -		{"ScanInts", Const, 0},
    -		{"ScanRawStrings", Const, 0},
    -		{"ScanStrings", Const, 0},
    -		{"Scanner", Type, 0},
    -		{"Scanner.Error", Field, 0},
    -		{"Scanner.ErrorCount", Field, 0},
    -		{"Scanner.IsIdentRune", Field, 4},
    -		{"Scanner.Mode", Field, 0},
    -		{"Scanner.Position", Field, 0},
    -		{"Scanner.Whitespace", Field, 0},
    -		{"SkipComments", Const, 0},
    -		{"String", Const, 0},
    -		{"TokenString", Func, 0},
    +		{"(*Position).IsValid", Method, 0, ""},
    +		{"(*Scanner).Init", Method, 0, ""},
    +		{"(*Scanner).IsValid", Method, 0, ""},
    +		{"(*Scanner).Next", Method, 0, ""},
    +		{"(*Scanner).Peek", Method, 0, ""},
    +		{"(*Scanner).Pos", Method, 0, ""},
    +		{"(*Scanner).Scan", Method, 0, ""},
    +		{"(*Scanner).TokenText", Method, 0, ""},
    +		{"(Position).String", Method, 0, ""},
    +		{"(Scanner).String", Method, 0, ""},
    +		{"Char", Const, 0, ""},
    +		{"Comment", Const, 0, ""},
    +		{"EOF", Const, 0, ""},
    +		{"Float", Const, 0, ""},
    +		{"GoTokens", Const, 0, ""},
    +		{"GoWhitespace", Const, 0, ""},
    +		{"Ident", Const, 0, ""},
    +		{"Int", Const, 0, ""},
    +		{"Position", Type, 0, ""},
    +		{"Position.Column", Field, 0, ""},
    +		{"Position.Filename", Field, 0, ""},
    +		{"Position.Line", Field, 0, ""},
    +		{"Position.Offset", Field, 0, ""},
    +		{"RawString", Const, 0, ""},
    +		{"ScanChars", Const, 0, ""},
    +		{"ScanComments", Const, 0, ""},
    +		{"ScanFloats", Const, 0, ""},
    +		{"ScanIdents", Const, 0, ""},
    +		{"ScanInts", Const, 0, ""},
    +		{"ScanRawStrings", Const, 0, ""},
    +		{"ScanStrings", Const, 0, ""},
    +		{"Scanner", Type, 0, ""},
    +		{"Scanner.Error", Field, 0, ""},
    +		{"Scanner.ErrorCount", Field, 0, ""},
    +		{"Scanner.IsIdentRune", Field, 4, ""},
    +		{"Scanner.Mode", Field, 0, ""},
    +		{"Scanner.Position", Field, 0, ""},
    +		{"Scanner.Whitespace", Field, 0, ""},
    +		{"SkipComments", Const, 0, ""},
    +		{"String", Const, 0, ""},
    +		{"TokenString", Func, 0, "func(tok rune) string"},
     	},
     	"text/tabwriter": {
    -		{"(*Writer).Flush", Method, 0},
    -		{"(*Writer).Init", Method, 0},
    -		{"(*Writer).Write", Method, 0},
    -		{"AlignRight", Const, 0},
    -		{"Debug", Const, 0},
    -		{"DiscardEmptyColumns", Const, 0},
    -		{"Escape", Const, 0},
    -		{"FilterHTML", Const, 0},
    -		{"NewWriter", Func, 0},
    -		{"StripEscape", Const, 0},
    -		{"TabIndent", Const, 0},
    -		{"Writer", Type, 0},
    +		{"(*Writer).Flush", Method, 0, ""},
    +		{"(*Writer).Init", Method, 0, ""},
    +		{"(*Writer).Write", Method, 0, ""},
    +		{"AlignRight", Const, 0, ""},
    +		{"Debug", Const, 0, ""},
    +		{"DiscardEmptyColumns", Const, 0, ""},
    +		{"Escape", Const, 0, ""},
    +		{"FilterHTML", Const, 0, ""},
    +		{"NewWriter", Func, 0, "func(output io.Writer, minwidth int, tabwidth int, padding int, padchar byte, flags uint) *Writer"},
    +		{"StripEscape", Const, 0, ""},
    +		{"TabIndent", Const, 0, ""},
    +		{"Writer", Type, 0, ""},
     	},
     	"text/template": {
    -		{"(*Template).AddParseTree", Method, 0},
    -		{"(*Template).Clone", Method, 0},
    -		{"(*Template).DefinedTemplates", Method, 5},
    -		{"(*Template).Delims", Method, 0},
    -		{"(*Template).Execute", Method, 0},
    -		{"(*Template).ExecuteTemplate", Method, 0},
    -		{"(*Template).Funcs", Method, 0},
    -		{"(*Template).Lookup", Method, 0},
    -		{"(*Template).Name", Method, 0},
    -		{"(*Template).New", Method, 0},
    -		{"(*Template).Option", Method, 5},
    -		{"(*Template).Parse", Method, 0},
    -		{"(*Template).ParseFS", Method, 16},
    -		{"(*Template).ParseFiles", Method, 0},
    -		{"(*Template).ParseGlob", Method, 0},
    -		{"(*Template).Templates", Method, 0},
    -		{"(ExecError).Error", Method, 6},
    -		{"(ExecError).Unwrap", Method, 13},
    -		{"(Template).Copy", Method, 2},
    -		{"(Template).ErrorContext", Method, 1},
    -		{"ExecError", Type, 6},
    -		{"ExecError.Err", Field, 6},
    -		{"ExecError.Name", Field, 6},
    -		{"FuncMap", Type, 0},
    -		{"HTMLEscape", Func, 0},
    -		{"HTMLEscapeString", Func, 0},
    -		{"HTMLEscaper", Func, 0},
    -		{"IsTrue", Func, 6},
    -		{"JSEscape", Func, 0},
    -		{"JSEscapeString", Func, 0},
    -		{"JSEscaper", Func, 0},
    -		{"Must", Func, 0},
    -		{"New", Func, 0},
    -		{"ParseFS", Func, 16},
    -		{"ParseFiles", Func, 0},
    -		{"ParseGlob", Func, 0},
    -		{"Template", Type, 0},
    -		{"Template.Tree", Field, 0},
    -		{"URLQueryEscaper", Func, 0},
    +		{"(*Template).AddParseTree", Method, 0, ""},
    +		{"(*Template).Clone", Method, 0, ""},
    +		{"(*Template).DefinedTemplates", Method, 5, ""},
    +		{"(*Template).Delims", Method, 0, ""},
    +		{"(*Template).Execute", Method, 0, ""},
    +		{"(*Template).ExecuteTemplate", Method, 0, ""},
    +		{"(*Template).Funcs", Method, 0, ""},
    +		{"(*Template).Lookup", Method, 0, ""},
    +		{"(*Template).Name", Method, 0, ""},
    +		{"(*Template).New", Method, 0, ""},
    +		{"(*Template).Option", Method, 5, ""},
    +		{"(*Template).Parse", Method, 0, ""},
    +		{"(*Template).ParseFS", Method, 16, ""},
    +		{"(*Template).ParseFiles", Method, 0, ""},
    +		{"(*Template).ParseGlob", Method, 0, ""},
    +		{"(*Template).Templates", Method, 0, ""},
    +		{"(ExecError).Error", Method, 6, ""},
    +		{"(ExecError).Unwrap", Method, 13, ""},
    +		{"(Template).Copy", Method, 2, ""},
    +		{"(Template).ErrorContext", Method, 1, ""},
    +		{"ExecError", Type, 6, ""},
    +		{"ExecError.Err", Field, 6, ""},
    +		{"ExecError.Name", Field, 6, ""},
    +		{"FuncMap", Type, 0, ""},
    +		{"HTMLEscape", Func, 0, "func(w io.Writer, b []byte)"},
    +		{"HTMLEscapeString", Func, 0, "func(s string) string"},
    +		{"HTMLEscaper", Func, 0, "func(args ...any) string"},
    +		{"IsTrue", Func, 6, "func(val any) (truth bool, ok bool)"},
    +		{"JSEscape", Func, 0, "func(w io.Writer, b []byte)"},
    +		{"JSEscapeString", Func, 0, "func(s string) string"},
    +		{"JSEscaper", Func, 0, "func(args ...any) string"},
    +		{"Must", Func, 0, "func(t *Template, err error) *Template"},
    +		{"New", Func, 0, "func(name string) *Template"},
    +		{"ParseFS", Func, 16, "func(fsys fs.FS, patterns ...string) (*Template, error)"},
    +		{"ParseFiles", Func, 0, "func(filenames ...string) (*Template, error)"},
    +		{"ParseGlob", Func, 0, "func(pattern string) (*Template, error)"},
    +		{"Template", Type, 0, ""},
    +		{"Template.Tree", Field, 0, ""},
    +		{"URLQueryEscaper", Func, 0, "func(args ...any) string"},
     	},
     	"text/template/parse": {
    -		{"(*ActionNode).Copy", Method, 0},
    -		{"(*ActionNode).String", Method, 0},
    -		{"(*BoolNode).Copy", Method, 0},
    -		{"(*BoolNode).String", Method, 0},
    -		{"(*BranchNode).Copy", Method, 4},
    -		{"(*BranchNode).String", Method, 0},
    -		{"(*BreakNode).Copy", Method, 18},
    -		{"(*BreakNode).String", Method, 18},
    -		{"(*ChainNode).Add", Method, 1},
    -		{"(*ChainNode).Copy", Method, 1},
    -		{"(*ChainNode).String", Method, 1},
    -		{"(*CommandNode).Copy", Method, 0},
    -		{"(*CommandNode).String", Method, 0},
    -		{"(*CommentNode).Copy", Method, 16},
    -		{"(*CommentNode).String", Method, 16},
    -		{"(*ContinueNode).Copy", Method, 18},
    -		{"(*ContinueNode).String", Method, 18},
    -		{"(*DotNode).Copy", Method, 0},
    -		{"(*DotNode).String", Method, 0},
    -		{"(*DotNode).Type", Method, 0},
    -		{"(*FieldNode).Copy", Method, 0},
    -		{"(*FieldNode).String", Method, 0},
    -		{"(*IdentifierNode).Copy", Method, 0},
    -		{"(*IdentifierNode).SetPos", Method, 1},
    -		{"(*IdentifierNode).SetTree", Method, 4},
    -		{"(*IdentifierNode).String", Method, 0},
    -		{"(*IfNode).Copy", Method, 0},
    -		{"(*IfNode).String", Method, 0},
    -		{"(*ListNode).Copy", Method, 0},
    -		{"(*ListNode).CopyList", Method, 0},
    -		{"(*ListNode).String", Method, 0},
    -		{"(*NilNode).Copy", Method, 1},
    -		{"(*NilNode).String", Method, 1},
    -		{"(*NilNode).Type", Method, 1},
    -		{"(*NumberNode).Copy", Method, 0},
    -		{"(*NumberNode).String", Method, 0},
    -		{"(*PipeNode).Copy", Method, 0},
    -		{"(*PipeNode).CopyPipe", Method, 0},
    -		{"(*PipeNode).String", Method, 0},
    -		{"(*RangeNode).Copy", Method, 0},
    -		{"(*RangeNode).String", Method, 0},
    -		{"(*StringNode).Copy", Method, 0},
    -		{"(*StringNode).String", Method, 0},
    -		{"(*TemplateNode).Copy", Method, 0},
    -		{"(*TemplateNode).String", Method, 0},
    -		{"(*TextNode).Copy", Method, 0},
    -		{"(*TextNode).String", Method, 0},
    -		{"(*Tree).Copy", Method, 2},
    -		{"(*Tree).ErrorContext", Method, 1},
    -		{"(*Tree).Parse", Method, 0},
    -		{"(*VariableNode).Copy", Method, 0},
    -		{"(*VariableNode).String", Method, 0},
    -		{"(*WithNode).Copy", Method, 0},
    -		{"(*WithNode).String", Method, 0},
    -		{"(ActionNode).Position", Method, 1},
    -		{"(ActionNode).Type", Method, 0},
    -		{"(BoolNode).Position", Method, 1},
    -		{"(BoolNode).Type", Method, 0},
    -		{"(BranchNode).Position", Method, 1},
    -		{"(BranchNode).Type", Method, 0},
    -		{"(BreakNode).Position", Method, 18},
    -		{"(BreakNode).Type", Method, 18},
    -		{"(ChainNode).Position", Method, 1},
    -		{"(ChainNode).Type", Method, 1},
    -		{"(CommandNode).Position", Method, 1},
    -		{"(CommandNode).Type", Method, 0},
    -		{"(CommentNode).Position", Method, 16},
    -		{"(CommentNode).Type", Method, 16},
    -		{"(ContinueNode).Position", Method, 18},
    -		{"(ContinueNode).Type", Method, 18},
    -		{"(DotNode).Position", Method, 1},
    -		{"(FieldNode).Position", Method, 1},
    -		{"(FieldNode).Type", Method, 0},
    -		{"(IdentifierNode).Position", Method, 1},
    -		{"(IdentifierNode).Type", Method, 0},
    -		{"(IfNode).Position", Method, 1},
    -		{"(IfNode).Type", Method, 0},
    -		{"(ListNode).Position", Method, 1},
    -		{"(ListNode).Type", Method, 0},
    -		{"(NilNode).Position", Method, 1},
    -		{"(NodeType).Type", Method, 0},
    -		{"(NumberNode).Position", Method, 1},
    -		{"(NumberNode).Type", Method, 0},
    -		{"(PipeNode).Position", Method, 1},
    -		{"(PipeNode).Type", Method, 0},
    -		{"(Pos).Position", Method, 1},
    -		{"(RangeNode).Position", Method, 1},
    -		{"(RangeNode).Type", Method, 0},
    -		{"(StringNode).Position", Method, 1},
    -		{"(StringNode).Type", Method, 0},
    -		{"(TemplateNode).Position", Method, 1},
    -		{"(TemplateNode).Type", Method, 0},
    -		{"(TextNode).Position", Method, 1},
    -		{"(TextNode).Type", Method, 0},
    -		{"(VariableNode).Position", Method, 1},
    -		{"(VariableNode).Type", Method, 0},
    -		{"(WithNode).Position", Method, 1},
    -		{"(WithNode).Type", Method, 0},
    -		{"ActionNode", Type, 0},
    -		{"ActionNode.Line", Field, 0},
    -		{"ActionNode.NodeType", Field, 0},
    -		{"ActionNode.Pipe", Field, 0},
    -		{"ActionNode.Pos", Field, 1},
    -		{"BoolNode", Type, 0},
    -		{"BoolNode.NodeType", Field, 0},
    -		{"BoolNode.Pos", Field, 1},
    -		{"BoolNode.True", Field, 0},
    -		{"BranchNode", Type, 0},
    -		{"BranchNode.ElseList", Field, 0},
    -		{"BranchNode.Line", Field, 0},
    -		{"BranchNode.List", Field, 0},
    -		{"BranchNode.NodeType", Field, 0},
    -		{"BranchNode.Pipe", Field, 0},
    -		{"BranchNode.Pos", Field, 1},
    -		{"BreakNode", Type, 18},
    -		{"BreakNode.Line", Field, 18},
    -		{"BreakNode.NodeType", Field, 18},
    -		{"BreakNode.Pos", Field, 18},
    -		{"ChainNode", Type, 1},
    -		{"ChainNode.Field", Field, 1},
    -		{"ChainNode.Node", Field, 1},
    -		{"ChainNode.NodeType", Field, 1},
    -		{"ChainNode.Pos", Field, 1},
    -		{"CommandNode", Type, 0},
    -		{"CommandNode.Args", Field, 0},
    -		{"CommandNode.NodeType", Field, 0},
    -		{"CommandNode.Pos", Field, 1},
    -		{"CommentNode", Type, 16},
    -		{"CommentNode.NodeType", Field, 16},
    -		{"CommentNode.Pos", Field, 16},
    -		{"CommentNode.Text", Field, 16},
    -		{"ContinueNode", Type, 18},
    -		{"ContinueNode.Line", Field, 18},
    -		{"ContinueNode.NodeType", Field, 18},
    -		{"ContinueNode.Pos", Field, 18},
    -		{"DotNode", Type, 0},
    -		{"DotNode.NodeType", Field, 4},
    -		{"DotNode.Pos", Field, 1},
    -		{"FieldNode", Type, 0},
    -		{"FieldNode.Ident", Field, 0},
    -		{"FieldNode.NodeType", Field, 0},
    -		{"FieldNode.Pos", Field, 1},
    -		{"IdentifierNode", Type, 0},
    -		{"IdentifierNode.Ident", Field, 0},
    -		{"IdentifierNode.NodeType", Field, 0},
    -		{"IdentifierNode.Pos", Field, 1},
    -		{"IfNode", Type, 0},
    -		{"IfNode.BranchNode", Field, 0},
    -		{"IsEmptyTree", Func, 0},
    -		{"ListNode", Type, 0},
    -		{"ListNode.NodeType", Field, 0},
    -		{"ListNode.Nodes", Field, 0},
    -		{"ListNode.Pos", Field, 1},
    -		{"Mode", Type, 16},
    -		{"New", Func, 0},
    -		{"NewIdentifier", Func, 0},
    -		{"NilNode", Type, 1},
    -		{"NilNode.NodeType", Field, 4},
    -		{"NilNode.Pos", Field, 1},
    -		{"Node", Type, 0},
    -		{"NodeAction", Const, 0},
    -		{"NodeBool", Const, 0},
    -		{"NodeBreak", Const, 18},
    -		{"NodeChain", Const, 1},
    -		{"NodeCommand", Const, 0},
    -		{"NodeComment", Const, 16},
    -		{"NodeContinue", Const, 18},
    -		{"NodeDot", Const, 0},
    -		{"NodeField", Const, 0},
    -		{"NodeIdentifier", Const, 0},
    -		{"NodeIf", Const, 0},
    -		{"NodeList", Const, 0},
    -		{"NodeNil", Const, 1},
    -		{"NodeNumber", Const, 0},
    -		{"NodePipe", Const, 0},
    -		{"NodeRange", Const, 0},
    -		{"NodeString", Const, 0},
    -		{"NodeTemplate", Const, 0},
    -		{"NodeText", Const, 0},
    -		{"NodeType", Type, 0},
    -		{"NodeVariable", Const, 0},
    -		{"NodeWith", Const, 0},
    -		{"NumberNode", Type, 0},
    -		{"NumberNode.Complex128", Field, 0},
    -		{"NumberNode.Float64", Field, 0},
    -		{"NumberNode.Int64", Field, 0},
    -		{"NumberNode.IsComplex", Field, 0},
    -		{"NumberNode.IsFloat", Field, 0},
    -		{"NumberNode.IsInt", Field, 0},
    -		{"NumberNode.IsUint", Field, 0},
    -		{"NumberNode.NodeType", Field, 0},
    -		{"NumberNode.Pos", Field, 1},
    -		{"NumberNode.Text", Field, 0},
    -		{"NumberNode.Uint64", Field, 0},
    -		{"Parse", Func, 0},
    -		{"ParseComments", Const, 16},
    -		{"PipeNode", Type, 0},
    -		{"PipeNode.Cmds", Field, 0},
    -		{"PipeNode.Decl", Field, 0},
    -		{"PipeNode.IsAssign", Field, 11},
    -		{"PipeNode.Line", Field, 0},
    -		{"PipeNode.NodeType", Field, 0},
    -		{"PipeNode.Pos", Field, 1},
    -		{"Pos", Type, 1},
    -		{"RangeNode", Type, 0},
    -		{"RangeNode.BranchNode", Field, 0},
    -		{"SkipFuncCheck", Const, 17},
    -		{"StringNode", Type, 0},
    -		{"StringNode.NodeType", Field, 0},
    -		{"StringNode.Pos", Field, 1},
    -		{"StringNode.Quoted", Field, 0},
    -		{"StringNode.Text", Field, 0},
    -		{"TemplateNode", Type, 0},
    -		{"TemplateNode.Line", Field, 0},
    -		{"TemplateNode.Name", Field, 0},
    -		{"TemplateNode.NodeType", Field, 0},
    -		{"TemplateNode.Pipe", Field, 0},
    -		{"TemplateNode.Pos", Field, 1},
    -		{"TextNode", Type, 0},
    -		{"TextNode.NodeType", Field, 0},
    -		{"TextNode.Pos", Field, 1},
    -		{"TextNode.Text", Field, 0},
    -		{"Tree", Type, 0},
    -		{"Tree.Mode", Field, 16},
    -		{"Tree.Name", Field, 0},
    -		{"Tree.ParseName", Field, 1},
    -		{"Tree.Root", Field, 0},
    -		{"VariableNode", Type, 0},
    -		{"VariableNode.Ident", Field, 0},
    -		{"VariableNode.NodeType", Field, 0},
    -		{"VariableNode.Pos", Field, 1},
    -		{"WithNode", Type, 0},
    -		{"WithNode.BranchNode", Field, 0},
    +		{"(*ActionNode).Copy", Method, 0, ""},
    +		{"(*ActionNode).String", Method, 0, ""},
    +		{"(*BoolNode).Copy", Method, 0, ""},
    +		{"(*BoolNode).String", Method, 0, ""},
    +		{"(*BranchNode).Copy", Method, 4, ""},
    +		{"(*BranchNode).String", Method, 0, ""},
    +		{"(*BreakNode).Copy", Method, 18, ""},
    +		{"(*BreakNode).String", Method, 18, ""},
    +		{"(*ChainNode).Add", Method, 1, ""},
    +		{"(*ChainNode).Copy", Method, 1, ""},
    +		{"(*ChainNode).String", Method, 1, ""},
    +		{"(*CommandNode).Copy", Method, 0, ""},
    +		{"(*CommandNode).String", Method, 0, ""},
    +		{"(*CommentNode).Copy", Method, 16, ""},
    +		{"(*CommentNode).String", Method, 16, ""},
    +		{"(*ContinueNode).Copy", Method, 18, ""},
    +		{"(*ContinueNode).String", Method, 18, ""},
    +		{"(*DotNode).Copy", Method, 0, ""},
    +		{"(*DotNode).String", Method, 0, ""},
    +		{"(*DotNode).Type", Method, 0, ""},
    +		{"(*FieldNode).Copy", Method, 0, ""},
    +		{"(*FieldNode).String", Method, 0, ""},
    +		{"(*IdentifierNode).Copy", Method, 0, ""},
    +		{"(*IdentifierNode).SetPos", Method, 1, ""},
    +		{"(*IdentifierNode).SetTree", Method, 4, ""},
    +		{"(*IdentifierNode).String", Method, 0, ""},
    +		{"(*IfNode).Copy", Method, 0, ""},
    +		{"(*IfNode).String", Method, 0, ""},
    +		{"(*ListNode).Copy", Method, 0, ""},
    +		{"(*ListNode).CopyList", Method, 0, ""},
    +		{"(*ListNode).String", Method, 0, ""},
    +		{"(*NilNode).Copy", Method, 1, ""},
    +		{"(*NilNode).String", Method, 1, ""},
    +		{"(*NilNode).Type", Method, 1, ""},
    +		{"(*NumberNode).Copy", Method, 0, ""},
    +		{"(*NumberNode).String", Method, 0, ""},
    +		{"(*PipeNode).Copy", Method, 0, ""},
    +		{"(*PipeNode).CopyPipe", Method, 0, ""},
    +		{"(*PipeNode).String", Method, 0, ""},
    +		{"(*RangeNode).Copy", Method, 0, ""},
    +		{"(*RangeNode).String", Method, 0, ""},
    +		{"(*StringNode).Copy", Method, 0, ""},
    +		{"(*StringNode).String", Method, 0, ""},
    +		{"(*TemplateNode).Copy", Method, 0, ""},
    +		{"(*TemplateNode).String", Method, 0, ""},
    +		{"(*TextNode).Copy", Method, 0, ""},
    +		{"(*TextNode).String", Method, 0, ""},
    +		{"(*Tree).Copy", Method, 2, ""},
    +		{"(*Tree).ErrorContext", Method, 1, ""},
    +		{"(*Tree).Parse", Method, 0, ""},
    +		{"(*VariableNode).Copy", Method, 0, ""},
    +		{"(*VariableNode).String", Method, 0, ""},
    +		{"(*WithNode).Copy", Method, 0, ""},
    +		{"(*WithNode).String", Method, 0, ""},
    +		{"(ActionNode).Position", Method, 1, ""},
    +		{"(ActionNode).Type", Method, 0, ""},
    +		{"(BoolNode).Position", Method, 1, ""},
    +		{"(BoolNode).Type", Method, 0, ""},
    +		{"(BranchNode).Position", Method, 1, ""},
    +		{"(BranchNode).Type", Method, 0, ""},
    +		{"(BreakNode).Position", Method, 18, ""},
    +		{"(BreakNode).Type", Method, 18, ""},
    +		{"(ChainNode).Position", Method, 1, ""},
    +		{"(ChainNode).Type", Method, 1, ""},
    +		{"(CommandNode).Position", Method, 1, ""},
    +		{"(CommandNode).Type", Method, 0, ""},
    +		{"(CommentNode).Position", Method, 16, ""},
    +		{"(CommentNode).Type", Method, 16, ""},
    +		{"(ContinueNode).Position", Method, 18, ""},
    +		{"(ContinueNode).Type", Method, 18, ""},
    +		{"(DotNode).Position", Method, 1, ""},
    +		{"(FieldNode).Position", Method, 1, ""},
    +		{"(FieldNode).Type", Method, 0, ""},
    +		{"(IdentifierNode).Position", Method, 1, ""},
    +		{"(IdentifierNode).Type", Method, 0, ""},
    +		{"(IfNode).Position", Method, 1, ""},
    +		{"(IfNode).Type", Method, 0, ""},
    +		{"(ListNode).Position", Method, 1, ""},
    +		{"(ListNode).Type", Method, 0, ""},
    +		{"(NilNode).Position", Method, 1, ""},
    +		{"(NodeType).Type", Method, 0, ""},
    +		{"(NumberNode).Position", Method, 1, ""},
    +		{"(NumberNode).Type", Method, 0, ""},
    +		{"(PipeNode).Position", Method, 1, ""},
    +		{"(PipeNode).Type", Method, 0, ""},
    +		{"(Pos).Position", Method, 1, ""},
    +		{"(RangeNode).Position", Method, 1, ""},
    +		{"(RangeNode).Type", Method, 0, ""},
    +		{"(StringNode).Position", Method, 1, ""},
    +		{"(StringNode).Type", Method, 0, ""},
    +		{"(TemplateNode).Position", Method, 1, ""},
    +		{"(TemplateNode).Type", Method, 0, ""},
    +		{"(TextNode).Position", Method, 1, ""},
    +		{"(TextNode).Type", Method, 0, ""},
    +		{"(VariableNode).Position", Method, 1, ""},
    +		{"(VariableNode).Type", Method, 0, ""},
    +		{"(WithNode).Position", Method, 1, ""},
    +		{"(WithNode).Type", Method, 0, ""},
    +		{"ActionNode", Type, 0, ""},
    +		{"ActionNode.Line", Field, 0, ""},
    +		{"ActionNode.NodeType", Field, 0, ""},
    +		{"ActionNode.Pipe", Field, 0, ""},
    +		{"ActionNode.Pos", Field, 1, ""},
    +		{"BoolNode", Type, 0, ""},
    +		{"BoolNode.NodeType", Field, 0, ""},
    +		{"BoolNode.Pos", Field, 1, ""},
    +		{"BoolNode.True", Field, 0, ""},
    +		{"BranchNode", Type, 0, ""},
    +		{"BranchNode.ElseList", Field, 0, ""},
    +		{"BranchNode.Line", Field, 0, ""},
    +		{"BranchNode.List", Field, 0, ""},
    +		{"BranchNode.NodeType", Field, 0, ""},
    +		{"BranchNode.Pipe", Field, 0, ""},
    +		{"BranchNode.Pos", Field, 1, ""},
    +		{"BreakNode", Type, 18, ""},
    +		{"BreakNode.Line", Field, 18, ""},
    +		{"BreakNode.NodeType", Field, 18, ""},
    +		{"BreakNode.Pos", Field, 18, ""},
    +		{"ChainNode", Type, 1, ""},
    +		{"ChainNode.Field", Field, 1, ""},
    +		{"ChainNode.Node", Field, 1, ""},
    +		{"ChainNode.NodeType", Field, 1, ""},
    +		{"ChainNode.Pos", Field, 1, ""},
    +		{"CommandNode", Type, 0, ""},
    +		{"CommandNode.Args", Field, 0, ""},
    +		{"CommandNode.NodeType", Field, 0, ""},
    +		{"CommandNode.Pos", Field, 1, ""},
    +		{"CommentNode", Type, 16, ""},
    +		{"CommentNode.NodeType", Field, 16, ""},
    +		{"CommentNode.Pos", Field, 16, ""},
    +		{"CommentNode.Text", Field, 16, ""},
    +		{"ContinueNode", Type, 18, ""},
    +		{"ContinueNode.Line", Field, 18, ""},
    +		{"ContinueNode.NodeType", Field, 18, ""},
    +		{"ContinueNode.Pos", Field, 18, ""},
    +		{"DotNode", Type, 0, ""},
    +		{"DotNode.NodeType", Field, 4, ""},
    +		{"DotNode.Pos", Field, 1, ""},
    +		{"FieldNode", Type, 0, ""},
    +		{"FieldNode.Ident", Field, 0, ""},
    +		{"FieldNode.NodeType", Field, 0, ""},
    +		{"FieldNode.Pos", Field, 1, ""},
    +		{"IdentifierNode", Type, 0, ""},
    +		{"IdentifierNode.Ident", Field, 0, ""},
    +		{"IdentifierNode.NodeType", Field, 0, ""},
    +		{"IdentifierNode.Pos", Field, 1, ""},
    +		{"IfNode", Type, 0, ""},
    +		{"IfNode.BranchNode", Field, 0, ""},
    +		{"IsEmptyTree", Func, 0, "func(n Node) bool"},
    +		{"ListNode", Type, 0, ""},
    +		{"ListNode.NodeType", Field, 0, ""},
    +		{"ListNode.Nodes", Field, 0, ""},
    +		{"ListNode.Pos", Field, 1, ""},
    +		{"Mode", Type, 16, ""},
    +		{"New", Func, 0, "func(name string, funcs ...map[string]any) *Tree"},
    +		{"NewIdentifier", Func, 0, "func(ident string) *IdentifierNode"},
    +		{"NilNode", Type, 1, ""},
    +		{"NilNode.NodeType", Field, 4, ""},
    +		{"NilNode.Pos", Field, 1, ""},
    +		{"Node", Type, 0, ""},
    +		{"NodeAction", Const, 0, ""},
    +		{"NodeBool", Const, 0, ""},
    +		{"NodeBreak", Const, 18, ""},
    +		{"NodeChain", Const, 1, ""},
    +		{"NodeCommand", Const, 0, ""},
    +		{"NodeComment", Const, 16, ""},
    +		{"NodeContinue", Const, 18, ""},
    +		{"NodeDot", Const, 0, ""},
    +		{"NodeField", Const, 0, ""},
    +		{"NodeIdentifier", Const, 0, ""},
    +		{"NodeIf", Const, 0, ""},
    +		{"NodeList", Const, 0, ""},
    +		{"NodeNil", Const, 1, ""},
    +		{"NodeNumber", Const, 0, ""},
    +		{"NodePipe", Const, 0, ""},
    +		{"NodeRange", Const, 0, ""},
    +		{"NodeString", Const, 0, ""},
    +		{"NodeTemplate", Const, 0, ""},
    +		{"NodeText", Const, 0, ""},
    +		{"NodeType", Type, 0, ""},
    +		{"NodeVariable", Const, 0, ""},
    +		{"NodeWith", Const, 0, ""},
    +		{"NumberNode", Type, 0, ""},
    +		{"NumberNode.Complex128", Field, 0, ""},
    +		{"NumberNode.Float64", Field, 0, ""},
    +		{"NumberNode.Int64", Field, 0, ""},
    +		{"NumberNode.IsComplex", Field, 0, ""},
    +		{"NumberNode.IsFloat", Field, 0, ""},
    +		{"NumberNode.IsInt", Field, 0, ""},
    +		{"NumberNode.IsUint", Field, 0, ""},
    +		{"NumberNode.NodeType", Field, 0, ""},
    +		{"NumberNode.Pos", Field, 1, ""},
    +		{"NumberNode.Text", Field, 0, ""},
    +		{"NumberNode.Uint64", Field, 0, ""},
    +		{"Parse", Func, 0, "func(name string, text string, leftDelim string, rightDelim string, funcs ...map[string]any) (map[string]*Tree, error)"},
    +		{"ParseComments", Const, 16, ""},
    +		{"PipeNode", Type, 0, ""},
    +		{"PipeNode.Cmds", Field, 0, ""},
    +		{"PipeNode.Decl", Field, 0, ""},
    +		{"PipeNode.IsAssign", Field, 11, ""},
    +		{"PipeNode.Line", Field, 0, ""},
    +		{"PipeNode.NodeType", Field, 0, ""},
    +		{"PipeNode.Pos", Field, 1, ""},
    +		{"Pos", Type, 1, ""},
    +		{"RangeNode", Type, 0, ""},
    +		{"RangeNode.BranchNode", Field, 0, ""},
    +		{"SkipFuncCheck", Const, 17, ""},
    +		{"StringNode", Type, 0, ""},
    +		{"StringNode.NodeType", Field, 0, ""},
    +		{"StringNode.Pos", Field, 1, ""},
    +		{"StringNode.Quoted", Field, 0, ""},
    +		{"StringNode.Text", Field, 0, ""},
    +		{"TemplateNode", Type, 0, ""},
    +		{"TemplateNode.Line", Field, 0, ""},
    +		{"TemplateNode.Name", Field, 0, ""},
    +		{"TemplateNode.NodeType", Field, 0, ""},
    +		{"TemplateNode.Pipe", Field, 0, ""},
    +		{"TemplateNode.Pos", Field, 1, ""},
    +		{"TextNode", Type, 0, ""},
    +		{"TextNode.NodeType", Field, 0, ""},
    +		{"TextNode.Pos", Field, 1, ""},
    +		{"TextNode.Text", Field, 0, ""},
    +		{"Tree", Type, 0, ""},
    +		{"Tree.Mode", Field, 16, ""},
    +		{"Tree.Name", Field, 0, ""},
    +		{"Tree.ParseName", Field, 1, ""},
    +		{"Tree.Root", Field, 0, ""},
    +		{"VariableNode", Type, 0, ""},
    +		{"VariableNode.Ident", Field, 0, ""},
    +		{"VariableNode.NodeType", Field, 0, ""},
    +		{"VariableNode.Pos", Field, 1, ""},
    +		{"WithNode", Type, 0, ""},
    +		{"WithNode.BranchNode", Field, 0, ""},
     	},
     	"time": {
    -		{"(*Location).String", Method, 0},
    -		{"(*ParseError).Error", Method, 0},
    -		{"(*Ticker).Reset", Method, 15},
    -		{"(*Ticker).Stop", Method, 0},
    -		{"(*Time).GobDecode", Method, 0},
    -		{"(*Time).UnmarshalBinary", Method, 2},
    -		{"(*Time).UnmarshalJSON", Method, 0},
    -		{"(*Time).UnmarshalText", Method, 2},
    -		{"(*Timer).Reset", Method, 1},
    -		{"(*Timer).Stop", Method, 0},
    -		{"(Duration).Abs", Method, 19},
    -		{"(Duration).Hours", Method, 0},
    -		{"(Duration).Microseconds", Method, 13},
    -		{"(Duration).Milliseconds", Method, 13},
    -		{"(Duration).Minutes", Method, 0},
    -		{"(Duration).Nanoseconds", Method, 0},
    -		{"(Duration).Round", Method, 9},
    -		{"(Duration).Seconds", Method, 0},
    -		{"(Duration).String", Method, 0},
    -		{"(Duration).Truncate", Method, 9},
    -		{"(Month).String", Method, 0},
    -		{"(Time).Add", Method, 0},
    -		{"(Time).AddDate", Method, 0},
    -		{"(Time).After", Method, 0},
    -		{"(Time).AppendFormat", Method, 5},
    -		{"(Time).Before", Method, 0},
    -		{"(Time).Clock", Method, 0},
    -		{"(Time).Compare", Method, 20},
    -		{"(Time).Date", Method, 0},
    -		{"(Time).Day", Method, 0},
    -		{"(Time).Equal", Method, 0},
    -		{"(Time).Format", Method, 0},
    -		{"(Time).GoString", Method, 17},
    -		{"(Time).GobEncode", Method, 0},
    -		{"(Time).Hour", Method, 0},
    -		{"(Time).ISOWeek", Method, 0},
    -		{"(Time).In", Method, 0},
    -		{"(Time).IsDST", Method, 17},
    -		{"(Time).IsZero", Method, 0},
    -		{"(Time).Local", Method, 0},
    -		{"(Time).Location", Method, 0},
    -		{"(Time).MarshalBinary", Method, 2},
    -		{"(Time).MarshalJSON", Method, 0},
    -		{"(Time).MarshalText", Method, 2},
    -		{"(Time).Minute", Method, 0},
    -		{"(Time).Month", Method, 0},
    -		{"(Time).Nanosecond", Method, 0},
    -		{"(Time).Round", Method, 1},
    -		{"(Time).Second", Method, 0},
    -		{"(Time).String", Method, 0},
    -		{"(Time).Sub", Method, 0},
    -		{"(Time).Truncate", Method, 1},
    -		{"(Time).UTC", Method, 0},
    -		{"(Time).Unix", Method, 0},
    -		{"(Time).UnixMicro", Method, 17},
    -		{"(Time).UnixMilli", Method, 17},
    -		{"(Time).UnixNano", Method, 0},
    -		{"(Time).Weekday", Method, 0},
    -		{"(Time).Year", Method, 0},
    -		{"(Time).YearDay", Method, 1},
    -		{"(Time).Zone", Method, 0},
    -		{"(Time).ZoneBounds", Method, 19},
    -		{"(Weekday).String", Method, 0},
    -		{"ANSIC", Const, 0},
    -		{"After", Func, 0},
    -		{"AfterFunc", Func, 0},
    -		{"April", Const, 0},
    -		{"August", Const, 0},
    -		{"Date", Func, 0},
    -		{"DateOnly", Const, 20},
    -		{"DateTime", Const, 20},
    -		{"December", Const, 0},
    -		{"Duration", Type, 0},
    -		{"February", Const, 0},
    -		{"FixedZone", Func, 0},
    -		{"Friday", Const, 0},
    -		{"Hour", Const, 0},
    -		{"January", Const, 0},
    -		{"July", Const, 0},
    -		{"June", Const, 0},
    -		{"Kitchen", Const, 0},
    -		{"Layout", Const, 17},
    -		{"LoadLocation", Func, 0},
    -		{"LoadLocationFromTZData", Func, 10},
    -		{"Local", Var, 0},
    -		{"Location", Type, 0},
    -		{"March", Const, 0},
    -		{"May", Const, 0},
    -		{"Microsecond", Const, 0},
    -		{"Millisecond", Const, 0},
    -		{"Minute", Const, 0},
    -		{"Monday", Const, 0},
    -		{"Month", Type, 0},
    -		{"Nanosecond", Const, 0},
    -		{"NewTicker", Func, 0},
    -		{"NewTimer", Func, 0},
    -		{"November", Const, 0},
    -		{"Now", Func, 0},
    -		{"October", Const, 0},
    -		{"Parse", Func, 0},
    -		{"ParseDuration", Func, 0},
    -		{"ParseError", Type, 0},
    -		{"ParseError.Layout", Field, 0},
    -		{"ParseError.LayoutElem", Field, 0},
    -		{"ParseError.Message", Field, 0},
    -		{"ParseError.Value", Field, 0},
    -		{"ParseError.ValueElem", Field, 0},
    -		{"ParseInLocation", Func, 1},
    -		{"RFC1123", Const, 0},
    -		{"RFC1123Z", Const, 0},
    -		{"RFC3339", Const, 0},
    -		{"RFC3339Nano", Const, 0},
    -		{"RFC822", Const, 0},
    -		{"RFC822Z", Const, 0},
    -		{"RFC850", Const, 0},
    -		{"RubyDate", Const, 0},
    -		{"Saturday", Const, 0},
    -		{"Second", Const, 0},
    -		{"September", Const, 0},
    -		{"Since", Func, 0},
    -		{"Sleep", Func, 0},
    -		{"Stamp", Const, 0},
    -		{"StampMicro", Const, 0},
    -		{"StampMilli", Const, 0},
    -		{"StampNano", Const, 0},
    -		{"Sunday", Const, 0},
    -		{"Thursday", Const, 0},
    -		{"Tick", Func, 0},
    -		{"Ticker", Type, 0},
    -		{"Ticker.C", Field, 0},
    -		{"Time", Type, 0},
    -		{"TimeOnly", Const, 20},
    -		{"Timer", Type, 0},
    -		{"Timer.C", Field, 0},
    -		{"Tuesday", Const, 0},
    -		{"UTC", Var, 0},
    -		{"Unix", Func, 0},
    -		{"UnixDate", Const, 0},
    -		{"UnixMicro", Func, 17},
    -		{"UnixMilli", Func, 17},
    -		{"Until", Func, 8},
    -		{"Wednesday", Const, 0},
    -		{"Weekday", Type, 0},
    +		{"(*Location).String", Method, 0, ""},
    +		{"(*ParseError).Error", Method, 0, ""},
    +		{"(*Ticker).Reset", Method, 15, ""},
    +		{"(*Ticker).Stop", Method, 0, ""},
    +		{"(*Time).GobDecode", Method, 0, ""},
    +		{"(*Time).UnmarshalBinary", Method, 2, ""},
    +		{"(*Time).UnmarshalJSON", Method, 0, ""},
    +		{"(*Time).UnmarshalText", Method, 2, ""},
    +		{"(*Timer).Reset", Method, 1, ""},
    +		{"(*Timer).Stop", Method, 0, ""},
    +		{"(Duration).Abs", Method, 19, ""},
    +		{"(Duration).Hours", Method, 0, ""},
    +		{"(Duration).Microseconds", Method, 13, ""},
    +		{"(Duration).Milliseconds", Method, 13, ""},
    +		{"(Duration).Minutes", Method, 0, ""},
    +		{"(Duration).Nanoseconds", Method, 0, ""},
    +		{"(Duration).Round", Method, 9, ""},
    +		{"(Duration).Seconds", Method, 0, ""},
    +		{"(Duration).String", Method, 0, ""},
    +		{"(Duration).Truncate", Method, 9, ""},
    +		{"(Month).String", Method, 0, ""},
    +		{"(Time).Add", Method, 0, ""},
    +		{"(Time).AddDate", Method, 0, ""},
    +		{"(Time).After", Method, 0, ""},
    +		{"(Time).AppendBinary", Method, 24, ""},
    +		{"(Time).AppendFormat", Method, 5, ""},
    +		{"(Time).AppendText", Method, 24, ""},
    +		{"(Time).Before", Method, 0, ""},
    +		{"(Time).Clock", Method, 0, ""},
    +		{"(Time).Compare", Method, 20, ""},
    +		{"(Time).Date", Method, 0, ""},
    +		{"(Time).Day", Method, 0, ""},
    +		{"(Time).Equal", Method, 0, ""},
    +		{"(Time).Format", Method, 0, ""},
    +		{"(Time).GoString", Method, 17, ""},
    +		{"(Time).GobEncode", Method, 0, ""},
    +		{"(Time).Hour", Method, 0, ""},
    +		{"(Time).ISOWeek", Method, 0, ""},
    +		{"(Time).In", Method, 0, ""},
    +		{"(Time).IsDST", Method, 17, ""},
    +		{"(Time).IsZero", Method, 0, ""},
    +		{"(Time).Local", Method, 0, ""},
    +		{"(Time).Location", Method, 0, ""},
    +		{"(Time).MarshalBinary", Method, 2, ""},
    +		{"(Time).MarshalJSON", Method, 0, ""},
    +		{"(Time).MarshalText", Method, 2, ""},
    +		{"(Time).Minute", Method, 0, ""},
    +		{"(Time).Month", Method, 0, ""},
    +		{"(Time).Nanosecond", Method, 0, ""},
    +		{"(Time).Round", Method, 1, ""},
    +		{"(Time).Second", Method, 0, ""},
    +		{"(Time).String", Method, 0, ""},
    +		{"(Time).Sub", Method, 0, ""},
    +		{"(Time).Truncate", Method, 1, ""},
    +		{"(Time).UTC", Method, 0, ""},
    +		{"(Time).Unix", Method, 0, ""},
    +		{"(Time).UnixMicro", Method, 17, ""},
    +		{"(Time).UnixMilli", Method, 17, ""},
    +		{"(Time).UnixNano", Method, 0, ""},
    +		{"(Time).Weekday", Method, 0, ""},
    +		{"(Time).Year", Method, 0, ""},
    +		{"(Time).YearDay", Method, 1, ""},
    +		{"(Time).Zone", Method, 0, ""},
    +		{"(Time).ZoneBounds", Method, 19, ""},
    +		{"(Weekday).String", Method, 0, ""},
    +		{"ANSIC", Const, 0, ""},
    +		{"After", Func, 0, "func(d Duration) <-chan Time"},
    +		{"AfterFunc", Func, 0, "func(d Duration, f func()) *Timer"},
    +		{"April", Const, 0, ""},
    +		{"August", Const, 0, ""},
    +		{"Date", Func, 0, "func(year int, month Month, day int, hour int, min int, sec int, nsec int, loc *Location) Time"},
    +		{"DateOnly", Const, 20, ""},
    +		{"DateTime", Const, 20, ""},
    +		{"December", Const, 0, ""},
    +		{"Duration", Type, 0, ""},
    +		{"February", Const, 0, ""},
    +		{"FixedZone", Func, 0, "func(name string, offset int) *Location"},
    +		{"Friday", Const, 0, ""},
    +		{"Hour", Const, 0, ""},
    +		{"January", Const, 0, ""},
    +		{"July", Const, 0, ""},
    +		{"June", Const, 0, ""},
    +		{"Kitchen", Const, 0, ""},
    +		{"Layout", Const, 17, ""},
    +		{"LoadLocation", Func, 0, "func(name string) (*Location, error)"},
    +		{"LoadLocationFromTZData", Func, 10, "func(name string, data []byte) (*Location, error)"},
    +		{"Local", Var, 0, ""},
    +		{"Location", Type, 0, ""},
    +		{"March", Const, 0, ""},
    +		{"May", Const, 0, ""},
    +		{"Microsecond", Const, 0, ""},
    +		{"Millisecond", Const, 0, ""},
    +		{"Minute", Const, 0, ""},
    +		{"Monday", Const, 0, ""},
    +		{"Month", Type, 0, ""},
    +		{"Nanosecond", Const, 0, ""},
    +		{"NewTicker", Func, 0, "func(d Duration) *Ticker"},
    +		{"NewTimer", Func, 0, "func(d Duration) *Timer"},
    +		{"November", Const, 0, ""},
    +		{"Now", Func, 0, "func() Time"},
    +		{"October", Const, 0, ""},
    +		{"Parse", Func, 0, "func(layout string, value string) (Time, error)"},
    +		{"ParseDuration", Func, 0, "func(s string) (Duration, error)"},
    +		{"ParseError", Type, 0, ""},
    +		{"ParseError.Layout", Field, 0, ""},
    +		{"ParseError.LayoutElem", Field, 0, ""},
    +		{"ParseError.Message", Field, 0, ""},
    +		{"ParseError.Value", Field, 0, ""},
    +		{"ParseError.ValueElem", Field, 0, ""},
    +		{"ParseInLocation", Func, 1, "func(layout string, value string, loc *Location) (Time, error)"},
    +		{"RFC1123", Const, 0, ""},
    +		{"RFC1123Z", Const, 0, ""},
    +		{"RFC3339", Const, 0, ""},
    +		{"RFC3339Nano", Const, 0, ""},
    +		{"RFC822", Const, 0, ""},
    +		{"RFC822Z", Const, 0, ""},
    +		{"RFC850", Const, 0, ""},
    +		{"RubyDate", Const, 0, ""},
    +		{"Saturday", Const, 0, ""},
    +		{"Second", Const, 0, ""},
    +		{"September", Const, 0, ""},
    +		{"Since", Func, 0, "func(t Time) Duration"},
    +		{"Sleep", Func, 0, "func(d Duration)"},
    +		{"Stamp", Const, 0, ""},
    +		{"StampMicro", Const, 0, ""},
    +		{"StampMilli", Const, 0, ""},
    +		{"StampNano", Const, 0, ""},
    +		{"Sunday", Const, 0, ""},
    +		{"Thursday", Const, 0, ""},
    +		{"Tick", Func, 0, "func(d Duration) <-chan Time"},
    +		{"Ticker", Type, 0, ""},
    +		{"Ticker.C", Field, 0, ""},
    +		{"Time", Type, 0, ""},
    +		{"TimeOnly", Const, 20, ""},
    +		{"Timer", Type, 0, ""},
    +		{"Timer.C", Field, 0, ""},
    +		{"Tuesday", Const, 0, ""},
    +		{"UTC", Var, 0, ""},
    +		{"Unix", Func, 0, "func(sec int64, nsec int64) Time"},
    +		{"UnixDate", Const, 0, ""},
    +		{"UnixMicro", Func, 17, "func(usec int64) Time"},
    +		{"UnixMilli", Func, 17, "func(msec int64) Time"},
    +		{"Until", Func, 8, "func(t Time) Duration"},
    +		{"Wednesday", Const, 0, ""},
    +		{"Weekday", Type, 0, ""},
     	},
     	"unicode": {
    -		{"(SpecialCase).ToLower", Method, 0},
    -		{"(SpecialCase).ToTitle", Method, 0},
    -		{"(SpecialCase).ToUpper", Method, 0},
    -		{"ASCII_Hex_Digit", Var, 0},
    -		{"Adlam", Var, 7},
    -		{"Ahom", Var, 5},
    -		{"Anatolian_Hieroglyphs", Var, 5},
    -		{"Arabic", Var, 0},
    -		{"Armenian", Var, 0},
    -		{"Avestan", Var, 0},
    -		{"AzeriCase", Var, 0},
    -		{"Balinese", Var, 0},
    -		{"Bamum", Var, 0},
    -		{"Bassa_Vah", Var, 4},
    -		{"Batak", Var, 0},
    -		{"Bengali", Var, 0},
    -		{"Bhaiksuki", Var, 7},
    -		{"Bidi_Control", Var, 0},
    -		{"Bopomofo", Var, 0},
    -		{"Brahmi", Var, 0},
    -		{"Braille", Var, 0},
    -		{"Buginese", Var, 0},
    -		{"Buhid", Var, 0},
    -		{"C", Var, 0},
    -		{"Canadian_Aboriginal", Var, 0},
    -		{"Carian", Var, 0},
    -		{"CaseRange", Type, 0},
    -		{"CaseRange.Delta", Field, 0},
    -		{"CaseRange.Hi", Field, 0},
    -		{"CaseRange.Lo", Field, 0},
    -		{"CaseRanges", Var, 0},
    -		{"Categories", Var, 0},
    -		{"Caucasian_Albanian", Var, 4},
    -		{"Cc", Var, 0},
    -		{"Cf", Var, 0},
    -		{"Chakma", Var, 1},
    -		{"Cham", Var, 0},
    -		{"Cherokee", Var, 0},
    -		{"Chorasmian", Var, 16},
    -		{"Co", Var, 0},
    -		{"Common", Var, 0},
    -		{"Coptic", Var, 0},
    -		{"Cs", Var, 0},
    -		{"Cuneiform", Var, 0},
    -		{"Cypriot", Var, 0},
    -		{"Cypro_Minoan", Var, 21},
    -		{"Cyrillic", Var, 0},
    -		{"Dash", Var, 0},
    -		{"Deprecated", Var, 0},
    -		{"Deseret", Var, 0},
    -		{"Devanagari", Var, 0},
    -		{"Diacritic", Var, 0},
    -		{"Digit", Var, 0},
    -		{"Dives_Akuru", Var, 16},
    -		{"Dogra", Var, 13},
    -		{"Duployan", Var, 4},
    -		{"Egyptian_Hieroglyphs", Var, 0},
    -		{"Elbasan", Var, 4},
    -		{"Elymaic", Var, 14},
    -		{"Ethiopic", Var, 0},
    -		{"Extender", Var, 0},
    -		{"FoldCategory", Var, 0},
    -		{"FoldScript", Var, 0},
    -		{"Georgian", Var, 0},
    -		{"Glagolitic", Var, 0},
    -		{"Gothic", Var, 0},
    -		{"Grantha", Var, 4},
    -		{"GraphicRanges", Var, 0},
    -		{"Greek", Var, 0},
    -		{"Gujarati", Var, 0},
    -		{"Gunjala_Gondi", Var, 13},
    -		{"Gurmukhi", Var, 0},
    -		{"Han", Var, 0},
    -		{"Hangul", Var, 0},
    -		{"Hanifi_Rohingya", Var, 13},
    -		{"Hanunoo", Var, 0},
    -		{"Hatran", Var, 5},
    -		{"Hebrew", Var, 0},
    -		{"Hex_Digit", Var, 0},
    -		{"Hiragana", Var, 0},
    -		{"Hyphen", Var, 0},
    -		{"IDS_Binary_Operator", Var, 0},
    -		{"IDS_Trinary_Operator", Var, 0},
    -		{"Ideographic", Var, 0},
    -		{"Imperial_Aramaic", Var, 0},
    -		{"In", Func, 2},
    -		{"Inherited", Var, 0},
    -		{"Inscriptional_Pahlavi", Var, 0},
    -		{"Inscriptional_Parthian", Var, 0},
    -		{"Is", Func, 0},
    -		{"IsControl", Func, 0},
    -		{"IsDigit", Func, 0},
    -		{"IsGraphic", Func, 0},
    -		{"IsLetter", Func, 0},
    -		{"IsLower", Func, 0},
    -		{"IsMark", Func, 0},
    -		{"IsNumber", Func, 0},
    -		{"IsOneOf", Func, 0},
    -		{"IsPrint", Func, 0},
    -		{"IsPunct", Func, 0},
    -		{"IsSpace", Func, 0},
    -		{"IsSymbol", Func, 0},
    -		{"IsTitle", Func, 0},
    -		{"IsUpper", Func, 0},
    -		{"Javanese", Var, 0},
    -		{"Join_Control", Var, 0},
    -		{"Kaithi", Var, 0},
    -		{"Kannada", Var, 0},
    -		{"Katakana", Var, 0},
    -		{"Kawi", Var, 21},
    -		{"Kayah_Li", Var, 0},
    -		{"Kharoshthi", Var, 0},
    -		{"Khitan_Small_Script", Var, 16},
    -		{"Khmer", Var, 0},
    -		{"Khojki", Var, 4},
    -		{"Khudawadi", Var, 4},
    -		{"L", Var, 0},
    -		{"Lao", Var, 0},
    -		{"Latin", Var, 0},
    -		{"Lepcha", Var, 0},
    -		{"Letter", Var, 0},
    -		{"Limbu", Var, 0},
    -		{"Linear_A", Var, 4},
    -		{"Linear_B", Var, 0},
    -		{"Lisu", Var, 0},
    -		{"Ll", Var, 0},
    -		{"Lm", Var, 0},
    -		{"Lo", Var, 0},
    -		{"Logical_Order_Exception", Var, 0},
    -		{"Lower", Var, 0},
    -		{"LowerCase", Const, 0},
    -		{"Lt", Var, 0},
    -		{"Lu", Var, 0},
    -		{"Lycian", Var, 0},
    -		{"Lydian", Var, 0},
    -		{"M", Var, 0},
    -		{"Mahajani", Var, 4},
    -		{"Makasar", Var, 13},
    -		{"Malayalam", Var, 0},
    -		{"Mandaic", Var, 0},
    -		{"Manichaean", Var, 4},
    -		{"Marchen", Var, 7},
    -		{"Mark", Var, 0},
    -		{"Masaram_Gondi", Var, 10},
    -		{"MaxASCII", Const, 0},
    -		{"MaxCase", Const, 0},
    -		{"MaxLatin1", Const, 0},
    -		{"MaxRune", Const, 0},
    -		{"Mc", Var, 0},
    -		{"Me", Var, 0},
    -		{"Medefaidrin", Var, 13},
    -		{"Meetei_Mayek", Var, 0},
    -		{"Mende_Kikakui", Var, 4},
    -		{"Meroitic_Cursive", Var, 1},
    -		{"Meroitic_Hieroglyphs", Var, 1},
    -		{"Miao", Var, 1},
    -		{"Mn", Var, 0},
    -		{"Modi", Var, 4},
    -		{"Mongolian", Var, 0},
    -		{"Mro", Var, 4},
    -		{"Multani", Var, 5},
    -		{"Myanmar", Var, 0},
    -		{"N", Var, 0},
    -		{"Nabataean", Var, 4},
    -		{"Nag_Mundari", Var, 21},
    -		{"Nandinagari", Var, 14},
    -		{"Nd", Var, 0},
    -		{"New_Tai_Lue", Var, 0},
    -		{"Newa", Var, 7},
    -		{"Nko", Var, 0},
    -		{"Nl", Var, 0},
    -		{"No", Var, 0},
    -		{"Noncharacter_Code_Point", Var, 0},
    -		{"Number", Var, 0},
    -		{"Nushu", Var, 10},
    -		{"Nyiakeng_Puachue_Hmong", Var, 14},
    -		{"Ogham", Var, 0},
    -		{"Ol_Chiki", Var, 0},
    -		{"Old_Hungarian", Var, 5},
    -		{"Old_Italic", Var, 0},
    -		{"Old_North_Arabian", Var, 4},
    -		{"Old_Permic", Var, 4},
    -		{"Old_Persian", Var, 0},
    -		{"Old_Sogdian", Var, 13},
    -		{"Old_South_Arabian", Var, 0},
    -		{"Old_Turkic", Var, 0},
    -		{"Old_Uyghur", Var, 21},
    -		{"Oriya", Var, 0},
    -		{"Osage", Var, 7},
    -		{"Osmanya", Var, 0},
    -		{"Other", Var, 0},
    -		{"Other_Alphabetic", Var, 0},
    -		{"Other_Default_Ignorable_Code_Point", Var, 0},
    -		{"Other_Grapheme_Extend", Var, 0},
    -		{"Other_ID_Continue", Var, 0},
    -		{"Other_ID_Start", Var, 0},
    -		{"Other_Lowercase", Var, 0},
    -		{"Other_Math", Var, 0},
    -		{"Other_Uppercase", Var, 0},
    -		{"P", Var, 0},
    -		{"Pahawh_Hmong", Var, 4},
    -		{"Palmyrene", Var, 4},
    -		{"Pattern_Syntax", Var, 0},
    -		{"Pattern_White_Space", Var, 0},
    -		{"Pau_Cin_Hau", Var, 4},
    -		{"Pc", Var, 0},
    -		{"Pd", Var, 0},
    -		{"Pe", Var, 0},
    -		{"Pf", Var, 0},
    -		{"Phags_Pa", Var, 0},
    -		{"Phoenician", Var, 0},
    -		{"Pi", Var, 0},
    -		{"Po", Var, 0},
    -		{"Prepended_Concatenation_Mark", Var, 7},
    -		{"PrintRanges", Var, 0},
    -		{"Properties", Var, 0},
    -		{"Ps", Var, 0},
    -		{"Psalter_Pahlavi", Var, 4},
    -		{"Punct", Var, 0},
    -		{"Quotation_Mark", Var, 0},
    -		{"Radical", Var, 0},
    -		{"Range16", Type, 0},
    -		{"Range16.Hi", Field, 0},
    -		{"Range16.Lo", Field, 0},
    -		{"Range16.Stride", Field, 0},
    -		{"Range32", Type, 0},
    -		{"Range32.Hi", Field, 0},
    -		{"Range32.Lo", Field, 0},
    -		{"Range32.Stride", Field, 0},
    -		{"RangeTable", Type, 0},
    -		{"RangeTable.LatinOffset", Field, 1},
    -		{"RangeTable.R16", Field, 0},
    -		{"RangeTable.R32", Field, 0},
    -		{"Regional_Indicator", Var, 10},
    -		{"Rejang", Var, 0},
    -		{"ReplacementChar", Const, 0},
    -		{"Runic", Var, 0},
    -		{"S", Var, 0},
    -		{"STerm", Var, 0},
    -		{"Samaritan", Var, 0},
    -		{"Saurashtra", Var, 0},
    -		{"Sc", Var, 0},
    -		{"Scripts", Var, 0},
    -		{"Sentence_Terminal", Var, 7},
    -		{"Sharada", Var, 1},
    -		{"Shavian", Var, 0},
    -		{"Siddham", Var, 4},
    -		{"SignWriting", Var, 5},
    -		{"SimpleFold", Func, 0},
    -		{"Sinhala", Var, 0},
    -		{"Sk", Var, 0},
    -		{"Sm", Var, 0},
    -		{"So", Var, 0},
    -		{"Soft_Dotted", Var, 0},
    -		{"Sogdian", Var, 13},
    -		{"Sora_Sompeng", Var, 1},
    -		{"Soyombo", Var, 10},
    -		{"Space", Var, 0},
    -		{"SpecialCase", Type, 0},
    -		{"Sundanese", Var, 0},
    -		{"Syloti_Nagri", Var, 0},
    -		{"Symbol", Var, 0},
    -		{"Syriac", Var, 0},
    -		{"Tagalog", Var, 0},
    -		{"Tagbanwa", Var, 0},
    -		{"Tai_Le", Var, 0},
    -		{"Tai_Tham", Var, 0},
    -		{"Tai_Viet", Var, 0},
    -		{"Takri", Var, 1},
    -		{"Tamil", Var, 0},
    -		{"Tangsa", Var, 21},
    -		{"Tangut", Var, 7},
    -		{"Telugu", Var, 0},
    -		{"Terminal_Punctuation", Var, 0},
    -		{"Thaana", Var, 0},
    -		{"Thai", Var, 0},
    -		{"Tibetan", Var, 0},
    -		{"Tifinagh", Var, 0},
    -		{"Tirhuta", Var, 4},
    -		{"Title", Var, 0},
    -		{"TitleCase", Const, 0},
    -		{"To", Func, 0},
    -		{"ToLower", Func, 0},
    -		{"ToTitle", Func, 0},
    -		{"ToUpper", Func, 0},
    -		{"Toto", Var, 21},
    -		{"TurkishCase", Var, 0},
    -		{"Ugaritic", Var, 0},
    -		{"Unified_Ideograph", Var, 0},
    -		{"Upper", Var, 0},
    -		{"UpperCase", Const, 0},
    -		{"UpperLower", Const, 0},
    -		{"Vai", Var, 0},
    -		{"Variation_Selector", Var, 0},
    -		{"Version", Const, 0},
    -		{"Vithkuqi", Var, 21},
    -		{"Wancho", Var, 14},
    -		{"Warang_Citi", Var, 4},
    -		{"White_Space", Var, 0},
    -		{"Yezidi", Var, 16},
    -		{"Yi", Var, 0},
    -		{"Z", Var, 0},
    -		{"Zanabazar_Square", Var, 10},
    -		{"Zl", Var, 0},
    -		{"Zp", Var, 0},
    -		{"Zs", Var, 0},
    +		{"(SpecialCase).ToLower", Method, 0, ""},
    +		{"(SpecialCase).ToTitle", Method, 0, ""},
    +		{"(SpecialCase).ToUpper", Method, 0, ""},
    +		{"ASCII_Hex_Digit", Var, 0, ""},
    +		{"Adlam", Var, 7, ""},
    +		{"Ahom", Var, 5, ""},
    +		{"Anatolian_Hieroglyphs", Var, 5, ""},
    +		{"Arabic", Var, 0, ""},
    +		{"Armenian", Var, 0, ""},
    +		{"Avestan", Var, 0, ""},
    +		{"AzeriCase", Var, 0, ""},
    +		{"Balinese", Var, 0, ""},
    +		{"Bamum", Var, 0, ""},
    +		{"Bassa_Vah", Var, 4, ""},
    +		{"Batak", Var, 0, ""},
    +		{"Bengali", Var, 0, ""},
    +		{"Bhaiksuki", Var, 7, ""},
    +		{"Bidi_Control", Var, 0, ""},
    +		{"Bopomofo", Var, 0, ""},
    +		{"Brahmi", Var, 0, ""},
    +		{"Braille", Var, 0, ""},
    +		{"Buginese", Var, 0, ""},
    +		{"Buhid", Var, 0, ""},
    +		{"C", Var, 0, ""},
    +		{"Canadian_Aboriginal", Var, 0, ""},
    +		{"Carian", Var, 0, ""},
    +		{"CaseRange", Type, 0, ""},
    +		{"CaseRange.Delta", Field, 0, ""},
    +		{"CaseRange.Hi", Field, 0, ""},
    +		{"CaseRange.Lo", Field, 0, ""},
    +		{"CaseRanges", Var, 0, ""},
    +		{"Categories", Var, 0, ""},
    +		{"CategoryAliases", Var, 25, ""},
    +		{"Caucasian_Albanian", Var, 4, ""},
    +		{"Cc", Var, 0, ""},
    +		{"Cf", Var, 0, ""},
    +		{"Chakma", Var, 1, ""},
    +		{"Cham", Var, 0, ""},
    +		{"Cherokee", Var, 0, ""},
    +		{"Chorasmian", Var, 16, ""},
    +		{"Cn", Var, 25, ""},
    +		{"Co", Var, 0, ""},
    +		{"Common", Var, 0, ""},
    +		{"Coptic", Var, 0, ""},
    +		{"Cs", Var, 0, ""},
    +		{"Cuneiform", Var, 0, ""},
    +		{"Cypriot", Var, 0, ""},
    +		{"Cypro_Minoan", Var, 21, ""},
    +		{"Cyrillic", Var, 0, ""},
    +		{"Dash", Var, 0, ""},
    +		{"Deprecated", Var, 0, ""},
    +		{"Deseret", Var, 0, ""},
    +		{"Devanagari", Var, 0, ""},
    +		{"Diacritic", Var, 0, ""},
    +		{"Digit", Var, 0, ""},
    +		{"Dives_Akuru", Var, 16, ""},
    +		{"Dogra", Var, 13, ""},
    +		{"Duployan", Var, 4, ""},
    +		{"Egyptian_Hieroglyphs", Var, 0, ""},
    +		{"Elbasan", Var, 4, ""},
    +		{"Elymaic", Var, 14, ""},
    +		{"Ethiopic", Var, 0, ""},
    +		{"Extender", Var, 0, ""},
    +		{"FoldCategory", Var, 0, ""},
    +		{"FoldScript", Var, 0, ""},
    +		{"Georgian", Var, 0, ""},
    +		{"Glagolitic", Var, 0, ""},
    +		{"Gothic", Var, 0, ""},
    +		{"Grantha", Var, 4, ""},
    +		{"GraphicRanges", Var, 0, ""},
    +		{"Greek", Var, 0, ""},
    +		{"Gujarati", Var, 0, ""},
    +		{"Gunjala_Gondi", Var, 13, ""},
    +		{"Gurmukhi", Var, 0, ""},
    +		{"Han", Var, 0, ""},
    +		{"Hangul", Var, 0, ""},
    +		{"Hanifi_Rohingya", Var, 13, ""},
    +		{"Hanunoo", Var, 0, ""},
    +		{"Hatran", Var, 5, ""},
    +		{"Hebrew", Var, 0, ""},
    +		{"Hex_Digit", Var, 0, ""},
    +		{"Hiragana", Var, 0, ""},
    +		{"Hyphen", Var, 0, ""},
    +		{"IDS_Binary_Operator", Var, 0, ""},
    +		{"IDS_Trinary_Operator", Var, 0, ""},
    +		{"Ideographic", Var, 0, ""},
    +		{"Imperial_Aramaic", Var, 0, ""},
    +		{"In", Func, 2, "func(r rune, ranges ...*RangeTable) bool"},
    +		{"Inherited", Var, 0, ""},
    +		{"Inscriptional_Pahlavi", Var, 0, ""},
    +		{"Inscriptional_Parthian", Var, 0, ""},
    +		{"Is", Func, 0, "func(rangeTab *RangeTable, r rune) bool"},
    +		{"IsControl", Func, 0, "func(r rune) bool"},
    +		{"IsDigit", Func, 0, "func(r rune) bool"},
    +		{"IsGraphic", Func, 0, "func(r rune) bool"},
    +		{"IsLetter", Func, 0, "func(r rune) bool"},
    +		{"IsLower", Func, 0, "func(r rune) bool"},
    +		{"IsMark", Func, 0, "func(r rune) bool"},
    +		{"IsNumber", Func, 0, "func(r rune) bool"},
    +		{"IsOneOf", Func, 0, "func(ranges []*RangeTable, r rune) bool"},
    +		{"IsPrint", Func, 0, "func(r rune) bool"},
    +		{"IsPunct", Func, 0, "func(r rune) bool"},
    +		{"IsSpace", Func, 0, "func(r rune) bool"},
    +		{"IsSymbol", Func, 0, "func(r rune) bool"},
    +		{"IsTitle", Func, 0, "func(r rune) bool"},
    +		{"IsUpper", Func, 0, "func(r rune) bool"},
    +		{"Javanese", Var, 0, ""},
    +		{"Join_Control", Var, 0, ""},
    +		{"Kaithi", Var, 0, ""},
    +		{"Kannada", Var, 0, ""},
    +		{"Katakana", Var, 0, ""},
    +		{"Kawi", Var, 21, ""},
    +		{"Kayah_Li", Var, 0, ""},
    +		{"Kharoshthi", Var, 0, ""},
    +		{"Khitan_Small_Script", Var, 16, ""},
    +		{"Khmer", Var, 0, ""},
    +		{"Khojki", Var, 4, ""},
    +		{"Khudawadi", Var, 4, ""},
    +		{"L", Var, 0, ""},
    +		{"LC", Var, 25, ""},
    +		{"Lao", Var, 0, ""},
    +		{"Latin", Var, 0, ""},
    +		{"Lepcha", Var, 0, ""},
    +		{"Letter", Var, 0, ""},
    +		{"Limbu", Var, 0, ""},
    +		{"Linear_A", Var, 4, ""},
    +		{"Linear_B", Var, 0, ""},
    +		{"Lisu", Var, 0, ""},
    +		{"Ll", Var, 0, ""},
    +		{"Lm", Var, 0, ""},
    +		{"Lo", Var, 0, ""},
    +		{"Logical_Order_Exception", Var, 0, ""},
    +		{"Lower", Var, 0, ""},
    +		{"LowerCase", Const, 0, ""},
    +		{"Lt", Var, 0, ""},
    +		{"Lu", Var, 0, ""},
    +		{"Lycian", Var, 0, ""},
    +		{"Lydian", Var, 0, ""},
    +		{"M", Var, 0, ""},
    +		{"Mahajani", Var, 4, ""},
    +		{"Makasar", Var, 13, ""},
    +		{"Malayalam", Var, 0, ""},
    +		{"Mandaic", Var, 0, ""},
    +		{"Manichaean", Var, 4, ""},
    +		{"Marchen", Var, 7, ""},
    +		{"Mark", Var, 0, ""},
    +		{"Masaram_Gondi", Var, 10, ""},
    +		{"MaxASCII", Const, 0, ""},
    +		{"MaxCase", Const, 0, ""},
    +		{"MaxLatin1", Const, 0, ""},
    +		{"MaxRune", Const, 0, ""},
    +		{"Mc", Var, 0, ""},
    +		{"Me", Var, 0, ""},
    +		{"Medefaidrin", Var, 13, ""},
    +		{"Meetei_Mayek", Var, 0, ""},
    +		{"Mende_Kikakui", Var, 4, ""},
    +		{"Meroitic_Cursive", Var, 1, ""},
    +		{"Meroitic_Hieroglyphs", Var, 1, ""},
    +		{"Miao", Var, 1, ""},
    +		{"Mn", Var, 0, ""},
    +		{"Modi", Var, 4, ""},
    +		{"Mongolian", Var, 0, ""},
    +		{"Mro", Var, 4, ""},
    +		{"Multani", Var, 5, ""},
    +		{"Myanmar", Var, 0, ""},
    +		{"N", Var, 0, ""},
    +		{"Nabataean", Var, 4, ""},
    +		{"Nag_Mundari", Var, 21, ""},
    +		{"Nandinagari", Var, 14, ""},
    +		{"Nd", Var, 0, ""},
    +		{"New_Tai_Lue", Var, 0, ""},
    +		{"Newa", Var, 7, ""},
    +		{"Nko", Var, 0, ""},
    +		{"Nl", Var, 0, ""},
    +		{"No", Var, 0, ""},
    +		{"Noncharacter_Code_Point", Var, 0, ""},
    +		{"Number", Var, 0, ""},
    +		{"Nushu", Var, 10, ""},
    +		{"Nyiakeng_Puachue_Hmong", Var, 14, ""},
    +		{"Ogham", Var, 0, ""},
    +		{"Ol_Chiki", Var, 0, ""},
    +		{"Old_Hungarian", Var, 5, ""},
    +		{"Old_Italic", Var, 0, ""},
    +		{"Old_North_Arabian", Var, 4, ""},
    +		{"Old_Permic", Var, 4, ""},
    +		{"Old_Persian", Var, 0, ""},
    +		{"Old_Sogdian", Var, 13, ""},
    +		{"Old_South_Arabian", Var, 0, ""},
    +		{"Old_Turkic", Var, 0, ""},
    +		{"Old_Uyghur", Var, 21, ""},
    +		{"Oriya", Var, 0, ""},
    +		{"Osage", Var, 7, ""},
    +		{"Osmanya", Var, 0, ""},
    +		{"Other", Var, 0, ""},
    +		{"Other_Alphabetic", Var, 0, ""},
    +		{"Other_Default_Ignorable_Code_Point", Var, 0, ""},
    +		{"Other_Grapheme_Extend", Var, 0, ""},
    +		{"Other_ID_Continue", Var, 0, ""},
    +		{"Other_ID_Start", Var, 0, ""},
    +		{"Other_Lowercase", Var, 0, ""},
    +		{"Other_Math", Var, 0, ""},
    +		{"Other_Uppercase", Var, 0, ""},
    +		{"P", Var, 0, ""},
    +		{"Pahawh_Hmong", Var, 4, ""},
    +		{"Palmyrene", Var, 4, ""},
    +		{"Pattern_Syntax", Var, 0, ""},
    +		{"Pattern_White_Space", Var, 0, ""},
    +		{"Pau_Cin_Hau", Var, 4, ""},
    +		{"Pc", Var, 0, ""},
    +		{"Pd", Var, 0, ""},
    +		{"Pe", Var, 0, ""},
    +		{"Pf", Var, 0, ""},
    +		{"Phags_Pa", Var, 0, ""},
    +		{"Phoenician", Var, 0, ""},
    +		{"Pi", Var, 0, ""},
    +		{"Po", Var, 0, ""},
    +		{"Prepended_Concatenation_Mark", Var, 7, ""},
    +		{"PrintRanges", Var, 0, ""},
    +		{"Properties", Var, 0, ""},
    +		{"Ps", Var, 0, ""},
    +		{"Psalter_Pahlavi", Var, 4, ""},
    +		{"Punct", Var, 0, ""},
    +		{"Quotation_Mark", Var, 0, ""},
    +		{"Radical", Var, 0, ""},
    +		{"Range16", Type, 0, ""},
    +		{"Range16.Hi", Field, 0, ""},
    +		{"Range16.Lo", Field, 0, ""},
    +		{"Range16.Stride", Field, 0, ""},
    +		{"Range32", Type, 0, ""},
    +		{"Range32.Hi", Field, 0, ""},
    +		{"Range32.Lo", Field, 0, ""},
    +		{"Range32.Stride", Field, 0, ""},
    +		{"RangeTable", Type, 0, ""},
    +		{"RangeTable.LatinOffset", Field, 1, ""},
    +		{"RangeTable.R16", Field, 0, ""},
    +		{"RangeTable.R32", Field, 0, ""},
    +		{"Regional_Indicator", Var, 10, ""},
    +		{"Rejang", Var, 0, ""},
    +		{"ReplacementChar", Const, 0, ""},
    +		{"Runic", Var, 0, ""},
    +		{"S", Var, 0, ""},
    +		{"STerm", Var, 0, ""},
    +		{"Samaritan", Var, 0, ""},
    +		{"Saurashtra", Var, 0, ""},
    +		{"Sc", Var, 0, ""},
    +		{"Scripts", Var, 0, ""},
    +		{"Sentence_Terminal", Var, 7, ""},
    +		{"Sharada", Var, 1, ""},
    +		{"Shavian", Var, 0, ""},
    +		{"Siddham", Var, 4, ""},
    +		{"SignWriting", Var, 5, ""},
    +		{"SimpleFold", Func, 0, "func(r rune) rune"},
    +		{"Sinhala", Var, 0, ""},
    +		{"Sk", Var, 0, ""},
    +		{"Sm", Var, 0, ""},
    +		{"So", Var, 0, ""},
    +		{"Soft_Dotted", Var, 0, ""},
    +		{"Sogdian", Var, 13, ""},
    +		{"Sora_Sompeng", Var, 1, ""},
    +		{"Soyombo", Var, 10, ""},
    +		{"Space", Var, 0, ""},
    +		{"SpecialCase", Type, 0, ""},
    +		{"Sundanese", Var, 0, ""},
    +		{"Syloti_Nagri", Var, 0, ""},
    +		{"Symbol", Var, 0, ""},
    +		{"Syriac", Var, 0, ""},
    +		{"Tagalog", Var, 0, ""},
    +		{"Tagbanwa", Var, 0, ""},
    +		{"Tai_Le", Var, 0, ""},
    +		{"Tai_Tham", Var, 0, ""},
    +		{"Tai_Viet", Var, 0, ""},
    +		{"Takri", Var, 1, ""},
    +		{"Tamil", Var, 0, ""},
    +		{"Tangsa", Var, 21, ""},
    +		{"Tangut", Var, 7, ""},
    +		{"Telugu", Var, 0, ""},
    +		{"Terminal_Punctuation", Var, 0, ""},
    +		{"Thaana", Var, 0, ""},
    +		{"Thai", Var, 0, ""},
    +		{"Tibetan", Var, 0, ""},
    +		{"Tifinagh", Var, 0, ""},
    +		{"Tirhuta", Var, 4, ""},
    +		{"Title", Var, 0, ""},
    +		{"TitleCase", Const, 0, ""},
    +		{"To", Func, 0, "func(_case int, r rune) rune"},
    +		{"ToLower", Func, 0, "func(r rune) rune"},
    +		{"ToTitle", Func, 0, "func(r rune) rune"},
    +		{"ToUpper", Func, 0, "func(r rune) rune"},
    +		{"Toto", Var, 21, ""},
    +		{"TurkishCase", Var, 0, ""},
    +		{"Ugaritic", Var, 0, ""},
    +		{"Unified_Ideograph", Var, 0, ""},
    +		{"Upper", Var, 0, ""},
    +		{"UpperCase", Const, 0, ""},
    +		{"UpperLower", Const, 0, ""},
    +		{"Vai", Var, 0, ""},
    +		{"Variation_Selector", Var, 0, ""},
    +		{"Version", Const, 0, ""},
    +		{"Vithkuqi", Var, 21, ""},
    +		{"Wancho", Var, 14, ""},
    +		{"Warang_Citi", Var, 4, ""},
    +		{"White_Space", Var, 0, ""},
    +		{"Yezidi", Var, 16, ""},
    +		{"Yi", Var, 0, ""},
    +		{"Z", Var, 0, ""},
    +		{"Zanabazar_Square", Var, 10, ""},
    +		{"Zl", Var, 0, ""},
    +		{"Zp", Var, 0, ""},
    +		{"Zs", Var, 0, ""},
     	},
     	"unicode/utf16": {
    -		{"AppendRune", Func, 20},
    -		{"Decode", Func, 0},
    -		{"DecodeRune", Func, 0},
    -		{"Encode", Func, 0},
    -		{"EncodeRune", Func, 0},
    -		{"IsSurrogate", Func, 0},
    -		{"RuneLen", Func, 23},
    +		{"AppendRune", Func, 20, "func(a []uint16, r rune) []uint16"},
    +		{"Decode", Func, 0, "func(s []uint16) []rune"},
    +		{"DecodeRune", Func, 0, "func(r1 rune, r2 rune) rune"},
    +		{"Encode", Func, 0, "func(s []rune) []uint16"},
    +		{"EncodeRune", Func, 0, "func(r rune) (r1 rune, r2 rune)"},
    +		{"IsSurrogate", Func, 0, "func(r rune) bool"},
    +		{"RuneLen", Func, 23, "func(r rune) int"},
     	},
     	"unicode/utf8": {
    -		{"AppendRune", Func, 18},
    -		{"DecodeLastRune", Func, 0},
    -		{"DecodeLastRuneInString", Func, 0},
    -		{"DecodeRune", Func, 0},
    -		{"DecodeRuneInString", Func, 0},
    -		{"EncodeRune", Func, 0},
    -		{"FullRune", Func, 0},
    -		{"FullRuneInString", Func, 0},
    -		{"MaxRune", Const, 0},
    -		{"RuneCount", Func, 0},
    -		{"RuneCountInString", Func, 0},
    -		{"RuneError", Const, 0},
    -		{"RuneLen", Func, 0},
    -		{"RuneSelf", Const, 0},
    -		{"RuneStart", Func, 0},
    -		{"UTFMax", Const, 0},
    -		{"Valid", Func, 0},
    -		{"ValidRune", Func, 1},
    -		{"ValidString", Func, 0},
    +		{"AppendRune", Func, 18, "func(p []byte, r rune) []byte"},
    +		{"DecodeLastRune", Func, 0, "func(p []byte) (r rune, size int)"},
    +		{"DecodeLastRuneInString", Func, 0, "func(s string) (r rune, size int)"},
    +		{"DecodeRune", Func, 0, "func(p []byte) (r rune, size int)"},
    +		{"DecodeRuneInString", Func, 0, "func(s string) (r rune, size int)"},
    +		{"EncodeRune", Func, 0, "func(p []byte, r rune) int"},
    +		{"FullRune", Func, 0, "func(p []byte) bool"},
    +		{"FullRuneInString", Func, 0, "func(s string) bool"},
    +		{"MaxRune", Const, 0, ""},
    +		{"RuneCount", Func, 0, "func(p []byte) int"},
    +		{"RuneCountInString", Func, 0, "func(s string) (n int)"},
    +		{"RuneError", Const, 0, ""},
    +		{"RuneLen", Func, 0, "func(r rune) int"},
    +		{"RuneSelf", Const, 0, ""},
    +		{"RuneStart", Func, 0, "func(b byte) bool"},
    +		{"UTFMax", Const, 0, ""},
    +		{"Valid", Func, 0, "func(p []byte) bool"},
    +		{"ValidRune", Func, 1, "func(r rune) bool"},
    +		{"ValidString", Func, 0, "func(s string) bool"},
     	},
     	"unique": {
    -		{"(Handle).Value", Method, 23},
    -		{"Handle", Type, 23},
    -		{"Make", Func, 23},
    +		{"(Handle).Value", Method, 23, ""},
    +		{"Handle", Type, 23, ""},
    +		{"Make", Func, 23, "func[T comparable](value T) Handle[T]"},
     	},
     	"unsafe": {
    -		{"Add", Func, 0},
    -		{"Alignof", Func, 0},
    -		{"Offsetof", Func, 0},
    -		{"Pointer", Type, 0},
    -		{"Sizeof", Func, 0},
    -		{"Slice", Func, 0},
    -		{"SliceData", Func, 0},
    -		{"String", Func, 0},
    -		{"StringData", Func, 0},
    +		{"Add", Func, 0, ""},
    +		{"Alignof", Func, 0, ""},
    +		{"Offsetof", Func, 0, ""},
    +		{"Pointer", Type, 0, ""},
    +		{"Sizeof", Func, 0, ""},
    +		{"Slice", Func, 0, ""},
    +		{"SliceData", Func, 0, ""},
    +		{"String", Func, 0, ""},
    +		{"StringData", Func, 0, ""},
    +	},
    +	"weak": {
    +		{"(Pointer).Value", Method, 24, ""},
    +		{"Make", Func, 24, "func[T any](ptr *T) Pointer[T]"},
    +		{"Pointer", Type, 24, ""},
     	},
     }
    diff --git a/vendor/golang.org/x/tools/internal/stdlib/stdlib.go b/vendor/golang.org/x/tools/internal/stdlib/stdlib.go
    index 98904017f..e223e0f34 100644
    --- a/vendor/golang.org/x/tools/internal/stdlib/stdlib.go
    +++ b/vendor/golang.org/x/tools/internal/stdlib/stdlib.go
    @@ -6,7 +6,7 @@
     
     // Package stdlib provides a table of all exported symbols in the
     // standard library, along with the version at which they first
    -// appeared.
    +// appeared. It also provides the import graph of std packages.
     package stdlib
     
     import (
    @@ -18,6 +18,14 @@ type Symbol struct {
     	Name    string
     	Kind    Kind
     	Version Version // Go version that first included the symbol
    +	// Signature provides the type of a function (defined only for Kind=Func).
    +	// Imported types are denoted as pkg.T; pkg is not fully qualified.
    +	// TODO(adonovan): use an unambiguous encoding that is parseable.
    +	//
    +	// Example2:
    +	//    func[M ~map[K]V, K comparable, V any](m M) M
    +	//    func(fi fs.FileInfo, link string) (*Header, error)
    +	Signature string // if Kind == stdlib.Func
     }
     
     // A Kind indicates the kind of a symbol:
    diff --git a/vendor/golang.org/x/tools/internal/typeparams/common.go b/vendor/golang.org/x/tools/internal/typeparams/common.go
    index 0b84acc5c..cdae2b8e8 100644
    --- a/vendor/golang.org/x/tools/internal/typeparams/common.go
    +++ b/vendor/golang.org/x/tools/internal/typeparams/common.go
    @@ -66,75 +66,3 @@ func IsTypeParam(t types.Type) bool {
     	_, ok := types.Unalias(t).(*types.TypeParam)
     	return ok
     }
    -
    -// GenericAssignableTo is a generalization of types.AssignableTo that
    -// implements the following rule for uninstantiated generic types:
    -//
    -// If V and T are generic named types, then V is considered assignable to T if,
    -// for every possible instantiation of V[A_1, ..., A_N], the instantiation
    -// T[A_1, ..., A_N] is valid and V[A_1, ..., A_N] implements T[A_1, ..., A_N].
    -//
    -// If T has structural constraints, they must be satisfied by V.
    -//
    -// For example, consider the following type declarations:
    -//
    -//	type Interface[T any] interface {
    -//		Accept(T)
    -//	}
    -//
    -//	type Container[T any] struct {
    -//		Element T
    -//	}
    -//
    -//	func (c Container[T]) Accept(t T) { c.Element = t }
    -//
    -// In this case, GenericAssignableTo reports that instantiations of Container
    -// are assignable to the corresponding instantiation of Interface.
    -func GenericAssignableTo(ctxt *types.Context, V, T types.Type) bool {
    -	V = types.Unalias(V)
    -	T = types.Unalias(T)
    -
    -	// If V and T are not both named, or do not have matching non-empty type
    -	// parameter lists, fall back on types.AssignableTo.
    -
    -	VN, Vnamed := V.(*types.Named)
    -	TN, Tnamed := T.(*types.Named)
    -	if !Vnamed || !Tnamed {
    -		return types.AssignableTo(V, T)
    -	}
    -
    -	vtparams := VN.TypeParams()
    -	ttparams := TN.TypeParams()
    -	if vtparams.Len() == 0 || vtparams.Len() != ttparams.Len() || VN.TypeArgs().Len() != 0 || TN.TypeArgs().Len() != 0 {
    -		return types.AssignableTo(V, T)
    -	}
    -
    -	// V and T have the same (non-zero) number of type params. Instantiate both
    -	// with the type parameters of V. This must always succeed for V, and will
    -	// succeed for T if and only if the type set of each type parameter of V is a
    -	// subset of the type set of the corresponding type parameter of T, meaning
    -	// that every instantiation of V corresponds to a valid instantiation of T.
    -
    -	// Minor optimization: ensure we share a context across the two
    -	// instantiations below.
    -	if ctxt == nil {
    -		ctxt = types.NewContext()
    -	}
    -
    -	var targs []types.Type
    -	for i := 0; i < vtparams.Len(); i++ {
    -		targs = append(targs, vtparams.At(i))
    -	}
    -
    -	vinst, err := types.Instantiate(ctxt, V, targs, true)
    -	if err != nil {
    -		panic("type parameters should satisfy their own constraints")
    -	}
    -
    -	tinst, err := types.Instantiate(ctxt, T, targs, true)
    -	if err != nil {
    -		return false
    -	}
    -
    -	return types.AssignableTo(vinst, tinst)
    -}
    diff --git a/vendor/golang.org/x/tools/internal/typeparams/coretype.go b/vendor/golang.org/x/tools/internal/typeparams/coretype.go
    index 6e83c6fb1..27a2b1792 100644
    --- a/vendor/golang.org/x/tools/internal/typeparams/coretype.go
    +++ b/vendor/golang.org/x/tools/internal/typeparams/coretype.go
    @@ -109,8 +109,13 @@ func CoreType(T types.Type) types.Type {
     //
     // NormalTerms makes no guarantees about the order of terms, except that it
     // is deterministic.
    -func NormalTerms(typ types.Type) ([]*types.Term, error) {
    -	switch typ := typ.Underlying().(type) {
    +func NormalTerms(T types.Type) ([]*types.Term, error) {
    +	// typeSetOf(T) == typeSetOf(Unalias(T))
    +	typ := types.Unalias(T)
    +	if named, ok := typ.(*types.Named); ok {
    +		typ = named.Underlying()
    +	}
    +	switch typ := typ.(type) {
     	case *types.TypeParam:
     		return StructuralTerms(typ)
     	case *types.Union:
    @@ -118,7 +123,7 @@ func NormalTerms(typ types.Type) ([]*types.Term, error) {
     	case *types.Interface:
     		return InterfaceTermSet(typ)
     	default:
    -		return []*types.Term{types.NewTerm(false, typ)}, nil
    +		return []*types.Term{types.NewTerm(false, T)}, nil
     	}
     }
     
    diff --git a/vendor/golang.org/x/tools/internal/typeparams/free.go b/vendor/golang.org/x/tools/internal/typeparams/free.go
    index 0ade5c294..709d2fc14 100644
    --- a/vendor/golang.org/x/tools/internal/typeparams/free.go
    +++ b/vendor/golang.org/x/tools/internal/typeparams/free.go
    @@ -70,7 +70,7 @@ func (w *Free) Has(typ types.Type) (res bool) {
     
     	case *types.Tuple:
     		n := t.Len()
    -		for i := 0; i < n; i++ {
    +		for i := range n {
     			if w.Has(t.At(i).Type()) {
     				return true
     			}
    diff --git a/vendor/golang.org/x/tools/internal/typeparams/normalize.go b/vendor/golang.org/x/tools/internal/typeparams/normalize.go
    index 93c80fdc9..f49802b8e 100644
    --- a/vendor/golang.org/x/tools/internal/typeparams/normalize.go
    +++ b/vendor/golang.org/x/tools/internal/typeparams/normalize.go
    @@ -120,7 +120,7 @@ type termSet struct {
     	terms    termlist
     }
     
    -func indentf(depth int, format string, args ...interface{}) {
    +func indentf(depth int, format string, args ...any) {
     	fmt.Fprintf(os.Stderr, strings.Repeat(".", depth)+format+"\n", args...)
     }
     
    diff --git a/vendor/golang.org/x/tools/internal/typeparams/termlist.go b/vendor/golang.org/x/tools/internal/typeparams/termlist.go
    index cbd12f801..9bc29143f 100644
    --- a/vendor/golang.org/x/tools/internal/typeparams/termlist.go
    +++ b/vendor/golang.org/x/tools/internal/typeparams/termlist.go
    @@ -1,3 +1,6 @@
    +// Code generated by "go test -run=Generate -write=all"; DO NOT EDIT.
    +// Source: ../../cmd/compile/internal/types2/termlist.go
    +
     // Copyright 2021 The Go Authors. All rights reserved.
     // Use of this source code is governed by a BSD-style
     // license that can be found in the LICENSE file.
    @@ -7,8 +10,8 @@
     package typeparams
     
     import (
    -	"bytes"
     	"go/types"
    +	"strings"
     )
     
     // A termlist represents the type set represented by the union
    @@ -22,15 +25,18 @@ type termlist []*term
     // It is in normal form.
     var allTermlist = termlist{new(term)}
     
    +// termSep is the separator used between individual terms.
    +const termSep = " | "
    +
     // String prints the termlist exactly (without normalization).
     func (xl termlist) String() string {
     	if len(xl) == 0 {
     		return "∅"
     	}
    -	var buf bytes.Buffer
    +	var buf strings.Builder
     	for i, x := range xl {
     		if i > 0 {
    -			buf.WriteString(" | ")
    +			buf.WriteString(termSep)
     		}
     		buf.WriteString(x.String())
     	}
    diff --git a/vendor/golang.org/x/tools/internal/typeparams/typeterm.go b/vendor/golang.org/x/tools/internal/typeparams/typeterm.go
    index 7350bb702..fa758cdc9 100644
    --- a/vendor/golang.org/x/tools/internal/typeparams/typeterm.go
    +++ b/vendor/golang.org/x/tools/internal/typeparams/typeterm.go
    @@ -1,3 +1,6 @@
    +// Code generated by "go test -run=Generate -write=all"; DO NOT EDIT.
    +// Source: ../../cmd/compile/internal/types2/typeterm.go
    +
     // Copyright 2021 The Go Authors. All rights reserved.
     // Use of this source code is governed by a BSD-style
     // license that can be found in the LICENSE file.
    diff --git a/vendor/golang.org/x/tools/internal/typesinternal/classify_call.go b/vendor/golang.org/x/tools/internal/typesinternal/classify_call.go
    new file mode 100644
    index 000000000..3db2a135b
    --- /dev/null
    +++ b/vendor/golang.org/x/tools/internal/typesinternal/classify_call.go
    @@ -0,0 +1,137 @@
    +// Copyright 2018 The Go Authors. All rights reserved.
    +// Use of this source code is governed by a BSD-style
    +// license that can be found in the LICENSE file.
    +
    +package typesinternal
    +
    +import (
    +	"fmt"
    +	"go/ast"
    +	"go/types"
    +	_ "unsafe"
    +)
    +
    +// CallKind describes the function position of an [*ast.CallExpr].
    +type CallKind int
    +
    +const (
    +	CallStatic     CallKind = iota // static call to known function
    +	CallInterface                  // dynamic call through an interface method
    +	CallDynamic                    // dynamic call of a func value
    +	CallBuiltin                    // call to a builtin function
    +	CallConversion                 // a conversion (not a call)
    +)
    +
    +var callKindNames = []string{
    +	"CallStatic",
    +	"CallInterface",
    +	"CallDynamic",
    +	"CallBuiltin",
    +	"CallConversion",
    +}
    +
    +func (k CallKind) String() string {
    +	if i := int(k); i >= 0 && i < len(callKindNames) {
    +		return callKindNames[i]
    +	}
    +	return fmt.Sprintf("typeutil.CallKind(%d)", k)
    +}
    +
    +// ClassifyCall classifies the function position of a call expression ([*ast.CallExpr]).
    +// It distinguishes among true function calls, calls to builtins, and type conversions,
    +// and further classifies function calls as static calls (where the function is known),
    +// dynamic interface calls, and other dynamic calls.
    +//
    +// For the declarations:
    +//
    +//	func f() {}
    +//	func g[T any]() {}
    +//	var v func()
    +//	var s []func()
    +//	type I interface { M() }
    +//	var i I
    +//
    +// ClassifyCall returns the following:
    +//
    +//	f()           CallStatic
    +//	g[int]()      CallStatic
    +//	i.M()         CallInterface
    +//	min(1, 2)     CallBuiltin
    +//	v()           CallDynamic
    +//	s[0]()        CallDynamic
    +//	int(x)        CallConversion
    +//	[]byte("")    CallConversion
    +func ClassifyCall(info *types.Info, call *ast.CallExpr) CallKind {
    +	if info.Types == nil {
    +		panic("ClassifyCall: info.Types is nil")
    +	}
    +	tv := info.Types[call.Fun]
    +	if tv.IsType() {
    +		return CallConversion
    +	}
    +	if tv.IsBuiltin() {
    +		return CallBuiltin
    +	}
    +	obj := info.Uses[UsedIdent(info, call.Fun)]
    +	// Classify the call by the type of the object, if any.
    +	switch obj := obj.(type) {
    +	case *types.Func:
    +		if interfaceMethod(obj) {
    +			return CallInterface
    +		}
    +		return CallStatic
    +	default:
    +		return CallDynamic
    +	}
    +}
    +
    +// UsedIdent returns the identifier such that info.Uses[UsedIdent(info, e)]
    +// is the [types.Object] used by e, if any.
    +//
    +// If e is one of various forms of reference:
    +//
    +//	f, c, v, T           lexical reference
    +//	pkg.X                qualified identifier
    +//	f[T] or pkg.F[K,V]   instantiations of the above kinds
    +//	expr.f               field or method value selector
    +//	T.f                  method expression selector
    +//
    +// UsedIdent returns the identifier whose is associated value in [types.Info.Uses]
    +// is the object to which it refers.
    +//
    +// For the declarations:
    +//
    +//	func F[T any] {...}
    +//	type I interface { M() }
    +//	var (
    +//	  x int
    +//	  s struct { f  int }
    +//	  a []int
    +//	  i I
    +//	)
    +//
    +// UsedIdent returns the following:
    +//
    +//	Expr          UsedIdent
    +//	x             x
    +//	s.f           f
    +//	F[int]        F
    +//	i.M           M
    +//	I.M           M
    +//	min           min
    +//	int           int
    +//	1             nil
    +//	a[0]          nil
    +//	[]byte        nil
    +//
    +// Note: if e is an instantiated function or method, UsedIdent returns
    +// the corresponding generic function or method on the generic type.
    +func UsedIdent(info *types.Info, e ast.Expr) *ast.Ident {
    +	return usedIdent(info, e)
    +}
    +
    +//go:linkname usedIdent golang.org/x/tools/go/types/typeutil.usedIdent
    +func usedIdent(info *types.Info, e ast.Expr) *ast.Ident
    +
    +//go:linkname interfaceMethod golang.org/x/tools/go/types/typeutil.interfaceMethod
    +func interfaceMethod(f *types.Func) bool
    diff --git a/vendor/golang.org/x/tools/internal/typesinternal/errorcode.go b/vendor/golang.org/x/tools/internal/typesinternal/errorcode.go
    index 131caab28..235a6defc 100644
    --- a/vendor/golang.org/x/tools/internal/typesinternal/errorcode.go
    +++ b/vendor/golang.org/x/tools/internal/typesinternal/errorcode.go
    @@ -966,7 +966,7 @@ const (
     	//  var _ = string(x)
     	InvalidConversion
     
    -	// InvalidUntypedConversion occurs when an there is no valid implicit
    +	// InvalidUntypedConversion occurs when there is no valid implicit
     	// conversion from an untyped value satisfying the type constraints of the
     	// context in which it is used.
     	//
    diff --git a/vendor/golang.org/x/tools/internal/typesinternal/fx.go b/vendor/golang.org/x/tools/internal/typesinternal/fx.go
    new file mode 100644
    index 000000000..93acff217
    --- /dev/null
    +++ b/vendor/golang.org/x/tools/internal/typesinternal/fx.go
    @@ -0,0 +1,49 @@
    +// Copyright 2025 The Go Authors. All rights reserved.
    +// Use of this source code is governed by a BSD-style
    +// license that can be found in the LICENSE file.
    +
    +package typesinternal
    +
    +import (
    +	"go/ast"
    +	"go/token"
    +	"go/types"
    +)
    +
    +// NoEffects reports whether the expression has no side effects, i.e., it
    +// does not modify the memory state. This function is conservative: it may
    +// return false even when the expression has no effect.
    +func NoEffects(info *types.Info, expr ast.Expr) bool {
    +	noEffects := true
    +	ast.Inspect(expr, func(n ast.Node) bool {
    +		switch v := n.(type) {
    +		case nil, *ast.Ident, *ast.BasicLit, *ast.BinaryExpr, *ast.ParenExpr,
    +			*ast.SelectorExpr, *ast.IndexExpr, *ast.SliceExpr, *ast.TypeAssertExpr,
    +			*ast.StarExpr, *ast.CompositeLit, *ast.ArrayType, *ast.StructType,
    +			*ast.MapType, *ast.InterfaceType, *ast.KeyValueExpr:
    +			// No effect
    +		case *ast.UnaryExpr:
    +			// Channel send <-ch has effects
    +			if v.Op == token.ARROW {
    +				noEffects = false
    +			}
    +		case *ast.CallExpr:
    +			// Type conversion has no effects
    +			if !info.Types[v.Fun].IsType() {
    +				// TODO(adonovan): Add a case for built-in functions without side
    +				// effects (by using callsPureBuiltin from tools/internal/refactor/inline)
    +
    +				noEffects = false
    +			}
    +		case *ast.FuncLit:
    +			// A FuncLit has no effects, but do not descend into it.
    +			return false
    +		default:
    +			// All other expressions have effects
    +			noEffects = false
    +		}
    +
    +		return noEffects
    +	})
    +	return noEffects
    +}
    diff --git a/vendor/golang.org/x/tools/internal/typesinternal/isnamed.go b/vendor/golang.org/x/tools/internal/typesinternal/isnamed.go
    new file mode 100644
    index 000000000..f2affec4f
    --- /dev/null
    +++ b/vendor/golang.org/x/tools/internal/typesinternal/isnamed.go
    @@ -0,0 +1,71 @@
    +// Copyright 2025 The Go Authors. All rights reserved.
    +// Use of this source code is governed by a BSD-style
    +// license that can be found in the LICENSE file.
    +
    +package typesinternal
    +
    +import (
    +	"go/types"
    +	"slices"
    +)
    +
    +// IsTypeNamed reports whether t is (or is an alias for) a
    +// package-level defined type with the given package path and one of
    +// the given names. It returns false if t is nil.
    +//
    +// This function avoids allocating the concatenation of "pkg.Name",
    +// which is important for the performance of syntax matching.
    +func IsTypeNamed(t types.Type, pkgPath string, names ...string) bool {
    +	if named, ok := types.Unalias(t).(*types.Named); ok {
    +		tname := named.Obj()
    +		return tname != nil &&
    +			IsPackageLevel(tname) &&
    +			tname.Pkg().Path() == pkgPath &&
    +			slices.Contains(names, tname.Name())
    +	}
    +	return false
    +}
    +
    +// IsPointerToNamed reports whether t is (or is an alias for) a pointer to a
    +// package-level defined type with the given package path and one of the given
    +// names. It returns false if t is not a pointer type.
    +func IsPointerToNamed(t types.Type, pkgPath string, names ...string) bool {
    +	r := Unpointer(t)
    +	if r == t {
    +		return false
    +	}
    +	return IsTypeNamed(r, pkgPath, names...)
    +}
    +
    +// IsFunctionNamed reports whether obj is a package-level function
    +// defined in the given package and has one of the given names.
    +// It returns false if obj is nil.
    +//
    +// This function avoids allocating the concatenation of "pkg.Name",
    +// which is important for the performance of syntax matching.
    +func IsFunctionNamed(obj types.Object, pkgPath string, names ...string) bool {
    +	f, ok := obj.(*types.Func)
    +	return ok &&
    +		IsPackageLevel(obj) &&
    +		f.Pkg().Path() == pkgPath &&
    +		f.Type().(*types.Signature).Recv() == nil &&
    +		slices.Contains(names, f.Name())
    +}
    +
    +// IsMethodNamed reports whether obj is a method defined on a
    +// package-level type with the given package and type name, and has
    +// one of the given names. It returns false if obj is nil.
    +//
    +// This function avoids allocating the concatenation of "pkg.TypeName.Name",
    +// which is important for the performance of syntax matching.
    +func IsMethodNamed(obj types.Object, pkgPath string, typeName string, names ...string) bool {
    +	if fn, ok := obj.(*types.Func); ok {
    +		if recv := fn.Type().(*types.Signature).Recv(); recv != nil {
    +			_, T := ReceiverNamed(recv)
    +			return T != nil &&
    +				IsTypeNamed(T, pkgPath, typeName) &&
    +				slices.Contains(names, fn.Name())
    +		}
    +	}
    +	return false
    +}
    diff --git a/vendor/golang.org/x/tools/internal/typesinternal/qualifier.go b/vendor/golang.org/x/tools/internal/typesinternal/qualifier.go
    new file mode 100644
    index 000000000..64f47919f
    --- /dev/null
    +++ b/vendor/golang.org/x/tools/internal/typesinternal/qualifier.go
    @@ -0,0 +1,54 @@
    +// Copyright 2024 The Go Authors. All rights reserved.
    +// Use of this source code is governed by a BSD-style
    +// license that can be found in the LICENSE file.
    +
    +package typesinternal
    +
    +import (
    +	"go/ast"
    +	"go/types"
    +	"strconv"
    +)
    +
    +// FileQualifier returns a [types.Qualifier] function that qualifies
    +// imported symbols appropriately based on the import environment of a given
    +// file.
    +// If the same package is imported multiple times, the last appearance is
    +// recorded.
    +//
    +// TODO(adonovan): this function ignores the effect of shadowing. It
    +// should accept a [token.Pos] and a [types.Info] and compute only the
    +// set of imports that are not shadowed at that point, analogous to
    +// [analysisinternal.AddImport]. It could also compute (as a side
    +// effect) the set of additional imports required to ensure that there
    +// is an accessible import for each necessary package, making it
    +// converge even more closely with AddImport.
    +func FileQualifier(f *ast.File, pkg *types.Package) types.Qualifier {
    +	// Construct mapping of import paths to their defined names.
    +	// It is only necessary to look at renaming imports.
    +	imports := make(map[string]string)
    +	for _, imp := range f.Imports {
    +		if imp.Name != nil && imp.Name.Name != "_" {
    +			path, _ := strconv.Unquote(imp.Path.Value)
    +			imports[path] = imp.Name.Name
    +		}
    +	}
    +
    +	// Define qualifier to replace full package paths with names of the imports.
    +	return func(p *types.Package) string {
    +		if p == nil || p == pkg {
    +			return ""
    +		}
    +
    +		if name, ok := imports[p.Path()]; ok {
    +			if name == "." {
    +				return ""
    +			} else {
    +				return name
    +			}
    +		}
    +
    +		// If there is no local renaming, fall back to the package name.
    +		return p.Name()
    +	}
    +}
    diff --git a/vendor/golang.org/x/tools/internal/typesinternal/recv.go b/vendor/golang.org/x/tools/internal/typesinternal/recv.go
    index ba6f4f4eb..8352ea761 100644
    --- a/vendor/golang.org/x/tools/internal/typesinternal/recv.go
    +++ b/vendor/golang.org/x/tools/internal/typesinternal/recv.go
    @@ -11,6 +11,9 @@ import (
     // ReceiverNamed returns the named type (if any) associated with the
     // type of recv, which may be of the form N or *N, or aliases thereof.
     // It also reports whether a Pointer was present.
    +//
    +// The named result may be nil if recv is from a method on an
    +// anonymous interface or struct types or in ill-typed code.
     func ReceiverNamed(recv *types.Var) (isPtr bool, named *types.Named) {
     	t := recv.Type()
     	if ptr, ok := types.Unalias(t).(*types.Pointer); ok {
    diff --git a/vendor/golang.org/x/tools/internal/typesinternal/types.go b/vendor/golang.org/x/tools/internal/typesinternal/types.go
    index df3ea5212..fef74a785 100644
    --- a/vendor/golang.org/x/tools/internal/typesinternal/types.go
    +++ b/vendor/golang.org/x/tools/internal/typesinternal/types.go
    @@ -2,16 +2,30 @@
     // Use of this source code is governed by a BSD-style
     // license that can be found in the LICENSE file.
     
    -// Package typesinternal provides access to internal go/types APIs that are not
    -// yet exported.
    +// Package typesinternal provides helpful operators for dealing with
    +// go/types:
    +//
    +//   - operators for querying typed syntax trees (e.g. [Imports], [IsFunctionNamed]);
    +//   - functions for converting types to strings or syntax (e.g. [TypeExpr], FileQualifier]);
    +//   - helpers for working with the [go/types] API (e.g. [NewTypesInfo]);
    +//   - access to internal go/types APIs that are not yet
    +//     exported (e.g. [SetUsesCgo], [ErrorCodeStartEnd], [VarKind]); and
    +//   - common algorithms related to types (e.g. [TooNewStdSymbols]).
    +//
    +// See also:
    +//   - [golang.org/x/tools/internal/astutil], for operations on untyped syntax;
    +//   - [golang.org/x/tools/internal/analysisinernal], for helpers for analyzers;
    +//   - [golang.org/x/tools/internal/refactor], for operators to compute text edits.
     package typesinternal
     
     import (
    +	"go/ast"
     	"go/token"
     	"go/types"
     	"reflect"
     	"unsafe"
     
    +	"golang.org/x/tools/go/ast/inspector"
     	"golang.org/x/tools/internal/aliases"
     )
     
    @@ -32,12 +46,14 @@ func SetUsesCgo(conf *types.Config) bool {
     	return true
     }
     
    -// ReadGo116ErrorData extracts additional information from types.Error values
    +// ErrorCodeStartEnd extracts additional information from types.Error values
     // generated by Go version 1.16 and later: the error code, start position, and
     // end position. If all positions are valid, start <= err.Pos <= end.
     //
     // If the data could not be read, the final result parameter will be false.
    -func ReadGo116ErrorData(err types.Error) (code ErrorCode, start, end token.Pos, ok bool) {
    +//
    +// TODO(adonovan): eliminate start/end when proposal #71803 is accepted.
    +func ErrorCodeStartEnd(err types.Error) (code ErrorCode, start, end token.Pos, ok bool) {
     	var data [3]int
     	// By coincidence all of these fields are ints, which simplifies things.
     	v := reflect.ValueOf(err)
    @@ -57,6 +73,9 @@ func ReadGo116ErrorData(err types.Error) (code ErrorCode, start, end token.Pos,
     // which is often excessive.)
     //
     // If pkg is nil, it is equivalent to [*types.Package.Name].
    +//
    +// TODO(adonovan): all uses of this with TypeString should be
    +// eliminated when https://go.dev/issues/75604 is resolved.
     func NameRelativeTo(pkg *types.Package) types.Qualifier {
     	return func(other *types.Package) string {
     		if pkg != nil && pkg == other {
    @@ -66,6 +85,34 @@ func NameRelativeTo(pkg *types.Package) types.Qualifier {
     	}
     }
     
    +// TypeNameFor returns the type name symbol for the specified type, if
    +// it is a [*types.Alias], [*types.Named], [*types.TypeParam], or a
    +// [*types.Basic] representing a type.
    +//
    +// For all other types, and for Basic types representing a builtin,
    +// constant, or nil, it returns nil. Be careful not to convert the
    +// resulting nil pointer to a [types.Object]!
    +//
    +// If t is the type of a constant, it may be an "untyped" type, which
    +// has no TypeName. To access the name of such types (e.g. "untyped
    +// int"), use [types.Basic.Name].
    +func TypeNameFor(t types.Type) *types.TypeName {
    +	switch t := t.(type) {
    +	case *types.Alias:
    +		return t.Obj()
    +	case *types.Named:
    +		return t.Obj()
    +	case *types.TypeParam:
    +		return t.Obj()
    +	case *types.Basic:
    +		// See issues #71886 and #66890 for some history.
    +		if tname, ok := types.Universe.Lookup(t.Name()).(*types.TypeName); ok {
    +			return tname
    +		}
    +	}
    +	return nil
    +}
    +
     // A NamedOrAlias is a [types.Type] that is named (as
     // defined by the spec) and capable of bearing type parameters: it
     // abstracts aliases ([types.Alias]) and defined types
    @@ -74,7 +121,7 @@ func NameRelativeTo(pkg *types.Package) types.Qualifier {
     // Every type declared by an explicit "type" declaration is a
     // NamedOrAlias. (Built-in type symbols may additionally
     // have type [types.Basic], which is not a NamedOrAlias,
    -// though the spec regards them as "named".)
    +// though the spec regards them as "named"; see [TypeNameFor].)
     //
     // NamedOrAlias cannot expose the Origin method, because
     // [types.Alias.Origin] and [types.Named.Origin] have different
    @@ -82,31 +129,15 @@ func NameRelativeTo(pkg *types.Package) types.Qualifier {
     type NamedOrAlias interface {
     	types.Type
     	Obj() *types.TypeName
    +	TypeArgs() *types.TypeList
    +	TypeParams() *types.TypeParamList
    +	SetTypeParams(tparams []*types.TypeParam)
     }
     
    -// TypeParams is a light shim around t.TypeParams().
    -// (go/types.Alias).TypeParams requires >= 1.23.
    -func TypeParams(t NamedOrAlias) *types.TypeParamList {
    -	switch t := t.(type) {
    -	case *types.Alias:
    -		return aliases.TypeParams(t)
    -	case *types.Named:
    -		return t.TypeParams()
    -	}
    -	return nil
    -}
    -
    -// TypeArgs is a light shim around t.TypeArgs().
    -// (go/types.Alias).TypeArgs requires >= 1.23.
    -func TypeArgs(t NamedOrAlias) *types.TypeList {
    -	switch t := t.(type) {
    -	case *types.Alias:
    -		return aliases.TypeArgs(t)
    -	case *types.Named:
    -		return t.TypeArgs()
    -	}
    -	return nil
    -}
    +var (
    +	_ NamedOrAlias = (*types.Alias)(nil)
    +	_ NamedOrAlias = (*types.Named)(nil)
    +)
     
     // Origin returns the generic type of the Named or Alias type t if it
     // is instantiated, otherwise it returns t.
    @@ -119,3 +150,50 @@ func Origin(t NamedOrAlias) NamedOrAlias {
     	}
     	return t
     }
    +
    +// IsPackageLevel reports whether obj is a package-level symbol.
    +func IsPackageLevel(obj types.Object) bool {
    +	return obj.Pkg() != nil && obj.Parent() == obj.Pkg().Scope()
    +}
    +
    +// NewTypesInfo returns a *types.Info with all maps populated.
    +func NewTypesInfo() *types.Info {
    +	return &types.Info{
    +		Types:        map[ast.Expr]types.TypeAndValue{},
    +		Instances:    map[*ast.Ident]types.Instance{},
    +		Defs:         map[*ast.Ident]types.Object{},
    +		Uses:         map[*ast.Ident]types.Object{},
    +		Implicits:    map[ast.Node]types.Object{},
    +		Selections:   map[*ast.SelectorExpr]*types.Selection{},
    +		Scopes:       map[ast.Node]*types.Scope{},
    +		FileVersions: map[*ast.File]string{},
    +	}
    +}
    +
    +// EnclosingScope returns the innermost block logically enclosing the cursor.
    +func EnclosingScope(info *types.Info, cur inspector.Cursor) *types.Scope {
    +	for cur := range cur.Enclosing() {
    +		n := cur.Node()
    +		// A function's Scope is associated with its FuncType.
    +		switch f := n.(type) {
    +		case *ast.FuncDecl:
    +			n = f.Type
    +		case *ast.FuncLit:
    +			n = f.Type
    +		}
    +		if b := info.Scopes[n]; b != nil {
    +			return b
    +		}
    +	}
    +	panic("no Scope for *ast.File")
    +}
    +
    +// Imports reports whether path is imported by pkg.
    +func Imports(pkg *types.Package, path string) bool {
    +	for _, imp := range pkg.Imports() {
    +		if imp.Path() == path {
    +			return true
    +		}
    +	}
    +	return false
    +}
    diff --git a/vendor/golang.org/x/tools/internal/typesinternal/varkind.go b/vendor/golang.org/x/tools/internal/typesinternal/varkind.go
    new file mode 100644
    index 000000000..e5da04951
    --- /dev/null
    +++ b/vendor/golang.org/x/tools/internal/typesinternal/varkind.go
    @@ -0,0 +1,40 @@
    +// Copyright 2024 The Go Authors. All rights reserved.
    +// Use of this source code is governed by a BSD-style
    +// license that can be found in the LICENSE file.
    +
    +package typesinternal
    +
    +// TODO(adonovan): when CL 645115 lands, define the go1.25 version of
    +// this API that actually does something.
    +
    +import "go/types"
    +
    +type VarKind uint8
    +
    +const (
    +	_          VarKind = iota // (not meaningful)
    +	PackageVar                // a package-level variable
    +	LocalVar                  // a local variable
    +	RecvVar                   // a method receiver variable
    +	ParamVar                  // a function parameter variable
    +	ResultVar                 // a function result variable
    +	FieldVar                  // a struct field
    +)
    +
    +func (kind VarKind) String() string {
    +	return [...]string{
    +		0:          "VarKind(0)",
    +		PackageVar: "PackageVar",
    +		LocalVar:   "LocalVar",
    +		RecvVar:    "RecvVar",
    +		ParamVar:   "ParamVar",
    +		ResultVar:  "ResultVar",
    +		FieldVar:   "FieldVar",
    +	}[kind]
    +}
    +
    +// GetVarKind returns an invalid VarKind.
    +func GetVarKind(v *types.Var) VarKind { return 0 }
    +
    +// SetVarKind has no effect.
    +func SetVarKind(v *types.Var, kind VarKind) {}
    diff --git a/vendor/golang.org/x/tools/internal/typesinternal/zerovalue.go b/vendor/golang.org/x/tools/internal/typesinternal/zerovalue.go
    index 106698064..453bba2ad 100644
    --- a/vendor/golang.org/x/tools/internal/typesinternal/zerovalue.go
    +++ b/vendor/golang.org/x/tools/internal/typesinternal/zerovalue.go
    @@ -9,62 +9,97 @@ import (
     	"go/ast"
     	"go/token"
     	"go/types"
    -	"strconv"
     	"strings"
     )
     
    -// ZeroString returns the string representation of the "zero" value of the type t.
    +// ZeroString returns the string representation of the zero value for any type t.
    +// The boolean result indicates whether the type is or contains an invalid type
    +// or a non-basic (constraint) interface type.
    +//
    +// Even for invalid input types, ZeroString may return a partially correct
    +// string representation. The caller should use the returned isValid boolean
    +// to determine the validity of the expression.
    +//
    +// When assigning to a wider type (such as 'any'), it's the caller's
    +// responsibility to handle any necessary type conversions.
    +//
     // This string can be used on the right-hand side of an assignment where the
     // left-hand side has that explicit type.
    +// References to named types are qualified by an appropriate (optional)
    +// qualifier function.
     // Exception: This does not apply to tuples. Their string representation is
     // informational only and cannot be used in an assignment.
    -// When assigning to a wider type (such as 'any'), it's the caller's
    -// responsibility to handle any necessary type conversions.
    +//
     // See [ZeroExpr] for a variant that returns an [ast.Expr].
    -func ZeroString(t types.Type, qf types.Qualifier) string {
    +func ZeroString(t types.Type, qual types.Qualifier) (_ string, isValid bool) {
     	switch t := t.(type) {
     	case *types.Basic:
     		switch {
     		case t.Info()&types.IsBoolean != 0:
    -			return "false"
    +			return "false", true
     		case t.Info()&types.IsNumeric != 0:
    -			return "0"
    +			return "0", true
     		case t.Info()&types.IsString != 0:
    -			return `""`
    +			return `""`, true
     		case t.Kind() == types.UnsafePointer:
     			fallthrough
     		case t.Kind() == types.UntypedNil:
    -			return "nil"
    +			return "nil", true
    +		case t.Kind() == types.Invalid:
    +			return "invalid", false
     		default:
    -			panic(fmt.Sprint("ZeroString for unexpected type:", t))
    +			panic(fmt.Sprintf("ZeroString for unexpected type %v", t))
     		}
     
    -	case *types.Pointer, *types.Slice, *types.Interface, *types.Chan, *types.Map, *types.Signature:
    -		return "nil"
    +	case *types.Pointer, *types.Slice, *types.Chan, *types.Map, *types.Signature:
    +		return "nil", true
    +
    +	case *types.Interface:
    +		if !t.IsMethodSet() {
    +			return "invalid", false
    +		}
    +		return "nil", true
     
    -	case *types.Named, *types.Alias:
    +	case *types.Named:
     		switch under := t.Underlying().(type) {
     		case *types.Struct, *types.Array:
    -			return types.TypeString(t, qf) + "{}"
    +			return types.TypeString(t, qual) + "{}", true
     		default:
    -			return ZeroString(under, qf)
    +			return ZeroString(under, qual)
    +		}
    +
    +	case *types.Alias:
    +		switch t.Underlying().(type) {
    +		case *types.Struct, *types.Array:
    +			return types.TypeString(t, qual) + "{}", true
    +		default:
    +			// A type parameter can have alias but alias type's underlying type
    +			// can never be a type parameter.
    +			// Use types.Unalias to preserve the info of type parameter instead
    +			// of call Underlying() going right through and get the underlying
    +			// type of the type parameter which is always an interface.
    +			return ZeroString(types.Unalias(t), qual)
     		}
     
     	case *types.Array, *types.Struct:
    -		return types.TypeString(t, qf) + "{}"
    +		return types.TypeString(t, qual) + "{}", true
     
     	case *types.TypeParam:
     		// Assumes func new is not shadowed.
    -		return "*new(" + types.TypeString(t, qf) + ")"
    +		return "*new(" + types.TypeString(t, qual) + ")", true
     
     	case *types.Tuple:
     		// Tuples are not normal values.
     		// We are currently format as "(t[0], ..., t[n])". Could be something else.
    +		isValid := true
     		components := make([]string, t.Len())
     		for i := 0; i < t.Len(); i++ {
    -			components[i] = ZeroString(t.At(i).Type(), qf)
    +			comp, ok := ZeroString(t.At(i).Type(), qual)
    +
    +			components[i] = comp
    +			isValid = isValid && ok
     		}
    -		return "(" + strings.Join(components, ", ") + ")"
    +		return "(" + strings.Join(components, ", ") + ")", isValid
     
     	case *types.Union:
     		// Variables of these types cannot be created, so it makes
    @@ -76,45 +111,72 @@ func ZeroString(t types.Type, qf types.Qualifier) string {
     	}
     }
     
    -// ZeroExpr returns the ast.Expr representation of the "zero" value of the type t.
    -// ZeroExpr is defined for types that are suitable for variables.
    -// It may panic for other types such as Tuple or Union.
    +// ZeroExpr returns the ast.Expr representation of the zero value for any type t.
    +// The boolean result indicates whether the type is or contains an invalid type
    +// or a non-basic (constraint) interface type.
    +//
    +// Even for invalid input types, ZeroExpr may return a partially correct ast.Expr
    +// representation. The caller should use the returned isValid boolean to determine
    +// the validity of the expression.
    +//
    +// This function is designed for types suitable for variables and should not be
    +// used with Tuple or Union types.References to named types are qualified by an
    +// appropriate (optional) qualifier function.
    +//
     // See [ZeroString] for a variant that returns a string.
    -func ZeroExpr(f *ast.File, pkg *types.Package, typ types.Type) ast.Expr {
    -	switch t := typ.(type) {
    +func ZeroExpr(t types.Type, qual types.Qualifier) (_ ast.Expr, isValid bool) {
    +	switch t := t.(type) {
     	case *types.Basic:
     		switch {
     		case t.Info()&types.IsBoolean != 0:
    -			return &ast.Ident{Name: "false"}
    +			return &ast.Ident{Name: "false"}, true
     		case t.Info()&types.IsNumeric != 0:
    -			return &ast.BasicLit{Kind: token.INT, Value: "0"}
    +			return &ast.BasicLit{Kind: token.INT, Value: "0"}, true
     		case t.Info()&types.IsString != 0:
    -			return &ast.BasicLit{Kind: token.STRING, Value: `""`}
    +			return &ast.BasicLit{Kind: token.STRING, Value: `""`}, true
     		case t.Kind() == types.UnsafePointer:
     			fallthrough
     		case t.Kind() == types.UntypedNil:
    -			return ast.NewIdent("nil")
    +			return ast.NewIdent("nil"), true
    +		case t.Kind() == types.Invalid:
    +			return &ast.BasicLit{Kind: token.STRING, Value: `"invalid"`}, false
     		default:
    -			panic(fmt.Sprint("ZeroExpr for unexpected type:", t))
    +			panic(fmt.Sprintf("ZeroExpr for unexpected type %v", t))
     		}
     
    -	case *types.Pointer, *types.Slice, *types.Interface, *types.Chan, *types.Map, *types.Signature:
    -		return ast.NewIdent("nil")
    +	case *types.Pointer, *types.Slice, *types.Chan, *types.Map, *types.Signature:
    +		return ast.NewIdent("nil"), true
     
    -	case *types.Named, *types.Alias:
    +	case *types.Interface:
    +		if !t.IsMethodSet() {
    +			return &ast.BasicLit{Kind: token.STRING, Value: `"invalid"`}, false
    +		}
    +		return ast.NewIdent("nil"), true
    +
    +	case *types.Named:
     		switch under := t.Underlying().(type) {
     		case *types.Struct, *types.Array:
     			return &ast.CompositeLit{
    -				Type: TypeExpr(f, pkg, typ),
    -			}
    +				Type: TypeExpr(t, qual),
    +			}, true
    +		default:
    +			return ZeroExpr(under, qual)
    +		}
    +
    +	case *types.Alias:
    +		switch t.Underlying().(type) {
    +		case *types.Struct, *types.Array:
    +			return &ast.CompositeLit{
    +				Type: TypeExpr(t, qual),
    +			}, true
     		default:
    -			return ZeroExpr(f, pkg, under)
    +			return ZeroExpr(types.Unalias(t), qual)
     		}
     
     	case *types.Array, *types.Struct:
     		return &ast.CompositeLit{
    -			Type: TypeExpr(f, pkg, typ),
    -		}
    +			Type: TypeExpr(t, qual),
    +		}, true
     
     	case *types.TypeParam:
     		return &ast.StarExpr{ // *new(T)
    @@ -125,7 +187,7 @@ func ZeroExpr(f *ast.File, pkg *types.Package, typ types.Type) ast.Expr {
     					ast.NewIdent(t.Obj().Name()),
     				},
     			},
    -		}
    +		}, true
     
     	case *types.Tuple:
     		// Unlike ZeroString, there is no ast.Expr can express tuple by
    @@ -142,31 +204,18 @@ func ZeroExpr(f *ast.File, pkg *types.Package, typ types.Type) ast.Expr {
     	}
     }
     
    -// IsZeroExpr uses simple syntactic heuristics to report whether expr
    -// is a obvious zero value, such as 0, "", nil, or false.
    -// It cannot do better without type information.
    -func IsZeroExpr(expr ast.Expr) bool {
    -	switch e := expr.(type) {
    -	case *ast.BasicLit:
    -		return e.Value == "0" || e.Value == `""`
    -	case *ast.Ident:
    -		return e.Name == "nil" || e.Name == "false"
    -	default:
    -		return false
    -	}
    -}
    -
     // TypeExpr returns syntax for the specified type. References to named types
    -// from packages other than pkg are qualified by an appropriate package name, as
    -// defined by the import environment of file.
    +// are qualified by an appropriate (optional) qualifier function.
     // It may panic for types such as Tuple or Union.
    -func TypeExpr(f *ast.File, pkg *types.Package, typ types.Type) ast.Expr {
    -	switch t := typ.(type) {
    +//
    +// See also https://go.dev/issues/75604, which will provide a robust
    +// Type-to-valid-Go-syntax formatter.
    +func TypeExpr(t types.Type, qual types.Qualifier) ast.Expr {
    +	switch t := t.(type) {
     	case *types.Basic:
     		switch t.Kind() {
     		case types.UnsafePointer:
    -			// TODO(hxjiang): replace the implementation with types.Qualifier.
    -			return &ast.SelectorExpr{X: ast.NewIdent("unsafe"), Sel: ast.NewIdent("Pointer")}
    +			return &ast.SelectorExpr{X: ast.NewIdent(qual(types.NewPackage("unsafe", "unsafe"))), Sel: ast.NewIdent("Pointer")}
     		default:
     			return ast.NewIdent(t.Name())
     		}
    @@ -174,7 +223,7 @@ func TypeExpr(f *ast.File, pkg *types.Package, typ types.Type) ast.Expr {
     	case *types.Pointer:
     		return &ast.UnaryExpr{
     			Op: token.MUL,
    -			X:  TypeExpr(f, pkg, t.Elem()),
    +			X:  TypeExpr(t.Elem(), qual),
     		}
     
     	case *types.Array:
    @@ -183,18 +232,18 @@ func TypeExpr(f *ast.File, pkg *types.Package, typ types.Type) ast.Expr {
     				Kind:  token.INT,
     				Value: fmt.Sprintf("%d", t.Len()),
     			},
    -			Elt: TypeExpr(f, pkg, t.Elem()),
    +			Elt: TypeExpr(t.Elem(), qual),
     		}
     
     	case *types.Slice:
     		return &ast.ArrayType{
    -			Elt: TypeExpr(f, pkg, t.Elem()),
    +			Elt: TypeExpr(t.Elem(), qual),
     		}
     
     	case *types.Map:
     		return &ast.MapType{
    -			Key:   TypeExpr(f, pkg, t.Key()),
    -			Value: TypeExpr(f, pkg, t.Elem()),
    +			Key:   TypeExpr(t.Key(), qual),
    +			Value: TypeExpr(t.Elem(), qual),
     		}
     
     	case *types.Chan:
    @@ -204,14 +253,14 @@ func TypeExpr(f *ast.File, pkg *types.Package, typ types.Type) ast.Expr {
     		}
     		return &ast.ChanType{
     			Dir:   dir,
    -			Value: TypeExpr(f, pkg, t.Elem()),
    +			Value: TypeExpr(t.Elem(), qual),
     		}
     
     	case *types.Signature:
     		var params []*ast.Field
     		for i := 0; i < t.Params().Len(); i++ {
     			params = append(params, &ast.Field{
    -				Type: TypeExpr(f, pkg, t.Params().At(i).Type()),
    +				Type: TypeExpr(t.Params().At(i).Type(), qual),
     				Names: []*ast.Ident{
     					{
     						Name: t.Params().At(i).Name(),
    @@ -226,7 +275,7 @@ func TypeExpr(f *ast.File, pkg *types.Package, typ types.Type) ast.Expr {
     		var returns []*ast.Field
     		for i := 0; i < t.Results().Len(); i++ {
     			returns = append(returns, &ast.Field{
    -				Type: TypeExpr(f, pkg, t.Results().At(i).Type()),
    +				Type: TypeExpr(t.Results().At(i).Type(), qual),
     			})
     		}
     		return &ast.FuncType{
    @@ -238,23 +287,9 @@ func TypeExpr(f *ast.File, pkg *types.Package, typ types.Type) ast.Expr {
     			},
     		}
     
    -	case interface{ Obj() *types.TypeName }: // *types.{Alias,Named,TypeParam}
    -		switch t.Obj().Pkg() {
    -		case pkg, nil:
    -			return ast.NewIdent(t.Obj().Name())
    -		}
    -		pkgName := t.Obj().Pkg().Name()
    -
    -		// TODO(hxjiang): replace the implementation with types.Qualifier.
    -		// If the file already imports the package under another name, use that.
    -		for _, cand := range f.Imports {
    -			if path, _ := strconv.Unquote(cand.Path.Value); path == t.Obj().Pkg().Path() {
    -				if cand.Name != nil && cand.Name.Name != "" {
    -					pkgName = cand.Name.Name
    -				}
    -			}
    -		}
    -		if pkgName == "." {
    +	case *types.TypeParam:
    +		pkgName := qual(t.Obj().Pkg())
    +		if pkgName == "" || t.Obj().Pkg() == nil {
     			return ast.NewIdent(t.Obj().Name())
     		}
     		return &ast.SelectorExpr{
    @@ -262,6 +297,36 @@ func TypeExpr(f *ast.File, pkg *types.Package, typ types.Type) ast.Expr {
     			Sel: ast.NewIdent(t.Obj().Name()),
     		}
     
    +	// types.TypeParam also implements interface NamedOrAlias. To differentiate,
    +	// case TypeParam need to be present before case NamedOrAlias.
    +	// TODO(hxjiang): remove this comment once TypeArgs() is added to interface
    +	// NamedOrAlias.
    +	case NamedOrAlias:
    +		var expr ast.Expr = ast.NewIdent(t.Obj().Name())
    +		if pkgName := qual(t.Obj().Pkg()); pkgName != "." && pkgName != "" {
    +			expr = &ast.SelectorExpr{
    +				X:   ast.NewIdent(pkgName),
    +				Sel: expr.(*ast.Ident),
    +			}
    +		}
    +
    +		// TODO(hxjiang): call t.TypeArgs after adding method TypeArgs() to
    +		// typesinternal.NamedOrAlias.
    +		if hasTypeArgs, ok := t.(interface{ TypeArgs() *types.TypeList }); ok {
    +			if typeArgs := hasTypeArgs.TypeArgs(); typeArgs != nil && typeArgs.Len() > 0 {
    +				var indices []ast.Expr
    +				for i := range typeArgs.Len() {
    +					indices = append(indices, TypeExpr(typeArgs.At(i), qual))
    +				}
    +				expr = &ast.IndexListExpr{
    +					X:       expr,
    +					Indices: indices,
    +				}
    +			}
    +		}
    +
    +		return expr
    +
     	case *types.Struct:
     		return ast.NewIdent(t.String())
     
    @@ -269,9 +334,43 @@ func TypeExpr(f *ast.File, pkg *types.Package, typ types.Type) ast.Expr {
     		return ast.NewIdent(t.String())
     
     	case *types.Union:
    -		// TODO(hxjiang): handle the union through syntax (~A | ... | ~Z).
    -		// Remove nil check when calling typesinternal.TypeExpr.
    -		return nil
    +		if t.Len() == 0 {
    +			panic("Union type should have at least one term")
    +		}
    +		// Same as go/ast, the return expression will put last term in the
    +		// Y field at topmost level of BinaryExpr.
    +		// For union of type "float32 | float64 | int64", the structure looks
    +		// similar to:
    +		// {
    +		// 	X: {
    +		// 		X: float32,
    +		// 		Op: |
    +		// 		Y: float64,
    +		// 	}
    +		// 	Op: |,
    +		// 	Y: int64,
    +		// }
    +		var union ast.Expr
    +		for i := range t.Len() {
    +			term := t.Term(i)
    +			termExpr := TypeExpr(term.Type(), qual)
    +			if term.Tilde() {
    +				termExpr = &ast.UnaryExpr{
    +					Op: token.TILDE,
    +					X:  termExpr,
    +				}
    +			}
    +			if i == 0 {
    +				union = termExpr
    +			} else {
    +				union = &ast.BinaryExpr{
    +					X:  union,
    +					Op: token.OR,
    +					Y:  termExpr,
    +				}
    +			}
    +		}
    +		return union
     
     	case *types.Tuple:
     		panic("invalid input type types.Tuple")
    diff --git a/vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/checked.pb.go b/vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/checked.pb.go
    index 9f81dbcd8..af9c44d93 100644
    --- a/vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/checked.pb.go
    +++ b/vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/checked.pb.go
    @@ -1,4 +1,4 @@
    -// Copyright 2024 Google LLC
    +// Copyright 2025 Google LLC
     //
     // Licensed under the Apache License, Version 2.0 (the "License");
     // you may not use this file except in compliance with the License.
    diff --git a/vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/eval.pb.go b/vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/eval.pb.go
    index 0a2ffb595..4b4f15477 100644
    --- a/vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/eval.pb.go
    +++ b/vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/eval.pb.go
    @@ -1,4 +1,4 @@
    -// Copyright 2024 Google LLC
    +// Copyright 2025 Google LLC
     //
     // Licensed under the Apache License, Version 2.0 (the "License");
     // you may not use this file except in compliance with the License.
    diff --git a/vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/explain.pb.go b/vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/explain.pb.go
    index 57aaa2c9f..ef27e878b 100644
    --- a/vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/explain.pb.go
    +++ b/vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/explain.pb.go
    @@ -1,4 +1,4 @@
    -// Copyright 2024 Google LLC
    +// Copyright 2025 Google LLC
     //
     // Licensed under the Apache License, Version 2.0 (the "License");
     // you may not use this file except in compliance with the License.
    diff --git a/vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/syntax.pb.go b/vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/syntax.pb.go
    index c90c6015d..7b973217e 100644
    --- a/vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/syntax.pb.go
    +++ b/vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/syntax.pb.go
    @@ -1,4 +1,4 @@
    -// Copyright 2024 Google LLC
    +// Copyright 2025 Google LLC
     //
     // Licensed under the Apache License, Version 2.0 (the "License");
     // you may not use this file except in compliance with the License.
    diff --git a/vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/value.pb.go b/vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/value.pb.go
    index 0a5ca6a1b..4ba3c7b2a 100644
    --- a/vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/value.pb.go
    +++ b/vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/value.pb.go
    @@ -1,4 +1,4 @@
    -// Copyright 2024 Google LLC
    +// Copyright 2025 Google LLC
     //
     // Licensed under the Apache License, Version 2.0 (the "License");
     // you may not use this file except in compliance with the License.
    diff --git a/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go b/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go
    index f388426b0..d083dde3e 100644
    --- a/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go
    +++ b/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go
    @@ -1,4 +1,4 @@
    -// Copyright 2024 Google LLC
    +// Copyright 2025 Google LLC
     //
     // Licensed under the Apache License, Version 2.0 (the "License");
     // you may not use this file except in compliance with the License.
    diff --git a/vendor/google.golang.org/genproto/googleapis/rpc/errdetails/error_details.pb.go b/vendor/google.golang.org/genproto/googleapis/rpc/errdetails/error_details.pb.go
    index 3cd9a5bb8..e017ef071 100644
    --- a/vendor/google.golang.org/genproto/googleapis/rpc/errdetails/error_details.pb.go
    +++ b/vendor/google.golang.org/genproto/googleapis/rpc/errdetails/error_details.pb.go
    @@ -1,4 +1,4 @@
    -// Copyright 2024 Google LLC
    +// Copyright 2025 Google LLC
     //
     // Licensed under the Apache License, Version 2.0 (the "License");
     // you may not use this file except in compliance with the License.
    @@ -703,6 +703,65 @@ type QuotaFailure_Violation struct {
     	// For example: "Service disabled" or "Daily Limit for read operations
     	// exceeded".
     	Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"`
    +	// The API Service from which the `QuotaFailure.Violation` orginates. In
    +	// some cases, Quota issues originate from an API Service other than the one
    +	// that was called. In other words, a dependency of the called API Service
    +	// could be the cause of the `QuotaFailure`, and this field would have the
    +	// dependency API service name.
    +	//
    +	// For example, if the called API is Kubernetes Engine API
    +	// (container.googleapis.com), and a quota violation occurs in the
    +	// Kubernetes Engine API itself, this field would be
    +	// "container.googleapis.com". On the other hand, if the quota violation
    +	// occurs when the Kubernetes Engine API creates VMs in the Compute Engine
    +	// API (compute.googleapis.com), this field would be
    +	// "compute.googleapis.com".
    +	ApiService string `protobuf:"bytes,3,opt,name=api_service,json=apiService,proto3" json:"api_service,omitempty"`
    +	// The metric of the violated quota. A quota metric is a named counter to
    +	// measure usage, such as API requests or CPUs. When an activity occurs in a
    +	// service, such as Virtual Machine allocation, one or more quota metrics
    +	// may be affected.
    +	//
    +	// For example, "compute.googleapis.com/cpus_per_vm_family",
    +	// "storage.googleapis.com/internet_egress_bandwidth".
    +	QuotaMetric string `protobuf:"bytes,4,opt,name=quota_metric,json=quotaMetric,proto3" json:"quota_metric,omitempty"`
    +	// The id of the violated quota. Also know as "limit name", this is the
    +	// unique identifier of a quota in the context of an API service.
    +	//
    +	// For example, "CPUS-PER-VM-FAMILY-per-project-region".
    +	QuotaId string `protobuf:"bytes,5,opt,name=quota_id,json=quotaId,proto3" json:"quota_id,omitempty"`
    +	// The dimensions of the violated quota. Every non-global quota is enforced
    +	// on a set of dimensions. While quota metric defines what to count, the
    +	// dimensions specify for what aspects the counter should be increased.
    +	//
    +	// For example, the quota "CPUs per region per VM family" enforces a limit
    +	// on the metric "compute.googleapis.com/cpus_per_vm_family" on dimensions
    +	// "region" and "vm_family". And if the violation occurred in region
    +	// "us-central1" and for VM family "n1", the quota_dimensions would be,
    +	//
    +	//	{
    +	//	  "region": "us-central1",
    +	//	  "vm_family": "n1",
    +	//	}
    +	//
    +	// When a quota is enforced globally, the quota_dimensions would always be
    +	// empty.
    +	QuotaDimensions map[string]string `protobuf:"bytes,6,rep,name=quota_dimensions,json=quotaDimensions,proto3" json:"quota_dimensions,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
    +	// The enforced quota value at the time of the `QuotaFailure`.
    +	//
    +	// For example, if the enforced quota value at the time of the
    +	// `QuotaFailure` on the number of CPUs is "10", then the value of this
    +	// field would reflect this quantity.
    +	QuotaValue int64 `protobuf:"varint,7,opt,name=quota_value,json=quotaValue,proto3" json:"quota_value,omitempty"`
    +	// The new quota value being rolled out at the time of the violation. At the
    +	// completion of the rollout, this value will be enforced in place of
    +	// quota_value. If no rollout is in progress at the time of the violation,
    +	// this field is not set.
    +	//
    +	// For example, if at the time of the violation a rollout is in progress
    +	// changing the number of CPUs quota from 10 to 20, 20 would be the value of
    +	// this field.
    +	FutureQuotaValue *int64 `protobuf:"varint,8,opt,name=future_quota_value,json=futureQuotaValue,proto3,oneof" json:"future_quota_value,omitempty"`
     }
     
     func (x *QuotaFailure_Violation) Reset() {
    @@ -751,6 +810,48 @@ func (x *QuotaFailure_Violation) GetDescription() string {
     	return ""
     }
     
    +func (x *QuotaFailure_Violation) GetApiService() string {
    +	if x != nil {
    +		return x.ApiService
    +	}
    +	return ""
    +}
    +
    +func (x *QuotaFailure_Violation) GetQuotaMetric() string {
    +	if x != nil {
    +		return x.QuotaMetric
    +	}
    +	return ""
    +}
    +
    +func (x *QuotaFailure_Violation) GetQuotaId() string {
    +	if x != nil {
    +		return x.QuotaId
    +	}
    +	return ""
    +}
    +
    +func (x *QuotaFailure_Violation) GetQuotaDimensions() map[string]string {
    +	if x != nil {
    +		return x.QuotaDimensions
    +	}
    +	return nil
    +}
    +
    +func (x *QuotaFailure_Violation) GetQuotaValue() int64 {
    +	if x != nil {
    +		return x.QuotaValue
    +	}
    +	return 0
    +}
    +
    +func (x *QuotaFailure_Violation) GetFutureQuotaValue() int64 {
    +	if x != nil && x.FutureQuotaValue != nil {
    +		return *x.FutureQuotaValue
    +	}
    +	return 0
    +}
    +
     // A message type used to describe a single precondition failure.
     type PreconditionFailure_Violation struct {
     	state         protoimpl.MessageState
    @@ -775,7 +876,7 @@ type PreconditionFailure_Violation struct {
     func (x *PreconditionFailure_Violation) Reset() {
     	*x = PreconditionFailure_Violation{}
     	if protoimpl.UnsafeEnabled {
    -		mi := &file_google_rpc_error_details_proto_msgTypes[12]
    +		mi := &file_google_rpc_error_details_proto_msgTypes[13]
     		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
     		ms.StoreMessageInfo(mi)
     	}
    @@ -788,7 +889,7 @@ func (x *PreconditionFailure_Violation) String() string {
     func (*PreconditionFailure_Violation) ProtoMessage() {}
     
     func (x *PreconditionFailure_Violation) ProtoReflect() protoreflect.Message {
    -	mi := &file_google_rpc_error_details_proto_msgTypes[12]
    +	mi := &file_google_rpc_error_details_proto_msgTypes[13]
     	if protoimpl.UnsafeEnabled && x != nil {
     		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
     		if ms.LoadMessageInfo() == nil {
    @@ -886,7 +987,7 @@ type BadRequest_FieldViolation struct {
     func (x *BadRequest_FieldViolation) Reset() {
     	*x = BadRequest_FieldViolation{}
     	if protoimpl.UnsafeEnabled {
    -		mi := &file_google_rpc_error_details_proto_msgTypes[13]
    +		mi := &file_google_rpc_error_details_proto_msgTypes[14]
     		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
     		ms.StoreMessageInfo(mi)
     	}
    @@ -899,7 +1000,7 @@ func (x *BadRequest_FieldViolation) String() string {
     func (*BadRequest_FieldViolation) ProtoMessage() {}
     
     func (x *BadRequest_FieldViolation) ProtoReflect() protoreflect.Message {
    -	mi := &file_google_rpc_error_details_proto_msgTypes[13]
    +	mi := &file_google_rpc_error_details_proto_msgTypes[14]
     	if protoimpl.UnsafeEnabled && x != nil {
     		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
     		if ms.LoadMessageInfo() == nil {
    @@ -958,7 +1059,7 @@ type Help_Link struct {
     func (x *Help_Link) Reset() {
     	*x = Help_Link{}
     	if protoimpl.UnsafeEnabled {
    -		mi := &file_google_rpc_error_details_proto_msgTypes[14]
    +		mi := &file_google_rpc_error_details_proto_msgTypes[15]
     		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
     		ms.StoreMessageInfo(mi)
     	}
    @@ -971,7 +1072,7 @@ func (x *Help_Link) String() string {
     func (*Help_Link) ProtoMessage() {}
     
     func (x *Help_Link) ProtoReflect() protoreflect.Message {
    -	mi := &file_google_rpc_error_details_proto_msgTypes[14]
    +	mi := &file_google_rpc_error_details_proto_msgTypes[15]
     	if protoimpl.UnsafeEnabled && x != nil {
     		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
     		if ms.LoadMessageInfo() == nil {
    @@ -1029,79 +1130,102 @@ var file_google_rpc_error_details_proto_rawDesc = []byte{
     	0x0a, 0x0d, 0x73, 0x74, 0x61, 0x63, 0x6b, 0x5f, 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x18,
     	0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x74, 0x61, 0x63, 0x6b, 0x45, 0x6e, 0x74, 0x72,
     	0x69, 0x65, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x18, 0x02, 0x20,
    -	0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x22, 0x9b, 0x01, 0x0a, 0x0c,
    +	0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x22, 0x8e, 0x04, 0x0a, 0x0c,
     	0x51, 0x75, 0x6f, 0x74, 0x61, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x12, 0x42, 0x0a, 0x0a,
     	0x76, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b,
     	0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x51, 0x75,
     	0x6f, 0x74, 0x61, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x2e, 0x56, 0x69, 0x6f, 0x6c, 0x61,
     	0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x76, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73,
    -	0x1a, 0x47, 0x0a, 0x09, 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a,
    -	0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07,
    -	0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72,
    -	0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65,
    -	0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xbd, 0x01, 0x0a, 0x13, 0x50, 0x72,
    -	0x65, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72,
    -	0x65, 0x12, 0x49, 0x0a, 0x0a, 0x76, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18,
    -	0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72,
    -	0x70, 0x63, 0x2e, 0x50, 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x46,
    -	0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x2e, 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e,
    -	0x52, 0x0a, 0x76, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x5b, 0x0a, 0x09,
    -	0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70,
    -	0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x18, 0x0a,
    -	0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07,
    -	0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72,
    -	0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65,
    -	0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x8c, 0x02, 0x0a, 0x0a, 0x42, 0x61,
    -	0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x50, 0x0a, 0x10, 0x66, 0x69, 0x65, 0x6c,
    -	0x64, 0x5f, 0x76, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03,
    -	0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e,
    -	0x42, 0x61, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64,
    -	0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0f, 0x66, 0x69, 0x65, 0x6c, 0x64,
    -	0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0xab, 0x01, 0x0a, 0x0e, 0x46,
    -	0x69, 0x65, 0x6c, 0x64, 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a,
    -	0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x66, 0x69,
    -	0x65, 0x6c, 0x64, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69,
    -	0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69,
    -	0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x18,
    -	0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x12, 0x49, 0x0a,
    -	0x11, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61,
    -	0x67, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
    -	0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x4d,
    -	0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x10, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x7a, 0x65,
    -	0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x4f, 0x0a, 0x0b, 0x52, 0x65, 0x71, 0x75,
    -	0x65, 0x73, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65,
    -	0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x72, 0x65, 0x71,
    -	0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x6e,
    -	0x67, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x65,
    -	0x72, 0x76, 0x69, 0x6e, 0x67, 0x44, 0x61, 0x74, 0x61, 0x22, 0x90, 0x01, 0x0a, 0x0c, 0x52, 0x65,
    -	0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65,
    -	0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28,
    -	0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12,
    -	0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65,
    -	0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65,
    -	0x4e, 0x61, 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x18, 0x03, 0x20,
    -	0x01, 0x28, 0x09, 0x52, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65,
    -	0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52,
    -	0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x6f, 0x0a, 0x04,
    -	0x48, 0x65, 0x6c, 0x70, 0x12, 0x2b, 0x0a, 0x05, 0x6c, 0x69, 0x6e, 0x6b, 0x73, 0x18, 0x01, 0x20,
    -	0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63,
    -	0x2e, 0x48, 0x65, 0x6c, 0x70, 0x2e, 0x4c, 0x69, 0x6e, 0x6b, 0x52, 0x05, 0x6c, 0x69, 0x6e, 0x6b,
    -	0x73, 0x1a, 0x3a, 0x0a, 0x04, 0x4c, 0x69, 0x6e, 0x6b, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73,
    -	0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b,
    -	0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x75,
    -	0x72, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x6c, 0x22, 0x44, 0x0a,
    -	0x10, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67,
    -	0x65, 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28,
    -	0x09, 0x52, 0x06, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73,
    -	0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73,
    -	0x61, 0x67, 0x65, 0x42, 0x6c, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
    -	0x65, 0x2e, 0x72, 0x70, 0x63, 0x42, 0x11, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x44, 0x65, 0x74, 0x61,
    -	0x69, 0x6c, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3f, 0x67, 0x6f, 0x6f, 0x67,
    -	0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65,
    -	0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69,
    -	0x73, 0x2f, 0x72, 0x70, 0x63, 0x2f, 0x65, 0x72, 0x72, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73,
    -	0x3b, 0x65, 0x72, 0x72, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0xa2, 0x02, 0x03, 0x52, 0x50,
    -	0x43, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
    +	0x1a, 0xb9, 0x03, 0x0a, 0x09, 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x18,
    +	0x0a, 0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
    +	0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63,
    +	0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64,
    +	0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x70,
    +	0x69, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52,
    +	0x0a, 0x61, 0x70, 0x69, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x71,
    +	0x75, 0x6f, 0x74, 0x61, 0x5f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x18, 0x04, 0x20, 0x01, 0x28,
    +	0x09, 0x52, 0x0b, 0x71, 0x75, 0x6f, 0x74, 0x61, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x12, 0x19,
    +	0x0a, 0x08, 0x71, 0x75, 0x6f, 0x74, 0x61, 0x5f, 0x69, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09,
    +	0x52, 0x07, 0x71, 0x75, 0x6f, 0x74, 0x61, 0x49, 0x64, 0x12, 0x62, 0x0a, 0x10, 0x71, 0x75, 0x6f,
    +	0x74, 0x61, 0x5f, 0x64, 0x69, 0x6d, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x06, 0x20,
    +	0x03, 0x28, 0x0b, 0x32, 0x37, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63,
    +	0x2e, 0x51, 0x75, 0x6f, 0x74, 0x61, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x2e, 0x56, 0x69,
    +	0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x51, 0x75, 0x6f, 0x74, 0x61, 0x44, 0x69, 0x6d,
    +	0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0f, 0x71, 0x75,
    +	0x6f, 0x74, 0x61, 0x44, 0x69, 0x6d, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x1f, 0x0a,
    +	0x0b, 0x71, 0x75, 0x6f, 0x74, 0x61, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x07, 0x20, 0x01,
    +	0x28, 0x03, 0x52, 0x0a, 0x71, 0x75, 0x6f, 0x74, 0x61, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x31,
    +	0x0a, 0x12, 0x66, 0x75, 0x74, 0x75, 0x72, 0x65, 0x5f, 0x71, 0x75, 0x6f, 0x74, 0x61, 0x5f, 0x76,
    +	0x61, 0x6c, 0x75, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x10, 0x66, 0x75,
    +	0x74, 0x75, 0x72, 0x65, 0x51, 0x75, 0x6f, 0x74, 0x61, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x88, 0x01,
    +	0x01, 0x1a, 0x42, 0x0a, 0x14, 0x51, 0x75, 0x6f, 0x74, 0x61, 0x44, 0x69, 0x6d, 0x65, 0x6e, 0x73,
    +	0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79,
    +	0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76,
    +	0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75,
    +	0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x15, 0x0a, 0x13, 0x5f, 0x66, 0x75, 0x74, 0x75, 0x72, 0x65,
    +	0x5f, 0x71, 0x75, 0x6f, 0x74, 0x61, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0xbd, 0x01, 0x0a,
    +	0x13, 0x50, 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x61, 0x69,
    +	0x6c, 0x75, 0x72, 0x65, 0x12, 0x49, 0x0a, 0x0a, 0x76, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f,
    +	0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
    +	0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x50, 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69,
    +	0x6f, 0x6e, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x2e, 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74,
    +	0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x76, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a,
    +	0x5b, 0x0a, 0x09, 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04,
    +	0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65,
    +	0x12, 0x18, 0x0a, 0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28,
    +	0x09, 0x52, 0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65,
    +	0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52,
    +	0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x8c, 0x02, 0x0a,
    +	0x0a, 0x42, 0x61, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x50, 0x0a, 0x10, 0x66,
    +	0x69, 0x65, 0x6c, 0x64, 0x5f, 0x76, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18,
    +	0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72,
    +	0x70, 0x63, 0x2e, 0x42, 0x61, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x46, 0x69,
    +	0x65, 0x6c, 0x64, 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0f, 0x66, 0x69,
    +	0x65, 0x6c, 0x64, 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0xab, 0x01,
    +	0x0a, 0x0e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e,
    +	0x12, 0x14, 0x0a, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
    +	0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69,
    +	0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73,
    +	0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x72, 0x65, 0x61, 0x73,
    +	0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e,
    +	0x12, 0x49, 0x0a, 0x11, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x5f, 0x6d, 0x65,
    +	0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f,
    +	0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x7a,
    +	0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x10, 0x6c, 0x6f, 0x63, 0x61, 0x6c,
    +	0x69, 0x7a, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x4f, 0x0a, 0x0b, 0x52,
    +	0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65,
    +	0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09,
    +	0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x65, 0x72,
    +	0x76, 0x69, 0x6e, 0x67, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52,
    +	0x0b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, 0x44, 0x61, 0x74, 0x61, 0x22, 0x90, 0x01, 0x0a,
    +	0x0c, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x23, 0x0a,
    +	0x0d, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01,
    +	0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79,
    +	0x70, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6e,
    +	0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x6f, 0x75,
    +	0x72, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72,
    +	0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x12, 0x20, 0x0a,
    +	0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01,
    +	0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22,
    +	0x6f, 0x0a, 0x04, 0x48, 0x65, 0x6c, 0x70, 0x12, 0x2b, 0x0a, 0x05, 0x6c, 0x69, 0x6e, 0x6b, 0x73,
    +	0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
    +	0x72, 0x70, 0x63, 0x2e, 0x48, 0x65, 0x6c, 0x70, 0x2e, 0x4c, 0x69, 0x6e, 0x6b, 0x52, 0x05, 0x6c,
    +	0x69, 0x6e, 0x6b, 0x73, 0x1a, 0x3a, 0x0a, 0x04, 0x4c, 0x69, 0x6e, 0x6b, 0x12, 0x20, 0x0a, 0x0b,
    +	0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28,
    +	0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x10,
    +	0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x6c,
    +	0x22, 0x44, 0x0a, 0x10, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x4d, 0x65, 0x73,
    +	0x73, 0x61, 0x67, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x65, 0x18, 0x01,
    +	0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x65, 0x12, 0x18, 0x0a, 0x07,
    +	0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d,
    +	0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x42, 0x6c, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f,
    +	0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x42, 0x11, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x44,
    +	0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3f, 0x67,
    +	0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67,
    +	0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
    +	0x61, 0x70, 0x69, 0x73, 0x2f, 0x72, 0x70, 0x63, 0x2f, 0x65, 0x72, 0x72, 0x64, 0x65, 0x74, 0x61,
    +	0x69, 0x6c, 0x73, 0x3b, 0x65, 0x72, 0x72, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0xa2, 0x02,
    +	0x03, 0x52, 0x50, 0x43, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
     }
     
     var (
    @@ -1116,7 +1240,7 @@ func file_google_rpc_error_details_proto_rawDescGZIP() []byte {
     	return file_google_rpc_error_details_proto_rawDescData
     }
     
    -var file_google_rpc_error_details_proto_msgTypes = make([]protoimpl.MessageInfo, 15)
    +var file_google_rpc_error_details_proto_msgTypes = make([]protoimpl.MessageInfo, 16)
     var file_google_rpc_error_details_proto_goTypes = []interface{}{
     	(*ErrorInfo)(nil),                     // 0: google.rpc.ErrorInfo
     	(*RetryInfo)(nil),                     // 1: google.rpc.RetryInfo
    @@ -1130,24 +1254,26 @@ var file_google_rpc_error_details_proto_goTypes = []interface{}{
     	(*LocalizedMessage)(nil),              // 9: google.rpc.LocalizedMessage
     	nil,                                   // 10: google.rpc.ErrorInfo.MetadataEntry
     	(*QuotaFailure_Violation)(nil),        // 11: google.rpc.QuotaFailure.Violation
    -	(*PreconditionFailure_Violation)(nil), // 12: google.rpc.PreconditionFailure.Violation
    -	(*BadRequest_FieldViolation)(nil),     // 13: google.rpc.BadRequest.FieldViolation
    -	(*Help_Link)(nil),                     // 14: google.rpc.Help.Link
    -	(*durationpb.Duration)(nil),           // 15: google.protobuf.Duration
    +	nil,                                   // 12: google.rpc.QuotaFailure.Violation.QuotaDimensionsEntry
    +	(*PreconditionFailure_Violation)(nil), // 13: google.rpc.PreconditionFailure.Violation
    +	(*BadRequest_FieldViolation)(nil),     // 14: google.rpc.BadRequest.FieldViolation
    +	(*Help_Link)(nil),                     // 15: google.rpc.Help.Link
    +	(*durationpb.Duration)(nil),           // 16: google.protobuf.Duration
     }
     var file_google_rpc_error_details_proto_depIdxs = []int32{
     	10, // 0: google.rpc.ErrorInfo.metadata:type_name -> google.rpc.ErrorInfo.MetadataEntry
    -	15, // 1: google.rpc.RetryInfo.retry_delay:type_name -> google.protobuf.Duration
    +	16, // 1: google.rpc.RetryInfo.retry_delay:type_name -> google.protobuf.Duration
     	11, // 2: google.rpc.QuotaFailure.violations:type_name -> google.rpc.QuotaFailure.Violation
    -	12, // 3: google.rpc.PreconditionFailure.violations:type_name -> google.rpc.PreconditionFailure.Violation
    -	13, // 4: google.rpc.BadRequest.field_violations:type_name -> google.rpc.BadRequest.FieldViolation
    -	14, // 5: google.rpc.Help.links:type_name -> google.rpc.Help.Link
    -	9,  // 6: google.rpc.BadRequest.FieldViolation.localized_message:type_name -> google.rpc.LocalizedMessage
    -	7,  // [7:7] is the sub-list for method output_type
    -	7,  // [7:7] is the sub-list for method input_type
    -	7,  // [7:7] is the sub-list for extension type_name
    -	7,  // [7:7] is the sub-list for extension extendee
    -	0,  // [0:7] is the sub-list for field type_name
    +	13, // 3: google.rpc.PreconditionFailure.violations:type_name -> google.rpc.PreconditionFailure.Violation
    +	14, // 4: google.rpc.BadRequest.field_violations:type_name -> google.rpc.BadRequest.FieldViolation
    +	15, // 5: google.rpc.Help.links:type_name -> google.rpc.Help.Link
    +	12, // 6: google.rpc.QuotaFailure.Violation.quota_dimensions:type_name -> google.rpc.QuotaFailure.Violation.QuotaDimensionsEntry
    +	9,  // 7: google.rpc.BadRequest.FieldViolation.localized_message:type_name -> google.rpc.LocalizedMessage
    +	8,  // [8:8] is the sub-list for method output_type
    +	8,  // [8:8] is the sub-list for method input_type
    +	8,  // [8:8] is the sub-list for extension type_name
    +	8,  // [8:8] is the sub-list for extension extendee
    +	0,  // [0:8] is the sub-list for field type_name
     }
     
     func init() { file_google_rpc_error_details_proto_init() }
    @@ -1288,7 +1414,7 @@ func file_google_rpc_error_details_proto_init() {
     				return nil
     			}
     		}
    -		file_google_rpc_error_details_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} {
    +		file_google_rpc_error_details_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} {
     			switch v := v.(*PreconditionFailure_Violation); i {
     			case 0:
     				return &v.state
    @@ -1300,7 +1426,7 @@ func file_google_rpc_error_details_proto_init() {
     				return nil
     			}
     		}
    -		file_google_rpc_error_details_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} {
    +		file_google_rpc_error_details_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} {
     			switch v := v.(*BadRequest_FieldViolation); i {
     			case 0:
     				return &v.state
    @@ -1312,7 +1438,7 @@ func file_google_rpc_error_details_proto_init() {
     				return nil
     			}
     		}
    -		file_google_rpc_error_details_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} {
    +		file_google_rpc_error_details_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} {
     			switch v := v.(*Help_Link); i {
     			case 0:
     				return &v.state
    @@ -1325,13 +1451,14 @@ func file_google_rpc_error_details_proto_init() {
     			}
     		}
     	}
    +	file_google_rpc_error_details_proto_msgTypes[11].OneofWrappers = []interface{}{}
     	type x struct{}
     	out := protoimpl.TypeBuilder{
     		File: protoimpl.DescBuilder{
     			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
     			RawDescriptor: file_google_rpc_error_details_proto_rawDesc,
     			NumEnums:      0,
    -			NumMessages:   15,
    +			NumMessages:   16,
     			NumExtensions: 0,
     			NumServices:   0,
     		},
    diff --git a/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go b/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go
    index 6ad1b1c1d..06a3f7106 100644
    --- a/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go
    +++ b/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go
    @@ -1,4 +1,4 @@
    -// Copyright 2024 Google LLC
    +// Copyright 2025 Google LLC
     //
     // Licensed under the Apache License, Version 2.0 (the "License");
     // you may not use this file except in compliance with the License.
    diff --git a/vendor/google.golang.org/grpc/CONTRIBUTING.md b/vendor/google.golang.org/grpc/CONTRIBUTING.md
    index d9bfa6e1e..2079de7b0 100644
    --- a/vendor/google.golang.org/grpc/CONTRIBUTING.md
    +++ b/vendor/google.golang.org/grpc/CONTRIBUTING.md
    @@ -1,73 +1,159 @@
     # How to contribute
     
    -We definitely welcome your patches and contributions to gRPC! Please read the gRPC
    -organization's [governance rules](https://github.com/grpc/grpc-community/blob/master/governance.md)
    -and [contribution guidelines](https://github.com/grpc/grpc-community/blob/master/CONTRIBUTING.md) before proceeding.
    +We welcome your patches and contributions to gRPC! Please read the gRPC
    +organization's [governance
    +rules](https://github.com/grpc/grpc-community/blob/master/governance.md) before
    +proceeding.
     
     If you are new to GitHub, please start by reading [Pull Request howto](https://help.github.com/articles/about-pull-requests/)
     
     ## Legal requirements
     
     In order to protect both you and ourselves, you will need to sign the
    -[Contributor License Agreement](https://identity.linuxfoundation.org/projects/cncf).
    +[Contributor License
    +Agreement](https://identity.linuxfoundation.org/projects/cncf). When you create
    +your first PR, a link will be added as a comment that contains the steps needed
    +to complete this process.
    +
    +## Getting Started
    +
    +A great way to start is by searching through our open issues. [Unassigned issues
    +labeled as "help
    +wanted"](https://github.com/grpc/grpc-go/issues?q=sort%3Aupdated-desc%20is%3Aissue%20is%3Aopen%20label%3A%22Status%3A%20Help%20Wanted%22%20no%3Aassignee)
    +are especially nice for first-time contributors, as they should be well-defined
    +problems that already have agreed-upon solutions.
    +
    +## Code Style
    +
    +We follow [Google's published Go style
    +guide](https://google.github.io/styleguide/go/). Note that there are three
    +primary documents that make up this style guide; please follow them as closely
    +as possible. If a reviewer recommends something that contradicts those
    +guidelines, there may be valid reasons to do so, but it should be rare.
     
     ## Guidelines for Pull Requests
    -How to get your contributions merged smoothly and quickly.
    +
    +Please read the following carefully to ensure your contributions can be merged
    +smoothly and quickly.
    +
    +### PR Contents
     
     - Create **small PRs** that are narrowly focused on **addressing a single
    -  concern**. We often times receive PRs that are trying to fix several things at
    -  a time, but only one fix is considered acceptable, nothing gets merged and
    -  both author's & review's time is wasted. Create more PRs to address different
    -  concerns and everyone will be happy.
    +  concern**. We often receive PRs that attempt to fix several things at the same
    +  time, and if one part of the PR has a problem, that will hold up the entire
    +  PR.
    +
    +- If your change does not address an **open issue** with an **agreed
    +  resolution**, consider opening an issue and discussing it first. If you are
    +  suggesting a behavioral or API change, consider starting with a [gRFC
    +  proposal](https://github.com/grpc/proposal). Many new features that are not
    +  bug fixes will require cross-language agreement.
    +
    +- If you want to fix **formatting or style**, consider whether your changes are
    +  an obvious improvement or might be considered a personal preference. If a
    +  style change is based on preference, it likely will not be accepted. If it
    +  corrects widely agreed-upon anti-patterns, then please do create a PR and
    +  explain the benefits of the change.
    +
    +- For correcting **misspellings**, please be aware that we use some terms that
    +  are sometimes flagged by spell checkers. As an example, "if and only if" is
    +  often written as "iff". Please do not make spelling correction changes unless
    +  you are certain they are misspellings.
    +
    +- **All tests need to be passing** before your change can be merged. We
    +  recommend you run tests locally before creating your PR to catch breakages
    +  early on:
     
    -- If you are searching for features to work on, issues labeled [Status: Help
    -  Wanted](https://github.com/grpc/grpc-go/issues?q=is%3Aissue+is%3Aopen+sort%3Aupdated-desc+label%3A%22Status%3A+Help+Wanted%22)
    -  is a great place to start. These issues are well-documented and usually can be
    -  resolved with a single pull request.
    +  - `./scripts/vet.sh` to catch vet errors.
    +  - `go test -cpu 1,4 -timeout 7m ./...` to run the tests.
    +  - `go test -race -cpu 1,4 -timeout 7m ./...` to run tests in race mode.
     
    -- If you are adding a new file, make sure it has the copyright message template
    -  at the top as a comment. You can copy over the message from an existing file
    -  and update the year.
    +  Note that we have a multi-module repo, so `go test` commands may need to be
    +  run from the root of each module in order to cause all tests to run.
    +
    +  *Alternatively*, you may find it easier to push your changes to your fork on
    +  GitHub, which will trigger a GitHub Actions run that you can use to verify
    +  everything is passing.
    +
    +- Note that there are two GitHub actions checks that need not be green:
    +
    +  1. We test the freshness of the generated proto code we maintain via the
    +     `vet-proto` check. If the source proto files are updated, but our repo is
    +     not updated, an optional checker will fail. This will be fixed by our team
    +     in a separate PR and will not prevent the merge of your PR.
    +
    +  2. We run a checker that will fail if there is any change in dependencies of
    +     an exported package via the `dependencies` check. If new dependencies are
    +     added that are not appropriate, we may not accept your PR (see below).
    +
    +- If you are adding a **new file**, make sure it has the **copyright message**
    +  template at the top as a comment. You can copy the message from an existing
    +  file and update the year.
     
     - The grpc package should only depend on standard Go packages and a small number
    -  of exceptions. If your contribution introduces new dependencies which are NOT
    -  in the [list](https://godoc.org/google.golang.org/grpc?imports), you need a
    -  discussion with gRPC-Go authors and consultants.
    +  of exceptions. **If your contribution introduces new dependencies**, you will
    +  need a discussion with gRPC-Go maintainers.
     
    -- For speculative changes, consider opening an issue and discussing it first. If
    -  you are suggesting a behavioral or API change, consider starting with a [gRFC
    -  proposal](https://github.com/grpc/proposal).
    +### PR Descriptions
     
    -- Provide a good **PR description** as a record of **what** change is being made
    -  and **why** it was made. Link to a GitHub issue if it exists.
    +- **PR titles** should start with the name of the component being addressed, or
    +  the type of change. Examples: transport, client, server, round_robin, xds,
    +  cleanup, deps.
     
    -- If you want to fix formatting or style, consider whether your changes are an
    -  obvious improvement or might be considered a personal preference. If a style
    -  change is based on preference, it likely will not be accepted. If it corrects
    -  widely agreed-upon anti-patterns, then please do create a PR and explain the
    -  benefits of the change.
    +- Read and follow the **guidelines for PR titles and descriptions** here:
    +  https://google.github.io/eng-practices/review/developer/cl-descriptions.html
     
    -- Unless your PR is trivial, you should expect there will be reviewer comments
    -  that you'll need to address before merging. We'll mark it as `Status: Requires
    -  Reporter Clarification` if we expect you to respond to these comments in a
    -  timely manner. If the PR remains inactive for 6 days, it will be marked as
    -  `stale` and automatically close 7 days after that if we don't hear back from
    -  you.
    +  *particularly* the sections "First Line" and "Body is Informative".
     
    -- Maintain **clean commit history** and use **meaningful commit messages**. PRs
    -  with messy commit history are difficult to review and won't be merged. Use
    -  `rebase -i upstream/master` to curate your commit history and/or to bring in
    -  latest changes from master (but avoid rebasing in the middle of a code
    -  review).
    +  Note: your PR description will be used as the git commit message in a
    +  squash-and-merge if your PR is approved. We may make changes to this as
    +  necessary.
     
    -- Keep your PR up to date with upstream/master (if there are merge conflicts, we
    -  can't really merge your change).
    +- **Does this PR relate to an open issue?** On the first line, please use the
    +  tag `Fixes #<issue>` to ensure the issue is closed when the PR is merged. Or
    +  use `Updates #<issue>` if the PR is related to an open issue, but does not fix
    +  it. Consider filing an issue if one does not already exist.
     
    -- **All tests need to be passing** before your change can be merged. We
    -  recommend you **run tests locally** before creating your PR to catch breakages
    -  early on.
    -  - `./scripts/vet.sh` to catch vet errors
    -  - `go test -cpu 1,4 -timeout 7m ./...` to run the tests
    -  - `go test -race -cpu 1,4 -timeout 7m ./...` to run tests in race mode
    +- PR descriptions *must* conclude with **release notes** as follows:
    +
    +  ```
    +  RELEASE NOTES:
    +  * <component>: <summary>
    +  ```
    +
    +  This need not match the PR title.
    +
    +  The summary must:
    +
    +  * be something that gRPC users will understand.
    +
    +  * clearly explain the feature being added, the issue being fixed, or the
    +    behavior being changed, etc. If fixing a bug, be clear about how the bug
    +    can be triggered by an end-user.
    +
    +  * begin with a capital letter and use complete sentences.
     
    -- Exceptions to the rules can be made if there's a compelling reason for doing so.
    +  * be as short as possible to describe the change being made.
    +
    +  If a PR is *not* end-user visible -- e.g. a cleanup, testing change, or
    +  GitHub-related, use `RELEASE NOTES: n/a`.
    +
    +### PR Process
    +
    +- Please **self-review** your code changes before sending your PR. This will
    +  prevent simple, obvious errors from causing delays.
    +
    +- Maintain a **clean commit history** and use **meaningful commit messages**.
    +  PRs with messy commit histories are difficult to review and won't be merged.
    +  Before sending your PR, ensure your changes are based on top of the latest
    +  `upstream/master` commits, and avoid rebasing in the middle of a code review.
    +  You should **never use `git push -f`** unless absolutely necessary during a
    +  review, as it can interfere with GitHub's tracking of comments.
    +
    +- Unless your PR is trivial, you should **expect reviewer comments** that you
    +  will need to address before merging. We'll label the PR as `Status: Requires
    +  Reporter Clarification` if we expect you to respond to these comments in a
    +  timely manner. If the PR remains inactive for 6 days, it will be marked as
    +  `stale`, and we will automatically close it after 7 days if we don't hear back
    +  from you. Please feel free to ping issues or bugs if you do not get a response
    +  within a week.
    diff --git a/vendor/google.golang.org/grpc/MAINTAINERS.md b/vendor/google.golang.org/grpc/MAINTAINERS.md
    index 5d4096d46..df35bb9a8 100644
    --- a/vendor/google.golang.org/grpc/MAINTAINERS.md
    +++ b/vendor/google.golang.org/grpc/MAINTAINERS.md
    @@ -9,21 +9,19 @@ for general contribution guidelines.
     
     ## Maintainers (in alphabetical order)
     
    -- [aranjans](https://github.com/aranjans), Google LLC
     - [arjan-bal](https://github.com/arjan-bal), Google LLC
     - [arvindbr8](https://github.com/arvindbr8), Google LLC
     - [atollena](https://github.com/atollena), Datadog, Inc.
     - [dfawley](https://github.com/dfawley), Google LLC
     - [easwars](https://github.com/easwars), Google LLC
    -- [erm-g](https://github.com/erm-g), Google LLC
     - [gtcooke94](https://github.com/gtcooke94), Google LLC
    -- [purnesh42h](https://github.com/purnesh42h), Google LLC
    -- [zasweq](https://github.com/zasweq), Google LLC
     
     ## Emeritus Maintainers (in alphabetical order)
     - [adelez](https://github.com/adelez)
    +- [aranjans](https://github.com/aranjans)
     - [canguler](https://github.com/canguler)
     - [cesarghali](https://github.com/cesarghali)
    +- [erm-g](https://github.com/erm-g)
     - [iamqizhao](https://github.com/iamqizhao)
     - [jeanbza](https://github.com/jeanbza)
     - [jtattermusch](https://github.com/jtattermusch)
    @@ -32,5 +30,7 @@ for general contribution guidelines.
     - [matt-kwong](https://github.com/matt-kwong)
     - [menghanl](https://github.com/menghanl)
     - [nicolasnoble](https://github.com/nicolasnoble)
    +- [purnesh42h](https://github.com/purnesh42h)
     - [srini100](https://github.com/srini100)
     - [yongni](https://github.com/yongni)
    +- [zasweq](https://github.com/zasweq)
    diff --git a/vendor/google.golang.org/grpc/README.md b/vendor/google.golang.org/grpc/README.md
    index b572707c6..f9a88d597 100644
    --- a/vendor/google.golang.org/grpc/README.md
    +++ b/vendor/google.golang.org/grpc/README.md
    @@ -32,6 +32,7 @@ import "google.golang.org/grpc"
     - [Low-level technical docs](Documentation) from this repository
     - [Performance benchmark][]
     - [Examples](examples)
    +- [Contribution guidelines](CONTRIBUTING.md)
     
     ## FAQ
     
    diff --git a/vendor/google.golang.org/grpc/balancer/balancer.go b/vendor/google.golang.org/grpc/balancer/balancer.go
    index c9b343c71..b1264017d 100644
    --- a/vendor/google.golang.org/grpc/balancer/balancer.go
    +++ b/vendor/google.golang.org/grpc/balancer/balancer.go
    @@ -360,6 +360,10 @@ type Balancer interface {
     	// call SubConn.Shutdown for its existing SubConns; however, this will be
     	// required in a future release, so it is recommended.
     	Close()
    +	// ExitIdle instructs the LB policy to reconnect to backends / exit the
    +	// IDLE state, if appropriate and possible.  Note that SubConns that enter
    +	// the IDLE state will not reconnect until SubConn.Connect is called.
    +	ExitIdle()
     }
     
     // ExitIdler is an optional interface for balancers to implement.  If
    @@ -367,8 +371,8 @@ type Balancer interface {
     // the ClientConn is idle.  If unimplemented, ClientConn.Connect will cause
     // all SubConns to connect.
     //
    -// Notice: it will be required for all balancers to implement this in a future
    -// release.
    +// Deprecated: All balancers must implement this interface. This interface will
    +// be removed in a future release.
     type ExitIdler interface {
     	// ExitIdle instructs the LB policy to reconnect to backends / exit the
     	// IDLE state, if appropriate and possible.  Note that SubConns that enter
    diff --git a/vendor/google.golang.org/grpc/balancer/base/balancer.go b/vendor/google.golang.org/grpc/balancer/base/balancer.go
    index d5ed172ae..4d576876d 100644
    --- a/vendor/google.golang.org/grpc/balancer/base/balancer.go
    +++ b/vendor/google.golang.org/grpc/balancer/base/balancer.go
    @@ -41,7 +41,7 @@ func (bb *baseBuilder) Build(cc balancer.ClientConn, _ balancer.BuildOptions) ba
     		cc:            cc,
     		pickerBuilder: bb.pickerBuilder,
     
    -		subConns: resolver.NewAddressMap(),
    +		subConns: resolver.NewAddressMapV2[balancer.SubConn](),
     		scStates: make(map[balancer.SubConn]connectivity.State),
     		csEvltr:  &balancer.ConnectivityStateEvaluator{},
     		config:   bb.config,
    @@ -65,7 +65,7 @@ type baseBalancer struct {
     	csEvltr *balancer.ConnectivityStateEvaluator
     	state   connectivity.State
     
    -	subConns *resolver.AddressMap
    +	subConns *resolver.AddressMapV2[balancer.SubConn]
     	scStates map[balancer.SubConn]connectivity.State
     	picker   balancer.Picker
     	config   Config
    @@ -100,7 +100,7 @@ func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) error {
     	// Successful resolution; clear resolver error and ensure we return nil.
     	b.resolverErr = nil
     	// addrsSet is the set converted from addrs, it's used for quick lookup of an address.
    -	addrsSet := resolver.NewAddressMap()
    +	addrsSet := resolver.NewAddressMapV2[any]()
     	for _, a := range s.ResolverState.Addresses {
     		addrsSet.Set(a, nil)
     		if _, ok := b.subConns.Get(a); !ok {
    @@ -122,8 +122,7 @@ func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) error {
     		}
     	}
     	for _, a := range b.subConns.Keys() {
    -		sci, _ := b.subConns.Get(a)
    -		sc := sci.(balancer.SubConn)
    +		sc, _ := b.subConns.Get(a)
     		// a was removed by resolver.
     		if _, ok := addrsSet.Get(a); !ok {
     			sc.Shutdown()
    @@ -173,8 +172,7 @@ func (b *baseBalancer) regeneratePicker() {
     
     	// Filter out all ready SCs from full subConn map.
     	for _, addr := range b.subConns.Keys() {
    -		sci, _ := b.subConns.Get(addr)
    -		sc := sci.(balancer.SubConn)
    +		sc, _ := b.subConns.Get(addr)
     		if st, ok := b.scStates[sc]; ok && st == connectivity.Ready {
     			readySCs[sc] = SubConnInfo{Address: addr}
     		}
    diff --git a/vendor/google.golang.org/grpc/balancer/endpointsharding/endpointsharding.go b/vendor/google.golang.org/grpc/balancer/endpointsharding/endpointsharding.go
    index 421c4fecc..360db08eb 100644
    --- a/vendor/google.golang.org/grpc/balancer/endpointsharding/endpointsharding.go
    +++ b/vendor/google.golang.org/grpc/balancer/endpointsharding/endpointsharding.go
    @@ -37,6 +37,8 @@ import (
     	"google.golang.org/grpc/resolver"
     )
     
    +var randIntN = rand.IntN
    +
     // ChildState is the balancer state of a child along with the endpoint which
     // identifies the child balancer.
     type ChildState struct {
    @@ -45,7 +47,15 @@ type ChildState struct {
     
     	// Balancer exposes only the ExitIdler interface of the child LB policy.
     	// Other methods of the child policy are called only by endpointsharding.
    -	Balancer balancer.ExitIdler
    +	Balancer ExitIdler
    +}
    +
    +// ExitIdler provides access to only the ExitIdle method of the child balancer.
    +type ExitIdler interface {
    +	// ExitIdle instructs the LB policy to reconnect to backends / exit the
    +	// IDLE state, if appropriate and possible.  Note that SubConns that enter
    +	// the IDLE state will not reconnect until SubConn.Connect is called.
    +	ExitIdle()
     }
     
     // Options are the options to configure the behaviour of the
    @@ -73,7 +83,7 @@ func NewBalancer(cc balancer.ClientConn, opts balancer.BuildOptions, childBuilde
     		esOpts:       esOpts,
     		childBuilder: childBuilder,
     	}
    -	es.children.Store(resolver.NewEndpointMap())
    +	es.children.Store(resolver.NewEndpointMap[*balancerWrapper]())
     	return es
     }
     
    @@ -90,7 +100,7 @@ type endpointSharding struct {
     	// calls into a child. To avoid deadlocks, do not acquire childMu while
     	// holding mu.
     	childMu  sync.Mutex
    -	children atomic.Pointer[resolver.EndpointMap] // endpoint -> *balancerWrapper
    +	children atomic.Pointer[resolver.EndpointMap[*balancerWrapper]]
     
     	// inhibitChildUpdates is set during UpdateClientConnState/ResolverError
     	// calls (calls to children will each produce an update, only want one
    @@ -104,6 +114,21 @@ type endpointSharding struct {
     	mu sync.Mutex
     }
     
    +// rotateEndpoints returns a slice of all the input endpoints rotated a random
    +// amount.
    +func rotateEndpoints(es []resolver.Endpoint) []resolver.Endpoint {
    +	les := len(es)
    +	if les == 0 {
    +		return es
    +	}
    +	r := randIntN(les)
    +	// Make a copy to avoid mutating data beyond the end of es.
    +	ret := make([]resolver.Endpoint, les)
    +	copy(ret, es[r:])
    +	copy(ret[les-r:], es[:r])
    +	return ret
    +}
    +
     // UpdateClientConnState creates a child for new endpoints and deletes children
     // for endpoints that are no longer present. It also updates all the children,
     // and sends a single synchronous update of the childrens' aggregated state at
    @@ -122,18 +147,17 @@ func (es *endpointSharding) UpdateClientConnState(state balancer.ClientConnState
     	var ret error
     
     	children := es.children.Load()
    -	newChildren := resolver.NewEndpointMap()
    +	newChildren := resolver.NewEndpointMap[*balancerWrapper]()
     
     	// Update/Create new children.
    -	for _, endpoint := range state.ResolverState.Endpoints {
    +	for _, endpoint := range rotateEndpoints(state.ResolverState.Endpoints) {
     		if _, ok := newChildren.Get(endpoint); ok {
     			// Endpoint child was already created, continue to avoid duplicate
     			// update.
     			continue
     		}
    -		var childBalancer *balancerWrapper
    -		if val, ok := children.Get(endpoint); ok {
    -			childBalancer = val.(*balancerWrapper)
    +		childBalancer, ok := children.Get(endpoint)
    +		if ok {
     			// Endpoint attributes may have changed, update the stored endpoint.
     			es.mu.Lock()
     			childBalancer.childState.Endpoint = endpoint
    @@ -166,7 +190,7 @@ func (es *endpointSharding) UpdateClientConnState(state balancer.ClientConnState
     	for _, e := range children.Keys() {
     		child, _ := children.Get(e)
     		if _, ok := newChildren.Get(e); !ok {
    -			child.(*balancerWrapper).closeLocked()
    +			child.closeLocked()
     		}
     	}
     	es.children.Store(newChildren)
    @@ -189,7 +213,7 @@ func (es *endpointSharding) ResolverError(err error) {
     	}()
     	children := es.children.Load()
     	for _, child := range children.Values() {
    -		child.(*balancerWrapper).resolverErrorLocked(err)
    +		child.resolverErrorLocked(err)
     	}
     }
     
    @@ -202,7 +226,17 @@ func (es *endpointSharding) Close() {
     	defer es.childMu.Unlock()
     	children := es.children.Load()
     	for _, child := range children.Values() {
    -		child.(*balancerWrapper).closeLocked()
    +		child.closeLocked()
    +	}
    +}
    +
    +func (es *endpointSharding) ExitIdle() {
    +	es.childMu.Lock()
    +	defer es.childMu.Unlock()
    +	for _, bw := range es.children.Load().Values() {
    +		if !bw.isClosed {
    +			bw.child.ExitIdle()
    +		}
     	}
     }
     
    @@ -222,8 +256,7 @@ func (es *endpointSharding) updateState() {
     	childStates := make([]ChildState, 0, children.Len())
     
     	for _, child := range children.Values() {
    -		bw := child.(*balancerWrapper)
    -		childState := bw.childState
    +		childState := child.childState
     		childStates = append(childStates, childState)
     		childPicker := childState.State.Picker
     		switch childState.State.ConnectivityState {
    @@ -263,7 +296,7 @@ func (es *endpointSharding) updateState() {
     	p := &pickerWithChildStates{
     		pickers:     pickers,
     		childStates: childStates,
    -		next:        uint32(rand.IntN(len(pickers))),
    +		next:        uint32(randIntN(len(pickers))),
     	}
     	es.cc.UpdateState(balancer.State{
     		ConnectivityState: aggState,
    @@ -328,15 +361,13 @@ func (bw *balancerWrapper) UpdateState(state balancer.State) {
     // ExitIdle pings an IDLE child balancer to exit idle in a new goroutine to
     // avoid deadlocks due to synchronous balancer state updates.
     func (bw *balancerWrapper) ExitIdle() {
    -	if ei, ok := bw.child.(balancer.ExitIdler); ok {
    -		go func() {
    -			bw.es.childMu.Lock()
    -			if !bw.isClosed {
    -				ei.ExitIdle()
    -			}
    -			bw.es.childMu.Unlock()
    -		}()
    -	}
    +	go func() {
    +		bw.es.childMu.Lock()
    +		if !bw.isClosed {
    +			bw.child.ExitIdle()
    +		}
    +		bw.es.childMu.Unlock()
    +	}()
     }
     
     // updateClientConnStateLocked delivers the ClientConnState to the child
    diff --git a/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go b/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go
    index ea8899818..b4bc3a2bf 100644
    --- a/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go
    +++ b/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go
    @@ -16,55 +16,124 @@
      *
      */
     
    -// Package pickfirst contains the pick_first load balancing policy.
    +// Package pickfirst contains the pick_first load balancing policy which
    +// is the universal leaf policy.
     package pickfirst
     
     import (
     	"encoding/json"
     	"errors"
     	"fmt"
    -	rand "math/rand/v2"
    +	"net"
    +	"net/netip"
    +	"sync"
    +	"time"
     
     	"google.golang.org/grpc/balancer"
     	"google.golang.org/grpc/balancer/pickfirst/internal"
     	"google.golang.org/grpc/connectivity"
    +	expstats "google.golang.org/grpc/experimental/stats"
     	"google.golang.org/grpc/grpclog"
    -	"google.golang.org/grpc/internal/envconfig"
     	internalgrpclog "google.golang.org/grpc/internal/grpclog"
     	"google.golang.org/grpc/internal/pretty"
     	"google.golang.org/grpc/resolver"
     	"google.golang.org/grpc/serviceconfig"
    -
    -	_ "google.golang.org/grpc/balancer/pickfirst/pickfirstleaf" // For automatically registering the new pickfirst if required.
     )
     
     func init() {
    -	if envconfig.NewPickFirstEnabled {
    -		return
    -	}
     	balancer.Register(pickfirstBuilder{})
     }
     
    -var logger = grpclog.Component("pick-first-lb")
    +// Name is the name of the pick_first balancer.
    +const Name = "pick_first"
    +
    +// enableHealthListenerKeyType is a unique key type used in resolver
    +// attributes to indicate whether the health listener usage is enabled.
    +type enableHealthListenerKeyType struct{}
    +
    +var (
    +	logger               = grpclog.Component("pick-first-leaf-lb")
    +	disconnectionsMetric = expstats.RegisterInt64Count(expstats.MetricDescriptor{
    +		Name:        "grpc.lb.pick_first.disconnections",
    +		Description: "EXPERIMENTAL. Number of times the selected subchannel becomes disconnected.",
    +		Unit:        "{disconnection}",
    +		Labels:      []string{"grpc.target"},
    +		Default:     false,
    +	})
    +	connectionAttemptsSucceededMetric = expstats.RegisterInt64Count(expstats.MetricDescriptor{
    +		Name:        "grpc.lb.pick_first.connection_attempts_succeeded",
    +		Description: "EXPERIMENTAL. Number of successful connection attempts.",
    +		Unit:        "{attempt}",
    +		Labels:      []string{"grpc.target"},
    +		Default:     false,
    +	})
    +	connectionAttemptsFailedMetric = expstats.RegisterInt64Count(expstats.MetricDescriptor{
    +		Name:        "grpc.lb.pick_first.connection_attempts_failed",
    +		Description: "EXPERIMENTAL. Number of failed connection attempts.",
    +		Unit:        "{attempt}",
    +		Labels:      []string{"grpc.target"},
    +		Default:     false,
    +	})
    +)
     
     const (
    -	// Name is the name of the pick_first balancer.
    -	Name      = "pick_first"
    -	logPrefix = "[pick-first-lb %p] "
    +	// TODO: change to pick-first when this becomes the default pick_first policy.
    +	logPrefix = "[pick-first-leaf-lb %p] "
    +	// connectionDelayInterval is the time to wait for during the happy eyeballs
    +	// pass before starting the next connection attempt.
    +	connectionDelayInterval = 250 * time.Millisecond
    +)
    +
    +type ipAddrFamily int
    +
    +const (
    +	// ipAddrFamilyUnknown represents strings that can't be parsed as an IP
    +	// address.
    +	ipAddrFamilyUnknown ipAddrFamily = iota
    +	ipAddrFamilyV4
    +	ipAddrFamilyV6
     )
     
     type pickfirstBuilder struct{}
     
    -func (pickfirstBuilder) Build(cc balancer.ClientConn, _ balancer.BuildOptions) balancer.Balancer {
    -	b := &pickfirstBalancer{cc: cc}
    +func (pickfirstBuilder) Build(cc balancer.ClientConn, bo balancer.BuildOptions) balancer.Balancer {
    +	b := &pickfirstBalancer{
    +		cc:              cc,
    +		target:          bo.Target.String(),
    +		metricsRecorder: cc.MetricsRecorder(),
    +
    +		subConns:              resolver.NewAddressMapV2[*scData](),
    +		state:                 connectivity.Connecting,
    +		cancelConnectionTimer: func() {},
    +	}
     	b.logger = internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf(logPrefix, b))
     	return b
     }
     
    -func (pickfirstBuilder) Name() string {
    +func (b pickfirstBuilder) Name() string {
     	return Name
     }
     
    +func (pickfirstBuilder) ParseConfig(js json.RawMessage) (serviceconfig.LoadBalancingConfig, error) {
    +	var cfg pfConfig
    +	if err := json.Unmarshal(js, &cfg); err != nil {
    +		return nil, fmt.Errorf("pickfirst: unable to unmarshal LB policy config: %s, error: %v", string(js), err)
    +	}
    +	return cfg, nil
    +}
    +
    +// EnableHealthListener updates the state to configure pickfirst for using a
    +// generic health listener.
    +//
    +// # Experimental
    +//
    +// Notice: This API is EXPERIMENTAL and may be changed or removed in a later
    +// release.
    +func EnableHealthListener(state resolver.State) resolver.State {
    +	state.Attributes = state.Attributes.WithValue(enableHealthListenerKeyType{}, true)
    +	return state
    +}
    +
     type pfConfig struct {
     	serviceconfig.LoadBalancingConfig `json:"-"`
     
    @@ -74,90 +143,129 @@ type pfConfig struct {
     	ShuffleAddressList bool `json:"shuffleAddressList"`
     }
     
    -func (pickfirstBuilder) ParseConfig(js json.RawMessage) (serviceconfig.LoadBalancingConfig, error) {
    -	var cfg pfConfig
    -	if err := json.Unmarshal(js, &cfg); err != nil {
    -		return nil, fmt.Errorf("pickfirst: unable to unmarshal LB policy config: %s, error: %v", string(js), err)
    +// scData keeps track of the current state of the subConn.
    +// It is not safe for concurrent access.
    +type scData struct {
    +	// The following fields are initialized at build time and read-only after
    +	// that.
    +	subConn balancer.SubConn
    +	addr    resolver.Address
    +
    +	rawConnectivityState connectivity.State
    +	// The effective connectivity state based on raw connectivity, health state
    +	// and after following sticky TransientFailure behaviour defined in A62.
    +	effectiveState              connectivity.State
    +	lastErr                     error
    +	connectionFailedInFirstPass bool
    +}
    +
    +func (b *pickfirstBalancer) newSCData(addr resolver.Address) (*scData, error) {
    +	sd := &scData{
    +		rawConnectivityState: connectivity.Idle,
    +		effectiveState:       connectivity.Idle,
    +		addr:                 addr,
     	}
    -	return cfg, nil
    +	sc, err := b.cc.NewSubConn([]resolver.Address{addr}, balancer.NewSubConnOptions{
    +		StateListener: func(state balancer.SubConnState) {
    +			b.updateSubConnState(sd, state)
    +		},
    +	})
    +	if err != nil {
    +		return nil, err
    +	}
    +	sd.subConn = sc
    +	return sd, nil
     }
     
     type pickfirstBalancer struct {
    -	logger  *internalgrpclog.PrefixLogger
    -	state   connectivity.State
    -	cc      balancer.ClientConn
    -	subConn balancer.SubConn
    +	// The following fields are initialized at build time and read-only after
    +	// that and therefore do not need to be guarded by a mutex.
    +	logger          *internalgrpclog.PrefixLogger
    +	cc              balancer.ClientConn
    +	target          string
    +	metricsRecorder expstats.MetricsRecorder // guaranteed to be non nil
    +
    +	// The mutex is used to ensure synchronization of updates triggered
    +	// from the idle picker and the already serialized resolver,
    +	// SubConn state updates.
    +	mu sync.Mutex
    +	// State reported to the channel based on SubConn states and resolver
    +	// updates.
    +	state connectivity.State
    +	// scData for active subonns mapped by address.
    +	subConns              *resolver.AddressMapV2[*scData]
    +	addressList           addressList
    +	firstPass             bool
    +	numTF                 int
    +	cancelConnectionTimer func()
    +	healthCheckingEnabled bool
     }
     
    +// ResolverError is called by the ClientConn when the name resolver produces
    +// an error or when pickfirst determined the resolver update to be invalid.
     func (b *pickfirstBalancer) ResolverError(err error) {
    +	b.mu.Lock()
    +	defer b.mu.Unlock()
    +	b.resolverErrorLocked(err)
    +}
    +
    +func (b *pickfirstBalancer) resolverErrorLocked(err error) {
     	if b.logger.V(2) {
     		b.logger.Infof("Received error from the name resolver: %v", err)
     	}
    -	if b.subConn == nil {
    -		b.state = connectivity.TransientFailure
    -	}
     
    -	if b.state != connectivity.TransientFailure {
    -		// The picker will not change since the balancer does not currently
    -		// report an error.
    +	// The picker will not change since the balancer does not currently
    +	// report an error. If the balancer hasn't received a single good resolver
    +	// update yet, transition to TRANSIENT_FAILURE.
    +	if b.state != connectivity.TransientFailure && b.addressList.size() > 0 {
    +		if b.logger.V(2) {
    +			b.logger.Infof("Ignoring resolver error because balancer is using a previous good update.")
    +		}
     		return
     	}
    -	b.cc.UpdateState(balancer.State{
    +
    +	b.updateBalancerState(balancer.State{
     		ConnectivityState: connectivity.TransientFailure,
     		Picker:            &picker{err: fmt.Errorf("name resolver error: %v", err)},
     	})
     }
     
    -// Shuffler is an interface for shuffling an address list.
    -type Shuffler interface {
    -	ShuffleAddressListForTesting(n int, swap func(i, j int))
    -}
    -
    -// ShuffleAddressListForTesting pseudo-randomizes the order of addresses.  n
    -// is the number of elements.  swap swaps the elements with indexes i and j.
    -func ShuffleAddressListForTesting(n int, swap func(i, j int)) { rand.Shuffle(n, swap) }
    -
     func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState) error {
    +	b.mu.Lock()
    +	defer b.mu.Unlock()
    +	b.cancelConnectionTimer()
     	if len(state.ResolverState.Addresses) == 0 && len(state.ResolverState.Endpoints) == 0 {
    -		// The resolver reported an empty address list. Treat it like an error by
    -		// calling b.ResolverError.
    -		if b.subConn != nil {
    -			// Shut down the old subConn. All addresses were removed, so it is
    -			// no longer valid.
    -			b.subConn.Shutdown()
    -			b.subConn = nil
    -		}
    -		b.ResolverError(errors.New("produced zero addresses"))
    +		// Cleanup state pertaining to the previous resolver state.
    +		// Treat an empty address list like an error by calling b.ResolverError.
    +		b.closeSubConnsLocked()
    +		b.addressList.updateAddrs(nil)
    +		b.resolverErrorLocked(errors.New("produced zero addresses"))
     		return balancer.ErrBadResolverState
     	}
    -	// We don't have to guard this block with the env var because ParseConfig
    -	// already does so.
    +	b.healthCheckingEnabled = state.ResolverState.Attributes.Value(enableHealthListenerKeyType{}) != nil
     	cfg, ok := state.BalancerConfig.(pfConfig)
     	if state.BalancerConfig != nil && !ok {
    -		return fmt.Errorf("pickfirst: received illegal BalancerConfig (type %T): %v", state.BalancerConfig, state.BalancerConfig)
    +		return fmt.Errorf("pickfirst: received illegal BalancerConfig (type %T): %v: %w", state.BalancerConfig, state.BalancerConfig, balancer.ErrBadResolverState)
     	}
     
     	if b.logger.V(2) {
     		b.logger.Infof("Received new config %s, resolver state %s", pretty.ToJSON(cfg), pretty.ToJSON(state.ResolverState))
     	}
     
    -	var addrs []resolver.Address
    +	var newAddrs []resolver.Address
     	if endpoints := state.ResolverState.Endpoints; len(endpoints) != 0 {
    -		// Perform the optional shuffling described in gRFC A62. The shuffling will
    -		// change the order of endpoints but not touch the order of the addresses
    -		// within each endpoint. - A61
    +		// Perform the optional shuffling described in gRFC A62. The shuffling
    +		// will change the order of endpoints but not touch the order of the
    +		// addresses within each endpoint. - A61
     		if cfg.ShuffleAddressList {
     			endpoints = append([]resolver.Endpoint{}, endpoints...)
     			internal.RandShuffle(len(endpoints), func(i, j int) { endpoints[i], endpoints[j] = endpoints[j], endpoints[i] })
     		}
     
    -		// "Flatten the list by concatenating the ordered list of addresses for each
    -		// of the endpoints, in order." - A61
    +		// "Flatten the list by concatenating the ordered list of addresses for
    +		// each of the endpoints, in order." - A61
     		for _, endpoint := range endpoints {
    -			// "In the flattened list, interleave addresses from the two address
    -			// families, as per RFC-8304 section 4." - A61
    -			// TODO: support the above language.
    -			addrs = append(addrs, endpoint.Addresses...)
    +			newAddrs = append(newAddrs, endpoint.Addresses...)
     		}
     	} else {
     		// Endpoints not set, process addresses until we migrate resolver
    @@ -166,42 +274,53 @@ func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState
     		// target do not forward the corresponding correct endpoints down/split
     		// endpoints properly. Once all balancers correctly forward endpoints
     		// down, can delete this else conditional.
    -		addrs = state.ResolverState.Addresses
    +		newAddrs = state.ResolverState.Addresses
     		if cfg.ShuffleAddressList {
    -			addrs = append([]resolver.Address{}, addrs...)
    -			rand.Shuffle(len(addrs), func(i, j int) { addrs[i], addrs[j] = addrs[j], addrs[i] })
    +			newAddrs = append([]resolver.Address{}, newAddrs...)
    +			internal.RandShuffle(len(newAddrs), func(i, j int) { newAddrs[i], newAddrs[j] = newAddrs[j], newAddrs[i] })
     		}
     	}
     
    -	if b.subConn != nil {
    -		b.cc.UpdateAddresses(b.subConn, addrs)
    +	// If an address appears in multiple endpoints or in the same endpoint
    +	// multiple times, we keep it only once. We will create only one SubConn
    +	// for the address because an AddressMap is used to store SubConns.
    +	// Not de-duplicating would result in attempting to connect to the same
    +	// SubConn multiple times in the same pass. We don't want this.
    +	newAddrs = deDupAddresses(newAddrs)
    +	newAddrs = interleaveAddresses(newAddrs)
    +
    +	prevAddr := b.addressList.currentAddress()
    +	prevSCData, found := b.subConns.Get(prevAddr)
    +	prevAddrsCount := b.addressList.size()
    +	isPrevRawConnectivityStateReady := found && prevSCData.rawConnectivityState == connectivity.Ready
    +	b.addressList.updateAddrs(newAddrs)
    +
    +	// If the previous ready SubConn exists in new address list,
    +	// keep this connection and don't create new SubConns.
    +	if isPrevRawConnectivityStateReady && b.addressList.seekTo(prevAddr) {
     		return nil
     	}
     
    -	var subConn balancer.SubConn
    -	subConn, err := b.cc.NewSubConn(addrs, balancer.NewSubConnOptions{
    -		StateListener: func(state balancer.SubConnState) {
    -			b.updateSubConnState(subConn, state)
    -		},
    -	})
    -	if err != nil {
    -		if b.logger.V(2) {
    -			b.logger.Infof("Failed to create new SubConn: %v", err)
    -		}
    -		b.state = connectivity.TransientFailure
    -		b.cc.UpdateState(balancer.State{
    -			ConnectivityState: connectivity.TransientFailure,
    -			Picker:            &picker{err: fmt.Errorf("error creating connection: %v", err)},
    +	b.reconcileSubConnsLocked(newAddrs)
    +	// If it's the first resolver update or the balancer was already READY
    +	// (but the new address list does not contain the ready SubConn) or
    +	// CONNECTING, enter CONNECTING.
    +	// We may be in TRANSIENT_FAILURE due to a previous empty address list,
    +	// we should still enter CONNECTING because the sticky TF behaviour
    +	// mentioned in A62 applies only when the TRANSIENT_FAILURE is reported
    +	// due to connectivity failures.
    +	if isPrevRawConnectivityStateReady || b.state == connectivity.Connecting || prevAddrsCount == 0 {
    +		// Start connection attempt at first address.
    +		b.forceUpdateConcludedStateLocked(balancer.State{
    +			ConnectivityState: connectivity.Connecting,
    +			Picker:            &picker{err: balancer.ErrNoSubConnAvailable},
     		})
    -		return balancer.ErrBadResolverState
    +		b.startFirstPassLocked()
    +	} else if b.state == connectivity.TransientFailure {
    +		// If we're in TRANSIENT_FAILURE, we stay in TRANSIENT_FAILURE until
    +		// we're READY. See A62.
    +		b.startFirstPassLocked()
     	}
    -	b.subConn = subConn
    -	b.state = connectivity.Idle
    -	b.cc.UpdateState(balancer.State{
    -		ConnectivityState: connectivity.Connecting,
    -		Picker:            &picker{err: balancer.ErrNoSubConnAvailable},
    -	})
    -	b.subConn.Connect()
     	return nil
     }
     
    @@ -211,63 +330,484 @@ func (b *pickfirstBalancer) UpdateSubConnState(subConn balancer.SubConn, state b
     	b.logger.Errorf("UpdateSubConnState(%v, %+v) called unexpectedly", subConn, state)
     }
     
    -func (b *pickfirstBalancer) updateSubConnState(subConn balancer.SubConn, state balancer.SubConnState) {
    -	if b.logger.V(2) {
    -		b.logger.Infof("Received SubConn state update: %p, %+v", subConn, state)
    +func (b *pickfirstBalancer) Close() {
    +	b.mu.Lock()
    +	defer b.mu.Unlock()
    +	b.closeSubConnsLocked()
    +	b.cancelConnectionTimer()
    +	b.state = connectivity.Shutdown
    +}
    +
    +// ExitIdle moves the balancer out of idle state. It can be called concurrently
    +// by the idlePicker and clientConn so access to variables should be
    +// synchronized.
    +func (b *pickfirstBalancer) ExitIdle() {
    +	b.mu.Lock()
    +	defer b.mu.Unlock()
    +	if b.state == connectivity.Idle {
    +		// Move the balancer into CONNECTING state immediately. This is done to
    +		// avoid staying in IDLE if a resolver update arrives before the first
    +		// SubConn reports CONNECTING.
    +		b.updateBalancerState(balancer.State{
    +			ConnectivityState: connectivity.Connecting,
    +			Picker:            &picker{err: balancer.ErrNoSubConnAvailable},
    +		})
    +		b.startFirstPassLocked()
    +	}
    +}
    +
    +func (b *pickfirstBalancer) startFirstPassLocked() {
    +	b.firstPass = true
    +	b.numTF = 0
    +	// Reset the connection attempt record for existing SubConns.
    +	for _, sd := range b.subConns.Values() {
    +		sd.connectionFailedInFirstPass = false
    +	}
    +	b.requestConnectionLocked()
    +}
    +
    +func (b *pickfirstBalancer) closeSubConnsLocked() {
    +	for _, sd := range b.subConns.Values() {
    +		sd.subConn.Shutdown()
    +	}
    +	b.subConns = resolver.NewAddressMapV2[*scData]()
    +}
    +
    +// deDupAddresses ensures that each address appears only once in the slice.
    +func deDupAddresses(addrs []resolver.Address) []resolver.Address {
    +	seenAddrs := resolver.NewAddressMapV2[bool]()
    +	retAddrs := []resolver.Address{}
    +
    +	for _, addr := range addrs {
    +		if _, ok := seenAddrs.Get(addr); ok {
    +			continue
    +		}
    +		seenAddrs.Set(addr, true)
    +		retAddrs = append(retAddrs, addr)
    +	}
    +	return retAddrs
    +}
    +
    +// interleaveAddresses interleaves addresses of both families (IPv4 and IPv6)
    +// as per RFC-8305 section 4.
    +// Whichever address family is first in the list is followed by an address of
    +// the other address family; that is, if the first address in the list is IPv6,
    +// then the first IPv4 address should be moved up in the list to be second in
    +// the list. It doesn't support configuring "First Address Family Count", i.e.
    +// there will always be a single member of the first address family at the
    +// beginning of the interleaved list.
    +// Addresses that are neither IPv4 nor IPv6 are treated as part of a third
    +// "unknown" family for interleaving.
    +// See: https://datatracker.ietf.org/doc/html/rfc8305#autoid-6
    +func interleaveAddresses(addrs []resolver.Address) []resolver.Address {
    +	familyAddrsMap := map[ipAddrFamily][]resolver.Address{}
    +	interleavingOrder := []ipAddrFamily{}
    +	for _, addr := range addrs {
    +		family := addressFamily(addr.Addr)
    +		if _, found := familyAddrsMap[family]; !found {
    +			interleavingOrder = append(interleavingOrder, family)
    +		}
    +		familyAddrsMap[family] = append(familyAddrsMap[family], addr)
    +	}
    +
    +	interleavedAddrs := make([]resolver.Address, 0, len(addrs))
    +
    +	for curFamilyIdx := 0; len(interleavedAddrs) < len(addrs); curFamilyIdx = (curFamilyIdx + 1) % len(interleavingOrder) {
    +		// Some IP types may have fewer addresses than others, so we look for
    +		// the next type that has a remaining member to add to the interleaved
    +		// list.
    +		family := interleavingOrder[curFamilyIdx]
    +		remainingMembers := familyAddrsMap[family]
    +		if len(remainingMembers) > 0 {
    +			interleavedAddrs = append(interleavedAddrs, remainingMembers[0])
    +			familyAddrsMap[family] = remainingMembers[1:]
    +		}
    +	}
    +
    +	return interleavedAddrs
    +}
    +
    +// addressFamily returns the ipAddrFamily after parsing the address string.
    +// If the address isn't of the format "ip-address:port", it returns
    +// ipAddrFamilyUnknown. The address may be valid even if it's not an IP when
    +// using a resolver like passthrough where the address may be a hostname in
    +// some format that the dialer can resolve.
    +func addressFamily(address string) ipAddrFamily {
    +	// Parse the IP after removing the port.
    +	host, _, err := net.SplitHostPort(address)
    +	if err != nil {
    +		return ipAddrFamilyUnknown
    +	}
    +	ip, err := netip.ParseAddr(host)
    +	if err != nil {
    +		return ipAddrFamilyUnknown
    +	}
    +	switch {
    +	case ip.Is4() || ip.Is4In6():
    +		return ipAddrFamilyV4
    +	case ip.Is6():
    +		return ipAddrFamilyV6
    +	default:
    +		return ipAddrFamilyUnknown
    +	}
    +}
    +
    +// reconcileSubConnsLocked updates the active subchannels based on a new address
    +// list from the resolver. It does this by:
    +//   - closing subchannels: any existing subchannels associated with addresses
    +//     that are no longer in the updated list are shut down.
    +//   - removing subchannels: entries for these closed subchannels are removed
    +//     from the subchannel map.
    +//
    +// This ensures that the subchannel map accurately reflects the current set of
    +// addresses received from the name resolver.
    +func (b *pickfirstBalancer) reconcileSubConnsLocked(newAddrs []resolver.Address) {
    +	newAddrsMap := resolver.NewAddressMapV2[bool]()
    +	for _, addr := range newAddrs {
    +		newAddrsMap.Set(addr, true)
    +	}
    +
    +	for _, oldAddr := range b.subConns.Keys() {
    +		if _, ok := newAddrsMap.Get(oldAddr); ok {
    +			continue
    +		}
    +		val, _ := b.subConns.Get(oldAddr)
    +		val.subConn.Shutdown()
    +		b.subConns.Delete(oldAddr)
    +	}
    +}
    +
    +// shutdownRemainingLocked shuts down remaining subConns. Called when a subConn
    +// becomes ready, which means that all other subConn must be shutdown.
    +func (b *pickfirstBalancer) shutdownRemainingLocked(selected *scData) {
    +	b.cancelConnectionTimer()
    +	for _, sd := range b.subConns.Values() {
    +		if sd.subConn != selected.subConn {
    +			sd.subConn.Shutdown()
    +		}
    +	}
    +	b.subConns = resolver.NewAddressMapV2[*scData]()
    +	b.subConns.Set(selected.addr, selected)
    +}
    +
    +// requestConnectionLocked starts connecting on the subchannel corresponding to
    +// the current address. If no subchannel exists, one is created. If the current
    +// subchannel is in TransientFailure, a connection to the next address is
    +// attempted until a subchannel is found.
    +func (b *pickfirstBalancer) requestConnectionLocked() {
    +	if !b.addressList.isValid() {
    +		return
    +	}
    +	var lastErr error
    +	for valid := true; valid; valid = b.addressList.increment() {
    +		curAddr := b.addressList.currentAddress()
    +		sd, ok := b.subConns.Get(curAddr)
    +		if !ok {
    +			var err error
    +			// We want to assign the new scData to sd from the outer scope,
    +			// hence we can't use := below.
    +			sd, err = b.newSCData(curAddr)
    +			if err != nil {
    +				// This should never happen, unless the clientConn is being shut
    +				// down.
    +				if b.logger.V(2) {
    +					b.logger.Infof("Failed to create a subConn for address %v: %v", curAddr.String(), err)
    +				}
    +				// Do nothing, the LB policy will be closed soon.
    +				return
    +			}
    +			b.subConns.Set(curAddr, sd)
    +		}
    +
    +		switch sd.rawConnectivityState {
    +		case connectivity.Idle:
    +			sd.subConn.Connect()
    +			b.scheduleNextConnectionLocked()
    +			return
    +		case connectivity.TransientFailure:
    +			// The SubConn is being re-used and failed during a previous pass
    +			// over the addressList. It has not completed backoff yet.
    +			// Mark it as having failed and try the next address.
    +			sd.connectionFailedInFirstPass = true
    +			lastErr = sd.lastErr
    +			continue
    +		case connectivity.Connecting:
    +			// Wait for the connection attempt to complete or the timer to fire
    +			// before attempting the next address.
    +			b.scheduleNextConnectionLocked()
    +			return
    +		default:
    +			b.logger.Errorf("SubConn with unexpected state %v present in SubConns map.", sd.rawConnectivityState)
    +			return
    +
    +		}
    +	}
    +
    +	// All the remaining addresses in the list are in TRANSIENT_FAILURE, end the
    +	// first pass if possible.
    +	b.endFirstPassIfPossibleLocked(lastErr)
    +}
    +
    +func (b *pickfirstBalancer) scheduleNextConnectionLocked() {
    +	b.cancelConnectionTimer()
    +	if !b.addressList.hasNext() {
    +		return
     	}
    -	if b.subConn != subConn {
    +	curAddr := b.addressList.currentAddress()
    +	cancelled := false // Access to this is protected by the balancer's mutex.
    +	closeFn := internal.TimeAfterFunc(connectionDelayInterval, func() {
    +		b.mu.Lock()
    +		defer b.mu.Unlock()
    +		// If the scheduled task is cancelled while acquiring the mutex, return.
    +		if cancelled {
    +			return
    +		}
     		if b.logger.V(2) {
    -			b.logger.Infof("Ignored state change because subConn is not recognized")
    +			b.logger.Infof("Happy Eyeballs timer expired while waiting for connection to %q.", curAddr.Addr)
    +		}
    +		if b.addressList.increment() {
    +			b.requestConnectionLocked()
     		}
    +	})
    +	// Access to the cancellation callback held by the balancer is guarded by
    +	// the balancer's mutex, so it's safe to set the boolean from the callback.
    +	b.cancelConnectionTimer = sync.OnceFunc(func() {
    +		cancelled = true
    +		closeFn()
    +	})
    +}
    +
    +func (b *pickfirstBalancer) updateSubConnState(sd *scData, newState balancer.SubConnState) {
    +	b.mu.Lock()
    +	defer b.mu.Unlock()
    +	oldState := sd.rawConnectivityState
    +	sd.rawConnectivityState = newState.ConnectivityState
    +	// Previously relevant SubConns can still callback with state updates.
    +	// To prevent pickers from returning these obsolete SubConns, this logic
    +	// is included to check if the current list of active SubConns includes this
    +	// SubConn.
    +	if !b.isActiveSCData(sd) {
     		return
     	}
    -	if state.ConnectivityState == connectivity.Shutdown {
    -		b.subConn = nil
    +	if newState.ConnectivityState == connectivity.Shutdown {
    +		sd.effectiveState = connectivity.Shutdown
     		return
     	}
     
    -	switch state.ConnectivityState {
    -	case connectivity.Ready:
    -		b.cc.UpdateState(balancer.State{
    -			ConnectivityState: state.ConnectivityState,
    -			Picker:            &picker{result: balancer.PickResult{SubConn: subConn}},
    -		})
    -	case connectivity.Connecting:
    -		if b.state == connectivity.TransientFailure {
    -			// We stay in TransientFailure until we are Ready. See A62.
    +	// Record a connection attempt when exiting CONNECTING.
    +	if newState.ConnectivityState == connectivity.TransientFailure {
    +		sd.connectionFailedInFirstPass = true
    +		connectionAttemptsFailedMetric.Record(b.metricsRecorder, 1, b.target)
    +	}
    +
    +	if newState.ConnectivityState == connectivity.Ready {
    +		connectionAttemptsSucceededMetric.Record(b.metricsRecorder, 1, b.target)
    +		b.shutdownRemainingLocked(sd)
    +		if !b.addressList.seekTo(sd.addr) {
    +			// This should not fail as we should have only one SubConn after
    +			// entering READY. The SubConn should be present in the addressList.
    +			b.logger.Errorf("Address %q not found address list in %v", sd.addr, b.addressList.addresses)
     			return
     		}
    -		b.cc.UpdateState(balancer.State{
    -			ConnectivityState: state.ConnectivityState,
    +		if !b.healthCheckingEnabled {
    +			if b.logger.V(2) {
    +				b.logger.Infof("SubConn %p reported connectivity state READY and the health listener is disabled. Transitioning SubConn to READY.", sd.subConn)
    +			}
    +
    +			sd.effectiveState = connectivity.Ready
    +			b.updateBalancerState(balancer.State{
    +				ConnectivityState: connectivity.Ready,
    +				Picker:            &picker{result: balancer.PickResult{SubConn: sd.subConn}},
    +			})
    +			return
    +		}
    +		if b.logger.V(2) {
    +			b.logger.Infof("SubConn %p reported connectivity state READY. Registering health listener.", sd.subConn)
    +		}
    +		// Send a CONNECTING update to take the SubConn out of sticky-TF if
    +		// required.
    +		sd.effectiveState = connectivity.Connecting
    +		b.updateBalancerState(balancer.State{
    +			ConnectivityState: connectivity.Connecting,
     			Picker:            &picker{err: balancer.ErrNoSubConnAvailable},
     		})
    +		sd.subConn.RegisterHealthListener(func(scs balancer.SubConnState) {
    +			b.updateSubConnHealthState(sd, scs)
    +		})
    +		return
    +	}
    +
    +	// If the LB policy is READY, and it receives a subchannel state change,
    +	// it means that the READY subchannel has failed.
    +	// A SubConn can also transition from CONNECTING directly to IDLE when
    +	// a transport is successfully created, but the connection fails
    +	// before the SubConn can send the notification for READY. We treat
    +	// this as a successful connection and transition to IDLE.
    +	// TODO: https://github.com/grpc/grpc-go/issues/7862 - Remove the second
    +	// part of the if condition below once the issue is fixed.
    +	if oldState == connectivity.Ready || (oldState == connectivity.Connecting && newState.ConnectivityState == connectivity.Idle) {
    +		// Once a transport fails, the balancer enters IDLE and starts from
    +		// the first address when the picker is used.
    +		b.shutdownRemainingLocked(sd)
    +		sd.effectiveState = newState.ConnectivityState
    +		// READY SubConn interspliced in between CONNECTING and IDLE, need to
    +		// account for that.
    +		if oldState == connectivity.Connecting {
    +			// A known issue (https://github.com/grpc/grpc-go/issues/7862)
    +			// causes a race that prevents the READY state change notification.
    +			// This works around it.
    +			connectionAttemptsSucceededMetric.Record(b.metricsRecorder, 1, b.target)
    +		}
    +		disconnectionsMetric.Record(b.metricsRecorder, 1, b.target)
    +		b.addressList.reset()
    +		b.updateBalancerState(balancer.State{
    +			ConnectivityState: connectivity.Idle,
    +			Picker:            &idlePicker{exitIdle: sync.OnceFunc(b.ExitIdle)},
    +		})
    +		return
    +	}
    +
    +	if b.firstPass {
    +		switch newState.ConnectivityState {
    +		case connectivity.Connecting:
    +			// The effective state can be in either IDLE, CONNECTING or
    +	// TRANSIENT_FAILURE. If it's TRANSIENT_FAILURE, stay in
    +			// TRANSIENT_FAILURE until it's READY. See A62.
    +			if sd.effectiveState != connectivity.TransientFailure {
    +				sd.effectiveState = connectivity.Connecting
    +				b.updateBalancerState(balancer.State{
    +					ConnectivityState: connectivity.Connecting,
    +					Picker:            &picker{err: balancer.ErrNoSubConnAvailable},
    +				})
    +			}
    +		case connectivity.TransientFailure:
    +			sd.lastErr = newState.ConnectionError
    +			sd.effectiveState = connectivity.TransientFailure
    +			// Since we're re-using common SubConns while handling resolver
    +			// updates, we could receive an out of turn TRANSIENT_FAILURE from
    +			// a pass over the previous address list. Happy Eyeballs will also
    +			// cause out of order updates to arrive.
    +
    +			if curAddr := b.addressList.currentAddress(); equalAddressIgnoringBalAttributes(&curAddr, &sd.addr) {
    +				b.cancelConnectionTimer()
    +				if b.addressList.increment() {
    +					b.requestConnectionLocked()
    +					return
    +				}
    +			}
    +
    +			// End the first pass if we've seen a TRANSIENT_FAILURE from all
    +			// SubConns once.
    +			b.endFirstPassIfPossibleLocked(newState.ConnectionError)
    +		}
    +		return
    +	}
    +
    +	// We have finished the first pass, keep re-connecting failing SubConns.
    +	switch newState.ConnectivityState {
    +	case connectivity.TransientFailure:
    +		b.numTF = (b.numTF + 1) % b.subConns.Len()
    +		sd.lastErr = newState.ConnectionError
    +		if b.numTF%b.subConns.Len() == 0 {
    +			b.updateBalancerState(balancer.State{
    +				ConnectivityState: connectivity.TransientFailure,
    +				Picker:            &picker{err: newState.ConnectionError},
    +			})
    +		}
    +		// We don't need to request re-resolution since the SubConn already
    +		// does that before reporting TRANSIENT_FAILURE.
    +		// TODO: #7534 - Move re-resolution requests from SubConn into
    +		// pick_first.
     	case connectivity.Idle:
    -		if b.state == connectivity.TransientFailure {
    -			// We stay in TransientFailure until we are Ready. Also kick the
    -			// subConn out of Idle into Connecting. See A62.
    -			b.subConn.Connect()
    +		sd.subConn.Connect()
    +	}
    +}
    +
    +// endFirstPassIfPossibleLocked ends the first happy-eyeballs pass if all the
    +// addresses are tried and their SubConns have reported a failure.
    +func (b *pickfirstBalancer) endFirstPassIfPossibleLocked(lastErr error) {
    +	// An optimization to avoid iterating over the entire SubConn map.
    +	if b.addressList.isValid() {
    +		return
    +	}
    +	// Connect() has been called on all the SubConns. The first pass can be
    +	// ended if all the SubConns have reported a failure.
    +	for _, sd := range b.subConns.Values() {
    +		if !sd.connectionFailedInFirstPass {
     			return
     		}
    -		b.cc.UpdateState(balancer.State{
    -			ConnectivityState: state.ConnectivityState,
    -			Picker:            &idlePicker{subConn: subConn},
    +	}
    +	b.firstPass = false
    +	b.updateBalancerState(balancer.State{
    +		ConnectivityState: connectivity.TransientFailure,
    +		Picker:            &picker{err: lastErr},
    +	})
    +	// Start re-connecting all the SubConns that are already in IDLE.
    +	for _, sd := range b.subConns.Values() {
    +		if sd.rawConnectivityState == connectivity.Idle {
    +			sd.subConn.Connect()
    +		}
    +	}
    +}
    +
    +func (b *pickfirstBalancer) isActiveSCData(sd *scData) bool {
    +	activeSD, found := b.subConns.Get(sd.addr)
    +	return found && activeSD == sd
    +}
    +
    +func (b *pickfirstBalancer) updateSubConnHealthState(sd *scData, state balancer.SubConnState) {
    +	b.mu.Lock()
    +	defer b.mu.Unlock()
    +	// Previously relevant SubConns can still callback with state updates.
    +	// To prevent pickers from returning these obsolete SubConns, this logic
    +	// is included to check if the current list of active SubConns includes
    +	// this SubConn.
    +	if !b.isActiveSCData(sd) {
    +		return
    +	}
    +	sd.effectiveState = state.ConnectivityState
    +	switch state.ConnectivityState {
    +	case connectivity.Ready:
    +		b.updateBalancerState(balancer.State{
    +			ConnectivityState: connectivity.Ready,
    +			Picker:            &picker{result: balancer.PickResult{SubConn: sd.subConn}},
     		})
     	case connectivity.TransientFailure:
    -		b.cc.UpdateState(balancer.State{
    -			ConnectivityState: state.ConnectivityState,
    -			Picker:            &picker{err: state.ConnectionError},
    +		b.updateBalancerState(balancer.State{
    +			ConnectivityState: connectivity.TransientFailure,
    +			Picker:            &picker{err: fmt.Errorf("pickfirst: health check failure: %v", state.ConnectionError)},
    +		})
    +	case connectivity.Connecting:
    +		b.updateBalancerState(balancer.State{
    +			ConnectivityState: connectivity.Connecting,
    +			Picker:            &picker{err: balancer.ErrNoSubConnAvailable},
     		})
    +	default:
    +		b.logger.Errorf("Got unexpected health update for SubConn %p: %v", state)
     	}
    -	b.state = state.ConnectivityState
     }
     
    -func (b *pickfirstBalancer) Close() {
    +// updateBalancerState stores the state reported to the channel and calls
    +// ClientConn.UpdateState(). As an optimization, it avoids sending duplicate
    +// updates to the channel.
    +func (b *pickfirstBalancer) updateBalancerState(newState balancer.State) {
    +	// In case of TransientFailures allow the picker to be updated to update
    +	// the connectivity error, in all other cases don't send duplicate state
    +	// updates.
    +	if newState.ConnectivityState == b.state && b.state != connectivity.TransientFailure {
    +		return
    +	}
    +	b.forceUpdateConcludedStateLocked(newState)
     }
     
    -func (b *pickfirstBalancer) ExitIdle() {
    -	if b.subConn != nil && b.state == connectivity.Idle {
    -		b.subConn.Connect()
    -	}
    +// forceUpdateConcludedStateLocked stores the state reported to the channel and
    +// calls ClientConn.UpdateState().
    +// A separate function is defined to force update the ClientConn state since the
    +// channel doesn't correctly assume that LB policies start in CONNECTING and
    +// relies on LB policy to send an initial CONNECTING update.
    +func (b *pickfirstBalancer) forceUpdateConcludedStateLocked(newState balancer.State) {
    +	b.state = newState.ConnectivityState
    +	b.cc.UpdateState(newState)
     }
     
     type picker struct {
    @@ -282,10 +822,87 @@ func (p *picker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
     // idlePicker is used when the SubConn is IDLE and kicks the SubConn into
     // CONNECTING when Pick is called.
     type idlePicker struct {
    -	subConn balancer.SubConn
    +	exitIdle func()
     }
     
     func (i *idlePicker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
    -	i.subConn.Connect()
    +	i.exitIdle()
     	return balancer.PickResult{}, balancer.ErrNoSubConnAvailable
     }
    +
    +// addressList manages sequentially iterating over addresses present in a list
    +// of endpoints. It provides a 1 dimensional view of the addresses present in
    +// the endpoints.
    +// This type is not safe for concurrent access.
    +type addressList struct {
    +	addresses []resolver.Address
    +	idx       int
    +}
    +
    +func (al *addressList) isValid() bool {
    +	return al.idx < len(al.addresses)
    +}
    +
    +func (al *addressList) size() int {
    +	return len(al.addresses)
    +}
    +
    +// increment moves to the next index in the address list.
    +// This method returns false if it went off the list, true otherwise.
    +func (al *addressList) increment() bool {
    +	if !al.isValid() {
    +		return false
    +	}
    +	al.idx++
    +	return al.idx < len(al.addresses)
    +}
    +
    +// currentAddress returns the current address pointed to in the addressList.
    +// If the list is in an invalid state, it returns an empty address instead.
    +func (al *addressList) currentAddress() resolver.Address {
    +	if !al.isValid() {
    +		return resolver.Address{}
    +	}
    +	return al.addresses[al.idx]
    +}
    +
    +func (al *addressList) reset() {
    +	al.idx = 0
    +}
    +
    +func (al *addressList) updateAddrs(addrs []resolver.Address) {
    +	al.addresses = addrs
    +	al.reset()
    +}
    +
    +// seekTo returns false if the needle was not found and the current index was
    +// left unchanged.
    +func (al *addressList) seekTo(needle resolver.Address) bool {
    +	for ai, addr := range al.addresses {
    +		if !equalAddressIgnoringBalAttributes(&addr, &needle) {
    +			continue
    +		}
    +		al.idx = ai
    +		return true
    +	}
    +	return false
    +}
    +
    +// hasNext returns whether incrementing the addressList will result in moving
    +// past the end of the list. If the list has already moved past the end, it
    +// returns false.
    +func (al *addressList) hasNext() bool {
    +	if !al.isValid() {
    +		return false
    +	}
    +	return al.idx+1 < len(al.addresses)
    +}
    +
    +// equalAddressIgnoringBalAttributes returns true if a and b are considered
    +// equal. This is different from the Equal method on the resolver.Address type
    +// which considers all fields to determine equality. Here, we only consider
    +// fields that are meaningful to the SubConn.
    +func equalAddressIgnoringBalAttributes(a, b *resolver.Address) bool {
    +	return a.Addr == b.Addr && a.ServerName == b.ServerName &&
    +		a.Attributes.Equal(b.Attributes)
    +}
    diff --git a/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go b/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go
    deleted file mode 100644
    index 113181e6b..000000000
    --- a/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go
    +++ /dev/null
    @@ -1,932 +0,0 @@
    -/*
    - *
    - * Copyright 2024 gRPC authors.
    - *
    - * Licensed under the Apache License, Version 2.0 (the "License");
    - * you may not use this file except in compliance with the License.
    - * You may obtain a copy of the License at
    - *
    - *     http://www.apache.org/licenses/LICENSE-2.0
    - *
    - * Unless required by applicable law or agreed to in writing, software
    - * distributed under the License is distributed on an "AS IS" BASIS,
    - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    - * See the License for the specific language governing permissions and
    - * limitations under the License.
    - *
    - */
    -
    -// Package pickfirstleaf contains the pick_first load balancing policy which
    -// will be the universal leaf policy after dualstack changes are implemented.
    -//
    -// # Experimental
    -//
    -// Notice: This package is EXPERIMENTAL and may be changed or removed in a
    -// later release.
    -package pickfirstleaf
    -
    -import (
    -	"encoding/json"
    -	"errors"
    -	"fmt"
    -	"net"
    -	"net/netip"
    -	"sync"
    -	"time"
    -
    -	"google.golang.org/grpc/balancer"
    -	"google.golang.org/grpc/balancer/pickfirst/internal"
    -	"google.golang.org/grpc/connectivity"
    -	expstats "google.golang.org/grpc/experimental/stats"
    -	"google.golang.org/grpc/grpclog"
    -	"google.golang.org/grpc/internal/envconfig"
    -	internalgrpclog "google.golang.org/grpc/internal/grpclog"
    -	"google.golang.org/grpc/internal/pretty"
    -	"google.golang.org/grpc/resolver"
    -	"google.golang.org/grpc/serviceconfig"
    -)
    -
    -func init() {
    -	if envconfig.NewPickFirstEnabled {
    -		// Register as the default pick_first balancer.
    -		Name = "pick_first"
    -	}
    -	balancer.Register(pickfirstBuilder{})
    -}
    -
    -type (
    -	// enableHealthListenerKeyType is a unique key type used in resolver
    -	// attributes to indicate whether the health listener usage is enabled.
    -	enableHealthListenerKeyType struct{}
    -	// managedByPickfirstKeyType is an attribute key type to inform Outlier
    -	// Detection that the generic health listener is being used.
    -	// TODO: https://github.com/grpc/grpc-go/issues/7915 - Remove this when
    -	// implementing the dualstack design. This is a hack. Once Dualstack is
    -	// completed, outlier detection will stop sending ejection updates through
    -	// the connectivity listener.
    -	managedByPickfirstKeyType struct{}
    -)
    -
    -var (
    -	logger = grpclog.Component("pick-first-leaf-lb")
    -	// Name is the name of the pick_first_leaf balancer.
    -	// It is changed to "pick_first" in init() if this balancer is to be
    -	// registered as the default pickfirst.
    -	Name                 = "pick_first_leaf"
    -	disconnectionsMetric = expstats.RegisterInt64Count(expstats.MetricDescriptor{
    -		Name:        "grpc.lb.pick_first.disconnections",
    -		Description: "EXPERIMENTAL. Number of times the selected subchannel becomes disconnected.",
    -		Unit:        "disconnection",
    -		Labels:      []string{"grpc.target"},
    -		Default:     false,
    -	})
    -	connectionAttemptsSucceededMetric = expstats.RegisterInt64Count(expstats.MetricDescriptor{
    -		Name:        "grpc.lb.pick_first.connection_attempts_succeeded",
    -		Description: "EXPERIMENTAL. Number of successful connection attempts.",
    -		Unit:        "attempt",
    -		Labels:      []string{"grpc.target"},
    -		Default:     false,
    -	})
    -	connectionAttemptsFailedMetric = expstats.RegisterInt64Count(expstats.MetricDescriptor{
    -		Name:        "grpc.lb.pick_first.connection_attempts_failed",
    -		Description: "EXPERIMENTAL. Number of failed connection attempts.",
    -		Unit:        "attempt",
    -		Labels:      []string{"grpc.target"},
    -		Default:     false,
    -	})
    -)
    -
    -const (
    -	// TODO: change to pick-first when this becomes the default pick_first policy.
    -	logPrefix = "[pick-first-leaf-lb %p] "
    -	// connectionDelayInterval is the time to wait for during the happy eyeballs
    -	// pass before starting the next connection attempt.
    -	connectionDelayInterval = 250 * time.Millisecond
    -)
    -
    -type ipAddrFamily int
    -
    -const (
    -	// ipAddrFamilyUnknown represents strings that can't be parsed as an IP
    -	// address.
    -	ipAddrFamilyUnknown ipAddrFamily = iota
    -	ipAddrFamilyV4
    -	ipAddrFamilyV6
    -)
    -
    -type pickfirstBuilder struct{}
    -
    -func (pickfirstBuilder) Build(cc balancer.ClientConn, bo balancer.BuildOptions) balancer.Balancer {
    -	b := &pickfirstBalancer{
    -		cc:              cc,
    -		target:          bo.Target.String(),
    -		metricsRecorder: cc.MetricsRecorder(),
    -
    -		subConns:              resolver.NewAddressMap(),
    -		state:                 connectivity.Connecting,
    -		cancelConnectionTimer: func() {},
    -	}
    -	b.logger = internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf(logPrefix, b))
    -	return b
    -}
    -
    -func (b pickfirstBuilder) Name() string {
    -	return Name
    -}
    -
    -func (pickfirstBuilder) ParseConfig(js json.RawMessage) (serviceconfig.LoadBalancingConfig, error) {
    -	var cfg pfConfig
    -	if err := json.Unmarshal(js, &cfg); err != nil {
    -		return nil, fmt.Errorf("pickfirst: unable to unmarshal LB policy config: %s, error: %v", string(js), err)
    -	}
    -	return cfg, nil
    -}
    -
    -// EnableHealthListener updates the state to configure pickfirst for using a
    -// generic health listener.
    -func EnableHealthListener(state resolver.State) resolver.State {
    -	state.Attributes = state.Attributes.WithValue(enableHealthListenerKeyType{}, true)
    -	return state
    -}
    -
    -// IsManagedByPickfirst returns whether an address belongs to a SubConn
    -// managed by the pickfirst LB policy.
    -// TODO: https://github.com/grpc/grpc-go/issues/7915 - This is a hack to disable
    -// outlier_detection via the with connectivity listener when using pick_first.
    -// Once Dualstack changes are complete, all SubConns will be created by
    -// pick_first and outlier detection will only use the health listener for
    -// ejection. This hack can then be removed.
    -func IsManagedByPickfirst(addr resolver.Address) bool {
    -	return addr.BalancerAttributes.Value(managedByPickfirstKeyType{}) != nil
    -}
    -
    -type pfConfig struct {
    -	serviceconfig.LoadBalancingConfig `json:"-"`
    -
    -	// If set to true, instructs the LB policy to shuffle the order of the list
    -	// of endpoints received from the name resolver before attempting to
    -	// connect to them.
    -	ShuffleAddressList bool `json:"shuffleAddressList"`
    -}
    -
    -// scData keeps track of the current state of the subConn.
    -// It is not safe for concurrent access.
    -type scData struct {
    -	// The following fields are initialized at build time and read-only after
    -	// that.
    -	subConn balancer.SubConn
    -	addr    resolver.Address
    -
    -	rawConnectivityState connectivity.State
    -	// The effective connectivity state based on raw connectivity, health state
    -	// and after following sticky TransientFailure behaviour defined in A62.
    -	effectiveState              connectivity.State
    -	lastErr                     error
    -	connectionFailedInFirstPass bool
    -}
    -
    -func (b *pickfirstBalancer) newSCData(addr resolver.Address) (*scData, error) {
    -	addr.BalancerAttributes = addr.BalancerAttributes.WithValue(managedByPickfirstKeyType{}, true)
    -	sd := &scData{
    -		rawConnectivityState: connectivity.Idle,
    -		effectiveState:       connectivity.Idle,
    -		addr:                 addr,
    -	}
    -	sc, err := b.cc.NewSubConn([]resolver.Address{addr}, balancer.NewSubConnOptions{
    -		StateListener: func(state balancer.SubConnState) {
    -			b.updateSubConnState(sd, state)
    -		},
    -	})
    -	if err != nil {
    -		return nil, err
    -	}
    -	sd.subConn = sc
    -	return sd, nil
    -}
    -
    -type pickfirstBalancer struct {
    -	// The following fields are initialized at build time and read-only after
    -	// that and therefore do not need to be guarded by a mutex.
    -	logger          *internalgrpclog.PrefixLogger
    -	cc              balancer.ClientConn
    -	target          string
    -	metricsRecorder expstats.MetricsRecorder // guaranteed to be non nil
    -
    -	// The mutex is used to ensure synchronization of updates triggered
    -	// from the idle picker and the already serialized resolver,
    -	// SubConn state updates.
    -	mu sync.Mutex
    -	// State reported to the channel based on SubConn states and resolver
    -	// updates.
    -	state connectivity.State
    -	// scData for active subonns mapped by address.
    -	subConns              *resolver.AddressMap
    -	addressList           addressList
    -	firstPass             bool
    -	numTF                 int
    -	cancelConnectionTimer func()
    -	healthCheckingEnabled bool
    -}
    -
    -// ResolverError is called by the ClientConn when the name resolver produces
    -// an error or when pickfirst determined the resolver update to be invalid.
    -func (b *pickfirstBalancer) ResolverError(err error) {
    -	b.mu.Lock()
    -	defer b.mu.Unlock()
    -	b.resolverErrorLocked(err)
    -}
    -
    -func (b *pickfirstBalancer) resolverErrorLocked(err error) {
    -	if b.logger.V(2) {
    -		b.logger.Infof("Received error from the name resolver: %v", err)
    -	}
    -
    -	// The picker will not change since the balancer does not currently
    -	// report an error. If the balancer hasn't received a single good resolver
    -	// update yet, transition to TRANSIENT_FAILURE.
    -	if b.state != connectivity.TransientFailure && b.addressList.size() > 0 {
    -		if b.logger.V(2) {
    -			b.logger.Infof("Ignoring resolver error because balancer is using a previous good update.")
    -		}
    -		return
    -	}
    -
    -	b.updateBalancerState(balancer.State{
    -		ConnectivityState: connectivity.TransientFailure,
    -		Picker:            &picker{err: fmt.Errorf("name resolver error: %v", err)},
    -	})
    -}
    -
    -func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState) error {
    -	b.mu.Lock()
    -	defer b.mu.Unlock()
    -	b.cancelConnectionTimer()
    -	if len(state.ResolverState.Addresses) == 0 && len(state.ResolverState.Endpoints) == 0 {
    -		// Cleanup state pertaining to the previous resolver state.
    -		// Treat an empty address list like an error by calling b.ResolverError.
    -		b.closeSubConnsLocked()
    -		b.addressList.updateAddrs(nil)
    -		b.resolverErrorLocked(errors.New("produced zero addresses"))
    -		return balancer.ErrBadResolverState
    -	}
    -	b.healthCheckingEnabled = state.ResolverState.Attributes.Value(enableHealthListenerKeyType{}) != nil
    -	cfg, ok := state.BalancerConfig.(pfConfig)
    -	if state.BalancerConfig != nil && !ok {
    -		return fmt.Errorf("pickfirst: received illegal BalancerConfig (type %T): %v: %w", state.BalancerConfig, state.BalancerConfig, balancer.ErrBadResolverState)
    -	}
    -
    -	if b.logger.V(2) {
    -		b.logger.Infof("Received new config %s, resolver state %s", pretty.ToJSON(cfg), pretty.ToJSON(state.ResolverState))
    -	}
    -
    -	var newAddrs []resolver.Address
    -	if endpoints := state.ResolverState.Endpoints; len(endpoints) != 0 {
    -		// Perform the optional shuffling described in gRFC A62. The shuffling
    -		// will change the order of endpoints but not touch the order of the
    -		// addresses within each endpoint. - A61
    -		if cfg.ShuffleAddressList {
    -			endpoints = append([]resolver.Endpoint{}, endpoints...)
    -			internal.RandShuffle(len(endpoints), func(i, j int) { endpoints[i], endpoints[j] = endpoints[j], endpoints[i] })
    -		}
    -
    -		// "Flatten the list by concatenating the ordered list of addresses for
    -		// each of the endpoints, in order." - A61
    -		for _, endpoint := range endpoints {
    -			newAddrs = append(newAddrs, endpoint.Addresses...)
    -		}
    -	} else {
    -		// Endpoints not set, process addresses until we migrate resolver
    -		// emissions fully to Endpoints. The top channel does wrap emitted
    -		// addresses with endpoints, however some balancers such as weighted
    -		// target do not forward the corresponding correct endpoints down/split
    -		// endpoints properly. Once all balancers correctly forward endpoints
    -		// down, can delete this else conditional.
    -		newAddrs = state.ResolverState.Addresses
    -		if cfg.ShuffleAddressList {
    -			newAddrs = append([]resolver.Address{}, newAddrs...)
    -			internal.RandShuffle(len(endpoints), func(i, j int) { endpoints[i], endpoints[j] = endpoints[j], endpoints[i] })
    -		}
    -	}
    -
    -	// If an address appears in multiple endpoints or in the same endpoint
    -	// multiple times, we keep it only once. We will create only one SubConn
    -	// for the address because an AddressMap is used to store SubConns.
    -	// Not de-duplicating would result in attempting to connect to the same
    -	// SubConn multiple times in the same pass. We don't want this.
    -	newAddrs = deDupAddresses(newAddrs)
    -	newAddrs = interleaveAddresses(newAddrs)
    -
    -	prevAddr := b.addressList.currentAddress()
    -	prevSCData, found := b.subConns.Get(prevAddr)
    -	prevAddrsCount := b.addressList.size()
    -	isPrevRawConnectivityStateReady := found && prevSCData.(*scData).rawConnectivityState == connectivity.Ready
    -	b.addressList.updateAddrs(newAddrs)
    -
    -	// If the previous ready SubConn exists in new address list,
    -	// keep this connection and don't create new SubConns.
    -	if isPrevRawConnectivityStateReady && b.addressList.seekTo(prevAddr) {
    -		return nil
    -	}
    -
    -	b.reconcileSubConnsLocked(newAddrs)
    -	// If it's the first resolver update or the balancer was already READY
    -	// (but the new address list does not contain the ready SubConn) or
    -	// CONNECTING, enter CONNECTING.
    -	// We may be in TRANSIENT_FAILURE due to a previous empty address list,
    -	// we should still enter CONNECTING because the sticky TF behaviour
    -	//  mentioned in A62 applies only when the TRANSIENT_FAILURE is reported
    -	// due to connectivity failures.
    -	if isPrevRawConnectivityStateReady || b.state == connectivity.Connecting || prevAddrsCount == 0 {
    -		// Start connection attempt at first address.
    -		b.forceUpdateConcludedStateLocked(balancer.State{
    -			ConnectivityState: connectivity.Connecting,
    -			Picker:            &picker{err: balancer.ErrNoSubConnAvailable},
    -		})
    -		b.startFirstPassLocked()
    -	} else if b.state == connectivity.TransientFailure {
    -		// If we're in TRANSIENT_FAILURE, we stay in TRANSIENT_FAILURE until
    -		// we're READY. See A62.
    -		b.startFirstPassLocked()
    -	}
    -	return nil
    -}
    -
    -// UpdateSubConnState is unused as a StateListener is always registered when
    -// creating SubConns.
    -func (b *pickfirstBalancer) UpdateSubConnState(subConn balancer.SubConn, state balancer.SubConnState) {
    -	b.logger.Errorf("UpdateSubConnState(%v, %+v) called unexpectedly", subConn, state)
    -}
    -
    -func (b *pickfirstBalancer) Close() {
    -	b.mu.Lock()
    -	defer b.mu.Unlock()
    -	b.closeSubConnsLocked()
    -	b.cancelConnectionTimer()
    -	b.state = connectivity.Shutdown
    -}
    -
    -// ExitIdle moves the balancer out of idle state. It can be called concurrently
    -// by the idlePicker and clientConn so access to variables should be
    -// synchronized.
    -func (b *pickfirstBalancer) ExitIdle() {
    -	b.mu.Lock()
    -	defer b.mu.Unlock()
    -	if b.state == connectivity.Idle {
    -		b.startFirstPassLocked()
    -	}
    -}
    -
    -func (b *pickfirstBalancer) startFirstPassLocked() {
    -	b.firstPass = true
    -	b.numTF = 0
    -	// Reset the connection attempt record for existing SubConns.
    -	for _, sd := range b.subConns.Values() {
    -		sd.(*scData).connectionFailedInFirstPass = false
    -	}
    -	b.requestConnectionLocked()
    -}
    -
    -func (b *pickfirstBalancer) closeSubConnsLocked() {
    -	for _, sd := range b.subConns.Values() {
    -		sd.(*scData).subConn.Shutdown()
    -	}
    -	b.subConns = resolver.NewAddressMap()
    -}
    -
    -// deDupAddresses ensures that each address appears only once in the slice.
    -func deDupAddresses(addrs []resolver.Address) []resolver.Address {
    -	seenAddrs := resolver.NewAddressMap()
    -	retAddrs := []resolver.Address{}
    -
    -	for _, addr := range addrs {
    -		if _, ok := seenAddrs.Get(addr); ok {
    -			continue
    -		}
    -		retAddrs = append(retAddrs, addr)
    -	}
    -	return retAddrs
    -}
    -
    -// interleaveAddresses interleaves addresses of both families (IPv4 and IPv6)
    -// as per RFC-8305 section 4.
    -// Whichever address family is first in the list is followed by an address of
    -// the other address family; that is, if the first address in the list is IPv6,
    -// then the first IPv4 address should be moved up in the list to be second in
    -// the list. It doesn't support configuring "First Address Family Count", i.e.
    -// there will always be a single member of the first address family at the
    -// beginning of the interleaved list.
    -// Addresses that are neither IPv4 nor IPv6 are treated as part of a third
    -// "unknown" family for interleaving.
    -// See: https://datatracker.ietf.org/doc/html/rfc8305#autoid-6
    -func interleaveAddresses(addrs []resolver.Address) []resolver.Address {
    -	familyAddrsMap := map[ipAddrFamily][]resolver.Address{}
    -	interleavingOrder := []ipAddrFamily{}
    -	for _, addr := range addrs {
    -		family := addressFamily(addr.Addr)
    -		if _, found := familyAddrsMap[family]; !found {
    -			interleavingOrder = append(interleavingOrder, family)
    -		}
    -		familyAddrsMap[family] = append(familyAddrsMap[family], addr)
    -	}
    -
    -	interleavedAddrs := make([]resolver.Address, 0, len(addrs))
    -
    -	for curFamilyIdx := 0; len(interleavedAddrs) < len(addrs); curFamilyIdx = (curFamilyIdx + 1) % len(interleavingOrder) {
    -		// Some IP types may have fewer addresses than others, so we look for
    -		// the next type that has a remaining member to add to the interleaved
    -		// list.
    -		family := interleavingOrder[curFamilyIdx]
    -		remainingMembers := familyAddrsMap[family]
    -		if len(remainingMembers) > 0 {
    -			interleavedAddrs = append(interleavedAddrs, remainingMembers[0])
    -			familyAddrsMap[family] = remainingMembers[1:]
    -		}
    -	}
    -
    -	return interleavedAddrs
    -}
    -
    -// addressFamily returns the ipAddrFamily after parsing the address string.
    -// If the address isn't of the format "ip-address:port", it returns
    -// ipAddrFamilyUnknown. The address may be valid even if it's not an IP when
    -// using a resolver like passthrough where the address may be a hostname in
    -// some format that the dialer can resolve.
    -func addressFamily(address string) ipAddrFamily {
    -	// Parse the IP after removing the port.
    -	host, _, err := net.SplitHostPort(address)
    -	if err != nil {
    -		return ipAddrFamilyUnknown
    -	}
    -	ip, err := netip.ParseAddr(host)
    -	if err != nil {
    -		return ipAddrFamilyUnknown
    -	}
    -	switch {
    -	case ip.Is4() || ip.Is4In6():
    -		return ipAddrFamilyV4
    -	case ip.Is6():
    -		return ipAddrFamilyV6
    -	default:
    -		return ipAddrFamilyUnknown
    -	}
    -}
    -
    -// reconcileSubConnsLocked updates the active subchannels based on a new address
    -// list from the resolver. It does this by:
    -//   - closing subchannels: any existing subchannels associated with addresses
    -//     that are no longer in the updated list are shut down.
    -//   - removing subchannels: entries for these closed subchannels are removed
    -//     from the subchannel map.
    -//
    -// This ensures that the subchannel map accurately reflects the current set of
    -// addresses received from the name resolver.
    -func (b *pickfirstBalancer) reconcileSubConnsLocked(newAddrs []resolver.Address) {
    -	newAddrsMap := resolver.NewAddressMap()
    -	for _, addr := range newAddrs {
    -		newAddrsMap.Set(addr, true)
    -	}
    -
    -	for _, oldAddr := range b.subConns.Keys() {
    -		if _, ok := newAddrsMap.Get(oldAddr); ok {
    -			continue
    -		}
    -		val, _ := b.subConns.Get(oldAddr)
    -		val.(*scData).subConn.Shutdown()
    -		b.subConns.Delete(oldAddr)
    -	}
    -}
    -
    -// shutdownRemainingLocked shuts down remaining subConns. Called when a subConn
    -// becomes ready, which means that all other subConn must be shutdown.
    -func (b *pickfirstBalancer) shutdownRemainingLocked(selected *scData) {
    -	b.cancelConnectionTimer()
    -	for _, v := range b.subConns.Values() {
    -		sd := v.(*scData)
    -		if sd.subConn != selected.subConn {
    -			sd.subConn.Shutdown()
    -		}
    -	}
    -	b.subConns = resolver.NewAddressMap()
    -	b.subConns.Set(selected.addr, selected)
    -}
    -
    -// requestConnectionLocked starts connecting on the subchannel corresponding to
    -// the current address. If no subchannel exists, one is created. If the current
    -// subchannel is in TransientFailure, a connection to the next address is
    -// attempted until a subchannel is found.
    -func (b *pickfirstBalancer) requestConnectionLocked() {
    -	if !b.addressList.isValid() {
    -		return
    -	}
    -	var lastErr error
    -	for valid := true; valid; valid = b.addressList.increment() {
    -		curAddr := b.addressList.currentAddress()
    -		sd, ok := b.subConns.Get(curAddr)
    -		if !ok {
    -			var err error
    -			// We want to assign the new scData to sd from the outer scope,
    -			// hence we can't use := below.
    -			sd, err = b.newSCData(curAddr)
    -			if err != nil {
    -				// This should never happen, unless the clientConn is being shut
    -				// down.
    -				if b.logger.V(2) {
    -					b.logger.Infof("Failed to create a subConn for address %v: %v", curAddr.String(), err)
    -				}
    -				// Do nothing, the LB policy will be closed soon.
    -				return
    -			}
    -			b.subConns.Set(curAddr, sd)
    -		}
    -
    -		scd := sd.(*scData)
    -		switch scd.rawConnectivityState {
    -		case connectivity.Idle:
    -			scd.subConn.Connect()
    -			b.scheduleNextConnectionLocked()
    -			return
    -		case connectivity.TransientFailure:
    -			// The SubConn is being re-used and failed during a previous pass
    -			// over the addressList. It has not completed backoff yet.
    -			// Mark it as having failed and try the next address.
    -			scd.connectionFailedInFirstPass = true
    -			lastErr = scd.lastErr
    -			continue
    -		case connectivity.Connecting:
    -			// Wait for the connection attempt to complete or the timer to fire
    -			// before attempting the next address.
    -			b.scheduleNextConnectionLocked()
    -			return
    -		default:
    -			b.logger.Errorf("SubConn with unexpected state %v present in SubConns map.", scd.rawConnectivityState)
    -			return
    -
    -		}
    -	}
    -
    -	// All the remaining addresses in the list are in TRANSIENT_FAILURE, end the
    -	// first pass if possible.
    -	b.endFirstPassIfPossibleLocked(lastErr)
    -}
    -
    -func (b *pickfirstBalancer) scheduleNextConnectionLocked() {
    -	b.cancelConnectionTimer()
    -	if !b.addressList.hasNext() {
    -		return
    -	}
    -	curAddr := b.addressList.currentAddress()
    -	cancelled := false // Access to this is protected by the balancer's mutex.
    -	closeFn := internal.TimeAfterFunc(connectionDelayInterval, func() {
    -		b.mu.Lock()
    -		defer b.mu.Unlock()
    -		// If the scheduled task is cancelled while acquiring the mutex, return.
    -		if cancelled {
    -			return
    -		}
    -		if b.logger.V(2) {
    -			b.logger.Infof("Happy Eyeballs timer expired while waiting for connection to %q.", curAddr.Addr)
    -		}
    -		if b.addressList.increment() {
    -			b.requestConnectionLocked()
    -		}
    -	})
    -	// Access to the cancellation callback held by the balancer is guarded by
    -	// the balancer's mutex, so it's safe to set the boolean from the callback.
    -	b.cancelConnectionTimer = sync.OnceFunc(func() {
    -		cancelled = true
    -		closeFn()
    -	})
    -}
    -
    -func (b *pickfirstBalancer) updateSubConnState(sd *scData, newState balancer.SubConnState) {
    -	b.mu.Lock()
    -	defer b.mu.Unlock()
    -	oldState := sd.rawConnectivityState
    -	sd.rawConnectivityState = newState.ConnectivityState
    -	// Previously relevant SubConns can still callback with state updates.
    -	// To prevent pickers from returning these obsolete SubConns, this logic
    -	// is included to check if the current list of active SubConns includes this
    -	// SubConn.
    -	if !b.isActiveSCData(sd) {
    -		return
    -	}
    -	if newState.ConnectivityState == connectivity.Shutdown {
    -		sd.effectiveState = connectivity.Shutdown
    -		return
    -	}
    -
    -	// Record a connection attempt when exiting CONNECTING.
    -	if newState.ConnectivityState == connectivity.TransientFailure {
    -		sd.connectionFailedInFirstPass = true
    -		connectionAttemptsFailedMetric.Record(b.metricsRecorder, 1, b.target)
    -	}
    -
    -	if newState.ConnectivityState == connectivity.Ready {
    -		connectionAttemptsSucceededMetric.Record(b.metricsRecorder, 1, b.target)
    -		b.shutdownRemainingLocked(sd)
    -		if !b.addressList.seekTo(sd.addr) {
    -			// This should not fail as we should have only one SubConn after
    -			// entering READY. The SubConn should be present in the addressList.
    -			b.logger.Errorf("Address %q not found address list in  %v", sd.addr, b.addressList.addresses)
    -			return
    -		}
    -		if !b.healthCheckingEnabled {
    -			if b.logger.V(2) {
    -				b.logger.Infof("SubConn %p reported connectivity state READY and the health listener is disabled. Transitioning SubConn to READY.", sd.subConn)
    -			}
    -
    -			sd.effectiveState = connectivity.Ready
    -			b.updateBalancerState(balancer.State{
    -				ConnectivityState: connectivity.Ready,
    -				Picker:            &picker{result: balancer.PickResult{SubConn: sd.subConn}},
    -			})
    -			return
    -		}
    -		if b.logger.V(2) {
    -			b.logger.Infof("SubConn %p reported connectivity state READY. Registering health listener.", sd.subConn)
    -		}
    -		// Send a CONNECTING update to take the SubConn out of sticky-TF if
    -		// required.
    -		sd.effectiveState = connectivity.Connecting
    -		b.updateBalancerState(balancer.State{
    -			ConnectivityState: connectivity.Connecting,
    -			Picker:            &picker{err: balancer.ErrNoSubConnAvailable},
    -		})
    -		sd.subConn.RegisterHealthListener(func(scs balancer.SubConnState) {
    -			b.updateSubConnHealthState(sd, scs)
    -		})
    -		return
    -	}
    -
    -	// If the LB policy is READY, and it receives a subchannel state change,
    -	// it means that the READY subchannel has failed.
    -	// A SubConn can also transition from CONNECTING directly to IDLE when
    -	// a transport is successfully created, but the connection fails
    -	// before the SubConn can send the notification for READY. We treat
    -	// this as a successful connection and transition to IDLE.
    -	// TODO: https://github.com/grpc/grpc-go/issues/7862 - Remove the second
    -	// part of the if condition below once the issue is fixed.
    -	if oldState == connectivity.Ready || (oldState == connectivity.Connecting && newState.ConnectivityState == connectivity.Idle) {
    -		// Once a transport fails, the balancer enters IDLE and starts from
    -		// the first address when the picker is used.
    -		b.shutdownRemainingLocked(sd)
    -		sd.effectiveState = newState.ConnectivityState
    -		// READY SubConn interspliced in between CONNECTING and IDLE, need to
    -		// account for that.
    -		if oldState == connectivity.Connecting {
    -			// A known issue (https://github.com/grpc/grpc-go/issues/7862)
    -			// causes a race that prevents the READY state change notification.
    -			// This works around it.
    -			connectionAttemptsSucceededMetric.Record(b.metricsRecorder, 1, b.target)
    -		}
    -		disconnectionsMetric.Record(b.metricsRecorder, 1, b.target)
    -		b.addressList.reset()
    -		b.updateBalancerState(balancer.State{
    -			ConnectivityState: connectivity.Idle,
    -			Picker:            &idlePicker{exitIdle: sync.OnceFunc(b.ExitIdle)},
    -		})
    -		return
    -	}
    -
    -	if b.firstPass {
    -		switch newState.ConnectivityState {
    -		case connectivity.Connecting:
    -			// The effective state can be in either IDLE, CONNECTING or
    -			// TRANSIENT_FAILURE. If it's  TRANSIENT_FAILURE, stay in
    -			// TRANSIENT_FAILURE until it's READY. See A62.
    -			if sd.effectiveState != connectivity.TransientFailure {
    -				sd.effectiveState = connectivity.Connecting
    -				b.updateBalancerState(balancer.State{
    -					ConnectivityState: connectivity.Connecting,
    -					Picker:            &picker{err: balancer.ErrNoSubConnAvailable},
    -				})
    -			}
    -		case connectivity.TransientFailure:
    -			sd.lastErr = newState.ConnectionError
    -			sd.effectiveState = connectivity.TransientFailure
    -			// Since we're re-using common SubConns while handling resolver
    -			// updates, we could receive an out of turn TRANSIENT_FAILURE from
    -			// a pass over the previous address list. Happy Eyeballs will also
    -			// cause out of order updates to arrive.
    -
    -			if curAddr := b.addressList.currentAddress(); equalAddressIgnoringBalAttributes(&curAddr, &sd.addr) {
    -				b.cancelConnectionTimer()
    -				if b.addressList.increment() {
    -					b.requestConnectionLocked()
    -					return
    -				}
    -			}
    -
    -			// End the first pass if we've seen a TRANSIENT_FAILURE from all
    -			// SubConns once.
    -			b.endFirstPassIfPossibleLocked(newState.ConnectionError)
    -		}
    -		return
    -	}
    -
    -	// We have finished the first pass, keep re-connecting failing SubConns.
    -	switch newState.ConnectivityState {
    -	case connectivity.TransientFailure:
    -		b.numTF = (b.numTF + 1) % b.subConns.Len()
    -		sd.lastErr = newState.ConnectionError
    -		if b.numTF%b.subConns.Len() == 0 {
    -			b.updateBalancerState(balancer.State{
    -				ConnectivityState: connectivity.TransientFailure,
    -				Picker:            &picker{err: newState.ConnectionError},
    -			})
    -		}
    -		// We don't need to request re-resolution since the SubConn already
    -		// does that before reporting TRANSIENT_FAILURE.
    -		// TODO: #7534 - Move re-resolution requests from SubConn into
    -		// pick_first.
    -	case connectivity.Idle:
    -		sd.subConn.Connect()
    -	}
    -}
    -
    -// endFirstPassIfPossibleLocked ends the first happy-eyeballs pass if all the
    -// addresses are tried and their SubConns have reported a failure.
    -func (b *pickfirstBalancer) endFirstPassIfPossibleLocked(lastErr error) {
    -	// An optimization to avoid iterating over the entire SubConn map.
    -	if b.addressList.isValid() {
    -		return
    -	}
    -	// Connect() has been called on all the SubConns. The first pass can be
    -	// ended if all the SubConns have reported a failure.
    -	for _, v := range b.subConns.Values() {
    -		sd := v.(*scData)
    -		if !sd.connectionFailedInFirstPass {
    -			return
    -		}
    -	}
    -	b.firstPass = false
    -	b.updateBalancerState(balancer.State{
    -		ConnectivityState: connectivity.TransientFailure,
    -		Picker:            &picker{err: lastErr},
    -	})
    -	// Start re-connecting all the SubConns that are already in IDLE.
    -	for _, v := range b.subConns.Values() {
    -		sd := v.(*scData)
    -		if sd.rawConnectivityState == connectivity.Idle {
    -			sd.subConn.Connect()
    -		}
    -	}
    -}
    -
    -func (b *pickfirstBalancer) isActiveSCData(sd *scData) bool {
    -	activeSD, found := b.subConns.Get(sd.addr)
    -	return found && activeSD == sd
    -}
    -
    -func (b *pickfirstBalancer) updateSubConnHealthState(sd *scData, state balancer.SubConnState) {
    -	b.mu.Lock()
    -	defer b.mu.Unlock()
    -	// Previously relevant SubConns can still callback with state updates.
    -	// To prevent pickers from returning these obsolete SubConns, this logic
    -	// is included to check if the current list of active SubConns includes
    -	// this SubConn.
    -	if !b.isActiveSCData(sd) {
    -		return
    -	}
    -	sd.effectiveState = state.ConnectivityState
    -	switch state.ConnectivityState {
    -	case connectivity.Ready:
    -		b.updateBalancerState(balancer.State{
    -			ConnectivityState: connectivity.Ready,
    -			Picker:            &picker{result: balancer.PickResult{SubConn: sd.subConn}},
    -		})
    -	case connectivity.TransientFailure:
    -		b.updateBalancerState(balancer.State{
    -			ConnectivityState: connectivity.TransientFailure,
    -			Picker:            &picker{err: fmt.Errorf("pickfirst: health check failure: %v", state.ConnectionError)},
    -		})
    -	case connectivity.Connecting:
    -		b.updateBalancerState(balancer.State{
    -			ConnectivityState: connectivity.Connecting,
    -			Picker:            &picker{err: balancer.ErrNoSubConnAvailable},
    -		})
    -	default:
    -		b.logger.Errorf("Got unexpected health update for SubConn %p: %v", state)
    -	}
    -}
    -
    -// updateBalancerState stores the state reported to the channel and calls
    -// ClientConn.UpdateState(). As an optimization, it avoids sending duplicate
    -// updates to the channel.
    -func (b *pickfirstBalancer) updateBalancerState(newState balancer.State) {
    -	// In case of TransientFailures allow the picker to be updated to update
    -	// the connectivity error, in all other cases don't send duplicate state
    -	// updates.
    -	if newState.ConnectivityState == b.state && b.state != connectivity.TransientFailure {
    -		return
    -	}
    -	b.forceUpdateConcludedStateLocked(newState)
    -}
    -
    -// forceUpdateConcludedStateLocked stores the state reported to the channel and
    -// calls ClientConn.UpdateState().
    -// A separate function is defined to force update the ClientConn state since the
    -// channel doesn't correctly assume that LB policies start in CONNECTING and
    -// relies on LB policy to send an initial CONNECTING update.
    -func (b *pickfirstBalancer) forceUpdateConcludedStateLocked(newState balancer.State) {
    -	b.state = newState.ConnectivityState
    -	b.cc.UpdateState(newState)
    -}
    -
    -type picker struct {
    -	result balancer.PickResult
    -	err    error
    -}
    -
    -func (p *picker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
    -	return p.result, p.err
    -}
    -
    -// idlePicker is used when the SubConn is IDLE and kicks the SubConn into
    -// CONNECTING when Pick is called.
    -type idlePicker struct {
    -	exitIdle func()
    -}
    -
    -func (i *idlePicker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
    -	i.exitIdle()
    -	return balancer.PickResult{}, balancer.ErrNoSubConnAvailable
    -}
    -
    -// addressList manages sequentially iterating over addresses present in a list
    -// of endpoints. It provides a 1 dimensional view of the addresses present in
    -// the endpoints.
    -// This type is not safe for concurrent access.
    -type addressList struct {
    -	addresses []resolver.Address
    -	idx       int
    -}
    -
    -func (al *addressList) isValid() bool {
    -	return al.idx < len(al.addresses)
    -}
    -
    -func (al *addressList) size() int {
    -	return len(al.addresses)
    -}
    -
    -// increment moves to the next index in the address list.
    -// This method returns false if it went off the list, true otherwise.
    -func (al *addressList) increment() bool {
    -	if !al.isValid() {
    -		return false
    -	}
    -	al.idx++
    -	return al.idx < len(al.addresses)
    -}
    -
    -// currentAddress returns the current address pointed to in the addressList.
    -// If the list is in an invalid state, it returns an empty address instead.
    -func (al *addressList) currentAddress() resolver.Address {
    -	if !al.isValid() {
    -		return resolver.Address{}
    -	}
    -	return al.addresses[al.idx]
    -}
    -
    -func (al *addressList) reset() {
    -	al.idx = 0
    -}
    -
    -func (al *addressList) updateAddrs(addrs []resolver.Address) {
    -	al.addresses = addrs
    -	al.reset()
    -}
    -
    -// seekTo returns false if the needle was not found and the current index was
    -// left unchanged.
    -func (al *addressList) seekTo(needle resolver.Address) bool {
    -	for ai, addr := range al.addresses {
    -		if !equalAddressIgnoringBalAttributes(&addr, &needle) {
    -			continue
    -		}
    -		al.idx = ai
    -		return true
    -	}
    -	return false
    -}
    -
    -// hasNext returns whether incrementing the addressList will result in moving
    -// past the end of the list. If the list has already moved past the end, it
    -// returns false.
    -func (al *addressList) hasNext() bool {
    -	if !al.isValid() {
    -		return false
    -	}
    -	return al.idx+1 < len(al.addresses)
    -}
    -
    -// equalAddressIgnoringBalAttributes returns true is a and b are considered
    -// equal. This is different from the Equal method on the resolver.Address type
    -// which considers all fields to determine equality. Here, we only consider
    -// fields that are meaningful to the SubConn.
    -func equalAddressIgnoringBalAttributes(a, b *resolver.Address) bool {
    -	return a.Addr == b.Addr && a.ServerName == b.ServerName &&
    -		a.Attributes.Equal(b.Attributes) &&
    -		a.Metadata == b.Metadata
    -}
    diff --git a/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go b/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go
    index 35da5d1ec..22e6e3267 100644
    --- a/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go
    +++ b/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go
    @@ -26,7 +26,7 @@ import (
     
     	"google.golang.org/grpc/balancer"
     	"google.golang.org/grpc/balancer/endpointsharding"
    -	"google.golang.org/grpc/balancer/pickfirst/pickfirstleaf"
    +	"google.golang.org/grpc/balancer/pickfirst"
     	"google.golang.org/grpc/grpclog"
     	internalgrpclog "google.golang.org/grpc/internal/grpclog"
     )
    @@ -47,7 +47,7 @@ func (bb builder) Name() string {
     }
     
     func (bb builder) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer {
    -	childBuilder := balancer.Get(pickfirstleaf.Name).Build
    +	childBuilder := balancer.Get(pickfirst.Name).Build
     	bal := &rrBalancer{
     		cc:       cc,
     		Balancer: endpointsharding.NewBalancer(cc, opts, childBuilder, endpointsharding.Options{}),
    @@ -67,13 +67,6 @@ func (b *rrBalancer) UpdateClientConnState(ccs balancer.ClientConnState) error {
     	return b.Balancer.UpdateClientConnState(balancer.ClientConnState{
     		// Enable the health listener in pickfirst children for client side health
     		// checks and outlier detection, if configured.
    -		ResolverState: pickfirstleaf.EnableHealthListener(ccs.ResolverState),
    +		ResolverState: pickfirst.EnableHealthListener(ccs.ResolverState),
     	})
     }
    -
    -func (b *rrBalancer) ExitIdle() {
    -	// Should always be ok, as child is endpoint sharding.
    -	if ei, ok := b.Balancer.(balancer.ExitIdler); ok {
    -		ei.ExitIdle()
    -	}
    -}
    diff --git a/vendor/google.golang.org/grpc/balancer_wrapper.go b/vendor/google.golang.org/grpc/balancer_wrapper.go
    index 948a21ef6..2c760e623 100644
    --- a/vendor/google.golang.org/grpc/balancer_wrapper.go
    +++ b/vendor/google.golang.org/grpc/balancer_wrapper.go
    @@ -450,13 +450,14 @@ func (acbw *acBalancerWrapper) healthListenerRegFn() func(context.Context, func(
     	if acbw.ccb.cc.dopts.disableHealthCheck {
     		return noOpRegisterHealthListenerFn
     	}
    +	cfg := acbw.ac.cc.healthCheckConfig()
    +	if cfg == nil {
    +		return noOpRegisterHealthListenerFn
    +	}
     	regHealthLisFn := internal.RegisterClientHealthCheckListener
     	if regHealthLisFn == nil {
     		// The health package is not imported.
    -		return noOpRegisterHealthListenerFn
    -	}
    -	cfg := acbw.ac.cc.healthCheckConfig()
    -	if cfg == nil {
    +		channelz.Error(logger, acbw.ac.channelz, "Health check is requested but health package is not imported.")
     		return noOpRegisterHealthListenerFn
     	}
     	return func(ctx context.Context, listener func(balancer.SubConnState)) func() {
    diff --git a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go
    index b2f8fc7f4..42c61cf9f 100644
    --- a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go
    +++ b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go
    @@ -18,7 +18,7 @@
     
     // Code generated by protoc-gen-go. DO NOT EDIT.
     // versions:
    -// 	protoc-gen-go v1.36.4
    +// 	protoc-gen-go v1.36.10
     // 	protoc        v5.27.1
     // source: grpc/binlog/v1/binarylog.proto
     
    @@ -858,133 +858,68 @@ func (x *Address) GetIpPort() uint32 {
     
     var File_grpc_binlog_v1_binarylog_proto protoreflect.FileDescriptor
     
    -var file_grpc_binlog_v1_binarylog_proto_rawDesc = string([]byte{
    -	0x0a, 0x1e, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x2f, 0x76, 0x31,
    -	0x2f, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
    -	0x12, 0x11, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67,
    -	0x2e, 0x76, 0x31, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74,
    -	0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72,
    -	0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74,
    -	0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70,
    -	0x72, 0x6f, 0x74, 0x6f, 0x22, 0xbb, 0x07, 0x0a, 0x0c, 0x47, 0x72, 0x70, 0x63, 0x4c, 0x6f, 0x67,
    -	0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x38, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61,
    -	0x6d, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
    -	0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73,
    -	0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12,
    -	0x17, 0x0a, 0x07, 0x63, 0x61, 0x6c, 0x6c, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04,
    -	0x52, 0x06, 0x63, 0x61, 0x6c, 0x6c, 0x49, 0x64, 0x12, 0x35, 0x0a, 0x17, 0x73, 0x65, 0x71, 0x75,
    -	0x65, 0x6e, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x5f, 0x77, 0x69, 0x74, 0x68, 0x69, 0x6e, 0x5f, 0x63,
    -	0x61, 0x6c, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x14, 0x73, 0x65, 0x71, 0x75, 0x65,
    -	0x6e, 0x63, 0x65, 0x49, 0x64, 0x57, 0x69, 0x74, 0x68, 0x69, 0x6e, 0x43, 0x61, 0x6c, 0x6c, 0x12,
    -	0x3d, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x29, 0x2e,
    -	0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76,
    -	0x31, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x4c, 0x6f, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x45,
    -	0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x3e,
    -	0x0a, 0x06, 0x6c, 0x6f, 0x67, 0x67, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x26,
    -	0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e,
    -	0x76, 0x31, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x4c, 0x6f, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x2e,
    -	0x4c, 0x6f, 0x67, 0x67, 0x65, 0x72, 0x52, 0x06, 0x6c, 0x6f, 0x67, 0x67, 0x65, 0x72, 0x12, 0x46,
    -	0x0a, 0x0d, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18,
    -	0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e,
    -	0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74,
    -	0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x48, 0x00, 0x52, 0x0c, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74,
    -	0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x46, 0x0a, 0x0d, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72,
    -	0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e,
    -	0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76,
    -	0x31, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x48, 0x00,
    -	0x52, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x36,
    -	0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32,
    -	0x1a, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67,
    -	0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x48, 0x00, 0x52, 0x07, 0x6d,
    -	0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x36, 0x0a, 0x07, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x65,
    -	0x72, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62,
    -	0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x72, 0x61, 0x69,
    -	0x6c, 0x65, 0x72, 0x48, 0x00, 0x52, 0x07, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x65, 0x72, 0x12, 0x2b,
    -	0x0a, 0x11, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x74, 0x72, 0x75, 0x6e, 0x63, 0x61,
    -	0x74, 0x65, 0x64, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x70, 0x61, 0x79, 0x6c, 0x6f,
    -	0x61, 0x64, 0x54, 0x72, 0x75, 0x6e, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x2e, 0x0a, 0x04, 0x70,
    -	0x65, 0x65, 0x72, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x72, 0x70, 0x63,
    -	0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x64,
    -	0x64, 0x72, 0x65, 0x73, 0x73, 0x52, 0x04, 0x70, 0x65, 0x65, 0x72, 0x22, 0xf5, 0x01, 0x0a, 0x09,
    -	0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x12, 0x45, 0x56, 0x45,
    -	0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10,
    -	0x00, 0x12, 0x1c, 0x0a, 0x18, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f,
    -	0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x48, 0x45, 0x41, 0x44, 0x45, 0x52, 0x10, 0x01, 0x12,
    -	0x1c, 0x0a, 0x18, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x45,
    -	0x52, 0x56, 0x45, 0x52, 0x5f, 0x48, 0x45, 0x41, 0x44, 0x45, 0x52, 0x10, 0x02, 0x12, 0x1d, 0x0a,
    -	0x19, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x43, 0x4c, 0x49, 0x45,
    -	0x4e, 0x54, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x03, 0x12, 0x1d, 0x0a, 0x19,
    -	0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x45, 0x52, 0x56, 0x45,
    -	0x52, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x04, 0x12, 0x20, 0x0a, 0x1c, 0x45,
    -	0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54,
    -	0x5f, 0x48, 0x41, 0x4c, 0x46, 0x5f, 0x43, 0x4c, 0x4f, 0x53, 0x45, 0x10, 0x05, 0x12, 0x1d, 0x0a,
    -	0x19, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x45, 0x52, 0x56,
    -	0x45, 0x52, 0x5f, 0x54, 0x52, 0x41, 0x49, 0x4c, 0x45, 0x52, 0x10, 0x06, 0x12, 0x15, 0x0a, 0x11,
    -	0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x43, 0x41, 0x4e, 0x43, 0x45,
    -	0x4c, 0x10, 0x07, 0x22, 0x42, 0x0a, 0x06, 0x4c, 0x6f, 0x67, 0x67, 0x65, 0x72, 0x12, 0x12, 0x0a,
    -	0x0e, 0x4c, 0x4f, 0x47, 0x47, 0x45, 0x52, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10,
    -	0x00, 0x12, 0x11, 0x0a, 0x0d, 0x4c, 0x4f, 0x47, 0x47, 0x45, 0x52, 0x5f, 0x43, 0x4c, 0x49, 0x45,
    -	0x4e, 0x54, 0x10, 0x01, 0x12, 0x11, 0x0a, 0x0d, 0x4c, 0x4f, 0x47, 0x47, 0x45, 0x52, 0x5f, 0x53,
    -	0x45, 0x52, 0x56, 0x45, 0x52, 0x10, 0x02, 0x42, 0x09, 0x0a, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f,
    -	0x61, 0x64, 0x22, 0xbb, 0x01, 0x0a, 0x0c, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x48, 0x65, 0x61,
    -	0x64, 0x65, 0x72, 0x12, 0x37, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18,
    -	0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e,
    -	0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61,
    -	0x74, 0x61, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x1f, 0x0a, 0x0b,
    -	0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28,
    -	0x09, 0x52, 0x0a, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1c, 0x0a,
    -	0x09, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09,
    -	0x52, 0x09, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x12, 0x33, 0x0a, 0x07, 0x74,
    -	0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67,
    -	0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44,
    -	0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74,
    -	0x22, 0x47, 0x0a, 0x0c, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72,
    -	0x12, 0x37, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01,
    -	0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79,
    -	0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52,
    -	0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x22, 0xb1, 0x01, 0x0a, 0x07, 0x54, 0x72,
    -	0x61, 0x69, 0x6c, 0x65, 0x72, 0x12, 0x37, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74,
    -	0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62,
    -	0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x74, 0x61,
    -	0x64, 0x61, 0x74, 0x61, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x1f,
    -	0x0a, 0x0b, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x02, 0x20,
    -	0x01, 0x28, 0x0d, 0x52, 0x0a, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, 0x6f, 0x64, 0x65, 0x12,
    -	0x25, 0x0a, 0x0e, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67,
    -	0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x4d,
    -	0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73,
    -	0x5f, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0d,
    -	0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x22, 0x35, 0x0a,
    -	0x07, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x65, 0x6e, 0x67,
    -	0x74, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68,
    -	0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04,
    -	0x64, 0x61, 0x74, 0x61, 0x22, 0x42, 0x0a, 0x08, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61,
    -	0x12, 0x36, 0x0a, 0x05, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32,
    -	0x20, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67,
    -	0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72,
    -	0x79, 0x52, 0x05, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x22, 0x37, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x61,
    -	0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79,
    -	0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76,
    -	0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75,
    -	0x65, 0x22, 0xb8, 0x01, 0x0a, 0x07, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x33, 0x0a,
    -	0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1f, 0x2e, 0x67, 0x72,
    -	0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e,
    -	0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79,
    -	0x70, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x02, 0x20,
    -	0x01, 0x28, 0x09, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x17, 0x0a, 0x07,
    -	0x69, 0x70, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x69,
    -	0x70, 0x50, 0x6f, 0x72, 0x74, 0x22, 0x45, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x10, 0x0a,
    -	0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12,
    -	0x0d, 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x50, 0x56, 0x34, 0x10, 0x01, 0x12, 0x0d,
    -	0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x50, 0x56, 0x36, 0x10, 0x02, 0x12, 0x0d, 0x0a,
    -	0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x49, 0x58, 0x10, 0x03, 0x42, 0x5c, 0x0a, 0x14,
    -	0x69, 0x6f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f,
    -	0x67, 0x2e, 0x76, 0x31, 0x42, 0x0e, 0x42, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x4c, 0x6f, 0x67, 0x50,
    -	0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x32, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67,
    -	0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x62,
    -	0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x62, 0x69,
    -	0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x5f, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74,
    -	0x6f, 0x33,
    -})
    +const file_grpc_binlog_v1_binarylog_proto_rawDesc = "" +
    +	"\n" +
    +	"\x1egrpc/binlog/v1/binarylog.proto\x12\x11grpc.binarylog.v1\x1a\x1egoogle/protobuf/duration.proto\x1a\x1fgoogle/protobuf/timestamp.proto\"\xbb\a\n" +
    +	"\fGrpcLogEntry\x128\n" +
    +	"\ttimestamp\x18\x01 \x01(\v2\x1a.google.protobuf.TimestampR\ttimestamp\x12\x17\n" +
    +	"\acall_id\x18\x02 \x01(\x04R\x06callId\x125\n" +
    +	"\x17sequence_id_within_call\x18\x03 \x01(\x04R\x14sequenceIdWithinCall\x12=\n" +
    +	"\x04type\x18\x04 \x01(\x0e2).grpc.binarylog.v1.GrpcLogEntry.EventTypeR\x04type\x12>\n" +
    +	"\x06logger\x18\x05 \x01(\x0e2&.grpc.binarylog.v1.GrpcLogEntry.LoggerR\x06logger\x12F\n" +
    +	"\rclient_header\x18\x06 \x01(\v2\x1f.grpc.binarylog.v1.ClientHeaderH\x00R\fclientHeader\x12F\n" +
    +	"\rserver_header\x18\a \x01(\v2\x1f.grpc.binarylog.v1.ServerHeaderH\x00R\fserverHeader\x126\n" +
    +	"\amessage\x18\b \x01(\v2\x1a.grpc.binarylog.v1.MessageH\x00R\amessage\x126\n" +
    +	"\atrailer\x18\t \x01(\v2\x1a.grpc.binarylog.v1.TrailerH\x00R\atrailer\x12+\n" +
    +	"\x11payload_truncated\x18\n" +
    +	" \x01(\bR\x10payloadTruncated\x12.\n" +
    +	"\x04peer\x18\v \x01(\v2\x1a.grpc.binarylog.v1.AddressR\x04peer\"\xf5\x01\n" +
    +	"\tEventType\x12\x16\n" +
    +	"\x12EVENT_TYPE_UNKNOWN\x10\x00\x12\x1c\n" +
    +	"\x18EVENT_TYPE_CLIENT_HEADER\x10\x01\x12\x1c\n" +
    +	"\x18EVENT_TYPE_SERVER_HEADER\x10\x02\x12\x1d\n" +
    +	"\x19EVENT_TYPE_CLIENT_MESSAGE\x10\x03\x12\x1d\n" +
    +	"\x19EVENT_TYPE_SERVER_MESSAGE\x10\x04\x12 \n" +
    +	"\x1cEVENT_TYPE_CLIENT_HALF_CLOSE\x10\x05\x12\x1d\n" +
    +	"\x19EVENT_TYPE_SERVER_TRAILER\x10\x06\x12\x15\n" +
    +	"\x11EVENT_TYPE_CANCEL\x10\a\"B\n" +
    +	"\x06Logger\x12\x12\n" +
    +	"\x0eLOGGER_UNKNOWN\x10\x00\x12\x11\n" +
    +	"\rLOGGER_CLIENT\x10\x01\x12\x11\n" +
    +	"\rLOGGER_SERVER\x10\x02B\t\n" +
    +	"\apayload\"\xbb\x01\n" +
    +	"\fClientHeader\x127\n" +
    +	"\bmetadata\x18\x01 \x01(\v2\x1b.grpc.binarylog.v1.MetadataR\bmetadata\x12\x1f\n" +
    +	"\vmethod_name\x18\x02 \x01(\tR\n" +
    +	"methodName\x12\x1c\n" +
    +	"\tauthority\x18\x03 \x01(\tR\tauthority\x123\n" +
    +	"\atimeout\x18\x04 \x01(\v2\x19.google.protobuf.DurationR\atimeout\"G\n" +
    +	"\fServerHeader\x127\n" +
    +	"\bmetadata\x18\x01 \x01(\v2\x1b.grpc.binarylog.v1.MetadataR\bmetadata\"\xb1\x01\n" +
    +	"\aTrailer\x127\n" +
    +	"\bmetadata\x18\x01 \x01(\v2\x1b.grpc.binarylog.v1.MetadataR\bmetadata\x12\x1f\n" +
    +	"\vstatus_code\x18\x02 \x01(\rR\n" +
    +	"statusCode\x12%\n" +
    +	"\x0estatus_message\x18\x03 \x01(\tR\rstatusMessage\x12%\n" +
    +	"\x0estatus_details\x18\x04 \x01(\fR\rstatusDetails\"5\n" +
    +	"\aMessage\x12\x16\n" +
    +	"\x06length\x18\x01 \x01(\rR\x06length\x12\x12\n" +
    +	"\x04data\x18\x02 \x01(\fR\x04data\"B\n" +
    +	"\bMetadata\x126\n" +
    +	"\x05entry\x18\x01 \x03(\v2 .grpc.binarylog.v1.MetadataEntryR\x05entry\"7\n" +
    +	"\rMetadataEntry\x12\x10\n" +
    +	"\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" +
    +	"\x05value\x18\x02 \x01(\fR\x05value\"\xb8\x01\n" +
    +	"\aAddress\x123\n" +
    +	"\x04type\x18\x01 \x01(\x0e2\x1f.grpc.binarylog.v1.Address.TypeR\x04type\x12\x18\n" +
    +	"\aaddress\x18\x02 \x01(\tR\aaddress\x12\x17\n" +
    +	"\aip_port\x18\x03 \x01(\rR\x06ipPort\"E\n" +
    +	"\x04Type\x12\x10\n" +
    +	"\fTYPE_UNKNOWN\x10\x00\x12\r\n" +
    +	"\tTYPE_IPV4\x10\x01\x12\r\n" +
    +	"\tTYPE_IPV6\x10\x02\x12\r\n" +
    +	"\tTYPE_UNIX\x10\x03B\\\n" +
    +	"\x14io.grpc.binarylog.v1B\x0eBinaryLogProtoP\x01Z2google.golang.org/grpc/binarylog/grpc_binarylog_v1b\x06proto3"
     
     var (
     	file_grpc_binlog_v1_binarylog_proto_rawDescOnce sync.Once
    diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go
    index a319ef979..b767d3e33 100644
    --- a/vendor/google.golang.org/grpc/clientconn.go
    +++ b/vendor/google.golang.org/grpc/clientconn.go
    @@ -35,16 +35,19 @@ import (
     	"google.golang.org/grpc/balancer/pickfirst"
     	"google.golang.org/grpc/codes"
     	"google.golang.org/grpc/connectivity"
    +	"google.golang.org/grpc/credentials"
    +	expstats "google.golang.org/grpc/experimental/stats"
     	"google.golang.org/grpc/internal"
     	"google.golang.org/grpc/internal/channelz"
     	"google.golang.org/grpc/internal/grpcsync"
     	"google.golang.org/grpc/internal/idle"
     	iresolver "google.golang.org/grpc/internal/resolver"
    -	"google.golang.org/grpc/internal/stats"
    +	istats "google.golang.org/grpc/internal/stats"
     	"google.golang.org/grpc/internal/transport"
     	"google.golang.org/grpc/keepalive"
     	"google.golang.org/grpc/resolver"
     	"google.golang.org/grpc/serviceconfig"
    +	"google.golang.org/grpc/stats"
     	"google.golang.org/grpc/status"
     
     	_ "google.golang.org/grpc/balancer/roundrobin"           // To register roundrobin.
    @@ -97,6 +100,41 @@ var (
     	errTransportCredentialsMissing = errors.New("grpc: the credentials require transport level security (use grpc.WithTransportCredentials() to set)")
     )
     
    +var (
    +	disconnectionsMetric = expstats.RegisterInt64Count(expstats.MetricDescriptor{
    +		Name:           "grpc.subchannel.disconnections",
    +		Description:    "EXPERIMENTAL. Number of times the selected subchannel becomes disconnected.",
    +		Unit:           "{disconnection}",
    +		Labels:         []string{"grpc.target"},
    +		OptionalLabels: []string{"grpc.lb.backend_service", "grpc.lb.locality", "grpc.disconnect_error"},
    +		Default:        false,
    +	})
    +	connectionAttemptsSucceededMetric = expstats.RegisterInt64Count(expstats.MetricDescriptor{
    +		Name:           "grpc.subchannel.connection_attempts_succeeded",
    +		Description:    "EXPERIMENTAL. Number of successful connection attempts.",
    +		Unit:           "{attempt}",
    +		Labels:         []string{"grpc.target"},
    +		OptionalLabels: []string{"grpc.lb.backend_service", "grpc.lb.locality"},
    +		Default:        false,
    +	})
    +	connectionAttemptsFailedMetric = expstats.RegisterInt64Count(expstats.MetricDescriptor{
    +		Name:           "grpc.subchannel.connection_attempts_failed",
    +		Description:    "EXPERIMENTAL. Number of failed connection attempts.",
    +		Unit:           "{attempt}",
    +		Labels:         []string{"grpc.target"},
    +		OptionalLabels: []string{"grpc.lb.backend_service", "grpc.lb.locality"},
    +		Default:        false,
    +	})
    +	openConnectionsMetric = expstats.RegisterInt64UpDownCount(expstats.MetricDescriptor{
    +		Name:           "grpc.subchannel.open_connections",
    +		Description:    "EXPERIMENTAL. Number of open connections.",
    +		Unit:           "{attempt}",
    +		Labels:         []string{"grpc.target"},
    +		OptionalLabels: []string{"grpc.lb.backend_service", "grpc.security_level", "grpc.lb.locality"},
    +		Default:        false,
    +	})
    +)
    +
     const (
     	defaultClientMaxReceiveMessageSize = 1024 * 1024 * 4
     	defaultClientMaxSendMessageSize    = math.MaxInt32
    @@ -208,9 +246,10 @@ func NewClient(target string, opts ...DialOption) (conn *ClientConn, err error)
     	channelz.Infof(logger, cc.channelz, "Channel authority set to %q", cc.authority)
     
     	cc.csMgr = newConnectivityStateManager(cc.ctx, cc.channelz)
    -	cc.pickerWrapper = newPickerWrapper(cc.dopts.copts.StatsHandlers)
    +	cc.pickerWrapper = newPickerWrapper()
     
    -	cc.metricsRecorderList = stats.NewMetricsRecorderList(cc.dopts.copts.StatsHandlers)
    +	cc.metricsRecorderList = istats.NewMetricsRecorderList(cc.dopts.copts.StatsHandlers)
    +	cc.statsHandler = istats.NewCombinedHandler(cc.dopts.copts.StatsHandlers...)
     
     	cc.initIdleStateLocked() // Safe to call without the lock, since nothing else has a reference to cc.
     	cc.idlenessMgr = idle.NewManager((*idler)(cc), cc.dopts.idleTimeout)
    @@ -260,9 +299,10 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *
     	}()
     
     	// This creates the name resolver, load balancer, etc.
    -	if err := cc.idlenessMgr.ExitIdleMode(); err != nil {
    -		return nil, err
    +	if err := cc.exitIdleMode(); err != nil {
    +		return nil, fmt.Errorf("failed to exit idle mode: %w", err)
     	}
    +	cc.idlenessMgr.UnsafeSetNotIdle()
     
     	// Return now for non-blocking dials.
     	if !cc.dopts.block {
    @@ -330,7 +370,7 @@ func (cc *ClientConn) addTraceEvent(msg string) {
     			Severity: channelz.CtInfo,
     		}
     	}
    -	channelz.AddTraceEvent(logger, cc.channelz, 0, ted)
    +	channelz.AddTraceEvent(logger, cc.channelz, 1, ted)
     }
     
     type idler ClientConn
    @@ -339,14 +379,17 @@ func (i *idler) EnterIdleMode() {
     	(*ClientConn)(i).enterIdleMode()
     }
     
    -func (i *idler) ExitIdleMode() error {
    -	return (*ClientConn)(i).exitIdleMode()
    +func (i *idler) ExitIdleMode() {
    +	// Ignore the error returned from this method, because from the perspective
    +	// of the caller (idleness manager), the channel would have always moved out
    +	// of IDLE by the time this method returns.
    +	(*ClientConn)(i).exitIdleMode()
     }
     
     // exitIdleMode moves the channel out of idle mode by recreating the name
     // resolver and load balancer.  This should never be called directly; use
     // cc.idlenessMgr.ExitIdleMode instead.
    -func (cc *ClientConn) exitIdleMode() (err error) {
    +func (cc *ClientConn) exitIdleMode() error {
     	cc.mu.Lock()
     	if cc.conns == nil {
     		cc.mu.Unlock()
    @@ -354,11 +397,23 @@ func (cc *ClientConn) exitIdleMode() (err error) {
     	}
     	cc.mu.Unlock()
     
    +	// Set state to CONNECTING before building the name resolver
    +	// so the channel does not remain in IDLE.
    +	cc.csMgr.updateState(connectivity.Connecting)
    +
     	// This needs to be called without cc.mu because this builds a new resolver
     	// which might update state or report error inline, which would then need to
     	// acquire cc.mu.
     	if err := cc.resolverWrapper.start(); err != nil {
    -		return err
    +		// If resolver creation fails, treat it like an error reported by the
    +		// resolver before any valid updates. Set channel's state to
    +		// TransientFailure, and set an erroring picker with the resolver build
    +		// error, which will be returned as part of any subsequent RPCs.
    +		logger.Warningf("Failed to start resolver: %v", err)
    +		cc.csMgr.updateState(connectivity.TransientFailure)
    +		cc.mu.Lock()
    +		cc.updateResolverStateAndUnlock(resolver.State{}, err)
    +		return fmt.Errorf("failed to start resolver: %w", err)
     	}
     
     	cc.addTraceEvent("exiting idle mode")
    @@ -456,7 +511,7 @@ func (cc *ClientConn) validateTransportCredentials() error {
     func (cc *ClientConn) channelzRegistration(target string) {
     	parentChannel, _ := cc.dopts.channelzParent.(*channelz.Channel)
     	cc.channelz = channelz.RegisterChannel(parentChannel, target)
    -	cc.addTraceEvent("created")
    +	cc.addTraceEvent(fmt.Sprintf("created for target %q", target))
     }
     
     // chainUnaryClientInterceptors chains all unary client interceptors into one.
    @@ -621,7 +676,8 @@ type ClientConn struct {
     	channelz            *channelz.Channel // Channelz object.
     	resolverBuilder     resolver.Builder  // See initParsedTargetAndResolverBuilder().
     	idlenessMgr         *idle.Manager
    -	metricsRecorderList *stats.MetricsRecorderList
    +	metricsRecorderList *istats.MetricsRecorderList
    +	statsHandler        stats.Handler
     
     	// The following provide their own synchronization, and therefore don't
     	// require cc.mu to be held to access them.
    @@ -678,10 +734,8 @@ func (cc *ClientConn) GetState() connectivity.State {
     // Notice: This API is EXPERIMENTAL and may be changed or removed in a later
     // release.
     func (cc *ClientConn) Connect() {
    -	if err := cc.idlenessMgr.ExitIdleMode(); err != nil {
    -		cc.addTraceEvent(err.Error())
    -		return
    -	}
    +	cc.idlenessMgr.ExitIdleMode()
    +
     	// If the ClientConn was not in idle mode, we need to call ExitIdle on the
     	// LB policy so that connections can be created.
     	cc.mu.Lock()
    @@ -689,22 +743,31 @@ func (cc *ClientConn) Connect() {
     	cc.mu.Unlock()
     }
     
    -// waitForResolvedAddrs blocks until the resolver has provided addresses or the
    -// context expires.  Returns nil unless the context expires first; otherwise
    -// returns a status error based on the context.
    -func (cc *ClientConn) waitForResolvedAddrs(ctx context.Context) error {
    +// waitForResolvedAddrs blocks until the resolver provides addresses or the
    +// context expires, whichever happens first.
    +//
    +// Error is nil unless the context expires first; otherwise returns a status
    +// error based on the context.
    +//
    +// The returned boolean indicates whether it did block or not. If the
    +// resolution has already happened once before, it returns false without
    +// blocking. Otherwise, it waits for the resolution and returns true if
    +// resolution has succeeded or returns false along with an error if
    +// resolution has failed.
    +func (cc *ClientConn) waitForResolvedAddrs(ctx context.Context) (bool, error) {
     	// This is on the RPC path, so we use a fast path to avoid the
     	// more-expensive "select" below after the resolver has returned once.
     	if cc.firstResolveEvent.HasFired() {
    -		return nil
    +		return false, nil
     	}
    +	internal.NewStreamWaitingForResolver()
     	select {
     	case <-cc.firstResolveEvent.Done():
    -		return nil
    +		return true, nil
     	case <-ctx.Done():
    -		return status.FromContextError(ctx.Err()).Err()
    +		return false, status.FromContextError(ctx.Err()).Err()
     	case <-cc.ctx.Done():
    -		return ErrClientConnClosing
    +		return false, ErrClientConnClosing
     	}
     }
     
    @@ -723,8 +786,8 @@ func init() {
     	internal.EnterIdleModeForTesting = func(cc *ClientConn) {
     		cc.idlenessMgr.EnterIdleModeForTesting()
     	}
    -	internal.ExitIdleModeForTesting = func(cc *ClientConn) error {
    -		return cc.idlenessMgr.ExitIdleMode()
    +	internal.ExitIdleModeForTesting = func(cc *ClientConn) {
    +		cc.idlenessMgr.ExitIdleMode()
     	}
     }
     
    @@ -849,6 +912,7 @@ func (cc *ClientConn) newAddrConnLocked(addrs []resolver.Address, opts balancer.
     		channelz:     channelz.RegisterSubChannel(cc.channelz, ""),
     		resetBackoff: make(chan struct{}),
     	}
    +	ac.updateTelemetryLabelsLocked()
     	ac.ctx, ac.cancel = context.WithCancel(cc.ctx)
     	// Start with our address set to the first address; this may be updated if
     	// we connect to different addresses.
    @@ -965,7 +1029,7 @@ func (ac *addrConn) updateAddrs(addrs []resolver.Address) {
     	}
     
     	ac.addrs = addrs
    -
    +	ac.updateTelemetryLabelsLocked()
     	if ac.state == connectivity.Shutdown ||
     		ac.state == connectivity.TransientFailure ||
     		ac.state == connectivity.Idle {
    @@ -1067,13 +1131,6 @@ func (cc *ClientConn) healthCheckConfig() *healthCheckConfig {
     	return cc.sc.healthCheckConfig
     }
     
    -func (cc *ClientConn) getTransport(ctx context.Context, failfast bool, method string) (transport.ClientTransport, balancer.PickResult, error) {
    -	return cc.pickerWrapper.pick(ctx, failfast, balancer.PickInfo{
    -		Ctx:            ctx,
    -		FullMethodName: method,
    -	})
    -}
    -
     func (cc *ClientConn) applyServiceConfigAndBalancer(sc *ServiceConfig, configSelector iresolver.ConfigSelector) {
     	if sc == nil {
     		// should never reach here.
    @@ -1211,6 +1268,9 @@ type addrConn struct {
     	resetBackoff chan struct{}
     
     	channelz *channelz.SubChannel
    +
    +	localityLabel       string
    +	backendServiceLabel string
     }
     
     // Note: this requires a lock on ac.mu.
    @@ -1218,6 +1278,18 @@ func (ac *addrConn) updateConnectivityState(s connectivity.State, lastErr error)
     	if ac.state == s {
     		return
     	}
    +
    +	// If we are transitioning out of Ready, it means there is a disconnection.
    +	// A SubConn can also transition from CONNECTING directly to IDLE when
    +	// a transport is successfully created, but the connection fails
    +	// before the SubConn can send the notification for READY. We treat
    +	// this as a successful connection and transition to IDLE.
    +	// TODO: https://github.com/grpc/grpc-go/issues/7862 - Remove the second
    +	// part of the if condition below once the issue is fixed.
    +	if ac.state == connectivity.Ready || (ac.state == connectivity.Connecting && s == connectivity.Idle) {
    +		disconnectionsMetric.Record(ac.cc.metricsRecorderList, 1, ac.cc.target, ac.backendServiceLabel, ac.localityLabel, "unknown")
    +		openConnectionsMetric.Record(ac.cc.metricsRecorderList, -1, ac.cc.target, ac.backendServiceLabel, ac.securityLevelLocked(), ac.localityLabel)
    +	}
     	ac.state = s
     	ac.channelz.ChannelMetrics.State.Store(&s)
     	if lastErr == nil {
    @@ -1231,8 +1303,7 @@ func (ac *addrConn) updateConnectivityState(s connectivity.State, lastErr error)
     // adjustParams updates parameters used to create transports upon
     // receiving a GoAway.
     func (ac *addrConn) adjustParams(r transport.GoAwayReason) {
    -	switch r {
    -	case transport.GoAwayTooManyPings:
    +	if r == transport.GoAwayTooManyPings {
     		v := 2 * ac.dopts.copts.KeepaliveParams.Time
     		ac.cc.mu.Lock()
     		if v > ac.cc.keepaliveParams.Time {
    @@ -1276,6 +1347,15 @@ func (ac *addrConn) resetTransportAndUnlock() {
     	ac.mu.Unlock()
     
     	if err := ac.tryAllAddrs(acCtx, addrs, connectDeadline); err != nil {
    +		if !errors.Is(err, context.Canceled) {
    +			connectionAttemptsFailedMetric.Record(ac.cc.metricsRecorderList, 1, ac.cc.target, ac.backendServiceLabel, ac.localityLabel)
    +		} else {
    +			if logger.V(2) {
    +				// This records cancelled connection attempts which can be later
    +				// replaced by a metric.
    +				logger.Infof("Context cancellation detected; not recording this as a failed connection attempt.")
    +			}
    +		}
     		// TODO: #7534 - Move re-resolution requests into the pick_first LB policy
     		// to ensure one resolution request per pass instead of per subconn failure.
     		ac.cc.resolveNow(resolver.ResolveNowOptions{})
    @@ -1315,10 +1395,50 @@ func (ac *addrConn) resetTransportAndUnlock() {
     	}
     	// Success; reset backoff.
     	ac.mu.Lock()
    +	connectionAttemptsSucceededMetric.Record(ac.cc.metricsRecorderList, 1, ac.cc.target, ac.backendServiceLabel, ac.localityLabel)
    +	openConnectionsMetric.Record(ac.cc.metricsRecorderList, 1, ac.cc.target, ac.backendServiceLabel, ac.securityLevelLocked(), ac.localityLabel)
     	ac.backoffIdx = 0
     	ac.mu.Unlock()
     }
     
    +// updateTelemetryLabelsLocked calculates and caches the telemetry labels based on the
    +// first address in addrConn.
    +func (ac *addrConn) updateTelemetryLabelsLocked() {
    +	labelsFunc, ok := internal.AddressToTelemetryLabels.(func(resolver.Address) map[string]string)
    +	if !ok || len(ac.addrs) == 0 {
    +		// Reset defaults
    +		ac.localityLabel = ""
    +		ac.backendServiceLabel = ""
    +		return
    +	}
    +	labels := labelsFunc(ac.addrs[0])
    +	ac.localityLabel = labels["grpc.lb.locality"]
    +	ac.backendServiceLabel = labels["grpc.lb.backend_service"]
    +}
    +
    +type securityLevelKey struct{}
    +
    +func (ac *addrConn) securityLevelLocked() string {
    +	var secLevel string
    +	// During disconnection, ac.transport is nil. Fall back to the security level
    +	// stored in the current address during connection.
    +	if ac.transport == nil {
    +		secLevel, _ = ac.curAddr.Attributes.Value(securityLevelKey{}).(string)
    +		return secLevel
    +	}
    +	authInfo := ac.transport.Peer().AuthInfo
    +	if ci, ok := authInfo.(interface {
    +		GetCommonAuthInfo() credentials.CommonAuthInfo
    +	}); ok {
    +		secLevel = ci.GetCommonAuthInfo().SecurityLevel.String()
    +		// Store the security level in the current address' attributes so
    +		// that it remains available for disconnection metrics after the
    +		// transport is closed.
    +		ac.curAddr.Attributes = ac.curAddr.Attributes.WithValue(securityLevelKey{}, secLevel)
    +	}
    +	return secLevel
    +}
    +
     // tryAllAddrs tries to create a connection to the addresses, and stop when at
     // the first successful one. It returns an error if no address was successfully
     // connected, or updates ac appropriately with the new transport.
    @@ -1823,7 +1943,7 @@ func (cc *ClientConn) initAuthority() error {
     	} else if auth, ok := cc.resolverBuilder.(resolver.AuthorityOverrider); ok {
     		cc.authority = auth.OverrideAuthority(cc.parsedTarget)
     	} else if strings.HasPrefix(endpoint, ":") {
    -		cc.authority = "localhost" + endpoint
    +		cc.authority = "localhost" + encodeAuthority(endpoint)
     	} else {
     		cc.authority = encodeAuthority(endpoint)
     	}
    diff --git a/vendor/google.golang.org/grpc/credentials/credentials.go b/vendor/google.golang.org/grpc/credentials/credentials.go
    index 665e790bb..06f6c6c70 100644
    --- a/vendor/google.golang.org/grpc/credentials/credentials.go
    +++ b/vendor/google.golang.org/grpc/credentials/credentials.go
    @@ -44,8 +44,7 @@ type PerRPCCredentials interface {
     	// A54). uri is the URI of the entry point for the request.  When supported
     	// by the underlying implementation, ctx can be used for timeout and
     	// cancellation. Additionally, RequestInfo data will be available via ctx
    -	// to this call.  TODO(zhaoq): Define the set of the qualified keys instead
    -	// of leaving it as an arbitrary string.
    +	// to this call.
     	GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error)
     	// RequireTransportSecurity indicates whether the credentials requires
     	// transport security.
    @@ -96,10 +95,11 @@ func (c CommonAuthInfo) GetCommonAuthInfo() CommonAuthInfo {
     	return c
     }
     
    -// ProtocolInfo provides information regarding the gRPC wire protocol version,
    -// security protocol, security protocol version in use, server name, etc.
    +// ProtocolInfo provides static information regarding transport credentials.
     type ProtocolInfo struct {
     	// ProtocolVersion is the gRPC wire protocol version.
    +	//
    +	// Deprecated: this is unused by gRPC.
     	ProtocolVersion string
     	// SecurityProtocol is the security protocol in use.
     	SecurityProtocol string
    @@ -109,7 +109,16 @@ type ProtocolInfo struct {
     	//
     	// Deprecated: please use Peer.AuthInfo.
     	SecurityVersion string
    -	// ServerName is the user-configured server name.
    +	// ServerName is the user-configured server name.  If set, this overrides
    +	// the default :authority header used for all RPCs on the channel using the
    +	// containing credentials, unless grpc.WithAuthority is set on the channel,
    +	// in which case that setting will take precedence.
    +	//
    +	// This must be a valid `:authority` header according to
    +	// [RFC3986](https://datatracker.ietf.org/doc/html/rfc3986#section-3.2).
    +	//
    +	// Deprecated: Users should use grpc.WithAuthority to override the authority
    +	// on a channel instead of configuring the credentials.
     	ServerName string
     }
     
    @@ -120,6 +129,20 @@ type AuthInfo interface {
     	AuthType() string
     }
     
    +// AuthorityValidator validates the authority used to override the `:authority`
    +// header. This is an optional interface that implementations of AuthInfo can
    +// implement if they support per-RPC authority overrides. It is invoked when the
    +// application attempts to override the HTTP/2 `:authority` header using the
    +// CallAuthority call option.
    +type AuthorityValidator interface {
    +	// ValidateAuthority checks the authority value used to override the
    +	// `:authority` header. The authority parameter is the override value
    +	// provided by the application via the CallAuthority option. This value
    +	// typically corresponds to the server hostname or endpoint the RPC is
    +	// targeting. It returns a non-nil error if the validation fails.
    +	ValidateAuthority(authority string) error
    +}
    +
     // ErrConnDispatched indicates that rawConn has been dispatched out of gRPC
     // and the caller should not close rawConn.
     var ErrConnDispatched = errors.New("credentials: rawConn is dispatched out of gRPC")
    @@ -159,12 +182,17 @@ type TransportCredentials interface {
     	// Clone makes a copy of this TransportCredentials.
     	Clone() TransportCredentials
     	// OverrideServerName specifies the value used for the following:
    +	//
     	// - verifying the hostname on the returned certificates
     	// - as SNI in the client's handshake to support virtual hosting
     	// - as the value for `:authority` header at stream creation time
     	//
    -	// Deprecated: use grpc.WithAuthority instead. Will be supported
    -	// throughout 1.x.
    +	// The provided string should be a valid `:authority` header according to
    +	// [RFC3986](https://datatracker.ietf.org/doc/html/rfc3986#section-3.2).
    +	//
    +	// Deprecated: this method is unused by gRPC.  Users should use
    +	// grpc.WithAuthority to override the authority on a channel instead of
    +	// configuring the credentials.
     	OverrideServerName(string) error
     }
     
    @@ -207,14 +235,32 @@ type RequestInfo struct {
     	AuthInfo AuthInfo
     }
     
    +// requestInfoKey is a struct to be used as the key to store RequestInfo in a
    +// context.
    +type requestInfoKey struct{}
    +
     // RequestInfoFromContext extracts the RequestInfo from the context if it exists.
     //
     // This API is experimental.
     func RequestInfoFromContext(ctx context.Context) (ri RequestInfo, ok bool) {
    -	ri, ok = icredentials.RequestInfoFromContext(ctx).(RequestInfo)
    +	ri, ok = ctx.Value(requestInfoKey{}).(RequestInfo)
     	return ri, ok
     }
     
    +// NewContextWithRequestInfo creates a new context from ctx and attaches ri to it.
    +//
    +// This RequestInfo will be accessible via RequestInfoFromContext.
    +//
    +// Intended to be used from tests for PerRPCCredentials implementations (that
    +// often need to check connection's SecurityLevel). Should not be used from
    +// non-test code: the gRPC client already prepares a context with the correct
    +// RequestInfo attached when calling PerRPCCredentials.GetRequestMetadata.
    +//
    +// This API is experimental.
    +func NewContextWithRequestInfo(ctx context.Context, ri RequestInfo) context.Context {
    +	return context.WithValue(ctx, requestInfoKey{}, ri)
    +}
    +
     // ClientHandshakeInfo holds data to be passed to ClientHandshake. This makes
     // it possible to pass arbitrary data to the handshaker from gRPC, resolver,
     // balancer etc. Individual credential implementations control the actual
    diff --git a/vendor/google.golang.org/grpc/credentials/insecure/insecure.go b/vendor/google.golang.org/grpc/credentials/insecure/insecure.go
    index 4c805c644..93156c0f3 100644
    --- a/vendor/google.golang.org/grpc/credentials/insecure/insecure.go
    +++ b/vendor/google.golang.org/grpc/credentials/insecure/insecure.go
    @@ -30,7 +30,7 @@ import (
     // NewCredentials returns a credentials which disables transport security.
     //
     // Note that using this credentials with per-RPC credentials which require
    -// transport security is incompatible and will cause grpc.Dial() to fail.
    +// transport security is incompatible and will cause RPCs to fail.
     func NewCredentials() credentials.TransportCredentials {
     	return insecureTC{}
     }
    @@ -71,6 +71,12 @@ func (info) AuthType() string {
     	return "insecure"
     }
     
    +// ValidateAuthority allows any value to be overridden for the :authority
    +// header.
    +func (info) ValidateAuthority(string) error {
    +	return nil
    +}
    +
     // insecureBundle implements an insecure bundle.
     // An insecure bundle provides a thin wrapper around insecureTC to support
     // the credentials.Bundle interface.
    diff --git a/vendor/google.golang.org/grpc/credentials/tls.go b/vendor/google.golang.org/grpc/credentials/tls.go
    index bd5fe22b6..8277be7d6 100644
    --- a/vendor/google.golang.org/grpc/credentials/tls.go
    +++ b/vendor/google.golang.org/grpc/credentials/tls.go
    @@ -22,6 +22,7 @@ import (
     	"context"
     	"crypto/tls"
     	"crypto/x509"
    +	"errors"
     	"fmt"
     	"net"
     	"net/url"
    @@ -50,6 +51,21 @@ func (t TLSInfo) AuthType() string {
     	return "tls"
     }
     
    +// ValidateAuthority validates the provided authority being used to override the
    +// :authority header by verifying it against the peer certificates. It returns a
    +// non-nil error if the validation fails.
    +func (t TLSInfo) ValidateAuthority(authority string) error {
    +	var errs []error
    +	for _, cert := range t.State.PeerCertificates {
    +		var err error
    +		if err = cert.VerifyHostname(authority); err == nil {
    +			return nil
    +		}
    +		errs = append(errs, err)
    +	}
    +	return fmt.Errorf("credentials: invalid authority %q: %v", authority, errors.Join(errs...))
    +}
    +
     // cipherSuiteLookup returns the string version of a TLS cipher suite ID.
     func cipherSuiteLookup(cipherSuiteID uint16) string {
     	for _, s := range tls.CipherSuites() {
    @@ -94,14 +110,14 @@ func (c tlsCreds) Info() ProtocolInfo {
     func (c *tlsCreds) ClientHandshake(ctx context.Context, authority string, rawConn net.Conn) (_ net.Conn, _ AuthInfo, err error) {
     	// use local cfg to avoid clobbering ServerName if using multiple endpoints
     	cfg := credinternal.CloneTLSConfig(c.config)
    -	if cfg.ServerName == "" {
    -		serverName, _, err := net.SplitHostPort(authority)
    -		if err != nil {
    -			// If the authority had no host port or if the authority cannot be parsed, use it as-is.
    -			serverName = authority
    -		}
    -		cfg.ServerName = serverName
    +
    +	serverName, _, err := net.SplitHostPort(authority)
    +	if err != nil {
    +		// If the authority had no host port or if the authority cannot be parsed, use it as-is.
    +		serverName = authority
     	}
    +	cfg.ServerName = serverName
    +
     	conn := tls.Client(rawConn, cfg)
     	errChannel := make(chan error, 1)
     	go func() {
    @@ -243,9 +259,11 @@ func applyDefaults(c *tls.Config) *tls.Config {
     // certificates to establish the identity of the client need to be included in
     // the credentials (eg: for mTLS), use NewTLS instead, where a complete
     // tls.Config can be specified.
    -// serverNameOverride is for testing only. If set to a non empty string,
    -// it will override the virtual host name of authority (e.g. :authority header
    -// field) in requests.
    +//
    +// serverNameOverride is for testing only. If set to a non empty string, it will
    +// override the virtual host name of authority (e.g. :authority header field) in
    +// requests.  Users should use grpc.WithAuthority passed to grpc.NewClient to
    +// override the authority of the client instead.
     func NewClientTLSFromCert(cp *x509.CertPool, serverNameOverride string) TransportCredentials {
     	return NewTLS(&tls.Config{ServerName: serverNameOverride, RootCAs: cp})
     }
    @@ -255,9 +273,11 @@ func NewClientTLSFromCert(cp *x509.CertPool, serverNameOverride string) Transpor
     // certificates to establish the identity of the client need to be included in
     // the credentials (eg: for mTLS), use NewTLS instead, where a complete
     // tls.Config can be specified.
    -// serverNameOverride is for testing only. If set to a non empty string,
    -// it will override the virtual host name of authority (e.g. :authority header
    -// field) in requests.
    +//
    +// serverNameOverride is for testing only. If set to a non empty string, it will
    +// override the virtual host name of authority (e.g. :authority header field) in
    +// requests.  Users should use grpc.WithAuthority passed to grpc.NewClient to
    +// override the authority of the client instead.
     func NewClientTLSFromFile(certFile, serverNameOverride string) (TransportCredentials, error) {
     	b, err := os.ReadFile(certFile)
     	if err != nil {
    diff --git a/vendor/google.golang.org/grpc/dialoptions.go b/vendor/google.golang.org/grpc/dialoptions.go
    index 405a2ffeb..7a5ac2e7c 100644
    --- a/vendor/google.golang.org/grpc/dialoptions.go
    +++ b/vendor/google.golang.org/grpc/dialoptions.go
    @@ -213,6 +213,7 @@ func WithReadBufferSize(s int) DialOption {
     func WithInitialWindowSize(s int32) DialOption {
     	return newFuncDialOption(func(o *dialOptions) {
     		o.copts.InitialWindowSize = s
    +		o.copts.StaticWindowSize = true
     	})
     }
     
    @@ -222,6 +223,26 @@ func WithInitialWindowSize(s int32) DialOption {
     func WithInitialConnWindowSize(s int32) DialOption {
     	return newFuncDialOption(func(o *dialOptions) {
     		o.copts.InitialConnWindowSize = s
    +		o.copts.StaticWindowSize = true
    +	})
    +}
    +
    +// WithStaticStreamWindowSize returns a DialOption which sets the initial
    +// stream window size to the value provided and disables dynamic flow control.
    +func WithStaticStreamWindowSize(s int32) DialOption {
    +	return newFuncDialOption(func(o *dialOptions) {
    +		o.copts.InitialWindowSize = s
    +		o.copts.StaticWindowSize = true
    +	})
    +}
    +
    +// WithStaticConnWindowSize returns a DialOption which sets the initial
    +// connection window size to the value provided and disables dynamic flow
    +// control.
    +func WithStaticConnWindowSize(s int32) DialOption {
    +	return newFuncDialOption(func(o *dialOptions) {
    +		o.copts.InitialConnWindowSize = s
    +		o.copts.StaticWindowSize = true
     	})
     }
     
    @@ -360,7 +381,7 @@ func WithReturnConnectionError() DialOption {
     //
     // Note that using this DialOption with per-RPC credentials (through
     // WithCredentialsBundle or WithPerRPCCredentials) which require transport
    -// security is incompatible and will cause grpc.Dial() to fail.
    +// security is incompatible and will cause RPCs to fail.
     //
     // Deprecated: use WithTransportCredentials and insecure.NewCredentials()
     // instead. Will be supported throughout 1.x.
    @@ -587,6 +608,8 @@ func WithChainStreamInterceptor(interceptors ...StreamClientInterceptor) DialOpt
     
     // WithAuthority returns a DialOption that specifies the value to be used as the
     // :authority pseudo-header and as the server name in authentication handshake.
    +// This overrides all other ways of setting authority on the channel, but can be
    +// overridden per-call by using grpc.CallAuthority.
     func WithAuthority(a string) DialOption {
     	return newFuncDialOption(func(o *dialOptions) {
     		o.authority = a
    diff --git a/vendor/google.golang.org/grpc/encoding/encoding.go b/vendor/google.golang.org/grpc/encoding/encoding.go
    index 11d0ae142..dadd21e40 100644
    --- a/vendor/google.golang.org/grpc/encoding/encoding.go
    +++ b/vendor/google.golang.org/grpc/encoding/encoding.go
    @@ -27,8 +27,10 @@ package encoding
     
     import (
     	"io"
    +	"slices"
     	"strings"
     
    +	"google.golang.org/grpc/encoding/internal"
     	"google.golang.org/grpc/internal/grpcutil"
     )
     
    @@ -36,6 +38,24 @@ import (
     // It is intended for grpc internal use only.
     const Identity = "identity"
     
    +func init() {
    +	internal.RegisterCompressorForTesting = func(c Compressor) func() {
    +		name := c.Name()
    +		curCompressor, found := registeredCompressor[name]
    +		RegisterCompressor(c)
    +		return func() {
    +			if found {
    +				registeredCompressor[name] = curCompressor
    +				return
    +			}
    +			delete(registeredCompressor, name)
    +			grpcutil.RegisteredCompressorNames = slices.DeleteFunc(grpcutil.RegisteredCompressorNames, func(s string) bool {
    +				return s == name
    +			})
    +		}
    +	}
    +}
    +
     // Compressor is used for compressing and decompressing when sending or
     // receiving messages.
     //
    diff --git a/vendor/google.golang.org/grpc/encoding/internal/internal.go b/vendor/google.golang.org/grpc/encoding/internal/internal.go
    new file mode 100644
    index 000000000..ee9acb437
    --- /dev/null
    +++ b/vendor/google.golang.org/grpc/encoding/internal/internal.go
    @@ -0,0 +1,28 @@
    +/*
    + *
    + * Copyright 2025 gRPC authors.
    + *
    + * Licensed under the Apache License, Version 2.0 (the "License");
    + * you may not use this file except in compliance with the License.
    + * You may obtain a copy of the License at
    + *
    + *     http://www.apache.org/licenses/LICENSE-2.0
    + *
    + * Unless required by applicable law or agreed to in writing, software
    + * distributed under the License is distributed on an "AS IS" BASIS,
    + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    + * See the License for the specific language governing permissions and
    + * limitations under the License.
    + *
    + */
    +
    +// Package internal contains code internal to the encoding package.
    +package internal
    +
    +// RegisterCompressorForTesting registers a compressor in the global compressor
    +// registry. It returns a cleanup function that should be called at the end
    +// of the test to unregister the compressor.
    +//
    +// This prevents compressors registered in one test from appearing in the
    +// encoding headers of subsequent tests.
    +var RegisterCompressorForTesting any // func RegisterCompressor(c Compressor) func()
    diff --git a/vendor/google.golang.org/grpc/encoding/proto/proto.go b/vendor/google.golang.org/grpc/encoding/proto/proto.go
    index ceec319dd..1ab874c7a 100644
    --- a/vendor/google.golang.org/grpc/encoding/proto/proto.go
    +++ b/vendor/google.golang.org/grpc/encoding/proto/proto.go
    @@ -46,9 +46,25 @@ func (c *codecV2) Marshal(v any) (data mem.BufferSlice, err error) {
     		return nil, fmt.Errorf("proto: failed to marshal, message is %T, want proto.Message", v)
     	}
     
    +	// Important: if we remove this Size call then we cannot use
    +	// UseCachedSize in MarshalOptions below.
     	size := proto.Size(vv)
    +
    +	// MarshalOptions with UseCachedSize allows reusing the result from the
    +	// previous Size call. This is safe here because:
    +	//
    +	// 1. We just computed the size.
    +	// 2. We assume the message is not being mutated concurrently.
    +	//
    +	// Important: If the proto.Size call above is removed, using UseCachedSize
    +	// becomes unsafe and may lead to incorrect marshaling.
    +	//
    +	// For more details, see the doc of UseCachedSize:
    +	// https://pkg.go.dev/google.golang.org/protobuf/proto#MarshalOptions
    +	marshalOptions := proto.MarshalOptions{UseCachedSize: true}
    +
     	if mem.IsBelowBufferPoolingThreshold(size) {
    -		buf, err := proto.Marshal(vv)
    +		buf, err := marshalOptions.Marshal(vv)
     		if err != nil {
     			return nil, err
     		}
    @@ -56,7 +72,7 @@ func (c *codecV2) Marshal(v any) (data mem.BufferSlice, err error) {
     	} else {
     		pool := mem.DefaultBufferPool()
     		buf := pool.Get(size)
    -		if _, err := (proto.MarshalOptions{}).MarshalAppend((*buf)[:0], vv); err != nil {
    +		if _, err := marshalOptions.MarshalAppend((*buf)[:0], vv); err != nil {
     			pool.Put(buf)
     			return nil, err
     		}
    diff --git a/vendor/google.golang.org/grpc/experimental/stats/metricregistry.go b/vendor/google.golang.org/grpc/experimental/stats/metricregistry.go
    index ad75313a1..472813f58 100644
    --- a/vendor/google.golang.org/grpc/experimental/stats/metricregistry.go
    +++ b/vendor/google.golang.org/grpc/experimental/stats/metricregistry.go
    @@ -75,6 +75,8 @@ const (
     	MetricTypeIntHisto
     	MetricTypeFloatHisto
     	MetricTypeIntGauge
    +	MetricTypeIntUpDownCount
    +	MetricTypeIntAsyncGauge
     )
     
     // Int64CountHandle is a typed handle for a int count metric. This handle
    @@ -93,6 +95,23 @@ func (h *Int64CountHandle) Record(recorder MetricsRecorder, incr int64, labels .
     	recorder.RecordInt64Count(h, incr, labels...)
     }
     
    +// Int64UpDownCountHandle is a typed handle for an int up-down counter metric.
    +// This handle is passed at the recording point in order to know which metric
    +// to record on.
    +type Int64UpDownCountHandle MetricDescriptor
    +
    +// Descriptor returns the int64 up-down counter handle typecast to a pointer to a
    +// MetricDescriptor.
    +func (h *Int64UpDownCountHandle) Descriptor() *MetricDescriptor {
    +	return (*MetricDescriptor)(h)
    +}
    +
    +// Record records the int64 up-down counter value on the metrics recorder provided.
    +// The value 'v' can be positive to increment or negative to decrement.
    +func (h *Int64UpDownCountHandle) Record(recorder MetricsRecorder, v int64, labels ...string) {
    +	recorder.RecordInt64UpDownCount(h, v, labels...)
    +}
    +
     // Float64CountHandle is a typed handle for a float count metric. This handle is
     // passed at the recording point in order to know which metric to record on.
     type Float64CountHandle MetricDescriptor
    @@ -154,6 +173,30 @@ func (h *Int64GaugeHandle) Record(recorder MetricsRecorder, incr int64, labels .
     	recorder.RecordInt64Gauge(h, incr, labels...)
     }
     
    +// AsyncMetric is a marker interface for asynchronous metric types.
    +type AsyncMetric interface {
    +	isAsync()
    +	Descriptor() *MetricDescriptor
    +}
    +
     +// Int64AsyncGaugeHandle is a typed handle for an int async gauge metric. This handle is
    +// passed at the recording point in order to know which metric to record on.
    +type Int64AsyncGaugeHandle MetricDescriptor
    +
    +// isAsync implements the AsyncMetric interface.
    +func (h *Int64AsyncGaugeHandle) isAsync() {}
    +
    +// Descriptor returns the int64 gauge handle typecast to a pointer to a
    +// MetricDescriptor.
    +func (h *Int64AsyncGaugeHandle) Descriptor() *MetricDescriptor {
    +	return (*MetricDescriptor)(h)
    +}
    +
    +// Record records the int64 gauge value on the metrics recorder provided.
    +func (h *Int64AsyncGaugeHandle) Record(recorder AsyncMetricsRecorder, value int64, labels ...string) {
    +	recorder.RecordInt64AsyncGauge(h, value, labels...)
    +}
    +
     // registeredMetrics are the registered metric descriptor names.
     var registeredMetrics = make(map[string]bool)
     
    @@ -249,6 +292,35 @@ func RegisterInt64Gauge(descriptor MetricDescriptor) *Int64GaugeHandle {
     	return (*Int64GaugeHandle)(descPtr)
     }
     
    +// RegisterInt64UpDownCount registers the metric description onto the global registry.
    +// It returns a typed handle to use for recording data.
    +//
    +// NOTE: this function must only be called during initialization time (i.e. in
    +// an init() function), and is not thread-safe. If multiple metrics are
    +// registered with the same name, this function will panic.
    +func RegisterInt64UpDownCount(descriptor MetricDescriptor) *Int64UpDownCountHandle {
    +	registerMetric(descriptor.Name, descriptor.Default)
    +	// Set the specific metric type for the up-down counter
    +	descriptor.Type = MetricTypeIntUpDownCount
    +	descPtr := &descriptor
    +	metricsRegistry[descriptor.Name] = descPtr
    +	return (*Int64UpDownCountHandle)(descPtr)
    +}
    +
    +// RegisterInt64AsyncGauge registers the metric description onto the global registry.
    +// It returns a typed handle to use for recording data.
    +//
    +// NOTE: this function must only be called during initialization time (i.e. in
    +// an init() function), and is not thread-safe. If multiple metrics are
    +// registered with the same name, this function will panic.
    +func RegisterInt64AsyncGauge(descriptor MetricDescriptor) *Int64AsyncGaugeHandle {
    +	registerMetric(descriptor.Name, descriptor.Default)
    +	descriptor.Type = MetricTypeIntAsyncGauge
    +	descPtr := &descriptor
    +	metricsRegistry[descriptor.Name] = descPtr
    +	return (*Int64AsyncGaugeHandle)(descPtr)
    +}
    +
     // snapshotMetricsRegistryForTesting snapshots the global data of the metrics
     // registry. Returns a cleanup function that sets the metrics registry to its
     // original state.
    diff --git a/vendor/google.golang.org/grpc/experimental/stats/metrics.go b/vendor/google.golang.org/grpc/experimental/stats/metrics.go
    index ee1423605..d7d404cbe 100644
    --- a/vendor/google.golang.org/grpc/experimental/stats/metrics.go
    +++ b/vendor/google.golang.org/grpc/experimental/stats/metrics.go
    @@ -38,6 +38,16 @@ type MetricsRecorder interface {
     	// RecordInt64Gauge records the measurement alongside labels on the int
     	// gauge associated with the provided handle.
     	RecordInt64Gauge(handle *Int64GaugeHandle, incr int64, labels ...string)
     +	// RecordInt64UpDownCount records the measurement alongside labels on the
     +	// int up-down counter associated with the provided handle.
    +	RecordInt64UpDownCount(handle *Int64UpDownCountHandle, incr int64, labels ...string)
    +}
    +
    +// AsyncMetricsRecorder records on asynchronous metrics derived from metric registry.
    +type AsyncMetricsRecorder interface {
     +	// RecordInt64AsyncGauge records the measurement alongside labels on the
     +	// int gauge associated with the provided handle asynchronously.
    +	RecordInt64AsyncGauge(handle *Int64AsyncGaugeHandle, incr int64, labels ...string)
     }
     
     // Metrics is an experimental legacy alias of the now-stable stats.MetricSet.
    diff --git a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go
    index 94177b05c..8f7d9f6bb 100644
    --- a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go
    +++ b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go
    @@ -17,7 +17,7 @@
     
     // Code generated by protoc-gen-go. DO NOT EDIT.
     // versions:
    -// 	protoc-gen-go v1.36.4
    +// 	protoc-gen-go v1.36.10
     // 	protoc        v5.27.1
     // source: grpc/health/v1/health.proto
     
    @@ -178,46 +178,112 @@ func (x *HealthCheckResponse) GetStatus() HealthCheckResponse_ServingStatus {
     	return HealthCheckResponse_UNKNOWN
     }
     
    +type HealthListRequest struct {
    +	state         protoimpl.MessageState `protogen:"open.v1"`
    +	unknownFields protoimpl.UnknownFields
    +	sizeCache     protoimpl.SizeCache
    +}
    +
    +func (x *HealthListRequest) Reset() {
    +	*x = HealthListRequest{}
    +	mi := &file_grpc_health_v1_health_proto_msgTypes[2]
    +	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    +	ms.StoreMessageInfo(mi)
    +}
    +
    +func (x *HealthListRequest) String() string {
    +	return protoimpl.X.MessageStringOf(x)
    +}
    +
    +func (*HealthListRequest) ProtoMessage() {}
    +
    +func (x *HealthListRequest) ProtoReflect() protoreflect.Message {
    +	mi := &file_grpc_health_v1_health_proto_msgTypes[2]
    +	if x != nil {
    +		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    +		if ms.LoadMessageInfo() == nil {
    +			ms.StoreMessageInfo(mi)
    +		}
    +		return ms
    +	}
    +	return mi.MessageOf(x)
    +}
    +
    +// Deprecated: Use HealthListRequest.ProtoReflect.Descriptor instead.
    +func (*HealthListRequest) Descriptor() ([]byte, []int) {
    +	return file_grpc_health_v1_health_proto_rawDescGZIP(), []int{2}
    +}
    +
    +type HealthListResponse struct {
    +	state protoimpl.MessageState `protogen:"open.v1"`
    +	// statuses contains all the services and their respective status.
    +	Statuses      map[string]*HealthCheckResponse `protobuf:"bytes,1,rep,name=statuses,proto3" json:"statuses,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
    +	unknownFields protoimpl.UnknownFields
    +	sizeCache     protoimpl.SizeCache
    +}
    +
    +func (x *HealthListResponse) Reset() {
    +	*x = HealthListResponse{}
    +	mi := &file_grpc_health_v1_health_proto_msgTypes[3]
    +	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    +	ms.StoreMessageInfo(mi)
    +}
    +
    +func (x *HealthListResponse) String() string {
    +	return protoimpl.X.MessageStringOf(x)
    +}
    +
    +func (*HealthListResponse) ProtoMessage() {}
    +
    +func (x *HealthListResponse) ProtoReflect() protoreflect.Message {
    +	mi := &file_grpc_health_v1_health_proto_msgTypes[3]
    +	if x != nil {
    +		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    +		if ms.LoadMessageInfo() == nil {
    +			ms.StoreMessageInfo(mi)
    +		}
    +		return ms
    +	}
    +	return mi.MessageOf(x)
    +}
    +
    +// Deprecated: Use HealthListResponse.ProtoReflect.Descriptor instead.
    +func (*HealthListResponse) Descriptor() ([]byte, []int) {
    +	return file_grpc_health_v1_health_proto_rawDescGZIP(), []int{3}
    +}
    +
    +func (x *HealthListResponse) GetStatuses() map[string]*HealthCheckResponse {
    +	if x != nil {
    +		return x.Statuses
    +	}
    +	return nil
    +}
    +
     var File_grpc_health_v1_health_proto protoreflect.FileDescriptor
     
    -var file_grpc_health_v1_health_proto_rawDesc = string([]byte{
    -	0x0a, 0x1b, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x2f, 0x76, 0x31,
    -	0x2f, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0e, 0x67,
    -	0x72, 0x70, 0x63, 0x2e, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x22, 0x2e, 0x0a,
    -	0x12, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75,
    -	0x65, 0x73, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x01,
    -	0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x22, 0xb1, 0x01,
    -	0x0a, 0x13, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x73,
    -	0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x49, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18,
    -	0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x31, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x68, 0x65, 0x61,
    -	0x6c, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65,
    -	0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69,
    -	0x6e, 0x67, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73,
    -	0x22, 0x4f, 0x0a, 0x0d, 0x53, 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, 0x53, 0x74, 0x61, 0x74, 0x75,
    -	0x73, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0b,
    -	0x0a, 0x07, 0x53, 0x45, 0x52, 0x56, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0f, 0x0a, 0x0b, 0x4e,
    -	0x4f, 0x54, 0x5f, 0x53, 0x45, 0x52, 0x56, 0x49, 0x4e, 0x47, 0x10, 0x02, 0x12, 0x13, 0x0a, 0x0f,
    -	0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10,
    -	0x03, 0x32, 0xae, 0x01, 0x0a, 0x06, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x12, 0x50, 0x0a, 0x05,
    -	0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x22, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x68, 0x65, 0x61,
    -	0x6c, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65,
    -	0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x67, 0x72, 0x70, 0x63,
    -	0x2e, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74,
    -	0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x52,
    -	0x0a, 0x05, 0x57, 0x61, 0x74, 0x63, 0x68, 0x12, 0x22, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x68,
    -	0x65, 0x61, 0x6c, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43,
    -	0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x67, 0x72,
    -	0x70, 0x63, 0x2e, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x65, 0x61,
    -	0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
    -	0x30, 0x01, 0x42, 0x70, 0x0a, 0x11, 0x69, 0x6f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x68, 0x65,
    -	0x61, 0x6c, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x42, 0x0b, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x50,
    -	0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67,
    -	0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x68,
    -	0x65, 0x61, 0x6c, 0x74, 0x68, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x68, 0x65, 0x61, 0x6c, 0x74,
    -	0x68, 0x5f, 0x76, 0x31, 0xa2, 0x02, 0x0c, 0x47, 0x72, 0x70, 0x63, 0x48, 0x65, 0x61, 0x6c, 0x74,
    -	0x68, 0x56, 0x31, 0xaa, 0x02, 0x0e, 0x47, 0x72, 0x70, 0x63, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74,
    -	0x68, 0x2e, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
    -})
    +const file_grpc_health_v1_health_proto_rawDesc = "" +
    +	"\n" +
    +	"\x1bgrpc/health/v1/health.proto\x12\x0egrpc.health.v1\".\n" +
    +	"\x12HealthCheckRequest\x12\x18\n" +
    +	"\aservice\x18\x01 \x01(\tR\aservice\"\xb1\x01\n" +
    +	"\x13HealthCheckResponse\x12I\n" +
    +	"\x06status\x18\x01 \x01(\x0e21.grpc.health.v1.HealthCheckResponse.ServingStatusR\x06status\"O\n" +
    +	"\rServingStatus\x12\v\n" +
    +	"\aUNKNOWN\x10\x00\x12\v\n" +
    +	"\aSERVING\x10\x01\x12\x0f\n" +
    +	"\vNOT_SERVING\x10\x02\x12\x13\n" +
    +	"\x0fSERVICE_UNKNOWN\x10\x03\"\x13\n" +
    +	"\x11HealthListRequest\"\xc4\x01\n" +
    +	"\x12HealthListResponse\x12L\n" +
    +	"\bstatuses\x18\x01 \x03(\v20.grpc.health.v1.HealthListResponse.StatusesEntryR\bstatuses\x1a`\n" +
    +	"\rStatusesEntry\x12\x10\n" +
    +	"\x03key\x18\x01 \x01(\tR\x03key\x129\n" +
    +	"\x05value\x18\x02 \x01(\v2#.grpc.health.v1.HealthCheckResponseR\x05value:\x028\x012\xfd\x01\n" +
    +	"\x06Health\x12P\n" +
    +	"\x05Check\x12\".grpc.health.v1.HealthCheckRequest\x1a#.grpc.health.v1.HealthCheckResponse\x12M\n" +
    +	"\x04List\x12!.grpc.health.v1.HealthListRequest\x1a\".grpc.health.v1.HealthListResponse\x12R\n" +
    +	"\x05Watch\x12\".grpc.health.v1.HealthCheckRequest\x1a#.grpc.health.v1.HealthCheckResponse0\x01Bp\n" +
    +	"\x11io.grpc.health.v1B\vHealthProtoP\x01Z,google.golang.org/grpc/health/grpc_health_v1\xa2\x02\fGrpcHealthV1\xaa\x02\x0eGrpc.Health.V1b\x06proto3"
     
     var (
     	file_grpc_health_v1_health_proto_rawDescOnce sync.Once
    @@ -232,23 +298,30 @@ func file_grpc_health_v1_health_proto_rawDescGZIP() []byte {
     }
     
     var file_grpc_health_v1_health_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
    -var file_grpc_health_v1_health_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
    +var file_grpc_health_v1_health_proto_msgTypes = make([]protoimpl.MessageInfo, 5)
     var file_grpc_health_v1_health_proto_goTypes = []any{
     	(HealthCheckResponse_ServingStatus)(0), // 0: grpc.health.v1.HealthCheckResponse.ServingStatus
     	(*HealthCheckRequest)(nil),             // 1: grpc.health.v1.HealthCheckRequest
     	(*HealthCheckResponse)(nil),            // 2: grpc.health.v1.HealthCheckResponse
    +	(*HealthListRequest)(nil),              // 3: grpc.health.v1.HealthListRequest
    +	(*HealthListResponse)(nil),             // 4: grpc.health.v1.HealthListResponse
    +	nil,                                    // 5: grpc.health.v1.HealthListResponse.StatusesEntry
     }
     var file_grpc_health_v1_health_proto_depIdxs = []int32{
     	0, // 0: grpc.health.v1.HealthCheckResponse.status:type_name -> grpc.health.v1.HealthCheckResponse.ServingStatus
    -	1, // 1: grpc.health.v1.Health.Check:input_type -> grpc.health.v1.HealthCheckRequest
    -	1, // 2: grpc.health.v1.Health.Watch:input_type -> grpc.health.v1.HealthCheckRequest
    -	2, // 3: grpc.health.v1.Health.Check:output_type -> grpc.health.v1.HealthCheckResponse
    -	2, // 4: grpc.health.v1.Health.Watch:output_type -> grpc.health.v1.HealthCheckResponse
    -	3, // [3:5] is the sub-list for method output_type
    -	1, // [1:3] is the sub-list for method input_type
    -	1, // [1:1] is the sub-list for extension type_name
    -	1, // [1:1] is the sub-list for extension extendee
    -	0, // [0:1] is the sub-list for field type_name
    +	5, // 1: grpc.health.v1.HealthListResponse.statuses:type_name -> grpc.health.v1.HealthListResponse.StatusesEntry
    +	2, // 2: grpc.health.v1.HealthListResponse.StatusesEntry.value:type_name -> grpc.health.v1.HealthCheckResponse
    +	1, // 3: grpc.health.v1.Health.Check:input_type -> grpc.health.v1.HealthCheckRequest
    +	3, // 4: grpc.health.v1.Health.List:input_type -> grpc.health.v1.HealthListRequest
    +	1, // 5: grpc.health.v1.Health.Watch:input_type -> grpc.health.v1.HealthCheckRequest
    +	2, // 6: grpc.health.v1.Health.Check:output_type -> grpc.health.v1.HealthCheckResponse
    +	4, // 7: grpc.health.v1.Health.List:output_type -> grpc.health.v1.HealthListResponse
    +	2, // 8: grpc.health.v1.Health.Watch:output_type -> grpc.health.v1.HealthCheckResponse
    +	6, // [6:9] is the sub-list for method output_type
    +	3, // [3:6] is the sub-list for method input_type
    +	3, // [3:3] is the sub-list for extension type_name
    +	3, // [3:3] is the sub-list for extension extendee
    +	0, // [0:3] is the sub-list for field type_name
     }
     
     func init() { file_grpc_health_v1_health_proto_init() }
    @@ -262,7 +335,7 @@ func file_grpc_health_v1_health_proto_init() {
     			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
     			RawDescriptor: unsafe.Slice(unsafe.StringData(file_grpc_health_v1_health_proto_rawDesc), len(file_grpc_health_v1_health_proto_rawDesc)),
     			NumEnums:      1,
    -			NumMessages:   2,
    +			NumMessages:   5,
     			NumExtensions: 0,
     			NumServices:   1,
     		},
    diff --git a/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go b/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go
    index f96b8ab49..e99cd5c83 100644
    --- a/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go
    +++ b/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go
    @@ -17,7 +17,7 @@
     
     // Code generated by protoc-gen-go-grpc. DO NOT EDIT.
     // versions:
    -// - protoc-gen-go-grpc v1.5.1
    +// - protoc-gen-go-grpc v1.6.0
     // - protoc             v5.27.1
     // source: grpc/health/v1/health.proto
     
    @@ -37,6 +37,7 @@ const _ = grpc.SupportPackageIsVersion9
     
     const (
     	Health_Check_FullMethodName = "/grpc.health.v1.Health/Check"
    +	Health_List_FullMethodName  = "/grpc.health.v1.Health/List"
     	Health_Watch_FullMethodName = "/grpc.health.v1.Health/Watch"
     )
     
    @@ -55,9 +56,19 @@ type HealthClient interface {
     	//
     	// Clients should set a deadline when calling Check, and can declare the
     	// server unhealthy if they do not receive a timely response.
    -	//
    -	// Check implementations should be idempotent and side effect free.
     	Check(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (*HealthCheckResponse, error)
    +	// List provides a non-atomic snapshot of the health of all the available
    +	// services.
    +	//
    +	// The server may respond with a RESOURCE_EXHAUSTED error if too many services
    +	// exist.
    +	//
    +	// Clients should set a deadline when calling List, and can declare the server
    +	// unhealthy if they do not receive a timely response.
    +	//
    +	// Clients should keep in mind that the list of health services exposed by an
    +	// application can change over the lifetime of the process.
    +	List(ctx context.Context, in *HealthListRequest, opts ...grpc.CallOption) (*HealthListResponse, error)
     	// Performs a watch for the serving status of the requested service.
     	// The server will immediately send back a message indicating the current
     	// serving status.  It will then subsequently send a new message whenever
    @@ -94,6 +105,16 @@ func (c *healthClient) Check(ctx context.Context, in *HealthCheckRequest, opts .
     	return out, nil
     }
     
    +func (c *healthClient) List(ctx context.Context, in *HealthListRequest, opts ...grpc.CallOption) (*HealthListResponse, error) {
    +	cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
    +	out := new(HealthListResponse)
    +	err := c.cc.Invoke(ctx, Health_List_FullMethodName, in, out, cOpts...)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return out, nil
    +}
    +
     func (c *healthClient) Watch(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[HealthCheckResponse], error) {
     	cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
     	stream, err := c.cc.NewStream(ctx, &Health_ServiceDesc.Streams[0], Health_Watch_FullMethodName, cOpts...)
    @@ -128,9 +149,19 @@ type HealthServer interface {
     	//
     	// Clients should set a deadline when calling Check, and can declare the
     	// server unhealthy if they do not receive a timely response.
    -	//
    -	// Check implementations should be idempotent and side effect free.
     	Check(context.Context, *HealthCheckRequest) (*HealthCheckResponse, error)
    +	// List provides a non-atomic snapshot of the health of all the available
    +	// services.
    +	//
    +	// The server may respond with a RESOURCE_EXHAUSTED error if too many services
    +	// exist.
    +	//
    +	// Clients should set a deadline when calling List, and can declare the server
    +	// unhealthy if they do not receive a timely response.
    +	//
    +	// Clients should keep in mind that the list of health services exposed by an
    +	// application can change over the lifetime of the process.
    +	List(context.Context, *HealthListRequest) (*HealthListResponse, error)
     	// Performs a watch for the serving status of the requested service.
     	// The server will immediately send back a message indicating the current
     	// serving status.  It will then subsequently send a new message whenever
    @@ -157,10 +188,13 @@ type HealthServer interface {
     type UnimplementedHealthServer struct{}
     
     func (UnimplementedHealthServer) Check(context.Context, *HealthCheckRequest) (*HealthCheckResponse, error) {
    -	return nil, status.Errorf(codes.Unimplemented, "method Check not implemented")
    +	return nil, status.Error(codes.Unimplemented, "method Check not implemented")
    +}
    +func (UnimplementedHealthServer) List(context.Context, *HealthListRequest) (*HealthListResponse, error) {
    +	return nil, status.Error(codes.Unimplemented, "method List not implemented")
     }
     func (UnimplementedHealthServer) Watch(*HealthCheckRequest, grpc.ServerStreamingServer[HealthCheckResponse]) error {
    -	return status.Errorf(codes.Unimplemented, "method Watch not implemented")
    +	return status.Error(codes.Unimplemented, "method Watch not implemented")
     }
     func (UnimplementedHealthServer) testEmbeddedByValue() {}
     
    @@ -200,6 +234,24 @@ func _Health_Check_Handler(srv interface{}, ctx context.Context, dec func(interf
     	return interceptor(ctx, in, info, handler)
     }
     
    +func _Health_List_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
    +	in := new(HealthListRequest)
    +	if err := dec(in); err != nil {
    +		return nil, err
    +	}
    +	if interceptor == nil {
    +		return srv.(HealthServer).List(ctx, in)
    +	}
    +	info := &grpc.UnaryServerInfo{
    +		Server:     srv,
    +		FullMethod: Health_List_FullMethodName,
    +	}
    +	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
    +		return srv.(HealthServer).List(ctx, req.(*HealthListRequest))
    +	}
    +	return interceptor(ctx, in, info, handler)
    +}
    +
     func _Health_Watch_Handler(srv interface{}, stream grpc.ServerStream) error {
     	m := new(HealthCheckRequest)
     	if err := stream.RecvMsg(m); err != nil {
    @@ -222,6 +274,10 @@ var Health_ServiceDesc = grpc.ServiceDesc{
     			MethodName: "Check",
     			Handler:    _Health_Check_Handler,
     		},
    +		{
    +			MethodName: "List",
    +			Handler:    _Health_List_Handler,
    +		},
     	},
     	Streams: []grpc.StreamDesc{
     		{
    diff --git a/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go b/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go
    index fbc1ca356..f38de74a4 100644
    --- a/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go
    +++ b/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go
    @@ -67,6 +67,10 @@ type Balancer struct {
     	// balancerCurrent before the UpdateSubConnState is called on the
     	// balancerCurrent.
     	currentMu sync.Mutex
    +
    +	// activeGoroutines tracks all the goroutines that this balancer has started
    +	// and that should be waited on when the balancer closes.
    +	activeGoroutines sync.WaitGroup
     }
     
     // swap swaps out the current lb with the pending lb and updates the ClientConn.
    @@ -76,7 +80,9 @@ func (gsb *Balancer) swap() {
     	cur := gsb.balancerCurrent
     	gsb.balancerCurrent = gsb.balancerPending
     	gsb.balancerPending = nil
    +	gsb.activeGoroutines.Add(1)
     	go func() {
    +		defer gsb.activeGoroutines.Done()
     		gsb.currentMu.Lock()
     		defer gsb.currentMu.Unlock()
     		cur.Close()
    @@ -223,15 +229,7 @@ func (gsb *Balancer) ExitIdle() {
     	// There is no need to protect this read with a mutex, as the write to the
     	// Balancer field happens in SwitchTo, which completes before this can be
     	// called.
    -	if ei, ok := balToUpdate.Balancer.(balancer.ExitIdler); ok {
    -		ei.ExitIdle()
    -		return
    -	}
    -	gsb.mu.Lock()
    -	defer gsb.mu.Unlock()
    -	for sc := range balToUpdate.subconns {
    -		sc.Connect()
    -	}
    +	balToUpdate.ExitIdle()
     }
     
     // updateSubConnState forwards the update to the appropriate child.
    @@ -282,6 +280,7 @@ func (gsb *Balancer) Close() {
     
     	currentBalancerToClose.Close()
     	pendingBalancerToClose.Close()
    +	gsb.activeGoroutines.Wait()
     }
     
     // balancerWrapper wraps a balancer.Balancer, and overrides some Balancer
    @@ -332,7 +331,12 @@ func (bw *balancerWrapper) UpdateState(state balancer.State) {
     	defer bw.gsb.mu.Unlock()
     	bw.lastState = state
     
    +	// If Close() acquires the mutex before UpdateState(), the balancer
    +	// will already have been removed from the current or pending state when
    +	// reaching this point.
     	if !bw.gsb.balancerCurrentOrPending(bw) {
    +		// Returning here ensures that (*Balancer).swap() is not invoked after
    +		// (*Balancer).Close() and therefore prevents "use after close".
     		return
     	}
     
    diff --git a/vendor/google.golang.org/grpc/internal/buffer/unbounded.go b/vendor/google.golang.org/grpc/internal/buffer/unbounded.go
    index 11f91668a..467392b8d 100644
    --- a/vendor/google.golang.org/grpc/internal/buffer/unbounded.go
    +++ b/vendor/google.golang.org/grpc/internal/buffer/unbounded.go
    @@ -83,6 +83,7 @@ func (b *Unbounded) Load() {
     		default:
     		}
     	} else if b.closing && !b.closed {
    +		b.closed = true
     		close(b.c)
     	}
     }
    diff --git a/vendor/google.golang.org/grpc/internal/channelz/trace.go b/vendor/google.golang.org/grpc/internal/channelz/trace.go
    index 2bffe4777..3b7ba5966 100644
    --- a/vendor/google.golang.org/grpc/internal/channelz/trace.go
    +++ b/vendor/google.golang.org/grpc/internal/channelz/trace.go
    @@ -194,7 +194,7 @@ func (r RefChannelType) String() string {
     // If channelz is not turned ON, this will simply log the event descriptions.
     func AddTraceEvent(l grpclog.DepthLoggerV2, e Entity, depth int, desc *TraceEvent) {
     	// Log only the trace description associated with the bottom most entity.
    -	d := fmt.Sprintf("[%s]%s", e, desc.Desc)
    +	d := fmt.Sprintf("[%s] %s", e, desc.Desc)
     	switch desc.Severity {
     	case CtUnknown, CtInfo:
     		l.InfoDepth(depth+1, d)
    diff --git a/vendor/google.golang.org/grpc/internal/credentials/credentials.go b/vendor/google.golang.org/grpc/internal/credentials/credentials.go
    index 9deee7f65..48b22d9cf 100644
    --- a/vendor/google.golang.org/grpc/internal/credentials/credentials.go
    +++ b/vendor/google.golang.org/grpc/internal/credentials/credentials.go
    @@ -20,20 +20,6 @@ import (
     	"context"
     )
     
    -// requestInfoKey is a struct to be used as the key to store RequestInfo in a
    -// context.
    -type requestInfoKey struct{}
    -
    -// NewRequestInfoContext creates a context with ri.
    -func NewRequestInfoContext(ctx context.Context, ri any) context.Context {
    -	return context.WithValue(ctx, requestInfoKey{}, ri)
    -}
    -
    -// RequestInfoFromContext extracts the RequestInfo from ctx.
    -func RequestInfoFromContext(ctx context.Context) any {
    -	return ctx.Value(requestInfoKey{})
    -}
    -
     // clientHandshakeInfoKey is a struct used as the key to store
     // ClientHandshakeInfo in a context.
     type clientHandshakeInfoKey struct{}
    diff --git a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go
    index 1e42b6fdc..6414ee4bb 100644
    --- a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go
    +++ b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go
    @@ -26,35 +26,62 @@ import (
     )
     
     var (
    -	// TXTErrIgnore is set if TXT errors should be ignored ("GRPC_GO_IGNORE_TXT_ERRORS" is not "false").
    +	// EnableTXTServiceConfig is set if the DNS resolver should perform TXT
    +	// lookups for service config ("GRPC_ENABLE_TXT_SERVICE_CONFIG" is not
    +	// "false").
    +	EnableTXTServiceConfig = boolFromEnv("GRPC_ENABLE_TXT_SERVICE_CONFIG", true)
    +
    +	// TXTErrIgnore is set if TXT errors should be ignored
    +	// ("GRPC_GO_IGNORE_TXT_ERRORS" is not "false").
     	TXTErrIgnore = boolFromEnv("GRPC_GO_IGNORE_TXT_ERRORS", true)
    +
     	// RingHashCap indicates the maximum ring size which defaults to 4096
     	// entries but may be overridden by setting the environment variable
     	// "GRPC_RING_HASH_CAP".  This does not override the default bounds
     	// checking which NACKs configs specifying ring sizes > 8*1024*1024 (~8M).
     	RingHashCap = uint64FromEnv("GRPC_RING_HASH_CAP", 4096, 1, 8*1024*1024)
    -	// LeastRequestLB is set if we should support the least_request_experimental
    -	// LB policy, which can be enabled by setting the environment variable
    -	// "GRPC_EXPERIMENTAL_ENABLE_LEAST_REQUEST" to "true".
    -	LeastRequestLB = boolFromEnv("GRPC_EXPERIMENTAL_ENABLE_LEAST_REQUEST", false)
    +
     	// ALTSMaxConcurrentHandshakes is the maximum number of concurrent ALTS
     	// handshakes that can be performed.
     	ALTSMaxConcurrentHandshakes = uint64FromEnv("GRPC_ALTS_MAX_CONCURRENT_HANDSHAKES", 100, 1, 100)
    +
     	// EnforceALPNEnabled is set if TLS connections to servers with ALPN disabled
     	// should be rejected. The HTTP/2 protocol requires ALPN to be enabled, this
     	// option is present for backward compatibility. This option may be overridden
     	// by setting the environment variable "GRPC_ENFORCE_ALPN_ENABLED" to "true"
     	// or "false".
     	EnforceALPNEnabled = boolFromEnv("GRPC_ENFORCE_ALPN_ENABLED", true)
    -	// XDSFallbackSupport is the env variable that controls whether support for
    -	// xDS fallback is turned on. If this is unset or is false, only the first
    -	// xDS server in the list of server configs will be used.
    -	XDSFallbackSupport = boolFromEnv("GRPC_EXPERIMENTAL_XDS_FALLBACK", true)
    -	// NewPickFirstEnabled is set if the new pickfirst leaf policy is to be used
    -	// instead of the exiting pickfirst implementation. This can be enabled by
    -	// setting the environment variable "GRPC_EXPERIMENTAL_ENABLE_NEW_PICK_FIRST"
    -	// to "true".
    -	NewPickFirstEnabled = boolFromEnv("GRPC_EXPERIMENTAL_ENABLE_NEW_PICK_FIRST", false)
    +
    +	// XDSEndpointHashKeyBackwardCompat controls the parsing of the endpoint hash
    +	// key from EDS LbEndpoint metadata. Endpoint hash keys can be disabled by
    +	// setting "GRPC_XDS_ENDPOINT_HASH_KEY_BACKWARD_COMPAT" to "true". When the
    +	// implementation of A76 is stable, we will flip the default value to false
    +	// in a subsequent release. A final release will remove this environment
    +	// variable, enabling the new behavior unconditionally.
    +	XDSEndpointHashKeyBackwardCompat = boolFromEnv("GRPC_XDS_ENDPOINT_HASH_KEY_BACKWARD_COMPAT", true)
    +
    +	// RingHashSetRequestHashKey is set if the ring hash balancer can get the
    +	// request hash header by setting the "requestHashHeader" field, according
    +	// to gRFC A76. It can be enabled by setting the environment variable
    +	// "GRPC_EXPERIMENTAL_RING_HASH_SET_REQUEST_HASH_KEY" to "true".
    +	RingHashSetRequestHashKey = boolFromEnv("GRPC_EXPERIMENTAL_RING_HASH_SET_REQUEST_HASH_KEY", false)
    +
    +	// ALTSHandshakerKeepaliveParams is set if we should add the
    +	// KeepaliveParams when dial the ALTS handshaker service.
    +	ALTSHandshakerKeepaliveParams = boolFromEnv("GRPC_EXPERIMENTAL_ALTS_HANDSHAKER_KEEPALIVE_PARAMS", false)
    +
    +	// EnableDefaultPortForProxyTarget controls whether the resolver adds a default port 443
    +	// to a target address that lacks one. This flag only has an effect when all of
    +	// the following conditions are met:
    +	//   - A connect proxy is being used.
    +	//   - Target resolution is disabled.
    +	//   - The DNS resolver is being used.
    +	EnableDefaultPortForProxyTarget = boolFromEnv("GRPC_EXPERIMENTAL_ENABLE_DEFAULT_PORT_FOR_PROXY_TARGET", true)
    +
    +	// XDSAuthorityRewrite indicates whether xDS authority rewriting is enabled.
    +	// This feature is defined in gRFC A81 and is enabled by setting the
    +	// environment variable GRPC_EXPERIMENTAL_XDS_AUTHORITY_REWRITE to "true".
    +	XDSAuthorityRewrite = boolFromEnv("GRPC_EXPERIMENTAL_XDS_AUTHORITY_REWRITE", false)
     )
     
     func boolFromEnv(envVar string, def bool) bool {
    diff --git a/vendor/google.golang.org/grpc/internal/envconfig/xds.go b/vendor/google.golang.org/grpc/internal/envconfig/xds.go
    index 2eb97f832..7685d08b5 100644
    --- a/vendor/google.golang.org/grpc/internal/envconfig/xds.go
    +++ b/vendor/google.golang.org/grpc/internal/envconfig/xds.go
    @@ -63,4 +63,20 @@ var (
     	// For more details, see:
     	// https://github.com/grpc/proposal/blob/master/A82-xds-system-root-certs.md.
     	XDSSystemRootCertsEnabled = boolFromEnv("GRPC_EXPERIMENTAL_XDS_SYSTEM_ROOT_CERTS", false)
    +
    +	// XDSSPIFFEEnabled controls if SPIFFE Bundle Maps can be used as roots of
    +	// trust.  For more details, see:
    +	// https://github.com/grpc/proposal/blob/master/A87-mtls-spiffe-support.md
    +	XDSSPIFFEEnabled = boolFromEnv("GRPC_EXPERIMENTAL_XDS_MTLS_SPIFFE", false)
    +
    +	// XDSHTTPConnectEnabled is true if gRPC should parse custom Metadata
    +	// configuring use of an HTTP CONNECT proxy via xDS from cluster resources.
    +	// For more details, see:
    +	// https://github.com/grpc/proposal/blob/master/A86-xds-http-connect.md
    +	XDSHTTPConnectEnabled = boolFromEnv("GRPC_EXPERIMENTAL_XDS_HTTP_CONNECT", false)
    +
    +	// XDSBootstrapCallCredsEnabled controls if call credentials can be used in
    +	// xDS bootstrap configuration via the `call_creds` field. For more details,
    +	// see: https://github.com/grpc/proposal/blob/master/A97-xds-jwt-call-creds.md
    +	XDSBootstrapCallCredsEnabled = boolFromEnv("GRPC_EXPERIMENTAL_XDS_BOOTSTRAP_CALL_CREDS", false)
     )
    diff --git a/vendor/google.golang.org/grpc/internal/experimental.go b/vendor/google.golang.org/grpc/internal/experimental.go
    index 7617be215..c90cc51bd 100644
    --- a/vendor/google.golang.org/grpc/internal/experimental.go
    +++ b/vendor/google.golang.org/grpc/internal/experimental.go
    @@ -25,4 +25,8 @@ var (
     	// BufferPool is implemented by the grpc package and returns a server
     	// option to configure a shared buffer pool for a grpc.Server.
     	BufferPool any // func (grpc.SharedBufferPool) grpc.ServerOption
    +
    +	// AcceptCompressors is implemented by the grpc package and returns
    +	// a call option that restricts the grpc-accept-encoding header for a call.
    +	AcceptCompressors any // func(...string) grpc.CallOption
     )
    diff --git a/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go b/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go
    index 8e8e86128..9b6d8a1fa 100644
    --- a/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go
    +++ b/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go
    @@ -80,25 +80,11 @@ func (cs *CallbackSerializer) ScheduleOr(f func(ctx context.Context), onFailure
     func (cs *CallbackSerializer) run(ctx context.Context) {
     	defer close(cs.done)
     
    -	// TODO: when Go 1.21 is the oldest supported version, this loop and Close
    -	// can be replaced with:
    -	//
    -	// context.AfterFunc(ctx, cs.callbacks.Close)
    -	for ctx.Err() == nil {
    -		select {
    -		case <-ctx.Done():
    -			// Do nothing here. Next iteration of the for loop will not happen,
    -			// since ctx.Err() would be non-nil.
    -		case cb := <-cs.callbacks.Get():
    -			cs.callbacks.Load()
    -			cb.(func(context.Context))(ctx)
    -		}
    -	}
    -
    -	// Close the buffer to prevent new callbacks from being added.
    -	cs.callbacks.Close()
    +	// Close the buffer when the context is canceled
    +	// to prevent new callbacks from being added.
    +	context.AfterFunc(ctx, cs.callbacks.Close)
     
    -	// Run all pending callbacks.
    +	// Run all callbacks.
     	for cb := range cs.callbacks.Get() {
     		cs.callbacks.Load()
     		cb.(func(context.Context))(ctx)
    diff --git a/vendor/google.golang.org/grpc/internal/grpcsync/event.go b/vendor/google.golang.org/grpc/internal/grpcsync/event.go
    index fbe697c37..d788c2493 100644
    --- a/vendor/google.golang.org/grpc/internal/grpcsync/event.go
    +++ b/vendor/google.golang.org/grpc/internal/grpcsync/event.go
    @@ -21,28 +21,25 @@
     package grpcsync
     
     import (
    -	"sync"
     	"sync/atomic"
     )
     
     // Event represents a one-time event that may occur in the future.
     type Event struct {
    -	fired int32
    +	fired atomic.Bool
     	c     chan struct{}
    -	o     sync.Once
     }
     
     // Fire causes e to complete.  It is safe to call multiple times, and
     // concurrently.  It returns true iff this call to Fire caused the signaling
    -// channel returned by Done to close.
    +// channel returned by Done to close. If Fire returns false, it is possible
    +// the Done channel has not been closed yet.
     func (e *Event) Fire() bool {
    -	ret := false
    -	e.o.Do(func() {
    -		atomic.StoreInt32(&e.fired, 1)
    +	if e.fired.CompareAndSwap(false, true) {
     		close(e.c)
    -		ret = true
    -	})
    -	return ret
    +		return true
    +	}
    +	return false
     }
     
     // Done returns a channel that will be closed when Fire is called.
    @@ -52,7 +49,7 @@ func (e *Event) Done() <-chan struct{} {
     
     // HasFired returns true if Fire has been called.
     func (e *Event) HasFired() bool {
    -	return atomic.LoadInt32(&e.fired) == 1
    +	return e.fired.Load()
     }
     
     // NewEvent returns a new, ready-to-use Event.
    diff --git a/vendor/google.golang.org/grpc/internal/idle/idle.go b/vendor/google.golang.org/grpc/internal/idle/idle.go
    index 2c13ee9da..d3cd24f80 100644
    --- a/vendor/google.golang.org/grpc/internal/idle/idle.go
    +++ b/vendor/google.golang.org/grpc/internal/idle/idle.go
    @@ -21,7 +21,6 @@
     package idle
     
     import (
    -	"fmt"
     	"math"
     	"sync"
     	"sync/atomic"
    @@ -33,15 +32,15 @@ var timeAfterFunc = func(d time.Duration, f func()) *time.Timer {
     	return time.AfterFunc(d, f)
     }
     
    -// Enforcer is the functionality provided by grpc.ClientConn to enter
    -// and exit from idle mode.
    -type Enforcer interface {
    -	ExitIdleMode() error
    +// ClientConn is the functionality provided by grpc.ClientConn to enter and exit
    +// from idle mode.
    +type ClientConn interface {
    +	ExitIdleMode()
     	EnterIdleMode()
     }
     
    -// Manager implements idleness detection and calls the configured Enforcer to
    -// enter/exit idle mode when appropriate.  Must be created by NewManager.
    +// Manager implements idleness detection and calls the ClientConn to enter/exit
    +// idle mode when appropriate. Must be created by NewManager.
     type Manager struct {
     	// State accessed atomically.
     	lastCallEndTime           int64 // Unix timestamp in nanos; time when the most recent RPC completed.
    @@ -51,8 +50,8 @@ type Manager struct {
     
     	// Can be accessed without atomics or mutex since these are set at creation
     	// time and read-only after that.
    -	enforcer Enforcer // Functionality provided by grpc.ClientConn.
    -	timeout  time.Duration
    +	cc      ClientConn // Functionality provided by grpc.ClientConn.
    +	timeout time.Duration
     
     	// idleMu is used to guarantee mutual exclusion in two scenarios:
     	// - Opposing intentions:
    @@ -72,9 +71,9 @@ type Manager struct {
     
     // NewManager creates a new idleness manager implementation for the
     // given idle timeout.  It begins in idle mode.
    -func NewManager(enforcer Enforcer, timeout time.Duration) *Manager {
    +func NewManager(cc ClientConn, timeout time.Duration) *Manager {
     	return &Manager{
    -		enforcer:         enforcer,
    +		cc:               cc,
     		timeout:          timeout,
     		actuallyIdle:     true,
     		activeCallsCount: -math.MaxInt32,
    @@ -127,7 +126,7 @@ func (m *Manager) handleIdleTimeout() {
     
     	// Now that we've checked that there has been no activity, attempt to enter
     	// idle mode, which is very likely to succeed.
    -	if m.tryEnterIdleMode() {
    +	if m.tryEnterIdleMode(true) {
     		// Successfully entered idle mode. No timer needed until we exit idle.
     		return
     	}
    @@ -142,10 +141,13 @@ func (m *Manager) handleIdleTimeout() {
     // that, it performs a last minute check to ensure that no new RPC has come in,
     // making the channel active.
     //
    +// checkActivity controls if a check for RPC activity, since the last time the
    +// idle_timeout fired, is made.
    +
     // Return value indicates whether or not the channel moved to idle mode.
     //
     // Holds idleMu which ensures mutual exclusion with exitIdleMode.
    -func (m *Manager) tryEnterIdleMode() bool {
    +func (m *Manager) tryEnterIdleMode(checkActivity bool) bool {
     	// Setting the activeCallsCount to -math.MaxInt32 indicates to OnCallBegin()
     	// that the channel is either in idle mode or is trying to get there.
     	if !atomic.CompareAndSwapInt32(&m.activeCallsCount, 0, -math.MaxInt32) {
    @@ -166,7 +168,7 @@ func (m *Manager) tryEnterIdleMode() bool {
     		atomic.AddInt32(&m.activeCallsCount, math.MaxInt32)
     		return false
     	}
    -	if atomic.LoadInt32(&m.activeSinceLastTimerCheck) == 1 {
    +	if checkActivity && atomic.LoadInt32(&m.activeSinceLastTimerCheck) == 1 {
     		// A very short RPC could have come in (and also finished) after we
     		// checked for calls count and activity in handleIdleTimeout(), but
     		// before the CAS operation. So, we need to check for activity again.
    @@ -177,44 +179,37 @@ func (m *Manager) tryEnterIdleMode() bool {
     	// No new RPCs have come in since we set the active calls count value to
     	// -math.MaxInt32. And since we have the lock, it is safe to enter idle mode
     	// unconditionally now.
    -	m.enforcer.EnterIdleMode()
    +	m.cc.EnterIdleMode()
     	m.actuallyIdle = true
     	return true
     }
     
     // EnterIdleModeForTesting instructs the channel to enter idle mode.
     func (m *Manager) EnterIdleModeForTesting() {
    -	m.tryEnterIdleMode()
    +	m.tryEnterIdleMode(false)
     }
     
     // OnCallBegin is invoked at the start of every RPC.
    -func (m *Manager) OnCallBegin() error {
    +func (m *Manager) OnCallBegin() {
     	if m.isClosed() {
    -		return nil
    +		return
     	}
     
     	if atomic.AddInt32(&m.activeCallsCount, 1) > 0 {
     		// Channel is not idle now. Set the activity bit and allow the call.
     		atomic.StoreInt32(&m.activeSinceLastTimerCheck, 1)
    -		return nil
    +		return
     	}
     
     	// Channel is either in idle mode or is in the process of moving to idle
     	// mode. Attempt to exit idle mode to allow this RPC.
    -	if err := m.ExitIdleMode(); err != nil {
    -		// Undo the increment to calls count, and return an error causing the
    -		// RPC to fail.
    -		atomic.AddInt32(&m.activeCallsCount, -1)
    -		return err
    -	}
    -
    +	m.ExitIdleMode()
     	atomic.StoreInt32(&m.activeSinceLastTimerCheck, 1)
    -	return nil
     }
     
    -// ExitIdleMode instructs m to call the enforcer's ExitIdleMode and update m's
    +// ExitIdleMode instructs m to call the ClientConn's ExitIdleMode and update its
     // internal state.
    -func (m *Manager) ExitIdleMode() error {
    +func (m *Manager) ExitIdleMode() {
     	// Holds idleMu which ensures mutual exclusion with tryEnterIdleMode.
     	m.idleMu.Lock()
     	defer m.idleMu.Unlock()
    @@ -231,12 +226,10 @@ func (m *Manager) ExitIdleMode() error {
     		//   m.ExitIdleMode.
     		//
     		// In any case, there is nothing to do here.
    -		return nil
    +		return
     	}
     
    -	if err := m.enforcer.ExitIdleMode(); err != nil {
    -		return fmt.Errorf("failed to exit idle mode: %w", err)
    -	}
    +	m.cc.ExitIdleMode()
     
     	// Undo the idle entry process. This also respects any new RPC attempts.
     	atomic.AddInt32(&m.activeCallsCount, math.MaxInt32)
    @@ -244,7 +237,23 @@ func (m *Manager) ExitIdleMode() error {
     
     	// Start a new timer to fire after the configured idle timeout.
     	m.resetIdleTimerLocked(m.timeout)
    -	return nil
    +}
    +
    +// UnsafeSetNotIdle instructs the Manager to update its internal state to
    +// reflect the reality that the channel is no longer in IDLE mode.
    +//
    +// N.B. This method is intended only for internal use by the gRPC client
    +// when it exits IDLE mode **manually** from `Dial`. The callsite must ensure:
    +//   - The channel was **actually in IDLE mode** immediately prior to the call.
    +//   - There is **no concurrent activity** that could cause the channel to exit
    +//     IDLE mode *naturally* at the same time.
    +func (m *Manager) UnsafeSetNotIdle() {
    +	m.idleMu.Lock()
    +	defer m.idleMu.Unlock()
    +
    +	atomic.AddInt32(&m.activeCallsCount, math.MaxInt32)
    +	m.actuallyIdle = false
    +	m.resetIdleTimerLocked(m.timeout)
     }
     
     // OnCallEnd is invoked at the end of every RPC.
    diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go
    index 13e1f386b..27bef83d9 100644
    --- a/vendor/google.golang.org/grpc/internal/internal.go
    +++ b/vendor/google.golang.org/grpc/internal/internal.go
    @@ -182,35 +182,6 @@ var (
     	// other features, including the CSDS service.
     	NewXDSResolverWithClientForTesting any // func(xdsclient.XDSClient) (resolver.Builder, error)
     
    -	// RegisterRLSClusterSpecifierPluginForTesting registers the RLS Cluster
    -	// Specifier Plugin for testing purposes, regardless of the XDSRLS environment
    -	// variable.
    -	//
    -	// TODO: Remove this function once the RLS env var is removed.
    -	RegisterRLSClusterSpecifierPluginForTesting func()
    -
    -	// UnregisterRLSClusterSpecifierPluginForTesting unregisters the RLS Cluster
    -	// Specifier Plugin for testing purposes. This is needed because there is no way
    -	// to unregister the RLS Cluster Specifier Plugin after registering it solely
    -	// for testing purposes using RegisterRLSClusterSpecifierPluginForTesting().
    -	//
    -	// TODO: Remove this function once the RLS env var is removed.
    -	UnregisterRLSClusterSpecifierPluginForTesting func()
    -
    -	// RegisterRBACHTTPFilterForTesting registers the RBAC HTTP Filter for testing
    -	// purposes, regardless of the RBAC environment variable.
    -	//
    -	// TODO: Remove this function once the RBAC env var is removed.
    -	RegisterRBACHTTPFilterForTesting func()
    -
    -	// UnregisterRBACHTTPFilterForTesting unregisters the RBAC HTTP Filter for
    -	// testing purposes. This is needed because there is no way to unregister the
    -	// HTTP Filter after registering it solely for testing purposes using
    -	// RegisterRBACHTTPFilterForTesting().
    -	//
    -	// TODO: Remove this function once the RBAC env var is removed.
    -	UnregisterRBACHTTPFilterForTesting func()
    -
     	// ORCAAllowAnyMinReportingInterval is for examples/orca use ONLY.
     	ORCAAllowAnyMinReportingInterval any // func(so *orca.ServiceOptions)
     
    @@ -259,6 +230,24 @@ var (
     	// SetBufferPoolingThresholdForTesting updates the buffer pooling threshold, for
     	// testing purposes.
     	SetBufferPoolingThresholdForTesting any // func(int)
    +
    +	// TimeAfterFunc is used to create timers. During tests the function is
    +	// replaced to track allocated timers and fail the test if a timer isn't
    +	// cancelled.
    +	TimeAfterFunc = func(d time.Duration, f func()) Timer {
    +		return time.AfterFunc(d, f)
    +	}
    +
    +	// NewStreamWaitingForResolver is a test hook that is triggered when a
    +	// new stream blocks while waiting for name resolution. This can be
    +	// used in tests to synchronize resolver updates and avoid race conditions.
    +	// When set, the function will be called before the stream enters
    +	// the blocking state.
    +	NewStreamWaitingForResolver = func() {}
    +
    +	// AddressToTelemetryLabels is an xDS-provided function to extract telemetry
    +	// labels from a resolver.Address. Callers must assert its type before calling.
    +	AddressToTelemetryLabels any // func(addr resolver.Address) map[string]string
     )
     
     // HealthChecker defines the signature of the client-side LB channel health
    @@ -300,3 +289,9 @@ type EnforceSubConnEmbedding interface {
     type EnforceClientConnEmbedding interface {
     	enforceClientConnEmbedding()
     }
    +
    +// Timer is an interface to allow injecting different time.Timer implementations
    +// during tests.
    +type Timer interface {
    +	Stop() bool
    +}
    diff --git a/vendor/google.golang.org/grpc/internal/metadata/metadata.go b/vendor/google.golang.org/grpc/internal/metadata/metadata.go
    index 900bfb716..c4055bc00 100644
    --- a/vendor/google.golang.org/grpc/internal/metadata/metadata.go
    +++ b/vendor/google.golang.org/grpc/internal/metadata/metadata.go
    @@ -97,13 +97,11 @@ func hasNotPrintable(msg string) bool {
     	return false
     }
     
    -// ValidatePair validate a key-value pair with the following rules (the pseudo-header will be skipped) :
    -//
    -// - key must contain one or more characters.
    -// - the characters in the key must be contained in [0-9 a-z _ - .].
    -// - if the key ends with a "-bin" suffix, no validation of the corresponding value is performed.
    -// - the characters in the every value must be printable (in [%x20-%x7E]).
    -func ValidatePair(key string, vals ...string) error {
    +// ValidateKey validates a key with the following rules (pseudo-headers are
    +// skipped):
    +// - the key must contain one or more characters.
    +// - the characters in the key must be in [0-9 a-z _ - .].
    +func ValidateKey(key string) error {
     	// key should not be empty
     	if key == "" {
     		return fmt.Errorf("there is an empty key in the header")
    @@ -119,6 +117,20 @@ func ValidatePair(key string, vals ...string) error {
     			return fmt.Errorf("header key %q contains illegal characters not in [0-9a-z-_.]", key)
     		}
     	}
    +	return nil
    +}
    +
    +// ValidatePair validates a key-value pair with the following rules
    +// (pseudo-header are skipped):
    +//   - the key must contain one or more characters.
    +//   - the characters in the key must be in [0-9 a-z _ - .].
    +//   - if the key ends with a "-bin" suffix, no validation of the corresponding
    +//     value is performed.
    +//   - the characters in every value must be printable (in [%x20-%x7E]).
    +func ValidatePair(key string, vals ...string) error {
    +	if err := ValidateKey(key); err != nil {
    +		return err
    +	}
     	if strings.HasSuffix(key, "-bin") {
     		return nil
     	}
    diff --git a/vendor/google.golang.org/grpc/internal/resolver/delegatingresolver/delegatingresolver.go b/vendor/google.golang.org/grpc/internal/resolver/delegatingresolver/delegatingresolver.go
    index a6c647013..5bfa67b72 100644
    --- a/vendor/google.golang.org/grpc/internal/resolver/delegatingresolver/delegatingresolver.go
    +++ b/vendor/google.golang.org/grpc/internal/resolver/delegatingresolver/delegatingresolver.go
    @@ -22,12 +22,16 @@ package delegatingresolver
     
     import (
     	"fmt"
    +	"net"
     	"net/http"
     	"net/url"
     	"sync"
     
     	"google.golang.org/grpc/grpclog"
    +	"google.golang.org/grpc/internal/envconfig"
     	"google.golang.org/grpc/internal/proxyattributes"
    +	"google.golang.org/grpc/internal/transport"
    +	"google.golang.org/grpc/internal/transport/networktype"
     	"google.golang.org/grpc/resolver"
     	"google.golang.org/grpc/serviceconfig"
     )
    @@ -38,21 +42,30 @@ var (
     	HTTPSProxyFromEnvironment = http.ProxyFromEnvironment
     )
     
    +const defaultPort = "443"
    +
     // delegatingResolver manages both target URI and proxy address resolution by
     // delegating these tasks to separate child resolvers. Essentially, it acts as
    -// a intermediary between the gRPC ClientConn and the child resolvers.
    +// an intermediary between the gRPC ClientConn and the child resolvers.
     //
     // It implements the [resolver.Resolver] interface.
     type delegatingResolver struct {
    -	target         resolver.Target     // parsed target URI to be resolved
    -	cc             resolver.ClientConn // gRPC ClientConn
    -	targetResolver resolver.Resolver   // resolver for the target URI, based on its scheme
    -	proxyResolver  resolver.Resolver   // resolver for the proxy URI; nil if no proxy is configured
    -	proxyURL       *url.URL            // proxy URL, derived from proxy environment and target
    +	target   resolver.Target     // parsed target URI to be resolved
    +	cc       resolver.ClientConn // gRPC ClientConn
    +	proxyURL *url.URL            // proxy URL, derived from proxy environment and target
     
    +	// We do not hold both mu and childMu in the same goroutine. Avoid holding
    +	// both locks when calling into the child, as the child resolver may
    +	// synchronously callback into the channel.
     	mu                  sync.Mutex         // protects all the fields below
     	targetResolverState *resolver.State    // state of the target resolver
     	proxyAddrs          []resolver.Address // resolved proxy addresses; empty if no proxy is configured
    +
    +	// childMu serializes calls into child resolvers. It also protects access to
    +	// the following fields.
    +	childMu        sync.Mutex
    +	targetResolver resolver.Resolver // resolver for the target URI, based on its scheme
    +	proxyResolver  resolver.Resolver // resolver for the proxy URI; nil if no proxy is configured
     }
     
     // nopResolver is a resolver that does nothing.
    @@ -62,8 +75,8 @@ func (nopResolver) ResolveNow(resolver.ResolveNowOptions) {}
     
     func (nopResolver) Close() {}
     
    -// proxyURLForTarget determines the proxy URL for the given address based on
    -// the environment. It can return the following:
    +// proxyURLForTarget determines the proxy URL for the given address based on the
    +// environment. It can return the following:
     //   - nil URL, nil error: No proxy is configured or the address is excluded
     //     using the `NO_PROXY` environment variable or if req.URL.Host is
     //     "localhost" (with or without // a port number)
    @@ -82,7 +95,8 @@ func proxyURLForTarget(address string) (*url.URL, error) {
     // resolvers:
     //   - one to resolve the proxy address specified using the supported
     //     environment variables. This uses the registered resolver for the "dns"
    -//     scheme.
    +//     scheme. It is lazily built when a target resolver update contains at least
    +//     one TCP address.
     //   - one to resolve the target URI using the resolver specified by the scheme
     //     in the target URI or specified by the user using the WithResolvers dial
     //     option. As a special case, if the target URI's scheme is "dns" and a
    @@ -91,14 +105,24 @@ func proxyURLForTarget(address string) (*url.URL, error) {
     //     resolution is enabled using the dial option.
     func New(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions, targetResolverBuilder resolver.Builder, targetResolutionEnabled bool) (resolver.Resolver, error) {
     	r := &delegatingResolver{
    -		target: target,
    -		cc:     cc,
    +		target:         target,
    +		cc:             cc,
    +		proxyResolver:  nopResolver{},
    +		targetResolver: nopResolver{},
     	}
     
    +	addr := target.Endpoint()
     	var err error
    -	r.proxyURL, err = proxyURLForTarget(target.Endpoint())
    +	if target.URL.Scheme == "dns" && !targetResolutionEnabled && envconfig.EnableDefaultPortForProxyTarget {
    +		addr, err = parseTarget(addr)
    +		if err != nil {
    +			return nil, fmt.Errorf("delegating_resolver: invalid target address %q: %v", target.Endpoint(), err)
    +		}
    +	}
    +
    +	r.proxyURL, err = proxyURLForTarget(addr)
     	if err != nil {
    -		return nil, fmt.Errorf("delegating_resolver: failed to determine proxy URL for target %s: %v", target, err)
    +		return nil, fmt.Errorf("delegating_resolver: failed to determine proxy URL for target %q: %v", target, err)
     	}
     
     	// proxy is not configured or proxy address excluded using `NO_PROXY` env
    @@ -111,41 +135,34 @@ func New(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOpti
     		logger.Infof("Proxy URL detected : %s", r.proxyURL)
     	}
     
    +	// Resolver updates from one child may trigger calls into the other. Block
    +	// updates until the children are initialized.
    +	r.childMu.Lock()
    +	defer r.childMu.Unlock()
     	// When the scheme is 'dns' and target resolution on client is not enabled,
     	// resolution should be handled by the proxy, not the client. Therefore, we
     	// bypass the target resolver and store the unresolved target address.
     	if target.URL.Scheme == "dns" && !targetResolutionEnabled {
    -		state := resolver.State{
    -			Addresses: []resolver.Address{{Addr: target.Endpoint()}},
    -			Endpoints: []resolver.Endpoint{{Addresses: []resolver.Address{{Addr: target.Endpoint()}}}},
    -		}
    -		r.targetResolverState = &state
    -	} else {
    -		wcc := &wrappingClientConn{
    -			stateListener: r.updateTargetResolverState,
    -			parent:        r,
    +		r.targetResolverState = &resolver.State{
    +			Addresses: []resolver.Address{{Addr: addr}},
    +			Endpoints: []resolver.Endpoint{{Addresses: []resolver.Address{{Addr: addr}}}},
     		}
    -		if r.targetResolver, err = targetResolverBuilder.Build(target, wcc, opts); err != nil {
    -			return nil, fmt.Errorf("delegating_resolver: unable to build the resolver for target %s: %v", target, err)
    -		}
    -	}
    -
    -	if r.proxyResolver, err = r.proxyURIResolver(opts); err != nil {
    -		return nil, fmt.Errorf("delegating_resolver: failed to build resolver for proxy URL %q: %v", r.proxyURL, err)
    +		r.updateTargetResolverState(*r.targetResolverState)
    +		return r, nil
     	}
    -
    -	if r.targetResolver == nil {
    -		r.targetResolver = nopResolver{}
    +	wcc := &wrappingClientConn{
    +		stateListener: r.updateTargetResolverState,
    +		parent:        r,
     	}
    -	if r.proxyResolver == nil {
    -		r.proxyResolver = nopResolver{}
    +	if r.targetResolver, err = targetResolverBuilder.Build(target, wcc, opts); err != nil {
    +		return nil, fmt.Errorf("delegating_resolver: unable to build the resolver for target %s: %v", target, err)
     	}
     	return r, nil
     }
     
    -// proxyURIResolver creates a resolver for resolving proxy URIs using the
    -// "dns" scheme. It adjusts the proxyURL to conform to the "dns:///" format and
    -// builds a resolver with a wrappingClientConn to capture resolved addresses.
    +// proxyURIResolver creates a resolver for resolving proxy URIs using the "dns"
    +// scheme. It adjusts the proxyURL to conform to the "dns:///" format and builds
    +// a resolver with a wrappingClientConn to capture resolved addresses.
     func (r *delegatingResolver) proxyURIResolver(opts resolver.BuildOptions) (resolver.Resolver, error) {
     	proxyBuilder := resolver.Get("dns")
     	if proxyBuilder == nil {
    @@ -165,11 +182,15 @@ func (r *delegatingResolver) proxyURIResolver(opts resolver.BuildOptions) (resol
     }
     
     func (r *delegatingResolver) ResolveNow(o resolver.ResolveNowOptions) {
    +	r.childMu.Lock()
    +	defer r.childMu.Unlock()
     	r.targetResolver.ResolveNow(o)
     	r.proxyResolver.ResolveNow(o)
     }
     
     func (r *delegatingResolver) Close() {
    +	r.childMu.Lock()
    +	defer r.childMu.Unlock()
     	r.targetResolver.Close()
     	r.targetResolver = nil
     
    @@ -177,18 +198,96 @@ func (r *delegatingResolver) Close() {
     	r.proxyResolver = nil
     }
     
    -// updateClientConnStateLocked creates a list of combined addresses by
    -// pairing each proxy address with every target address. For each pair, it
    -// generates a new [resolver.Address] using the proxy address, and adding the
    -// target address as the attribute along with user info. It returns nil if
    -// either resolver has not sent update even once and returns the error from
    -// ClientConn update once both resolvers have sent update atleast once.
    +func needsProxyResolver(state *resolver.State) bool {
    +	for _, addr := range state.Addresses {
    +		if !skipProxy(addr) {
    +			return true
    +		}
    +	}
    +	for _, endpoint := range state.Endpoints {
    +		for _, addr := range endpoint.Addresses {
    +			if !skipProxy(addr) {
    +				return true
    +			}
    +		}
    +	}
    +	return false
    +}
    +
    +// parseTarget takes a target string and ensures it is a valid "host:port" target.
    +//
    +// It does the following:
    +//  1. If the target already has a port (e.g., "host:port", "[ipv6]:port"),
    +//     it is returned as is.
    +//  2. If the host part is empty (e.g., ":80"), it defaults to "localhost",
    +//     returning "localhost:80".
    +//  3. If the target is missing a port (e.g., "host", "ipv6"), the defaultPort
    +//     is added.
    +//
    +// An error is returned for empty targets or targets with a trailing colon
    +// but no port (e.g., "host:").
    +func parseTarget(target string) (string, error) {
    +	if target == "" {
    +		return "", fmt.Errorf("missing address")
    +	}
    +
    +	host, port, err := net.SplitHostPort(target)
    +	if err != nil {
    +		// If SplitHostPort fails, it's likely because the port is missing.
    +		// We append the default port and return the result.
    +		return net.JoinHostPort(target, defaultPort), nil
    +	}
    +
    +	// If SplitHostPort succeeds, we check for edge cases.
    +	if port == "" {
    +		// A success with an empty port means the target had a trailing colon,
    +		// e.g., "host:", which is an error.
    +		return "", fmt.Errorf("missing port after port-separator colon")
    +	}
    +	if host == "" {
    +		// A success with an empty host means the target was like ":80".
    +		// We default the host to "localhost".
    +		host = "localhost"
    +	}
    +	return net.JoinHostPort(host, port), nil
    +}
    +
    +func skipProxy(address resolver.Address) bool {
    +	// Avoid proxy when network is not tcp.
    +	networkType, ok := networktype.Get(address)
    +	if !ok {
    +		networkType, _ = transport.ParseDialTarget(address.Addr)
    +	}
    +	if networkType != "tcp" {
    +		return true
    +	}
    +
    +	req := &http.Request{URL: &url.URL{
    +		Scheme: "https",
    +		Host:   address.Addr,
    +	}}
    +	// Avoid proxy when address included in `NO_PROXY` environment variable or
    +	// fails to get the proxy address.
    +	url, err := HTTPSProxyFromEnvironment(req)
    +	if err != nil || url == nil {
    +		return true
    +	}
    +	return false
    +}
    +
    +// updateClientConnStateLocked constructs a combined list of addresses by
    +// pairing each proxy address with every target address of type TCP. For each
    +// pair, it creates a new [resolver.Address] using the proxy address and
    +// attaches the corresponding target address and user info as attributes. Target
    +// addresses that are not of type TCP are appended to the list as-is. The
    +// function returns nil if either resolver has not yet provided an update, and
    +// returns the result of ClientConn.UpdateState once both resolvers have
    +// provided at least one update.
     func (r *delegatingResolver) updateClientConnStateLocked() error {
     	if r.targetResolverState == nil || r.proxyAddrs == nil {
     		return nil
     	}
     
    -	curState := *r.targetResolverState
     	// If multiple resolved proxy addresses are present, we send only the
     	// unresolved proxy host and let net.Dial handle the proxy host name
     	// resolution when creating the transport. Sending all resolved addresses
    @@ -206,24 +305,29 @@ func (r *delegatingResolver) updateClientConnStateLocked() error {
     	}
     	var addresses []resolver.Address
     	for _, targetAddr := range (*r.targetResolverState).Addresses {
    +		if skipProxy(targetAddr) {
    +			addresses = append(addresses, targetAddr)
    +			continue
    +		}
     		addresses = append(addresses, proxyattributes.Set(proxyAddr, proxyattributes.Options{
     			User:        r.proxyURL.User,
     			ConnectAddr: targetAddr.Addr,
     		}))
     	}
     
    -	// Create a list of combined endpoints by pairing all proxy endpoints
    -	// with every target endpoint. Each time, it constructs a new
    -	// [resolver.Endpoint] using the all addresses from all the proxy endpoint
    -	// and the target addresses from one endpoint. The target address and user
    -	// information from the proxy URL are added as attributes to the proxy
    -	// address.The resulting list of addresses is then grouped into endpoints,
    -	// covering all combinations of proxy and target endpoints.
    +	// For each target endpoint, construct a new [resolver.Endpoint] that
    +	// includes all addresses from all proxy endpoints and the addresses from
    +	// that target endpoint, preserving the number of target endpoints.
     	var endpoints []resolver.Endpoint
     	for _, endpt := range (*r.targetResolverState).Endpoints {
     		var addrs []resolver.Address
    -		for _, proxyAddr := range r.proxyAddrs {
    -			for _, targetAddr := range endpt.Addresses {
    +		for _, targetAddr := range endpt.Addresses {
    +			// Avoid proxy when network is not tcp.
    +			if skipProxy(targetAddr) {
    +				addrs = append(addrs, targetAddr)
    +				continue
    +			}
    +			for _, proxyAddr := range r.proxyAddrs {
     				addrs = append(addrs, proxyattributes.Set(proxyAddr, proxyattributes.Options{
     					User:        r.proxyURL.User,
     					ConnectAddr: targetAddr.Addr,
    @@ -234,8 +338,9 @@ func (r *delegatingResolver) updateClientConnStateLocked() error {
     	}
     	// Use the targetResolverState for its service config and attributes
     	// contents. The state update is only sent after both the target and proxy
    -	// resolvers have sent their updates, and curState has been updated with
    -	// the combined addresses.
    +	// resolvers have sent their updates, and curState has been updated with the
    +	// combined addresses.
    +	curState := *r.targetResolverState
     	curState.Addresses = addresses
     	curState.Endpoints = endpoints
     	return r.cc.UpdateState(curState)
    @@ -245,7 +350,8 @@ func (r *delegatingResolver) updateClientConnStateLocked() error {
     // addresses and endpoints, marking the resolver as ready, and triggering a
     // state update if both proxy and target resolvers are ready. If the ClientConn
     // returns a non-nil error, it calls `ResolveNow()` on the target resolver.  It
    -// is a StateListener function of wrappingClientConn passed to the proxy resolver.
    +// is a StateListener function of wrappingClientConn passed to the proxy
    +// resolver.
     func (r *delegatingResolver) updateProxyResolverState(state resolver.State) error {
     	r.mu.Lock()
     	defer r.mu.Unlock()
    @@ -253,8 +359,8 @@ func (r *delegatingResolver) updateProxyResolverState(state resolver.State) erro
     		logger.Infof("Addresses received from proxy resolver: %s", state.Addresses)
     	}
     	if len(state.Endpoints) > 0 {
    -		// We expect exactly one address per endpoint because the proxy
    -		// resolver uses "dns" resolution.
    +		// We expect exactly one address per endpoint because the proxy resolver
    +		// uses "dns" resolution.
     		r.proxyAddrs = make([]resolver.Address, 0, len(state.Endpoints))
     		for _, endpoint := range state.Endpoints {
     			r.proxyAddrs = append(r.proxyAddrs, endpoint.Addresses...)
    @@ -267,20 +373,29 @@ func (r *delegatingResolver) updateProxyResolverState(state resolver.State) erro
     	err := r.updateClientConnStateLocked()
     	// Another possible approach was to block until updates are received from
     	// both resolvers. But this is not used because calling `New()` triggers
    -	// `Build()`  for the first resolver, which calls `UpdateState()`. And the
    +	// `Build()` for the first resolver, which calls `UpdateState()`. And the
     	// second resolver hasn't sent an update yet, so it would cause `New()` to
     	// block indefinitely.
     	if err != nil {
    -		r.targetResolver.ResolveNow(resolver.ResolveNowOptions{})
    +		go func() {
    +			r.childMu.Lock()
    +			defer r.childMu.Unlock()
    +			if r.targetResolver != nil {
    +				r.targetResolver.ResolveNow(resolver.ResolveNowOptions{})
    +			}
    +		}()
     	}
     	return err
     }
     
    -// updateTargetResolverState updates the target resolver state by storing target
    -// addresses, endpoints, and service config, marking the resolver as ready, and
    -// triggering a state update if both resolvers are ready. If the ClientConn
    -// returns a non-nil error, it calls `ResolveNow()` on the proxy resolver. It
    -// is a StateListener function of wrappingClientConn passed to the target resolver.
    +// updateTargetResolverState is the StateListener function provided to the
    +// target resolver via wrappingClientConn. It updates the resolver state and
    +// marks the target resolver as ready. If the update includes at least one TCP
    +// address and the proxy resolver has not yet been constructed, it initializes
    +// the proxy resolver. A combined state update is triggered once both resolvers
    +// are ready. If all addresses are non-TCP, it proceeds without waiting for the
    +// proxy resolver. If ClientConn.UpdateState returns a non-nil error,
    +// ResolveNow() is called on the proxy resolver.
     func (r *delegatingResolver) updateTargetResolverState(state resolver.State) error {
     	r.mu.Lock()
     	defer r.mu.Unlock()
    @@ -289,9 +404,41 @@ func (r *delegatingResolver) updateTargetResolverState(state resolver.State) err
     		logger.Infof("Addresses received from target resolver: %v", state.Addresses)
     	}
     	r.targetResolverState = &state
    +	// If all addresses returned by the target resolver have a non-TCP network
    +	// type, or are listed in the `NO_PROXY` environment variable, do not wait
    +	// for proxy update.
    +	if !needsProxyResolver(r.targetResolverState) {
    +		return r.cc.UpdateState(*r.targetResolverState)
    +	}
    +
    +	// The proxy resolver may be rebuilt multiple times, specifically each time
    +	// the target resolver sends an update, even if the target resolver is built
    +	// successfully but building the proxy resolver fails.
    +	if len(r.proxyAddrs) == 0 {
    +		go func() {
    +			r.childMu.Lock()
    +			defer r.childMu.Unlock()
    +			if _, ok := r.proxyResolver.(nopResolver); !ok {
    +				return
    +			}
    +			proxyResolver, err := r.proxyURIResolver(resolver.BuildOptions{})
    +			if err != nil {
    +				r.cc.ReportError(fmt.Errorf("delegating_resolver: unable to build the proxy resolver: %v", err))
    +				return
    +			}
    +			r.proxyResolver = proxyResolver
    +		}()
    +	}
    +
     	err := r.updateClientConnStateLocked()
     	if err != nil {
    -		r.proxyResolver.ResolveNow(resolver.ResolveNowOptions{})
    +		go func() {
    +			r.childMu.Lock()
    +			defer r.childMu.Unlock()
    +			if r.proxyResolver != nil {
    +				r.proxyResolver.ResolveNow(resolver.ResolveNowOptions{})
    +			}
    +		}()
     	}
     	return nil
     }
    @@ -311,7 +458,8 @@ func (wcc *wrappingClientConn) UpdateState(state resolver.State) error {
     	return wcc.stateListener(state)
     }
     
    -// ReportError intercepts errors from the child resolvers and passes them to ClientConn.
    +// ReportError intercepts errors from the child resolvers and passes them to
    +// ClientConn.
     func (wcc *wrappingClientConn) ReportError(err error) {
     	wcc.parent.cc.ReportError(err)
     }
    @@ -322,8 +470,8 @@ func (wcc *wrappingClientConn) NewAddress(addrs []resolver.Address) {
     	wcc.UpdateState(resolver.State{Addresses: addrs})
     }
     
    -// ParseServiceConfig parses the provided service config and returns an
    -// object that provides the parsed config.
    +// ParseServiceConfig parses the provided service config and returns an object
    +// that provides the parsed config.
     func (wcc *wrappingClientConn) ParseServiceConfig(serviceConfigJSON string) *serviceconfig.ParseResult {
     	return wcc.parent.cc.ParseServiceConfig(serviceConfigJSON)
     }
    diff --git a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go
    index ba5c5a95d..ada5251cf 100644
    --- a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go
    +++ b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go
    @@ -132,13 +132,13 @@ func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts
     	// DNS address (non-IP).
     	ctx, cancel := context.WithCancel(context.Background())
     	d := &dnsResolver{
    -		host:                 host,
    -		port:                 port,
    -		ctx:                  ctx,
    -		cancel:               cancel,
    -		cc:                   cc,
    -		rn:                   make(chan struct{}, 1),
    -		disableServiceConfig: opts.DisableServiceConfig,
    +		host:                host,
    +		port:                port,
    +		ctx:                 ctx,
    +		cancel:              cancel,
    +		cc:                  cc,
    +		rn:                  make(chan struct{}, 1),
    +		enableServiceConfig: envconfig.EnableTXTServiceConfig && !opts.DisableServiceConfig,
     	}
     
     	d.resolver, err = internal.NewNetResolver(target.URL.Host)
    @@ -181,8 +181,8 @@ type dnsResolver struct {
     	// finishes, race detector sometimes will warn lookup (READ the lookup
     	// function pointers) inside watcher() goroutine has data race with
     	// replaceNetFunc (WRITE the lookup function pointers).
    -	wg                   sync.WaitGroup
    -	disableServiceConfig bool
    +	wg                  sync.WaitGroup
    +	enableServiceConfig bool
     }
     
     // ResolveNow invoke an immediate resolution of the target that this
    @@ -346,7 +346,7 @@ func (d *dnsResolver) lookup() (*resolver.State, error) {
     	if len(srv) > 0 {
     		state = grpclbstate.Set(state, &grpclbstate.State{BalancerAddresses: srv})
     	}
    -	if !d.disableServiceConfig {
    +	if d.enableServiceConfig {
     		state.ServiceConfig = d.lookupTXT(ctx)
     	}
     	return &state, nil
    diff --git a/vendor/google.golang.org/grpc/internal/stats/metrics_recorder_list.go b/vendor/google.golang.org/grpc/internal/stats/metrics_recorder_list.go
    index 79044657b..d5f7e4d62 100644
    --- a/vendor/google.golang.org/grpc/internal/stats/metrics_recorder_list.go
    +++ b/vendor/google.golang.org/grpc/internal/stats/metrics_recorder_list.go
    @@ -64,6 +64,16 @@ func (l *MetricsRecorderList) RecordInt64Count(handle *estats.Int64CountHandle,
     	}
     }
     
    +// RecordInt64UpDownCount records the measurement alongside labels on the int
    +// count associated with the provided handle.
    +func (l *MetricsRecorderList) RecordInt64UpDownCount(handle *estats.Int64UpDownCountHandle, incr int64, labels ...string) {
    +	verifyLabels(handle.Descriptor(), labels...)
    +
    +	for _, metricRecorder := range l.metricsRecorders {
    +		metricRecorder.RecordInt64UpDownCount(handle, incr, labels...)
    +	}
    +}
    +
     // RecordFloat64Count records the measurement alongside labels on the float
     // count associated with the provided handle.
     func (l *MetricsRecorderList) RecordFloat64Count(handle *estats.Float64CountHandle, incr float64, labels ...string) {
    diff --git a/vendor/google.golang.org/grpc/internal/stats/stats.go b/vendor/google.golang.org/grpc/internal/stats/stats.go
    new file mode 100644
    index 000000000..49019b80d
    --- /dev/null
    +++ b/vendor/google.golang.org/grpc/internal/stats/stats.go
    @@ -0,0 +1,70 @@
    +/*
    + *
    + * Copyright 2025 gRPC authors.
    + *
    + * Licensed under the Apache License, Version 2.0 (the "License");
    + * you may not use this file except in compliance with the License.
    + * You may obtain a copy of the License at
    + *
    + *     http://www.apache.org/licenses/LICENSE-2.0
    + *
    + * Unless required by applicable law or agreed to in writing, software
    + * distributed under the License is distributed on an "AS IS" BASIS,
    + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    + * See the License for the specific language governing permissions and
    + * limitations under the License.
    + *
    + */
    +
    +package stats
    +
    +import (
    +	"context"
    +
    +	"google.golang.org/grpc/stats"
    +)
    +
    +type combinedHandler struct {
    +	handlers []stats.Handler
    +}
    +
    +// NewCombinedHandler combines multiple stats.Handlers into a single handler.
    +//
    +// It returns nil if no handlers are provided. If only one handler is
    +// provided, it is returned directly without wrapping.
    +func NewCombinedHandler(handlers ...stats.Handler) stats.Handler {
    +	switch len(handlers) {
    +	case 0:
    +		return nil
    +	case 1:
    +		return handlers[0]
    +	default:
    +		return &combinedHandler{handlers: handlers}
    +	}
    +}
    +
    +func (ch *combinedHandler) TagRPC(ctx context.Context, info *stats.RPCTagInfo) context.Context {
    +	for _, h := range ch.handlers {
    +		ctx = h.TagRPC(ctx, info)
    +	}
    +	return ctx
    +}
    +
    +func (ch *combinedHandler) HandleRPC(ctx context.Context, stats stats.RPCStats) {
    +	for _, h := range ch.handlers {
    +		h.HandleRPC(ctx, stats)
    +	}
    +}
    +
    +func (ch *combinedHandler) TagConn(ctx context.Context, info *stats.ConnTagInfo) context.Context {
    +	for _, h := range ch.handlers {
    +		ctx = h.TagConn(ctx, info)
    +	}
    +	return ctx
    +}
    +
    +func (ch *combinedHandler) HandleConn(ctx context.Context, stats stats.ConnStats) {
    +	for _, h := range ch.handlers {
    +		h.HandleConn(ctx, stats)
    +	}
    +}
    diff --git a/vendor/google.golang.org/grpc/internal/status/status.go b/vendor/google.golang.org/grpc/internal/status/status.go
    index 1186f1e9a..aad171cd0 100644
    --- a/vendor/google.golang.org/grpc/internal/status/status.go
    +++ b/vendor/google.golang.org/grpc/internal/status/status.go
    @@ -236,3 +236,11 @@ func IsRestrictedControlPlaneCode(s *Status) bool {
     	}
     	return false
     }
    +
    +// RawStatusProto returns the internal protobuf message for use by gRPC itself.
    +func RawStatusProto(s *Status) *spb.Status {
    +	if s == nil {
    +		return nil
    +	}
    +	return s.s
    +}
    diff --git a/vendor/google.golang.org/grpc/internal/transport/client_stream.go b/vendor/google.golang.org/grpc/internal/transport/client_stream.go
    index 8ed347c54..980452519 100644
    --- a/vendor/google.golang.org/grpc/internal/transport/client_stream.go
    +++ b/vendor/google.golang.org/grpc/internal/transport/client_stream.go
    @@ -29,25 +29,27 @@ import (
     
     // ClientStream implements streaming functionality for a gRPC client.
     type ClientStream struct {
    -	*Stream // Embed for common stream functionality.
    +	Stream // Embed for common stream functionality.
     
     	ct       *http2Client
     	done     chan struct{} // closed at the end of stream to unblock writers.
     	doneFunc func()        // invoked at the end of stream.
     
    -	headerChan       chan struct{} // closed to indicate the end of header metadata.
    -	headerChanClosed uint32        // set when headerChan is closed. Used to avoid closing headerChan multiple times.
    +	headerChan chan struct{} // closed to indicate the end of header metadata.
    +	header     metadata.MD   // the received header metadata
    +
    +	status *status.Status // the status error received from the server
    +
    +	// Non-pointer fields are at the end to optimize GC allocations.
    +
     	// headerValid indicates whether a valid header was received.  Only
     	// meaningful after headerChan is closed (always call waitOnHeader() before
     	// reading its value).
    -	headerValid bool
    -	header      metadata.MD // the received header metadata
    -	noHeaders   bool        // set if the client never received headers (set only after the stream is done).
    -
    -	bytesReceived atomic.Bool // indicates whether any bytes have been received on this stream
    -	unprocessed   atomic.Bool // set if the server sends a refused stream or GOAWAY including this stream
    -
    -	status *status.Status // the status error received from the server
    +	headerValid      bool
    +	noHeaders        bool        // set if the client never received headers (set only after the stream is done).
    +	headerChanClosed uint32      // set when headerChan is closed. Used to avoid closing headerChan multiple times.
    +	bytesReceived    atomic.Bool // indicates whether any bytes have been received on this stream
    +	unprocessed      atomic.Bool // set if the server sends a refused stream or GOAWAY including this stream
     }
     
     // Read reads an n byte message from the input stream.
    @@ -59,7 +61,7 @@ func (s *ClientStream) Read(n int) (mem.BufferSlice, error) {
     	return b, err
     }
     
    -// Close closes the stream and popagates err to any readers.
    +// Close closes the stream and propagates err to any readers.
     func (s *ClientStream) Close(err error) {
     	var (
     		rst     bool
    @@ -142,3 +144,11 @@ func (s *ClientStream) TrailersOnly() bool {
     func (s *ClientStream) Status() *status.Status {
     	return s.status
     }
    +
    +func (s *ClientStream) requestRead(n int) {
    +	s.ct.adjustWindow(s, uint32(n))
    +}
    +
    +func (s *ClientStream) updateWindow(n int) {
    +	s.ct.updateWindow(s, uint32(n))
    +}
    diff --git a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go
    index ef72fbb3a..2dcd1e63b 100644
    --- a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go
    +++ b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go
    @@ -40,6 +40,13 @@ var updateHeaderTblSize = func(e *hpack.Encoder, v uint32) {
     	e.SetMaxDynamicTableSizeLimit(v)
     }
     
    +// itemNodePool is used to reduce heap allocations.
    +var itemNodePool = sync.Pool{
    +	New: func() any {
    +		return &itemNode{}
    +	},
    +}
    +
     type itemNode struct {
     	it   any
     	next *itemNode
    @@ -51,7 +58,9 @@ type itemList struct {
     }
     
     func (il *itemList) enqueue(i any) {
    -	n := &itemNode{it: i}
    +	n := itemNodePool.Get().(*itemNode)
    +	n.next = nil
    +	n.it = i
     	if il.tail == nil {
     		il.head, il.tail = n, n
     		return
    @@ -71,7 +80,9 @@ func (il *itemList) dequeue() any {
     		return nil
     	}
     	i := il.head.it
    +	temp := il.head
     	il.head = il.head.next
    +	itemNodePool.Put(temp)
     	if il.head == nil {
     		il.tail = nil
     	}
    @@ -146,10 +157,11 @@ type earlyAbortStream struct {
     func (*earlyAbortStream) isTransportResponseFrame() bool { return false }
     
     type dataFrame struct {
    -	streamID  uint32
    -	endStream bool
    -	h         []byte
    -	reader    mem.Reader
    +	streamID   uint32
    +	endStream  bool
    +	h          []byte
    +	data       mem.BufferSlice
    +	processing bool
     	// onEachWrite is called every time
     	// a part of data is written out.
     	onEachWrite func()
    @@ -234,6 +246,7 @@ type outStream struct {
     	itl              *itemList
     	bytesOutStanding int
     	wq               *writeQuota
    +	reader           mem.Reader
     
     	next *outStream
     	prev *outStream
    @@ -461,7 +474,9 @@ func (c *controlBuffer) finish() {
     				v.onOrphaned(ErrConnClosing)
     			}
     		case *dataFrame:
    -			_ = v.reader.Close()
    +			if !v.processing {
    +				v.data.Free()
    +			}
     		}
     	}
     
    @@ -481,6 +496,16 @@ const (
     	serverSide
     )
     
    +// maxWriteBufSize is the maximum length (number of elements) the cached
    +// writeBuf can grow to. The length depends on the number of buffers
    +// contained within the BufferSlice produced by the codec, which is
    +// generally small.
    +//
    +// If a writeBuf larger than this limit is required, it will be allocated
    +// and freed after use, rather than being cached. This avoids holding
    +// on to large amounts of memory.
    +const maxWriteBufSize = 64
    +
     // Loopy receives frames from the control buffer.
     // Each frame is handled individually; most of the work done by loopy goes
     // into handling data frames. Loopy maintains a queue of active streams, and each
    @@ -515,6 +540,8 @@ type loopyWriter struct {
     
     	// Side-specific handlers
     	ssGoAwayHandler func(*goAway) (bool, error)
    +
    +	writeBuf [][]byte // cached slice to avoid heap allocations for calls to mem.Reader.Peek.
     }
     
     func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimator, conn net.Conn, logger *grpclog.PrefixLogger, goAwayHandler func(*goAway) (bool, error), bufferPool mem.BufferPool) *loopyWriter {
    @@ -790,10 +817,13 @@ func (l *loopyWriter) cleanupStreamHandler(c *cleanupStream) error {
     		// a RST_STREAM before stream initialization thus the stream might
     		// not be established yet.
     		delete(l.estdStreams, c.streamID)
    +		str.reader.Close()
     		str.deleteSelf()
     		for head := str.itl.dequeueAll(); head != nil; head = head.next {
     			if df, ok := head.it.(*dataFrame); ok {
    -				_ = df.reader.Close()
    +				if !df.processing {
    +					df.data.Free()
    +				}
     			}
     		}
     	}
    @@ -928,7 +958,13 @@ func (l *loopyWriter) processData() (bool, error) {
     	if str == nil {
     		return true, nil
     	}
    +	reader := &str.reader
     	dataItem := str.itl.peek().(*dataFrame) // Peek at the first data item this stream.
    +	if !dataItem.processing {
    +		dataItem.processing = true
    +		reader.Reset(dataItem.data)
    +		dataItem.data.Free()
    +	}
     	// A data item is represented by a dataFrame, since it later translates into
     	// multiple HTTP2 data frames.
     	// Every dataFrame has two buffers; h that keeps grpc-message header and data
    @@ -936,13 +972,13 @@ func (l *loopyWriter) processData() (bool, error) {
     	// from data is copied to h to make as big as the maximum possible HTTP2 frame
     	// size.
     
    -	if len(dataItem.h) == 0 && dataItem.reader.Remaining() == 0 { // Empty data frame
    +	if len(dataItem.h) == 0 && reader.Remaining() == 0 { // Empty data frame
     		// Client sends out empty data frame with endStream = true
    -		if err := l.framer.fr.WriteData(dataItem.streamID, dataItem.endStream, nil); err != nil {
    +		if err := l.framer.writeData(dataItem.streamID, dataItem.endStream, nil); err != nil {
     			return false, err
     		}
     		str.itl.dequeue() // remove the empty data item from stream
    -		_ = dataItem.reader.Close()
    +		reader.Close()
     		if str.itl.isEmpty() {
     			str.state = empty
     		} else if trailer, ok := str.itl.peek().(*headerFrame); ok { // the next item is trailers.
    @@ -971,29 +1007,24 @@ func (l *loopyWriter) processData() (bool, error) {
     	}
     	// Compute how much of the header and data we can send within quota and max frame length
     	hSize := min(maxSize, len(dataItem.h))
    -	dSize := min(maxSize-hSize, dataItem.reader.Remaining())
    -	remainingBytes := len(dataItem.h) + dataItem.reader.Remaining() - hSize - dSize
    +	dSize := min(maxSize-hSize, reader.Remaining())
    +	remainingBytes := len(dataItem.h) + reader.Remaining() - hSize - dSize
     	size := hSize + dSize
     
    -	var buf *[]byte
    -
    -	if hSize != 0 && dSize == 0 {
    -		buf = &dataItem.h
    -	} else {
    -		// Note: this is only necessary because the http2.Framer does not support
    -		// partially writing a frame, so the sequence must be materialized into a buffer.
    -		// TODO: Revisit once https://github.com/golang/go/issues/66655 is addressed.
    -		pool := l.bufferPool
    -		if pool == nil {
    -			// Note that this is only supposed to be nil in tests. Otherwise, stream is
    -			// always initialized with a BufferPool.
    -			pool = mem.DefaultBufferPool()
    +	l.writeBuf = l.writeBuf[:0]
    +	if hSize > 0 {
    +		l.writeBuf = append(l.writeBuf, dataItem.h[:hSize])
    +	}
    +	if dSize > 0 {
    +		var err error
    +		l.writeBuf, err = reader.Peek(dSize, l.writeBuf)
    +		if err != nil {
    +			// This must never happen since the reader must have at least dSize
    +			// bytes.
    +			// Log an error to fail tests.
    +			l.logger.Errorf("unexpected error while reading Data frame payload: %v", err)
    +			return false, err
     		}
    -		buf = pool.Get(size)
    -		defer pool.Put(buf)
    -
    -		copy((*buf)[:hSize], dataItem.h)
    -		_, _ = dataItem.reader.Read((*buf)[hSize:])
     	}
     
     	// Now that outgoing flow controls are checked we can replenish str's write quota
    @@ -1006,7 +1037,14 @@ func (l *loopyWriter) processData() (bool, error) {
     	if dataItem.onEachWrite != nil {
     		dataItem.onEachWrite()
     	}
    -	if err := l.framer.fr.WriteData(dataItem.streamID, endStream, (*buf)[:size]); err != nil {
    +	err := l.framer.writeData(dataItem.streamID, endStream, l.writeBuf)
    +	reader.Discard(dSize)
    +	if cap(l.writeBuf) > maxWriteBufSize {
    +		l.writeBuf = nil
    +	} else {
    +		clear(l.writeBuf)
    +	}
    +	if err != nil {
     		return false, err
     	}
     	str.bytesOutStanding += size
    @@ -1014,7 +1052,7 @@ func (l *loopyWriter) processData() (bool, error) {
     	dataItem.h = dataItem.h[hSize:]
     
     	if remainingBytes == 0 { // All the data from that message was written out.
    -		_ = dataItem.reader.Close()
    +		reader.Close()
     		str.itl.dequeue()
     	}
     	if str.itl.isEmpty() {
    diff --git a/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go b/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go
    index dfc0f224e..7cfbc9637 100644
    --- a/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go
    +++ b/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go
    @@ -28,7 +28,7 @@ import (
     // writeQuota is a soft limit on the amount of data a stream can
     // schedule before some of it is written out.
     type writeQuota struct {
    -	quota int32
    +	_ noCopy
     	// get waits on read from when quota goes less than or equal to zero.
     	// replenish writes on it when quota goes positive again.
     	ch chan struct{}
    @@ -38,16 +38,17 @@ type writeQuota struct {
     	// It is implemented as a field so that it can be updated
     	// by tests.
     	replenish func(n int)
    +	quota     int32
     }
     
    -func newWriteQuota(sz int32, done <-chan struct{}) *writeQuota {
    -	w := &writeQuota{
    -		quota: sz,
    -		ch:    make(chan struct{}, 1),
    -		done:  done,
    -	}
    +// init allows a writeQuota to be initialized in-place, which is useful for
    +// resetting a buffer or for avoiding a heap allocation when the buffer is
    +// embedded in another struct.
    +func (w *writeQuota) init(sz int32, done <-chan struct{}) {
    +	w.quota = sz
    +	w.ch = make(chan struct{}, 1)
    +	w.done = done
     	w.replenish = w.realReplenish
    -	return w
     }
     
     func (w *writeQuota) get(sz int32) error {
    @@ -67,9 +68,9 @@ func (w *writeQuota) get(sz int32) error {
     
     func (w *writeQuota) realReplenish(n int) {
     	sz := int32(n)
    -	a := atomic.AddInt32(&w.quota, sz)
    -	b := a - sz
    -	if b <= 0 && a > 0 {
    +	newQuota := atomic.AddInt32(&w.quota, sz)
    +	previousQuota := newQuota - sz
    +	if previousQuota <= 0 && newQuota > 0 {
     		select {
     		case w.ch <- struct{}{}:
     		default:
    diff --git a/vendor/google.golang.org/grpc/internal/transport/handler_server.go b/vendor/google.golang.org/grpc/internal/transport/handler_server.go
    index 3dea23573..7ab3422b8 100644
    --- a/vendor/google.golang.org/grpc/internal/transport/handler_server.go
    +++ b/vendor/google.golang.org/grpc/internal/transport/handler_server.go
    @@ -50,7 +50,7 @@ import (
     // NewServerHandlerTransport returns a ServerTransport handling gRPC from
     // inside an http.Handler, or writes an HTTP error to w and returns an error.
     // It requires that the http Server supports HTTP/2.
    -func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []stats.Handler, bufferPool mem.BufferPool) (ServerTransport, error) {
    +func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats stats.Handler, bufferPool mem.BufferPool) (ServerTransport, error) {
     	if r.Method != http.MethodPost {
     		w.Header().Set("Allow", http.MethodPost)
     		msg := fmt.Sprintf("invalid gRPC request method %q", r.Method)
    @@ -170,7 +170,7 @@ type serverHandlerTransport struct {
     	// TODO make sure this is consistent across handler_server and http2_server
     	contentSubtype string
     
    -	stats  []stats.Handler
    +	stats  stats.Handler
     	logger *grpclog.PrefixLogger
     
     	bufferPool mem.BufferPool
    @@ -274,14 +274,14 @@ func (ht *serverHandlerTransport) writeStatus(s *ServerStream, st *status.Status
     		}
     	})
     
    -	if err == nil { // transport has not been closed
    +	if err == nil && ht.stats != nil { // transport has not been closed
     		// Note: The trailer fields are compressed with hpack after this call returns.
     		// No WireLength field is set here.
    -		for _, sh := range ht.stats {
    -			sh.HandleRPC(s.Context(), &stats.OutTrailer{
    -				Trailer: s.trailer.Copy(),
    -			})
    -		}
    +		s.hdrMu.Lock()
    +		ht.stats.HandleRPC(s.Context(), &stats.OutTrailer{
    +			Trailer: s.trailer.Copy(),
    +		})
    +		s.hdrMu.Unlock()
     	}
     	ht.Close(errors.New("finished writing status"))
     	return err
    @@ -372,19 +372,23 @@ func (ht *serverHandlerTransport) writeHeader(s *ServerStream, md metadata.MD) e
     		ht.rw.(http.Flusher).Flush()
     	})
     
    -	if err == nil {
    -		for _, sh := range ht.stats {
    -			// Note: The header fields are compressed with hpack after this call returns.
    -			// No WireLength field is set here.
    -			sh.HandleRPC(s.Context(), &stats.OutHeader{
    -				Header:      md.Copy(),
    -				Compression: s.sendCompress,
    -			})
    -		}
    +	if err == nil && ht.stats != nil {
    +		// Note: The header fields are compressed with hpack after this call returns.
    +		// No WireLength field is set here.
    +		ht.stats.HandleRPC(s.Context(), &stats.OutHeader{
    +			Header:      md.Copy(),
    +			Compression: s.sendCompress,
    +		})
     	}
     	return err
     }
     
    +func (ht *serverHandlerTransport) adjustWindow(*ServerStream, uint32) {
    +}
    +
    +func (ht *serverHandlerTransport) updateWindow(*ServerStream, uint32) {
    +}
    +
     func (ht *serverHandlerTransport) HandleStreams(ctx context.Context, startStream func(*ServerStream)) {
     	// With this transport type there will be exactly 1 stream: this HTTP request.
     	var cancel context.CancelFunc
    @@ -409,11 +413,9 @@ func (ht *serverHandlerTransport) HandleStreams(ctx context.Context, startStream
     	ctx = metadata.NewIncomingContext(ctx, ht.headerMD)
     	req := ht.req
     	s := &ServerStream{
    -		Stream: &Stream{
    +		Stream: Stream{
     			id:             0, // irrelevant
     			ctx:            ctx,
    -			requestRead:    func(int) {},
    -			buf:            newRecvBuffer(),
     			method:         req.URL.Path,
     			recvCompress:   req.Header.Get("grpc-encoding"),
     			contentSubtype: ht.contentSubtype,
    @@ -422,9 +424,11 @@ func (ht *serverHandlerTransport) HandleStreams(ctx context.Context, startStream
     		st:               ht,
     		headerWireLength: 0, // won't have access to header wire length until golang/go#18997.
     	}
    -	s.trReader = &transportReader{
    -		reader:        &recvBufferReader{ctx: s.ctx, ctxDone: s.ctx.Done(), recv: s.buf},
    -		windowHandler: func(int) {},
    +	s.Stream.buf.init()
    +	s.readRequester = s
    +	s.trReader = transportReader{
    +		reader:        recvBufferReader{ctx: s.ctx, ctxDone: s.ctx.Done(), recv: &s.buf},
    +		windowHandler: s,
     	}
     
     	// readerDone is closed when the Body.Read-ing goroutine exits.
    diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/vendor/google.golang.org/grpc/internal/transport/http2_client.go
    index 513dbb93d..38ca031af 100644
    --- a/vendor/google.golang.org/grpc/internal/transport/http2_client.go
    +++ b/vendor/google.golang.org/grpc/internal/transport/http2_client.go
    @@ -44,6 +44,7 @@ import (
     	"google.golang.org/grpc/internal/grpcutil"
     	imetadata "google.golang.org/grpc/internal/metadata"
     	"google.golang.org/grpc/internal/proxyattributes"
    +	istats "google.golang.org/grpc/internal/stats"
     	istatus "google.golang.org/grpc/internal/status"
     	isyscall "google.golang.org/grpc/internal/syscall"
     	"google.golang.org/grpc/internal/transport/networktype"
    @@ -105,7 +106,7 @@ type http2Client struct {
     	kp               keepalive.ClientParameters
     	keepaliveEnabled bool
     
    -	statsHandlers []stats.Handler
    +	statsHandler stats.Handler
     
     	initialWindowSize int32
     
    @@ -176,7 +177,7 @@ func dial(ctx context.Context, fn func(context.Context, string) (net.Conn, error
     		return fn(ctx, address)
     	}
     	if !ok {
    -		networkType, address = parseDialTarget(address)
    +		networkType, address = ParseDialTarget(address)
     	}
     	if opts, present := proxyattributes.Get(addr); present {
     		return proxyDial(ctx, addr, grpcUA, opts)
    @@ -309,11 +310,9 @@ func NewHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts
     			scheme = "https"
     		}
     	}
    -	dynamicWindow := true
     	icwz := int32(initialWindowSize)
     	if opts.InitialConnWindowSize >= defaultWindowSize {
     		icwz = opts.InitialConnWindowSize
    -		dynamicWindow = false
     	}
     	writeBufSize := opts.WriteBufferSize
     	readBufSize := opts.ReadBufferSize
    @@ -337,14 +336,14 @@ func NewHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts
     		writerDone:            make(chan struct{}),
     		goAway:                make(chan struct{}),
     		keepaliveDone:         make(chan struct{}),
    -		framer:                newFramer(conn, writeBufSize, readBufSize, opts.SharedWriteBuffer, maxHeaderListSize),
    +		framer:                newFramer(conn, writeBufSize, readBufSize, opts.SharedWriteBuffer, maxHeaderListSize, opts.BufferPool),
     		fc:                    &trInFlow{limit: uint32(icwz)},
     		scheme:                scheme,
     		activeStreams:         make(map[uint32]*ClientStream),
     		isSecure:              isSecure,
     		perRPCCreds:           perRPCCreds,
     		kp:                    kp,
    -		statsHandlers:         opts.StatsHandlers,
    +		statsHandler:          istats.NewCombinedHandler(opts.StatsHandlers...),
     		initialWindowSize:     initialWindowSize,
     		nextID:                1,
     		maxConcurrentStreams:  defaultMaxStreamsClient,
    @@ -371,7 +370,7 @@ func NewHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts
     		})
     	t.logger = prefixLoggerForClientTransport(t)
     	// Add peer information to the http2client context.
    -	t.ctx = peer.NewContext(t.ctx, t.getPeer())
    +	t.ctx = peer.NewContext(t.ctx, t.Peer())
     
     	if md, ok := addr.Metadata.(*metadata.MD); ok {
     		t.md = *md
    @@ -381,23 +380,21 @@ func NewHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts
     	t.controlBuf = newControlBuffer(t.ctxDone)
     	if opts.InitialWindowSize >= defaultWindowSize {
     		t.initialWindowSize = opts.InitialWindowSize
    -		dynamicWindow = false
     	}
    -	if dynamicWindow {
    +	if !opts.StaticWindowSize {
     		t.bdpEst = &bdpEstimator{
     			bdp:               initialWindowSize,
     			updateFlowControl: t.updateFlowControl,
     		}
     	}
    -	for _, sh := range t.statsHandlers {
    -		t.ctx = sh.TagConn(t.ctx, &stats.ConnTagInfo{
    +	if t.statsHandler != nil {
    +		t.ctx = t.statsHandler.TagConn(t.ctx, &stats.ConnTagInfo{
     			RemoteAddr: t.remoteAddr,
     			LocalAddr:  t.localAddr,
     		})
    -		connBegin := &stats.ConnBegin{
    +		t.statsHandler.HandleConn(t.ctx, &stats.ConnBegin{
     			Client: true,
    -		}
    -		sh.HandleConn(t.ctx, connBegin)
    +		})
     	}
     	if t.keepaliveEnabled {
     		t.kpDormancyCond = sync.NewCond(&t.mu)
    @@ -484,10 +481,9 @@ func NewHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts
     func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *ClientStream {
     	// TODO(zhaoq): Handle uint32 overflow of Stream.id.
     	s := &ClientStream{
    -		Stream: &Stream{
    +		Stream: Stream{
     			method:         callHdr.Method,
     			sendCompress:   callHdr.SendCompress,
    -			buf:            newRecvBuffer(),
     			contentSubtype: callHdr.ContentSubtype,
     		},
     		ct:         t,
    @@ -495,31 +491,26 @@ func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *ClientSt
     		headerChan: make(chan struct{}),
     		doneFunc:   callHdr.DoneFunc,
     	}
    -	s.wq = newWriteQuota(defaultWriteQuota, s.done)
    -	s.requestRead = func(n int) {
    -		t.adjustWindow(s, uint32(n))
    -	}
    +	s.Stream.buf.init()
    +	s.Stream.wq.init(defaultWriteQuota, s.done)
    +	s.readRequester = s
     	// The client side stream context should have exactly the same life cycle with the user provided context.
     	// That means, s.ctx should be read-only. And s.ctx is done iff ctx is done.
     	// So we use the original context here instead of creating a copy.
     	s.ctx = ctx
    -	s.trReader = &transportReader{
    -		reader: &recvBufferReader{
    -			ctx:     s.ctx,
    -			ctxDone: s.ctx.Done(),
    -			recv:    s.buf,
    -			closeStream: func(err error) {
    -				s.Close(err)
    -			},
    -		},
    -		windowHandler: func(n int) {
    -			t.updateWindow(s, uint32(n))
    +	s.trReader = transportReader{
    +		reader: recvBufferReader{
    +			ctx:          s.ctx,
    +			ctxDone:      s.ctx.Done(),
    +			recv:         &s.buf,
    +			clientStream: s,
     		},
    +		windowHandler: s,
     	}
     	return s
     }
     
    -func (t *http2Client) getPeer() *peer.Peer {
    +func (t *http2Client) Peer() *peer.Peer {
     	return &peer.Peer{
     		Addr:      t.remoteAddr,
     		AuthInfo:  t.authInfo, // Can be nil
    @@ -545,7 +536,7 @@ func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr)
     		Method:   callHdr.Method,
     		AuthInfo: t.authInfo,
     	}
    -	ctxWithRequestInfo := icredentials.NewRequestInfoContext(ctx, ri)
    +	ctxWithRequestInfo := credentials.NewContextWithRequestInfo(ctx, ri)
     	authData, err := t.getTrAuthData(ctxWithRequestInfo, aud)
     	if err != nil {
     		return nil, err
    @@ -559,6 +550,22 @@ func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr)
     	// Make the slice of certain predictable size to reduce allocations made by append.
     	hfLen := 7 // :method, :scheme, :path, :authority, content-type, user-agent, te
     	hfLen += len(authData) + len(callAuthData)
    +	registeredCompressors := t.registeredCompressors
    +	if callHdr.AcceptedCompressors != nil {
    +		registeredCompressors = *callHdr.AcceptedCompressors
    +	}
    +	if callHdr.PreviousAttempts > 0 {
    +		hfLen++
    +	}
    +	if callHdr.SendCompress != "" {
    +		hfLen++
    +	}
    +	if registeredCompressors != "" {
    +		hfLen++
    +	}
    +	if _, ok := ctx.Deadline(); ok {
    +		hfLen++
    +	}
     	headerFields := make([]hpack.HeaderField, 0, hfLen)
     	headerFields = append(headerFields, hpack.HeaderField{Name: ":method", Value: "POST"})
     	headerFields = append(headerFields, hpack.HeaderField{Name: ":scheme", Value: t.scheme})
    @@ -571,7 +578,6 @@ func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr)
     		headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-previous-rpc-attempts", Value: strconv.Itoa(callHdr.PreviousAttempts)})
     	}
     
    -	registeredCompressors := t.registeredCompressors
     	if callHdr.SendCompress != "" {
     		headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-encoding", Value: callHdr.SendCompress})
     		// Include the outgoing compressor name when compressor is not registered
    @@ -592,6 +598,9 @@ func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr)
     		// Send out timeout regardless its value. The server can detect timeout context by itself.
     		// TODO(mmukhi): Perhaps this field should be updated when actually writing out to the wire.
     		timeout := time.Until(dl)
    +		if timeout <= 0 {
    +			return nil, status.Error(codes.DeadlineExceeded, context.DeadlineExceeded.Error())
    +		}
     		headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-timeout", Value: grpcutil.EncodeDuration(timeout)})
     	}
     	for k, v := range authData {
    @@ -736,7 +745,7 @@ func (e NewStreamError) Error() string {
     // NewStream creates a stream and registers it into the transport as "active"
     // streams.  All non-nil errors returned will be *NewStreamError.
     func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*ClientStream, error) {
    -	ctx = peer.NewContext(ctx, t.getPeer())
    +	ctx = peer.NewContext(ctx, t.Peer())
     
     	// ServerName field of the resolver returned address takes precedence over
     	// Host field of CallHdr to determine the :authority header. This is because,
    @@ -749,6 +758,25 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*ClientS
     		callHdr = &newCallHdr
     	}
     
    +	// The authority specified via the `CallAuthority` CallOption takes the
    +	// highest precedence when determining the `:authority` header. It overrides
    +	// any value present in the Host field of CallHdr. Before applying this
    +	// override, the authority string is validated. If the credentials do not
    +	// implement the AuthorityValidator interface, or if validation fails, the
    +	// RPC is failed with a status code of `UNAVAILABLE`.
    +	if callHdr.Authority != "" {
    +		auth, ok := t.authInfo.(credentials.AuthorityValidator)
    +		if !ok {
    +			return nil, &NewStreamError{Err: status.Errorf(codes.Unavailable, "credentials type %q does not implement the AuthorityValidator interface, but authority override specified with CallAuthority call option", t.authInfo.AuthType())}
    +		}
    +		if err := auth.ValidateAuthority(callHdr.Authority); err != nil {
    +			return nil, &NewStreamError{Err: status.Errorf(codes.Unavailable, "failed to validate authority %q : %v", callHdr.Authority, err)}
    +		}
    +		newCallHdr := *callHdr
    +		newCallHdr.Host = callHdr.Authority
    +		callHdr = &newCallHdr
    +	}
    +
     	headerFields, err := t.createHeaderFields(ctx, callHdr)
     	if err != nil {
     		return nil, &NewStreamError{Err: err, AllowTransparentRetry: false}
    @@ -792,7 +820,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*ClientS
     			return nil
     		},
     		onOrphaned: cleanup,
    -		wq:         s.wq,
    +		wq:         &s.wq,
     	}
     	firstTry := true
     	var ch chan struct{}
    @@ -823,7 +851,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*ClientS
     		transportDrainRequired = t.nextID > MaxStreamID
     
     		s.id = hdr.streamID
    -		s.fc = &inFlow{limit: uint32(t.initialWindowSize)}
    +		s.fc = inFlow{limit: uint32(t.initialWindowSize)}
     		t.activeStreams[s.id] = s
     		t.mu.Unlock()
     
    @@ -874,27 +902,23 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*ClientS
     			return nil, &NewStreamError{Err: ErrConnClosing, AllowTransparentRetry: true}
     		}
     	}
    -	if len(t.statsHandlers) != 0 {
    +	if t.statsHandler != nil {
     		header, ok := metadata.FromOutgoingContext(ctx)
     		if ok {
     			header.Set("user-agent", t.userAgent)
     		} else {
     			header = metadata.Pairs("user-agent", t.userAgent)
     		}
    -		for _, sh := range t.statsHandlers {
    -			// Note: The header fields are compressed with hpack after this call returns.
    -			// No WireLength field is set here.
    -			// Note: Creating a new stats object to prevent pollution.
    -			outHeader := &stats.OutHeader{
    -				Client:      true,
    -				FullMethod:  callHdr.Method,
    -				RemoteAddr:  t.remoteAddr,
    -				LocalAddr:   t.localAddr,
    -				Compression: callHdr.SendCompress,
    -				Header:      header,
    -			}
    -			sh.HandleRPC(s.ctx, outHeader)
    -		}
    +		// Note: The header fields are compressed with hpack after this call returns.
    +		// No WireLength field is set here.
    +		t.statsHandler.HandleRPC(s.ctx, &stats.OutHeader{
    +			Client:      true,
    +			FullMethod:  callHdr.Method,
    +			RemoteAddr:  t.remoteAddr,
    +			LocalAddr:   t.localAddr,
    +			Compression: callHdr.SendCompress,
    +			Header:      header,
    +		})
     	}
     	if transportDrainRequired {
     		if t.logger.V(logLevel) {
    @@ -971,6 +995,9 @@ func (t *http2Client) closeStream(s *ClientStream, err error, rst bool, rstCode
     // accessed anymore.
     func (t *http2Client) Close(err error) {
     	t.conn.SetWriteDeadline(time.Now().Add(time.Second * 10))
    +	// For background on the deadline value chosen here, see
    +	// https://github.com/grpc/grpc-go/issues/8425#issuecomment-3057938248 .
    +	t.conn.SetReadDeadline(time.Now().Add(time.Second))
     	t.mu.Lock()
     	// Make sure we only close once.
     	if t.state == closing {
    @@ -1032,11 +1059,10 @@ func (t *http2Client) Close(err error) {
     	for _, s := range streams {
     		t.closeStream(s, err, false, http2.ErrCodeNo, st, nil, false)
     	}
    -	for _, sh := range t.statsHandlers {
    -		connEnd := &stats.ConnEnd{
    +	if t.statsHandler != nil {
    +		t.statsHandler.HandleConn(t.ctx, &stats.ConnEnd{
     			Client: true,
    -		}
    -		sh.HandleConn(t.ctx, connEnd)
    +		})
     	}
     }
     
    @@ -1069,32 +1095,29 @@ func (t *http2Client) GracefulClose() {
     // Write formats the data into HTTP2 data frame(s) and sends it out. The caller
     // should proceed only if Write returns nil.
     func (t *http2Client) write(s *ClientStream, hdr []byte, data mem.BufferSlice, opts *WriteOptions) error {
    -	reader := data.Reader()
    -
     	if opts.Last {
     		// If it's the last message, update stream state.
     		if !s.compareAndSwapState(streamActive, streamWriteDone) {
    -			_ = reader.Close()
     			return errStreamDone
     		}
     	} else if s.getState() != streamActive {
    -		_ = reader.Close()
     		return errStreamDone
     	}
     	df := &dataFrame{
     		streamID:  s.id,
     		endStream: opts.Last,
     		h:         hdr,
    -		reader:    reader,
    +		data:      data,
     	}
    -	if hdr != nil || df.reader.Remaining() != 0 { // If it's not an empty data frame, check quota.
    -		if err := s.wq.get(int32(len(hdr) + df.reader.Remaining())); err != nil {
    -			_ = reader.Close()
    +	dataLen := data.Len()
    +	if hdr != nil || dataLen != 0 { // If it's not an empty data frame, check quota.
    +		if err := s.wq.get(int32(len(hdr) + dataLen)); err != nil {
     			return err
     		}
     	}
    +	data.Ref()
     	if err := t.controlBuf.put(df); err != nil {
    -		_ = reader.Close()
    +		data.Free()
     		return err
     	}
     	t.incrMsgSent()
    @@ -1150,7 +1173,7 @@ func (t *http2Client) updateFlowControl(n uint32) {
     	})
     }
     
    -func (t *http2Client) handleData(f *http2.DataFrame) {
    +func (t *http2Client) handleData(f *parsedDataFrame) {
     	size := f.Header().Length
     	var sendBDPPing bool
     	if t.bdpEst != nil {
    @@ -1194,22 +1217,15 @@ func (t *http2Client) handleData(f *http2.DataFrame) {
     			t.closeStream(s, io.EOF, true, http2.ErrCodeFlowControl, status.New(codes.Internal, err.Error()), nil, false)
     			return
     		}
    +		dataLen := f.data.Len()
     		if f.Header().Flags.Has(http2.FlagDataPadded) {
    -			if w := s.fc.onRead(size - uint32(len(f.Data()))); w > 0 {
    +			if w := s.fc.onRead(size - uint32(dataLen)); w > 0 {
     				t.controlBuf.put(&outgoingWindowUpdate{s.id, w})
     			}
     		}
    -		// TODO(bradfitz, zhaoq): A copy is required here because there is no
    -		// guarantee f.Data() is consumed before the arrival of next frame.
    -		// Can this copy be eliminated?
    -		if len(f.Data()) > 0 {
    -			pool := t.bufferPool
    -			if pool == nil {
    -				// Note that this is only supposed to be nil in tests. Otherwise, stream is
    -				// always initialized with a BufferPool.
    -				pool = mem.DefaultBufferPool()
    -			}
    -			s.write(recvMsg{buffer: mem.Copy(f.Data(), pool)})
    +		if dataLen > 0 {
    +			f.data.Ref()
    +			s.write(recvMsg{buffer: f.data})
     		}
     	}
     	// The server has closed the stream without sending trailers.  Record that
    @@ -1242,7 +1258,8 @@ func (t *http2Client) handleRSTStream(f *http2.RSTStreamFrame) {
     			statusCode = codes.DeadlineExceeded
     		}
     	}
    -	t.closeStream(s, io.EOF, false, http2.ErrCodeNo, status.Newf(statusCode, "stream terminated by RST_STREAM with error code: %v", f.ErrCode), nil, false)
    +	st := status.Newf(statusCode, "stream terminated by RST_STREAM with error code: %v", f.ErrCode)
    +	t.closeStream(s, st.Err(), false, http2.ErrCodeNo, st, nil, false)
     }
     
     func (t *http2Client) handleSettings(f *http2.SettingsFrame, isFirst bool) {
    @@ -1390,8 +1407,7 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) error {
     // the caller.
     func (t *http2Client) setGoAwayReason(f *http2.GoAwayFrame) {
     	t.goAwayReason = GoAwayNoReason
    -	switch f.ErrCode {
    -	case http2.ErrCodeEnhanceYourCalm:
    +	if f.ErrCode == http2.ErrCodeEnhanceYourCalm {
     		if string(f.DebugData()) == "too_many_pings" {
     			t.goAwayReason = GoAwayTooManyPings
     		}
    @@ -1449,17 +1465,14 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) {
     		contentTypeErr = "malformed header: missing HTTP content-type"
     		grpcMessage    string
     		recvCompress   string
    -		httpStatusCode *int
     		httpStatusErr  string
    -		rawStatusCode  = codes.Unknown
    +		// the code from the grpc-status header, if present
    +		grpcStatusCode = codes.Unknown
     		// headerError is set if an error is encountered while parsing the headers
     		headerError string
    +		httpStatus  string
     	)
     
    -	if initialHeader {
    -		httpStatusErr = "malformed header: missing HTTP status"
    -	}
    -
     	for _, hf := range frame.Fields {
     		switch hf.Name {
     		case "content-type":
    @@ -1475,35 +1488,15 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) {
     		case "grpc-status":
     			code, err := strconv.ParseInt(hf.Value, 10, 32)
     			if err != nil {
    -				se := status.New(codes.Internal, fmt.Sprintf("transport: malformed grpc-status: %v", err))
    +				se := status.New(codes.Unknown, fmt.Sprintf("transport: malformed grpc-status: %v", err))
     				t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream)
     				return
     			}
    -			rawStatusCode = codes.Code(uint32(code))
    +			grpcStatusCode = codes.Code(uint32(code))
     		case "grpc-message":
     			grpcMessage = decodeGrpcMessage(hf.Value)
     		case ":status":
    -			if hf.Value == "200" {
    -				httpStatusErr = ""
    -				statusCode := 200
    -				httpStatusCode = &statusCode
    -				break
    -			}
    -
    -			c, err := strconv.ParseInt(hf.Value, 10, 32)
    -			if err != nil {
    -				se := status.New(codes.Internal, fmt.Sprintf("transport: malformed http-status: %v", err))
    -				t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream)
    -				return
    -			}
    -			statusCode := int(c)
    -			httpStatusCode = &statusCode
    -
    -			httpStatusErr = fmt.Sprintf(
    -				"unexpected HTTP status code received from server: %d (%s)",
    -				statusCode,
    -				http.StatusText(statusCode),
    -			)
    +			httpStatus = hf.Value
     		default:
     			if isReservedHeader(hf.Name) && !isWhitelistedHeader(hf.Name) {
     				break
    @@ -1518,25 +1511,52 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) {
     		}
     	}
     
    -	if !isGRPC || httpStatusErr != "" {
    -		var code = codes.Internal // when header does not include HTTP status, return INTERNAL
    -
    -		if httpStatusCode != nil {
    +	// If a non-gRPC response is received, then evaluate the HTTP status to
    +	// process the response and close the stream.
    +	// In case http status doesn't provide any error information (status : 200),
    +	// then evalute response code to be Unknown.
    +	if !isGRPC {
    +		var grpcErrorCode = codes.Internal
    +		if httpStatus == "" {
    +			httpStatusErr = "malformed header: missing HTTP status"
    +		} else {
    +			// Parse the status codes (e.g. "200", 404").
    +			statusCode, err := strconv.Atoi(httpStatus)
    +			if err != nil {
    +				se := status.New(grpcErrorCode, fmt.Sprintf("transport: malformed http-status: %v", err))
    +				t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream)
    +				return
    +			}
    +			if statusCode >= 100 && statusCode < 200 {
    +				if endStream {
    +					se := status.New(codes.Internal, fmt.Sprintf(
    +						"protocol error: informational header with status code %d must not have END_STREAM set", statusCode))
    +					t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream)
    +				}
    +				// In case of informational headers, return.
    +				return
    +			}
    +			httpStatusErr = fmt.Sprintf(
    +				"unexpected HTTP status code received from server: %d (%s)",
    +				statusCode,
    +				http.StatusText(statusCode),
    +			)
     			var ok bool
    -			code, ok = HTTPStatusConvTab[*httpStatusCode]
    +			grpcErrorCode, ok = HTTPStatusConvTab[statusCode]
     			if !ok {
    -				code = codes.Unknown
    +				grpcErrorCode = codes.Unknown
     			}
     		}
     		var errs []string
     		if httpStatusErr != "" {
     			errs = append(errs, httpStatusErr)
     		}
    +
     		if contentTypeErr != "" {
     			errs = append(errs, contentTypeErr)
     		}
    -		// Verify the HTTP response is a 200.
    -		se := status.New(code, strings.Join(errs, "; "))
    +
    +		se := status.New(grpcErrorCode, strings.Join(errs, "; "))
     		t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream)
     		return
     	}
    @@ -1567,22 +1587,20 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) {
     		}
     	}
     
    -	for _, sh := range t.statsHandlers {
    +	if t.statsHandler != nil {
     		if !endStream {
    -			inHeader := &stats.InHeader{
    +			t.statsHandler.HandleRPC(s.ctx, &stats.InHeader{
     				Client:      true,
     				WireLength:  int(frame.Header().Length),
     				Header:      metadata.MD(mdata).Copy(),
     				Compression: s.recvCompress,
    -			}
    -			sh.HandleRPC(s.ctx, inHeader)
    +			})
     		} else {
    -			inTrailer := &stats.InTrailer{
    +			t.statsHandler.HandleRPC(s.ctx, &stats.InTrailer{
     				Client:     true,
     				WireLength: int(frame.Header().Length),
     				Trailer:    metadata.MD(mdata).Copy(),
    -			}
    -			sh.HandleRPC(s.ctx, inTrailer)
    +			})
     		}
     	}
     
    @@ -1590,7 +1608,7 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) {
     		return
     	}
     
    -	status := istatus.NewWithProto(rawStatusCode, grpcMessage, mdata[grpcStatusDetailsBinHeader])
    +	status := istatus.NewWithProto(grpcStatusCode, grpcMessage, mdata[grpcStatusDetailsBinHeader])
     
     	// If client received END_STREAM from server while stream was still active,
     	// send RST_STREAM.
    @@ -1637,7 +1655,7 @@ func (t *http2Client) reader(errCh chan<- error) {
     	// loop to keep reading incoming messages on this transport.
     	for {
     		t.controlBuf.throttle()
    -		frame, err := t.framer.fr.ReadFrame()
    +		frame, err := t.framer.readFrame()
     		if t.keepaliveEnabled {
     			atomic.StoreInt64(&t.lastRead, time.Now().UnixNano())
     		}
    @@ -1652,7 +1670,7 @@ func (t *http2Client) reader(errCh chan<- error) {
     				if s != nil {
     					// use error detail to provide better err message
     					code := http2ErrConvTab[se.Code]
    -					errorDetail := t.framer.fr.ErrorDetail()
    +					errorDetail := t.framer.errorDetail()
     					var msg string
     					if errorDetail != nil {
     						msg = errorDetail.Error()
    @@ -1670,8 +1688,9 @@ func (t *http2Client) reader(errCh chan<- error) {
     		switch frame := frame.(type) {
     		case *http2.MetaHeadersFrame:
     			t.operateHeaders(frame)
    -		case *http2.DataFrame:
    +		case *parsedDataFrame:
     			t.handleData(frame)
    +			frame.data.Free()
     		case *http2.RSTStreamFrame:
     			t.handleRSTStream(frame)
     		case *http2.SettingsFrame:
    @@ -1791,8 +1810,6 @@ func (t *http2Client) socketMetrics() *channelz.EphemeralSocketMetrics {
     	}
     }
     
    -func (t *http2Client) RemoteAddr() net.Addr { return t.remoteAddr }
    -
     func (t *http2Client) incrMsgSent() {
     	if channelz.IsOn() {
     		t.channelz.SocketMetrics.MessagesSent.Add(1)
    diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/vendor/google.golang.org/grpc/internal/transport/http2_server.go
    index 997b0a59b..6f78a6b0c 100644
    --- a/vendor/google.golang.org/grpc/internal/transport/http2_server.go
    +++ b/vendor/google.golang.org/grpc/internal/transport/http2_server.go
    @@ -35,12 +35,15 @@ import (
     
     	"golang.org/x/net/http2"
     	"golang.org/x/net/http2/hpack"
    +	"google.golang.org/protobuf/proto"
    +
    +	"google.golang.org/grpc/internal"
     	"google.golang.org/grpc/internal/grpclog"
     	"google.golang.org/grpc/internal/grpcutil"
     	"google.golang.org/grpc/internal/pretty"
    +	istatus "google.golang.org/grpc/internal/status"
     	"google.golang.org/grpc/internal/syscall"
     	"google.golang.org/grpc/mem"
    -	"google.golang.org/protobuf/proto"
     
     	"google.golang.org/grpc/codes"
     	"google.golang.org/grpc/credentials"
    @@ -84,7 +87,7 @@ type http2Server struct {
     	// updates, reset streams, and various settings) to the controller.
     	controlBuf *controlBuffer
     	fc         *trInFlow
    -	stats      []stats.Handler
    +	stats      stats.Handler
     	// Keepalive and max-age parameters for the server.
     	kp keepalive.ServerParameters
     	// Keepalive enforcement policy.
    @@ -130,6 +133,10 @@ type http2Server struct {
     	maxStreamID uint32 // max stream ID ever seen
     
     	logger *grpclog.PrefixLogger
    +	// setResetPingStrikes is stored as a closure instead of making this a
    +	// method on http2Server to avoid a heap allocation when converting a method
    +	// to a closure for passing to frames objects.
    +	setResetPingStrikes func()
     }
     
     // NewServerTransport creates a http2 transport with conn and configuration
    @@ -162,7 +169,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport,
     	if config.MaxHeaderListSize != nil {
     		maxHeaderListSize = *config.MaxHeaderListSize
     	}
    -	framer := newFramer(conn, writeBufSize, readBufSize, config.SharedWriteBuffer, maxHeaderListSize)
    +	framer := newFramer(conn, writeBufSize, readBufSize, config.SharedWriteBuffer, maxHeaderListSize, config.BufferPool)
     	// Send initial settings as connection preface to client.
     	isettings := []http2.Setting{{
     		ID:  http2.SettingMaxFrameSize,
    @@ -174,16 +181,13 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport,
     			Val: config.MaxStreams,
     		})
     	}
    -	dynamicWindow := true
     	iwz := int32(initialWindowSize)
     	if config.InitialWindowSize >= defaultWindowSize {
     		iwz = config.InitialWindowSize
    -		dynamicWindow = false
     	}
     	icwz := int32(initialWindowSize)
     	if config.InitialConnWindowSize >= defaultWindowSize {
     		icwz = config.InitialConnWindowSize
    -		dynamicWindow = false
     	}
     	if iwz != defaultWindowSize {
     		isettings = append(isettings, http2.Setting{
    @@ -257,13 +261,16 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport,
     		fc:                &trInFlow{limit: uint32(icwz)},
     		state:             reachable,
     		activeStreams:     make(map[uint32]*ServerStream),
    -		stats:             config.StatsHandlers,
    +		stats:             config.StatsHandler,
     		kp:                kp,
     		idle:              time.Now(),
     		kep:               kep,
     		initialWindowSize: iwz,
     		bufferPool:        config.BufferPool,
     	}
    +	t.setResetPingStrikes = func() {
    +		atomic.StoreUint32(&t.resetPingStrikes, 1)
    +	}
     	var czSecurity credentials.ChannelzSecurityValue
     	if au, ok := authInfo.(credentials.ChannelzSecurityInfo); ok {
     		czSecurity = au.GetSecurityValue()
    @@ -283,7 +290,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport,
     	t.logger = prefixLoggerForServerTransport(t)
     
     	t.controlBuf = newControlBuffer(t.done)
    -	if dynamicWindow {
    +	if !config.StaticWindowSize {
     		t.bdpEst = &bdpEstimator{
     			bdp:               initialWindowSize,
     			updateFlowControl: t.updateFlowControl,
    @@ -384,16 +391,15 @@ func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeade
     	}
     	t.maxStreamID = streamID
     
    -	buf := newRecvBuffer()
     	s := &ServerStream{
    -		Stream: &Stream{
    -			id:  streamID,
    -			buf: buf,
    -			fc:  &inFlow{limit: uint32(t.initialWindowSize)},
    +		Stream: Stream{
    +			id: streamID,
    +			fc: inFlow{limit: uint32(t.initialWindowSize)},
     		},
     		st:               t,
     		headerWireLength: int(frame.Header().Length),
     	}
    +	s.Stream.buf.init()
     	var (
     		// if false, content-type was missing or invalid
     		isGRPC      = false
    @@ -594,34 +600,61 @@ func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeade
     			return nil
     		}
     	}
    +
    +	if s.ctx.Err() != nil {
    +		t.mu.Unlock()
    +		// Early abort in case the timeout was zero or so low it already fired.
    +		t.controlBuf.put(&earlyAbortStream{
    +			httpStatus:     http.StatusOK,
    +			streamID:       s.id,
    +			contentSubtype: s.contentSubtype,
    +			status:         status.New(codes.DeadlineExceeded, context.DeadlineExceeded.Error()),
    +			rst:            !frame.StreamEnded(),
    +		})
    +		return nil
    +	}
    +
     	t.activeStreams[streamID] = s
     	if len(t.activeStreams) == 1 {
     		t.idle = time.Time{}
     	}
    +
    +	// Start a timer to close the stream on reaching the deadline.
    +	if timeoutSet {
    +		// We need to wait for s.cancel to be updated before calling
    +		// t.closeStream to avoid data races.
    +		cancelUpdated := make(chan struct{})
    +		timer := internal.TimeAfterFunc(timeout, func() {
    +			<-cancelUpdated
    +			t.closeStream(s, true, http2.ErrCodeCancel, false)
    +		})
    +		oldCancel := s.cancel
    +		s.cancel = func() {
    +			oldCancel()
    +			timer.Stop()
    +		}
    +		close(cancelUpdated)
    +	}
     	t.mu.Unlock()
     	if channelz.IsOn() {
     		t.channelz.SocketMetrics.StreamsStarted.Add(1)
     		t.channelz.SocketMetrics.LastRemoteStreamCreatedTimestamp.Store(time.Now().UnixNano())
     	}
    -	s.requestRead = func(n int) {
    -		t.adjustWindow(s, uint32(n))
    -	}
    +	s.readRequester = s
     	s.ctxDone = s.ctx.Done()
    -	s.wq = newWriteQuota(defaultWriteQuota, s.ctxDone)
    -	s.trReader = &transportReader{
    -		reader: &recvBufferReader{
    +	s.Stream.wq.init(defaultWriteQuota, s.ctxDone)
    +	s.trReader = transportReader{
    +		reader: recvBufferReader{
     			ctx:     s.ctx,
     			ctxDone: s.ctxDone,
    -			recv:    s.buf,
    -		},
    -		windowHandler: func(n int) {
    -			t.updateWindow(s, uint32(n))
    +			recv:    &s.buf,
     		},
    +		windowHandler: s,
     	}
     	// Register the stream with loopy.
     	t.controlBuf.put(&registerStream{
     		streamID: s.id,
    -		wq:       s.wq,
    +		wq:       &s.wq,
     	})
     	handle(s)
     	return nil
    @@ -637,7 +670,7 @@ func (t *http2Server) HandleStreams(ctx context.Context, handle func(*ServerStre
     	}()
     	for {
     		t.controlBuf.throttle()
    -		frame, err := t.framer.fr.ReadFrame()
    +		frame, err := t.framer.readFrame()
     		atomic.StoreInt64(&t.lastRead, time.Now().UnixNano())
     		if err != nil {
     			if se, ok := err.(http2.StreamError); ok {
    @@ -674,8 +707,9 @@ func (t *http2Server) HandleStreams(ctx context.Context, handle func(*ServerStre
     				})
     				continue
     			}
    -		case *http2.DataFrame:
    +		case *parsedDataFrame:
     			t.handleData(frame)
    +			frame.data.Free()
     		case *http2.RSTStreamFrame:
     			t.handleRSTStream(frame)
     		case *http2.SettingsFrame:
    @@ -755,7 +789,7 @@ func (t *http2Server) updateFlowControl(n uint32) {
     
     }
     
    -func (t *http2Server) handleData(f *http2.DataFrame) {
    +func (t *http2Server) handleData(f *parsedDataFrame) {
     	size := f.Header().Length
     	var sendBDPPing bool
     	if t.bdpEst != nil {
    @@ -800,22 +834,15 @@ func (t *http2Server) handleData(f *http2.DataFrame) {
     			t.closeStream(s, true, http2.ErrCodeFlowControl, false)
     			return
     		}
    +		dataLen := f.data.Len()
     		if f.Header().Flags.Has(http2.FlagDataPadded) {
    -			if w := s.fc.onRead(size - uint32(len(f.Data()))); w > 0 {
    +			if w := s.fc.onRead(size - uint32(dataLen)); w > 0 {
     				t.controlBuf.put(&outgoingWindowUpdate{s.id, w})
     			}
     		}
    -		// TODO(bradfitz, zhaoq): A copy is required here because there is no
    -		// guarantee f.Data() is consumed before the arrival of next frame.
    -		// Can this copy be eliminated?
    -		if len(f.Data()) > 0 {
    -			pool := t.bufferPool
    -			if pool == nil {
    -				// Note that this is only supposed to be nil in tests. Otherwise, stream is
    -				// always initialized with a BufferPool.
    -				pool = mem.DefaultBufferPool()
    -			}
    -			s.write(recvMsg{buffer: mem.Copy(f.Data(), pool)})
    +		if dataLen > 0 {
    +			f.data.Ref()
    +			s.write(recvMsg{buffer: f.data})
     		}
     	}
     	if f.StreamEnded() {
    @@ -998,10 +1025,6 @@ func (t *http2Server) writeHeader(s *ServerStream, md metadata.MD) error {
     	return nil
     }
     
    -func (t *http2Server) setResetPingStrikes() {
    -	atomic.StoreUint32(&t.resetPingStrikes, 1)
    -}
    -
     func (t *http2Server) writeHeaderLocked(s *ServerStream) error {
     	// TODO(mmukhi): Benchmark if the performance gets better if count the metadata and other header fields
     	// first and create a slice of that exact size.
    @@ -1026,19 +1049,18 @@ func (t *http2Server) writeHeaderLocked(s *ServerStream) error {
     		t.closeStream(s, true, http2.ErrCodeInternal, false)
     		return ErrHeaderListSizeLimitViolation
     	}
    -	for _, sh := range t.stats {
    +	if t.stats != nil {
     		// Note: Headers are compressed with hpack after this call returns.
     		// No WireLength field is set here.
    -		outHeader := &stats.OutHeader{
    +		t.stats.HandleRPC(s.Context(), &stats.OutHeader{
     			Header:      s.header.Copy(),
     			Compression: s.sendCompress,
    -		}
    -		sh.HandleRPC(s.Context(), outHeader)
    +		})
     	}
     	return nil
     }
     
    -// WriteStatus sends stream status to the client and terminates the stream.
    +// writeStatus sends stream status to the client and terminates the stream.
     // There is no further I/O operations being able to perform on this stream.
     // TODO(zhaoq): Now it indicates the end of entire stream. Revisit if early
     // OK is adopted.
    @@ -1066,7 +1088,7 @@ func (t *http2Server) writeStatus(s *ServerStream, st *status.Status) error {
     	headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-status", Value: strconv.Itoa(int(st.Code()))})
     	headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-message", Value: encodeGrpcMessage(st.Message())})
     
    -	if p := st.Proto(); p != nil && len(p.Details) > 0 {
    +	if p := istatus.RawStatusProto(st); len(p.GetDetails()) > 0 {
     		// Do not use the user's grpc-status-details-bin (if present) if we are
     		// even attempting to set our own.
     		delete(s.trailer, grpcStatusDetailsBinHeader)
    @@ -1101,10 +1123,10 @@ func (t *http2Server) writeStatus(s *ServerStream, st *status.Status) error {
     	// Send a RST_STREAM after the trailers if the client has not already half-closed.
     	rst := s.getState() == streamActive
     	t.finishStream(s, rst, http2.ErrCodeNo, trailingHeader, true)
    -	for _, sh := range t.stats {
    +	if t.stats != nil {
     		// Note: The trailer fields are compressed with hpack after this call returns.
     		// No WireLength field is set here.
    -		sh.HandleRPC(s.Context(), &stats.OutTrailer{
    +		t.stats.HandleRPC(s.Context(), &stats.OutTrailer{
     			Trailer: s.trailer.Copy(),
     		})
     	}
    @@ -1114,17 +1136,13 @@ func (t *http2Server) writeStatus(s *ServerStream, st *status.Status) error {
     // Write converts the data into HTTP2 data frame and sends it out. Non-nil error
     // is returns if it fails (e.g., framing error, transport error).
     func (t *http2Server) write(s *ServerStream, hdr []byte, data mem.BufferSlice, _ *WriteOptions) error {
    -	reader := data.Reader()
    -
     	if !s.isHeaderSent() { // Headers haven't been written yet.
     		if err := t.writeHeader(s, nil); err != nil {
    -			_ = reader.Close()
     			return err
     		}
     	} else {
     		// Writing headers checks for this condition.
     		if s.getState() == streamDone {
    -			_ = reader.Close()
     			return t.streamContextErr(s)
     		}
     	}
    @@ -1132,15 +1150,16 @@ func (t *http2Server) write(s *ServerStream, hdr []byte, data mem.BufferSlice, _
     	df := &dataFrame{
     		streamID:    s.id,
     		h:           hdr,
    -		reader:      reader,
    +		data:        data,
     		onEachWrite: t.setResetPingStrikes,
     	}
    -	if err := s.wq.get(int32(len(hdr) + df.reader.Remaining())); err != nil {
    -		_ = reader.Close()
    +	dataLen := data.Len()
    +	if err := s.wq.get(int32(len(hdr) + dataLen)); err != nil {
     		return t.streamContextErr(s)
     	}
    +	data.Ref()
     	if err := t.controlBuf.put(df); err != nil {
    -		_ = reader.Close()
    +		data.Free()
     		return err
     	}
     	t.incrMsgSent()
    @@ -1274,9 +1293,9 @@ func (t *http2Server) Close(err error) {
     
     // deleteStream deletes the stream s from transport's active streams.
     func (t *http2Server) deleteStream(s *ServerStream, eosReceived bool) {
    -
     	t.mu.Lock()
    -	if _, ok := t.activeStreams[s.id]; ok {
    +	_, isActive := t.activeStreams[s.id]
    +	if isActive {
     		delete(t.activeStreams, s.id)
     		if len(t.activeStreams) == 0 {
     			t.idle = time.Now()
    @@ -1284,7 +1303,7 @@ func (t *http2Server) deleteStream(s *ServerStream, eosReceived bool) {
     	}
     	t.mu.Unlock()
     
    -	if channelz.IsOn() {
    +	if isActive && channelz.IsOn() {
     		if eosReceived {
     			t.channelz.SocketMetrics.StreamsSucceeded.Add(1)
     		} else {
    @@ -1324,6 +1343,9 @@ func (t *http2Server) closeStream(s *ServerStream, rst bool, rstCode http2.ErrCo
     	// called to interrupt the potential blocking on other goroutines.
     	s.cancel()
     
    +	// We can't return early even if the stream's state is "done" as the state
    +	// might have been set by the `finishStream` method. Deleting the stream via
    +	// `finishStream` can get blocked on flow control.
     	s.swapState(streamDone)
     	t.deleteStream(s, eosReceived)
     
    diff --git a/vendor/google.golang.org/grpc/internal/transport/http_util.go b/vendor/google.golang.org/grpc/internal/transport/http_util.go
    index 3613d7b64..5bbb641ad 100644
    --- a/vendor/google.golang.org/grpc/internal/transport/http_util.go
    +++ b/vendor/google.golang.org/grpc/internal/transport/http_util.go
    @@ -25,7 +25,6 @@ import (
     	"fmt"
     	"io"
     	"math"
    -	"net"
     	"net/http"
     	"net/url"
     	"strconv"
    @@ -37,6 +36,7 @@ import (
     	"golang.org/x/net/http2"
     	"golang.org/x/net/http2/hpack"
     	"google.golang.org/grpc/codes"
    +	"google.golang.org/grpc/mem"
     )
     
     const (
    @@ -196,11 +196,11 @@ func decodeTimeout(s string) (time.Duration, error) {
     	if !ok {
     		return 0, fmt.Errorf("transport: timeout unit is not recognized: %q", s)
     	}
    -	t, err := strconv.ParseInt(s[:size-1], 10, 64)
    +	t, err := strconv.ParseUint(s[:size-1], 10, 64)
     	if err != nil {
     		return 0, err
     	}
    -	const maxHours = math.MaxInt64 / int64(time.Hour)
    +	const maxHours = math.MaxInt64 / uint64(time.Hour)
     	if d == time.Hour && t > maxHours {
     		// This timeout would overflow math.MaxInt64; clamp it.
     		return time.Duration(math.MaxInt64), nil
    @@ -300,11 +300,11 @@ type bufWriter struct {
     	buf       []byte
     	offset    int
     	batchSize int
    -	conn      net.Conn
    +	conn      io.Writer
     	err       error
     }
     
    -func newBufWriter(conn net.Conn, batchSize int, pool *sync.Pool) *bufWriter {
    +func newBufWriter(conn io.Writer, batchSize int, pool *sync.Pool) *bufWriter {
     	w := &bufWriter{
     		batchSize: batchSize,
     		conn:      conn,
    @@ -388,15 +388,29 @@ func toIOError(err error) error {
     	return ioError{error: err}
     }
     
    +type parsedDataFrame struct {
    +	http2.FrameHeader
    +	data mem.Buffer
    +}
    +
    +func (df *parsedDataFrame) StreamEnded() bool {
    +	return df.FrameHeader.Flags.Has(http2.FlagDataEndStream)
    +}
    +
     type framer struct {
    -	writer *bufWriter
    -	fr     *http2.Framer
    +	writer    *bufWriter
    +	fr        *http2.Framer
    +	headerBuf []byte // cached slice for framer headers to reduce heap allocs.
    +	reader    io.Reader
    +	dataFrame parsedDataFrame // Cached data frame to avoid heap allocations.
    +	pool      mem.BufferPool
    +	errDetail error
     }
     
     var writeBufferPoolMap = make(map[int]*sync.Pool)
     var writeBufferMutex sync.Mutex
     
    -func newFramer(conn net.Conn, writeBufferSize, readBufferSize int, sharedWriteBuffer bool, maxHeaderListSize uint32) *framer {
    +func newFramer(conn io.ReadWriter, writeBufferSize, readBufferSize int, sharedWriteBuffer bool, maxHeaderListSize uint32, memPool mem.BufferPool) *framer {
     	if writeBufferSize < 0 {
     		writeBufferSize = 0
     	}
    @@ -412,6 +426,8 @@ func newFramer(conn net.Conn, writeBufferSize, readBufferSize int, sharedWriteBu
     	f := &framer{
     		writer: w,
     		fr:     http2.NewFramer(w, r),
    +		reader: r,
    +		pool:   memPool,
     	}
     	f.fr.SetMaxReadFrameSize(http2MaxFrameLen)
     	// Opt-in to Frame reuse API on framer to reduce garbage.
    @@ -422,6 +438,146 @@ func newFramer(conn net.Conn, writeBufferSize, readBufferSize int, sharedWriteBu
     	return f
     }
     
    +// writeData writes a DATA frame.
    +//
    +// It is the caller's responsibility not to violate the maximum frame size.
    +func (f *framer) writeData(streamID uint32, endStream bool, data [][]byte) error {
    +	var flags http2.Flags
    +	if endStream {
    +		flags = http2.FlagDataEndStream
    +	}
    +	length := uint32(0)
    +	for _, d := range data {
    +		length += uint32(len(d))
    +	}
    +	// TODO: Replace the header write with the framer API being added in
    +	// https://github.com/golang/go/issues/66655.
    +	f.headerBuf = append(f.headerBuf[:0],
    +		byte(length>>16),
    +		byte(length>>8),
    +		byte(length),
    +		byte(http2.FrameData),
    +		byte(flags),
    +		byte(streamID>>24),
    +		byte(streamID>>16),
    +		byte(streamID>>8),
    +		byte(streamID))
    +	if _, err := f.writer.Write(f.headerBuf); err != nil {
    +		return err
    +	}
    +	for _, d := range data {
    +		if _, err := f.writer.Write(d); err != nil {
    +			return err
    +		}
    +	}
    +	return nil
    +}
    +
    +// readFrame reads a single frame. The returned Frame is only valid
    +// until the next call to readFrame.
    +func (f *framer) readFrame() (any, error) {
    +	f.errDetail = nil
    +	fh, err := f.fr.ReadFrameHeader()
    +	if err != nil {
    +		f.errDetail = f.fr.ErrorDetail()
    +		return nil, err
    +	}
    +	// Read the data frame directly from the underlying io.Reader to avoid
    +	// copies.
    +	if fh.Type == http2.FrameData {
    +		err = f.readDataFrame(fh)
    +		return &f.dataFrame, err
    +	}
    +	fr, err := f.fr.ReadFrameForHeader(fh)
    +	if err != nil {
    +		f.errDetail = f.fr.ErrorDetail()
    +		return nil, err
    +	}
    +	return fr, err
    +}
    +
    +// errorDetail returns a more detailed error of the last error
    +// returned by framer.readFrame. For instance, if readFrame
    +// returns a StreamError with code PROTOCOL_ERROR, errorDetail
    +// will say exactly what was invalid. errorDetail is not guaranteed
    +// to return a non-nil value.
    +// errorDetail is reset after the next call to readFrame.
    +func (f *framer) errorDetail() error {
    +	return f.errDetail
    +}
    +
    +func (f *framer) readDataFrame(fh http2.FrameHeader) (err error) {
    +	if fh.StreamID == 0 {
    +		// DATA frames MUST be associated with a stream. If a
    +		// DATA frame is received whose stream identifier
    +		// field is 0x0, the recipient MUST respond with a
    +		// connection error (Section 5.4.1) of type
    +		// PROTOCOL_ERROR.
    +		f.errDetail = errors.New("DATA frame with stream ID 0")
    +		return http2.ConnectionError(http2.ErrCodeProtocol)
    +	}
    +	// Converting a *[]byte to a mem.SliceBuffer incurs a heap allocation. This
    +	// conversion is performed by mem.NewBuffer. To avoid the extra allocation
    +	// a []byte is allocated directly if required and cast to a mem.SliceBuffer.
    +	var buf []byte
    +	// poolHandle is the pointer returned by the buffer pool (if it's used.).
    +	var poolHandle *[]byte
    +	useBufferPool := !mem.IsBelowBufferPoolingThreshold(int(fh.Length))
    +	if useBufferPool {
    +		poolHandle = f.pool.Get(int(fh.Length))
    +		buf = *poolHandle
    +		defer func() {
    +			if err != nil {
    +				f.pool.Put(poolHandle)
    +			}
    +		}()
    +	} else {
    +		buf = make([]byte, int(fh.Length))
    +	}
    +	if fh.Flags.Has(http2.FlagDataPadded) {
    +		if fh.Length == 0 {
    +			return io.ErrUnexpectedEOF
    +		}
    +		// This initial 1-byte read can be inefficient for unbuffered readers,
    +		// but it allows the rest of the payload to be read directly to the
    +		// start of the destination slice. This makes it easy to return the
    +		// original slice back to the buffer pool.
    +		if _, err := io.ReadFull(f.reader, buf[:1]); err != nil {
    +			return err
    +		}
    +		padSize := buf[0]
    +		buf = buf[:len(buf)-1]
    +		if int(padSize) > len(buf) {
    +			// If the length of the padding is greater than the
    +			// length of the frame payload, the recipient MUST
    +			// treat this as a connection error.
    +			// Filed: https://github.com/http2/http2-spec/issues/610
    +			f.errDetail = errors.New("pad size larger than data payload")
    +			return http2.ConnectionError(http2.ErrCodeProtocol)
    +		}
    +		if _, err := io.ReadFull(f.reader, buf); err != nil {
    +			return err
    +		}
    +		buf = buf[:len(buf)-int(padSize)]
    +	} else if _, err := io.ReadFull(f.reader, buf); err != nil {
    +		return err
    +	}
    +
    +	f.dataFrame.FrameHeader = fh
    +	if useBufferPool {
    +		// Update the handle to point to the (potentially re-sliced) buf.
    +		*poolHandle = buf
    +		f.dataFrame.data = mem.NewBuffer(poolHandle, f.pool)
    +	} else {
    +		f.dataFrame.data = mem.SliceBuffer(buf)
    +	}
    +	return nil
    +}
    +
    +func (df *parsedDataFrame) Header() http2.FrameHeader {
    +	return df.FrameHeader
    +}
    +
     func getWriteBufferPool(size int) *sync.Pool {
     	writeBufferMutex.Lock()
     	defer writeBufferMutex.Unlock()
    @@ -439,8 +595,8 @@ func getWriteBufferPool(size int) *sync.Pool {
     	return pool
     }
     
    -// parseDialTarget returns the network and address to pass to dialer.
    -func parseDialTarget(target string) (string, string) {
    +// ParseDialTarget returns the network and address to pass to dialer.
    +func ParseDialTarget(target string) (string, string) {
     	net := "tcp"
     	m1 := strings.Index(target, ":")
     	m2 := strings.Index(target, ":/")
    diff --git a/vendor/google.golang.org/grpc/internal/transport/server_stream.go b/vendor/google.golang.org/grpc/internal/transport/server_stream.go
    index a22a90151..ed6a13b75 100644
    --- a/vendor/google.golang.org/grpc/internal/transport/server_stream.go
    +++ b/vendor/google.golang.org/grpc/internal/transport/server_stream.go
    @@ -32,21 +32,24 @@ import (
     
     // ServerStream implements streaming functionality for a gRPC server.
     type ServerStream struct {
    -	*Stream // Embed for common stream functionality.
    +	Stream // Embed for common stream functionality.
     
     	st      internalServerTransport
    -	ctxDone <-chan struct{}    // closed at the end of stream.  Cache of ctx.Done() (for performance)
    -	cancel  context.CancelFunc // invoked at the end of stream to cancel ctx.
    +	ctxDone <-chan struct{} // closed at the end of stream.  Cache of ctx.Done() (for performance)
    +	// cancel is invoked at the end of stream to cancel ctx. It also stops the
    +	// timer for monitoring the rpc deadline if configured.
    +	cancel func()
     
     	// Holds compressor names passed in grpc-accept-encoding metadata from the
     	// client.
     	clientAdvertisedCompressors string
    -	headerWireLength            int
     
     	// hdrMu protects outgoing header and trailer metadata.
     	hdrMu      sync.Mutex
     	header     metadata.MD // the outgoing header metadata.  Updated by WriteHeader.
     	headerSent atomic.Bool // atomically set when the headers are sent out.
    +
    +	headerWireLength int
     }
     
     // Read reads an n byte message from the input stream.
    @@ -176,3 +179,11 @@ func (s *ServerStream) SetTrailer(md metadata.MD) error {
     	s.hdrMu.Unlock()
     	return nil
     }
    +
    +func (s *ServerStream) requestRead(n int) {
    +	s.st.adjustWindow(s, uint32(n))
    +}
    +
    +func (s *ServerStream) updateWindow(n int) {
    +	s.st.updateWindow(s, uint32(n))
    +}
    diff --git a/vendor/google.golang.org/grpc/internal/transport/transport.go b/vendor/google.golang.org/grpc/internal/transport/transport.go
    index af4a4aeab..6daf1e002 100644
    --- a/vendor/google.golang.org/grpc/internal/transport/transport.go
    +++ b/vendor/google.golang.org/grpc/internal/transport/transport.go
    @@ -68,11 +68,11 @@ type recvBuffer struct {
     	err     error
     }
     
    -func newRecvBuffer() *recvBuffer {
    -	b := &recvBuffer{
    -		c: make(chan recvMsg, 1),
    -	}
    -	return b
    +// init allows a recvBuffer to be initialized in-place, which is useful
    +// for resetting a buffer or for avoiding a heap allocation when the buffer
    +// is embedded in another struct.
    +func (b *recvBuffer) init() {
    +	b.c = make(chan recvMsg, 1)
     }
     
     func (b *recvBuffer) put(r recvMsg) {
    @@ -123,12 +123,13 @@ func (b *recvBuffer) get() <-chan recvMsg {
     // recvBufferReader implements io.Reader interface to read the data from
     // recvBuffer.
     type recvBufferReader struct {
    -	closeStream func(error) // Closes the client transport stream with the given error and nil trailer metadata.
    -	ctx         context.Context
    -	ctxDone     <-chan struct{} // cache of ctx.Done() (for performance).
    -	recv        *recvBuffer
    -	last        mem.Buffer // Stores the remaining data in the previous calls.
    -	err         error
    +	_            noCopy
    +	clientStream *ClientStream // The client transport stream is closed with a status representing ctx.Err() and nil trailer metadata.
    +	ctx          context.Context
    +	ctxDone      <-chan struct{} // cache of ctx.Done() (for performance).
    +	recv         *recvBuffer
    +	last         mem.Buffer // Stores the remaining data in the previous calls.
    +	err          error
     }
     
     func (r *recvBufferReader) ReadMessageHeader(header []byte) (n int, err error) {
    @@ -139,7 +140,7 @@ func (r *recvBufferReader) ReadMessageHeader(header []byte) (n int, err error) {
     		n, r.last = mem.ReadUnsafe(header, r.last)
     		return n, nil
     	}
    -	if r.closeStream != nil {
    +	if r.clientStream != nil {
     		n, r.err = r.readMessageHeaderClient(header)
     	} else {
     		n, r.err = r.readMessageHeader(header)
    @@ -164,7 +165,7 @@ func (r *recvBufferReader) Read(n int) (buf mem.Buffer, err error) {
     		}
     		return buf, nil
     	}
    -	if r.closeStream != nil {
    +	if r.clientStream != nil {
     		buf, r.err = r.readClient(n)
     	} else {
     		buf, r.err = r.read(n)
    @@ -209,7 +210,7 @@ func (r *recvBufferReader) readMessageHeaderClient(header []byte) (n int, err er
     		// TODO: delaying ctx error seems like a unnecessary side effect. What
     		// we really want is to mark the stream as done, and return ctx error
     		// faster.
    -		r.closeStream(ContextErr(r.ctx.Err()))
    +		r.clientStream.Close(ContextErr(r.ctx.Err()))
     		m := <-r.recv.get()
     		return r.readMessageHeaderAdditional(m, header)
     	case m := <-r.recv.get():
    @@ -236,7 +237,7 @@ func (r *recvBufferReader) readClient(n int) (buf mem.Buffer, err error) {
     		// TODO: delaying ctx error seems like a unnecessary side effect. What
     		// we really want is to mark the stream as done, and return ctx error
     		// faster.
    -		r.closeStream(ContextErr(r.ctx.Err()))
    +		r.clientStream.Close(ContextErr(r.ctx.Err()))
     		m := <-r.recv.get()
     		return r.readAdditional(m, n)
     	case m := <-r.recv.get():
    @@ -285,27 +286,32 @@ const (
     
     // Stream represents an RPC in the transport layer.
     type Stream struct {
    -	id           uint32
     	ctx          context.Context // the associated context of the stream
     	method       string          // the associated RPC method of the stream
     	recvCompress string
     	sendCompress string
    -	buf          *recvBuffer
    -	trReader     *transportReader
    -	fc           *inFlow
    -	wq           *writeQuota
    -
    -	// Callback to state application's intentions to read data. This
    -	// is used to adjust flow control, if needed.
    -	requestRead func(int)
     
    -	state streamState
    +	readRequester readRequester
     
     	// contentSubtype is the content-subtype for requests.
     	// this must be lowercase or the behavior is undefined.
     	contentSubtype string
     
     	trailer metadata.MD // the key-value map of trailer metadata.
    +
    +	// Non-pointer fields are at the end to optimize GC performance.
    +	state    streamState
    +	id       uint32
    +	buf      recvBuffer
    +	trReader transportReader
    +	fc       inFlow
    +	wq       writeQuota
    +}
    +
    +// readRequester is used to state application's intentions to read data. This
    +// is used to adjust flow control, if needed.
    +type readRequester interface {
    +	requestRead(int)
     }
     
     func (s *Stream) swapState(st streamState) streamState {
    @@ -355,7 +361,7 @@ func (s *Stream) ReadMessageHeader(header []byte) (err error) {
     	if er := s.trReader.er; er != nil {
     		return er
     	}
    -	s.requestRead(len(header))
    +	s.readRequester.requestRead(len(header))
     	for len(header) != 0 {
     		n, err := s.trReader.ReadMessageHeader(header)
     		header = header[n:]
    @@ -378,7 +384,7 @@ func (s *Stream) read(n int) (data mem.BufferSlice, err error) {
     	if er := s.trReader.er; er != nil {
     		return nil, er
     	}
    -	s.requestRead(n)
    +	s.readRequester.requestRead(n)
     	for n != 0 {
     		buf, err := s.trReader.Read(n)
     		var bufLen int
    @@ -401,16 +407,34 @@ func (s *Stream) read(n int) (data mem.BufferSlice, err error) {
     	return data, nil
     }
     
    +// noCopy may be embedded into structs which must not be copied
    +// after the first use.
    +//
    +// See https://golang.org/issues/8005#issuecomment-190753527
    +// for details.
    +type noCopy struct {
    +}
    +
    +func (*noCopy) Lock()   {}
    +func (*noCopy) Unlock() {}
    +
     // transportReader reads all the data available for this Stream from the transport and
     // passes them into the decoder, which converts them into a gRPC message stream.
     // The error is io.EOF when the stream is done or another non-nil error if
     // the stream broke.
     type transportReader struct {
    -	reader *recvBufferReader
    +	_ noCopy
     	// The handler to control the window update procedure for both this
     	// particular stream and the associated transport.
    -	windowHandler func(int)
    +	windowHandler windowHandler
     	er            error
    +	reader        recvBufferReader
    +}
    +
    +// The handler to control the window update procedure for both this
    +// particular stream and the associated transport.
    +type windowHandler interface {
    +	updateWindow(int)
     }
     
     func (t *transportReader) ReadMessageHeader(header []byte) (int, error) {
    @@ -419,7 +443,7 @@ func (t *transportReader) ReadMessageHeader(header []byte) (int, error) {
     		t.er = err
     		return 0, err
     	}
    -	t.windowHandler(n)
    +	t.windowHandler.updateWindow(n)
     	return n, nil
     }
     
    @@ -429,7 +453,7 @@ func (t *transportReader) Read(n int) (mem.Buffer, error) {
     		t.er = err
     		return buf, err
     	}
    -	t.windowHandler(buf.Len())
    +	t.windowHandler.updateWindow(buf.Len())
     	return buf, nil
     }
     
    @@ -454,7 +478,7 @@ type ServerConfig struct {
     	ConnectionTimeout     time.Duration
     	Credentials           credentials.TransportCredentials
     	InTapHandle           tap.ServerInHandle
    -	StatsHandlers         []stats.Handler
    +	StatsHandler          stats.Handler
     	KeepaliveParams       keepalive.ServerParameters
     	KeepalivePolicy       keepalive.EnforcementPolicy
     	InitialWindowSize     int32
    @@ -466,6 +490,7 @@ type ServerConfig struct {
     	MaxHeaderListSize     *uint32
     	HeaderTableSize       *uint32
     	BufferPool            mem.BufferPool
    +	StaticWindowSize      bool
     }
     
     // ConnectOptions covers all relevant options for communicating with the server.
    @@ -504,6 +529,8 @@ type ConnectOptions struct {
     	MaxHeaderListSize *uint32
     	// The mem.BufferPool to use when reading/writing to the wire.
     	BufferPool mem.BufferPool
    +	// StaticWindowSize controls whether dynamic window sizing is enabled.
    +	StaticWindowSize bool
     }
     
     // WriteOptions provides additional hints and information for message
    @@ -526,6 +553,12 @@ type CallHdr struct {
     	// outbound message.
     	SendCompress string
     
    +	// AcceptedCompressors overrides the grpc-accept-encoding header for this
    +	// call. When nil, the transport advertises the default set of registered
    +	// compressors. A non-nil pointer overrides that value (including the empty
    +	// string to advertise none).
    +	AcceptedCompressors *string
    +
     	// Creds specifies credentials.PerRPCCredentials for a call.
     	Creds credentials.PerRPCCredentials
     
    @@ -540,6 +573,11 @@ type CallHdr struct {
     	PreviousAttempts int // value of grpc-previous-rpc-attempts header to set
     
     	DoneFunc func() // called when the stream is finished
    +
    +	// Authority is used to explicitly override the `:authority` header. If set,
    +	// this value takes precedence over the Host field and will be used as the
    +	// value for the `:authority` header.
    +	Authority string
     }
     
     // ClientTransport is the common interface for all gRPC client-side transport
    @@ -576,8 +614,9 @@ type ClientTransport interface {
     	// with a human readable string with debug info.
     	GetGoAwayReason() (GoAwayReason, string)
     
    -	// RemoteAddr returns the remote network address.
    -	RemoteAddr() net.Addr
    +	// Peer returns information about the peer associated with the Transport.
    +	// The returned information includes authentication and network address details.
    +	Peer() *peer.Peer
     }
     
     // ServerTransport is the common interface for all gRPC server-side transport
    @@ -607,6 +646,8 @@ type internalServerTransport interface {
     	write(s *ServerStream, hdr []byte, data mem.BufferSlice, opts *WriteOptions) error
     	writeStatus(s *ServerStream, st *status.Status) error
     	incrMsgRecv()
    +	adjustWindow(s *ServerStream, n uint32)
    +	updateWindow(s *ServerStream, n uint32)
     }
     
     // connectionErrorf creates an ConnectionError with the specified error description.
    diff --git a/vendor/google.golang.org/grpc/mem/buffer_pool.go b/vendor/google.golang.org/grpc/mem/buffer_pool.go
    index c37c58c02..e37afdd19 100644
    --- a/vendor/google.golang.org/grpc/mem/buffer_pool.go
    +++ b/vendor/google.golang.org/grpc/mem/buffer_pool.go
    @@ -32,12 +32,17 @@ type BufferPool interface {
     	Get(length int) *[]byte
     
     	// Put returns a buffer to the pool.
    +	//
    +	// The provided pointer must hold a prefix of the buffer obtained via
    +	// BufferPool.Get to ensure the buffer's entire capacity can be re-used.
     	Put(*[]byte)
     }
     
    +const goPageSize = 4 << 10 // 4KiB. N.B. this must be a power of 2.
    +
     var defaultBufferPoolSizes = []int{
     	256,
    -	4 << 10,  // 4KB (go page size)
    +	goPageSize,
     	16 << 10, // 16KB (max HTTP/2 frame size used by gRPC)
     	32 << 10, // 32KB (default buffer size for io.Copy)
     	1 << 20,  // 1MB
    @@ -118,7 +123,11 @@ type sizedBufferPool struct {
     }
     
     func (p *sizedBufferPool) Get(size int) *[]byte {
    -	buf := p.pool.Get().(*[]byte)
    +	buf, ok := p.pool.Get().(*[]byte)
    +	if !ok {
    +		buf := make([]byte, size, p.defaultSize)
    +		return &buf
    +	}
     	b := *buf
     	clear(b[:cap(b)])
     	*buf = b[:size]
    @@ -137,12 +146,6 @@ func (p *sizedBufferPool) Put(buf *[]byte) {
     
     func newSizedBufferPool(size int) *sizedBufferPool {
     	return &sizedBufferPool{
    -		pool: sync.Pool{
    -			New: func() any {
    -				buf := make([]byte, size)
    -				return &buf
    -			},
    -		},
     		defaultSize: size,
     	}
     }
    @@ -160,6 +163,7 @@ type simpleBufferPool struct {
     func (p *simpleBufferPool) Get(size int) *[]byte {
     	bs, ok := p.pool.Get().(*[]byte)
     	if ok && cap(*bs) >= size {
    +		clear((*bs)[:cap(*bs)])
     		*bs = (*bs)[:size]
     		return bs
     	}
    @@ -170,7 +174,14 @@ func (p *simpleBufferPool) Get(size int) *[]byte {
     		p.pool.Put(bs)
     	}
     
    -	b := make([]byte, size)
    +	// If we're going to allocate, round up to the nearest page. This way if
    +	// requests frequently arrive with small variation we don't allocate
    +	// repeatedly if we get unlucky and they increase over time. By default we
    +	// only allocate here if size > 1MiB. Because goPageSize is a power of 2, we
    +	// can round up efficiently.
    +	allocSize := (size + goPageSize - 1) & ^(goPageSize - 1)
    +
    +	b := make([]byte, size, allocSize)
     	return &b
     }
     
    diff --git a/vendor/google.golang.org/grpc/mem/buffer_slice.go b/vendor/google.golang.org/grpc/mem/buffer_slice.go
    index 65002e2cc..084fb19c6 100644
    --- a/vendor/google.golang.org/grpc/mem/buffer_slice.go
    +++ b/vendor/google.golang.org/grpc/mem/buffer_slice.go
    @@ -19,6 +19,7 @@
     package mem
     
     import (
    +	"fmt"
     	"io"
     )
     
    @@ -117,47 +118,53 @@ func (s BufferSlice) MaterializeToBuffer(pool BufferPool) Buffer {
     
     // Reader returns a new Reader for the input slice after taking references to
     // each underlying buffer.
    -func (s BufferSlice) Reader() Reader {
    +func (s BufferSlice) Reader() *Reader {
     	s.Ref()
    -	return &sliceReader{
    +	return &Reader{
     		data: s,
     		len:  s.Len(),
     	}
     }
     
     // Reader exposes a BufferSlice's data as an io.Reader, allowing it to interface
    -// with other parts systems. It also provides an additional convenience method
    -// Remaining(), which returns the number of unread bytes remaining in the slice.
    +// with other systems.
    +//
     // Buffers will be freed as they are read.
    -type Reader interface {
    -	io.Reader
    -	io.ByteReader
    -	// Close frees the underlying BufferSlice and never returns an error. Subsequent
    -	// calls to Read will return (0, io.EOF).
    -	Close() error
    -	// Remaining returns the number of unread bytes remaining in the slice.
    -	Remaining() int
    -}
    -
    -type sliceReader struct {
    +//
    +// A Reader can be constructed from a BufferSlice; alternatively the zero value
    +// of a Reader may be used after calling Reset on it.
    +type Reader struct {
     	data BufferSlice
     	len  int
     	// The index into data[0].ReadOnlyData().
     	bufferIdx int
     }
     
    -func (r *sliceReader) Remaining() int {
    +// Remaining returns the number of unread bytes remaining in the slice.
    +func (r *Reader) Remaining() int {
     	return r.len
     }
     
    -func (r *sliceReader) Close() error {
    +// Reset frees the currently held buffer slice and starts reading from the
    +// provided slice. This allows reusing the reader object.
    +func (r *Reader) Reset(s BufferSlice) {
    +	r.data.Free()
    +	s.Ref()
    +	r.data = s
    +	r.len = s.Len()
    +	r.bufferIdx = 0
    +}
    +
    +// Close frees the underlying BufferSlice and never returns an error. Subsequent
    +// calls to Read will return (0, io.EOF).
    +func (r *Reader) Close() error {
     	r.data.Free()
     	r.data = nil
     	r.len = 0
     	return nil
     }
     
    -func (r *sliceReader) freeFirstBufferIfEmpty() bool {
    +func (r *Reader) freeFirstBufferIfEmpty() bool {
     	if len(r.data) == 0 || r.bufferIdx != len(r.data[0].ReadOnlyData()) {
     		return false
     	}
    @@ -168,7 +175,7 @@ func (r *sliceReader) freeFirstBufferIfEmpty() bool {
     	return true
     }
     
    -func (r *sliceReader) Read(buf []byte) (n int, _ error) {
    +func (r *Reader) Read(buf []byte) (n int, _ error) {
     	if r.len == 0 {
     		return 0, io.EOF
     	}
    @@ -191,7 +198,8 @@ func (r *sliceReader) Read(buf []byte) (n int, _ error) {
     	return n, nil
     }
     
    -func (r *sliceReader) ReadByte() (byte, error) {
    +// ReadByte reads a single byte.
    +func (r *Reader) ReadByte() (byte, error) {
     	if r.len == 0 {
     		return 0, io.EOF
     	}
    @@ -279,3 +287,59 @@ nextBuffer:
     		}
     	}
     }
    +
    +// Discard skips the next n bytes, returning the number of bytes discarded.
    +//
    +// It frees buffers as they are fully consumed.
    +//
    +// If Discard skips fewer than n bytes, it also returns an error.
    +func (r *Reader) Discard(n int) (discarded int, err error) {
    +	total := n
    +	for n > 0 && r.len > 0 {
    +		curData := r.data[0].ReadOnlyData()
    +		curSize := min(n, len(curData)-r.bufferIdx)
    +		n -= curSize
    +		r.len -= curSize
    +		r.bufferIdx += curSize
    +		if r.bufferIdx >= len(curData) {
    +			r.data[0].Free()
    +			r.data = r.data[1:]
    +			r.bufferIdx = 0
    +		}
    +	}
    +	discarded = total - n
    +	if n > 0 {
    +		return discarded, fmt.Errorf("insufficient bytes in reader")
    +	}
    +	return discarded, nil
    +}
    +
    +// Peek returns the next n bytes without advancing the reader.
    +//
    +// Peek appends results to the provided res slice and returns the updated slice.
    +// This pattern allows re-using the storage of res if it has sufficient
    +// capacity.
    +//
    +// The returned subslices are views into the underlying buffers and are only
    +// valid until the reader is advanced past the corresponding buffer.
    +//
    +// If Peek returns fewer than n bytes, it also returns an error.
    +func (r *Reader) Peek(n int, res [][]byte) ([][]byte, error) {
    +	for i := 0; n > 0 && i < len(r.data); i++ {
    +		curData := r.data[i].ReadOnlyData()
    +		start := 0
    +		if i == 0 {
    +			start = r.bufferIdx
    +		}
    +		curSize := min(n, len(curData)-start)
    +		if curSize == 0 {
    +			continue
    +		}
    +		res = append(res, curData[start:start+curSize])
    +		n -= curSize
    +	}
    +	if n > 0 {
    +		return nil, fmt.Errorf("insufficient bytes in reader")
    +	}
    +	return res, nil
    +}
    diff --git a/vendor/google.golang.org/grpc/picker_wrapper.go b/vendor/google.golang.org/grpc/picker_wrapper.go
    index a2d2a798d..aa52bfe95 100644
    --- a/vendor/google.golang.org/grpc/picker_wrapper.go
    +++ b/vendor/google.golang.org/grpc/picker_wrapper.go
    @@ -29,7 +29,6 @@ import (
     	"google.golang.org/grpc/internal/channelz"
     	istatus "google.golang.org/grpc/internal/status"
     	"google.golang.org/grpc/internal/transport"
    -	"google.golang.org/grpc/stats"
     	"google.golang.org/grpc/status"
     )
     
    @@ -48,14 +47,11 @@ type pickerGeneration struct {
     // actions and unblock when there's a picker update.
     type pickerWrapper struct {
     	// If pickerGen holds a nil pointer, the pickerWrapper is closed.
    -	pickerGen     atomic.Pointer[pickerGeneration]
    -	statsHandlers []stats.Handler // to record blocking picker calls
    +	pickerGen atomic.Pointer[pickerGeneration]
     }
     
    -func newPickerWrapper(statsHandlers []stats.Handler) *pickerWrapper {
    -	pw := &pickerWrapper{
    -		statsHandlers: statsHandlers,
    -	}
    +func newPickerWrapper() *pickerWrapper {
    +	pw := &pickerWrapper{}
     	pw.pickerGen.Store(&pickerGeneration{
     		blockingCh: make(chan struct{}),
     	})
    @@ -93,6 +89,12 @@ func doneChannelzWrapper(acbw *acBalancerWrapper, result *balancer.PickResult) {
     	}
     }
     
    +type pick struct {
    +	transport transport.ClientTransport // the selected transport
    +	result    balancer.PickResult       // the contents of the pick from the LB policy
    +	blocked   bool                      // set if a picker call queued for a new picker
    +}
    +
     // pick returns the transport that will be used for the RPC.
     // It may block in the following cases:
     // - there's no picker
    @@ -100,15 +102,16 @@ func doneChannelzWrapper(acbw *acBalancerWrapper, result *balancer.PickResult) {
     // - the current picker returns other errors and failfast is false.
     // - the subConn returned by the current picker is not READY
     // When one of these situations happens, pick blocks until the picker gets updated.
    -func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer.PickInfo) (transport.ClientTransport, balancer.PickResult, error) {
    +func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer.PickInfo) (pick, error) {
     	var ch chan struct{}
     
     	var lastPickErr error
    +	pickBlocked := false
     
     	for {
     		pg := pw.pickerGen.Load()
     		if pg == nil {
    -			return nil, balancer.PickResult{}, ErrClientConnClosing
    +			return pick{}, ErrClientConnClosing
     		}
     		if pg.picker == nil {
     			ch = pg.blockingCh
    @@ -127,9 +130,9 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer.
     				}
     				switch ctx.Err() {
     				case context.DeadlineExceeded:
    -					return nil, balancer.PickResult{}, status.Error(codes.DeadlineExceeded, errStr)
    +					return pick{}, status.Error(codes.DeadlineExceeded, errStr)
     				case context.Canceled:
    -					return nil, balancer.PickResult{}, status.Error(codes.Canceled, errStr)
    +					return pick{}, status.Error(codes.Canceled, errStr)
     				}
     			case <-ch:
     			}
    @@ -145,9 +148,7 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer.
     		// In the second case, the only way it will get to this conditional is
     		// if there is a new picker.
     		if ch != nil {
    -			for _, sh := range pw.statsHandlers {
    -				sh.HandleRPC(ctx, &stats.PickerUpdated{})
    -			}
    +			pickBlocked = true
     		}
     
     		ch = pg.blockingCh
    @@ -164,7 +165,7 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer.
     				if istatus.IsRestrictedControlPlaneCode(st) {
     					err = status.Errorf(codes.Internal, "received picker error with illegal status: %v", err)
     				}
    -				return nil, balancer.PickResult{}, dropError{error: err}
    +				return pick{}, dropError{error: err}
     			}
     			// For all other errors, wait for ready RPCs should block and other
     			// RPCs should fail with unavailable.
    @@ -172,7 +173,7 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer.
     				lastPickErr = err
     				continue
     			}
    -			return nil, balancer.PickResult{}, status.Error(codes.Unavailable, err.Error())
    +			return pick{}, status.Error(codes.Unavailable, err.Error())
     		}
     
     		acbw, ok := pickResult.SubConn.(*acBalancerWrapper)
    @@ -183,9 +184,8 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer.
     		if t := acbw.ac.getReadyTransport(); t != nil {
     			if channelz.IsOn() {
     				doneChannelzWrapper(acbw, &pickResult)
    -				return t, pickResult, nil
     			}
    -			return t, pickResult, nil
    +			return pick{transport: t, result: pickResult, blocked: pickBlocked}, nil
     		}
     		if pickResult.Done != nil {
     			// Calling done with nil error, no bytes sent and no bytes received.
    diff --git a/vendor/google.golang.org/grpc/preloader.go b/vendor/google.golang.org/grpc/preloader.go
    index ee0ff969a..1e783febf 100644
    --- a/vendor/google.golang.org/grpc/preloader.go
    +++ b/vendor/google.golang.org/grpc/preloader.go
    @@ -47,9 +47,6 @@ func (p *PreparedMsg) Encode(s Stream, msg any) error {
     	}
     
     	// check if the context has the relevant information to prepareMsg
    -	if rpcInfo.preloaderInfo == nil {
    -		return status.Errorf(codes.Internal, "grpc: rpcInfo.preloaderInfo is nil")
    -	}
     	if rpcInfo.preloaderInfo.codec == nil {
     		return status.Errorf(codes.Internal, "grpc: rpcInfo.preloaderInfo.codec is nil")
     	}
    diff --git a/vendor/google.golang.org/grpc/resolver/map.go b/vendor/google.golang.org/grpc/resolver/map.go
    index ada5b9bb7..c3c15ac96 100644
    --- a/vendor/google.golang.org/grpc/resolver/map.go
    +++ b/vendor/google.golang.org/grpc/resolver/map.go
    @@ -18,16 +18,28 @@
     
     package resolver
     
    -type addressMapEntry struct {
    +import (
    +	"encoding/base64"
    +	"sort"
    +	"strings"
    +)
    +
    +type addressMapEntry[T any] struct {
     	addr  Address
    -	value any
    +	value T
     }
     
    -// AddressMap is a map of addresses to arbitrary values taking into account
    +// AddressMap is an AddressMapV2[any].  It will be deleted in an upcoming
    +// release of grpc-go.
    +//
    +// Deprecated: use the generic AddressMapV2 type instead.
    +type AddressMap = AddressMapV2[any]
    +
    +// AddressMapV2 is a map of addresses to arbitrary values taking into account
     // Attributes.  BalancerAttributes are ignored, as are Metadata and Type.
     // Multiple accesses may not be performed concurrently.  Must be created via
     // NewAddressMap; do not construct directly.
    -type AddressMap struct {
    +type AddressMapV2[T any] struct {
     	// The underlying map is keyed by an Address with fields that we don't care
     	// about being set to their zero values. The only fields that we care about
     	// are `Addr`, `ServerName` and `Attributes`. Since we need to be able to
    @@ -41,23 +53,30 @@ type AddressMap struct {
     	// The value type of the map contains a slice of addresses which match the key
     	// in their `Addr` and `ServerName` fields and contain the corresponding value
     	// associated with them.
    -	m map[Address]addressMapEntryList
    +	m map[Address]addressMapEntryList[T]
     }
     
     func toMapKey(addr *Address) Address {
     	return Address{Addr: addr.Addr, ServerName: addr.ServerName}
     }
     
    -type addressMapEntryList []*addressMapEntry
    +type addressMapEntryList[T any] []*addressMapEntry[T]
     
    -// NewAddressMap creates a new AddressMap.
    +// NewAddressMap creates a new AddressMapV2[any].
    +//
    +// Deprecated: use the generic NewAddressMapV2 constructor instead.
     func NewAddressMap() *AddressMap {
    -	return &AddressMap{m: make(map[Address]addressMapEntryList)}
    +	return NewAddressMapV2[any]()
    +}
    +
    +// NewAddressMapV2 creates a new AddressMapV2.
    +func NewAddressMapV2[T any]() *AddressMapV2[T] {
    +	return &AddressMapV2[T]{m: make(map[Address]addressMapEntryList[T])}
     }
     
     // find returns the index of addr in the addressMapEntry slice, or -1 if not
     // present.
    -func (l addressMapEntryList) find(addr Address) int {
    +func (l addressMapEntryList[T]) find(addr Address) int {
     	for i, entry := range l {
     		// Attributes are the only thing to match on here, since `Addr` and
     		// `ServerName` are already equal.
    @@ -69,28 +88,28 @@ func (l addressMapEntryList) find(addr Address) int {
     }
     
     // Get returns the value for the address in the map, if present.
    -func (a *AddressMap) Get(addr Address) (value any, ok bool) {
    +func (a *AddressMapV2[T]) Get(addr Address) (value T, ok bool) {
     	addrKey := toMapKey(&addr)
     	entryList := a.m[addrKey]
     	if entry := entryList.find(addr); entry != -1 {
     		return entryList[entry].value, true
     	}
    -	return nil, false
    +	return value, false
     }
     
     // Set updates or adds the value to the address in the map.
    -func (a *AddressMap) Set(addr Address, value any) {
    +func (a *AddressMapV2[T]) Set(addr Address, value T) {
     	addrKey := toMapKey(&addr)
     	entryList := a.m[addrKey]
     	if entry := entryList.find(addr); entry != -1 {
     		entryList[entry].value = value
     		return
     	}
    -	a.m[addrKey] = append(entryList, &addressMapEntry{addr: addr, value: value})
    +	a.m[addrKey] = append(entryList, &addressMapEntry[T]{addr: addr, value: value})
     }
     
     // Delete removes addr from the map.
    -func (a *AddressMap) Delete(addr Address) {
    +func (a *AddressMapV2[T]) Delete(addr Address) {
     	addrKey := toMapKey(&addr)
     	entryList := a.m[addrKey]
     	entry := entryList.find(addr)
    @@ -107,7 +126,7 @@ func (a *AddressMap) Delete(addr Address) {
     }
     
     // Len returns the number of entries in the map.
    -func (a *AddressMap) Len() int {
    +func (a *AddressMapV2[T]) Len() int {
     	ret := 0
     	for _, entryList := range a.m {
     		ret += len(entryList)
    @@ -116,7 +135,7 @@ func (a *AddressMap) Len() int {
     }
     
     // Keys returns a slice of all current map keys.
    -func (a *AddressMap) Keys() []Address {
    +func (a *AddressMapV2[T]) Keys() []Address {
     	ret := make([]Address, 0, a.Len())
     	for _, entryList := range a.m {
     		for _, entry := range entryList {
    @@ -127,8 +146,8 @@ func (a *AddressMap) Keys() []Address {
     }
     
     // Values returns a slice of all current map values.
    -func (a *AddressMap) Values() []any {
    -	ret := make([]any, 0, a.Len())
    +func (a *AddressMapV2[T]) Values() []T {
    +	ret := make([]T, 0, a.Len())
     	for _, entryList := range a.m {
     		for _, entry := range entryList {
     			ret = append(ret, entry.value)
    @@ -137,70 +156,65 @@ func (a *AddressMap) Values() []any {
     	return ret
     }
     
    -type endpointNode struct {
    -	addrs map[string]struct{}
    -}
    -
    -// Equal returns whether the unordered set of addrs are the same between the
    -// endpoint nodes.
    -func (en *endpointNode) Equal(en2 *endpointNode) bool {
    -	if len(en.addrs) != len(en2.addrs) {
    -		return false
    -	}
    -	for addr := range en.addrs {
    -		if _, ok := en2.addrs[addr]; !ok {
    -			return false
    -		}
    -	}
    -	return true
    -}
    -
    -func toEndpointNode(endpoint Endpoint) endpointNode {
    -	en := make(map[string]struct{})
    -	for _, addr := range endpoint.Addresses {
    -		en[addr.Addr] = struct{}{}
    -	}
    -	return endpointNode{
    -		addrs: en,
    -	}
    -}
    +type endpointMapKey string
     
     // EndpointMap is a map of endpoints to arbitrary values keyed on only the
     // unordered set of address strings within an endpoint. This map is not thread
     // safe, thus it is unsafe to access concurrently. Must be created via
     // NewEndpointMap; do not construct directly.
    -type EndpointMap struct {
    -	endpoints map[*endpointNode]any
    +type EndpointMap[T any] struct {
    +	endpoints map[endpointMapKey]endpointData[T]
    +}
    +
    +type endpointData[T any] struct {
    +	// decodedKey stores the original key to avoid decoding when iterating on
    +	// EndpointMap keys.
    +	decodedKey Endpoint
    +	value      T
     }
     
     // NewEndpointMap creates a new EndpointMap.
    -func NewEndpointMap() *EndpointMap {
    -	return &EndpointMap{
    -		endpoints: make(map[*endpointNode]any),
    +func NewEndpointMap[T any]() *EndpointMap[T] {
    +	return &EndpointMap[T]{
    +		endpoints: make(map[endpointMapKey]endpointData[T]),
     	}
     }
     
    +// encodeEndpoint returns a string that uniquely identifies the unordered set of
    +// addresses within an endpoint.
    +func encodeEndpoint(e Endpoint) endpointMapKey {
    +	addrs := make([]string, 0, len(e.Addresses))
    +	// base64 encoding the address strings restricts the characters present
    +	// within the strings. This allows us to use a delimiter without the need of
    +	// escape characters.
    +	for _, addr := range e.Addresses {
    +		addrs = append(addrs, base64.StdEncoding.EncodeToString([]byte(addr.Addr)))
    +	}
    +	sort.Strings(addrs)
    +	// " " should not appear in base64 encoded strings.
    +	return endpointMapKey(strings.Join(addrs, " "))
    +}
    +
     // Get returns the value for the address in the map, if present.
    -func (em *EndpointMap) Get(e Endpoint) (value any, ok bool) {
    -	en := toEndpointNode(e)
    -	if endpoint := em.find(en); endpoint != nil {
    -		return em.endpoints[endpoint], true
    +func (em *EndpointMap[T]) Get(e Endpoint) (value T, ok bool) {
    +	val, found := em.endpoints[encodeEndpoint(e)]
    +	if found {
    +		return val.value, true
     	}
    -	return nil, false
    +	return value, false
     }
     
     // Set updates or adds the value to the address in the map.
    -func (em *EndpointMap) Set(e Endpoint, value any) {
    -	en := toEndpointNode(e)
    -	if endpoint := em.find(en); endpoint != nil {
    -		em.endpoints[endpoint] = value
    -		return
    +func (em *EndpointMap[T]) Set(e Endpoint, value T) {
    +	en := encodeEndpoint(e)
    +	em.endpoints[en] = endpointData[T]{
    +		decodedKey: Endpoint{Addresses: e.Addresses},
    +		value:      value,
     	}
    -	em.endpoints[&en] = value
     }
     
     // Len returns the number of entries in the map.
    -func (em *EndpointMap) Len() int {
    +func (em *EndpointMap[T]) Len() int {
     	return len(em.endpoints)
     }
     
    @@ -209,43 +223,25 @@ func (em *EndpointMap) Len() int {
     // the unordered set of addresses. Thus, endpoint information returned is not
     // the full endpoint data (drops duplicated addresses and attributes) but can be
     // used for EndpointMap accesses.
    -func (em *EndpointMap) Keys() []Endpoint {
    +func (em *EndpointMap[T]) Keys() []Endpoint {
     	ret := make([]Endpoint, 0, len(em.endpoints))
    -	for en := range em.endpoints {
    -		var endpoint Endpoint
    -		for addr := range en.addrs {
    -			endpoint.Addresses = append(endpoint.Addresses, Address{Addr: addr})
    -		}
    -		ret = append(ret, endpoint)
    +	for _, en := range em.endpoints {
    +		ret = append(ret, en.decodedKey)
     	}
     	return ret
     }
     
     // Values returns a slice of all current map values.
    -func (em *EndpointMap) Values() []any {
    -	ret := make([]any, 0, len(em.endpoints))
    +func (em *EndpointMap[T]) Values() []T {
    +	ret := make([]T, 0, len(em.endpoints))
     	for _, val := range em.endpoints {
    -		ret = append(ret, val)
    +		ret = append(ret, val.value)
     	}
     	return ret
     }
     
    -// find returns a pointer to the endpoint node in em if the endpoint node is
    -// already present. If not found, nil is returned. The comparisons are done on
    -// the unordered set of addresses within an endpoint.
    -func (em EndpointMap) find(e endpointNode) *endpointNode {
    -	for endpoint := range em.endpoints {
    -		if e.Equal(endpoint) {
    -			return endpoint
    -		}
    -	}
    -	return nil
    -}
    -
     // Delete removes the specified endpoint from the map.
    -func (em *EndpointMap) Delete(e Endpoint) {
    -	en := toEndpointNode(e)
    -	if entry := em.find(en); entry != nil {
    -		delete(em.endpoints, entry)
    -	}
    +func (em *EndpointMap[T]) Delete(e Endpoint) {
    +	en := encodeEndpoint(e)
    +	delete(em.endpoints, en)
     }
    diff --git a/vendor/google.golang.org/grpc/resolver/resolver.go b/vendor/google.golang.org/grpc/resolver/resolver.go
    index b84ef26d4..8e6af9514 100644
    --- a/vendor/google.golang.org/grpc/resolver/resolver.go
    +++ b/vendor/google.golang.org/grpc/resolver/resolver.go
    @@ -332,6 +332,11 @@ type AuthorityOverrider interface {
     	// OverrideAuthority returns the authority to use for a ClientConn with the
     	// given target. The implementation must generate it without blocking,
     	// typically in line, and must keep it unchanged.
    +	//
    +	// The returned string must be a valid ":authority" header value, i.e. be
    +	// encoded according to
    +	// [RFC3986](https://datatracker.ietf.org/doc/html/rfc3986#section-3.2) as
    +	// necessary.
     	OverrideAuthority(Target) string
     }
     
    diff --git a/vendor/google.golang.org/grpc/resolver_wrapper.go b/vendor/google.golang.org/grpc/resolver_wrapper.go
    index 945e24ff8..6e6137643 100644
    --- a/vendor/google.golang.org/grpc/resolver_wrapper.go
    +++ b/vendor/google.golang.org/grpc/resolver_wrapper.go
    @@ -69,6 +69,7 @@ func (ccr *ccResolverWrapper) start() error {
     	errCh := make(chan error)
     	ccr.serializer.TrySchedule(func(ctx context.Context) {
     		if ctx.Err() != nil {
    +			errCh <- ctx.Err()
     			return
     		}
     		opts := resolver.BuildOptions{
    @@ -134,12 +135,7 @@ func (ccr *ccResolverWrapper) UpdateState(s resolver.State) error {
     		return nil
     	}
     	if s.Endpoints == nil {
    -		s.Endpoints = make([]resolver.Endpoint, 0, len(s.Addresses))
    -		for _, a := range s.Addresses {
    -			ep := resolver.Endpoint{Addresses: []resolver.Address{a}, Attributes: a.BalancerAttributes}
    -			ep.Addresses[0].BalancerAttributes = nil
    -			s.Endpoints = append(s.Endpoints, ep)
    -		}
    +		s.Endpoints = addressesToEndpoints(s.Addresses)
     	}
     	ccr.addChannelzTraceEvent(s)
     	ccr.curState = s
    @@ -172,7 +168,11 @@ func (ccr *ccResolverWrapper) NewAddress(addrs []resolver.Address) {
     		ccr.cc.mu.Unlock()
     		return
     	}
    -	s := resolver.State{Addresses: addrs, ServiceConfig: ccr.curState.ServiceConfig}
    +	s := resolver.State{
    +		Addresses:     addrs,
    +		ServiceConfig: ccr.curState.ServiceConfig,
    +		Endpoints:     addressesToEndpoints(addrs),
    +	}
     	ccr.addChannelzTraceEvent(s)
     	ccr.curState = s
     	ccr.mu.Unlock()
    @@ -210,3 +210,13 @@ func (ccr *ccResolverWrapper) addChannelzTraceEvent(s resolver.State) {
     	}
     	channelz.Infof(logger, ccr.cc.channelz, "Resolver state updated: %s (%v)", pretty.ToJSON(s), strings.Join(updates, "; "))
     }
    +
    +func addressesToEndpoints(addrs []resolver.Address) []resolver.Endpoint {
    +	endpoints := make([]resolver.Endpoint, 0, len(addrs))
    +	for _, a := range addrs {
    +		ep := resolver.Endpoint{Addresses: []resolver.Address{a}, Attributes: a.BalancerAttributes}
    +		ep.Addresses[0].BalancerAttributes = nil
    +		endpoints = append(endpoints, ep)
    +	}
    +	return endpoints
    +}
    diff --git a/vendor/google.golang.org/grpc/rpc_util.go b/vendor/google.golang.org/grpc/rpc_util.go
    index a8ddb0af5..8160f9430 100644
    --- a/vendor/google.golang.org/grpc/rpc_util.go
    +++ b/vendor/google.golang.org/grpc/rpc_util.go
    @@ -33,6 +33,8 @@ import (
     	"google.golang.org/grpc/credentials"
     	"google.golang.org/grpc/encoding"
     	"google.golang.org/grpc/encoding/proto"
    +	"google.golang.org/grpc/internal"
    +	"google.golang.org/grpc/internal/grpcutil"
     	"google.golang.org/grpc/internal/transport"
     	"google.golang.org/grpc/mem"
     	"google.golang.org/grpc/metadata"
    @@ -41,6 +43,10 @@ import (
     	"google.golang.org/grpc/status"
     )
     
    +func init() {
    +	internal.AcceptCompressors = acceptCompressors
    +}
    +
     // Compressor defines the interface gRPC uses to compress a message.
     //
     // Deprecated: use package encoding.
    @@ -151,15 +157,32 @@ func (d *gzipDecompressor) Type() string {
     
     // callInfo contains all related configuration and information about an RPC.
     type callInfo struct {
    -	compressorName        string
    -	failFast              bool
    -	maxReceiveMessageSize *int
    -	maxSendMessageSize    *int
    -	creds                 credentials.PerRPCCredentials
    -	contentSubtype        string
    -	codec                 baseCodec
    -	maxRetryRPCBufferSize int
    -	onFinish              []func(err error)
    +	compressorName              string
    +	failFast                    bool
    +	maxReceiveMessageSize       *int
    +	maxSendMessageSize          *int
    +	creds                       credentials.PerRPCCredentials
    +	contentSubtype              string
    +	codec                       baseCodec
    +	maxRetryRPCBufferSize       int
    +	onFinish                    []func(err error)
    +	authority                   string
    +	acceptedResponseCompressors []string
    +}
    +
    +func acceptedCompressorAllows(allowed []string, name string) bool {
    +	if allowed == nil {
    +		return true
    +	}
    +	if name == "" || name == encoding.Identity {
    +		return true
    +	}
    +	for _, a := range allowed {
    +		if a == name {
    +			return true
    +		}
    +	}
    +	return false
     }
     
     func defaultCallInfo() *callInfo {
    @@ -169,6 +192,29 @@ func defaultCallInfo() *callInfo {
     	}
     }
     
    +func newAcceptedCompressionConfig(names []string) ([]string, error) {
    +	if len(names) == 0 {
    +		return nil, nil
    +	}
    +	var allowed []string
    +	seen := make(map[string]struct{}, len(names))
    +	for _, name := range names {
    +		name = strings.TrimSpace(name)
    +		if name == "" || name == encoding.Identity {
    +			continue
    +		}
    +		if !grpcutil.IsCompressorNameRegistered(name) {
    +			return nil, status.Errorf(codes.InvalidArgument, "grpc: compressor %q is not registered", name)
    +		}
    +		if _, dup := seen[name]; dup {
    +			continue
    +		}
    +		seen[name] = struct{}{}
    +		allowed = append(allowed, name)
    +	}
    +	return allowed, nil
    +}
    +
     // CallOption configures a Call before it starts or extracts information from
     // a Call after it completes.
     type CallOption interface {
    @@ -365,6 +411,36 @@ func (o MaxRecvMsgSizeCallOption) before(c *callInfo) error {
     }
     func (o MaxRecvMsgSizeCallOption) after(*callInfo, *csAttempt) {}
     
    +// CallAuthority returns a CallOption that sets the HTTP/2 :authority header of
    +// an RPC to the specified value. When using CallAuthority, the credentials in
    +// use must implement the AuthorityValidator interface.
    +//
    +// # Experimental
    +//
    +// Notice: This API is EXPERIMENTAL and may be changed or removed in a later
    +// release.
    +func CallAuthority(authority string) CallOption {
    +	return AuthorityOverrideCallOption{Authority: authority}
    +}
    +
    +// AuthorityOverrideCallOption is a CallOption that indicates the HTTP/2
    +// :authority header value to use for the call.
    +//
    +// # Experimental
    +//
    +// Notice: This type is EXPERIMENTAL and may be changed or removed in a later
    +// release.
    +type AuthorityOverrideCallOption struct {
    +	Authority string
    +}
    +
    +func (o AuthorityOverrideCallOption) before(c *callInfo) error {
    +	c.authority = o.Authority
    +	return nil
    +}
    +
    +func (o AuthorityOverrideCallOption) after(*callInfo, *csAttempt) {}
    +
     // MaxCallSendMsgSize returns a CallOption which sets the maximum message size
     // in bytes the client can send. If this is not set, gRPC uses the default
     // `math.MaxInt32`.
    @@ -440,6 +516,31 @@ func (o CompressorCallOption) before(c *callInfo) error {
     }
     func (o CompressorCallOption) after(*callInfo, *csAttempt) {}
     
    +// acceptCompressors returns a CallOption that limits the compression algorithms
    +// advertised in the grpc-accept-encoding header for response messages.
    +// Compression algorithms not in the provided list will not be advertised, and
    +// responses compressed with non-listed algorithms will be rejected.
    +func acceptCompressors(names ...string) CallOption {
    +	cp := append([]string(nil), names...)
    +	return acceptCompressorsCallOption{names: cp}
    +}
    +
    +// acceptCompressorsCallOption is a CallOption that limits response compression.
    +type acceptCompressorsCallOption struct {
    +	names []string
    +}
    +
    +func (o acceptCompressorsCallOption) before(c *callInfo) error {
    +	allowed, err := newAcceptedCompressionConfig(o.names)
    +	if err != nil {
    +		return err
    +	}
    +	c.acceptedResponseCompressors = allowed
    +	return nil
    +}
    +
    +func (acceptCompressorsCallOption) after(*callInfo, *csAttempt) {}
    +
     // CallContentSubtype returns a CallOption that will set the content-subtype
     // for a call. For example, if content-subtype is "json", the Content-Type over
     // the wire will be "application/grpc+json". The content-subtype is converted
    @@ -626,8 +727,20 @@ type streamReader interface {
     	Read(n int) (mem.BufferSlice, error)
     }
     
    +// noCopy may be embedded into structs which must not be copied
    +// after the first use.
    +//
    +// See https://golang.org/issues/8005#issuecomment-190753527
    +// for details.
    +type noCopy struct {
    +}
    +
    +func (*noCopy) Lock()   {}
    +func (*noCopy) Unlock() {}
    +
     // parser reads complete gRPC messages from the underlying reader.
     type parser struct {
    +	_ noCopy
     	// r is the underlying reader.
     	// See the comment on recvMsg for the permissible
     	// error types.
    @@ -814,8 +927,7 @@ func (p *payloadInfo) free() {
     // the buffer is no longer needed.
     // TODO: Refactor this function to reduce the number of arguments.
     // See: https://google.github.io/styleguide/go/best-practices.html#function-argument-lists
    -func recvAndDecompress(p *parser, s recvCompressor, dc Decompressor, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor, isServer bool,
    -) (out mem.BufferSlice, err error) {
    +func recvAndDecompress(p *parser, s recvCompressor, dc Decompressor, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor, isServer bool) (out mem.BufferSlice, err error) {
     	pf, compressed, err := p.recvMsg(maxReceiveMessageSize)
     	if err != nil {
     		return nil, err
    @@ -870,13 +982,19 @@ func decompress(compressor encoding.Compressor, d mem.BufferSlice, dc Decompress
     			return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the message: %v", err)
     		}
     
    -		out, err := mem.ReadAll(io.LimitReader(dcReader, int64(maxReceiveMessageSize)), pool)
    +		// Read at most one byte more than the limit from the decompressor.
    +		// Unless the limit is MaxInt64, in which case, that's impossible, so
    +		// apply no limit.
    +		if limit := int64(maxReceiveMessageSize); limit < math.MaxInt64 {
    +			dcReader = io.LimitReader(dcReader, limit+1)
    +		}
    +		out, err := mem.ReadAll(dcReader, pool)
     		if err != nil {
     			out.Free()
     			return nil, status.Errorf(codes.Internal, "grpc: failed to read decompressed data: %v", err)
     		}
     
    -		if out.Len() == maxReceiveMessageSize && !atEOF(dcReader) {
    +		if out.Len() > maxReceiveMessageSize {
     			out.Free()
     			return nil, status.Errorf(codes.ResourceExhausted, "grpc: received message after decompression larger than max %d", maxReceiveMessageSize)
     		}
    @@ -885,12 +1003,6 @@ func decompress(compressor encoding.Compressor, d mem.BufferSlice, dc Decompress
     	return nil, status.Errorf(codes.Internal, "grpc: no decompressor available for compressed payload")
     }
     
    -// atEOF reads data from r and returns true if zero bytes could be read and r.Read returns EOF.
    -func atEOF(dcReader io.Reader) bool {
    -	n, err := dcReader.Read(make([]byte, 1))
    -	return n == 0 && err == io.EOF
    -}
    -
     type recvCompressor interface {
     	RecvCompress() string
     }
    @@ -918,7 +1030,7 @@ func recv(p *parser, c baseCodec, s recvCompressor, dc Decompressor, m any, maxR
     // Information about RPC
     type rpcInfo struct {
     	failfast      bool
    -	preloaderInfo *compressorInfo
    +	preloaderInfo compressorInfo
     }
     
     // Information about Preloader
    @@ -937,7 +1049,7 @@ type rpcInfoContextKey struct{}
     func newContextWithRPCInfo(ctx context.Context, failfast bool, codec baseCodec, cp Compressor, comp encoding.Compressor) context.Context {
     	return context.WithValue(ctx, rpcInfoContextKey{}, &rpcInfo{
     		failfast: failfast,
    -		preloaderInfo: &compressorInfo{
    +		preloaderInfo: compressorInfo{
     			codec: codec,
     			cp:    cp,
     			comp:  comp,
    diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go
    index 976e70ae0..ddd377341 100644
    --- a/vendor/google.golang.org/grpc/server.go
    +++ b/vendor/google.golang.org/grpc/server.go
    @@ -124,7 +124,8 @@ type serviceInfo struct {
     
     // Server is a gRPC server to serve RPC requests.
     type Server struct {
    -	opts serverOptions
    +	opts         serverOptions
    +	statsHandler stats.Handler
     
     	mu  sync.Mutex // guards following
     	lis map[net.Listener]bool
    @@ -179,6 +180,7 @@ type serverOptions struct {
     	numServerWorkers      uint32
     	bufferPool            mem.BufferPool
     	waitForHandlers       bool
    +	staticWindowSize      bool
     }
     
     var defaultServerOptions = serverOptions{
    @@ -279,6 +281,7 @@ func ReadBufferSize(s int) ServerOption {
     func InitialWindowSize(s int32) ServerOption {
     	return newFuncServerOption(func(o *serverOptions) {
     		o.initialWindowSize = s
    +		o.staticWindowSize = true
     	})
     }
     
    @@ -287,6 +290,29 @@ func InitialWindowSize(s int32) ServerOption {
     func InitialConnWindowSize(s int32) ServerOption {
     	return newFuncServerOption(func(o *serverOptions) {
     		o.initialConnWindowSize = s
    +		o.staticWindowSize = true
    +	})
    +}
    +
    +// StaticStreamWindowSize returns a ServerOption to set the initial stream
    +// window size to the value provided and disables dynamic flow control.
    +// The lower bound for window size is 64K and any value smaller than that
    +// will be ignored.
    +func StaticStreamWindowSize(s int32) ServerOption {
    +	return newFuncServerOption(func(o *serverOptions) {
    +		o.initialWindowSize = s
    +		o.staticWindowSize = true
    +	})
    +}
    +
    +// StaticConnWindowSize returns a ServerOption to set the initial connection
    +// window size to the value provided and disables dynamic flow control.
    +// The lower bound for window size is 64K and any value smaller than that
    +// will be ignored.
    +func StaticConnWindowSize(s int32) ServerOption {
    +	return newFuncServerOption(func(o *serverOptions) {
    +		o.initialConnWindowSize = s
    +		o.staticWindowSize = true
     	})
     }
     
    @@ -667,13 +693,14 @@ func NewServer(opt ...ServerOption) *Server {
     		o.apply(&opts)
     	}
     	s := &Server{
    -		lis:      make(map[net.Listener]bool),
    -		opts:     opts,
    -		conns:    make(map[string]map[transport.ServerTransport]bool),
    -		services: make(map[string]*serviceInfo),
    -		quit:     grpcsync.NewEvent(),
    -		done:     grpcsync.NewEvent(),
    -		channelz: channelz.RegisterServer(""),
    +		lis:          make(map[net.Listener]bool),
    +		opts:         opts,
    +		statsHandler: istats.NewCombinedHandler(opts.statsHandlers...),
    +		conns:        make(map[string]map[transport.ServerTransport]bool),
    +		services:     make(map[string]*serviceInfo),
    +		quit:         grpcsync.NewEvent(),
    +		done:         grpcsync.NewEvent(),
    +		channelz:     channelz.RegisterServer(""),
     	}
     	chainUnaryServerInterceptors(s)
     	chainStreamServerInterceptors(s)
    @@ -974,7 +1001,7 @@ func (s *Server) newHTTP2Transport(c net.Conn) transport.ServerTransport {
     		ConnectionTimeout:     s.opts.connectionTimeout,
     		Credentials:           s.opts.creds,
     		InTapHandle:           s.opts.inTapHandle,
    -		StatsHandlers:         s.opts.statsHandlers,
    +		StatsHandler:          s.statsHandler,
     		KeepaliveParams:       s.opts.keepaliveParams,
     		KeepalivePolicy:       s.opts.keepalivePolicy,
     		InitialWindowSize:     s.opts.initialWindowSize,
    @@ -986,6 +1013,7 @@ func (s *Server) newHTTP2Transport(c net.Conn) transport.ServerTransport {
     		MaxHeaderListSize:     s.opts.maxHeaderListSize,
     		HeaderTableSize:       s.opts.headerTableSize,
     		BufferPool:            s.opts.bufferPool,
    +		StaticWindowSize:      s.opts.staticWindowSize,
     	}
     	st, err := transport.NewServerTransport(c, config)
     	if err != nil {
    @@ -1010,18 +1038,18 @@ func (s *Server) newHTTP2Transport(c net.Conn) transport.ServerTransport {
     func (s *Server) serveStreams(ctx context.Context, st transport.ServerTransport, rawConn net.Conn) {
     	ctx = transport.SetConnection(ctx, rawConn)
     	ctx = peer.NewContext(ctx, st.Peer())
    -	for _, sh := range s.opts.statsHandlers {
    -		ctx = sh.TagConn(ctx, &stats.ConnTagInfo{
    +	if s.statsHandler != nil {
    +		ctx = s.statsHandler.TagConn(ctx, &stats.ConnTagInfo{
     			RemoteAddr: st.Peer().Addr,
     			LocalAddr:  st.Peer().LocalAddr,
     		})
    -		sh.HandleConn(ctx, &stats.ConnBegin{})
    +		s.statsHandler.HandleConn(ctx, &stats.ConnBegin{})
     	}
     
     	defer func() {
     		st.Close(errors.New("finished serving streams for the server transport"))
    -		for _, sh := range s.opts.statsHandlers {
    -			sh.HandleConn(ctx, &stats.ConnEnd{})
    +		if s.statsHandler != nil {
    +			s.statsHandler.HandleConn(ctx, &stats.ConnEnd{})
     		}
     	}()
     
    @@ -1078,7 +1106,7 @@ var _ http.Handler = (*Server)(nil)
     // Notice: This API is EXPERIMENTAL and may be changed or removed in a
     // later release.
     func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) {
    -	st, err := transport.NewServerHandlerTransport(w, r, s.opts.statsHandlers, s.opts.bufferPool)
    +	st, err := transport.NewServerHandlerTransport(w, r, s.statsHandler, s.opts.bufferPool)
     	if err != nil {
     		// Errors returned from transport.NewServerHandlerTransport have
     		// already been written to w.
    @@ -1172,12 +1200,8 @@ func (s *Server) sendResponse(ctx context.Context, stream *transport.ServerStrea
     		return status.Errorf(codes.ResourceExhausted, "grpc: trying to send message larger than max (%d vs. %d)", payloadLen, s.opts.maxSendMessageSize)
     	}
     	err = stream.Write(hdr, payload, opts)
    -	if err == nil {
    -		if len(s.opts.statsHandlers) != 0 {
    -			for _, sh := range s.opts.statsHandlers {
    -				sh.HandleRPC(ctx, outPayload(false, msg, dataLen, payloadLen, time.Now()))
    -			}
    -		}
    +	if err == nil && s.statsHandler != nil {
    +		s.statsHandler.HandleRPC(ctx, outPayload(false, msg, dataLen, payloadLen, time.Now()))
     	}
     	return err
     }
    @@ -1219,16 +1243,15 @@ func getChainUnaryHandler(interceptors []UnaryServerInterceptor, curr int, info
     }
     
     func (s *Server) processUnaryRPC(ctx context.Context, stream *transport.ServerStream, info *serviceInfo, md *MethodDesc, trInfo *traceInfo) (err error) {
    -	shs := s.opts.statsHandlers
    -	if len(shs) != 0 || trInfo != nil || channelz.IsOn() {
    +	sh := s.statsHandler
    +	if sh != nil || trInfo != nil || channelz.IsOn() {
     		if channelz.IsOn() {
     			s.incrCallsStarted()
     		}
     		var statsBegin *stats.Begin
    -		for _, sh := range shs {
    -			beginTime := time.Now()
    +		if sh != nil {
     			statsBegin = &stats.Begin{
    -				BeginTime:      beginTime,
    +				BeginTime:      time.Now(),
     				IsClientStream: false,
     				IsServerStream: false,
     			}
    @@ -1256,7 +1279,7 @@ func (s *Server) processUnaryRPC(ctx context.Context, stream *transport.ServerSt
     				trInfo.tr.Finish()
     			}
     
    -			for _, sh := range shs {
    +			if sh != nil {
     				end := &stats.End{
     					BeginTime: statsBegin.BeginTime,
     					EndTime:   time.Now(),
    @@ -1353,7 +1376,7 @@ func (s *Server) processUnaryRPC(ctx context.Context, stream *transport.ServerSt
     	}
     
     	var payInfo *payloadInfo
    -	if len(shs) != 0 || len(binlogs) != 0 {
    +	if sh != nil || len(binlogs) != 0 {
     		payInfo = &payloadInfo{}
     		defer payInfo.free()
     	}
    @@ -1379,7 +1402,7 @@ func (s *Server) processUnaryRPC(ctx context.Context, stream *transport.ServerSt
     			return status.Errorf(codes.Internal, "grpc: error unmarshalling request: %v", err)
     		}
     
    -		for _, sh := range shs {
    +		if sh != nil {
     			sh.HandleRPC(ctx, &stats.InPayload{
     				RecvTime:         time.Now(),
     				Payload:          v,
    @@ -1553,32 +1576,30 @@ func (s *Server) processStreamingRPC(ctx context.Context, stream *transport.Serv
     	if channelz.IsOn() {
     		s.incrCallsStarted()
     	}
    -	shs := s.opts.statsHandlers
    +	sh := s.statsHandler
     	var statsBegin *stats.Begin
    -	if len(shs) != 0 {
    -		beginTime := time.Now()
    +	if sh != nil {
     		statsBegin = &stats.Begin{
    -			BeginTime:      beginTime,
    +			BeginTime:      time.Now(),
     			IsClientStream: sd.ClientStreams,
     			IsServerStream: sd.ServerStreams,
     		}
    -		for _, sh := range shs {
    -			sh.HandleRPC(ctx, statsBegin)
    -		}
    +		sh.HandleRPC(ctx, statsBegin)
     	}
     	ctx = NewContextWithServerTransportStream(ctx, stream)
     	ss := &serverStream{
     		ctx:                   ctx,
     		s:                     stream,
    -		p:                     &parser{r: stream, bufferPool: s.opts.bufferPool},
    +		p:                     parser{r: stream, bufferPool: s.opts.bufferPool},
     		codec:                 s.getCodec(stream.ContentSubtype()),
    +		desc:                  sd,
     		maxReceiveMessageSize: s.opts.maxReceiveMessageSize,
     		maxSendMessageSize:    s.opts.maxSendMessageSize,
     		trInfo:                trInfo,
    -		statsHandler:          shs,
    +		statsHandler:          sh,
     	}
     
    -	if len(shs) != 0 || trInfo != nil || channelz.IsOn() {
    +	if sh != nil || trInfo != nil || channelz.IsOn() {
     		// See comment in processUnaryRPC on defers.
     		defer func() {
     			if trInfo != nil {
    @@ -1592,7 +1613,7 @@ func (s *Server) processStreamingRPC(ctx context.Context, stream *transport.Serv
     				ss.mu.Unlock()
     			}
     
    -			if len(shs) != 0 {
    +			if sh != nil {
     				end := &stats.End{
     					BeginTime: statsBegin.BeginTime,
     					EndTime:   time.Now(),
    @@ -1600,9 +1621,7 @@ func (s *Server) processStreamingRPC(ctx context.Context, stream *transport.Serv
     				if err != nil && err != io.EOF {
     					end.Error = toRPCErr(err)
     				}
    -				for _, sh := range shs {
    -					sh.HandleRPC(ctx, end)
    -				}
    +				sh.HandleRPC(ctx, end)
     			}
     
     			if channelz.IsOn() {
    @@ -1791,19 +1810,17 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Ser
     	method := sm[pos+1:]
     
     	// FromIncomingContext is expensive: skip if there are no statsHandlers
    -	if len(s.opts.statsHandlers) > 0 {
    +	if s.statsHandler != nil {
     		md, _ := metadata.FromIncomingContext(ctx)
    -		for _, sh := range s.opts.statsHandlers {
    -			ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: stream.Method()})
    -			sh.HandleRPC(ctx, &stats.InHeader{
    -				FullMethod:  stream.Method(),
    -				RemoteAddr:  t.Peer().Addr,
    -				LocalAddr:   t.Peer().LocalAddr,
    -				Compression: stream.RecvCompress(),
    -				WireLength:  stream.HeaderWireLength(),
    -				Header:      md,
    -			})
    -		}
    +		ctx = s.statsHandler.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: stream.Method()})
    +		s.statsHandler.HandleRPC(ctx, &stats.InHeader{
    +			FullMethod:  stream.Method(),
    +			RemoteAddr:  t.Peer().Addr,
    +			LocalAddr:   t.Peer().LocalAddr,
    +			Compression: stream.RecvCompress(),
    +			WireLength:  stream.HeaderWireLength(),
    +			Header:      md,
    +		})
     	}
     	// To have calls in stream callouts work. Will delete once all stats handler
     	// calls come from the gRPC layer.
    diff --git a/vendor/google.golang.org/grpc/stats/handlers.go b/vendor/google.golang.org/grpc/stats/handlers.go
    index dc03731e4..67194a592 100644
    --- a/vendor/google.golang.org/grpc/stats/handlers.go
    +++ b/vendor/google.golang.org/grpc/stats/handlers.go
    @@ -38,6 +38,15 @@ type RPCTagInfo struct {
     	// FailFast indicates if this RPC is failfast.
     	// This field is only valid on client side, it's always false on server side.
     	FailFast bool
    +	// NameResolutionDelay indicates if the RPC needed to wait for the
    +	// initial name resolver update before it could begin. This should only
    +	// happen if the channel is IDLE when the RPC is started.  Note that
    +	// all retry or hedging attempts for an RPC that experienced a delay
    +	// will have it set.
    +	//
    +	// This field is only valid on the client side; it is always false on
    +	// the server side.
    +	NameResolutionDelay bool
     }
     
     // Handler defines the interface for the related stats handling (e.g., RPCs, connections).
    diff --git a/vendor/google.golang.org/grpc/stats/stats.go b/vendor/google.golang.org/grpc/stats/stats.go
    index 6f20d2d54..10bf998aa 100644
    --- a/vendor/google.golang.org/grpc/stats/stats.go
    +++ b/vendor/google.golang.org/grpc/stats/stats.go
    @@ -36,7 +36,12 @@ type RPCStats interface {
     	IsClient() bool
     }
     
    -// Begin contains stats when an RPC attempt begins.
    +// Begin contains stats for the start of an RPC attempt.
    +//
    +//   - Server-side: Triggered after `InHeader`, as headers are processed
    +//     before the RPC lifecycle begins.
    +//   - Client-side: The first stats event recorded.
    +//
     // FailFast is only valid if this Begin is from client side.
     type Begin struct {
     	// Client is true if this Begin is from client side.
    @@ -59,17 +64,23 @@ func (s *Begin) IsClient() bool { return s.Client }
     
     func (s *Begin) isRPCStats() {}
     
    -// PickerUpdated indicates that the LB policy provided a new picker while the
    -// RPC was waiting for one.
    -type PickerUpdated struct{}
    +// DelayedPickComplete indicates that the RPC is unblocked following a delay in
    +// selecting a connection for the call.
    +type DelayedPickComplete struct{}
    +
    +// IsClient indicates DelayedPickComplete is available on the client.
    +func (*DelayedPickComplete) IsClient() bool { return true }
     
    -// IsClient indicates if the stats information is from client side. Only Client
    -// Side interfaces with a Picker, thus always returns true.
    -func (*PickerUpdated) IsClient() bool { return true }
    +func (*DelayedPickComplete) isRPCStats() {}
     
    -func (*PickerUpdated) isRPCStats() {}
    +// PickerUpdated indicates that the RPC is unblocked following a delay in
    +// selecting a connection for the call.
    +//
    +// Deprecated: will be removed in a future release; use DelayedPickComplete
    +// instead.
    +type PickerUpdated = DelayedPickComplete
     
    -// InPayload contains the information for an incoming payload.
    +// InPayload contains stats about an incoming payload.
     type InPayload struct {
     	// Client is true if this InPayload is from client side.
     	Client bool
    @@ -98,7 +109,9 @@ func (s *InPayload) IsClient() bool { return s.Client }
     
     func (s *InPayload) isRPCStats() {}
     
    -// InHeader contains stats when a header is received.
    +// InHeader contains stats about header reception.
    +//
    +// - Server-side: The first stats event after the RPC request is received.
     type InHeader struct {
     	// Client is true if this InHeader is from client side.
     	Client bool
    @@ -123,7 +136,7 @@ func (s *InHeader) IsClient() bool { return s.Client }
     
     func (s *InHeader) isRPCStats() {}
     
    -// InTrailer contains stats when a trailer is received.
    +// InTrailer contains stats about trailer reception.
     type InTrailer struct {
     	// Client is true if this InTrailer is from client side.
     	Client bool
    @@ -139,7 +152,7 @@ func (s *InTrailer) IsClient() bool { return s.Client }
     
     func (s *InTrailer) isRPCStats() {}
     
    -// OutPayload contains the information for an outgoing payload.
    +// OutPayload contains stats about an outgoing payload.
     type OutPayload struct {
     	// Client is true if this OutPayload is from client side.
     	Client bool
    @@ -166,7 +179,10 @@ func (s *OutPayload) IsClient() bool { return s.Client }
     
     func (s *OutPayload) isRPCStats() {}
     
    -// OutHeader contains stats when a header is sent.
    +// OutHeader contains stats about header transmission.
    +//
    +//   - Client-side: Only occurs after 'Begin', as headers are always the first
    +//     thing sent on a stream.
     type OutHeader struct {
     	// Client is true if this OutHeader is from client side.
     	Client bool
    @@ -189,14 +205,15 @@ func (s *OutHeader) IsClient() bool { return s.Client }
     
     func (s *OutHeader) isRPCStats() {}
     
    -// OutTrailer contains stats when a trailer is sent.
    +// OutTrailer contains stats about trailer transmission.
     type OutTrailer struct {
     	// Client is true if this OutTrailer is from client side.
     	Client bool
     	// WireLength is the wire length of trailer.
     	//
    -	// Deprecated: This field is never set. The length is not known when this message is
    -	// emitted because the trailer fields are compressed with hpack after that.
    +	// Deprecated: This field is never set. The length is not known when this
    +	// message is emitted because the trailer fields are compressed with hpack
    +	// after that.
     	WireLength int
     	// Trailer contains the trailer metadata sent to the client. This
     	// field is only valid if this OutTrailer is from the server side.
    @@ -208,7 +225,7 @@ func (s *OutTrailer) IsClient() bool { return s.Client }
     
     func (s *OutTrailer) isRPCStats() {}
     
    -// End contains stats when an RPC ends.
    +// End contains stats about RPC completion.
     type End struct {
     	// Client is true if this End is from client side.
     	Client bool
    @@ -238,7 +255,7 @@ type ConnStats interface {
     	IsClient() bool
     }
     
    -// ConnBegin contains the stats of a connection when it is established.
    +// ConnBegin contains stats about connection establishment.
     type ConnBegin struct {
     	// Client is true if this ConnBegin is from client side.
     	Client bool
    @@ -249,7 +266,7 @@ func (s *ConnBegin) IsClient() bool { return s.Client }
     
     func (s *ConnBegin) isConnStats() {}
     
    -// ConnEnd contains the stats of a connection when it ends.
    +// ConnEnd contains stats about connection termination.
     type ConnEnd struct {
     	// Client is true if this ConnEnd is from client side.
     	Client bool
    diff --git a/vendor/google.golang.org/grpc/stream.go b/vendor/google.golang.org/grpc/stream.go
    index 12163150b..ec9577b27 100644
    --- a/vendor/google.golang.org/grpc/stream.go
    +++ b/vendor/google.golang.org/grpc/stream.go
    @@ -25,6 +25,7 @@ import (
     	"math"
     	rand "math/rand/v2"
     	"strconv"
    +	"strings"
     	"sync"
     	"time"
     
    @@ -101,9 +102,9 @@ type ClientStream interface {
     	// It must only be called after stream.CloseAndRecv has returned, or
     	// stream.Recv has returned a non-nil error (including io.EOF).
     	Trailer() metadata.MD
    -	// CloseSend closes the send direction of the stream. It closes the stream
    -	// when non-nil error is met. It is also not safe to call CloseSend
    -	// concurrently with SendMsg.
    +	// CloseSend closes the send direction of the stream. This method always
    +	// returns a nil error. The status of the stream may be discovered using
    +	// RecvMsg. It is also not safe to call CloseSend concurrently with SendMsg.
     	CloseSend() error
     	// Context returns the context for this stream.
     	//
    @@ -177,13 +178,43 @@ func NewClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth
     	return cc.NewStream(ctx, desc, method, opts...)
     }
     
    +var emptyMethodConfig = serviceconfig.MethodConfig{}
    +
    +// endOfClientStream performs cleanup actions required for both successful and
    +// failed streams. This includes incrementing channelz stats and invoking all
    +// registered OnFinish call options.
    +func endOfClientStream(cc *ClientConn, err error, opts ...CallOption) {
    +	if channelz.IsOn() {
    +		if err != nil {
    +			cc.incrCallsFailed()
    +		} else {
    +			cc.incrCallsSucceeded()
    +		}
    +	}
    +
    +	for _, o := range opts {
    +		if o, ok := o.(OnFinishCallOption); ok {
    +			o.OnFinish(err)
    +		}
    +	}
    +}
    +
     func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (_ ClientStream, err error) {
    +	if channelz.IsOn() {
    +		cc.incrCallsStarted()
    +	}
    +	defer func() {
    +		if err != nil {
    +			// Ensure cleanup when stream creation fails.
    +			endOfClientStream(cc, err, opts...)
    +		}
    +	}()
    +
     	// Start tracking the RPC for idleness purposes. This is where a stream is
     	// created for both streaming and unary RPCs, and hence is a good place to
     	// track active RPC count.
    -	if err := cc.idlenessMgr.OnCallBegin(); err != nil {
    -		return nil, err
    -	}
    +	cc.idlenessMgr.OnCallBegin()
    +
     	// Add a calloption, to decrement the active call count, that gets executed
     	// when the RPC completes.
     	opts = append([]CallOption{OnFinish(func(error) { cc.idlenessMgr.OnCallEnd() })}, opts...)
    @@ -202,24 +233,17 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth
     			}
     		}
     	}
    -	if channelz.IsOn() {
    -		cc.incrCallsStarted()
    -		defer func() {
    -			if err != nil {
    -				cc.incrCallsFailed()
    -			}
    -		}()
    -	}
     	// Provide an opportunity for the first RPC to see the first service config
     	// provided by the resolver.
    -	if err := cc.waitForResolvedAddrs(ctx); err != nil {
    +	nameResolutionDelayed, err := cc.waitForResolvedAddrs(ctx)
    +	if err != nil {
     		return nil, err
     	}
     
    -	var mc serviceconfig.MethodConfig
    +	mc := &emptyMethodConfig
     	var onCommit func()
     	newStream := func(ctx context.Context, done func()) (iresolver.ClientStream, error) {
    -		return newClientStreamWithParams(ctx, desc, cc, method, mc, onCommit, done, opts...)
    +		return newClientStreamWithParams(ctx, desc, cc, method, mc, onCommit, done, nameResolutionDelayed, opts...)
     	}
     
     	rpcInfo := iresolver.RPCInfo{Context: ctx, Method: method}
    @@ -239,7 +263,7 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth
     		if rpcConfig.Context != nil {
     			ctx = rpcConfig.Context
     		}
    -		mc = rpcConfig.MethodConfig
    +		mc = &rpcConfig.MethodConfig
     		onCommit = rpcConfig.OnCommitted
     		if rpcConfig.Interceptor != nil {
     			rpcInfo.Context = nil
    @@ -257,7 +281,7 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth
     	return newStream(ctx, func() {})
     }
     
    -func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, mc serviceconfig.MethodConfig, onCommit, doneFunc func(), opts ...CallOption) (_ iresolver.ClientStream, err error) {
    +func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, mc *serviceconfig.MethodConfig, onCommit, doneFunc func(), nameResolutionDelayed bool, opts ...CallOption) (_ iresolver.ClientStream, err error) {
     	callInfo := defaultCallInfo()
     	if mc.WaitForReady != nil {
     		callInfo.failFast = !*mc.WaitForReady
    @@ -296,6 +320,11 @@ func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *Client
     		Method:         method,
     		ContentSubtype: callInfo.contentSubtype,
     		DoneFunc:       doneFunc,
    +		Authority:      callInfo.authority,
    +	}
    +	if allowed := callInfo.acceptedResponseCompressors; len(allowed) > 0 {
    +		headerValue := strings.Join(allowed, ",")
    +		callHdr.AcceptedCompressors = &headerValue
     	}
     
     	// Set our outgoing compression according to the UseCompressor CallOption, if
    @@ -321,19 +350,20 @@ func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *Client
     	}
     
     	cs := &clientStream{
    -		callHdr:      callHdr,
    -		ctx:          ctx,
    -		methodConfig: &mc,
    -		opts:         opts,
    -		callInfo:     callInfo,
    -		cc:           cc,
    -		desc:         desc,
    -		codec:        callInfo.codec,
    -		compressorV0: compressorV0,
    -		compressorV1: compressorV1,
    -		cancel:       cancel,
    -		firstAttempt: true,
    -		onCommit:     onCommit,
    +		callHdr:             callHdr,
    +		ctx:                 ctx,
    +		methodConfig:        mc,
    +		opts:                opts,
    +		callInfo:            callInfo,
    +		cc:                  cc,
    +		desc:                desc,
    +		codec:               callInfo.codec,
    +		compressorV0:        compressorV0,
    +		compressorV1:        compressorV1,
    +		cancel:              cancel,
    +		firstAttempt:        true,
    +		onCommit:            onCommit,
    +		nameResolutionDelay: nameResolutionDelayed,
     	}
     	if !cc.dopts.disableRetry {
     		cs.retryThrottler = cc.retryThrottler.Load().(*retryThrottler)
    @@ -415,19 +445,21 @@ func (cs *clientStream) newAttemptLocked(isTransparent bool) (*csAttempt, error)
     	ctx := newContextWithRPCInfo(cs.ctx, cs.callInfo.failFast, cs.callInfo.codec, cs.compressorV0, cs.compressorV1)
     	method := cs.callHdr.Method
     	var beginTime time.Time
    -	shs := cs.cc.dopts.copts.StatsHandlers
    -	for _, sh := range shs {
    -		ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: method, FailFast: cs.callInfo.failFast})
    +	sh := cs.cc.statsHandler
    +	if sh != nil {
     		beginTime = time.Now()
    -		begin := &stats.Begin{
    +		ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{
    +			FullMethodName: method, FailFast: cs.callInfo.failFast,
    +			NameResolutionDelay: cs.nameResolutionDelay,
    +		})
    +		sh.HandleRPC(ctx, &stats.Begin{
     			Client:                    true,
     			BeginTime:                 beginTime,
     			FailFast:                  cs.callInfo.failFast,
     			IsClientStream:            cs.desc.ClientStreams,
     			IsServerStream:            cs.desc.ServerStreams,
     			IsTransparentRetryAttempt: isTransparent,
    -		}
    -		sh.HandleRPC(ctx, begin)
    +		})
     	}
     
     	var trInfo *traceInfo
    @@ -458,7 +490,7 @@ func (cs *clientStream) newAttemptLocked(isTransparent bool) (*csAttempt, error)
     		beginTime:      beginTime,
     		cs:             cs,
     		decompressorV0: cs.cc.dopts.dc,
    -		statsHandlers:  shs,
    +		statsHandler:   sh,
     		trInfo:         trInfo,
     	}, nil
     }
    @@ -466,8 +498,9 @@ func (cs *clientStream) newAttemptLocked(isTransparent bool) (*csAttempt, error)
     func (a *csAttempt) getTransport() error {
     	cs := a.cs
     
    -	var err error
    -	a.transport, a.pickResult, err = cs.cc.getTransport(a.ctx, cs.callInfo.failFast, cs.callHdr.Method)
    +	pickInfo := balancer.PickInfo{Ctx: a.ctx, FullMethodName: cs.callHdr.Method}
    +	pick, err := cs.cc.pickerWrapper.pick(a.ctx, cs.callInfo.failFast, pickInfo)
    +	a.transport, a.pickResult = pick.transport, pick.result
     	if err != nil {
     		if de, ok := err.(dropError); ok {
     			err = de.error
    @@ -476,7 +509,10 @@ func (a *csAttempt) getTransport() error {
     		return err
     	}
     	if a.trInfo != nil {
    -		a.trInfo.firstLine.SetRemoteAddr(a.transport.RemoteAddr())
    +		a.trInfo.firstLine.SetRemoteAddr(a.transport.Peer().Addr)
    +	}
    +	if pick.blocked && a.statsHandler != nil {
    +		a.statsHandler.HandleRPC(a.ctx, &stats.DelayedPickComplete{})
     	}
     	return nil
     }
    @@ -520,7 +556,7 @@ func (a *csAttempt) newStream() error {
     	}
     	a.transportStream = s
     	a.ctx = s.Context()
    -	a.parser = &parser{r: s, bufferPool: a.cs.cc.dopts.copts.BufferPool}
    +	a.parser = parser{r: s, bufferPool: a.cs.cc.dopts.copts.BufferPool}
     	return nil
     }
     
    @@ -540,6 +576,8 @@ type clientStream struct {
     
     	sentLast bool // sent an end stream
     
    +	receivedFirstMsg bool // set after the first message is received
    +
     	methodConfig *MethodConfig
     
     	ctx context.Context // the application's context, wrapped by stats/tracing
    @@ -573,6 +611,9 @@ type clientStream struct {
     	onCommit         func()
     	replayBuffer     []replayOp // operations to replay on retry
     	replayBufferSize int        // current size of replayBuffer
    +	// nameResolutionDelay indicates if there was a delay in the name resolution.
    +	// This field is only valid on client side, it's always false on server side.
    +	nameResolutionDelay bool
     }
     
     type replayOp struct {
    @@ -587,7 +628,7 @@ type csAttempt struct {
     	cs              *clientStream
     	transport       transport.ClientTransport
     	transportStream *transport.ClientStream
    -	parser          *parser
    +	parser          parser
     	pickResult      balancer.PickResult
     
     	finished        bool
    @@ -601,8 +642,8 @@ type csAttempt struct {
     	// and cleared when the finish method is called.
     	trInfo *traceInfo
     
    -	statsHandlers []stats.Handler
    -	beginTime     time.Time
    +	statsHandler stats.Handler
    +	beginTime    time.Time
     
     	// set for newStream errors that may be transparently retried
     	allowTransparentRetry bool
    @@ -987,7 +1028,7 @@ func (cs *clientStream) RecvMsg(m any) error {
     
     func (cs *clientStream) CloseSend() error {
     	if cs.sentLast {
    -		// TODO: return an error and finish the stream instead, due to API misuse?
    +		// Return a nil error on repeated calls to this method.
     		return nil
     	}
     	cs.sentLast = true
    @@ -1008,7 +1049,10 @@ func (cs *clientStream) CloseSend() error {
     			binlog.Log(cs.ctx, chc)
     		}
     	}
    -	// We never returned an error here for reasons.
    +	// We don't return an error here as we expect users to read all messages
    +	// from the stream and get the RPC status from RecvMsg().  Note that
    +	// SendMsg() must return an error when one occurs so the application
    +	// knows to stop sending messages, but that does not apply here.
     	return nil
     }
     
    @@ -1023,9 +1067,6 @@ func (cs *clientStream) finish(err error) {
     		return
     	}
     	cs.finished = true
    -	for _, onFinish := range cs.callInfo.onFinish {
    -		onFinish(err)
    -	}
     	cs.commitAttemptLocked()
     	if cs.attempt != nil {
     		cs.attempt.finish(err)
    @@ -1065,13 +1106,7 @@ func (cs *clientStream) finish(err error) {
     	if err == nil {
     		cs.retryThrottler.successfulRPC()
     	}
    -	if channelz.IsOn() {
    -		if err != nil {
    -			cs.cc.incrCallsFailed()
    -		} else {
    -			cs.cc.incrCallsSucceeded()
    -		}
    -	}
    +	endOfClientStream(cs.cc, err, cs.opts...)
     	cs.cancel()
     }
     
    @@ -1093,17 +1128,15 @@ func (a *csAttempt) sendMsg(m any, hdr []byte, payld mem.BufferSlice, dataLength
     		}
     		return io.EOF
     	}
    -	if len(a.statsHandlers) != 0 {
    -		for _, sh := range a.statsHandlers {
    -			sh.HandleRPC(a.ctx, outPayload(true, m, dataLength, payloadLength, time.Now()))
    -		}
    +	if a.statsHandler != nil {
    +		a.statsHandler.HandleRPC(a.ctx, outPayload(true, m, dataLength, payloadLength, time.Now()))
     	}
     	return nil
     }
     
     func (a *csAttempt) recvMsg(m any, payInfo *payloadInfo) (err error) {
     	cs := a.cs
    -	if len(a.statsHandlers) != 0 && payInfo == nil {
    +	if a.statsHandler != nil && payInfo == nil {
     		payInfo = &payloadInfo{}
     		defer payInfo.free()
     	}
    @@ -1117,6 +1150,10 @@ func (a *csAttempt) recvMsg(m any, payInfo *payloadInfo) (err error) {
     				a.decompressorV0 = nil
     				a.decompressorV1 = encoding.GetCompressor(ct)
     			}
    +			// Validate that the compression method is acceptable for this call.
    +			if !acceptedCompressorAllows(cs.callInfo.acceptedResponseCompressors, ct) {
    +				return status.Errorf(codes.Internal, "grpc: peer compressed the response with %q which is not allowed by AcceptCompressors", ct)
    +			}
     		} else {
     			// No compression is used; disable our decompressor.
     			a.decompressorV0 = nil
    @@ -1124,16 +1161,21 @@ func (a *csAttempt) recvMsg(m any, payInfo *payloadInfo) (err error) {
     		// Only initialize this state once per stream.
     		a.decompressorSet = true
     	}
    -	if err := recv(a.parser, cs.codec, a.transportStream, a.decompressorV0, m, *cs.callInfo.maxReceiveMessageSize, payInfo, a.decompressorV1, false); err != nil {
    +	if err := recv(&a.parser, cs.codec, a.transportStream, a.decompressorV0, m, *cs.callInfo.maxReceiveMessageSize, payInfo, a.decompressorV1, false); err != nil {
     		if err == io.EOF {
     			if statusErr := a.transportStream.Status().Err(); statusErr != nil {
     				return statusErr
     			}
    +			// Received no msg and status OK for non-server streaming rpcs.
    +			if !cs.desc.ServerStreams && !cs.receivedFirstMsg {
    +				return status.Error(codes.Internal, "cardinality violation: received no response message from non-server-streaming RPC")
    +			}
     			return io.EOF // indicates successful end of stream.
     		}
     
     		return toRPCErr(err)
     	}
    +	cs.receivedFirstMsg = true
     	if a.trInfo != nil {
     		a.mu.Lock()
     		if a.trInfo.tr != nil {
    @@ -1141,8 +1183,8 @@ func (a *csAttempt) recvMsg(m any, payInfo *payloadInfo) (err error) {
     		}
     		a.mu.Unlock()
     	}
    -	for _, sh := range a.statsHandlers {
    -		sh.HandleRPC(a.ctx, &stats.InPayload{
    +	if a.statsHandler != nil {
    +		a.statsHandler.HandleRPC(a.ctx, &stats.InPayload{
     			Client:           true,
     			RecvTime:         time.Now(),
     			Payload:          m,
    @@ -1157,12 +1199,12 @@ func (a *csAttempt) recvMsg(m any, payInfo *payloadInfo) (err error) {
     	}
     	// Special handling for non-server-stream rpcs.
     	// This recv expects EOF or errors, so we don't collect inPayload.
    -	if err := recv(a.parser, cs.codec, a.transportStream, a.decompressorV0, m, *cs.callInfo.maxReceiveMessageSize, nil, a.decompressorV1, false); err == io.EOF {
    +	if err := recv(&a.parser, cs.codec, a.transportStream, a.decompressorV0, m, *cs.callInfo.maxReceiveMessageSize, nil, a.decompressorV1, false); err == io.EOF {
     		return a.transportStream.Status().Err() // non-server streaming Recv returns nil on success
     	} else if err != nil {
     		return toRPCErr(err)
     	}
    -	return toRPCErr(errors.New("grpc: client streaming protocol violation: get , want "))
    +	return status.Error(codes.Internal, "cardinality violation: expected  for non server-streaming RPCs, but received another message")
     }
     
     func (a *csAttempt) finish(err error) {
    @@ -1195,15 +1237,14 @@ func (a *csAttempt) finish(err error) {
     			ServerLoad:    balancerload.Parse(tr),
     		})
     	}
    -	for _, sh := range a.statsHandlers {
    -		end := &stats.End{
    +	if a.statsHandler != nil {
    +		a.statsHandler.HandleRPC(a.ctx, &stats.End{
     			Client:    true,
     			BeginTime: a.beginTime,
     			EndTime:   time.Now(),
     			Trailer:   tr,
     			Error:     err,
    -		}
    -		sh.HandleRPC(a.ctx, end)
    +		})
     	}
     	if a.trInfo != nil && a.trInfo.tr != nil {
     		if err == nil {
    @@ -1309,7 +1350,7 @@ func newNonRetryClientStream(ctx context.Context, desc *StreamDesc, method strin
     		return nil, err
     	}
     	as.transportStream = s
    -	as.parser = &parser{r: s, bufferPool: ac.dopts.copts.BufferPool}
    +	as.parser = parser{r: s, bufferPool: ac.dopts.copts.BufferPool}
     	ac.incrCallsStarted()
     	if desc != unaryStreamDesc {
     		// Listen on stream context to cleanup when the stream context is
    @@ -1344,6 +1385,7 @@ type addrConnStream struct {
     	transport        transport.ClientTransport
     	ctx              context.Context
     	sentLast         bool
    +	receivedFirstMsg bool
     	desc             *StreamDesc
     	codec            baseCodec
     	sendCompressorV0 Compressor
    @@ -1351,7 +1393,7 @@ type addrConnStream struct {
     	decompressorSet  bool
     	decompressorV0   Decompressor
     	decompressorV1   encoding.Compressor
    -	parser           *parser
    +	parser           parser
     
     	// mu guards finished and is held for the entire finish method.
     	mu       sync.Mutex
    @@ -1372,7 +1414,7 @@ func (as *addrConnStream) Trailer() metadata.MD {
     
     func (as *addrConnStream) CloseSend() error {
     	if as.sentLast {
    -		// TODO: return an error and finish the stream instead, due to API misuse?
    +		// Return a nil error on repeated calls to this method.
     		return nil
     	}
     	as.sentLast = true
    @@ -1457,6 +1499,10 @@ func (as *addrConnStream) RecvMsg(m any) (err error) {
     				as.decompressorV0 = nil
     				as.decompressorV1 = encoding.GetCompressor(ct)
     			}
    +			// Validate that the compression method is acceptable for this call.
    +			if !acceptedCompressorAllows(as.callInfo.acceptedResponseCompressors, ct) {
    +				return status.Errorf(codes.Internal, "grpc: peer compressed the response with %q which is not allowed by AcceptCompressors", ct)
    +			}
     		} else {
     			// No compression is used; disable our decompressor.
     			as.decompressorV0 = nil
    @@ -1464,15 +1510,20 @@ func (as *addrConnStream) RecvMsg(m any) (err error) {
     		// Only initialize this state once per stream.
     		as.decompressorSet = true
     	}
    -	if err := recv(as.parser, as.codec, as.transportStream, as.decompressorV0, m, *as.callInfo.maxReceiveMessageSize, nil, as.decompressorV1, false); err != nil {
    +	if err := recv(&as.parser, as.codec, as.transportStream, as.decompressorV0, m, *as.callInfo.maxReceiveMessageSize, nil, as.decompressorV1, false); err != nil {
     		if err == io.EOF {
     			if statusErr := as.transportStream.Status().Err(); statusErr != nil {
     				return statusErr
     			}
    +			// Received no msg and status OK for non-server streaming rpcs.
    +			if !as.desc.ServerStreams && !as.receivedFirstMsg {
    +				return status.Error(codes.Internal, "cardinality violation: received no response message from non-server-streaming RPC")
    +			}
     			return io.EOF // indicates successful end of stream.
     		}
     		return toRPCErr(err)
     	}
    +	as.receivedFirstMsg = true
     
     	if as.desc.ServerStreams {
     		// Subsequent messages should be received by subsequent RecvMsg calls.
    @@ -1481,12 +1532,12 @@ func (as *addrConnStream) RecvMsg(m any) (err error) {
     
     	// Special handling for non-server-stream rpcs.
     	// This recv expects EOF or errors, so we don't collect inPayload.
    -	if err := recv(as.parser, as.codec, as.transportStream, as.decompressorV0, m, *as.callInfo.maxReceiveMessageSize, nil, as.decompressorV1, false); err == io.EOF {
    +	if err := recv(&as.parser, as.codec, as.transportStream, as.decompressorV0, m, *as.callInfo.maxReceiveMessageSize, nil, as.decompressorV1, false); err == io.EOF {
     		return as.transportStream.Status().Err() // non-server streaming Recv returns nil on success
     	} else if err != nil {
     		return toRPCErr(err)
     	}
    -	return toRPCErr(errors.New("grpc: client streaming protocol violation: get , want "))
    +	return status.Error(codes.Internal, "cardinality violation: expected  for non server-streaming RPCs, but received another message")
     }
     
     func (as *addrConnStream) finish(err error) {
    @@ -1569,8 +1620,9 @@ type ServerStream interface {
     type serverStream struct {
     	ctx   context.Context
     	s     *transport.ServerStream
    -	p     *parser
    +	p     parser
     	codec baseCodec
    +	desc  *StreamDesc
     
     	compressorV0   Compressor
     	compressorV1   encoding.Compressor
    @@ -1579,11 +1631,13 @@ type serverStream struct {
     
     	sendCompressorName string
     
    +	recvFirstMsg bool // set after the first message is received
    +
     	maxReceiveMessageSize int
     	maxSendMessageSize    int
     	trInfo                *traceInfo
     
    -	statsHandler []stats.Handler
    +	statsHandler stats.Handler
     
     	binlogs []binarylog.MethodLogger
     	// serverHeaderBinlogged indicates whether server header has been logged. It
    @@ -1719,10 +1773,8 @@ func (ss *serverStream) SendMsg(m any) (err error) {
     			binlog.Log(ss.ctx, sm)
     		}
     	}
    -	if len(ss.statsHandler) != 0 {
    -		for _, sh := range ss.statsHandler {
    -			sh.HandleRPC(ss.s.Context(), outPayload(false, m, dataLen, payloadLen, time.Now()))
    -		}
    +	if ss.statsHandler != nil {
    +		ss.statsHandler.HandleRPC(ss.s.Context(), outPayload(false, m, dataLen, payloadLen, time.Now()))
     	}
     	return nil
     }
    @@ -1753,11 +1805,11 @@ func (ss *serverStream) RecvMsg(m any) (err error) {
     		}
     	}()
     	var payInfo *payloadInfo
    -	if len(ss.statsHandler) != 0 || len(ss.binlogs) != 0 {
    +	if ss.statsHandler != nil || len(ss.binlogs) != 0 {
     		payInfo = &payloadInfo{}
     		defer payInfo.free()
     	}
    -	if err := recv(ss.p, ss.codec, ss.s, ss.decompressorV0, m, ss.maxReceiveMessageSize, payInfo, ss.decompressorV1, true); err != nil {
    +	if err := recv(&ss.p, ss.codec, ss.s, ss.decompressorV0, m, ss.maxReceiveMessageSize, payInfo, ss.decompressorV1, true); err != nil {
     		if err == io.EOF {
     			if len(ss.binlogs) != 0 {
     				chc := &binarylog.ClientHalfClose{}
    @@ -1765,6 +1817,10 @@ func (ss *serverStream) RecvMsg(m any) (err error) {
     					binlog.Log(ss.ctx, chc)
     				}
     			}
    +			// Received no request msg for non-client streaming rpcs.
    +			if !ss.desc.ClientStreams && !ss.recvFirstMsg {
    +				return status.Error(codes.Internal, "cardinality violation: received no request message from non-client-streaming RPC")
    +			}
     			return err
     		}
     		if err == io.ErrUnexpectedEOF {
    @@ -1772,16 +1828,15 @@ func (ss *serverStream) RecvMsg(m any) (err error) {
     		}
     		return toRPCErr(err)
     	}
    -	if len(ss.statsHandler) != 0 {
    -		for _, sh := range ss.statsHandler {
    -			sh.HandleRPC(ss.s.Context(), &stats.InPayload{
    -				RecvTime:         time.Now(),
    -				Payload:          m,
    -				Length:           payInfo.uncompressedBytes.Len(),
    -				WireLength:       payInfo.compressedLength + headerLen,
    -				CompressedLength: payInfo.compressedLength,
    -			})
    -		}
    +	ss.recvFirstMsg = true
    +	if ss.statsHandler != nil {
    +		ss.statsHandler.HandleRPC(ss.s.Context(), &stats.InPayload{
    +			RecvTime:         time.Now(),
    +			Payload:          m,
    +			Length:           payInfo.uncompressedBytes.Len(),
    +			WireLength:       payInfo.compressedLength + headerLen,
    +			CompressedLength: payInfo.compressedLength,
    +		})
     	}
     	if len(ss.binlogs) != 0 {
     		cm := &binarylog.ClientMessage{
    @@ -1791,7 +1846,19 @@ func (ss *serverStream) RecvMsg(m any) (err error) {
     			binlog.Log(ss.ctx, cm)
     		}
     	}
    -	return nil
    +
    +	if ss.desc.ClientStreams {
    +		// Subsequent messages should be received by subsequent RecvMsg calls.
    +		return nil
    +	}
    +	// Special handling for non-client-stream rpcs.
    +	// This recv expects EOF or errors, so we don't collect inPayload.
    +	if err := recv(&ss.p, ss.codec, ss.s, ss.decompressorV0, m, ss.maxReceiveMessageSize, nil, ss.decompressorV1, true); err == io.EOF {
    +		return nil
    +	} else if err != nil {
    +		return err
    +	}
    +	return status.Error(codes.Internal, "cardinality violation: received multiple request messages for non-client-streaming RPC")
     }
     
     // MethodFromServerStream returns the method string for the input stream.
    diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go
    index 783c41f78..ff7840fd8 100644
    --- a/vendor/google.golang.org/grpc/version.go
    +++ b/vendor/google.golang.org/grpc/version.go
    @@ -19,4 +19,4 @@
     package grpc
     
     // Version is the current grpc version.
    -const Version = "1.71.0"
    +const Version = "1.78.0"
    diff --git a/vendor/google.golang.org/protobuf/encoding/protowire/wire.go b/vendor/google.golang.org/protobuf/encoding/protowire/wire.go
    index e942bc983..743bfb81d 100644
    --- a/vendor/google.golang.org/protobuf/encoding/protowire/wire.go
    +++ b/vendor/google.golang.org/protobuf/encoding/protowire/wire.go
    @@ -371,7 +371,31 @@ func ConsumeVarint(b []byte) (v uint64, n int) {
     func SizeVarint(v uint64) int {
     	// This computes 1 + (bits.Len64(v)-1)/7.
     	// 9/64 is a good enough approximation of 1/7
    -	return int(9*uint32(bits.Len64(v))+64) / 64
    +	//
    +	// The Go compiler can translate the bits.LeadingZeros64 call into the LZCNT
    +	// instruction, which is very fast on CPUs from the last few years. The
    +	// specific way of expressing the calculation matches C++ Protobuf, see
    +	// https://godbolt.org/z/4P3h53oM4 for the C++ code and how gcc/clang
    +	// optimize that function for GOAMD64=v1 and GOAMD64=v3 (-march=haswell).
    +
    +	// By OR'ing v with 1, we guarantee that v is never 0, without changing the
    +	// result of SizeVarint. LZCNT is not defined for 0, meaning the compiler
    +	// needs to add extra instructions to handle that case.
    +	//
    +	// The Go compiler currently (go1.24.4) does not make use of this knowledge.
    +	// This opportunity (removing the XOR instruction, which handles the 0 case)
    +	// results in a small (1%) performance win across CPU architectures.
    +	//
    +	// Independently of avoiding the 0 case, we need the v |= 1 line because
    +	// it allows the Go compiler to eliminate an extra XCHGL barrier.
    +	v |= 1
    +
    +	// It would be clearer to write log2value := 63 - uint32(...), but
    +	// writing uint32(...) ^ 63 is much more efficient (-14% ARM, -20% Intel).
    +	// Proof of identity for our value range [0..63]:
    +	// https://go.dev/play/p/Pdn9hEWYakX
    +	log2value := uint32(bits.LeadingZeros64(v)) ^ 63
    +	return int((log2value*9 + (64 + 9)) / 64)
     }
     
     // AppendFixed32 appends v to b as a little-endian uint32.
    diff --git a/vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb b/vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb
    index 5a57ef6f3..04696351e 100644
    Binary files a/vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb and b/vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb differ
    diff --git a/vendor/google.golang.org/protobuf/internal/editionssupport/editions.go b/vendor/google.golang.org/protobuf/internal/editionssupport/editions.go
    index bf1aba0e8..7b9f01afb 100644
    --- a/vendor/google.golang.org/protobuf/internal/editionssupport/editions.go
    +++ b/vendor/google.golang.org/protobuf/internal/editionssupport/editions.go
    @@ -9,7 +9,7 @@ import "google.golang.org/protobuf/types/descriptorpb"
     
     const (
     	Minimum = descriptorpb.Edition_EDITION_PROTO2
    -	Maximum = descriptorpb.Edition_EDITION_2023
    +	Maximum = descriptorpb.Edition_EDITION_2024
     
     	// MaximumKnown is the maximum edition that is known to Go Protobuf, but not
     	// declared as supported. In other words: end users cannot use it, but
    diff --git a/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go b/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go
    index 669133d04..c96e44834 100644
    --- a/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go
    +++ b/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go
    @@ -32,7 +32,7 @@ var byteType = reflect.TypeOf(byte(0))
     func Unmarshal(tag string, goType reflect.Type, evs protoreflect.EnumValueDescriptors) protoreflect.FieldDescriptor {
     	f := new(filedesc.Field)
     	f.L0.ParentFile = filedesc.SurrogateProto2
    -	f.L1.EditionFeatures = f.L0.ParentFile.L1.EditionFeatures
    +	packed := false
     	for len(tag) > 0 {
     		i := strings.IndexByte(tag, ',')
     		if i < 0 {
    @@ -108,7 +108,7 @@ func Unmarshal(tag string, goType reflect.Type, evs protoreflect.EnumValueDescri
     				f.L1.StringName.InitJSON(jsonName)
     			}
     		case s == "packed":
    -			f.L1.EditionFeatures.IsPacked = true
    +			packed = true
     		case strings.HasPrefix(s, "def="):
     			// The default tag is special in that everything afterwards is the
     			// default regardless of the presence of commas.
    @@ -121,6 +121,13 @@ func Unmarshal(tag string, goType reflect.Type, evs protoreflect.EnumValueDescri
     		tag = strings.TrimPrefix(tag[i:], ",")
     	}
     
    +	// Update EditionFeatures after the loop and after we know whether this is
    +	// a proto2 or proto3 field.
    +	f.L1.EditionFeatures = f.L0.ParentFile.L1.EditionFeatures
    +	if packed {
    +		f.L1.EditionFeatures.IsPacked = true
    +	}
    +
     	// The generator uses the group message name instead of the field name.
     	// We obtain the real field name by lowercasing the group name.
     	if f.L1.Kind == protoreflect.GroupKind {
    diff --git a/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go b/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go
    index 099b2bf45..9aa7a9bb7 100644
    --- a/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go
    +++ b/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go
    @@ -424,27 +424,34 @@ func (d *Decoder) parseFieldName() (tok Token, err error) {
     	return Token{}, d.newSyntaxError("invalid field name: %s", errId(d.in))
     }
     
    -// parseTypeName parses Any type URL or extension field name. The name is
    -// enclosed in [ and ] characters. The C++ parser does not handle many legal URL
    -// strings. This implementation is more liberal and allows for the pattern
    -// ^[-_a-zA-Z0-9]+([./][-_a-zA-Z0-9]+)*`). Whitespaces and comments are allowed
    -// in between [ ], '.', '/' and the sub names.
    +// parseTypeName parses an Any type URL or an extension field name. The name is
    +// enclosed in [ and ] characters. We allow almost arbitrary type URL prefixes,
    +// closely following the text-format spec [1,2]. We implement "ExtensionName |
    +// AnyName" as follows (with some exceptions for backwards compatibility):
    +//
    +// char      = [-_a-zA-Z0-9]
    +// url_char  = char | [.~!$&'()*+,;=] | "%", hex, hex
    +//
    +// Ident         = char, { char }
    +// TypeName      = Ident, { ".", Ident } ;
    +// UrlPrefix     = url_char, { url_char | "/" } ;
    +// ExtensionName = "[", TypeName, "]" ;
    +// AnyName       = "[", UrlPrefix, "/", TypeName, "]" ;
    +//
    +// Additionally, we allow arbitrary whitespace and comments between [ and ].
    +//
    +// [1] https://protobuf.dev/reference/protobuf/textformat-spec/#characters
    +// [2] https://protobuf.dev/reference/protobuf/textformat-spec/#field-names
     func (d *Decoder) parseTypeName() (Token, error) {
    -	startPos := len(d.orig) - len(d.in)
     	// Use alias s to advance first in order to use d.in for error handling.
    -	// Caller already checks for [ as first character.
    +	// Caller already checks for [ as first character (d.in[0] == '[').
     	s := consume(d.in[1:], 0)
     	if len(s) == 0 {
     		return Token{}, ErrUnexpectedEOF
     	}
     
    +	// Collect everything between [ and ] in name.
     	var name []byte
    -	for len(s) > 0 && isTypeNameChar(s[0]) {
    -		name = append(name, s[0])
    -		s = s[1:]
    -	}
    -	s = consume(s, 0)
    -
     	var closed bool
     	for len(s) > 0 && !closed {
     		switch {
    @@ -452,23 +459,20 @@ func (d *Decoder) parseTypeName() (Token, error) {
     			s = s[1:]
     			closed = true
     
    -		case s[0] == '/', s[0] == '.':
    -			if len(name) > 0 && (name[len(name)-1] == '/' || name[len(name)-1] == '.') {
    -				return Token{}, d.newSyntaxError("invalid type URL/extension field name: %s",
    -					d.orig[startPos:len(d.orig)-len(s)+1])
    -			}
    +		case s[0] == '/' || isTypeNameChar(s[0]) || isUrlExtraChar(s[0]):
     			name = append(name, s[0])
    -			s = s[1:]
    -			s = consume(s, 0)
    -			for len(s) > 0 && isTypeNameChar(s[0]) {
    -				name = append(name, s[0])
    -				s = s[1:]
    +			s = consume(s[1:], 0)
    +
    +		// URL percent-encoded chars
    +		case s[0] == '%':
    +			if len(s) < 3 || !isHexChar(s[1]) || !isHexChar(s[2]) {
    +				return Token{}, d.parseTypeNameError(s, 3)
     			}
    -			s = consume(s, 0)
    +			name = append(name, s[0], s[1], s[2])
    +			s = consume(s[3:], 0)
     
     		default:
    -			return Token{}, d.newSyntaxError(
    -				"invalid type URL/extension field name: %s", d.orig[startPos:len(d.orig)-len(s)+1])
    +			return Token{}, d.parseTypeNameError(s, 1)
     		}
     	}
     
    @@ -476,15 +480,38 @@ func (d *Decoder) parseTypeName() (Token, error) {
     		return Token{}, ErrUnexpectedEOF
     	}
     
    -	// First character cannot be '.'. Last character cannot be '.' or '/'.
    -	size := len(name)
    -	if size == 0 || name[0] == '.' || name[size-1] == '.' || name[size-1] == '/' {
    -		return Token{}, d.newSyntaxError("invalid type URL/extension field name: %s",
    -			d.orig[startPos:len(d.orig)-len(s)])
    +	// Split collected name on last '/' into urlPrefix and typeName (if '/' is
    +	// present).
    +	typeName := name
    +	if i := bytes.LastIndexByte(name, '/'); i != -1 {
    +		urlPrefix := name[:i]
    +		typeName = name[i+1:]
    +
    +		// urlPrefix may be empty (for backwards compatibility).
    +		// If non-empty, it must not start with '/'.
    +		if len(urlPrefix) > 0 && urlPrefix[0] == '/' {
    +			return Token{}, d.parseTypeNameError(s, 0)
    +		}
     	}
     
    +	// typeName must not be empty (note: "" splits to [""]) and all identifier
    +	// parts must not be empty.
    +	for _, ident := range bytes.Split(typeName, []byte{'.'}) {
    +		if len(ident) == 0 {
    +			return Token{}, d.parseTypeNameError(s, 0)
    +		}
    +	}
    +
    +	// typeName must not contain any percent-encoded or special URL chars.
    +	for _, b := range typeName {
    +		if b == '%' || (b != '.' && isUrlExtraChar(b)) {
    +			return Token{}, d.parseTypeNameError(s, 0)
    +		}
    +	}
    +
    +	startPos := len(d.orig) - len(d.in)
    +	endPos := len(d.orig) - len(s)
     	d.in = s
    -	endPos := len(d.orig) - len(d.in)
     	d.consume(0)
     
     	return Token{
    @@ -496,16 +523,32 @@ func (d *Decoder) parseTypeName() (Token, error) {
     	}, nil
     }
     
    +func (d *Decoder) parseTypeNameError(s []byte, numUnconsumedChars int) error {
    +	return d.newSyntaxError(
    +		"invalid type URL/extension field name: %s",
    +		d.in[:len(d.in)-len(s)+min(numUnconsumedChars, len(s))],
    +	)
    +}
    +
    +func isHexChar(b byte) bool {
    +	return ('0' <= b && b <= '9') ||
    +		('a' <= b && b <= 'f') ||
    +		('A' <= b && b <= 'F')
    +}
    +
     func isTypeNameChar(b byte) bool {
    -	return (b == '-' || b == '_' ||
    +	return b == '-' || b == '_' ||
     		('0' <= b && b <= '9') ||
     		('a' <= b && b <= 'z') ||
    -		('A' <= b && b <= 'Z'))
    +		('A' <= b && b <= 'Z')
     }
     
    -func isWhiteSpace(b byte) bool {
    +// isUrlExtraChar complements isTypeNameChar with extra characters that we allow
    +// in URLs but not in type names. Note that '/' is not included so that it can
    +// be treated specially.
    +func isUrlExtraChar(b byte) bool {
     	switch b {
    -	case ' ', '\n', '\r', '\t':
    +	case '.', '~', '!', '$', '&', '(', ')', '*', '+', ',', ';', '=':
     		return true
     	default:
     		return false
    diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go
    index 688aabe43..c775e5832 100644
    --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go
    +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go
    @@ -32,6 +32,7 @@ const (
     	EditionProto3      Edition = 999
     	Edition2023        Edition = 1000
     	Edition2024        Edition = 1001
    +	EditionUnstable    Edition = 9999
     	EditionUnsupported Edition = 100000
     )
     
    @@ -72,9 +73,10 @@ type (
     		EditionFeatures EditionFeatures
     	}
     	FileL2 struct {
    -		Options   func() protoreflect.ProtoMessage
    -		Imports   FileImports
    -		Locations SourceLocations
    +		Options       func() protoreflect.ProtoMessage
    +		Imports       FileImports
    +		OptionImports func() protoreflect.FileImports
    +		Locations     SourceLocations
     	}
     
     	// EditionFeatures is a frequently-instantiated struct, so please take care
    @@ -126,12 +128,9 @@ func (fd *File) ParentFile() protoreflect.FileDescriptor { return fd }
     func (fd *File) Parent() protoreflect.Descriptor         { return nil }
     func (fd *File) Index() int                              { return 0 }
     func (fd *File) Syntax() protoreflect.Syntax             { return fd.L1.Syntax }
    -
    -// Not exported and just used to reconstruct the original FileDescriptor proto
    -func (fd *File) Edition() int32                  { return int32(fd.L1.Edition) }
    -func (fd *File) Name() protoreflect.Name         { return fd.L1.Package.Name() }
    -func (fd *File) FullName() protoreflect.FullName { return fd.L1.Package }
    -func (fd *File) IsPlaceholder() bool             { return false }
    +func (fd *File) Name() protoreflect.Name                 { return fd.L1.Package.Name() }
    +func (fd *File) FullName() protoreflect.FullName         { return fd.L1.Package }
    +func (fd *File) IsPlaceholder() bool                     { return false }
     func (fd *File) Options() protoreflect.ProtoMessage {
     	if f := fd.lazyInit().Options; f != nil {
     		return f()
    @@ -150,6 +149,16 @@ func (fd *File) Format(s fmt.State, r rune)                    { descfmt.FormatD
     func (fd *File) ProtoType(protoreflect.FileDescriptor)         {}
     func (fd *File) ProtoInternal(pragma.DoNotImplement)           {}
     
    +// The next two are not part of the FileDescriptor interface. They are just used to reconstruct
    +// the original FileDescriptor proto.
    +func (fd *File) Edition() int32 { return int32(fd.L1.Edition) }
    +func (fd *File) OptionImports() protoreflect.FileImports {
    +	if f := fd.lazyInit().OptionImports; f != nil {
    +		return f()
    +	}
    +	return emptyFiles
    +}
    +
     func (fd *File) lazyInit() *FileL2 {
     	if atomic.LoadUint32(&fd.once) == 0 {
     		fd.lazyInitOnce()
    @@ -182,9 +191,9 @@ type (
     		L2 *EnumL2 // protected by fileDesc.once
     	}
     	EnumL1 struct {
    -		eagerValues bool // controls whether EnumL2.Values is already populated
    -
     		EditionFeatures EditionFeatures
    +		Visibility      int32
    +		eagerValues     bool // controls whether EnumL2.Values is already populated
     	}
     	EnumL2 struct {
     		Options        func() protoreflect.ProtoMessage
    @@ -219,6 +228,11 @@ func (ed *Enum) ReservedNames() protoreflect.Names       { return &ed.lazyInit()
     func (ed *Enum) ReservedRanges() protoreflect.EnumRanges { return &ed.lazyInit().ReservedRanges }
     func (ed *Enum) Format(s fmt.State, r rune)              { descfmt.FormatDesc(s, r, ed) }
     func (ed *Enum) ProtoType(protoreflect.EnumDescriptor)   {}
    +
    +// This is not part of the EnumDescriptor interface. It is just used to reconstruct
    +// the original FileDescriptor proto.
    +func (ed *Enum) Visibility() int32 { return ed.L1.Visibility }
    +
     func (ed *Enum) lazyInit() *EnumL2 {
     	ed.L0.ParentFile.lazyInit() // implicitly initializes L2
     	return ed.L2
    @@ -244,13 +258,13 @@ type (
     		L2 *MessageL2 // protected by fileDesc.once
     	}
     	MessageL1 struct {
    -		Enums        Enums
    -		Messages     Messages
    -		Extensions   Extensions
    -		IsMapEntry   bool // promoted from google.protobuf.MessageOptions
    -		IsMessageSet bool // promoted from google.protobuf.MessageOptions
    -
    +		Enums           Enums
    +		Messages        Messages
    +		Extensions      Extensions
     		EditionFeatures EditionFeatures
    +		Visibility      int32
    +		IsMapEntry      bool // promoted from google.protobuf.MessageOptions
    +		IsMessageSet    bool // promoted from google.protobuf.MessageOptions
     	}
     	MessageL2 struct {
     		Options               func() protoreflect.ProtoMessage
    @@ -319,6 +333,11 @@ func (md *Message) Messages() protoreflect.MessageDescriptors     { return &md.L
     func (md *Message) Extensions() protoreflect.ExtensionDescriptors { return &md.L1.Extensions }
     func (md *Message) ProtoType(protoreflect.MessageDescriptor)      {}
     func (md *Message) Format(s fmt.State, r rune)                    { descfmt.FormatDesc(s, r, md) }
    +
    +// This is not part of the MessageDescriptor interface. It is just used to reconstruct
    +// the original FileDescriptor proto.
    +func (md *Message) Visibility() int32 { return md.L1.Visibility }
    +
     func (md *Message) lazyInit() *MessageL2 {
     	md.L0.ParentFile.lazyInit() // implicitly initializes L2
     	return md.L2
    diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go
    index d2f549497..e91860f5a 100644
    --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go
    +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go
    @@ -284,6 +284,13 @@ func (ed *Enum) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd protorefl
     			case genid.EnumDescriptorProto_Value_field_number:
     				numValues++
     			}
    +		case protowire.VarintType:
    +			v, m := protowire.ConsumeVarint(b)
    +			b = b[m:]
    +			switch num {
    +			case genid.EnumDescriptorProto_Visibility_field_number:
    +				ed.L1.Visibility = int32(v)
    +			}
     		default:
     			m := protowire.ConsumeFieldValue(num, typ, b)
     			b = b[m:]
    @@ -365,6 +372,13 @@ func (md *Message) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd protor
     				md.unmarshalSeedOptions(v)
     			}
     			prevField = num
    +		case protowire.VarintType:
    +			v, m := protowire.ConsumeVarint(b)
    +			b = b[m:]
    +			switch num {
    +			case genid.DescriptorProto_Visibility_field_number:
    +				md.L1.Visibility = int32(v)
    +			}
     		default:
     			m := protowire.ConsumeFieldValue(num, typ, b)
     			b = b[m:]
    diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go
    index d4c94458b..78f02b1b4 100644
    --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go
    +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go
    @@ -134,6 +134,7 @@ func (fd *File) unmarshalFull(b []byte) {
     
     	var enumIdx, messageIdx, extensionIdx, serviceIdx int
     	var rawOptions []byte
    +	var optionImports []string
     	fd.L2 = new(FileL2)
     	for len(b) > 0 {
     		num, typ, n := protowire.ConsumeTag(b)
    @@ -157,6 +158,8 @@ func (fd *File) unmarshalFull(b []byte) {
     					imp = PlaceholderFile(path)
     				}
     				fd.L2.Imports = append(fd.L2.Imports, protoreflect.FileImport{FileDescriptor: imp})
    +			case genid.FileDescriptorProto_OptionDependency_field_number:
    +				optionImports = append(optionImports, sb.MakeString(v))
     			case genid.FileDescriptorProto_EnumType_field_number:
     				fd.L1.Enums.List[enumIdx].unmarshalFull(v, sb)
     				enumIdx++
    @@ -178,6 +181,23 @@ func (fd *File) unmarshalFull(b []byte) {
     		}
     	}
     	fd.L2.Options = fd.builder.optionsUnmarshaler(&descopts.File, rawOptions)
    +	if len(optionImports) > 0 {
    +		var imps FileImports
    +		var once sync.Once
    +		fd.L2.OptionImports = func() protoreflect.FileImports {
    +			once.Do(func() {
    +				imps = make(FileImports, len(optionImports))
    +				for i, path := range optionImports {
    +					imp, _ := fd.builder.FileRegistry.FindFileByPath(path)
    +					if imp == nil {
    +						imp = PlaceholderFile(path)
    +					}
    +					imps[i] = protoreflect.FileImport{FileDescriptor: imp}
    +				}
    +			})
    +			return &imps
    +		}
    +	}
     }
     
     func (ed *Enum) unmarshalFull(b []byte, sb *strs.Builder) {
    @@ -310,7 +330,6 @@ func (md *Message) unmarshalFull(b []byte, sb *strs.Builder) {
     				md.L1.Extensions.List[extensionIdx].unmarshalFull(v, sb)
     				extensionIdx++
     			case genid.DescriptorProto_Options_field_number:
    -				md.unmarshalOptions(v)
     				rawOptions = appendOptions(rawOptions, v)
     			}
     		default:
    @@ -336,27 +355,6 @@ func (md *Message) unmarshalFull(b []byte, sb *strs.Builder) {
     	md.L2.Options = md.L0.ParentFile.builder.optionsUnmarshaler(&descopts.Message, rawOptions)
     }
     
    -func (md *Message) unmarshalOptions(b []byte) {
    -	for len(b) > 0 {
    -		num, typ, n := protowire.ConsumeTag(b)
    -		b = b[n:]
    -		switch typ {
    -		case protowire.VarintType:
    -			v, m := protowire.ConsumeVarint(b)
    -			b = b[m:]
    -			switch num {
    -			case genid.MessageOptions_MapEntry_field_number:
    -				md.L1.IsMapEntry = protowire.DecodeBool(v)
    -			case genid.MessageOptions_MessageSetWireFormat_field_number:
    -				md.L1.IsMessageSet = protowire.DecodeBool(v)
    -			}
    -		default:
    -			m := protowire.ConsumeFieldValue(num, typ, b)
    -			b = b[m:]
    -		}
    -	}
    -}
    -
     func unmarshalMessageReservedRange(b []byte) (r [2]protoreflect.FieldNumber) {
     	for len(b) > 0 {
     		num, typ, n := protowire.ConsumeTag(b)
    diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/editions.go b/vendor/google.golang.org/protobuf/internal/filedesc/editions.go
    index 10132c9b3..66ba90680 100644
    --- a/vendor/google.golang.org/protobuf/internal/filedesc/editions.go
    +++ b/vendor/google.golang.org/protobuf/internal/filedesc/editions.go
    @@ -13,8 +13,10 @@ import (
     	"google.golang.org/protobuf/reflect/protoreflect"
     )
     
    -var defaultsCache = make(map[Edition]EditionFeatures)
    -var defaultsKeys = []Edition{}
    +var (
    +	defaultsCache = make(map[Edition]EditionFeatures)
    +	defaultsKeys  = []Edition{}
    +)
     
     func init() {
     	unmarshalEditionDefaults(editiondefaults.Defaults)
    @@ -41,7 +43,7 @@ func unmarshalGoFeature(b []byte, parent EditionFeatures) EditionFeatures {
     			b = b[m:]
     			parent.StripEnumPrefix = int(v)
     		default:
    -			panic(fmt.Sprintf("unkown field number %d while unmarshalling GoFeatures", num))
    +			panic(fmt.Sprintf("unknown field number %d while unmarshalling GoFeatures", num))
     		}
     	}
     	return parent
    @@ -69,8 +71,14 @@ func unmarshalFeatureSet(b []byte, parent EditionFeatures) EditionFeatures {
     				parent.IsDelimitedEncoded = v == genid.FeatureSet_DELIMITED_enum_value
     			case genid.FeatureSet_JsonFormat_field_number:
     				parent.IsJSONCompliant = v == genid.FeatureSet_ALLOW_enum_value
    +			case genid.FeatureSet_EnforceNamingStyle_field_number:
    +				// EnforceNamingStyle is enforced in protoc, languages other than C++
    +				// are not supposed to do anything with this feature.
    +			case genid.FeatureSet_DefaultSymbolVisibility_field_number:
    +				// DefaultSymbolVisibility is enforced in protoc, runtimes should not
    +				// inspect this value.
     			default:
    -				panic(fmt.Sprintf("unkown field number %d while unmarshalling FeatureSet", num))
    +				panic(fmt.Sprintf("unknown field number %d while unmarshalling FeatureSet", num))
     			}
     		case protowire.BytesType:
     			v, m := protowire.ConsumeBytes(b)
    @@ -144,7 +152,7 @@ func unmarshalEditionDefaults(b []byte) {
     			_, m := protowire.ConsumeVarint(b)
     			b = b[m:]
     		default:
    -			panic(fmt.Sprintf("unkown field number %d while unmarshalling EditionDefault", num))
    +			panic(fmt.Sprintf("unknown field number %d while unmarshalling EditionDefault", num))
     		}
     	}
     }
    diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/presence.go b/vendor/google.golang.org/protobuf/internal/filedesc/presence.go
    new file mode 100644
    index 000000000..a12ec9791
    --- /dev/null
    +++ b/vendor/google.golang.org/protobuf/internal/filedesc/presence.go
    @@ -0,0 +1,33 @@
    +// Copyright 2025 The Go Authors. All rights reserved.
    +// Use of this source code is governed by a BSD-style
    +// license that can be found in the LICENSE file.
    +
    +package filedesc
    +
    +import "google.golang.org/protobuf/reflect/protoreflect"
    +
    +// UsePresenceForField reports whether the presence bitmap should be used for
    +// the specified field.
    +func UsePresenceForField(fd protoreflect.FieldDescriptor) (usePresence, canBeLazy bool) {
    +	switch {
    +	case fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic():
    +		// Oneof fields never use the presence bitmap.
    +		//
    +		// Synthetic oneofs are an exception: Those are used to implement proto3
    +		// optional fields and hence should follow non-oneof field semantics.
    +		return false, false
    +
    +	case fd.IsMap():
    +		// Map-typed fields never use the presence bitmap.
    +		return false, false
    +
    +	case fd.Kind() == protoreflect.MessageKind || fd.Kind() == protoreflect.GroupKind:
    +		// Lazy fields always use the presence bitmap (only messages can be lazy).
    +		isLazy := fd.(interface{ IsLazy() bool }).IsLazy()
    +		return isLazy, isLazy
    +
    +	default:
    +		// If the field has presence, use the presence bitmap.
    +		return fd.HasPresence(), false
    +	}
    +}
    diff --git a/vendor/google.golang.org/protobuf/internal/genid/api_gen.go b/vendor/google.golang.org/protobuf/internal/genid/api_gen.go
    index df8f91850..3ceb6fa7f 100644
    --- a/vendor/google.golang.org/protobuf/internal/genid/api_gen.go
    +++ b/vendor/google.golang.org/protobuf/internal/genid/api_gen.go
    @@ -27,6 +27,7 @@ const (
     	Api_SourceContext_field_name protoreflect.Name = "source_context"
     	Api_Mixins_field_name        protoreflect.Name = "mixins"
     	Api_Syntax_field_name        protoreflect.Name = "syntax"
    +	Api_Edition_field_name       protoreflect.Name = "edition"
     
     	Api_Name_field_fullname          protoreflect.FullName = "google.protobuf.Api.name"
     	Api_Methods_field_fullname       protoreflect.FullName = "google.protobuf.Api.methods"
    @@ -35,6 +36,7 @@ const (
     	Api_SourceContext_field_fullname protoreflect.FullName = "google.protobuf.Api.source_context"
     	Api_Mixins_field_fullname        protoreflect.FullName = "google.protobuf.Api.mixins"
     	Api_Syntax_field_fullname        protoreflect.FullName = "google.protobuf.Api.syntax"
    +	Api_Edition_field_fullname       protoreflect.FullName = "google.protobuf.Api.edition"
     )
     
     // Field numbers for google.protobuf.Api.
    @@ -46,6 +48,7 @@ const (
     	Api_SourceContext_field_number protoreflect.FieldNumber = 5
     	Api_Mixins_field_number        protoreflect.FieldNumber = 6
     	Api_Syntax_field_number        protoreflect.FieldNumber = 7
    +	Api_Edition_field_number       protoreflect.FieldNumber = 8
     )
     
     // Names for google.protobuf.Method.
    @@ -63,6 +66,7 @@ const (
     	Method_ResponseStreaming_field_name protoreflect.Name = "response_streaming"
     	Method_Options_field_name           protoreflect.Name = "options"
     	Method_Syntax_field_name            protoreflect.Name = "syntax"
    +	Method_Edition_field_name           protoreflect.Name = "edition"
     
     	Method_Name_field_fullname              protoreflect.FullName = "google.protobuf.Method.name"
     	Method_RequestTypeUrl_field_fullname    protoreflect.FullName = "google.protobuf.Method.request_type_url"
    @@ -71,6 +75,7 @@ const (
     	Method_ResponseStreaming_field_fullname protoreflect.FullName = "google.protobuf.Method.response_streaming"
     	Method_Options_field_fullname           protoreflect.FullName = "google.protobuf.Method.options"
     	Method_Syntax_field_fullname            protoreflect.FullName = "google.protobuf.Method.syntax"
    +	Method_Edition_field_fullname           protoreflect.FullName = "google.protobuf.Method.edition"
     )
     
     // Field numbers for google.protobuf.Method.
    @@ -82,6 +87,7 @@ const (
     	Method_ResponseStreaming_field_number protoreflect.FieldNumber = 5
     	Method_Options_field_number           protoreflect.FieldNumber = 6
     	Method_Syntax_field_number            protoreflect.FieldNumber = 7
    +	Method_Edition_field_number           protoreflect.FieldNumber = 8
     )
     
     // Names for google.protobuf.Mixin.
    diff --git a/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go b/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go
    index f30ab6b58..65aaf4d21 100644
    --- a/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go
    +++ b/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go
    @@ -26,6 +26,7 @@ const (
     	Edition_EDITION_PROTO3_enum_value          = 999
     	Edition_EDITION_2023_enum_value            = 1000
     	Edition_EDITION_2024_enum_value            = 1001
    +	Edition_EDITION_UNSTABLE_enum_value        = 9999
     	Edition_EDITION_1_TEST_ONLY_enum_value     = 1
     	Edition_EDITION_2_TEST_ONLY_enum_value     = 2
     	Edition_EDITION_99997_TEST_ONLY_enum_value = 99997
    @@ -34,6 +35,19 @@ const (
     	Edition_EDITION_MAX_enum_value             = 2147483647
     )
     
    +// Full and short names for google.protobuf.SymbolVisibility.
    +const (
    +	SymbolVisibility_enum_fullname = "google.protobuf.SymbolVisibility"
    +	SymbolVisibility_enum_name     = "SymbolVisibility"
    +)
    +
    +// Enum values for google.protobuf.SymbolVisibility.
    +const (
    +	SymbolVisibility_VISIBILITY_UNSET_enum_value  = 0
    +	SymbolVisibility_VISIBILITY_LOCAL_enum_value  = 1
    +	SymbolVisibility_VISIBILITY_EXPORT_enum_value = 2
    +)
    +
     // Names for google.protobuf.FileDescriptorSet.
     const (
     	FileDescriptorSet_message_name     protoreflect.Name     = "FileDescriptorSet"
    @@ -65,6 +79,7 @@ const (
     	FileDescriptorProto_Dependency_field_name       protoreflect.Name = "dependency"
     	FileDescriptorProto_PublicDependency_field_name protoreflect.Name = "public_dependency"
     	FileDescriptorProto_WeakDependency_field_name   protoreflect.Name = "weak_dependency"
    +	FileDescriptorProto_OptionDependency_field_name protoreflect.Name = "option_dependency"
     	FileDescriptorProto_MessageType_field_name      protoreflect.Name = "message_type"
     	FileDescriptorProto_EnumType_field_name         protoreflect.Name = "enum_type"
     	FileDescriptorProto_Service_field_name          protoreflect.Name = "service"
    @@ -79,6 +94,7 @@ const (
     	FileDescriptorProto_Dependency_field_fullname       protoreflect.FullName = "google.protobuf.FileDescriptorProto.dependency"
     	FileDescriptorProto_PublicDependency_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.public_dependency"
     	FileDescriptorProto_WeakDependency_field_fullname   protoreflect.FullName = "google.protobuf.FileDescriptorProto.weak_dependency"
    +	FileDescriptorProto_OptionDependency_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.option_dependency"
     	FileDescriptorProto_MessageType_field_fullname      protoreflect.FullName = "google.protobuf.FileDescriptorProto.message_type"
     	FileDescriptorProto_EnumType_field_fullname         protoreflect.FullName = "google.protobuf.FileDescriptorProto.enum_type"
     	FileDescriptorProto_Service_field_fullname          protoreflect.FullName = "google.protobuf.FileDescriptorProto.service"
    @@ -96,6 +112,7 @@ const (
     	FileDescriptorProto_Dependency_field_number       protoreflect.FieldNumber = 3
     	FileDescriptorProto_PublicDependency_field_number protoreflect.FieldNumber = 10
     	FileDescriptorProto_WeakDependency_field_number   protoreflect.FieldNumber = 11
    +	FileDescriptorProto_OptionDependency_field_number protoreflect.FieldNumber = 15
     	FileDescriptorProto_MessageType_field_number      protoreflect.FieldNumber = 4
     	FileDescriptorProto_EnumType_field_number         protoreflect.FieldNumber = 5
     	FileDescriptorProto_Service_field_number          protoreflect.FieldNumber = 6
    @@ -124,6 +141,7 @@ const (
     	DescriptorProto_Options_field_name        protoreflect.Name = "options"
     	DescriptorProto_ReservedRange_field_name  protoreflect.Name = "reserved_range"
     	DescriptorProto_ReservedName_field_name   protoreflect.Name = "reserved_name"
    +	DescriptorProto_Visibility_field_name     protoreflect.Name = "visibility"
     
     	DescriptorProto_Name_field_fullname           protoreflect.FullName = "google.protobuf.DescriptorProto.name"
     	DescriptorProto_Field_field_fullname          protoreflect.FullName = "google.protobuf.DescriptorProto.field"
    @@ -135,6 +153,7 @@ const (
     	DescriptorProto_Options_field_fullname        protoreflect.FullName = "google.protobuf.DescriptorProto.options"
     	DescriptorProto_ReservedRange_field_fullname  protoreflect.FullName = "google.protobuf.DescriptorProto.reserved_range"
     	DescriptorProto_ReservedName_field_fullname   protoreflect.FullName = "google.protobuf.DescriptorProto.reserved_name"
    +	DescriptorProto_Visibility_field_fullname     protoreflect.FullName = "google.protobuf.DescriptorProto.visibility"
     )
     
     // Field numbers for google.protobuf.DescriptorProto.
    @@ -149,6 +168,7 @@ const (
     	DescriptorProto_Options_field_number        protoreflect.FieldNumber = 7
     	DescriptorProto_ReservedRange_field_number  protoreflect.FieldNumber = 9
     	DescriptorProto_ReservedName_field_number   protoreflect.FieldNumber = 10
    +	DescriptorProto_Visibility_field_number     protoreflect.FieldNumber = 11
     )
     
     // Names for google.protobuf.DescriptorProto.ExtensionRange.
    @@ -388,12 +408,14 @@ const (
     	EnumDescriptorProto_Options_field_name       protoreflect.Name = "options"
     	EnumDescriptorProto_ReservedRange_field_name protoreflect.Name = "reserved_range"
     	EnumDescriptorProto_ReservedName_field_name  protoreflect.Name = "reserved_name"
    +	EnumDescriptorProto_Visibility_field_name    protoreflect.Name = "visibility"
     
     	EnumDescriptorProto_Name_field_fullname          protoreflect.FullName = "google.protobuf.EnumDescriptorProto.name"
     	EnumDescriptorProto_Value_field_fullname         protoreflect.FullName = "google.protobuf.EnumDescriptorProto.value"
     	EnumDescriptorProto_Options_field_fullname       protoreflect.FullName = "google.protobuf.EnumDescriptorProto.options"
     	EnumDescriptorProto_ReservedRange_field_fullname protoreflect.FullName = "google.protobuf.EnumDescriptorProto.reserved_range"
     	EnumDescriptorProto_ReservedName_field_fullname  protoreflect.FullName = "google.protobuf.EnumDescriptorProto.reserved_name"
    +	EnumDescriptorProto_Visibility_field_fullname    protoreflect.FullName = "google.protobuf.EnumDescriptorProto.visibility"
     )
     
     // Field numbers for google.protobuf.EnumDescriptorProto.
    @@ -403,6 +425,7 @@ const (
     	EnumDescriptorProto_Options_field_number       protoreflect.FieldNumber = 3
     	EnumDescriptorProto_ReservedRange_field_number protoreflect.FieldNumber = 4
     	EnumDescriptorProto_ReservedName_field_number  protoreflect.FieldNumber = 5
    +	EnumDescriptorProto_Visibility_field_number    protoreflect.FieldNumber = 6
     )
     
     // Names for google.protobuf.EnumDescriptorProto.EnumReservedRange.
    @@ -1008,29 +1031,35 @@ const (
     
     // Field names for google.protobuf.FeatureSet.
     const (
    -	FeatureSet_FieldPresence_field_name         protoreflect.Name = "field_presence"
    -	FeatureSet_EnumType_field_name              protoreflect.Name = "enum_type"
    -	FeatureSet_RepeatedFieldEncoding_field_name protoreflect.Name = "repeated_field_encoding"
    -	FeatureSet_Utf8Validation_field_name        protoreflect.Name = "utf8_validation"
    -	FeatureSet_MessageEncoding_field_name       protoreflect.Name = "message_encoding"
    -	FeatureSet_JsonFormat_field_name            protoreflect.Name = "json_format"
    -
    -	FeatureSet_FieldPresence_field_fullname         protoreflect.FullName = "google.protobuf.FeatureSet.field_presence"
    -	FeatureSet_EnumType_field_fullname              protoreflect.FullName = "google.protobuf.FeatureSet.enum_type"
    -	FeatureSet_RepeatedFieldEncoding_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.repeated_field_encoding"
    -	FeatureSet_Utf8Validation_field_fullname        protoreflect.FullName = "google.protobuf.FeatureSet.utf8_validation"
    -	FeatureSet_MessageEncoding_field_fullname       protoreflect.FullName = "google.protobuf.FeatureSet.message_encoding"
    -	FeatureSet_JsonFormat_field_fullname            protoreflect.FullName = "google.protobuf.FeatureSet.json_format"
    +	FeatureSet_FieldPresence_field_name           protoreflect.Name = "field_presence"
    +	FeatureSet_EnumType_field_name                protoreflect.Name = "enum_type"
    +	FeatureSet_RepeatedFieldEncoding_field_name   protoreflect.Name = "repeated_field_encoding"
    +	FeatureSet_Utf8Validation_field_name          protoreflect.Name = "utf8_validation"
    +	FeatureSet_MessageEncoding_field_name         protoreflect.Name = "message_encoding"
    +	FeatureSet_JsonFormat_field_name              protoreflect.Name = "json_format"
    +	FeatureSet_EnforceNamingStyle_field_name      protoreflect.Name = "enforce_naming_style"
    +	FeatureSet_DefaultSymbolVisibility_field_name protoreflect.Name = "default_symbol_visibility"
    +
    +	FeatureSet_FieldPresence_field_fullname           protoreflect.FullName = "google.protobuf.FeatureSet.field_presence"
    +	FeatureSet_EnumType_field_fullname                protoreflect.FullName = "google.protobuf.FeatureSet.enum_type"
    +	FeatureSet_RepeatedFieldEncoding_field_fullname   protoreflect.FullName = "google.protobuf.FeatureSet.repeated_field_encoding"
    +	FeatureSet_Utf8Validation_field_fullname          protoreflect.FullName = "google.protobuf.FeatureSet.utf8_validation"
    +	FeatureSet_MessageEncoding_field_fullname         protoreflect.FullName = "google.protobuf.FeatureSet.message_encoding"
    +	FeatureSet_JsonFormat_field_fullname              protoreflect.FullName = "google.protobuf.FeatureSet.json_format"
    +	FeatureSet_EnforceNamingStyle_field_fullname      protoreflect.FullName = "google.protobuf.FeatureSet.enforce_naming_style"
    +	FeatureSet_DefaultSymbolVisibility_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.default_symbol_visibility"
     )
     
     // Field numbers for google.protobuf.FeatureSet.
     const (
    -	FeatureSet_FieldPresence_field_number         protoreflect.FieldNumber = 1
    -	FeatureSet_EnumType_field_number              protoreflect.FieldNumber = 2
    -	FeatureSet_RepeatedFieldEncoding_field_number protoreflect.FieldNumber = 3
    -	FeatureSet_Utf8Validation_field_number        protoreflect.FieldNumber = 4
    -	FeatureSet_MessageEncoding_field_number       protoreflect.FieldNumber = 5
    -	FeatureSet_JsonFormat_field_number            protoreflect.FieldNumber = 6
    +	FeatureSet_FieldPresence_field_number           protoreflect.FieldNumber = 1
    +	FeatureSet_EnumType_field_number                protoreflect.FieldNumber = 2
    +	FeatureSet_RepeatedFieldEncoding_field_number   protoreflect.FieldNumber = 3
    +	FeatureSet_Utf8Validation_field_number          protoreflect.FieldNumber = 4
    +	FeatureSet_MessageEncoding_field_number         protoreflect.FieldNumber = 5
    +	FeatureSet_JsonFormat_field_number              protoreflect.FieldNumber = 6
    +	FeatureSet_EnforceNamingStyle_field_number      protoreflect.FieldNumber = 7
    +	FeatureSet_DefaultSymbolVisibility_field_number protoreflect.FieldNumber = 8
     )
     
     // Full and short names for google.protobuf.FeatureSet.FieldPresence.
    @@ -1112,6 +1141,40 @@ const (
     	FeatureSet_LEGACY_BEST_EFFORT_enum_value  = 2
     )
     
    +// Full and short names for google.protobuf.FeatureSet.EnforceNamingStyle.
    +const (
    +	FeatureSet_EnforceNamingStyle_enum_fullname = "google.protobuf.FeatureSet.EnforceNamingStyle"
    +	FeatureSet_EnforceNamingStyle_enum_name     = "EnforceNamingStyle"
    +)
    +
    +// Enum values for google.protobuf.FeatureSet.EnforceNamingStyle.
    +const (
    +	FeatureSet_ENFORCE_NAMING_STYLE_UNKNOWN_enum_value = 0
    +	FeatureSet_STYLE2024_enum_value                    = 1
    +	FeatureSet_STYLE_LEGACY_enum_value                 = 2
    +)
    +
    +// Names for google.protobuf.FeatureSet.VisibilityFeature.
    +const (
    +	FeatureSet_VisibilityFeature_message_name     protoreflect.Name     = "VisibilityFeature"
    +	FeatureSet_VisibilityFeature_message_fullname protoreflect.FullName = "google.protobuf.FeatureSet.VisibilityFeature"
    +)
    +
    +// Full and short names for google.protobuf.FeatureSet.VisibilityFeature.DefaultSymbolVisibility.
    +const (
    +	FeatureSet_VisibilityFeature_DefaultSymbolVisibility_enum_fullname = "google.protobuf.FeatureSet.VisibilityFeature.DefaultSymbolVisibility"
    +	FeatureSet_VisibilityFeature_DefaultSymbolVisibility_enum_name     = "DefaultSymbolVisibility"
    +)
    +
    +// Enum values for google.protobuf.FeatureSet.VisibilityFeature.DefaultSymbolVisibility.
    +const (
    +	FeatureSet_VisibilityFeature_DEFAULT_SYMBOL_VISIBILITY_UNKNOWN_enum_value = 0
    +	FeatureSet_VisibilityFeature_EXPORT_ALL_enum_value                        = 1
    +	FeatureSet_VisibilityFeature_EXPORT_TOP_LEVEL_enum_value                  = 2
    +	FeatureSet_VisibilityFeature_LOCAL_ALL_enum_value                         = 3
    +	FeatureSet_VisibilityFeature_STRICT_enum_value                            = 4
    +)
    +
     // Names for google.protobuf.FeatureSetDefaults.
     const (
     	FeatureSetDefaults_message_name     protoreflect.Name     = "FeatureSetDefaults"
    diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_map.go b/vendor/google.golang.org/protobuf/internal/impl/codec_map.go
    index 229c69801..4a3bf393e 100644
    --- a/vendor/google.golang.org/protobuf/internal/impl/codec_map.go
    +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_map.go
    @@ -113,6 +113,9 @@ func sizeMap(mapv reflect.Value, mapi *mapInfo, f *coderFieldInfo, opts marshalO
     }
     
     func consumeMap(b []byte, mapv reflect.Value, wtyp protowire.Type, mapi *mapInfo, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
    +	if opts.depth--; opts.depth < 0 {
    +		return out, errRecursionDepth
    +	}
     	if wtyp != protowire.BytesType {
     		return out, errUnknown
     	}
    @@ -170,6 +173,9 @@ func consumeMap(b []byte, mapv reflect.Value, wtyp protowire.Type, mapi *mapInfo
     }
     
     func consumeMapOfMessage(b []byte, mapv reflect.Value, wtyp protowire.Type, mapi *mapInfo, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
    +	if opts.depth--; opts.depth < 0 {
    +		return out, errRecursionDepth
    +	}
     	if wtyp != protowire.BytesType {
     		return out, errUnknown
     	}
    diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_message_opaque.go b/vendor/google.golang.org/protobuf/internal/impl/codec_message_opaque.go
    index 41c1f74ef..bdad12a9b 100644
    --- a/vendor/google.golang.org/protobuf/internal/impl/codec_message_opaque.go
    +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_message_opaque.go
    @@ -11,6 +11,7 @@ import (
     
     	"google.golang.org/protobuf/encoding/protowire"
     	"google.golang.org/protobuf/internal/encoding/messageset"
    +	"google.golang.org/protobuf/internal/filedesc"
     	"google.golang.org/protobuf/internal/order"
     	"google.golang.org/protobuf/reflect/protoreflect"
     	piface "google.golang.org/protobuf/runtime/protoiface"
    @@ -80,7 +81,7 @@ func (mi *MessageInfo) makeOpaqueCoderMethods(t reflect.Type, si opaqueStructInf
     		// permit us to skip over definitely-unset fields at marshal time.
     
     		var hasPresence bool
    -		hasPresence, cf.isLazy = usePresenceForField(si, fd)
    +		hasPresence, cf.isLazy = filedesc.UsePresenceForField(fd)
     
     		if hasPresence {
     			cf.presenceIndex, mi.presenceSize = presenceIndex(mi.Desc, fd)
    diff --git a/vendor/google.golang.org/protobuf/internal/impl/decode.go b/vendor/google.golang.org/protobuf/internal/impl/decode.go
    index e0dd21fa5..1228b5c8c 100644
    --- a/vendor/google.golang.org/protobuf/internal/impl/decode.go
    +++ b/vendor/google.golang.org/protobuf/internal/impl/decode.go
    @@ -102,8 +102,7 @@ var errUnknown = errors.New("unknown")
     
     func (mi *MessageInfo) unmarshalPointer(b []byte, p pointer, groupTag protowire.Number, opts unmarshalOptions) (out unmarshalOutput, err error) {
     	mi.init()
    -	opts.depth--
    -	if opts.depth < 0 {
    +	if opts.depth--; opts.depth < 0 {
     		return out, errRecursionDepth
     	}
     	if flags.ProtoLegacy && mi.isMessageSet {
    diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_opaque.go b/vendor/google.golang.org/protobuf/internal/impl/message_opaque.go
    index dd55e8e00..5a439daac 100644
    --- a/vendor/google.golang.org/protobuf/internal/impl/message_opaque.go
    +++ b/vendor/google.golang.org/protobuf/internal/impl/message_opaque.go
    @@ -11,6 +11,7 @@ import (
     	"strings"
     	"sync/atomic"
     
    +	"google.golang.org/protobuf/internal/filedesc"
     	"google.golang.org/protobuf/reflect/protoreflect"
     )
     
    @@ -53,7 +54,7 @@ func opaqueInitHook(mi *MessageInfo) bool {
     		fd := fds.Get(i)
     		fs := si.fieldsByNumber[fd.Number()]
     		var fi fieldInfo
    -		usePresence, _ := usePresenceForField(si, fd)
    +		usePresence, _ := filedesc.UsePresenceForField(fd)
     
     		switch {
     		case fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic():
    @@ -343,17 +344,15 @@ func (mi *MessageInfo) fieldInfoForMessageListOpaqueNoPresence(si opaqueStructIn
     			if p.IsNil() {
     				return false
     			}
    -			sp := p.Apply(fieldOffset).AtomicGetPointer()
    -			if sp.IsNil() {
    +			rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
    +			if rv.IsNil() {
     				return false
     			}
    -			rv := sp.AsValueOf(fs.Type.Elem())
     			return rv.Elem().Len() > 0
     		},
     		clear: func(p pointer) {
    -			sp := p.Apply(fieldOffset).AtomicGetPointer()
    -			if !sp.IsNil() {
    -				rv := sp.AsValueOf(fs.Type.Elem())
    +			rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
    +			if !rv.IsNil() {
     				rv.Elem().Set(reflect.Zero(rv.Type().Elem()))
     			}
     		},
    @@ -361,11 +360,10 @@ func (mi *MessageInfo) fieldInfoForMessageListOpaqueNoPresence(si opaqueStructIn
     			if p.IsNil() {
     				return conv.Zero()
     			}
    -			sp := p.Apply(fieldOffset).AtomicGetPointer()
    -			if sp.IsNil() {
    +			rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
    +			if rv.IsNil() {
     				return conv.Zero()
     			}
    -			rv := sp.AsValueOf(fs.Type.Elem())
     			if rv.Elem().Len() == 0 {
     				return conv.Zero()
     			}
    @@ -598,30 +596,3 @@ func (mi *MessageInfo) clearPresent(p pointer, index uint32) {
     func (mi *MessageInfo) present(p pointer, index uint32) bool {
     	return p.Apply(mi.presenceOffset).PresenceInfo().Present(index)
     }
    -
    -// usePresenceForField implements the somewhat intricate logic of when
    -// the presence bitmap is used for a field.  The main logic is that a
    -// field that is optional or that can be lazy will use the presence
    -// bit, but for proto2, also maps have a presence bit. It also records
    -// if the field can ever be lazy, which is true if we have a
    -// lazyOffset and the field is a message or a slice of messages. A
    -// field that is lazy will always need a presence bit.  Oneofs are not
    -// lazy and do not use presence, unless they are a synthetic oneof,
    -// which is a proto3 optional field. For proto3 optionals, we use the
    -// presence and they can also be lazy when applicable (a message).
    -func usePresenceForField(si opaqueStructInfo, fd protoreflect.FieldDescriptor) (usePresence, canBeLazy bool) {
    -	hasLazyField := fd.(interface{ IsLazy() bool }).IsLazy()
    -
    -	// Non-oneof scalar fields with explicit field presence use the presence array.
    -	usesPresenceArray := fd.HasPresence() && fd.Message() == nil && (fd.ContainingOneof() == nil || fd.ContainingOneof().IsSynthetic())
    -	switch {
    -	case fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic():
    -		return false, false
    -	case fd.IsMap():
    -		return false, false
    -	case fd.Kind() == protoreflect.MessageKind || fd.Kind() == protoreflect.GroupKind:
    -		return hasLazyField, hasLazyField
    -	default:
    -		return usesPresenceArray || (hasLazyField && fd.HasPresence()), false
    -	}
    -}
    diff --git a/vendor/google.golang.org/protobuf/internal/impl/presence.go b/vendor/google.golang.org/protobuf/internal/impl/presence.go
    index 914cb1ded..443afe81c 100644
    --- a/vendor/google.golang.org/protobuf/internal/impl/presence.go
    +++ b/vendor/google.golang.org/protobuf/internal/impl/presence.go
    @@ -32,9 +32,6 @@ func (p presence) toElem(num uint32) (ret *uint32) {
     
     // Present checks for the presence of a specific field number in a presence set.
     func (p presence) Present(num uint32) bool {
    -	if p.P == nil {
    -		return false
    -	}
     	return Export{}.Present(p.toElem(num), num)
     }
     
    diff --git a/vendor/google.golang.org/protobuf/internal/impl/validate.go b/vendor/google.golang.org/protobuf/internal/impl/validate.go
    index 7b2995dde..99a1eb95f 100644
    --- a/vendor/google.golang.org/protobuf/internal/impl/validate.go
    +++ b/vendor/google.golang.org/protobuf/internal/impl/validate.go
    @@ -68,9 +68,13 @@ func Validate(mt protoreflect.MessageType, in protoiface.UnmarshalInput) (out pr
     	if in.Resolver == nil {
     		in.Resolver = protoregistry.GlobalTypes
     	}
    +	if in.Depth == 0 {
    +		in.Depth = protowire.DefaultRecursionLimit
    +	}
     	o, st := mi.validate(in.Buf, 0, unmarshalOptions{
     		flags:    in.Flags,
     		resolver: in.Resolver,
    +		depth:    in.Depth,
     	})
     	if o.initialized {
     		out.Flags |= protoiface.UnmarshalInitialized
    @@ -257,6 +261,9 @@ func (mi *MessageInfo) validate(b []byte, groupTag protowire.Number, opts unmars
     		states[0].typ = validationTypeGroup
     		states[0].endGroup = groupTag
     	}
    +	if opts.depth--; opts.depth < 0 {
    +		return out, ValidationInvalid
    +	}
     	initialized := true
     	start := len(b)
     State:
    @@ -451,6 +458,13 @@ State:
     						mi:      vi.mi,
     						tail:    b,
     					})
    +					if vi.typ == validationTypeMessage ||
    +						vi.typ == validationTypeGroup ||
    +						vi.typ == validationTypeMap {
    +						if opts.depth--; opts.depth < 0 {
    +							return out, ValidationInvalid
    +						}
    +					}
     					b = v
     					continue State
     				case validationTypeRepeatedVarint:
    @@ -499,6 +513,9 @@ State:
     						mi:       vi.mi,
     						endGroup: num,
     					})
    +					if opts.depth--; opts.depth < 0 {
    +						return out, ValidationInvalid
    +					}
     					continue State
     				case flags.ProtoLegacy && vi.typ == validationTypeMessageSetItem:
     					typeid, v, n, err := messageset.ConsumeFieldValue(b, false)
    @@ -521,6 +538,13 @@ State:
     							mi:   xvi.mi,
     							tail: b[n:],
     						})
    +						if xvi.typ == validationTypeMessage ||
    +							xvi.typ == validationTypeGroup ||
    +							xvi.typ == validationTypeMap {
    +							if opts.depth--; opts.depth < 0 {
    +								return out, ValidationInvalid
    +							}
    +						}
     						b = v
     						continue State
     					}
    @@ -547,12 +571,14 @@ State:
     		switch st.typ {
     		case validationTypeMessage, validationTypeGroup:
     			numRequiredFields = int(st.mi.numRequiredFields)
    +			opts.depth++
     		case validationTypeMap:
     			// If this is a map field with a message value that contains
     			// required fields, require that the value be present.
     			if st.mi != nil && st.mi.numRequiredFields > 0 {
     				numRequiredFields = 1
     			}
    +			opts.depth++
     		}
     		// If there are more than 64 required fields, this check will
     		// always fail and we will report that the message is potentially
    diff --git a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go
    new file mode 100644
    index 000000000..42dd6f70c
    --- /dev/null
    +++ b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go
    @@ -0,0 +1,71 @@
    +// Copyright 2018 The Go Authors. All rights reserved.
    +// Use of this source code is governed by a BSD-style
    +// license that can be found in the LICENSE file.
    +
    +package strs
    +
    +import (
    +	"unsafe"
    +
    +	"google.golang.org/protobuf/reflect/protoreflect"
    +)
    +
    +// UnsafeString returns an unsafe string reference of b.
    +// The caller must treat the input slice as immutable.
    +//
    +// WARNING: Use carefully. The returned result must not leak to the end user
    +// unless the input slice is provably immutable.
    +func UnsafeString(b []byte) string {
    +	return unsafe.String(unsafe.SliceData(b), len(b))
    +}
    +
    +// UnsafeBytes returns an unsafe bytes slice reference of s.
    +// The caller must treat returned slice as immutable.
    +//
    +// WARNING: Use carefully. The returned result must not leak to the end user.
    +func UnsafeBytes(s string) []byte {
    +	return unsafe.Slice(unsafe.StringData(s), len(s))
    +}
    +
    +// Builder builds a set of strings with shared lifetime.
    +// This differs from strings.Builder, which is for building a single string.
    +type Builder struct {
    +	buf []byte
    +}
    +
    +// AppendFullName is equivalent to protoreflect.FullName.Append,
    +// but optimized for large batches where each name has a shared lifetime.
    +func (sb *Builder) AppendFullName(prefix protoreflect.FullName, name protoreflect.Name) protoreflect.FullName {
    +	n := len(prefix) + len(".") + len(name)
    +	if len(prefix) == 0 {
    +		n -= len(".")
    +	}
    +	sb.grow(n)
    +	sb.buf = append(sb.buf, prefix...)
    +	sb.buf = append(sb.buf, '.')
    +	sb.buf = append(sb.buf, name...)
    +	return protoreflect.FullName(sb.last(n))
    +}
    +
    +// MakeString is equivalent to string(b), but optimized for large batches
    +// with a shared lifetime.
    +func (sb *Builder) MakeString(b []byte) string {
    +	sb.grow(len(b))
    +	sb.buf = append(sb.buf, b...)
    +	return sb.last(len(b))
    +}
    +
    +func (sb *Builder) grow(n int) {
    +	if cap(sb.buf)-len(sb.buf) >= n {
    +		return
    +	}
    +
    +	// Unlike strings.Builder, we do not need to copy over the contents
    +	// of the old buffer since our builder provides no API for
    +	// retrieving previously created strings.
    +	sb.buf = make([]byte, 0, 2*(cap(sb.buf)+n))
    +}
    +
    +func (sb *Builder) last(n int) string {
    +	return UnsafeString(sb.buf[len(sb.buf)-n:])
    +}
    diff --git a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go
    deleted file mode 100644
    index 832a7988f..000000000
    --- a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go
    +++ /dev/null
    @@ -1,94 +0,0 @@
    -// Copyright 2018 The Go Authors. All rights reserved.
    -// Use of this source code is governed by a BSD-style
    -// license that can be found in the LICENSE file.
    -
    -//go:build !go1.21
    -
    -package strs
    -
    -import (
    -	"unsafe"
    -
    -	"google.golang.org/protobuf/reflect/protoreflect"
    -)
    -
    -type (
    -	stringHeader struct {
    -		Data unsafe.Pointer
    -		Len  int
    -	}
    -	sliceHeader struct {
    -		Data unsafe.Pointer
    -		Len  int
    -		Cap  int
    -	}
    -)
    -
    -// UnsafeString returns an unsafe string reference of b.
    -// The caller must treat the input slice as immutable.
    -//
    -// WARNING: Use carefully. The returned result must not leak to the end user
    -// unless the input slice is provably immutable.
    -func UnsafeString(b []byte) (s string) {
    -	src := (*sliceHeader)(unsafe.Pointer(&b))
    -	dst := (*stringHeader)(unsafe.Pointer(&s))
    -	dst.Data = src.Data
    -	dst.Len = src.Len
    -	return s
    -}
    -
    -// UnsafeBytes returns an unsafe bytes slice reference of s.
    -// The caller must treat returned slice as immutable.
    -//
    -// WARNING: Use carefully. The returned result must not leak to the end user.
    -func UnsafeBytes(s string) (b []byte) {
    -	src := (*stringHeader)(unsafe.Pointer(&s))
    -	dst := (*sliceHeader)(unsafe.Pointer(&b))
    -	dst.Data = src.Data
    -	dst.Len = src.Len
    -	dst.Cap = src.Len
    -	return b
    -}
    -
    -// Builder builds a set of strings with shared lifetime.
    -// This differs from strings.Builder, which is for building a single string.
    -type Builder struct {
    -	buf []byte
    -}
    -
    -// AppendFullName is equivalent to protoreflect.FullName.Append,
    -// but optimized for large batches where each name has a shared lifetime.
    -func (sb *Builder) AppendFullName(prefix protoreflect.FullName, name protoreflect.Name) protoreflect.FullName {
    -	n := len(prefix) + len(".") + len(name)
    -	if len(prefix) == 0 {
    -		n -= len(".")
    -	}
    -	sb.grow(n)
    -	sb.buf = append(sb.buf, prefix...)
    -	sb.buf = append(sb.buf, '.')
    -	sb.buf = append(sb.buf, name...)
    -	return protoreflect.FullName(sb.last(n))
    -}
    -
    -// MakeString is equivalent to string(b), but optimized for large batches
    -// with a shared lifetime.
    -func (sb *Builder) MakeString(b []byte) string {
    -	sb.grow(len(b))
    -	sb.buf = append(sb.buf, b...)
    -	return sb.last(len(b))
    -}
    -
    -func (sb *Builder) grow(n int) {
    -	if cap(sb.buf)-len(sb.buf) >= n {
    -		return
    -	}
    -
    -	// Unlike strings.Builder, we do not need to copy over the contents
    -	// of the old buffer since our builder provides no API for
    -	// retrieving previously created strings.
    -	sb.buf = make([]byte, 0, 2*(cap(sb.buf)+n))
    -}
    -
    -func (sb *Builder) last(n int) string {
    -	return UnsafeString(sb.buf[len(sb.buf)-n:])
    -}
    diff --git a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go
    deleted file mode 100644
    index 1ffddf687..000000000
    --- a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go
    +++ /dev/null
    @@ -1,73 +0,0 @@
    -// Copyright 2018 The Go Authors. All rights reserved.
    -// Use of this source code is governed by a BSD-style
    -// license that can be found in the LICENSE file.
    -
    -//go:build go1.21
    -
    -package strs
    -
    -import (
    -	"unsafe"
    -
    -	"google.golang.org/protobuf/reflect/protoreflect"
    -)
    -
    -// UnsafeString returns an unsafe string reference of b.
    -// The caller must treat the input slice as immutable.
    -//
    -// WARNING: Use carefully. The returned result must not leak to the end user
    -// unless the input slice is provably immutable.
    -func UnsafeString(b []byte) string {
    -	return unsafe.String(unsafe.SliceData(b), len(b))
    -}
    -
    -// UnsafeBytes returns an unsafe bytes slice reference of s.
    -// The caller must treat returned slice as immutable.
    -//
    -// WARNING: Use carefully. The returned result must not leak to the end user.
    -func UnsafeBytes(s string) []byte {
    -	return unsafe.Slice(unsafe.StringData(s), len(s))
    -}
    -
    -// Builder builds a set of strings with shared lifetime.
    -// This differs from strings.Builder, which is for building a single string.
    -type Builder struct {
    -	buf []byte
    -}
    -
    -// AppendFullName is equivalent to protoreflect.FullName.Append,
    -// but optimized for large batches where each name has a shared lifetime.
    -func (sb *Builder) AppendFullName(prefix protoreflect.FullName, name protoreflect.Name) protoreflect.FullName {
    -	n := len(prefix) + len(".") + len(name)
    -	if len(prefix) == 0 {
    -		n -= len(".")
    -	}
    -	sb.grow(n)
    -	sb.buf = append(sb.buf, prefix...)
    -	sb.buf = append(sb.buf, '.')
    -	sb.buf = append(sb.buf, name...)
    -	return protoreflect.FullName(sb.last(n))
    -}
    -
    -// MakeString is equivalent to string(b), but optimized for large batches
    -// with a shared lifetime.
    -func (sb *Builder) MakeString(b []byte) string {
    -	sb.grow(len(b))
    -	sb.buf = append(sb.buf, b...)
    -	return sb.last(len(b))
    -}
    -
    -func (sb *Builder) grow(n int) {
    -	if cap(sb.buf)-len(sb.buf) >= n {
    -		return
    -	}
    -
    -	// Unlike strings.Builder, we do not need to copy over the contents
    -	// of the old buffer since our builder provides no API for
    -	// retrieving previously created strings.
    -	sb.buf = make([]byte, 0, 2*(cap(sb.buf)+n))
    -}
    -
    -func (sb *Builder) last(n int) string {
    -	return UnsafeString(sb.buf[len(sb.buf)-n:])
    -}
    diff --git a/vendor/google.golang.org/protobuf/internal/version/version.go b/vendor/google.golang.org/protobuf/internal/version/version.go
    index 01efc3303..763fd8284 100644
    --- a/vendor/google.golang.org/protobuf/internal/version/version.go
    +++ b/vendor/google.golang.org/protobuf/internal/version/version.go
    @@ -52,7 +52,7 @@ import (
     const (
     	Major      = 1
     	Minor      = 36
    -	Patch      = 5
    +	Patch      = 11
     	PreRelease = ""
     )
     
    diff --git a/vendor/google.golang.org/protobuf/proto/decode.go b/vendor/google.golang.org/protobuf/proto/decode.go
    index 4cbf1aeaf..889d8511d 100644
    --- a/vendor/google.golang.org/protobuf/proto/decode.go
    +++ b/vendor/google.golang.org/protobuf/proto/decode.go
    @@ -121,9 +121,8 @@ func (o UnmarshalOptions) unmarshal(b []byte, m protoreflect.Message) (out proto
     
     		out, err = methods.Unmarshal(in)
     	} else {
    -		o.RecursionLimit--
    -		if o.RecursionLimit < 0 {
    -			return out, errors.New("exceeded max recursion depth")
    +		if o.RecursionLimit--; o.RecursionLimit < 0 {
    +			return out, errRecursionDepth
     		}
     		err = o.unmarshalMessageSlow(b, m)
     	}
    @@ -220,6 +219,9 @@ func (o UnmarshalOptions) unmarshalSingular(b []byte, wtyp protowire.Type, m pro
     }
     
     func (o UnmarshalOptions) unmarshalMap(b []byte, wtyp protowire.Type, mapv protoreflect.Map, fd protoreflect.FieldDescriptor) (n int, err error) {
    +	if o.RecursionLimit--; o.RecursionLimit < 0 {
    +		return 0, errRecursionDepth
    +	}
     	if wtyp != protowire.BytesType {
     		return 0, errUnknown
     	}
    @@ -305,3 +307,5 @@ func (o UnmarshalOptions) unmarshalMap(b []byte, wtyp protowire.Type, mapv proto
     var errUnknown = errors.New("BUG: internal error (unknown)")
     
     var errDecode = errors.New("cannot parse invalid wire-format data")
    +
    +var errRecursionDepth = errors.New("exceeded maximum recursion depth")
    diff --git a/vendor/google.golang.org/protobuf/proto/merge.go b/vendor/google.golang.org/protobuf/proto/merge.go
    index 3c6fe5780..ef55b97dd 100644
    --- a/vendor/google.golang.org/protobuf/proto/merge.go
    +++ b/vendor/google.golang.org/protobuf/proto/merge.go
    @@ -59,6 +59,12 @@ func Clone(m Message) Message {
     	return dst.Interface()
     }
     
    +// CloneOf returns a deep copy of m. If the top-level message is invalid,
    +// it returns an invalid message as well.
    +func CloneOf[M Message](m M) M {
    +	return Clone(m).(M)
    +}
    +
     // mergeOptions provides a namespace for merge functions, and can be
     // exported in the future if we add user-visible merge options.
     type mergeOptions struct{}
    diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go
    index 823dbf3ba..40f17af4e 100644
    --- a/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go
    +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go
    @@ -108,7 +108,9 @@ func (o FileOptions) New(fd *descriptorpb.FileDescriptorProto, r Resolver) (prot
     	if f.L1.Path == "" {
     		return nil, errors.New("file path must be populated")
     	}
    -	if f.L1.Syntax == protoreflect.Editions && (fd.GetEdition() < editionssupport.Minimum || fd.GetEdition() > editionssupport.Maximum) {
    +	if f.L1.Syntax == protoreflect.Editions &&
    +		(fd.GetEdition() < editionssupport.Minimum || fd.GetEdition() > editionssupport.Maximum) &&
    +		fd.GetEdition() != descriptorpb.Edition_EDITION_UNSTABLE {
     		// Allow cmd/protoc-gen-go/testdata to use any edition for easier
     		// testing of upcoming edition features.
     		if !strings.HasPrefix(fd.GetName(), "cmd/protoc-gen-go/testdata/") {
    @@ -152,6 +154,31 @@ func (o FileOptions) New(fd *descriptorpb.FileDescriptorProto, r Resolver) (prot
     		imp := &f.L2.Imports[i]
     		imps.importPublic(imp.Imports())
     	}
    +	optionImps := importSet{f.Path(): true}
    +	if len(fd.GetOptionDependency()) > 0 {
    +		optionImports := make(filedesc.FileImports, len(fd.GetOptionDependency()))
    +		for i, path := range fd.GetOptionDependency() {
    +			imp := &optionImports[i]
    +			f, err := r.FindFileByPath(path)
    +			if err == protoregistry.NotFound {
    +				// We always allow option imports to be unresolvable.
    +				f = filedesc.PlaceholderFile(path)
    +			} else if err != nil {
    +				return nil, errors.New("could not resolve import %q: %v", path, err)
    +			}
    +			imp.FileDescriptor = f
    +
    +			if imps[imp.Path()] || optionImps[imp.Path()] {
    +				return nil, errors.New("already imported %q", path)
    +			}
    +			// This needs to be a separate map so that we don't recognize non-options
    +			// symbols coming from option imports.
    +			optionImps[imp.Path()] = true
    +		}
    +		f.L2.OptionImports = func() protoreflect.FileImports {
    +			return &optionImports
    +		}
    +	}
     
     	// Handle source locations.
     	f.L2.Locations.File = f
    diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go
    index 9da34998b..c826ad043 100644
    --- a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go
    +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go
    @@ -29,6 +29,7 @@ func (r descsByName) initEnumDeclarations(eds []*descriptorpb.EnumDescriptorProt
     			e.L2.Options = func() protoreflect.ProtoMessage { return opts }
     		}
     		e.L1.EditionFeatures = mergeEditionFeatures(parent, ed.GetOptions().GetFeatures())
    +		e.L1.Visibility = int32(ed.GetVisibility())
     		for _, s := range ed.GetReservedName() {
     			e.L2.ReservedNames.List = append(e.L2.ReservedNames.List, protoreflect.Name(s))
     		}
    @@ -70,6 +71,7 @@ func (r descsByName) initMessagesDeclarations(mds []*descriptorpb.DescriptorProt
     			return nil, err
     		}
     		m.L1.EditionFeatures = mergeEditionFeatures(parent, md.GetOptions().GetFeatures())
    +		m.L1.Visibility = int32(md.GetVisibility())
     		if opts := md.GetOptions(); opts != nil {
     			opts = proto.Clone(opts).(*descriptorpb.MessageOptions)
     			m.L2.Options = func() protoreflect.ProtoMessage { return opts }
    diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go b/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go
    index 697a61b29..147b8c739 100644
    --- a/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go
    +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go
    @@ -46,6 +46,8 @@ func toEditionProto(ed filedesc.Edition) descriptorpb.Edition {
     		return descriptorpb.Edition_EDITION_2023
     	case filedesc.Edition2024:
     		return descriptorpb.Edition_EDITION_2024
    +	case filedesc.EditionUnstable:
    +		return descriptorpb.Edition_EDITION_UNSTABLE
     	default:
     		panic(fmt.Sprintf("unknown value for edition: %v", ed))
     	}
    @@ -58,7 +60,7 @@ func getFeatureSetFor(ed filedesc.Edition) *descriptorpb.FeatureSet {
     		return def
     	}
     	edpb := toEditionProto(ed)
    -	if defaults.GetMinimumEdition() > edpb || defaults.GetMaximumEdition() < edpb {
    +	if (defaults.GetMinimumEdition() > edpb || defaults.GetMaximumEdition() < edpb) && edpb != descriptorpb.Edition_EDITION_UNSTABLE {
     		// This should never happen protodesc.(FileOptions).New would fail when
     		// initializing the file descriptor.
     		// This most likely means the embedded defaults were not updated.
    diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go b/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go
    index 9b880aa8c..6f91074e3 100644
    --- a/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go
    +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go
    @@ -70,16 +70,27 @@ func ToFileDescriptorProto(file protoreflect.FileDescriptor) *descriptorpb.FileD
     	if syntax := file.Syntax(); syntax != protoreflect.Proto2 && syntax.IsValid() {
     		p.Syntax = proto.String(file.Syntax().String())
     	}
    +	desc := file
    +	if fileImportDesc, ok := file.(protoreflect.FileImport); ok {
    +		desc = fileImportDesc.FileDescriptor
    +	}
     	if file.Syntax() == protoreflect.Editions {
    -		desc := file
    -		if fileImportDesc, ok := file.(protoreflect.FileImport); ok {
    -			desc = fileImportDesc.FileDescriptor
    -		}
    -
     		if editionsInterface, ok := desc.(interface{ Edition() int32 }); ok {
     			p.Edition = descriptorpb.Edition(editionsInterface.Edition()).Enum()
     		}
     	}
    +	type hasOptionImports interface {
    +		OptionImports() protoreflect.FileImports
    +	}
    +	if opts, ok := desc.(hasOptionImports); ok {
    +		if optionImports := opts.OptionImports(); optionImports.Len() > 0 {
    +			optionDeps := make([]string, optionImports.Len())
    +			for i := range optionImports.Len() {
    +				optionDeps[i] = optionImports.Get(i).Path()
    +			}
    +			p.OptionDependency = optionDeps
    +		}
    +	}
     	return p
     }
     
    @@ -123,6 +134,14 @@ func ToDescriptorProto(message protoreflect.MessageDescriptor) *descriptorpb.Des
     	for i, names := 0, message.ReservedNames(); i < names.Len(); i++ {
     		p.ReservedName = append(p.ReservedName, string(names.Get(i)))
     	}
    +	type hasVisibility interface {
    +		Visibility() int32
    +	}
    +	if vis, ok := message.(hasVisibility); ok {
    +		if visibility := vis.Visibility(); visibility > 0 {
    +			p.Visibility = descriptorpb.SymbolVisibility(visibility).Enum()
    +		}
    +	}
     	return p
     }
     
    @@ -216,6 +235,14 @@ func ToEnumDescriptorProto(enum protoreflect.EnumDescriptor) *descriptorpb.EnumD
     	for i, names := 0, enum.ReservedNames(); i < names.Len(); i++ {
     		p.ReservedName = append(p.ReservedName, string(names.Get(i)))
     	}
    +	type hasVisibility interface {
    +		Visibility() int32
    +	}
    +	if vis, ok := enum.(hasVisibility); ok {
    +		if visibility := vis.Visibility(); visibility > 0 {
    +			p.Visibility = descriptorpb.SymbolVisibility(visibility).Enum()
    +		}
    +	}
     	return p
     }
     
    diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go
    index ea154eec4..730331e66 100644
    --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go
    +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go
    @@ -21,6 +21,8 @@ func (p *SourcePath) appendFileDescriptorProto(b []byte) []byte {
     		b = p.appendRepeatedField(b, "public_dependency", nil)
     	case 11:
     		b = p.appendRepeatedField(b, "weak_dependency", nil)
    +	case 15:
    +		b = p.appendRepeatedField(b, "option_dependency", nil)
     	case 4:
     		b = p.appendRepeatedField(b, "message_type", (*SourcePath).appendDescriptorProto)
     	case 5:
    @@ -66,6 +68,8 @@ func (p *SourcePath) appendDescriptorProto(b []byte) []byte {
     		b = p.appendRepeatedField(b, "reserved_range", (*SourcePath).appendDescriptorProto_ReservedRange)
     	case 10:
     		b = p.appendRepeatedField(b, "reserved_name", nil)
    +	case 11:
    +		b = p.appendSingularField(b, "visibility", nil)
     	}
     	return b
     }
    @@ -85,6 +89,8 @@ func (p *SourcePath) appendEnumDescriptorProto(b []byte) []byte {
     		b = p.appendRepeatedField(b, "reserved_range", (*SourcePath).appendEnumDescriptorProto_EnumReservedRange)
     	case 5:
     		b = p.appendRepeatedField(b, "reserved_name", nil)
    +	case 6:
    +		b = p.appendSingularField(b, "visibility", nil)
     	}
     	return b
     }
    @@ -398,6 +404,10 @@ func (p *SourcePath) appendFeatureSet(b []byte) []byte {
     		b = p.appendSingularField(b, "message_encoding", nil)
     	case 6:
     		b = p.appendSingularField(b, "json_format", nil)
    +	case 7:
    +		b = p.appendSingularField(b, "enforce_naming_style", nil)
    +	case 8:
    +		b = p.appendSingularField(b, "default_symbol_visibility", nil)
     	}
     	return b
     }
    diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe.go
    new file mode 100644
    index 000000000..fe17f3722
    --- /dev/null
    +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe.go
    @@ -0,0 +1,84 @@
    +// Copyright 2018 The Go Authors. All rights reserved.
    +// Use of this source code is governed by a BSD-style
    +// license that can be found in the LICENSE file.
    +
    +package protoreflect
    +
    +import (
    +	"unsafe"
    +
    +	"google.golang.org/protobuf/internal/pragma"
    +)
    +
    +type (
    +	ifaceHeader struct {
    +		_    [0]any // if interfaces have greater alignment than unsafe.Pointer, this will enforce it.
    +		Type unsafe.Pointer
    +		Data unsafe.Pointer
    +	}
    +)
    +
    +var (
    +	nilType     = typeOf(nil)
    +	boolType    = typeOf(*new(bool))
    +	int32Type   = typeOf(*new(int32))
    +	int64Type   = typeOf(*new(int64))
    +	uint32Type  = typeOf(*new(uint32))
    +	uint64Type  = typeOf(*new(uint64))
    +	float32Type = typeOf(*new(float32))
    +	float64Type = typeOf(*new(float64))
    +	stringType  = typeOf(*new(string))
    +	bytesType   = typeOf(*new([]byte))
    +	enumType    = typeOf(*new(EnumNumber))
    +)
    +
    +// typeOf returns a pointer to the Go type information.
    +// The pointer is comparable and equal if and only if the types are identical.
    +func typeOf(t any) unsafe.Pointer {
    +	return (*ifaceHeader)(unsafe.Pointer(&t)).Type
    +}
    +
    +// value is a union where only one type can be represented at a time.
    +// The struct is 24B large on 64-bit systems and requires the minimum storage
    +// necessary to represent each possible type.
    +//
    +// The Go GC needs to be able to scan variables containing pointers.
    +// As such, pointers and non-pointers cannot be intermixed.
    +type value struct {
    +	pragma.DoNotCompare // 0B
    +
    +	// typ stores the type of the value as a pointer to the Go type.
    +	typ unsafe.Pointer // 8B
    +
    +	// ptr stores the data pointer for a String, Bytes, or interface value.
    +	ptr unsafe.Pointer // 8B
    +
    +	// num stores a Bool, Int32, Int64, Uint32, Uint64, Float32, Float64, or
    +	// Enum value as a raw uint64.
    +	//
    +	// It is also used to store the length of a String or Bytes value;
    +	// the capacity is ignored.
    +	num uint64 // 8B
    +}
    +
    +func valueOfString(v string) Value {
    +	return Value{typ: stringType, ptr: unsafe.Pointer(unsafe.StringData(v)), num: uint64(len(v))}
    +}
    +func valueOfBytes(v []byte) Value {
    +	return Value{typ: bytesType, ptr: unsafe.Pointer(unsafe.SliceData(v)), num: uint64(len(v))}
    +}
    +func valueOfIface(v any) Value {
    +	p := (*ifaceHeader)(unsafe.Pointer(&v))
    +	return Value{typ: p.Type, ptr: p.Data}
    +}
    +
    +func (v Value) getString() string {
    +	return unsafe.String((*byte)(v.ptr), v.num)
    +}
    +func (v Value) getBytes() []byte {
    +	return unsafe.Slice((*byte)(v.ptr), v.num)
    +}
    +func (v Value) getIface() (x any) {
    +	*(*ifaceHeader)(unsafe.Pointer(&x)) = ifaceHeader{Type: v.typ, Data: v.ptr}
    +	return x
    +}
    diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go
    deleted file mode 100644
    index 0015fcb35..000000000
    --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go
    +++ /dev/null
    @@ -1,98 +0,0 @@
    -// Copyright 2018 The Go Authors. All rights reserved.
    -// Use of this source code is governed by a BSD-style
    -// license that can be found in the LICENSE file.
    -
    -//go:build !go1.21
    -
    -package protoreflect
    -
    -import (
    -	"unsafe"
    -
    -	"google.golang.org/protobuf/internal/pragma"
    -)
    -
    -type (
    -	stringHeader struct {
    -		Data unsafe.Pointer
    -		Len  int
    -	}
    -	sliceHeader struct {
    -		Data unsafe.Pointer
    -		Len  int
    -		Cap  int
    -	}
    -	ifaceHeader struct {
    -		Type unsafe.Pointer
    -		Data unsafe.Pointer
    -	}
    -)
    -
    -var (
    -	nilType     = typeOf(nil)
    -	boolType    = typeOf(*new(bool))
    -	int32Type   = typeOf(*new(int32))
    -	int64Type   = typeOf(*new(int64))
    -	uint32Type  = typeOf(*new(uint32))
    -	uint64Type  = typeOf(*new(uint64))
    -	float32Type = typeOf(*new(float32))
    -	float64Type = typeOf(*new(float64))
    -	stringType  = typeOf(*new(string))
    -	bytesType   = typeOf(*new([]byte))
    -	enumType    = typeOf(*new(EnumNumber))
    -)
    -
    -// typeOf returns a pointer to the Go type information.
    -// The pointer is comparable and equal if and only if the types are identical.
    -func typeOf(t any) unsafe.Pointer {
    -	return (*ifaceHeader)(unsafe.Pointer(&t)).Type
    -}
    -
    -// value is a union where only one type can be represented at a time.
    -// The struct is 24B large on 64-bit systems and requires the minimum storage
    -// necessary to represent each possible type.
    -//
    -// The Go GC needs to be able to scan variables containing pointers.
    -// As such, pointers and non-pointers cannot be intermixed.
    -type value struct {
    -	pragma.DoNotCompare // 0B
    -
    -	// typ stores the type of the value as a pointer to the Go type.
    -	typ unsafe.Pointer // 8B
    -
    -	// ptr stores the data pointer for a String, Bytes, or interface value.
    -	ptr unsafe.Pointer // 8B
    -
    -	// num stores a Bool, Int32, Int64, Uint32, Uint64, Float32, Float64, or
    -	// Enum value as a raw uint64.
    -	//
    -	// It is also used to store the length of a String or Bytes value;
    -	// the capacity is ignored.
    -	num uint64 // 8B
    -}
    -
    -func valueOfString(v string) Value {
    -	p := (*stringHeader)(unsafe.Pointer(&v))
    -	return Value{typ: stringType, ptr: p.Data, num: uint64(len(v))}
    -}
    -func valueOfBytes(v []byte) Value {
    -	p := (*sliceHeader)(unsafe.Pointer(&v))
    -	return Value{typ: bytesType, ptr: p.Data, num: uint64(len(v))}
    -}
    -func valueOfIface(v any) Value {
    -	p := (*ifaceHeader)(unsafe.Pointer(&v))
    -	return Value{typ: p.Type, ptr: p.Data}
    -}
    -
    -func (v Value) getString() (x string) {
    -	*(*stringHeader)(unsafe.Pointer(&x)) = stringHeader{Data: v.ptr, Len: int(v.num)}
    -	return x
    -}
    -func (v Value) getBytes() (x []byte) {
    -	*(*sliceHeader)(unsafe.Pointer(&x)) = sliceHeader{Data: v.ptr, Len: int(v.num), Cap: int(v.num)}
    -	return x
    -}
    -func (v Value) getIface() (x any) {
    -	*(*ifaceHeader)(unsafe.Pointer(&x)) = ifaceHeader{Type: v.typ, Data: v.ptr}
    -	return x
    -}
    diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go
    deleted file mode 100644
    index 479527b58..000000000
    --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go
    +++ /dev/null
    @@ -1,86 +0,0 @@
    -// Copyright 2018 The Go Authors. All rights reserved.
    -// Use of this source code is governed by a BSD-style
    -// license that can be found in the LICENSE file.
    -
    -//go:build go1.21
    -
    -package protoreflect
    -
    -import (
    -	"unsafe"
    -
    -	"google.golang.org/protobuf/internal/pragma"
    -)
    -
    -type (
    -	ifaceHeader struct {
    -		_    [0]any // if interfaces have greater alignment than unsafe.Pointer, this will enforce it.
    -		Type unsafe.Pointer
    -		Data unsafe.Pointer
    -	}
    -)
    -
    -var (
    -	nilType     = typeOf(nil)
    -	boolType    = typeOf(*new(bool))
    -	int32Type   = typeOf(*new(int32))
    -	int64Type   = typeOf(*new(int64))
    -	uint32Type  = typeOf(*new(uint32))
    -	uint64Type  = typeOf(*new(uint64))
    -	float32Type = typeOf(*new(float32))
    -	float64Type = typeOf(*new(float64))
    -	stringType  = typeOf(*new(string))
    -	bytesType   = typeOf(*new([]byte))
    -	enumType    = typeOf(*new(EnumNumber))
    -)
    -
    -// typeOf returns a pointer to the Go type information.
    -// The pointer is comparable and equal if and only if the types are identical.
    -func typeOf(t any) unsafe.Pointer {
    -	return (*ifaceHeader)(unsafe.Pointer(&t)).Type
    -}
    -
    -// value is a union where only one type can be represented at a time.
    -// The struct is 24B large on 64-bit systems and requires the minimum storage
    -// necessary to represent each possible type.
    -//
    -// The Go GC needs to be able to scan variables containing pointers.
    -// As such, pointers and non-pointers cannot be intermixed.
    -type value struct {
    -	pragma.DoNotCompare // 0B
    -
    -	// typ stores the type of the value as a pointer to the Go type.
    -	typ unsafe.Pointer // 8B
    -
    -	// ptr stores the data pointer for a String, Bytes, or interface value.
    -	ptr unsafe.Pointer // 8B
    -
    -	// num stores a Bool, Int32, Int64, Uint32, Uint64, Float32, Float64, or
    -	// Enum value as a raw uint64.
    -	//
    -	// It is also used to store the length of a String or Bytes value;
    -	// the capacity is ignored.
    -	num uint64 // 8B
    -}
    -
    -func valueOfString(v string) Value {
    -	return Value{typ: stringType, ptr: unsafe.Pointer(unsafe.StringData(v)), num: uint64(len(v))}
    -}
    -func valueOfBytes(v []byte) Value {
    -	return Value{typ: bytesType, ptr: unsafe.Pointer(unsafe.SliceData(v)), num: uint64(len(v))}
    -}
    -func valueOfIface(v any) Value {
    -	p := (*ifaceHeader)(unsafe.Pointer(&v))
    -	return Value{typ: p.Type, ptr: p.Data}
    -}
    -
    -func (v Value) getString() string {
    -	return unsafe.String((*byte)(v.ptr), v.num)
    -}
    -func (v Value) getBytes() []byte {
    -	return unsafe.Slice((*byte)(v.ptr), v.num)
    -}
    -func (v Value) getIface() (x any) {
    -	*(*ifaceHeader)(unsafe.Pointer(&x)) = ifaceHeader{Type: v.typ, Data: v.ptr}
    -	return x
    -}
    diff --git a/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go
    index a51633767..0b23faa95 100644
    --- a/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go
    +++ b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go
    @@ -69,6 +69,8 @@ const (
     	// comparison.
     	Edition_EDITION_2023 Edition = 1000
     	Edition_EDITION_2024 Edition = 1001
    +	// A placeholder edition for developing and testing unscheduled features.
    +	Edition_EDITION_UNSTABLE Edition = 9999
     	// Placeholder editions for testing feature resolution.  These should not be
     	// used or relied on outside of tests.
     	Edition_EDITION_1_TEST_ONLY     Edition = 1
    @@ -91,6 +93,7 @@ var (
     		999:        "EDITION_PROTO3",
     		1000:       "EDITION_2023",
     		1001:       "EDITION_2024",
    +		9999:       "EDITION_UNSTABLE",
     		1:          "EDITION_1_TEST_ONLY",
     		2:          "EDITION_2_TEST_ONLY",
     		99997:      "EDITION_99997_TEST_ONLY",
    @@ -105,6 +108,7 @@ var (
     		"EDITION_PROTO3":          999,
     		"EDITION_2023":            1000,
     		"EDITION_2024":            1001,
    +		"EDITION_UNSTABLE":        9999,
     		"EDITION_1_TEST_ONLY":     1,
     		"EDITION_2_TEST_ONLY":     2,
     		"EDITION_99997_TEST_ONLY": 99997,
    @@ -151,6 +155,70 @@ func (Edition) EnumDescriptor() ([]byte, []int) {
     	return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{0}
     }
     
    +// Describes the 'visibility' of a symbol with respect to the proto import
    +// system. Symbols can only be imported when the visibility rules do not prevent
    +// it (ex: local symbols cannot be imported).  Visibility modifiers can only set
    +// on `message` and `enum` as they are the only types available to be referenced
    +// from other files.
    +type SymbolVisibility int32
    +
    +const (
    +	SymbolVisibility_VISIBILITY_UNSET  SymbolVisibility = 0
    +	SymbolVisibility_VISIBILITY_LOCAL  SymbolVisibility = 1
    +	SymbolVisibility_VISIBILITY_EXPORT SymbolVisibility = 2
    +)
    +
    +// Enum value maps for SymbolVisibility.
    +var (
    +	SymbolVisibility_name = map[int32]string{
    +		0: "VISIBILITY_UNSET",
    +		1: "VISIBILITY_LOCAL",
    +		2: "VISIBILITY_EXPORT",
    +	}
    +	SymbolVisibility_value = map[string]int32{
    +		"VISIBILITY_UNSET":  0,
    +		"VISIBILITY_LOCAL":  1,
    +		"VISIBILITY_EXPORT": 2,
    +	}
    +)
    +
    +func (x SymbolVisibility) Enum() *SymbolVisibility {
    +	p := new(SymbolVisibility)
    +	*p = x
    +	return p
    +}
    +
    +func (x SymbolVisibility) String() string {
    +	return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
    +}
    +
    +func (SymbolVisibility) Descriptor() protoreflect.EnumDescriptor {
    +	return file_google_protobuf_descriptor_proto_enumTypes[1].Descriptor()
    +}
    +
    +func (SymbolVisibility) Type() protoreflect.EnumType {
    +	return &file_google_protobuf_descriptor_proto_enumTypes[1]
    +}
    +
    +func (x SymbolVisibility) Number() protoreflect.EnumNumber {
    +	return protoreflect.EnumNumber(x)
    +}
    +
    +// Deprecated: Do not use.
    +func (x *SymbolVisibility) UnmarshalJSON(b []byte) error {
    +	num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b)
    +	if err != nil {
    +		return err
    +	}
    +	*x = SymbolVisibility(num)
    +	return nil
    +}
    +
    +// Deprecated: Use SymbolVisibility.Descriptor instead.
    +func (SymbolVisibility) EnumDescriptor() ([]byte, []int) {
    +	return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{1}
    +}
    +
     // The verification state of the extension range.
     type ExtensionRangeOptions_VerificationState int32
     
    @@ -183,11 +251,11 @@ func (x ExtensionRangeOptions_VerificationState) String() string {
     }
     
     func (ExtensionRangeOptions_VerificationState) Descriptor() protoreflect.EnumDescriptor {
    -	return file_google_protobuf_descriptor_proto_enumTypes[1].Descriptor()
    +	return file_google_protobuf_descriptor_proto_enumTypes[2].Descriptor()
     }
     
     func (ExtensionRangeOptions_VerificationState) Type() protoreflect.EnumType {
    -	return &file_google_protobuf_descriptor_proto_enumTypes[1]
    +	return &file_google_protobuf_descriptor_proto_enumTypes[2]
     }
     
     func (x ExtensionRangeOptions_VerificationState) Number() protoreflect.EnumNumber {
    @@ -299,11 +367,11 @@ func (x FieldDescriptorProto_Type) String() string {
     }
     
     func (FieldDescriptorProto_Type) Descriptor() protoreflect.EnumDescriptor {
    -	return file_google_protobuf_descriptor_proto_enumTypes[2].Descriptor()
    +	return file_google_protobuf_descriptor_proto_enumTypes[3].Descriptor()
     }
     
     func (FieldDescriptorProto_Type) Type() protoreflect.EnumType {
    -	return &file_google_protobuf_descriptor_proto_enumTypes[2]
    +	return &file_google_protobuf_descriptor_proto_enumTypes[3]
     }
     
     func (x FieldDescriptorProto_Type) Number() protoreflect.EnumNumber {
    @@ -362,11 +430,11 @@ func (x FieldDescriptorProto_Label) String() string {
     }
     
     func (FieldDescriptorProto_Label) Descriptor() protoreflect.EnumDescriptor {
    -	return file_google_protobuf_descriptor_proto_enumTypes[3].Descriptor()
    +	return file_google_protobuf_descriptor_proto_enumTypes[4].Descriptor()
     }
     
     func (FieldDescriptorProto_Label) Type() protoreflect.EnumType {
    -	return &file_google_protobuf_descriptor_proto_enumTypes[3]
    +	return &file_google_protobuf_descriptor_proto_enumTypes[4]
     }
     
     func (x FieldDescriptorProto_Label) Number() protoreflect.EnumNumber {
    @@ -423,11 +491,11 @@ func (x FileOptions_OptimizeMode) String() string {
     }
     
     func (FileOptions_OptimizeMode) Descriptor() protoreflect.EnumDescriptor {
    -	return file_google_protobuf_descriptor_proto_enumTypes[4].Descriptor()
    +	return file_google_protobuf_descriptor_proto_enumTypes[5].Descriptor()
     }
     
     func (FileOptions_OptimizeMode) Type() protoreflect.EnumType {
    -	return &file_google_protobuf_descriptor_proto_enumTypes[4]
    +	return &file_google_protobuf_descriptor_proto_enumTypes[5]
     }
     
     func (x FileOptions_OptimizeMode) Number() protoreflect.EnumNumber {
    @@ -489,11 +557,11 @@ func (x FieldOptions_CType) String() string {
     }
     
     func (FieldOptions_CType) Descriptor() protoreflect.EnumDescriptor {
    -	return file_google_protobuf_descriptor_proto_enumTypes[5].Descriptor()
    +	return file_google_protobuf_descriptor_proto_enumTypes[6].Descriptor()
     }
     
     func (FieldOptions_CType) Type() protoreflect.EnumType {
    -	return &file_google_protobuf_descriptor_proto_enumTypes[5]
    +	return &file_google_protobuf_descriptor_proto_enumTypes[6]
     }
     
     func (x FieldOptions_CType) Number() protoreflect.EnumNumber {
    @@ -551,11 +619,11 @@ func (x FieldOptions_JSType) String() string {
     }
     
     func (FieldOptions_JSType) Descriptor() protoreflect.EnumDescriptor {
    -	return file_google_protobuf_descriptor_proto_enumTypes[6].Descriptor()
    +	return file_google_protobuf_descriptor_proto_enumTypes[7].Descriptor()
     }
     
     func (FieldOptions_JSType) Type() protoreflect.EnumType {
    -	return &file_google_protobuf_descriptor_proto_enumTypes[6]
    +	return &file_google_protobuf_descriptor_proto_enumTypes[7]
     }
     
     func (x FieldOptions_JSType) Number() protoreflect.EnumNumber {
    @@ -611,11 +679,11 @@ func (x FieldOptions_OptionRetention) String() string {
     }
     
     func (FieldOptions_OptionRetention) Descriptor() protoreflect.EnumDescriptor {
    -	return file_google_protobuf_descriptor_proto_enumTypes[7].Descriptor()
    +	return file_google_protobuf_descriptor_proto_enumTypes[8].Descriptor()
     }
     
     func (FieldOptions_OptionRetention) Type() protoreflect.EnumType {
    -	return &file_google_protobuf_descriptor_proto_enumTypes[7]
    +	return &file_google_protobuf_descriptor_proto_enumTypes[8]
     }
     
     func (x FieldOptions_OptionRetention) Number() protoreflect.EnumNumber {
    @@ -694,11 +762,11 @@ func (x FieldOptions_OptionTargetType) String() string {
     }
     
     func (FieldOptions_OptionTargetType) Descriptor() protoreflect.EnumDescriptor {
    -	return file_google_protobuf_descriptor_proto_enumTypes[8].Descriptor()
    +	return file_google_protobuf_descriptor_proto_enumTypes[9].Descriptor()
     }
     
     func (FieldOptions_OptionTargetType) Type() protoreflect.EnumType {
    -	return &file_google_protobuf_descriptor_proto_enumTypes[8]
    +	return &file_google_protobuf_descriptor_proto_enumTypes[9]
     }
     
     func (x FieldOptions_OptionTargetType) Number() protoreflect.EnumNumber {
    @@ -756,11 +824,11 @@ func (x MethodOptions_IdempotencyLevel) String() string {
     }
     
     func (MethodOptions_IdempotencyLevel) Descriptor() protoreflect.EnumDescriptor {
    -	return file_google_protobuf_descriptor_proto_enumTypes[9].Descriptor()
    +	return file_google_protobuf_descriptor_proto_enumTypes[10].Descriptor()
     }
     
     func (MethodOptions_IdempotencyLevel) Type() protoreflect.EnumType {
    -	return &file_google_protobuf_descriptor_proto_enumTypes[9]
    +	return &file_google_protobuf_descriptor_proto_enumTypes[10]
     }
     
     func (x MethodOptions_IdempotencyLevel) Number() protoreflect.EnumNumber {
    @@ -818,11 +886,11 @@ func (x FeatureSet_FieldPresence) String() string {
     }
     
     func (FeatureSet_FieldPresence) Descriptor() protoreflect.EnumDescriptor {
    -	return file_google_protobuf_descriptor_proto_enumTypes[10].Descriptor()
    +	return file_google_protobuf_descriptor_proto_enumTypes[11].Descriptor()
     }
     
     func (FeatureSet_FieldPresence) Type() protoreflect.EnumType {
    -	return &file_google_protobuf_descriptor_proto_enumTypes[10]
    +	return &file_google_protobuf_descriptor_proto_enumTypes[11]
     }
     
     func (x FeatureSet_FieldPresence) Number() protoreflect.EnumNumber {
    @@ -877,11 +945,11 @@ func (x FeatureSet_EnumType) String() string {
     }
     
     func (FeatureSet_EnumType) Descriptor() protoreflect.EnumDescriptor {
    -	return file_google_protobuf_descriptor_proto_enumTypes[11].Descriptor()
    +	return file_google_protobuf_descriptor_proto_enumTypes[12].Descriptor()
     }
     
     func (FeatureSet_EnumType) Type() protoreflect.EnumType {
    -	return &file_google_protobuf_descriptor_proto_enumTypes[11]
    +	return &file_google_protobuf_descriptor_proto_enumTypes[12]
     }
     
     func (x FeatureSet_EnumType) Number() protoreflect.EnumNumber {
    @@ -936,11 +1004,11 @@ func (x FeatureSet_RepeatedFieldEncoding) String() string {
     }
     
     func (FeatureSet_RepeatedFieldEncoding) Descriptor() protoreflect.EnumDescriptor {
    -	return file_google_protobuf_descriptor_proto_enumTypes[12].Descriptor()
    +	return file_google_protobuf_descriptor_proto_enumTypes[13].Descriptor()
     }
     
     func (FeatureSet_RepeatedFieldEncoding) Type() protoreflect.EnumType {
    -	return &file_google_protobuf_descriptor_proto_enumTypes[12]
    +	return &file_google_protobuf_descriptor_proto_enumTypes[13]
     }
     
     func (x FeatureSet_RepeatedFieldEncoding) Number() protoreflect.EnumNumber {
    @@ -995,11 +1063,11 @@ func (x FeatureSet_Utf8Validation) String() string {
     }
     
     func (FeatureSet_Utf8Validation) Descriptor() protoreflect.EnumDescriptor {
    -	return file_google_protobuf_descriptor_proto_enumTypes[13].Descriptor()
    +	return file_google_protobuf_descriptor_proto_enumTypes[14].Descriptor()
     }
     
     func (FeatureSet_Utf8Validation) Type() protoreflect.EnumType {
    -	return &file_google_protobuf_descriptor_proto_enumTypes[13]
    +	return &file_google_protobuf_descriptor_proto_enumTypes[14]
     }
     
     func (x FeatureSet_Utf8Validation) Number() protoreflect.EnumNumber {
    @@ -1054,11 +1122,11 @@ func (x FeatureSet_MessageEncoding) String() string {
     }
     
     func (FeatureSet_MessageEncoding) Descriptor() protoreflect.EnumDescriptor {
    -	return file_google_protobuf_descriptor_proto_enumTypes[14].Descriptor()
    +	return file_google_protobuf_descriptor_proto_enumTypes[15].Descriptor()
     }
     
     func (FeatureSet_MessageEncoding) Type() protoreflect.EnumType {
    -	return &file_google_protobuf_descriptor_proto_enumTypes[14]
    +	return &file_google_protobuf_descriptor_proto_enumTypes[15]
     }
     
     func (x FeatureSet_MessageEncoding) Number() protoreflect.EnumNumber {
    @@ -1113,11 +1181,11 @@ func (x FeatureSet_JsonFormat) String() string {
     }
     
     func (FeatureSet_JsonFormat) Descriptor() protoreflect.EnumDescriptor {
    -	return file_google_protobuf_descriptor_proto_enumTypes[15].Descriptor()
    +	return file_google_protobuf_descriptor_proto_enumTypes[16].Descriptor()
     }
     
     func (FeatureSet_JsonFormat) Type() protoreflect.EnumType {
    -	return &file_google_protobuf_descriptor_proto_enumTypes[15]
    +	return &file_google_protobuf_descriptor_proto_enumTypes[16]
     }
     
     func (x FeatureSet_JsonFormat) Number() protoreflect.EnumNumber {
    @@ -1139,6 +1207,136 @@ func (FeatureSet_JsonFormat) EnumDescriptor() ([]byte, []int) {
     	return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19, 5}
     }
     
    +type FeatureSet_EnforceNamingStyle int32
    +
    +const (
    +	FeatureSet_ENFORCE_NAMING_STYLE_UNKNOWN FeatureSet_EnforceNamingStyle = 0
    +	FeatureSet_STYLE2024                    FeatureSet_EnforceNamingStyle = 1
    +	FeatureSet_STYLE_LEGACY                 FeatureSet_EnforceNamingStyle = 2
    +)
    +
    +// Enum value maps for FeatureSet_EnforceNamingStyle.
    +var (
    +	FeatureSet_EnforceNamingStyle_name = map[int32]string{
    +		0: "ENFORCE_NAMING_STYLE_UNKNOWN",
    +		1: "STYLE2024",
    +		2: "STYLE_LEGACY",
    +	}
    +	FeatureSet_EnforceNamingStyle_value = map[string]int32{
    +		"ENFORCE_NAMING_STYLE_UNKNOWN": 0,
    +		"STYLE2024":                    1,
    +		"STYLE_LEGACY":                 2,
    +	}
    +)
    +
    +func (x FeatureSet_EnforceNamingStyle) Enum() *FeatureSet_EnforceNamingStyle {
    +	p := new(FeatureSet_EnforceNamingStyle)
    +	*p = x
    +	return p
    +}
    +
    +func (x FeatureSet_EnforceNamingStyle) String() string {
    +	return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
    +}
    +
    +func (FeatureSet_EnforceNamingStyle) Descriptor() protoreflect.EnumDescriptor {
    +	return file_google_protobuf_descriptor_proto_enumTypes[17].Descriptor()
    +}
    +
    +func (FeatureSet_EnforceNamingStyle) Type() protoreflect.EnumType {
    +	return &file_google_protobuf_descriptor_proto_enumTypes[17]
    +}
    +
    +func (x FeatureSet_EnforceNamingStyle) Number() protoreflect.EnumNumber {
    +	return protoreflect.EnumNumber(x)
    +}
    +
    +// Deprecated: Do not use.
    +func (x *FeatureSet_EnforceNamingStyle) UnmarshalJSON(b []byte) error {
    +	num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b)
    +	if err != nil {
    +		return err
    +	}
    +	*x = FeatureSet_EnforceNamingStyle(num)
    +	return nil
    +}
    +
    +// Deprecated: Use FeatureSet_EnforceNamingStyle.Descriptor instead.
    +func (FeatureSet_EnforceNamingStyle) EnumDescriptor() ([]byte, []int) {
    +	return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19, 6}
    +}
    +
    +type FeatureSet_VisibilityFeature_DefaultSymbolVisibility int32
    +
    +const (
    +	FeatureSet_VisibilityFeature_DEFAULT_SYMBOL_VISIBILITY_UNKNOWN FeatureSet_VisibilityFeature_DefaultSymbolVisibility = 0
    +	// Default pre-EDITION_2024, all UNSET visibility are export.
    +	FeatureSet_VisibilityFeature_EXPORT_ALL FeatureSet_VisibilityFeature_DefaultSymbolVisibility = 1
    +	// All top-level symbols default to export, nested default to local.
    +	FeatureSet_VisibilityFeature_EXPORT_TOP_LEVEL FeatureSet_VisibilityFeature_DefaultSymbolVisibility = 2
    +	// All symbols default to local.
    +	FeatureSet_VisibilityFeature_LOCAL_ALL FeatureSet_VisibilityFeature_DefaultSymbolVisibility = 3
    +	// All symbols local by default. Nested types cannot be exported.
    +	// With special case caveat for message { enum {} reserved 1 to max; }
    +	// This is the recommended setting for new protos.
    +	FeatureSet_VisibilityFeature_STRICT FeatureSet_VisibilityFeature_DefaultSymbolVisibility = 4
    +)
    +
    +// Enum value maps for FeatureSet_VisibilityFeature_DefaultSymbolVisibility.
    +var (
    +	FeatureSet_VisibilityFeature_DefaultSymbolVisibility_name = map[int32]string{
    +		0: "DEFAULT_SYMBOL_VISIBILITY_UNKNOWN",
    +		1: "EXPORT_ALL",
    +		2: "EXPORT_TOP_LEVEL",
    +		3: "LOCAL_ALL",
    +		4: "STRICT",
    +	}
    +	FeatureSet_VisibilityFeature_DefaultSymbolVisibility_value = map[string]int32{
    +		"DEFAULT_SYMBOL_VISIBILITY_UNKNOWN": 0,
    +		"EXPORT_ALL":                        1,
    +		"EXPORT_TOP_LEVEL":                  2,
    +		"LOCAL_ALL":                         3,
    +		"STRICT":                            4,
    +	}
    +)
    +
    +func (x FeatureSet_VisibilityFeature_DefaultSymbolVisibility) Enum() *FeatureSet_VisibilityFeature_DefaultSymbolVisibility {
    +	p := new(FeatureSet_VisibilityFeature_DefaultSymbolVisibility)
    +	*p = x
    +	return p
    +}
    +
    +func (x FeatureSet_VisibilityFeature_DefaultSymbolVisibility) String() string {
    +	return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
    +}
    +
    +func (FeatureSet_VisibilityFeature_DefaultSymbolVisibility) Descriptor() protoreflect.EnumDescriptor {
    +	return file_google_protobuf_descriptor_proto_enumTypes[18].Descriptor()
    +}
    +
    +func (FeatureSet_VisibilityFeature_DefaultSymbolVisibility) Type() protoreflect.EnumType {
    +	return &file_google_protobuf_descriptor_proto_enumTypes[18]
    +}
    +
    +func (x FeatureSet_VisibilityFeature_DefaultSymbolVisibility) Number() protoreflect.EnumNumber {
    +	return protoreflect.EnumNumber(x)
    +}
    +
    +// Deprecated: Do not use.
    +func (x *FeatureSet_VisibilityFeature_DefaultSymbolVisibility) UnmarshalJSON(b []byte) error {
    +	num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b)
    +	if err != nil {
    +		return err
    +	}
    +	*x = FeatureSet_VisibilityFeature_DefaultSymbolVisibility(num)
    +	return nil
    +}
    +
    +// Deprecated: Use FeatureSet_VisibilityFeature_DefaultSymbolVisibility.Descriptor instead.
    +func (FeatureSet_VisibilityFeature_DefaultSymbolVisibility) EnumDescriptor() ([]byte, []int) {
    +	return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19, 0, 0}
    +}
    +
     // Represents the identified object's effect on the element in the original
     // .proto file.
     type GeneratedCodeInfo_Annotation_Semantic int32
    @@ -1177,11 +1375,11 @@ func (x GeneratedCodeInfo_Annotation_Semantic) String() string {
     }
     
     func (GeneratedCodeInfo_Annotation_Semantic) Descriptor() protoreflect.EnumDescriptor {
    -	return file_google_protobuf_descriptor_proto_enumTypes[16].Descriptor()
    +	return file_google_protobuf_descriptor_proto_enumTypes[19].Descriptor()
     }
     
     func (GeneratedCodeInfo_Annotation_Semantic) Type() protoreflect.EnumType {
    -	return &file_google_protobuf_descriptor_proto_enumTypes[16]
    +	return &file_google_protobuf_descriptor_proto_enumTypes[19]
     }
     
     func (x GeneratedCodeInfo_Annotation_Semantic) Number() protoreflect.EnumNumber {
    @@ -1262,6 +1460,9 @@ type FileDescriptorProto struct {
     	// Indexes of the weak imported files in the dependency list.
     	// For Google-internal migration only. Do not use.
     	WeakDependency []int32 `protobuf:"varint,11,rep,name=weak_dependency,json=weakDependency" json:"weak_dependency,omitempty"`
    +	// Names of files imported by this file purely for the purpose of providing
    +	// option extensions. These are excluded from the dependency list above.
    +	OptionDependency []string `protobuf:"bytes,15,rep,name=option_dependency,json=optionDependency" json:"option_dependency,omitempty"`
     	// All top-level definitions in this file.
     	MessageType []*DescriptorProto        `protobuf:"bytes,4,rep,name=message_type,json=messageType" json:"message_type,omitempty"`
     	EnumType    []*EnumDescriptorProto    `protobuf:"bytes,5,rep,name=enum_type,json=enumType" json:"enum_type,omitempty"`
    @@ -1277,8 +1478,14 @@ type FileDescriptorProto struct {
     	// The supported values are "proto2", "proto3", and "editions".
     	//
     	// If `edition` is present, this value must be "editions".
    +	// WARNING: This field should only be used by protobuf plugins or special
    +	// cases like the proto compiler. Other uses are discouraged and
    +	// developers should rely on the protoreflect APIs for their client language.
     	Syntax *string `protobuf:"bytes,12,opt,name=syntax" json:"syntax,omitempty"`
     	// The edition of the proto file.
    +	// WARNING: This field should only be used by protobuf plugins or special
    +	// cases like the proto compiler. Other uses are discouraged and
    +	// developers should rely on the protoreflect APIs for their client language.
     	Edition       *Edition `protobuf:"varint,14,opt,name=edition,enum=google.protobuf.Edition" json:"edition,omitempty"`
     	unknownFields protoimpl.UnknownFields
     	sizeCache     protoimpl.SizeCache
    @@ -1349,6 +1556,13 @@ func (x *FileDescriptorProto) GetWeakDependency() []int32 {
     	return nil
     }
     
    +func (x *FileDescriptorProto) GetOptionDependency() []string {
    +	if x != nil {
    +		return x.OptionDependency
    +	}
    +	return nil
    +}
    +
     func (x *FileDescriptorProto) GetMessageType() []*DescriptorProto {
     	if x != nil {
     		return x.MessageType
    @@ -1419,7 +1633,9 @@ type DescriptorProto struct {
     	ReservedRange  []*DescriptorProto_ReservedRange  `protobuf:"bytes,9,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"`
     	// Reserved field names, which may not be used by fields in the same message.
     	// A given name may only be reserved once.
    -	ReservedName  []string `protobuf:"bytes,10,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"`
    +	ReservedName []string `protobuf:"bytes,10,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"`
    +	// Support for `export` and `local` keywords on enums.
    +	Visibility    *SymbolVisibility `protobuf:"varint,11,opt,name=visibility,enum=google.protobuf.SymbolVisibility" json:"visibility,omitempty"`
     	unknownFields protoimpl.UnknownFields
     	sizeCache     protoimpl.SizeCache
     }
    @@ -1524,6 +1740,13 @@ func (x *DescriptorProto) GetReservedName() []string {
     	return nil
     }
     
    +func (x *DescriptorProto) GetVisibility() SymbolVisibility {
    +	if x != nil && x.Visibility != nil {
    +		return *x.Visibility
    +	}
    +	return SymbolVisibility_VISIBILITY_UNSET
    +}
    +
     type ExtensionRangeOptions struct {
     	state protoimpl.MessageState `protogen:"open.v1"`
     	// The parser stores options it doesn't recognize here. See above.
    @@ -1836,7 +2059,9 @@ type EnumDescriptorProto struct {
     	ReservedRange []*EnumDescriptorProto_EnumReservedRange `protobuf:"bytes,4,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"`
     	// Reserved enum value names, which may not be reused. A given name may only
     	// be reserved once.
    -	ReservedName  []string `protobuf:"bytes,5,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"`
    +	ReservedName []string `protobuf:"bytes,5,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"`
    +	// Support for `export` and `local` keywords on enums.
    +	Visibility    *SymbolVisibility `protobuf:"varint,6,opt,name=visibility,enum=google.protobuf.SymbolVisibility" json:"visibility,omitempty"`
     	unknownFields protoimpl.UnknownFields
     	sizeCache     protoimpl.SizeCache
     }
    @@ -1906,6 +2131,13 @@ func (x *EnumDescriptorProto) GetReservedName() []string {
     	return nil
     }
     
    +func (x *EnumDescriptorProto) GetVisibility() SymbolVisibility {
    +	if x != nil && x.Visibility != nil {
    +		return *x.Visibility
    +	}
    +	return SymbolVisibility_VISIBILITY_UNSET
    +}
    +
     // Describes a value within an enum.
     type EnumValueDescriptorProto struct {
     	state         protoimpl.MessageState `protogen:"open.v1"`
    @@ -2212,6 +2444,9 @@ type FileOptions struct {
     	// determining the ruby package.
     	RubyPackage *string `protobuf:"bytes,45,opt,name=ruby_package,json=rubyPackage" json:"ruby_package,omitempty"`
     	// Any features defined in the specific edition.
    +	// WARNING: This field should only be used by protobuf plugins or special
    +	// cases like the proto compiler. Other uses are discouraged and
    +	// developers should rely on the protoreflect APIs for their client language.
     	Features *FeatureSet `protobuf:"bytes,50,opt,name=features" json:"features,omitempty"`
     	// The parser stores options it doesn't recognize here.
     	// See the documentation for the "Options" section above.
    @@ -2482,6 +2717,9 @@ type MessageOptions struct {
     	// Deprecated: Marked as deprecated in google/protobuf/descriptor.proto.
     	DeprecatedLegacyJsonFieldConflicts *bool `protobuf:"varint,11,opt,name=deprecated_legacy_json_field_conflicts,json=deprecatedLegacyJsonFieldConflicts" json:"deprecated_legacy_json_field_conflicts,omitempty"`
     	// Any features defined in the specific edition.
    +	// WARNING: This field should only be used by protobuf plugins or special
    +	// cases like the proto compiler. Other uses are discouraged and
    +	// developers should rely on the protoreflect APIs for their client language.
     	Features *FeatureSet `protobuf:"bytes,12,opt,name=features" json:"features,omitempty"`
     	// The parser stores options it doesn't recognize here. See above.
     	UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
    @@ -2639,7 +2877,10 @@ type FieldOptions struct {
     	// for accessors, or it will be completely ignored; in the very least, this
     	// is a formalization for deprecating fields.
     	Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
    +	// DEPRECATED. DO NOT USE!
     	// For Google-internal migration only. Do not use.
    +	//
    +	// Deprecated: Marked as deprecated in google/protobuf/descriptor.proto.
     	Weak *bool `protobuf:"varint,10,opt,name=weak,def=0" json:"weak,omitempty"`
     	// Indicate that the field value should not be printed out when using debug
     	// formats, e.g. when the field contains sensitive credentials.
    @@ -2648,6 +2889,9 @@ type FieldOptions struct {
     	Targets         []FieldOptions_OptionTargetType `protobuf:"varint,19,rep,name=targets,enum=google.protobuf.FieldOptions_OptionTargetType" json:"targets,omitempty"`
     	EditionDefaults []*FieldOptions_EditionDefault  `protobuf:"bytes,20,rep,name=edition_defaults,json=editionDefaults" json:"edition_defaults,omitempty"`
     	// Any features defined in the specific edition.
    +	// WARNING: This field should only be used by protobuf plugins or special
    +	// cases like the proto compiler. Other uses are discouraged and
    +	// developers should rely on the protoreflect APIs for their client language.
     	Features       *FeatureSet                  `protobuf:"bytes,21,opt,name=features" json:"features,omitempty"`
     	FeatureSupport *FieldOptions_FeatureSupport `protobuf:"bytes,22,opt,name=feature_support,json=featureSupport" json:"feature_support,omitempty"`
     	// The parser stores options it doesn't recognize here. See above.
    @@ -2740,6 +2984,7 @@ func (x *FieldOptions) GetDeprecated() bool {
     	return Default_FieldOptions_Deprecated
     }
     
    +// Deprecated: Marked as deprecated in google/protobuf/descriptor.proto.
     func (x *FieldOptions) GetWeak() bool {
     	if x != nil && x.Weak != nil {
     		return *x.Weak
    @@ -2799,6 +3044,9 @@ func (x *FieldOptions) GetUninterpretedOption() []*UninterpretedOption {
     type OneofOptions struct {
     	state protoimpl.MessageState `protogen:"open.v1"`
     	// Any features defined in the specific edition.
    +	// WARNING: This field should only be used by protobuf plugins or special
    +	// cases like the proto compiler. Other uses are discouraged and
    +	// developers should rely on the protoreflect APIs for their client language.
     	Features *FeatureSet `protobuf:"bytes,1,opt,name=features" json:"features,omitempty"`
     	// The parser stores options it doesn't recognize here. See above.
     	UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
    @@ -2871,6 +3119,9 @@ type EnumOptions struct {
     	// Deprecated: Marked as deprecated in google/protobuf/descriptor.proto.
     	DeprecatedLegacyJsonFieldConflicts *bool `protobuf:"varint,6,opt,name=deprecated_legacy_json_field_conflicts,json=deprecatedLegacyJsonFieldConflicts" json:"deprecated_legacy_json_field_conflicts,omitempty"`
     	// Any features defined in the specific edition.
    +	// WARNING: This field should only be used by protobuf plugins or special
    +	// cases like the proto compiler. Other uses are discouraged and
    +	// developers should rely on the protoreflect APIs for their client language.
     	Features *FeatureSet `protobuf:"bytes,7,opt,name=features" json:"features,omitempty"`
     	// The parser stores options it doesn't recognize here. See above.
     	UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
    @@ -2958,6 +3209,9 @@ type EnumValueOptions struct {
     	// this is a formalization for deprecating enum values.
     	Deprecated *bool `protobuf:"varint,1,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
     	// Any features defined in the specific edition.
    +	// WARNING: This field should only be used by protobuf plugins or special
    +	// cases like the proto compiler. Other uses are discouraged and
    +	// developers should rely on the protoreflect APIs for their client language.
     	Features *FeatureSet `protobuf:"bytes,2,opt,name=features" json:"features,omitempty"`
     	// Indicate that fields annotated with this enum value should not be printed
     	// out when using debug formats, e.g. when the field contains sensitive
    @@ -3046,6 +3300,9 @@ func (x *EnumValueOptions) GetUninterpretedOption() []*UninterpretedOption {
     type ServiceOptions struct {
     	state protoimpl.MessageState `protogen:"open.v1"`
     	// Any features defined in the specific edition.
    +	// WARNING: This field should only be used by protobuf plugins or special
    +	// cases like the proto compiler. Other uses are discouraged and
    +	// developers should rely on the protoreflect APIs for their client language.
     	Features *FeatureSet `protobuf:"bytes,34,opt,name=features" json:"features,omitempty"`
     	// Is this service deprecated?
     	// Depending on the target platform, this can emit Deprecated annotations
    @@ -3124,6 +3381,9 @@ type MethodOptions struct {
     	Deprecated       *bool                           `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
     	IdempotencyLevel *MethodOptions_IdempotencyLevel `protobuf:"varint,34,opt,name=idempotency_level,json=idempotencyLevel,enum=google.protobuf.MethodOptions_IdempotencyLevel,def=0" json:"idempotency_level,omitempty"`
     	// Any features defined in the specific edition.
    +	// WARNING: This field should only be used by protobuf plugins or special
    +	// cases like the proto compiler. Other uses are discouraged and
    +	// developers should rely on the protoreflect APIs for their client language.
     	Features *FeatureSet `protobuf:"bytes,35,opt,name=features" json:"features,omitempty"`
     	// The parser stores options it doesn't recognize here. See above.
     	UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
    @@ -3303,16 +3563,18 @@ func (x *UninterpretedOption) GetAggregateValue() string {
     // be designed and implemented to handle this, hopefully before we ever hit a
     // conflict here.
     type FeatureSet struct {
    -	state                 protoimpl.MessageState            `protogen:"open.v1"`
    -	FieldPresence         *FeatureSet_FieldPresence         `protobuf:"varint,1,opt,name=field_presence,json=fieldPresence,enum=google.protobuf.FeatureSet_FieldPresence" json:"field_presence,omitempty"`
    -	EnumType              *FeatureSet_EnumType              `protobuf:"varint,2,opt,name=enum_type,json=enumType,enum=google.protobuf.FeatureSet_EnumType" json:"enum_type,omitempty"`
    -	RepeatedFieldEncoding *FeatureSet_RepeatedFieldEncoding `protobuf:"varint,3,opt,name=repeated_field_encoding,json=repeatedFieldEncoding,enum=google.protobuf.FeatureSet_RepeatedFieldEncoding" json:"repeated_field_encoding,omitempty"`
    -	Utf8Validation        *FeatureSet_Utf8Validation        `protobuf:"varint,4,opt,name=utf8_validation,json=utf8Validation,enum=google.protobuf.FeatureSet_Utf8Validation" json:"utf8_validation,omitempty"`
    -	MessageEncoding       *FeatureSet_MessageEncoding       `protobuf:"varint,5,opt,name=message_encoding,json=messageEncoding,enum=google.protobuf.FeatureSet_MessageEncoding" json:"message_encoding,omitempty"`
    -	JsonFormat            *FeatureSet_JsonFormat            `protobuf:"varint,6,opt,name=json_format,json=jsonFormat,enum=google.protobuf.FeatureSet_JsonFormat" json:"json_format,omitempty"`
    -	extensionFields       protoimpl.ExtensionFields
    -	unknownFields         protoimpl.UnknownFields
    -	sizeCache             protoimpl.SizeCache
    +	state                   protoimpl.MessageState                                `protogen:"open.v1"`
    +	FieldPresence           *FeatureSet_FieldPresence                             `protobuf:"varint,1,opt,name=field_presence,json=fieldPresence,enum=google.protobuf.FeatureSet_FieldPresence" json:"field_presence,omitempty"`
    +	EnumType                *FeatureSet_EnumType                                  `protobuf:"varint,2,opt,name=enum_type,json=enumType,enum=google.protobuf.FeatureSet_EnumType" json:"enum_type,omitempty"`
    +	RepeatedFieldEncoding   *FeatureSet_RepeatedFieldEncoding                     `protobuf:"varint,3,opt,name=repeated_field_encoding,json=repeatedFieldEncoding,enum=google.protobuf.FeatureSet_RepeatedFieldEncoding" json:"repeated_field_encoding,omitempty"`
    +	Utf8Validation          *FeatureSet_Utf8Validation                            `protobuf:"varint,4,opt,name=utf8_validation,json=utf8Validation,enum=google.protobuf.FeatureSet_Utf8Validation" json:"utf8_validation,omitempty"`
    +	MessageEncoding         *FeatureSet_MessageEncoding                           `protobuf:"varint,5,opt,name=message_encoding,json=messageEncoding,enum=google.protobuf.FeatureSet_MessageEncoding" json:"message_encoding,omitempty"`
    +	JsonFormat              *FeatureSet_JsonFormat                                `protobuf:"varint,6,opt,name=json_format,json=jsonFormat,enum=google.protobuf.FeatureSet_JsonFormat" json:"json_format,omitempty"`
    +	EnforceNamingStyle      *FeatureSet_EnforceNamingStyle                        `protobuf:"varint,7,opt,name=enforce_naming_style,json=enforceNamingStyle,enum=google.protobuf.FeatureSet_EnforceNamingStyle" json:"enforce_naming_style,omitempty"`
    +	DefaultSymbolVisibility *FeatureSet_VisibilityFeature_DefaultSymbolVisibility `protobuf:"varint,8,opt,name=default_symbol_visibility,json=defaultSymbolVisibility,enum=google.protobuf.FeatureSet_VisibilityFeature_DefaultSymbolVisibility" json:"default_symbol_visibility,omitempty"`
    +	extensionFields         protoimpl.ExtensionFields
    +	unknownFields           protoimpl.UnknownFields
    +	sizeCache               protoimpl.SizeCache
     }
     
     func (x *FeatureSet) Reset() {
    @@ -3387,6 +3649,20 @@ func (x *FeatureSet) GetJsonFormat() FeatureSet_JsonFormat {
     	return FeatureSet_JSON_FORMAT_UNKNOWN
     }
     
    +func (x *FeatureSet) GetEnforceNamingStyle() FeatureSet_EnforceNamingStyle {
    +	if x != nil && x.EnforceNamingStyle != nil {
    +		return *x.EnforceNamingStyle
    +	}
    +	return FeatureSet_ENFORCE_NAMING_STYLE_UNKNOWN
    +}
    +
    +func (x *FeatureSet) GetDefaultSymbolVisibility() FeatureSet_VisibilityFeature_DefaultSymbolVisibility {
    +	if x != nil && x.DefaultSymbolVisibility != nil {
    +		return *x.DefaultSymbolVisibility
    +	}
    +	return FeatureSet_VisibilityFeature_DEFAULT_SYMBOL_VISIBILITY_UNKNOWN
    +}
    +
     // A compiled specification for the defaults of a set of features.  These
     // messages are generated from FeatureSet extensions and can be used to seed
     // feature resolution. The resolution with this object becomes a simple search
    @@ -4047,6 +4323,42 @@ func (x *UninterpretedOption_NamePart) GetIsExtension() bool {
     	return false
     }
     
    +type FeatureSet_VisibilityFeature struct {
    +	state         protoimpl.MessageState `protogen:"open.v1"`
    +	unknownFields protoimpl.UnknownFields
    +	sizeCache     protoimpl.SizeCache
    +}
    +
    +func (x *FeatureSet_VisibilityFeature) Reset() {
    +	*x = FeatureSet_VisibilityFeature{}
    +	mi := &file_google_protobuf_descriptor_proto_msgTypes[30]
    +	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    +	ms.StoreMessageInfo(mi)
    +}
    +
    +func (x *FeatureSet_VisibilityFeature) String() string {
    +	return protoimpl.X.MessageStringOf(x)
    +}
    +
    +func (*FeatureSet_VisibilityFeature) ProtoMessage() {}
    +
    +func (x *FeatureSet_VisibilityFeature) ProtoReflect() protoreflect.Message {
    +	mi := &file_google_protobuf_descriptor_proto_msgTypes[30]
    +	if x != nil {
    +		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    +		if ms.LoadMessageInfo() == nil {
    +			ms.StoreMessageInfo(mi)
    +		}
    +		return ms
    +	}
    +	return mi.MessageOf(x)
    +}
    +
    +// Deprecated: Use FeatureSet_VisibilityFeature.ProtoReflect.Descriptor instead.
    +func (*FeatureSet_VisibilityFeature) Descriptor() ([]byte, []int) {
    +	return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19, 0}
    +}
    +
     // A map from every known edition with a unique set of defaults to its
     // defaults. Not all editions may be contained here.  For a given edition,
     // the defaults at the closest matching edition ordered at or before it should
    @@ -4064,7 +4376,7 @@ type FeatureSetDefaults_FeatureSetEditionDefault struct {
     
     func (x *FeatureSetDefaults_FeatureSetEditionDefault) Reset() {
     	*x = FeatureSetDefaults_FeatureSetEditionDefault{}
    -	mi := &file_google_protobuf_descriptor_proto_msgTypes[30]
    +	mi := &file_google_protobuf_descriptor_proto_msgTypes[31]
     	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
     	ms.StoreMessageInfo(mi)
     }
    @@ -4076,7 +4388,7 @@ func (x *FeatureSetDefaults_FeatureSetEditionDefault) String() string {
     func (*FeatureSetDefaults_FeatureSetEditionDefault) ProtoMessage() {}
     
     func (x *FeatureSetDefaults_FeatureSetEditionDefault) ProtoReflect() protoreflect.Message {
    -	mi := &file_google_protobuf_descriptor_proto_msgTypes[30]
    +	mi := &file_google_protobuf_descriptor_proto_msgTypes[31]
     	if x != nil {
     		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
     		if ms.LoadMessageInfo() == nil {
    @@ -4212,7 +4524,7 @@ type SourceCodeInfo_Location struct {
     
     func (x *SourceCodeInfo_Location) Reset() {
     	*x = SourceCodeInfo_Location{}
    -	mi := &file_google_protobuf_descriptor_proto_msgTypes[31]
    +	mi := &file_google_protobuf_descriptor_proto_msgTypes[32]
     	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
     	ms.StoreMessageInfo(mi)
     }
    @@ -4224,7 +4536,7 @@ func (x *SourceCodeInfo_Location) String() string {
     func (*SourceCodeInfo_Location) ProtoMessage() {}
     
     func (x *SourceCodeInfo_Location) ProtoReflect() protoreflect.Message {
    -	mi := &file_google_protobuf_descriptor_proto_msgTypes[31]
    +	mi := &file_google_protobuf_descriptor_proto_msgTypes[32]
     	if x != nil {
     		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
     		if ms.LoadMessageInfo() == nil {
    @@ -4296,7 +4608,7 @@ type GeneratedCodeInfo_Annotation struct {
     
     func (x *GeneratedCodeInfo_Annotation) Reset() {
     	*x = GeneratedCodeInfo_Annotation{}
    -	mi := &file_google_protobuf_descriptor_proto_msgTypes[32]
    +	mi := &file_google_protobuf_descriptor_proto_msgTypes[33]
     	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
     	ms.StoreMessageInfo(mi)
     }
    @@ -4308,7 +4620,7 @@ func (x *GeneratedCodeInfo_Annotation) String() string {
     func (*GeneratedCodeInfo_Annotation) ProtoMessage() {}
     
     func (x *GeneratedCodeInfo_Annotation) ProtoReflect() protoreflect.Message {
    -	mi := &file_google_protobuf_descriptor_proto_msgTypes[32]
    +	mi := &file_google_protobuf_descriptor_proto_msgTypes[33]
     	if x != nil {
     		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
     		if ms.LoadMessageInfo() == nil {
    @@ -4361,777 +4673,390 @@ func (x *GeneratedCodeInfo_Annotation) GetSemantic() GeneratedCodeInfo_Annotatio
     
     var File_google_protobuf_descriptor_proto protoreflect.FileDescriptor
     
    -var file_google_protobuf_descriptor_proto_rawDesc = string([]byte{
    -	0x0a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
    -	0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f,
    -	0x74, 0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
    -	0x62, 0x75, 0x66, 0x22, 0x5b, 0x0a, 0x11, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72,
    -	0x69, 0x70, 0x74, 0x6f, 0x72, 0x53, 0x65, 0x74, 0x12, 0x38, 0x0a, 0x04, 0x66, 0x69, 0x6c, 0x65,
    -	0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
    -	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73,
    -	0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x04, 0x66, 0x69,
    -	0x6c, 0x65, 0x2a, 0x0c, 0x08, 0x80, 0xec, 0xca, 0xff, 0x01, 0x10, 0x81, 0xec, 0xca, 0xff, 0x01,
    -	0x22, 0x98, 0x05, 0x0a, 0x13, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70,
    -	0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65,
    -	0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x18, 0x0a, 0x07,
    -	0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x70,
    -	0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x65, 0x6e, 0x64,
    -	0x65, 0x6e, 0x63, 0x79, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x65,
    -	0x6e, 0x64, 0x65, 0x6e, 0x63, 0x79, 0x12, 0x2b, 0x0a, 0x11, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63,
    -	0x5f, 0x64, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x0a, 0x20, 0x03, 0x28,
    -	0x05, 0x52, 0x10, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x44, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65,
    -	0x6e, 0x63, 0x79, 0x12, 0x27, 0x0a, 0x0f, 0x77, 0x65, 0x61, 0x6b, 0x5f, 0x64, 0x65, 0x70, 0x65,
    -	0x6e, 0x64, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x05, 0x52, 0x0e, 0x77, 0x65,
    -	0x61, 0x6b, 0x44, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x79, 0x12, 0x43, 0x0a, 0x0c,
    -	0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x03,
    -	0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
    -	0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50,
    -	0x72, 0x6f, 0x74, 0x6f, 0x52, 0x0b, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x54, 0x79, 0x70,
    -	0x65, 0x12, 0x41, 0x0a, 0x09, 0x65, 0x6e, 0x75, 0x6d, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05,
    -	0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
    -	0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72,
    -	0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x08, 0x65, 0x6e, 0x75, 0x6d,
    -	0x54, 0x79, 0x70, 0x65, 0x12, 0x41, 0x0a, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18,
    -	0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
    -	0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x44,
    -	0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x07,
    -	0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x43, 0x0a, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e,
    -	0x73, 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f,
    -	0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65,
    -	0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74,
    -	0x6f, 0x52, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x36, 0x0a, 0x07,
    -	0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e,
    -	0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
    -	0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74,
    -	0x69, 0x6f, 0x6e, 0x73, 0x12, 0x49, 0x0a, 0x10, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x63,
    -	0x6f, 0x64, 0x65, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f,
    -	0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
    -	0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52,
    -	0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12,
    -	0x16, 0x0a, 0x06, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, 0x52,
    -	0x06, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x12, 0x32, 0x0a, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69,
    -	0x6f, 0x6e, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
    -	0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69,
    -	0x6f, 0x6e, 0x52, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xb9, 0x06, 0x0a, 0x0f,
    -	0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12,
    -	0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e,
    -	0x61, 0x6d, 0x65, 0x12, 0x3b, 0x0a, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x02, 0x20, 0x03,
    -	0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
    -	0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69,
    -	0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64,
    -	0x12, 0x43, 0x0a, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20,
    -	0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
    -	0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72,
    -	0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x09, 0x65, 0x78, 0x74, 0x65,
    -	0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x0b, 0x6e, 0x65, 0x73, 0x74, 0x65, 0x64, 0x5f,
    -	0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x6f, 0x6f,
    -	0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x65, 0x73,
    -	0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x0a, 0x6e, 0x65,
    -	0x73, 0x74, 0x65, 0x64, 0x54, 0x79, 0x70, 0x65, 0x12, 0x41, 0x0a, 0x09, 0x65, 0x6e, 0x75, 0x6d,
    -	0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f,
    -	0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e,
    -	0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74,
    -	0x6f, 0x52, 0x08, 0x65, 0x6e, 0x75, 0x6d, 0x54, 0x79, 0x70, 0x65, 0x12, 0x58, 0x0a, 0x0f, 0x65,
    -	0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x05,
    -	0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
    -	0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f,
    -	0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e,
    -	0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e,
    -	0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x44, 0x0a, 0x0a, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x5f, 0x64,
    -	0x65, 0x63, 0x6c, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
    -	0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4f, 0x6e, 0x65, 0x6f,
    -	0x66, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f,
    -	0x52, 0x09, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x44, 0x65, 0x63, 0x6c, 0x12, 0x39, 0x0a, 0x07, 0x6f,
    -	0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67,
    -	0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d,
    -	0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f,
    -	0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x55, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76,
    -	0x65, 0x64, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2e,
    -	0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
    -	0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f,
    -	0x2e, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0d,
    -	0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x23, 0x0a,
    -	0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0a,
    -	0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x4e, 0x61,
    -	0x6d, 0x65, 0x1a, 0x7a, 0x0a, 0x0e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52,
    -	0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20,
    -	0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e,
    -	0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x12, 0x40, 0x0a, 0x07,
    -	0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e,
    -	0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
    -	0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70,
    -	0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x37,
    -	0x0a, 0x0d, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12,
    -	0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05,
    -	0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01,
    -	0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x22, 0xcc, 0x04, 0x0a, 0x15, 0x45, 0x78, 0x74, 0x65,
    -	0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
    -	0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74,
    -	0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b,
    -	0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
    -	0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64,
    -	0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70,
    -	0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x59, 0x0a, 0x0b, 0x64,
    -	0x65, 0x63, 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b,
    -	0x32, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
    -	0x75, 0x66, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67,
    -	0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x44, 0x65, 0x63, 0x6c, 0x61, 0x72, 0x61,
    -	0x74, 0x69, 0x6f, 0x6e, 0x42, 0x03, 0x88, 0x01, 0x02, 0x52, 0x0b, 0x64, 0x65, 0x63, 0x6c, 0x61,
    -	0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72,
    -	0x65, 0x73, 0x18, 0x32, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
    -	0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75,
    -	0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12,
    -	0x6d, 0x0a, 0x0c, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18,
    -	0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x38, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
    -	0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f,
    -	0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x56, 0x65,
    -	0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x3a,
    -	0x0a, 0x55, 0x4e, 0x56, 0x45, 0x52, 0x49, 0x46, 0x49, 0x45, 0x44, 0x42, 0x03, 0x88, 0x01, 0x02,
    -	0x52, 0x0c, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x94,
    -	0x01, 0x0a, 0x0b, 0x44, 0x65, 0x63, 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16,
    -	0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06,
    -	0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x1b, 0x0a, 0x09, 0x66, 0x75, 0x6c, 0x6c, 0x5f, 0x6e,
    -	0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x66, 0x75, 0x6c, 0x6c, 0x4e,
    -	0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28,
    -	0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x65, 0x72,
    -	0x76, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x73, 0x65, 0x72,
    -	0x76, 0x65, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x18,
    -	0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x4a,
    -	0x04, 0x08, 0x04, 0x10, 0x05, 0x22, 0x34, 0x0a, 0x11, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63,
    -	0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x44, 0x45,
    -	0x43, 0x4c, 0x41, 0x52, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x00, 0x12, 0x0e, 0x0a, 0x0a, 0x55,
    -	0x4e, 0x56, 0x45, 0x52, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x01, 0x2a, 0x09, 0x08, 0xe8, 0x07,
    -	0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0xc1, 0x06, 0x0a, 0x14, 0x46, 0x69, 0x65, 0x6c, 0x64,
    -	0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12,
    -	0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e,
    -	0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x03, 0x20,
    -	0x01, 0x28, 0x05, 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x41, 0x0a, 0x05, 0x6c,
    -	0x61, 0x62, 0x65, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f,
    -	0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65,
    -	0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74,
    -	0x6f, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x52, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x3e,
    -	0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2a, 0x2e, 0x67,
    -	0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46,
    -	0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72,
    -	0x6f, 0x74, 0x6f, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x1b,
    -	0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28,
    -	0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x65,
    -	0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65,
    -	0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x65, 0x66, 0x61, 0x75,
    -	0x6c, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c,
    -	0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1f, 0x0a, 0x0b,
    -	0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x09, 0x20, 0x01, 0x28,
    -	0x05, 0x52, 0x0a, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x1b, 0x0a,
    -	0x09, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09,
    -	0x52, 0x08, 0x6a, 0x73, 0x6f, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x6f, 0x70,
    -	0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f,
    -	0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69,
    -	0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69,
    -	0x6f, 0x6e, 0x73, 0x12, 0x27, 0x0a, 0x0f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, 0x5f, 0x6f, 0x70,
    -	0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x18, 0x11, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x70, 0x72,
    -	0x6f, 0x74, 0x6f, 0x33, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x22, 0xb6, 0x02, 0x0a,
    -	0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x44, 0x4f,
    -	0x55, 0x42, 0x4c, 0x45, 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46,
    -	0x4c, 0x4f, 0x41, 0x54, 0x10, 0x02, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49,
    -	0x4e, 0x54, 0x36, 0x34, 0x10, 0x03, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55,
    -	0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x04, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f,
    -	0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x05, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f,
    -	0x46, 0x49, 0x58, 0x45, 0x44, 0x36, 0x34, 0x10, 0x06, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50,
    -	0x45, 0x5f, 0x46, 0x49, 0x58, 0x45, 0x44, 0x33, 0x32, 0x10, 0x07, 0x12, 0x0d, 0x0a, 0x09, 0x54,
    -	0x59, 0x50, 0x45, 0x5f, 0x42, 0x4f, 0x4f, 0x4c, 0x10, 0x08, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59,
    -	0x50, 0x45, 0x5f, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x09, 0x12, 0x0e, 0x0a, 0x0a, 0x54,
    -	0x59, 0x50, 0x45, 0x5f, 0x47, 0x52, 0x4f, 0x55, 0x50, 0x10, 0x0a, 0x12, 0x10, 0x0a, 0x0c, 0x54,
    -	0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x0b, 0x12, 0x0e, 0x0a,
    -	0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x0c, 0x12, 0x0f, 0x0a,
    -	0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x0d, 0x12, 0x0d,
    -	0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x10, 0x0e, 0x12, 0x11, 0x0a,
    -	0x0d, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x46, 0x49, 0x58, 0x45, 0x44, 0x33, 0x32, 0x10, 0x0f,
    -	0x12, 0x11, 0x0a, 0x0d, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x46, 0x49, 0x58, 0x45, 0x44, 0x36,
    -	0x34, 0x10, 0x10, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x49, 0x4e, 0x54,
    -	0x33, 0x32, 0x10, 0x11, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x49, 0x4e,
    -	0x54, 0x36, 0x34, 0x10, 0x12, 0x22, 0x43, 0x0a, 0x05, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x12,
    -	0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x4f, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x41, 0x4c,
    -	0x10, 0x01, 0x12, 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x52, 0x45, 0x50, 0x45,
    -	0x41, 0x54, 0x45, 0x44, 0x10, 0x03, 0x12, 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f,
    -	0x52, 0x45, 0x51, 0x55, 0x49, 0x52, 0x45, 0x44, 0x10, 0x02, 0x22, 0x63, 0x0a, 0x14, 0x4f, 0x6e,
    -	0x65, 0x6f, 0x66, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f,
    -	0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
    -	0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
    -	0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
    -	0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x4f,
    -	0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22,
    -	0xe3, 0x02, 0x0a, 0x13, 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74,
    -	0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18,
    -	0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3f, 0x0a, 0x05, 0x76,
    -	0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f,
    -	0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75,
    -	0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72,
    -	0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x36, 0x0a, 0x07,
    -	0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e,
    -	0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
    -	0x45, 0x6e, 0x75, 0x6d, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74,
    -	0x69, 0x6f, 0x6e, 0x73, 0x12, 0x5d, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64,
    -	0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x67,
    -	0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45,
    -	0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f,
    -	0x74, 0x6f, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52,
    -	0x61, 0x6e, 0x67, 0x65, 0x52, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61,
    -	0x6e, 0x67, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f,
    -	0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x65,
    -	0x72, 0x76, 0x65, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x1a, 0x3b, 0x0a, 0x11, 0x45, 0x6e, 0x75, 0x6d,
    -	0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a,
    -	0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74,
    -	0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05,
    -	0x52, 0x03, 0x65, 0x6e, 0x64, 0x22, 0x83, 0x01, 0x0a, 0x18, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61,
    -	0x6c, 0x75, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f,
    -	0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
    -	0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72,
    -	0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x3b,
    -	0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32,
    -	0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
    -	0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f,
    -	0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xa7, 0x01, 0x0a, 0x16,
    -	0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f,
    -	0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01,
    -	0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3e, 0x0a, 0x06, 0x6d, 0x65,
    -	0x74, 0x68, 0x6f, 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f,
    -	0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74,
    -	0x68, 0x6f, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f,
    -	0x74, 0x6f, 0x52, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x39, 0x0a, 0x07, 0x6f, 0x70,
    -	0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f,
    -	0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65,
    -	0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70,
    -	0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x89, 0x02, 0x0a, 0x15, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64,
    -	0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12,
    -	0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e,
    -	0x61, 0x6d, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x5f, 0x74, 0x79, 0x70,
    -	0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x54, 0x79,
    -	0x70, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x5f, 0x74, 0x79, 0x70,
    -	0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x54,
    -	0x79, 0x70, 0x65, 0x12, 0x38, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x04,
    -	0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
    -	0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74,
    -	0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x30, 0x0a,
    -	0x10, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e,
    -	0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0f,
    -	0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x12,
    -	0x30, 0x0a, 0x10, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d,
    -	0x69, 0x6e, 0x67, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65,
    -	0x52, 0x0f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e,
    -	0x67, 0x22, 0xad, 0x09, 0x0a, 0x0b, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
    -	0x73, 0x12, 0x21, 0x0a, 0x0c, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67,
    -	0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6a, 0x61, 0x76, 0x61, 0x50, 0x61, 0x63,
    -	0x6b, 0x61, 0x67, 0x65, 0x12, 0x30, 0x0a, 0x14, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x6f, 0x75, 0x74,
    -	0x65, 0x72, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x08, 0x20, 0x01,
    -	0x28, 0x09, 0x52, 0x12, 0x6a, 0x61, 0x76, 0x61, 0x4f, 0x75, 0x74, 0x65, 0x72, 0x43, 0x6c, 0x61,
    -	0x73, 0x73, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x35, 0x0a, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x6d,
    -	0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x18, 0x0a, 0x20,
    -	0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x6a, 0x61, 0x76, 0x61,
    -	0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x12, 0x44, 0x0a,
    -	0x1d, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x5f, 0x65,
    -	0x71, 0x75, 0x61, 0x6c, 0x73, 0x5f, 0x61, 0x6e, 0x64, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x14,
    -	0x20, 0x01, 0x28, 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x19, 0x6a, 0x61, 0x76, 0x61, 0x47, 0x65,
    -	0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x45, 0x71, 0x75, 0x61, 0x6c, 0x73, 0x41, 0x6e, 0x64, 0x48,
    -	0x61, 0x73, 0x68, 0x12, 0x3a, 0x0a, 0x16, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x73, 0x74, 0x72, 0x69,
    -	0x6e, 0x67, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x75, 0x74, 0x66, 0x38, 0x18, 0x1b, 0x20,
    -	0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x13, 0x6a, 0x61, 0x76, 0x61,
    -	0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x55, 0x74, 0x66, 0x38, 0x12,
    -	0x53, 0x0a, 0x0c, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x18,
    -	0x09, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
    -	0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69,
    -	0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x4d, 0x6f, 0x64, 0x65,
    -	0x3a, 0x05, 0x53, 0x50, 0x45, 0x45, 0x44, 0x52, 0x0b, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a,
    -	0x65, 0x46, 0x6f, 0x72, 0x12, 0x1d, 0x0a, 0x0a, 0x67, 0x6f, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61,
    -	0x67, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x67, 0x6f, 0x50, 0x61, 0x63, 0x6b,
    -	0x61, 0x67, 0x65, 0x12, 0x35, 0x0a, 0x13, 0x63, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69,
    -	0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x10, 0x20, 0x01, 0x28, 0x08,
    -	0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x63, 0x63, 0x47, 0x65, 0x6e, 0x65, 0x72,
    -	0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x39, 0x0a, 0x15, 0x6a, 0x61,
    -	0x76, 0x61, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69,
    -	0x63, 0x65, 0x73, 0x18, 0x11, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65,
    -	0x52, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72,
    -	0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x35, 0x0a, 0x13, 0x70, 0x79, 0x5f, 0x67, 0x65, 0x6e, 0x65,
    -	0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x12, 0x20, 0x01,
    -	0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x70, 0x79, 0x47, 0x65, 0x6e,
    -	0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0a,
    -	0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x17, 0x20, 0x01, 0x28, 0x08,
    -	0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61,
    -	0x74, 0x65, 0x64, 0x12, 0x2e, 0x0a, 0x10, 0x63, 0x63, 0x5f, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65,
    -	0x5f, 0x61, 0x72, 0x65, 0x6e, 0x61, 0x73, 0x18, 0x1f, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x04, 0x74,
    -	0x72, 0x75, 0x65, 0x52, 0x0e, 0x63, 0x63, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x41, 0x72, 0x65,
    -	0x6e, 0x61, 0x73, 0x12, 0x2a, 0x0a, 0x11, 0x6f, 0x62, 0x6a, 0x63, 0x5f, 0x63, 0x6c, 0x61, 0x73,
    -	0x73, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x24, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f,
    -	0x6f, 0x62, 0x6a, 0x63, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12,
    -	0x29, 0x0a, 0x10, 0x63, 0x73, 0x68, 0x61, 0x72, 0x70, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70,
    -	0x61, 0x63, 0x65, 0x18, 0x25, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x63, 0x73, 0x68, 0x61, 0x72,
    -	0x70, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x77,
    -	0x69, 0x66, 0x74, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x27, 0x20, 0x01, 0x28, 0x09,
    -	0x52, 0x0b, 0x73, 0x77, 0x69, 0x66, 0x74, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x28, 0x0a,
    -	0x10, 0x70, 0x68, 0x70, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69,
    -	0x78, 0x18, 0x28, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x70, 0x68, 0x70, 0x43, 0x6c, 0x61, 0x73,
    -	0x73, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x23, 0x0a, 0x0d, 0x70, 0x68, 0x70, 0x5f, 0x6e,
    -	0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x29, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c,
    -	0x70, 0x68, 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x34, 0x0a, 0x16,
    -	0x70, 0x68, 0x70, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x6e, 0x61, 0x6d,
    -	0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x2c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x14, 0x70, 0x68,
    -	0x70, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61,
    -	0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x72, 0x75, 0x62, 0x79, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61,
    -	0x67, 0x65, 0x18, 0x2d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, 0x75, 0x62, 0x79, 0x50, 0x61,
    -	0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65,
    -	0x73, 0x18, 0x32, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
    -	0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72,
    -	0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58,
    -	0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f,
    -	0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e,
    -	0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
    -	0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74,
    -	0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74,
    -	0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x3a, 0x0a, 0x0c, 0x4f, 0x70, 0x74, 0x69,
    -	0x6d, 0x69, 0x7a, 0x65, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x09, 0x0a, 0x05, 0x53, 0x50, 0x45, 0x45,
    -	0x44, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x53, 0x49, 0x5a, 0x45,
    -	0x10, 0x02, 0x12, 0x10, 0x0a, 0x0c, 0x4c, 0x49, 0x54, 0x45, 0x5f, 0x52, 0x55, 0x4e, 0x54, 0x49,
    -	0x4d, 0x45, 0x10, 0x03, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a,
    -	0x04, 0x08, 0x2a, 0x10, 0x2b, 0x4a, 0x04, 0x08, 0x26, 0x10, 0x27, 0x52, 0x14, 0x70, 0x68, 0x70,
    -	0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65,
    -	0x73, 0x22, 0xf4, 0x03, 0x0a, 0x0e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, 0x74,
    -	0x69, 0x6f, 0x6e, 0x73, 0x12, 0x3c, 0x0a, 0x17, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f,
    -	0x73, 0x65, 0x74, 0x5f, 0x77, 0x69, 0x72, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18,
    -	0x01, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x14, 0x6d, 0x65,
    -	0x73, 0x73, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x57, 0x69, 0x72, 0x65, 0x46, 0x6f, 0x72, 0x6d,
    -	0x61, 0x74, 0x12, 0x4c, 0x0a, 0x1f, 0x6e, 0x6f, 0x5f, 0x73, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72,
    -	0x64, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x61, 0x63, 0x63,
    -	0x65, 0x73, 0x73, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c,
    -	0x73, 0x65, 0x52, 0x1c, 0x6e, 0x6f, 0x53, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x44, 0x65,
    -	0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72,
    -	0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03,
    -	0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70,
    -	0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x70, 0x5f, 0x65,
    -	0x6e, 0x74, 0x72, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x6d, 0x61, 0x70, 0x45,
    -	0x6e, 0x74, 0x72, 0x79, 0x12, 0x56, 0x0a, 0x26, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74,
    -	0x65, 0x64, 0x5f, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66,
    -	0x69, 0x65, 0x6c, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x18, 0x0b,
    -	0x20, 0x01, 0x28, 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x22, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63,
    -	0x61, 0x74, 0x65, 0x64, 0x4c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x69,
    -	0x65, 0x6c, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x12, 0x37, 0x0a, 0x08,
    -	0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b,
    -	0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
    -	0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61,
    -	0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72,
    -	0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07,
    -	0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
    -	0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72,
    -	0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e,
    -	0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a,
    -	0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05,
    -	0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x4a, 0x04, 0x08, 0x06, 0x10, 0x07, 0x4a, 0x04, 0x08, 0x08,
    -	0x10, 0x09, 0x4a, 0x04, 0x08, 0x09, 0x10, 0x0a, 0x22, 0x9d, 0x0d, 0x0a, 0x0c, 0x46, 0x69, 0x65,
    -	0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x41, 0x0a, 0x05, 0x63, 0x74, 0x79,
    -	0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
    -	0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64,
    -	0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x43, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x06, 0x53,
    -	0x54, 0x52, 0x49, 0x4e, 0x47, 0x52, 0x05, 0x63, 0x74, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06,
    -	0x70, 0x61, 0x63, 0x6b, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x70, 0x61,
    -	0x63, 0x6b, 0x65, 0x64, 0x12, 0x47, 0x0a, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, 0x18, 0x06,
    -	0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
    -	0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69,
    -	0x6f, 0x6e, 0x73, 0x2e, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x09, 0x4a, 0x53, 0x5f, 0x4e,
    -	0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x52, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, 0x12, 0x19, 0x0a,
    -	0x04, 0x6c, 0x61, 0x7a, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c,
    -	0x73, 0x65, 0x52, 0x04, 0x6c, 0x61, 0x7a, 0x79, 0x12, 0x2e, 0x0a, 0x0f, 0x75, 0x6e, 0x76, 0x65,
    -	0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x5f, 0x6c, 0x61, 0x7a, 0x79, 0x18, 0x0f, 0x20, 0x01, 0x28,
    -	0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0e, 0x75, 0x6e, 0x76, 0x65, 0x72, 0x69,
    -	0x66, 0x69, 0x65, 0x64, 0x4c, 0x61, 0x7a, 0x79, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72,
    -	0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61,
    -	0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12,
    -	0x19, 0x0a, 0x04, 0x77, 0x65, 0x61, 0x6b, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66,
    -	0x61, 0x6c, 0x73, 0x65, 0x52, 0x04, 0x77, 0x65, 0x61, 0x6b, 0x12, 0x28, 0x0a, 0x0c, 0x64, 0x65,
    -	0x62, 0x75, 0x67, 0x5f, 0x72, 0x65, 0x64, 0x61, 0x63, 0x74, 0x18, 0x10, 0x20, 0x01, 0x28, 0x08,
    -	0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0b, 0x64, 0x65, 0x62, 0x75, 0x67, 0x52, 0x65,
    -	0x64, 0x61, 0x63, 0x74, 0x12, 0x4b, 0x0a, 0x09, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f,
    -	0x6e, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
    -	0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f,
    -	0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x74,
    -	0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f,
    -	0x6e, 0x12, 0x48, 0x0a, 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x18, 0x13, 0x20, 0x03,
    -	0x28, 0x0e, 0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
    -	0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
    -	0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x54, 0x79,
    -	0x70, 0x65, 0x52, 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x12, 0x57, 0x0a, 0x10, 0x65,
    -	0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x18,
    -	0x14, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
    -	0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74,
    -	0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61,
    -	0x75, 0x6c, 0x74, 0x52, 0x0f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61,
    -	0x75, 0x6c, 0x74, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73,
    -	0x18, 0x15, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
    -	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65,
    -	0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x55, 0x0a,
    -	0x0f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x5f, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74,
    -	0x18, 0x16, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
    -	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70,
    -	0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, 0x70,
    -	0x70, 0x6f, 0x72, 0x74, 0x52, 0x0e, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, 0x70,
    -	0x70, 0x6f, 0x72, 0x74, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70,
    -	0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20,
    -	0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
    -	0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65,
    -	0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74,
    -	0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x5a,
    -	0x0a, 0x0e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74,
    -	0x12, 0x32, 0x0a, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28,
    -	0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
    -	0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x65, 0x64, 0x69,
    -	0x74, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20,
    -	0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x1a, 0x96, 0x02, 0x0a, 0x0e, 0x46,
    -	0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x47, 0x0a,
    -	0x12, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x74, 0x72, 0x6f, 0x64, 0x75,
    -	0x63, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
    -	0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74,
    -	0x69, 0x6f, 0x6e, 0x52, 0x11, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x74, 0x72,
    -	0x6f, 0x64, 0x75, 0x63, 0x65, 0x64, 0x12, 0x47, 0x0a, 0x12, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f,
    -	0x6e, 0x5f, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01,
    -	0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
    -	0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x11, 0x65, 0x64,
    -	0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12,
    -	0x2f, 0x0a, 0x13, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x77,
    -	0x61, 0x72, 0x6e, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x64, 0x65,
    -	0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x61, 0x72, 0x6e, 0x69, 0x6e, 0x67,
    -	0x12, 0x41, 0x0a, 0x0f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x6d, 0x6f,
    -	0x76, 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
    -	0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74,
    -	0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x6d, 0x6f,
    -	0x76, 0x65, 0x64, 0x22, 0x2f, 0x0a, 0x05, 0x43, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0a, 0x0a, 0x06,
    -	0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x43, 0x4f, 0x52, 0x44,
    -	0x10, 0x01, 0x12, 0x10, 0x0a, 0x0c, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x5f, 0x50, 0x49, 0x45,
    -	0x43, 0x45, 0x10, 0x02, 0x22, 0x35, 0x0a, 0x06, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0d,
    -	0x0a, 0x09, 0x4a, 0x53, 0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x10, 0x00, 0x12, 0x0d, 0x0a,
    -	0x09, 0x4a, 0x53, 0x5f, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09,
    -	0x4a, 0x53, 0x5f, 0x4e, 0x55, 0x4d, 0x42, 0x45, 0x52, 0x10, 0x02, 0x22, 0x55, 0x0a, 0x0f, 0x4f,
    -	0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x15,
    -	0x0a, 0x11, 0x52, 0x45, 0x54, 0x45, 0x4e, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x4b, 0x4e,
    -	0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x15, 0x0a, 0x11, 0x52, 0x45, 0x54, 0x45, 0x4e, 0x54, 0x49,
    -	0x4f, 0x4e, 0x5f, 0x52, 0x55, 0x4e, 0x54, 0x49, 0x4d, 0x45, 0x10, 0x01, 0x12, 0x14, 0x0a, 0x10,
    -	0x52, 0x45, 0x54, 0x45, 0x4e, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45,
    -	0x10, 0x02, 0x22, 0x8c, 0x02, 0x0a, 0x10, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x72,
    -	0x67, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, 0x47, 0x45,
    -	0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00,
    -	0x12, 0x14, 0x0a, 0x10, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f,
    -	0x46, 0x49, 0x4c, 0x45, 0x10, 0x01, 0x12, 0x1f, 0x0a, 0x1b, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54,
    -	0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x58, 0x54, 0x45, 0x4e, 0x53, 0x49, 0x4f, 0x4e, 0x5f,
    -	0x52, 0x41, 0x4e, 0x47, 0x45, 0x10, 0x02, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, 0x47, 0x45,
    -	0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x03,
    -	0x12, 0x15, 0x0a, 0x11, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f,
    -	0x46, 0x49, 0x45, 0x4c, 0x44, 0x10, 0x04, 0x12, 0x15, 0x0a, 0x11, 0x54, 0x41, 0x52, 0x47, 0x45,
    -	0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4f, 0x4e, 0x45, 0x4f, 0x46, 0x10, 0x05, 0x12, 0x14,
    -	0x0a, 0x10, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e,
    -	0x55, 0x4d, 0x10, 0x06, 0x12, 0x1a, 0x0a, 0x16, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54,
    -	0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x45, 0x4e, 0x54, 0x52, 0x59, 0x10, 0x07,
    -	0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f,
    -	0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, 0x10, 0x08, 0x12, 0x16, 0x0a, 0x12, 0x54, 0x41, 0x52,
    -	0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x54, 0x48, 0x4f, 0x44, 0x10,
    -	0x09, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x04,
    -	0x10, 0x05, 0x4a, 0x04, 0x08, 0x12, 0x10, 0x13, 0x22, 0xac, 0x01, 0x0a, 0x0c, 0x4f, 0x6e, 0x65,
    -	0x6f, 0x66, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61,
    -	0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f,
    -	0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65,
    -	0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72,
    -	0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65,
    -	0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28,
    -	0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
    -	0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65,
    -	0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72,
    -	0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8,
    -	0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0xd1, 0x02, 0x0a, 0x0b, 0x45, 0x6e, 0x75, 0x6d,
    -	0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x6c, 0x6c, 0x6f, 0x77,
    -	0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x61, 0x6c,
    -	0x6c, 0x6f, 0x77, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72,
    -	0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61,
    -	0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12,
    -	0x56, 0x0a, 0x26, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x6c, 0x65,
    -	0x67, 0x61, 0x63, 0x79, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f,
    -	0x63, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x42,
    -	0x02, 0x18, 0x01, 0x52, 0x22, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x4c,
    -	0x65, 0x67, 0x61, 0x63, 0x79, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x43, 0x6f,
    -	0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75,
    -	0x72, 0x65, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
    -	0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74,
    -	0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73,
    -	0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65,
    -	0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32,
    -	0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
    -	0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f,
    -	0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72,
    -	0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10,
    -	0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x22, 0xd8, 0x02, 0x0a, 0x10,
    -	0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73,
    -	0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x01,
    -	0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70,
    -	0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75,
    -	0x72, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
    -	0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74,
    -	0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73,
    -	0x12, 0x28, 0x0a, 0x0c, 0x64, 0x65, 0x62, 0x75, 0x67, 0x5f, 0x72, 0x65, 0x64, 0x61, 0x63, 0x74,
    -	0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0b, 0x64,
    -	0x65, 0x62, 0x75, 0x67, 0x52, 0x65, 0x64, 0x61, 0x63, 0x74, 0x12, 0x55, 0x0a, 0x0f, 0x66, 0x65,
    -	0x61, 0x74, 0x75, 0x72, 0x65, 0x5f, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x04, 0x20,
    -	0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
    -	0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f,
    -	0x6e, 0x73, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72,
    -	0x74, 0x52, 0x0e, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72,
    -	0x74, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74,
    -	0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b,
    -	0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
    -	0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64,
    -	0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70,
    -	0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07,
    -	0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0xd5, 0x01, 0x0a, 0x0e, 0x53, 0x65, 0x72, 0x76, 0x69,
    -	0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61,
    -	0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x22, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f,
    -	0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65,
    -	0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72,
    -	0x65, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64,
    -	0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64,
    -	0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69,
    -	0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f,
    -	0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
    -	0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74,
    -	0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13,
    -	0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74,
    -	0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0x99,
    -	0x03, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73,
    -	0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x21,
    -	0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70,
    -	0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x71, 0x0a, 0x11, 0x69, 0x64, 0x65, 0x6d, 0x70,
    -	0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18, 0x22, 0x20, 0x01,
    -	0x28, 0x0e, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
    -	0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f,
    -	0x6e, 0x73, 0x2e, 0x49, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65,
    -	0x76, 0x65, 0x6c, 0x3a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, 0x43, 0x59,
    -	0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x52, 0x10, 0x69, 0x64, 0x65, 0x6d, 0x70, 0x6f,
    -	0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65,
    -	0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x23, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67,
    -	0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46,
    -	0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75,
    -	0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72,
    -	0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03,
    -	0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
    -	0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74,
    -	0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65,
    -	0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x50, 0x0a,
    -	0x10, 0x49, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65,
    -	0x6c, 0x12, 0x17, 0x0a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, 0x43, 0x59,
    -	0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f, 0x4e, 0x4f,
    -	0x5f, 0x53, 0x49, 0x44, 0x45, 0x5f, 0x45, 0x46, 0x46, 0x45, 0x43, 0x54, 0x53, 0x10, 0x01, 0x12,
    -	0x0e, 0x0a, 0x0a, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, 0x54, 0x10, 0x02, 0x2a,
    -	0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0x9a, 0x03, 0x0a, 0x13, 0x55,
    -	0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69,
    -	0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b,
    -	0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
    -	0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64,
    -	0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, 0x52,
    -	0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66,
    -	0x69, 0x65, 0x72, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52,
    -	0x0f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65,
    -	0x12, 0x2c, 0x0a, 0x12, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x69, 0x6e, 0x74,
    -	0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x10, 0x70, 0x6f,
    -	0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x2c,
    -	0x0a, 0x12, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x69, 0x6e, 0x74, 0x5f, 0x76,
    -	0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x10, 0x6e, 0x65, 0x67, 0x61,
    -	0x74, 0x69, 0x76, 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0c,
    -	0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x06, 0x20, 0x01,
    -	0x28, 0x01, 0x52, 0x0b, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12,
    -	0x21, 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18,
    -	0x07, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c,
    -	0x75, 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x5f,
    -	0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x61, 0x67, 0x67,
    -	0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x1a, 0x4a, 0x0a, 0x08, 0x4e,
    -	0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x5f,
    -	0x70, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x02, 0x28, 0x09, 0x52, 0x08, 0x6e, 0x61, 0x6d, 0x65,
    -	0x50, 0x61, 0x72, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x69, 0x73, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e,
    -	0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x02, 0x28, 0x08, 0x52, 0x0b, 0x69, 0x73, 0x45, 0x78,
    -	0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xa7, 0x0a, 0x0a, 0x0a, 0x46, 0x65, 0x61, 0x74,
    -	0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x12, 0x91, 0x01, 0x0a, 0x0e, 0x66, 0x69, 0x65, 0x6c, 0x64,
    -	0x5f, 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32,
    -	0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
    -	0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x46, 0x69, 0x65,
    -	0x6c, 0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x42, 0x3f, 0x88, 0x01, 0x01, 0x98,
    -	0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, 0x58, 0x50, 0x4c, 0x49, 0x43,
    -	0x49, 0x54, 0x18, 0x84, 0x07, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x49, 0x4d, 0x50, 0x4c, 0x49, 0x43,
    -	0x49, 0x54, 0x18, 0xe7, 0x07, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, 0x58, 0x50, 0x4c, 0x49, 0x43,
    -	0x49, 0x54, 0x18, 0xe8, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x0d, 0x66, 0x69, 0x65,
    -	0x6c, 0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x6c, 0x0a, 0x09, 0x65, 0x6e,
    -	0x75, 0x6d, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e,
    -	0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
    -	0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x54,
    -	0x79, 0x70, 0x65, 0x42, 0x29, 0x88, 0x01, 0x01, 0x98, 0x01, 0x06, 0x98, 0x01, 0x01, 0xa2, 0x01,
    -	0x0b, 0x12, 0x06, 0x43, 0x4c, 0x4f, 0x53, 0x45, 0x44, 0x18, 0x84, 0x07, 0xa2, 0x01, 0x09, 0x12,
    -	0x04, 0x4f, 0x50, 0x45, 0x4e, 0x18, 0xe7, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x08,
    -	0x65, 0x6e, 0x75, 0x6d, 0x54, 0x79, 0x70, 0x65, 0x12, 0x98, 0x01, 0x0a, 0x17, 0x72, 0x65, 0x70,
    -	0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x65, 0x6e, 0x63, 0x6f,
    -	0x64, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x31, 0x2e, 0x67, 0x6f, 0x6f,
    -	0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61,
    -	0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x52, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64,
    -	0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x42, 0x2d, 0x88,
    -	0x01, 0x01, 0x98, 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, 0x58, 0x50,
    -	0x41, 0x4e, 0x44, 0x45, 0x44, 0x18, 0x84, 0x07, 0xa2, 0x01, 0x0b, 0x12, 0x06, 0x50, 0x41, 0x43,
    -	0x4b, 0x45, 0x44, 0x18, 0xe7, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x15, 0x72, 0x65,
    -	0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x6e, 0x63, 0x6f, 0x64,
    -	0x69, 0x6e, 0x67, 0x12, 0x7e, 0x0a, 0x0f, 0x75, 0x74, 0x66, 0x38, 0x5f, 0x76, 0x61, 0x6c, 0x69,
    -	0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2a, 0x2e, 0x67,
    -	0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46,
    -	0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x55, 0x74, 0x66, 0x38, 0x56, 0x61,
    -	0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x29, 0x88, 0x01, 0x01, 0x98, 0x01, 0x04,
    -	0x98, 0x01, 0x01, 0xa2, 0x01, 0x09, 0x12, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x18, 0x84, 0x07, 0xa2,
    -	0x01, 0x0b, 0x12, 0x06, 0x56, 0x45, 0x52, 0x49, 0x46, 0x59, 0x18, 0xe7, 0x07, 0xb2, 0x01, 0x03,
    -	0x08, 0xe8, 0x07, 0x52, 0x0e, 0x75, 0x74, 0x66, 0x38, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74,
    -	0x69, 0x6f, 0x6e, 0x12, 0x7e, 0x0a, 0x10, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x65,
    -	0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2b, 0x2e,
    -	0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
    -	0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61,
    -	0x67, 0x65, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x42, 0x26, 0x88, 0x01, 0x01, 0x98,
    -	0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x14, 0x12, 0x0f, 0x4c, 0x45, 0x4e, 0x47, 0x54, 0x48,
    -	0x5f, 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x45, 0x44, 0x18, 0x84, 0x07, 0xb2, 0x01, 0x03, 0x08,
    -	0xe8, 0x07, 0x52, 0x0f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x45, 0x6e, 0x63, 0x6f, 0x64,
    -	0x69, 0x6e, 0x67, 0x12, 0x82, 0x01, 0x0a, 0x0b, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66, 0x6f, 0x72,
    -	0x6d, 0x61, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
    -	0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74,
    -	0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61,
    -	0x74, 0x42, 0x39, 0x88, 0x01, 0x01, 0x98, 0x01, 0x03, 0x98, 0x01, 0x06, 0x98, 0x01, 0x01, 0xa2,
    -	0x01, 0x17, 0x12, 0x12, 0x4c, 0x45, 0x47, 0x41, 0x43, 0x59, 0x5f, 0x42, 0x45, 0x53, 0x54, 0x5f,
    -	0x45, 0x46, 0x46, 0x4f, 0x52, 0x54, 0x18, 0x84, 0x07, 0xa2, 0x01, 0x0a, 0x12, 0x05, 0x41, 0x4c,
    -	0x4c, 0x4f, 0x57, 0x18, 0xe7, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x0a, 0x6a, 0x73,
    -	0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x22, 0x5c, 0x0a, 0x0d, 0x46, 0x69, 0x65, 0x6c,
    -	0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x1a, 0x0a, 0x16, 0x46, 0x49, 0x45,
    -	0x4c, 0x44, 0x5f, 0x50, 0x52, 0x45, 0x53, 0x45, 0x4e, 0x43, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e,
    -	0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x45, 0x58, 0x50, 0x4c, 0x49, 0x43, 0x49,
    -	0x54, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x49, 0x4d, 0x50, 0x4c, 0x49, 0x43, 0x49, 0x54, 0x10,
    -	0x02, 0x12, 0x13, 0x0a, 0x0f, 0x4c, 0x45, 0x47, 0x41, 0x43, 0x59, 0x5f, 0x52, 0x45, 0x51, 0x55,
    -	0x49, 0x52, 0x45, 0x44, 0x10, 0x03, 0x22, 0x37, 0x0a, 0x08, 0x45, 0x6e, 0x75, 0x6d, 0x54, 0x79,
    -	0x70, 0x65, 0x12, 0x15, 0x0a, 0x11, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f,
    -	0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x4f, 0x50, 0x45,
    -	0x4e, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x43, 0x4c, 0x4f, 0x53, 0x45, 0x44, 0x10, 0x02, 0x22,
    -	0x56, 0x0a, 0x15, 0x52, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64,
    -	0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x23, 0x0a, 0x1f, 0x52, 0x45, 0x50, 0x45,
    -	0x41, 0x54, 0x45, 0x44, 0x5f, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x5f, 0x45, 0x4e, 0x43, 0x4f, 0x44,
    -	0x49, 0x4e, 0x47, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0a, 0x0a,
    -	0x06, 0x50, 0x41, 0x43, 0x4b, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x45, 0x58, 0x50,
    -	0x41, 0x4e, 0x44, 0x45, 0x44, 0x10, 0x02, 0x22, 0x49, 0x0a, 0x0e, 0x55, 0x74, 0x66, 0x38, 0x56,
    -	0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1b, 0x0a, 0x17, 0x55, 0x54, 0x46,
    -	0x38, 0x5f, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x4b,
    -	0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x56, 0x45, 0x52, 0x49, 0x46, 0x59,
    -	0x10, 0x02, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x03, 0x22, 0x04, 0x08, 0x01,
    -	0x10, 0x01, 0x22, 0x53, 0x0a, 0x0f, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x45, 0x6e, 0x63,
    -	0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x1c, 0x0a, 0x18, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45,
    -	0x5f, 0x45, 0x4e, 0x43, 0x4f, 0x44, 0x49, 0x4e, 0x47, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57,
    -	0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f, 0x4c, 0x45, 0x4e, 0x47, 0x54, 0x48, 0x5f, 0x50, 0x52,
    -	0x45, 0x46, 0x49, 0x58, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x44, 0x45, 0x4c, 0x49,
    -	0x4d, 0x49, 0x54, 0x45, 0x44, 0x10, 0x02, 0x22, 0x48, 0x0a, 0x0a, 0x4a, 0x73, 0x6f, 0x6e, 0x46,
    -	0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x17, 0x0a, 0x13, 0x4a, 0x53, 0x4f, 0x4e, 0x5f, 0x46, 0x4f,
    -	0x52, 0x4d, 0x41, 0x54, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x09,
    -	0x0a, 0x05, 0x41, 0x4c, 0x4c, 0x4f, 0x57, 0x10, 0x01, 0x12, 0x16, 0x0a, 0x12, 0x4c, 0x45, 0x47,
    -	0x41, 0x43, 0x59, 0x5f, 0x42, 0x45, 0x53, 0x54, 0x5f, 0x45, 0x46, 0x46, 0x4f, 0x52, 0x54, 0x10,
    -	0x02, 0x2a, 0x06, 0x08, 0xe8, 0x07, 0x10, 0x8b, 0x4e, 0x2a, 0x06, 0x08, 0x8b, 0x4e, 0x10, 0x90,
    -	0x4e, 0x2a, 0x06, 0x08, 0x90, 0x4e, 0x10, 0x91, 0x4e, 0x4a, 0x06, 0x08, 0xe7, 0x07, 0x10, 0xe8,
    -	0x07, 0x22, 0xef, 0x03, 0x0a, 0x12, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74,
    -	0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x12, 0x58, 0x0a, 0x08, 0x64, 0x65, 0x66, 0x61,
    -	0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x67, 0x6f, 0x6f,
    -	0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61,
    -	0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x2e,
    -	0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f,
    -	0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x52, 0x08, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c,
    -	0x74, 0x73, 0x12, 0x41, 0x0a, 0x0f, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x5f, 0x65, 0x64,
    -	0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f,
    -	0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64,
    -	0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x45, 0x64,
    -	0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x0f, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d,
    -	0x5f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18,
    -	0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
    -	0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75,
    -	0x6d, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xf8, 0x01, 0x0a, 0x18, 0x46, 0x65, 0x61,
    -	0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65,
    -	0x66, 0x61, 0x75, 0x6c, 0x74, 0x12, 0x32, 0x0a, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e,
    -	0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
    -	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e,
    -	0x52, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4e, 0x0a, 0x14, 0x6f, 0x76, 0x65,
    -	0x72, 0x72, 0x69, 0x64, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65,
    -	0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
    -	0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72,
    -	0x65, 0x53, 0x65, 0x74, 0x52, 0x13, 0x6f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x61, 0x62, 0x6c,
    -	0x65, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x42, 0x0a, 0x0e, 0x66, 0x69, 0x78,
    -	0x65, 0x64, 0x5f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28,
    -	0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
    -	0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x0d,
    -	0x66, 0x69, 0x78, 0x65, 0x64, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x4a, 0x04, 0x08,
    -	0x01, 0x10, 0x02, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75,
    -	0x72, 0x65, 0x73, 0x22, 0xb5, 0x02, 0x0a, 0x0e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f,
    -	0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x44, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69,
    -	0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
    -	0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63,
    -	0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69,
    -	0x6f, 0x6e, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xce, 0x01, 0x0a,
    -	0x08, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x04, 0x70, 0x61, 0x74,
    -	0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, 0x74,
    -	0x68, 0x12, 0x16, 0x0a, 0x04, 0x73, 0x70, 0x61, 0x6e, 0x18, 0x02, 0x20, 0x03, 0x28, 0x05, 0x42,
    -	0x02, 0x10, 0x01, 0x52, 0x04, 0x73, 0x70, 0x61, 0x6e, 0x12, 0x29, 0x0a, 0x10, 0x6c, 0x65, 0x61,
    -	0x64, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x03, 0x20,
    -	0x01, 0x28, 0x09, 0x52, 0x0f, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6d, 0x6d,
    -	0x65, 0x6e, 0x74, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67,
    -	0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52,
    -	0x10, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74,
    -	0x73, 0x12, 0x3a, 0x0a, 0x19, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x64, 0x65, 0x74,
    -	0x61, 0x63, 0x68, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x06,
    -	0x20, 0x03, 0x28, 0x09, 0x52, 0x17, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x44, 0x65, 0x74,
    -	0x61, 0x63, 0x68, 0x65, 0x64, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x2a, 0x0c, 0x08,
    -	0x80, 0xec, 0xca, 0xff, 0x01, 0x10, 0x81, 0xec, 0xca, 0xff, 0x01, 0x22, 0xd0, 0x02, 0x0a, 0x11,
    -	0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66,
    -	0x6f, 0x12, 0x4d, 0x0a, 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18,
    -	0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
    -	0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65,
    -	0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61,
    -	0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e,
    -	0x1a, 0xeb, 0x01, 0x0a, 0x0a, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12,
    -	0x16, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02, 0x10,
    -	0x01, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63,
    -	0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, 0x6f,
    -	0x75, 0x72, 0x63, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x62, 0x65, 0x67, 0x69,
    -	0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x62, 0x65, 0x67, 0x69, 0x6e, 0x12, 0x10,
    -	0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64,
    -	0x12, 0x52, 0x0a, 0x08, 0x73, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x18, 0x05, 0x20, 0x01,
    -	0x28, 0x0e, 0x32, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
    -	0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f,
    -	0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f,
    -	0x6e, 0x2e, 0x53, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x52, 0x08, 0x73, 0x65, 0x6d, 0x61,
    -	0x6e, 0x74, 0x69, 0x63, 0x22, 0x28, 0x0a, 0x08, 0x53, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63,
    -	0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x53, 0x45,
    -	0x54, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x41, 0x4c, 0x49, 0x41, 0x53, 0x10, 0x02, 0x2a, 0xa7,
    -	0x02, 0x0a, 0x07, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x13, 0x0a, 0x0f, 0x45, 0x44,
    -	0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12,
    -	0x13, 0x0a, 0x0e, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4c, 0x45, 0x47, 0x41, 0x43,
    -	0x59, 0x10, 0x84, 0x07, 0x12, 0x13, 0x0a, 0x0e, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f,
    -	0x50, 0x52, 0x4f, 0x54, 0x4f, 0x32, 0x10, 0xe6, 0x07, 0x12, 0x13, 0x0a, 0x0e, 0x45, 0x44, 0x49,
    -	0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x33, 0x10, 0xe7, 0x07, 0x12, 0x11,
    -	0x0a, 0x0c, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x32, 0x30, 0x32, 0x33, 0x10, 0xe8,
    -	0x07, 0x12, 0x11, 0x0a, 0x0c, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x32, 0x30, 0x32,
    -	0x34, 0x10, 0xe9, 0x07, 0x12, 0x17, 0x0a, 0x13, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f,
    -	0x31, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x01, 0x12, 0x17, 0x0a,
    -	0x13, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x32, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f,
    -	0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x02, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f,
    -	0x4e, 0x5f, 0x39, 0x39, 0x39, 0x39, 0x37, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c,
    -	0x59, 0x10, 0x9d, 0x8d, 0x06, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e,
    -	0x5f, 0x39, 0x39, 0x39, 0x39, 0x38, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59,
    -	0x10, 0x9e, 0x8d, 0x06, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f,
    -	0x39, 0x39, 0x39, 0x39, 0x39, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10,
    -	0x9f, 0x8d, 0x06, 0x12, 0x13, 0x0a, 0x0b, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4d,
    -	0x41, 0x58, 0x10, 0xff, 0xff, 0xff, 0xff, 0x07, 0x42, 0x7e, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e,
    -	0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42,
    -	0x10, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f,
    -	0x73, 0x48, 0x01, 0x5a, 0x2d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61,
    -	0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f,
    -	0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72,
    -	0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1a, 0x47, 0x6f,
    -	0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x52, 0x65,
    -	0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e,
    -})
    +const file_google_protobuf_descriptor_proto_rawDesc = "" +
    +	"\n" +
    +	" google/protobuf/descriptor.proto\x12\x0fgoogle.protobuf\"[\n" +
    +	"\x11FileDescriptorSet\x128\n" +
    +	"\x04file\x18\x01 \x03(\v2$.google.protobuf.FileDescriptorProtoR\x04file*\f\b\x80\xec\xca\xff\x01\x10\x81\xec\xca\xff\x01\"\xc5\x05\n" +
    +	"\x13FileDescriptorProto\x12\x12\n" +
    +	"\x04name\x18\x01 \x01(\tR\x04name\x12\x18\n" +
    +	"\apackage\x18\x02 \x01(\tR\apackage\x12\x1e\n" +
    +	"\n" +
    +	"dependency\x18\x03 \x03(\tR\n" +
    +	"dependency\x12+\n" +
    +	"\x11public_dependency\x18\n" +
    +	" \x03(\x05R\x10publicDependency\x12'\n" +
    +	"\x0fweak_dependency\x18\v \x03(\x05R\x0eweakDependency\x12+\n" +
    +	"\x11option_dependency\x18\x0f \x03(\tR\x10optionDependency\x12C\n" +
    +	"\fmessage_type\x18\x04 \x03(\v2 .google.protobuf.DescriptorProtoR\vmessageType\x12A\n" +
    +	"\tenum_type\x18\x05 \x03(\v2$.google.protobuf.EnumDescriptorProtoR\benumType\x12A\n" +
    +	"\aservice\x18\x06 \x03(\v2'.google.protobuf.ServiceDescriptorProtoR\aservice\x12C\n" +
    +	"\textension\x18\a \x03(\v2%.google.protobuf.FieldDescriptorProtoR\textension\x126\n" +
    +	"\aoptions\x18\b \x01(\v2\x1c.google.protobuf.FileOptionsR\aoptions\x12I\n" +
    +	"\x10source_code_info\x18\t \x01(\v2\x1f.google.protobuf.SourceCodeInfoR\x0esourceCodeInfo\x12\x16\n" +
    +	"\x06syntax\x18\f \x01(\tR\x06syntax\x122\n" +
    +	"\aedition\x18\x0e \x01(\x0e2\x18.google.protobuf.EditionR\aedition\"\xfc\x06\n" +
    +	"\x0fDescriptorProto\x12\x12\n" +
    +	"\x04name\x18\x01 \x01(\tR\x04name\x12;\n" +
    +	"\x05field\x18\x02 \x03(\v2%.google.protobuf.FieldDescriptorProtoR\x05field\x12C\n" +
    +	"\textension\x18\x06 \x03(\v2%.google.protobuf.FieldDescriptorProtoR\textension\x12A\n" +
    +	"\vnested_type\x18\x03 \x03(\v2 .google.protobuf.DescriptorProtoR\n" +
    +	"nestedType\x12A\n" +
    +	"\tenum_type\x18\x04 \x03(\v2$.google.protobuf.EnumDescriptorProtoR\benumType\x12X\n" +
    +	"\x0fextension_range\x18\x05 \x03(\v2/.google.protobuf.DescriptorProto.ExtensionRangeR\x0eextensionRange\x12D\n" +
    +	"\n" +
    +	"oneof_decl\x18\b \x03(\v2%.google.protobuf.OneofDescriptorProtoR\toneofDecl\x129\n" +
    +	"\aoptions\x18\a \x01(\v2\x1f.google.protobuf.MessageOptionsR\aoptions\x12U\n" +
    +	"\x0ereserved_range\x18\t \x03(\v2..google.protobuf.DescriptorProto.ReservedRangeR\rreservedRange\x12#\n" +
    +	"\rreserved_name\x18\n" +
    +	" \x03(\tR\freservedName\x12A\n" +
    +	"\n" +
    +	"visibility\x18\v \x01(\x0e2!.google.protobuf.SymbolVisibilityR\n" +
    +	"visibility\x1az\n" +
    +	"\x0eExtensionRange\x12\x14\n" +
    +	"\x05start\x18\x01 \x01(\x05R\x05start\x12\x10\n" +
    +	"\x03end\x18\x02 \x01(\x05R\x03end\x12@\n" +
    +	"\aoptions\x18\x03 \x01(\v2&.google.protobuf.ExtensionRangeOptionsR\aoptions\x1a7\n" +
    +	"\rReservedRange\x12\x14\n" +
    +	"\x05start\x18\x01 \x01(\x05R\x05start\x12\x10\n" +
    +	"\x03end\x18\x02 \x01(\x05R\x03end\"\xcc\x04\n" +
    +	"\x15ExtensionRangeOptions\x12X\n" +
    +	"\x14uninterpreted_option\x18\xe7\a \x03(\v2$.google.protobuf.UninterpretedOptionR\x13uninterpretedOption\x12Y\n" +
    +	"\vdeclaration\x18\x02 \x03(\v22.google.protobuf.ExtensionRangeOptions.DeclarationB\x03\x88\x01\x02R\vdeclaration\x127\n" +
    +	"\bfeatures\x182 \x01(\v2\x1b.google.protobuf.FeatureSetR\bfeatures\x12m\n" +
    +	"\fverification\x18\x03 \x01(\x0e28.google.protobuf.ExtensionRangeOptions.VerificationState:\n" +
    +	"UNVERIFIEDB\x03\x88\x01\x02R\fverification\x1a\x94\x01\n" +
    +	"\vDeclaration\x12\x16\n" +
    +	"\x06number\x18\x01 \x01(\x05R\x06number\x12\x1b\n" +
    +	"\tfull_name\x18\x02 \x01(\tR\bfullName\x12\x12\n" +
    +	"\x04type\x18\x03 \x01(\tR\x04type\x12\x1a\n" +
    +	"\breserved\x18\x05 \x01(\bR\breserved\x12\x1a\n" +
    +	"\brepeated\x18\x06 \x01(\bR\brepeatedJ\x04\b\x04\x10\x05\"4\n" +
    +	"\x11VerificationState\x12\x0f\n" +
    +	"\vDECLARATION\x10\x00\x12\x0e\n" +
    +	"\n" +
    +	"UNVERIFIED\x10\x01*\t\b\xe8\a\x10\x80\x80\x80\x80\x02\"\xc1\x06\n" +
    +	"\x14FieldDescriptorProto\x12\x12\n" +
    +	"\x04name\x18\x01 \x01(\tR\x04name\x12\x16\n" +
    +	"\x06number\x18\x03 \x01(\x05R\x06number\x12A\n" +
    +	"\x05label\x18\x04 \x01(\x0e2+.google.protobuf.FieldDescriptorProto.LabelR\x05label\x12>\n" +
    +	"\x04type\x18\x05 \x01(\x0e2*.google.protobuf.FieldDescriptorProto.TypeR\x04type\x12\x1b\n" +
    +	"\ttype_name\x18\x06 \x01(\tR\btypeName\x12\x1a\n" +
    +	"\bextendee\x18\x02 \x01(\tR\bextendee\x12#\n" +
    +	"\rdefault_value\x18\a \x01(\tR\fdefaultValue\x12\x1f\n" +
    +	"\voneof_index\x18\t \x01(\x05R\n" +
    +	"oneofIndex\x12\x1b\n" +
    +	"\tjson_name\x18\n" +
    +	" \x01(\tR\bjsonName\x127\n" +
    +	"\aoptions\x18\b \x01(\v2\x1d.google.protobuf.FieldOptionsR\aoptions\x12'\n" +
    +	"\x0fproto3_optional\x18\x11 \x01(\bR\x0eproto3Optional\"\xb6\x02\n" +
    +	"\x04Type\x12\x0f\n" +
    +	"\vTYPE_DOUBLE\x10\x01\x12\x0e\n" +
    +	"\n" +
    +	"TYPE_FLOAT\x10\x02\x12\x0e\n" +
    +	"\n" +
    +	"TYPE_INT64\x10\x03\x12\x0f\n" +
    +	"\vTYPE_UINT64\x10\x04\x12\x0e\n" +
    +	"\n" +
    +	"TYPE_INT32\x10\x05\x12\x10\n" +
    +	"\fTYPE_FIXED64\x10\x06\x12\x10\n" +
    +	"\fTYPE_FIXED32\x10\a\x12\r\n" +
    +	"\tTYPE_BOOL\x10\b\x12\x0f\n" +
    +	"\vTYPE_STRING\x10\t\x12\x0e\n" +
    +	"\n" +
    +	"TYPE_GROUP\x10\n" +
    +	"\x12\x10\n" +
    +	"\fTYPE_MESSAGE\x10\v\x12\x0e\n" +
    +	"\n" +
    +	"TYPE_BYTES\x10\f\x12\x0f\n" +
    +	"\vTYPE_UINT32\x10\r\x12\r\n" +
    +	"\tTYPE_ENUM\x10\x0e\x12\x11\n" +
    +	"\rTYPE_SFIXED32\x10\x0f\x12\x11\n" +
    +	"\rTYPE_SFIXED64\x10\x10\x12\x0f\n" +
    +	"\vTYPE_SINT32\x10\x11\x12\x0f\n" +
    +	"\vTYPE_SINT64\x10\x12\"C\n" +
    +	"\x05Label\x12\x12\n" +
    +	"\x0eLABEL_OPTIONAL\x10\x01\x12\x12\n" +
    +	"\x0eLABEL_REPEATED\x10\x03\x12\x12\n" +
    +	"\x0eLABEL_REQUIRED\x10\x02\"c\n" +
    +	"\x14OneofDescriptorProto\x12\x12\n" +
    +	"\x04name\x18\x01 \x01(\tR\x04name\x127\n" +
    +	"\aoptions\x18\x02 \x01(\v2\x1d.google.protobuf.OneofOptionsR\aoptions\"\xa6\x03\n" +
    +	"\x13EnumDescriptorProto\x12\x12\n" +
    +	"\x04name\x18\x01 \x01(\tR\x04name\x12?\n" +
    +	"\x05value\x18\x02 \x03(\v2).google.protobuf.EnumValueDescriptorProtoR\x05value\x126\n" +
    +	"\aoptions\x18\x03 \x01(\v2\x1c.google.protobuf.EnumOptionsR\aoptions\x12]\n" +
    +	"\x0ereserved_range\x18\x04 \x03(\v26.google.protobuf.EnumDescriptorProto.EnumReservedRangeR\rreservedRange\x12#\n" +
    +	"\rreserved_name\x18\x05 \x03(\tR\freservedName\x12A\n" +
    +	"\n" +
    +	"visibility\x18\x06 \x01(\x0e2!.google.protobuf.SymbolVisibilityR\n" +
    +	"visibility\x1a;\n" +
    +	"\x11EnumReservedRange\x12\x14\n" +
    +	"\x05start\x18\x01 \x01(\x05R\x05start\x12\x10\n" +
    +	"\x03end\x18\x02 \x01(\x05R\x03end\"\x83\x01\n" +
    +	"\x18EnumValueDescriptorProto\x12\x12\n" +
    +	"\x04name\x18\x01 \x01(\tR\x04name\x12\x16\n" +
    +	"\x06number\x18\x02 \x01(\x05R\x06number\x12;\n" +
    +	"\aoptions\x18\x03 \x01(\v2!.google.protobuf.EnumValueOptionsR\aoptions\"\xb5\x01\n" +
    +	"\x16ServiceDescriptorProto\x12\x12\n" +
    +	"\x04name\x18\x01 \x01(\tR\x04name\x12>\n" +
    +	"\x06method\x18\x02 \x03(\v2&.google.protobuf.MethodDescriptorProtoR\x06method\x129\n" +
    +	"\aoptions\x18\x03 \x01(\v2\x1f.google.protobuf.ServiceOptionsR\aoptionsJ\x04\b\x04\x10\x05R\x06stream\"\x89\x02\n" +
    +	"\x15MethodDescriptorProto\x12\x12\n" +
    +	"\x04name\x18\x01 \x01(\tR\x04name\x12\x1d\n" +
    +	"\n" +
    +	"input_type\x18\x02 \x01(\tR\tinputType\x12\x1f\n" +
    +	"\voutput_type\x18\x03 \x01(\tR\n" +
    +	"outputType\x128\n" +
    +	"\aoptions\x18\x04 \x01(\v2\x1e.google.protobuf.MethodOptionsR\aoptions\x120\n" +
    +	"\x10client_streaming\x18\x05 \x01(\b:\x05falseR\x0fclientStreaming\x120\n" +
    +	"\x10server_streaming\x18\x06 \x01(\b:\x05falseR\x0fserverStreaming\"\xad\t\n" +
    +	"\vFileOptions\x12!\n" +
    +	"\fjava_package\x18\x01 \x01(\tR\vjavaPackage\x120\n" +
    +	"\x14java_outer_classname\x18\b \x01(\tR\x12javaOuterClassname\x125\n" +
    +	"\x13java_multiple_files\x18\n" +
    +	" \x01(\b:\x05falseR\x11javaMultipleFiles\x12D\n" +
    +	"\x1djava_generate_equals_and_hash\x18\x14 \x01(\bB\x02\x18\x01R\x19javaGenerateEqualsAndHash\x12:\n" +
    +	"\x16java_string_check_utf8\x18\x1b \x01(\b:\x05falseR\x13javaStringCheckUtf8\x12S\n" +
    +	"\foptimize_for\x18\t \x01(\x0e2).google.protobuf.FileOptions.OptimizeMode:\x05SPEEDR\voptimizeFor\x12\x1d\n" +
    +	"\n" +
    +	"go_package\x18\v \x01(\tR\tgoPackage\x125\n" +
    +	"\x13cc_generic_services\x18\x10 \x01(\b:\x05falseR\x11ccGenericServices\x129\n" +
    +	"\x15java_generic_services\x18\x11 \x01(\b:\x05falseR\x13javaGenericServices\x125\n" +
    +	"\x13py_generic_services\x18\x12 \x01(\b:\x05falseR\x11pyGenericServices\x12%\n" +
    +	"\n" +
    +	"deprecated\x18\x17 \x01(\b:\x05falseR\n" +
    +	"deprecated\x12.\n" +
    +	"\x10cc_enable_arenas\x18\x1f \x01(\b:\x04trueR\x0eccEnableArenas\x12*\n" +
    +	"\x11objc_class_prefix\x18$ \x01(\tR\x0fobjcClassPrefix\x12)\n" +
    +	"\x10csharp_namespace\x18% \x01(\tR\x0fcsharpNamespace\x12!\n" +
    +	"\fswift_prefix\x18' \x01(\tR\vswiftPrefix\x12(\n" +
    +	"\x10php_class_prefix\x18( \x01(\tR\x0ephpClassPrefix\x12#\n" +
    +	"\rphp_namespace\x18) \x01(\tR\fphpNamespace\x124\n" +
    +	"\x16php_metadata_namespace\x18, \x01(\tR\x14phpMetadataNamespace\x12!\n" +
    +	"\fruby_package\x18- \x01(\tR\vrubyPackage\x127\n" +
    +	"\bfeatures\x182 \x01(\v2\x1b.google.protobuf.FeatureSetR\bfeatures\x12X\n" +
    +	"\x14uninterpreted_option\x18\xe7\a \x03(\v2$.google.protobuf.UninterpretedOptionR\x13uninterpretedOption\":\n" +
    +	"\fOptimizeMode\x12\t\n" +
    +	"\x05SPEED\x10\x01\x12\r\n" +
    +	"\tCODE_SIZE\x10\x02\x12\x10\n" +
    +	"\fLITE_RUNTIME\x10\x03*\t\b\xe8\a\x10\x80\x80\x80\x80\x02J\x04\b*\x10+J\x04\b&\x10'R\x14php_generic_services\"\xf4\x03\n" +
    +	"\x0eMessageOptions\x12<\n" +
    +	"\x17message_set_wire_format\x18\x01 \x01(\b:\x05falseR\x14messageSetWireFormat\x12L\n" +
    +	"\x1fno_standard_descriptor_accessor\x18\x02 \x01(\b:\x05falseR\x1cnoStandardDescriptorAccessor\x12%\n" +
    +	"\n" +
    +	"deprecated\x18\x03 \x01(\b:\x05falseR\n" +
    +	"deprecated\x12\x1b\n" +
    +	"\tmap_entry\x18\a \x01(\bR\bmapEntry\x12V\n" +
    +	"&deprecated_legacy_json_field_conflicts\x18\v \x01(\bB\x02\x18\x01R\"deprecatedLegacyJsonFieldConflicts\x127\n" +
    +	"\bfeatures\x18\f \x01(\v2\x1b.google.protobuf.FeatureSetR\bfeatures\x12X\n" +
    +	"\x14uninterpreted_option\x18\xe7\a \x03(\v2$.google.protobuf.UninterpretedOptionR\x13uninterpretedOption*\t\b\xe8\a\x10\x80\x80\x80\x80\x02J\x04\b\x04\x10\x05J\x04\b\x05\x10\x06J\x04\b\x06\x10\aJ\x04\b\b\x10\tJ\x04\b\t\x10\n" +
    +	"\"\xa1\r\n" +
    +	"\fFieldOptions\x12A\n" +
    +	"\x05ctype\x18\x01 \x01(\x0e2#.google.protobuf.FieldOptions.CType:\x06STRINGR\x05ctype\x12\x16\n" +
    +	"\x06packed\x18\x02 \x01(\bR\x06packed\x12G\n" +
    +	"\x06jstype\x18\x06 \x01(\x0e2$.google.protobuf.FieldOptions.JSType:\tJS_NORMALR\x06jstype\x12\x19\n" +
    +	"\x04lazy\x18\x05 \x01(\b:\x05falseR\x04lazy\x12.\n" +
    +	"\x0funverified_lazy\x18\x0f \x01(\b:\x05falseR\x0eunverifiedLazy\x12%\n" +
    +	"\n" +
    +	"deprecated\x18\x03 \x01(\b:\x05falseR\n" +
    +	"deprecated\x12\x1d\n" +
    +	"\x04weak\x18\n" +
    +	" \x01(\b:\x05falseB\x02\x18\x01R\x04weak\x12(\n" +
    +	"\fdebug_redact\x18\x10 \x01(\b:\x05falseR\vdebugRedact\x12K\n" +
    +	"\tretention\x18\x11 \x01(\x0e2-.google.protobuf.FieldOptions.OptionRetentionR\tretention\x12H\n" +
    +	"\atargets\x18\x13 \x03(\x0e2..google.protobuf.FieldOptions.OptionTargetTypeR\atargets\x12W\n" +
    +	"\x10edition_defaults\x18\x14 \x03(\v2,.google.protobuf.FieldOptions.EditionDefaultR\x0feditionDefaults\x127\n" +
    +	"\bfeatures\x18\x15 \x01(\v2\x1b.google.protobuf.FeatureSetR\bfeatures\x12U\n" +
    +	"\x0ffeature_support\x18\x16 \x01(\v2,.google.protobuf.FieldOptions.FeatureSupportR\x0efeatureSupport\x12X\n" +
    +	"\x14uninterpreted_option\x18\xe7\a \x03(\v2$.google.protobuf.UninterpretedOptionR\x13uninterpretedOption\x1aZ\n" +
    +	"\x0eEditionDefault\x122\n" +
    +	"\aedition\x18\x03 \x01(\x0e2\x18.google.protobuf.EditionR\aedition\x12\x14\n" +
    +	"\x05value\x18\x02 \x01(\tR\x05value\x1a\x96\x02\n" +
    +	"\x0eFeatureSupport\x12G\n" +
    +	"\x12edition_introduced\x18\x01 \x01(\x0e2\x18.google.protobuf.EditionR\x11editionIntroduced\x12G\n" +
    +	"\x12edition_deprecated\x18\x02 \x01(\x0e2\x18.google.protobuf.EditionR\x11editionDeprecated\x12/\n" +
    +	"\x13deprecation_warning\x18\x03 \x01(\tR\x12deprecationWarning\x12A\n" +
    +	"\x0fedition_removed\x18\x04 \x01(\x0e2\x18.google.protobuf.EditionR\x0eeditionRemoved\"/\n" +
    +	"\x05CType\x12\n" +
    +	"\n" +
    +	"\x06STRING\x10\x00\x12\b\n" +
    +	"\x04CORD\x10\x01\x12\x10\n" +
    +	"\fSTRING_PIECE\x10\x02\"5\n" +
    +	"\x06JSType\x12\r\n" +
    +	"\tJS_NORMAL\x10\x00\x12\r\n" +
    +	"\tJS_STRING\x10\x01\x12\r\n" +
    +	"\tJS_NUMBER\x10\x02\"U\n" +
    +	"\x0fOptionRetention\x12\x15\n" +
    +	"\x11RETENTION_UNKNOWN\x10\x00\x12\x15\n" +
    +	"\x11RETENTION_RUNTIME\x10\x01\x12\x14\n" +
    +	"\x10RETENTION_SOURCE\x10\x02\"\x8c\x02\n" +
    +	"\x10OptionTargetType\x12\x17\n" +
    +	"\x13TARGET_TYPE_UNKNOWN\x10\x00\x12\x14\n" +
    +	"\x10TARGET_TYPE_FILE\x10\x01\x12\x1f\n" +
    +	"\x1bTARGET_TYPE_EXTENSION_RANGE\x10\x02\x12\x17\n" +
    +	"\x13TARGET_TYPE_MESSAGE\x10\x03\x12\x15\n" +
    +	"\x11TARGET_TYPE_FIELD\x10\x04\x12\x15\n" +
    +	"\x11TARGET_TYPE_ONEOF\x10\x05\x12\x14\n" +
    +	"\x10TARGET_TYPE_ENUM\x10\x06\x12\x1a\n" +
    +	"\x16TARGET_TYPE_ENUM_ENTRY\x10\a\x12\x17\n" +
    +	"\x13TARGET_TYPE_SERVICE\x10\b\x12\x16\n" +
    +	"\x12TARGET_TYPE_METHOD\x10\t*\t\b\xe8\a\x10\x80\x80\x80\x80\x02J\x04\b\x04\x10\x05J\x04\b\x12\x10\x13\"\xac\x01\n" +
    +	"\fOneofOptions\x127\n" +
    +	"\bfeatures\x18\x01 \x01(\v2\x1b.google.protobuf.FeatureSetR\bfeatures\x12X\n" +
    +	"\x14uninterpreted_option\x18\xe7\a \x03(\v2$.google.protobuf.UninterpretedOptionR\x13uninterpretedOption*\t\b\xe8\a\x10\x80\x80\x80\x80\x02\"\xd1\x02\n" +
    +	"\vEnumOptions\x12\x1f\n" +
    +	"\vallow_alias\x18\x02 \x01(\bR\n" +
    +	"allowAlias\x12%\n" +
    +	"\n" +
    +	"deprecated\x18\x03 \x01(\b:\x05falseR\n" +
    +	"deprecated\x12V\n" +
    +	"&deprecated_legacy_json_field_conflicts\x18\x06 \x01(\bB\x02\x18\x01R\"deprecatedLegacyJsonFieldConflicts\x127\n" +
    +	"\bfeatures\x18\a \x01(\v2\x1b.google.protobuf.FeatureSetR\bfeatures\x12X\n" +
    +	"\x14uninterpreted_option\x18\xe7\a \x03(\v2$.google.protobuf.UninterpretedOptionR\x13uninterpretedOption*\t\b\xe8\a\x10\x80\x80\x80\x80\x02J\x04\b\x05\x10\x06\"\xd8\x02\n" +
    +	"\x10EnumValueOptions\x12%\n" +
    +	"\n" +
    +	"deprecated\x18\x01 \x01(\b:\x05falseR\n" +
    +	"deprecated\x127\n" +
    +	"\bfeatures\x18\x02 \x01(\v2\x1b.google.protobuf.FeatureSetR\bfeatures\x12(\n" +
    +	"\fdebug_redact\x18\x03 \x01(\b:\x05falseR\vdebugRedact\x12U\n" +
    +	"\x0ffeature_support\x18\x04 \x01(\v2,.google.protobuf.FieldOptions.FeatureSupportR\x0efeatureSupport\x12X\n" +
    +	"\x14uninterpreted_option\x18\xe7\a \x03(\v2$.google.protobuf.UninterpretedOptionR\x13uninterpretedOption*\t\b\xe8\a\x10\x80\x80\x80\x80\x02\"\xd5\x01\n" +
    +	"\x0eServiceOptions\x127\n" +
    +	"\bfeatures\x18\" \x01(\v2\x1b.google.protobuf.FeatureSetR\bfeatures\x12%\n" +
    +	"\n" +
    +	"deprecated\x18! \x01(\b:\x05falseR\n" +
    +	"deprecated\x12X\n" +
    +	"\x14uninterpreted_option\x18\xe7\a \x03(\v2$.google.protobuf.UninterpretedOptionR\x13uninterpretedOption*\t\b\xe8\a\x10\x80\x80\x80\x80\x02\"\x99\x03\n" +
    +	"\rMethodOptions\x12%\n" +
    +	"\n" +
    +	"deprecated\x18! \x01(\b:\x05falseR\n" +
    +	"deprecated\x12q\n" +
    +	"\x11idempotency_level\x18\" \x01(\x0e2/.google.protobuf.MethodOptions.IdempotencyLevel:\x13IDEMPOTENCY_UNKNOWNR\x10idempotencyLevel\x127\n" +
    +	"\bfeatures\x18# \x01(\v2\x1b.google.protobuf.FeatureSetR\bfeatures\x12X\n" +
    +	"\x14uninterpreted_option\x18\xe7\a \x03(\v2$.google.protobuf.UninterpretedOptionR\x13uninterpretedOption\"P\n" +
    +	"\x10IdempotencyLevel\x12\x17\n" +
    +	"\x13IDEMPOTENCY_UNKNOWN\x10\x00\x12\x13\n" +
    +	"\x0fNO_SIDE_EFFECTS\x10\x01\x12\x0e\n" +
    +	"\n" +
    +	"IDEMPOTENT\x10\x02*\t\b\xe8\a\x10\x80\x80\x80\x80\x02\"\x9a\x03\n" +
    +	"\x13UninterpretedOption\x12A\n" +
    +	"\x04name\x18\x02 \x03(\v2-.google.protobuf.UninterpretedOption.NamePartR\x04name\x12)\n" +
    +	"\x10identifier_value\x18\x03 \x01(\tR\x0fidentifierValue\x12,\n" +
    +	"\x12positive_int_value\x18\x04 \x01(\x04R\x10positiveIntValue\x12,\n" +
    +	"\x12negative_int_value\x18\x05 \x01(\x03R\x10negativeIntValue\x12!\n" +
    +	"\fdouble_value\x18\x06 \x01(\x01R\vdoubleValue\x12!\n" +
    +	"\fstring_value\x18\a \x01(\fR\vstringValue\x12'\n" +
    +	"\x0faggregate_value\x18\b \x01(\tR\x0eaggregateValue\x1aJ\n" +
    +	"\bNamePart\x12\x1b\n" +
    +	"\tname_part\x18\x01 \x02(\tR\bnamePart\x12!\n" +
    +	"\fis_extension\x18\x02 \x02(\bR\visExtension\"\x8e\x0f\n" +
    +	"\n" +
    +	"FeatureSet\x12\x91\x01\n" +
    +	"\x0efield_presence\x18\x01 \x01(\x0e2).google.protobuf.FeatureSet.FieldPresenceB?\x88\x01\x01\x98\x01\x04\x98\x01\x01\xa2\x01\r\x12\bEXPLICIT\x18\x84\a\xa2\x01\r\x12\bIMPLICIT\x18\xe7\a\xa2\x01\r\x12\bEXPLICIT\x18\xe8\a\xb2\x01\x03\b\xe8\aR\rfieldPresence\x12l\n" +
    +	"\tenum_type\x18\x02 \x01(\x0e2$.google.protobuf.FeatureSet.EnumTypeB)\x88\x01\x01\x98\x01\x06\x98\x01\x01\xa2\x01\v\x12\x06CLOSED\x18\x84\a\xa2\x01\t\x12\x04OPEN\x18\xe7\a\xb2\x01\x03\b\xe8\aR\benumType\x12\x98\x01\n" +
    +	"\x17repeated_field_encoding\x18\x03 \x01(\x0e21.google.protobuf.FeatureSet.RepeatedFieldEncodingB-\x88\x01\x01\x98\x01\x04\x98\x01\x01\xa2\x01\r\x12\bEXPANDED\x18\x84\a\xa2\x01\v\x12\x06PACKED\x18\xe7\a\xb2\x01\x03\b\xe8\aR\x15repeatedFieldEncoding\x12~\n" +
    +	"\x0futf8_validation\x18\x04 \x01(\x0e2*.google.protobuf.FeatureSet.Utf8ValidationB)\x88\x01\x01\x98\x01\x04\x98\x01\x01\xa2\x01\t\x12\x04NONE\x18\x84\a\xa2\x01\v\x12\x06VERIFY\x18\xe7\a\xb2\x01\x03\b\xe8\aR\x0eutf8Validation\x12~\n" +
    +	"\x10message_encoding\x18\x05 \x01(\x0e2+.google.protobuf.FeatureSet.MessageEncodingB&\x88\x01\x01\x98\x01\x04\x98\x01\x01\xa2\x01\x14\x12\x0fLENGTH_PREFIXED\x18\x84\a\xb2\x01\x03\b\xe8\aR\x0fmessageEncoding\x12\x82\x01\n" +
    +	"\vjson_format\x18\x06 \x01(\x0e2&.google.protobuf.FeatureSet.JsonFormatB9\x88\x01\x01\x98\x01\x03\x98\x01\x06\x98\x01\x01\xa2\x01\x17\x12\x12LEGACY_BEST_EFFORT\x18\x84\a\xa2\x01\n" +
    +	"\x12\x05ALLOW\x18\xe7\a\xb2\x01\x03\b\xe8\aR\n" +
    +	"jsonFormat\x12\xab\x01\n" +
    +	"\x14enforce_naming_style\x18\a \x01(\x0e2..google.protobuf.FeatureSet.EnforceNamingStyleBI\x88\x01\x02\x98\x01\x01\x98\x01\x02\x98\x01\x03\x98\x01\x04\x98\x01\x05\x98\x01\x06\x98\x01\a\x98\x01\b\x98\x01\t\xa2\x01\x11\x12\fSTYLE_LEGACY\x18\x84\a\xa2\x01\x0e\x12\tSTYLE2024\x18\xe9\a\xb2\x01\x03\b\xe9\aR\x12enforceNamingStyle\x12\xb9\x01\n" +
    +	"\x19default_symbol_visibility\x18\b \x01(\x0e2E.google.protobuf.FeatureSet.VisibilityFeature.DefaultSymbolVisibilityB6\x88\x01\x02\x98\x01\x01\xa2\x01\x0f\x12\n" +
    +	"EXPORT_ALL\x18\x84\a\xa2\x01\x15\x12\x10EXPORT_TOP_LEVEL\x18\xe9\a\xb2\x01\x03\b\xe9\aR\x17defaultSymbolVisibility\x1a\xa1\x01\n" +
    +	"\x11VisibilityFeature\"\x81\x01\n" +
    +	"\x17DefaultSymbolVisibility\x12%\n" +
    +	"!DEFAULT_SYMBOL_VISIBILITY_UNKNOWN\x10\x00\x12\x0e\n" +
    +	"\n" +
    +	"EXPORT_ALL\x10\x01\x12\x14\n" +
    +	"\x10EXPORT_TOP_LEVEL\x10\x02\x12\r\n" +
    +	"\tLOCAL_ALL\x10\x03\x12\n" +
    +	"\n" +
    +	"\x06STRICT\x10\x04J\b\b\x01\x10\x80\x80\x80\x80\x02\"\\\n" +
    +	"\rFieldPresence\x12\x1a\n" +
    +	"\x16FIELD_PRESENCE_UNKNOWN\x10\x00\x12\f\n" +
    +	"\bEXPLICIT\x10\x01\x12\f\n" +
    +	"\bIMPLICIT\x10\x02\x12\x13\n" +
    +	"\x0fLEGACY_REQUIRED\x10\x03\"7\n" +
    +	"\bEnumType\x12\x15\n" +
    +	"\x11ENUM_TYPE_UNKNOWN\x10\x00\x12\b\n" +
    +	"\x04OPEN\x10\x01\x12\n" +
    +	"\n" +
    +	"\x06CLOSED\x10\x02\"V\n" +
    +	"\x15RepeatedFieldEncoding\x12#\n" +
    +	"\x1fREPEATED_FIELD_ENCODING_UNKNOWN\x10\x00\x12\n" +
    +	"\n" +
    +	"\x06PACKED\x10\x01\x12\f\n" +
    +	"\bEXPANDED\x10\x02\"I\n" +
    +	"\x0eUtf8Validation\x12\x1b\n" +
    +	"\x17UTF8_VALIDATION_UNKNOWN\x10\x00\x12\n" +
    +	"\n" +
    +	"\x06VERIFY\x10\x02\x12\b\n" +
    +	"\x04NONE\x10\x03\"\x04\b\x01\x10\x01\"S\n" +
    +	"\x0fMessageEncoding\x12\x1c\n" +
    +	"\x18MESSAGE_ENCODING_UNKNOWN\x10\x00\x12\x13\n" +
    +	"\x0fLENGTH_PREFIXED\x10\x01\x12\r\n" +
    +	"\tDELIMITED\x10\x02\"H\n" +
    +	"\n" +
    +	"JsonFormat\x12\x17\n" +
    +	"\x13JSON_FORMAT_UNKNOWN\x10\x00\x12\t\n" +
    +	"\x05ALLOW\x10\x01\x12\x16\n" +
    +	"\x12LEGACY_BEST_EFFORT\x10\x02\"W\n" +
    +	"\x12EnforceNamingStyle\x12 \n" +
    +	"\x1cENFORCE_NAMING_STYLE_UNKNOWN\x10\x00\x12\r\n" +
    +	"\tSTYLE2024\x10\x01\x12\x10\n" +
    +	"\fSTYLE_LEGACY\x10\x02*\x06\b\xe8\a\x10\x8bN*\x06\b\x8bN\x10\x90N*\x06\b\x90N\x10\x91NJ\x06\b\xe7\a\x10\xe8\a\"\xef\x03\n" +
    +	"\x12FeatureSetDefaults\x12X\n" +
    +	"\bdefaults\x18\x01 \x03(\v2<.google.protobuf.FeatureSetDefaults.FeatureSetEditionDefaultR\bdefaults\x12A\n" +
    +	"\x0fminimum_edition\x18\x04 \x01(\x0e2\x18.google.protobuf.EditionR\x0eminimumEdition\x12A\n" +
    +	"\x0fmaximum_edition\x18\x05 \x01(\x0e2\x18.google.protobuf.EditionR\x0emaximumEdition\x1a\xf8\x01\n" +
    +	"\x18FeatureSetEditionDefault\x122\n" +
    +	"\aedition\x18\x03 \x01(\x0e2\x18.google.protobuf.EditionR\aedition\x12N\n" +
    +	"\x14overridable_features\x18\x04 \x01(\v2\x1b.google.protobuf.FeatureSetR\x13overridableFeatures\x12B\n" +
    +	"\x0efixed_features\x18\x05 \x01(\v2\x1b.google.protobuf.FeatureSetR\rfixedFeaturesJ\x04\b\x01\x10\x02J\x04\b\x02\x10\x03R\bfeatures\"\xb5\x02\n" +
    +	"\x0eSourceCodeInfo\x12D\n" +
    +	"\blocation\x18\x01 \x03(\v2(.google.protobuf.SourceCodeInfo.LocationR\blocation\x1a\xce\x01\n" +
    +	"\bLocation\x12\x16\n" +
    +	"\x04path\x18\x01 \x03(\x05B\x02\x10\x01R\x04path\x12\x16\n" +
    +	"\x04span\x18\x02 \x03(\x05B\x02\x10\x01R\x04span\x12)\n" +
    +	"\x10leading_comments\x18\x03 \x01(\tR\x0fleadingComments\x12+\n" +
    +	"\x11trailing_comments\x18\x04 \x01(\tR\x10trailingComments\x12:\n" +
    +	"\x19leading_detached_comments\x18\x06 \x03(\tR\x17leadingDetachedComments*\f\b\x80\xec\xca\xff\x01\x10\x81\xec\xca\xff\x01\"\xd0\x02\n" +
    +	"\x11GeneratedCodeInfo\x12M\n" +
    +	"\n" +
    +	"annotation\x18\x01 \x03(\v2-.google.protobuf.GeneratedCodeInfo.AnnotationR\n" +
    +	"annotation\x1a\xeb\x01\n" +
    +	"\n" +
    +	"Annotation\x12\x16\n" +
    +	"\x04path\x18\x01 \x03(\x05B\x02\x10\x01R\x04path\x12\x1f\n" +
    +	"\vsource_file\x18\x02 \x01(\tR\n" +
    +	"sourceFile\x12\x14\n" +
    +	"\x05begin\x18\x03 \x01(\x05R\x05begin\x12\x10\n" +
    +	"\x03end\x18\x04 \x01(\x05R\x03end\x12R\n" +
    +	"\bsemantic\x18\x05 \x01(\x0e26.google.protobuf.GeneratedCodeInfo.Annotation.SemanticR\bsemantic\"(\n" +
    +	"\bSemantic\x12\b\n" +
    +	"\x04NONE\x10\x00\x12\a\n" +
    +	"\x03SET\x10\x01\x12\t\n" +
    +	"\x05ALIAS\x10\x02*\xbe\x02\n" +
    +	"\aEdition\x12\x13\n" +
    +	"\x0fEDITION_UNKNOWN\x10\x00\x12\x13\n" +
    +	"\x0eEDITION_LEGACY\x10\x84\a\x12\x13\n" +
    +	"\x0eEDITION_PROTO2\x10\xe6\a\x12\x13\n" +
    +	"\x0eEDITION_PROTO3\x10\xe7\a\x12\x11\n" +
    +	"\fEDITION_2023\x10\xe8\a\x12\x11\n" +
    +	"\fEDITION_2024\x10\xe9\a\x12\x15\n" +
    +	"\x10EDITION_UNSTABLE\x10\x8fN\x12\x17\n" +
    +	"\x13EDITION_1_TEST_ONLY\x10\x01\x12\x17\n" +
    +	"\x13EDITION_2_TEST_ONLY\x10\x02\x12\x1d\n" +
    +	"\x17EDITION_99997_TEST_ONLY\x10\x9d\x8d\x06\x12\x1d\n" +
    +	"\x17EDITION_99998_TEST_ONLY\x10\x9e\x8d\x06\x12\x1d\n" +
    +	"\x17EDITION_99999_TEST_ONLY\x10\x9f\x8d\x06\x12\x13\n" +
    +	"\vEDITION_MAX\x10\xff\xff\xff\xff\a*U\n" +
    +	"\x10SymbolVisibility\x12\x14\n" +
    +	"\x10VISIBILITY_UNSET\x10\x00\x12\x14\n" +
    +	"\x10VISIBILITY_LOCAL\x10\x01\x12\x15\n" +
    +	"\x11VISIBILITY_EXPORT\x10\x02B~\n" +
    +	"\x13com.google.protobufB\x10DescriptorProtosH\x01Z-google.golang.org/protobuf/types/descriptorpb\xf8\x01\x01\xa2\x02\x03GPB\xaa\x02\x1aGoogle.Protobuf.Reflection"
     
     var (
     	file_google_protobuf_descriptor_proto_rawDescOnce sync.Once
    @@ -5145,143 +5070,151 @@ func file_google_protobuf_descriptor_proto_rawDescGZIP() []byte {
     	return file_google_protobuf_descriptor_proto_rawDescData
     }
     
    -var file_google_protobuf_descriptor_proto_enumTypes = make([]protoimpl.EnumInfo, 17)
    -var file_google_protobuf_descriptor_proto_msgTypes = make([]protoimpl.MessageInfo, 33)
    +var file_google_protobuf_descriptor_proto_enumTypes = make([]protoimpl.EnumInfo, 20)
    +var file_google_protobuf_descriptor_proto_msgTypes = make([]protoimpl.MessageInfo, 34)
     var file_google_protobuf_descriptor_proto_goTypes = []any{
    -	(Edition)(0), // 0: google.protobuf.Edition
    -	(ExtensionRangeOptions_VerificationState)(0),        // 1: google.protobuf.ExtensionRangeOptions.VerificationState
    -	(FieldDescriptorProto_Type)(0),                      // 2: google.protobuf.FieldDescriptorProto.Type
    -	(FieldDescriptorProto_Label)(0),                     // 3: google.protobuf.FieldDescriptorProto.Label
    -	(FileOptions_OptimizeMode)(0),                       // 4: google.protobuf.FileOptions.OptimizeMode
    -	(FieldOptions_CType)(0),                             // 5: google.protobuf.FieldOptions.CType
    -	(FieldOptions_JSType)(0),                            // 6: google.protobuf.FieldOptions.JSType
    -	(FieldOptions_OptionRetention)(0),                   // 7: google.protobuf.FieldOptions.OptionRetention
    -	(FieldOptions_OptionTargetType)(0),                  // 8: google.protobuf.FieldOptions.OptionTargetType
    -	(MethodOptions_IdempotencyLevel)(0),                 // 9: google.protobuf.MethodOptions.IdempotencyLevel
    -	(FeatureSet_FieldPresence)(0),                       // 10: google.protobuf.FeatureSet.FieldPresence
    -	(FeatureSet_EnumType)(0),                            // 11: google.protobuf.FeatureSet.EnumType
    -	(FeatureSet_RepeatedFieldEncoding)(0),               // 12: google.protobuf.FeatureSet.RepeatedFieldEncoding
    -	(FeatureSet_Utf8Validation)(0),                      // 13: google.protobuf.FeatureSet.Utf8Validation
    -	(FeatureSet_MessageEncoding)(0),                     // 14: google.protobuf.FeatureSet.MessageEncoding
    -	(FeatureSet_JsonFormat)(0),                          // 15: google.protobuf.FeatureSet.JsonFormat
    -	(GeneratedCodeInfo_Annotation_Semantic)(0),          // 16: google.protobuf.GeneratedCodeInfo.Annotation.Semantic
    -	(*FileDescriptorSet)(nil),                           // 17: google.protobuf.FileDescriptorSet
    -	(*FileDescriptorProto)(nil),                         // 18: google.protobuf.FileDescriptorProto
    -	(*DescriptorProto)(nil),                             // 19: google.protobuf.DescriptorProto
    -	(*ExtensionRangeOptions)(nil),                       // 20: google.protobuf.ExtensionRangeOptions
    -	(*FieldDescriptorProto)(nil),                        // 21: google.protobuf.FieldDescriptorProto
    -	(*OneofDescriptorProto)(nil),                        // 22: google.protobuf.OneofDescriptorProto
    -	(*EnumDescriptorProto)(nil),                         // 23: google.protobuf.EnumDescriptorProto
    -	(*EnumValueDescriptorProto)(nil),                    // 24: google.protobuf.EnumValueDescriptorProto
    -	(*ServiceDescriptorProto)(nil),                      // 25: google.protobuf.ServiceDescriptorProto
    -	(*MethodDescriptorProto)(nil),                       // 26: google.protobuf.MethodDescriptorProto
    -	(*FileOptions)(nil),                                 // 27: google.protobuf.FileOptions
    -	(*MessageOptions)(nil),                              // 28: google.protobuf.MessageOptions
    -	(*FieldOptions)(nil),                                // 29: google.protobuf.FieldOptions
    -	(*OneofOptions)(nil),                                // 30: google.protobuf.OneofOptions
    -	(*EnumOptions)(nil),                                 // 31: google.protobuf.EnumOptions
    -	(*EnumValueOptions)(nil),                            // 32: google.protobuf.EnumValueOptions
    -	(*ServiceOptions)(nil),                              // 33: google.protobuf.ServiceOptions
    -	(*MethodOptions)(nil),                               // 34: google.protobuf.MethodOptions
    -	(*UninterpretedOption)(nil),                         // 35: google.protobuf.UninterpretedOption
    -	(*FeatureSet)(nil),                                  // 36: google.protobuf.FeatureSet
    -	(*FeatureSetDefaults)(nil),                          // 37: google.protobuf.FeatureSetDefaults
    -	(*SourceCodeInfo)(nil),                              // 38: google.protobuf.SourceCodeInfo
    -	(*GeneratedCodeInfo)(nil),                           // 39: google.protobuf.GeneratedCodeInfo
    -	(*DescriptorProto_ExtensionRange)(nil),              // 40: google.protobuf.DescriptorProto.ExtensionRange
    -	(*DescriptorProto_ReservedRange)(nil),               // 41: google.protobuf.DescriptorProto.ReservedRange
    -	(*ExtensionRangeOptions_Declaration)(nil),           // 42: google.protobuf.ExtensionRangeOptions.Declaration
    -	(*EnumDescriptorProto_EnumReservedRange)(nil),       // 43: google.protobuf.EnumDescriptorProto.EnumReservedRange
    -	(*FieldOptions_EditionDefault)(nil),                 // 44: google.protobuf.FieldOptions.EditionDefault
    -	(*FieldOptions_FeatureSupport)(nil),                 // 45: google.protobuf.FieldOptions.FeatureSupport
    -	(*UninterpretedOption_NamePart)(nil),                // 46: google.protobuf.UninterpretedOption.NamePart
    -	(*FeatureSetDefaults_FeatureSetEditionDefault)(nil), // 47: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault
    -	(*SourceCodeInfo_Location)(nil),                     // 48: google.protobuf.SourceCodeInfo.Location
    -	(*GeneratedCodeInfo_Annotation)(nil),                // 49: google.protobuf.GeneratedCodeInfo.Annotation
    +	(Edition)(0),          // 0: google.protobuf.Edition
    +	(SymbolVisibility)(0), // 1: google.protobuf.SymbolVisibility
    +	(ExtensionRangeOptions_VerificationState)(0),              // 2: google.protobuf.ExtensionRangeOptions.VerificationState
    +	(FieldDescriptorProto_Type)(0),                            // 3: google.protobuf.FieldDescriptorProto.Type
    +	(FieldDescriptorProto_Label)(0),                           // 4: google.protobuf.FieldDescriptorProto.Label
    +	(FileOptions_OptimizeMode)(0),                             // 5: google.protobuf.FileOptions.OptimizeMode
    +	(FieldOptions_CType)(0),                                   // 6: google.protobuf.FieldOptions.CType
    +	(FieldOptions_JSType)(0),                                  // 7: google.protobuf.FieldOptions.JSType
    +	(FieldOptions_OptionRetention)(0),                         // 8: google.protobuf.FieldOptions.OptionRetention
    +	(FieldOptions_OptionTargetType)(0),                        // 9: google.protobuf.FieldOptions.OptionTargetType
    +	(MethodOptions_IdempotencyLevel)(0),                       // 10: google.protobuf.MethodOptions.IdempotencyLevel
    +	(FeatureSet_FieldPresence)(0),                             // 11: google.protobuf.FeatureSet.FieldPresence
    +	(FeatureSet_EnumType)(0),                                  // 12: google.protobuf.FeatureSet.EnumType
    +	(FeatureSet_RepeatedFieldEncoding)(0),                     // 13: google.protobuf.FeatureSet.RepeatedFieldEncoding
    +	(FeatureSet_Utf8Validation)(0),                            // 14: google.protobuf.FeatureSet.Utf8Validation
    +	(FeatureSet_MessageEncoding)(0),                           // 15: google.protobuf.FeatureSet.MessageEncoding
    +	(FeatureSet_JsonFormat)(0),                                // 16: google.protobuf.FeatureSet.JsonFormat
    +	(FeatureSet_EnforceNamingStyle)(0),                        // 17: google.protobuf.FeatureSet.EnforceNamingStyle
    +	(FeatureSet_VisibilityFeature_DefaultSymbolVisibility)(0), // 18: google.protobuf.FeatureSet.VisibilityFeature.DefaultSymbolVisibility
    +	(GeneratedCodeInfo_Annotation_Semantic)(0),                // 19: google.protobuf.GeneratedCodeInfo.Annotation.Semantic
    +	(*FileDescriptorSet)(nil),                                 // 20: google.protobuf.FileDescriptorSet
    +	(*FileDescriptorProto)(nil),                               // 21: google.protobuf.FileDescriptorProto
    +	(*DescriptorProto)(nil),                                   // 22: google.protobuf.DescriptorProto
    +	(*ExtensionRangeOptions)(nil),                             // 23: google.protobuf.ExtensionRangeOptions
    +	(*FieldDescriptorProto)(nil),                              // 24: google.protobuf.FieldDescriptorProto
    +	(*OneofDescriptorProto)(nil),                              // 25: google.protobuf.OneofDescriptorProto
    +	(*EnumDescriptorProto)(nil),                               // 26: google.protobuf.EnumDescriptorProto
    +	(*EnumValueDescriptorProto)(nil),                          // 27: google.protobuf.EnumValueDescriptorProto
    +	(*ServiceDescriptorProto)(nil),                            // 28: google.protobuf.ServiceDescriptorProto
    +	(*MethodDescriptorProto)(nil),                             // 29: google.protobuf.MethodDescriptorProto
    +	(*FileOptions)(nil),                                       // 30: google.protobuf.FileOptions
    +	(*MessageOptions)(nil),                                    // 31: google.protobuf.MessageOptions
    +	(*FieldOptions)(nil),                                      // 32: google.protobuf.FieldOptions
    +	(*OneofOptions)(nil),                                      // 33: google.protobuf.OneofOptions
    +	(*EnumOptions)(nil),                                       // 34: google.protobuf.EnumOptions
    +	(*EnumValueOptions)(nil),                                  // 35: google.protobuf.EnumValueOptions
    +	(*ServiceOptions)(nil),                                    // 36: google.protobuf.ServiceOptions
    +	(*MethodOptions)(nil),                                     // 37: google.protobuf.MethodOptions
    +	(*UninterpretedOption)(nil),                               // 38: google.protobuf.UninterpretedOption
    +	(*FeatureSet)(nil),                                        // 39: google.protobuf.FeatureSet
    +	(*FeatureSetDefaults)(nil),                                // 40: google.protobuf.FeatureSetDefaults
    +	(*SourceCodeInfo)(nil),                                    // 41: google.protobuf.SourceCodeInfo
    +	(*GeneratedCodeInfo)(nil),                                 // 42: google.protobuf.GeneratedCodeInfo
    +	(*DescriptorProto_ExtensionRange)(nil),                    // 43: google.protobuf.DescriptorProto.ExtensionRange
    +	(*DescriptorProto_ReservedRange)(nil),                     // 44: google.protobuf.DescriptorProto.ReservedRange
    +	(*ExtensionRangeOptions_Declaration)(nil),                 // 45: google.protobuf.ExtensionRangeOptions.Declaration
    +	(*EnumDescriptorProto_EnumReservedRange)(nil),             // 46: google.protobuf.EnumDescriptorProto.EnumReservedRange
    +	(*FieldOptions_EditionDefault)(nil),                       // 47: google.protobuf.FieldOptions.EditionDefault
    +	(*FieldOptions_FeatureSupport)(nil),                       // 48: google.protobuf.FieldOptions.FeatureSupport
    +	(*UninterpretedOption_NamePart)(nil),                      // 49: google.protobuf.UninterpretedOption.NamePart
    +	(*FeatureSet_VisibilityFeature)(nil),                      // 50: google.protobuf.FeatureSet.VisibilityFeature
    +	(*FeatureSetDefaults_FeatureSetEditionDefault)(nil),       // 51: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault
    +	(*SourceCodeInfo_Location)(nil),                           // 52: google.protobuf.SourceCodeInfo.Location
    +	(*GeneratedCodeInfo_Annotation)(nil),                      // 53: google.protobuf.GeneratedCodeInfo.Annotation
     }
     var file_google_protobuf_descriptor_proto_depIdxs = []int32{
    -	18, // 0: google.protobuf.FileDescriptorSet.file:type_name -> google.protobuf.FileDescriptorProto
    -	19, // 1: google.protobuf.FileDescriptorProto.message_type:type_name -> google.protobuf.DescriptorProto
    -	23, // 2: google.protobuf.FileDescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto
    -	25, // 3: google.protobuf.FileDescriptorProto.service:type_name -> google.protobuf.ServiceDescriptorProto
    -	21, // 4: google.protobuf.FileDescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto
    -	27, // 5: google.protobuf.FileDescriptorProto.options:type_name -> google.protobuf.FileOptions
    -	38, // 6: google.protobuf.FileDescriptorProto.source_code_info:type_name -> google.protobuf.SourceCodeInfo
    +	21, // 0: google.protobuf.FileDescriptorSet.file:type_name -> google.protobuf.FileDescriptorProto
    +	22, // 1: google.protobuf.FileDescriptorProto.message_type:type_name -> google.protobuf.DescriptorProto
    +	26, // 2: google.protobuf.FileDescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto
    +	28, // 3: google.protobuf.FileDescriptorProto.service:type_name -> google.protobuf.ServiceDescriptorProto
    +	24, // 4: google.protobuf.FileDescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto
    +	30, // 5: google.protobuf.FileDescriptorProto.options:type_name -> google.protobuf.FileOptions
    +	41, // 6: google.protobuf.FileDescriptorProto.source_code_info:type_name -> google.protobuf.SourceCodeInfo
     	0,  // 7: google.protobuf.FileDescriptorProto.edition:type_name -> google.protobuf.Edition
    -	21, // 8: google.protobuf.DescriptorProto.field:type_name -> google.protobuf.FieldDescriptorProto
    -	21, // 9: google.protobuf.DescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto
    -	19, // 10: google.protobuf.DescriptorProto.nested_type:type_name -> google.protobuf.DescriptorProto
    -	23, // 11: google.protobuf.DescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto
    -	40, // 12: google.protobuf.DescriptorProto.extension_range:type_name -> google.protobuf.DescriptorProto.ExtensionRange
    -	22, // 13: google.protobuf.DescriptorProto.oneof_decl:type_name -> google.protobuf.OneofDescriptorProto
    -	28, // 14: google.protobuf.DescriptorProto.options:type_name -> google.protobuf.MessageOptions
    -	41, // 15: google.protobuf.DescriptorProto.reserved_range:type_name -> google.protobuf.DescriptorProto.ReservedRange
    -	35, // 16: google.protobuf.ExtensionRangeOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
    -	42, // 17: google.protobuf.ExtensionRangeOptions.declaration:type_name -> google.protobuf.ExtensionRangeOptions.Declaration
    -	36, // 18: google.protobuf.ExtensionRangeOptions.features:type_name -> google.protobuf.FeatureSet
    -	1,  // 19: google.protobuf.ExtensionRangeOptions.verification:type_name -> google.protobuf.ExtensionRangeOptions.VerificationState
    -	3,  // 20: google.protobuf.FieldDescriptorProto.label:type_name -> google.protobuf.FieldDescriptorProto.Label
    -	2,  // 21: google.protobuf.FieldDescriptorProto.type:type_name -> google.protobuf.FieldDescriptorProto.Type
    -	29, // 22: google.protobuf.FieldDescriptorProto.options:type_name -> google.protobuf.FieldOptions
    -	30, // 23: google.protobuf.OneofDescriptorProto.options:type_name -> google.protobuf.OneofOptions
    -	24, // 24: google.protobuf.EnumDescriptorProto.value:type_name -> google.protobuf.EnumValueDescriptorProto
    -	31, // 25: google.protobuf.EnumDescriptorProto.options:type_name -> google.protobuf.EnumOptions
    -	43, // 26: google.protobuf.EnumDescriptorProto.reserved_range:type_name -> google.protobuf.EnumDescriptorProto.EnumReservedRange
    -	32, // 27: google.protobuf.EnumValueDescriptorProto.options:type_name -> google.protobuf.EnumValueOptions
    -	26, // 28: google.protobuf.ServiceDescriptorProto.method:type_name -> google.protobuf.MethodDescriptorProto
    -	33, // 29: google.protobuf.ServiceDescriptorProto.options:type_name -> google.protobuf.ServiceOptions
    -	34, // 30: google.protobuf.MethodDescriptorProto.options:type_name -> google.protobuf.MethodOptions
    -	4,  // 31: google.protobuf.FileOptions.optimize_for:type_name -> google.protobuf.FileOptions.OptimizeMode
    -	36, // 32: google.protobuf.FileOptions.features:type_name -> google.protobuf.FeatureSet
    -	35, // 33: google.protobuf.FileOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
    -	36, // 34: google.protobuf.MessageOptions.features:type_name -> google.protobuf.FeatureSet
    -	35, // 35: google.protobuf.MessageOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
    -	5,  // 36: google.protobuf.FieldOptions.ctype:type_name -> google.protobuf.FieldOptions.CType
    -	6,  // 37: google.protobuf.FieldOptions.jstype:type_name -> google.protobuf.FieldOptions.JSType
    -	7,  // 38: google.protobuf.FieldOptions.retention:type_name -> google.protobuf.FieldOptions.OptionRetention
    -	8,  // 39: google.protobuf.FieldOptions.targets:type_name -> google.protobuf.FieldOptions.OptionTargetType
    -	44, // 40: google.protobuf.FieldOptions.edition_defaults:type_name -> google.protobuf.FieldOptions.EditionDefault
    -	36, // 41: google.protobuf.FieldOptions.features:type_name -> google.protobuf.FeatureSet
    -	45, // 42: google.protobuf.FieldOptions.feature_support:type_name -> google.protobuf.FieldOptions.FeatureSupport
    -	35, // 43: google.protobuf.FieldOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
    -	36, // 44: google.protobuf.OneofOptions.features:type_name -> google.protobuf.FeatureSet
    -	35, // 45: google.protobuf.OneofOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
    -	36, // 46: google.protobuf.EnumOptions.features:type_name -> google.protobuf.FeatureSet
    -	35, // 47: google.protobuf.EnumOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
    -	36, // 48: google.protobuf.EnumValueOptions.features:type_name -> google.protobuf.FeatureSet
    -	45, // 49: google.protobuf.EnumValueOptions.feature_support:type_name -> google.protobuf.FieldOptions.FeatureSupport
    -	35, // 50: google.protobuf.EnumValueOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
    -	36, // 51: google.protobuf.ServiceOptions.features:type_name -> google.protobuf.FeatureSet
    -	35, // 52: google.protobuf.ServiceOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
    -	9,  // 53: google.protobuf.MethodOptions.idempotency_level:type_name -> google.protobuf.MethodOptions.IdempotencyLevel
    -	36, // 54: google.protobuf.MethodOptions.features:type_name -> google.protobuf.FeatureSet
    -	35, // 55: google.protobuf.MethodOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
    -	46, // 56: google.protobuf.UninterpretedOption.name:type_name -> google.protobuf.UninterpretedOption.NamePart
    -	10, // 57: google.protobuf.FeatureSet.field_presence:type_name -> google.protobuf.FeatureSet.FieldPresence
    -	11, // 58: google.protobuf.FeatureSet.enum_type:type_name -> google.protobuf.FeatureSet.EnumType
    -	12, // 59: google.protobuf.FeatureSet.repeated_field_encoding:type_name -> google.protobuf.FeatureSet.RepeatedFieldEncoding
    -	13, // 60: google.protobuf.FeatureSet.utf8_validation:type_name -> google.protobuf.FeatureSet.Utf8Validation
    -	14, // 61: google.protobuf.FeatureSet.message_encoding:type_name -> google.protobuf.FeatureSet.MessageEncoding
    -	15, // 62: google.protobuf.FeatureSet.json_format:type_name -> google.protobuf.FeatureSet.JsonFormat
    -	47, // 63: google.protobuf.FeatureSetDefaults.defaults:type_name -> google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault
    -	0,  // 64: google.protobuf.FeatureSetDefaults.minimum_edition:type_name -> google.protobuf.Edition
    -	0,  // 65: google.protobuf.FeatureSetDefaults.maximum_edition:type_name -> google.protobuf.Edition
    -	48, // 66: google.protobuf.SourceCodeInfo.location:type_name -> google.protobuf.SourceCodeInfo.Location
    -	49, // 67: google.protobuf.GeneratedCodeInfo.annotation:type_name -> google.protobuf.GeneratedCodeInfo.Annotation
    -	20, // 68: google.protobuf.DescriptorProto.ExtensionRange.options:type_name -> google.protobuf.ExtensionRangeOptions
    -	0,  // 69: google.protobuf.FieldOptions.EditionDefault.edition:type_name -> google.protobuf.Edition
    -	0,  // 70: google.protobuf.FieldOptions.FeatureSupport.edition_introduced:type_name -> google.protobuf.Edition
    -	0,  // 71: google.protobuf.FieldOptions.FeatureSupport.edition_deprecated:type_name -> google.protobuf.Edition
    -	0,  // 72: google.protobuf.FieldOptions.FeatureSupport.edition_removed:type_name -> google.protobuf.Edition
    -	0,  // 73: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.edition:type_name -> google.protobuf.Edition
    -	36, // 74: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.overridable_features:type_name -> google.protobuf.FeatureSet
    -	36, // 75: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.fixed_features:type_name -> google.protobuf.FeatureSet
    -	16, // 76: google.protobuf.GeneratedCodeInfo.Annotation.semantic:type_name -> google.protobuf.GeneratedCodeInfo.Annotation.Semantic
    -	77, // [77:77] is the sub-list for method output_type
    -	77, // [77:77] is the sub-list for method input_type
    -	77, // [77:77] is the sub-list for extension type_name
    -	77, // [77:77] is the sub-list for extension extendee
    -	0,  // [0:77] is the sub-list for field type_name
    +	24, // 8: google.protobuf.DescriptorProto.field:type_name -> google.protobuf.FieldDescriptorProto
    +	24, // 9: google.protobuf.DescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto
    +	22, // 10: google.protobuf.DescriptorProto.nested_type:type_name -> google.protobuf.DescriptorProto
    +	26, // 11: google.protobuf.DescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto
    +	43, // 12: google.protobuf.DescriptorProto.extension_range:type_name -> google.protobuf.DescriptorProto.ExtensionRange
    +	25, // 13: google.protobuf.DescriptorProto.oneof_decl:type_name -> google.protobuf.OneofDescriptorProto
    +	31, // 14: google.protobuf.DescriptorProto.options:type_name -> google.protobuf.MessageOptions
    +	44, // 15: google.protobuf.DescriptorProto.reserved_range:type_name -> google.protobuf.DescriptorProto.ReservedRange
    +	1,  // 16: google.protobuf.DescriptorProto.visibility:type_name -> google.protobuf.SymbolVisibility
    +	38, // 17: google.protobuf.ExtensionRangeOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
    +	45, // 18: google.protobuf.ExtensionRangeOptions.declaration:type_name -> google.protobuf.ExtensionRangeOptions.Declaration
    +	39, // 19: google.protobuf.ExtensionRangeOptions.features:type_name -> google.protobuf.FeatureSet
    +	2,  // 20: google.protobuf.ExtensionRangeOptions.verification:type_name -> google.protobuf.ExtensionRangeOptions.VerificationState
    +	4,  // 21: google.protobuf.FieldDescriptorProto.label:type_name -> google.protobuf.FieldDescriptorProto.Label
    +	3,  // 22: google.protobuf.FieldDescriptorProto.type:type_name -> google.protobuf.FieldDescriptorProto.Type
    +	32, // 23: google.protobuf.FieldDescriptorProto.options:type_name -> google.protobuf.FieldOptions
    +	33, // 24: google.protobuf.OneofDescriptorProto.options:type_name -> google.protobuf.OneofOptions
    +	27, // 25: google.protobuf.EnumDescriptorProto.value:type_name -> google.protobuf.EnumValueDescriptorProto
    +	34, // 26: google.protobuf.EnumDescriptorProto.options:type_name -> google.protobuf.EnumOptions
    +	46, // 27: google.protobuf.EnumDescriptorProto.reserved_range:type_name -> google.protobuf.EnumDescriptorProto.EnumReservedRange
    +	1,  // 28: google.protobuf.EnumDescriptorProto.visibility:type_name -> google.protobuf.SymbolVisibility
    +	35, // 29: google.protobuf.EnumValueDescriptorProto.options:type_name -> google.protobuf.EnumValueOptions
    +	29, // 30: google.protobuf.ServiceDescriptorProto.method:type_name -> google.protobuf.MethodDescriptorProto
    +	36, // 31: google.protobuf.ServiceDescriptorProto.options:type_name -> google.protobuf.ServiceOptions
    +	37, // 32: google.protobuf.MethodDescriptorProto.options:type_name -> google.protobuf.MethodOptions
    +	5,  // 33: google.protobuf.FileOptions.optimize_for:type_name -> google.protobuf.FileOptions.OptimizeMode
    +	39, // 34: google.protobuf.FileOptions.features:type_name -> google.protobuf.FeatureSet
    +	38, // 35: google.protobuf.FileOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
    +	39, // 36: google.protobuf.MessageOptions.features:type_name -> google.protobuf.FeatureSet
    +	38, // 37: google.protobuf.MessageOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
    +	6,  // 38: google.protobuf.FieldOptions.ctype:type_name -> google.protobuf.FieldOptions.CType
    +	7,  // 39: google.protobuf.FieldOptions.jstype:type_name -> google.protobuf.FieldOptions.JSType
    +	8,  // 40: google.protobuf.FieldOptions.retention:type_name -> google.protobuf.FieldOptions.OptionRetention
    +	9,  // 41: google.protobuf.FieldOptions.targets:type_name -> google.protobuf.FieldOptions.OptionTargetType
    +	47, // 42: google.protobuf.FieldOptions.edition_defaults:type_name -> google.protobuf.FieldOptions.EditionDefault
    +	39, // 43: google.protobuf.FieldOptions.features:type_name -> google.protobuf.FeatureSet
    +	48, // 44: google.protobuf.FieldOptions.feature_support:type_name -> google.protobuf.FieldOptions.FeatureSupport
    +	38, // 45: google.protobuf.FieldOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
    +	39, // 46: google.protobuf.OneofOptions.features:type_name -> google.protobuf.FeatureSet
    +	38, // 47: google.protobuf.OneofOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
    +	39, // 48: google.protobuf.EnumOptions.features:type_name -> google.protobuf.FeatureSet
    +	38, // 49: google.protobuf.EnumOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
    +	39, // 50: google.protobuf.EnumValueOptions.features:type_name -> google.protobuf.FeatureSet
    +	48, // 51: google.protobuf.EnumValueOptions.feature_support:type_name -> google.protobuf.FieldOptions.FeatureSupport
    +	38, // 52: google.protobuf.EnumValueOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
    +	39, // 53: google.protobuf.ServiceOptions.features:type_name -> google.protobuf.FeatureSet
    +	38, // 54: google.protobuf.ServiceOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
    +	10, // 55: google.protobuf.MethodOptions.idempotency_level:type_name -> google.protobuf.MethodOptions.IdempotencyLevel
    +	39, // 56: google.protobuf.MethodOptions.features:type_name -> google.protobuf.FeatureSet
    +	38, // 57: google.protobuf.MethodOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
    +	49, // 58: google.protobuf.UninterpretedOption.name:type_name -> google.protobuf.UninterpretedOption.NamePart
    +	11, // 59: google.protobuf.FeatureSet.field_presence:type_name -> google.protobuf.FeatureSet.FieldPresence
    +	12, // 60: google.protobuf.FeatureSet.enum_type:type_name -> google.protobuf.FeatureSet.EnumType
    +	13, // 61: google.protobuf.FeatureSet.repeated_field_encoding:type_name -> google.protobuf.FeatureSet.RepeatedFieldEncoding
    +	14, // 62: google.protobuf.FeatureSet.utf8_validation:type_name -> google.protobuf.FeatureSet.Utf8Validation
    +	15, // 63: google.protobuf.FeatureSet.message_encoding:type_name -> google.protobuf.FeatureSet.MessageEncoding
    +	16, // 64: google.protobuf.FeatureSet.json_format:type_name -> google.protobuf.FeatureSet.JsonFormat
    +	17, // 65: google.protobuf.FeatureSet.enforce_naming_style:type_name -> google.protobuf.FeatureSet.EnforceNamingStyle
    +	18, // 66: google.protobuf.FeatureSet.default_symbol_visibility:type_name -> google.protobuf.FeatureSet.VisibilityFeature.DefaultSymbolVisibility
    +	51, // 67: google.protobuf.FeatureSetDefaults.defaults:type_name -> google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault
    +	0,  // 68: google.protobuf.FeatureSetDefaults.minimum_edition:type_name -> google.protobuf.Edition
    +	0,  // 69: google.protobuf.FeatureSetDefaults.maximum_edition:type_name -> google.protobuf.Edition
    +	52, // 70: google.protobuf.SourceCodeInfo.location:type_name -> google.protobuf.SourceCodeInfo.Location
    +	53, // 71: google.protobuf.GeneratedCodeInfo.annotation:type_name -> google.protobuf.GeneratedCodeInfo.Annotation
    +	23, // 72: google.protobuf.DescriptorProto.ExtensionRange.options:type_name -> google.protobuf.ExtensionRangeOptions
    +	0,  // 73: google.protobuf.FieldOptions.EditionDefault.edition:type_name -> google.protobuf.Edition
    +	0,  // 74: google.protobuf.FieldOptions.FeatureSupport.edition_introduced:type_name -> google.protobuf.Edition
    +	0,  // 75: google.protobuf.FieldOptions.FeatureSupport.edition_deprecated:type_name -> google.protobuf.Edition
    +	0,  // 76: google.protobuf.FieldOptions.FeatureSupport.edition_removed:type_name -> google.protobuf.Edition
    +	0,  // 77: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.edition:type_name -> google.protobuf.Edition
    +	39, // 78: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.overridable_features:type_name -> google.protobuf.FeatureSet
    +	39, // 79: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.fixed_features:type_name -> google.protobuf.FeatureSet
    +	19, // 80: google.protobuf.GeneratedCodeInfo.Annotation.semantic:type_name -> google.protobuf.GeneratedCodeInfo.Annotation.Semantic
    +	81, // [81:81] is the sub-list for method output_type
    +	81, // [81:81] is the sub-list for method input_type
    +	81, // [81:81] is the sub-list for extension type_name
    +	81, // [81:81] is the sub-list for extension extendee
    +	0,  // [0:81] is the sub-list for field type_name
     }
     
     func init() { file_google_protobuf_descriptor_proto_init() }
    @@ -5294,8 +5227,8 @@ func file_google_protobuf_descriptor_proto_init() {
     		File: protoimpl.DescBuilder{
     			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
     			RawDescriptor: unsafe.Slice(unsafe.StringData(file_google_protobuf_descriptor_proto_rawDesc), len(file_google_protobuf_descriptor_proto_rawDesc)),
    -			NumEnums:      17,
    -			NumMessages:   33,
    +			NumEnums:      20,
    +			NumMessages:   34,
     			NumExtensions: 0,
     			NumServices:   0,
     		},
    diff --git a/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go b/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go
    index 28d24bad7..37e712b6b 100644
    --- a/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go
    +++ b/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go
    @@ -228,63 +228,29 @@ var (
     
     var File_google_protobuf_go_features_proto protoreflect.FileDescriptor
     
    -var file_google_protobuf_go_features_proto_rawDesc = string([]byte{
    -	0x0a, 0x21, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
    -	0x66, 0x2f, 0x67, 0x6f, 0x5f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x2e, 0x70, 0x72,
    -	0x6f, 0x74, 0x6f, 0x12, 0x02, 0x70, 0x62, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f,
    -	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70,
    -	0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xab, 0x05, 0x0a, 0x0a, 0x47, 0x6f,
    -	0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0xbe, 0x01, 0x0a, 0x1a, 0x6c, 0x65, 0x67,
    -	0x61, 0x63, 0x79, 0x5f, 0x75, 0x6e, 0x6d, 0x61, 0x72, 0x73, 0x68, 0x61, 0x6c, 0x5f, 0x6a, 0x73,
    -	0x6f, 0x6e, 0x5f, 0x65, 0x6e, 0x75, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x42, 0x80, 0x01,
    -	0x88, 0x01, 0x01, 0x98, 0x01, 0x06, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x09, 0x12, 0x04, 0x74, 0x72,
    -	0x75, 0x65, 0x18, 0x84, 0x07, 0xa2, 0x01, 0x0a, 0x12, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x18,
    -	0xe7, 0x07, 0xb2, 0x01, 0x5b, 0x08, 0xe8, 0x07, 0x10, 0xe8, 0x07, 0x1a, 0x53, 0x54, 0x68, 0x65,
    -	0x20, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x20, 0x55, 0x6e, 0x6d, 0x61, 0x72, 0x73, 0x68, 0x61,
    -	0x6c, 0x4a, 0x53, 0x4f, 0x4e, 0x20, 0x41, 0x50, 0x49, 0x20, 0x69, 0x73, 0x20, 0x64, 0x65, 0x70,
    -	0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x20, 0x61, 0x6e, 0x64, 0x20, 0x77, 0x69, 0x6c, 0x6c,
    -	0x20, 0x62, 0x65, 0x20, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x64, 0x20, 0x69, 0x6e, 0x20, 0x61,
    -	0x20, 0x66, 0x75, 0x74, 0x75, 0x72, 0x65, 0x20, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x2e,
    -	0x52, 0x17, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x55, 0x6e, 0x6d, 0x61, 0x72, 0x73, 0x68, 0x61,
    -	0x6c, 0x4a, 0x73, 0x6f, 0x6e, 0x45, 0x6e, 0x75, 0x6d, 0x12, 0x74, 0x0a, 0x09, 0x61, 0x70, 0x69,
    -	0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x17, 0x2e, 0x70,
    -	0x62, 0x2e, 0x47, 0x6f, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x2e, 0x41, 0x50, 0x49,
    -	0x4c, 0x65, 0x76, 0x65, 0x6c, 0x42, 0x3e, 0x88, 0x01, 0x01, 0x98, 0x01, 0x03, 0x98, 0x01, 0x01,
    -	0xa2, 0x01, 0x1a, 0x12, 0x15, 0x41, 0x50, 0x49, 0x5f, 0x4c, 0x45, 0x56, 0x45, 0x4c, 0x5f, 0x55,
    -	0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x18, 0x84, 0x07, 0xa2, 0x01, 0x0f,
    -	0x12, 0x0a, 0x41, 0x50, 0x49, 0x5f, 0x4f, 0x50, 0x41, 0x51, 0x55, 0x45, 0x18, 0xe9, 0x07, 0xb2,
    -	0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x08, 0x61, 0x70, 0x69, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12,
    -	0x7c, 0x0a, 0x11, 0x73, 0x74, 0x72, 0x69, 0x70, 0x5f, 0x65, 0x6e, 0x75, 0x6d, 0x5f, 0x70, 0x72,
    -	0x65, 0x66, 0x69, 0x78, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1e, 0x2e, 0x70, 0x62, 0x2e,
    -	0x47, 0x6f, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x70,
    -	0x45, 0x6e, 0x75, 0x6d, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x42, 0x30, 0x88, 0x01, 0x01, 0x98,
    -	0x01, 0x06, 0x98, 0x01, 0x07, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x1b, 0x12, 0x16, 0x53, 0x54, 0x52,
    -	0x49, 0x50, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x5f, 0x4b,
    -	0x45, 0x45, 0x50, 0x18, 0x84, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe9, 0x07, 0x52, 0x0f, 0x73, 0x74,
    -	0x72, 0x69, 0x70, 0x45, 0x6e, 0x75, 0x6d, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x22, 0x53, 0x0a,
    -	0x08, 0x41, 0x50, 0x49, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x19, 0x0a, 0x15, 0x41, 0x50, 0x49,
    -	0x5f, 0x4c, 0x45, 0x56, 0x45, 0x4c, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49,
    -	0x45, 0x44, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x41, 0x50, 0x49, 0x5f, 0x4f, 0x50, 0x45, 0x4e,
    -	0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x41, 0x50, 0x49, 0x5f, 0x48, 0x59, 0x42, 0x52, 0x49, 0x44,
    -	0x10, 0x02, 0x12, 0x0e, 0x0a, 0x0a, 0x41, 0x50, 0x49, 0x5f, 0x4f, 0x50, 0x41, 0x51, 0x55, 0x45,
    -	0x10, 0x03, 0x22, 0x92, 0x01, 0x0a, 0x0f, 0x53, 0x74, 0x72, 0x69, 0x70, 0x45, 0x6e, 0x75, 0x6d,
    -	0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x21, 0x0a, 0x1d, 0x53, 0x54, 0x52, 0x49, 0x50, 0x5f,
    -	0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x5f, 0x55, 0x4e, 0x53, 0x50,
    -	0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x1a, 0x0a, 0x16, 0x53, 0x54, 0x52,
    -	0x49, 0x50, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x5f, 0x4b,
    -	0x45, 0x45, 0x50, 0x10, 0x01, 0x12, 0x23, 0x0a, 0x1f, 0x53, 0x54, 0x52, 0x49, 0x50, 0x5f, 0x45,
    -	0x4e, 0x55, 0x4d, 0x5f, 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x5f, 0x47, 0x45, 0x4e, 0x45, 0x52,
    -	0x41, 0x54, 0x45, 0x5f, 0x42, 0x4f, 0x54, 0x48, 0x10, 0x02, 0x12, 0x1b, 0x0a, 0x17, 0x53, 0x54,
    -	0x52, 0x49, 0x50, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x5f,
    -	0x53, 0x54, 0x52, 0x49, 0x50, 0x10, 0x03, 0x3a, 0x3c, 0x0a, 0x02, 0x67, 0x6f, 0x12, 0x1b, 0x2e,
    -	0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
    -	0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x18, 0xea, 0x07, 0x20, 0x01, 0x28,
    -	0x0b, 0x32, 0x0e, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x6f, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65,
    -	0x73, 0x52, 0x02, 0x67, 0x6f, 0x42, 0x2f, 0x5a, 0x2d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
    -	0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f,
    -	0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x67, 0x6f, 0x66, 0x65, 0x61, 0x74,
    -	0x75, 0x72, 0x65, 0x73, 0x70, 0x62,
    -})
    +const file_google_protobuf_go_features_proto_rawDesc = "" +
    +	"\n" +
    +	"!google/protobuf/go_features.proto\x12\x02pb\x1a google/protobuf/descriptor.proto\"\xab\x05\n" +
    +	"\n" +
    +	"GoFeatures\x12\xbe\x01\n" +
    +	"\x1alegacy_unmarshal_json_enum\x18\x01 \x01(\bB\x80\x01\x88\x01\x01\x98\x01\x06\x98\x01\x01\xa2\x01\t\x12\x04true\x18\x84\a\xa2\x01\n" +
    +	"\x12\x05false\x18\xe7\a\xb2\x01[\b\xe8\a\x10\xe8\a\x1aSThe legacy UnmarshalJSON API is deprecated and will be removed in a future edition.R\x17legacyUnmarshalJsonEnum\x12t\n" +
    +	"\tapi_level\x18\x02 \x01(\x0e2\x17.pb.GoFeatures.APILevelB>\x88\x01\x01\x98\x01\x03\x98\x01\x01\xa2\x01\x1a\x12\x15API_LEVEL_UNSPECIFIED\x18\x84\a\xa2\x01\x0f\x12\n" +
    +	"API_OPAQUE\x18\xe9\a\xb2\x01\x03\b\xe8\aR\bapiLevel\x12|\n" +
    +	"\x11strip_enum_prefix\x18\x03 \x01(\x0e2\x1e.pb.GoFeatures.StripEnumPrefixB0\x88\x01\x01\x98\x01\x06\x98\x01\a\x98\x01\x01\xa2\x01\x1b\x12\x16STRIP_ENUM_PREFIX_KEEP\x18\x84\a\xb2\x01\x03\b\xe9\aR\x0fstripEnumPrefix\"S\n" +
    +	"\bAPILevel\x12\x19\n" +
    +	"\x15API_LEVEL_UNSPECIFIED\x10\x00\x12\f\n" +
    +	"\bAPI_OPEN\x10\x01\x12\x0e\n" +
    +	"\n" +
    +	"API_HYBRID\x10\x02\x12\x0e\n" +
    +	"\n" +
    +	"API_OPAQUE\x10\x03\"\x92\x01\n" +
    +	"\x0fStripEnumPrefix\x12!\n" +
    +	"\x1dSTRIP_ENUM_PREFIX_UNSPECIFIED\x10\x00\x12\x1a\n" +
    +	"\x16STRIP_ENUM_PREFIX_KEEP\x10\x01\x12#\n" +
    +	"\x1fSTRIP_ENUM_PREFIX_GENERATE_BOTH\x10\x02\x12\x1b\n" +
    +	"\x17STRIP_ENUM_PREFIX_STRIP\x10\x03:<\n" +
    +	"\x02go\x12\x1b.google.protobuf.FeatureSet\x18\xea\a \x01(\v2\x0e.pb.GoFeaturesR\x02goB/Z-google.golang.org/protobuf/types/gofeaturespb"
     
     var (
     	file_google_protobuf_go_features_proto_rawDescOnce sync.Once
    diff --git a/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go b/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go
    index 497da66e9..1ff0d1494 100644
    --- a/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go
    +++ b/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go
    @@ -412,23 +412,13 @@ func (x *Any) GetValue() []byte {
     
     var File_google_protobuf_any_proto protoreflect.FileDescriptor
     
    -var file_google_protobuf_any_proto_rawDesc = string([]byte{
    -	0x0a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
    -	0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f,
    -	0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x22, 0x36, 0x0a, 0x03,
    -	0x41, 0x6e, 0x79, 0x12, 0x19, 0x0a, 0x08, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x75, 0x72, 0x6c, 0x18,
    -	0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x74, 0x79, 0x70, 0x65, 0x55, 0x72, 0x6c, 0x12, 0x14,
    -	0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76,
    -	0x61, 0x6c, 0x75, 0x65, 0x42, 0x76, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
    -	0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x08, 0x41, 0x6e, 0x79,
    -	0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
    -	0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f,
    -	0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x2f,
    -	0x61, 0x6e, 0x79, 0x70, 0x62, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f,
    -	0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65,
    -	0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72,
    -	0x6f, 0x74, 0x6f, 0x33,
    -})
    +const file_google_protobuf_any_proto_rawDesc = "" +
    +	"\n" +
    +	"\x19google/protobuf/any.proto\x12\x0fgoogle.protobuf\"6\n" +
    +	"\x03Any\x12\x19\n" +
    +	"\btype_url\x18\x01 \x01(\tR\atypeUrl\x12\x14\n" +
    +	"\x05value\x18\x02 \x01(\fR\x05valueBv\n" +
    +	"\x13com.google.protobufB\bAnyProtoP\x01Z,google.golang.org/protobuf/types/known/anypb\xa2\x02\x03GPB\xaa\x02\x1eGoogle.Protobuf.WellKnownTypesb\x06proto3"
     
     var (
     	file_google_protobuf_any_proto_rawDescOnce sync.Once
    diff --git a/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go b/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go
    index 193880d18..ca2e7b38f 100644
    --- a/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go
    +++ b/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go
    @@ -289,24 +289,13 @@ func (x *Duration) GetNanos() int32 {
     
     var File_google_protobuf_duration_proto protoreflect.FileDescriptor
     
    -var file_google_protobuf_duration_proto_rawDesc = string([]byte{
    -	0x0a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
    -	0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
    -	0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
    -	0x66, 0x22, 0x3a, 0x0a, 0x08, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a,
    -	0x07, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07,
    -	0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x6e, 0x61, 0x6e, 0x6f, 0x73,
    -	0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x6e, 0x61, 0x6e, 0x6f, 0x73, 0x42, 0x83, 0x01,
    -	0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
    -	0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x0d, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50,
    -	0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x31, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67,
    -	0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
    -	0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x2f, 0x64,
    -	0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47,
    -	0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74,
    -	0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79,
    -	0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
    -})
    +const file_google_protobuf_duration_proto_rawDesc = "" +
    +	"\n" +
    +	"\x1egoogle/protobuf/duration.proto\x12\x0fgoogle.protobuf\":\n" +
    +	"\bDuration\x12\x18\n" +
    +	"\aseconds\x18\x01 \x01(\x03R\aseconds\x12\x14\n" +
    +	"\x05nanos\x18\x02 \x01(\x05R\x05nanosB\x83\x01\n" +
    +	"\x13com.google.protobufB\rDurationProtoP\x01Z1google.golang.org/protobuf/types/known/durationpb\xf8\x01\x01\xa2\x02\x03GPB\xaa\x02\x1eGoogle.Protobuf.WellKnownTypesb\x06proto3"
     
     var (
     	file_google_protobuf_duration_proto_rawDescOnce sync.Once
    diff --git a/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go b/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go
    index a5b8657c4..1d7ee3b47 100644
    --- a/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go
    +++ b/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go
    @@ -86,20 +86,12 @@ func (*Empty) Descriptor() ([]byte, []int) {
     
     var File_google_protobuf_empty_proto protoreflect.FileDescriptor
     
    -var file_google_protobuf_empty_proto_rawDesc = string([]byte{
    -	0x0a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
    -	0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f, 0x67,
    -	0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x22, 0x07,
    -	0x0a, 0x05, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x42, 0x7d, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67,
    -	0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x0a,
    -	0x45, 0x6d, 0x70, 0x74, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2e, 0x67, 0x6f,
    -	0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f,
    -	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x6b,
    -	0x6e, 0x6f, 0x77, 0x6e, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2,
    -	0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50,
    -	0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77,
    -	0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
    -})
    +const file_google_protobuf_empty_proto_rawDesc = "" +
    +	"\n" +
    +	"\x1bgoogle/protobuf/empty.proto\x12\x0fgoogle.protobuf\"\a\n" +
    +	"\x05EmptyB}\n" +
    +	"\x13com.google.protobufB\n" +
    +	"EmptyProtoP\x01Z.google.golang.org/protobuf/types/known/emptypb\xf8\x01\x01\xa2\x02\x03GPB\xaa\x02\x1eGoogle.Protobuf.WellKnownTypesb\x06proto3"
     
     var (
     	file_google_protobuf_empty_proto_rawDescOnce sync.Once
    diff --git a/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go b/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go
    index 041feb0f3..91ee89a5c 100644
    --- a/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go
    +++ b/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go
    @@ -504,23 +504,12 @@ func (x *FieldMask) GetPaths() []string {
     
     var File_google_protobuf_field_mask_proto protoreflect.FileDescriptor
     
    -var file_google_protobuf_field_mask_proto_rawDesc = string([]byte{
    -	0x0a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
    -	0x66, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x2e, 0x70, 0x72, 0x6f,
    -	0x74, 0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
    -	0x62, 0x75, 0x66, 0x22, 0x21, 0x0a, 0x09, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b,
    -	0x12, 0x14, 0x0a, 0x05, 0x70, 0x61, 0x74, 0x68, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52,
    -	0x05, 0x70, 0x61, 0x74, 0x68, 0x73, 0x42, 0x85, 0x01, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67,
    -	0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x0e,
    -	0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01,
    -	0x5a, 0x32, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e,
    -	0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70,
    -	0x65, 0x73, 0x2f, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x6d, 0x61,
    -	0x73, 0x6b, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e,
    -	0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
    -	0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06,
    -	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
    -})
    +const file_google_protobuf_field_mask_proto_rawDesc = "" +
    +	"\n" +
    +	" google/protobuf/field_mask.proto\x12\x0fgoogle.protobuf\"!\n" +
    +	"\tFieldMask\x12\x14\n" +
    +	"\x05paths\x18\x01 \x03(\tR\x05pathsB\x85\x01\n" +
    +	"\x13com.google.protobufB\x0eFieldMaskProtoP\x01Z2google.golang.org/protobuf/types/known/fieldmaskpb\xf8\x01\x01\xa2\x02\x03GPB\xaa\x02\x1eGoogle.Protobuf.WellKnownTypesb\x06proto3"
     
     var (
     	file_google_protobuf_field_mask_proto_rawDescOnce sync.Once
    diff --git a/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go b/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go
    index ecdd31ab5..30411b728 100644
    --- a/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go
    +++ b/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go
    @@ -672,55 +672,31 @@ func (x *ListValue) GetValues() []*Value {
     
     var File_google_protobuf_struct_proto protoreflect.FileDescriptor
     
    -var file_google_protobuf_struct_proto_rawDesc = string([]byte{
    -	0x0a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
    -	0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f,
    -	0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x22,
    -	0x98, 0x01, 0x0a, 0x06, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x12, 0x3b, 0x0a, 0x06, 0x66, 0x69,
    -	0x65, 0x6c, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f,
    -	0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72,
    -	0x75, 0x63, 0x74, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52,
    -	0x06, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x1a, 0x51, 0x0a, 0x0b, 0x46, 0x69, 0x65, 0x6c, 0x64,
    -	0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20,
    -	0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2c, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75,
    -	0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
    -	0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52,
    -	0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xb2, 0x02, 0x0a, 0x05, 0x56,
    -	0x61, 0x6c, 0x75, 0x65, 0x12, 0x3b, 0x0a, 0x0a, 0x6e, 0x75, 0x6c, 0x6c, 0x5f, 0x76, 0x61, 0x6c,
    -	0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
    -	0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4e, 0x75, 0x6c, 0x6c, 0x56,
    -	0x61, 0x6c, 0x75, 0x65, 0x48, 0x00, 0x52, 0x09, 0x6e, 0x75, 0x6c, 0x6c, 0x56, 0x61, 0x6c, 0x75,
    -	0x65, 0x12, 0x23, 0x0a, 0x0c, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x5f, 0x76, 0x61, 0x6c, 0x75,
    -	0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x48, 0x00, 0x52, 0x0b, 0x6e, 0x75, 0x6d, 0x62, 0x65,
    -	0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67,
    -	0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0b,
    -	0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1f, 0x0a, 0x0a, 0x62,
    -	0x6f, 0x6f, 0x6c, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x48,
    -	0x00, 0x52, 0x09, 0x62, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x3c, 0x0a, 0x0c,
    -	0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20, 0x01,
    -	0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
    -	0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x48, 0x00, 0x52, 0x0b, 0x73,
    -	0x74, 0x72, 0x75, 0x63, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x3b, 0x0a, 0x0a, 0x6c, 0x69,
    -	0x73, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a,
    -	0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
    -	0x2e, 0x4c, 0x69, 0x73, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x48, 0x00, 0x52, 0x09, 0x6c, 0x69,
    -	0x73, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x06, 0x0a, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x22,
    -	0x3b, 0x0a, 0x09, 0x4c, 0x69, 0x73, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x2e, 0x0a, 0x06,
    -	0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67,
    -	0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x56,
    -	0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x2a, 0x1b, 0x0a, 0x09,
    -	0x4e, 0x75, 0x6c, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x0e, 0x0a, 0x0a, 0x4e, 0x55, 0x4c,
    -	0x4c, 0x5f, 0x56, 0x41, 0x4c, 0x55, 0x45, 0x10, 0x00, 0x42, 0x7f, 0x0a, 0x13, 0x63, 0x6f, 0x6d,
    -	0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
    -	0x42, 0x0b, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a,
    -	0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f,
    -	0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65,
    -	0x73, 0x2f, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x70, 0x62,
    -	0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67,
    -	0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c, 0x6c,
    -	0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74,
    -	0x6f, 0x33,
    -})
    +const file_google_protobuf_struct_proto_rawDesc = "" +
    +	"\n" +
    +	"\x1cgoogle/protobuf/struct.proto\x12\x0fgoogle.protobuf\"\x98\x01\n" +
    +	"\x06Struct\x12;\n" +
    +	"\x06fields\x18\x01 \x03(\v2#.google.protobuf.Struct.FieldsEntryR\x06fields\x1aQ\n" +
    +	"\vFieldsEntry\x12\x10\n" +
    +	"\x03key\x18\x01 \x01(\tR\x03key\x12,\n" +
    +	"\x05value\x18\x02 \x01(\v2\x16.google.protobuf.ValueR\x05value:\x028\x01\"\xb2\x02\n" +
    +	"\x05Value\x12;\n" +
    +	"\n" +
    +	"null_value\x18\x01 \x01(\x0e2\x1a.google.protobuf.NullValueH\x00R\tnullValue\x12#\n" +
    +	"\fnumber_value\x18\x02 \x01(\x01H\x00R\vnumberValue\x12#\n" +
    +	"\fstring_value\x18\x03 \x01(\tH\x00R\vstringValue\x12\x1f\n" +
    +	"\n" +
    +	"bool_value\x18\x04 \x01(\bH\x00R\tboolValue\x12<\n" +
    +	"\fstruct_value\x18\x05 \x01(\v2\x17.google.protobuf.StructH\x00R\vstructValue\x12;\n" +
    +	"\n" +
    +	"list_value\x18\x06 \x01(\v2\x1a.google.protobuf.ListValueH\x00R\tlistValueB\x06\n" +
    +	"\x04kind\";\n" +
    +	"\tListValue\x12.\n" +
    +	"\x06values\x18\x01 \x03(\v2\x16.google.protobuf.ValueR\x06values*\x1b\n" +
    +	"\tNullValue\x12\x0e\n" +
    +	"\n" +
    +	"NULL_VALUE\x10\x00B\x7f\n" +
    +	"\x13com.google.protobufB\vStructProtoP\x01Z/google.golang.org/protobuf/types/known/structpb\xf8\x01\x01\xa2\x02\x03GPB\xaa\x02\x1eGoogle.Protobuf.WellKnownTypesb\x06proto3"
     
     var (
     	file_google_protobuf_struct_proto_rawDescOnce sync.Once
    diff --git a/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go b/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go
    index 00ac835c0..484c21fd5 100644
    --- a/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go
    +++ b/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go
    @@ -172,13 +172,14 @@ import (
     // ) to obtain a formatter capable of generating timestamps in this format.
     type Timestamp struct {
     	state protoimpl.MessageState `protogen:"open.v1"`
    -	// Represents seconds of UTC time since Unix epoch
    -	// 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to
    -	// 9999-12-31T23:59:59Z inclusive.
    +	// Represents seconds of UTC time since Unix epoch 1970-01-01T00:00:00Z. Must
    +	// be between -315576000000 and 315576000000 inclusive (which corresponds to
    +	// 0001-01-01T00:00:00Z to 9999-12-31T23:59:59Z).
     	Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"`
    -	// Non-negative fractions of a second at nanosecond resolution. Negative
    -	// second values with fractions must still have non-negative nanos values
    -	// that count forward in time. Must be from 0 to 999,999,999
    +	// Non-negative fractions of a second at nanosecond resolution. This field is
    +	// the nanosecond portion of the duration, not an alternative to seconds.
    +	// Negative second values with fractions must still have non-negative nanos
    +	// values that count forward in time. Must be between 0 and 999,999,999
     	// inclusive.
     	Nanos         int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"`
     	unknownFields protoimpl.UnknownFields
    @@ -298,24 +299,13 @@ func (x *Timestamp) GetNanos() int32 {
     
     var File_google_protobuf_timestamp_proto protoreflect.FileDescriptor
     
    -var file_google_protobuf_timestamp_proto_rawDesc = string([]byte{
    -	0x0a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
    -	0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74,
    -	0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
    -	0x75, 0x66, 0x22, 0x3b, 0x0a, 0x09, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12,
    -	0x18, 0x0a, 0x07, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03,
    -	0x52, 0x07, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x6e, 0x61, 0x6e,
    -	0x6f, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x6e, 0x61, 0x6e, 0x6f, 0x73, 0x42,
    -	0x85, 0x01, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
    -	0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x0e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61,
    -	0x6d, 0x70, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x32, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
    -	0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f,
    -	0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x6b, 0x6e, 0x6f, 0x77,
    -	0x6e, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x70, 0x62, 0xf8, 0x01, 0x01,
    -	0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
    -	0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f,
    -	0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
    -})
    +const file_google_protobuf_timestamp_proto_rawDesc = "" +
    +	"\n" +
    +	"\x1fgoogle/protobuf/timestamp.proto\x12\x0fgoogle.protobuf\";\n" +
    +	"\tTimestamp\x12\x18\n" +
    +	"\aseconds\x18\x01 \x01(\x03R\aseconds\x12\x14\n" +
    +	"\x05nanos\x18\x02 \x01(\x05R\x05nanosB\x85\x01\n" +
    +	"\x13com.google.protobufB\x0eTimestampProtoP\x01Z2google.golang.org/protobuf/types/known/timestamppb\xf8\x01\x01\xa2\x02\x03GPB\xaa\x02\x1eGoogle.Protobuf.WellKnownTypesb\x06proto3"
     
     var (
     	file_google_protobuf_timestamp_proto_rawDescOnce sync.Once
    diff --git a/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go b/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go
    index 5de530106..b7c2d0607 100644
    --- a/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go
    +++ b/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go
    @@ -28,10 +28,17 @@
     // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
     // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
     //
    -// Wrappers for primitive (non-message) types. These types are useful
    -// for embedding primitives in the `google.protobuf.Any` type and for places
    -// where we need to distinguish between the absence of a primitive
    -// typed field and its default value.
    +// Wrappers for primitive (non-message) types. These types were needed
    +// for legacy reasons and are not recommended for use in new APIs.
    +//
    +// Historically these wrappers were useful to have presence on proto3 primitive
    +// fields, but proto3 syntax has been updated to support the `optional` keyword.
    +// Using that keyword is now the strongly preferred way to add presence to
    +// proto3 primitive fields.
    +//
    +// A secondary usecase was to embed primitives in the `google.protobuf.Any`
    +// type: it is now recommended that you embed your value in your own wrapper
    +// message which can be specifically documented.
     //
     // These wrappers have no meaningful use within repeated fields as they lack
     // the ability to detect presence on individual elements.
    @@ -54,6 +61,9 @@ import (
     // Wrapper message for `double`.
     //
     // The JSON representation for `DoubleValue` is JSON number.
    +//
    +// Not recommended for use in new APIs, but still useful for legacy APIs and
    +// has no plan to be removed.
     type DoubleValue struct {
     	state protoimpl.MessageState `protogen:"open.v1"`
     	// The double value.
    @@ -107,6 +117,9 @@ func (x *DoubleValue) GetValue() float64 {
     // Wrapper message for `float`.
     //
     // The JSON representation for `FloatValue` is JSON number.
    +//
    +// Not recommended for use in new APIs, but still useful for legacy APIs and
    +// has no plan to be removed.
     type FloatValue struct {
     	state protoimpl.MessageState `protogen:"open.v1"`
     	// The float value.
    @@ -160,6 +173,9 @@ func (x *FloatValue) GetValue() float32 {
     // Wrapper message for `int64`.
     //
     // The JSON representation for `Int64Value` is JSON string.
    +//
    +// Not recommended for use in new APIs, but still useful for legacy APIs and
    +// has no plan to be removed.
     type Int64Value struct {
     	state protoimpl.MessageState `protogen:"open.v1"`
     	// The int64 value.
    @@ -213,6 +229,9 @@ func (x *Int64Value) GetValue() int64 {
     // Wrapper message for `uint64`.
     //
     // The JSON representation for `UInt64Value` is JSON string.
    +//
    +// Not recommended for use in new APIs, but still useful for legacy APIs and
    +// has no plan to be removed.
     type UInt64Value struct {
     	state protoimpl.MessageState `protogen:"open.v1"`
     	// The uint64 value.
    @@ -266,6 +285,9 @@ func (x *UInt64Value) GetValue() uint64 {
     // Wrapper message for `int32`.
     //
     // The JSON representation for `Int32Value` is JSON number.
    +//
    +// Not recommended for use in new APIs, but still useful for legacy APIs and
    +// has no plan to be removed.
     type Int32Value struct {
     	state protoimpl.MessageState `protogen:"open.v1"`
     	// The int32 value.
    @@ -319,6 +341,9 @@ func (x *Int32Value) GetValue() int32 {
     // Wrapper message for `uint32`.
     //
     // The JSON representation for `UInt32Value` is JSON number.
    +//
    +// Not recommended for use in new APIs, but still useful for legacy APIs and
    +// has no plan to be removed.
     type UInt32Value struct {
     	state protoimpl.MessageState `protogen:"open.v1"`
     	// The uint32 value.
    @@ -372,6 +397,9 @@ func (x *UInt32Value) GetValue() uint32 {
     // Wrapper message for `bool`.
     //
     // The JSON representation for `BoolValue` is JSON `true` and `false`.
    +//
    +// Not recommended for use in new APIs, but still useful for legacy APIs and
    +// has no plan to be removed.
     type BoolValue struct {
     	state protoimpl.MessageState `protogen:"open.v1"`
     	// The bool value.
    @@ -425,6 +453,9 @@ func (x *BoolValue) GetValue() bool {
     // Wrapper message for `string`.
     //
     // The JSON representation for `StringValue` is JSON string.
    +//
    +// Not recommended for use in new APIs, but still useful for legacy APIs and
    +// has no plan to be removed.
     type StringValue struct {
     	state protoimpl.MessageState `protogen:"open.v1"`
     	// The string value.
    @@ -478,6 +509,9 @@ func (x *StringValue) GetValue() string {
     // Wrapper message for `bytes`.
     //
     // The JSON representation for `BytesValue` is JSON string.
    +//
    +// Not recommended for use in new APIs, but still useful for legacy APIs and
    +// has no plan to be removed.
     type BytesValue struct {
     	state protoimpl.MessageState `protogen:"open.v1"`
     	// The bytes value.
    @@ -530,41 +564,32 @@ func (x *BytesValue) GetValue() []byte {
     
     var File_google_protobuf_wrappers_proto protoreflect.FileDescriptor
     
    -var file_google_protobuf_wrappers_proto_rawDesc = string([]byte{
    -	0x0a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
    -	0x66, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
    -	0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
    -	0x66, 0x22, 0x23, 0x0a, 0x0b, 0x44, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65,
    -	0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52,
    -	0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x22, 0x0a, 0x0a, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x56,
    -	0x61, 0x6c, 0x75, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20,
    -	0x01, 0x28, 0x02, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x22, 0x0a, 0x0a, 0x49, 0x6e,
    -	0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75,
    -	0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x23,
    -	0x0a, 0x0b, 0x55, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x14, 0x0a,
    -	0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x76, 0x61,
    -	0x6c, 0x75, 0x65, 0x22, 0x22, 0x0a, 0x0a, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75,
    -	0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05,
    -	0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x23, 0x0a, 0x0b, 0x55, 0x49, 0x6e, 0x74, 0x33,
    -	0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18,
    -	0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x21, 0x0a, 0x09,
    -	0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c,
    -	0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22,
    -	0x23, 0x0a, 0x0b, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x14,
    -	0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76,
    -	0x61, 0x6c, 0x75, 0x65, 0x22, 0x22, 0x0a, 0x0a, 0x42, 0x79, 0x74, 0x65, 0x73, 0x56, 0x61, 0x6c,
    -	0x75, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28,
    -	0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x83, 0x01, 0x0a, 0x13, 0x63, 0x6f, 0x6d,
    -	0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
    -	0x42, 0x0d, 0x57, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50,
    -	0x01, 0x5a, 0x31, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67,
    -	0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79,
    -	0x70, 0x65, 0x73, 0x2f, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65,
    -	0x72, 0x73, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e,
    -	0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
    -	0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06,
    -	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
    -})
    +const file_google_protobuf_wrappers_proto_rawDesc = "" +
    +	"\n" +
    +	"\x1egoogle/protobuf/wrappers.proto\x12\x0fgoogle.protobuf\"#\n" +
    +	"\vDoubleValue\x12\x14\n" +
    +	"\x05value\x18\x01 \x01(\x01R\x05value\"\"\n" +
    +	"\n" +
    +	"FloatValue\x12\x14\n" +
    +	"\x05value\x18\x01 \x01(\x02R\x05value\"\"\n" +
    +	"\n" +
    +	"Int64Value\x12\x14\n" +
    +	"\x05value\x18\x01 \x01(\x03R\x05value\"#\n" +
    +	"\vUInt64Value\x12\x14\n" +
    +	"\x05value\x18\x01 \x01(\x04R\x05value\"\"\n" +
    +	"\n" +
    +	"Int32Value\x12\x14\n" +
    +	"\x05value\x18\x01 \x01(\x05R\x05value\"#\n" +
    +	"\vUInt32Value\x12\x14\n" +
    +	"\x05value\x18\x01 \x01(\rR\x05value\"!\n" +
    +	"\tBoolValue\x12\x14\n" +
    +	"\x05value\x18\x01 \x01(\bR\x05value\"#\n" +
    +	"\vStringValue\x12\x14\n" +
    +	"\x05value\x18\x01 \x01(\tR\x05value\"\"\n" +
    +	"\n" +
    +	"BytesValue\x12\x14\n" +
    +	"\x05value\x18\x01 \x01(\fR\x05valueB\x83\x01\n" +
    +	"\x13com.google.protobufB\rWrappersProtoP\x01Z1google.golang.org/protobuf/types/known/wrapperspb\xf8\x01\x01\xa2\x02\x03GPB\xaa\x02\x1eGoogle.Protobuf.WellKnownTypesb\x06proto3"
     
     var (
     	file_google_protobuf_wrappers_proto_rawDescOnce sync.Once
    diff --git a/vendor/gopkg.in/evanphx/json-patch.v4/README.md b/vendor/gopkg.in/evanphx/json-patch.v4/README.md
    index 28e351693..86fefd5bf 100644
    --- a/vendor/gopkg.in/evanphx/json-patch.v4/README.md
    +++ b/vendor/gopkg.in/evanphx/json-patch.v4/README.md
    @@ -4,7 +4,7 @@
     well as for calculating & applying [RFC7396 JSON merge patches](https://tools.ietf.org/html/rfc7396).
     
     [![GoDoc](https://godoc.org/github.com/evanphx/json-patch?status.svg)](http://godoc.org/github.com/evanphx/json-patch)
    -[![Build Status](https://travis-ci.org/evanphx/json-patch.svg?branch=master)](https://travis-ci.org/evanphx/json-patch)
    +[![Build Status](https://github.com/evanphx/json-patch/actions/workflows/go.yml/badge.svg)](https://github.com/evanphx/json-patch/actions/workflows/go.yml)
     [![Report Card](https://goreportcard.com/badge/github.com/evanphx/json-patch)](https://goreportcard.com/report/github.com/evanphx/json-patch)
     
     # Get It!
    @@ -14,9 +14,7 @@ well as for calculating & applying [RFC7396 JSON merge patches](https://tools.ie
     go get -u github.com/evanphx/json-patch/v5
     ```
     
    -**Stable Versions**:
    -* Version 5: `go get -u gopkg.in/evanphx/json-patch.v5`
    -* Version 4: `go get -u gopkg.in/evanphx/json-patch.v4`
    +If you need version 4, use `go get -u gopkg.in/evanphx/json-patch.v4`
     
     (previous versions below `v3` are unavailable)
     
    @@ -314,4 +312,4 @@ go test -cover ./...
     ```
     
     Builds for pull requests are tested automatically 
    -using [TravisCI](https://travis-ci.org/evanphx/json-patch).
    +using [GitHub Actions](https://github.com/evanphx/json-patch/actions/workflows/go.yml).
    diff --git a/vendor/gopkg.in/evanphx/json-patch.v4/patch.go b/vendor/gopkg.in/evanphx/json-patch.v4/patch.go
    index dc2b7e51e..95136681b 100644
    --- a/vendor/gopkg.in/evanphx/json-patch.v4/patch.go
    +++ b/vendor/gopkg.in/evanphx/json-patch.v4/patch.go
    @@ -3,11 +3,10 @@ package jsonpatch
     import (
     	"bytes"
     	"encoding/json"
    +	"errors"
     	"fmt"
     	"strconv"
     	"strings"
    -
    -	"github.com/pkg/errors"
     )
     
     const (
    @@ -277,7 +276,7 @@ func (o Operation) Path() (string, error) {
     		return op, nil
     	}
     
    -	return "unknown", errors.Wrapf(ErrMissing, "operation missing path field")
    +	return "unknown", fmt.Errorf("operation missing path field: %w", ErrMissing)
     }
     
     // From reads the "from" field of the Operation.
    @@ -294,7 +293,7 @@ func (o Operation) From() (string, error) {
     		return op, nil
     	}
     
    -	return "unknown", errors.Wrapf(ErrMissing, "operation, missing from field")
    +	return "unknown", fmt.Errorf("operation, missing from field: %w", ErrMissing)
     }
     
     func (o Operation) value() *lazyNode {
    @@ -319,7 +318,7 @@ func (o Operation) ValueInterface() (interface{}, error) {
     		return v, nil
     	}
     
    -	return nil, errors.Wrapf(ErrMissing, "operation, missing value field")
    +	return nil, fmt.Errorf("operation, missing value field: %w", ErrMissing)
     }
     
     func isArray(buf []byte) bool {
    @@ -359,7 +358,7 @@ func findObject(pd *container, path string) (container, string) {
     
     		next, ok := doc.get(decodePatchKey(part))
     
    -		if next == nil || ok != nil {
    +		if next == nil || ok != nil || next.raw == nil {
     			return nil, ""
     		}
     
    @@ -398,7 +397,7 @@ func (d *partialDoc) get(key string) (*lazyNode, error) {
     func (d *partialDoc) remove(key string) error {
     	_, ok := (*d)[key]
     	if !ok {
    -		return errors.Wrapf(ErrMissing, "Unable to remove nonexistent key: %s", key)
    +		return fmt.Errorf("Unable to remove nonexistent key: %s: %w", key, ErrMissing)
     	}
     
     	delete(*d, key)
    @@ -415,10 +414,10 @@ func (d *partialArray) set(key string, val *lazyNode) error {
     
     	if idx < 0 {
     		if !SupportNegativeIndices {
    -			return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx)
    +			return fmt.Errorf("Unable to access invalid index: %d: %w", idx, ErrInvalidIndex)
     		}
     		if idx < -len(*d) {
    -			return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx)
    +			return fmt.Errorf("Unable to access invalid index: %d: %w", idx, ErrInvalidIndex)
     		}
     		idx += len(*d)
     	}
    @@ -435,7 +434,7 @@ func (d *partialArray) add(key string, val *lazyNode) error {
     
     	idx, err := strconv.Atoi(key)
     	if err != nil {
    -		return errors.Wrapf(err, "value was not a proper array index: '%s'", key)
    +		return fmt.Errorf("value was not a proper array index: '%s': %w", key, err)
     	}
     
     	sz := len(*d) + 1
    @@ -445,15 +444,15 @@ func (d *partialArray) add(key string, val *lazyNode) error {
     	cur := *d
     
     	if idx >= len(ary) {
    -		return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx)
    +		return fmt.Errorf("Unable to access invalid index: %d: %w", idx, ErrInvalidIndex)
     	}
     
     	if idx < 0 {
     		if !SupportNegativeIndices {
    -			return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx)
    +			return fmt.Errorf("Unable to access invalid index: %d: %w", idx, ErrInvalidIndex)
     		}
     		if idx < -len(ary) {
    -			return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx)
    +			return fmt.Errorf("Unable to access invalid index: %d: %w", idx, ErrInvalidIndex)
     		}
     		idx += len(ary)
     	}
    @@ -475,16 +474,16 @@ func (d *partialArray) get(key string) (*lazyNode, error) {
     
     	if idx < 0 {
     		if !SupportNegativeIndices {
    -			return nil, errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx)
    +			return nil, fmt.Errorf("Unable to access invalid index: %d: %w", idx, ErrInvalidIndex)
     		}
     		if idx < -len(*d) {
    -			return nil, errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx)
    +			return nil, fmt.Errorf("Unable to access invalid index: %d: %w", idx, ErrInvalidIndex)
     		}
     		idx += len(*d)
     	}
     
     	if idx >= len(*d) {
    -		return nil, errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx)
    +		return nil, fmt.Errorf("Unable to access invalid index: %d: %w", idx, ErrInvalidIndex)
     	}
     
     	return (*d)[idx], nil
    @@ -499,15 +498,15 @@ func (d *partialArray) remove(key string) error {
     	cur := *d
     
     	if idx >= len(cur) {
    -		return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx)
    +		return fmt.Errorf("Unable to access invalid index: %d: %w", idx, ErrInvalidIndex)
     	}
     
     	if idx < 0 {
     		if !SupportNegativeIndices {
    -			return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx)
    +			return fmt.Errorf("Unable to access invalid index: %d: %w", idx, ErrInvalidIndex)
     		}
     		if idx < -len(cur) {
    -			return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx)
    +			return fmt.Errorf("Unable to access invalid index: %d: %w", idx, ErrInvalidIndex)
     		}
     		idx += len(cur)
     	}
    @@ -525,18 +524,18 @@ func (d *partialArray) remove(key string) error {
     func (p Patch) add(doc *container, op Operation) error {
     	path, err := op.Path()
     	if err != nil {
    -		return errors.Wrapf(ErrMissing, "add operation failed to decode path")
    +		return fmt.Errorf("add operation failed to decode path: %w", ErrMissing)
     	}
     
     	con, key := findObject(doc, path)
     
     	if con == nil {
    -		return errors.Wrapf(ErrMissing, "add operation does not apply: doc is missing path: \"%s\"", path)
    +		return fmt.Errorf("add operation does not apply: doc is missing path: \"%s\": %w", path, ErrMissing)
     	}
     
     	err = con.add(key, op.value())
     	if err != nil {
    -		return errors.Wrapf(err, "error in add for path: '%s'", path)
    +		return fmt.Errorf("error in add for path: '%s': %w", path, err)
     	}
     
     	return nil
    @@ -545,18 +544,18 @@ func (p Patch) add(doc *container, op Operation) error {
     func (p Patch) remove(doc *container, op Operation) error {
     	path, err := op.Path()
     	if err != nil {
    -		return errors.Wrapf(ErrMissing, "remove operation failed to decode path")
    +		return fmt.Errorf("remove operation failed to decode path: %w", ErrMissing)
     	}
     
     	con, key := findObject(doc, path)
     
     	if con == nil {
    -		return errors.Wrapf(ErrMissing, "remove operation does not apply: doc is missing path: \"%s\"", path)
    +		return fmt.Errorf("remove operation does not apply: doc is missing path: \"%s\": %w", path, ErrMissing)
     	}
     
     	err = con.remove(key)
     	if err != nil {
    -		return errors.Wrapf(err, "error in remove for path: '%s'", path)
    +		return fmt.Errorf("error in remove for path: '%s': %w", path, err)
     	}
     
     	return nil
    @@ -565,7 +564,7 @@ func (p Patch) remove(doc *container, op Operation) error {
     func (p Patch) replace(doc *container, op Operation) error {
     	path, err := op.Path()
     	if err != nil {
    -		return errors.Wrapf(err, "replace operation failed to decode path")
    +		return fmt.Errorf("replace operation failed to decode path: %w", err)
     	}
     
     	if path == "" {
    @@ -574,7 +573,7 @@ func (p Patch) replace(doc *container, op Operation) error {
     		if val.which == eRaw {
     			if !val.tryDoc() {
     				if !val.tryAry() {
    -					return errors.Wrapf(err, "replace operation value must be object or array")
    +					return fmt.Errorf("replace operation value must be object or array: %w", err)
     				}
     			}
     		}
    @@ -585,7 +584,7 @@ func (p Patch) replace(doc *container, op Operation) error {
     		case eDoc:
     			*doc = &val.doc
     		case eRaw:
    -			return errors.Wrapf(err, "replace operation hit impossible case")
    +			return fmt.Errorf("replace operation hit impossible case: %w", err)
     		}
     
     		return nil
    @@ -594,17 +593,17 @@ func (p Patch) replace(doc *container, op Operation) error {
     	con, key := findObject(doc, path)
     
     	if con == nil {
    -		return errors.Wrapf(ErrMissing, "replace operation does not apply: doc is missing path: %s", path)
    +		return fmt.Errorf("replace operation does not apply: doc is missing path: %s: %w", path, ErrMissing)
     	}
     
     	_, ok := con.get(key)
     	if ok != nil {
    -		return errors.Wrapf(ErrMissing, "replace operation does not apply: doc is missing key: %s", path)
    +		return fmt.Errorf("replace operation does not apply: doc is missing key: %s: %w", path, ErrMissing)
     	}
     
     	err = con.set(key, op.value())
     	if err != nil {
    -		return errors.Wrapf(err, "error in remove for path: '%s'", path)
    +		return fmt.Errorf("error in remove for path: '%s': %w", path, err)
     	}
     
     	return nil
    @@ -613,39 +612,39 @@ func (p Patch) replace(doc *container, op Operation) error {
     func (p Patch) move(doc *container, op Operation) error {
     	from, err := op.From()
     	if err != nil {
    -		return errors.Wrapf(err, "move operation failed to decode from")
    +		return fmt.Errorf("move operation failed to decode from: %w", err)
     	}
     
     	con, key := findObject(doc, from)
     
     	if con == nil {
    -		return errors.Wrapf(ErrMissing, "move operation does not apply: doc is missing from path: %s", from)
    +		return fmt.Errorf("move operation does not apply: doc is missing from path: %s: %w", from, ErrMissing)
     	}
     
     	val, err := con.get(key)
     	if err != nil {
    -		return errors.Wrapf(err, "error in move for path: '%s'", key)
    +		return fmt.Errorf("error in move for path: '%s': %w", key, err)
     	}
     
     	err = con.remove(key)
     	if err != nil {
    -		return errors.Wrapf(err, "error in move for path: '%s'", key)
    +		return fmt.Errorf("error in move for path: '%s': %w", key, err)
     	}
     
     	path, err := op.Path()
     	if err != nil {
    -		return errors.Wrapf(err, "move operation failed to decode path")
    +		return fmt.Errorf("move operation failed to decode path: %w", err)
     	}
     
     	con, key = findObject(doc, path)
     
     	if con == nil {
    -		return errors.Wrapf(ErrMissing, "move operation does not apply: doc is missing destination path: %s", path)
    +		return fmt.Errorf("move operation does not apply: doc is missing destination path: %s: %w", path, ErrMissing)
     	}
     
     	err = con.add(key, val)
     	if err != nil {
    -		return errors.Wrapf(err, "error in move for path: '%s'", path)
    +		return fmt.Errorf("error in move for path: '%s': %w", path, err)
     	}
     
     	return nil
    @@ -654,7 +653,7 @@ func (p Patch) move(doc *container, op Operation) error {
     func (p Patch) test(doc *container, op Operation) error {
     	path, err := op.Path()
     	if err != nil {
    -		return errors.Wrapf(err, "test operation failed to decode path")
    +		return fmt.Errorf("test operation failed to decode path: %w", err)
     	}
     
     	if path == "" {
    @@ -673,67 +672,67 @@ func (p Patch) test(doc *container, op Operation) error {
     			return nil
     		}
     
    -		return errors.Wrapf(ErrTestFailed, "testing value %s failed", path)
    +		return fmt.Errorf("testing value %s failed: %w", path, ErrTestFailed)
     	}
     
     	con, key := findObject(doc, path)
     
     	if con == nil {
    -		return errors.Wrapf(ErrMissing, "test operation does not apply: is missing path: %s", path)
    +		return fmt.Errorf("test operation does not apply: is missing path: %s: %w", path, ErrMissing)
     	}
     
     	val, err := con.get(key)
     	if err != nil {
    -		return errors.Wrapf(err, "error in test for path: '%s'", path)
    +		return fmt.Errorf("error in test for path: '%s': %w", path, err)
     	}
     
     	if val == nil {
    -		if op.value().raw == nil {
    +		if op.value() == nil || op.value().raw == nil {
     			return nil
     		}
    -		return errors.Wrapf(ErrTestFailed, "testing value %s failed", path)
    +		return fmt.Errorf("testing value %s failed: %w", path, ErrTestFailed)
     	} else if op.value() == nil {
    -		return errors.Wrapf(ErrTestFailed, "testing value %s failed", path)
    +		return fmt.Errorf("testing value %s failed: %w", path, ErrTestFailed)
     	}
     
     	if val.equal(op.value()) {
     		return nil
     	}
     
    -	return errors.Wrapf(ErrTestFailed, "testing value %s failed", path)
    +	return fmt.Errorf("testing value %s failed: %w", path, ErrTestFailed)
     }
     
     func (p Patch) copy(doc *container, op Operation, accumulatedCopySize *int64) error {
     	from, err := op.From()
     	if err != nil {
    -		return errors.Wrapf(err, "copy operation failed to decode from")
    +		return fmt.Errorf("copy operation failed to decode from: %w", err)
     	}
     
     	con, key := findObject(doc, from)
     
     	if con == nil {
    -		return errors.Wrapf(ErrMissing, "copy operation does not apply: doc is missing from path: %s", from)
    +		return fmt.Errorf("copy operation does not apply: doc is missing from path: %s: %w", from, ErrMissing)
     	}
     
     	val, err := con.get(key)
     	if err != nil {
    -		return errors.Wrapf(err, "error in copy for from: '%s'", from)
    +		return fmt.Errorf("error in copy for from: '%s': %w", from, err)
     	}
     
     	path, err := op.Path()
     	if err != nil {
    -		return errors.Wrapf(ErrMissing, "copy operation failed to decode path")
    +		return fmt.Errorf("copy operation failed to decode path: %w", ErrMissing)
     	}
     
     	con, key = findObject(doc, path)
     
     	if con == nil {
    -		return errors.Wrapf(ErrMissing, "copy operation does not apply: doc is missing destination path: %s", path)
    +		return fmt.Errorf("copy operation does not apply: doc is missing destination path: %s: %w", path, ErrMissing)
     	}
     
     	valCopy, sz, err := deepCopy(val)
     	if err != nil {
    -		return errors.Wrapf(err, "error while performing deep copy")
    +		return fmt.Errorf("error while performing deep copy: %w", err)
     	}
     
     	(*accumulatedCopySize) += int64(sz)
    @@ -743,7 +742,7 @@ func (p Patch) copy(doc *container, op Operation, accumulatedCopySize *int64) er
     
     	err = con.add(key, valCopy)
     	if err != nil {
    -		return errors.Wrapf(err, "error while adding value during copy")
    +		return fmt.Errorf("error while adding value during copy: %w", err)
     	}
     
     	return nil
    diff --git a/vendor/gopkg.in/gcfg.v1/LICENSE b/vendor/gopkg.in/gcfg.v1/LICENSE
    new file mode 100644
    index 000000000..87a5cede3
    --- /dev/null
    +++ b/vendor/gopkg.in/gcfg.v1/LICENSE
    @@ -0,0 +1,28 @@
    +Copyright (c) 2012 Péter Surányi. Portions Copyright (c) 2009 The Go
    +Authors. All rights reserved.
    +
    +Redistribution and use in source and binary forms, with or without
    +modification, are permitted provided that the following conditions are
    +met:
    +
    +   * Redistributions of source code must retain the above copyright
    +notice, this list of conditions and the following disclaimer.
    +   * Redistributions in binary form must reproduce the above
    +copyright notice, this list of conditions and the following disclaimer
    +in the documentation and/or other materials provided with the
    +distribution.
    +   * Neither the name of Google Inc. nor the names of its
    +contributors may be used to endorse or promote products derived from
    +this software without specific prior written permission.
    +
    +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
    +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
    +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
    +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
    +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
    +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
    +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
    +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
    +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
    +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
    +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
    diff --git a/vendor/gopkg.in/gcfg.v1/README b/vendor/gopkg.in/gcfg.v1/README
    new file mode 100644
    index 000000000..1ff233a52
    --- /dev/null
    +++ b/vendor/gopkg.in/gcfg.v1/README
    @@ -0,0 +1,4 @@
    +Gcfg reads INI-style configuration files into Go structs;
    +supports user-defined types and subsections.
    +
    +Package docs: https://godoc.org/gopkg.in/gcfg.v1
    diff --git a/vendor/gopkg.in/gcfg.v1/doc.go b/vendor/gopkg.in/gcfg.v1/doc.go
    new file mode 100644
    index 000000000..32f3e9d69
    --- /dev/null
    +++ b/vendor/gopkg.in/gcfg.v1/doc.go
    @@ -0,0 +1,145 @@
    +// Package gcfg reads "INI-style" text-based configuration files with
    +// "name=value" pairs grouped into sections (gcfg files).
    +//
    +// This package is still a work in progress; see the sections below for planned
    +// changes.
    +//
    +// Syntax
    +//
    +// The syntax is based on that used by git config:
    +// http://git-scm.com/docs/git-config#_syntax .
    +// There are some (planned) differences compared to the git config format:
    +//  - improve data portability:
    +//    - must be encoded in UTF-8 (for now) and must not contain the 0 byte
    +//    - include and "path" type is not supported
    +//      (path type may be implementable as a user-defined type)
    +//  - internationalization
    +//    - section and variable names can contain unicode letters, unicode digits
    +//      (as defined in http://golang.org/ref/spec#Characters ) and hyphens
    +//      (U+002D), starting with a unicode letter
    +//  - disallow potentially ambiguous or misleading definitions:
    +//    - `[sec.sub]` format is not allowed (deprecated in gitconfig)
    +//    - `[sec ""]` is not allowed
    +//      - use `[sec]` for section name "sec" and empty subsection name
    +//    - (planned) within a single file, definitions must be contiguous for each:
    +//      - section: '[secA]' -> '[secB]' -> '[secA]' is an error
    +//      - subsection: '[sec "A"]' -> '[sec "B"]' -> '[sec "A"]' is an error
    +//      - multivalued variable: 'multi=a' -> 'other=x' -> 'multi=b' is an error
    +//
    +// Data structure
    +//
    +// The functions in this package read values into a user-defined struct.
    +// Each section corresponds to a struct field in the config struct, and each
    +// variable in a section corresponds to a data field in the section struct.
    +// The mapping of each section or variable name to fields is done either based
    +// on the "gcfg" struct tag or by matching the name of the section or variable,
    +// ignoring case. In the latter case, hyphens '-' in section and variable names
    +// correspond to underscores '_' in field names.
    +// Fields must be exported; to use a section or variable name starting with a
    +// letter that is neither upper- or lower-case, prefix the field name with 'X'.
    +// (See https://code.google.com/p/go/issues/detail?id=5763#c4 .)
    +//
    +// For sections with subsections, the corresponding field in config must be a
    +// map, rather than a struct, with string keys and pointer-to-struct values.
    +// Values for subsection variables are stored in the map with the subsection
    +// name used as the map key.
    +// (Note that unlike section and variable names, subsection names are case
    +// sensitive.)
    +// When using a map, and there is a section with the same section name but
    +// without a subsection name, its values are stored with the empty string used
    +// as the key.
    +// It is possible to provide default values for subsections in the section
    +// "default-" (or by setting values in the corresponding struct
    +// field "Default_").
    +//
    +// The functions in this package panic if config is not a pointer to a struct,
    +// or when a field is not of a suitable type (either a struct or a map with
    +// string keys and pointer-to-struct values).
    +//
    +// Parsing of values
    +//
    +// The section structs in the config struct may contain single-valued or
    +// multi-valued variables. Variables of unnamed slice type (that is, a type
    +// starting with `[]`) are treated as multi-value; all others (including named
    +// slice types) are treated as single-valued variables.
    +//
    +// Single-valued variables are handled based on the type as follows.
    +// Unnamed pointer types (that is, types starting with `*`) are dereferenced,
    +// and if necessary, a new instance is allocated.
    +//
    +// For types implementing the encoding.TextUnmarshaler interface, the
    +// UnmarshalText method is used to set the value. Implementing this method is
    +// the recommended way for parsing user-defined types.
    +//
    +// For fields of string kind, the value string is assigned to the field, after
    +// unquoting and unescaping as needed.
    +// For fields of bool kind, the field is set to true if the value is "true",
    +// "yes", "on" or "1", and set to false if the value is "false", "no", "off" or
    +// "0", ignoring case. In addition, single-valued bool fields can be specified
    +// with a "blank" value (variable name without equals sign and value); in such
    +// case the value is set to true.
    +//
    +// Predefined integer types [u]int(|8|16|32|64) and big.Int are parsed as
    +// decimal or hexadecimal (if having '0x' prefix). (This is to prevent
    +// unintuitively handling zero-padded numbers as octal.) Other types having
    +// [u]int* as the underlying type, such as os.FileMode and uintptr allow
    +// decimal, hexadecimal, or octal values.
    +// Parsing mode for integer types can be overridden using the struct tag option
    +// ",int=mode" where mode is a combination of the 'd', 'h', and 'o' characters
    +// (each standing for decimal, hexadecimal, and octal, respectively.)
    +//
    +// All other types are parsed using fmt.Sscanf with the "%v" verb.
    +//
    +// For multi-valued variables, each individual value is parsed as above and
    +// appended to the slice. If the first value is specified as a "blank" value
    +// (variable name without equals sign and value), a new slice is allocated;
    +// that is any values previously set in the slice will be ignored.
    +//
    +// The types subpackage for provides helpers for parsing "enum-like" and integer
    +// types.
    +//
    +// Error handling
    +//
    +// There are 3 types of errors:
    +//
    +//  - programmer errors / panics:
    +//    - invalid configuration structure
    +//  - data errors:
    +//    - fatal errors:
    +//      - invalid configuration syntax
    +//    - warnings:
    +//      - data that doesn't belong to any part of the config structure
    +//
    +// Programmer errors trigger panics. These are should be fixed by the programmer
    +// before releasing code that uses gcfg.
    +//
    +// Data errors cause gcfg to return a non-nil error value. This includes the
    +// case when there are extra unknown key-value definitions in the configuration
    +// data (extra data).
    +// However, in some occasions it is desirable to be able to proceed in
    +// situations when the only data error is that of extra data.
    +// These errors are handled at a different (warning) priority and can be
    +// filtered out programmatically. To ignore extra data warnings, wrap the
    +// gcfg.Read*Into invocation into a call to gcfg.FatalOnly.
    +//
    +// TODO
    +//
    +// The following is a list of changes under consideration:
    +//  - documentation
    +//    - self-contained syntax documentation
    +//    - more practical examples
    +//    - move TODOs to issue tracker (eventually)
    +//  - syntax
    +//    - reconsider valid escape sequences
    +//      (gitconfig doesn't support \r in value, \t in subsection name, etc.)
    +//  - reading / parsing gcfg files
    +//    - define internal representation structure
    +//    - support multiple inputs (readers, strings, files)
    +//    - support declaring encoding (?)
    +//    - support varying fields sets for subsections (?)
    +//  - writing gcfg files
    +//  - error handling
    +//    - make error context accessible programmatically?
    +//    - limit input size?
    +//
    +package gcfg // import "gopkg.in/gcfg.v1"
    diff --git a/vendor/gopkg.in/gcfg.v1/errors.go b/vendor/gopkg.in/gcfg.v1/errors.go
    new file mode 100644
    index 000000000..83a591dac
    --- /dev/null
    +++ b/vendor/gopkg.in/gcfg.v1/errors.go
    @@ -0,0 +1,57 @@
    +package gcfg
    +
    +import warnings "gopkg.in/warnings.v0"
    +
    +// FatalOnly filters the results of a Read*Into invocation and returns only
    +// fatal errors. That is, errors (warnings) indicating data for unknown
    +// sections / variables is ignored. Example invocation:
    +//
    +//  err := gcfg.FatalOnly(gcfg.ReadFileInto(&cfg, configFile))
    +//  if err != nil {
    +//      ...
    +//
    +func FatalOnly(err error) error {
    +	return warnings.FatalOnly(err)
    +}
    +
    +func isFatal(err error) bool {
    +	_, ok := err.(extraData)
    +	return !ok
    +}
    +
    +type loc struct {
    +	section    string
    +	subsection *string
    +	variable   *string
    +}
    +
    +type extraData struct {
    +	loc
    +}
    +
    +type locErr struct {
    +	msg string
    +	loc
    +}
    +
    +func (l loc) String() string {
    +	s := "section \"" + l.section + "\""
    +	if l.subsection != nil {
    +		s += ", subsection \"" + *l.subsection + "\""
    +	}
    +	if l.variable != nil {
    +		s += ", variable \"" + *l.variable + "\""
    +	}
    +	return s
    +}
    +
    +func (e extraData) Error() string {
    +	return "can't store data at " + e.loc.String()
    +}
    +
    +func (e locErr) Error() string {
    +	return e.msg + " at " + e.loc.String()
    +}
    +
    +var _ error = extraData{}
    +var _ error = locErr{}
    diff --git a/vendor/gopkg.in/gcfg.v1/read.go b/vendor/gopkg.in/gcfg.v1/read.go
    new file mode 100644
    index 000000000..06796653c
    --- /dev/null
    +++ b/vendor/gopkg.in/gcfg.v1/read.go
    @@ -0,0 +1,257 @@
    +package gcfg
    +
    +import (
    +	"bytes"
    +	"fmt"
    +	"io"
    +	"io/ioutil"
    +	"os"
    +	"strings"
    +
    +	"gopkg.in/gcfg.v1/scanner"
    +	"gopkg.in/gcfg.v1/token"
    +	"gopkg.in/warnings.v0"
    +)
    +
    +var unescape = map[rune]rune{'\\': '\\', '"': '"', 'n': '\n', 't': '\t'}
    +var utf8Bom = []byte("\ufeff")
    +
    +// no error: invalid literals should be caught by scanner
    +func unquote(s string) string {
    +	u, q, esc := make([]rune, 0, len(s)), false, false
    +	for _, c := range s {
    +		if esc {
    +			uc, ok := unescape[c]
    +			switch {
    +			case ok:
    +				u = append(u, uc)
    +				fallthrough
    +			case !q && c == '\n':
    +				esc = false
    +				continue
    +			}
    +			panic("invalid escape sequence")
    +		}
    +		switch c {
    +		case '"':
    +			q = !q
    +		case '\\':
    +			esc = true
    +		default:
    +			u = append(u, c)
    +		}
    +	}
    +	if q {
    +		panic("missing end quote")
    +	}
    +	if esc {
    +		panic("invalid escape sequence")
    +	}
    +	return string(u)
    +}
    +
    +func readIntoPass(c *warnings.Collector, config interface{}, fset *token.FileSet,
    +	file *token.File, src []byte, subsectPass bool) error {
    +	//
    +	var s scanner.Scanner
    +	var errs scanner.ErrorList
    +	s.Init(file, src, func(p token.Position, m string) { errs.Add(p, m) }, 0)
    +	sect, sectsub := "", ""
    +	pos, tok, lit := s.Scan()
    +	errfn := func(msg string) error {
    +		return fmt.Errorf("%s: %s", fset.Position(pos), msg)
    +	}
    +	for {
    +		if errs.Len() > 0 {
    +			if err := c.Collect(errs.Err()); err != nil {
    +				return err
    +			}
    +		}
    +		switch tok {
    +		case token.EOF:
    +			return nil
    +		case token.EOL, token.COMMENT:
    +			pos, tok, lit = s.Scan()
    +		case token.LBRACK:
    +			pos, tok, lit = s.Scan()
    +			if errs.Len() > 0 {
    +				if err := c.Collect(errs.Err()); err != nil {
    +					return err
    +				}
    +			}
    +			if tok != token.IDENT {
    +				if err := c.Collect(errfn("expected section name")); err != nil {
    +					return err
    +				}
    +			}
    +			sect, sectsub = lit, ""
    +			pos, tok, lit = s.Scan()
    +			if errs.Len() > 0 {
    +				if err := c.Collect(errs.Err()); err != nil {
    +					return err
    +				}
    +			}
    +			if tok == token.STRING {
    +				sectsub = unquote(lit)
    +				if sectsub == "" {
    +					if err := c.Collect(errfn("empty subsection name")); err != nil {
    +						return err
    +					}
    +				}
    +				pos, tok, lit = s.Scan()
    +				if errs.Len() > 0 {
    +					if err := c.Collect(errs.Err()); err != nil {
    +						return err
    +					}
    +				}
    +			}
    +			if tok != token.RBRACK {
    +				if sectsub == "" {
    +					if err := c.Collect(errfn("expected subsection name or right bracket")); err != nil {
    +						return err
    +					}
    +				}
    +				if err := c.Collect(errfn("expected right bracket")); err != nil {
    +					return err
    +				}
    +			}
    +			pos, tok, lit = s.Scan()
    +			if tok != token.EOL && tok != token.EOF && tok != token.COMMENT {
    +				if err := c.Collect(errfn("expected EOL, EOF, or comment")); err != nil {
    +					return err
    +				}
    +			}
    +			// If a section/subsection header was found, ensure a
    +			// container object is created, even if there are no
    +			// variables further down.
    +			err := c.Collect(set(c, config, sect, sectsub, "", true, "", subsectPass))
    +			if err != nil {
    +				return err
    +			}
    +		case token.IDENT:
    +			if sect == "" {
    +				if err := c.Collect(errfn("expected section header")); err != nil {
    +					return err
    +				}
    +			}
    +			n := lit
    +			pos, tok, lit = s.Scan()
    +			if errs.Len() > 0 {
    +				return errs.Err()
    +			}
    +			blank, v := tok == token.EOF || tok == token.EOL || tok == token.COMMENT, ""
    +			if !blank {
    +				if tok != token.ASSIGN {
    +					if err := c.Collect(errfn("expected '='")); err != nil {
    +						return err
    +					}
    +				}
    +				pos, tok, lit = s.Scan()
    +				if errs.Len() > 0 {
    +					if err := c.Collect(errs.Err()); err != nil {
    +						return err
    +					}
    +				}
    +				if tok != token.STRING {
    +					if err := c.Collect(errfn("expected value")); err != nil {
    +						return err
    +					}
    +				}
    +				v = unquote(lit)
    +				pos, tok, lit = s.Scan()
    +				if errs.Len() > 0 {
    +					if err := c.Collect(errs.Err()); err != nil {
    +						return err
    +					}
    +				}
    +				if tok != token.EOL && tok != token.EOF && tok != token.COMMENT {
    +					if err := c.Collect(errfn("expected EOL, EOF, or comment")); err != nil {
    +						return err
    +					}
    +				}
    +			}
    +			err := set(c, config, sect, sectsub, n, blank, v, subsectPass)
    +			if err != nil {
    +				return err
    +			}
    +		default:
    +			if sect == "" {
    +				if err := c.Collect(errfn("expected section header")); err != nil {
    +					return err
    +				}
    +			}
    +			if err := c.Collect(errfn("expected section header or variable declaration")); err != nil {
    +				return err
    +			}
    +		}
    +	}
    +}
    +
    +func readInto(config interface{}, fset *token.FileSet, file *token.File,
    +	src []byte) error {
    +	//
    +	c := warnings.NewCollector(isFatal)
    +	err := readIntoPass(c, config, fset, file, src, false)
    +	if err != nil {
    +		return err
    +	}
    +	err = readIntoPass(c, config, fset, file, src, true)
    +	if err != nil {
    +		return err
    +	}
    +	return c.Done()
    +}
    +
    +// ReadInto reads gcfg formatted data from reader and sets the values into the
    +// corresponding fields in config.
    +func ReadInto(config interface{}, reader io.Reader) error {
    +	src, err := ioutil.ReadAll(reader)
    +	if err != nil {
    +		return err
    +	}
    +	fset := token.NewFileSet()
    +	file := fset.AddFile("", fset.Base(), len(src))
    +	return readInto(config, fset, file, src)
    +}
    +
    +// ReadStringInto reads gcfg formatted data from str and sets the values into
    +// the corresponding fields in config.
    +func ReadStringInto(config interface{}, str string) error {
    +	r := strings.NewReader(str)
    +	return ReadInto(config, r)
    +}
    +
    +// ReadFileInto reads gcfg formatted data from the file filename and sets the
    +// values into the corresponding fields in config.
    +//
    +// For compatibility with files created on Windows, the ReadFileInto skips a
    +// single leading UTF8 BOM sequence if it exists.
    +func ReadFileInto(config interface{}, filename string) error {
    +	f, err := os.Open(filename)
    +	if err != nil {
    +		return err
    +	}
    +	defer f.Close()
    +	src, err := ioutil.ReadAll(f)
    +	if err != nil {
    +		return err
    +	}
    +
    +	// Skips a single leading UTF8 BOM sequence if it exists.
    +	src = skipLeadingUtf8Bom(src)
    +
    +	fset := token.NewFileSet()
    +	file := fset.AddFile(filename, fset.Base(), len(src))
    +	return readInto(config, fset, file, src)
    +}
    +
    +func skipLeadingUtf8Bom(src []byte) []byte {
    +	lengthUtf8Bom := len(utf8Bom)
    +
    +	if len(src) >= lengthUtf8Bom {
    +		if bytes.Equal(src[:lengthUtf8Bom], utf8Bom) {
    +			return src[lengthUtf8Bom:]
    +		}
    +	}
    +	return src
    +}
    diff --git a/vendor/gopkg.in/gcfg.v1/scanner/errors.go b/vendor/gopkg.in/gcfg.v1/scanner/errors.go
    new file mode 100644
    index 000000000..1a3c0f656
    --- /dev/null
    +++ b/vendor/gopkg.in/gcfg.v1/scanner/errors.go
    @@ -0,0 +1,121 @@
    +// Copyright 2009 The Go Authors. All rights reserved.
    +// Use of this source code is governed by a BSD-style
    +// license that can be found in the LICENSE file.
    +
    +package scanner
    +
    +import (
    +	"fmt"
    +	"io"
    +	"sort"
    +)
    +
    +import (
    +	"gopkg.in/gcfg.v1/token"
    +)
    +
    +// In an ErrorList, an error is represented by an *Error.
    +// The position Pos, if valid, points to the beginning of
    +// the offending token, and the error condition is described
    +// by Msg.
    +//
    +type Error struct {
    +	Pos token.Position
    +	Msg string
    +}
    +
    +// Error implements the error interface.
    +func (e Error) Error() string {
    +	if e.Pos.Filename != "" || e.Pos.IsValid() {
    +		// don't print ""
    +		// TODO(gri) reconsider the semantics of Position.IsValid
    +		return e.Pos.String() + ": " + e.Msg
    +	}
    +	return e.Msg
    +}
    +
    +// ErrorList is a list of *Errors.
    +// The zero value for an ErrorList is an empty ErrorList ready to use.
    +//
    +type ErrorList []*Error
    +
    +// Add adds an Error with given position and error message to an ErrorList.
    +func (p *ErrorList) Add(pos token.Position, msg string) {
    +	*p = append(*p, &Error{pos, msg})
    +}
    +
    +// Reset resets an ErrorList to no errors.
    +func (p *ErrorList) Reset() { *p = (*p)[0:0] }
    +
    +// ErrorList implements the sort Interface.
    +func (p ErrorList) Len() int      { return len(p) }
    +func (p ErrorList) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
    +
    +func (p ErrorList) Less(i, j int) bool {
    +	e := &p[i].Pos
    +	f := &p[j].Pos
    +	if e.Filename < f.Filename {
    +		return true
    +	}
    +	if e.Filename == f.Filename {
    +		return e.Offset < f.Offset
    +	}
    +	return false
    +}
    +
    +// Sort sorts an ErrorList. *Error entries are sorted by position,
    +// other errors are sorted by error message, and before any *Error
    +// entry.
    +//
    +func (p ErrorList) Sort() {
    +	sort.Sort(p)
    +}
    +
    +// RemoveMultiples sorts an ErrorList and removes all but the first error per line.
    +func (p *ErrorList) RemoveMultiples() {
    +	sort.Sort(p)
    +	var last token.Position // initial last.Line is != any legal error line
    +	i := 0
    +	for _, e := range *p {
    +		if e.Pos.Filename != last.Filename || e.Pos.Line != last.Line {
    +			last = e.Pos
    +			(*p)[i] = e
    +			i++
    +		}
    +	}
    +	(*p) = (*p)[0:i]
    +}
    +
    +// An ErrorList implements the error interface.
    +func (p ErrorList) Error() string {
    +	switch len(p) {
    +	case 0:
    +		return "no errors"
    +	case 1:
    +		return p[0].Error()
    +	}
    +	return fmt.Sprintf("%s (and %d more errors)", p[0], len(p)-1)
    +}
    +
    +// Err returns an error equivalent to this error list.
    +// If the list is empty, Err returns nil.
    +func (p ErrorList) Err() error {
    +	if len(p) == 0 {
    +		return nil
    +	}
    +	return p
    +}
    +
    +// PrintError is a utility function that prints a list of errors to w,
    +// one error per line, if the err parameter is an ErrorList. Otherwise
    +// it prints the err string.
    +//
    +func PrintError(w io.Writer, err error) {
    +	if list, ok := err.(ErrorList); ok {
    +		for _, e := range list {
    +			fmt.Fprintf(w, "%s\n", e)
    +		}
    +	} else if err != nil {
    +		fmt.Fprintf(w, "%s\n", err)
    +	}
    +}
    diff --git a/vendor/gopkg.in/gcfg.v1/scanner/scanner.go b/vendor/gopkg.in/gcfg.v1/scanner/scanner.go
    new file mode 100644
    index 000000000..6d0eee916
    --- /dev/null
    +++ b/vendor/gopkg.in/gcfg.v1/scanner/scanner.go
    @@ -0,0 +1,342 @@
    +// Copyright 2009 The Go Authors. All rights reserved.
    +// Use of this source code is governed by a BSD-style
    +// license that can be found in the LICENSE file.
    +
    +// Package scanner implements a scanner for gcfg configuration text.
    +// It takes a []byte as source which can then be tokenized
    +// through repeated calls to the Scan method.
    +//
    +// Note that the API for the scanner package may change to accommodate new
    +// features or implementation changes in gcfg.
    +//
    +package scanner
    +
    +import (
    +	"fmt"
    +	"path/filepath"
    +	"unicode"
    +	"unicode/utf8"
    +)
    +
    +import (
    +	"gopkg.in/gcfg.v1/token"
    +)
    +
    +// An ErrorHandler may be provided to Scanner.Init. If a syntax error is
    +// encountered and a handler was installed, the handler is called with a
    +// position and an error message. The position points to the beginning of
    +// the offending token.
    +//
    +type ErrorHandler func(pos token.Position, msg string)
    +
    +// A Scanner holds the scanner's internal state while processing
    +// a given text.  It can be allocated as part of another data
    +// structure but must be initialized via Init before use.
    +//
    +type Scanner struct {
    +	// immutable state
    +	file *token.File  // source file handle
    +	dir  string       // directory portion of file.Name()
    +	src  []byte       // source
    +	err  ErrorHandler // error reporting; or nil
    +	mode Mode         // scanning mode
    +
    +	// scanning state
    +	ch         rune // current character
    +	offset     int  // character offset
    +	rdOffset   int  // reading offset (position after current character)
    +	lineOffset int  // current line offset
    +	nextVal    bool // next token is expected to be a value
    +
    +	// public state - ok to modify
    +	ErrorCount int // number of errors encountered
    +}
    +
    +// Read the next Unicode char into s.ch.
    +// s.ch < 0 means end-of-file.
    +//
    +func (s *Scanner) next() {
    +	if s.rdOffset < len(s.src) {
    +		s.offset = s.rdOffset
    +		if s.ch == '\n' {
    +			s.lineOffset = s.offset
    +			s.file.AddLine(s.offset)
    +		}
    +		r, w := rune(s.src[s.rdOffset]), 1
    +		switch {
    +		case r == 0:
    +			s.error(s.offset, "illegal character NUL")
    +		case r >= 0x80:
    +			// not ASCII
    +			r, w = utf8.DecodeRune(s.src[s.rdOffset:])
    +			if r == utf8.RuneError && w == 1 {
    +				s.error(s.offset, "illegal UTF-8 encoding")
    +			}
    +		}
    +		s.rdOffset += w
    +		s.ch = r
    +	} else {
    +		s.offset = len(s.src)
    +		if s.ch == '\n' {
    +			s.lineOffset = s.offset
    +			s.file.AddLine(s.offset)
    +		}
    +		s.ch = -1 // eof
    +	}
    +}
    +
    +// A mode value is a set of flags (or 0).
    +// They control scanner behavior.
    +//
    +type Mode uint
    +
    +const (
    +	ScanComments Mode = 1 << iota // return comments as COMMENT tokens
    +)
    +
    +// Init prepares the scanner s to tokenize the text src by setting the
    +// scanner at the beginning of src. The scanner uses the file set file
    +// for position information and it adds line information for each line.
    +// It is ok to re-use the same file when re-scanning the same file as
    +// line information which is already present is ignored. Init causes a
    +// panic if the file size does not match the src size.
    +//
    +// Calls to Scan will invoke the error handler err if they encounter a
    +// syntax error and err is not nil. Also, for each error encountered,
    +// the Scanner field ErrorCount is incremented by one. The mode parameter
    +// determines how comments are handled.
    +//
    +// Note that Init may call err if there is an error in the first character
    +// of the file.
    +//
    +func (s *Scanner) Init(file *token.File, src []byte, err ErrorHandler, mode Mode) {
    +	// Explicitly initialize all fields since a scanner may be reused.
    +	if file.Size() != len(src) {
    +		panic(fmt.Sprintf("file size (%d) does not match src len (%d)", file.Size(), len(src)))
    +	}
    +	s.file = file
    +	s.dir, _ = filepath.Split(file.Name())
    +	s.src = src
    +	s.err = err
    +	s.mode = mode
    +
    +	s.ch = ' '
    +	s.offset = 0
    +	s.rdOffset = 0
    +	s.lineOffset = 0
    +	s.ErrorCount = 0
    +	s.nextVal = false
    +
    +	s.next()
    +}
    +
    +func (s *Scanner) error(offs int, msg string) {
    +	if s.err != nil {
    +		s.err(s.file.Position(s.file.Pos(offs)), msg)
    +	}
    +	s.ErrorCount++
    +}
    +
    +func (s *Scanner) scanComment() string {
    +	// initial [;#] already consumed
    +	offs := s.offset - 1 // position of initial [;#]
    +
    +	for s.ch != '\n' && s.ch >= 0 {
    +		s.next()
    +	}
    +	return string(s.src[offs:s.offset])
    +}
    +
    +func isLetter(ch rune) bool {
    +	return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch >= 0x80 && unicode.IsLetter(ch)
    +}
    +
    +func isDigit(ch rune) bool {
    +	return '0' <= ch && ch <= '9' || ch >= 0x80 && unicode.IsDigit(ch)
    +}
    +
    +func (s *Scanner) scanIdentifier() string {
    +	offs := s.offset
    +	for isLetter(s.ch) || isDigit(s.ch) || s.ch == '-' {
    +		s.next()
    +	}
    +	return string(s.src[offs:s.offset])
    +}
    +
    +func (s *Scanner) scanEscape(val bool) {
    +	offs := s.offset
    +	ch := s.ch
    +	s.next() // always make progress
    +	switch ch {
    +	case '\\', '"':
    +		// ok
    +	case 'n', 't':
    +		if val {
    +			break // ok
    +		}
    +		fallthrough
    +	default:
    +		s.error(offs, "unknown escape sequence")
    +	}
    +}
    +
    +func (s *Scanner) scanString() string {
    +	// '"' opening already consumed
    +	offs := s.offset - 1
    +
    +	for s.ch != '"' {
    +		ch := s.ch
    +		s.next()
    +		if ch == '\n' || ch < 0 {
    +			s.error(offs, "string not terminated")
    +			break
    +		}
    +		if ch == '\\' {
    +			s.scanEscape(false)
    +		}
    +	}
    +
    +	s.next()
    +
    +	return string(s.src[offs:s.offset])
    +}
    +
    +func stripCR(b []byte) []byte {
    +	c := make([]byte, len(b))
    +	i := 0
    +	for _, ch := range b {
    +		if ch != '\r' {
    +			c[i] = ch
    +			i++
    +		}
    +	}
    +	return c[:i]
    +}
    +
    +func (s *Scanner) scanValString() string {
    +	offs := s.offset
    +
    +	hasCR := false
    +	end := offs
    +	inQuote := false
    +loop:
    +	for inQuote || s.ch >= 0 && s.ch != '\n' && s.ch != ';' && s.ch != '#' {
    +		ch := s.ch
    +		s.next()
    +		switch {
    +		case inQuote && ch == '\\':
    +			s.scanEscape(true)
    +		case !inQuote && ch == '\\':
    +			if s.ch == '\r' {
    +				hasCR = true
    +				s.next()
    +			}
    +			if s.ch != '\n' && s.ch != '"' {
    +				s.error(offs, "unquoted '\\' must be followed by new line or double quote")
    +				break loop
    +			}
    +			s.next()
    +		case ch == '"':
    +			inQuote = !inQuote
    +		case ch == '\r':
    +			hasCR = true
    +		case ch < 0 || inQuote && ch == '\n':
    +			s.error(offs, "string not terminated")
    +			break loop
    +		}
    +		if inQuote || !isWhiteSpace(ch) {
    +			end = s.offset
    +		}
    +	}
    +
    +	lit := s.src[offs:end]
    +	if hasCR {
    +		lit = stripCR(lit)
    +	}
    +
    +	return string(lit)
    +}
    +
    +func isWhiteSpace(ch rune) bool {
    +	return ch == ' ' || ch == '\t' || ch == '\r'
    +}
    +
    +func (s *Scanner) skipWhitespace() {
    +	for isWhiteSpace(s.ch) {
    +		s.next()
    +	}
    +}
    +
    +// Scan scans the next token and returns the token position, the token,
    +// and its literal string if applicable. The source end is indicated by
    +// token.EOF.
    +//
    +// If the returned token is a literal (token.IDENT, token.STRING) or
    +// token.COMMENT, the literal string has the corresponding value.
    +//
    +// If the returned token is token.ILLEGAL, the literal string is the
    +// offending character.
    +//
    +// In all other cases, Scan returns an empty literal string.
    +//
    +// For more tolerant parsing, Scan will return a valid token if
    +// possible even if a syntax error was encountered. Thus, even
    +// if the resulting token sequence contains no illegal tokens,
    +// a client may not assume that no error occurred. Instead it
    +// must check the scanner's ErrorCount or the number of calls
    +// of the error handler, if there was one installed.
    +//
    +// Scan adds line information to the file added to the file
    +// set with Init. Token positions are relative to that file
    +// and thus relative to the file set.
    +//
    +func (s *Scanner) Scan() (pos token.Pos, tok token.Token, lit string) {
    +scanAgain:
    +	s.skipWhitespace()
    +
    +	// current token start
    +	pos = s.file.Pos(s.offset)
    +
    +	// determine token value
    +	switch ch := s.ch; {
    +	case s.nextVal:
    +		lit = s.scanValString()
    +		tok = token.STRING
    +		s.nextVal = false
    +	case isLetter(ch):
    +		lit = s.scanIdentifier()
    +		tok = token.IDENT
    +	default:
    +		s.next() // always make progress
    +		switch ch {
    +		case -1:
    +			tok = token.EOF
    +		case '\n':
    +			tok = token.EOL
    +		case '"':
    +			tok = token.STRING
    +			lit = s.scanString()
    +		case '[':
    +			tok = token.LBRACK
    +		case ']':
    +			tok = token.RBRACK
    +		case ';', '#':
    +			// comment
    +			lit = s.scanComment()
    +			if s.mode&ScanComments == 0 {
    +				// skip comment
    +				goto scanAgain
    +			}
    +			tok = token.COMMENT
    +		case '=':
    +			tok = token.ASSIGN
    +			s.nextVal = true
    +		default:
    +			s.error(s.file.Offset(pos), fmt.Sprintf("illegal character %#U", ch))
    +			tok = token.ILLEGAL
    +			lit = string(ch)
    +		}
    +	}
    +
    +	return
    +}
    diff --git a/vendor/gopkg.in/gcfg.v1/set.go b/vendor/gopkg.in/gcfg.v1/set.go
    new file mode 100644
    index 000000000..73aee5003
    --- /dev/null
    +++ b/vendor/gopkg.in/gcfg.v1/set.go
    @@ -0,0 +1,329 @@
    +package gcfg
    +
    +import (
    +	"bytes"
    +	"encoding"
    +	"encoding/gob"
    +	"fmt"
    +	"math/big"
    +	"reflect"
    +	"strings"
    +	"unicode"
    +	"unicode/utf8"
    +
    +	"gopkg.in/gcfg.v1/types"
    +	"gopkg.in/warnings.v0"
    +)
    +
    +type tag struct {
    +	ident   string
    +	intMode string
    +}
    +
    +func newTag(ts string) tag {
    +	t := tag{}
    +	s := strings.Split(ts, ",")
    +	t.ident = s[0]
    +	for _, tse := range s[1:] {
    +		if strings.HasPrefix(tse, "int=") {
    +			t.intMode = tse[len("int="):]
    +		}
    +	}
    +	return t
    +}
    +
    +func fieldFold(v reflect.Value, name string) (reflect.Value, tag) {
    +	var n string
    +	r0, _ := utf8.DecodeRuneInString(name)
    +	if unicode.IsLetter(r0) && !unicode.IsLower(r0) && !unicode.IsUpper(r0) {
    +		n = "X"
    +	}
    +	n += strings.Replace(name, "-", "_", -1)
    +	f, ok := v.Type().FieldByNameFunc(func(fieldName string) bool {
    +		if !v.FieldByName(fieldName).CanSet() {
    +			return false
    +		}
    +		f, _ := v.Type().FieldByName(fieldName)
    +		t := newTag(f.Tag.Get("gcfg"))
    +		if t.ident != "" {
    +			return strings.EqualFold(t.ident, name)
    +		}
    +		return strings.EqualFold(n, fieldName)
    +	})
    +	if !ok {
    +		return reflect.Value{}, tag{}
    +	}
    +	return v.FieldByName(f.Name), newTag(f.Tag.Get("gcfg"))
    +}
    +
    +type setter func(destp interface{}, blank bool, val string, t tag) error
    +
    +var errUnsupportedType = fmt.Errorf("unsupported type")
    +var errBlankUnsupported = fmt.Errorf("blank value not supported for type")
    +
    +var setters = []setter{
    +	typeSetter, textUnmarshalerSetter, kindSetter, scanSetter,
    +}
    +
    +func textUnmarshalerSetter(d interface{}, blank bool, val string, t tag) error {
    +	dtu, ok := d.(encoding.TextUnmarshaler)
    +	if !ok {
    +		return errUnsupportedType
    +	}
    +	if blank {
    +		return errBlankUnsupported
    +	}
    +	return dtu.UnmarshalText([]byte(val))
    +}
    +
    +func boolSetter(d interface{}, blank bool, val string, t tag) error {
    +	if blank {
    +		reflect.ValueOf(d).Elem().Set(reflect.ValueOf(true))
    +		return nil
    +	}
    +	b, err := types.ParseBool(val)
    +	if err == nil {
    +		reflect.ValueOf(d).Elem().Set(reflect.ValueOf(b))
    +	}
    +	return err
    +}
    +
    +func intMode(mode string) types.IntMode {
    +	var m types.IntMode
    +	if strings.ContainsAny(mode, "dD") {
    +		m |= types.Dec
    +	}
    +	if strings.ContainsAny(mode, "hH") {
    +		m |= types.Hex
    +	}
    +	if strings.ContainsAny(mode, "oO") {
    +		m |= types.Oct
    +	}
    +	return m
    +}
    +
    +var typeModes = map[reflect.Type]types.IntMode{
    +	reflect.TypeOf(int(0)):    types.Dec | types.Hex,
    +	reflect.TypeOf(int8(0)):   types.Dec | types.Hex,
    +	reflect.TypeOf(int16(0)):  types.Dec | types.Hex,
    +	reflect.TypeOf(int32(0)):  types.Dec | types.Hex,
    +	reflect.TypeOf(int64(0)):  types.Dec | types.Hex,
    +	reflect.TypeOf(uint(0)):   types.Dec | types.Hex,
    +	reflect.TypeOf(uint8(0)):  types.Dec | types.Hex,
    +	reflect.TypeOf(uint16(0)): types.Dec | types.Hex,
    +	reflect.TypeOf(uint32(0)): types.Dec | types.Hex,
    +	reflect.TypeOf(uint64(0)): types.Dec | types.Hex,
    +	// use default mode (allow dec/hex/oct) for uintptr type
    +	reflect.TypeOf(big.Int{}): types.Dec | types.Hex,
    +}
    +
    +func intModeDefault(t reflect.Type) types.IntMode {
    +	m, ok := typeModes[t]
    +	if !ok {
    +		m = types.Dec | types.Hex | types.Oct
    +	}
    +	return m
    +}
    +
    +func intSetter(d interface{}, blank bool, val string, t tag) error {
    +	if blank {
    +		return errBlankUnsupported
    +	}
    +	mode := intMode(t.intMode)
    +	if mode == 0 {
    +		mode = intModeDefault(reflect.TypeOf(d).Elem())
    +	}
    +	return types.ParseInt(d, val, mode)
    +}
    +
    +func stringSetter(d interface{}, blank bool, val string, t tag) error {
    +	if blank {
    +		return errBlankUnsupported
    +	}
    +	dsp, ok := d.(*string)
    +	if !ok {
    +		return errUnsupportedType
    +	}
    +	*dsp = val
    +	return nil
    +}
    +
    +var kindSetters = map[reflect.Kind]setter{
    +	reflect.String:  stringSetter,
    +	reflect.Bool:    boolSetter,
    +	reflect.Int:     intSetter,
    +	reflect.Int8:    intSetter,
    +	reflect.Int16:   intSetter,
    +	reflect.Int32:   intSetter,
    +	reflect.Int64:   intSetter,
    +	reflect.Uint:    intSetter,
    +	reflect.Uint8:   intSetter,
    +	reflect.Uint16:  intSetter,
    +	reflect.Uint32:  intSetter,
    +	reflect.Uint64:  intSetter,
    +	reflect.Uintptr: intSetter,
    +}
    +
    +var typeSetters = map[reflect.Type]setter{
    +	reflect.TypeOf(big.Int{}): intSetter,
    +}
    +
    +func typeSetter(d interface{}, blank bool, val string, tt tag) error {
    +	t := reflect.ValueOf(d).Type().Elem()
    +	setter, ok := typeSetters[t]
    +	if !ok {
    +		return errUnsupportedType
    +	}
    +	return setter(d, blank, val, tt)
    +}
    +
    +func kindSetter(d interface{}, blank bool, val string, tt tag) error {
    +	k := reflect.ValueOf(d).Type().Elem().Kind()
    +	setter, ok := kindSetters[k]
    +	if !ok {
    +		return errUnsupportedType
    +	}
    +	return setter(d, blank, val, tt)
    +}
    +
    +func scanSetter(d interface{}, blank bool, val string, tt tag) error {
    +	if blank {
    +		return errBlankUnsupported
    +	}
    +	return types.ScanFully(d, val, 'v')
    +}
    +
    +func newValue(c *warnings.Collector, sect string, vCfg reflect.Value,
    +	vType reflect.Type) (reflect.Value, error) {
    +	//
    +	pv := reflect.New(vType)
    +	dfltName := "default-" + sect
    +	dfltField, _ := fieldFold(vCfg, dfltName)
    +	var err error
    +	if dfltField.IsValid() {
    +		b := bytes.NewBuffer(nil)
    +		ge := gob.NewEncoder(b)
    +		if err = c.Collect(ge.EncodeValue(dfltField)); err != nil {
    +			return pv, err
    +		}
    +		gd := gob.NewDecoder(bytes.NewReader(b.Bytes()))
    +		if err = c.Collect(gd.DecodeValue(pv.Elem())); err != nil {
    +			return pv, err
    +		}
    +	}
    +	return pv, nil
    +}
    +
    +func set(c *warnings.Collector, cfg interface{}, sect, sub, name string,
    +	blank bool, value string, subsectPass bool) error {
    +	//
    +	vPCfg := reflect.ValueOf(cfg)
    +	if vPCfg.Kind() != reflect.Ptr || vPCfg.Elem().Kind() != reflect.Struct {
    +		panic(fmt.Errorf("config must be a pointer to a struct"))
    +	}
    +	vCfg := vPCfg.Elem()
    +	vSect, _ := fieldFold(vCfg, sect)
    +	l := loc{section: sect}
    +	if !vSect.IsValid() {
    +		err := extraData{loc: l}
    +		return c.Collect(err)
    +	}
    +	isSubsect := vSect.Kind() == reflect.Map
    +	if subsectPass != isSubsect {
    +		return nil
    +	}
    +	if isSubsect {
    +		l.subsection = &sub
    +		vst := vSect.Type()
    +		if vst.Key().Kind() != reflect.String ||
    +			vst.Elem().Kind() != reflect.Ptr ||
    +			vst.Elem().Elem().Kind() != reflect.Struct {
    +			panic(fmt.Errorf("map field for section must have string keys and "+
    +				" pointer-to-struct values: section %q", sect))
    +		}
    +		if vSect.IsNil() {
    +			vSect.Set(reflect.MakeMap(vst))
    +		}
    +		k := reflect.ValueOf(sub)
    +		pv := vSect.MapIndex(k)
    +		if !pv.IsValid() {
    +			vType := vSect.Type().Elem().Elem()
    +			var err error
    +			if pv, err = newValue(c, sect, vCfg, vType); err != nil {
    +				return err
    +			}
    +			vSect.SetMapIndex(k, pv)
    +		}
    +		vSect = pv.Elem()
    +	} else if vSect.Kind() != reflect.Struct {
    +		panic(fmt.Errorf("field for section must be a map or a struct: "+
    +			"section %q", sect))
    +	} else if sub != "" {
    +		return c.Collect(extraData{loc: l})
    +	}
    +	// Empty name is a special value, meaning that only the
    +	// section/subsection object is to be created, with no values set.
    +	if name == "" {
    +		return nil
    +	}
    +	vVar, t := fieldFold(vSect, name)
    +	l.variable = &name
    +	if !vVar.IsValid() {
    +		return c.Collect(extraData{loc: l})
    +	}
    +	// vVal is either single-valued var, or newly allocated value within multi-valued var
    +	var vVal reflect.Value
    +	// multi-value if unnamed slice type
    +	isMulti := vVar.Type().Name() == "" && vVar.Kind() == reflect.Slice ||
    +		vVar.Type().Name() == "" && vVar.Kind() == reflect.Ptr && vVar.Type().Elem().Name() == "" && vVar.Type().Elem().Kind() == reflect.Slice
    +	if isMulti && vVar.Kind() == reflect.Ptr {
    +		if vVar.IsNil() {
    +			vVar.Set(reflect.New(vVar.Type().Elem()))
    +		}
    +		vVar = vVar.Elem()
    +	}
    +	if isMulti && blank {
    +		vVar.Set(reflect.Zero(vVar.Type()))
    +		return nil
    +	}
    +	if isMulti {
    +		vVal = reflect.New(vVar.Type().Elem()).Elem()
    +	} else {
    +		vVal = vVar
    +	}
    +	isDeref := vVal.Type().Name() == "" && vVal.Type().Kind() == reflect.Ptr
    +	isNew := isDeref && vVal.IsNil()
    +	// vAddr is address of value to set (dereferenced & allocated as needed)
    +	var vAddr reflect.Value
    +	switch {
    +	case isNew:
    +		vAddr = reflect.New(vVal.Type().Elem())
    +	case isDeref && !isNew:
    +		vAddr = vVal
    +	default:
    +		vAddr = vVal.Addr()
    +	}
    +	vAddrI := vAddr.Interface()
    +	err, ok := error(nil), false
    +	for _, s := range setters {
    +		err = s(vAddrI, blank, value, t)
    +		if err == nil {
    +			ok = true
    +			break
    +		}
    +		if err != errUnsupportedType {
    +			return locErr{msg: err.Error(), loc: l}
    +		}
    +	}
    +	if !ok {
    +		// in case all setters returned errUnsupportedType
    +		return locErr{msg: err.Error(), loc: l}
    +	}
    +	if isNew { // set reference if it was dereferenced and newly allocated
    +		vVal.Set(vAddr)
    +	}
    +	if isMulti { // append if multi-valued
    +		vVar.Set(reflect.Append(vVar, vVal))
    +	}
    +	return nil
    +}
    diff --git a/vendor/gopkg.in/gcfg.v1/token/position.go b/vendor/gopkg.in/gcfg.v1/token/position.go
    new file mode 100644
    index 000000000..fc45c1e76
    --- /dev/null
    +++ b/vendor/gopkg.in/gcfg.v1/token/position.go
    @@ -0,0 +1,435 @@
    +// Copyright 2010 The Go Authors. All rights reserved.
    +// Use of this source code is governed by a BSD-style
    +// license that can be found in the LICENSE file.
    +
    +// TODO(gri) consider making this a separate package outside the go directory.
    +
    +package token
    +
    +import (
    +	"fmt"
    +	"sort"
    +	"sync"
    +)
    +
    +// -----------------------------------------------------------------------------
    +// Positions
    +
    +// Position describes an arbitrary source position
    +// including the file, line, and column location.
    +// A Position is valid if the line number is > 0.
    +//
    +type Position struct {
    +	Filename string // filename, if any
    +	Offset   int    // offset, starting at 0
    +	Line     int    // line number, starting at 1
    +	Column   int    // column number, starting at 1 (character count)
    +}
    +
    +// IsValid returns true if the position is valid.
    +func (pos *Position) IsValid() bool { return pos.Line > 0 }
    +
    +// String returns a string in one of several forms:
    +//
    +//	file:line:column    valid position with file name
    +//	line:column         valid position without file name
    +//	file                invalid position with file name
    +//	-                   invalid position without file name
    +//
    +func (pos Position) String() string {
    +	s := pos.Filename
    +	if pos.IsValid() {
    +		if s != "" {
    +			s += ":"
    +		}
    +		s += fmt.Sprintf("%d:%d", pos.Line, pos.Column)
    +	}
    +	if s == "" {
    +		s = "-"
    +	}
    +	return s
    +}
    +
    +// Pos is a compact encoding of a source position within a file set.
    +// It can be converted into a Position for a more convenient, but much
    +// larger, representation.
    +//
    +// The Pos value for a given file is a number in the range [base, base+size],
    +// where base and size are specified when adding the file to the file set via
    +// AddFile.
    +//
    +// To create the Pos value for a specific source offset, first add
    +// the respective file to the current file set (via FileSet.AddFile)
    +// and then call File.Pos(offset) for that file. Given a Pos value p
    +// for a specific file set fset, the corresponding Position value is
    +// obtained by calling fset.Position(p).
    +//
    +// Pos values can be compared directly with the usual comparison operators:
    +// If two Pos values p and q are in the same file, comparing p and q is
    +// equivalent to comparing the respective source file offsets. If p and q
    +// are in different files, p < q is true if the file implied by p was added
    +// to the respective file set before the file implied by q.
    +//
    +type Pos int
    +
    +// The zero value for Pos is NoPos; there is no file and line information
    +// associated with it, and NoPos().IsValid() is false. NoPos is always
    +// smaller than any other Pos value. The corresponding Position value
    +// for NoPos is the zero value for Position.
    +//
    +const NoPos Pos = 0
    +
    +// IsValid returns true if the position is valid.
    +func (p Pos) IsValid() bool {
    +	return p != NoPos
    +}
    +
    +// -----------------------------------------------------------------------------
    +// File
    +
    +// A File is a handle for a file belonging to a FileSet.
    +// A File has a name, size, and line offset table.
    +//
    +type File struct {
    +	set  *FileSet
    +	name string // file name as provided to AddFile
    +	base int    // Pos value range for this file is [base...base+size]
    +	size int    // file size as provided to AddFile
    +
    +	// lines and infos are protected by set.mutex
    +	lines []int
    +	infos []lineInfo
    +}
    +
    +// Name returns the file name of file f as registered with AddFile.
    +func (f *File) Name() string {
    +	return f.name
    +}
    +
    +// Base returns the base offset of file f as registered with AddFile.
    +func (f *File) Base() int {
    +	return f.base
    +}
    +
    +// Size returns the size of file f as registered with AddFile.
    +func (f *File) Size() int {
    +	return f.size
    +}
    +
    +// LineCount returns the number of lines in file f.
    +func (f *File) LineCount() int {
    +	f.set.mutex.RLock()
    +	n := len(f.lines)
    +	f.set.mutex.RUnlock()
    +	return n
    +}
    +
    +// AddLine adds the line offset for a new line.
    +// The line offset must be larger than the offset for the previous line
    +// and smaller than the file size; otherwise the line offset is ignored.
    +//
    +func (f *File) AddLine(offset int) {
    +	f.set.mutex.Lock()
    +	if i := len(f.lines); (i == 0 || f.lines[i-1] < offset) && offset < f.size {
    +		f.lines = append(f.lines, offset)
    +	}
    +	f.set.mutex.Unlock()
    +}
    +
    +// SetLines sets the line offsets for a file and returns true if successful.
    +// The line offsets are the offsets of the first character of each line;
    +// for instance for the content "ab\nc\n" the line offsets are {0, 3}.
    +// An empty file has an empty line offset table.
    +// Each line offset must be larger than the offset for the previous line
    +// and smaller than the file size; otherwise SetLines fails and returns
    +// false.
    +//
    +func (f *File) SetLines(lines []int) bool {
    +	// verify validity of lines table
    +	size := f.size
    +	for i, offset := range lines {
    +		if i > 0 && offset <= lines[i-1] || size <= offset {
    +			return false
    +		}
    +	}
    +
    +	// set lines table
    +	f.set.mutex.Lock()
    +	f.lines = lines
    +	f.set.mutex.Unlock()
    +	return true
    +}
    +
+// SetLinesForContent sets the line offsets for the given file content.
+func (f *File) SetLinesForContent(content []byte) {
+	var lines []int
+	// line is the offset of the start of the current line, or -1 once
+	// that offset has been recorded; a '\n' re-arms it with the offset
+	// of the following byte, so each line start is appended exactly once.
+	line := 0
+	for offset, b := range content {
+		if line >= 0 {
+			lines = append(lines, line)
+		}
+		line = -1
+		if b == '\n' {
+			line = offset + 1
+		}
+	}
+
+	// set lines table
+	f.set.mutex.Lock()
+	f.lines = lines
+	f.set.mutex.Unlock()
+}
    +
    +// A lineInfo object describes alternative file and line number
    +// information (such as provided via a //line comment in a .go
    +// file) for a given file offset.
    +type lineInfo struct {
    +	// fields are exported to make them accessible to gob
    +	Offset   int
    +	Filename string
    +	Line     int
    +}
    +
    +// AddLineInfo adds alternative file and line number information for
    +// a given file offset. The offset must be larger than the offset for
    +// the previously added alternative line info and smaller than the
    +// file size; otherwise the information is ignored.
    +//
    +// AddLineInfo is typically used to register alternative position
    +// information for //line filename:line comments in source files.
    +//
    +func (f *File) AddLineInfo(offset int, filename string, line int) {
    +	f.set.mutex.Lock()
    +	if i := len(f.infos); i == 0 || f.infos[i-1].Offset < offset && offset < f.size {
    +		f.infos = append(f.infos, lineInfo{offset, filename, line})
    +	}
    +	f.set.mutex.Unlock()
    +}
    +
    +// Pos returns the Pos value for the given file offset;
    +// the offset must be <= f.Size().
    +// f.Pos(f.Offset(p)) == p.
    +//
    +func (f *File) Pos(offset int) Pos {
    +	if offset > f.size {
    +		panic("illegal file offset")
    +	}
    +	return Pos(f.base + offset)
    +}
    +
    +// Offset returns the offset for the given file position p;
    +// p must be a valid Pos value in that file.
    +// f.Offset(f.Pos(offset)) == offset.
    +//
    +func (f *File) Offset(p Pos) int {
    +	if int(p) < f.base || int(p) > f.base+f.size {
    +		panic("illegal Pos value")
    +	}
    +	return int(p) - f.base
    +}
    +
    +// Line returns the line number for the given file position p;
    +// p must be a Pos value in that file or NoPos.
    +//
    +func (f *File) Line(p Pos) int {
    +	// TODO(gri) this can be implemented much more efficiently
    +	return f.Position(p).Line
    +}
    +
+// searchLineInfos returns the index of the lineInfo entry with the
+// largest Offset <= x, or -1 if no entry applies to offset x.
+func searchLineInfos(a []lineInfo, x int) int {
+	return sort.Search(len(a), func(i int) bool { return a[i].Offset > x }) - 1
+}
    +
+// info returns the file name, line, and column number for a file offset.
+// Line and column are 1-based; if the offset precedes all recorded line
+// offsets, line and column are left as 0.
+func (f *File) info(offset int) (filename string, line, column int) {
+	filename = f.name
+	if i := searchInts(f.lines, offset); i >= 0 {
+		line, column = i+1, offset-f.lines[i]+1
+	}
+	if len(f.infos) > 0 {
+		// almost no files have extra line infos
+		if i := searchLineInfos(f.infos, offset); i >= 0 {
+			alt := &f.infos[i]
+			filename = alt.Filename
+			// Shift the computed line by the difference between the
+			// //line directive's declared line and its actual line.
+			if i := searchInts(f.lines, alt.Offset); i >= 0 {
+				line += alt.Line - i - 1
+			}
+		}
+	}
+	return
+}
    +
+// position converts p to a Position without validating that p lies
+// within f; callers (Position on File and FileSet) perform that check.
+func (f *File) position(p Pos) (pos Position) {
+	offset := int(p) - f.base
+	pos.Offset = offset
+	pos.Filename, pos.Line, pos.Column = f.info(offset)
+	return
+}
    +
    +// Position returns the Position value for the given file position p;
    +// p must be a Pos value in that file or NoPos.
    +//
    +func (f *File) Position(p Pos) (pos Position) {
    +	if p != NoPos {
    +		if int(p) < f.base || int(p) > f.base+f.size {
    +			panic("illegal Pos value")
    +		}
    +		pos = f.position(p)
    +	}
    +	return
    +}
    +
    +// -----------------------------------------------------------------------------
    +// FileSet
    +
    +// A FileSet represents a set of source files.
    +// Methods of file sets are synchronized; multiple goroutines
    +// may invoke them concurrently.
    +//
    +type FileSet struct {
    +	mutex sync.RWMutex // protects the file set
    +	base  int          // base offset for the next file
    +	files []*File      // list of files in the order added to the set
    +	last  *File        // cache of last file looked up
    +}
    +
    +// NewFileSet creates a new file set.
    +func NewFileSet() *FileSet {
    +	s := new(FileSet)
    +	s.base = 1 // 0 == NoPos
    +	return s
    +}
    +
    +// Base returns the minimum base offset that must be provided to
    +// AddFile when adding the next file.
    +//
    +func (s *FileSet) Base() int {
    +	s.mutex.RLock()
    +	b := s.base
    +	s.mutex.RUnlock()
    +	return b
    +
    +}
    +
    +// AddFile adds a new file with a given filename, base offset, and file size
    +// to the file set s and returns the file. Multiple files may have the same
    +// name. The base offset must not be smaller than the FileSet's Base(), and
    +// size must not be negative.
    +//
    +// Adding the file will set the file set's Base() value to base + size + 1
    +// as the minimum base value for the next file. The following relationship
    +// exists between a Pos value p for a given file offset offs:
    +//
    +//	int(p) = base + offs
    +//
    +// with offs in the range [0, size] and thus p in the range [base, base+size].
    +// For convenience, File.Pos may be used to create file-specific position
    +// values from a file offset.
    +//
    +func (s *FileSet) AddFile(filename string, base, size int) *File {
    +	s.mutex.Lock()
    +	defer s.mutex.Unlock()
    +	if base < s.base || size < 0 {
    +		panic("illegal base or size")
    +	}
    +	// base >= s.base && size >= 0
    +	f := &File{s, filename, base, size, []int{0}, nil}
    +	base += size + 1 // +1 because EOF also has a position
    +	if base < 0 {
    +		panic("token.Pos offset overflow (> 2G of source code in file set)")
    +	}
    +	// add the file to the file set
    +	s.base = base
    +	s.files = append(s.files, f)
    +	s.last = f
    +	return f
    +}
    +
    +// Iterate calls f for the files in the file set in the order they were added
    +// until f returns false.
    +//
    +func (s *FileSet) Iterate(f func(*File) bool) {
    +	for i := 0; ; i++ {
    +		var file *File
    +		s.mutex.RLock()
    +		if i < len(s.files) {
    +			file = s.files[i]
    +		}
    +		s.mutex.RUnlock()
    +		if file == nil || !f(file) {
    +			break
    +		}
    +	}
    +}
    +
+// searchFiles returns the index of the file whose base is the largest
+// one <= x, or -1 if x precedes every file's base. a must be sorted by
+// base, which holds because files are added with increasing bases.
+func searchFiles(a []*File, x int) int {
+	return sort.Search(len(a), func(i int) bool { return a[i].base > x }) - 1
+}
    +
+// file returns the file containing position p, or nil if none does.
+// Callers must hold at least s.mutex.RLock.
+//
+// NOTE(review): s.last is written here while callers such as File and
+// Position hold only the read lock, so concurrent lookups can race on
+// the cache write — confirm whether this matches upstream or needs an
+// atomic/full lock.
+func (s *FileSet) file(p Pos) *File {
+	// common case: p is in last file
+	if f := s.last; f != nil && f.base <= int(p) && int(p) <= f.base+f.size {
+		return f
+	}
+	// p is not in last file - search all files
+	if i := searchFiles(s.files, int(p)); i >= 0 {
+		f := s.files[i]
+		// f.base <= int(p) by definition of searchFiles
+		if int(p) <= f.base+f.size {
+			s.last = f
+			return f
+		}
+	}
+	return nil
+}
    +
    +// File returns the file that contains the position p.
    +// If no such file is found (for instance for p == NoPos),
    +// the result is nil.
    +//
    +func (s *FileSet) File(p Pos) (f *File) {
    +	if p != NoPos {
    +		s.mutex.RLock()
    +		f = s.file(p)
    +		s.mutex.RUnlock()
    +	}
    +	return
    +}
    +
    +// Position converts a Pos in the fileset into a general Position.
    +func (s *FileSet) Position(p Pos) (pos Position) {
    +	if p != NoPos {
    +		s.mutex.RLock()
    +		if f := s.file(p); f != nil {
    +			pos = f.position(p)
    +		}
    +		s.mutex.RUnlock()
    +	}
    +	return
    +}
    +
    +// -----------------------------------------------------------------------------
    +// Helper functions
    +
    +func searchInts(a []int, x int) int {
    +	// This function body is a manually inlined version of:
    +	//
    +	//   return sort.Search(len(a), func(i int) bool { return a[i] > x }) - 1
    +	//
    +	// With better compiler optimizations, this may not be needed in the
    +	// future, but at the moment this change improves the go/printer
    +	// benchmark performance by ~30%. This has a direct impact on the
    +	// speed of gofmt and thus seems worthwhile (2011-04-29).
    +	// TODO(gri): Remove this when compilers have caught up.
    +	i, j := 0, len(a)
    +	for i < j {
    +		h := i + (j-i)/2 // avoid overflow when computing h
    +		// i ≤ h < j
    +		if a[h] <= x {
    +			i = h + 1
    +		} else {
    +			j = h
    +		}
    +	}
    +	return i - 1
    +}
    diff --git a/vendor/gopkg.in/gcfg.v1/token/serialize.go b/vendor/gopkg.in/gcfg.v1/token/serialize.go
    new file mode 100644
    index 000000000..4adc8f9e3
    --- /dev/null
    +++ b/vendor/gopkg.in/gcfg.v1/token/serialize.go
    @@ -0,0 +1,56 @@
    +// Copyright 2011 The Go Authors. All rights reserved.
    +// Use of this source code is governed by a BSD-style
    +// license that can be found in the LICENSE file.
    +
    +package token
    +
    +type serializedFile struct {
    +	// fields correspond 1:1 to fields with same (lower-case) name in File
    +	Name  string
    +	Base  int
    +	Size  int
    +	Lines []int
    +	Infos []lineInfo
    +}
    +
    +type serializedFileSet struct {
    +	Base  int
    +	Files []serializedFile
    +}
    +
+// Read calls decode to deserialize a file set into s; s must not be nil.
+func (s *FileSet) Read(decode func(interface{}) error) error {
+	var ss serializedFileSet
+	if err := decode(&ss); err != nil {
+		return err
+	}
+
+	s.mutex.Lock()
+	s.base = ss.Base
+	files := make([]*File, len(ss.Files))
+	for i := 0; i < len(ss.Files); i++ {
+		f := &ss.Files[i]
+		// The decoded Lines/Infos slices are adopted directly (no copy);
+		// they are freshly allocated by decode, so no aliasing hazard.
+		files[i] = &File{s, f.Name, f.Base, f.Size, f.Lines, f.Infos}
+	}
+	s.files = files
+	// Invalidate the last-file cache; it may point into the old set.
+	s.last = nil
+	s.mutex.Unlock()
+
+	return nil
+}
    +
+// Write calls encode to serialize the file set s.
+//
+// NOTE(review): the serialized Lines/Infos slices alias the live File
+// data; encode must finish before any concurrent SetLines replaces
+// them — confirm callers serialize from a quiescent set.
+func (s *FileSet) Write(encode func(interface{}) error) error {
+	var ss serializedFileSet
+
+	s.mutex.Lock()
+	ss.Base = s.base
+	files := make([]serializedFile, len(s.files))
+	for i, f := range s.files {
+		files[i] = serializedFile{f.name, f.base, f.size, f.lines, f.infos}
+	}
+	ss.Files = files
+	s.mutex.Unlock()
+
+	return encode(ss)
+}
    diff --git a/vendor/gopkg.in/gcfg.v1/token/token.go b/vendor/gopkg.in/gcfg.v1/token/token.go
    new file mode 100644
    index 000000000..b3c7c83fa
    --- /dev/null
    +++ b/vendor/gopkg.in/gcfg.v1/token/token.go
    @@ -0,0 +1,83 @@
    +// Copyright 2009 The Go Authors. All rights reserved.
    +// Use of this source code is governed by a BSD-style
    +// license that can be found in the LICENSE file.
    +
    +// Package token defines constants representing the lexical tokens of the gcfg
    +// configuration syntax and basic operations on tokens (printing, predicates).
    +//
    +// Note that the API for the token package may change to accommodate new
    +// features or implementation changes in gcfg.
    +//
    +package token
    +
    +import "strconv"
    +
    +// Token is the set of lexical tokens of the gcfg configuration syntax.
    +type Token int
    +
    +// The list of tokens.
    +const (
    +	// Special tokens
    +	ILLEGAL Token = iota
    +	EOF
    +	COMMENT
    +
    +	literal_beg
    +	// Identifiers and basic type literals
    +	// (these tokens stand for classes of literals)
    +	IDENT  // section-name, variable-name
    +	STRING // "subsection-name", variable value
    +	literal_end
    +
    +	operator_beg
    +	// Operators and delimiters
    +	ASSIGN // =
    +	LBRACK // [
    +	RBRACK // ]
    +	EOL    // \n
    +	operator_end
    +)
    +
    +var tokens = [...]string{
    +	ILLEGAL: "ILLEGAL",
    +
    +	EOF:     "EOF",
    +	COMMENT: "COMMENT",
    +
    +	IDENT:  "IDENT",
    +	STRING: "STRING",
    +
    +	ASSIGN: "=",
    +	LBRACK: "[",
    +	RBRACK: "]",
    +	EOL:    "\n",
    +}
    +
+// String returns the string corresponding to the token tok.
+// For operators and delimiters, the string is the actual token character
+// sequence (e.g., for the token ASSIGN, the string is "="). For all other
+// tokens the string corresponds to the token constant name (e.g. for the
+// token IDENT, the string is "IDENT").
+//
+func (tok Token) String() string {
+	s := ""
+	if 0 <= tok && tok < Token(len(tokens)) {
+		s = tokens[tok]
+	}
+	// Out-of-range values and unnamed markers (literal_beg etc.) map to
+	// the empty string above; render those as "token(N)".
+	if s == "" {
+		s = "token(" + strconv.Itoa(int(tok)) + ")"
+	}
+	return s
+}
    +
    +// Predicates
    +
    +// IsLiteral returns true for tokens corresponding to identifiers
    +// and basic type literals; it returns false otherwise.
    +//
    +func (tok Token) IsLiteral() bool { return literal_beg < tok && tok < literal_end }
    +
    +// IsOperator returns true for tokens corresponding to operators and
    +// delimiters; it returns false otherwise.
    +//
    +func (tok Token) IsOperator() bool { return operator_beg < tok && tok < operator_end }
    diff --git a/vendor/gopkg.in/gcfg.v1/types/bool.go b/vendor/gopkg.in/gcfg.v1/types/bool.go
    new file mode 100644
    index 000000000..8dcae0d8c
    --- /dev/null
    +++ b/vendor/gopkg.in/gcfg.v1/types/bool.go
    @@ -0,0 +1,23 @@
    +package types
    +
    +// BoolValues defines the name and value mappings for ParseBool.
    +var BoolValues = map[string]interface{}{
    +	"true": true, "yes": true, "on": true, "1": true,
    +	"false": false, "no": false, "off": false, "0": false,
    +}
    +
    +var boolParser = func() *EnumParser {
    +	ep := &EnumParser{}
    +	ep.AddVals(BoolValues)
    +	return ep
    +}()
    +
    +// ParseBool parses bool values according to the definitions in BoolValues.
    +// Parsing is case-insensitive.
    +func ParseBool(s string) (bool, error) {
    +	v, err := boolParser.Parse(s)
    +	if err != nil {
    +		return false, err
    +	}
    +	return v.(bool), nil
    +}
    diff --git a/vendor/gopkg.in/gcfg.v1/types/doc.go b/vendor/gopkg.in/gcfg.v1/types/doc.go
    new file mode 100644
    index 000000000..9f9c345f6
    --- /dev/null
    +++ b/vendor/gopkg.in/gcfg.v1/types/doc.go
    @@ -0,0 +1,4 @@
    +// Package types defines helpers for type conversions.
    +//
    +// The API for this package is not finalized yet.
    +package types
    diff --git a/vendor/gopkg.in/gcfg.v1/types/enum.go b/vendor/gopkg.in/gcfg.v1/types/enum.go
    new file mode 100644
    index 000000000..1a0c7ef45
    --- /dev/null
    +++ b/vendor/gopkg.in/gcfg.v1/types/enum.go
    @@ -0,0 +1,44 @@
    +package types
    +
    +import (
    +	"fmt"
    +	"reflect"
    +	"strings"
    +)
    +
    +// EnumParser parses "enum" values; i.e. a predefined set of strings to
    +// predefined values.
    +type EnumParser struct {
    +	Type      string // type name; if not set, use type of first value added
    +	CaseMatch bool   // if true, matching of strings is case-sensitive
    +	// PrefixMatch bool
    +	vals map[string]interface{}
    +}
    +
    +// AddVals adds strings and values to an EnumParser.
    +func (ep *EnumParser) AddVals(vals map[string]interface{}) {
    +	if ep.vals == nil {
    +		ep.vals = make(map[string]interface{})
    +	}
    +	for k, v := range vals {
    +		if ep.Type == "" {
    +			ep.Type = reflect.TypeOf(v).Name()
    +		}
    +		if !ep.CaseMatch {
    +			k = strings.ToLower(k)
    +		}
    +		ep.vals[k] = v
    +	}
    +}
    +
+// Parse parses the string and returns the value or an error.
+// Matching is case-insensitive unless ep.CaseMatch is set, mirroring
+// the normalization applied in AddVals.
+func (ep EnumParser) Parse(s string) (interface{}, error) {
+	if !ep.CaseMatch {
+		s = strings.ToLower(s)
+	}
+	v, ok := ep.vals[s]
+	if !ok {
+		// NOTE(review): returns false (not nil) as the value on failure;
+		// callers such as ParseBool rely only on the error — confirm
+		// before changing the sentinel.
+		return false, fmt.Errorf("failed to parse %s %#q", ep.Type, s)
+	}
+	return v, nil
+}
    diff --git a/vendor/gopkg.in/gcfg.v1/types/int.go b/vendor/gopkg.in/gcfg.v1/types/int.go
    new file mode 100644
    index 000000000..af7e75c12
    --- /dev/null
    +++ b/vendor/gopkg.in/gcfg.v1/types/int.go
    @@ -0,0 +1,86 @@
    +package types
    +
    +import (
    +	"fmt"
    +	"strings"
    +)
    +
    +// An IntMode is a mode for parsing integer values, representing a set of
    +// accepted bases.
    +type IntMode uint8
    +
    +// IntMode values for ParseInt; can be combined using binary or.
    +const (
    +	Dec IntMode = 1 << iota
    +	Hex
    +	Oct
    +)
    +
    +// String returns a string representation of IntMode; e.g. `IntMode(Dec|Hex)`.
    +func (m IntMode) String() string {
    +	var modes []string
    +	if m&Dec != 0 {
    +		modes = append(modes, "Dec")
    +	}
    +	if m&Hex != 0 {
    +		modes = append(modes, "Hex")
    +	}
    +	if m&Oct != 0 {
    +		modes = append(modes, "Oct")
    +	}
    +	return "IntMode(" + strings.Join(modes, "|") + ")"
    +}
    +
    +var errIntAmbig = fmt.Errorf("ambiguous integer value; must include '0' prefix")
    +
    +func prefix0(val string) bool {
    +	return strings.HasPrefix(val, "0") || strings.HasPrefix(val, "-0")
    +}
    +
    +func prefix0x(val string) bool {
    +	return strings.HasPrefix(val, "0x") || strings.HasPrefix(val, "-0x")
    +}
    +
+// ParseInt parses val using mode into intptr, which must be a pointer to an
+// integer kind type. Non-decimal value require prefix `0` or `0x` in the cases
+// when mode permits ambiguity of base; otherwise the prefix can be omitted.
+//
+// The fmt verbs used below: 'd' decimal, 'x' hex, 'o' octal, and 'v'
+// prefix-determined base (0x => hex, leading 0 => octal, else decimal).
+func ParseInt(intptr interface{}, val string, mode IntMode) error {
+	val = strings.TrimSpace(val)
+	verb := byte(0)
+	switch mode {
+	case Dec:
+		verb = 'd'
+	case Dec + Hex:
+		// A 0x prefix disambiguates hex; otherwise read as decimal.
+		if prefix0x(val) {
+			verb = 'v'
+		} else {
+			verb = 'd'
+		}
+	case Dec + Oct:
+		// A bare 0 prefix (but not 0x) selects octal via 'v'.
+		if prefix0(val) && !prefix0x(val) {
+			verb = 'v'
+		} else {
+			verb = 'd'
+		}
+	case Dec + Hex + Oct:
+		verb = 'v'
+	case Hex:
+		if prefix0x(val) {
+			verb = 'v'
+		} else {
+			verb = 'x'
+		}
+	case Oct:
+		verb = 'o'
+	case Hex + Oct:
+		// Without a 0/0x prefix the base cannot be determined.
+		if prefix0(val) {
+			verb = 'v'
+		} else {
+			return errIntAmbig
+		}
+	}
+	if verb == 0 {
+		// mode was not one of the defined IntMode combinations.
+		panic("unsupported mode")
+	}
+	return ScanFully(intptr, val, verb)
+}
    diff --git a/vendor/gopkg.in/gcfg.v1/types/scan.go b/vendor/gopkg.in/gcfg.v1/types/scan.go
    new file mode 100644
    index 000000000..db2f6ed3c
    --- /dev/null
    +++ b/vendor/gopkg.in/gcfg.v1/types/scan.go
    @@ -0,0 +1,23 @@
    +package types
    +
    +import (
    +	"fmt"
    +	"io"
    +	"reflect"
    +)
    +
    +// ScanFully uses fmt.Sscanf with verb to fully scan val into ptr.
    +func ScanFully(ptr interface{}, val string, verb byte) error {
    +	t := reflect.ValueOf(ptr).Elem().Type()
    +	// attempt to read extra bytes to make sure the value is consumed
    +	var b []byte
    +	n, err := fmt.Sscanf(val, "%"+string(verb)+"%s", ptr, &b)
    +	switch {
    +	case n < 1 || n == 1 && err != io.EOF:
    +		return fmt.Errorf("failed to parse %q as %v: %v", val, t, err)
    +	case n > 1:
    +		return fmt.Errorf("failed to parse %q as %v: extra characters %q", val, t, string(b))
    +	}
    +	// n == 1 && err == io.EOF
    +	return nil
    +}
    diff --git a/vendor/gopkg.in/natefinch/lumberjack.v2/.gitignore b/vendor/gopkg.in/natefinch/lumberjack.v2/.gitignore
    new file mode 100644
    index 000000000..836562412
    --- /dev/null
    +++ b/vendor/gopkg.in/natefinch/lumberjack.v2/.gitignore
    @@ -0,0 +1,23 @@
    +# Compiled Object files, Static and Dynamic libs (Shared Objects)
    +*.o
    +*.a
    +*.so
    +
    +# Folders
    +_obj
    +_test
    +
    +# Architecture specific extensions/prefixes
    +*.[568vq]
    +[568vq].out
    +
    +*.cgo1.go
    +*.cgo2.c
    +_cgo_defun.c
    +_cgo_gotypes.go
    +_cgo_export.*
    +
    +_testmain.go
    +
    +*.exe
    +*.test
    diff --git a/vendor/gopkg.in/natefinch/lumberjack.v2/.travis.yml b/vendor/gopkg.in/natefinch/lumberjack.v2/.travis.yml
    new file mode 100644
    index 000000000..21166f5c7
    --- /dev/null
    +++ b/vendor/gopkg.in/natefinch/lumberjack.v2/.travis.yml
    @@ -0,0 +1,11 @@
    +language: go
    +
    +go:
    +  - tip
    +  - 1.15.x
    +  - 1.14.x
    +  - 1.13.x
    +  - 1.12.x
    +  
    +env:
    +  - GO111MODULE=on
    diff --git a/vendor/gopkg.in/natefinch/lumberjack.v2/LICENSE b/vendor/gopkg.in/natefinch/lumberjack.v2/LICENSE
    new file mode 100644
    index 000000000..c3d4cc307
    --- /dev/null
    +++ b/vendor/gopkg.in/natefinch/lumberjack.v2/LICENSE
    @@ -0,0 +1,21 @@
    +The MIT License (MIT)
    +
    +Copyright (c) 2014 Nate Finch 
    +
    +Permission is hereby granted, free of charge, to any person obtaining a copy
    +of this software and associated documentation files (the "Software"), to deal
    +in the Software without restriction, including without limitation the rights
    +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
    +copies of the Software, and to permit persons to whom the Software is
    +furnished to do so, subject to the following conditions:
    +
    +The above copyright notice and this permission notice shall be included in all
    +copies or substantial portions of the Software.
    +
    +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
    +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
    +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
    +SOFTWARE.
    \ No newline at end of file
    diff --git a/vendor/gopkg.in/natefinch/lumberjack.v2/README.md b/vendor/gopkg.in/natefinch/lumberjack.v2/README.md
    new file mode 100644
    index 000000000..060eae52a
    --- /dev/null
    +++ b/vendor/gopkg.in/natefinch/lumberjack.v2/README.md
    @@ -0,0 +1,179 @@
    +# lumberjack  [![GoDoc](https://godoc.org/gopkg.in/natefinch/lumberjack.v2?status.png)](https://godoc.org/gopkg.in/natefinch/lumberjack.v2) [![Build Status](https://travis-ci.org/natefinch/lumberjack.svg?branch=v2.0)](https://travis-ci.org/natefinch/lumberjack) [![Build status](https://ci.appveyor.com/api/projects/status/00gchpxtg4gkrt5d)](https://ci.appveyor.com/project/natefinch/lumberjack) [![Coverage Status](https://coveralls.io/repos/natefinch/lumberjack/badge.svg?branch=v2.0)](https://coveralls.io/r/natefinch/lumberjack?branch=v2.0)
    +
    +### Lumberjack is a Go package for writing logs to rolling files.
    +
    +Package lumberjack provides a rolling logger.
    +
    +Note that this is v2.0 of lumberjack, and should be imported using gopkg.in
    +thusly:
    +
    +    import "gopkg.in/natefinch/lumberjack.v2"
    +
    +The package name remains simply lumberjack, and the code resides at
    +https://github.com/natefinch/lumberjack under the v2.0 branch.
    +
    +Lumberjack is intended to be one part of a logging infrastructure.
    +It is not an all-in-one solution, but instead is a pluggable
    +component at the bottom of the logging stack that simply controls the files
    +to which logs are written.
    +
    +Lumberjack plays well with any logging package that can write to an
    +io.Writer, including the standard library's log package.
    +
    +Lumberjack assumes that only one process is writing to the output files.
    +Using the same lumberjack configuration from multiple processes on the same
    +machine will result in improper behavior.
    +
    +
    +**Example**
    +
    +To use lumberjack with the standard library's log package, just pass it into the SetOutput function when your application starts.
    +
    +Code:
    +
    +```go
    +log.SetOutput(&lumberjack.Logger{
    +    Filename:   "/var/log/myapp/foo.log",
    +    MaxSize:    500, // megabytes
    +    MaxBackups: 3,
    +    MaxAge:     28, //days
    +    Compress:   true, // disabled by default
    +})
    +```
    +
    +
    +
    +## type Logger
    +``` go
    +type Logger struct {
    +    // Filename is the file to write logs to.  Backup log files will be retained
    +    // in the same directory.  It uses -lumberjack.log in
    +    // os.TempDir() if empty.
    +    Filename string `json:"filename" yaml:"filename"`
    +
    +    // MaxSize is the maximum size in megabytes of the log file before it gets
    +    // rotated. It defaults to 100 megabytes.
    +    MaxSize int `json:"maxsize" yaml:"maxsize"`
    +
    +    // MaxAge is the maximum number of days to retain old log files based on the
    +    // timestamp encoded in their filename.  Note that a day is defined as 24
    +    // hours and may not exactly correspond to calendar days due to daylight
    +    // savings, leap seconds, etc. The default is not to remove old log files
    +    // based on age.
    +    MaxAge int `json:"maxage" yaml:"maxage"`
    +
    +    // MaxBackups is the maximum number of old log files to retain.  The default
    +    // is to retain all old log files (though MaxAge may still cause them to get
    +    // deleted.)
    +    MaxBackups int `json:"maxbackups" yaml:"maxbackups"`
    +
    +    // LocalTime determines if the time used for formatting the timestamps in
    +    // backup files is the computer's local time.  The default is to use UTC
    +    // time.
    +    LocalTime bool `json:"localtime" yaml:"localtime"`
    +
    +    // Compress determines if the rotated log files should be compressed
    +    // using gzip. The default is not to perform compression.
    +    Compress bool `json:"compress" yaml:"compress"`
    +    // contains filtered or unexported fields
    +}
    +```
    +Logger is an io.WriteCloser that writes to the specified filename.
    +
    +Logger opens or creates the logfile on first Write.  If the file exists and
    +is less than MaxSize megabytes, lumberjack will open and append to that file.
    +If the file exists and its size is >= MaxSize megabytes, the file is renamed
    +by putting the current time in a timestamp in the name immediately before the
    +file's extension (or the end of the filename if there's no extension). A new
    +log file is then created using original filename.
    +
    +Whenever a write would cause the current log file exceed MaxSize megabytes,
    +the current file is closed, renamed, and a new log file created with the
    +original name. Thus, the filename you give Logger is always the "current" log
    +file.
    +
    +Backups use the log file name given to Logger, in the form `name-timestamp.ext`
    +where name is the filename without the extension, timestamp is the time at which
    +the log was rotated formatted with the time.Time format of
    +`2006-01-02T15-04-05.000` and the extension is the original extension.  For
    +example, if your Logger.Filename is `/var/log/foo/server.log`, a backup created
+at 6:30pm on Nov 4 2016 would use the filename
    +`/var/log/foo/server-2016-11-04T18-30-00.000.log`
    +
    +### Cleaning Up Old Log Files
    +Whenever a new logfile gets created, old log files may be deleted.  The most
    +recent files according to the encoded timestamp will be retained, up to a
    +number equal to MaxBackups (or all of them if MaxBackups is 0).  Any files
    +with an encoded timestamp older than MaxAge days are deleted, regardless of
    +MaxBackups.  Note that the time encoded in the timestamp is the rotation
    +time, which may differ from the last time that file was written to.
    +
    +If MaxBackups and MaxAge are both 0, no old log files will be deleted.
    +
    +
    +
    +
    +
    +
    +
    +
    +
    +
    +
    +### func (\*Logger) Close
    +``` go
    +func (l *Logger) Close() error
    +```
    +Close implements io.Closer, and closes the current logfile.
    +
    +
    +
    +### func (\*Logger) Rotate
    +``` go
    +func (l *Logger) Rotate() error
    +```
    +Rotate causes Logger to close the existing log file and immediately create a
    +new one.  This is a helper function for applications that want to initiate
    +rotations outside of the normal rotation rules, such as in response to
    +SIGHUP.  After rotating, this initiates a cleanup of old log files according
    +to the normal rules.
    +
    +**Example**
    +
    +Example of how to rotate in response to SIGHUP.
    +
    +Code:
    +
    +```go
    +l := &lumberjack.Logger{}
    +log.SetOutput(l)
    +c := make(chan os.Signal, 1)
    +signal.Notify(c, syscall.SIGHUP)
    +
    +go func() {
    +    for {
    +        <-c
    +        l.Rotate()
    +    }
    +}()
    +```
    +
    +### func (\*Logger) Write
    +``` go
    +func (l *Logger) Write(p []byte) (n int, err error)
    +```
    +Write implements io.Writer.  If a write would cause the log file to be larger
    +than MaxSize, the file is closed, renamed to include a timestamp of the
    +current time, and a new log file is created using the original log file name.
    +If the length of the write is greater than MaxSize, an error is returned.
    +
    +
    +
    +
    +
    +
    +
    +
    +
    +- - -
    +Generated by [godoc2md](http://godoc.org/github.com/davecheney/godoc2md)
    diff --git a/vendor/gopkg.in/natefinch/lumberjack.v2/chown.go b/vendor/gopkg.in/natefinch/lumberjack.v2/chown.go
    new file mode 100644
    index 000000000..11d066972
    --- /dev/null
    +++ b/vendor/gopkg.in/natefinch/lumberjack.v2/chown.go
    @@ -0,0 +1,11 @@
    +// +build !linux
    +
    +package lumberjack
    +
    +import (
    +	"os"
    +)
    +
    +func chown(_ string, _ os.FileInfo) error {
    +	return nil
    +}
    diff --git a/vendor/gopkg.in/natefinch/lumberjack.v2/chown_linux.go b/vendor/gopkg.in/natefinch/lumberjack.v2/chown_linux.go
    new file mode 100644
    index 000000000..465f56927
    --- /dev/null
    +++ b/vendor/gopkg.in/natefinch/lumberjack.v2/chown_linux.go
    @@ -0,0 +1,19 @@
    +package lumberjack
    +
    +import (
    +	"os"
    +	"syscall"
    +)
    +
    +// osChown is a var so we can mock it out during tests.
    +var osChown = os.Chown
    +
+// chown pre-creates (or truncates) the file at name with info's mode and
+// then transfers info's uid/gid to it.
+//
+// NOTE(review): O_TRUNC empties any existing file at name — presumably
+// this is only called right after rotation when name should be a fresh
+// log file; confirm against the caller before reuse.
+func chown(name string, info os.FileInfo) error {
+	f, err := os.OpenFile(name, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, info.Mode())
+	if err != nil {
+		return err
+	}
+	f.Close()
+	// info.Sys() is a *syscall.Stat_t on Linux (this file is linux-only).
+	stat := info.Sys().(*syscall.Stat_t)
+	return osChown(name, int(stat.Uid), int(stat.Gid))
+}
    diff --git a/vendor/gopkg.in/natefinch/lumberjack.v2/lumberjack.go b/vendor/gopkg.in/natefinch/lumberjack.v2/lumberjack.go
    new file mode 100644
    index 000000000..3447cdc05
    --- /dev/null
    +++ b/vendor/gopkg.in/natefinch/lumberjack.v2/lumberjack.go
    @@ -0,0 +1,541 @@
    +// Package lumberjack provides a rolling logger.
    +//
    +// Note that this is v2.0 of lumberjack, and should be imported using gopkg.in
    +// thusly:
    +//
    +//   import "gopkg.in/natefinch/lumberjack.v2"
    +//
    +// The package name remains simply lumberjack, and the code resides at
    +// https://github.com/natefinch/lumberjack under the v2.0 branch.
    +//
    +// Lumberjack is intended to be one part of a logging infrastructure.
    +// It is not an all-in-one solution, but instead is a pluggable
    +// component at the bottom of the logging stack that simply controls the files
    +// to which logs are written.
    +//
    +// Lumberjack plays well with any logging package that can write to an
    +// io.Writer, including the standard library's log package.
    +//
    +// Lumberjack assumes that only one process is writing to the output files.
    +// Using the same lumberjack configuration from multiple processes on the same
    +// machine will result in improper behavior.
    +package lumberjack
    +
    +import (
    +	"compress/gzip"
    +	"errors"
    +	"fmt"
    +	"io"
    +	"io/ioutil"
    +	"os"
    +	"path/filepath"
    +	"sort"
    +	"strings"
    +	"sync"
    +	"time"
    +)
    +
    +const (
    +	backupTimeFormat = "2006-01-02T15-04-05.000"
    +	compressSuffix   = ".gz"
    +	defaultMaxSize   = 100
    +)
    +
    +// ensure we always implement io.WriteCloser
    +var _ io.WriteCloser = (*Logger)(nil)
    +
    +// Logger is an io.WriteCloser that writes to the specified filename.
    +//
    +// Logger opens or creates the logfile on first Write.  If the file exists and
    +// is less than MaxSize megabytes, lumberjack will open and append to that file.
    +// If the file exists and its size is >= MaxSize megabytes, the file is renamed
    +// by putting the current time in a timestamp in the name immediately before the
    +// file's extension (or the end of the filename if there's no extension). A new
    +// log file is then created using original filename.
    +//
    +// Whenever a write would cause the current log file exceed MaxSize megabytes,
    +// the current file is closed, renamed, and a new log file created with the
    +// original name. Thus, the filename you give Logger is always the "current" log
    +// file.
    +//
    +// Backups use the log file name given to Logger, in the form
    +// `name-timestamp.ext` where name is the filename without the extension,
    +// timestamp is the time at which the log was rotated formatted with the
    +// time.Time format of `2006-01-02T15-04-05.000` and the extension is the
    +// original extension.  For example, if your Logger.Filename is
    +// `/var/log/foo/server.log`, a backup created at 6:30pm on Nov 11 2016 would
    +// use the filename `/var/log/foo/server-2016-11-04T18-30-00.000.log`
    +//
    +// Cleaning Up Old Log Files
    +//
    +// Whenever a new logfile gets created, old log files may be deleted.  The most
    +// recent files according to the encoded timestamp will be retained, up to a
    +// number equal to MaxBackups (or all of them if MaxBackups is 0).  Any files
    +// with an encoded timestamp older than MaxAge days are deleted, regardless of
    +// MaxBackups.  Note that the time encoded in the timestamp is the rotation
    +// time, which may differ from the last time that file was written to.
    +//
    +// If MaxBackups and MaxAge are both 0, no old log files will be deleted.
    +type Logger struct {
    +	// Filename is the file to write logs to.  Backup log files will be retained
    +	// in the same directory.  It uses -lumberjack.log in
    +	// os.TempDir() if empty.
    +	Filename string `json:"filename" yaml:"filename"`
    +
    +	// MaxSize is the maximum size in megabytes of the log file before it gets
    +	// rotated. It defaults to 100 megabytes.
    +	MaxSize int `json:"maxsize" yaml:"maxsize"`
    +
    +	// MaxAge is the maximum number of days to retain old log files based on the
    +	// timestamp encoded in their filename.  Note that a day is defined as 24
    +	// hours and may not exactly correspond to calendar days due to daylight
    +	// savings, leap seconds, etc. The default is not to remove old log files
    +	// based on age.
    +	MaxAge int `json:"maxage" yaml:"maxage"`
    +
    +	// MaxBackups is the maximum number of old log files to retain.  The default
    +	// is to retain all old log files (though MaxAge may still cause them to get
    +	// deleted.)
    +	MaxBackups int `json:"maxbackups" yaml:"maxbackups"`
    +
    +	// LocalTime determines if the time used for formatting the timestamps in
    +	// backup files is the computer's local time.  The default is to use UTC
    +	// time.
    +	LocalTime bool `json:"localtime" yaml:"localtime"`
    +
    +	// Compress determines if the rotated log files should be compressed
    +	// using gzip. The default is not to perform compression.
    +	Compress bool `json:"compress" yaml:"compress"`
    +
    +	size int64
    +	file *os.File
    +	mu   sync.Mutex
    +
    +	millCh    chan bool
    +	startMill sync.Once
    +}
    +
    +var (
    +	// currentTime exists so it can be mocked out by tests.
    +	currentTime = time.Now
    +
    +	// os_Stat exists so it can be mocked out by tests.
    +	osStat = os.Stat
    +
    +	// megabyte is the conversion factor between MaxSize and bytes.  It is a
    +	// variable so tests can mock it out and not need to write megabytes of data
    +	// to disk.
    +	megabyte = 1024 * 1024
    +)
    +
    +// Write implements io.Writer.  If a write would cause the log file to be larger
    +// than MaxSize, the file is closed, renamed to include a timestamp of the
    +// current time, and a new log file is created using the original log file name.
    +// If the length of the write is greater than MaxSize, an error is returned.
    +func (l *Logger) Write(p []byte) (n int, err error) {
    +	l.mu.Lock()
    +	defer l.mu.Unlock()
    +
    +	writeLen := int64(len(p))
    +	if writeLen > l.max() {
    +		return 0, fmt.Errorf(
    +			"write length %d exceeds maximum file size %d", writeLen, l.max(),
    +		)
    +	}
    +
    +	if l.file == nil {
    +		if err = l.openExistingOrNew(len(p)); err != nil {
    +			return 0, err
    +		}
    +	}
    +
    +	if l.size+writeLen > l.max() {
    +		if err := l.rotate(); err != nil {
    +			return 0, err
    +		}
    +	}
    +
    +	n, err = l.file.Write(p)
    +	l.size += int64(n)
    +
    +	return n, err
    +}
    +
    +// Close implements io.Closer, and closes the current logfile.
    +func (l *Logger) Close() error {
    +	l.mu.Lock()
    +	defer l.mu.Unlock()
    +	return l.close()
    +}
    +
    +// close closes the file if it is open.
    +func (l *Logger) close() error {
    +	if l.file == nil {
    +		return nil
    +	}
    +	err := l.file.Close()
    +	l.file = nil
    +	return err
    +}
    +
    +// Rotate causes Logger to close the existing log file and immediately create a
    +// new one.  This is a helper function for applications that want to initiate
    +// rotations outside of the normal rotation rules, such as in response to
    +// SIGHUP.  After rotating, this initiates compression and removal of old log
    +// files according to the configuration.
    +func (l *Logger) Rotate() error {
    +	l.mu.Lock()
    +	defer l.mu.Unlock()
    +	return l.rotate()
    +}
    +
    +// rotate closes the current file, moves it aside with a timestamp in the name,
    +// (if it exists), opens a new file with the original filename, and then runs
    +// post-rotation processing and removal.
    +func (l *Logger) rotate() error {
    +	if err := l.close(); err != nil {
    +		return err
    +	}
    +	if err := l.openNew(); err != nil {
    +		return err
    +	}
    +	l.mill()
    +	return nil
    +}
    +
    +// openNew opens a new log file for writing, moving any old log file out of the
    +// way.  This methods assumes the file has already been closed.
    +func (l *Logger) openNew() error {
    +	err := os.MkdirAll(l.dir(), 0755)
    +	if err != nil {
    +		return fmt.Errorf("can't make directories for new logfile: %s", err)
    +	}
    +
    +	name := l.filename()
    +	mode := os.FileMode(0600)
    +	info, err := osStat(name)
    +	if err == nil {
    +		// Copy the mode off the old logfile.
    +		mode = info.Mode()
    +		// move the existing file
    +		newname := backupName(name, l.LocalTime)
    +		if err := os.Rename(name, newname); err != nil {
    +			return fmt.Errorf("can't rename log file: %s", err)
    +		}
    +
    +		// this is a no-op anywhere but linux
    +		if err := chown(name, info); err != nil {
    +			return err
    +		}
    +	}
    +
    +	// we use truncate here because this should only get called when we've moved
    +	// the file ourselves. if someone else creates the file in the meantime,
    +	// just wipe out the contents.
    +	f, err := os.OpenFile(name, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, mode)
    +	if err != nil {
    +		return fmt.Errorf("can't open new logfile: %s", err)
    +	}
    +	l.file = f
    +	l.size = 0
    +	return nil
    +}
    +
    +// backupName creates a new filename from the given name, inserting a timestamp
    +// between the filename and the extension, using the local time if requested
    +// (otherwise UTC).
    +func backupName(name string, local bool) string {
    +	dir := filepath.Dir(name)
    +	filename := filepath.Base(name)
    +	ext := filepath.Ext(filename)
    +	prefix := filename[:len(filename)-len(ext)]
    +	t := currentTime()
    +	if !local {
    +		t = t.UTC()
    +	}
    +
    +	timestamp := t.Format(backupTimeFormat)
    +	return filepath.Join(dir, fmt.Sprintf("%s-%s%s", prefix, timestamp, ext))
    +}
    +
    +// openExistingOrNew opens the logfile if it exists and if the current write
    +// would not put it over MaxSize.  If there is no such file or the write would
    +// put it over the MaxSize, a new file is created.
    +func (l *Logger) openExistingOrNew(writeLen int) error {
    +	l.mill()
    +
    +	filename := l.filename()
    +	info, err := osStat(filename)
    +	if os.IsNotExist(err) {
    +		return l.openNew()
    +	}
    +	if err != nil {
    +		return fmt.Errorf("error getting log file info: %s", err)
    +	}
    +
    +	if info.Size()+int64(writeLen) >= l.max() {
    +		return l.rotate()
    +	}
    +
    +	file, err := os.OpenFile(filename, os.O_APPEND|os.O_WRONLY, 0644)
    +	if err != nil {
    +		// if we fail to open the old log file for some reason, just ignore
    +		// it and open a new log file.
    +		return l.openNew()
    +	}
    +	l.file = file
    +	l.size = info.Size()
    +	return nil
    +}
    +
    +// filename generates the name of the logfile from the current time.
    +func (l *Logger) filename() string {
    +	if l.Filename != "" {
    +		return l.Filename
    +	}
    +	name := filepath.Base(os.Args[0]) + "-lumberjack.log"
    +	return filepath.Join(os.TempDir(), name)
    +}
    +
    +// millRunOnce performs compression and removal of stale log files.
    +// Log files are compressed if enabled via configuration and old log
    +// files are removed, keeping at most l.MaxBackups files, as long as
    +// none of them are older than MaxAge.
    +func (l *Logger) millRunOnce() error {
    +	if l.MaxBackups == 0 && l.MaxAge == 0 && !l.Compress {
    +		return nil
    +	}
    +
    +	files, err := l.oldLogFiles()
    +	if err != nil {
    +		return err
    +	}
    +
    +	var compress, remove []logInfo
    +
    +	if l.MaxBackups > 0 && l.MaxBackups < len(files) {
    +		preserved := make(map[string]bool)
    +		var remaining []logInfo
    +		for _, f := range files {
    +			// Only count the uncompressed log file or the
    +			// compressed log file, not both.
    +			fn := f.Name()
    +			if strings.HasSuffix(fn, compressSuffix) {
    +				fn = fn[:len(fn)-len(compressSuffix)]
    +			}
    +			preserved[fn] = true
    +
    +			if len(preserved) > l.MaxBackups {
    +				remove = append(remove, f)
    +			} else {
    +				remaining = append(remaining, f)
    +			}
    +		}
    +		files = remaining
    +	}
    +	if l.MaxAge > 0 {
    +		diff := time.Duration(int64(24*time.Hour) * int64(l.MaxAge))
    +		cutoff := currentTime().Add(-1 * diff)
    +
    +		var remaining []logInfo
    +		for _, f := range files {
    +			if f.timestamp.Before(cutoff) {
    +				remove = append(remove, f)
    +			} else {
    +				remaining = append(remaining, f)
    +			}
    +		}
    +		files = remaining
    +	}
    +
    +	if l.Compress {
    +		for _, f := range files {
    +			if !strings.HasSuffix(f.Name(), compressSuffix) {
    +				compress = append(compress, f)
    +			}
    +		}
    +	}
    +
    +	for _, f := range remove {
    +		errRemove := os.Remove(filepath.Join(l.dir(), f.Name()))
    +		if err == nil && errRemove != nil {
    +			err = errRemove
    +		}
    +	}
    +	for _, f := range compress {
    +		fn := filepath.Join(l.dir(), f.Name())
    +		errCompress := compressLogFile(fn, fn+compressSuffix)
    +		if err == nil && errCompress != nil {
    +			err = errCompress
    +		}
    +	}
    +
    +	return err
    +}
    +
    +// millRun runs in a goroutine to manage post-rotation compression and removal
    +// of old log files.
    +func (l *Logger) millRun() {
    +	for range l.millCh {
    +		// what am I going to do, log this?
    +		_ = l.millRunOnce()
    +	}
    +}
    +
    +// mill performs post-rotation compression and removal of stale log files,
    +// starting the mill goroutine if necessary.
    +func (l *Logger) mill() {
    +	l.startMill.Do(func() {
    +		l.millCh = make(chan bool, 1)
    +		go l.millRun()
    +	})
    +	select {
    +	case l.millCh <- true:
    +	default:
    +	}
    +}
    +
    +// oldLogFiles returns the list of backup log files stored in the same
    +// directory as the current log file, sorted by ModTime
    +func (l *Logger) oldLogFiles() ([]logInfo, error) {
    +	files, err := ioutil.ReadDir(l.dir())
    +	if err != nil {
    +		return nil, fmt.Errorf("can't read log file directory: %s", err)
    +	}
    +	logFiles := []logInfo{}
    +
    +	prefix, ext := l.prefixAndExt()
    +
    +	for _, f := range files {
    +		if f.IsDir() {
    +			continue
    +		}
    +		if t, err := l.timeFromName(f.Name(), prefix, ext); err == nil {
    +			logFiles = append(logFiles, logInfo{t, f})
    +			continue
    +		}
    +		if t, err := l.timeFromName(f.Name(), prefix, ext+compressSuffix); err == nil {
    +			logFiles = append(logFiles, logInfo{t, f})
    +			continue
    +		}
    +		// error parsing means that the suffix at the end was not generated
    +		// by lumberjack, and therefore it's not a backup file.
    +	}
    +
    +	sort.Sort(byFormatTime(logFiles))
    +
    +	return logFiles, nil
    +}
    +
    +// timeFromName extracts the formatted time from the filename by stripping off
    +// the filename's prefix and extension. This prevents someone's filename from
    +// confusing time.parse.
    +func (l *Logger) timeFromName(filename, prefix, ext string) (time.Time, error) {
    +	if !strings.HasPrefix(filename, prefix) {
    +		return time.Time{}, errors.New("mismatched prefix")
    +	}
    +	if !strings.HasSuffix(filename, ext) {
    +		return time.Time{}, errors.New("mismatched extension")
    +	}
    +	ts := filename[len(prefix) : len(filename)-len(ext)]
    +	return time.Parse(backupTimeFormat, ts)
    +}
    +
    +// max returns the maximum size in bytes of log files before rolling.
    +func (l *Logger) max() int64 {
    +	if l.MaxSize == 0 {
    +		return int64(defaultMaxSize * megabyte)
    +	}
    +	return int64(l.MaxSize) * int64(megabyte)
    +}
    +
    +// dir returns the directory for the current filename.
    +func (l *Logger) dir() string {
    +	return filepath.Dir(l.filename())
    +}
    +
    +// prefixAndExt returns the filename part and extension part from the Logger's
    +// filename.
    +func (l *Logger) prefixAndExt() (prefix, ext string) {
    +	filename := filepath.Base(l.filename())
    +	ext = filepath.Ext(filename)
    +	prefix = filename[:len(filename)-len(ext)] + "-"
    +	return prefix, ext
    +}
    +
    +// compressLogFile compresses the given log file, removing the
    +// uncompressed log file if successful.
    +func compressLogFile(src, dst string) (err error) {
    +	f, err := os.Open(src)
    +	if err != nil {
    +		return fmt.Errorf("failed to open log file: %v", err)
    +	}
    +	defer f.Close()
    +
    +	fi, err := osStat(src)
    +	if err != nil {
    +		return fmt.Errorf("failed to stat log file: %v", err)
    +	}
    +
    +	if err := chown(dst, fi); err != nil {
    +		return fmt.Errorf("failed to chown compressed log file: %v", err)
    +	}
    +
    +	// If this file already exists, we presume it was created by
    +	// a previous attempt to compress the log file.
    +	gzf, err := os.OpenFile(dst, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, fi.Mode())
    +	if err != nil {
    +		return fmt.Errorf("failed to open compressed log file: %v", err)
    +	}
    +	defer gzf.Close()
    +
    +	gz := gzip.NewWriter(gzf)
    +
    +	defer func() {
    +		if err != nil {
    +			os.Remove(dst)
    +			err = fmt.Errorf("failed to compress log file: %v", err)
    +		}
    +	}()
    +
    +	if _, err := io.Copy(gz, f); err != nil {
    +		return err
    +	}
    +	if err := gz.Close(); err != nil {
    +		return err
    +	}
    +	if err := gzf.Close(); err != nil {
    +		return err
    +	}
    +
    +	if err := f.Close(); err != nil {
    +		return err
    +	}
    +	if err := os.Remove(src); err != nil {
    +		return err
    +	}
    +
    +	return nil
    +}
    +
    +// logInfo is a convenience struct to return the filename and its embedded
    +// timestamp.
    +type logInfo struct {
    +	timestamp time.Time
    +	os.FileInfo
    +}
    +
    +// byFormatTime sorts by newest time formatted in the name.
    +type byFormatTime []logInfo
    +
    +func (b byFormatTime) Less(i, j int) bool {
    +	return b[i].timestamp.After(b[j].timestamp)
    +}
    +
    +func (b byFormatTime) Swap(i, j int) {
    +	b[i], b[j] = b[j], b[i]
    +}
    +
    +func (b byFormatTime) Len() int {
    +	return len(b)
    +}
    diff --git a/vendor/gopkg.in/warnings.v0/LICENSE b/vendor/gopkg.in/warnings.v0/LICENSE
    new file mode 100644
    index 000000000..d65f7e9d8
    --- /dev/null
    +++ b/vendor/gopkg.in/warnings.v0/LICENSE
    @@ -0,0 +1,24 @@
    +Copyright (c) 2016 Péter Surányi.
    +
    +Redistribution and use in source and binary forms, with or without
    +modification, are permitted provided that the following conditions are
    +met:
    +
    +   * Redistributions of source code must retain the above copyright
    +notice, this list of conditions and the following disclaimer.
    +   * Redistributions in binary form must reproduce the above
    +copyright notice, this list of conditions and the following disclaimer
    +in the documentation and/or other materials provided with the
    +distribution.
    +
    +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
    +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
    +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
    +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
    +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
    +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
    +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
    +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
    +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
    +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
    +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
    diff --git a/vendor/gopkg.in/warnings.v0/README b/vendor/gopkg.in/warnings.v0/README
    new file mode 100644
    index 000000000..974212ba1
    --- /dev/null
    +++ b/vendor/gopkg.in/warnings.v0/README
    @@ -0,0 +1,77 @@
    +Package warnings implements error handling with non-fatal errors (warnings).
    +
    +import path:   "gopkg.in/warnings.v0"
    +package docs:  https://godoc.org/gopkg.in/warnings.v0 
    +issues:        https://github.com/go-warnings/warnings/issues
    +pull requests: https://github.com/go-warnings/warnings/pulls
    +
    +A recurring pattern in Go programming is the following:
    +
    + func myfunc(params) error {
    +     if err := doSomething(...); err != nil {
    +         return err
    +     }
    +     if err := doSomethingElse(...); err != nil {
    +         return err
    +     }
    +     if ok := doAnotherThing(...); !ok {
    +         return errors.New("my error")
    +     }
    +     ...
    +     return nil
    + }
    +
    +This pattern allows interrupting the flow on any received error. But what if
    +there are errors that should be noted but still not fatal, for which the flow
    +should not be interrupted? Implementing such logic at each if statement would
    +make the code complex and the flow much harder to follow.
    +
    +Package warnings provides the Collector type and a clean and simple pattern
    +for achieving such logic. The Collector takes care of deciding when to break
    +the flow and when to continue, collecting any non-fatal errors (warnings)
    +along the way. The only requirement is that fatal and non-fatal errors can be
    +distinguished programmatically; that is a function such as
    +
    + IsFatal(error) bool
    +
    +must be implemented. The following is an example of what the above snippet
    +could look like using the warnings package:
    +
    + import "gopkg.in/warnings.v0"
    +
    + func isFatal(err error) bool {
    +     _, ok := err.(WarningType)
    +     return !ok
    + }
    +
    + func myfunc(params) error {
    +     c := warnings.NewCollector(isFatal)
    +     c.FatalWithWarnings = true
    +     if err := c.Collect(doSomething()); err != nil {
    +         return err
    +     }
    +     if err := c.Collect(doSomethingElse(...)); err != nil {
    +         return err
    +     }
    +     if ok := doAnotherThing(...); !ok {
    +         if err := c.Collect(errors.New("my error")); err != nil {
    +             return err
    +         }
    +     }
    +     ...
    +     return c.Done()
    + }
    +
    +For an example of a non-trivial code base using this library, see
    +gopkg.in/gcfg.v1
    +
    +Rules for using warnings
    +
    + - ensure that warnings are programmatically distinguishable from fatal
    +   errors (i.e. implement an isFatal function and any necessary error types)
    + - ensure that there is a single Collector instance for a call of each
    +   exported function
    + - ensure that all errors (fatal or warning) are fed through Collect
    + - ensure that every time an error is returned, it is one returned by a
    +   Collector (from Collect or Done)
    + - ensure that Collect is never called after Done
    diff --git a/vendor/gopkg.in/warnings.v0/warnings.go b/vendor/gopkg.in/warnings.v0/warnings.go
    new file mode 100644
    index 000000000..b849d1e3d
    --- /dev/null
    +++ b/vendor/gopkg.in/warnings.v0/warnings.go
    @@ -0,0 +1,194 @@
    +// Package warnings implements error handling with non-fatal errors (warnings).
    +//
    +// A recurring pattern in Go programming is the following:
    +//
    +//  func myfunc(params) error {
    +//      if err := doSomething(...); err != nil {
    +//          return err
    +//      }
    +//      if err := doSomethingElse(...); err != nil {
    +//          return err
    +//      }
    +//      if ok := doAnotherThing(...); !ok {
    +//          return errors.New("my error")
    +//      }
    +//      ...
    +//      return nil
    +//  }
    +//
    +// This pattern allows interrupting the flow on any received error. But what if
    +// there are errors that should be noted but still not fatal, for which the flow
    +// should not be interrupted? Implementing such logic at each if statement would
    +// make the code complex and the flow much harder to follow.
    +//
    +// Package warnings provides the Collector type and a clean and simple pattern
    +// for achieving such logic. The Collector takes care of deciding when to break
    +// the flow and when to continue, collecting any non-fatal errors (warnings)
    +// along the way. The only requirement is that fatal and non-fatal errors can be
    +// distinguished programmatically; that is a function such as
    +//
    +//  IsFatal(error) bool
    +//
    +// must be implemented. The following is an example of what the above snippet
    +// could look like using the warnings package:
    +//
    +//  import "gopkg.in/warnings.v0"
    +//
    +//  func isFatal(err error) bool {
    +//      _, ok := err.(WarningType)
    +//      return !ok
    +//  }
    +//
    +//  func myfunc(params) error {
    +//      c := warnings.NewCollector(isFatal)
    +//      c.FatalWithWarnings = true
    +//      if err := c.Collect(doSomething()); err != nil {
    +//          return err
    +//      }
    +//      if err := c.Collect(doSomethingElse(...)); err != nil {
    +//          return err
    +//      }
    +//      if ok := doAnotherThing(...); !ok {
    +//          if err := c.Collect(errors.New("my error")); err != nil {
    +//              return err
    +//          }
    +//      }
    +//      ...
    +//      return c.Done()
    +//  }
    +//
    +// For an example of a non-trivial code base using this library, see
    +// gopkg.in/gcfg.v1
    +//
    +// Rules for using warnings
    +//
    +//  - ensure that warnings are programmatically distinguishable from fatal
    +//    errors (i.e. implement an isFatal function and any necessary error types)
    +//  - ensure that there is a single Collector instance for a call of each
    +//    exported function
    +//  - ensure that all errors (fatal or warning) are fed through Collect
    +//  - ensure that every time an error is returned, it is one returned by a
    +//    Collector (from Collect or Done)
    +//  - ensure that Collect is never called after Done
    +//
    +// TODO
    +//
    +//  - optionally limit the number of warnings (e.g. stop after 20 warnings) (?)
    +//  - consider interaction with contexts
    +//  - go vet-style invocations verifier
    +//  - semi-automatic code converter
    +//
    +package warnings // import "gopkg.in/warnings.v0"
    +
    +import (
    +	"bytes"
    +	"fmt"
    +)
    +
    +// List holds a collection of warnings and optionally one fatal error.
    +type List struct {
    +	Warnings []error
    +	Fatal    error
    +}
    +
    +// Error implements the error interface.
    +func (l List) Error() string {
    +	b := bytes.NewBuffer(nil)
    +	if l.Fatal != nil {
    +		fmt.Fprintln(b, "fatal:")
    +		fmt.Fprintln(b, l.Fatal)
    +	}
    +	switch len(l.Warnings) {
    +	case 0:
    +	// nop
    +	case 1:
    +		fmt.Fprintln(b, "warning:")
    +	default:
    +		fmt.Fprintln(b, "warnings:")
    +	}
    +	for _, err := range l.Warnings {
    +		fmt.Fprintln(b, err)
    +	}
    +	return b.String()
    +}
    +
    +// A Collector collects errors up to the first fatal error.
    +type Collector struct {
    +	// IsFatal distinguishes between warnings and fatal errors.
    +	IsFatal func(error) bool
    +	// FatalWithWarnings set to true means that a fatal error is returned as
    +	// a List together with all warnings so far. The default behavior is to
    +	// only return the fatal error and discard any warnings that have been
    +	// collected.
    +	FatalWithWarnings bool
    +
    +	l    List
    +	done bool
    +}
    +
    +// NewCollector returns a new Collector; it uses isFatal to distinguish between
    +// warnings and fatal errors.
    +func NewCollector(isFatal func(error) bool) *Collector {
    +	return &Collector{IsFatal: isFatal}
    +}
    +
    +// Collect collects a single error (warning or fatal). It returns nil if
    +// collection can continue (only warnings so far), or otherwise the errors
    +// collected. Collect mustn't be called after the first fatal error or after
    +// Done has been called.
    +func (c *Collector) Collect(err error) error {
    +	if c.done {
    +		panic("warnings.Collector already done")
    +	}
    +	if err == nil {
    +		return nil
    +	}
    +	if c.IsFatal(err) {
    +		c.done = true
    +		c.l.Fatal = err
    +	} else {
    +		c.l.Warnings = append(c.l.Warnings, err)
    +	}
    +	if c.l.Fatal != nil {
    +		return c.erorr()
    +	}
    +	return nil
    +}
    +
    +// Done ends collection and returns the collected error(s).
    +func (c *Collector) Done() error {
    +	c.done = true
    +	return c.erorr()
    +}
    +
    +func (c *Collector) erorr() error {
    +	if !c.FatalWithWarnings && c.l.Fatal != nil {
    +		return c.l.Fatal
    +	}
    +	if c.l.Fatal == nil && len(c.l.Warnings) == 0 {
    +		return nil
    +	}
    +	// Note that a single warning is also returned as a List. This is to make it
    +	// easier to determine fatal-ness of the returned error.
    +	return c.l
    +}
    +
    +// FatalOnly returns the fatal error, if any, **in an error returned by a
    +// Collector**. It returns nil if and only if err is nil or err is a List
    +// with err.Fatal == nil.
    +func FatalOnly(err error) error {
    +	l, ok := err.(List)
    +	if !ok {
    +		return err
    +	}
    +	return l.Fatal
    +}
    +
    +// WarningsOnly returns the warnings **in an error returned by a Collector**.
    +func WarningsOnly(err error) []error {
    +	l, ok := err.(List)
    +	if !ok {
    +		return nil
    +	}
    +	return l.Warnings
    +}
    diff --git a/vendor/k8s.io/api/admission/v1/doc.go b/vendor/k8s.io/api/admission/v1/doc.go
    index e7df9f629..cab652821 100644
    --- a/vendor/k8s.io/api/admission/v1/doc.go
    +++ b/vendor/k8s.io/api/admission/v1/doc.go
    @@ -20,4 +20,4 @@ limitations under the License.
     // +k8s:prerelease-lifecycle-gen=true
     // +groupName=admission.k8s.io
     
    -package v1 // import "k8s.io/api/admission/v1"
    +package v1
    diff --git a/vendor/k8s.io/api/admission/v1beta1/doc.go b/vendor/k8s.io/api/admission/v1beta1/doc.go
    index a5669022a..447495684 100644
    --- a/vendor/k8s.io/api/admission/v1beta1/doc.go
    +++ b/vendor/k8s.io/api/admission/v1beta1/doc.go
    @@ -21,4 +21,4 @@ limitations under the License.
     
     // +groupName=admission.k8s.io
     
    -package v1beta1 // import "k8s.io/api/admission/v1beta1"
    +package v1beta1
    diff --git a/vendor/k8s.io/api/admissionregistration/v1/doc.go b/vendor/k8s.io/api/admissionregistration/v1/doc.go
    index ca0086188..ec0ebb9c4 100644
    --- a/vendor/k8s.io/api/admissionregistration/v1/doc.go
    +++ b/vendor/k8s.io/api/admissionregistration/v1/doc.go
    @@ -24,4 +24,4 @@ limitations under the License.
     // AdmissionConfiguration and AdmissionPluginConfiguration are legacy static admission plugin configuration
     // MutatingWebhookConfiguration and ValidatingWebhookConfiguration are for the
     // new dynamic admission controller configuration.
    -package v1 // import "k8s.io/api/admissionregistration/v1"
    +package v1
    diff --git a/vendor/k8s.io/api/admissionregistration/v1alpha1/doc.go b/vendor/k8s.io/api/admissionregistration/v1alpha1/doc.go
    index 98066211d..344af9ae0 100644
    --- a/vendor/k8s.io/api/admissionregistration/v1alpha1/doc.go
    +++ b/vendor/k8s.io/api/admissionregistration/v1alpha1/doc.go
    @@ -21,4 +21,4 @@ limitations under the License.
     // +groupName=admissionregistration.k8s.io
     
     // Package v1alpha1 is the v1alpha1 version of the API.
    -package v1alpha1 // import "k8s.io/api/admissionregistration/v1alpha1"
    +package v1alpha1
    diff --git a/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.proto b/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.proto
    index 88344ce87..d23f21cc8 100644
    --- a/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.proto
    +++ b/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.proto
    @@ -272,9 +272,9 @@ message MatchResources {
       // +optional
       optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector namespaceSelector = 1;
     
    -  // ObjectSelector decides whether to run the validation based on if the
    +  // ObjectSelector decides whether to run the policy based on if the
       // object has matching labels. objectSelector is evaluated against both
    -  // the oldObject and newObject that would be sent to the cel validation, and
    +  // the oldObject and newObject that would be sent to the policy's expression (CEL), and
       // is considered to match if either object matches the selector. A null
       // object (oldObject in the case of create, or newObject in the case of
       // delete) or an object that cannot have labels (like a
    @@ -286,13 +286,13 @@ message MatchResources {
       // +optional
       optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector objectSelector = 2;
     
    -  // ResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy matches.
    +  // ResourceRules describes what operations on what resources/subresources the admission policy matches.
       // The policy cares about an operation if it matches _any_ Rule.
       // +listType=atomic
       // +optional
       repeated NamedRuleWithOperations resourceRules = 3;
     
    -  // ExcludeResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy should not care about.
    +  // ExcludeResourceRules describes what operations on what resources/subresources the policy should not care about.
       // The exclude rules take precedence over include rules (if a resource matches both, it is excluded)
       // +listType=atomic
       // +optional
    @@ -304,12 +304,13 @@ message MatchResources {
       // - Exact: match a request only if it exactly matches a specified rule.
       // For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1,
       // but "rules" only included `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]`,
    -  // a request to apps/v1beta1 or extensions/v1beta1 would not be sent to the ValidatingAdmissionPolicy.
    +  // the admission policy does not consider requests to apps/v1beta1 or extensions/v1beta1 API groups.
       //
       // - Equivalent: match a request if modifies a resource listed in rules, even via another API group or version.
       // For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1,
       // and "rules" only included `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]`,
    -  // a request to apps/v1beta1 or extensions/v1beta1 would be converted to apps/v1 and sent to the ValidatingAdmissionPolicy.
    +  // the admission policy **does** consider requests made to apps/v1beta1 or extensions/v1beta1
    +  // API groups. The API server translates the request to a matched resource API if necessary.
       //
       // Defaults to "Equivalent"
       // +optional
    diff --git a/vendor/k8s.io/api/admissionregistration/v1alpha1/types.go b/vendor/k8s.io/api/admissionregistration/v1alpha1/types.go
    index ee50fbe2d..f183498a5 100644
    --- a/vendor/k8s.io/api/admissionregistration/v1alpha1/types.go
    +++ b/vendor/k8s.io/api/admissionregistration/v1alpha1/types.go
    @@ -56,9 +56,9 @@ const (
     type FailurePolicyType string
     
     const (
    -	// Ignore means that an error calling the webhook is ignored.
    +	// Ignore means that an error calling the admission webhook or admission policy is ignored.
     	Ignore FailurePolicyType = "Ignore"
    -	// Fail means that an error calling the webhook causes the admission to fail.
    +	// Fail means that an error calling the admission webhook or admission policy causes resource admission to fail.
     	Fail FailurePolicyType = "Fail"
     )
     
    @@ -67,9 +67,11 @@ const (
     type MatchPolicyType string
     
     const (
    -	// Exact means requests should only be sent to the webhook if they exactly match a given rule.
    +	// Exact means requests should only be sent to the admission webhook or admission policy if they exactly match a given rule.
     	Exact MatchPolicyType = "Exact"
    -	// Equivalent means requests should be sent to the webhook if they modify a resource listed in rules via another API group or version.
    +	// Equivalent means requests should be sent to the admission webhook or admission policy if they modify a resource listed
    +	// in rules via an equivalent API group or version. For example, `autoscaling/v1` and `autoscaling/v2`
    +	// HorizontalPodAutoscalers are equivalent: the same set of resources appear via both APIs.
     	Equivalent MatchPolicyType = "Equivalent"
     )
     
    @@ -577,9 +579,9 @@ type MatchResources struct {
     	// Default to the empty LabelSelector, which matches everything.
     	// +optional
     	NamespaceSelector *metav1.LabelSelector `json:"namespaceSelector,omitempty" protobuf:"bytes,1,opt,name=namespaceSelector"`
    -	// ObjectSelector decides whether to run the validation based on if the
    +	// ObjectSelector decides whether to run the policy based on if the
     	// object has matching labels. objectSelector is evaluated against both
    -	// the oldObject and newObject that would be sent to the cel validation, and
    +	// the oldObject and newObject that would be sent to the policy's expression (CEL), and
     	// is considered to match if either object matches the selector. A null
     	// object (oldObject in the case of create, or newObject in the case of
     	// delete) or an object that cannot have labels (like a
    @@ -590,12 +592,12 @@ type MatchResources struct {
     	// Default to the empty LabelSelector, which matches everything.
     	// +optional
     	ObjectSelector *metav1.LabelSelector `json:"objectSelector,omitempty" protobuf:"bytes,2,opt,name=objectSelector"`
    -	// ResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy matches.
    +	// ResourceRules describes what operations on what resources/subresources the admission policy matches.
     	// The policy cares about an operation if it matches _any_ Rule.
     	// +listType=atomic
     	// +optional
     	ResourceRules []NamedRuleWithOperations `json:"resourceRules,omitempty" protobuf:"bytes,3,rep,name=resourceRules"`
    -	// ExcludeResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy should not care about.
    +	// ExcludeResourceRules describes what operations on what resources/subresources the policy should not care about.
     	// The exclude rules take precedence over include rules (if a resource matches both, it is excluded)
     	// +listType=atomic
     	// +optional
    @@ -606,12 +608,13 @@ type MatchResources struct {
     	// - Exact: match a request only if it exactly matches a specified rule.
     	// For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1,
     	// but "rules" only included `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]`,
    -	// a request to apps/v1beta1 or extensions/v1beta1 would not be sent to the ValidatingAdmissionPolicy.
    +	// the admission policy does not consider requests to apps/v1beta1 or extensions/v1beta1 API groups.
     	//
     	// - Equivalent: match a request if modifies a resource listed in rules, even via another API group or version.
     	// For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1,
     	// and "rules" only included `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]`,
    -	// a request to apps/v1beta1 or extensions/v1beta1 would be converted to apps/v1 and sent to the ValidatingAdmissionPolicy.
    +	// the admission policy **does** consider requests made to apps/v1beta1 or extensions/v1beta1
    +	// API groups. The API server translates the request to a matched resource API if necessary.
     	//
     	// Defaults to "Equivalent"
     	// +optional
    diff --git a/vendor/k8s.io/api/admissionregistration/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/admissionregistration/v1alpha1/types_swagger_doc_generated.go
    index 32222a81b..116e56e06 100644
    --- a/vendor/k8s.io/api/admissionregistration/v1alpha1/types_swagger_doc_generated.go
    +++ b/vendor/k8s.io/api/admissionregistration/v1alpha1/types_swagger_doc_generated.go
    @@ -68,10 +68,10 @@ func (JSONPatch) SwaggerDoc() map[string]string {
     var map_MatchResources = map[string]string{
     	"":                     "MatchResources decides whether to run the admission control policy on an object based on whether it meets the match criteria. The exclude rules take precedence over include rules (if a resource matches both, it is excluded)",
     	"namespaceSelector":    "NamespaceSelector decides whether to run the admission control policy on an object based on whether the namespace for that object matches the selector. If the object itself is a namespace, the matching is performed on object.metadata.labels. If the object is another cluster scoped resource, it never skips the policy.\n\nFor example, to run the webhook on any objects whose namespace is not associated with \"runlevel\" of \"0\" or \"1\";  you will set the selector as follows: \"namespaceSelector\": {\n  \"matchExpressions\": [\n    {\n      \"key\": \"runlevel\",\n      \"operator\": \"NotIn\",\n      \"values\": [\n        \"0\",\n        \"1\"\n      ]\n    }\n  ]\n}\n\nIf instead you want to only run the policy on any objects whose namespace is associated with the \"environment\" of \"prod\" or \"staging\"; you will set the selector as follows: \"namespaceSelector\": {\n  \"matchExpressions\": [\n    {\n      \"key\": \"environment\",\n      \"operator\": \"In\",\n      \"values\": [\n        \"prod\",\n        \"staging\"\n      ]\n    }\n  ]\n}\n\nSee https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ for more examples of label selectors.\n\nDefault to the empty LabelSelector, which matches everything.",
    -	"objectSelector":       "ObjectSelector decides whether to run the validation based on if the object has matching labels. objectSelector is evaluated against both the oldObject and newObject that would be sent to the cel validation, and is considered to match if either object matches the selector. A null object (oldObject in the case of create, or newObject in the case of delete) or an object that cannot have labels (like a DeploymentRollback or a PodProxyOptions object) is not considered to match. Use the object selector only if the webhook is opt-in, because end users may skip the admission webhook by setting the labels. Default to the empty LabelSelector, which matches everything.",
    -	"resourceRules":        "ResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy matches. The policy cares about an operation if it matches _any_ Rule.",
    -	"excludeResourceRules": "ExcludeResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy should not care about. The exclude rules take precedence over include rules (if a resource matches both, it is excluded)",
    -	"matchPolicy":          "matchPolicy defines how the \"MatchResources\" list is used to match incoming requests. Allowed values are \"Exact\" or \"Equivalent\".\n\n- Exact: match a request only if it exactly matches a specified rule. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, but \"rules\" only included `apiGroups:[\"apps\"], apiVersions:[\"v1\"], resources: [\"deployments\"]`, a request to apps/v1beta1 or extensions/v1beta1 would not be sent to the ValidatingAdmissionPolicy.\n\n- Equivalent: match a request if modifies a resource listed in rules, even via another API group or version. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, and \"rules\" only included `apiGroups:[\"apps\"], apiVersions:[\"v1\"], resources: [\"deployments\"]`, a request to apps/v1beta1 or extensions/v1beta1 would be converted to apps/v1 and sent to the ValidatingAdmissionPolicy.\n\nDefaults to \"Equivalent\"",
    +	"objectSelector":       "ObjectSelector decides whether to run the policy based on if the object has matching labels. objectSelector is evaluated against both the oldObject and newObject that would be sent to the policy's expression (CEL), and is considered to match if either object matches the selector. A null object (oldObject in the case of create, or newObject in the case of delete) or an object that cannot have labels (like a DeploymentRollback or a PodProxyOptions object) is not considered to match. Use the object selector only if the webhook is opt-in, because end users may skip the admission webhook by setting the labels. Default to the empty LabelSelector, which matches everything.",
    +	"resourceRules":        "ResourceRules describes what operations on what resources/subresources the admission policy matches. The policy cares about an operation if it matches _any_ Rule.",
    +	"excludeResourceRules": "ExcludeResourceRules describes what operations on what resources/subresources the policy should not care about. The exclude rules take precedence over include rules (if a resource matches both, it is excluded)",
    +	"matchPolicy":          "matchPolicy defines how the \"MatchResources\" list is used to match incoming requests. Allowed values are \"Exact\" or \"Equivalent\".\n\n- Exact: match a request only if it exactly matches a specified rule. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, but \"rules\" only included `apiGroups:[\"apps\"], apiVersions:[\"v1\"], resources: [\"deployments\"]`, the admission policy does not consider requests to apps/v1beta1 or extensions/v1beta1 API groups.\n\n- Equivalent: match a request if modifies a resource listed in rules, even via another API group or version. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, and \"rules\" only included `apiGroups:[\"apps\"], apiVersions:[\"v1\"], resources: [\"deployments\"]`, the admission policy **does** consider requests made to apps/v1beta1 or extensions/v1beta1 API groups. The API server translates the request to a matched resource API if necessary.\n\nDefaults to \"Equivalent\"",
     }
     
     func (MatchResources) SwaggerDoc() map[string]string {
    diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/doc.go b/vendor/k8s.io/api/admissionregistration/v1beta1/doc.go
    index 0095cb257..40d831573 100644
    --- a/vendor/k8s.io/api/admissionregistration/v1beta1/doc.go
    +++ b/vendor/k8s.io/api/admissionregistration/v1beta1/doc.go
    @@ -24,4 +24,4 @@ limitations under the License.
     // AdmissionConfiguration and AdmissionPluginConfiguration are legacy static admission plugin configuration
     // MutatingWebhookConfiguration and ValidatingWebhookConfiguration are for the
     // new dynamic admission controller configuration.
    -package v1beta1 // import "k8s.io/api/admissionregistration/v1beta1"
    +package v1beta1
    diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/generated.pb.go b/vendor/k8s.io/api/admissionregistration/v1beta1/generated.pb.go
    index 261ae41bd..bf1ae5948 100644
    --- a/vendor/k8s.io/api/admissionregistration/v1beta1/generated.pb.go
    +++ b/vendor/k8s.io/api/admissionregistration/v1beta1/generated.pb.go
    @@ -25,6 +25,7 @@ import (
     	io "io"
     
     	proto "github.com/gogo/protobuf/proto"
    +	k8s_io_api_admissionregistration_v1 "k8s.io/api/admissionregistration/v1"
     	v11 "k8s.io/api/admissionregistration/v1"
     	k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    @@ -46,10 +47,38 @@ var _ = math.Inf
     // proto package needs to be updated.
     const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
     
    +func (m *ApplyConfiguration) Reset()      { *m = ApplyConfiguration{} }
    +func (*ApplyConfiguration) ProtoMessage() {}
    +func (*ApplyConfiguration) Descriptor() ([]byte, []int) {
    +	return fileDescriptor_7f7c65a4f012fb19, []int{0}
    +}
    +func (m *ApplyConfiguration) XXX_Unmarshal(b []byte) error {
    +	return m.Unmarshal(b)
    +}
    +func (m *ApplyConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    +	b = b[:cap(b)]
    +	n, err := m.MarshalToSizedBuffer(b)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return b[:n], nil
    +}
    +func (m *ApplyConfiguration) XXX_Merge(src proto.Message) {
    +	xxx_messageInfo_ApplyConfiguration.Merge(m, src)
    +}
    +func (m *ApplyConfiguration) XXX_Size() int {
    +	return m.Size()
    +}
    +func (m *ApplyConfiguration) XXX_DiscardUnknown() {
    +	xxx_messageInfo_ApplyConfiguration.DiscardUnknown(m)
    +}
    +
    +var xxx_messageInfo_ApplyConfiguration proto.InternalMessageInfo
    +
     func (m *AuditAnnotation) Reset()      { *m = AuditAnnotation{} }
     func (*AuditAnnotation) ProtoMessage() {}
     func (*AuditAnnotation) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_7f7c65a4f012fb19, []int{0}
    +	return fileDescriptor_7f7c65a4f012fb19, []int{1}
     }
     func (m *AuditAnnotation) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -77,7 +106,7 @@ var xxx_messageInfo_AuditAnnotation proto.InternalMessageInfo
     func (m *ExpressionWarning) Reset()      { *m = ExpressionWarning{} }
     func (*ExpressionWarning) ProtoMessage() {}
     func (*ExpressionWarning) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_7f7c65a4f012fb19, []int{1}
    +	return fileDescriptor_7f7c65a4f012fb19, []int{2}
     }
     func (m *ExpressionWarning) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -102,10 +131,38 @@ func (m *ExpressionWarning) XXX_DiscardUnknown() {
     
     var xxx_messageInfo_ExpressionWarning proto.InternalMessageInfo
     
    +func (m *JSONPatch) Reset()      { *m = JSONPatch{} }
    +func (*JSONPatch) ProtoMessage() {}
    +func (*JSONPatch) Descriptor() ([]byte, []int) {
    +	return fileDescriptor_7f7c65a4f012fb19, []int{3}
    +}
    +func (m *JSONPatch) XXX_Unmarshal(b []byte) error {
    +	return m.Unmarshal(b)
    +}
    +func (m *JSONPatch) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    +	b = b[:cap(b)]
    +	n, err := m.MarshalToSizedBuffer(b)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return b[:n], nil
    +}
    +func (m *JSONPatch) XXX_Merge(src proto.Message) {
    +	xxx_messageInfo_JSONPatch.Merge(m, src)
    +}
    +func (m *JSONPatch) XXX_Size() int {
    +	return m.Size()
    +}
    +func (m *JSONPatch) XXX_DiscardUnknown() {
    +	xxx_messageInfo_JSONPatch.DiscardUnknown(m)
    +}
    +
    +var xxx_messageInfo_JSONPatch proto.InternalMessageInfo
    +
     func (m *MatchCondition) Reset()      { *m = MatchCondition{} }
     func (*MatchCondition) ProtoMessage() {}
     func (*MatchCondition) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_7f7c65a4f012fb19, []int{2}
    +	return fileDescriptor_7f7c65a4f012fb19, []int{4}
     }
     func (m *MatchCondition) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -133,7 +190,7 @@ var xxx_messageInfo_MatchCondition proto.InternalMessageInfo
     func (m *MatchResources) Reset()      { *m = MatchResources{} }
     func (*MatchResources) ProtoMessage() {}
     func (*MatchResources) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_7f7c65a4f012fb19, []int{3}
    +	return fileDescriptor_7f7c65a4f012fb19, []int{5}
     }
     func (m *MatchResources) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -158,10 +215,178 @@ func (m *MatchResources) XXX_DiscardUnknown() {
     
     var xxx_messageInfo_MatchResources proto.InternalMessageInfo
     
    +func (m *MutatingAdmissionPolicy) Reset()      { *m = MutatingAdmissionPolicy{} }
    +func (*MutatingAdmissionPolicy) ProtoMessage() {}
    +func (*MutatingAdmissionPolicy) Descriptor() ([]byte, []int) {
    +	return fileDescriptor_7f7c65a4f012fb19, []int{6}
    +}
    +func (m *MutatingAdmissionPolicy) XXX_Unmarshal(b []byte) error {
    +	return m.Unmarshal(b)
    +}
    +func (m *MutatingAdmissionPolicy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    +	b = b[:cap(b)]
    +	n, err := m.MarshalToSizedBuffer(b)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return b[:n], nil
    +}
    +func (m *MutatingAdmissionPolicy) XXX_Merge(src proto.Message) {
    +	xxx_messageInfo_MutatingAdmissionPolicy.Merge(m, src)
    +}
    +func (m *MutatingAdmissionPolicy) XXX_Size() int {
    +	return m.Size()
    +}
    +func (m *MutatingAdmissionPolicy) XXX_DiscardUnknown() {
    +	xxx_messageInfo_MutatingAdmissionPolicy.DiscardUnknown(m)
    +}
    +
    +var xxx_messageInfo_MutatingAdmissionPolicy proto.InternalMessageInfo
    +
    +func (m *MutatingAdmissionPolicyBinding) Reset()      { *m = MutatingAdmissionPolicyBinding{} }
    +func (*MutatingAdmissionPolicyBinding) ProtoMessage() {}
    +func (*MutatingAdmissionPolicyBinding) Descriptor() ([]byte, []int) {
    +	return fileDescriptor_7f7c65a4f012fb19, []int{7}
    +}
    +func (m *MutatingAdmissionPolicyBinding) XXX_Unmarshal(b []byte) error {
    +	return m.Unmarshal(b)
    +}
    +func (m *MutatingAdmissionPolicyBinding) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    +	b = b[:cap(b)]
    +	n, err := m.MarshalToSizedBuffer(b)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return b[:n], nil
    +}
    +func (m *MutatingAdmissionPolicyBinding) XXX_Merge(src proto.Message) {
    +	xxx_messageInfo_MutatingAdmissionPolicyBinding.Merge(m, src)
    +}
    +func (m *MutatingAdmissionPolicyBinding) XXX_Size() int {
    +	return m.Size()
    +}
    +func (m *MutatingAdmissionPolicyBinding) XXX_DiscardUnknown() {
    +	xxx_messageInfo_MutatingAdmissionPolicyBinding.DiscardUnknown(m)
    +}
    +
    +var xxx_messageInfo_MutatingAdmissionPolicyBinding proto.InternalMessageInfo
    +
    +func (m *MutatingAdmissionPolicyBindingList) Reset()      { *m = MutatingAdmissionPolicyBindingList{} }
    +func (*MutatingAdmissionPolicyBindingList) ProtoMessage() {}
    +func (*MutatingAdmissionPolicyBindingList) Descriptor() ([]byte, []int) {
    +	return fileDescriptor_7f7c65a4f012fb19, []int{8}
    +}
    +func (m *MutatingAdmissionPolicyBindingList) XXX_Unmarshal(b []byte) error {
    +	return m.Unmarshal(b)
    +}
    +func (m *MutatingAdmissionPolicyBindingList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    +	b = b[:cap(b)]
    +	n, err := m.MarshalToSizedBuffer(b)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return b[:n], nil
    +}
    +func (m *MutatingAdmissionPolicyBindingList) XXX_Merge(src proto.Message) {
    +	xxx_messageInfo_MutatingAdmissionPolicyBindingList.Merge(m, src)
    +}
    +func (m *MutatingAdmissionPolicyBindingList) XXX_Size() int {
    +	return m.Size()
    +}
    +func (m *MutatingAdmissionPolicyBindingList) XXX_DiscardUnknown() {
    +	xxx_messageInfo_MutatingAdmissionPolicyBindingList.DiscardUnknown(m)
    +}
    +
    +var xxx_messageInfo_MutatingAdmissionPolicyBindingList proto.InternalMessageInfo
    +
    +func (m *MutatingAdmissionPolicyBindingSpec) Reset()      { *m = MutatingAdmissionPolicyBindingSpec{} }
    +func (*MutatingAdmissionPolicyBindingSpec) ProtoMessage() {}
    +func (*MutatingAdmissionPolicyBindingSpec) Descriptor() ([]byte, []int) {
    +	return fileDescriptor_7f7c65a4f012fb19, []int{9}
    +}
    +func (m *MutatingAdmissionPolicyBindingSpec) XXX_Unmarshal(b []byte) error {
    +	return m.Unmarshal(b)
    +}
    +func (m *MutatingAdmissionPolicyBindingSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    +	b = b[:cap(b)]
    +	n, err := m.MarshalToSizedBuffer(b)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return b[:n], nil
    +}
    +func (m *MutatingAdmissionPolicyBindingSpec) XXX_Merge(src proto.Message) {
    +	xxx_messageInfo_MutatingAdmissionPolicyBindingSpec.Merge(m, src)
    +}
    +func (m *MutatingAdmissionPolicyBindingSpec) XXX_Size() int {
    +	return m.Size()
    +}
    +func (m *MutatingAdmissionPolicyBindingSpec) XXX_DiscardUnknown() {
    +	xxx_messageInfo_MutatingAdmissionPolicyBindingSpec.DiscardUnknown(m)
    +}
    +
    +var xxx_messageInfo_MutatingAdmissionPolicyBindingSpec proto.InternalMessageInfo
    +
    +func (m *MutatingAdmissionPolicyList) Reset()      { *m = MutatingAdmissionPolicyList{} }
    +func (*MutatingAdmissionPolicyList) ProtoMessage() {}
    +func (*MutatingAdmissionPolicyList) Descriptor() ([]byte, []int) {
    +	return fileDescriptor_7f7c65a4f012fb19, []int{10}
    +}
    +func (m *MutatingAdmissionPolicyList) XXX_Unmarshal(b []byte) error {
    +	return m.Unmarshal(b)
    +}
    +func (m *MutatingAdmissionPolicyList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    +	b = b[:cap(b)]
    +	n, err := m.MarshalToSizedBuffer(b)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return b[:n], nil
    +}
    +func (m *MutatingAdmissionPolicyList) XXX_Merge(src proto.Message) {
    +	xxx_messageInfo_MutatingAdmissionPolicyList.Merge(m, src)
    +}
    +func (m *MutatingAdmissionPolicyList) XXX_Size() int {
    +	return m.Size()
    +}
    +func (m *MutatingAdmissionPolicyList) XXX_DiscardUnknown() {
    +	xxx_messageInfo_MutatingAdmissionPolicyList.DiscardUnknown(m)
    +}
    +
    +var xxx_messageInfo_MutatingAdmissionPolicyList proto.InternalMessageInfo
    +
    +func (m *MutatingAdmissionPolicySpec) Reset()      { *m = MutatingAdmissionPolicySpec{} }
    +func (*MutatingAdmissionPolicySpec) ProtoMessage() {}
    +func (*MutatingAdmissionPolicySpec) Descriptor() ([]byte, []int) {
    +	return fileDescriptor_7f7c65a4f012fb19, []int{11}
    +}
    +func (m *MutatingAdmissionPolicySpec) XXX_Unmarshal(b []byte) error {
    +	return m.Unmarshal(b)
    +}
    +func (m *MutatingAdmissionPolicySpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    +	b = b[:cap(b)]
    +	n, err := m.MarshalToSizedBuffer(b)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return b[:n], nil
    +}
    +func (m *MutatingAdmissionPolicySpec) XXX_Merge(src proto.Message) {
    +	xxx_messageInfo_MutatingAdmissionPolicySpec.Merge(m, src)
    +}
    +func (m *MutatingAdmissionPolicySpec) XXX_Size() int {
    +	return m.Size()
    +}
    +func (m *MutatingAdmissionPolicySpec) XXX_DiscardUnknown() {
    +	xxx_messageInfo_MutatingAdmissionPolicySpec.DiscardUnknown(m)
    +}
    +
    +var xxx_messageInfo_MutatingAdmissionPolicySpec proto.InternalMessageInfo
    +
     func (m *MutatingWebhook) Reset()      { *m = MutatingWebhook{} }
     func (*MutatingWebhook) ProtoMessage() {}
     func (*MutatingWebhook) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_7f7c65a4f012fb19, []int{4}
    +	return fileDescriptor_7f7c65a4f012fb19, []int{12}
     }
     func (m *MutatingWebhook) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -189,7 +414,7 @@ var xxx_messageInfo_MutatingWebhook proto.InternalMessageInfo
     func (m *MutatingWebhookConfiguration) Reset()      { *m = MutatingWebhookConfiguration{} }
     func (*MutatingWebhookConfiguration) ProtoMessage() {}
     func (*MutatingWebhookConfiguration) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_7f7c65a4f012fb19, []int{5}
    +	return fileDescriptor_7f7c65a4f012fb19, []int{13}
     }
     func (m *MutatingWebhookConfiguration) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -217,7 +442,7 @@ var xxx_messageInfo_MutatingWebhookConfiguration proto.InternalMessageInfo
     func (m *MutatingWebhookConfigurationList) Reset()      { *m = MutatingWebhookConfigurationList{} }
     func (*MutatingWebhookConfigurationList) ProtoMessage() {}
     func (*MutatingWebhookConfigurationList) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_7f7c65a4f012fb19, []int{6}
    +	return fileDescriptor_7f7c65a4f012fb19, []int{14}
     }
     func (m *MutatingWebhookConfigurationList) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -242,10 +467,38 @@ func (m *MutatingWebhookConfigurationList) XXX_DiscardUnknown() {
     
     var xxx_messageInfo_MutatingWebhookConfigurationList proto.InternalMessageInfo
     
    +func (m *Mutation) Reset()      { *m = Mutation{} }
    +func (*Mutation) ProtoMessage() {}
    +func (*Mutation) Descriptor() ([]byte, []int) {
    +	return fileDescriptor_7f7c65a4f012fb19, []int{15}
    +}
    +func (m *Mutation) XXX_Unmarshal(b []byte) error {
    +	return m.Unmarshal(b)
    +}
    +func (m *Mutation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    +	b = b[:cap(b)]
    +	n, err := m.MarshalToSizedBuffer(b)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return b[:n], nil
    +}
    +func (m *Mutation) XXX_Merge(src proto.Message) {
    +	xxx_messageInfo_Mutation.Merge(m, src)
    +}
    +func (m *Mutation) XXX_Size() int {
    +	return m.Size()
    +}
    +func (m *Mutation) XXX_DiscardUnknown() {
    +	xxx_messageInfo_Mutation.DiscardUnknown(m)
    +}
    +
    +var xxx_messageInfo_Mutation proto.InternalMessageInfo
    +
     func (m *NamedRuleWithOperations) Reset()      { *m = NamedRuleWithOperations{} }
     func (*NamedRuleWithOperations) ProtoMessage() {}
     func (*NamedRuleWithOperations) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_7f7c65a4f012fb19, []int{7}
    +	return fileDescriptor_7f7c65a4f012fb19, []int{16}
     }
     func (m *NamedRuleWithOperations) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -273,7 +526,7 @@ var xxx_messageInfo_NamedRuleWithOperations proto.InternalMessageInfo
     func (m *ParamKind) Reset()      { *m = ParamKind{} }
     func (*ParamKind) ProtoMessage() {}
     func (*ParamKind) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_7f7c65a4f012fb19, []int{8}
    +	return fileDescriptor_7f7c65a4f012fb19, []int{17}
     }
     func (m *ParamKind) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -301,7 +554,7 @@ var xxx_messageInfo_ParamKind proto.InternalMessageInfo
     func (m *ParamRef) Reset()      { *m = ParamRef{} }
     func (*ParamRef) ProtoMessage() {}
     func (*ParamRef) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_7f7c65a4f012fb19, []int{9}
    +	return fileDescriptor_7f7c65a4f012fb19, []int{18}
     }
     func (m *ParamRef) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -329,7 +582,7 @@ var xxx_messageInfo_ParamRef proto.InternalMessageInfo
     func (m *ServiceReference) Reset()      { *m = ServiceReference{} }
     func (*ServiceReference) ProtoMessage() {}
     func (*ServiceReference) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_7f7c65a4f012fb19, []int{10}
    +	return fileDescriptor_7f7c65a4f012fb19, []int{19}
     }
     func (m *ServiceReference) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -357,7 +610,7 @@ var xxx_messageInfo_ServiceReference proto.InternalMessageInfo
     func (m *TypeChecking) Reset()      { *m = TypeChecking{} }
     func (*TypeChecking) ProtoMessage() {}
     func (*TypeChecking) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_7f7c65a4f012fb19, []int{11}
    +	return fileDescriptor_7f7c65a4f012fb19, []int{20}
     }
     func (m *TypeChecking) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -385,7 +638,7 @@ var xxx_messageInfo_TypeChecking proto.InternalMessageInfo
     func (m *ValidatingAdmissionPolicy) Reset()      { *m = ValidatingAdmissionPolicy{} }
     func (*ValidatingAdmissionPolicy) ProtoMessage() {}
     func (*ValidatingAdmissionPolicy) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_7f7c65a4f012fb19, []int{12}
    +	return fileDescriptor_7f7c65a4f012fb19, []int{21}
     }
     func (m *ValidatingAdmissionPolicy) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -413,7 +666,7 @@ var xxx_messageInfo_ValidatingAdmissionPolicy proto.InternalMessageInfo
     func (m *ValidatingAdmissionPolicyBinding) Reset()      { *m = ValidatingAdmissionPolicyBinding{} }
     func (*ValidatingAdmissionPolicyBinding) ProtoMessage() {}
     func (*ValidatingAdmissionPolicyBinding) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_7f7c65a4f012fb19, []int{13}
    +	return fileDescriptor_7f7c65a4f012fb19, []int{22}
     }
     func (m *ValidatingAdmissionPolicyBinding) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -441,7 +694,7 @@ var xxx_messageInfo_ValidatingAdmissionPolicyBinding proto.InternalMessageInfo
     func (m *ValidatingAdmissionPolicyBindingList) Reset()      { *m = ValidatingAdmissionPolicyBindingList{} }
     func (*ValidatingAdmissionPolicyBindingList) ProtoMessage() {}
     func (*ValidatingAdmissionPolicyBindingList) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_7f7c65a4f012fb19, []int{14}
    +	return fileDescriptor_7f7c65a4f012fb19, []int{23}
     }
     func (m *ValidatingAdmissionPolicyBindingList) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -469,7 +722,7 @@ var xxx_messageInfo_ValidatingAdmissionPolicyBindingList proto.InternalMessageIn
     func (m *ValidatingAdmissionPolicyBindingSpec) Reset()      { *m = ValidatingAdmissionPolicyBindingSpec{} }
     func (*ValidatingAdmissionPolicyBindingSpec) ProtoMessage() {}
     func (*ValidatingAdmissionPolicyBindingSpec) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_7f7c65a4f012fb19, []int{15}
    +	return fileDescriptor_7f7c65a4f012fb19, []int{24}
     }
     func (m *ValidatingAdmissionPolicyBindingSpec) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -497,7 +750,7 @@ var xxx_messageInfo_ValidatingAdmissionPolicyBindingSpec proto.InternalMessageIn
     func (m *ValidatingAdmissionPolicyList) Reset()      { *m = ValidatingAdmissionPolicyList{} }
     func (*ValidatingAdmissionPolicyList) ProtoMessage() {}
     func (*ValidatingAdmissionPolicyList) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_7f7c65a4f012fb19, []int{16}
    +	return fileDescriptor_7f7c65a4f012fb19, []int{25}
     }
     func (m *ValidatingAdmissionPolicyList) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -525,7 +778,7 @@ var xxx_messageInfo_ValidatingAdmissionPolicyList proto.InternalMessageInfo
     func (m *ValidatingAdmissionPolicySpec) Reset()      { *m = ValidatingAdmissionPolicySpec{} }
     func (*ValidatingAdmissionPolicySpec) ProtoMessage() {}
     func (*ValidatingAdmissionPolicySpec) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_7f7c65a4f012fb19, []int{17}
    +	return fileDescriptor_7f7c65a4f012fb19, []int{26}
     }
     func (m *ValidatingAdmissionPolicySpec) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -553,7 +806,7 @@ var xxx_messageInfo_ValidatingAdmissionPolicySpec proto.InternalMessageInfo
     func (m *ValidatingAdmissionPolicyStatus) Reset()      { *m = ValidatingAdmissionPolicyStatus{} }
     func (*ValidatingAdmissionPolicyStatus) ProtoMessage() {}
     func (*ValidatingAdmissionPolicyStatus) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_7f7c65a4f012fb19, []int{18}
    +	return fileDescriptor_7f7c65a4f012fb19, []int{27}
     }
     func (m *ValidatingAdmissionPolicyStatus) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -581,7 +834,7 @@ var xxx_messageInfo_ValidatingAdmissionPolicyStatus proto.InternalMessageInfo
     func (m *ValidatingWebhook) Reset()      { *m = ValidatingWebhook{} }
     func (*ValidatingWebhook) ProtoMessage() {}
     func (*ValidatingWebhook) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_7f7c65a4f012fb19, []int{19}
    +	return fileDescriptor_7f7c65a4f012fb19, []int{28}
     }
     func (m *ValidatingWebhook) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -609,7 +862,7 @@ var xxx_messageInfo_ValidatingWebhook proto.InternalMessageInfo
     func (m *ValidatingWebhookConfiguration) Reset()      { *m = ValidatingWebhookConfiguration{} }
     func (*ValidatingWebhookConfiguration) ProtoMessage() {}
     func (*ValidatingWebhookConfiguration) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_7f7c65a4f012fb19, []int{20}
    +	return fileDescriptor_7f7c65a4f012fb19, []int{29}
     }
     func (m *ValidatingWebhookConfiguration) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -637,7 +890,7 @@ var xxx_messageInfo_ValidatingWebhookConfiguration proto.InternalMessageInfo
     func (m *ValidatingWebhookConfigurationList) Reset()      { *m = ValidatingWebhookConfigurationList{} }
     func (*ValidatingWebhookConfigurationList) ProtoMessage() {}
     func (*ValidatingWebhookConfigurationList) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_7f7c65a4f012fb19, []int{21}
    +	return fileDescriptor_7f7c65a4f012fb19, []int{30}
     }
     func (m *ValidatingWebhookConfigurationList) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -665,7 +918,7 @@ var xxx_messageInfo_ValidatingWebhookConfigurationList proto.InternalMessageInfo
     func (m *Validation) Reset()      { *m = Validation{} }
     func (*Validation) ProtoMessage() {}
     func (*Validation) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_7f7c65a4f012fb19, []int{22}
    +	return fileDescriptor_7f7c65a4f012fb19, []int{31}
     }
     func (m *Validation) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -693,7 +946,7 @@ var xxx_messageInfo_Validation proto.InternalMessageInfo
     func (m *Variable) Reset()      { *m = Variable{} }
     func (*Variable) ProtoMessage() {}
     func (*Variable) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_7f7c65a4f012fb19, []int{23}
    +	return fileDescriptor_7f7c65a4f012fb19, []int{32}
     }
     func (m *Variable) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -721,7 +974,7 @@ var xxx_messageInfo_Variable proto.InternalMessageInfo
     func (m *WebhookClientConfig) Reset()      { *m = WebhookClientConfig{} }
     func (*WebhookClientConfig) ProtoMessage() {}
     func (*WebhookClientConfig) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_7f7c65a4f012fb19, []int{24}
    +	return fileDescriptor_7f7c65a4f012fb19, []int{33}
     }
     func (m *WebhookClientConfig) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -747,13 +1000,22 @@ func (m *WebhookClientConfig) XXX_DiscardUnknown() {
     var xxx_messageInfo_WebhookClientConfig proto.InternalMessageInfo
     
     func init() {
    +	proto.RegisterType((*ApplyConfiguration)(nil), "k8s.io.api.admissionregistration.v1beta1.ApplyConfiguration")
     	proto.RegisterType((*AuditAnnotation)(nil), "k8s.io.api.admissionregistration.v1beta1.AuditAnnotation")
     	proto.RegisterType((*ExpressionWarning)(nil), "k8s.io.api.admissionregistration.v1beta1.ExpressionWarning")
    +	proto.RegisterType((*JSONPatch)(nil), "k8s.io.api.admissionregistration.v1beta1.JSONPatch")
     	proto.RegisterType((*MatchCondition)(nil), "k8s.io.api.admissionregistration.v1beta1.MatchCondition")
     	proto.RegisterType((*MatchResources)(nil), "k8s.io.api.admissionregistration.v1beta1.MatchResources")
    +	proto.RegisterType((*MutatingAdmissionPolicy)(nil), "k8s.io.api.admissionregistration.v1beta1.MutatingAdmissionPolicy")
    +	proto.RegisterType((*MutatingAdmissionPolicyBinding)(nil), "k8s.io.api.admissionregistration.v1beta1.MutatingAdmissionPolicyBinding")
    +	proto.RegisterType((*MutatingAdmissionPolicyBindingList)(nil), "k8s.io.api.admissionregistration.v1beta1.MutatingAdmissionPolicyBindingList")
    +	proto.RegisterType((*MutatingAdmissionPolicyBindingSpec)(nil), "k8s.io.api.admissionregistration.v1beta1.MutatingAdmissionPolicyBindingSpec")
    +	proto.RegisterType((*MutatingAdmissionPolicyList)(nil), "k8s.io.api.admissionregistration.v1beta1.MutatingAdmissionPolicyList")
    +	proto.RegisterType((*MutatingAdmissionPolicySpec)(nil), "k8s.io.api.admissionregistration.v1beta1.MutatingAdmissionPolicySpec")
     	proto.RegisterType((*MutatingWebhook)(nil), "k8s.io.api.admissionregistration.v1beta1.MutatingWebhook")
     	proto.RegisterType((*MutatingWebhookConfiguration)(nil), "k8s.io.api.admissionregistration.v1beta1.MutatingWebhookConfiguration")
     	proto.RegisterType((*MutatingWebhookConfigurationList)(nil), "k8s.io.api.admissionregistration.v1beta1.MutatingWebhookConfigurationList")
    +	proto.RegisterType((*Mutation)(nil), "k8s.io.api.admissionregistration.v1beta1.Mutation")
     	proto.RegisterType((*NamedRuleWithOperations)(nil), "k8s.io.api.admissionregistration.v1beta1.NamedRuleWithOperations")
     	proto.RegisterType((*ParamKind)(nil), "k8s.io.api.admissionregistration.v1beta1.ParamKind")
     	proto.RegisterType((*ParamRef)(nil), "k8s.io.api.admissionregistration.v1beta1.ParamRef")
    @@ -779,130 +1041,174 @@ func init() {
     }
     
     var fileDescriptor_7f7c65a4f012fb19 = []byte{
    -	// 1957 bytes of a gzipped FileDescriptorProto
    -	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x1a, 0x4d, 0x6f, 0x1b, 0xc7,
    -	0xd5, 0x2b, 0x52, 0x12, 0xf9, 0xa8, 0x2f, 0x4e, 0x9c, 0x8a, 0x76, 0x1c, 0x52, 0x58, 0x04, 0x85,
    -	0x0c, 0xb4, 0x64, 0xac, 0x04, 0x89, 0xeb, 0xa0, 0x28, 0x44, 0xc5, 0x76, 0xed, 0x58, 0xb2, 0x30,
    -	0x4a, 0x24, 0xa0, 0x4d, 0x00, 0x8f, 0x76, 0x87, 0xe4, 0x96, 0xe4, 0xee, 0x76, 0x67, 0x49, 0x5b,
    -	0x2d, 0xd0, 0x16, 0xe8, 0x21, 0xd7, 0x02, 0xbd, 0x14, 0xe8, 0xa9, 0x7f, 0xa1, 0xf7, 0x02, 0xed,
    -	0xcd, 0xc7, 0xdc, 0x6a, 0xa0, 0x28, 0x51, 0xb1, 0x87, 0x9e, 0x7a, 0xe8, 0xa1, 0x3d, 0xe8, 0xd2,
    -	0x62, 0x66, 0x67, 0x3f, 0xb9, 0xb4, 0x56, 0xaa, 0xac, 0x5c, 0x7c, 0xd3, 0xbe, 0xcf, 0x79, 0x6f,
    -	0xde, 0xd7, 0x3c, 0x0a, 0x6e, 0x77, 0x6f, 0xb3, 0xba, 0x61, 0x35, 0x88, 0x6d, 0x34, 0x88, 0xde,
    -	0x37, 0x18, 0x33, 0x2c, 0xd3, 0xa1, 0x6d, 0x83, 0xb9, 0x0e, 0x71, 0x0d, 0xcb, 0x6c, 0x0c, 0x6f,
    -	0x1d, 0x52, 0x97, 0xdc, 0x6a, 0xb4, 0xa9, 0x49, 0x1d, 0xe2, 0x52, 0xbd, 0x6e, 0x3b, 0x96, 0x6b,
    -	0xa1, 0x75, 0x8f, 0xb3, 0x4e, 0x6c, 0xa3, 0x9e, 0xca, 0x59, 0x97, 0x9c, 0xd7, 0xbf, 0xdd, 0x36,
    -	0xdc, 0xce, 0xe0, 0xb0, 0xae, 0x59, 0xfd, 0x46, 0xdb, 0x6a, 0x5b, 0x0d, 0x21, 0xe0, 0x70, 0xd0,
    -	0x12, 0x5f, 0xe2, 0x43, 0xfc, 0xe5, 0x09, 0xbe, 0xfe, 0x5e, 0x86, 0x23, 0x25, 0x4f, 0x73, 0xfd,
    -	0xfd, 0x90, 0xa9, 0x4f, 0xb4, 0x8e, 0x61, 0x52, 0xe7, 0xa8, 0x61, 0x77, 0xdb, 0x1c, 0xc0, 0x1a,
    -	0x7d, 0xea, 0x92, 0x34, 0xae, 0xc6, 0x34, 0x2e, 0x67, 0x60, 0xba, 0x46, 0x9f, 0x4e, 0x30, 0x7c,
    -	0x70, 0x1a, 0x03, 0xd3, 0x3a, 0xb4, 0x4f, 0x92, 0x7c, 0x2a, 0x83, 0xe5, 0xcd, 0x81, 0x6e, 0xb8,
    -	0x9b, 0xa6, 0x69, 0xb9, 0xc2, 0x08, 0xf4, 0x36, 0xe4, 0xba, 0xf4, 0xa8, 0xa2, 0xac, 0x29, 0xeb,
    -	0xc5, 0x66, 0xe9, 0xf9, 0xa8, 0x76, 0x65, 0x3c, 0xaa, 0xe5, 0x3e, 0xa1, 0x47, 0x98, 0xc3, 0xd1,
    -	0x26, 0x2c, 0x0f, 0x49, 0x6f, 0x40, 0xef, 0x3e, 0xb3, 0x1d, 0x2a, 0x5c, 0x50, 0x99, 0x11, 0xa4,
    -	0xab, 0x92, 0x74, 0x79, 0x3f, 0x8e, 0xc6, 0x49, 0x7a, 0xb5, 0x07, 0xe5, 0xf0, 0xeb, 0x80, 0x38,
    -	0xa6, 0x61, 0xb6, 0xd1, 0xb7, 0xa0, 0xd0, 0x32, 0x68, 0x4f, 0xc7, 0xb4, 0x25, 0x05, 0xae, 0x48,
    -	0x81, 0x85, 0x7b, 0x12, 0x8e, 0x03, 0x0a, 0x74, 0x13, 0xe6, 0x9f, 0x7a, 0x8c, 0x95, 0x9c, 0x20,
    -	0x5e, 0x96, 0xc4, 0xf3, 0x52, 0x1e, 0xf6, 0xf1, 0x6a, 0x0b, 0x96, 0xb6, 0x89, 0xab, 0x75, 0xb6,
    -	0x2c, 0x53, 0x37, 0x84, 0x85, 0x6b, 0x90, 0x37, 0x49, 0x9f, 0x4a, 0x13, 0x17, 0x24, 0x67, 0x7e,
    -	0x87, 0xf4, 0x29, 0x16, 0x18, 0xb4, 0x01, 0x40, 0x93, 0xf6, 0x21, 0x49, 0x07, 0x11, 0xd3, 0x22,
    -	0x54, 0xea, 0x9f, 0xf3, 0x52, 0x11, 0xa6, 0xcc, 0x1a, 0x38, 0x1a, 0x65, 0xe8, 0x19, 0x94, 0xb9,
    -	0x38, 0x66, 0x13, 0x8d, 0xee, 0xd1, 0x1e, 0xd5, 0x5c, 0xcb, 0x11, 0x5a, 0x4b, 0x1b, 0xef, 0xd5,
    -	0xc3, 0x30, 0x0d, 0x6e, 0xac, 0x6e, 0x77, 0xdb, 0x1c, 0xc0, 0xea, 0x3c, 0x30, 0xea, 0xc3, 0x5b,
    -	0xf5, 0x47, 0xe4, 0x90, 0xf6, 0x7c, 0xd6, 0xe6, 0x9b, 0xe3, 0x51, 0xad, 0xbc, 0x93, 0x94, 0x88,
    -	0x27, 0x95, 0x20, 0x0b, 0x96, 0xac, 0xc3, 0x1f, 0x51, 0xcd, 0x0d, 0xd4, 0xce, 0x9c, 0x5f, 0x2d,
    -	0x1a, 0x8f, 0x6a, 0x4b, 0x8f, 0x63, 0xe2, 0x70, 0x42, 0x3c, 0xfa, 0x19, 0x2c, 0x3a, 0xd2, 0x6e,
    -	0x3c, 0xe8, 0x51, 0x56, 0xc9, 0xad, 0xe5, 0xd6, 0x4b, 0x1b, 0x9b, 0xf5, 0xac, 0xd9, 0x58, 0xe7,
    -	0x76, 0xe9, 0x9c, 0xf7, 0xc0, 0x70, 0x3b, 0x8f, 0x6d, 0xea, 0xa1, 0x59, 0xf3, 0x4d, 0xe9, 0xf7,
    -	0x45, 0x1c, 0x95, 0x8f, 0xe3, 0xea, 0xd0, 0xaf, 0x15, 0xb8, 0x4a, 0x9f, 0x69, 0xbd, 0x81, 0x4e,
    -	0x63, 0x74, 0x95, 0xfc, 0x45, 0x9d, 0xe3, 0x86, 0x3c, 0xc7, 0xd5, 0xbb, 0x29, 0x6a, 0x70, 0xaa,
    -	0x72, 0xf4, 0x31, 0x94, 0xfa, 0x3c, 0x24, 0x76, 0xad, 0x9e, 0xa1, 0x1d, 0x55, 0xe6, 0x45, 0x20,
    -	0xa9, 0xe3, 0x51, 0xad, 0xb4, 0x1d, 0x82, 0x4f, 0x46, 0xb5, 0xe5, 0xc8, 0xe7, 0xa7, 0x47, 0x36,
    -	0xc5, 0x51, 0x36, 0xf5, 0x4f, 0x05, 0x58, 0xde, 0x1e, 0xf0, 0xf4, 0x34, 0xdb, 0x07, 0xf4, 0xb0,
    -	0x63, 0x59, 0xdd, 0x0c, 0x31, 0xfc, 0x14, 0x16, 0xb4, 0x9e, 0x41, 0x4d, 0x77, 0xcb, 0x32, 0x5b,
    -	0x46, 0x5b, 0x06, 0xc0, 0x77, 0xb3, 0x3b, 0x42, 0xaa, 0xda, 0x8a, 0x08, 0x69, 0x5e, 0x95, 0x8a,
    -	0x16, 0xa2, 0x50, 0x1c, 0x53, 0x84, 0x3e, 0x87, 0x59, 0x27, 0x12, 0x02, 0x1f, 0x66, 0xd1, 0x58,
    -	0x4f, 0x71, 0xf8, 0xa2, 0xd4, 0x35, 0xeb, 0x79, 0xd8, 0x13, 0x8a, 0x1e, 0xc1, 0x62, 0x8b, 0x18,
    -	0xbd, 0x81, 0x43, 0xa5, 0x53, 0xf3, 0xc2, 0x03, 0xdf, 0xe4, 0x11, 0x72, 0x2f, 0x8a, 0x38, 0x19,
    -	0xd5, 0xca, 0x31, 0x80, 0x70, 0x6c, 0x9c, 0x39, 0x79, 0x41, 0xc5, 0x73, 0x5d, 0x50, 0x7a, 0x9e,
    -	0xcf, 0x7e, 0x3d, 0x79, 0x5e, 0x7a, 0xb5, 0x79, 0xfe, 0x31, 0x94, 0x98, 0xa1, 0xd3, 0xbb, 0xad,
    -	0x16, 0xd5, 0x5c, 0x56, 0x99, 0x0b, 0x1d, 0xb6, 0x17, 0x82, 0xb9, 0xc3, 0xc2, 0xcf, 0xad, 0x1e,
    -	0x61, 0x0c, 0x47, 0xd9, 0xd0, 0x1d, 0x58, 0xe2, 0x5d, 0xc9, 0x1a, 0xb8, 0x7b, 0x54, 0xb3, 0x4c,
    -	0x9d, 0x89, 0xd4, 0x98, 0xf5, 0x4e, 0xf0, 0x69, 0x0c, 0x83, 0x13, 0x94, 0xe8, 0x33, 0x58, 0x0d,
    -	0xa2, 0x08, 0xd3, 0xa1, 0x41, 0x9f, 0xee, 0x53, 0x87, 0x7f, 0xb0, 0x4a, 0x61, 0x2d, 0xb7, 0x5e,
    -	0x6c, 0xbe, 0x35, 0x1e, 0xd5, 0x56, 0x37, 0xd3, 0x49, 0xf0, 0x34, 0x5e, 0xf4, 0x04, 0x90, 0x43,
    -	0x0d, 0x73, 0x68, 0x69, 0x22, 0xfc, 0x64, 0x40, 0x80, 0xb0, 0xef, 0xdd, 0xf1, 0xa8, 0x86, 0xf0,
    -	0x04, 0xf6, 0x64, 0x54, 0xfb, 0xc6, 0x24, 0x54, 0x84, 0x47, 0x8a, 0x2c, 0xf4, 0x53, 0x58, 0xee,
    -	0xc7, 0x1a, 0x11, 0xab, 0x2c, 0x88, 0x0c, 0xb9, 0x9d, 0x3d, 0x27, 0xe3, 0x9d, 0x2c, 0xec, 0xb9,
    -	0x71, 0x38, 0xc3, 0x49, 0x4d, 0xea, 0x5f, 0x15, 0xb8, 0x91, 0xa8, 0x21, 0x5e, 0xba, 0x0e, 0x3c,
    -	0x0d, 0xe8, 0x09, 0x14, 0x78, 0x54, 0xe8, 0xc4, 0x25, 0xb2, 0x45, 0xbd, 0x9b, 0x2d, 0x86, 0xbc,
    -	0x80, 0xd9, 0xa6, 0x2e, 0x09, 0x5b, 0x64, 0x08, 0xc3, 0x81, 0x54, 0xf4, 0x43, 0x28, 0x48, 0xcd,
    -	0xac, 0x32, 0x23, 0x0c, 0xff, 0xce, 0x19, 0x0c, 0x8f, 0x9f, 0xbd, 0x99, 0xe7, 0xaa, 0x70, 0x20,
    -	0x50, 0xfd, 0xa7, 0x02, 0x6b, 0x2f, 0xb3, 0xef, 0x91, 0xc1, 0x5c, 0xf4, 0xf9, 0x84, 0x8d, 0xf5,
    -	0x8c, 0x79, 0x62, 0x30, 0xcf, 0xc2, 0x60, 0x26, 0xf1, 0x21, 0x11, 0xfb, 0xba, 0x30, 0x6b, 0xb8,
    -	0xb4, 0xef, 0x1b, 0x77, 0xef, 0xdc, 0xc6, 0xc5, 0x0e, 0x1e, 0x96, 0xc1, 0x07, 0x5c, 0x38, 0xf6,
    -	0x74, 0xa8, 0x2f, 0x14, 0x58, 0x9d, 0xd2, 0xa9, 0xd0, 0x87, 0x61, 0x2f, 0x16, 0x45, 0xa4, 0xa2,
    -	0x88, 0xbc, 0x28, 0x47, 0x9b, 0xa8, 0x40, 0xe0, 0x38, 0x1d, 0xfa, 0xa5, 0x02, 0xc8, 0x99, 0x90,
    -	0x27, 0x3b, 0xc7, 0xb9, 0xeb, 0xf8, 0x75, 0x69, 0x00, 0x9a, 0xc4, 0xe1, 0x14, 0x75, 0x2a, 0x81,
    -	0xe2, 0x2e, 0x71, 0x48, 0xff, 0x13, 0xc3, 0xd4, 0xf9, 0x24, 0x46, 0x6c, 0x43, 0x66, 0xa9, 0xec,
    -	0x76, 0x41, 0x98, 0x6d, 0xee, 0x3e, 0x90, 0x18, 0x1c, 0xa1, 0xe2, 0xbd, 0xb1, 0x6b, 0x98, 0xba,
    -	0x9c, 0xdb, 0x82, 0xde, 0xc8, 0xe5, 0x61, 0x81, 0x51, 0x7f, 0x3f, 0x03, 0x05, 0xa1, 0x83, 0xcf,
    -	0x92, 0xa7, 0xb7, 0xd2, 0x06, 0x14, 0x83, 0xd2, 0x2b, 0xa5, 0x96, 0x25, 0x59, 0x31, 0x28, 0xd3,
    -	0x38, 0xa4, 0x41, 0x5f, 0x40, 0x81, 0xf9, 0x05, 0x39, 0x77, 0xfe, 0x82, 0xbc, 0xc0, 0x23, 0x2d,
    -	0x28, 0xc5, 0x81, 0x48, 0xe4, 0xc2, 0xaa, 0xcd, 0x4f, 0x4f, 0x5d, 0xea, 0xec, 0x58, 0xee, 0x3d,
    -	0x6b, 0x60, 0xea, 0x9b, 0x1a, 0xf7, 0x9e, 0xec, 0x86, 0x77, 0x78, 0x09, 0xdc, 0x4d, 0x27, 0x39,
    -	0x19, 0xd5, 0xde, 0x9a, 0x82, 0x12, 0xa5, 0x6b, 0x9a, 0x68, 0xf5, 0x77, 0x0a, 0xac, 0xec, 0x51,
    -	0x67, 0x68, 0x68, 0x14, 0xd3, 0x16, 0x75, 0xa8, 0xa9, 0x25, 0x5c, 0xa3, 0x64, 0x70, 0x8d, 0xef,
    -	0xed, 0x99, 0xa9, 0xde, 0xbe, 0x01, 0x79, 0x9b, 0xb8, 0x1d, 0x39, 0xd8, 0x17, 0x38, 0x76, 0x97,
    -	0xb8, 0x1d, 0x2c, 0xa0, 0x02, 0x6b, 0x39, 0xae, 0x30, 0x74, 0x56, 0x62, 0x2d, 0xc7, 0xc5, 0x02,
    -	0xaa, 0xfe, 0x46, 0x81, 0x05, 0x6e, 0xc5, 0x56, 0x87, 0x6a, 0x5d, 0xfe, 0xac, 0xf8, 0x52, 0x01,
    -	0x44, 0x93, 0x8f, 0x0d, 0x2f, 0x23, 0x4a, 0x1b, 0x1f, 0x65, 0x4f, 0xd1, 0x89, 0x07, 0x4b, 0x18,
    -	0xd6, 0x13, 0x28, 0x86, 0x53, 0x54, 0xaa, 0x7f, 0x99, 0x81, 0x6b, 0xfb, 0xa4, 0x67, 0xe8, 0x22,
    -	0xd5, 0x83, 0xfe, 0x24, 0x9b, 0xc3, 0xab, 0x2f, 0xbf, 0x06, 0xe4, 0x99, 0x4d, 0x35, 0x99, 0xcd,
    -	0xf7, 0xb3, 0x9b, 0x3e, 0xf5, 0xd0, 0x7b, 0x36, 0xd5, 0xc2, 0x1b, 0xe4, 0x5f, 0x58, 0xa8, 0x40,
    -	0x3f, 0x86, 0x39, 0xe6, 0x12, 0x77, 0xc0, 0x64, 0xf0, 0x3f, 0xb8, 0x08, 0x65, 0x42, 0x60, 0x73,
    -	0x49, 0xaa, 0x9b, 0xf3, 0xbe, 0xb1, 0x54, 0xa4, 0xfe, 0x47, 0x81, 0xb5, 0xa9, 0xbc, 0x4d, 0xc3,
    -	0xd4, 0x79, 0x30, 0xbc, 0x7a, 0x27, 0xdb, 0x31, 0x27, 0xef, 0x5c, 0x80, 0xdd, 0xf2, 0xec, 0xd3,
    -	0x7c, 0xad, 0xfe, 0x5b, 0x81, 0x77, 0x4e, 0x63, 0xbe, 0x84, 0xe6, 0x67, 0xc5, 0x9b, 0xdf, 0xc3,
    -	0x8b, 0xb3, 0x7c, 0x4a, 0x03, 0xfc, 0x32, 0x77, 0xba, 0xdd, 0xdc, 0x4d, 0xbc, 0x83, 0xd8, 0x02,
    -	0xb8, 0x13, 0x16, 0xf9, 0xe0, 0x12, 0x77, 0x03, 0x0c, 0x8e, 0x50, 0x71, 0x5f, 0xd9, 0xb2, 0x3d,
    -	0xc8, 0xab, 0xdc, 0xc8, 0x6e, 0x90, 0xdf, 0x58, 0xbc, 0xf2, 0xed, 0x7f, 0xe1, 0x40, 0x22, 0x72,
    -	0x61, 0xa9, 0x1f, 0x5b, 0x14, 0xc8, 0x34, 0x39, 0xeb, 0x1c, 0x18, 0xf0, 0x7b, 0x73, 0x73, 0x1c,
    -	0x86, 0x13, 0x3a, 0xd0, 0x01, 0x94, 0x87, 0xd2, 0x5f, 0x96, 0xe9, 0x95, 0x74, 0xef, 0x75, 0x5c,
    -	0x6c, 0xde, 0xe4, 0xef, 0x8d, 0xfd, 0x24, 0xf2, 0x64, 0x54, 0x5b, 0x49, 0x02, 0xf1, 0xa4, 0x0c,
    -	0xf5, 0x1f, 0x0a, 0xbc, 0x3d, 0xf5, 0x26, 0x2e, 0x21, 0xf4, 0x3a, 0xf1, 0xd0, 0xdb, 0xba, 0x88,
    -	0xd0, 0x4b, 0x8f, 0xb9, 0xdf, 0xce, 0xbd, 0xc4, 0x52, 0x11, 0x6c, 0x4f, 0xa0, 0x68, 0xfb, 0xb3,
    -	0x4b, 0xca, 0xa6, 0x27, 0x4b, 0xe4, 0x70, 0xd6, 0xe6, 0x22, 0xef, 0x9f, 0xc1, 0x27, 0x0e, 0x85,
    -	0xa2, 0x9f, 0xc0, 0x8a, 0x3f, 0xdb, 0x73, 0x7e, 0xc3, 0x74, 0xfd, 0x01, 0xed, 0xfc, 0xe1, 0x73,
    -	0x75, 0x3c, 0xaa, 0xad, 0x6c, 0x27, 0xa4, 0xe2, 0x09, 0x3d, 0xa8, 0x0b, 0xa5, 0xf0, 0xfa, 0xfd,
    -	0xf7, 0xfd, 0xfb, 0x67, 0xf7, 0xb7, 0x65, 0x36, 0xdf, 0x90, 0x0e, 0x2e, 0x85, 0x30, 0x86, 0xa3,
    -	0xd2, 0x2f, 0xf8, 0xa1, 0xff, 0x73, 0x58, 0x21, 0xf1, 0x45, 0x27, 0xab, 0xcc, 0x9e, 0xf5, 0x11,
    -	0x92, 0x58, 0x95, 0x36, 0x2b, 0xd2, 0x88, 0x95, 0x04, 0x82, 0xe1, 0x09, 0x65, 0x69, 0xaf, 0xbf,
    -	0xb9, 0xcb, 0x7a, 0xfd, 0x21, 0x0d, 0x8a, 0x43, 0xe2, 0x18, 0xe4, 0xb0, 0x47, 0xf9, 0x53, 0x3b,
    -	0x77, 0xb6, 0x82, 0xb6, 0x2f, 0x59, 0xc3, 0xc9, 0xce, 0x87, 0x30, 0x1c, 0xca, 0x55, 0xff, 0x38,
    -	0x03, 0xb5, 0x53, 0xda, 0x37, 0x7a, 0x08, 0xc8, 0x3a, 0x64, 0xd4, 0x19, 0x52, 0xfd, 0xbe, 0xb7,
    -	0x8a, 0xf6, 0xc7, 0xfa, 0x5c, 0x38, 0x50, 0x3d, 0x9e, 0xa0, 0xc0, 0x29, 0x5c, 0xa8, 0x07, 0x0b,
    -	0x6e, 0x64, 0xd4, 0x93, 0x59, 0xf0, 0x41, 0x76, 0xbb, 0xa2, 0x83, 0x62, 0x73, 0x65, 0x3c, 0xaa,
    -	0xc5, 0x46, 0x47, 0x1c, 0x93, 0x8e, 0x34, 0x00, 0x2d, 0xbc, 0x3a, 0x2f, 0xf4, 0x1b, 0xd9, 0xaa,
    -	0x58, 0x78, 0x63, 0x41, 0xdf, 0x89, 0x5c, 0x56, 0x44, 0xac, 0x7a, 0x3c, 0x0f, 0xe5, 0xd0, 0x85,
    -	0xaf, 0x77, 0x7d, 0xaf, 0x77, 0x7d, 0x2f, 0xdd, 0xf5, 0xc1, 0xeb, 0x5d, 0xdf, 0xb9, 0x76, 0x7d,
    -	0x29, 0xb5, 0xb8, 0x74, 0x69, 0x9b, 0xb8, 0x63, 0x05, 0xaa, 0x13, 0x39, 0x7e, 0xd9, 0xbb, 0xb8,
    -	0x2f, 0x26, 0x76, 0x71, 0x1f, 0x9d, 0x67, 0x6c, 0x9a, 0xb6, 0x8d, 0xfb, 0x97, 0x02, 0xea, 0xcb,
    -	0x6d, 0xbc, 0x84, 0xb9, 0xb0, 0x1f, 0x9f, 0x0b, 0xbf, 0xff, 0x7f, 0x18, 0x98, 0x65, 0x23, 0xf7,
    -	0x5f, 0x05, 0x20, 0x1c, 0x66, 0xd0, 0x3b, 0x10, 0xf9, 0xa1, 0x50, 0x96, 0x6e, 0xcf, 0x4d, 0x11,
    -	0x38, 0xba, 0x09, 0xf3, 0x7d, 0xca, 0x18, 0x69, 0xfb, 0x0b, 0x91, 0xe0, 0x77, 0xcc, 0x6d, 0x0f,
    -	0x8c, 0x7d, 0x3c, 0x3a, 0x80, 0x39, 0x87, 0x12, 0x66, 0x99, 0x72, 0x31, 0xf2, 0x3d, 0xfe, 0x0a,
    -	0xc6, 0x02, 0x72, 0x32, 0xaa, 0xdd, 0xca, 0xf2, 0x3b, 0x73, 0x5d, 0x3e, 0x9a, 0x05, 0x13, 0x96,
    -	0xe2, 0xd0, 0x7d, 0x28, 0x4b, 0x1d, 0x91, 0x03, 0x7b, 0x95, 0xf6, 0x9a, 0x3c, 0x4d, 0x79, 0x3b,
    -	0x49, 0x80, 0x27, 0x79, 0xd4, 0x87, 0x50, 0xf0, 0x07, 0x03, 0x54, 0x81, 0x7c, 0xe4, 0xbd, 0xe5,
    -	0x19, 0x2e, 0x20, 0x09, 0xc7, 0xcc, 0xa4, 0x3b, 0x46, 0xfd, 0x83, 0x02, 0x6f, 0xa4, 0x34, 0x25,
    -	0x74, 0x0d, 0x72, 0x03, 0xa7, 0x27, 0x5d, 0x30, 0x3f, 0x1e, 0xd5, 0x72, 0x9f, 0xe1, 0x47, 0x98,
    -	0xc3, 0x10, 0x81, 0x79, 0xe6, 0xad, 0xa7, 0x64, 0x30, 0xdd, 0xc9, 0x7e, 0xe3, 0xc9, 0xbd, 0x56,
    -	0xb3, 0xc4, 0xef, 0xc0, 0x87, 0xfa, 0x72, 0xd1, 0x3a, 0x14, 0x34, 0xd2, 0x1c, 0x98, 0x7a, 0xcf,
    -	0xbb, 0xaf, 0x05, 0xef, 0x8d, 0xb7, 0xb5, 0xe9, 0xc1, 0x70, 0x80, 0x6d, 0xee, 0x3c, 0x3f, 0xae,
    -	0x5e, 0xf9, 0xea, 0xb8, 0x7a, 0xe5, 0xc5, 0x71, 0xf5, 0xca, 0x2f, 0xc6, 0x55, 0xe5, 0xf9, 0xb8,
    -	0xaa, 0x7c, 0x35, 0xae, 0x2a, 0x2f, 0xc6, 0x55, 0xe5, 0x6f, 0xe3, 0xaa, 0xf2, 0xab, 0xbf, 0x57,
    -	0xaf, 0xfc, 0x60, 0x3d, 0xeb, 0x7f, 0x39, 0xfc, 0x2f, 0x00, 0x00, 0xff, 0xff, 0x6f, 0xf2, 0xe8,
    -	0x4a, 0x10, 0x21, 0x00, 0x00,
    +	// 2215 bytes of a gzipped FileDescriptorProto
    +	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x5a, 0x4d, 0x6c, 0x1b, 0xc7,
    +	0x15, 0xf6, 0x92, 0x92, 0x45, 0x3e, 0xca, 0x92, 0x38, 0x71, 0x2a, 0xfa, 0x8f, 0x14, 0x16, 0x41,
    +	0x21, 0x03, 0x2d, 0x59, 0x2b, 0x41, 0xe2, 0x3a, 0x29, 0x02, 0xae, 0x62, 0x3b, 0x76, 0x24, 0x59,
    +	0x18, 0x39, 0x52, 0xd1, 0x26, 0x40, 0x56, 0xcb, 0x21, 0xb9, 0x11, 0xb9, 0xcb, 0xee, 0x2c, 0x65,
    +	0xab, 0x05, 0xda, 0x02, 0x2d, 0x90, 0x1e, 0x0b, 0xf4, 0x52, 0xa0, 0xa7, 0xde, 0x7b, 0x69, 0xef,
    +	0x05, 0x7a, 0xf4, 0x31, 0xb7, 0x1a, 0x28, 0x4a, 0x54, 0x4c, 0xd1, 0x9e, 0x7a, 0x48, 0x81, 0xf6,
    +	0xa0, 0x4b, 0x8b, 0x99, 0x9d, 0xfd, 0xdf, 0x95, 0x56, 0xb2, 0x2c, 0x17, 0x85, 0x6f, 0xda, 0xf7,
    +	0xe6, 0xbd, 0x37, 0xef, 0xcd, 0x9b, 0xf7, 0xbe, 0x79, 0x22, 0xdc, 0xdc, 0xb9, 0x49, 0xeb, 0xba,
    +	0xd9, 0x50, 0x07, 0x7a, 0x43, 0x6d, 0xf5, 0x75, 0x4a, 0x75, 0xd3, 0xb0, 0x48, 0x47, 0xa7, 0xb6,
    +	0xa5, 0xda, 0xba, 0x69, 0x34, 0x76, 0x6f, 0x6c, 0x13, 0x5b, 0xbd, 0xd1, 0xe8, 0x10, 0x83, 0x58,
    +	0xaa, 0x4d, 0x5a, 0xf5, 0x81, 0x65, 0xda, 0x26, 0x5a, 0x74, 0x24, 0xeb, 0xea, 0x40, 0xaf, 0x27,
    +	0x4a, 0xd6, 0x85, 0xe4, 0xe5, 0xaf, 0x77, 0x74, 0xbb, 0x3b, 0xdc, 0xae, 0x6b, 0x66, 0xbf, 0xd1,
    +	0x31, 0x3b, 0x66, 0x83, 0x2b, 0xd8, 0x1e, 0xb6, 0xf9, 0x17, 0xff, 0xe0, 0x7f, 0x39, 0x8a, 0x2f,
    +	0xbf, 0x9e, 0x61, 0x4b, 0xd1, 0xdd, 0x5c, 0x7e, 0xc3, 0x17, 0xea, 0xab, 0x5a, 0x57, 0x37, 0x88,
    +	0xb5, 0xd7, 0x18, 0xec, 0x74, 0x18, 0x81, 0x36, 0xfa, 0xc4, 0x56, 0x93, 0xa4, 0x1a, 0x69, 0x52,
    +	0xd6, 0xd0, 0xb0, 0xf5, 0x3e, 0x89, 0x09, 0xbc, 0x79, 0x94, 0x00, 0xd5, 0xba, 0xa4, 0xaf, 0x46,
    +	0xe5, 0xe4, 0xf7, 0x01, 0x35, 0x07, 0x83, 0xde, 0xde, 0xb2, 0x69, 0xb4, 0xf5, 0xce, 0xd0, 0xf1,
    +	0x03, 0x2d, 0x01, 0x90, 0xc7, 0x03, 0x8b, 0x70, 0x0f, 0x2b, 0xd2, 0x82, 0xb4, 0x58, 0x54, 0xd0,
    +	0x93, 0x51, 0xed, 0xdc, 0x78, 0x54, 0x83, 0xdb, 0x1e, 0x07, 0x07, 0x56, 0xc9, 0x14, 0x66, 0x9b,
    +	0xc3, 0x96, 0x6e, 0x37, 0x0d, 0xc3, 0xb4, 0x1d, 0x35, 0xd7, 0x20, 0xbf, 0x43, 0xf6, 0x84, 0x7c,
    +	0x49, 0xc8, 0xe7, 0x3f, 0x20, 0x7b, 0x98, 0xd1, 0x51, 0x13, 0x66, 0x77, 0xd5, 0xde, 0x90, 0xf8,
    +	0x0a, 0x2b, 0x39, 0xbe, 0x74, 0x5e, 0x2c, 0x9d, 0xdd, 0x0c, 0xb3, 0x71, 0x74, 0xbd, 0xdc, 0x83,
    +	0xb2, 0xff, 0xb5, 0xa5, 0x5a, 0x86, 0x6e, 0x74, 0xd0, 0xd7, 0xa0, 0xd0, 0xd6, 0x49, 0xaf, 0x85,
    +	0x49, 0x5b, 0x28, 0x9c, 0x13, 0x0a, 0x0b, 0x77, 0x04, 0x1d, 0x7b, 0x2b, 0xd0, 0x75, 0x98, 0x7a,
    +	0xe4, 0x08, 0x56, 0xf2, 0x7c, 0xf1, 0xac, 0x58, 0x3c, 0x25, 0xf4, 0x61, 0x97, 0x2f, 0xbf, 0x0b,
    +	0xc5, 0xfb, 0x1b, 0x0f, 0xd6, 0xd6, 0x55, 0x5b, 0xeb, 0x9e, 0x28, 0x46, 0x6d, 0x98, 0x59, 0x65,
    +	0xc2, 0xcb, 0xa6, 0xd1, 0xd2, 0x79, 0x88, 0x16, 0x60, 0xc2, 0x50, 0xfb, 0x44, 0xc8, 0x4f, 0x0b,
    +	0xf9, 0x89, 0x35, 0xb5, 0x4f, 0x30, 0xe7, 0x44, 0xec, 0xe4, 0x32, 0xd9, 0xf9, 0xe3, 0x84, 0x30,
    +	0x84, 0x09, 0x35, 0x87, 0x96, 0x46, 0x28, 0x7a, 0x0c, 0x65, 0xa6, 0x8e, 0x0e, 0x54, 0x8d, 0x6c,
    +	0x90, 0x1e, 0xd1, 0x6c, 0xd3, 0xe2, 0x56, 0x4b, 0x4b, 0xaf, 0xd7, 0xfd, 0x1b, 0xe3, 0x25, 0x4f,
    +	0x7d, 0xb0, 0xd3, 0x61, 0x04, 0x5a, 0x67, 0x39, 0x5a, 0xdf, 0xbd, 0x51, 0x5f, 0x51, 0xb7, 0x49,
    +	0xcf, 0x15, 0x55, 0x5e, 0x1d, 0x8f, 0x6a, 0xe5, 0xb5, 0xa8, 0x46, 0x1c, 0x37, 0x82, 0x4c, 0x98,
    +	0x31, 0xb7, 0x3f, 0x25, 0x9a, 0xed, 0x99, 0xcd, 0x9d, 0xdc, 0x2c, 0x1a, 0x8f, 0x6a, 0x33, 0x0f,
    +	0x42, 0xea, 0x70, 0x44, 0x3d, 0xfa, 0x21, 0x5c, 0xb0, 0x84, 0xdf, 0x78, 0xd8, 0x23, 0xb4, 0x92,
    +	0x5f, 0xc8, 0x2f, 0x96, 0x96, 0x9a, 0xf5, 0xac, 0x85, 0xa1, 0xce, 0xfc, 0x6a, 0x31, 0xd9, 0x2d,
    +	0xdd, 0xee, 0x3e, 0x18, 0x10, 0x87, 0x4d, 0x95, 0x57, 0x45, 0xdc, 0x2f, 0xe0, 0xa0, 0x7e, 0x1c,
    +	0x36, 0x87, 0x7e, 0x21, 0xc1, 0x45, 0xf2, 0x58, 0xeb, 0x0d, 0x5b, 0x24, 0xb4, 0xae, 0x32, 0x71,
    +	0x5a, 0xfb, 0xb8, 0x2a, 0xf6, 0x71, 0xf1, 0x76, 0x82, 0x19, 0x9c, 0x68, 0x1c, 0xbd, 0x07, 0xa5,
    +	0x3e, 0x4b, 0x89, 0x75, 0xb3, 0xa7, 0x6b, 0x7b, 0x95, 0x29, 0x9e, 0x48, 0xf2, 0x78, 0x54, 0x2b,
    +	0xad, 0xfa, 0xe4, 0x83, 0x51, 0x6d, 0x36, 0xf0, 0xf9, 0x70, 0x6f, 0x40, 0x70, 0x50, 0x4c, 0xfe,
    +	0xab, 0x04, 0xf3, 0xab, 0x43, 0x76, 0xbf, 0x8d, 0x4e, 0xd3, 0xdd, 0xbb, 0xc3, 0x43, 0x9f, 0x40,
    +	0x81, 0x1d, 0x5a, 0x4b, 0xb5, 0x55, 0x91, 0x59, 0xdf, 0xc8, 0x76, 0xc4, 0xce, 0x79, 0xae, 0x12,
    +	0x5b, 0xf5, 0x33, 0xdb, 0xa7, 0x61, 0x4f, 0x2b, 0xea, 0xc0, 0x04, 0x1d, 0x10, 0x4d, 0x24, 0xd0,
    +	0xed, 0xec, 0x81, 0x4c, 0xd9, 0xf2, 0xc6, 0x80, 0x68, 0xfe, 0xa5, 0x63, 0x5f, 0x98, 0x1b, 0x90,
    +	0xff, 0x29, 0x41, 0x35, 0x45, 0x46, 0xd1, 0x8d, 0x16, 0xab, 0x32, 0xcf, 0xdf, 0x5b, 0x23, 0xe4,
    +	0xed, 0xca, 0x33, 0x7b, 0x2b, 0x76, 0x9e, 0xea, 0xf4, 0x97, 0x12, 0xc8, 0x87, 0x8b, 0xae, 0xe8,
    +	0xd4, 0x46, 0x1f, 0xc5, 0x1c, 0xaf, 0x67, 0xbc, 0xc9, 0x3a, 0x75, 0xdc, 0xf6, 0xca, 0xb1, 0x4b,
    +	0x09, 0x38, 0xdd, 0x87, 0x49, 0xdd, 0x26, 0x7d, 0x5a, 0xc9, 0xf1, 0xcb, 0xf2, 0xfe, 0x69, 0x79,
    +	0xad, 0x5c, 0x10, 0x46, 0x27, 0xef, 0x31, 0xf5, 0xd8, 0xb1, 0x22, 0xff, 0x26, 0x77, 0x94, 0xcf,
    +	0x2c, 0x40, 0xac, 0x08, 0x0f, 0x38, 0x71, 0xcd, 0x2f, 0xd6, 0xde, 0xe1, 0xad, 0x7b, 0x1c, 0x1c,
    +	0x58, 0xc5, 0xe2, 0x34, 0x50, 0x2d, 0xb5, 0xef, 0xb6, 0xa1, 0xd2, 0xd2, 0x52, 0x76, 0x67, 0xd6,
    +	0x85, 0xa4, 0x32, 0xcd, 0xe2, 0xe4, 0x7e, 0x61, 0x4f, 0x23, 0xb2, 0x61, 0xa6, 0x1f, 0xaa, 0xf0,
    +	0xbc, 0x7b, 0x95, 0x96, 0x6e, 0x1e, 0x23, 0x60, 0x21, 0x79, 0xa7, 0xb4, 0x86, 0x69, 0x38, 0x62,
    +	0x43, 0xfe, 0x42, 0x82, 0x2b, 0x29, 0xe1, 0x3a, 0x83, 0xdc, 0x68, 0x87, 0x73, 0xa3, 0xf9, 0xec,
    +	0xb9, 0x91, 0x9c, 0x14, 0xbf, 0x3a, 0x9f, 0xea, 0x25, 0xcf, 0x86, 0x4f, 0xa0, 0xc8, 0xcf, 0xe1,
    +	0x03, 0xdd, 0x68, 0x25, 0xf4, 0xd0, 0x2c, 0x47, 0xcb, 0x44, 0x95, 0x0b, 0xe3, 0x51, 0xad, 0xe8,
    +	0x7d, 0x62, 0x5f, 0x29, 0xfa, 0x3e, 0xcc, 0xf5, 0x05, 0x50, 0x60, 0xf2, 0xba, 0x61, 0x53, 0x91,
    +	0x43, 0x27, 0x3f, 0xdf, 0x8b, 0xe3, 0x51, 0x6d, 0x6e, 0x35, 0xa2, 0x15, 0xc7, 0xec, 0x20, 0x0d,
    +	0x8a, 0xbb, 0xaa, 0xa5, 0xab, 0xdb, 0x7e, 0xeb, 0x3c, 0x46, 0xe2, 0x6e, 0x0a, 0x51, 0xa5, 0x2c,
    +	0x42, 0x5b, 0x74, 0x29, 0x14, 0xfb, 0x7a, 0x99, 0x91, 0xfe, 0xd0, 0x81, 0x89, 0x6e, 0x5f, 0x5c,
    +	0x3a, 0xee, 0x71, 0x9a, 0x86, 0x6f, 0xc4, 0xa5, 0x50, 0xec, 0xeb, 0x45, 0x2b, 0x70, 0xa1, 0xad,
    +	0xea, 0xbd, 0xa1, 0x45, 0x44, 0xd3, 0x9b, 0xe4, 0x17, 0xf7, 0xab, 0xac, 0x83, 0xdf, 0x09, 0x32,
    +	0x0e, 0x46, 0xb5, 0x72, 0x88, 0xc0, 0x1b, 0x5f, 0x58, 0x18, 0xfd, 0x00, 0x66, 0xfb, 0x21, 0xf0,
    +	0x46, 0x2b, 0xe7, 0xf9, 0xc6, 0x8f, 0x7b, 0x24, 0x9e, 0x02, 0x1f, 0xe8, 0x86, 0xe9, 0x14, 0x47,
    +	0x2d, 0xa1, 0x9f, 0x49, 0x80, 0x2c, 0xa2, 0x1b, 0xbb, 0xa6, 0xc6, 0x35, 0x86, 0xba, 0xf8, 0xb7,
    +	0x85, 0x1a, 0x84, 0x63, 0x2b, 0x0e, 0x46, 0xb5, 0x5b, 0x19, 0x9e, 0x2d, 0xf5, 0xb8, 0x24, 0x0f,
    +	0x41, 0x82, 0x4d, 0xf9, 0x6f, 0x05, 0x98, 0x75, 0x6f, 0xc7, 0x16, 0xd9, 0xee, 0x9a, 0xe6, 0x4e,
    +	0x06, 0x18, 0xfb, 0x08, 0xa6, 0xb5, 0x9e, 0x4e, 0x0c, 0xdb, 0x79, 0x69, 0x88, 0x6c, 0xfe, 0x56,
    +	0xf6, 0xd0, 0x09, 0x53, 0xcb, 0x01, 0x25, 0xca, 0x45, 0x61, 0x68, 0x3a, 0x48, 0xc5, 0x21, 0x43,
    +	0xe8, 0x23, 0x98, 0xb4, 0x02, 0x28, 0xf0, 0xad, 0x2c, 0x16, 0xeb, 0x09, 0x98, 0xcb, 0x2b, 0x15,
    +	0x0e, 0xc8, 0x72, 0x94, 0xc6, 0x53, 0x6c, 0xe2, 0x59, 0x52, 0x2c, 0x82, 0xd1, 0x8a, 0x27, 0xc2,
    +	0x68, 0xc9, 0x50, 0x7f, 0xf2, 0xc5, 0x40, 0xfd, 0xd2, 0xf3, 0x85, 0xfa, 0xef, 0x41, 0x89, 0xea,
    +	0x2d, 0x72, 0xbb, 0xdd, 0x26, 0x9a, 0xcd, 0xee, 0xa3, 0x17, 0xb0, 0x0d, 0x9f, 0xcc, 0x02, 0xe6,
    +	0x7f, 0x2e, 0xf7, 0x54, 0x4a, 0x71, 0x50, 0x0c, 0xdd, 0x82, 0x19, 0xf6, 0x46, 0x36, 0x87, 0xf6,
    +	0x06, 0xd1, 0x4c, 0xa3, 0x45, 0xf9, 0xbd, 0x9a, 0x74, 0x76, 0xf0, 0x30, 0xc4, 0xc1, 0x91, 0x95,
    +	0xe8, 0x43, 0x98, 0xf7, 0xb2, 0x08, 0x93, 0x5d, 0x9d, 0x3c, 0xda, 0x24, 0x16, 0xe5, 0xd5, 0xa1,
    +	0xb0, 0x90, 0x5f, 0x2c, 0x2a, 0x57, 0xc6, 0xa3, 0xda, 0x7c, 0x33, 0x79, 0x09, 0x4e, 0x93, 0x45,
    +	0x3f, 0x4d, 0xbe, 0xef, 0xc0, 0x1d, 0x7c, 0x78, 0x56, 0x77, 0x3d, 0xa9, 0xe6, 0x4d, 0x9f, 0x55,
    +	0xcd, 0x93, 0xff, 0x2c, 0xc1, 0xd5, 0x48, 0xa1, 0x09, 0x8f, 0x29, 0x9e, 0x3f, 0x04, 0xff, 0x2e,
    +	0x14, 0x84, 0x65, 0x17, 0x74, 0x7c, 0xf3, 0xf8, 0xa0, 0x43, 0x68, 0x50, 0x26, 0x98, 0x29, 0xec,
    +	0x29, 0x94, 0xff, 0x21, 0xc1, 0xc2, 0x61, 0xfe, 0x9d, 0x01, 0xa2, 0xda, 0x09, 0x23, 0xaa, 0x3b,
    +	0x27, 0x76, 0x2e, 0xb4, 0xf1, 0x14, 0x58, 0xf5, 0xdb, 0x1c, 0x14, 0xdc, 0x3e, 0x8d, 0xde, 0x61,
    +	0x18, 0xca, 0xd6, 0xba, 0x2c, 0xf5, 0xc4, 0x54, 0xa3, 0xea, 0x36, 0xf3, 0x75, 0x97, 0x71, 0x10,
    +	0xfc, 0xc0, 0xbe, 0x00, 0xbf, 0x1e, 0x6a, 0x6c, 0x6e, 0x25, 0x20, 0xf0, 0x3b, 0xd9, 0xbd, 0x88,
    +	0xcf, 0xbe, 0x94, 0xaf, 0xb0, 0xcb, 0x15, 0xa7, 0xe3, 0x04, 0x7b, 0x0c, 0x08, 0x7e, 0x4a, 0x4d,
    +	0x83, 0x6f, 0x91, 0x57, 0xfe, 0x63, 0x01, 0x41, 0x6f, 0x96, 0xe4, 0x00, 0x41, 0xef, 0x13, 0xfb,
    +	0x4a, 0xe5, 0xa7, 0x12, 0xcc, 0xa7, 0x4c, 0x01, 0xd0, 0x5b, 0xfe, 0x9c, 0x83, 0x57, 0xe7, 0x8a,
    +	0xc4, 0x0b, 0x4e, 0x39, 0x38, 0xa0, 0xe0, 0x0c, 0x1c, 0x5e, 0x87, 0x7e, 0xc2, 0x8a, 0x4b, 0x4c,
    +	0x9f, 0x68, 0xc9, 0x27, 0x6e, 0x90, 0x97, 0x3d, 0x14, 0x12, 0xe3, 0xe1, 0x04, 0x73, 0xb2, 0x0a,
    +	0x3e, 0xf6, 0x65, 0x0f, 0x2c, 0x75, 0xa0, 0x8b, 0xf2, 0x17, 0x7d, 0x60, 0x35, 0xd7, 0xef, 0x09,
    +	0x0e, 0x0e, 0xac, 0x62, 0xa0, 0x63, 0x87, 0x21, 0xf0, 0x5c, 0x18, 0x74, 0x70, 0x2c, 0xcd, 0x39,
    +	0xf2, 0xef, 0x72, 0xe0, 0xbd, 0x9d, 0x32, 0x60, 0x94, 0x06, 0x14, 0xbd, 0x9e, 0x26, 0xb4, 0x7a,
    +	0x00, 0xd3, 0xeb, 0x7f, 0xd8, 0x5f, 0x83, 0x3e, 0x86, 0x02, 0x75, 0x3b, 0x5d, 0xfe, 0xe4, 0x9d,
    +	0x8e, 0xbf, 0xf1, 0xbc, 0x1e, 0xe7, 0xa9, 0x44, 0x36, 0xcc, 0xf3, 0x27, 0x01, 0xb1, 0x89, 0xb5,
    +	0x66, 0xda, 0x77, 0xcc, 0xa1, 0xd1, 0x6a, 0x6a, 0x3c, 0xd3, 0x1d, 0x98, 0x71, 0x8b, 0xf5, 0x96,
    +	0xf5, 0xe4, 0x25, 0x07, 0xa3, 0xda, 0x95, 0x14, 0x16, 0xbf, 0x4d, 0x69, 0xaa, 0xe5, 0x5f, 0x4b,
    +	0x30, 0xb7, 0x41, 0xac, 0x5d, 0x5d, 0x23, 0x98, 0xb4, 0x89, 0x45, 0x0c, 0x2d, 0x12, 0x1a, 0x29,
    +	0x43, 0x68, 0xdc, 0x68, 0xe7, 0x52, 0xa3, 0x7d, 0x15, 0x26, 0x06, 0xaa, 0xdd, 0x15, 0x53, 0xd7,
    +	0x02, 0xe3, 0xae, 0xab, 0x76, 0x17, 0x73, 0x2a, 0xe7, 0x9a, 0x96, 0xcd, 0x1d, 0x9d, 0x14, 0x5c,
    +	0xd3, 0xb2, 0x31, 0xa7, 0xca, 0xbf, 0x94, 0x60, 0x9a, 0x79, 0xb1, 0xdc, 0x25, 0xda, 0x8e, 0x6e,
    +	0x74, 0xd0, 0x67, 0x12, 0x20, 0x12, 0x9d, 0x04, 0x3b, 0x37, 0xa2, 0xb4, 0xf4, 0x76, 0xf6, 0x3b,
    +	0x19, 0x9b, 0x26, 0xfb, 0x69, 0x1d, 0x63, 0x51, 0x9c, 0x60, 0x52, 0xfe, 0x53, 0x0e, 0x2e, 0x6d,
    +	0xaa, 0x3d, 0xbd, 0xf5, 0x82, 0x66, 0x64, 0x7a, 0x68, 0x6a, 0x74, 0xf7, 0x38, 0x2f, 0xb7, 0x94,
    +	0x4d, 0xa7, 0x0d, 0x8c, 0xd0, 0xf7, 0xe0, 0x3c, 0xb5, 0x55, 0x7b, 0xe8, 0xce, 0x1e, 0xee, 0x9d,
    +	0x86, 0x31, 0xae, 0x50, 0x99, 0x11, 0xe6, 0xce, 0x3b, 0xdf, 0x58, 0x18, 0x92, 0xff, 0x2d, 0xc1,
    +	0x42, 0xaa, 0xec, 0xd9, 0x8d, 0xe6, 0x06, 0xa1, 0x20, 0xaf, 0x9d, 0x82, 0xdf, 0x47, 0x0d, 0xe7,
    +	0xfe, 0x25, 0xc1, 0x6b, 0x47, 0x09, 0x9f, 0x01, 0x60, 0x30, 0xc3, 0x80, 0xe1, 0xfe, 0xe9, 0x79,
    +	0x9e, 0x02, 0x1a, 0x3e, 0xcb, 0x1f, 0xed, 0xf7, 0xcb, 0x11, 0x5d, 0xe0, 0x1f, 0x3d, 0x5b, 0x50,
    +	0xde, 0x15, 0xf1, 0x32, 0x0d, 0xa7, 0xa4, 0x3b, 0x13, 0x96, 0xa2, 0x72, 0x9d, 0x3d, 0xe4, 0x36,
    +	0xa3, 0xcc, 0x83, 0x51, 0x6d, 0x2e, 0x4a, 0xc4, 0x71, 0x1d, 0xf2, 0xdf, 0x25, 0xb8, 0x96, 0x7a,
    +	0x12, 0x67, 0x90, 0x7a, 0xdd, 0x70, 0xea, 0x2d, 0x9f, 0x46, 0xea, 0xa5, 0xce, 0xff, 0xae, 0x1d,
    +	0x5a, 0x0d, 0xff, 0xcf, 0x27, 0x80, 0x3b, 0x50, 0xf2, 0x8f, 0xdf, 0x1d, 0x9c, 0xbc, 0x71, 0xfc,
    +	0x78, 0x9b, 0x86, 0xf2, 0x8a, 0x08, 0x70, 0xc9, 0xa7, 0x51, 0x1c, 0xd4, 0x7e, 0xca, 0x13, 0x94,
    +	0x1f, 0xc1, 0x9c, 0x1a, 0xfe, 0x2f, 0x34, 0xad, 0x4c, 0x1e, 0xf7, 0xe1, 0x16, 0xf9, 0x3f, 0xb6,
    +	0x52, 0x11, 0x4e, 0xcc, 0x45, 0x18, 0x14, 0xc7, 0x8c, 0xbd, 0xd8, 0x29, 0x61, 0x68, 0x74, 0x3b,
    +	0xf5, 0x7c, 0x46, 0xb7, 0xf2, 0x1f, 0x72, 0x50, 0x3b, 0xa2, 0x7d, 0xa3, 0xfb, 0x80, 0xcc, 0x6d,
    +	0x4a, 0xac, 0x5d, 0xd2, 0xba, 0xeb, 0xfc, 0xe2, 0xc0, 0x85, 0xf5, 0x79, 0x1f, 0x50, 0x3d, 0x88,
    +	0xad, 0xc0, 0x09, 0x52, 0xa8, 0x07, 0xd3, 0x76, 0x00, 0xea, 0x89, 0x5b, 0xf0, 0x66, 0x76, 0xbf,
    +	0x82, 0x40, 0x51, 0x99, 0x1b, 0x8f, 0x6a, 0x21, 0xe8, 0x88, 0x43, 0xda, 0x91, 0x06, 0xa0, 0xf9,
    +	0x47, 0xe7, 0xa4, 0x7e, 0x23, 0x5b, 0x15, 0xf3, 0x4f, 0xcc, 0xeb, 0x3b, 0x81, 0xc3, 0x0a, 0xa8,
    +	0x95, 0xf7, 0xa7, 0xa0, 0xec, 0x87, 0xf0, 0xe5, 0x10, 0xf5, 0xe5, 0x10, 0xf5, 0xd0, 0x21, 0x2a,
    +	0xbc, 0x1c, 0xa2, 0x9e, 0x68, 0x88, 0x9a, 0x50, 0x8b, 0x4b, 0x67, 0x36, 0xbd, 0xdc, 0x97, 0xa0,
    +	0x1a, 0xbb, 0xe3, 0x67, 0x3d, 0xbf, 0xfc, 0x38, 0x36, 0xbf, 0x7c, 0xfb, 0x24, 0xb0, 0x29, 0x6d,
    +	0x82, 0xf9, 0xa5, 0x04, 0xf2, 0xe1, 0x3e, 0xfe, 0x4f, 0xff, 0x62, 0xe0, 0xf0, 0xad, 0xa7, 0x80,
    +	0xc3, 0xff, 0x48, 0x00, 0x3e, 0x98, 0x41, 0xaf, 0x41, 0xe0, 0x47, 0x58, 0xa2, 0x74, 0x3b, 0x61,
    +	0x0a, 0xd0, 0xd1, 0x75, 0x98, 0xea, 0x13, 0x4a, 0xd5, 0x8e, 0x3b, 0x10, 0xf1, 0x7e, 0x64, 0xb6,
    +	0xea, 0x90, 0xb1, 0xcb, 0x47, 0x5b, 0x70, 0xde, 0x22, 0x2a, 0x15, 0xd3, 0xcc, 0xa2, 0xf2, 0x2e,
    +	0x7b, 0x05, 0x63, 0x4e, 0x39, 0x18, 0xd5, 0x6e, 0x64, 0xf9, 0x39, 0x61, 0x5d, 0x3c, 0x9a, 0xb9,
    +	0x10, 0x16, 0xea, 0xd0, 0x5d, 0x28, 0x0b, 0x1b, 0x81, 0x0d, 0x3b, 0x95, 0xf6, 0x92, 0xd8, 0x4d,
    +	0x79, 0x35, 0xba, 0x00, 0xc7, 0x65, 0xe4, 0xfb, 0x50, 0x70, 0x81, 0x01, 0xaa, 0xc0, 0x44, 0xe0,
    +	0xbd, 0xe5, 0x38, 0xce, 0x29, 0x91, 0xc0, 0xe4, 0x92, 0x03, 0x23, 0xff, 0x5e, 0x82, 0x57, 0x12,
    +	0x9a, 0x12, 0xba, 0x04, 0xf9, 0xa1, 0xd5, 0x13, 0x21, 0x98, 0x1a, 0x8f, 0x6a, 0xf9, 0x0f, 0xf1,
    +	0x0a, 0x66, 0x34, 0xa4, 0xc2, 0x14, 0x75, 0xc6, 0x53, 0x22, 0x99, 0x6e, 0x65, 0x3f, 0xf1, 0xe8,
    +	0x5c, 0x4b, 0x29, 0xb1, 0x33, 0x70, 0xa9, 0xae, 0x5e, 0xb4, 0x08, 0x05, 0x4d, 0x55, 0x86, 0x46,
    +	0xab, 0xe7, 0x9c, 0xd7, 0xb4, 0xf3, 0xc6, 0x5b, 0x6e, 0x3a, 0x34, 0xec, 0x71, 0x95, 0xb5, 0x27,
    +	0xfb, 0xd5, 0x73, 0x9f, 0xef, 0x57, 0xcf, 0x3d, 0xdd, 0xaf, 0x9e, 0xfb, 0xf1, 0xb8, 0x2a, 0x3d,
    +	0x19, 0x57, 0xa5, 0xcf, 0xc7, 0x55, 0xe9, 0xe9, 0xb8, 0x2a, 0xfd, 0x65, 0x5c, 0x95, 0x7e, 0xfe,
    +	0x45, 0xf5, 0xdc, 0x77, 0x16, 0xb3, 0xfe, 0x98, 0xf5, 0xbf, 0x01, 0x00, 0x00, 0xff, 0xff, 0x13,
    +	0x7c, 0x49, 0xa4, 0xf7, 0x2a, 0x00, 0x00,
    +}
    +
    +func (m *ApplyConfiguration) Marshal() (dAtA []byte, err error) {
    +	size := m.Size()
    +	dAtA = make([]byte, size)
    +	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    +	if err != nil {
    +		return nil, err
    +	}
    +	return dAtA[:n], nil
    +}
    +
    +func (m *ApplyConfiguration) MarshalTo(dAtA []byte) (int, error) {
    +	size := m.Size()
    +	return m.MarshalToSizedBuffer(dAtA[:size])
    +}
    +
    +func (m *ApplyConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    +	i := len(dAtA)
    +	_ = i
    +	var l int
    +	_ = l
    +	i -= len(m.Expression)
    +	copy(dAtA[i:], m.Expression)
    +	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Expression)))
    +	i--
    +	dAtA[i] = 0xa
    +	return len(dAtA) - i, nil
     }
     
     func (m *AuditAnnotation) Marshal() (dAtA []byte, err error) {
    @@ -971,6 +1277,34 @@ func (m *ExpressionWarning) MarshalToSizedBuffer(dAtA []byte) (int, error) {
     	return len(dAtA) - i, nil
     }
     
    +func (m *JSONPatch) Marshal() (dAtA []byte, err error) {
    +	size := m.Size()
    +	dAtA = make([]byte, size)
    +	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    +	if err != nil {
    +		return nil, err
    +	}
    +	return dAtA[:n], nil
    +}
    +
    +func (m *JSONPatch) MarshalTo(dAtA []byte) (int, error) {
    +	size := m.Size()
    +	return m.MarshalToSizedBuffer(dAtA[:size])
    +}
    +
    +func (m *JSONPatch) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    +	i := len(dAtA)
    +	_ = i
    +	var l int
    +	_ = l
    +	i -= len(m.Expression)
    +	copy(dAtA[i:], m.Expression)
    +	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Expression)))
    +	i--
    +	dAtA[i] = 0xa
    +	return len(dAtA) - i, nil
    +}
    +
     func (m *MatchCondition) Marshal() (dAtA []byte, err error) {
     	size := m.Size()
     	dAtA = make([]byte, size)
    @@ -1086,7 +1420,7 @@ func (m *MatchResources) MarshalToSizedBuffer(dAtA []byte) (int, error) {
     	return len(dAtA) - i, nil
     }
     
    -func (m *MutatingWebhook) Marshal() (dAtA []byte, err error) {
    +func (m *MutatingAdmissionPolicy) Marshal() (dAtA []byte, err error) {
     	size := m.Size()
     	dAtA = make([]byte, size)
     	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    @@ -1096,112 +1430,18 @@ func (m *MutatingWebhook) Marshal() (dAtA []byte, err error) {
     	return dAtA[:n], nil
     }
     
    -func (m *MutatingWebhook) MarshalTo(dAtA []byte) (int, error) {
    +func (m *MutatingAdmissionPolicy) MarshalTo(dAtA []byte) (int, error) {
     	size := m.Size()
     	return m.MarshalToSizedBuffer(dAtA[:size])
     }
     
    -func (m *MutatingWebhook) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    +func (m *MutatingAdmissionPolicy) MarshalToSizedBuffer(dAtA []byte) (int, error) {
     	i := len(dAtA)
     	_ = i
     	var l int
     	_ = l
    -	if len(m.MatchConditions) > 0 {
    -		for iNdEx := len(m.MatchConditions) - 1; iNdEx >= 0; iNdEx-- {
    -			{
    -				size, err := m.MatchConditions[iNdEx].MarshalToSizedBuffer(dAtA[:i])
    -				if err != nil {
    -					return 0, err
    -				}
    -				i -= size
    -				i = encodeVarintGenerated(dAtA, i, uint64(size))
    -			}
    -			i--
    -			dAtA[i] = 0x62
    -		}
    -	}
    -	if m.ObjectSelector != nil {
    -		{
    -			size, err := m.ObjectSelector.MarshalToSizedBuffer(dAtA[:i])
    -			if err != nil {
    -				return 0, err
    -			}
    -			i -= size
    -			i = encodeVarintGenerated(dAtA, i, uint64(size))
    -		}
    -		i--
    -		dAtA[i] = 0x5a
    -	}
    -	if m.ReinvocationPolicy != nil {
    -		i -= len(*m.ReinvocationPolicy)
    -		copy(dAtA[i:], *m.ReinvocationPolicy)
    -		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ReinvocationPolicy)))
    -		i--
    -		dAtA[i] = 0x52
    -	}
    -	if m.MatchPolicy != nil {
    -		i -= len(*m.MatchPolicy)
    -		copy(dAtA[i:], *m.MatchPolicy)
    -		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.MatchPolicy)))
    -		i--
    -		dAtA[i] = 0x4a
    -	}
    -	if len(m.AdmissionReviewVersions) > 0 {
    -		for iNdEx := len(m.AdmissionReviewVersions) - 1; iNdEx >= 0; iNdEx-- {
    -			i -= len(m.AdmissionReviewVersions[iNdEx])
    -			copy(dAtA[i:], m.AdmissionReviewVersions[iNdEx])
    -			i = encodeVarintGenerated(dAtA, i, uint64(len(m.AdmissionReviewVersions[iNdEx])))
    -			i--
    -			dAtA[i] = 0x42
    -		}
    -	}
    -	if m.TimeoutSeconds != nil {
    -		i = encodeVarintGenerated(dAtA, i, uint64(*m.TimeoutSeconds))
    -		i--
    -		dAtA[i] = 0x38
    -	}
    -	if m.SideEffects != nil {
    -		i -= len(*m.SideEffects)
    -		copy(dAtA[i:], *m.SideEffects)
    -		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.SideEffects)))
    -		i--
    -		dAtA[i] = 0x32
    -	}
    -	if m.NamespaceSelector != nil {
    -		{
    -			size, err := m.NamespaceSelector.MarshalToSizedBuffer(dAtA[:i])
    -			if err != nil {
    -				return 0, err
    -			}
    -			i -= size
    -			i = encodeVarintGenerated(dAtA, i, uint64(size))
    -		}
    -		i--
    -		dAtA[i] = 0x2a
    -	}
    -	if m.FailurePolicy != nil {
    -		i -= len(*m.FailurePolicy)
    -		copy(dAtA[i:], *m.FailurePolicy)
    -		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.FailurePolicy)))
    -		i--
    -		dAtA[i] = 0x22
    -	}
    -	if len(m.Rules) > 0 {
    -		for iNdEx := len(m.Rules) - 1; iNdEx >= 0; iNdEx-- {
    -			{
    -				size, err := m.Rules[iNdEx].MarshalToSizedBuffer(dAtA[:i])
    -				if err != nil {
    -					return 0, err
    -				}
    -				i -= size
    -				i = encodeVarintGenerated(dAtA, i, uint64(size))
    -			}
    -			i--
    -			dAtA[i] = 0x1a
    -		}
    -	}
     	{
    -		size, err := m.ClientConfig.MarshalToSizedBuffer(dAtA[:i])
    +		size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
     		if err != nil {
     			return 0, err
     		}
    @@ -1210,15 +1450,20 @@ func (m *MutatingWebhook) MarshalToSizedBuffer(dAtA []byte) (int, error) {
     	}
     	i--
     	dAtA[i] = 0x12
    -	i -= len(m.Name)
    -	copy(dAtA[i:], m.Name)
    -	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
    +	{
    +		size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
    +		if err != nil {
    +			return 0, err
    +		}
    +		i -= size
    +		i = encodeVarintGenerated(dAtA, i, uint64(size))
    +	}
     	i--
     	dAtA[i] = 0xa
     	return len(dAtA) - i, nil
     }
     
    -func (m *MutatingWebhookConfiguration) Marshal() (dAtA []byte, err error) {
    +func (m *MutatingAdmissionPolicyBinding) Marshal() (dAtA []byte, err error) {
     	size := m.Size()
     	dAtA = make([]byte, size)
     	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    @@ -1228,30 +1473,26 @@ func (m *MutatingWebhookConfiguration) Marshal() (dAtA []byte, err error) {
     	return dAtA[:n], nil
     }
     
    -func (m *MutatingWebhookConfiguration) MarshalTo(dAtA []byte) (int, error) {
    +func (m *MutatingAdmissionPolicyBinding) MarshalTo(dAtA []byte) (int, error) {
     	size := m.Size()
     	return m.MarshalToSizedBuffer(dAtA[:size])
     }
     
    -func (m *MutatingWebhookConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    +func (m *MutatingAdmissionPolicyBinding) MarshalToSizedBuffer(dAtA []byte) (int, error) {
     	i := len(dAtA)
     	_ = i
     	var l int
     	_ = l
    -	if len(m.Webhooks) > 0 {
    -		for iNdEx := len(m.Webhooks) - 1; iNdEx >= 0; iNdEx-- {
    -			{
    -				size, err := m.Webhooks[iNdEx].MarshalToSizedBuffer(dAtA[:i])
    -				if err != nil {
    -					return 0, err
    -				}
    -				i -= size
    -				i = encodeVarintGenerated(dAtA, i, uint64(size))
    -			}
    -			i--
    -			dAtA[i] = 0x12
    +	{
    +		size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
    +		if err != nil {
    +			return 0, err
     		}
    +		i -= size
    +		i = encodeVarintGenerated(dAtA, i, uint64(size))
     	}
    +	i--
    +	dAtA[i] = 0x12
     	{
     		size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
     		if err != nil {
    @@ -1265,7 +1506,7 @@ func (m *MutatingWebhookConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, e
     	return len(dAtA) - i, nil
     }
     
    -func (m *MutatingWebhookConfigurationList) Marshal() (dAtA []byte, err error) {
    +func (m *MutatingAdmissionPolicyBindingList) Marshal() (dAtA []byte, err error) {
     	size := m.Size()
     	dAtA = make([]byte, size)
     	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    @@ -1275,12 +1516,12 @@ func (m *MutatingWebhookConfigurationList) Marshal() (dAtA []byte, err error) {
     	return dAtA[:n], nil
     }
     
    -func (m *MutatingWebhookConfigurationList) MarshalTo(dAtA []byte) (int, error) {
    +func (m *MutatingAdmissionPolicyBindingList) MarshalTo(dAtA []byte) (int, error) {
     	size := m.Size()
     	return m.MarshalToSizedBuffer(dAtA[:size])
     }
     
    -func (m *MutatingWebhookConfigurationList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    +func (m *MutatingAdmissionPolicyBindingList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
     	i := len(dAtA)
     	_ = i
     	var l int
    @@ -1312,7 +1553,7 @@ func (m *MutatingWebhookConfigurationList) MarshalToSizedBuffer(dAtA []byte) (in
     	return len(dAtA) - i, nil
     }
     
    -func (m *NamedRuleWithOperations) Marshal() (dAtA []byte, err error) {
    +func (m *MutatingAdmissionPolicyBindingSpec) Marshal() (dAtA []byte, err error) {
     	size := m.Size()
     	dAtA = make([]byte, size)
     	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    @@ -1322,39 +1563,49 @@ func (m *NamedRuleWithOperations) Marshal() (dAtA []byte, err error) {
     	return dAtA[:n], nil
     }
     
    -func (m *NamedRuleWithOperations) MarshalTo(dAtA []byte) (int, error) {
    +func (m *MutatingAdmissionPolicyBindingSpec) MarshalTo(dAtA []byte) (int, error) {
     	size := m.Size()
     	return m.MarshalToSizedBuffer(dAtA[:size])
     }
     
    -func (m *NamedRuleWithOperations) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    +func (m *MutatingAdmissionPolicyBindingSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
     	i := len(dAtA)
     	_ = i
     	var l int
     	_ = l
    -	{
    -		size, err := m.RuleWithOperations.MarshalToSizedBuffer(dAtA[:i])
    -		if err != nil {
    -			return 0, err
    +	if m.MatchResources != nil {
    +		{
    +			size, err := m.MatchResources.MarshalToSizedBuffer(dAtA[:i])
    +			if err != nil {
    +				return 0, err
    +			}
    +			i -= size
    +			i = encodeVarintGenerated(dAtA, i, uint64(size))
     		}
    -		i -= size
    -		i = encodeVarintGenerated(dAtA, i, uint64(size))
    +		i--
    +		dAtA[i] = 0x1a
     	}
    -	i--
    -	dAtA[i] = 0x12
    -	if len(m.ResourceNames) > 0 {
    -		for iNdEx := len(m.ResourceNames) - 1; iNdEx >= 0; iNdEx-- {
    -			i -= len(m.ResourceNames[iNdEx])
    -			copy(dAtA[i:], m.ResourceNames[iNdEx])
    -			i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceNames[iNdEx])))
    -			i--
    -			dAtA[i] = 0xa
    +	if m.ParamRef != nil {
    +		{
    +			size, err := m.ParamRef.MarshalToSizedBuffer(dAtA[:i])
    +			if err != nil {
    +				return 0, err
    +			}
    +			i -= size
    +			i = encodeVarintGenerated(dAtA, i, uint64(size))
     		}
    +		i--
    +		dAtA[i] = 0x12
     	}
    +	i -= len(m.PolicyName)
    +	copy(dAtA[i:], m.PolicyName)
    +	i = encodeVarintGenerated(dAtA, i, uint64(len(m.PolicyName)))
    +	i--
    +	dAtA[i] = 0xa
     	return len(dAtA) - i, nil
     }
     
    -func (m *ParamKind) Marshal() (dAtA []byte, err error) {
    +func (m *MutatingAdmissionPolicyList) Marshal() (dAtA []byte, err error) {
     	size := m.Size()
     	dAtA = make([]byte, size)
     	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    @@ -1364,30 +1615,44 @@ func (m *ParamKind) Marshal() (dAtA []byte, err error) {
     	return dAtA[:n], nil
     }
     
    -func (m *ParamKind) MarshalTo(dAtA []byte) (int, error) {
    +func (m *MutatingAdmissionPolicyList) MarshalTo(dAtA []byte) (int, error) {
     	size := m.Size()
     	return m.MarshalToSizedBuffer(dAtA[:size])
     }
     
    -func (m *ParamKind) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    +func (m *MutatingAdmissionPolicyList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
     	i := len(dAtA)
     	_ = i
     	var l int
     	_ = l
    -	i -= len(m.Kind)
    -	copy(dAtA[i:], m.Kind)
    -	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind)))
    -	i--
    -	dAtA[i] = 0x12
    -	i -= len(m.APIVersion)
    -	copy(dAtA[i:], m.APIVersion)
    -	i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion)))
    +	if len(m.Items) > 0 {
    +		for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
    +			{
    +				size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
    +				if err != nil {
    +					return 0, err
    +				}
    +				i -= size
    +				i = encodeVarintGenerated(dAtA, i, uint64(size))
    +			}
    +			i--
    +			dAtA[i] = 0x12
    +		}
    +	}
    +	{
    +		size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
    +		if err != nil {
    +			return 0, err
    +		}
    +		i -= size
    +		i = encodeVarintGenerated(dAtA, i, uint64(size))
    +	}
     	i--
     	dAtA[i] = 0xa
     	return len(dAtA) - i, nil
     }
     
    -func (m *ParamRef) Marshal() (dAtA []byte, err error) {
    +func (m *MutatingAdmissionPolicySpec) Marshal() (dAtA []byte, err error) {
     	size := m.Size()
     	dAtA = make([]byte, size)
     	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    @@ -1397,26 +1662,73 @@ func (m *ParamRef) Marshal() (dAtA []byte, err error) {
     	return dAtA[:n], nil
     }
     
    -func (m *ParamRef) MarshalTo(dAtA []byte) (int, error) {
    +func (m *MutatingAdmissionPolicySpec) MarshalTo(dAtA []byte) (int, error) {
     	size := m.Size()
     	return m.MarshalToSizedBuffer(dAtA[:size])
     }
     
    -func (m *ParamRef) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    +func (m *MutatingAdmissionPolicySpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
     	i := len(dAtA)
     	_ = i
     	var l int
     	_ = l
    -	if m.ParameterNotFoundAction != nil {
    -		i -= len(*m.ParameterNotFoundAction)
    -		copy(dAtA[i:], *m.ParameterNotFoundAction)
    -		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ParameterNotFoundAction)))
    +	i -= len(m.ReinvocationPolicy)
    +	copy(dAtA[i:], m.ReinvocationPolicy)
    +	i = encodeVarintGenerated(dAtA, i, uint64(len(m.ReinvocationPolicy)))
    +	i--
    +	dAtA[i] = 0x3a
    +	if len(m.MatchConditions) > 0 {
    +		for iNdEx := len(m.MatchConditions) - 1; iNdEx >= 0; iNdEx-- {
    +			{
    +				size, err := m.MatchConditions[iNdEx].MarshalToSizedBuffer(dAtA[:i])
    +				if err != nil {
    +					return 0, err
    +				}
    +				i -= size
    +				i = encodeVarintGenerated(dAtA, i, uint64(size))
    +			}
    +			i--
    +			dAtA[i] = 0x32
    +		}
    +	}
    +	if m.FailurePolicy != nil {
    +		i -= len(*m.FailurePolicy)
    +		copy(dAtA[i:], *m.FailurePolicy)
    +		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.FailurePolicy)))
     		i--
    -		dAtA[i] = 0x22
    +		dAtA[i] = 0x2a
     	}
    -	if m.Selector != nil {
    +	if len(m.Mutations) > 0 {
    +		for iNdEx := len(m.Mutations) - 1; iNdEx >= 0; iNdEx-- {
    +			{
    +				size, err := m.Mutations[iNdEx].MarshalToSizedBuffer(dAtA[:i])
    +				if err != nil {
    +					return 0, err
    +				}
    +				i -= size
    +				i = encodeVarintGenerated(dAtA, i, uint64(size))
    +			}
    +			i--
    +			dAtA[i] = 0x22
    +		}
    +	}
    +	if len(m.Variables) > 0 {
    +		for iNdEx := len(m.Variables) - 1; iNdEx >= 0; iNdEx-- {
    +			{
    +				size, err := m.Variables[iNdEx].MarshalToSizedBuffer(dAtA[:i])
    +				if err != nil {
    +					return 0, err
    +				}
    +				i -= size
    +				i = encodeVarintGenerated(dAtA, i, uint64(size))
    +			}
    +			i--
    +			dAtA[i] = 0x1a
    +		}
    +	}
    +	if m.MatchConstraints != nil {
     		{
    -			size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i])
    +			size, err := m.MatchConstraints.MarshalToSizedBuffer(dAtA[:i])
     			if err != nil {
     				return 0, err
     			}
    @@ -1424,67 +1736,24 @@ func (m *ParamRef) MarshalToSizedBuffer(dAtA []byte) (int, error) {
     			i = encodeVarintGenerated(dAtA, i, uint64(size))
     		}
     		i--
    -		dAtA[i] = 0x1a
    -	}
    -	i -= len(m.Namespace)
    -	copy(dAtA[i:], m.Namespace)
    -	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace)))
    -	i--
    -	dAtA[i] = 0x12
    -	i -= len(m.Name)
    -	copy(dAtA[i:], m.Name)
    -	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
    -	i--
    -	dAtA[i] = 0xa
    -	return len(dAtA) - i, nil
    -}
    -
    -func (m *ServiceReference) Marshal() (dAtA []byte, err error) {
    -	size := m.Size()
    -	dAtA = make([]byte, size)
    -	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    -	if err != nil {
    -		return nil, err
    -	}
    -	return dAtA[:n], nil
    -}
    -
    -func (m *ServiceReference) MarshalTo(dAtA []byte) (int, error) {
    -	size := m.Size()
    -	return m.MarshalToSizedBuffer(dAtA[:size])
    -}
    -
    -func (m *ServiceReference) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    -	i := len(dAtA)
    -	_ = i
    -	var l int
    -	_ = l
    -	if m.Port != nil {
    -		i = encodeVarintGenerated(dAtA, i, uint64(*m.Port))
    -		i--
    -		dAtA[i] = 0x20
    +		dAtA[i] = 0x12
     	}
    -	if m.Path != nil {
    -		i -= len(*m.Path)
    -		copy(dAtA[i:], *m.Path)
    -		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Path)))
    +	if m.ParamKind != nil {
    +		{
    +			size, err := m.ParamKind.MarshalToSizedBuffer(dAtA[:i])
    +			if err != nil {
    +				return 0, err
    +			}
    +			i -= size
    +			i = encodeVarintGenerated(dAtA, i, uint64(size))
    +		}
     		i--
    -		dAtA[i] = 0x1a
    +		dAtA[i] = 0xa
     	}
    -	i -= len(m.Name)
    -	copy(dAtA[i:], m.Name)
    -	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
    -	i--
    -	dAtA[i] = 0x12
    -	i -= len(m.Namespace)
    -	copy(dAtA[i:], m.Namespace)
    -	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace)))
    -	i--
    -	dAtA[i] = 0xa
     	return len(dAtA) - i, nil
     }
     
    -func (m *TypeChecking) Marshal() (dAtA []byte, err error) {
    +func (m *MutatingWebhook) Marshal() (dAtA []byte, err error) {
     	size := m.Size()
     	dAtA = make([]byte, size)
     	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    @@ -1494,20 +1763,20 @@ func (m *TypeChecking) Marshal() (dAtA []byte, err error) {
     	return dAtA[:n], nil
     }
     
    -func (m *TypeChecking) MarshalTo(dAtA []byte) (int, error) {
    +func (m *MutatingWebhook) MarshalTo(dAtA []byte) (int, error) {
     	size := m.Size()
     	return m.MarshalToSizedBuffer(dAtA[:size])
     }
     
    -func (m *TypeChecking) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    +func (m *MutatingWebhook) MarshalToSizedBuffer(dAtA []byte) (int, error) {
     	i := len(dAtA)
     	_ = i
     	var l int
     	_ = l
    -	if len(m.ExpressionWarnings) > 0 {
    -		for iNdEx := len(m.ExpressionWarnings) - 1; iNdEx >= 0; iNdEx-- {
    +	if len(m.MatchConditions) > 0 {
    +		for iNdEx := len(m.MatchConditions) - 1; iNdEx >= 0; iNdEx-- {
     			{
    -				size, err := m.ExpressionWarnings[iNdEx].MarshalToSizedBuffer(dAtA[:i])
    +				size, err := m.MatchConditions[iNdEx].MarshalToSizedBuffer(dAtA[:i])
     				if err != nil {
     					return 0, err
     				}
    @@ -1515,54 +1784,91 @@ func (m *TypeChecking) MarshalToSizedBuffer(dAtA []byte) (int, error) {
     				i = encodeVarintGenerated(dAtA, i, uint64(size))
     			}
     			i--
    -			dAtA[i] = 0xa
    +			dAtA[i] = 0x62
     		}
     	}
    -	return len(dAtA) - i, nil
    -}
    -
    -func (m *ValidatingAdmissionPolicy) Marshal() (dAtA []byte, err error) {
    -	size := m.Size()
    -	dAtA = make([]byte, size)
    -	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    -	if err != nil {
    -		return nil, err
    +	if m.ObjectSelector != nil {
    +		{
    +			size, err := m.ObjectSelector.MarshalToSizedBuffer(dAtA[:i])
    +			if err != nil {
    +				return 0, err
    +			}
    +			i -= size
    +			i = encodeVarintGenerated(dAtA, i, uint64(size))
    +		}
    +		i--
    +		dAtA[i] = 0x5a
     	}
    -	return dAtA[:n], nil
    -}
    -
    -func (m *ValidatingAdmissionPolicy) MarshalTo(dAtA []byte) (int, error) {
    -	size := m.Size()
    -	return m.MarshalToSizedBuffer(dAtA[:size])
    -}
    -
    -func (m *ValidatingAdmissionPolicy) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    -	i := len(dAtA)
    -	_ = i
    -	var l int
    -	_ = l
    -	{
    -		size, err := m.Status.MarshalToSizedBuffer(dAtA[:i])
    -		if err != nil {
    -			return 0, err
    +	if m.ReinvocationPolicy != nil {
    +		i -= len(*m.ReinvocationPolicy)
    +		copy(dAtA[i:], *m.ReinvocationPolicy)
    +		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ReinvocationPolicy)))
    +		i--
    +		dAtA[i] = 0x52
    +	}
    +	if m.MatchPolicy != nil {
    +		i -= len(*m.MatchPolicy)
    +		copy(dAtA[i:], *m.MatchPolicy)
    +		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.MatchPolicy)))
    +		i--
    +		dAtA[i] = 0x4a
    +	}
    +	if len(m.AdmissionReviewVersions) > 0 {
    +		for iNdEx := len(m.AdmissionReviewVersions) - 1; iNdEx >= 0; iNdEx-- {
    +			i -= len(m.AdmissionReviewVersions[iNdEx])
    +			copy(dAtA[i:], m.AdmissionReviewVersions[iNdEx])
    +			i = encodeVarintGenerated(dAtA, i, uint64(len(m.AdmissionReviewVersions[iNdEx])))
    +			i--
    +			dAtA[i] = 0x42
     		}
    -		i -= size
    -		i = encodeVarintGenerated(dAtA, i, uint64(size))
     	}
    -	i--
    -	dAtA[i] = 0x1a
    -	{
    -		size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
    -		if err != nil {
    -			return 0, err
    +	if m.TimeoutSeconds != nil {
    +		i = encodeVarintGenerated(dAtA, i, uint64(*m.TimeoutSeconds))
    +		i--
    +		dAtA[i] = 0x38
    +	}
    +	if m.SideEffects != nil {
    +		i -= len(*m.SideEffects)
    +		copy(dAtA[i:], *m.SideEffects)
    +		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.SideEffects)))
    +		i--
    +		dAtA[i] = 0x32
    +	}
    +	if m.NamespaceSelector != nil {
    +		{
    +			size, err := m.NamespaceSelector.MarshalToSizedBuffer(dAtA[:i])
    +			if err != nil {
    +				return 0, err
    +			}
    +			i -= size
    +			i = encodeVarintGenerated(dAtA, i, uint64(size))
    +		}
    +		i--
    +		dAtA[i] = 0x2a
    +	}
    +	if m.FailurePolicy != nil {
    +		i -= len(*m.FailurePolicy)
    +		copy(dAtA[i:], *m.FailurePolicy)
    +		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.FailurePolicy)))
    +		i--
    +		dAtA[i] = 0x22
    +	}
    +	if len(m.Rules) > 0 {
    +		for iNdEx := len(m.Rules) - 1; iNdEx >= 0; iNdEx-- {
    +			{
    +				size, err := m.Rules[iNdEx].MarshalToSizedBuffer(dAtA[:i])
    +				if err != nil {
    +					return 0, err
    +				}
    +				i -= size
    +				i = encodeVarintGenerated(dAtA, i, uint64(size))
    +			}
    +			i--
    +			dAtA[i] = 0x1a
     		}
    -		i -= size
    -		i = encodeVarintGenerated(dAtA, i, uint64(size))
     	}
    -	i--
    -	dAtA[i] = 0x12
     	{
    -		size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
    +		size, err := m.ClientConfig.MarshalToSizedBuffer(dAtA[:i])
     		if err != nil {
     			return 0, err
     		}
    @@ -1570,11 +1876,16 @@ func (m *ValidatingAdmissionPolicy) MarshalToSizedBuffer(dAtA []byte) (int, erro
     		i = encodeVarintGenerated(dAtA, i, uint64(size))
     	}
     	i--
    +	dAtA[i] = 0x12
    +	i -= len(m.Name)
    +	copy(dAtA[i:], m.Name)
    +	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
    +	i--
     	dAtA[i] = 0xa
     	return len(dAtA) - i, nil
     }
     
    -func (m *ValidatingAdmissionPolicyBinding) Marshal() (dAtA []byte, err error) {
    +func (m *MutatingWebhookConfiguration) Marshal() (dAtA []byte, err error) {
     	size := m.Size()
     	dAtA = make([]byte, size)
     	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    @@ -1584,26 +1895,30 @@ func (m *ValidatingAdmissionPolicyBinding) Marshal() (dAtA []byte, err error) {
     	return dAtA[:n], nil
     }
     
    -func (m *ValidatingAdmissionPolicyBinding) MarshalTo(dAtA []byte) (int, error) {
    +func (m *MutatingWebhookConfiguration) MarshalTo(dAtA []byte) (int, error) {
     	size := m.Size()
     	return m.MarshalToSizedBuffer(dAtA[:size])
     }
     
    -func (m *ValidatingAdmissionPolicyBinding) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    +func (m *MutatingWebhookConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, error) {
     	i := len(dAtA)
     	_ = i
     	var l int
     	_ = l
    -	{
    -		size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
    -		if err != nil {
    -			return 0, err
    +	if len(m.Webhooks) > 0 {
    +		for iNdEx := len(m.Webhooks) - 1; iNdEx >= 0; iNdEx-- {
    +			{
    +				size, err := m.Webhooks[iNdEx].MarshalToSizedBuffer(dAtA[:i])
    +				if err != nil {
    +					return 0, err
    +				}
    +				i -= size
    +				i = encodeVarintGenerated(dAtA, i, uint64(size))
    +			}
    +			i--
    +			dAtA[i] = 0x12
     		}
    -		i -= size
    -		i = encodeVarintGenerated(dAtA, i, uint64(size))
     	}
    -	i--
    -	dAtA[i] = 0x12
     	{
     		size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
     		if err != nil {
    @@ -1617,7 +1932,7 @@ func (m *ValidatingAdmissionPolicyBinding) MarshalToSizedBuffer(dAtA []byte) (in
     	return len(dAtA) - i, nil
     }
     
    -func (m *ValidatingAdmissionPolicyBindingList) Marshal() (dAtA []byte, err error) {
    +func (m *MutatingWebhookConfigurationList) Marshal() (dAtA []byte, err error) {
     	size := m.Size()
     	dAtA = make([]byte, size)
     	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    @@ -1627,12 +1942,12 @@ func (m *ValidatingAdmissionPolicyBindingList) Marshal() (dAtA []byte, err error
     	return dAtA[:n], nil
     }
     
    -func (m *ValidatingAdmissionPolicyBindingList) MarshalTo(dAtA []byte) (int, error) {
    +func (m *MutatingWebhookConfigurationList) MarshalTo(dAtA []byte) (int, error) {
     	size := m.Size()
     	return m.MarshalToSizedBuffer(dAtA[:size])
     }
     
    -func (m *ValidatingAdmissionPolicyBindingList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    +func (m *MutatingWebhookConfigurationList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
     	i := len(dAtA)
     	_ = i
     	var l int
    @@ -1664,7 +1979,7 @@ func (m *ValidatingAdmissionPolicyBindingList) MarshalToSizedBuffer(dAtA []byte)
     	return len(dAtA) - i, nil
     }
     
    -func (m *ValidatingAdmissionPolicyBindingSpec) Marshal() (dAtA []byte, err error) {
    +func (m *Mutation) Marshal() (dAtA []byte, err error) {
     	size := m.Size()
     	dAtA = make([]byte, size)
     	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    @@ -1674,28 +1989,19 @@ func (m *ValidatingAdmissionPolicyBindingSpec) Marshal() (dAtA []byte, err error
     	return dAtA[:n], nil
     }
     
    -func (m *ValidatingAdmissionPolicyBindingSpec) MarshalTo(dAtA []byte) (int, error) {
    +func (m *Mutation) MarshalTo(dAtA []byte) (int, error) {
     	size := m.Size()
     	return m.MarshalToSizedBuffer(dAtA[:size])
     }
     
    -func (m *ValidatingAdmissionPolicyBindingSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    +func (m *Mutation) MarshalToSizedBuffer(dAtA []byte) (int, error) {
     	i := len(dAtA)
     	_ = i
     	var l int
     	_ = l
    -	if len(m.ValidationActions) > 0 {
    -		for iNdEx := len(m.ValidationActions) - 1; iNdEx >= 0; iNdEx-- {
    -			i -= len(m.ValidationActions[iNdEx])
    -			copy(dAtA[i:], m.ValidationActions[iNdEx])
    -			i = encodeVarintGenerated(dAtA, i, uint64(len(m.ValidationActions[iNdEx])))
    -			i--
    -			dAtA[i] = 0x22
    -		}
    -	}
    -	if m.MatchResources != nil {
    +	if m.JSONPatch != nil {
     		{
    -			size, err := m.MatchResources.MarshalToSizedBuffer(dAtA[:i])
    +			size, err := m.JSONPatch.MarshalToSizedBuffer(dAtA[:i])
     			if err != nil {
     				return 0, err
     			}
    @@ -1703,11 +2009,11 @@ func (m *ValidatingAdmissionPolicyBindingSpec) MarshalToSizedBuffer(dAtA []byte)
     			i = encodeVarintGenerated(dAtA, i, uint64(size))
     		}
     		i--
    -		dAtA[i] = 0x1a
    +		dAtA[i] = 0x22
     	}
    -	if m.ParamRef != nil {
    +	if m.ApplyConfiguration != nil {
     		{
    -			size, err := m.ParamRef.MarshalToSizedBuffer(dAtA[:i])
    +			size, err := m.ApplyConfiguration.MarshalToSizedBuffer(dAtA[:i])
     			if err != nil {
     				return 0, err
     			}
    @@ -1715,17 +2021,17 @@ func (m *ValidatingAdmissionPolicyBindingSpec) MarshalToSizedBuffer(dAtA []byte)
     			i = encodeVarintGenerated(dAtA, i, uint64(size))
     		}
     		i--
    -		dAtA[i] = 0x12
    +		dAtA[i] = 0x1a
     	}
    -	i -= len(m.PolicyName)
    -	copy(dAtA[i:], m.PolicyName)
    -	i = encodeVarintGenerated(dAtA, i, uint64(len(m.PolicyName)))
    +	i -= len(m.PatchType)
    +	copy(dAtA[i:], m.PatchType)
    +	i = encodeVarintGenerated(dAtA, i, uint64(len(m.PatchType)))
     	i--
    -	dAtA[i] = 0xa
    +	dAtA[i] = 0x12
     	return len(dAtA) - i, nil
     }
     
    -func (m *ValidatingAdmissionPolicyList) Marshal() (dAtA []byte, err error) {
    +func (m *NamedRuleWithOperations) Marshal() (dAtA []byte, err error) {
     	size := m.Size()
     	dAtA = make([]byte, size)
     	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    @@ -1735,32 +2041,18 @@ func (m *ValidatingAdmissionPolicyList) Marshal() (dAtA []byte, err error) {
     	return dAtA[:n], nil
     }
     
    -func (m *ValidatingAdmissionPolicyList) MarshalTo(dAtA []byte) (int, error) {
    +func (m *NamedRuleWithOperations) MarshalTo(dAtA []byte) (int, error) {
     	size := m.Size()
     	return m.MarshalToSizedBuffer(dAtA[:size])
     }
     
    -func (m *ValidatingAdmissionPolicyList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    +func (m *NamedRuleWithOperations) MarshalToSizedBuffer(dAtA []byte) (int, error) {
     	i := len(dAtA)
     	_ = i
     	var l int
     	_ = l
    -	if len(m.Items) > 0 {
    -		for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
    -			{
    -				size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
    -				if err != nil {
    -					return 0, err
    -				}
    -				i -= size
    -				i = encodeVarintGenerated(dAtA, i, uint64(size))
    -			}
    -			i--
    -			dAtA[i] = 0x12
    -		}
    -	}
     	{
    -		size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
    +		size, err := m.RuleWithOperations.MarshalToSizedBuffer(dAtA[:i])
     		if err != nil {
     			return 0, err
     		}
    @@ -1768,11 +2060,20 @@ func (m *ValidatingAdmissionPolicyList) MarshalToSizedBuffer(dAtA []byte) (int,
     		i = encodeVarintGenerated(dAtA, i, uint64(size))
     	}
     	i--
    -	dAtA[i] = 0xa
    +	dAtA[i] = 0x12
    +	if len(m.ResourceNames) > 0 {
    +		for iNdEx := len(m.ResourceNames) - 1; iNdEx >= 0; iNdEx-- {
    +			i -= len(m.ResourceNames[iNdEx])
    +			copy(dAtA[i:], m.ResourceNames[iNdEx])
    +			i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceNames[iNdEx])))
    +			i--
    +			dAtA[i] = 0xa
    +		}
    +	}
     	return len(dAtA) - i, nil
     }
     
    -func (m *ValidatingAdmissionPolicySpec) Marshal() (dAtA []byte, err error) {
    +func (m *ParamKind) Marshal() (dAtA []byte, err error) {
     	size := m.Size()
     	dAtA = make([]byte, size)
     	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    @@ -1782,94 +2083,59 @@ func (m *ValidatingAdmissionPolicySpec) Marshal() (dAtA []byte, err error) {
     	return dAtA[:n], nil
     }
     
    -func (m *ValidatingAdmissionPolicySpec) MarshalTo(dAtA []byte) (int, error) {
    +func (m *ParamKind) MarshalTo(dAtA []byte) (int, error) {
     	size := m.Size()
     	return m.MarshalToSizedBuffer(dAtA[:size])
     }
     
    -func (m *ValidatingAdmissionPolicySpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    +func (m *ParamKind) MarshalToSizedBuffer(dAtA []byte) (int, error) {
     	i := len(dAtA)
     	_ = i
     	var l int
     	_ = l
    -	if len(m.Variables) > 0 {
    -		for iNdEx := len(m.Variables) - 1; iNdEx >= 0; iNdEx-- {
    -			{
    -				size, err := m.Variables[iNdEx].MarshalToSizedBuffer(dAtA[:i])
    -				if err != nil {
    -					return 0, err
    -				}
    -				i -= size
    -				i = encodeVarintGenerated(dAtA, i, uint64(size))
    -			}
    -			i--
    -			dAtA[i] = 0x3a
    -		}
    -	}
    -	if len(m.MatchConditions) > 0 {
    -		for iNdEx := len(m.MatchConditions) - 1; iNdEx >= 0; iNdEx-- {
    -			{
    -				size, err := m.MatchConditions[iNdEx].MarshalToSizedBuffer(dAtA[:i])
    -				if err != nil {
    -					return 0, err
    -				}
    -				i -= size
    -				i = encodeVarintGenerated(dAtA, i, uint64(size))
    -			}
    -			i--
    -			dAtA[i] = 0x32
    -		}
    -	}
    -	if len(m.AuditAnnotations) > 0 {
    -		for iNdEx := len(m.AuditAnnotations) - 1; iNdEx >= 0; iNdEx-- {
    -			{
    -				size, err := m.AuditAnnotations[iNdEx].MarshalToSizedBuffer(dAtA[:i])
    -				if err != nil {
    -					return 0, err
    -				}
    -				i -= size
    -				i = encodeVarintGenerated(dAtA, i, uint64(size))
    -			}
    -			i--
    -			dAtA[i] = 0x2a
    -		}
    +	i -= len(m.Kind)
    +	copy(dAtA[i:], m.Kind)
    +	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind)))
    +	i--
    +	dAtA[i] = 0x12
    +	i -= len(m.APIVersion)
    +	copy(dAtA[i:], m.APIVersion)
    +	i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion)))
    +	i--
    +	dAtA[i] = 0xa
    +	return len(dAtA) - i, nil
    +}
    +
    +func (m *ParamRef) Marshal() (dAtA []byte, err error) {
    +	size := m.Size()
    +	dAtA = make([]byte, size)
    +	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    +	if err != nil {
    +		return nil, err
     	}
    -	if m.FailurePolicy != nil {
    -		i -= len(*m.FailurePolicy)
    -		copy(dAtA[i:], *m.FailurePolicy)
    -		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.FailurePolicy)))
    +	return dAtA[:n], nil
    +}
    +
    +func (m *ParamRef) MarshalTo(dAtA []byte) (int, error) {
    +	size := m.Size()
    +	return m.MarshalToSizedBuffer(dAtA[:size])
    +}
    +
    +func (m *ParamRef) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    +	i := len(dAtA)
    +	_ = i
    +	var l int
    +	_ = l
    +	if m.ParameterNotFoundAction != nil {
    +		i -= len(*m.ParameterNotFoundAction)
    +		copy(dAtA[i:], *m.ParameterNotFoundAction)
    +		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ParameterNotFoundAction)))
     		i--
     		dAtA[i] = 0x22
     	}
    -	if len(m.Validations) > 0 {
    -		for iNdEx := len(m.Validations) - 1; iNdEx >= 0; iNdEx-- {
    -			{
    -				size, err := m.Validations[iNdEx].MarshalToSizedBuffer(dAtA[:i])
    -				if err != nil {
    -					return 0, err
    -				}
    -				i -= size
    -				i = encodeVarintGenerated(dAtA, i, uint64(size))
    -			}
    -			i--
    -			dAtA[i] = 0x1a
    -		}
    -	}
    -	if m.MatchConstraints != nil {
    -		{
    -			size, err := m.MatchConstraints.MarshalToSizedBuffer(dAtA[:i])
    -			if err != nil {
    -				return 0, err
    -			}
    -			i -= size
    -			i = encodeVarintGenerated(dAtA, i, uint64(size))
    -		}
    -		i--
    -		dAtA[i] = 0x12
    -	}
    -	if m.ParamKind != nil {
    +	if m.Selector != nil {
     		{
    -			size, err := m.ParamKind.MarshalToSizedBuffer(dAtA[:i])
    +			size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i])
     			if err != nil {
     				return 0, err
     			}
    @@ -1877,12 +2143,22 @@ func (m *ValidatingAdmissionPolicySpec) MarshalToSizedBuffer(dAtA []byte) (int,
     			i = encodeVarintGenerated(dAtA, i, uint64(size))
     		}
     		i--
    -		dAtA[i] = 0xa
    +		dAtA[i] = 0x1a
     	}
    +	i -= len(m.Namespace)
    +	copy(dAtA[i:], m.Namespace)
    +	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace)))
    +	i--
    +	dAtA[i] = 0x12
    +	i -= len(m.Name)
    +	copy(dAtA[i:], m.Name)
    +	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
    +	i--
    +	dAtA[i] = 0xa
     	return len(dAtA) - i, nil
     }
     
    -func (m *ValidatingAdmissionPolicyStatus) Marshal() (dAtA []byte, err error) {
    +func (m *ServiceReference) Marshal() (dAtA []byte, err error) {
     	size := m.Size()
     	dAtA = make([]byte, size)
     	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    @@ -1892,49 +2168,42 @@ func (m *ValidatingAdmissionPolicyStatus) Marshal() (dAtA []byte, err error) {
     	return dAtA[:n], nil
     }
     
    -func (m *ValidatingAdmissionPolicyStatus) MarshalTo(dAtA []byte) (int, error) {
    +func (m *ServiceReference) MarshalTo(dAtA []byte) (int, error) {
     	size := m.Size()
     	return m.MarshalToSizedBuffer(dAtA[:size])
     }
     
    -func (m *ValidatingAdmissionPolicyStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    +func (m *ServiceReference) MarshalToSizedBuffer(dAtA []byte) (int, error) {
     	i := len(dAtA)
     	_ = i
     	var l int
     	_ = l
    -	if len(m.Conditions) > 0 {
    -		for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- {
    -			{
    -				size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i])
    -				if err != nil {
    -					return 0, err
    -				}
    -				i -= size
    -				i = encodeVarintGenerated(dAtA, i, uint64(size))
    -			}
    -			i--
    -			dAtA[i] = 0x1a
    -		}
    +	if m.Port != nil {
    +		i = encodeVarintGenerated(dAtA, i, uint64(*m.Port))
    +		i--
    +		dAtA[i] = 0x20
     	}
    -	if m.TypeChecking != nil {
    -		{
    -			size, err := m.TypeChecking.MarshalToSizedBuffer(dAtA[:i])
    -			if err != nil {
    -				return 0, err
    -			}
    -			i -= size
    -			i = encodeVarintGenerated(dAtA, i, uint64(size))
    -		}
    +	if m.Path != nil {
    +		i -= len(*m.Path)
    +		copy(dAtA[i:], *m.Path)
    +		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Path)))
     		i--
    -		dAtA[i] = 0x12
    +		dAtA[i] = 0x1a
     	}
    -	i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration))
    +	i -= len(m.Name)
    +	copy(dAtA[i:], m.Name)
    +	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
     	i--
    -	dAtA[i] = 0x8
    +	dAtA[i] = 0x12
    +	i -= len(m.Namespace)
    +	copy(dAtA[i:], m.Namespace)
    +	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace)))
    +	i--
    +	dAtA[i] = 0xa
     	return len(dAtA) - i, nil
     }
     
    -func (m *ValidatingWebhook) Marshal() (dAtA []byte, err error) {
    +func (m *TypeChecking) Marshal() (dAtA []byte, err error) {
     	size := m.Size()
     	dAtA = make([]byte, size)
     	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    @@ -1944,20 +2213,20 @@ func (m *ValidatingWebhook) Marshal() (dAtA []byte, err error) {
     	return dAtA[:n], nil
     }
     
    -func (m *ValidatingWebhook) MarshalTo(dAtA []byte) (int, error) {
    +func (m *TypeChecking) MarshalTo(dAtA []byte) (int, error) {
     	size := m.Size()
     	return m.MarshalToSizedBuffer(dAtA[:size])
     }
     
    -func (m *ValidatingWebhook) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    +func (m *TypeChecking) MarshalToSizedBuffer(dAtA []byte) (int, error) {
     	i := len(dAtA)
     	_ = i
     	var l int
     	_ = l
    -	if len(m.MatchConditions) > 0 {
    -		for iNdEx := len(m.MatchConditions) - 1; iNdEx >= 0; iNdEx-- {
    +	if len(m.ExpressionWarnings) > 0 {
    +		for iNdEx := len(m.ExpressionWarnings) - 1; iNdEx >= 0; iNdEx-- {
     			{
    -				size, err := m.MatchConditions[iNdEx].MarshalToSizedBuffer(dAtA[:i])
    +				size, err := m.ExpressionWarnings[iNdEx].MarshalToSizedBuffer(dAtA[:i])
     				if err != nil {
     					return 0, err
     				}
    @@ -1965,84 +2234,44 @@ func (m *ValidatingWebhook) MarshalToSizedBuffer(dAtA []byte) (int, error) {
     				i = encodeVarintGenerated(dAtA, i, uint64(size))
     			}
     			i--
    -			dAtA[i] = 0x5a
    -		}
    -	}
    -	if m.ObjectSelector != nil {
    -		{
    -			size, err := m.ObjectSelector.MarshalToSizedBuffer(dAtA[:i])
    -			if err != nil {
    -				return 0, err
    -			}
    -			i -= size
    -			i = encodeVarintGenerated(dAtA, i, uint64(size))
    +			dAtA[i] = 0xa
     		}
    -		i--
    -		dAtA[i] = 0x52
     	}
    -	if m.MatchPolicy != nil {
    -		i -= len(*m.MatchPolicy)
    -		copy(dAtA[i:], *m.MatchPolicy)
    -		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.MatchPolicy)))
    -		i--
    -		dAtA[i] = 0x4a
    +	return len(dAtA) - i, nil
    +}
    +
    +func (m *ValidatingAdmissionPolicy) Marshal() (dAtA []byte, err error) {
    +	size := m.Size()
    +	dAtA = make([]byte, size)
    +	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    +	if err != nil {
    +		return nil, err
     	}
    -	if len(m.AdmissionReviewVersions) > 0 {
    -		for iNdEx := len(m.AdmissionReviewVersions) - 1; iNdEx >= 0; iNdEx-- {
    -			i -= len(m.AdmissionReviewVersions[iNdEx])
    -			copy(dAtA[i:], m.AdmissionReviewVersions[iNdEx])
    -			i = encodeVarintGenerated(dAtA, i, uint64(len(m.AdmissionReviewVersions[iNdEx])))
    -			i--
    -			dAtA[i] = 0x42
    -		}
    -	}
    -	if m.TimeoutSeconds != nil {
    -		i = encodeVarintGenerated(dAtA, i, uint64(*m.TimeoutSeconds))
    -		i--
    -		dAtA[i] = 0x38
    -	}
    -	if m.SideEffects != nil {
    -		i -= len(*m.SideEffects)
    -		copy(dAtA[i:], *m.SideEffects)
    -		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.SideEffects)))
    -		i--
    -		dAtA[i] = 0x32
    -	}
    -	if m.NamespaceSelector != nil {
    -		{
    -			size, err := m.NamespaceSelector.MarshalToSizedBuffer(dAtA[:i])
    -			if err != nil {
    -				return 0, err
    -			}
    -			i -= size
    -			i = encodeVarintGenerated(dAtA, i, uint64(size))
    -		}
    -		i--
    -		dAtA[i] = 0x2a
    -	}
    -	if m.FailurePolicy != nil {
    -		i -= len(*m.FailurePolicy)
    -		copy(dAtA[i:], *m.FailurePolicy)
    -		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.FailurePolicy)))
    -		i--
    -		dAtA[i] = 0x22
    -	}
    -	if len(m.Rules) > 0 {
    -		for iNdEx := len(m.Rules) - 1; iNdEx >= 0; iNdEx-- {
    -			{
    -				size, err := m.Rules[iNdEx].MarshalToSizedBuffer(dAtA[:i])
    -				if err != nil {
    -					return 0, err
    -				}
    -				i -= size
    -				i = encodeVarintGenerated(dAtA, i, uint64(size))
    -			}
    -			i--
    -			dAtA[i] = 0x1a
    +	return dAtA[:n], nil
    +}
    +
    +func (m *ValidatingAdmissionPolicy) MarshalTo(dAtA []byte) (int, error) {
    +	size := m.Size()
    +	return m.MarshalToSizedBuffer(dAtA[:size])
    +}
    +
    +func (m *ValidatingAdmissionPolicy) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    +	i := len(dAtA)
    +	_ = i
    +	var l int
    +	_ = l
    +	{
    +		size, err := m.Status.MarshalToSizedBuffer(dAtA[:i])
    +		if err != nil {
    +			return 0, err
     		}
    +		i -= size
    +		i = encodeVarintGenerated(dAtA, i, uint64(size))
     	}
    +	i--
    +	dAtA[i] = 0x1a
     	{
    -		size, err := m.ClientConfig.MarshalToSizedBuffer(dAtA[:i])
    +		size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
     		if err != nil {
     			return 0, err
     		}
    @@ -2051,15 +2280,20 @@ func (m *ValidatingWebhook) MarshalToSizedBuffer(dAtA []byte) (int, error) {
     	}
     	i--
     	dAtA[i] = 0x12
    -	i -= len(m.Name)
    -	copy(dAtA[i:], m.Name)
    -	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
    +	{
    +		size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
    +		if err != nil {
    +			return 0, err
    +		}
    +		i -= size
    +		i = encodeVarintGenerated(dAtA, i, uint64(size))
    +	}
     	i--
     	dAtA[i] = 0xa
     	return len(dAtA) - i, nil
     }
     
    -func (m *ValidatingWebhookConfiguration) Marshal() (dAtA []byte, err error) {
    +func (m *ValidatingAdmissionPolicyBinding) Marshal() (dAtA []byte, err error) {
     	size := m.Size()
     	dAtA = make([]byte, size)
     	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    @@ -2069,30 +2303,26 @@ func (m *ValidatingWebhookConfiguration) Marshal() (dAtA []byte, err error) {
     	return dAtA[:n], nil
     }
     
    -func (m *ValidatingWebhookConfiguration) MarshalTo(dAtA []byte) (int, error) {
    +func (m *ValidatingAdmissionPolicyBinding) MarshalTo(dAtA []byte) (int, error) {
     	size := m.Size()
     	return m.MarshalToSizedBuffer(dAtA[:size])
     }
     
    -func (m *ValidatingWebhookConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    +func (m *ValidatingAdmissionPolicyBinding) MarshalToSizedBuffer(dAtA []byte) (int, error) {
     	i := len(dAtA)
     	_ = i
     	var l int
     	_ = l
    -	if len(m.Webhooks) > 0 {
    -		for iNdEx := len(m.Webhooks) - 1; iNdEx >= 0; iNdEx-- {
    -			{
    -				size, err := m.Webhooks[iNdEx].MarshalToSizedBuffer(dAtA[:i])
    -				if err != nil {
    -					return 0, err
    -				}
    -				i -= size
    -				i = encodeVarintGenerated(dAtA, i, uint64(size))
    -			}
    -			i--
    -			dAtA[i] = 0x12
    +	{
    +		size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
    +		if err != nil {
    +			return 0, err
     		}
    +		i -= size
    +		i = encodeVarintGenerated(dAtA, i, uint64(size))
     	}
    +	i--
    +	dAtA[i] = 0x12
     	{
     		size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
     		if err != nil {
    @@ -2106,7 +2336,7 @@ func (m *ValidatingWebhookConfiguration) MarshalToSizedBuffer(dAtA []byte) (int,
     	return len(dAtA) - i, nil
     }
     
    -func (m *ValidatingWebhookConfigurationList) Marshal() (dAtA []byte, err error) {
    +func (m *ValidatingAdmissionPolicyBindingList) Marshal() (dAtA []byte, err error) {
     	size := m.Size()
     	dAtA = make([]byte, size)
     	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    @@ -2116,12 +2346,12 @@ func (m *ValidatingWebhookConfigurationList) Marshal() (dAtA []byte, err error)
     	return dAtA[:n], nil
     }
     
    -func (m *ValidatingWebhookConfigurationList) MarshalTo(dAtA []byte) (int, error) {
    +func (m *ValidatingAdmissionPolicyBindingList) MarshalTo(dAtA []byte) (int, error) {
     	size := m.Size()
     	return m.MarshalToSizedBuffer(dAtA[:size])
     }
     
    -func (m *ValidatingWebhookConfigurationList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    +func (m *ValidatingAdmissionPolicyBindingList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
     	i := len(dAtA)
     	_ = i
     	var l int
    @@ -2153,7 +2383,7 @@ func (m *ValidatingWebhookConfigurationList) MarshalToSizedBuffer(dAtA []byte) (
     	return len(dAtA) - i, nil
     }
     
    -func (m *Validation) Marshal() (dAtA []byte, err error) {
    +func (m *ValidatingAdmissionPolicyBindingSpec) Marshal() (dAtA []byte, err error) {
     	size := m.Size()
     	dAtA = make([]byte, size)
     	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    @@ -2163,42 +2393,58 @@ func (m *Validation) Marshal() (dAtA []byte, err error) {
     	return dAtA[:n], nil
     }
     
    -func (m *Validation) MarshalTo(dAtA []byte) (int, error) {
    +func (m *ValidatingAdmissionPolicyBindingSpec) MarshalTo(dAtA []byte) (int, error) {
     	size := m.Size()
     	return m.MarshalToSizedBuffer(dAtA[:size])
     }
     
    -func (m *Validation) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    +func (m *ValidatingAdmissionPolicyBindingSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
     	i := len(dAtA)
     	_ = i
     	var l int
     	_ = l
    -	i -= len(m.MessageExpression)
    -	copy(dAtA[i:], m.MessageExpression)
    -	i = encodeVarintGenerated(dAtA, i, uint64(len(m.MessageExpression)))
    -	i--
    -	dAtA[i] = 0x22
    -	if m.Reason != nil {
    -		i -= len(*m.Reason)
    -		copy(dAtA[i:], *m.Reason)
    -		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Reason)))
    +	if len(m.ValidationActions) > 0 {
    +		for iNdEx := len(m.ValidationActions) - 1; iNdEx >= 0; iNdEx-- {
    +			i -= len(m.ValidationActions[iNdEx])
    +			copy(dAtA[i:], m.ValidationActions[iNdEx])
    +			i = encodeVarintGenerated(dAtA, i, uint64(len(m.ValidationActions[iNdEx])))
    +			i--
    +			dAtA[i] = 0x22
    +		}
    +	}
    +	if m.MatchResources != nil {
    +		{
    +			size, err := m.MatchResources.MarshalToSizedBuffer(dAtA[:i])
    +			if err != nil {
    +				return 0, err
    +			}
    +			i -= size
    +			i = encodeVarintGenerated(dAtA, i, uint64(size))
    +		}
     		i--
     		dAtA[i] = 0x1a
     	}
    -	i -= len(m.Message)
    -	copy(dAtA[i:], m.Message)
    -	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message)))
    -	i--
    -	dAtA[i] = 0x12
    -	i -= len(m.Expression)
    -	copy(dAtA[i:], m.Expression)
    -	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Expression)))
    +	if m.ParamRef != nil {
    +		{
    +			size, err := m.ParamRef.MarshalToSizedBuffer(dAtA[:i])
    +			if err != nil {
    +				return 0, err
    +			}
    +			i -= size
    +			i = encodeVarintGenerated(dAtA, i, uint64(size))
    +		}
    +		i--
    +		dAtA[i] = 0x12
    +	}
    +	i -= len(m.PolicyName)
    +	copy(dAtA[i:], m.PolicyName)
    +	i = encodeVarintGenerated(dAtA, i, uint64(len(m.PolicyName)))
     	i--
     	dAtA[i] = 0xa
     	return len(dAtA) - i, nil
     }
     
    -func (m *Variable) Marshal() (dAtA []byte, err error) {
    +func (m *ValidatingAdmissionPolicyList) Marshal() (dAtA []byte, err error) {
     	size := m.Size()
     	dAtA = make([]byte, size)
     	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    @@ -2208,30 +2454,44 @@ func (m *Variable) Marshal() (dAtA []byte, err error) {
     	return dAtA[:n], nil
     }
     
    -func (m *Variable) MarshalTo(dAtA []byte) (int, error) {
    +func (m *ValidatingAdmissionPolicyList) MarshalTo(dAtA []byte) (int, error) {
     	size := m.Size()
     	return m.MarshalToSizedBuffer(dAtA[:size])
     }
     
    -func (m *Variable) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    +func (m *ValidatingAdmissionPolicyList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
     	i := len(dAtA)
     	_ = i
     	var l int
     	_ = l
    -	i -= len(m.Expression)
    -	copy(dAtA[i:], m.Expression)
    -	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Expression)))
    -	i--
    -	dAtA[i] = 0x12
    -	i -= len(m.Name)
    -	copy(dAtA[i:], m.Name)
    -	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
    +	if len(m.Items) > 0 {
    +		for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
    +			{
    +				size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
    +				if err != nil {
    +					return 0, err
    +				}
    +				i -= size
    +				i = encodeVarintGenerated(dAtA, i, uint64(size))
    +			}
    +			i--
    +			dAtA[i] = 0x12
    +		}
    +	}
    +	{
    +		size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
    +		if err != nil {
    +			return 0, err
    +		}
    +		i -= size
    +		i = encodeVarintGenerated(dAtA, i, uint64(size))
    +	}
     	i--
     	dAtA[i] = 0xa
     	return len(dAtA) - i, nil
     }
     
    -func (m *WebhookClientConfig) Marshal() (dAtA []byte, err error) {
    +func (m *ValidatingAdmissionPolicySpec) Marshal() (dAtA []byte, err error) {
     	size := m.Size()
     	dAtA = make([]byte, size)
     	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    @@ -2241,335 +2501,636 @@ func (m *WebhookClientConfig) Marshal() (dAtA []byte, err error) {
     	return dAtA[:n], nil
     }
     
    -func (m *WebhookClientConfig) MarshalTo(dAtA []byte) (int, error) {
    +func (m *ValidatingAdmissionPolicySpec) MarshalTo(dAtA []byte) (int, error) {
     	size := m.Size()
     	return m.MarshalToSizedBuffer(dAtA[:size])
     }
     
    -func (m *WebhookClientConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    +func (m *ValidatingAdmissionPolicySpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
     	i := len(dAtA)
     	_ = i
     	var l int
     	_ = l
    -	if m.URL != nil {
    -		i -= len(*m.URL)
    -		copy(dAtA[i:], *m.URL)
    -		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.URL)))
    -		i--
    -		dAtA[i] = 0x1a
    -	}
    -	if m.CABundle != nil {
    -		i -= len(m.CABundle)
    -		copy(dAtA[i:], m.CABundle)
    -		i = encodeVarintGenerated(dAtA, i, uint64(len(m.CABundle)))
    -		i--
    -		dAtA[i] = 0x12
    -	}
    -	if m.Service != nil {
    -		{
    -			size, err := m.Service.MarshalToSizedBuffer(dAtA[:i])
    -			if err != nil {
    -				return 0, err
    +	if len(m.Variables) > 0 {
    +		for iNdEx := len(m.Variables) - 1; iNdEx >= 0; iNdEx-- {
    +			{
    +				size, err := m.Variables[iNdEx].MarshalToSizedBuffer(dAtA[:i])
    +				if err != nil {
    +					return 0, err
    +				}
    +				i -= size
    +				i = encodeVarintGenerated(dAtA, i, uint64(size))
     			}
    -			i -= size
    -			i = encodeVarintGenerated(dAtA, i, uint64(size))
    +			i--
    +			dAtA[i] = 0x3a
     		}
    -		i--
    -		dAtA[i] = 0xa
    -	}
    -	return len(dAtA) - i, nil
    -}
    -
    -func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int {
    -	offset -= sovGenerated(v)
    -	base := offset
    -	for v >= 1<<7 {
    -		dAtA[offset] = uint8(v&0x7f | 0x80)
    -		v >>= 7
    -		offset++
     	}
    -	dAtA[offset] = uint8(v)
    -	return base
    -}
    -func (m *AuditAnnotation) Size() (n int) {
    -	if m == nil {
    -		return 0
    +	if len(m.MatchConditions) > 0 {
    +		for iNdEx := len(m.MatchConditions) - 1; iNdEx >= 0; iNdEx-- {
    +			{
    +				size, err := m.MatchConditions[iNdEx].MarshalToSizedBuffer(dAtA[:i])
    +				if err != nil {
    +					return 0, err
    +				}
    +				i -= size
    +				i = encodeVarintGenerated(dAtA, i, uint64(size))
    +			}
    +			i--
    +			dAtA[i] = 0x32
    +		}
     	}
    -	var l int
    -	_ = l
    -	l = len(m.Key)
    -	n += 1 + l + sovGenerated(uint64(l))
    -	l = len(m.ValueExpression)
    -	n += 1 + l + sovGenerated(uint64(l))
    -	return n
    +	if len(m.AuditAnnotations) > 0 {
    +		for iNdEx := len(m.AuditAnnotations) - 1; iNdEx >= 0; iNdEx-- {
    +			{
    +				size, err := m.AuditAnnotations[iNdEx].MarshalToSizedBuffer(dAtA[:i])
    +				if err != nil {
    +					return 0, err
    +				}
    +				i -= size
    +				i = encodeVarintGenerated(dAtA, i, uint64(size))
    +			}
    +			i--
    +			dAtA[i] = 0x2a
    +		}
    +	}
    +	if m.FailurePolicy != nil {
    +		i -= len(*m.FailurePolicy)
    +		copy(dAtA[i:], *m.FailurePolicy)
    +		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.FailurePolicy)))
    +		i--
    +		dAtA[i] = 0x22
    +	}
    +	if len(m.Validations) > 0 {
    +		for iNdEx := len(m.Validations) - 1; iNdEx >= 0; iNdEx-- {
    +			{
    +				size, err := m.Validations[iNdEx].MarshalToSizedBuffer(dAtA[:i])
    +				if err != nil {
    +					return 0, err
    +				}
    +				i -= size
    +				i = encodeVarintGenerated(dAtA, i, uint64(size))
    +			}
    +			i--
    +			dAtA[i] = 0x1a
    +		}
    +	}
    +	if m.MatchConstraints != nil {
    +		{
    +			size, err := m.MatchConstraints.MarshalToSizedBuffer(dAtA[:i])
    +			if err != nil {
    +				return 0, err
    +			}
    +			i -= size
    +			i = encodeVarintGenerated(dAtA, i, uint64(size))
    +		}
    +		i--
    +		dAtA[i] = 0x12
    +	}
    +	if m.ParamKind != nil {
    +		{
    +			size, err := m.ParamKind.MarshalToSizedBuffer(dAtA[:i])
    +			if err != nil {
    +				return 0, err
    +			}
    +			i -= size
    +			i = encodeVarintGenerated(dAtA, i, uint64(size))
    +		}
    +		i--
    +		dAtA[i] = 0xa
    +	}
    +	return len(dAtA) - i, nil
     }
     
    -func (m *ExpressionWarning) Size() (n int) {
    -	if m == nil {
    -		return 0
    +func (m *ValidatingAdmissionPolicyStatus) Marshal() (dAtA []byte, err error) {
    +	size := m.Size()
    +	dAtA = make([]byte, size)
    +	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    +	if err != nil {
    +		return nil, err
     	}
    -	var l int
    -	_ = l
    -	l = len(m.FieldRef)
    -	n += 1 + l + sovGenerated(uint64(l))
    -	l = len(m.Warning)
    -	n += 1 + l + sovGenerated(uint64(l))
    -	return n
    +	return dAtA[:n], nil
     }
     
    -func (m *MatchCondition) Size() (n int) {
    -	if m == nil {
    -		return 0
    -	}
    -	var l int
    -	_ = l
    -	l = len(m.Name)
    -	n += 1 + l + sovGenerated(uint64(l))
    -	l = len(m.Expression)
    -	n += 1 + l + sovGenerated(uint64(l))
    -	return n
    +func (m *ValidatingAdmissionPolicyStatus) MarshalTo(dAtA []byte) (int, error) {
    +	size := m.Size()
    +	return m.MarshalToSizedBuffer(dAtA[:size])
     }
     
    -func (m *MatchResources) Size() (n int) {
    -	if m == nil {
    -		return 0
    -	}
    +func (m *ValidatingAdmissionPolicyStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    +	i := len(dAtA)
    +	_ = i
     	var l int
     	_ = l
    -	if m.NamespaceSelector != nil {
    -		l = m.NamespaceSelector.Size()
    -		n += 1 + l + sovGenerated(uint64(l))
    -	}
    -	if m.ObjectSelector != nil {
    -		l = m.ObjectSelector.Size()
    -		n += 1 + l + sovGenerated(uint64(l))
    -	}
    -	if len(m.ResourceRules) > 0 {
    -		for _, e := range m.ResourceRules {
    -			l = e.Size()
    -			n += 1 + l + sovGenerated(uint64(l))
    +	if len(m.Conditions) > 0 {
    +		for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- {
    +			{
    +				size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i])
    +				if err != nil {
    +					return 0, err
    +				}
    +				i -= size
    +				i = encodeVarintGenerated(dAtA, i, uint64(size))
    +			}
    +			i--
    +			dAtA[i] = 0x1a
     		}
     	}
    -	if len(m.ExcludeResourceRules) > 0 {
    -		for _, e := range m.ExcludeResourceRules {
    -			l = e.Size()
    -			n += 1 + l + sovGenerated(uint64(l))
    +	if m.TypeChecking != nil {
    +		{
    +			size, err := m.TypeChecking.MarshalToSizedBuffer(dAtA[:i])
    +			if err != nil {
    +				return 0, err
    +			}
    +			i -= size
    +			i = encodeVarintGenerated(dAtA, i, uint64(size))
     		}
    +		i--
    +		dAtA[i] = 0x12
     	}
    -	if m.MatchPolicy != nil {
    -		l = len(*m.MatchPolicy)
    -		n += 1 + l + sovGenerated(uint64(l))
    -	}
    -	return n
    +	i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration))
    +	i--
    +	dAtA[i] = 0x8
    +	return len(dAtA) - i, nil
     }
     
    -func (m *MutatingWebhook) Size() (n int) {
    -	if m == nil {
    -		return 0
    +func (m *ValidatingWebhook) Marshal() (dAtA []byte, err error) {
    +	size := m.Size()
    +	dAtA = make([]byte, size)
    +	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    +	if err != nil {
    +		return nil, err
     	}
    +	return dAtA[:n], nil
    +}
    +
    +func (m *ValidatingWebhook) MarshalTo(dAtA []byte) (int, error) {
    +	size := m.Size()
    +	return m.MarshalToSizedBuffer(dAtA[:size])
    +}
    +
    +func (m *ValidatingWebhook) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    +	i := len(dAtA)
    +	_ = i
     	var l int
     	_ = l
    -	l = len(m.Name)
    -	n += 1 + l + sovGenerated(uint64(l))
    -	l = m.ClientConfig.Size()
    -	n += 1 + l + sovGenerated(uint64(l))
    -	if len(m.Rules) > 0 {
    -		for _, e := range m.Rules {
    -			l = e.Size()
    -			n += 1 + l + sovGenerated(uint64(l))
    +	if len(m.MatchConditions) > 0 {
    +		for iNdEx := len(m.MatchConditions) - 1; iNdEx >= 0; iNdEx-- {
    +			{
    +				size, err := m.MatchConditions[iNdEx].MarshalToSizedBuffer(dAtA[:i])
    +				if err != nil {
    +					return 0, err
    +				}
    +				i -= size
    +				i = encodeVarintGenerated(dAtA, i, uint64(size))
    +			}
    +			i--
    +			dAtA[i] = 0x5a
     		}
     	}
    -	if m.FailurePolicy != nil {
    -		l = len(*m.FailurePolicy)
    -		n += 1 + l + sovGenerated(uint64(l))
    -	}
    -	if m.NamespaceSelector != nil {
    -		l = m.NamespaceSelector.Size()
    -		n += 1 + l + sovGenerated(uint64(l))
    -	}
    -	if m.SideEffects != nil {
    -		l = len(*m.SideEffects)
    -		n += 1 + l + sovGenerated(uint64(l))
    +	if m.ObjectSelector != nil {
    +		{
    +			size, err := m.ObjectSelector.MarshalToSizedBuffer(dAtA[:i])
    +			if err != nil {
    +				return 0, err
    +			}
    +			i -= size
    +			i = encodeVarintGenerated(dAtA, i, uint64(size))
    +		}
    +		i--
    +		dAtA[i] = 0x52
     	}
    -	if m.TimeoutSeconds != nil {
    -		n += 1 + sovGenerated(uint64(*m.TimeoutSeconds))
    +	if m.MatchPolicy != nil {
    +		i -= len(*m.MatchPolicy)
    +		copy(dAtA[i:], *m.MatchPolicy)
    +		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.MatchPolicy)))
    +		i--
    +		dAtA[i] = 0x4a
     	}
     	if len(m.AdmissionReviewVersions) > 0 {
    -		for _, s := range m.AdmissionReviewVersions {
    -			l = len(s)
    -			n += 1 + l + sovGenerated(uint64(l))
    +		for iNdEx := len(m.AdmissionReviewVersions) - 1; iNdEx >= 0; iNdEx-- {
    +			i -= len(m.AdmissionReviewVersions[iNdEx])
    +			copy(dAtA[i:], m.AdmissionReviewVersions[iNdEx])
    +			i = encodeVarintGenerated(dAtA, i, uint64(len(m.AdmissionReviewVersions[iNdEx])))
    +			i--
    +			dAtA[i] = 0x42
     		}
     	}
    -	if m.MatchPolicy != nil {
    -		l = len(*m.MatchPolicy)
    -		n += 1 + l + sovGenerated(uint64(l))
    -	}
    -	if m.ReinvocationPolicy != nil {
    -		l = len(*m.ReinvocationPolicy)
    -		n += 1 + l + sovGenerated(uint64(l))
    +	if m.TimeoutSeconds != nil {
    +		i = encodeVarintGenerated(dAtA, i, uint64(*m.TimeoutSeconds))
    +		i--
    +		dAtA[i] = 0x38
     	}
    -	if m.ObjectSelector != nil {
    -		l = m.ObjectSelector.Size()
    -		n += 1 + l + sovGenerated(uint64(l))
    +	if m.SideEffects != nil {
    +		i -= len(*m.SideEffects)
    +		copy(dAtA[i:], *m.SideEffects)
    +		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.SideEffects)))
    +		i--
    +		dAtA[i] = 0x32
     	}
    -	if len(m.MatchConditions) > 0 {
    -		for _, e := range m.MatchConditions {
    -			l = e.Size()
    -			n += 1 + l + sovGenerated(uint64(l))
    +	if m.NamespaceSelector != nil {
    +		{
    +			size, err := m.NamespaceSelector.MarshalToSizedBuffer(dAtA[:i])
    +			if err != nil {
    +				return 0, err
    +			}
    +			i -= size
    +			i = encodeVarintGenerated(dAtA, i, uint64(size))
     		}
    +		i--
    +		dAtA[i] = 0x2a
     	}
    -	return n
    -}
    -
    -func (m *MutatingWebhookConfiguration) Size() (n int) {
    -	if m == nil {
    -		return 0
    +	if m.FailurePolicy != nil {
    +		i -= len(*m.FailurePolicy)
    +		copy(dAtA[i:], *m.FailurePolicy)
    +		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.FailurePolicy)))
    +		i--
    +		dAtA[i] = 0x22
     	}
    -	var l int
    -	_ = l
    -	l = m.ObjectMeta.Size()
    -	n += 1 + l + sovGenerated(uint64(l))
    -	if len(m.Webhooks) > 0 {
    -		for _, e := range m.Webhooks {
    -			l = e.Size()
    -			n += 1 + l + sovGenerated(uint64(l))
    +	if len(m.Rules) > 0 {
    +		for iNdEx := len(m.Rules) - 1; iNdEx >= 0; iNdEx-- {
    +			{
    +				size, err := m.Rules[iNdEx].MarshalToSizedBuffer(dAtA[:i])
    +				if err != nil {
    +					return 0, err
    +				}
    +				i -= size
    +				i = encodeVarintGenerated(dAtA, i, uint64(size))
    +			}
    +			i--
    +			dAtA[i] = 0x1a
     		}
     	}
    -	return n
    -}
    -
    -func (m *MutatingWebhookConfigurationList) Size() (n int) {
    -	if m == nil {
    -		return 0
    -	}
    -	var l int
    -	_ = l
    -	l = m.ListMeta.Size()
    -	n += 1 + l + sovGenerated(uint64(l))
    -	if len(m.Items) > 0 {
    -		for _, e := range m.Items {
    -			l = e.Size()
    -			n += 1 + l + sovGenerated(uint64(l))
    +	{
    +		size, err := m.ClientConfig.MarshalToSizedBuffer(dAtA[:i])
    +		if err != nil {
    +			return 0, err
     		}
    +		i -= size
    +		i = encodeVarintGenerated(dAtA, i, uint64(size))
     	}
    -	return n
    +	i--
    +	dAtA[i] = 0x12
    +	i -= len(m.Name)
    +	copy(dAtA[i:], m.Name)
    +	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
    +	i--
    +	dAtA[i] = 0xa
    +	return len(dAtA) - i, nil
     }
     
    -func (m *NamedRuleWithOperations) Size() (n int) {
    -	if m == nil {
    -		return 0
    -	}
    -	var l int
    -	_ = l
    -	if len(m.ResourceNames) > 0 {
    -		for _, s := range m.ResourceNames {
    -			l = len(s)
    -			n += 1 + l + sovGenerated(uint64(l))
    -		}
    +func (m *ValidatingWebhookConfiguration) Marshal() (dAtA []byte, err error) {
    +	size := m.Size()
    +	dAtA = make([]byte, size)
    +	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    +	if err != nil {
    +		return nil, err
     	}
    -	l = m.RuleWithOperations.Size()
    -	n += 1 + l + sovGenerated(uint64(l))
    -	return n
    +	return dAtA[:n], nil
     }
     
    -func (m *ParamKind) Size() (n int) {
    -	if m == nil {
    -		return 0
    -	}
    -	var l int
    -	_ = l
    -	l = len(m.APIVersion)
    -	n += 1 + l + sovGenerated(uint64(l))
    -	l = len(m.Kind)
    -	n += 1 + l + sovGenerated(uint64(l))
    -	return n
    +func (m *ValidatingWebhookConfiguration) MarshalTo(dAtA []byte) (int, error) {
    +	size := m.Size()
    +	return m.MarshalToSizedBuffer(dAtA[:size])
     }
     
    -func (m *ParamRef) Size() (n int) {
    -	if m == nil {
    -		return 0
    -	}
    +func (m *ValidatingWebhookConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    +	i := len(dAtA)
    +	_ = i
     	var l int
     	_ = l
    -	l = len(m.Name)
    -	n += 1 + l + sovGenerated(uint64(l))
    -	l = len(m.Namespace)
    -	n += 1 + l + sovGenerated(uint64(l))
    -	if m.Selector != nil {
    -		l = m.Selector.Size()
    -		n += 1 + l + sovGenerated(uint64(l))
    +	if len(m.Webhooks) > 0 {
    +		for iNdEx := len(m.Webhooks) - 1; iNdEx >= 0; iNdEx-- {
    +			{
    +				size, err := m.Webhooks[iNdEx].MarshalToSizedBuffer(dAtA[:i])
    +				if err != nil {
    +					return 0, err
    +				}
    +				i -= size
    +				i = encodeVarintGenerated(dAtA, i, uint64(size))
    +			}
    +			i--
    +			dAtA[i] = 0x12
    +		}
     	}
    -	if m.ParameterNotFoundAction != nil {
    -		l = len(*m.ParameterNotFoundAction)
    -		n += 1 + l + sovGenerated(uint64(l))
    +	{
    +		size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
    +		if err != nil {
    +			return 0, err
    +		}
    +		i -= size
    +		i = encodeVarintGenerated(dAtA, i, uint64(size))
     	}
    -	return n
    +	i--
    +	dAtA[i] = 0xa
    +	return len(dAtA) - i, nil
     }
     
    -func (m *ServiceReference) Size() (n int) {
    -	if m == nil {
    -		return 0
    -	}
    -	var l int
    -	_ = l
    -	l = len(m.Namespace)
    -	n += 1 + l + sovGenerated(uint64(l))
    -	l = len(m.Name)
    -	n += 1 + l + sovGenerated(uint64(l))
    -	if m.Path != nil {
    -		l = len(*m.Path)
    -		n += 1 + l + sovGenerated(uint64(l))
    -	}
    -	if m.Port != nil {
    -		n += 1 + sovGenerated(uint64(*m.Port))
    +func (m *ValidatingWebhookConfigurationList) Marshal() (dAtA []byte, err error) {
    +	size := m.Size()
    +	dAtA = make([]byte, size)
    +	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    +	if err != nil {
    +		return nil, err
     	}
    -	return n
    +	return dAtA[:n], nil
     }
     
    -func (m *TypeChecking) Size() (n int) {
    -	if m == nil {
    -		return 0
    -	}
    +func (m *ValidatingWebhookConfigurationList) MarshalTo(dAtA []byte) (int, error) {
    +	size := m.Size()
    +	return m.MarshalToSizedBuffer(dAtA[:size])
    +}
    +
    +func (m *ValidatingWebhookConfigurationList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    +	i := len(dAtA)
    +	_ = i
     	var l int
     	_ = l
    -	if len(m.ExpressionWarnings) > 0 {
    -		for _, e := range m.ExpressionWarnings {
    -			l = e.Size()
    -			n += 1 + l + sovGenerated(uint64(l))
    +	if len(m.Items) > 0 {
    +		for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
    +			{
    +				size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
    +				if err != nil {
    +					return 0, err
    +				}
    +				i -= size
    +				i = encodeVarintGenerated(dAtA, i, uint64(size))
    +			}
    +			i--
    +			dAtA[i] = 0x12
     		}
     	}
    -	return n
    +	{
    +		size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
    +		if err != nil {
    +			return 0, err
    +		}
    +		i -= size
    +		i = encodeVarintGenerated(dAtA, i, uint64(size))
    +	}
    +	i--
    +	dAtA[i] = 0xa
    +	return len(dAtA) - i, nil
     }
     
    -func (m *ValidatingAdmissionPolicy) Size() (n int) {
    -	if m == nil {
    -		return 0
    +func (m *Validation) Marshal() (dAtA []byte, err error) {
    +	size := m.Size()
    +	dAtA = make([]byte, size)
    +	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    +	if err != nil {
    +		return nil, err
     	}
    -	var l int
    -	_ = l
    -	l = m.ObjectMeta.Size()
    -	n += 1 + l + sovGenerated(uint64(l))
    -	l = m.Spec.Size()
    -	n += 1 + l + sovGenerated(uint64(l))
    -	l = m.Status.Size()
    -	n += 1 + l + sovGenerated(uint64(l))
    -	return n
    +	return dAtA[:n], nil
     }
     
    -func (m *ValidatingAdmissionPolicyBinding) Size() (n int) {
    -	if m == nil {
    -		return 0
    -	}
    -	var l int
    -	_ = l
    -	l = m.ObjectMeta.Size()
    +func (m *Validation) MarshalTo(dAtA []byte) (int, error) {
    +	size := m.Size()
    +	return m.MarshalToSizedBuffer(dAtA[:size])
    +}
    +
    +func (m *Validation) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    +	i := len(dAtA)
    +	_ = i
    +	var l int
    +	_ = l
    +	i -= len(m.MessageExpression)
    +	copy(dAtA[i:], m.MessageExpression)
    +	i = encodeVarintGenerated(dAtA, i, uint64(len(m.MessageExpression)))
    +	i--
    +	dAtA[i] = 0x22
    +	if m.Reason != nil {
    +		i -= len(*m.Reason)
    +		copy(dAtA[i:], *m.Reason)
    +		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Reason)))
    +		i--
    +		dAtA[i] = 0x1a
    +	}
    +	i -= len(m.Message)
    +	copy(dAtA[i:], m.Message)
    +	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message)))
    +	i--
    +	dAtA[i] = 0x12
    +	i -= len(m.Expression)
    +	copy(dAtA[i:], m.Expression)
    +	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Expression)))
    +	i--
    +	dAtA[i] = 0xa
    +	return len(dAtA) - i, nil
    +}
    +
    +func (m *Variable) Marshal() (dAtA []byte, err error) {
    +	size := m.Size()
    +	dAtA = make([]byte, size)
    +	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    +	if err != nil {
    +		return nil, err
    +	}
    +	return dAtA[:n], nil
    +}
    +
    +func (m *Variable) MarshalTo(dAtA []byte) (int, error) {
    +	size := m.Size()
    +	return m.MarshalToSizedBuffer(dAtA[:size])
    +}
    +
    +func (m *Variable) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    +	i := len(dAtA)
    +	_ = i
    +	var l int
    +	_ = l
    +	i -= len(m.Expression)
    +	copy(dAtA[i:], m.Expression)
    +	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Expression)))
    +	i--
    +	dAtA[i] = 0x12
    +	i -= len(m.Name)
    +	copy(dAtA[i:], m.Name)
    +	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
    +	i--
    +	dAtA[i] = 0xa
    +	return len(dAtA) - i, nil
    +}
    +
    +func (m *WebhookClientConfig) Marshal() (dAtA []byte, err error) {
    +	size := m.Size()
    +	dAtA = make([]byte, size)
    +	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    +	if err != nil {
    +		return nil, err
    +	}
    +	return dAtA[:n], nil
    +}
    +
    +func (m *WebhookClientConfig) MarshalTo(dAtA []byte) (int, error) {
    +	size := m.Size()
    +	return m.MarshalToSizedBuffer(dAtA[:size])
    +}
    +
    +func (m *WebhookClientConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    +	i := len(dAtA)
    +	_ = i
    +	var l int
    +	_ = l
    +	if m.URL != nil {
    +		i -= len(*m.URL)
    +		copy(dAtA[i:], *m.URL)
    +		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.URL)))
    +		i--
    +		dAtA[i] = 0x1a
    +	}
    +	if m.CABundle != nil {
    +		i -= len(m.CABundle)
    +		copy(dAtA[i:], m.CABundle)
    +		i = encodeVarintGenerated(dAtA, i, uint64(len(m.CABundle)))
    +		i--
    +		dAtA[i] = 0x12
    +	}
    +	if m.Service != nil {
    +		{
    +			size, err := m.Service.MarshalToSizedBuffer(dAtA[:i])
    +			if err != nil {
    +				return 0, err
    +			}
    +			i -= size
    +			i = encodeVarintGenerated(dAtA, i, uint64(size))
    +		}
    +		i--
    +		dAtA[i] = 0xa
    +	}
    +	return len(dAtA) - i, nil
    +}
    +
    +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int {
    +	offset -= sovGenerated(v)
    +	base := offset
    +	for v >= 1<<7 {
    +		dAtA[offset] = uint8(v&0x7f | 0x80)
    +		v >>= 7
    +		offset++
    +	}
    +	dAtA[offset] = uint8(v)
    +	return base
    +}
    +func (m *ApplyConfiguration) Size() (n int) {
    +	if m == nil {
    +		return 0
    +	}
    +	var l int
    +	_ = l
    +	l = len(m.Expression)
    +	n += 1 + l + sovGenerated(uint64(l))
    +	return n
    +}
    +
    +func (m *AuditAnnotation) Size() (n int) {
    +	if m == nil {
    +		return 0
    +	}
    +	var l int
    +	_ = l
    +	l = len(m.Key)
    +	n += 1 + l + sovGenerated(uint64(l))
    +	l = len(m.ValueExpression)
    +	n += 1 + l + sovGenerated(uint64(l))
    +	return n
    +}
    +
    +func (m *ExpressionWarning) Size() (n int) {
    +	if m == nil {
    +		return 0
    +	}
    +	var l int
    +	_ = l
    +	l = len(m.FieldRef)
    +	n += 1 + l + sovGenerated(uint64(l))
    +	l = len(m.Warning)
    +	n += 1 + l + sovGenerated(uint64(l))
    +	return n
    +}
    +
    +func (m *JSONPatch) Size() (n int) {
    +	if m == nil {
    +		return 0
    +	}
    +	var l int
    +	_ = l
    +	l = len(m.Expression)
    +	n += 1 + l + sovGenerated(uint64(l))
    +	return n
    +}
    +
    +func (m *MatchCondition) Size() (n int) {
    +	if m == nil {
    +		return 0
    +	}
    +	var l int
    +	_ = l
    +	l = len(m.Name)
    +	n += 1 + l + sovGenerated(uint64(l))
    +	l = len(m.Expression)
    +	n += 1 + l + sovGenerated(uint64(l))
    +	return n
    +}
    +
    +func (m *MatchResources) Size() (n int) {
    +	if m == nil {
    +		return 0
    +	}
    +	var l int
    +	_ = l
    +	if m.NamespaceSelector != nil {
    +		l = m.NamespaceSelector.Size()
    +		n += 1 + l + sovGenerated(uint64(l))
    +	}
    +	if m.ObjectSelector != nil {
    +		l = m.ObjectSelector.Size()
    +		n += 1 + l + sovGenerated(uint64(l))
    +	}
    +	if len(m.ResourceRules) > 0 {
    +		for _, e := range m.ResourceRules {
    +			l = e.Size()
    +			n += 1 + l + sovGenerated(uint64(l))
    +		}
    +	}
    +	if len(m.ExcludeResourceRules) > 0 {
    +		for _, e := range m.ExcludeResourceRules {
    +			l = e.Size()
    +			n += 1 + l + sovGenerated(uint64(l))
    +		}
    +	}
    +	if m.MatchPolicy != nil {
    +		l = len(*m.MatchPolicy)
    +		n += 1 + l + sovGenerated(uint64(l))
    +	}
    +	return n
    +}
    +
    +func (m *MutatingAdmissionPolicy) Size() (n int) {
    +	if m == nil {
    +		return 0
    +	}
    +	var l int
    +	_ = l
    +	l = m.ObjectMeta.Size()
     	n += 1 + l + sovGenerated(uint64(l))
     	l = m.Spec.Size()
     	n += 1 + l + sovGenerated(uint64(l))
     	return n
     }
     
    -func (m *ValidatingAdmissionPolicyBindingList) Size() (n int) {
    +func (m *MutatingAdmissionPolicyBinding) Size() (n int) {
    +	if m == nil {
    +		return 0
    +	}
    +	var l int
    +	_ = l
    +	l = m.ObjectMeta.Size()
    +	n += 1 + l + sovGenerated(uint64(l))
    +	l = m.Spec.Size()
    +	n += 1 + l + sovGenerated(uint64(l))
    +	return n
    +}
    +
    +func (m *MutatingAdmissionPolicyBindingList) Size() (n int) {
     	if m == nil {
     		return 0
     	}
    @@ -2586,7 +3147,7 @@ func (m *ValidatingAdmissionPolicyBindingList) Size() (n int) {
     	return n
     }
     
    -func (m *ValidatingAdmissionPolicyBindingSpec) Size() (n int) {
    +func (m *MutatingAdmissionPolicyBindingSpec) Size() (n int) {
     	if m == nil {
     		return 0
     	}
    @@ -2602,16 +3163,10 @@ func (m *ValidatingAdmissionPolicyBindingSpec) Size() (n int) {
     		l = m.MatchResources.Size()
     		n += 1 + l + sovGenerated(uint64(l))
     	}
    -	if len(m.ValidationActions) > 0 {
    -		for _, s := range m.ValidationActions {
    -			l = len(s)
    -			n += 1 + l + sovGenerated(uint64(l))
    -		}
    -	}
     	return n
     }
     
    -func (m *ValidatingAdmissionPolicyList) Size() (n int) {
    +func (m *MutatingAdmissionPolicyList) Size() (n int) {
     	if m == nil {
     		return 0
     	}
    @@ -2628,7 +3183,7 @@ func (m *ValidatingAdmissionPolicyList) Size() (n int) {
     	return n
     }
     
    -func (m *ValidatingAdmissionPolicySpec) Size() (n int) {
    +func (m *MutatingAdmissionPolicySpec) Size() (n int) {
     	if m == nil {
     		return 0
     	}
    @@ -2642,8 +3197,14 @@ func (m *ValidatingAdmissionPolicySpec) Size() (n int) {
     		l = m.MatchConstraints.Size()
     		n += 1 + l + sovGenerated(uint64(l))
     	}
    -	if len(m.Validations) > 0 {
    -		for _, e := range m.Validations {
    +	if len(m.Variables) > 0 {
    +		for _, e := range m.Variables {
    +			l = e.Size()
    +			n += 1 + l + sovGenerated(uint64(l))
    +		}
    +	}
    +	if len(m.Mutations) > 0 {
    +		for _, e := range m.Mutations {
     			l = e.Size()
     			n += 1 + l + sovGenerated(uint64(l))
     		}
    @@ -2652,48 +3213,18 @@ func (m *ValidatingAdmissionPolicySpec) Size() (n int) {
     		l = len(*m.FailurePolicy)
     		n += 1 + l + sovGenerated(uint64(l))
     	}
    -	if len(m.AuditAnnotations) > 0 {
    -		for _, e := range m.AuditAnnotations {
    -			l = e.Size()
    -			n += 1 + l + sovGenerated(uint64(l))
    -		}
    -	}
     	if len(m.MatchConditions) > 0 {
     		for _, e := range m.MatchConditions {
     			l = e.Size()
     			n += 1 + l + sovGenerated(uint64(l))
     		}
     	}
    -	if len(m.Variables) > 0 {
    -		for _, e := range m.Variables {
    -			l = e.Size()
    -			n += 1 + l + sovGenerated(uint64(l))
    -		}
    -	}
    -	return n
    -}
    -
    -func (m *ValidatingAdmissionPolicyStatus) Size() (n int) {
    -	if m == nil {
    -		return 0
    -	}
    -	var l int
    -	_ = l
    -	n += 1 + sovGenerated(uint64(m.ObservedGeneration))
    -	if m.TypeChecking != nil {
    -		l = m.TypeChecking.Size()
    -		n += 1 + l + sovGenerated(uint64(l))
    -	}
    -	if len(m.Conditions) > 0 {
    -		for _, e := range m.Conditions {
    -			l = e.Size()
    -			n += 1 + l + sovGenerated(uint64(l))
    -		}
    -	}
    +	l = len(m.ReinvocationPolicy)
    +	n += 1 + l + sovGenerated(uint64(l))
     	return n
     }
     
    -func (m *ValidatingWebhook) Size() (n int) {
    +func (m *MutatingWebhook) Size() (n int) {
     	if m == nil {
     		return 0
     	}
    @@ -2734,6 +3265,10 @@ func (m *ValidatingWebhook) Size() (n int) {
     		l = len(*m.MatchPolicy)
     		n += 1 + l + sovGenerated(uint64(l))
     	}
    +	if m.ReinvocationPolicy != nil {
    +		l = len(*m.ReinvocationPolicy)
    +		n += 1 + l + sovGenerated(uint64(l))
    +	}
     	if m.ObjectSelector != nil {
     		l = m.ObjectSelector.Size()
     		n += 1 + l + sovGenerated(uint64(l))
    @@ -2747,7 +3282,7 @@ func (m *ValidatingWebhook) Size() (n int) {
     	return n
     }
     
    -func (m *ValidatingWebhookConfiguration) Size() (n int) {
    +func (m *MutatingWebhookConfiguration) Size() (n int) {
     	if m == nil {
     		return 0
     	}
    @@ -2764,7 +3299,7 @@ func (m *ValidatingWebhookConfiguration) Size() (n int) {
     	return n
     }
     
    -func (m *ValidatingWebhookConfigurationList) Size() (n int) {
    +func (m *MutatingWebhookConfigurationList) Size() (n int) {
     	if m == nil {
     		return 0
     	}
    @@ -2781,476 +3316,1911 @@ func (m *ValidatingWebhookConfigurationList) Size() (n int) {
     	return n
     }
     
    -func (m *Validation) Size() (n int) {
    +func (m *Mutation) Size() (n int) {
     	if m == nil {
     		return 0
     	}
     	var l int
     	_ = l
    -	l = len(m.Expression)
    -	n += 1 + l + sovGenerated(uint64(l))
    -	l = len(m.Message)
    +	l = len(m.PatchType)
     	n += 1 + l + sovGenerated(uint64(l))
    -	if m.Reason != nil {
    -		l = len(*m.Reason)
    +	if m.ApplyConfiguration != nil {
    +		l = m.ApplyConfiguration.Size()
     		n += 1 + l + sovGenerated(uint64(l))
     	}
    -	l = len(m.MessageExpression)
    +	if m.JSONPatch != nil {
    +		l = m.JSONPatch.Size()
    +		n += 1 + l + sovGenerated(uint64(l))
    +	}
    +	return n
    +}
    +
    +func (m *NamedRuleWithOperations) Size() (n int) {
    +	if m == nil {
    +		return 0
    +	}
    +	var l int
    +	_ = l
    +	if len(m.ResourceNames) > 0 {
    +		for _, s := range m.ResourceNames {
    +			l = len(s)
    +			n += 1 + l + sovGenerated(uint64(l))
    +		}
    +	}
    +	l = m.RuleWithOperations.Size()
     	n += 1 + l + sovGenerated(uint64(l))
     	return n
     }
     
    -func (m *Variable) Size() (n int) {
    +func (m *ParamKind) Size() (n int) {
     	if m == nil {
     		return 0
     	}
     	var l int
     	_ = l
    -	l = len(m.Name)
    +	l = len(m.APIVersion)
     	n += 1 + l + sovGenerated(uint64(l))
    -	l = len(m.Expression)
    +	l = len(m.Kind)
     	n += 1 + l + sovGenerated(uint64(l))
     	return n
     }
     
    -func (m *WebhookClientConfig) Size() (n int) {
    +func (m *ParamRef) Size() (n int) {
     	if m == nil {
     		return 0
     	}
     	var l int
     	_ = l
    -	if m.Service != nil {
    -		l = m.Service.Size()
    +	l = len(m.Name)
    +	n += 1 + l + sovGenerated(uint64(l))
    +	l = len(m.Namespace)
    +	n += 1 + l + sovGenerated(uint64(l))
    +	if m.Selector != nil {
    +		l = m.Selector.Size()
     		n += 1 + l + sovGenerated(uint64(l))
     	}
    -	if m.CABundle != nil {
    -		l = len(m.CABundle)
    +	if m.ParameterNotFoundAction != nil {
    +		l = len(*m.ParameterNotFoundAction)
     		n += 1 + l + sovGenerated(uint64(l))
     	}
    -	if m.URL != nil {
    -		l = len(*m.URL)
    +	return n
    +}
    +
    +func (m *ServiceReference) Size() (n int) {
    +	if m == nil {
    +		return 0
    +	}
    +	var l int
    +	_ = l
    +	l = len(m.Namespace)
    +	n += 1 + l + sovGenerated(uint64(l))
    +	l = len(m.Name)
    +	n += 1 + l + sovGenerated(uint64(l))
    +	if m.Path != nil {
    +		l = len(*m.Path)
     		n += 1 + l + sovGenerated(uint64(l))
     	}
    +	if m.Port != nil {
    +		n += 1 + sovGenerated(uint64(*m.Port))
    +	}
     	return n
     }
     
    -func sovGenerated(x uint64) (n int) {
    -	return (math_bits.Len64(x|1) + 6) / 7
    -}
    -func sozGenerated(x uint64) (n int) {
    -	return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63))))
    +func (m *TypeChecking) Size() (n int) {
    +	if m == nil {
    +		return 0
    +	}
    +	var l int
    +	_ = l
    +	if len(m.ExpressionWarnings) > 0 {
    +		for _, e := range m.ExpressionWarnings {
    +			l = e.Size()
    +			n += 1 + l + sovGenerated(uint64(l))
    +		}
    +	}
    +	return n
     }
    -func (this *AuditAnnotation) String() string {
    -	if this == nil {
    -		return "nil"
    +
    +func (m *ValidatingAdmissionPolicy) Size() (n int) {
    +	if m == nil {
    +		return 0
     	}
    -	s := strings.Join([]string{`&AuditAnnotation{`,
    -		`Key:` + fmt.Sprintf("%v", this.Key) + `,`,
    -		`ValueExpression:` + fmt.Sprintf("%v", this.ValueExpression) + `,`,
    -		`}`,
    -	}, "")
    -	return s
    +	var l int
    +	_ = l
    +	l = m.ObjectMeta.Size()
    +	n += 1 + l + sovGenerated(uint64(l))
    +	l = m.Spec.Size()
    +	n += 1 + l + sovGenerated(uint64(l))
    +	l = m.Status.Size()
    +	n += 1 + l + sovGenerated(uint64(l))
    +	return n
     }
    -func (this *ExpressionWarning) String() string {
    -	if this == nil {
    -		return "nil"
    +
    +func (m *ValidatingAdmissionPolicyBinding) Size() (n int) {
    +	if m == nil {
    +		return 0
     	}
    -	s := strings.Join([]string{`&ExpressionWarning{`,
    -		`FieldRef:` + fmt.Sprintf("%v", this.FieldRef) + `,`,
    -		`Warning:` + fmt.Sprintf("%v", this.Warning) + `,`,
    -		`}`,
    -	}, "")
    -	return s
    +	var l int
    +	_ = l
    +	l = m.ObjectMeta.Size()
    +	n += 1 + l + sovGenerated(uint64(l))
    +	l = m.Spec.Size()
    +	n += 1 + l + sovGenerated(uint64(l))
    +	return n
     }
    -func (this *MatchCondition) String() string {
    -	if this == nil {
    -		return "nil"
    +
    +func (m *ValidatingAdmissionPolicyBindingList) Size() (n int) {
    +	if m == nil {
    +		return 0
     	}
    -	s := strings.Join([]string{`&MatchCondition{`,
    -		`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
    -		`Expression:` + fmt.Sprintf("%v", this.Expression) + `,`,
    -		`}`,
    -	}, "")
    -	return s
    +	var l int
    +	_ = l
    +	l = m.ListMeta.Size()
    +	n += 1 + l + sovGenerated(uint64(l))
    +	if len(m.Items) > 0 {
    +		for _, e := range m.Items {
    +			l = e.Size()
    +			n += 1 + l + sovGenerated(uint64(l))
    +		}
    +	}
    +	return n
     }
    -func (this *MatchResources) String() string {
    -	if this == nil {
    -		return "nil"
    +
    +func (m *ValidatingAdmissionPolicyBindingSpec) Size() (n int) {
    +	if m == nil {
    +		return 0
     	}
    -	repeatedStringForResourceRules := "[]NamedRuleWithOperations{"
    -	for _, f := range this.ResourceRules {
    -		repeatedStringForResourceRules += strings.Replace(strings.Replace(f.String(), "NamedRuleWithOperations", "NamedRuleWithOperations", 1), `&`, ``, 1) + ","
    +	var l int
    +	_ = l
    +	l = len(m.PolicyName)
    +	n += 1 + l + sovGenerated(uint64(l))
    +	if m.ParamRef != nil {
    +		l = m.ParamRef.Size()
    +		n += 1 + l + sovGenerated(uint64(l))
     	}
    -	repeatedStringForResourceRules += "}"
    -	repeatedStringForExcludeResourceRules := "[]NamedRuleWithOperations{"
    -	for _, f := range this.ExcludeResourceRules {
    -		repeatedStringForExcludeResourceRules += strings.Replace(strings.Replace(f.String(), "NamedRuleWithOperations", "NamedRuleWithOperations", 1), `&`, ``, 1) + ","
    +	if m.MatchResources != nil {
    +		l = m.MatchResources.Size()
    +		n += 1 + l + sovGenerated(uint64(l))
     	}
    -	repeatedStringForExcludeResourceRules += "}"
    -	s := strings.Join([]string{`&MatchResources{`,
    -		`NamespaceSelector:` + strings.Replace(fmt.Sprintf("%v", this.NamespaceSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`,
    -		`ObjectSelector:` + strings.Replace(fmt.Sprintf("%v", this.ObjectSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`,
    -		`ResourceRules:` + repeatedStringForResourceRules + `,`,
    -		`ExcludeResourceRules:` + repeatedStringForExcludeResourceRules + `,`,
    -		`MatchPolicy:` + valueToStringGenerated(this.MatchPolicy) + `,`,
    -		`}`,
    -	}, "")
    -	return s
    +	if len(m.ValidationActions) > 0 {
    +		for _, s := range m.ValidationActions {
    +			l = len(s)
    +			n += 1 + l + sovGenerated(uint64(l))
    +		}
    +	}
    +	return n
     }
    -func (this *MutatingWebhook) String() string {
    -	if this == nil {
    -		return "nil"
    +
    +func (m *ValidatingAdmissionPolicyList) Size() (n int) {
    +	if m == nil {
    +		return 0
     	}
    -	repeatedStringForRules := "[]RuleWithOperations{"
    -	for _, f := range this.Rules {
    -		repeatedStringForRules += fmt.Sprintf("%v", f) + ","
    +	var l int
    +	_ = l
    +	l = m.ListMeta.Size()
    +	n += 1 + l + sovGenerated(uint64(l))
    +	if len(m.Items) > 0 {
    +		for _, e := range m.Items {
    +			l = e.Size()
    +			n += 1 + l + sovGenerated(uint64(l))
    +		}
     	}
    -	repeatedStringForRules += "}"
    -	repeatedStringForMatchConditions := "[]MatchCondition{"
    -	for _, f := range this.MatchConditions {
    -		repeatedStringForMatchConditions += strings.Replace(strings.Replace(f.String(), "MatchCondition", "MatchCondition", 1), `&`, ``, 1) + ","
    -	}
    -	repeatedStringForMatchConditions += "}"
    -	s := strings.Join([]string{`&MutatingWebhook{`,
    -		`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
    -		`ClientConfig:` + strings.Replace(strings.Replace(this.ClientConfig.String(), "WebhookClientConfig", "WebhookClientConfig", 1), `&`, ``, 1) + `,`,
    -		`Rules:` + repeatedStringForRules + `,`,
    -		`FailurePolicy:` + valueToStringGenerated(this.FailurePolicy) + `,`,
    -		`NamespaceSelector:` + strings.Replace(fmt.Sprintf("%v", this.NamespaceSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`,
    -		`SideEffects:` + valueToStringGenerated(this.SideEffects) + `,`,
    -		`TimeoutSeconds:` + valueToStringGenerated(this.TimeoutSeconds) + `,`,
    -		`AdmissionReviewVersions:` + fmt.Sprintf("%v", this.AdmissionReviewVersions) + `,`,
    -		`MatchPolicy:` + valueToStringGenerated(this.MatchPolicy) + `,`,
    -		`ReinvocationPolicy:` + valueToStringGenerated(this.ReinvocationPolicy) + `,`,
    -		`ObjectSelector:` + strings.Replace(fmt.Sprintf("%v", this.ObjectSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`,
    -		`MatchConditions:` + repeatedStringForMatchConditions + `,`,
    -		`}`,
    -	}, "")
    -	return s
    +	return n
     }
    -func (this *MutatingWebhookConfiguration) String() string {
    -	if this == nil {
    -		return "nil"
    +
    +func (m *ValidatingAdmissionPolicySpec) Size() (n int) {
    +	if m == nil {
    +		return 0
     	}
    -	repeatedStringForWebhooks := "[]MutatingWebhook{"
    -	for _, f := range this.Webhooks {
    -		repeatedStringForWebhooks += strings.Replace(strings.Replace(f.String(), "MutatingWebhook", "MutatingWebhook", 1), `&`, ``, 1) + ","
    +	var l int
    +	_ = l
    +	if m.ParamKind != nil {
    +		l = m.ParamKind.Size()
    +		n += 1 + l + sovGenerated(uint64(l))
     	}
    -	repeatedStringForWebhooks += "}"
    -	s := strings.Join([]string{`&MutatingWebhookConfiguration{`,
    -		`ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
    -		`Webhooks:` + repeatedStringForWebhooks + `,`,
    -		`}`,
    -	}, "")
    -	return s
    -}
    -func (this *MutatingWebhookConfigurationList) String() string {
    -	if this == nil {
    -		return "nil"
    +	if m.MatchConstraints != nil {
    +		l = m.MatchConstraints.Size()
    +		n += 1 + l + sovGenerated(uint64(l))
     	}
    -	repeatedStringForItems := "[]MutatingWebhookConfiguration{"
    -	for _, f := range this.Items {
    -		repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "MutatingWebhookConfiguration", "MutatingWebhookConfiguration", 1), `&`, ``, 1) + ","
    +	if len(m.Validations) > 0 {
    +		for _, e := range m.Validations {
    +			l = e.Size()
    +			n += 1 + l + sovGenerated(uint64(l))
    +		}
     	}
    -	repeatedStringForItems += "}"
    -	s := strings.Join([]string{`&MutatingWebhookConfigurationList{`,
    -		`ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
    -		`Items:` + repeatedStringForItems + `,`,
    -		`}`,
    -	}, "")
    -	return s
    -}
    -func (this *NamedRuleWithOperations) String() string {
    -	if this == nil {
    -		return "nil"
    +	if m.FailurePolicy != nil {
    +		l = len(*m.FailurePolicy)
    +		n += 1 + l + sovGenerated(uint64(l))
     	}
    -	s := strings.Join([]string{`&NamedRuleWithOperations{`,
    -		`ResourceNames:` + fmt.Sprintf("%v", this.ResourceNames) + `,`,
    -		`RuleWithOperations:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.RuleWithOperations), "RuleWithOperations", "v11.RuleWithOperations", 1), `&`, ``, 1) + `,`,
    -		`}`,
    -	}, "")
    -	return s
    -}
    -func (this *ParamKind) String() string {
    -	if this == nil {
    -		return "nil"
    +	if len(m.AuditAnnotations) > 0 {
    +		for _, e := range m.AuditAnnotations {
    +			l = e.Size()
    +			n += 1 + l + sovGenerated(uint64(l))
    +		}
     	}
    -	s := strings.Join([]string{`&ParamKind{`,
    -		`APIVersion:` + fmt.Sprintf("%v", this.APIVersion) + `,`,
    -		`Kind:` + fmt.Sprintf("%v", this.Kind) + `,`,
    -		`}`,
    -	}, "")
    -	return s
    -}
    -func (this *ParamRef) String() string {
    -	if this == nil {
    -		return "nil"
    +	if len(m.MatchConditions) > 0 {
    +		for _, e := range m.MatchConditions {
    +			l = e.Size()
    +			n += 1 + l + sovGenerated(uint64(l))
    +		}
     	}
    -	s := strings.Join([]string{`&ParamRef{`,
    -		`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
    -		`Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`,
    -		`Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`,
    -		`ParameterNotFoundAction:` + valueToStringGenerated(this.ParameterNotFoundAction) + `,`,
    -		`}`,
    -	}, "")
    -	return s
    -}
    -func (this *ServiceReference) String() string {
    -	if this == nil {
    -		return "nil"
    +	if len(m.Variables) > 0 {
    +		for _, e := range m.Variables {
    +			l = e.Size()
    +			n += 1 + l + sovGenerated(uint64(l))
    +		}
     	}
    -	s := strings.Join([]string{`&ServiceReference{`,
    -		`Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`,
    -		`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
    -		`Path:` + valueToStringGenerated(this.Path) + `,`,
    -		`Port:` + valueToStringGenerated(this.Port) + `,`,
    -		`}`,
    -	}, "")
    -	return s
    +	return n
     }
    -func (this *TypeChecking) String() string {
    -	if this == nil {
    -		return "nil"
    +
    +func (m *ValidatingAdmissionPolicyStatus) Size() (n int) {
    +	if m == nil {
    +		return 0
     	}
    -	repeatedStringForExpressionWarnings := "[]ExpressionWarning{"
    -	for _, f := range this.ExpressionWarnings {
    -		repeatedStringForExpressionWarnings += strings.Replace(strings.Replace(f.String(), "ExpressionWarning", "ExpressionWarning", 1), `&`, ``, 1) + ","
    +	var l int
    +	_ = l
    +	n += 1 + sovGenerated(uint64(m.ObservedGeneration))
    +	if m.TypeChecking != nil {
    +		l = m.TypeChecking.Size()
    +		n += 1 + l + sovGenerated(uint64(l))
     	}
    -	repeatedStringForExpressionWarnings += "}"
    -	s := strings.Join([]string{`&TypeChecking{`,
    -		`ExpressionWarnings:` + repeatedStringForExpressionWarnings + `,`,
    -		`}`,
    -	}, "")
    -	return s
    -}
    -func (this *ValidatingAdmissionPolicy) String() string {
    -	if this == nil {
    -		return "nil"
    +	if len(m.Conditions) > 0 {
    +		for _, e := range m.Conditions {
    +			l = e.Size()
    +			n += 1 + l + sovGenerated(uint64(l))
    +		}
     	}
    -	s := strings.Join([]string{`&ValidatingAdmissionPolicy{`,
    -		`ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
    -		`Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ValidatingAdmissionPolicySpec", "ValidatingAdmissionPolicySpec", 1), `&`, ``, 1) + `,`,
    -		`Status:` + strings.Replace(strings.Replace(this.Status.String(), "ValidatingAdmissionPolicyStatus", "ValidatingAdmissionPolicyStatus", 1), `&`, ``, 1) + `,`,
    -		`}`,
    -	}, "")
    -	return s
    +	return n
     }
    -func (this *ValidatingAdmissionPolicyBinding) String() string {
    -	if this == nil {
    -		return "nil"
    +
    +func (m *ValidatingWebhook) Size() (n int) {
    +	if m == nil {
    +		return 0
     	}
    -	s := strings.Join([]string{`&ValidatingAdmissionPolicyBinding{`,
    -		`ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
    -		`Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ValidatingAdmissionPolicyBindingSpec", "ValidatingAdmissionPolicyBindingSpec", 1), `&`, ``, 1) + `,`,
    -		`}`,
    -	}, "")
    -	return s
    -}
    -func (this *ValidatingAdmissionPolicyBindingList) String() string {
    -	if this == nil {
    -		return "nil"
    +	var l int
    +	_ = l
    +	l = len(m.Name)
    +	n += 1 + l + sovGenerated(uint64(l))
    +	l = m.ClientConfig.Size()
    +	n += 1 + l + sovGenerated(uint64(l))
    +	if len(m.Rules) > 0 {
    +		for _, e := range m.Rules {
    +			l = e.Size()
    +			n += 1 + l + sovGenerated(uint64(l))
    +		}
     	}
    -	repeatedStringForItems := "[]ValidatingAdmissionPolicyBinding{"
    -	for _, f := range this.Items {
    -		repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ValidatingAdmissionPolicyBinding", "ValidatingAdmissionPolicyBinding", 1), `&`, ``, 1) + ","
    +	if m.FailurePolicy != nil {
    +		l = len(*m.FailurePolicy)
    +		n += 1 + l + sovGenerated(uint64(l))
     	}
    -	repeatedStringForItems += "}"
    -	s := strings.Join([]string{`&ValidatingAdmissionPolicyBindingList{`,
    -		`ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
    -		`Items:` + repeatedStringForItems + `,`,
    -		`}`,
    -	}, "")
    -	return s
    -}
    -func (this *ValidatingAdmissionPolicyBindingSpec) String() string {
    -	if this == nil {
    -		return "nil"
    +	if m.NamespaceSelector != nil {
    +		l = m.NamespaceSelector.Size()
    +		n += 1 + l + sovGenerated(uint64(l))
     	}
    -	s := strings.Join([]string{`&ValidatingAdmissionPolicyBindingSpec{`,
    -		`PolicyName:` + fmt.Sprintf("%v", this.PolicyName) + `,`,
    -		`ParamRef:` + strings.Replace(this.ParamRef.String(), "ParamRef", "ParamRef", 1) + `,`,
    -		`MatchResources:` + strings.Replace(this.MatchResources.String(), "MatchResources", "MatchResources", 1) + `,`,
    -		`ValidationActions:` + fmt.Sprintf("%v", this.ValidationActions) + `,`,
    -		`}`,
    -	}, "")
    -	return s
    -}
    -func (this *ValidatingAdmissionPolicyList) String() string {
    -	if this == nil {
    -		return "nil"
    +	if m.SideEffects != nil {
    +		l = len(*m.SideEffects)
    +		n += 1 + l + sovGenerated(uint64(l))
     	}
    -	repeatedStringForItems := "[]ValidatingAdmissionPolicy{"
    -	for _, f := range this.Items {
    -		repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ValidatingAdmissionPolicy", "ValidatingAdmissionPolicy", 1), `&`, ``, 1) + ","
    +	if m.TimeoutSeconds != nil {
    +		n += 1 + sovGenerated(uint64(*m.TimeoutSeconds))
     	}
    -	repeatedStringForItems += "}"
    -	s := strings.Join([]string{`&ValidatingAdmissionPolicyList{`,
    -		`ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
    -		`Items:` + repeatedStringForItems + `,`,
    -		`}`,
    -	}, "")
    -	return s
    -}
    -func (this *ValidatingAdmissionPolicySpec) String() string {
    -	if this == nil {
    -		return "nil"
    +	if len(m.AdmissionReviewVersions) > 0 {
    +		for _, s := range m.AdmissionReviewVersions {
    +			l = len(s)
    +			n += 1 + l + sovGenerated(uint64(l))
    +		}
     	}
    -	repeatedStringForValidations := "[]Validation{"
    -	for _, f := range this.Validations {
    -		repeatedStringForValidations += strings.Replace(strings.Replace(f.String(), "Validation", "Validation", 1), `&`, ``, 1) + ","
    +	if m.MatchPolicy != nil {
    +		l = len(*m.MatchPolicy)
    +		n += 1 + l + sovGenerated(uint64(l))
     	}
    -	repeatedStringForValidations += "}"
    -	repeatedStringForAuditAnnotations := "[]AuditAnnotation{"
    -	for _, f := range this.AuditAnnotations {
    -		repeatedStringForAuditAnnotations += strings.Replace(strings.Replace(f.String(), "AuditAnnotation", "AuditAnnotation", 1), `&`, ``, 1) + ","
    +	if m.ObjectSelector != nil {
    +		l = m.ObjectSelector.Size()
    +		n += 1 + l + sovGenerated(uint64(l))
     	}
    -	repeatedStringForAuditAnnotations += "}"
    -	repeatedStringForMatchConditions := "[]MatchCondition{"
    -	for _, f := range this.MatchConditions {
    -		repeatedStringForMatchConditions += strings.Replace(strings.Replace(f.String(), "MatchCondition", "MatchCondition", 1), `&`, ``, 1) + ","
    +	if len(m.MatchConditions) > 0 {
    +		for _, e := range m.MatchConditions {
    +			l = e.Size()
    +			n += 1 + l + sovGenerated(uint64(l))
    +		}
     	}
    -	repeatedStringForMatchConditions += "}"
    -	repeatedStringForVariables := "[]Variable{"
    -	for _, f := range this.Variables {
    -		repeatedStringForVariables += strings.Replace(strings.Replace(f.String(), "Variable", "Variable", 1), `&`, ``, 1) + ","
    +	return n
    +}
    +
    +func (m *ValidatingWebhookConfiguration) Size() (n int) {
    +	if m == nil {
    +		return 0
     	}
    -	repeatedStringForVariables += "}"
    -	s := strings.Join([]string{`&ValidatingAdmissionPolicySpec{`,
    -		`ParamKind:` + strings.Replace(this.ParamKind.String(), "ParamKind", "ParamKind", 1) + `,`,
    -		`MatchConstraints:` + strings.Replace(this.MatchConstraints.String(), "MatchResources", "MatchResources", 1) + `,`,
    -		`Validations:` + repeatedStringForValidations + `,`,
    -		`FailurePolicy:` + valueToStringGenerated(this.FailurePolicy) + `,`,
    -		`AuditAnnotations:` + repeatedStringForAuditAnnotations + `,`,
    -		`MatchConditions:` + repeatedStringForMatchConditions + `,`,
    -		`Variables:` + repeatedStringForVariables + `,`,
    +	var l int
    +	_ = l
    +	l = m.ObjectMeta.Size()
    +	n += 1 + l + sovGenerated(uint64(l))
    +	if len(m.Webhooks) > 0 {
    +		for _, e := range m.Webhooks {
    +			l = e.Size()
    +			n += 1 + l + sovGenerated(uint64(l))
    +		}
    +	}
    +	return n
    +}
    +
    +func (m *ValidatingWebhookConfigurationList) Size() (n int) {
    +	if m == nil {
    +		return 0
    +	}
    +	var l int
    +	_ = l
    +	l = m.ListMeta.Size()
    +	n += 1 + l + sovGenerated(uint64(l))
    +	if len(m.Items) > 0 {
    +		for _, e := range m.Items {
    +			l = e.Size()
    +			n += 1 + l + sovGenerated(uint64(l))
    +		}
    +	}
    +	return n
    +}
    +
    +func (m *Validation) Size() (n int) {
    +	if m == nil {
    +		return 0
    +	}
    +	var l int
    +	_ = l
    +	l = len(m.Expression)
    +	n += 1 + l + sovGenerated(uint64(l))
    +	l = len(m.Message)
    +	n += 1 + l + sovGenerated(uint64(l))
    +	if m.Reason != nil {
    +		l = len(*m.Reason)
    +		n += 1 + l + sovGenerated(uint64(l))
    +	}
    +	l = len(m.MessageExpression)
    +	n += 1 + l + sovGenerated(uint64(l))
    +	return n
    +}
    +
    +func (m *Variable) Size() (n int) {
    +	if m == nil {
    +		return 0
    +	}
    +	var l int
    +	_ = l
    +	l = len(m.Name)
    +	n += 1 + l + sovGenerated(uint64(l))
    +	l = len(m.Expression)
    +	n += 1 + l + sovGenerated(uint64(l))
    +	return n
    +}
    +
    +func (m *WebhookClientConfig) Size() (n int) {
    +	if m == nil {
    +		return 0
    +	}
    +	var l int
    +	_ = l
    +	if m.Service != nil {
    +		l = m.Service.Size()
    +		n += 1 + l + sovGenerated(uint64(l))
    +	}
    +	if m.CABundle != nil {
    +		l = len(m.CABundle)
    +		n += 1 + l + sovGenerated(uint64(l))
    +	}
    +	if m.URL != nil {
    +		l = len(*m.URL)
    +		n += 1 + l + sovGenerated(uint64(l))
    +	}
    +	return n
    +}
    +
    +func sovGenerated(x uint64) (n int) {
    +	return (math_bits.Len64(x|1) + 6) / 7
    +}
    +func sozGenerated(x uint64) (n int) {
    +	return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63))))
    +}
    +func (this *ApplyConfiguration) String() string {
    +	if this == nil {
    +		return "nil"
    +	}
    +	s := strings.Join([]string{`&ApplyConfiguration{`,
    +		`Expression:` + fmt.Sprintf("%v", this.Expression) + `,`,
     		`}`,
     	}, "")
     	return s
     }
    -func (this *ValidatingAdmissionPolicyStatus) String() string {
    +func (this *AuditAnnotation) String() string {
     	if this == nil {
     		return "nil"
     	}
    -	repeatedStringForConditions := "[]Condition{"
    -	for _, f := range this.Conditions {
    -		repeatedStringForConditions += fmt.Sprintf("%v", f) + ","
    -	}
    -	repeatedStringForConditions += "}"
    -	s := strings.Join([]string{`&ValidatingAdmissionPolicyStatus{`,
    -		`ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`,
    -		`TypeChecking:` + strings.Replace(this.TypeChecking.String(), "TypeChecking", "TypeChecking", 1) + `,`,
    -		`Conditions:` + repeatedStringForConditions + `,`,
    +	s := strings.Join([]string{`&AuditAnnotation{`,
    +		`Key:` + fmt.Sprintf("%v", this.Key) + `,`,
    +		`ValueExpression:` + fmt.Sprintf("%v", this.ValueExpression) + `,`,
     		`}`,
     	}, "")
     	return s
     }
    -func (this *ValidatingWebhook) String() string {
    +func (this *ExpressionWarning) String() string {
     	if this == nil {
     		return "nil"
     	}
    -	repeatedStringForRules := "[]RuleWithOperations{"
    -	for _, f := range this.Rules {
    -		repeatedStringForRules += fmt.Sprintf("%v", f) + ","
    +	s := strings.Join([]string{`&ExpressionWarning{`,
    +		`FieldRef:` + fmt.Sprintf("%v", this.FieldRef) + `,`,
    +		`Warning:` + fmt.Sprintf("%v", this.Warning) + `,`,
    +		`}`,
    +	}, "")
    +	return s
    +}
    +func (this *JSONPatch) String() string {
    +	if this == nil {
    +		return "nil"
     	}
    -	repeatedStringForRules += "}"
    -	repeatedStringForMatchConditions := "[]MatchCondition{"
    -	for _, f := range this.MatchConditions {
    -		repeatedStringForMatchConditions += strings.Replace(strings.Replace(f.String(), "MatchCondition", "MatchCondition", 1), `&`, ``, 1) + ","
    +	s := strings.Join([]string{`&JSONPatch{`,
    +		`Expression:` + fmt.Sprintf("%v", this.Expression) + `,`,
    +		`}`,
    +	}, "")
    +	return s
    +}
    +func (this *MatchCondition) String() string {
    +	if this == nil {
    +		return "nil"
     	}
    -	repeatedStringForMatchConditions += "}"
    -	s := strings.Join([]string{`&ValidatingWebhook{`,
    +	s := strings.Join([]string{`&MatchCondition{`,
     		`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
    -		`ClientConfig:` + strings.Replace(strings.Replace(this.ClientConfig.String(), "WebhookClientConfig", "WebhookClientConfig", 1), `&`, ``, 1) + `,`,
    -		`Rules:` + repeatedStringForRules + `,`,
    -		`FailurePolicy:` + valueToStringGenerated(this.FailurePolicy) + `,`,
    +		`Expression:` + fmt.Sprintf("%v", this.Expression) + `,`,
    +		`}`,
    +	}, "")
    +	return s
    +}
    +func (this *MatchResources) String() string {
    +	if this == nil {
    +		return "nil"
    +	}
    +	repeatedStringForResourceRules := "[]NamedRuleWithOperations{"
    +	for _, f := range this.ResourceRules {
    +		repeatedStringForResourceRules += strings.Replace(strings.Replace(f.String(), "NamedRuleWithOperations", "NamedRuleWithOperations", 1), `&`, ``, 1) + ","
    +	}
    +	repeatedStringForResourceRules += "}"
    +	repeatedStringForExcludeResourceRules := "[]NamedRuleWithOperations{"
    +	for _, f := range this.ExcludeResourceRules {
    +		repeatedStringForExcludeResourceRules += strings.Replace(strings.Replace(f.String(), "NamedRuleWithOperations", "NamedRuleWithOperations", 1), `&`, ``, 1) + ","
    +	}
    +	repeatedStringForExcludeResourceRules += "}"
    +	s := strings.Join([]string{`&MatchResources{`,
     		`NamespaceSelector:` + strings.Replace(fmt.Sprintf("%v", this.NamespaceSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`,
    -		`SideEffects:` + valueToStringGenerated(this.SideEffects) + `,`,
    -		`TimeoutSeconds:` + valueToStringGenerated(this.TimeoutSeconds) + `,`,
    -		`AdmissionReviewVersions:` + fmt.Sprintf("%v", this.AdmissionReviewVersions) + `,`,
    -		`MatchPolicy:` + valueToStringGenerated(this.MatchPolicy) + `,`,
     		`ObjectSelector:` + strings.Replace(fmt.Sprintf("%v", this.ObjectSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`,
    -		`MatchConditions:` + repeatedStringForMatchConditions + `,`,
    +		`ResourceRules:` + repeatedStringForResourceRules + `,`,
    +		`ExcludeResourceRules:` + repeatedStringForExcludeResourceRules + `,`,
    +		`MatchPolicy:` + valueToStringGenerated(this.MatchPolicy) + `,`,
     		`}`,
     	}, "")
     	return s
     }
    -func (this *ValidatingWebhookConfiguration) String() string {
    +func (this *MutatingAdmissionPolicy) String() string {
     	if this == nil {
     		return "nil"
     	}
    -	repeatedStringForWebhooks := "[]ValidatingWebhook{"
    -	for _, f := range this.Webhooks {
    -		repeatedStringForWebhooks += strings.Replace(strings.Replace(f.String(), "ValidatingWebhook", "ValidatingWebhook", 1), `&`, ``, 1) + ","
    +	s := strings.Join([]string{`&MutatingAdmissionPolicy{`,
    +		`ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
    +		`Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "MutatingAdmissionPolicySpec", "MutatingAdmissionPolicySpec", 1), `&`, ``, 1) + `,`,
    +		`}`,
    +	}, "")
    +	return s
    +}
    +func (this *MutatingAdmissionPolicyBinding) String() string {
    +	if this == nil {
    +		return "nil"
     	}
    -	repeatedStringForWebhooks += "}"
    -	s := strings.Join([]string{`&ValidatingWebhookConfiguration{`,
    +	s := strings.Join([]string{`&MutatingAdmissionPolicyBinding{`,
     		`ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
    -		`Webhooks:` + repeatedStringForWebhooks + `,`,
    +		`Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "MutatingAdmissionPolicyBindingSpec", "MutatingAdmissionPolicyBindingSpec", 1), `&`, ``, 1) + `,`,
     		`}`,
     	}, "")
     	return s
     }
    -func (this *ValidatingWebhookConfigurationList) String() string {
    +func (this *MutatingAdmissionPolicyBindingList) String() string {
     	if this == nil {
     		return "nil"
     	}
    -	repeatedStringForItems := "[]ValidatingWebhookConfiguration{"
    +	repeatedStringForItems := "[]MutatingAdmissionPolicyBinding{"
     	for _, f := range this.Items {
    -		repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ValidatingWebhookConfiguration", "ValidatingWebhookConfiguration", 1), `&`, ``, 1) + ","
    +		repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "MutatingAdmissionPolicyBinding", "MutatingAdmissionPolicyBinding", 1), `&`, ``, 1) + ","
     	}
     	repeatedStringForItems += "}"
    -	s := strings.Join([]string{`&ValidatingWebhookConfigurationList{`,
    +	s := strings.Join([]string{`&MutatingAdmissionPolicyBindingList{`,
     		`ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
     		`Items:` + repeatedStringForItems + `,`,
     		`}`,
     	}, "")
     	return s
     }
    -func (this *Validation) String() string {
    +func (this *MutatingAdmissionPolicyBindingSpec) String() string {
     	if this == nil {
     		return "nil"
     	}
    -	s := strings.Join([]string{`&Validation{`,
    -		`Expression:` + fmt.Sprintf("%v", this.Expression) + `,`,
    -		`Message:` + fmt.Sprintf("%v", this.Message) + `,`,
    -		`Reason:` + valueToStringGenerated(this.Reason) + `,`,
    -		`MessageExpression:` + fmt.Sprintf("%v", this.MessageExpression) + `,`,
    +	s := strings.Join([]string{`&MutatingAdmissionPolicyBindingSpec{`,
    +		`PolicyName:` + fmt.Sprintf("%v", this.PolicyName) + `,`,
    +		`ParamRef:` + strings.Replace(this.ParamRef.String(), "ParamRef", "ParamRef", 1) + `,`,
    +		`MatchResources:` + strings.Replace(this.MatchResources.String(), "MatchResources", "MatchResources", 1) + `,`,
     		`}`,
     	}, "")
     	return s
     }
    -func (this *Variable) String() string {
    +func (this *MutatingAdmissionPolicyList) String() string {
    +	if this == nil {
    +		return "nil"
    +	}
    +	repeatedStringForItems := "[]MutatingAdmissionPolicy{"
    +	for _, f := range this.Items {
    +		repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "MutatingAdmissionPolicy", "MutatingAdmissionPolicy", 1), `&`, ``, 1) + ","
    +	}
    +	repeatedStringForItems += "}"
    +	s := strings.Join([]string{`&MutatingAdmissionPolicyList{`,
    +		`ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
    +		`Items:` + repeatedStringForItems + `,`,
    +		`}`,
    +	}, "")
    +	return s
    +}
    +func (this *MutatingAdmissionPolicySpec) String() string {
     	if this == nil {
     		return "nil"
     	}
    -	s := strings.Join([]string{`&Variable{`,
    -		`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
    -		`Expression:` + fmt.Sprintf("%v", this.Expression) + `,`,
    -		`}`,
    -	}, "")
    -	return s
    -}
    -func (this *WebhookClientConfig) String() string {
    -	if this == nil {
    -		return "nil"
    +	repeatedStringForVariables := "[]Variable{"
    +	for _, f := range this.Variables {
    +		repeatedStringForVariables += strings.Replace(strings.Replace(f.String(), "Variable", "Variable", 1), `&`, ``, 1) + ","
    +	}
    +	repeatedStringForVariables += "}"
    +	repeatedStringForMutations := "[]Mutation{"
    +	for _, f := range this.Mutations {
    +		repeatedStringForMutations += strings.Replace(strings.Replace(f.String(), "Mutation", "Mutation", 1), `&`, ``, 1) + ","
    +	}
    +	repeatedStringForMutations += "}"
    +	repeatedStringForMatchConditions := "[]MatchCondition{"
    +	for _, f := range this.MatchConditions {
    +		repeatedStringForMatchConditions += strings.Replace(strings.Replace(f.String(), "MatchCondition", "MatchCondition", 1), `&`, ``, 1) + ","
    +	}
    +	repeatedStringForMatchConditions += "}"
    +	s := strings.Join([]string{`&MutatingAdmissionPolicySpec{`,
    +		`ParamKind:` + strings.Replace(this.ParamKind.String(), "ParamKind", "ParamKind", 1) + `,`,
    +		`MatchConstraints:` + strings.Replace(this.MatchConstraints.String(), "MatchResources", "MatchResources", 1) + `,`,
    +		`Variables:` + repeatedStringForVariables + `,`,
    +		`Mutations:` + repeatedStringForMutations + `,`,
    +		`FailurePolicy:` + valueToStringGenerated(this.FailurePolicy) + `,`,
    +		`MatchConditions:` + repeatedStringForMatchConditions + `,`,
    +		`ReinvocationPolicy:` + fmt.Sprintf("%v", this.ReinvocationPolicy) + `,`,
    +		`}`,
    +	}, "")
    +	return s
    +}
    +func (this *MutatingWebhook) String() string {
    +	if this == nil {
    +		return "nil"
    +	}
    +	repeatedStringForRules := "[]RuleWithOperations{"
    +	for _, f := range this.Rules {
    +		repeatedStringForRules += fmt.Sprintf("%v", f) + ","
    +	}
    +	repeatedStringForRules += "}"
    +	repeatedStringForMatchConditions := "[]MatchCondition{"
    +	for _, f := range this.MatchConditions {
    +		repeatedStringForMatchConditions += strings.Replace(strings.Replace(f.String(), "MatchCondition", "MatchCondition", 1), `&`, ``, 1) + ","
    +	}
    +	repeatedStringForMatchConditions += "}"
    +	s := strings.Join([]string{`&MutatingWebhook{`,
    +		`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
    +		`ClientConfig:` + strings.Replace(strings.Replace(this.ClientConfig.String(), "WebhookClientConfig", "WebhookClientConfig", 1), `&`, ``, 1) + `,`,
    +		`Rules:` + repeatedStringForRules + `,`,
    +		`FailurePolicy:` + valueToStringGenerated(this.FailurePolicy) + `,`,
    +		`NamespaceSelector:` + strings.Replace(fmt.Sprintf("%v", this.NamespaceSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`,
    +		`SideEffects:` + valueToStringGenerated(this.SideEffects) + `,`,
    +		`TimeoutSeconds:` + valueToStringGenerated(this.TimeoutSeconds) + `,`,
    +		`AdmissionReviewVersions:` + fmt.Sprintf("%v", this.AdmissionReviewVersions) + `,`,
    +		`MatchPolicy:` + valueToStringGenerated(this.MatchPolicy) + `,`,
    +		`ReinvocationPolicy:` + valueToStringGenerated(this.ReinvocationPolicy) + `,`,
    +		`ObjectSelector:` + strings.Replace(fmt.Sprintf("%v", this.ObjectSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`,
    +		`MatchConditions:` + repeatedStringForMatchConditions + `,`,
    +		`}`,
    +	}, "")
    +	return s
    +}
    +func (this *MutatingWebhookConfiguration) String() string {
    +	if this == nil {
    +		return "nil"
    +	}
    +	repeatedStringForWebhooks := "[]MutatingWebhook{"
    +	for _, f := range this.Webhooks {
    +		repeatedStringForWebhooks += strings.Replace(strings.Replace(f.String(), "MutatingWebhook", "MutatingWebhook", 1), `&`, ``, 1) + ","
    +	}
    +	repeatedStringForWebhooks += "}"
    +	s := strings.Join([]string{`&MutatingWebhookConfiguration{`,
    +		`ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
    +		`Webhooks:` + repeatedStringForWebhooks + `,`,
    +		`}`,
    +	}, "")
    +	return s
    +}
    +func (this *MutatingWebhookConfigurationList) String() string {
    +	if this == nil {
    +		return "nil"
    +	}
    +	repeatedStringForItems := "[]MutatingWebhookConfiguration{"
    +	for _, f := range this.Items {
    +		repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "MutatingWebhookConfiguration", "MutatingWebhookConfiguration", 1), `&`, ``, 1) + ","
    +	}
    +	repeatedStringForItems += "}"
    +	s := strings.Join([]string{`&MutatingWebhookConfigurationList{`,
    +		`ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
    +		`Items:` + repeatedStringForItems + `,`,
    +		`}`,
    +	}, "")
    +	return s
    +}
    +func (this *Mutation) String() string {
    +	if this == nil {
    +		return "nil"
    +	}
    +	s := strings.Join([]string{`&Mutation{`,
    +		`PatchType:` + fmt.Sprintf("%v", this.PatchType) + `,`,
    +		`ApplyConfiguration:` + strings.Replace(this.ApplyConfiguration.String(), "ApplyConfiguration", "ApplyConfiguration", 1) + `,`,
    +		`JSONPatch:` + strings.Replace(this.JSONPatch.String(), "JSONPatch", "JSONPatch", 1) + `,`,
    +		`}`,
    +	}, "")
    +	return s
    +}
    +func (this *NamedRuleWithOperations) String() string {
    +	if this == nil {
    +		return "nil"
    +	}
    +	s := strings.Join([]string{`&NamedRuleWithOperations{`,
    +		`ResourceNames:` + fmt.Sprintf("%v", this.ResourceNames) + `,`,
    +		`RuleWithOperations:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.RuleWithOperations), "RuleWithOperations", "v11.RuleWithOperations", 1), `&`, ``, 1) + `,`,
    +		`}`,
    +	}, "")
    +	return s
    +}
    +func (this *ParamKind) String() string {
    +	if this == nil {
    +		return "nil"
    +	}
    +	s := strings.Join([]string{`&ParamKind{`,
    +		`APIVersion:` + fmt.Sprintf("%v", this.APIVersion) + `,`,
    +		`Kind:` + fmt.Sprintf("%v", this.Kind) + `,`,
    +		`}`,
    +	}, "")
    +	return s
    +}
    +func (this *ParamRef) String() string {
    +	if this == nil {
    +		return "nil"
    +	}
    +	s := strings.Join([]string{`&ParamRef{`,
    +		`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
    +		`Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`,
    +		`Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`,
    +		`ParameterNotFoundAction:` + valueToStringGenerated(this.ParameterNotFoundAction) + `,`,
    +		`}`,
    +	}, "")
    +	return s
    +}
    +func (this *ServiceReference) String() string {
    +	if this == nil {
    +		return "nil"
    +	}
    +	s := strings.Join([]string{`&ServiceReference{`,
    +		`Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`,
    +		`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
    +		`Path:` + valueToStringGenerated(this.Path) + `,`,
    +		`Port:` + valueToStringGenerated(this.Port) + `,`,
    +		`}`,
    +	}, "")
    +	return s
    +}
    +func (this *TypeChecking) String() string {
    +	if this == nil {
    +		return "nil"
    +	}
    +	repeatedStringForExpressionWarnings := "[]ExpressionWarning{"
    +	for _, f := range this.ExpressionWarnings {
    +		repeatedStringForExpressionWarnings += strings.Replace(strings.Replace(f.String(), "ExpressionWarning", "ExpressionWarning", 1), `&`, ``, 1) + ","
    +	}
    +	repeatedStringForExpressionWarnings += "}"
    +	s := strings.Join([]string{`&TypeChecking{`,
    +		`ExpressionWarnings:` + repeatedStringForExpressionWarnings + `,`,
    +		`}`,
    +	}, "")
    +	return s
    +}
    +func (this *ValidatingAdmissionPolicy) String() string {
    +	if this == nil {
    +		return "nil"
    +	}
    +	s := strings.Join([]string{`&ValidatingAdmissionPolicy{`,
    +		`ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
    +		`Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ValidatingAdmissionPolicySpec", "ValidatingAdmissionPolicySpec", 1), `&`, ``, 1) + `,`,
    +		`Status:` + strings.Replace(strings.Replace(this.Status.String(), "ValidatingAdmissionPolicyStatus", "ValidatingAdmissionPolicyStatus", 1), `&`, ``, 1) + `,`,
    +		`}`,
    +	}, "")
    +	return s
    +}
    +func (this *ValidatingAdmissionPolicyBinding) String() string {
    +	if this == nil {
    +		return "nil"
    +	}
    +	s := strings.Join([]string{`&ValidatingAdmissionPolicyBinding{`,
    +		`ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
    +		`Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ValidatingAdmissionPolicyBindingSpec", "ValidatingAdmissionPolicyBindingSpec", 1), `&`, ``, 1) + `,`,
    +		`}`,
    +	}, "")
    +	return s
    +}
    +func (this *ValidatingAdmissionPolicyBindingList) String() string {
    +	if this == nil {
    +		return "nil"
    +	}
    +	repeatedStringForItems := "[]ValidatingAdmissionPolicyBinding{"
    +	for _, f := range this.Items {
    +		repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ValidatingAdmissionPolicyBinding", "ValidatingAdmissionPolicyBinding", 1), `&`, ``, 1) + ","
    +	}
    +	repeatedStringForItems += "}"
    +	s := strings.Join([]string{`&ValidatingAdmissionPolicyBindingList{`,
    +		`ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
    +		`Items:` + repeatedStringForItems + `,`,
    +		`}`,
    +	}, "")
    +	return s
    +}
    +func (this *ValidatingAdmissionPolicyBindingSpec) String() string {
    +	if this == nil {
    +		return "nil"
    +	}
    +	s := strings.Join([]string{`&ValidatingAdmissionPolicyBindingSpec{`,
    +		`PolicyName:` + fmt.Sprintf("%v", this.PolicyName) + `,`,
    +		`ParamRef:` + strings.Replace(this.ParamRef.String(), "ParamRef", "ParamRef", 1) + `,`,
    +		`MatchResources:` + strings.Replace(this.MatchResources.String(), "MatchResources", "MatchResources", 1) + `,`,
    +		`ValidationActions:` + fmt.Sprintf("%v", this.ValidationActions) + `,`,
    +		`}`,
    +	}, "")
    +	return s
    +}
    +func (this *ValidatingAdmissionPolicyList) String() string {
    +	if this == nil {
    +		return "nil"
    +	}
    +	repeatedStringForItems := "[]ValidatingAdmissionPolicy{"
    +	for _, f := range this.Items {
    +		repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ValidatingAdmissionPolicy", "ValidatingAdmissionPolicy", 1), `&`, ``, 1) + ","
    +	}
    +	repeatedStringForItems += "}"
    +	s := strings.Join([]string{`&ValidatingAdmissionPolicyList{`,
    +		`ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
    +		`Items:` + repeatedStringForItems + `,`,
    +		`}`,
    +	}, "")
    +	return s
    +}
    +func (this *ValidatingAdmissionPolicySpec) String() string {
    +	if this == nil {
    +		return "nil"
    +	}
    +	repeatedStringForValidations := "[]Validation{"
    +	for _, f := range this.Validations {
    +		repeatedStringForValidations += strings.Replace(strings.Replace(f.String(), "Validation", "Validation", 1), `&`, ``, 1) + ","
    +	}
    +	repeatedStringForValidations += "}"
    +	repeatedStringForAuditAnnotations := "[]AuditAnnotation{"
    +	for _, f := range this.AuditAnnotations {
    +		repeatedStringForAuditAnnotations += strings.Replace(strings.Replace(f.String(), "AuditAnnotation", "AuditAnnotation", 1), `&`, ``, 1) + ","
    +	}
    +	repeatedStringForAuditAnnotations += "}"
    +	repeatedStringForMatchConditions := "[]MatchCondition{"
    +	for _, f := range this.MatchConditions {
    +		repeatedStringForMatchConditions += strings.Replace(strings.Replace(f.String(), "MatchCondition", "MatchCondition", 1), `&`, ``, 1) + ","
    +	}
    +	repeatedStringForMatchConditions += "}"
    +	repeatedStringForVariables := "[]Variable{"
    +	for _, f := range this.Variables {
    +		repeatedStringForVariables += strings.Replace(strings.Replace(f.String(), "Variable", "Variable", 1), `&`, ``, 1) + ","
    +	}
    +	repeatedStringForVariables += "}"
    +	s := strings.Join([]string{`&ValidatingAdmissionPolicySpec{`,
    +		`ParamKind:` + strings.Replace(this.ParamKind.String(), "ParamKind", "ParamKind", 1) + `,`,
    +		`MatchConstraints:` + strings.Replace(this.MatchConstraints.String(), "MatchResources", "MatchResources", 1) + `,`,
    +		`Validations:` + repeatedStringForValidations + `,`,
    +		`FailurePolicy:` + valueToStringGenerated(this.FailurePolicy) + `,`,
    +		`AuditAnnotations:` + repeatedStringForAuditAnnotations + `,`,
    +		`MatchConditions:` + repeatedStringForMatchConditions + `,`,
    +		`Variables:` + repeatedStringForVariables + `,`,
    +		`}`,
    +	}, "")
    +	return s
    +}
    +func (this *ValidatingAdmissionPolicyStatus) String() string {
    +	if this == nil {
    +		return "nil"
    +	}
    +	repeatedStringForConditions := "[]Condition{"
    +	for _, f := range this.Conditions {
    +		repeatedStringForConditions += fmt.Sprintf("%v", f) + ","
    +	}
    +	repeatedStringForConditions += "}"
    +	s := strings.Join([]string{`&ValidatingAdmissionPolicyStatus{`,
    +		`ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`,
    +		`TypeChecking:` + strings.Replace(this.TypeChecking.String(), "TypeChecking", "TypeChecking", 1) + `,`,
    +		`Conditions:` + repeatedStringForConditions + `,`,
    +		`}`,
    +	}, "")
    +	return s
    +}
    +func (this *ValidatingWebhook) String() string {
    +	if this == nil {
    +		return "nil"
    +	}
    +	repeatedStringForRules := "[]RuleWithOperations{"
    +	for _, f := range this.Rules {
    +		repeatedStringForRules += fmt.Sprintf("%v", f) + ","
    +	}
    +	repeatedStringForRules += "}"
    +	repeatedStringForMatchConditions := "[]MatchCondition{"
    +	for _, f := range this.MatchConditions {
    +		repeatedStringForMatchConditions += strings.Replace(strings.Replace(f.String(), "MatchCondition", "MatchCondition", 1), `&`, ``, 1) + ","
    +	}
    +	repeatedStringForMatchConditions += "}"
    +	s := strings.Join([]string{`&ValidatingWebhook{`,
    +		`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
    +		`ClientConfig:` + strings.Replace(strings.Replace(this.ClientConfig.String(), "WebhookClientConfig", "WebhookClientConfig", 1), `&`, ``, 1) + `,`,
    +		`Rules:` + repeatedStringForRules + `,`,
    +		`FailurePolicy:` + valueToStringGenerated(this.FailurePolicy) + `,`,
    +		`NamespaceSelector:` + strings.Replace(fmt.Sprintf("%v", this.NamespaceSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`,
    +		`SideEffects:` + valueToStringGenerated(this.SideEffects) + `,`,
    +		`TimeoutSeconds:` + valueToStringGenerated(this.TimeoutSeconds) + `,`,
    +		`AdmissionReviewVersions:` + fmt.Sprintf("%v", this.AdmissionReviewVersions) + `,`,
    +		`MatchPolicy:` + valueToStringGenerated(this.MatchPolicy) + `,`,
    +		`ObjectSelector:` + strings.Replace(fmt.Sprintf("%v", this.ObjectSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`,
    +		`MatchConditions:` + repeatedStringForMatchConditions + `,`,
    +		`}`,
    +	}, "")
    +	return s
    +}
    +func (this *ValidatingWebhookConfiguration) String() string {
    +	if this == nil {
    +		return "nil"
    +	}
    +	repeatedStringForWebhooks := "[]ValidatingWebhook{"
    +	for _, f := range this.Webhooks {
    +		repeatedStringForWebhooks += strings.Replace(strings.Replace(f.String(), "ValidatingWebhook", "ValidatingWebhook", 1), `&`, ``, 1) + ","
    +	}
    +	repeatedStringForWebhooks += "}"
    +	s := strings.Join([]string{`&ValidatingWebhookConfiguration{`,
    +		`ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
    +		`Webhooks:` + repeatedStringForWebhooks + `,`,
    +		`}`,
    +	}, "")
    +	return s
    +}
    +func (this *ValidatingWebhookConfigurationList) String() string {
    +	if this == nil {
    +		return "nil"
    +	}
    +	repeatedStringForItems := "[]ValidatingWebhookConfiguration{"
    +	for _, f := range this.Items {
    +		repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ValidatingWebhookConfiguration", "ValidatingWebhookConfiguration", 1), `&`, ``, 1) + ","
    +	}
    +	repeatedStringForItems += "}"
    +	s := strings.Join([]string{`&ValidatingWebhookConfigurationList{`,
    +		`ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
    +		`Items:` + repeatedStringForItems + `,`,
    +		`}`,
    +	}, "")
    +	return s
    +}
    +func (this *Validation) String() string {
    +	if this == nil {
    +		return "nil"
    +	}
    +	s := strings.Join([]string{`&Validation{`,
    +		`Expression:` + fmt.Sprintf("%v", this.Expression) + `,`,
    +		`Message:` + fmt.Sprintf("%v", this.Message) + `,`,
    +		`Reason:` + valueToStringGenerated(this.Reason) + `,`,
    +		`MessageExpression:` + fmt.Sprintf("%v", this.MessageExpression) + `,`,
    +		`}`,
    +	}, "")
    +	return s
    +}
    +func (this *Variable) String() string {
    +	if this == nil {
    +		return "nil"
    +	}
    +	s := strings.Join([]string{`&Variable{`,
    +		`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
    +		`Expression:` + fmt.Sprintf("%v", this.Expression) + `,`,
    +		`}`,
    +	}, "")
    +	return s
    +}
    +func (this *WebhookClientConfig) String() string {
    +	if this == nil {
    +		return "nil"
    +	}
    +	s := strings.Join([]string{`&WebhookClientConfig{`,
    +		`Service:` + strings.Replace(this.Service.String(), "ServiceReference", "ServiceReference", 1) + `,`,
    +		`CABundle:` + valueToStringGenerated(this.CABundle) + `,`,
    +		`URL:` + valueToStringGenerated(this.URL) + `,`,
    +		`}`,
    +	}, "")
    +	return s
    +}
    +func valueToStringGenerated(v interface{}) string {
    +	rv := reflect.ValueOf(v)
    +	if rv.IsNil() {
    +		return "nil"
    +	}
    +	pv := reflect.Indirect(rv).Interface()
    +	return fmt.Sprintf("*%v", pv)
    +}
    +func (m *ApplyConfiguration) Unmarshal(dAtA []byte) error {
    +	l := len(dAtA)
    +	iNdEx := 0
    +	for iNdEx < l {
    +		preIndex := iNdEx
    +		var wire uint64
    +		for shift := uint(0); ; shift += 7 {
    +			if shift >= 64 {
    +				return ErrIntOverflowGenerated
    +			}
    +			if iNdEx >= l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			b := dAtA[iNdEx]
    +			iNdEx++
    +			wire |= uint64(b&0x7F) << shift
    +			if b < 0x80 {
    +				break
    +			}
    +		}
    +		fieldNum := int32(wire >> 3)
    +		wireType := int(wire & 0x7)
    +		if wireType == 4 {
    +			return fmt.Errorf("proto: ApplyConfiguration: wiretype end group for non-group")
    +		}
    +		if fieldNum <= 0 {
    +			return fmt.Errorf("proto: ApplyConfiguration: illegal tag %d (wire type %d)", fieldNum, wire)
    +		}
    +		switch fieldNum {
    +		case 1:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Expression", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.Expression = string(dAtA[iNdEx:postIndex])
    +			iNdEx = postIndex
    +		default:
    +			iNdEx = preIndex
    +			skippy, err := skipGenerated(dAtA[iNdEx:])
    +			if err != nil {
    +				return err
    +			}
    +			if (skippy < 0) || (iNdEx+skippy) < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if (iNdEx + skippy) > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			iNdEx += skippy
    +		}
    +	}
    +
    +	if iNdEx > l {
    +		return io.ErrUnexpectedEOF
    +	}
    +	return nil
    +}
    +func (m *AuditAnnotation) Unmarshal(dAtA []byte) error {
    +	l := len(dAtA)
    +	iNdEx := 0
    +	for iNdEx < l {
    +		preIndex := iNdEx
    +		var wire uint64
    +		for shift := uint(0); ; shift += 7 {
    +			if shift >= 64 {
    +				return ErrIntOverflowGenerated
    +			}
    +			if iNdEx >= l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			b := dAtA[iNdEx]
    +			iNdEx++
    +			wire |= uint64(b&0x7F) << shift
    +			if b < 0x80 {
    +				break
    +			}
    +		}
    +		fieldNum := int32(wire >> 3)
    +		wireType := int(wire & 0x7)
    +		if wireType == 4 {
    +			return fmt.Errorf("proto: AuditAnnotation: wiretype end group for non-group")
    +		}
    +		if fieldNum <= 0 {
    +			return fmt.Errorf("proto: AuditAnnotation: illegal tag %d (wire type %d)", fieldNum, wire)
    +		}
    +		switch fieldNum {
    +		case 1:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.Key = string(dAtA[iNdEx:postIndex])
    +			iNdEx = postIndex
    +		case 2:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field ValueExpression", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.ValueExpression = string(dAtA[iNdEx:postIndex])
    +			iNdEx = postIndex
    +		default:
    +			iNdEx = preIndex
    +			skippy, err := skipGenerated(dAtA[iNdEx:])
    +			if err != nil {
    +				return err
    +			}
    +			if (skippy < 0) || (iNdEx+skippy) < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if (iNdEx + skippy) > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			iNdEx += skippy
    +		}
    +	}
    +
    +	if iNdEx > l {
    +		return io.ErrUnexpectedEOF
    +	}
    +	return nil
    +}
    +func (m *ExpressionWarning) Unmarshal(dAtA []byte) error {
    +	l := len(dAtA)
    +	iNdEx := 0
    +	for iNdEx < l {
    +		preIndex := iNdEx
    +		var wire uint64
    +		for shift := uint(0); ; shift += 7 {
    +			if shift >= 64 {
    +				return ErrIntOverflowGenerated
    +			}
    +			if iNdEx >= l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			b := dAtA[iNdEx]
    +			iNdEx++
    +			wire |= uint64(b&0x7F) << shift
    +			if b < 0x80 {
    +				break
    +			}
    +		}
    +		fieldNum := int32(wire >> 3)
    +		wireType := int(wire & 0x7)
    +		if wireType == 4 {
    +			return fmt.Errorf("proto: ExpressionWarning: wiretype end group for non-group")
    +		}
    +		if fieldNum <= 0 {
    +			return fmt.Errorf("proto: ExpressionWarning: illegal tag %d (wire type %d)", fieldNum, wire)
    +		}
    +		switch fieldNum {
    +		case 2:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field FieldRef", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.FieldRef = string(dAtA[iNdEx:postIndex])
    +			iNdEx = postIndex
    +		case 3:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Warning", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.Warning = string(dAtA[iNdEx:postIndex])
    +			iNdEx = postIndex
    +		default:
    +			iNdEx = preIndex
    +			skippy, err := skipGenerated(dAtA[iNdEx:])
    +			if err != nil {
    +				return err
    +			}
    +			if (skippy < 0) || (iNdEx+skippy) < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if (iNdEx + skippy) > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			iNdEx += skippy
    +		}
    +	}
    +
    +	if iNdEx > l {
    +		return io.ErrUnexpectedEOF
    +	}
    +	return nil
    +}
    +func (m *JSONPatch) Unmarshal(dAtA []byte) error {
    +	l := len(dAtA)
    +	iNdEx := 0
    +	for iNdEx < l {
    +		preIndex := iNdEx
    +		var wire uint64
    +		for shift := uint(0); ; shift += 7 {
    +			if shift >= 64 {
    +				return ErrIntOverflowGenerated
    +			}
    +			if iNdEx >= l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			b := dAtA[iNdEx]
    +			iNdEx++
    +			wire |= uint64(b&0x7F) << shift
    +			if b < 0x80 {
    +				break
    +			}
    +		}
    +		fieldNum := int32(wire >> 3)
    +		wireType := int(wire & 0x7)
    +		if wireType == 4 {
    +			return fmt.Errorf("proto: JSONPatch: wiretype end group for non-group")
    +		}
    +		if fieldNum <= 0 {
    +			return fmt.Errorf("proto: JSONPatch: illegal tag %d (wire type %d)", fieldNum, wire)
    +		}
    +		switch fieldNum {
    +		case 1:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Expression", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.Expression = string(dAtA[iNdEx:postIndex])
    +			iNdEx = postIndex
    +		default:
    +			iNdEx = preIndex
    +			skippy, err := skipGenerated(dAtA[iNdEx:])
    +			if err != nil {
    +				return err
    +			}
    +			if (skippy < 0) || (iNdEx+skippy) < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if (iNdEx + skippy) > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			iNdEx += skippy
    +		}
    +	}
    +
    +	if iNdEx > l {
    +		return io.ErrUnexpectedEOF
    +	}
    +	return nil
    +}
    +func (m *MatchCondition) Unmarshal(dAtA []byte) error {
    +	l := len(dAtA)
    +	iNdEx := 0
    +	for iNdEx < l {
    +		preIndex := iNdEx
    +		var wire uint64
    +		for shift := uint(0); ; shift += 7 {
    +			if shift >= 64 {
    +				return ErrIntOverflowGenerated
    +			}
    +			if iNdEx >= l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			b := dAtA[iNdEx]
    +			iNdEx++
    +			wire |= uint64(b&0x7F) << shift
    +			if b < 0x80 {
    +				break
    +			}
    +		}
    +		fieldNum := int32(wire >> 3)
    +		wireType := int(wire & 0x7)
    +		if wireType == 4 {
    +			return fmt.Errorf("proto: MatchCondition: wiretype end group for non-group")
    +		}
    +		if fieldNum <= 0 {
    +			return fmt.Errorf("proto: MatchCondition: illegal tag %d (wire type %d)", fieldNum, wire)
    +		}
    +		switch fieldNum {
    +		case 1:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.Name = string(dAtA[iNdEx:postIndex])
    +			iNdEx = postIndex
    +		case 2:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Expression", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.Expression = string(dAtA[iNdEx:postIndex])
    +			iNdEx = postIndex
    +		default:
    +			iNdEx = preIndex
    +			skippy, err := skipGenerated(dAtA[iNdEx:])
    +			if err != nil {
    +				return err
    +			}
    +			if (skippy < 0) || (iNdEx+skippy) < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if (iNdEx + skippy) > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			iNdEx += skippy
    +		}
    +	}
    +
    +	if iNdEx > l {
    +		return io.ErrUnexpectedEOF
    +	}
    +	return nil
    +}
    +func (m *MatchResources) Unmarshal(dAtA []byte) error {
    +	l := len(dAtA)
    +	iNdEx := 0
    +	for iNdEx < l {
    +		preIndex := iNdEx
    +		var wire uint64
    +		for shift := uint(0); ; shift += 7 {
    +			if shift >= 64 {
    +				return ErrIntOverflowGenerated
    +			}
    +			if iNdEx >= l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			b := dAtA[iNdEx]
    +			iNdEx++
    +			wire |= uint64(b&0x7F) << shift
    +			if b < 0x80 {
    +				break
    +			}
    +		}
    +		fieldNum := int32(wire >> 3)
    +		wireType := int(wire & 0x7)
    +		if wireType == 4 {
    +			return fmt.Errorf("proto: MatchResources: wiretype end group for non-group")
    +		}
    +		if fieldNum <= 0 {
    +			return fmt.Errorf("proto: MatchResources: illegal tag %d (wire type %d)", fieldNum, wire)
    +		}
    +		switch fieldNum {
    +		case 1:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field NamespaceSelector", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			if m.NamespaceSelector == nil {
    +				m.NamespaceSelector = &v1.LabelSelector{}
    +			}
    +			if err := m.NamespaceSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		case 2:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field ObjectSelector", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			if m.ObjectSelector == nil {
    +				m.ObjectSelector = &v1.LabelSelector{}
    +			}
    +			if err := m.ObjectSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		case 3:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field ResourceRules", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.ResourceRules = append(m.ResourceRules, NamedRuleWithOperations{})
    +			if err := m.ResourceRules[len(m.ResourceRules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		case 4:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field ExcludeResourceRules", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.ExcludeResourceRules = append(m.ExcludeResourceRules, NamedRuleWithOperations{})
    +			if err := m.ExcludeResourceRules[len(m.ExcludeResourceRules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		case 7:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field MatchPolicy", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			s := MatchPolicyType(dAtA[iNdEx:postIndex])
    +			m.MatchPolicy = &s
    +			iNdEx = postIndex
    +		default:
    +			iNdEx = preIndex
    +			skippy, err := skipGenerated(dAtA[iNdEx:])
    +			if err != nil {
    +				return err
    +			}
    +			if (skippy < 0) || (iNdEx+skippy) < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if (iNdEx + skippy) > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			iNdEx += skippy
    +		}
    +	}
    +
    +	if iNdEx > l {
    +		return io.ErrUnexpectedEOF
    +	}
    +	return nil
    +}
    +func (m *MutatingAdmissionPolicy) Unmarshal(dAtA []byte) error {
    +	l := len(dAtA)
    +	iNdEx := 0
    +	for iNdEx < l {
    +		preIndex := iNdEx
    +		var wire uint64
    +		for shift := uint(0); ; shift += 7 {
    +			if shift >= 64 {
    +				return ErrIntOverflowGenerated
    +			}
    +			if iNdEx >= l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			b := dAtA[iNdEx]
    +			iNdEx++
    +			wire |= uint64(b&0x7F) << shift
    +			if b < 0x80 {
    +				break
    +			}
    +		}
    +		fieldNum := int32(wire >> 3)
    +		wireType := int(wire & 0x7)
    +		if wireType == 4 {
    +			return fmt.Errorf("proto: MutatingAdmissionPolicy: wiretype end group for non-group")
    +		}
    +		if fieldNum <= 0 {
    +			return fmt.Errorf("proto: MutatingAdmissionPolicy: illegal tag %d (wire type %d)", fieldNum, wire)
    +		}
    +		switch fieldNum {
    +		case 1:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		case 2:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		default:
    +			iNdEx = preIndex
    +			skippy, err := skipGenerated(dAtA[iNdEx:])
    +			if err != nil {
    +				return err
    +			}
    +			if (skippy < 0) || (iNdEx+skippy) < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if (iNdEx + skippy) > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			iNdEx += skippy
    +		}
    +	}
    +
    +	if iNdEx > l {
    +		return io.ErrUnexpectedEOF
     	}
    -	s := strings.Join([]string{`&WebhookClientConfig{`,
    -		`Service:` + strings.Replace(this.Service.String(), "ServiceReference", "ServiceReference", 1) + `,`,
    -		`CABundle:` + valueToStringGenerated(this.CABundle) + `,`,
    -		`URL:` + valueToStringGenerated(this.URL) + `,`,
    -		`}`,
    -	}, "")
    -	return s
    +	return nil
     }
    -func valueToStringGenerated(v interface{}) string {
    -	rv := reflect.ValueOf(v)
    -	if rv.IsNil() {
    -		return "nil"
    +func (m *MutatingAdmissionPolicyBinding) Unmarshal(dAtA []byte) error {
    +	l := len(dAtA)
    +	iNdEx := 0
    +	for iNdEx < l {
    +		preIndex := iNdEx
    +		var wire uint64
    +		for shift := uint(0); ; shift += 7 {
    +			if shift >= 64 {
    +				return ErrIntOverflowGenerated
    +			}
    +			if iNdEx >= l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			b := dAtA[iNdEx]
    +			iNdEx++
    +			wire |= uint64(b&0x7F) << shift
    +			if b < 0x80 {
    +				break
    +			}
    +		}
    +		fieldNum := int32(wire >> 3)
    +		wireType := int(wire & 0x7)
    +		if wireType == 4 {
    +			return fmt.Errorf("proto: MutatingAdmissionPolicyBinding: wiretype end group for non-group")
    +		}
    +		if fieldNum <= 0 {
    +			return fmt.Errorf("proto: MutatingAdmissionPolicyBinding: illegal tag %d (wire type %d)", fieldNum, wire)
    +		}
    +		switch fieldNum {
    +		case 1:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		case 2:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		default:
    +			iNdEx = preIndex
    +			skippy, err := skipGenerated(dAtA[iNdEx:])
    +			if err != nil {
    +				return err
    +			}
    +			if (skippy < 0) || (iNdEx+skippy) < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if (iNdEx + skippy) > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			iNdEx += skippy
    +		}
     	}
    -	pv := reflect.Indirect(rv).Interface()
    -	return fmt.Sprintf("*%v", pv)
    +
    +	if iNdEx > l {
    +		return io.ErrUnexpectedEOF
    +	}
    +	return nil
     }
    -func (m *AuditAnnotation) Unmarshal(dAtA []byte) error {
    +func (m *MutatingAdmissionPolicyBindingList) Unmarshal(dAtA []byte) error {
     	l := len(dAtA)
     	iNdEx := 0
     	for iNdEx < l {
    @@ -3273,17 +5243,17 @@ func (m *AuditAnnotation) Unmarshal(dAtA []byte) error {
     		fieldNum := int32(wire >> 3)
     		wireType := int(wire & 0x7)
     		if wireType == 4 {
    -			return fmt.Errorf("proto: AuditAnnotation: wiretype end group for non-group")
    +			return fmt.Errorf("proto: MutatingAdmissionPolicyBindingList: wiretype end group for non-group")
     		}
     		if fieldNum <= 0 {
    -			return fmt.Errorf("proto: AuditAnnotation: illegal tag %d (wire type %d)", fieldNum, wire)
    +			return fmt.Errorf("proto: MutatingAdmissionPolicyBindingList: illegal tag %d (wire type %d)", fieldNum, wire)
     		}
     		switch fieldNum {
     		case 1:
     			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
    +				return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
     			}
    -			var stringLen uint64
    +			var msglen int
     			for shift := uint(0); ; shift += 7 {
     				if shift >= 64 {
     					return ErrIntOverflowGenerated
    @@ -3293,29 +5263,30 @@ func (m *AuditAnnotation) Unmarshal(dAtA []byte) error {
     				}
     				b := dAtA[iNdEx]
     				iNdEx++
    -				stringLen |= uint64(b&0x7F) << shift
    +				msglen |= int(b&0x7F) << shift
     				if b < 0x80 {
     					break
     				}
     			}
    -			intStringLen := int(stringLen)
    -			if intStringLen < 0 {
    +			if msglen < 0 {
     				return ErrInvalidLengthGenerated
     			}
    -			postIndex := iNdEx + intStringLen
    +			postIndex := iNdEx + msglen
     			if postIndex < 0 {
     				return ErrInvalidLengthGenerated
     			}
     			if postIndex > l {
     				return io.ErrUnexpectedEOF
     			}
    -			m.Key = string(dAtA[iNdEx:postIndex])
    +			if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
     			iNdEx = postIndex
     		case 2:
     			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field ValueExpression", wireType)
    +				return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
     			}
    -			var stringLen uint64
    +			var msglen int
     			for shift := uint(0); ; shift += 7 {
     				if shift >= 64 {
     					return ErrIntOverflowGenerated
    @@ -3325,23 +5296,25 @@ func (m *AuditAnnotation) Unmarshal(dAtA []byte) error {
     				}
     				b := dAtA[iNdEx]
     				iNdEx++
    -				stringLen |= uint64(b&0x7F) << shift
    +				msglen |= int(b&0x7F) << shift
     				if b < 0x80 {
     					break
     				}
     			}
    -			intStringLen := int(stringLen)
    -			if intStringLen < 0 {
    +			if msglen < 0 {
     				return ErrInvalidLengthGenerated
     			}
    -			postIndex := iNdEx + intStringLen
    +			postIndex := iNdEx + msglen
     			if postIndex < 0 {
     				return ErrInvalidLengthGenerated
     			}
     			if postIndex > l {
     				return io.ErrUnexpectedEOF
     			}
    -			m.ValueExpression = string(dAtA[iNdEx:postIndex])
    +			m.Items = append(m.Items, MutatingAdmissionPolicyBinding{})
    +			if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
     			iNdEx = postIndex
     		default:
     			iNdEx = preIndex
    @@ -3364,7 +5337,7 @@ func (m *AuditAnnotation) Unmarshal(dAtA []byte) error {
     	}
     	return nil
     }
    -func (m *ExpressionWarning) Unmarshal(dAtA []byte) error {
    +func (m *MutatingAdmissionPolicyBindingSpec) Unmarshal(dAtA []byte) error {
     	l := len(dAtA)
     	iNdEx := 0
     	for iNdEx < l {
    @@ -3387,15 +5360,15 @@ func (m *ExpressionWarning) Unmarshal(dAtA []byte) error {
     		fieldNum := int32(wire >> 3)
     		wireType := int(wire & 0x7)
     		if wireType == 4 {
    -			return fmt.Errorf("proto: ExpressionWarning: wiretype end group for non-group")
    +			return fmt.Errorf("proto: MutatingAdmissionPolicyBindingSpec: wiretype end group for non-group")
     		}
     		if fieldNum <= 0 {
    -			return fmt.Errorf("proto: ExpressionWarning: illegal tag %d (wire type %d)", fieldNum, wire)
    +			return fmt.Errorf("proto: MutatingAdmissionPolicyBindingSpec: illegal tag %d (wire type %d)", fieldNum, wire)
     		}
     		switch fieldNum {
    -		case 2:
    +		case 1:
     			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field FieldRef", wireType)
    +				return fmt.Errorf("proto: wrong wireType = %d for field PolicyName", wireType)
     			}
     			var stringLen uint64
     			for shift := uint(0); ; shift += 7 {
    @@ -3423,13 +5396,49 @@ func (m *ExpressionWarning) Unmarshal(dAtA []byte) error {
     			if postIndex > l {
     				return io.ErrUnexpectedEOF
     			}
    -			m.FieldRef = string(dAtA[iNdEx:postIndex])
    +			m.PolicyName = string(dAtA[iNdEx:postIndex])
    +			iNdEx = postIndex
    +		case 2:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field ParamRef", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			if m.ParamRef == nil {
    +				m.ParamRef = &ParamRef{}
    +			}
    +			if err := m.ParamRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
     			iNdEx = postIndex
     		case 3:
     			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field Warning", wireType)
    +				return fmt.Errorf("proto: wrong wireType = %d for field MatchResources", wireType)
     			}
    -			var stringLen uint64
    +			var msglen int
     			for shift := uint(0); ; shift += 7 {
     				if shift >= 64 {
     					return ErrIntOverflowGenerated
    @@ -3439,23 +5448,27 @@ func (m *ExpressionWarning) Unmarshal(dAtA []byte) error {
     				}
     				b := dAtA[iNdEx]
     				iNdEx++
    -				stringLen |= uint64(b&0x7F) << shift
    +				msglen |= int(b&0x7F) << shift
     				if b < 0x80 {
     					break
     				}
     			}
    -			intStringLen := int(stringLen)
    -			if intStringLen < 0 {
    +			if msglen < 0 {
     				return ErrInvalidLengthGenerated
     			}
    -			postIndex := iNdEx + intStringLen
    +			postIndex := iNdEx + msglen
     			if postIndex < 0 {
     				return ErrInvalidLengthGenerated
     			}
     			if postIndex > l {
     				return io.ErrUnexpectedEOF
     			}
    -			m.Warning = string(dAtA[iNdEx:postIndex])
    +			if m.MatchResources == nil {
    +				m.MatchResources = &MatchResources{}
    +			}
    +			if err := m.MatchResources.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
     			iNdEx = postIndex
     		default:
     			iNdEx = preIndex
    @@ -3478,7 +5491,7 @@ func (m *ExpressionWarning) Unmarshal(dAtA []byte) error {
     	}
     	return nil
     }
    -func (m *MatchCondition) Unmarshal(dAtA []byte) error {
    +func (m *MutatingAdmissionPolicyList) Unmarshal(dAtA []byte) error {
     	l := len(dAtA)
     	iNdEx := 0
     	for iNdEx < l {
    @@ -3501,17 +5514,17 @@ func (m *MatchCondition) Unmarshal(dAtA []byte) error {
     		fieldNum := int32(wire >> 3)
     		wireType := int(wire & 0x7)
     		if wireType == 4 {
    -			return fmt.Errorf("proto: MatchCondition: wiretype end group for non-group")
    +			return fmt.Errorf("proto: MutatingAdmissionPolicyList: wiretype end group for non-group")
     		}
     		if fieldNum <= 0 {
    -			return fmt.Errorf("proto: MatchCondition: illegal tag %d (wire type %d)", fieldNum, wire)
    +			return fmt.Errorf("proto: MutatingAdmissionPolicyList: illegal tag %d (wire type %d)", fieldNum, wire)
     		}
     		switch fieldNum {
     		case 1:
     			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
    +				return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
     			}
    -			var stringLen uint64
    +			var msglen int
     			for shift := uint(0); ; shift += 7 {
     				if shift >= 64 {
     					return ErrIntOverflowGenerated
    @@ -3521,29 +5534,30 @@ func (m *MatchCondition) Unmarshal(dAtA []byte) error {
     				}
     				b := dAtA[iNdEx]
     				iNdEx++
    -				stringLen |= uint64(b&0x7F) << shift
    +				msglen |= int(b&0x7F) << shift
     				if b < 0x80 {
     					break
     				}
     			}
    -			intStringLen := int(stringLen)
    -			if intStringLen < 0 {
    +			if msglen < 0 {
     				return ErrInvalidLengthGenerated
     			}
    -			postIndex := iNdEx + intStringLen
    +			postIndex := iNdEx + msglen
     			if postIndex < 0 {
     				return ErrInvalidLengthGenerated
     			}
     			if postIndex > l {
     				return io.ErrUnexpectedEOF
     			}
    -			m.Name = string(dAtA[iNdEx:postIndex])
    +			if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
     			iNdEx = postIndex
     		case 2:
     			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field Expression", wireType)
    +				return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
     			}
    -			var stringLen uint64
    +			var msglen int
     			for shift := uint(0); ; shift += 7 {
     				if shift >= 64 {
     					return ErrIntOverflowGenerated
    @@ -3553,23 +5567,25 @@ func (m *MatchCondition) Unmarshal(dAtA []byte) error {
     				}
     				b := dAtA[iNdEx]
     				iNdEx++
    -				stringLen |= uint64(b&0x7F) << shift
    +				msglen |= int(b&0x7F) << shift
     				if b < 0x80 {
     					break
     				}
     			}
    -			intStringLen := int(stringLen)
    -			if intStringLen < 0 {
    +			if msglen < 0 {
     				return ErrInvalidLengthGenerated
     			}
    -			postIndex := iNdEx + intStringLen
    +			postIndex := iNdEx + msglen
     			if postIndex < 0 {
     				return ErrInvalidLengthGenerated
     			}
     			if postIndex > l {
     				return io.ErrUnexpectedEOF
     			}
    -			m.Expression = string(dAtA[iNdEx:postIndex])
    +			m.Items = append(m.Items, MutatingAdmissionPolicy{})
    +			if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
     			iNdEx = postIndex
     		default:
     			iNdEx = preIndex
    @@ -3592,7 +5608,7 @@ func (m *MatchCondition) Unmarshal(dAtA []byte) error {
     	}
     	return nil
     }
    -func (m *MatchResources) Unmarshal(dAtA []byte) error {
    +func (m *MutatingAdmissionPolicySpec) Unmarshal(dAtA []byte) error {
     	l := len(dAtA)
     	iNdEx := 0
     	for iNdEx < l {
    @@ -3615,15 +5631,15 @@ func (m *MatchResources) Unmarshal(dAtA []byte) error {
     		fieldNum := int32(wire >> 3)
     		wireType := int(wire & 0x7)
     		if wireType == 4 {
    -			return fmt.Errorf("proto: MatchResources: wiretype end group for non-group")
    +			return fmt.Errorf("proto: MutatingAdmissionPolicySpec: wiretype end group for non-group")
     		}
     		if fieldNum <= 0 {
    -			return fmt.Errorf("proto: MatchResources: illegal tag %d (wire type %d)", fieldNum, wire)
    +			return fmt.Errorf("proto: MutatingAdmissionPolicySpec: illegal tag %d (wire type %d)", fieldNum, wire)
     		}
     		switch fieldNum {
     		case 1:
     			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field NamespaceSelector", wireType)
    +				return fmt.Errorf("proto: wrong wireType = %d for field ParamKind", wireType)
     			}
     			var msglen int
     			for shift := uint(0); ; shift += 7 {
    @@ -3650,16 +5666,16 @@ func (m *MatchResources) Unmarshal(dAtA []byte) error {
     			if postIndex > l {
     				return io.ErrUnexpectedEOF
     			}
    -			if m.NamespaceSelector == nil {
    -				m.NamespaceSelector = &v1.LabelSelector{}
    +			if m.ParamKind == nil {
    +				m.ParamKind = &ParamKind{}
     			}
    -			if err := m.NamespaceSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +			if err := m.ParamKind.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
     				return err
     			}
     			iNdEx = postIndex
     		case 2:
     			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field ObjectSelector", wireType)
    +				return fmt.Errorf("proto: wrong wireType = %d for field MatchConstraints", wireType)
     			}
     			var msglen int
     			for shift := uint(0); ; shift += 7 {
    @@ -3686,16 +5702,16 @@ func (m *MatchResources) Unmarshal(dAtA []byte) error {
     			if postIndex > l {
     				return io.ErrUnexpectedEOF
     			}
    -			if m.ObjectSelector == nil {
    -				m.ObjectSelector = &v1.LabelSelector{}
    +			if m.MatchConstraints == nil {
    +				m.MatchConstraints = &MatchResources{}
     			}
    -			if err := m.ObjectSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +			if err := m.MatchConstraints.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
     				return err
     			}
     			iNdEx = postIndex
     		case 3:
     			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field ResourceRules", wireType)
    +				return fmt.Errorf("proto: wrong wireType = %d for field Variables", wireType)
     			}
     			var msglen int
     			for shift := uint(0); ; shift += 7 {
    @@ -3722,14 +5738,14 @@ func (m *MatchResources) Unmarshal(dAtA []byte) error {
     			if postIndex > l {
     				return io.ErrUnexpectedEOF
     			}
    -			m.ResourceRules = append(m.ResourceRules, NamedRuleWithOperations{})
    -			if err := m.ResourceRules[len(m.ResourceRules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +			m.Variables = append(m.Variables, Variable{})
    +			if err := m.Variables[len(m.Variables)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
     				return err
     			}
     			iNdEx = postIndex
     		case 4:
     			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field ExcludeResourceRules", wireType)
    +				return fmt.Errorf("proto: wrong wireType = %d for field Mutations", wireType)
     			}
     			var msglen int
     			for shift := uint(0); ; shift += 7 {
    @@ -3756,14 +5772,81 @@ func (m *MatchResources) Unmarshal(dAtA []byte) error {
     			if postIndex > l {
     				return io.ErrUnexpectedEOF
     			}
    -			m.ExcludeResourceRules = append(m.ExcludeResourceRules, NamedRuleWithOperations{})
    -			if err := m.ExcludeResourceRules[len(m.ExcludeResourceRules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +			m.Mutations = append(m.Mutations, Mutation{})
    +			if err := m.Mutations[len(m.Mutations)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		case 5:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field FailurePolicy", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			s := FailurePolicyType(dAtA[iNdEx:postIndex])
    +			m.FailurePolicy = &s
    +			iNdEx = postIndex
    +		case 6:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field MatchConditions", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.MatchConditions = append(m.MatchConditions, MatchCondition{})
    +			if err := m.MatchConditions[len(m.MatchConditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
     				return err
     			}
     			iNdEx = postIndex
     		case 7:
     			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field MatchPolicy", wireType)
    +				return fmt.Errorf("proto: wrong wireType = %d for field ReinvocationPolicy", wireType)
     			}
     			var stringLen uint64
     			for shift := uint(0); ; shift += 7 {
    @@ -3791,8 +5874,7 @@ func (m *MatchResources) Unmarshal(dAtA []byte) error {
     			if postIndex > l {
     				return io.ErrUnexpectedEOF
     			}
    -			s := MatchPolicyType(dAtA[iNdEx:postIndex])
    -			m.MatchPolicy = &s
    +			m.ReinvocationPolicy = k8s_io_api_admissionregistration_v1.ReinvocationPolicyType(dAtA[iNdEx:postIndex])
     			iNdEx = postIndex
     		default:
     			iNdEx = preIndex
    @@ -4160,7 +6242,7 @@ func (m *MutatingWebhook) Unmarshal(dAtA []byte) error {
     			if postIndex > l {
     				return io.ErrUnexpectedEOF
     			}
    -			s := ReinvocationPolicyType(dAtA[iNdEx:postIndex])
    +			s := k8s_io_api_admissionregistration_v1.ReinvocationPolicyType(dAtA[iNdEx:postIndex])
     			m.ReinvocationPolicy = &s
     			iNdEx = postIndex
     		case 11:
    @@ -4488,6 +6570,160 @@ func (m *MutatingWebhookConfigurationList) Unmarshal(dAtA []byte) error {
     	}
     	return nil
     }
    +func (m *Mutation) Unmarshal(dAtA []byte) error {
    +	l := len(dAtA)
    +	iNdEx := 0
    +	for iNdEx < l {
    +		preIndex := iNdEx
    +		var wire uint64
    +		for shift := uint(0); ; shift += 7 {
    +			if shift >= 64 {
    +				return ErrIntOverflowGenerated
    +			}
    +			if iNdEx >= l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			b := dAtA[iNdEx]
    +			iNdEx++
    +			wire |= uint64(b&0x7F) << shift
    +			if b < 0x80 {
    +				break
    +			}
    +		}
    +		fieldNum := int32(wire >> 3)
    +		wireType := int(wire & 0x7)
    +		if wireType == 4 {
    +			return fmt.Errorf("proto: Mutation: wiretype end group for non-group")
    +		}
    +		if fieldNum <= 0 {
    +			return fmt.Errorf("proto: Mutation: illegal tag %d (wire type %d)", fieldNum, wire)
    +		}
    +		switch fieldNum {
    +		case 2:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field PatchType", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.PatchType = PatchType(dAtA[iNdEx:postIndex])
    +			iNdEx = postIndex
    +		case 3:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field ApplyConfiguration", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			if m.ApplyConfiguration == nil {
    +				m.ApplyConfiguration = &ApplyConfiguration{}
    +			}
    +			if err := m.ApplyConfiguration.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		case 4:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field JSONPatch", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			if m.JSONPatch == nil {
    +				m.JSONPatch = &JSONPatch{}
    +			}
    +			if err := m.JSONPatch.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		default:
    +			iNdEx = preIndex
    +			skippy, err := skipGenerated(dAtA[iNdEx:])
    +			if err != nil {
    +				return err
    +			}
    +			if (skippy < 0) || (iNdEx+skippy) < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if (iNdEx + skippy) > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			iNdEx += skippy
    +		}
    +	}
    +
    +	if iNdEx > l {
    +		return io.ErrUnexpectedEOF
    +	}
    +	return nil
    +}
     func (m *NamedRuleWithOperations) Unmarshal(dAtA []byte) error {
     	l := len(dAtA)
     	iNdEx := 0
    diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto b/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto
    index 30f99f64d..fb47a2005 100644
    --- a/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto
    +++ b/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto
    @@ -29,6 +29,51 @@ import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto";
     // Package-wide variables from generator "generated".
     option go_package = "k8s.io/api/admissionregistration/v1beta1";
     
    +// ApplyConfiguration defines the desired configuration values of an object.
    +message ApplyConfiguration {
    +  // expression will be evaluated by CEL to create an apply configuration.
    +  // ref: https://github.com/google/cel-spec
    +  //
    +  // Apply configurations are declared in CEL using object initialization. For example, this CEL expression
    +  // returns an apply configuration to set a single field:
    +  //
    +  // 	Object{
    +  // 	  spec: Object.spec{
    +  // 	    serviceAccountName: "example"
    +  // 	  }
    +  // 	}
    +  //
    +  // Apply configurations may not modify atomic structs, maps or arrays due to the risk of accidental deletion of
    +  // values not included in the apply configuration.
    +  //
    +  // CEL expressions have access to the object types needed to create apply configurations:
    +  //
    +  // - 'Object' - CEL type of the resource object.
    +  // - 'Object.' - CEL type of object field (such as 'Object.spec')
    +  // - 'Object.....` - CEL type of nested field (such as 'Object.spec.containers')
    +  //
    +  // CEL expressions have access to the contents of the API request, organized into CEL variables as well as some other useful variables:
    +  //
    +  // - 'object' - The object from the incoming request. The value is null for DELETE requests.
    +  // - 'oldObject' - The existing object. The value is null for CREATE requests.
    +  // - 'request' - Attributes of the API request([ref](/pkg/apis/admission/types.go#AdmissionRequest)).
    +  // - 'params' - Parameter resource referred to by the policy binding being evaluated. Only populated if the policy has a ParamKind.
    +  // - 'namespaceObject' - The namespace object that the incoming object belongs to. The value is null for cluster-scoped resources.
    +  // - 'variables' - Map of composited variables, from its name to its lazily evaluated value.
    +  //   For example, a variable named 'foo' can be accessed as 'variables.foo'.
    +  // - 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request.
    +  //   See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz
    +  // - 'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the
    +  //   request resource.
    +  //
    +  // The `apiVersion`, `kind`, `metadata.name` and `metadata.generateName` are always accessible from the root of the
    +  // object. No other metadata properties are accessible.
    +  //
    +  // Only property names of the form `[a-zA-Z_.-/][a-zA-Z0-9_.-/]*` are accessible.
    +  // Required.
    +  optional string expression = 1;
    +}
    +
     // AuditAnnotation describes how to produce an audit annotation for an API request.
     message AuditAnnotation {
       // key specifies the audit annotation key. The audit annotation keys of
    @@ -79,6 +124,75 @@ message ExpressionWarning {
       optional string warning = 3;
     }
     
    +// JSONPatch defines a JSON Patch.
    +message JSONPatch {
    +  // expression will be evaluated by CEL to create a [JSON patch](https://jsonpatch.com/).
    +  // ref: https://github.com/google/cel-spec
    +  //
    +  // expression must return an array of JSONPatch values.
    +  //
    +  // For example, this CEL expression returns a JSON patch to conditionally modify a value:
    +  //
    +  // 	  [
    +  // 	    JSONPatch{op: "test", path: "/spec/example", value: "Red"},
    +  // 	    JSONPatch{op: "replace", path: "/spec/example", value: "Green"}
    +  // 	  ]
    +  //
    +  // To define an object for the patch value, use Object types. For example:
    +  //
    +  // 	  [
    +  // 	    JSONPatch{
    +  // 	      op: "add",
    +  // 	      path: "/spec/selector",
    +  // 	      value: Object.spec.selector{matchLabels: {"environment": "test"}}
    +  // 	    }
    +  // 	  ]
    +  //
    +  // To use strings containing '/' and '~' as JSONPatch path keys, use "jsonpatch.escapeKey". For example:
    +  //
    +  // 	  [
    +  // 	    JSONPatch{
    +  // 	      op: "add",
    +  // 	      path: "/metadata/labels/" + jsonpatch.escapeKey("example.com/environment"),
    +  // 	      value: "test"
    +  // 	    },
    +  // 	  ]
    +  //
    +  // CEL expressions have access to the types needed to create JSON patches and objects:
    +  //
    +  // - 'JSONPatch' - CEL type of JSON Patch operations. JSONPatch has the fields 'op', 'from', 'path' and 'value'.
    +  //   See [JSON patch](https://jsonpatch.com/) for more details. The 'value' field may be set to any of: string,
    +  //   integer, array, map or object.  If set, the 'path' and 'from' fields must be set to a
    +  //   [JSON pointer](https://datatracker.ietf.org/doc/html/rfc6901/) string, where the 'jsonpatch.escapeKey()' CEL
    +  //   function may be used to escape path keys containing '/' and '~'.
    +  // - 'Object' - CEL type of the resource object.
    +  // - 'Object.' - CEL type of object field (such as 'Object.spec')
    +  // - 'Object.....` - CEL type of nested field (such as 'Object.spec.containers')
    +  //
    +  // CEL expressions have access to the contents of the API request, organized into CEL variables as well as some other useful variables:
    +  //
    +  // - 'object' - The object from the incoming request. The value is null for DELETE requests.
    +  // - 'oldObject' - The existing object. The value is null for CREATE requests.
    +  // - 'request' - Attributes of the API request([ref](/pkg/apis/admission/types.go#AdmissionRequest)).
    +  // - 'params' - Parameter resource referred to by the policy binding being evaluated. Only populated if the policy has a ParamKind.
    +  // - 'namespaceObject' - The namespace object that the incoming object belongs to. The value is null for cluster-scoped resources.
    +  // - 'variables' - Map of composited variables, from its name to its lazily evaluated value.
    +  //   For example, a variable named 'foo' can be accessed as 'variables.foo'.
    +  // - 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request.
    +  //   See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz
    +  // - 'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the
    +  //   request resource.
    +  //
    +  // CEL expressions have access to [Kubernetes CEL function libraries](https://kubernetes.io/docs/reference/using-api/cel/#cel-options-language-features-and-libraries)
    +  // as well as:
    +  //
    +  // - 'jsonpatch.escapeKey' - Performs JSONPatch key escaping. '~' and  '/' are escaped as '~0' and `~1' respectively).
    +  //
    +  // Only property names of the form `[a-zA-Z_.-/][a-zA-Z0-9_.-/]*` are accessible.
    +  // Required.
    +  optional string expression = 1;
    +}
    +
     // MatchCondition represents a condition which must be fulfilled for a request to be sent to a webhook.
     message MatchCondition {
       // Name is an identifier for this match condition, used for strategic merging of MatchConditions,
    @@ -203,6 +317,173 @@ message MatchResources {
       optional string matchPolicy = 7;
     }
     
    +// MutatingAdmissionPolicy describes the definition of an admission mutation policy that mutates the object coming into admission chain.
    +message MutatingAdmissionPolicy {
    +  // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.
    +  // +optional
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
    +
    +  // Specification of the desired behavior of the MutatingAdmissionPolicy.
    +  optional MutatingAdmissionPolicySpec spec = 2;
    +}
    +
    +// MutatingAdmissionPolicyBinding binds the MutatingAdmissionPolicy with parametrized resources.
    +// MutatingAdmissionPolicyBinding and the optional parameter resource together define how cluster administrators
    +// configure policies for clusters.
    +//
    +// For a given admission request, each binding will cause its policy to be
    +// evaluated N times, where N is 1 for policies/bindings that don't use
    +// params, otherwise N is the number of parameters selected by the binding.
    +// Each evaluation is constrained by a [runtime cost budget](https://kubernetes.io/docs/reference/using-api/cel/#runtime-cost-budget).
    +//
    +// Adding/removing policies, bindings, or params can not affect whether a
    +// given (policy, binding, param) combination is within its own CEL budget.
    +message MutatingAdmissionPolicyBinding {
    +  // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.
    +  // +optional
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
    +
    +  // Specification of the desired behavior of the MutatingAdmissionPolicyBinding.
    +  optional MutatingAdmissionPolicyBindingSpec spec = 2;
    +}
    +
    +// MutatingAdmissionPolicyBindingList is a list of MutatingAdmissionPolicyBinding.
    +message MutatingAdmissionPolicyBindingList {
    +  // Standard list metadata.
    +  // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
    +  // +optional
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
    +
    +  // List of PolicyBinding.
    +  repeated MutatingAdmissionPolicyBinding items = 2;
    +}
    +
    +// MutatingAdmissionPolicyBindingSpec is the specification of the MutatingAdmissionPolicyBinding.
    +message MutatingAdmissionPolicyBindingSpec {
    +  // policyName references a MutatingAdmissionPolicy name which the MutatingAdmissionPolicyBinding binds to.
    +  // If the referenced resource does not exist, this binding is considered invalid and will be ignored
    +  // Required.
    +  optional string policyName = 1;
    +
    +  // paramRef specifies the parameter resource used to configure the admission control policy.
    +  // It should point to a resource of the type specified in spec.ParamKind of the bound MutatingAdmissionPolicy.
    +  // If the policy specifies a ParamKind and the resource referred to by ParamRef does not exist, this binding is considered mis-configured and the FailurePolicy of the MutatingAdmissionPolicy applied.
    +  // If the policy does not specify a ParamKind then this field is ignored, and the rules are evaluated without a param.
    +  // +optional
    +  optional ParamRef paramRef = 2;
    +
    +  // matchResources limits what resources match this binding and may be mutated by it.
    +  // Note that if matchResources matches a resource, the resource must also match a policy's matchConstraints and
    +  // matchConditions before the resource may be mutated.
    +  // When matchResources is unset, it does not constrain resource matching, and only the policy's matchConstraints
    +  // and matchConditions must match for the resource to be mutated.
    +  // Additionally, matchResources.resourceRules are optional and do not constraint matching when unset.
    +  // Note that this is differs from MutatingAdmissionPolicy matchConstraints, where resourceRules are required.
    +  // The CREATE, UPDATE and CONNECT operations are allowed.  The DELETE operation may not be matched.
    +  // '*' matches CREATE, UPDATE and CONNECT.
    +  // +optional
    +  optional MatchResources matchResources = 3;
    +}
    +
    +// MutatingAdmissionPolicyList is a list of MutatingAdmissionPolicy.
    +message MutatingAdmissionPolicyList {
    +  // Standard list metadata.
    +  // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
    +  // +optional
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
    +
    +  // List of ValidatingAdmissionPolicy.
    +  repeated MutatingAdmissionPolicy items = 2;
    +}
    +
    +// MutatingAdmissionPolicySpec is the specification of the desired behavior of the admission policy.
    +message MutatingAdmissionPolicySpec {
    +  // paramKind specifies the kind of resources used to parameterize this policy.
    +  // If absent, there are no parameters for this policy and the param CEL variable will not be provided to validation expressions.
    +  // If paramKind refers to a non-existent kind, this policy definition is mis-configured and the FailurePolicy is applied.
    +  // If paramKind is specified but paramRef is unset in MutatingAdmissionPolicyBinding, the params variable will be null.
    +  // +optional
    +  optional ParamKind paramKind = 1;
    +
    +  // matchConstraints specifies what resources this policy is designed to validate.
    +  // The MutatingAdmissionPolicy cares about a request if it matches _all_ Constraints.
    +  // However, in order to prevent clusters from being put into an unstable state that cannot be recovered from via the API
    +  // MutatingAdmissionPolicy cannot match MutatingAdmissionPolicy and MutatingAdmissionPolicyBinding.
    +  // The CREATE, UPDATE and CONNECT operations are allowed.  The DELETE operation may not be matched.
    +  // '*' matches CREATE, UPDATE and CONNECT.
    +  // Required.
    +  optional MatchResources matchConstraints = 2;
    +
    +  // variables contain definitions of variables that can be used in composition of other expressions.
    +  // Each variable is defined as a named CEL expression.
    +  // The variables defined here will be available under `variables` in other expressions of the policy
    +  // except matchConditions because matchConditions are evaluated before the rest of the policy.
    +  //
    +  // The expression of a variable can refer to other variables defined earlier in the list but not those after.
    +  // Thus, variables must be sorted by the order of first appearance and acyclic.
    +  // +listType=atomic
    +  // +optional
    +  repeated Variable variables = 3;
    +
    +  // mutations contain operations to perform on matching objects.
    +  // mutations may not be empty; a minimum of one mutation is required.
    +  // mutations are evaluated in order, and are reinvoked according to
    +  // the reinvocationPolicy.
    +  // The mutations of a policy are invoked for each binding of this policy
    +  // and reinvocation of mutations occurs on a per binding basis.
    +  //
    +  // +listType=atomic
    +  // +optional
    +  repeated Mutation mutations = 4;
    +
    +  // failurePolicy defines how to handle failures for the admission policy. Failures can
    +  // occur from CEL expression parse errors, type check errors, runtime errors and invalid
    +  // or mis-configured policy definitions or bindings.
    +  //
    +  // A policy is invalid if paramKind refers to a non-existent Kind.
    +  // A binding is invalid if paramRef.name refers to a non-existent resource.
    +  //
    +  // failurePolicy does not define how validations that evaluate to false are handled.
    +  //
    +  // Allowed values are Ignore or Fail. Defaults to Fail.
    +  // +optional
    +  optional string failurePolicy = 5;
    +
    +  // matchConditions is a list of conditions that must be met for a request to be validated.
    +  // Match conditions filter requests that have already been matched by the matchConstraints.
    +  // An empty list of matchConditions matches all requests.
    +  // There are a maximum of 64 match conditions allowed.
    +  //
    +  // If a parameter object is provided, it can be accessed via the `params` handle in the same
    +  // manner as validation expressions.
    +  //
    +  // The exact matching logic is (in order):
    +  //   1. If ANY matchCondition evaluates to FALSE, the policy is skipped.
    +  //   2. If ALL matchConditions evaluate to TRUE, the policy is evaluated.
    +  //   3. If any matchCondition evaluates to an error (but none are FALSE):
    +  //      - If failurePolicy=Fail, reject the request
    +  //      - If failurePolicy=Ignore, the policy is skipped
    +  //
    +  // +patchMergeKey=name
    +  // +patchStrategy=merge
    +  // +listType=map
    +  // +listMapKey=name
    +  // +optional
    +  repeated MatchCondition matchConditions = 6;
    +
    +  // reinvocationPolicy indicates whether mutations may be called multiple times per MutatingAdmissionPolicyBinding
    +  // as part of a single admission evaluation.
    +  // Allowed values are "Never" and "IfNeeded".
    +  //
    +  // Never: These mutations will not be called more than once per binding in a single admission evaluation.
    +  //
    +  // IfNeeded: These mutations may be invoked more than once per binding for a single admission request and there is no guarantee of
    +  // order with respect to other admission plugins, admission webhooks, bindings of this policy and admission policies.  Mutations are only
    +  // reinvoked when mutations change the object after this mutation is invoked.
    +  // Required.
    +  optional string reinvocationPolicy = 7;
    +}
    +
     // MutatingWebhook describes an admission webhook and the resources and operations it applies to.
     message MutatingWebhook {
       // The name of the admission webhook.
    @@ -401,6 +682,26 @@ message MutatingWebhookConfigurationList {
       repeated MutatingWebhookConfiguration items = 2;
     }
     
    +// Mutation specifies the CEL expression which is used to apply the Mutation.
    +message Mutation {
    +  // patchType indicates the patch strategy used.
    +  // Allowed values are "ApplyConfiguration" and "JSONPatch".
    +  // Required.
    +  //
    +  // +unionDiscriminator
    +  optional string patchType = 2;
    +
    +  // applyConfiguration defines the desired configuration values of an object.
    +  // The configuration is applied to the admission object using
    +  // [structured merge diff](https://github.com/kubernetes-sigs/structured-merge-diff).
    +  // A CEL expression is used to create apply configuration.
    +  optional ApplyConfiguration applyConfiguration = 3;
    +
    +  // jsonPatch defines a [JSON patch](https://jsonpatch.com/) operation to perform a mutation to the object.
    +  // A CEL expression is used to create the JSON patch.
    +  optional JSONPatch jsonPatch = 4;
    +}
    +
     // NamedRuleWithOperations is a tuple of Operations and Resources with ResourceNames.
     // +structType=atomic
     message NamedRuleWithOperations {
    diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/register.go b/vendor/k8s.io/api/admissionregistration/v1beta1/register.go
    index 363233a2f..be64c4a5f 100644
    --- a/vendor/k8s.io/api/admissionregistration/v1beta1/register.go
    +++ b/vendor/k8s.io/api/admissionregistration/v1beta1/register.go
    @@ -54,6 +54,10 @@ func addKnownTypes(scheme *runtime.Scheme) error {
     		&ValidatingAdmissionPolicyList{},
     		&ValidatingAdmissionPolicyBinding{},
     		&ValidatingAdmissionPolicyBindingList{},
    +		&MutatingAdmissionPolicy{},
    +		&MutatingAdmissionPolicyList{},
    +		&MutatingAdmissionPolicyBinding{},
    +		&MutatingAdmissionPolicyBindingList{},
     	)
     	metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
     	return nil
    diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/types.go b/vendor/k8s.io/api/admissionregistration/v1beta1/types.go
    index 0f5903123..cffdda82c 100644
    --- a/vendor/k8s.io/api/admissionregistration/v1beta1/types.go
    +++ b/vendor/k8s.io/api/admissionregistration/v1beta1/types.go
    @@ -1073,7 +1073,7 @@ type MutatingWebhook struct {
     }
     
     // ReinvocationPolicyType specifies what type of policy the admission hook uses.
    -type ReinvocationPolicyType string
    +type ReinvocationPolicyType = v1.ReinvocationPolicyType
     
     const (
     	// NeverReinvocationPolicy indicates that the webhook must not be called more than once in a
    @@ -1197,3 +1197,332 @@ type MatchCondition struct {
     	// Required.
     	Expression string `json:"expression" protobuf:"bytes,2,opt,name=expression"`
     }
    +
    +// +genclient
    +// +genclient:nonNamespaced
    +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    +// +k8s:prerelease-lifecycle-gen:introduced=1.34
    +
    +// MutatingAdmissionPolicy describes the definition of an admission mutation policy that mutates the object coming into admission chain.
    +type MutatingAdmissionPolicy struct {
    +	metav1.TypeMeta `json:",inline"`
    +	// Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.
    +	// +optional
    +	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
    +	// Specification of the desired behavior of the MutatingAdmissionPolicy.
    +	Spec MutatingAdmissionPolicySpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
    +}
    +
    +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    +// +k8s:prerelease-lifecycle-gen:introduced=1.34
    +
    +// MutatingAdmissionPolicyList is a list of MutatingAdmissionPolicy.
    +type MutatingAdmissionPolicyList struct {
    +	metav1.TypeMeta `json:",inline"`
    +	// Standard list metadata.
    +	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
    +	// +optional
    +	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
    +	// List of ValidatingAdmissionPolicy.
    +	Items []MutatingAdmissionPolicy `json:"items" protobuf:"bytes,2,rep,name=items"`
    +}
    +
    +// MutatingAdmissionPolicySpec is the specification of the desired behavior of the admission policy.
    +type MutatingAdmissionPolicySpec struct {
    +	// paramKind specifies the kind of resources used to parameterize this policy.
    +	// If absent, there are no parameters for this policy and the param CEL variable will not be provided to validation expressions.
    +	// If paramKind refers to a non-existent kind, this policy definition is mis-configured and the FailurePolicy is applied.
    +	// If paramKind is specified but paramRef is unset in MutatingAdmissionPolicyBinding, the params variable will be null.
    +	// +optional
    +	ParamKind *ParamKind `json:"paramKind,omitempty" protobuf:"bytes,1,rep,name=paramKind"`
    +
    +	// matchConstraints specifies what resources this policy is designed to validate.
    +	// The MutatingAdmissionPolicy cares about a request if it matches _all_ Constraints.
    +	// However, in order to prevent clusters from being put into an unstable state that cannot be recovered from via the API
    +	// MutatingAdmissionPolicy cannot match MutatingAdmissionPolicy and MutatingAdmissionPolicyBinding.
    +	// The CREATE, UPDATE and CONNECT operations are allowed.  The DELETE operation may not be matched.
    +	// '*' matches CREATE, UPDATE and CONNECT.
    +	// Required.
    +	MatchConstraints *MatchResources `json:"matchConstraints,omitempty" protobuf:"bytes,2,rep,name=matchConstraints"`
    +
    +	// variables contain definitions of variables that can be used in composition of other expressions.
    +	// Each variable is defined as a named CEL expression.
    +	// The variables defined here will be available under `variables` in other expressions of the policy
    +	// except matchConditions because matchConditions are evaluated before the rest of the policy.
    +	//
    +	// The expression of a variable can refer to other variables defined earlier in the list but not those after.
    +	// Thus, variables must be sorted by the order of first appearance and acyclic.
    +	// +listType=atomic
    +	// +optional
    +	Variables []Variable `json:"variables,omitempty" protobuf:"bytes,3,rep,name=variables"`
    +
    +	// mutations contain operations to perform on matching objects.
    +	// mutations may not be empty; a minimum of one mutation is required.
    +	// mutations are evaluated in order, and are reinvoked according to
    +	// the reinvocationPolicy.
    +	// The mutations of a policy are invoked for each binding of this policy
    +	// and reinvocation of mutations occurs on a per binding basis.
    +	//
    +	// +listType=atomic
    +	// +optional
    +	Mutations []Mutation `json:"mutations,omitempty" protobuf:"bytes,4,rep,name=mutations"`
    +
    +	// failurePolicy defines how to handle failures for the admission policy. Failures can
    +	// occur from CEL expression parse errors, type check errors, runtime errors and invalid
    +	// or mis-configured policy definitions or bindings.
    +	//
    +	// A policy is invalid if paramKind refers to a non-existent Kind.
    +	// A binding is invalid if paramRef.name refers to a non-existent resource.
    +	//
    +	// failurePolicy does not define how validations that evaluate to false are handled.
    +	//
    +	// Allowed values are Ignore or Fail. Defaults to Fail.
    +	// +optional
    +	FailurePolicy *FailurePolicyType `json:"failurePolicy,omitempty" protobuf:"bytes,5,opt,name=failurePolicy,casttype=FailurePolicyType"`
    +
    +	// matchConditions is a list of conditions that must be met for a request to be validated.
    +	// Match conditions filter requests that have already been matched by the matchConstraints.
    +	// An empty list of matchConditions matches all requests.
    +	// There are a maximum of 64 match conditions allowed.
    +	//
    +	// If a parameter object is provided, it can be accessed via the `params` handle in the same
    +	// manner as validation expressions.
    +	//
    +	// The exact matching logic is (in order):
    +	//   1. If ANY matchCondition evaluates to FALSE, the policy is skipped.
    +	//   2. If ALL matchConditions evaluate to TRUE, the policy is evaluated.
    +	//   3. If any matchCondition evaluates to an error (but none are FALSE):
    +	//      - If failurePolicy=Fail, reject the request
    +	//      - If failurePolicy=Ignore, the policy is skipped
    +	//
    +	// +patchMergeKey=name
    +	// +patchStrategy=merge
    +	// +listType=map
    +	// +listMapKey=name
    +	// +optional
    +	MatchConditions []MatchCondition `json:"matchConditions,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,6,rep,name=matchConditions"`
    +
    +	// reinvocationPolicy indicates whether mutations may be called multiple times per MutatingAdmissionPolicyBinding
    +	// as part of a single admission evaluation.
    +	// Allowed values are "Never" and "IfNeeded".
    +	//
    +	// Never: These mutations will not be called more than once per binding in a single admission evaluation.
    +	//
    +	// IfNeeded: These mutations may be invoked more than once per binding for a single admission request and there is no guarantee of
    +	// order with respect to other admission plugins, admission webhooks, bindings of this policy and admission policies.  Mutations are only
    +	// reinvoked when mutations change the object after this mutation is invoked.
    +	// Required.
    +	ReinvocationPolicy ReinvocationPolicyType `json:"reinvocationPolicy,omitempty" protobuf:"bytes,7,opt,name=reinvocationPolicy,casttype=ReinvocationPolicyType"`
    +}
    +
    +// Mutation specifies the CEL expression which is used to apply the Mutation.
    +type Mutation struct {
    +	// patchType indicates the patch strategy used.
    +	// Allowed values are "ApplyConfiguration" and "JSONPatch".
    +	// Required.
    +	//
    +	// +unionDiscriminator
    +	PatchType PatchType `json:"patchType" protobuf:"bytes,2,opt,name=patchType,casttype=PatchType"`
    +
    +	// applyConfiguration defines the desired configuration values of an object.
    +	// The configuration is applied to the admission object using
    +	// [structured merge diff](https://github.com/kubernetes-sigs/structured-merge-diff).
    +	// A CEL expression is used to create apply configuration.
    +	ApplyConfiguration *ApplyConfiguration `json:"applyConfiguration,omitempty" protobuf:"bytes,3,opt,name=applyConfiguration"`
    +
    +	// jsonPatch defines a [JSON patch](https://jsonpatch.com/) operation to perform a mutation to the object.
    +	// A CEL expression is used to create the JSON patch.
    +	JSONPatch *JSONPatch `json:"jsonPatch,omitempty" protobuf:"bytes,4,opt,name=jsonPatch"`
    +}
    +
    +// PatchType specifies the type of patch operation for a mutation.
    +// +enum
    +type PatchType string
    +
    +const (
    +	// ApplyConfiguration indicates that the mutation is using apply configuration to mutate the object.
    +	PatchTypeApplyConfiguration PatchType = "ApplyConfiguration"
    +	// JSONPatch indicates that the object is mutated through JSON Patch.
    +	PatchTypeJSONPatch PatchType = "JSONPatch"
    +)
    +
    +// ApplyConfiguration defines the desired configuration values of an object.
    +type ApplyConfiguration struct {
    +	// expression will be evaluated by CEL to create an apply configuration.
    +	// ref: https://github.com/google/cel-spec
    +	//
    +	// Apply configurations are declared in CEL using object initialization. For example, this CEL expression
    +	// returns an apply configuration to set a single field:
    +	//
    +	//	Object{
    +	//	  spec: Object.spec{
    +	//	    serviceAccountName: "example"
    +	//	  }
    +	//	}
    +	//
    +	// Apply configurations may not modify atomic structs, maps or arrays due to the risk of accidental deletion of
    +	// values not included in the apply configuration.
    +	//
    +	// CEL expressions have access to the object types needed to create apply configurations:
    +	//
    +	// - 'Object' - CEL type of the resource object.
    +	// - 'Object.' - CEL type of object field (such as 'Object.spec')
    +	// - 'Object.....` - CEL type of nested field (such as 'Object.spec.containers')
    +	//
    +	// CEL expressions have access to the contents of the API request, organized into CEL variables as well as some other useful variables:
    +	//
    +	// - 'object' - The object from the incoming request. The value is null for DELETE requests.
    +	// - 'oldObject' - The existing object. The value is null for CREATE requests.
    +	// - 'request' - Attributes of the API request([ref](/pkg/apis/admission/types.go#AdmissionRequest)).
    +	// - 'params' - Parameter resource referred to by the policy binding being evaluated. Only populated if the policy has a ParamKind.
    +	// - 'namespaceObject' - The namespace object that the incoming object belongs to. The value is null for cluster-scoped resources.
    +	// - 'variables' - Map of composited variables, from its name to its lazily evaluated value.
    +	//   For example, a variable named 'foo' can be accessed as 'variables.foo'.
    +	// - 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request.
    +	//   See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz
    +	// - 'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the
    +	//   request resource.
    +	//
    +	// The `apiVersion`, `kind`, `metadata.name` and `metadata.generateName` are always accessible from the root of the
    +	// object. No other metadata properties are accessible.
    +	//
    +	// Only property names of the form `[a-zA-Z_.-/][a-zA-Z0-9_.-/]*` are accessible.
    +	// Required.
    +	Expression string `json:"expression,omitempty" protobuf:"bytes,1,opt,name=expression"`
    +}
    +
    +// JSONPatch defines a JSON Patch.
    +type JSONPatch struct {
    +	// expression will be evaluated by CEL to create a [JSON patch](https://jsonpatch.com/).
    +	// ref: https://github.com/google/cel-spec
    +	//
    +	// expression must return an array of JSONPatch values.
    +	//
    +	// For example, this CEL expression returns a JSON patch to conditionally modify a value:
    +	//
    +	//	  [
    +	//	    JSONPatch{op: "test", path: "/spec/example", value: "Red"},
    +	//	    JSONPatch{op: "replace", path: "/spec/example", value: "Green"}
    +	//	  ]
    +	//
    +	// To define an object for the patch value, use Object types. For example:
    +	//
    +	//	  [
    +	//	    JSONPatch{
    +	//	      op: "add",
    +	//	      path: "/spec/selector",
    +	//	      value: Object.spec.selector{matchLabels: {"environment": "test"}}
    +	//	    }
    +	//	  ]
    +	//
    +	// To use strings containing '/' and '~' as JSONPatch path keys, use "jsonpatch.escapeKey". For example:
    +	//
    +	//	  [
    +	//	    JSONPatch{
    +	//	      op: "add",
    +	//	      path: "/metadata/labels/" + jsonpatch.escapeKey("example.com/environment"),
    +	//	      value: "test"
    +	//	    },
    +	//	  ]
    +	//
    +	// CEL expressions have access to the types needed to create JSON patches and objects:
    +	//
    +	// - 'JSONPatch' - CEL type of JSON Patch operations. JSONPatch has the fields 'op', 'from', 'path' and 'value'.
    +	//   See [JSON patch](https://jsonpatch.com/) for more details. The 'value' field may be set to any of: string,
    +	//   integer, array, map or object.  If set, the 'path' and 'from' fields must be set to a
    +	//   [JSON pointer](https://datatracker.ietf.org/doc/html/rfc6901/) string, where the 'jsonpatch.escapeKey()' CEL
    +	//   function may be used to escape path keys containing '/' and '~'.
    +	// - 'Object' - CEL type of the resource object.
    +	// - 'Object.' - CEL type of object field (such as 'Object.spec')
    +	// - 'Object.....` - CEL type of nested field (such as 'Object.spec.containers')
    +	//
    +	// CEL expressions have access to the contents of the API request, organized into CEL variables as well as some other useful variables:
    +	//
    +	// - 'object' - The object from the incoming request. The value is null for DELETE requests.
    +	// - 'oldObject' - The existing object. The value is null for CREATE requests.
    +	// - 'request' - Attributes of the API request([ref](/pkg/apis/admission/types.go#AdmissionRequest)).
    +	// - 'params' - Parameter resource referred to by the policy binding being evaluated. Only populated if the policy has a ParamKind.
    +	// - 'namespaceObject' - The namespace object that the incoming object belongs to. The value is null for cluster-scoped resources.
    +	// - 'variables' - Map of composited variables, from its name to its lazily evaluated value.
    +	//   For example, a variable named 'foo' can be accessed as 'variables.foo'.
    +	// - 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request.
    +	//   See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz
    +	// - 'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the
    +	//   request resource.
    +	//
    +	// CEL expressions have access to [Kubernetes CEL function libraries](https://kubernetes.io/docs/reference/using-api/cel/#cel-options-language-features-and-libraries)
    +	// as well as:
    +	//
    +	// - 'jsonpatch.escapeKey' - Performs JSONPatch key escaping. '~' and  '/' are escaped as '~0' and `~1' respectively).
    +	//
    +	//
    +	// Only property names of the form `[a-zA-Z_.-/][a-zA-Z0-9_.-/]*` are accessible.
    +	// Required.
    +	Expression string `json:"expression,omitempty" protobuf:"bytes,1,opt,name=expression"`
    +}
    +
    +// +genclient
    +// +genclient:nonNamespaced
    +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    +// +k8s:prerelease-lifecycle-gen:introduced=1.34
    +
    +// MutatingAdmissionPolicyBinding binds the MutatingAdmissionPolicy with parametrized resources.
    +// MutatingAdmissionPolicyBinding and the optional parameter resource together define how cluster administrators
    +// configure policies for clusters.
    +//
    +// For a given admission request, each binding will cause its policy to be
    +// evaluated N times, where N is 1 for policies/bindings that don't use
    +// params, otherwise N is the number of parameters selected by the binding.
    +// Each evaluation is constrained by a [runtime cost budget](https://kubernetes.io/docs/reference/using-api/cel/#runtime-cost-budget).
    +//
    +// Adding/removing policies, bindings, or params can not affect whether a
    +// given (policy, binding, param) combination is within its own CEL budget.
    +type MutatingAdmissionPolicyBinding struct {
    +	metav1.TypeMeta `json:",inline"`
    +	// Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.
    +	// +optional
    +	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
    +	// Specification of the desired behavior of the MutatingAdmissionPolicyBinding.
    +	Spec MutatingAdmissionPolicyBindingSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
    +}
    +
    +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    +// +k8s:prerelease-lifecycle-gen:introduced=1.34
    +
    +// MutatingAdmissionPolicyBindingList is a list of MutatingAdmissionPolicyBinding.
    +type MutatingAdmissionPolicyBindingList struct {
    +	metav1.TypeMeta `json:",inline"`
    +	// Standard list metadata.
    +	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
    +	// +optional
    +	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
    +	// List of PolicyBinding.
    +	Items []MutatingAdmissionPolicyBinding `json:"items" protobuf:"bytes,2,rep,name=items"`
    +}
    +
    +// MutatingAdmissionPolicyBindingSpec is the specification of the MutatingAdmissionPolicyBinding.
    +type MutatingAdmissionPolicyBindingSpec struct {
    +	// policyName references a MutatingAdmissionPolicy name which the MutatingAdmissionPolicyBinding binds to.
    +	// If the referenced resource does not exist, this binding is considered invalid and will be ignored
    +	// Required.
    +	PolicyName string `json:"policyName,omitempty" protobuf:"bytes,1,rep,name=policyName"`
    +
    +	// paramRef specifies the parameter resource used to configure the admission control policy.
    +	// It should point to a resource of the type specified in spec.ParamKind of the bound MutatingAdmissionPolicy.
    +	// If the policy specifies a ParamKind and the resource referred to by ParamRef does not exist, this binding is considered mis-configured and the FailurePolicy of the MutatingAdmissionPolicy applied.
    +	// If the policy does not specify a ParamKind then this field is ignored, and the rules are evaluated without a param.
    +	// +optional
    +	ParamRef *ParamRef `json:"paramRef,omitempty" protobuf:"bytes,2,rep,name=paramRef"`
    +
    +	// matchResources limits what resources match this binding and may be mutated by it.
    +	// Note that if matchResources matches a resource, the resource must also match a policy's matchConstraints and
    +	// matchConditions before the resource may be mutated.
    +	// When matchResources is unset, it does not constrain resource matching, and only the policy's matchConstraints
    +	// and matchConditions must match for the resource to be mutated.
    +	// Additionally, matchResources.resourceRules are optional and do not constraint matching when unset.
    +	// Note that this is differs from MutatingAdmissionPolicy matchConstraints, where resourceRules are required.
    +	// The CREATE, UPDATE and CONNECT operations are allowed.  The DELETE operation may not be matched.
    +	// '*' matches CREATE, UPDATE and CONNECT.
    +	// +optional
    +	MatchResources *MatchResources `json:"matchResources,omitempty" protobuf:"bytes,3,rep,name=matchResources"`
    +}
    diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/admissionregistration/v1beta1/types_swagger_doc_generated.go
    index cc1509b53..1a97c9472 100644
    --- a/vendor/k8s.io/api/admissionregistration/v1beta1/types_swagger_doc_generated.go
    +++ b/vendor/k8s.io/api/admissionregistration/v1beta1/types_swagger_doc_generated.go
    @@ -27,6 +27,15 @@ package v1beta1
     // Those methods can be generated by using hack/update-codegen.sh
     
     // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT.
    +var map_ApplyConfiguration = map[string]string{
    +	"":           "ApplyConfiguration defines the desired configuration values of an object.",
    +	"expression": "expression will be evaluated by CEL to create an apply configuration. ref: https://github.com/google/cel-spec\n\nApply configurations are declared in CEL using object initialization. For example, this CEL expression returns an apply configuration to set a single field:\n\n\tObject{\n\t  spec: Object.spec{\n\t    serviceAccountName: \"example\"\n\t  }\n\t}\n\nApply configurations may not modify atomic structs, maps or arrays due to the risk of accidental deletion of values not included in the apply configuration.\n\nCEL expressions have access to the object types needed to create apply configurations:\n\n- 'Object' - CEL type of the resource object. - 'Object.' - CEL type of object field (such as 'Object.spec') - 'Object.....` - CEL type of nested field (such as 'Object.spec.containers')\n\nCEL expressions have access to the contents of the API request, organized into CEL variables as well as some other useful variables:\n\n- 'object' - The object from the incoming request. The value is null for DELETE requests. - 'oldObject' - The existing object. The value is null for CREATE requests. - 'request' - Attributes of the API request([ref](/pkg/apis/admission/types.go#AdmissionRequest)). - 'params' - Parameter resource referred to by the policy binding being evaluated. Only populated if the policy has a ParamKind. - 'namespaceObject' - The namespace object that the incoming object belongs to. The value is null for cluster-scoped resources. - 'variables' - Map of composited variables, from its name to its lazily evaluated value.\n  For example, a variable named 'foo' can be accessed as 'variables.foo'.\n- 'authorizer' - A CEL Authorizer. 
May be used to perform authorization checks for the principal (user or service account) of the request.\n  See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz\n- 'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the\n  request resource.\n\nThe `apiVersion`, `kind`, `metadata.name` and `metadata.generateName` are always accessible from the root of the object. No other metadata properties are accessible.\n\nOnly property names of the form `[a-zA-Z_.-/][a-zA-Z0-9_.-/]*` are accessible. Required.",
    +}
    +
    +func (ApplyConfiguration) SwaggerDoc() map[string]string {
    +	return map_ApplyConfiguration
    +}
    +
     var map_AuditAnnotation = map[string]string{
     	"":                "AuditAnnotation describes how to produce an audit annotation for an API request.",
     	"key":             "key specifies the audit annotation key. The audit annotation keys of a ValidatingAdmissionPolicy must be unique. The key must be a qualified name ([A-Za-z0-9][-A-Za-z0-9_.]*) no more than 63 bytes in length.\n\nThe key is combined with the resource name of the ValidatingAdmissionPolicy to construct an audit annotation key: \"{ValidatingAdmissionPolicy name}/{key}\".\n\nIf an admission webhook uses the same resource name as this ValidatingAdmissionPolicy and the same audit annotation key, the annotation key will be identical. In this case, the first annotation written with the key will be included in the audit event and all subsequent annotations with the same key will be discarded.\n\nRequired.",
    @@ -47,6 +56,15 @@ func (ExpressionWarning) SwaggerDoc() map[string]string {
     	return map_ExpressionWarning
     }
     
    +var map_JSONPatch = map[string]string{
    +	"":           "JSONPatch defines a JSON Patch.",
    +	"expression": "expression will be evaluated by CEL to create a [JSON patch](https://jsonpatch.com/). ref: https://github.com/google/cel-spec\n\nexpression must return an array of JSONPatch values.\n\nFor example, this CEL expression returns a JSON patch to conditionally modify a value:\n\n\t  [\n\t    JSONPatch{op: \"test\", path: \"/spec/example\", value: \"Red\"},\n\t    JSONPatch{op: \"replace\", path: \"/spec/example\", value: \"Green\"}\n\t  ]\n\nTo define an object for the patch value, use Object types. For example:\n\n\t  [\n\t    JSONPatch{\n\t      op: \"add\",\n\t      path: \"/spec/selector\",\n\t      value: Object.spec.selector{matchLabels: {\"environment\": \"test\"}}\n\t    }\n\t  ]\n\nTo use strings containing '/' and '~' as JSONPatch path keys, use \"jsonpatch.escapeKey\". For example:\n\n\t  [\n\t    JSONPatch{\n\t      op: \"add\",\n\t      path: \"/metadata/labels/\" + jsonpatch.escapeKey(\"example.com/environment\"),\n\t      value: \"test\"\n\t    },\n\t  ]\n\nCEL expressions have access to the types needed to create JSON patches and objects:\n\n- 'JSONPatch' - CEL type of JSON Patch operations. JSONPatch has the fields 'op', 'from', 'path' and 'value'.\n  See [JSON patch](https://jsonpatch.com/) for more details. The 'value' field may be set to any of: string,\n  integer, array, map or object.  If set, the 'path' and 'from' fields must be set to a\n  [JSON pointer](https://datatracker.ietf.org/doc/html/rfc6901/) string, where the 'jsonpatch.escapeKey()' CEL\n  function may be used to escape path keys containing '/' and '~'.\n- 'Object' - CEL type of the resource object. - 'Object.' - CEL type of object field (such as 'Object.spec') - 'Object.....` - CEL type of nested field (such as 'Object.spec.containers')\n\nCEL expressions have access to the contents of the API request, organized into CEL variables as well as some other useful variables:\n\n- 'object' - The object from the incoming request. The value is null for DELETE requests. 
- 'oldObject' - The existing object. The value is null for CREATE requests. - 'request' - Attributes of the API request([ref](/pkg/apis/admission/types.go#AdmissionRequest)). - 'params' - Parameter resource referred to by the policy binding being evaluated. Only populated if the policy has a ParamKind. - 'namespaceObject' - The namespace object that the incoming object belongs to. The value is null for cluster-scoped resources. - 'variables' - Map of composited variables, from its name to its lazily evaluated value.\n  For example, a variable named 'foo' can be accessed as 'variables.foo'.\n- 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request.\n  See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz\n- 'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the\n  request resource.\n\nCEL expressions have access to [Kubernetes CEL function libraries](https://kubernetes.io/docs/reference/using-api/cel/#cel-options-language-features-and-libraries) as well as:\n\n- 'jsonpatch.escapeKey' - Performs JSONPatch key escaping. '~' and  '/' are escaped as '~0' and `~1' respectively).\n\nOnly property names of the form `[a-zA-Z_.-/][a-zA-Z0-9_.-/]*` are accessible. Required.",
    +}
    +
    +func (JSONPatch) SwaggerDoc() map[string]string {
    +	return map_JSONPatch
    +}
    +
     var map_MatchCondition = map[string]string{
     	"":           "MatchCondition represents a condition which must be fulfilled for a request to be sent to a webhook.",
     	"name":       "Name is an identifier for this match condition, used for strategic merging of MatchConditions, as well as providing an identifier for logging purposes. A good name should be descriptive of the associated expression. Name must be a qualified name consisting of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character (e.g. 'MyName',  or 'my.name',  or '123-abc', regex used for validation is '([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]') with an optional DNS subdomain prefix and '/' (e.g. 'example.com/MyName')\n\nRequired.",
    @@ -70,6 +88,72 @@ func (MatchResources) SwaggerDoc() map[string]string {
     	return map_MatchResources
     }
     
    +var map_MutatingAdmissionPolicy = map[string]string{
    +	"":         "MutatingAdmissionPolicy describes the definition of an admission mutation policy that mutates the object coming into admission chain.",
    +	"metadata": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.",
    +	"spec":     "Specification of the desired behavior of the MutatingAdmissionPolicy.",
    +}
    +
    +func (MutatingAdmissionPolicy) SwaggerDoc() map[string]string {
    +	return map_MutatingAdmissionPolicy
    +}
    +
    +var map_MutatingAdmissionPolicyBinding = map[string]string{
    +	"":         "MutatingAdmissionPolicyBinding binds the MutatingAdmissionPolicy with parametrized resources. MutatingAdmissionPolicyBinding and the optional parameter resource together define how cluster administrators configure policies for clusters.\n\nFor a given admission request, each binding will cause its policy to be evaluated N times, where N is 1 for policies/bindings that don't use params, otherwise N is the number of parameters selected by the binding. Each evaluation is constrained by a [runtime cost budget](https://kubernetes.io/docs/reference/using-api/cel/#runtime-cost-budget).\n\nAdding/removing policies, bindings, or params can not affect whether a given (policy, binding, param) combination is within its own CEL budget.",
    +	"metadata": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.",
    +	"spec":     "Specification of the desired behavior of the MutatingAdmissionPolicyBinding.",
    +}
    +
    +func (MutatingAdmissionPolicyBinding) SwaggerDoc() map[string]string {
    +	return map_MutatingAdmissionPolicyBinding
    +}
    +
    +var map_MutatingAdmissionPolicyBindingList = map[string]string{
    +	"":         "MutatingAdmissionPolicyBindingList is a list of MutatingAdmissionPolicyBinding.",
    +	"metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
    +	"items":    "List of PolicyBinding.",
    +}
    +
    +func (MutatingAdmissionPolicyBindingList) SwaggerDoc() map[string]string {
    +	return map_MutatingAdmissionPolicyBindingList
    +}
    +
    +var map_MutatingAdmissionPolicyBindingSpec = map[string]string{
    +	"":               "MutatingAdmissionPolicyBindingSpec is the specification of the MutatingAdmissionPolicyBinding.",
    +	"policyName":     "policyName references a MutatingAdmissionPolicy name which the MutatingAdmissionPolicyBinding binds to. If the referenced resource does not exist, this binding is considered invalid and will be ignored Required.",
    +	"paramRef":       "paramRef specifies the parameter resource used to configure the admission control policy. It should point to a resource of the type specified in spec.ParamKind of the bound MutatingAdmissionPolicy. If the policy specifies a ParamKind and the resource referred to by ParamRef does not exist, this binding is considered mis-configured and the FailurePolicy of the MutatingAdmissionPolicy applied. If the policy does not specify a ParamKind then this field is ignored, and the rules are evaluated without a param.",
    +	"matchResources": "matchResources limits what resources match this binding and may be mutated by it. Note that if matchResources matches a resource, the resource must also match a policy's matchConstraints and matchConditions before the resource may be mutated. When matchResources is unset, it does not constrain resource matching, and only the policy's matchConstraints and matchConditions must match for the resource to be mutated. Additionally, matchResources.resourceRules are optional and do not constraint matching when unset. Note that this is differs from MutatingAdmissionPolicy matchConstraints, where resourceRules are required. The CREATE, UPDATE and CONNECT operations are allowed.  The DELETE operation may not be matched. '*' matches CREATE, UPDATE and CONNECT.",
    +}
    +
    +func (MutatingAdmissionPolicyBindingSpec) SwaggerDoc() map[string]string {
    +	return map_MutatingAdmissionPolicyBindingSpec
    +}
    +
    +var map_MutatingAdmissionPolicyList = map[string]string{
    +	"":         "MutatingAdmissionPolicyList is a list of MutatingAdmissionPolicy.",
    +	"metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
    +	"items":    "List of ValidatingAdmissionPolicy.",
    +}
    +
    +func (MutatingAdmissionPolicyList) SwaggerDoc() map[string]string {
    +	return map_MutatingAdmissionPolicyList
    +}
    +
    +var map_MutatingAdmissionPolicySpec = map[string]string{
    +	"":                   "MutatingAdmissionPolicySpec is the specification of the desired behavior of the admission policy.",
    +	"paramKind":          "paramKind specifies the kind of resources used to parameterize this policy. If absent, there are no parameters for this policy and the param CEL variable will not be provided to validation expressions. If paramKind refers to a non-existent kind, this policy definition is mis-configured and the FailurePolicy is applied. If paramKind is specified but paramRef is unset in MutatingAdmissionPolicyBinding, the params variable will be null.",
    +	"matchConstraints":   "matchConstraints specifies what resources this policy is designed to validate. The MutatingAdmissionPolicy cares about a request if it matches _all_ Constraints. However, in order to prevent clusters from being put into an unstable state that cannot be recovered from via the API MutatingAdmissionPolicy cannot match MutatingAdmissionPolicy and MutatingAdmissionPolicyBinding. The CREATE, UPDATE and CONNECT operations are allowed.  The DELETE operation may not be matched. '*' matches CREATE, UPDATE and CONNECT. Required.",
    +	"variables":          "variables contain definitions of variables that can be used in composition of other expressions. Each variable is defined as a named CEL expression. The variables defined here will be available under `variables` in other expressions of the policy except matchConditions because matchConditions are evaluated before the rest of the policy.\n\nThe expression of a variable can refer to other variables defined earlier in the list but not those after. Thus, variables must be sorted by the order of first appearance and acyclic.",
    +	"mutations":          "mutations contain operations to perform on matching objects. mutations may not be empty; a minimum of one mutation is required. mutations are evaluated in order, and are reinvoked according to the reinvocationPolicy. The mutations of a policy are invoked for each binding of this policy and reinvocation of mutations occurs on a per binding basis.",
    +	"failurePolicy":      "failurePolicy defines how to handle failures for the admission policy. Failures can occur from CEL expression parse errors, type check errors, runtime errors and invalid or mis-configured policy definitions or bindings.\n\nA policy is invalid if paramKind refers to a non-existent Kind. A binding is invalid if paramRef.name refers to a non-existent resource.\n\nfailurePolicy does not define how validations that evaluate to false are handled.\n\nAllowed values are Ignore or Fail. Defaults to Fail.",
    +	"matchConditions":    "matchConditions is a list of conditions that must be met for a request to be validated. Match conditions filter requests that have already been matched by the matchConstraints. An empty list of matchConditions matches all requests. There are a maximum of 64 match conditions allowed.\n\nIf a parameter object is provided, it can be accessed via the `params` handle in the same manner as validation expressions.\n\nThe exact matching logic is (in order):\n  1. If ANY matchCondition evaluates to FALSE, the policy is skipped.\n  2. If ALL matchConditions evaluate to TRUE, the policy is evaluated.\n  3. If any matchCondition evaluates to an error (but none are FALSE):\n     - If failurePolicy=Fail, reject the request\n     - If failurePolicy=Ignore, the policy is skipped",
    +	"reinvocationPolicy": "reinvocationPolicy indicates whether mutations may be called multiple times per MutatingAdmissionPolicyBinding as part of a single admission evaluation. Allowed values are \"Never\" and \"IfNeeded\".\n\nNever: These mutations will not be called more than once per binding in a single admission evaluation.\n\nIfNeeded: These mutations may be invoked more than once per binding for a single admission request and there is no guarantee of order with respect to other admission plugins, admission webhooks, bindings of this policy and admission policies.  Mutations are only reinvoked when mutations change the object after this mutation is invoked. Required.",
    +}
    +
    +func (MutatingAdmissionPolicySpec) SwaggerDoc() map[string]string {
    +	return map_MutatingAdmissionPolicySpec
    +}
    +
     var map_MutatingWebhook = map[string]string{
     	"":                        "MutatingWebhook describes an admission webhook and the resources and operations it applies to.",
     	"name":                    "The name of the admission webhook. Name should be fully qualified, e.g., imagepolicy.kubernetes.io, where \"imagepolicy\" is the name of the webhook, and kubernetes.io is the name of the organization. Required.",
    @@ -110,6 +194,17 @@ func (MutatingWebhookConfigurationList) SwaggerDoc() map[string]string {
     	return map_MutatingWebhookConfigurationList
     }
     
    +var map_Mutation = map[string]string{
    +	"":                   "Mutation specifies the CEL expression which is used to apply the Mutation.",
    +	"patchType":          "patchType indicates the patch strategy used. Allowed values are \"ApplyConfiguration\" and \"JSONPatch\". Required.",
    +	"applyConfiguration": "applyConfiguration defines the desired configuration values of an object. The configuration is applied to the admission object using [structured merge diff](https://github.com/kubernetes-sigs/structured-merge-diff). A CEL expression is used to create apply configuration.",
    +	"jsonPatch":          "jsonPatch defines a [JSON patch](https://jsonpatch.com/) operation to perform a mutation to the object. A CEL expression is used to create the JSON patch.",
    +}
    +
    +func (Mutation) SwaggerDoc() map[string]string {
    +	return map_Mutation
    +}
    +
     var map_NamedRuleWithOperations = map[string]string{
     	"":              "NamedRuleWithOperations is a tuple of Operations and Resources with ResourceNames.",
     	"resourceNames": "ResourceNames is an optional white list of names that the rule applies to.  An empty set means that everything is allowed.",
    diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/admissionregistration/v1beta1/zz_generated.deepcopy.go
    index 4c10b1d11..3749a3d14 100644
    --- a/vendor/k8s.io/api/admissionregistration/v1beta1/zz_generated.deepcopy.go
    +++ b/vendor/k8s.io/api/admissionregistration/v1beta1/zz_generated.deepcopy.go
    @@ -27,6 +27,22 @@ import (
     	runtime "k8s.io/apimachinery/pkg/runtime"
     )
     
    +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
    +func (in *ApplyConfiguration) DeepCopyInto(out *ApplyConfiguration) {
    +	*out = *in
    +	return
    +}
    +
    +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApplyConfiguration.
    +func (in *ApplyConfiguration) DeepCopy() *ApplyConfiguration {
    +	if in == nil {
    +		return nil
    +	}
    +	out := new(ApplyConfiguration)
    +	in.DeepCopyInto(out)
    +	return out
    +}
    +
     // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
     func (in *AuditAnnotation) DeepCopyInto(out *AuditAnnotation) {
     	*out = *in
    @@ -59,6 +75,22 @@ func (in *ExpressionWarning) DeepCopy() *ExpressionWarning {
     	return out
     }
     
    +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
    +func (in *JSONPatch) DeepCopyInto(out *JSONPatch) {
    +	*out = *in
    +	return
    +}
    +
    +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JSONPatch.
    +func (in *JSONPatch) DeepCopy() *JSONPatch {
    +	if in == nil {
    +		return nil
    +	}
    +	out := new(JSONPatch)
    +	in.DeepCopyInto(out)
    +	return out
    +}
    +
     // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
     func (in *MatchCondition) DeepCopyInto(out *MatchCondition) {
     	*out = *in
    @@ -120,6 +152,200 @@ func (in *MatchResources) DeepCopy() *MatchResources {
     	return out
     }
     
    +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
    +func (in *MutatingAdmissionPolicy) DeepCopyInto(out *MutatingAdmissionPolicy) {
    +	*out = *in
    +	out.TypeMeta = in.TypeMeta
    +	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
    +	in.Spec.DeepCopyInto(&out.Spec)
    +	return
    +}
    +
    +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MutatingAdmissionPolicy.
    +func (in *MutatingAdmissionPolicy) DeepCopy() *MutatingAdmissionPolicy {
    +	if in == nil {
    +		return nil
    +	}
    +	out := new(MutatingAdmissionPolicy)
    +	in.DeepCopyInto(out)
    +	return out
    +}
    +
    +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
    +func (in *MutatingAdmissionPolicy) DeepCopyObject() runtime.Object {
    +	if c := in.DeepCopy(); c != nil {
    +		return c
    +	}
    +	return nil
    +}
    +
    +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
    +func (in *MutatingAdmissionPolicyBinding) DeepCopyInto(out *MutatingAdmissionPolicyBinding) {
    +	*out = *in
    +	out.TypeMeta = in.TypeMeta
    +	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
    +	in.Spec.DeepCopyInto(&out.Spec)
    +	return
    +}
    +
    +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MutatingAdmissionPolicyBinding.
    +func (in *MutatingAdmissionPolicyBinding) DeepCopy() *MutatingAdmissionPolicyBinding {
    +	if in == nil {
    +		return nil
    +	}
    +	out := new(MutatingAdmissionPolicyBinding)
    +	in.DeepCopyInto(out)
    +	return out
    +}
    +
    +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
    +func (in *MutatingAdmissionPolicyBinding) DeepCopyObject() runtime.Object {
    +	if c := in.DeepCopy(); c != nil {
    +		return c
    +	}
    +	return nil
    +}
    +
    +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
    +func (in *MutatingAdmissionPolicyBindingList) DeepCopyInto(out *MutatingAdmissionPolicyBindingList) {
    +	*out = *in
    +	out.TypeMeta = in.TypeMeta
    +	in.ListMeta.DeepCopyInto(&out.ListMeta)
    +	if in.Items != nil {
    +		in, out := &in.Items, &out.Items
    +		*out = make([]MutatingAdmissionPolicyBinding, len(*in))
    +		for i := range *in {
    +			(*in)[i].DeepCopyInto(&(*out)[i])
    +		}
    +	}
    +	return
    +}
    +
    +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MutatingAdmissionPolicyBindingList.
    +func (in *MutatingAdmissionPolicyBindingList) DeepCopy() *MutatingAdmissionPolicyBindingList {
    +	if in == nil {
    +		return nil
    +	}
    +	out := new(MutatingAdmissionPolicyBindingList)
    +	in.DeepCopyInto(out)
    +	return out
    +}
    +
    +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
    +func (in *MutatingAdmissionPolicyBindingList) DeepCopyObject() runtime.Object {
    +	if c := in.DeepCopy(); c != nil {
    +		return c
    +	}
    +	return nil
    +}
    +
    +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
    +func (in *MutatingAdmissionPolicyBindingSpec) DeepCopyInto(out *MutatingAdmissionPolicyBindingSpec) {
    +	*out = *in
    +	if in.ParamRef != nil {
    +		in, out := &in.ParamRef, &out.ParamRef
    +		*out = new(ParamRef)
    +		(*in).DeepCopyInto(*out)
    +	}
    +	if in.MatchResources != nil {
    +		in, out := &in.MatchResources, &out.MatchResources
    +		*out = new(MatchResources)
    +		(*in).DeepCopyInto(*out)
    +	}
    +	return
    +}
    +
    +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MutatingAdmissionPolicyBindingSpec.
    +func (in *MutatingAdmissionPolicyBindingSpec) DeepCopy() *MutatingAdmissionPolicyBindingSpec {
    +	if in == nil {
    +		return nil
    +	}
    +	out := new(MutatingAdmissionPolicyBindingSpec)
    +	in.DeepCopyInto(out)
    +	return out
    +}
    +
    +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
    +func (in *MutatingAdmissionPolicyList) DeepCopyInto(out *MutatingAdmissionPolicyList) {
    +	*out = *in
    +	out.TypeMeta = in.TypeMeta
    +	in.ListMeta.DeepCopyInto(&out.ListMeta)
    +	if in.Items != nil {
    +		in, out := &in.Items, &out.Items
    +		*out = make([]MutatingAdmissionPolicy, len(*in))
    +		for i := range *in {
    +			(*in)[i].DeepCopyInto(&(*out)[i])
    +		}
    +	}
    +	return
    +}
    +
    +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MutatingAdmissionPolicyList.
    +func (in *MutatingAdmissionPolicyList) DeepCopy() *MutatingAdmissionPolicyList {
    +	if in == nil {
    +		return nil
    +	}
    +	out := new(MutatingAdmissionPolicyList)
    +	in.DeepCopyInto(out)
    +	return out
    +}
    +
    +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
    +func (in *MutatingAdmissionPolicyList) DeepCopyObject() runtime.Object {
    +	if c := in.DeepCopy(); c != nil {
    +		return c
    +	}
    +	return nil
    +}
    +
    +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
    +func (in *MutatingAdmissionPolicySpec) DeepCopyInto(out *MutatingAdmissionPolicySpec) {
    +	*out = *in
    +	if in.ParamKind != nil {
    +		in, out := &in.ParamKind, &out.ParamKind
    +		*out = new(ParamKind)
    +		**out = **in
    +	}
    +	if in.MatchConstraints != nil {
    +		in, out := &in.MatchConstraints, &out.MatchConstraints
    +		*out = new(MatchResources)
    +		(*in).DeepCopyInto(*out)
    +	}
    +	if in.Variables != nil {
    +		in, out := &in.Variables, &out.Variables
    +		*out = make([]Variable, len(*in))
    +		copy(*out, *in)
    +	}
    +	if in.Mutations != nil {
    +		in, out := &in.Mutations, &out.Mutations
    +		*out = make([]Mutation, len(*in))
    +		for i := range *in {
    +			(*in)[i].DeepCopyInto(&(*out)[i])
    +		}
    +	}
    +	if in.FailurePolicy != nil {
    +		in, out := &in.FailurePolicy, &out.FailurePolicy
    +		*out = new(FailurePolicyType)
    +		**out = **in
    +	}
    +	if in.MatchConditions != nil {
    +		in, out := &in.MatchConditions, &out.MatchConditions
    +		*out = make([]MatchCondition, len(*in))
    +		copy(*out, *in)
    +	}
    +	return
    +}
    +
    +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MutatingAdmissionPolicySpec.
    +func (in *MutatingAdmissionPolicySpec) DeepCopy() *MutatingAdmissionPolicySpec {
    +	if in == nil {
    +		return nil
    +	}
    +	out := new(MutatingAdmissionPolicySpec)
    +	in.DeepCopyInto(out)
    +	return out
    +}
    +
     // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
     func (in *MutatingWebhook) DeepCopyInto(out *MutatingWebhook) {
     	*out = *in
    @@ -168,7 +394,7 @@ func (in *MutatingWebhook) DeepCopyInto(out *MutatingWebhook) {
     	}
     	if in.ReinvocationPolicy != nil {
     		in, out := &in.ReinvocationPolicy, &out.ReinvocationPolicy
    -		*out = new(ReinvocationPolicyType)
    +		*out = new(admissionregistrationv1.ReinvocationPolicyType)
     		**out = **in
     	}
     	if in.MatchConditions != nil {
    @@ -255,6 +481,32 @@ func (in *MutatingWebhookConfigurationList) DeepCopyObject() runtime.Object {
     	return nil
     }
     
    +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
    +func (in *Mutation) DeepCopyInto(out *Mutation) {
    +	*out = *in
    +	if in.ApplyConfiguration != nil {
    +		in, out := &in.ApplyConfiguration, &out.ApplyConfiguration
    +		*out = new(ApplyConfiguration)
    +		**out = **in
    +	}
    +	if in.JSONPatch != nil {
    +		in, out := &in.JSONPatch, &out.JSONPatch
    +		*out = new(JSONPatch)
    +		**out = **in
    +	}
    +	return
    +}
    +
    +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Mutation.
    +func (in *Mutation) DeepCopy() *Mutation {
    +	if in == nil {
    +		return nil
    +	}
    +	out := new(Mutation)
    +	in.DeepCopyInto(out)
    +	return out
    +}
    +
     // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
     func (in *NamedRuleWithOperations) DeepCopyInto(out *NamedRuleWithOperations) {
     	*out = *in
    diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/admissionregistration/v1beta1/zz_generated.prerelease-lifecycle.go
    index c1be5122a..4fc0596b3 100644
    --- a/vendor/k8s.io/api/admissionregistration/v1beta1/zz_generated.prerelease-lifecycle.go
    +++ b/vendor/k8s.io/api/admissionregistration/v1beta1/zz_generated.prerelease-lifecycle.go
    @@ -25,6 +25,78 @@ import (
     	schema "k8s.io/apimachinery/pkg/runtime/schema"
     )
     
    +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
    +func (in *MutatingAdmissionPolicy) APILifecycleIntroduced() (major, minor int) {
    +	return 1, 34
    +}
    +
    +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or  "k8s:prerelease-lifecycle-gen:introduced" plus three minor.
    +func (in *MutatingAdmissionPolicy) APILifecycleDeprecated() (major, minor int) {
    +	return 1, 37
    +}
    +
    +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or  "k8s:prerelease-lifecycle-gen:deprecated" plus three minor.
    +func (in *MutatingAdmissionPolicy) APILifecycleRemoved() (major, minor int) {
    +	return 1, 40
    +}
    +
    +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
    +func (in *MutatingAdmissionPolicyBinding) APILifecycleIntroduced() (major, minor int) {
    +	return 1, 34
    +}
    +
    +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or  "k8s:prerelease-lifecycle-gen:introduced" plus three minor.
    +func (in *MutatingAdmissionPolicyBinding) APILifecycleDeprecated() (major, minor int) {
    +	return 1, 37
    +}
    +
    +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or  "k8s:prerelease-lifecycle-gen:deprecated" plus three minor.
    +func (in *MutatingAdmissionPolicyBinding) APILifecycleRemoved() (major, minor int) {
    +	return 1, 40
    +}
    +
    +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
    +func (in *MutatingAdmissionPolicyBindingList) APILifecycleIntroduced() (major, minor int) {
    +	return 1, 34
    +}
    +
    +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or  "k8s:prerelease-lifecycle-gen:introduced" plus three minor.
    +func (in *MutatingAdmissionPolicyBindingList) APILifecycleDeprecated() (major, minor int) {
    +	return 1, 37
    +}
    +
    +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or  "k8s:prerelease-lifecycle-gen:deprecated" plus three minor.
    +func (in *MutatingAdmissionPolicyBindingList) APILifecycleRemoved() (major, minor int) {
    +	return 1, 40
    +}
    +
    +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
    +func (in *MutatingAdmissionPolicyList) APILifecycleIntroduced() (major, minor int) {
    +	return 1, 34
    +}
    +
    +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or  "k8s:prerelease-lifecycle-gen:introduced" plus three minor.
    +func (in *MutatingAdmissionPolicyList) APILifecycleDeprecated() (major, minor int) {
    +	return 1, 37
    +}
    +
    +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or  "k8s:prerelease-lifecycle-gen:deprecated" plus three minor.
    +func (in *MutatingAdmissionPolicyList) APILifecycleRemoved() (major, minor int) {
    +	return 1, 40
    +}
    +
     // APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
     // It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
     func (in *MutatingWebhookConfiguration) APILifecycleIntroduced() (major, minor int) {
    diff --git a/vendor/k8s.io/api/apidiscovery/v2/doc.go b/vendor/k8s.io/api/apidiscovery/v2/doc.go
    index 4f3ad5f13..f46d33e94 100644
    --- a/vendor/k8s.io/api/apidiscovery/v2/doc.go
    +++ b/vendor/k8s.io/api/apidiscovery/v2/doc.go
    @@ -20,4 +20,4 @@ limitations under the License.
     // +k8s:prerelease-lifecycle-gen=true
     // +groupName=apidiscovery.k8s.io
     
    -package v2 // import "k8s.io/api/apidiscovery/v2"
    +package v2
    diff --git a/vendor/k8s.io/api/apidiscovery/v2beta1/doc.go b/vendor/k8s.io/api/apidiscovery/v2beta1/doc.go
    index e85da226e..d4fceab68 100644
    --- a/vendor/k8s.io/api/apidiscovery/v2beta1/doc.go
    +++ b/vendor/k8s.io/api/apidiscovery/v2beta1/doc.go
    @@ -21,4 +21,4 @@ limitations under the License.
     
     // +groupName=apidiscovery.k8s.io
     
    -package v2beta1 // import "k8s.io/api/apidiscovery/v2beta1"
    +package v2beta1
    diff --git a/vendor/k8s.io/api/apiserverinternal/v1alpha1/doc.go b/vendor/k8s.io/api/apiserverinternal/v1alpha1/doc.go
    index a4da95d44..867d74165 100644
    --- a/vendor/k8s.io/api/apiserverinternal/v1alpha1/doc.go
    +++ b/vendor/k8s.io/api/apiserverinternal/v1alpha1/doc.go
    @@ -22,4 +22,4 @@ limitations under the License.
     
     // Package v1alpha1 contains the v1alpha1 version of the API used by the
     // apiservers themselves.
    -package v1alpha1 // import "k8s.io/api/apiserverinternal/v1alpha1"
    +package v1alpha1
    diff --git a/vendor/k8s.io/api/apps/v1/doc.go b/vendor/k8s.io/api/apps/v1/doc.go
    index d189e860f..51fe12c53 100644
    --- a/vendor/k8s.io/api/apps/v1/doc.go
    +++ b/vendor/k8s.io/api/apps/v1/doc.go
    @@ -19,4 +19,4 @@ limitations under the License.
     // +k8s:openapi-gen=true
     // +k8s:prerelease-lifecycle-gen=true
     
    -package v1 // import "k8s.io/api/apps/v1"
    +package v1
    diff --git a/vendor/k8s.io/api/apps/v1/generated.pb.go b/vendor/k8s.io/api/apps/v1/generated.pb.go
    index ea62a099f..eacc25931 100644
    --- a/vendor/k8s.io/api/apps/v1/generated.pb.go
    +++ b/vendor/k8s.io/api/apps/v1/generated.pb.go
    @@ -928,145 +928,147 @@ func init() {
     }
     
     var fileDescriptor_5b781835628d5338 = []byte{
    -	// 2194 bytes of a gzipped FileDescriptorProto
    +	// 2225 bytes of a gzipped FileDescriptorProto
     	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x5a, 0xcd, 0x6f, 0x1b, 0xc7,
    -	0x15, 0xd7, 0xf2, 0x43, 0xa2, 0x86, 0x96, 0x64, 0x8f, 0x54, 0x89, 0xb1, 0x1b, 0xd2, 0xdd, 0xb8,
    -	0xb6, 0x12, 0xc7, 0x64, 0xed, 0x38, 0x41, 0xe0, 0x14, 0x09, 0x44, 0x2a, 0x4d, 0xd3, 0xe8, 0xab,
    -	0x43, 0xcb, 0x01, 0xdc, 0xb4, 0xe8, 0x68, 0x39, 0xa6, 0x36, 0xde, 0x2f, 0xec, 0x0e, 0x15, 0x0b,
    -	0xbd, 0x14, 0x05, 0x7a, 0xeb, 0xa1, 0x7f, 0x43, 0xff, 0x81, 0xa2, 0x28, 0x9a, 0x5b, 0x10, 0x04,
    -	0xbd, 0xf8, 0x52, 0x20, 0xe8, 0xa5, 0x39, 0x11, 0x35, 0x73, 0x2a, 0x8a, 0xde, 0xda, 0x8b, 0x2f,
    -	0x2d, 0x66, 0x76, 0xf6, 0x7b, 0x56, 0xa4, 0xe4, 0x58, 0x69, 0x82, 0xdc, 0xb8, 0x33, 0xbf, 0xf7,
    -	0xdb, 0x37, 0x33, 0xef, 0xcd, 0xfb, 0xcd, 0x2c, 0x81, 0x7a, 0xff, 0x55, 0xaf, 0xa9, 0xdb, 0x2d,
    -	0xec, 0xe8, 0x2d, 0xec, 0x38, 0x5e, 0xeb, 0xe0, 0x7a, 0xab, 0x4f, 0x2c, 0xe2, 0x62, 0x4a, 0x7a,
    -	0x4d, 0xc7, 0xb5, 0xa9, 0x0d, 0xa1, 0x8f, 0x69, 0x62, 0x47, 0x6f, 0x32, 0x4c, 0xf3, 0xe0, 0xfa,
    -	0xf9, 0x6b, 0x7d, 0x9d, 0xee, 0x0f, 0xf6, 0x9a, 0x9a, 0x6d, 0xb6, 0xfa, 0x76, 0xdf, 0x6e, 0x71,
    -	0xe8, 0xde, 0xe0, 0x1e, 0x7f, 0xe2, 0x0f, 0xfc, 0x97, 0x4f, 0x71, 0x3e, 0xfe, 0x1a, 0xcd, 0x76,
    -	0x89, 0xe4, 0x35, 0xe7, 0x6f, 0x46, 0x18, 0x13, 0x6b, 0xfb, 0xba, 0x45, 0xdc, 0xc3, 0x96, 0x73,
    -	0xbf, 0xcf, 0x1a, 0xbc, 0x96, 0x49, 0x28, 0x96, 0x59, 0xb5, 0xf2, 0xac, 0xdc, 0x81, 0x45, 0x75,
    -	0x93, 0x64, 0x0c, 0x5e, 0x19, 0x67, 0xe0, 0x69, 0xfb, 0xc4, 0xc4, 0x19, 0xbb, 0x97, 0xf2, 0xec,
    -	0x06, 0x54, 0x37, 0x5a, 0xba, 0x45, 0x3d, 0xea, 0xa6, 0x8d, 0xd4, 0xff, 0x28, 0x00, 0x76, 0x6c,
    -	0x8b, 0xba, 0xb6, 0x61, 0x10, 0x17, 0x91, 0x03, 0xdd, 0xd3, 0x6d, 0x0b, 0xfe, 0x1c, 0x54, 0xd8,
    -	0x78, 0x7a, 0x98, 0xe2, 0x9a, 0x72, 0x51, 0x59, 0xad, 0xde, 0xf8, 0x5e, 0x33, 0x9a, 0xe4, 0x90,
    -	0xbe, 0xe9, 0xdc, 0xef, 0xb3, 0x06, 0xaf, 0xc9, 0xd0, 0xcd, 0x83, 0xeb, 0xcd, 0xed, 0xbd, 0xf7,
    -	0x89, 0x46, 0x37, 0x09, 0xc5, 0x6d, 0xf8, 0x70, 0xd8, 0x98, 0x1a, 0x0d, 0x1b, 0x20, 0x6a, 0x43,
    -	0x21, 0x2b, 0xdc, 0x06, 0x25, 0xce, 0x5e, 0xe0, 0xec, 0xd7, 0x72, 0xd9, 0xc5, 0xa0, 0x9b, 0x08,
    -	0x7f, 0xf0, 0xe6, 0x03, 0x4a, 0x2c, 0xe6, 0x5e, 0xfb, 0x8c, 0xa0, 0x2e, 0xad, 0x63, 0x8a, 0x11,
    -	0x27, 0x82, 0x2f, 0x82, 0x8a, 0x2b, 0xdc, 0xaf, 0x15, 0x2f, 0x2a, 0xab, 0xc5, 0xf6, 0x59, 0x81,
    -	0xaa, 0x04, 0xc3, 0x42, 0x21, 0x42, 0xfd, 0xb3, 0x02, 0x96, 0xb3, 0xe3, 0xde, 0xd0, 0x3d, 0x0a,
    -	0xdf, 0xcb, 0x8c, 0xbd, 0x39, 0xd9, 0xd8, 0x99, 0x35, 0x1f, 0x79, 0xf8, 0xe2, 0xa0, 0x25, 0x36,
    -	0xee, 0x77, 0x40, 0x59, 0xa7, 0xc4, 0xf4, 0x6a, 0x85, 0x8b, 0xc5, 0xd5, 0xea, 0x8d, 0xcb, 0xcd,
    -	0x6c, 0xec, 0x36, 0xb3, 0x8e, 0xb5, 0xe7, 0x04, 0x65, 0xf9, 0x6d, 0x66, 0x8c, 0x7c, 0x0e, 0xf5,
    -	0xbf, 0x0a, 0x98, 0x5d, 0xc7, 0xc4, 0xb4, 0xad, 0x2e, 0xa1, 0xa7, 0xb0, 0x68, 0x1d, 0x50, 0xf2,
    -	0x1c, 0xa2, 0x89, 0x45, 0xfb, 0x8e, 0xcc, 0xf7, 0xd0, 0x9d, 0xae, 0x43, 0xb4, 0x68, 0xa1, 0xd8,
    -	0x13, 0xe2, 0xc6, 0xf0, 0x1d, 0x30, 0xed, 0x51, 0x4c, 0x07, 0x1e, 0x5f, 0xa6, 0xea, 0x8d, 0xe7,
    -	0x8e, 0xa6, 0xe1, 0xd0, 0xf6, 0xbc, 0x20, 0x9a, 0xf6, 0x9f, 0x91, 0xa0, 0x50, 0xff, 0x51, 0x00,
    -	0x30, 0xc4, 0x76, 0x6c, 0xab, 0xa7, 0x53, 0x16, 0xbf, 0xb7, 0x40, 0x89, 0x1e, 0x3a, 0x84, 0x4f,
    -	0xc3, 0x6c, 0xfb, 0x72, 0xe0, 0xc5, 0xed, 0x43, 0x87, 0x3c, 0x1e, 0x36, 0x96, 0xb3, 0x16, 0xac,
    -	0x07, 0x71, 0x1b, 0xb8, 0x11, 0xfa, 0x57, 0xe0, 0xd6, 0x37, 0x93, 0xaf, 0x7e, 0x3c, 0x6c, 0x48,
    -	0x36, 0x8b, 0x66, 0xc8, 0x94, 0x74, 0x10, 0x1e, 0x00, 0x68, 0x60, 0x8f, 0xde, 0x76, 0xb1, 0xe5,
    -	0xf9, 0x6f, 0xd2, 0x4d, 0x22, 0x46, 0xfe, 0xc2, 0x64, 0xcb, 0xc3, 0x2c, 0xda, 0xe7, 0x85, 0x17,
    -	0x70, 0x23, 0xc3, 0x86, 0x24, 0x6f, 0x80, 0x97, 0xc1, 0xb4, 0x4b, 0xb0, 0x67, 0x5b, 0xb5, 0x12,
    -	0x1f, 0x45, 0x38, 0x81, 0x88, 0xb7, 0x22, 0xd1, 0x0b, 0x9f, 0x07, 0x33, 0x26, 0xf1, 0x3c, 0xdc,
    -	0x27, 0xb5, 0x32, 0x07, 0x2e, 0x08, 0xe0, 0xcc, 0xa6, 0xdf, 0x8c, 0x82, 0x7e, 0xf5, 0x0f, 0x0a,
    -	0x98, 0x0b, 0x67, 0xee, 0x14, 0x52, 0xa5, 0x9d, 0x4c, 0x95, 0x67, 0x8f, 0x8c, 0x93, 0x9c, 0x0c,
    -	0xf9, 0xb8, 0x18, 0xf3, 0x99, 0x05, 0x21, 0xfc, 0x29, 0xa8, 0x78, 0xc4, 0x20, 0x1a, 0xb5, 0x5d,
    -	0xe1, 0xf3, 0x4b, 0x13, 0xfa, 0x8c, 0xf7, 0x88, 0xd1, 0x15, 0xa6, 0xed, 0x33, 0xcc, 0xe9, 0xe0,
    -	0x09, 0x85, 0x94, 0xf0, 0xc7, 0xa0, 0x42, 0x89, 0xe9, 0x18, 0x98, 0x12, 0x91, 0x26, 0x89, 0xf8,
    -	0x66, 0xe1, 0xc2, 0xc8, 0x76, 0xec, 0xde, 0x6d, 0x01, 0xe3, 0x89, 0x12, 0xce, 0x43, 0xd0, 0x8a,
    -	0x42, 0x1a, 0x78, 0x1f, 0xcc, 0x0f, 0x9c, 0x1e, 0x43, 0x52, 0xb6, 0x75, 0xf7, 0x0f, 0x45, 0xf8,
    -	0x5c, 0x3d, 0x72, 0x42, 0x76, 0x13, 0x26, 0xed, 0x65, 0xf1, 0x82, 0xf9, 0x64, 0x3b, 0x4a, 0x51,
    -	0xc3, 0x35, 0xb0, 0x60, 0xea, 0x16, 0x22, 0xb8, 0x77, 0xd8, 0x25, 0x9a, 0x6d, 0xf5, 0x3c, 0x1e,
    -	0x40, 0xe5, 0xf6, 0x8a, 0x20, 0x58, 0xd8, 0x4c, 0x76, 0xa3, 0x34, 0x1e, 0x6e, 0x80, 0xa5, 0x60,
    -	0x9f, 0xfd, 0xa1, 0xee, 0x51, 0xdb, 0x3d, 0xdc, 0xd0, 0x4d, 0x9d, 0xd6, 0xa6, 0x39, 0x4f, 0x6d,
    -	0x34, 0x6c, 0x2c, 0x21, 0x49, 0x3f, 0x92, 0x5a, 0xa9, 0xbf, 0x99, 0x06, 0x0b, 0xa9, 0xdd, 0x00,
    -	0xde, 0x01, 0xcb, 0xda, 0xc0, 0x75, 0x89, 0x45, 0xb7, 0x06, 0xe6, 0x1e, 0x71, 0xbb, 0xda, 0x3e,
    -	0xe9, 0x0d, 0x0c, 0xd2, 0xe3, 0x2b, 0x5a, 0x6e, 0xd7, 0x85, 0xaf, 0xcb, 0x1d, 0x29, 0x0a, 0xe5,
    -	0x58, 0xc3, 0x1f, 0x01, 0x68, 0xf1, 0xa6, 0x4d, 0xdd, 0xf3, 0x42, 0xce, 0x02, 0xe7, 0x0c, 0x13,
    -	0x70, 0x2b, 0x83, 0x40, 0x12, 0x2b, 0xe6, 0x63, 0x8f, 0x78, 0xba, 0x4b, 0x7a, 0x69, 0x1f, 0x8b,
    -	0x49, 0x1f, 0xd7, 0xa5, 0x28, 0x94, 0x63, 0x0d, 0x5f, 0x06, 0x55, 0xff, 0x6d, 0x7c, 0xce, 0xc5,
    -	0xe2, 0x2c, 0x0a, 0xb2, 0xea, 0x56, 0xd4, 0x85, 0xe2, 0x38, 0x36, 0x34, 0x7b, 0xcf, 0x23, 0xee,
    -	0x01, 0xe9, 0xbd, 0xe5, 0x6b, 0x00, 0x56, 0x28, 0xcb, 0xbc, 0x50, 0x86, 0x43, 0xdb, 0xce, 0x20,
    -	0x90, 0xc4, 0x8a, 0x0d, 0xcd, 0x8f, 0x9a, 0xcc, 0xd0, 0xa6, 0x93, 0x43, 0xdb, 0x95, 0xa2, 0x50,
    -	0x8e, 0x35, 0x8b, 0x3d, 0xdf, 0xe5, 0xb5, 0x03, 0xac, 0x1b, 0x78, 0xcf, 0x20, 0xb5, 0x99, 0x64,
    -	0xec, 0x6d, 0x25, 0xbb, 0x51, 0x1a, 0x0f, 0xdf, 0x02, 0xe7, 0xfc, 0xa6, 0x5d, 0x0b, 0x87, 0x24,
    -	0x15, 0x4e, 0xf2, 0x8c, 0x20, 0x39, 0xb7, 0x95, 0x06, 0xa0, 0xac, 0x0d, 0xbc, 0x05, 0xe6, 0x35,
    -	0xdb, 0x30, 0x78, 0x3c, 0x76, 0xec, 0x81, 0x45, 0x6b, 0xb3, 0x9c, 0x05, 0xb2, 0x1c, 0xea, 0x24,
    -	0x7a, 0x50, 0x0a, 0x09, 0xef, 0x02, 0xa0, 0x05, 0xe5, 0xc0, 0xab, 0x81, 0xfc, 0x42, 0x9f, 0xad,
    -	0x43, 0x51, 0x01, 0x0e, 0x9b, 0x3c, 0x14, 0x63, 0x53, 0x3f, 0x56, 0xc0, 0x4a, 0x4e, 0x8e, 0xc3,
    -	0x37, 0x12, 0x55, 0xef, 0x6a, 0xaa, 0xea, 0x5d, 0xc8, 0x31, 0x8b, 0x95, 0x3e, 0x0d, 0xcc, 0x31,
    -	0xdd, 0xa1, 0x5b, 0x7d, 0x1f, 0x22, 0x76, 0xb0, 0x17, 0x64, 0xbe, 0xa3, 0x38, 0x30, 0xda, 0x86,
    -	0xcf, 0x8d, 0x86, 0x8d, 0xb9, 0x44, 0x1f, 0x4a, 0x72, 0xaa, 0xbf, 0x2a, 0x00, 0xb0, 0x4e, 0x1c,
    -	0xc3, 0x3e, 0x34, 0x89, 0x75, 0x1a, 0xaa, 0x65, 0x3d, 0xa1, 0x5a, 0x54, 0xe9, 0x42, 0x84, 0xfe,
    -	0xe4, 0xca, 0x96, 0x8d, 0x94, 0x6c, 0xb9, 0x34, 0x86, 0xe7, 0x68, 0xdd, 0xf2, 0xb7, 0x22, 0x58,
    -	0x8c, 0xc0, 0x91, 0x70, 0x79, 0x2d, 0xb1, 0x84, 0x57, 0x52, 0x4b, 0xb8, 0x22, 0x31, 0x79, 0x6a,
    -	0xca, 0xe5, 0x7d, 0x30, 0xcf, 0x74, 0x85, 0xbf, 0x6a, 0x5c, 0xb5, 0x4c, 0x1f, 0x5b, 0xb5, 0x84,
    -	0x55, 0x67, 0x23, 0xc1, 0x84, 0x52, 0xcc, 0x39, 0x2a, 0x69, 0xe6, 0xab, 0xa8, 0x92, 0xfe, 0xa8,
    -	0x80, 0xf9, 0x68, 0x99, 0x4e, 0x41, 0x26, 0x75, 0x92, 0x32, 0xa9, 0x7e, 0x74, 0x5c, 0xe6, 0xe8,
    -	0xa4, 0xbf, 0x96, 0xe2, 0x5e, 0x73, 0xa1, 0xb4, 0xca, 0x0e, 0x54, 0x8e, 0xa1, 0x6b, 0xd8, 0x13,
    -	0x65, 0xf5, 0x8c, 0x7f, 0x98, 0xf2, 0xdb, 0x50, 0xd8, 0x9b, 0x90, 0x54, 0x85, 0xa7, 0x2b, 0xa9,
    -	0x8a, 0x5f, 0x8c, 0xa4, 0xba, 0x0d, 0x2a, 0x5e, 0x20, 0xa6, 0x4a, 0x9c, 0xf2, 0xf2, 0xb8, 0x74,
    -	0x16, 0x3a, 0x2a, 0x64, 0x0d, 0x15, 0x54, 0xc8, 0x24, 0xd3, 0x4e, 0xe5, 0x2f, 0x53, 0x3b, 0xb1,
    -	0xf0, 0x76, 0xf0, 0xc0, 0x23, 0x3d, 0x9e, 0x4a, 0x95, 0x28, 0xbc, 0x77, 0x78, 0x2b, 0x12, 0xbd,
    -	0x70, 0x17, 0xac, 0x38, 0xae, 0xdd, 0x77, 0x89, 0xe7, 0xad, 0x13, 0xdc, 0x33, 0x74, 0x8b, 0x04,
    -	0x03, 0xf0, 0xab, 0xde, 0x85, 0xd1, 0xb0, 0xb1, 0xb2, 0x23, 0x87, 0xa0, 0x3c, 0x5b, 0xf5, 0xa3,
    -	0x12, 0x38, 0x9b, 0xde, 0x11, 0x73, 0x84, 0x88, 0x72, 0x22, 0x21, 0xf2, 0x62, 0x2c, 0x44, 0x7d,
    -	0x95, 0x16, 0x3b, 0xf3, 0x67, 0xc2, 0x74, 0x0d, 0x2c, 0x08, 0xe1, 0x11, 0x74, 0x0a, 0x29, 0x16,
    -	0x2e, 0xcf, 0x6e, 0xb2, 0x1b, 0xa5, 0xf1, 0xf0, 0x35, 0x30, 0xe7, 0x72, 0x6d, 0x15, 0x10, 0xf8,
    -	0xfa, 0xe4, 0x5b, 0x82, 0x60, 0x0e, 0xc5, 0x3b, 0x51, 0x12, 0xcb, 0xb4, 0x49, 0x24, 0x39, 0x02,
    -	0x82, 0x52, 0x52, 0x9b, 0xac, 0xa5, 0x01, 0x28, 0x6b, 0x03, 0x37, 0xc1, 0xe2, 0xc0, 0xca, 0x52,
    -	0xf9, 0xb1, 0x76, 0x41, 0x50, 0x2d, 0xee, 0x66, 0x21, 0x48, 0x66, 0x07, 0x7f, 0x92, 0x90, 0x2b,
    -	0xd3, 0x7c, 0x17, 0xb9, 0x72, 0x74, 0x3a, 0x4c, 0xac, 0x57, 0x24, 0x3a, 0xaa, 0x32, 0xa9, 0x8e,
    -	0x52, 0x3f, 0x54, 0x00, 0xcc, 0xa6, 0xe0, 0xd8, 0xc3, 0x7d, 0xc6, 0x22, 0x56, 0x22, 0x7b, 0x72,
    -	0x85, 0x73, 0x75, 0xbc, 0xc2, 0x89, 0x76, 0xd0, 0xc9, 0x24, 0x8e, 0x98, 0xde, 0xd3, 0xb9, 0x98,
    -	0x99, 0x40, 0xe2, 0x44, 0xfe, 0x3c, 0x99, 0xc4, 0x89, 0xf1, 0x1c, 0x2d, 0x71, 0xfe, 0x59, 0x00,
    -	0x8b, 0x11, 0x78, 0x62, 0x89, 0x23, 0x31, 0xf9, 0xe6, 0x72, 0x66, 0x32, 0xd9, 0x11, 0x4d, 0xdd,
    -	0xff, 0x89, 0xec, 0x88, 0x1c, 0xca, 0x91, 0x1d, 0xbf, 0x2f, 0xc4, 0xbd, 0x3e, 0xa6, 0xec, 0xf8,
    -	0x02, 0xae, 0x2a, 0xbe, 0x72, 0xca, 0x45, 0xfd, 0xa4, 0x08, 0xce, 0xa6, 0x53, 0x30, 0x51, 0x07,
    -	0x95, 0xb1, 0x75, 0x70, 0x07, 0x2c, 0xdd, 0x1b, 0x18, 0xc6, 0x21, 0x1f, 0x43, 0xac, 0x18, 0xfa,
    -	0x15, 0xf4, 0xdb, 0xc2, 0x72, 0xe9, 0x07, 0x12, 0x0c, 0x92, 0x5a, 0x66, 0xcb, 0x62, 0xe9, 0x49,
    -	0xcb, 0x62, 0xf9, 0x04, 0x65, 0x51, 0xae, 0x2c, 0x8a, 0x27, 0x52, 0x16, 0x13, 0xd7, 0x44, 0xc9,
    -	0x76, 0x35, 0xf6, 0x0c, 0x3f, 0x52, 0xc0, 0xb2, 0xfc, 0xf8, 0x0c, 0x0d, 0x30, 0x6f, 0xe2, 0x07,
    -	0xf1, 0xcb, 0x8b, 0x71, 0x05, 0x63, 0x40, 0x75, 0xa3, 0xe9, 0x7f, 0xdd, 0x69, 0xbe, 0x6d, 0xd1,
    -	0x6d, 0xb7, 0x4b, 0x5d, 0xdd, 0xea, 0xfb, 0x05, 0x76, 0x33, 0xc1, 0x85, 0x52, 0xdc, 0xf0, 0x2e,
    -	0xa8, 0x98, 0xf8, 0x41, 0x77, 0xe0, 0xf6, 0x83, 0x42, 0x78, 0xfc, 0xf7, 0xf0, 0xd8, 0xdf, 0x14,
    -	0x2c, 0x28, 0xe4, 0x53, 0x3f, 0x57, 0xc0, 0x4a, 0x4e, 0x05, 0xfd, 0x1a, 0x8d, 0xf2, 0x23, 0x05,
    -	0x5c, 0x4c, 0x8c, 0x92, 0x65, 0x24, 0xb9, 0x37, 0x30, 0x78, 0x72, 0x0a, 0xc1, 0x72, 0x15, 0xcc,
    -	0x3a, 0xd8, 0xa5, 0x7a, 0xa8, 0x74, 0xcb, 0xed, 0xb9, 0xd1, 0xb0, 0x31, 0xbb, 0x13, 0x34, 0xa2,
    -	0xa8, 0x5f, 0x32, 0x37, 0x85, 0xa7, 0x37, 0x37, 0xea, 0xaf, 0x0b, 0xa0, 0x1a, 0x73, 0xf9, 0x14,
    -	0xa4, 0xca, 0x9b, 0x09, 0xa9, 0x22, 0xfd, 0xf8, 0x13, 0x9f, 0xc3, 0x3c, 0xad, 0xb2, 0x99, 0xd2,
    -	0x2a, 0xdf, 0x1d, 0x47, 0x74, 0xb4, 0x58, 0xf9, 0x57, 0x01, 0x2c, 0xc5, 0xd0, 0x91, 0x5a, 0xf9,
    -	0x7e, 0x42, 0xad, 0xac, 0xa6, 0xd4, 0x4a, 0x4d, 0x66, 0xf3, 0x8d, 0x5c, 0x19, 0x2f, 0x57, 0xfe,
    -	0xa4, 0x80, 0x85, 0xd8, 0xdc, 0x9d, 0x82, 0x5e, 0x59, 0x4f, 0xea, 0x95, 0xc6, 0x98, 0x78, 0xc9,
    -	0x11, 0x2c, 0xb7, 0xc0, 0x62, 0x0c, 0xb4, 0xed, 0xf6, 0x74, 0x0b, 0x1b, 0x1e, 0x7c, 0x0e, 0x94,
    -	0x3d, 0x8a, 0x5d, 0x1a, 0x64, 0x77, 0x60, 0xdb, 0x65, 0x8d, 0xc8, 0xef, 0x53, 0xff, 0xad, 0x80,
    -	0x56, 0xcc, 0x78, 0x87, 0xb8, 0x9e, 0xee, 0x51, 0x62, 0xd1, 0x3b, 0xb6, 0x31, 0x30, 0x49, 0xc7,
    -	0xc0, 0xba, 0x89, 0x08, 0x6b, 0xd0, 0x6d, 0x6b, 0xc7, 0x36, 0x74, 0xed, 0x10, 0x62, 0x50, 0xfd,
    -	0x60, 0x9f, 0x58, 0xeb, 0xc4, 0x20, 0x54, 0x7c, 0xde, 0x98, 0x6d, 0xbf, 0x11, 0xdc, 0xf6, 0xbf,
    -	0x1b, 0x75, 0x3d, 0x1e, 0x36, 0x56, 0x27, 0x61, 0xe4, 0xc1, 0x19, 0xe7, 0x84, 0x3f, 0x03, 0x80,
    -	0x3d, 0x76, 0x35, 0x1c, 0x7c, 0xec, 0x98, 0x6d, 0xbf, 0x1e, 0xa4, 0xf0, 0xbb, 0x61, 0xcf, 0xb1,
    -	0x5e, 0x10, 0x63, 0x54, 0x7f, 0x57, 0x49, 0x2c, 0xf5, 0xd7, 0xfe, 0x6e, 0xe9, 0x17, 0x60, 0xe9,
    -	0x20, 0x9a, 0x9d, 0x00, 0xc0, 0x34, 0x11, 0x8b, 0xbb, 0xe7, 0xa5, 0xf4, 0xb2, 0x79, 0x8d, 0x94,
    -	0xd8, 0x1d, 0x09, 0x1d, 0x92, 0xbe, 0x04, 0xbe, 0x0c, 0xaa, 0x4c, 0xcb, 0xe8, 0x1a, 0xd9, 0xc2,
    -	0x66, 0x90, 0x86, 0xe1, 0xd7, 0xa1, 0x6e, 0xd4, 0x85, 0xe2, 0x38, 0xb8, 0x0f, 0x16, 0x1d, 0xbb,
    -	0xb7, 0x89, 0x2d, 0xdc, 0x27, 0xac, 0x42, 0xfb, 0x4b, 0xc9, 0x6f, 0x9d, 0x66, 0xdb, 0xaf, 0x04,
    -	0x37, 0x0a, 0x3b, 0x59, 0x08, 0x3b, 0xb1, 0x49, 0x9a, 0x79, 0x10, 0xc8, 0x28, 0xa1, 0x99, 0xf9,
    -	0x98, 0x39, 0x93, 0xf9, 0x07, 0x88, 0x2c, 0x1f, 0x4f, 0xf8, 0x39, 0x33, 0xef, 0x3e, 0xad, 0x72,
    -	0xa2, 0xfb, 0x34, 0xc9, 0x89, 0x63, 0xf6, 0x98, 0x27, 0x8e, 0x4f, 0x14, 0x70, 0xc9, 0x99, 0x20,
    -	0x8d, 0x6a, 0x80, 0x4f, 0x4b, 0x67, 0xcc, 0xb4, 0x4c, 0x92, 0x91, 0xed, 0xd5, 0xd1, 0xb0, 0x71,
    -	0x69, 0x12, 0x24, 0x9a, 0xc8, 0x35, 0x96, 0x34, 0xb6, 0xd8, 0xf9, 0x6a, 0x55, 0xee, 0xe6, 0x95,
    -	0x31, 0x6e, 0x06, 0x1b, 0xa5, 0x9f, 0x87, 0xc1, 0x13, 0x0a, 0x69, 0xd4, 0x0f, 0xcb, 0xe0, 0x5c,
    -	0xa6, 0x5a, 0x7f, 0x89, 0x77, 0x85, 0x99, 0x13, 0x4d, 0xf1, 0x18, 0x27, 0x9a, 0x35, 0xb0, 0x20,
    -	0x3e, 0x30, 0xa7, 0x0e, 0x44, 0x61, 0x98, 0x74, 0x92, 0xdd, 0x28, 0x8d, 0x97, 0xdd, 0x55, 0x96,
    -	0x8f, 0x79, 0x57, 0x19, 0xf7, 0x42, 0xfc, 0x2f, 0xca, 0xcf, 0xe7, 0xac, 0x17, 0xe2, 0xef, 0x51,
    -	0x69, 0x3c, 0x7c, 0x3d, 0x48, 0xd6, 0x90, 0x61, 0x86, 0x33, 0xa4, 0xb2, 0x2f, 0x24, 0x48, 0xa1,
    -	0x9f, 0xe8, 0x23, 0xea, 0x7b, 0x92, 0x8f, 0xa8, 0xab, 0x63, 0xc2, 0x6c, 0xf2, 0x6b, 0x49, 0xe9,
    -	0xa1, 0xb3, 0x7a, 0xfc, 0x43, 0xa7, 0xfa, 0x17, 0x05, 0x3c, 0x93, 0xbb, 0x4d, 0xc1, 0xb5, 0x84,
    -	0x7a, 0xbc, 0x96, 0x52, 0x8f, 0xcf, 0xe6, 0x1a, 0xc6, 0x24, 0xa4, 0x29, 0xbf, 0xb1, 0xbc, 0x39,
    -	0xf6, 0xc6, 0x52, 0x72, 0x12, 0x19, 0x7f, 0x75, 0xd9, 0x7e, 0xf5, 0xe1, 0xa3, 0xfa, 0xd4, 0xa7,
    -	0x8f, 0xea, 0x53, 0x9f, 0x3d, 0xaa, 0x4f, 0xfd, 0x72, 0x54, 0x57, 0x1e, 0x8e, 0xea, 0xca, 0xa7,
    -	0xa3, 0xba, 0xf2, 0xd9, 0xa8, 0xae, 0xfc, 0x7d, 0x54, 0x57, 0x7e, 0xfb, 0x79, 0x7d, 0xea, 0x2e,
    -	0xcc, 0xfe, 0x2b, 0xf3, 0x7f, 0x01, 0x00, 0x00, 0xff, 0xff, 0xd3, 0xfa, 0xed, 0x70, 0xaa, 0x29,
    -	0x00, 0x00,
    +	0x15, 0xd7, 0x52, 0xa4, 0x44, 0x0d, 0x2d, 0xc9, 0x1e, 0xa9, 0x12, 0x63, 0x37, 0xa4, 0xbb, 0x71,
    +	0x6d, 0x25, 0x8e, 0xc9, 0xda, 0x71, 0x82, 0xc0, 0x29, 0x12, 0x88, 0x54, 0x9a, 0xba, 0xd1, 0x57,
    +	0x87, 0x92, 0x03, 0xb8, 0x69, 0xd1, 0xd1, 0x72, 0x4c, 0x6d, 0xbc, 0x5f, 0xd8, 0x1d, 0x2a, 0x16,
    +	0x7a, 0x29, 0x0a, 0x14, 0xe8, 0x21, 0x87, 0xfe, 0x0d, 0xfd, 0x07, 0x8a, 0xa2, 0x68, 0x6e, 0x45,
    +	0x50, 0xf4, 0xe2, 0x4b, 0x81, 0xa0, 0x97, 0xe6, 0x44, 0xd4, 0xcc, 0xa9, 0x28, 0x7a, 0x6b, 0x2f,
    +	0xbe, 0xb4, 0x98, 0xd9, 0xd9, 0xef, 0x59, 0x91, 0x92, 0x63, 0xa5, 0x09, 0x7c, 0xe3, 0xce, 0x7b,
    +	0xef, 0x37, 0x6f, 0x66, 0xde, 0x9b, 0xf7, 0x9b, 0x19, 0x02, 0xf5, 0xfe, 0xeb, 0x5e, 0x43, 0xb7,
    +	0x9b, 0xd8, 0xd1, 0x9b, 0xd8, 0x71, 0xbc, 0xe6, 0xc1, 0xf5, 0x66, 0x8f, 0x58, 0xc4, 0xc5, 0x94,
    +	0x74, 0x1b, 0x8e, 0x6b, 0x53, 0x1b, 0x42, 0x5f, 0xa7, 0x81, 0x1d, 0xbd, 0xc1, 0x74, 0x1a, 0x07,
    +	0xd7, 0xcf, 0x5f, 0xeb, 0xe9, 0x74, 0xbf, 0xbf, 0xd7, 0xd0, 0x6c, 0xb3, 0xd9, 0xb3, 0x7b, 0x76,
    +	0x93, 0xab, 0xee, 0xf5, 0xef, 0xf1, 0x2f, 0xfe, 0xc1, 0x7f, 0xf9, 0x10, 0xe7, 0xe3, 0xdd, 0x68,
    +	0xb6, 0x4b, 0x24, 0xdd, 0x9c, 0xbf, 0x19, 0xe9, 0x98, 0x58, 0xdb, 0xd7, 0x2d, 0xe2, 0x1e, 0x36,
    +	0x9d, 0xfb, 0x3d, 0xd6, 0xe0, 0x35, 0x4d, 0x42, 0xb1, 0xcc, 0xaa, 0x99, 0x67, 0xe5, 0xf6, 0x2d,
    +	0xaa, 0x9b, 0x24, 0x63, 0xf0, 0xda, 0x28, 0x03, 0x4f, 0xdb, 0x27, 0x26, 0xce, 0xd8, 0xbd, 0x92,
    +	0x67, 0xd7, 0xa7, 0xba, 0xd1, 0xd4, 0x2d, 0xea, 0x51, 0x37, 0x6d, 0xa4, 0xfe, 0x47, 0x01, 0xb0,
    +	0x6d, 0x5b, 0xd4, 0xb5, 0x0d, 0x83, 0xb8, 0x88, 0x1c, 0xe8, 0x9e, 0x6e, 0x5b, 0xf0, 0xa7, 0xa0,
    +	0xcc, 0xc6, 0xd3, 0xc5, 0x14, 0x57, 0x95, 0x8b, 0xca, 0x4a, 0xe5, 0xc6, 0x77, 0x1a, 0xd1, 0x24,
    +	0x87, 0xf0, 0x0d, 0xe7, 0x7e, 0x8f, 0x35, 0x78, 0x0d, 0xa6, 0xdd, 0x38, 0xb8, 0xde, 0xd8, 0xda,
    +	0xfb, 0x80, 0x68, 0x74, 0x83, 0x50, 0xdc, 0x82, 0x0f, 0x07, 0xf5, 0x89, 0xe1, 0xa0, 0x0e, 0xa2,
    +	0x36, 0x14, 0xa2, 0xc2, 0x2d, 0x50, 0xe4, 0xe8, 0x05, 0x8e, 0x7e, 0x2d, 0x17, 0x5d, 0x0c, 0xba,
    +	0x81, 0xf0, 0x87, 0x6f, 0x3f, 0xa0, 0xc4, 0x62, 0xee, 0xb5, 0xce, 0x08, 0xe8, 0xe2, 0x1a, 0xa6,
    +	0x18, 0x71, 0x20, 0xf8, 0x32, 0x28, 0xbb, 0xc2, 0xfd, 0xea, 0xe4, 0x45, 0x65, 0x65, 0xb2, 0x75,
    +	0x56, 0x68, 0x95, 0x83, 0x61, 0xa1, 0x50, 0x43, 0xfd, 0xb3, 0x02, 0x96, 0xb2, 0xe3, 0x5e, 0xd7,
    +	0x3d, 0x0a, 0xdf, 0xcf, 0x8c, 0xbd, 0x31, 0xde, 0xd8, 0x99, 0x35, 0x1f, 0x79, 0xd8, 0x71, 0xd0,
    +	0x12, 0x1b, 0xf7, 0xbb, 0xa0, 0xa4, 0x53, 0x62, 0x7a, 0xd5, 0xc2, 0xc5, 0xc9, 0x95, 0xca, 0x8d,
    +	0xcb, 0x8d, 0x6c, 0xec, 0x36, 0xb2, 0x8e, 0xb5, 0x66, 0x05, 0x64, 0xe9, 0x36, 0x33, 0x46, 0x3e,
    +	0x86, 0xfa, 0x5f, 0x05, 0xcc, 0xac, 0x61, 0x62, 0xda, 0x56, 0x87, 0xd0, 0x53, 0x58, 0xb4, 0x36,
    +	0x28, 0x7a, 0x0e, 0xd1, 0xc4, 0xa2, 0x7d, 0x4b, 0xe6, 0x7b, 0xe8, 0x4e, 0xc7, 0x21, 0x5a, 0xb4,
    +	0x50, 0xec, 0x0b, 0x71, 0x63, 0xf8, 0x2e, 0x98, 0xf2, 0x28, 0xa6, 0x7d, 0x8f, 0x2f, 0x53, 0xe5,
    +	0xc6, 0x0b, 0x47, 0xc3, 0x70, 0xd5, 0xd6, 0x9c, 0x00, 0x9a, 0xf2, 0xbf, 0x91, 0x80, 0x50, 0xff,
    +	0x51, 0x00, 0x30, 0xd4, 0x6d, 0xdb, 0x56, 0x57, 0xa7, 0x2c, 0x7e, 0x6f, 0x81, 0x22, 0x3d, 0x74,
    +	0x08, 0x9f, 0x86, 0x99, 0xd6, 0xe5, 0xc0, 0x8b, 0x9d, 0x43, 0x87, 0x3c, 0x1e, 0xd4, 0x97, 0xb2,
    +	0x16, 0x4c, 0x82, 0xb8, 0x0d, 0x5c, 0x0f, 0xfd, 0x2b, 0x70, 0xeb, 0x9b, 0xc9, 0xae, 0x1f, 0x0f,
    +	0xea, 0x92, 0xcd, 0xa2, 0x11, 0x22, 0x25, 0x1d, 0x84, 0x07, 0x00, 0x1a, 0xd8, 0xa3, 0x3b, 0x2e,
    +	0xb6, 0x3c, 0xbf, 0x27, 0xdd, 0x24, 0x62, 0xe4, 0x2f, 0x8d, 0xb7, 0x3c, 0xcc, 0xa2, 0x75, 0x5e,
    +	0x78, 0x01, 0xd7, 0x33, 0x68, 0x48, 0xd2, 0x03, 0xbc, 0x0c, 0xa6, 0x5c, 0x82, 0x3d, 0xdb, 0xaa,
    +	0x16, 0xf9, 0x28, 0xc2, 0x09, 0x44, 0xbc, 0x15, 0x09, 0x29, 0x7c, 0x11, 0x4c, 0x9b, 0xc4, 0xf3,
    +	0x70, 0x8f, 0x54, 0x4b, 0x5c, 0x71, 0x5e, 0x28, 0x4e, 0x6f, 0xf8, 0xcd, 0x28, 0x90, 0xab, 0xbf,
    +	0x53, 0xc0, 0x6c, 0x38, 0x73, 0xa7, 0x90, 0x2a, 0xad, 0x64, 0xaa, 0x3c, 0x7f, 0x64, 0x9c, 0xe4,
    +	0x64, 0xc8, 0x27, 0x93, 0x31, 0x9f, 0x59, 0x10, 0xc2, 0x1f, 0x83, 0xb2, 0x47, 0x0c, 0xa2, 0x51,
    +	0xdb, 0x15, 0x3e, 0xbf, 0x32, 0xa6, 0xcf, 0x78, 0x8f, 0x18, 0x1d, 0x61, 0xda, 0x3a, 0xc3, 0x9c,
    +	0x0e, 0xbe, 0x50, 0x08, 0x09, 0x7f, 0x08, 0xca, 0x94, 0x98, 0x8e, 0x81, 0x29, 0x11, 0x69, 0x92,
    +	0x88, 0x6f, 0x16, 0x2e, 0x0c, 0x6c, 0xdb, 0xee, 0xee, 0x08, 0x35, 0x9e, 0x28, 0xe1, 0x3c, 0x04,
    +	0xad, 0x28, 0x84, 0x81, 0xf7, 0xc1, 0x5c, 0xdf, 0xe9, 0x32, 0x4d, 0xca, 0xb6, 0xee, 0xde, 0xa1,
    +	0x08, 0x9f, 0xab, 0x47, 0x4e, 0xc8, 0x6e, 0xc2, 0xa4, 0xb5, 0x24, 0x3a, 0x98, 0x4b, 0xb6, 0xa3,
    +	0x14, 0x34, 0x5c, 0x05, 0xf3, 0xa6, 0x6e, 0x21, 0x82, 0xbb, 0x87, 0x1d, 0xa2, 0xd9, 0x56, 0xd7,
    +	0xe3, 0x01, 0x54, 0x6a, 0x2d, 0x0b, 0x80, 0xf9, 0x8d, 0xa4, 0x18, 0xa5, 0xf5, 0xe1, 0x3a, 0x58,
    +	0x0c, 0xf6, 0xd9, 0xef, 0xeb, 0x1e, 0xb5, 0xdd, 0xc3, 0x75, 0xdd, 0xd4, 0x69, 0x75, 0x8a, 0xe3,
    +	0x54, 0x87, 0x83, 0xfa, 0x22, 0x92, 0xc8, 0x91, 0xd4, 0x4a, 0xfd, 0x68, 0x0a, 0xcc, 0xa7, 0x76,
    +	0x03, 0x78, 0x07, 0x2c, 0x69, 0x7d, 0xd7, 0x25, 0x16, 0xdd, 0xec, 0x9b, 0x7b, 0xc4, 0xed, 0x68,
    +	0xfb, 0xa4, 0xdb, 0x37, 0x48, 0x97, 0xaf, 0x68, 0xa9, 0x55, 0x13, 0xbe, 0x2e, 0xb5, 0xa5, 0x5a,
    +	0x28, 0xc7, 0x1a, 0xfe, 0x00, 0x40, 0x8b, 0x37, 0x6d, 0xe8, 0x9e, 0x17, 0x62, 0x16, 0x38, 0x66,
    +	0x98, 0x80, 0x9b, 0x19, 0x0d, 0x24, 0xb1, 0x62, 0x3e, 0x76, 0x89, 0xa7, 0xbb, 0xa4, 0x9b, 0xf6,
    +	0x71, 0x32, 0xe9, 0xe3, 0x9a, 0x54, 0x0b, 0xe5, 0x58, 0xc3, 0x57, 0x41, 0xc5, 0xef, 0x8d, 0xcf,
    +	0xb9, 0x58, 0x9c, 0x05, 0x01, 0x56, 0xd9, 0x8c, 0x44, 0x28, 0xae, 0xc7, 0x86, 0x66, 0xef, 0x79,
    +	0xc4, 0x3d, 0x20, 0xdd, 0x77, 0x7c, 0x0e, 0xc0, 0x0a, 0x65, 0x89, 0x17, 0xca, 0x70, 0x68, 0x5b,
    +	0x19, 0x0d, 0x24, 0xb1, 0x62, 0x43, 0xf3, 0xa3, 0x26, 0x33, 0xb4, 0xa9, 0xe4, 0xd0, 0x76, 0xa5,
    +	0x5a, 0x28, 0xc7, 0x9a, 0xc5, 0x9e, 0xef, 0xf2, 0xea, 0x01, 0xd6, 0x0d, 0xbc, 0x67, 0x90, 0xea,
    +	0x74, 0x32, 0xf6, 0x36, 0x93, 0x62, 0x94, 0xd6, 0x87, 0xef, 0x80, 0x73, 0x7e, 0xd3, 0xae, 0x85,
    +	0x43, 0x90, 0x32, 0x07, 0x79, 0x4e, 0x80, 0x9c, 0xdb, 0x4c, 0x2b, 0xa0, 0xac, 0x0d, 0xbc, 0x05,
    +	0xe6, 0x34, 0xdb, 0x30, 0x78, 0x3c, 0xb6, 0xed, 0xbe, 0x45, 0xab, 0x33, 0x1c, 0x05, 0xb2, 0x1c,
    +	0x6a, 0x27, 0x24, 0x28, 0xa5, 0x09, 0xef, 0x02, 0xa0, 0x05, 0xe5, 0xc0, 0xab, 0x82, 0xfc, 0x42,
    +	0x9f, 0xad, 0x43, 0x51, 0x01, 0x0e, 0x9b, 0x3c, 0x14, 0x43, 0x53, 0x3f, 0x51, 0xc0, 0x72, 0x4e,
    +	0x8e, 0xc3, 0xb7, 0x12, 0x55, 0xef, 0x6a, 0xaa, 0xea, 0x5d, 0xc8, 0x31, 0x8b, 0x95, 0x3e, 0x0d,
    +	0xcc, 0x32, 0xde, 0xa1, 0x5b, 0x3d, 0x5f, 0x45, 0xec, 0x60, 0x2f, 0xc9, 0x7c, 0x47, 0x71, 0xc5,
    +	0x68, 0x1b, 0x3e, 0x37, 0x1c, 0xd4, 0x67, 0x13, 0x32, 0x94, 0xc4, 0x54, 0x7f, 0x51, 0x00, 0x60,
    +	0x8d, 0x38, 0x86, 0x7d, 0x68, 0x12, 0xeb, 0x34, 0x58, 0xcb, 0x5a, 0x82, 0xb5, 0xa8, 0xd2, 0x85,
    +	0x08, 0xfd, 0xc9, 0xa5, 0x2d, 0xeb, 0x29, 0xda, 0x72, 0x69, 0x04, 0xce, 0xd1, 0xbc, 0xe5, 0x6f,
    +	0x93, 0x60, 0x21, 0x52, 0x8e, 0x88, 0xcb, 0x1b, 0x89, 0x25, 0xbc, 0x92, 0x5a, 0xc2, 0x65, 0x89,
    +	0xc9, 0x53, 0x63, 0x2e, 0x1f, 0x80, 0x39, 0xc6, 0x2b, 0xfc, 0x55, 0xe3, 0xac, 0x65, 0xea, 0xd8,
    +	0xac, 0x25, 0xac, 0x3a, 0xeb, 0x09, 0x24, 0x94, 0x42, 0xce, 0x61, 0x49, 0xd3, 0x5f, 0x45, 0x96,
    +	0xf4, 0x7b, 0x05, 0xcc, 0x45, 0xcb, 0x74, 0x0a, 0x34, 0xa9, 0x9d, 0xa4, 0x49, 0xb5, 0xa3, 0xe3,
    +	0x32, 0x87, 0x27, 0xfd, 0xb5, 0x18, 0xf7, 0x9a, 0x13, 0xa5, 0x15, 0x76, 0xa0, 0x72, 0x0c, 0x5d,
    +	0xc3, 0x9e, 0x28, 0xab, 0x67, 0xfc, 0xc3, 0x94, 0xdf, 0x86, 0x42, 0x69, 0x82, 0x52, 0x15, 0x9e,
    +	0x2e, 0xa5, 0x9a, 0xfc, 0x62, 0x28, 0xd5, 0x0e, 0x28, 0x7b, 0x01, 0x99, 0x2a, 0x72, 0xc8, 0xcb,
    +	0xa3, 0xd2, 0x59, 0xf0, 0xa8, 0x10, 0x35, 0x64, 0x50, 0x21, 0x92, 0x8c, 0x3b, 0x95, 0xbe, 0x4c,
    +	0xee, 0xc4, 0xc2, 0xdb, 0xc1, 0x7d, 0x8f, 0x74, 0x79, 0x2a, 0x95, 0xa3, 0xf0, 0xde, 0xe6, 0xad,
    +	0x48, 0x48, 0xe1, 0x2e, 0x58, 0x76, 0x5c, 0xbb, 0xe7, 0x12, 0xcf, 0x5b, 0x23, 0xb8, 0x6b, 0xe8,
    +	0x16, 0x09, 0x06, 0xe0, 0x57, 0xbd, 0x0b, 0xc3, 0x41, 0x7d, 0x79, 0x5b, 0xae, 0x82, 0xf2, 0x6c,
    +	0xd5, 0x5f, 0x95, 0xc0, 0xd9, 0xf4, 0x8e, 0x98, 0x43, 0x44, 0x94, 0x13, 0x11, 0x91, 0x97, 0x63,
    +	0x21, 0xea, 0xb3, 0xb4, 0xd8, 0x99, 0x3f, 0x13, 0xa6, 0xab, 0x60, 0x5e, 0x10, 0x8f, 0x40, 0x28,
    +	0xa8, 0x58, 0xb8, 0x3c, 0xbb, 0x49, 0x31, 0x4a, 0xeb, 0xc3, 0x37, 0xc0, 0xac, 0xcb, 0xb9, 0x55,
    +	0x00, 0xe0, 0xf3, 0x93, 0x6f, 0x08, 0x80, 0x59, 0x14, 0x17, 0xa2, 0xa4, 0x2e, 0xe3, 0x26, 0x11,
    +	0xe5, 0x08, 0x00, 0x8a, 0x49, 0x6e, 0xb2, 0x9a, 0x56, 0x40, 0x59, 0x1b, 0xb8, 0x01, 0x16, 0xfa,
    +	0x56, 0x16, 0xca, 0x8f, 0xb5, 0x0b, 0x02, 0x6a, 0x61, 0x37, 0xab, 0x82, 0x64, 0x76, 0xf0, 0x36,
    +	0x58, 0xa0, 0xc4, 0x35, 0x75, 0x0b, 0x53, 0xdd, 0xea, 0x85, 0x70, 0xfe, 0xca, 0x2f, 0x33, 0xa8,
    +	0x9d, 0xac, 0x18, 0xc9, 0x6c, 0xe0, 0x8f, 0x12, 0xcc, 0x67, 0x8a, 0x6f, 0x48, 0x57, 0x8e, 0xce,
    +	0xac, 0xb1, 0xa9, 0x8f, 0x84, 0x92, 0x95, 0xc7, 0xa5, 0x64, 0xea, 0xc7, 0x0a, 0x80, 0xd9, 0x6c,
    +	0x1e, 0x79, 0x4f, 0x90, 0xb1, 0x88, 0x55, 0xdb, 0xae, 0x9c, 0x2c, 0x5d, 0x1d, 0x4d, 0x96, 0xa2,
    +	0xcd, 0x78, 0x3c, 0xb6, 0x24, 0xa6, 0xf7, 0x74, 0xee, 0x78, 0xc6, 0x60, 0x4b, 0x91, 0x3f, 0x4f,
    +	0xc6, 0x96, 0x62, 0x38, 0x47, 0xb3, 0xa5, 0x7f, 0x16, 0xc0, 0x42, 0xa4, 0x3c, 0x36, 0x5b, 0x92,
    +	0x98, 0x3c, 0xbb, 0xe7, 0x19, 0x8f, 0xc1, 0x44, 0x53, 0xf7, 0x7f, 0xc2, 0x60, 0x22, 0x87, 0x72,
    +	0x18, 0xcc, 0x6f, 0x0b, 0x71, 0xaf, 0x8f, 0xc9, 0x60, 0xbe, 0x80, 0x5b, 0x8f, 0xaf, 0x1c, 0x09,
    +	0x52, 0x3f, 0x2a, 0x82, 0xb3, 0xe9, 0x14, 0x4c, 0x94, 0x54, 0x65, 0x64, 0x49, 0xdd, 0x06, 0x8b,
    +	0xf7, 0xfa, 0x86, 0x71, 0xc8, 0xc7, 0x10, 0xab, 0xab, 0x7e, 0x31, 0xfe, 0xa6, 0xb0, 0x5c, 0xfc,
    +	0x9e, 0x44, 0x07, 0x49, 0x2d, 0xb3, 0x15, 0xb6, 0xf8, 0xa4, 0x15, 0xb6, 0x74, 0x82, 0x0a, 0x9b,
    +	0x53, 0x12, 0xa7, 0x4f, 0x50, 0x12, 0xe5, 0x7c, 0x67, 0xf2, 0x44, 0x7c, 0x67, 0xec, 0xf2, 0x2a,
    +	0xd9, 0xf9, 0x46, 0xde, 0x2c, 0x0c, 0x15, 0xb0, 0x24, 0x3f, 0xd4, 0x43, 0x03, 0xcc, 0x99, 0xf8,
    +	0x41, 0xfc, 0x4a, 0x65, 0x54, 0xed, 0xe9, 0x53, 0xdd, 0x68, 0xf8, 0x6f, 0x4e, 0x8d, 0xdb, 0x16,
    +	0xdd, 0x72, 0x3b, 0xd4, 0xd5, 0xad, 0x9e, 0x5f, 0xab, 0x37, 0x12, 0x58, 0x28, 0x85, 0x0d, 0xef,
    +	0x82, 0xb2, 0x89, 0x1f, 0x74, 0xfa, 0x6e, 0x2f, 0xa8, 0xa9, 0xc7, 0xef, 0x87, 0xa7, 0xd1, 0x86,
    +	0x40, 0x41, 0x21, 0x9e, 0xfa, 0xb9, 0x02, 0x96, 0x73, 0x8a, 0xf1, 0xd7, 0x68, 0x94, 0x7f, 0x54,
    +	0xc0, 0xc5, 0xc4, 0x28, 0x59, 0x72, 0x93, 0x7b, 0x7d, 0x83, 0xe7, 0xb9, 0xe0, 0x3e, 0x57, 0xc1,
    +	0x8c, 0x83, 0x5d, 0xaa, 0x87, 0xfc, 0xbb, 0xd4, 0x9a, 0x1d, 0x0e, 0xea, 0x33, 0xdb, 0x41, 0x23,
    +	0x8a, 0xe4, 0x92, 0xb9, 0x29, 0x3c, 0xbd, 0xb9, 0x51, 0x7f, 0x59, 0x00, 0x95, 0x98, 0xcb, 0xa7,
    +	0xc0, 0x7a, 0xde, 0x4e, 0xb0, 0x1e, 0xe9, 0x93, 0x54, 0x7c, 0x0e, 0xf3, 0x68, 0xcf, 0x46, 0x8a,
    +	0xf6, 0x7c, 0x7b, 0x14, 0xd0, 0xd1, 0xbc, 0xe7, 0x5f, 0x05, 0xb0, 0x18, 0xd3, 0x8e, 0x88, 0xcf,
    +	0x77, 0x13, 0xc4, 0x67, 0x25, 0x45, 0x7c, 0xaa, 0x32, 0x9b, 0x67, 0xcc, 0x67, 0x34, 0xf3, 0xf9,
    +	0x83, 0x02, 0xe6, 0x63, 0x73, 0x77, 0x0a, 0xd4, 0x67, 0x2d, 0x49, 0x7d, 0xea, 0x23, 0xe2, 0x25,
    +	0x87, 0xfb, 0xdc, 0x02, 0x0b, 0x31, 0xa5, 0x2d, 0xb7, 0xab, 0x5b, 0xd8, 0xf0, 0xe0, 0x0b, 0xa0,
    +	0xe4, 0x51, 0xec, 0xd2, 0x20, 0xbb, 0x03, 0xdb, 0x0e, 0x6b, 0x44, 0xbe, 0x4c, 0xfd, 0xb7, 0x02,
    +	0x9a, 0x31, 0xe3, 0x6d, 0xe2, 0x7a, 0xba, 0x47, 0x89, 0x45, 0xef, 0xd8, 0x46, 0xdf, 0x24, 0x6d,
    +	0x03, 0xeb, 0x26, 0x22, 0xac, 0x41, 0xb7, 0xad, 0x6d, 0xdb, 0xd0, 0xb5, 0x43, 0x88, 0x41, 0xe5,
    +	0xc3, 0x7d, 0x62, 0xad, 0x11, 0x83, 0x50, 0xf1, 0xe8, 0x32, 0xd3, 0x7a, 0x2b, 0x78, 0x83, 0x78,
    +	0x2f, 0x12, 0x3d, 0x1e, 0xd4, 0x57, 0xc6, 0x41, 0xe4, 0xc1, 0x19, 0xc7, 0x84, 0x3f, 0x01, 0x80,
    +	0x7d, 0x76, 0x34, 0x1c, 0x3c, 0xc1, 0xcc, 0xb4, 0xde, 0x0c, 0x52, 0xf8, 0xbd, 0x50, 0x72, 0xac,
    +	0x0e, 0x62, 0x88, 0xea, 0x6f, 0xca, 0x89, 0xa5, 0xfe, 0xda, 0xdf, 0x78, 0xfd, 0x0c, 0x2c, 0x1e,
    +	0x44, 0xb3, 0x13, 0x28, 0x30, 0x7a, 0xc5, 0xe2, 0xee, 0x45, 0x29, 0xbc, 0x6c, 0x5e, 0x23, 0x52,
    +	0x77, 0x47, 0x02, 0x87, 0xa4, 0x9d, 0xc0, 0x57, 0x41, 0x85, 0x71, 0x19, 0x5d, 0x23, 0x9b, 0xd8,
    +	0x0c, 0xd2, 0x30, 0x7c, 0xb3, 0xea, 0x44, 0x22, 0x14, 0xd7, 0x83, 0xfb, 0x60, 0xc1, 0xb1, 0xbb,
    +	0x1b, 0xd8, 0xc2, 0x3d, 0xc2, 0x2a, 0xb4, 0xbf, 0x94, 0xfc, 0x2e, 0x6c, 0xa6, 0xf5, 0x5a, 0x70,
    +	0xcf, 0xb1, 0x9d, 0x55, 0x61, 0x87, 0x3f, 0x49, 0x33, 0x0f, 0x02, 0x19, 0x24, 0x34, 0x33, 0x4f,
    +	0xac, 0xd3, 0x99, 0xff, 0xa5, 0xc8, 0xf2, 0xf1, 0x84, 0x8f, 0xac, 0x79, 0xb7, 0x7c, 0xe5, 0x13,
    +	0xdd, 0xf2, 0x49, 0x0e, 0x2f, 0x33, 0xc7, 0x3c, 0xbc, 0xfc, 0x49, 0x01, 0x97, 0x9c, 0x31, 0xd2,
    +	0xa8, 0x0a, 0xf8, 0xb4, 0xb4, 0x47, 0x4c, 0xcb, 0x38, 0x19, 0xd9, 0x5a, 0x19, 0x0e, 0xea, 0x97,
    +	0xc6, 0xd1, 0x44, 0x63, 0xb9, 0xc6, 0x92, 0xc6, 0x16, 0x3b, 0x5f, 0xb5, 0xc2, 0xdd, 0xbc, 0x32,
    +	0xc2, 0xcd, 0x60, 0xa3, 0xf4, 0xf3, 0x30, 0xf8, 0x42, 0x21, 0x8c, 0xfa, 0x71, 0x09, 0x9c, 0xcb,
    +	0x54, 0xeb, 0x2f, 0xf1, 0x06, 0x33, 0x73, 0x38, 0x9a, 0x3c, 0xc6, 0xe1, 0x68, 0x15, 0xcc, 0x8b,
    +	0x67, 0xef, 0xd4, 0xd9, 0x2a, 0x0c, 0x93, 0x76, 0x52, 0x8c, 0xd2, 0xfa, 0xb2, 0x1b, 0xd4, 0xd2,
    +	0x31, 0x6f, 0x50, 0xe3, 0x5e, 0x88, 0x7f, 0x6b, 0xf9, 0xf9, 0x9c, 0xf5, 0x42, 0xfc, 0x69, 0x2b,
    +	0xad, 0x0f, 0xdf, 0x0c, 0x92, 0x35, 0x44, 0x98, 0xe6, 0x08, 0xa9, 0xec, 0x0b, 0x01, 0x52, 0xda,
    +	0x4f, 0xf4, 0xb4, 0xfb, 0xbe, 0xe4, 0x69, 0x77, 0x65, 0x44, 0x98, 0x8d, 0x7f, 0xc3, 0x29, 0x3d,
    +	0xbf, 0x56, 0x8e, 0x7f, 0x7e, 0x55, 0xff, 0xa2, 0x80, 0xe7, 0x72, 0xb7, 0x29, 0xb8, 0x9a, 0x60,
    +	0x8f, 0xd7, 0x52, 0xec, 0xf1, 0xf9, 0x5c, 0xc3, 0x18, 0x85, 0x34, 0xe5, 0x97, 0x9f, 0x37, 0x47,
    +	0x5e, 0x7e, 0x4a, 0x4e, 0x22, 0xa3, 0x6f, 0x41, 0x5b, 0xaf, 0x3f, 0x7c, 0x54, 0x9b, 0xf8, 0xf4,
    +	0x51, 0x6d, 0xe2, 0xb3, 0x47, 0xb5, 0x89, 0x9f, 0x0f, 0x6b, 0xca, 0xc3, 0x61, 0x4d, 0xf9, 0x74,
    +	0x58, 0x53, 0x3e, 0x1b, 0xd6, 0x94, 0xbf, 0x0f, 0x6b, 0xca, 0xaf, 0x3f, 0xaf, 0x4d, 0xdc, 0x85,
    +	0xd9, 0xff, 0x8a, 0xfe, 0x2f, 0x00, 0x00, 0xff, 0xff, 0x5f, 0x0a, 0xea, 0xf9, 0x40, 0x2a, 0x00,
    +	0x00,
     }
     
     func (m *ControllerRevision) Marshal() (dAtA []byte, err error) {
    @@ -1748,6 +1750,11 @@ func (m *DeploymentStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
     	_ = i
     	var l int
     	_ = l
    +	if m.TerminatingReplicas != nil {
    +		i = encodeVarintGenerated(dAtA, i, uint64(*m.TerminatingReplicas))
    +		i--
    +		dAtA[i] = 0x48
    +	}
     	if m.CollisionCount != nil {
     		i = encodeVarintGenerated(dAtA, i, uint64(*m.CollisionCount))
     		i--
    @@ -2054,6 +2061,11 @@ func (m *ReplicaSetStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
     	_ = i
     	var l int
     	_ = l
    +	if m.TerminatingReplicas != nil {
    +		i = encodeVarintGenerated(dAtA, i, uint64(*m.TerminatingReplicas))
    +		i--
    +		dAtA[i] = 0x38
    +	}
     	if len(m.Conditions) > 0 {
     		for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- {
     			{
    @@ -2915,6 +2927,9 @@ func (m *DeploymentStatus) Size() (n int) {
     	if m.CollisionCount != nil {
     		n += 1 + sovGenerated(uint64(*m.CollisionCount))
     	}
    +	if m.TerminatingReplicas != nil {
    +		n += 1 + sovGenerated(uint64(*m.TerminatingReplicas))
    +	}
     	return n
     }
     
    @@ -3020,6 +3035,9 @@ func (m *ReplicaSetStatus) Size() (n int) {
     			n += 1 + l + sovGenerated(uint64(l))
     		}
     	}
    +	if m.TerminatingReplicas != nil {
    +		n += 1 + sovGenerated(uint64(*m.TerminatingReplicas))
    +	}
     	return n
     }
     
    @@ -3435,6 +3453,7 @@ func (this *DeploymentStatus) String() string {
     		`Conditions:` + repeatedStringForConditions + `,`,
     		`ReadyReplicas:` + fmt.Sprintf("%v", this.ReadyReplicas) + `,`,
     		`CollisionCount:` + valueToStringGenerated(this.CollisionCount) + `,`,
    +		`TerminatingReplicas:` + valueToStringGenerated(this.TerminatingReplicas) + `,`,
     		`}`,
     	}, "")
     	return s
    @@ -3521,6 +3540,7 @@ func (this *ReplicaSetStatus) String() string {
     		`ReadyReplicas:` + fmt.Sprintf("%v", this.ReadyReplicas) + `,`,
     		`AvailableReplicas:` + fmt.Sprintf("%v", this.AvailableReplicas) + `,`,
     		`Conditions:` + repeatedStringForConditions + `,`,
    +		`TerminatingReplicas:` + valueToStringGenerated(this.TerminatingReplicas) + `,`,
     		`}`,
     	}, "")
     	return s
    @@ -5941,6 +5961,26 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error {
     				}
     			}
     			m.CollisionCount = &v
    +		case 9:
    +			if wireType != 0 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field TerminatingReplicas", wireType)
    +			}
    +			var v int32
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				v |= int32(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			m.TerminatingReplicas = &v
     		default:
     			iNdEx = preIndex
     			skippy, err := skipGenerated(dAtA[iNdEx:])
    @@ -6873,6 +6913,26 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error {
     				return err
     			}
     			iNdEx = postIndex
    +		case 7:
    +			if wireType != 0 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field TerminatingReplicas", wireType)
    +			}
    +			var v int32
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				v |= int32(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			m.TerminatingReplicas = &v
     		default:
     			iNdEx = preIndex
     			skippy, err := skipGenerated(dAtA[iNdEx:])
    diff --git a/vendor/k8s.io/api/apps/v1/generated.proto b/vendor/k8s.io/api/apps/v1/generated.proto
    index 388e638f4..5885a6222 100644
    --- a/vendor/k8s.io/api/apps/v1/generated.proto
    +++ b/vendor/k8s.io/api/apps/v1/generated.proto
    @@ -318,19 +318,19 @@ message DeploymentStatus {
       // +optional
       optional int64 observedGeneration = 1;
     
    -  // Total number of non-terminated pods targeted by this deployment (their labels match the selector).
    +  // Total number of non-terminating pods targeted by this deployment (their labels match the selector).
       // +optional
       optional int32 replicas = 2;
     
    -  // Total number of non-terminated pods targeted by this deployment that have the desired template spec.
    +  // Total number of non-terminating pods targeted by this deployment that have the desired template spec.
       // +optional
       optional int32 updatedReplicas = 3;
     
    -  // readyReplicas is the number of pods targeted by this Deployment with a Ready Condition.
    +  // Total number of non-terminating pods targeted by this Deployment with a Ready Condition.
       // +optional
       optional int32 readyReplicas = 7;
     
    -  // Total number of available pods (ready for at least minReadySeconds) targeted by this deployment.
    +  // Total number of available non-terminating pods (ready for at least minReadySeconds) targeted by this deployment.
       // +optional
       optional int32 availableReplicas = 4;
     
    @@ -340,6 +340,13 @@ message DeploymentStatus {
       // +optional
       optional int32 unavailableReplicas = 5;
     
    +  // Total number of terminating pods targeted by this deployment. Terminating pods have a non-null
    +  // .metadata.deletionTimestamp and have not yet reached the Failed or Succeeded .status.phase.
    +  //
    +  // This is an alpha field. Enable DeploymentReplicaSetTerminatingReplicas to be able to use this field.
    +  // +optional
    +  optional int32 terminatingReplicas = 9;
    +
       // Represents the latest available observations of a deployment's current state.
       // +patchMergeKey=type
       // +patchStrategy=merge
    @@ -421,16 +428,16 @@ message ReplicaSetList {
       optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
     
       // List of ReplicaSets.
    -  // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller
    +  // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset
       repeated ReplicaSet items = 2;
     }
     
     // ReplicaSetSpec is the specification of a ReplicaSet.
     message ReplicaSetSpec {
    -  // Replicas is the number of desired replicas.
    +  // Replicas is the number of desired pods.
       // This is a pointer to distinguish between explicit zero and unspecified.
       // Defaults to 1.
    -  // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller
    +  // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset
       // +optional
       optional int32 replicas = 1;
     
    @@ -448,29 +455,36 @@ message ReplicaSetSpec {
     
       // Template is the object that describes the pod that will be created if
       // insufficient replicas are detected.
    -  // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template
    +  // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset/#pod-template
       // +optional
       optional .k8s.io.api.core.v1.PodTemplateSpec template = 3;
     }
     
     // ReplicaSetStatus represents the current status of a ReplicaSet.
     message ReplicaSetStatus {
    -  // Replicas is the most recently observed number of replicas.
    -  // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller
    +  // Replicas is the most recently observed number of non-terminating pods.
    +  // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset
       optional int32 replicas = 1;
     
    -  // The number of pods that have labels matching the labels of the pod template of the replicaset.
    +  // The number of non-terminating pods that have labels matching the labels of the pod template of the replicaset.
       // +optional
       optional int32 fullyLabeledReplicas = 2;
     
    -  // readyReplicas is the number of pods targeted by this ReplicaSet with a Ready Condition.
    +  // The number of non-terminating pods targeted by this ReplicaSet with a Ready Condition.
       // +optional
       optional int32 readyReplicas = 4;
     
    -  // The number of available replicas (ready for at least minReadySeconds) for this replica set.
    +  // The number of available non-terminating pods (ready for at least minReadySeconds) for this replica set.
       // +optional
       optional int32 availableReplicas = 5;
     
    +  // The number of terminating pods for this replica set. Terminating pods have a non-null .metadata.deletionTimestamp
    +  // and have not yet reached the Failed or Succeeded .status.phase.
    +  //
    +  // This is an alpha field. Enable DeploymentReplicaSetTerminatingReplicas to be able to use this field.
    +  // +optional
    +  optional int32 terminatingReplicas = 7;
    +
       // ObservedGeneration reflects the generation of the most recently observed ReplicaSet.
       // +optional
       optional int64 observedGeneration = 3;
    @@ -516,7 +530,7 @@ message RollingUpdateDaemonSet {
       // pod is available (Ready for at least minReadySeconds) the old DaemonSet pod
       // on that node is marked deleted. If the old pod becomes unavailable for any
       // reason (Ready transitions to false, is evicted, or is drained) an updated
    -  // pod is immediatedly created on that node without considering surge limits.
    +  // pod is immediately created on that node without considering surge limits.
       // Allowing surge implies the possibility that the resources consumed by the
       // daemonset on any given node can double if the readiness check fails, and
       // so resource intensive daemonsets should take into account that they may
    @@ -702,6 +716,7 @@ message StatefulSetSpec {
       // the network identity of the set. Pods get DNS/hostnames that follow the
       // pattern: pod-specific-string.serviceName.default.svc.cluster.local
       // where "pod-specific-string" is managed by the StatefulSet controller.
    +  // +optional
       optional string serviceName = 5;
     
       // podManagementPolicy controls how pods are created during initial scale up,
    diff --git a/vendor/k8s.io/api/apps/v1/types.go b/vendor/k8s.io/api/apps/v1/types.go
    index a68690b44..4cf54cc99 100644
    --- a/vendor/k8s.io/api/apps/v1/types.go
    +++ b/vendor/k8s.io/api/apps/v1/types.go
    @@ -220,6 +220,7 @@ type StatefulSetSpec struct {
     	// the network identity of the set. Pods get DNS/hostnames that follow the
     	// pattern: pod-specific-string.serviceName.default.svc.cluster.local
     	// where "pod-specific-string" is managed by the StatefulSet controller.
    +	// +optional
     	ServiceName string `json:"serviceName" protobuf:"bytes,5,opt,name=serviceName"`
     
     	// podManagementPolicy controls how pods are created during initial scale up,
    @@ -486,19 +487,19 @@ type DeploymentStatus struct {
     	// +optional
     	ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,1,opt,name=observedGeneration"`
     
    -	// Total number of non-terminated pods targeted by this deployment (their labels match the selector).
    +	// Total number of non-terminating pods targeted by this deployment (their labels match the selector).
     	// +optional
     	Replicas int32 `json:"replicas,omitempty" protobuf:"varint,2,opt,name=replicas"`
     
    -	// Total number of non-terminated pods targeted by this deployment that have the desired template spec.
    +	// Total number of non-terminating pods targeted by this deployment that have the desired template spec.
     	// +optional
     	UpdatedReplicas int32 `json:"updatedReplicas,omitempty" protobuf:"varint,3,opt,name=updatedReplicas"`
     
    -	// readyReplicas is the number of pods targeted by this Deployment with a Ready Condition.
    +	// Total number of non-terminating pods targeted by this Deployment with a Ready Condition.
     	// +optional
     	ReadyReplicas int32 `json:"readyReplicas,omitempty" protobuf:"varint,7,opt,name=readyReplicas"`
     
    -	// Total number of available pods (ready for at least minReadySeconds) targeted by this deployment.
    +	// Total number of available non-terminating pods (ready for at least minReadySeconds) targeted by this deployment.
     	// +optional
     	AvailableReplicas int32 `json:"availableReplicas,omitempty" protobuf:"varint,4,opt,name=availableReplicas"`
     
    @@ -508,6 +509,13 @@ type DeploymentStatus struct {
     	// +optional
     	UnavailableReplicas int32 `json:"unavailableReplicas,omitempty" protobuf:"varint,5,opt,name=unavailableReplicas"`
     
    +	// Total number of terminating pods targeted by this deployment. Terminating pods have a non-null
    +	// .metadata.deletionTimestamp and have not yet reached the Failed or Succeeded .status.phase.
    +	//
    +	// This is an alpha field. Enable DeploymentReplicaSetTerminatingReplicas to be able to use this field.
    +	// +optional
    +	TerminatingReplicas *int32 `json:"terminatingReplicas,omitempty" protobuf:"varint,9,opt,name=terminatingReplicas"`
    +
     	// Represents the latest available observations of a deployment's current state.
     	// +patchMergeKey=type
     	// +patchStrategy=merge
    @@ -627,7 +635,7 @@ type RollingUpdateDaemonSet struct {
     	// pod is available (Ready for at least minReadySeconds) the old DaemonSet pod
     	// on that node is marked deleted. If the old pod becomes unavailable for any
     	// reason (Ready transitions to false, is evicted, or is drained) an updated
    -	// pod is immediatedly created on that node without considering surge limits.
    +	// pod is immediately created on that node without considering surge limits.
     	// Allowing surge implies the possibility that the resources consumed by the
     	// daemonset on any given node can double if the readiness check fails, and
     	// so resource intensive daemonsets should take into account that they may
    @@ -839,16 +847,16 @@ type ReplicaSetList struct {
     	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
     
     	// List of ReplicaSets.
    -	// More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller
    +	// More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset
     	Items []ReplicaSet `json:"items" protobuf:"bytes,2,rep,name=items"`
     }
     
     // ReplicaSetSpec is the specification of a ReplicaSet.
     type ReplicaSetSpec struct {
    -	// Replicas is the number of desired replicas.
    +	// Replicas is the number of desired pods.
     	// This is a pointer to distinguish between explicit zero and unspecified.
     	// Defaults to 1.
    -	// More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller
    +	// More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset
     	// +optional
     	Replicas *int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"`
     
    @@ -866,29 +874,36 @@ type ReplicaSetSpec struct {
     
     	// Template is the object that describes the pod that will be created if
     	// insufficient replicas are detected.
    -	// More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template
    +	// More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset/#pod-template
     	// +optional
     	Template v1.PodTemplateSpec `json:"template,omitempty" protobuf:"bytes,3,opt,name=template"`
     }
     
     // ReplicaSetStatus represents the current status of a ReplicaSet.
     type ReplicaSetStatus struct {
    -	// Replicas is the most recently observed number of replicas.
    -	// More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller
    +	// Replicas is the most recently observed number of non-terminating pods.
    +	// More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset
     	Replicas int32 `json:"replicas" protobuf:"varint,1,opt,name=replicas"`
     
    -	// The number of pods that have labels matching the labels of the pod template of the replicaset.
    +	// The number of non-terminating pods that have labels matching the labels of the pod template of the replicaset.
     	// +optional
     	FullyLabeledReplicas int32 `json:"fullyLabeledReplicas,omitempty" protobuf:"varint,2,opt,name=fullyLabeledReplicas"`
     
    -	// readyReplicas is the number of pods targeted by this ReplicaSet with a Ready Condition.
    +	// The number of non-terminating pods targeted by this ReplicaSet with a Ready Condition.
     	// +optional
     	ReadyReplicas int32 `json:"readyReplicas,omitempty" protobuf:"varint,4,opt,name=readyReplicas"`
     
    -	// The number of available replicas (ready for at least minReadySeconds) for this replica set.
    +	// The number of available non-terminating pods (ready for at least minReadySeconds) for this replica set.
     	// +optional
     	AvailableReplicas int32 `json:"availableReplicas,omitempty" protobuf:"varint,5,opt,name=availableReplicas"`
     
    +	// The number of terminating pods for this replica set. Terminating pods have a non-null .metadata.deletionTimestamp
    +	// and have not yet reached the Failed or Succeeded .status.phase.
    +	//
    +	// This is an alpha field. Enable DeploymentReplicaSetTerminatingReplicas to be able to use this field.
    +	// +optional
    +	TerminatingReplicas *int32 `json:"terminatingReplicas,omitempty" protobuf:"varint,7,opt,name=terminatingReplicas"`
    +
     	// ObservedGeneration reflects the generation of the most recently observed ReplicaSet.
     	// +optional
     	ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,3,opt,name=observedGeneration"`
    diff --git a/vendor/k8s.io/api/apps/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/apps/v1/types_swagger_doc_generated.go
    index 341ecdadb..ac54033fd 100644
    --- a/vendor/k8s.io/api/apps/v1/types_swagger_doc_generated.go
    +++ b/vendor/k8s.io/api/apps/v1/types_swagger_doc_generated.go
    @@ -177,11 +177,12 @@ func (DeploymentSpec) SwaggerDoc() map[string]string {
     var map_DeploymentStatus = map[string]string{
     	"":                    "DeploymentStatus is the most recently observed status of the Deployment.",
     	"observedGeneration":  "The generation observed by the deployment controller.",
    -	"replicas":            "Total number of non-terminated pods targeted by this deployment (their labels match the selector).",
    -	"updatedReplicas":     "Total number of non-terminated pods targeted by this deployment that have the desired template spec.",
    -	"readyReplicas":       "readyReplicas is the number of pods targeted by this Deployment with a Ready Condition.",
    -	"availableReplicas":   "Total number of available pods (ready for at least minReadySeconds) targeted by this deployment.",
    +	"replicas":            "Total number of non-terminating pods targeted by this deployment (their labels match the selector).",
    +	"updatedReplicas":     "Total number of non-terminating pods targeted by this deployment that have the desired template spec.",
    +	"readyReplicas":       "Total number of non-terminating pods targeted by this Deployment with a Ready Condition.",
    +	"availableReplicas":   "Total number of available non-terminating pods (ready for at least minReadySeconds) targeted by this deployment.",
     	"unavailableReplicas": "Total number of unavailable pods targeted by this deployment. This is the total number of pods that are still required for the deployment to have 100% available capacity. They may either be pods that are running but not yet available or pods that still have not been created.",
    +	"terminatingReplicas": "Total number of terminating pods targeted by this deployment. Terminating pods have a non-null .metadata.deletionTimestamp and have not yet reached the Failed or Succeeded .status.phase.\n\nThis is an alpha field. Enable DeploymentReplicaSetTerminatingReplicas to be able to use this field.",
     	"conditions":          "Represents the latest available observations of a deployment's current state.",
     	"collisionCount":      "Count of hash collisions for the Deployment. The Deployment controller uses this field as a collision avoidance mechanism when it needs to create the name for the newest ReplicaSet.",
     }
    @@ -227,7 +228,7 @@ func (ReplicaSetCondition) SwaggerDoc() map[string]string {
     var map_ReplicaSetList = map[string]string{
     	"":         "ReplicaSetList is a collection of ReplicaSets.",
     	"metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
    -	"items":    "List of ReplicaSets. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller",
    +	"items":    "List of ReplicaSets. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset",
     }
     
     func (ReplicaSetList) SwaggerDoc() map[string]string {
    @@ -236,10 +237,10 @@ func (ReplicaSetList) SwaggerDoc() map[string]string {
     
     var map_ReplicaSetSpec = map[string]string{
     	"":                "ReplicaSetSpec is the specification of a ReplicaSet.",
    -	"replicas":        "Replicas is the number of desired replicas. This is a pointer to distinguish between explicit zero and unspecified. Defaults to 1. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller",
    +	"replicas":        "Replicas is the number of desired pods. This is a pointer to distinguish between explicit zero and unspecified. Defaults to 1. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset",
     	"minReadySeconds": "Minimum number of seconds for which a newly created pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)",
     	"selector":        "Selector is a label query over pods that should match the replica count. Label keys and values that must match in order to be controlled by this replica set. It must match the pod template's labels. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors",
    -	"template":        "Template is the object that describes the pod that will be created if insufficient replicas are detected. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template",
    +	"template":        "Template is the object that describes the pod that will be created if insufficient replicas are detected. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset/#pod-template",
     }
     
     func (ReplicaSetSpec) SwaggerDoc() map[string]string {
    @@ -248,10 +249,11 @@ func (ReplicaSetSpec) SwaggerDoc() map[string]string {
     
     var map_ReplicaSetStatus = map[string]string{
     	"":                     "ReplicaSetStatus represents the current status of a ReplicaSet.",
    -	"replicas":             "Replicas is the most recently observed number of replicas. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller",
    -	"fullyLabeledReplicas": "The number of pods that have labels matching the labels of the pod template of the replicaset.",
    -	"readyReplicas":        "readyReplicas is the number of pods targeted by this ReplicaSet with a Ready Condition.",
    -	"availableReplicas":    "The number of available replicas (ready for at least minReadySeconds) for this replica set.",
    +	"replicas":             "Replicas is the most recently observed number of non-terminating pods. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset",
    +	"fullyLabeledReplicas": "The number of non-terminating pods that have labels matching the labels of the pod template of the replicaset.",
    +	"readyReplicas":        "The number of non-terminating pods targeted by this ReplicaSet with a Ready Condition.",
    +	"availableReplicas":    "The number of available non-terminating pods (ready for at least minReadySeconds) for this replica set.",
    +	"terminatingReplicas":  "The number of terminating pods for this replica set. Terminating pods have a non-null .metadata.deletionTimestamp and have not yet reached the Failed or Succeeded .status.phase.\n\nThis is an alpha field. Enable DeploymentReplicaSetTerminatingReplicas to be able to use this field.",
     	"observedGeneration":   "ObservedGeneration reflects the generation of the most recently observed ReplicaSet.",
     	"conditions":           "Represents the latest available observations of a replica set's current state.",
     }
    @@ -263,7 +265,7 @@ func (ReplicaSetStatus) SwaggerDoc() map[string]string {
     var map_RollingUpdateDaemonSet = map[string]string{
     	"":               "Spec to control the desired behavior of daemon set rolling update.",
     	"maxUnavailable": "The maximum number of DaemonSet pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of total number of DaemonSet pods at the start of the update (ex: 10%). Absolute number is calculated from percentage by rounding up. This cannot be 0 if MaxSurge is 0 Default value is 1. Example: when this is set to 30%, at most 30% of the total number of nodes that should be running the daemon pod (i.e. status.desiredNumberScheduled) can have their pods stopped for an update at any given time. The update starts by stopping at most 30% of those DaemonSet pods and then brings up new DaemonSet pods in their place. Once the new pods are available, it then proceeds onto other DaemonSet pods, thus ensuring that at least 70% of original number of DaemonSet pods are available at all times during the update.",
    -	"maxSurge":       "The maximum number of nodes with an existing available DaemonSet pod that can have an updated DaemonSet pod during during an update. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). This can not be 0 if MaxUnavailable is 0. Absolute number is calculated from percentage by rounding up to a minimum of 1. Default value is 0. Example: when this is set to 30%, at most 30% of the total number of nodes that should be running the daemon pod (i.e. status.desiredNumberScheduled) can have their a new pod created before the old pod is marked as deleted. The update starts by launching new pods on 30% of nodes. Once an updated pod is available (Ready for at least minReadySeconds) the old DaemonSet pod on that node is marked deleted. If the old pod becomes unavailable for any reason (Ready transitions to false, is evicted, or is drained) an updated pod is immediatedly created on that node without considering surge limits. Allowing surge implies the possibility that the resources consumed by the daemonset on any given node can double if the readiness check fails, and so resource intensive daemonsets should take into account that they may cause evictions during disruption.",
    +	"maxSurge":       "The maximum number of nodes with an existing available DaemonSet pod that can have an updated DaemonSet pod during during an update. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). This can not be 0 if MaxUnavailable is 0. Absolute number is calculated from percentage by rounding up to a minimum of 1. Default value is 0. Example: when this is set to 30%, at most 30% of the total number of nodes that should be running the daemon pod (i.e. status.desiredNumberScheduled) can have their a new pod created before the old pod is marked as deleted. The update starts by launching new pods on 30% of nodes. Once an updated pod is available (Ready for at least minReadySeconds) the old DaemonSet pod on that node is marked deleted. If the old pod becomes unavailable for any reason (Ready transitions to false, is evicted, or is drained) an updated pod is immediately created on that node without considering surge limits. Allowing surge implies the possibility that the resources consumed by the daemonset on any given node can double if the readiness check fails, and so resource intensive daemonsets should take into account that they may cause evictions during disruption.",
     }
     
     func (RollingUpdateDaemonSet) SwaggerDoc() map[string]string {
    diff --git a/vendor/k8s.io/api/apps/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/apps/v1/zz_generated.deepcopy.go
    index 6912986ac..9e67658ba 100644
    --- a/vendor/k8s.io/api/apps/v1/zz_generated.deepcopy.go
    +++ b/vendor/k8s.io/api/apps/v1/zz_generated.deepcopy.go
    @@ -363,6 +363,11 @@ func (in *DeploymentSpec) DeepCopy() *DeploymentSpec {
     // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
     func (in *DeploymentStatus) DeepCopyInto(out *DeploymentStatus) {
     	*out = *in
    +	if in.TerminatingReplicas != nil {
    +		in, out := &in.TerminatingReplicas, &out.TerminatingReplicas
    +		*out = new(int32)
    +		**out = **in
    +	}
     	if in.Conditions != nil {
     		in, out := &in.Conditions, &out.Conditions
     		*out = make([]DeploymentCondition, len(*in))
    @@ -517,6 +522,11 @@ func (in *ReplicaSetSpec) DeepCopy() *ReplicaSetSpec {
     // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
     func (in *ReplicaSetStatus) DeepCopyInto(out *ReplicaSetStatus) {
     	*out = *in
    +	if in.TerminatingReplicas != nil {
    +		in, out := &in.TerminatingReplicas, &out.TerminatingReplicas
    +		*out = new(int32)
    +		**out = **in
    +	}
     	if in.Conditions != nil {
     		in, out := &in.Conditions, &out.Conditions
     		*out = make([]ReplicaSetCondition, len(*in))
    diff --git a/vendor/k8s.io/api/apps/v1beta1/doc.go b/vendor/k8s.io/api/apps/v1beta1/doc.go
    index 38a358551..7770fab5d 100644
    --- a/vendor/k8s.io/api/apps/v1beta1/doc.go
    +++ b/vendor/k8s.io/api/apps/v1beta1/doc.go
    @@ -19,4 +19,4 @@ limitations under the License.
     // +k8s:openapi-gen=true
     // +k8s:prerelease-lifecycle-gen=true
     
    -package v1beta1 // import "k8s.io/api/apps/v1beta1"
    +package v1beta1
    diff --git a/vendor/k8s.io/api/apps/v1beta1/generated.pb.go b/vendor/k8s.io/api/apps/v1beta1/generated.pb.go
    index 76e755b4a..ae84aaf48 100644
    --- a/vendor/k8s.io/api/apps/v1beta1/generated.pb.go
    +++ b/vendor/k8s.io/api/apps/v1beta1/generated.pb.go
    @@ -728,134 +728,135 @@ func init() {
     }
     
     var fileDescriptor_2747f709ac7c95e7 = []byte{
    -	// 2018 bytes of a gzipped FileDescriptorProto
    -	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x59, 0xcd, 0x6f, 0x1b, 0xc7,
    -	0x15, 0xf7, 0x52, 0xa2, 0x44, 0x3d, 0x45, 0x94, 0x3d, 0x52, 0x2d, 0x46, 0x69, 0x25, 0x61, 0x63,
    -	0xc4, 0x4a, 0x62, 0x2f, 0x63, 0x25, 0x0d, 0x12, 0xbb, 0x75, 0x21, 0x4a, 0x6e, 0xec, 0x40, 0x8a,
    -	0x94, 0x91, 0x64, 0xa3, 0xe9, 0x07, 0x32, 0x22, 0xc7, 0xd4, 0x46, 0xfb, 0x85, 0xdd, 0x21, 0x63,
    -	0xa2, 0x97, 0xfe, 0x01, 0x05, 0xd2, 0x73, 0xff, 0x8a, 0xf6, 0xd4, 0xa2, 0x45, 0x2f, 0x3d, 0x14,
    -	0x3e, 0x06, 0xbd, 0x34, 0x27, 0xa2, 0x66, 0xae, 0xed, 0xad, 0xbd, 0x18, 0x28, 0x50, 0xcc, 0xec,
    -	0xec, 0xf7, 0xae, 0xb4, 0x2c, 0x60, 0x01, 0xcd, 0x8d, 0x3b, 0xef, 0xbd, 0xdf, 0x7b, 0xf3, 0xe6,
    -	0xbd, 0x37, 0xef, 0x0d, 0xe1, 0xfa, 0xe9, 0x7b, 0x9e, 0xa6, 0xdb, 0x4d, 0xe2, 0xe8, 0x4d, 0xe2,
    -	0x38, 0x5e, 0xb3, 0x7f, 0xeb, 0x98, 0x32, 0x72, 0xab, 0xd9, 0xa5, 0x16, 0x75, 0x09, 0xa3, 0x1d,
    -	0xcd, 0x71, 0x6d, 0x66, 0xa3, 0x25, 0x9f, 0x51, 0x23, 0x8e, 0xae, 0x71, 0x46, 0x4d, 0x32, 0x2e,
    -	0xdf, 0xec, 0xea, 0xec, 0xa4, 0x77, 0xac, 0xb5, 0x6d, 0xb3, 0xd9, 0xb5, 0xbb, 0x76, 0x53, 0xf0,
    -	0x1f, 0xf7, 0x1e, 0x8b, 0x2f, 0xf1, 0x21, 0x7e, 0xf9, 0x38, 0xcb, 0x6a, 0x4c, 0x61, 0xdb, 0x76,
    -	0x69, 0xb3, 0x9f, 0xd1, 0xb5, 0xfc, 0x4e, 0xc4, 0x63, 0x92, 0xf6, 0x89, 0x6e, 0x51, 0x77, 0xd0,
    -	0x74, 0x4e, 0xbb, 0x7c, 0xc1, 0x6b, 0x9a, 0x94, 0x91, 0x3c, 0xa9, 0x66, 0x91, 0x94, 0xdb, 0xb3,
    -	0x98, 0x6e, 0xd2, 0x8c, 0xc0, 0xbb, 0xe7, 0x09, 0x78, 0xed, 0x13, 0x6a, 0x92, 0x8c, 0xdc, 0xdb,
    -	0x45, 0x72, 0x3d, 0xa6, 0x1b, 0x4d, 0xdd, 0x62, 0x1e, 0x73, 0xd3, 0x42, 0xea, 0xbf, 0x15, 0x40,
    -	0x5b, 0xb6, 0xc5, 0x5c, 0xdb, 0x30, 0xa8, 0x8b, 0x69, 0x5f, 0xf7, 0x74, 0xdb, 0x42, 0x9f, 0x42,
    -	0x8d, 0xef, 0xa7, 0x43, 0x18, 0x69, 0x28, 0x6b, 0xca, 0xfa, 0xec, 0xc6, 0x5b, 0x5a, 0xe4, 0xe9,
    -	0x10, 0x5e, 0x73, 0x4e, 0xbb, 0x7c, 0xc1, 0xd3, 0x38, 0xb7, 0xd6, 0xbf, 0xa5, 0xed, 0x1d, 0x7f,
    -	0x46, 0xdb, 0x6c, 0x97, 0x32, 0xd2, 0x42, 0x4f, 0x87, 0xab, 0x97, 0x46, 0xc3, 0x55, 0x88, 0xd6,
    -	0x70, 0x88, 0x8a, 0xf6, 0x60, 0x52, 0xa0, 0x57, 0x04, 0xfa, 0xcd, 0x42, 0x74, 0xb9, 0x69, 0x0d,
    -	0x93, 0xcf, 0xef, 0x3d, 0x61, 0xd4, 0xe2, 0xe6, 0xb5, 0x5e, 0x92, 0xd0, 0x93, 0xdb, 0x84, 0x11,
    -	0x2c, 0x80, 0xd0, 0x0d, 0xa8, 0xb9, 0xd2, 0xfc, 0xc6, 0xc4, 0x9a, 0xb2, 0x3e, 0xd1, 0xba, 0x2c,
    -	0xb9, 0x6a, 0xc1, 0xb6, 0x70, 0xc8, 0xa1, 0x3e, 0x55, 0xe0, 0x6a, 0x76, 0xdf, 0x3b, 0xba, 0xc7,
    -	0xd0, 0x4f, 0x32, 0x7b, 0xd7, 0xca, 0xed, 0x9d, 0x4b, 0x8b, 0x9d, 0x87, 0x8a, 0x83, 0x95, 0xd8,
    -	0xbe, 0xf7, 0xa1, 0xaa, 0x33, 0x6a, 0x7a, 0x8d, 0xca, 0xda, 0xc4, 0xfa, 0xec, 0xc6, 0x9b, 0x5a,
    -	0x41, 0x00, 0x6b, 0x59, 0xeb, 0x5a, 0x73, 0x12, 0xb7, 0xfa, 0x80, 0x23, 0x60, 0x1f, 0x48, 0xfd,
    -	0x65, 0x05, 0x60, 0x9b, 0x3a, 0x86, 0x3d, 0x30, 0xa9, 0xc5, 0x2e, 0xe0, 0xe8, 0x1e, 0xc0, 0xa4,
    -	0xe7, 0xd0, 0xb6, 0x3c, 0xba, 0xeb, 0x85, 0x3b, 0x88, 0x8c, 0x3a, 0x70, 0x68, 0x3b, 0x3a, 0x34,
    -	0xfe, 0x85, 0x05, 0x04, 0xfa, 0x18, 0xa6, 0x3c, 0x46, 0x58, 0xcf, 0x13, 0x47, 0x36, 0xbb, 0xf1,
    -	0x7a, 0x19, 0x30, 0x21, 0xd0, 0xaa, 0x4b, 0xb8, 0x29, 0xff, 0x1b, 0x4b, 0x20, 0xf5, 0x6f, 0x13,
    -	0xb0, 0x10, 0x31, 0x6f, 0xd9, 0x56, 0x47, 0x67, 0x3c, 0xa4, 0xef, 0xc0, 0x24, 0x1b, 0x38, 0x54,
    -	0xf8, 0x64, 0xa6, 0x75, 0x3d, 0x30, 0xe6, 0x70, 0xe0, 0xd0, 0xe7, 0xc3, 0xd5, 0xa5, 0x1c, 0x11,
    -	0x4e, 0xc2, 0x42, 0x08, 0xed, 0x84, 0x76, 0x56, 0x84, 0xf8, 0x3b, 0x49, 0xe5, 0xcf, 0x87, 0xab,
    -	0x39, 0x05, 0x44, 0x0b, 0x91, 0x92, 0x26, 0xa2, 0xcf, 0xa0, 0x6e, 0x10, 0x8f, 0x1d, 0x39, 0x1d,
    -	0xc2, 0xe8, 0xa1, 0x6e, 0xd2, 0xc6, 0x94, 0xd8, 0xfd, 0x1b, 0xe5, 0x0e, 0x8a, 0x4b, 0xb4, 0xae,
    -	0x4a, 0x0b, 0xea, 0x3b, 0x09, 0x24, 0x9c, 0x42, 0x46, 0x7d, 0x40, 0x7c, 0xe5, 0xd0, 0x25, 0x96,
    -	0xe7, 0xef, 0x8a, 0xeb, 0x9b, 0x1e, 0x5b, 0xdf, 0xb2, 0xd4, 0x87, 0x76, 0x32, 0x68, 0x38, 0x47,
    -	0x03, 0x7a, 0x0d, 0xa6, 0x5c, 0x4a, 0x3c, 0xdb, 0x6a, 0x4c, 0x0a, 0x8f, 0x85, 0xc7, 0x85, 0xc5,
    -	0x2a, 0x96, 0x54, 0xf4, 0x3a, 0x4c, 0x9b, 0xd4, 0xf3, 0x48, 0x97, 0x36, 0xaa, 0x82, 0x71, 0x5e,
    -	0x32, 0x4e, 0xef, 0xfa, 0xcb, 0x38, 0xa0, 0xab, 0xbf, 0x57, 0xa0, 0x1e, 0x1d, 0xd3, 0x05, 0xe4,
    -	0xea, 0xfd, 0x64, 0xae, 0xbe, 0x5a, 0x22, 0x38, 0x0b, 0x72, 0xf4, 0x1f, 0x15, 0x40, 0x11, 0x13,
    -	0xb6, 0x0d, 0xe3, 0x98, 0xb4, 0x4f, 0xd1, 0x1a, 0x4c, 0x5a, 0xc4, 0x0c, 0x62, 0x32, 0x4c, 0x90,
    -	0x8f, 0x88, 0x49, 0xb1, 0xa0, 0xa0, 0x2f, 0x14, 0x40, 0x3d, 0x71, 0x9a, 0x9d, 0x4d, 0xcb, 0xb2,
    -	0x19, 0xe1, 0x0e, 0x0e, 0x0c, 0xda, 0x2a, 0x61, 0x50, 0xa0, 0x4b, 0x3b, 0xca, 0xa0, 0xdc, 0xb3,
    -	0x98, 0x3b, 0x88, 0x0e, 0x36, 0xcb, 0x80, 0x73, 0x54, 0xa3, 0x1f, 0x03, 0xb8, 0x12, 0xf3, 0xd0,
    -	0x96, 0x69, 0x5b, 0x5c, 0x03, 0x02, 0xf5, 0x5b, 0xb6, 0xf5, 0x58, 0xef, 0x46, 0x85, 0x05, 0x87,
    -	0x10, 0x38, 0x06, 0xb7, 0x7c, 0x0f, 0x96, 0x0a, 0xec, 0x44, 0x97, 0x61, 0xe2, 0x94, 0x0e, 0x7c,
    -	0x57, 0x61, 0xfe, 0x13, 0x2d, 0x42, 0xb5, 0x4f, 0x8c, 0x1e, 0xf5, 0x73, 0x12, 0xfb, 0x1f, 0xb7,
    -	0x2b, 0xef, 0x29, 0xea, 0x6f, 0xaa, 0xf1, 0x48, 0xe1, 0xf5, 0x06, 0xad, 0xf3, 0xeb, 0xc1, 0x31,
    -	0xf4, 0x36, 0xf1, 0x04, 0x46, 0xb5, 0xf5, 0x92, 0x7f, 0x35, 0xf8, 0x6b, 0x38, 0xa4, 0xa2, 0x9f,
    -	0x42, 0xcd, 0xa3, 0x06, 0x6d, 0x33, 0xdb, 0x95, 0x25, 0xee, 0xed, 0x92, 0x31, 0x45, 0x8e, 0xa9,
    -	0x71, 0x20, 0x45, 0x7d, 0xf8, 0xe0, 0x0b, 0x87, 0x90, 0xe8, 0x63, 0xa8, 0x31, 0x6a, 0x3a, 0x06,
    -	0x61, 0x54, 0x7a, 0x2f, 0x11, 0x57, 0xbc, 0x76, 0x70, 0xb0, 0x7d, 0xbb, 0x73, 0x28, 0xd9, 0x44,
    -	0xf5, 0x0c, 0xe3, 0x34, 0x58, 0xc5, 0x21, 0x0c, 0xfa, 0x11, 0xd4, 0x3c, 0xc6, 0x6f, 0xf5, 0xee,
    -	0x40, 0x64, 0xdb, 0x59, 0xd7, 0x4a, 0xbc, 0x8e, 0xfa, 0x22, 0x11, 0x74, 0xb0, 0x82, 0x43, 0x38,
    -	0xb4, 0x09, 0xf3, 0xa6, 0x6e, 0x61, 0x4a, 0x3a, 0x83, 0x03, 0xda, 0xb6, 0xad, 0x8e, 0x27, 0xd2,
    -	0xb4, 0xda, 0x5a, 0x92, 0x42, 0xf3, 0xbb, 0x49, 0x32, 0x4e, 0xf3, 0xa3, 0x1d, 0x58, 0x0c, 0xae,
    -	0xdd, 0xfb, 0xba, 0xc7, 0x6c, 0x77, 0xb0, 0xa3, 0x9b, 0x3a, 0x13, 0x35, 0xaf, 0xda, 0x6a, 0x8c,
    -	0x86, 0xab, 0x8b, 0x38, 0x87, 0x8e, 0x73, 0xa5, 0x78, 0x5d, 0x71, 0x48, 0xcf, 0xa3, 0x1d, 0x51,
    -	0xc3, 0x6a, 0x51, 0x5d, 0xd9, 0x17, 0xab, 0x58, 0x52, 0xd1, 0xa3, 0x44, 0x98, 0xd6, 0xc6, 0x0b,
    -	0xd3, 0x7a, 0x71, 0x88, 0xa2, 0x23, 0x58, 0x72, 0x5c, 0xbb, 0xeb, 0x52, 0xcf, 0xdb, 0xa6, 0xa4,
    -	0x63, 0xe8, 0x16, 0x0d, 0x3c, 0x33, 0x23, 0x76, 0xf4, 0xca, 0x68, 0xb8, 0xba, 0xb4, 0x9f, 0xcf,
    -	0x82, 0x8b, 0x64, 0xd5, 0x3f, 0x4f, 0xc2, 0xe5, 0xf4, 0x1d, 0x87, 0x3e, 0x04, 0x64, 0x1f, 0x7b,
    -	0xd4, 0xed, 0xd3, 0xce, 0x07, 0x7e, 0xe3, 0xc6, 0xbb, 0x1b, 0x45, 0x74, 0x37, 0x61, 0xde, 0xee,
    -	0x65, 0x38, 0x70, 0x8e, 0x94, 0xdf, 0x1f, 0xc9, 0x04, 0xa8, 0x08, 0x43, 0x63, 0xfd, 0x51, 0x26,
    -	0x09, 0x36, 0x61, 0x5e, 0xe6, 0x7e, 0x40, 0x14, 0xc1, 0x1a, 0x3b, 0xf7, 0xa3, 0x24, 0x19, 0xa7,
    -	0xf9, 0xd1, 0x1d, 0x98, 0x73, 0x79, 0x1c, 0x84, 0x00, 0xd3, 0x02, 0xe0, 0x5b, 0x12, 0x60, 0x0e,
    -	0xc7, 0x89, 0x38, 0xc9, 0x8b, 0x3e, 0x80, 0x2b, 0xa4, 0x4f, 0x74, 0x83, 0x1c, 0x1b, 0x34, 0x04,
    -	0x98, 0x14, 0x00, 0x2f, 0x4b, 0x80, 0x2b, 0x9b, 0x69, 0x06, 0x9c, 0x95, 0x41, 0xbb, 0xb0, 0xd0,
    -	0xb3, 0xb2, 0x50, 0x7e, 0x10, 0xbf, 0x22, 0xa1, 0x16, 0x8e, 0xb2, 0x2c, 0x38, 0x4f, 0x0e, 0x7d,
    -	0x0a, 0xd0, 0x0e, 0x6e, 0x75, 0xaf, 0x31, 0x25, 0xca, 0xf0, 0x8d, 0x12, 0xc9, 0x16, 0xb6, 0x02,
    -	0x51, 0x09, 0x0c, 0x97, 0x3c, 0x1c, 0xc3, 0x44, 0xb7, 0xa1, 0xde, 0xb6, 0x0d, 0x43, 0x44, 0xfe,
    -	0x96, 0xdd, 0xb3, 0x98, 0x08, 0xde, 0x6a, 0x0b, 0xf1, 0xcb, 0x7e, 0x2b, 0x41, 0xc1, 0x29, 0x4e,
    -	0xf5, 0x8f, 0x4a, 0xfc, 0x9a, 0x09, 0xd2, 0x19, 0xdd, 0x4e, 0xb4, 0x3e, 0xaf, 0xa5, 0x5a, 0x9f,
    -	0xab, 0x59, 0x89, 0x58, 0xe7, 0xa3, 0xc3, 0x1c, 0x0f, 0x7e, 0xdd, 0xea, 0xfa, 0x07, 0x2e, 0x4b,
    -	0xe2, 0x5b, 0x67, 0xa6, 0x52, 0xc8, 0x1d, 0xbb, 0x18, 0xaf, 0x88, 0x33, 0x8f, 0x13, 0x71, 0x12,
    -	0x59, 0xbd, 0x0b, 0xf5, 0x64, 0x1e, 0x26, 0x7a, 0x7a, 0xe5, 0xdc, 0x9e, 0xfe, 0x6b, 0x05, 0x96,
    -	0x0a, 0xb4, 0x23, 0x03, 0xea, 0x26, 0x79, 0x12, 0x3b, 0xe6, 0x73, 0x7b, 0x63, 0x3e, 0x35, 0x69,
    -	0xfe, 0xd4, 0xa4, 0x3d, 0xb0, 0xd8, 0x9e, 0x7b, 0xc0, 0x5c, 0xdd, 0xea, 0xfa, 0xe7, 0xb0, 0x9b,
    -	0xc0, 0xc2, 0x29, 0x6c, 0xf4, 0x09, 0xd4, 0x4c, 0xf2, 0xe4, 0xa0, 0xe7, 0x76, 0xf3, 0xfc, 0x55,
    -	0x4e, 0x8f, 0xb8, 0x3f, 0x76, 0x25, 0x0a, 0x0e, 0xf1, 0xd4, 0x3f, 0x29, 0xb0, 0x96, 0xd8, 0x25,
    -	0xaf, 0x15, 0xf4, 0x71, 0xcf, 0x38, 0xa0, 0xd1, 0x89, 0xbf, 0x09, 0x33, 0x0e, 0x71, 0x99, 0x1e,
    -	0xd6, 0x8b, 0x6a, 0x6b, 0x6e, 0x34, 0x5c, 0x9d, 0xd9, 0x0f, 0x16, 0x71, 0x44, 0xcf, 0xf1, 0x4d,
    -	0xe5, 0xc5, 0xf9, 0x46, 0xfd, 0x8f, 0x02, 0xd5, 0x83, 0x36, 0x31, 0xe8, 0x05, 0x4c, 0x2a, 0xdb,
    -	0x89, 0x49, 0x45, 0x2d, 0x8c, 0x59, 0x61, 0x4f, 0xe1, 0x90, 0xb2, 0x93, 0x1a, 0x52, 0xae, 0x9d,
    -	0x83, 0x73, 0xf6, 0x7c, 0xf2, 0x3e, 0xcc, 0x84, 0xea, 0x12, 0x45, 0x59, 0x39, 0xaf, 0x28, 0xab,
    -	0xbf, 0xae, 0xc0, 0x6c, 0x4c, 0xc5, 0x78, 0xd2, 0xdc, 0xdd, 0xb1, 0xbe, 0x86, 0x17, 0xae, 0x8d,
    -	0x32, 0x1b, 0xd1, 0x82, 0x1e, 0xc6, 0x6f, 0x17, 0xa3, 0x66, 0x21, 0xdb, 0xda, 0xdc, 0x85, 0x3a,
    -	0x23, 0x6e, 0x97, 0xb2, 0x80, 0x26, 0x1c, 0x36, 0x13, 0xcd, 0x2a, 0x87, 0x09, 0x2a, 0x4e, 0x71,
    -	0x2f, 0xdf, 0x81, 0xb9, 0x84, 0xb2, 0xb1, 0x7a, 0xbe, 0x2f, 0xb8, 0x73, 0xa2, 0x54, 0xb8, 0x80,
    -	0xe8, 0xfa, 0x30, 0x11, 0x5d, 0xeb, 0xc5, 0xce, 0x8c, 0x25, 0x68, 0x51, 0x8c, 0xe1, 0x54, 0x8c,
    -	0xbd, 0x51, 0x0a, 0xed, 0xec, 0x48, 0xfb, 0x67, 0x05, 0x16, 0x63, 0xdc, 0xd1, 0x28, 0xfc, 0xbd,
    -	0xc4, 0x7d, 0xb0, 0x9e, 0xba, 0x0f, 0x1a, 0x79, 0x32, 0x2f, 0x6c, 0x16, 0xce, 0x9f, 0x4f, 0x27,
    -	0xfe, 0x1f, 0xe7, 0xd3, 0x3f, 0x28, 0x30, 0x1f, 0xf3, 0xdd, 0x05, 0x0c, 0xa8, 0x0f, 0x92, 0x03,
    -	0xea, 0xb5, 0x32, 0x41, 0x53, 0x30, 0xa1, 0xde, 0x86, 0x85, 0x18, 0xd3, 0x9e, 0xdb, 0xd1, 0x2d,
    -	0x62, 0x78, 0xe8, 0x55, 0xa8, 0x7a, 0x8c, 0xb8, 0x2c, 0xb8, 0x44, 0x02, 0xd9, 0x03, 0xbe, 0x88,
    -	0x7d, 0x9a, 0xfa, 0x2f, 0x05, 0x9a, 0x31, 0xe1, 0x7d, 0xea, 0x7a, 0xba, 0xc7, 0xa8, 0xc5, 0x1e,
    -	0xda, 0x46, 0xcf, 0xa4, 0x5b, 0x06, 0xd1, 0x4d, 0x4c, 0xf9, 0x82, 0x6e, 0x5b, 0xfb, 0xb6, 0xa1,
    -	0xb7, 0x07, 0x88, 0xc0, 0xec, 0xe7, 0x27, 0xd4, 0xda, 0xa6, 0x06, 0x65, 0xb4, 0x23, 0x43, 0xf1,
    -	0x07, 0x12, 0x7e, 0xf6, 0x51, 0x44, 0x7a, 0x3e, 0x5c, 0x5d, 0x2f, 0x83, 0x28, 0x22, 0x34, 0x8e,
    -	0x89, 0x7e, 0x06, 0xc0, 0x3f, 0x45, 0x2d, 0xeb, 0xc8, 0x60, 0xbd, 0x1b, 0x64, 0xf4, 0xa3, 0x90,
    -	0x32, 0x96, 0x82, 0x18, 0xa2, 0xfa, 0xdb, 0x5a, 0xe2, 0xbc, 0xbf, 0xf1, 0x63, 0xe6, 0xcf, 0x61,
    -	0xb1, 0x1f, 0x79, 0x27, 0x60, 0xe0, 0x6d, 0xf9, 0x44, 0xfa, 0xe9, 0x2e, 0x84, 0xcf, 0xf3, 0x6b,
    -	0xeb, 0xdb, 0x52, 0xc9, 0xe2, 0xc3, 0x1c, 0x38, 0x9c, 0xab, 0x04, 0x7d, 0x17, 0x66, 0xf9, 0x48,
    -	0xa3, 0xb7, 0xe9, 0x47, 0xc4, 0x0c, 0x72, 0x71, 0x21, 0x88, 0x97, 0x83, 0x88, 0x84, 0xe3, 0x7c,
    -	0xe8, 0x04, 0x16, 0x1c, 0xbb, 0xb3, 0x4b, 0x2c, 0xd2, 0xa5, 0xbc, 0x11, 0xf4, 0x8f, 0x52, 0xcc,
    -	0x9e, 0x33, 0xad, 0x77, 0x83, 0xf6, 0x7f, 0x3f, 0xcb, 0xf2, 0x9c, 0x0f, 0x71, 0xd9, 0x65, 0x11,
    -	0x04, 0x79, 0x90, 0xc8, 0x85, 0x7a, 0x4f, 0xf6, 0x63, 0x72, 0x14, 0xf7, 0x1f, 0xd9, 0x36, 0xca,
    -	0x24, 0xe5, 0x51, 0x42, 0x32, 0xba, 0x30, 0x93, 0xeb, 0x38, 0xa5, 0xa1, 0x70, 0xb4, 0xae, 0xfd,
    -	0x4f, 0xa3, 0x75, 0xce, 0xac, 0x3f, 0x33, 0xe6, 0xac, 0xff, 0x17, 0x05, 0xae, 0x39, 0x25, 0x72,
    -	0xa9, 0x01, 0xc2, 0x37, 0xf7, 0xcb, 0xf8, 0xa6, 0x4c, 0x6e, 0xb6, 0xd6, 0x47, 0xc3, 0xd5, 0x6b,
    -	0x65, 0x38, 0x71, 0x29, 0xfb, 0xd0, 0x43, 0xa8, 0xd9, 0xb2, 0x06, 0x36, 0x66, 0x85, 0xad, 0x37,
    -	0xca, 0xd8, 0x1a, 0xd4, 0x4d, 0x3f, 0x2d, 0x83, 0x2f, 0x1c, 0x62, 0xa9, 0xbf, 0xab, 0xc2, 0x95,
    -	0xcc, 0x0d, 0x8e, 0x7e, 0x78, 0xc6, 0x9c, 0x7f, 0xf5, 0x85, 0xcd, 0xf8, 0x99, 0x01, 0x7d, 0x62,
    -	0x8c, 0x01, 0x7d, 0x13, 0xe6, 0xdb, 0x3d, 0xd7, 0xa5, 0x16, 0x4b, 0x8d, 0xe7, 0x61, 0xb0, 0x6c,
    -	0x25, 0xc9, 0x38, 0xcd, 0x9f, 0xf7, 0xc6, 0x50, 0x1d, 0xf3, 0x8d, 0x21, 0x6e, 0x85, 0x9c, 0x13,
    -	0xfd, 0xd4, 0xce, 0x5a, 0x21, 0xc7, 0xc5, 0x34, 0x3f, 0x6f, 0x5a, 0x7d, 0xd4, 0x10, 0x61, 0x3a,
    -	0xd9, 0xb4, 0x1e, 0x25, 0xa8, 0x38, 0xc5, 0x9d, 0x33, 0xaf, 0xcf, 0x94, 0x9d, 0xd7, 0x11, 0x49,
    -	0xbc, 0x26, 0x80, 0xa8, 0xa3, 0x37, 0xcb, 0xc4, 0x59, 0xf9, 0xe7, 0x84, 0xdc, 0x87, 0x94, 0xd9,
    -	0xf1, 0x1f, 0x52, 0xd4, 0xbf, 0x2a, 0xf0, 0x72, 0x61, 0xc5, 0x42, 0x9b, 0x89, 0x96, 0xf2, 0x66,
    -	0xaa, 0xa5, 0xfc, 0x4e, 0xa1, 0x60, 0xac, 0xaf, 0x74, 0xf3, 0x5f, 0x1a, 0xde, 0x2f, 0xf7, 0xd2,
    -	0x90, 0x33, 0x05, 0x9f, 0xff, 0xe4, 0xd0, 0xfa, 0xfe, 0xd3, 0x67, 0x2b, 0x97, 0xbe, 0x7c, 0xb6,
    -	0x72, 0xe9, 0xab, 0x67, 0x2b, 0x97, 0x7e, 0x31, 0x5a, 0x51, 0x9e, 0x8e, 0x56, 0x94, 0x2f, 0x47,
    -	0x2b, 0xca, 0x57, 0xa3, 0x15, 0xe5, 0xef, 0xa3, 0x15, 0xe5, 0x57, 0x5f, 0xaf, 0x5c, 0xfa, 0x64,
    -	0xa9, 0xe0, 0xdf, 0xe8, 0xff, 0x06, 0x00, 0x00, 0xff, 0xff, 0xb9, 0xc9, 0xe6, 0x8c, 0xa7, 0x1e,
    -	0x00, 0x00,
    +	// 2041 bytes of a gzipped FileDescriptorProto
    +	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x59, 0xdd, 0x6f, 0x1b, 0xc7,
    +	0x11, 0xd7, 0x51, 0xa2, 0x44, 0x8d, 0x22, 0xca, 0x5e, 0xa9, 0x16, 0xa3, 0xb4, 0x92, 0x70, 0x31,
    +	0x62, 0x25, 0xb1, 0x8f, 0xb1, 0x92, 0x06, 0x89, 0xdd, 0xba, 0x10, 0x25, 0x37, 0x56, 0x20, 0x45,
    +	0xca, 0x4a, 0xb2, 0xd1, 0xf4, 0x03, 0x59, 0x91, 0x6b, 0xea, 0xa2, 0xfb, 0xc2, 0xdd, 0x52, 0x31,
    +	0xd1, 0x97, 0xfe, 0x01, 0x2d, 0xd2, 0xe7, 0xfe, 0x15, 0xed, 0x53, 0x8b, 0x16, 0x7d, 0x2d, 0xfc,
    +	0x18, 0xf4, 0xa5, 0x79, 0x22, 0x6a, 0xe6, 0xb5, 0x7d, 0x6b, 0x5f, 0x0c, 0x14, 0x28, 0x76, 0x6f,
    +	0xef, 0xfb, 0x4e, 0x3a, 0x16, 0xb0, 0x80, 0xe6, 0x8d, 0xb7, 0x33, 0xf3, 0x9b, 0xd9, 0xd9, 0x99,
    +	0xd9, 0x99, 0x25, 0xdc, 0x38, 0x7d, 0xcf, 0xd3, 0x74, 0xbb, 0x49, 0x1c, 0xbd, 0x49, 0x1c, 0xc7,
    +	0x6b, 0x9e, 0xdd, 0x3e, 0xa6, 0x8c, 0xdc, 0x6e, 0x76, 0xa9, 0x45, 0x5d, 0xc2, 0x68, 0x47, 0x73,
    +	0x5c, 0x9b, 0xd9, 0x68, 0xd1, 0x67, 0xd4, 0x88, 0xa3, 0x6b, 0x9c, 0x51, 0x93, 0x8c, 0x4b, 0xb7,
    +	0xba, 0x3a, 0x3b, 0xe9, 0x1d, 0x6b, 0x6d, 0xdb, 0x6c, 0x76, 0xed, 0xae, 0xdd, 0x14, 0xfc, 0xc7,
    +	0xbd, 0xc7, 0xe2, 0x4b, 0x7c, 0x88, 0x5f, 0x3e, 0xce, 0x92, 0x1a, 0x53, 0xd8, 0xb6, 0x5d, 0xda,
    +	0x3c, 0xcb, 0xe8, 0x5a, 0x7a, 0x27, 0xe2, 0x31, 0x49, 0xfb, 0x44, 0xb7, 0xa8, 0xdb, 0x6f, 0x3a,
    +	0xa7, 0x5d, 0xbe, 0xe0, 0x35, 0x4d, 0xca, 0x48, 0x9e, 0x54, 0xb3, 0x48, 0xca, 0xed, 0x59, 0x4c,
    +	0x37, 0x69, 0x46, 0xe0, 0xdd, 0x8b, 0x04, 0xbc, 0xf6, 0x09, 0x35, 0x49, 0x46, 0xee, 0xed, 0x22,
    +	0xb9, 0x1e, 0xd3, 0x8d, 0xa6, 0x6e, 0x31, 0x8f, 0xb9, 0x69, 0x21, 0xf5, 0xdf, 0x0a, 0xa0, 0x4d,
    +	0xdb, 0x62, 0xae, 0x6d, 0x18, 0xd4, 0xc5, 0xf4, 0x4c, 0xf7, 0x74, 0xdb, 0x42, 0x9f, 0x42, 0x8d,
    +	0xef, 0xa7, 0x43, 0x18, 0x69, 0x28, 0xab, 0xca, 0xda, 0xcc, 0xfa, 0x5b, 0x5a, 0xe4, 0xe9, 0x10,
    +	0x5e, 0x73, 0x4e, 0xbb, 0x7c, 0xc1, 0xd3, 0x38, 0xb7, 0x76, 0x76, 0x5b, 0xdb, 0x3b, 0xfe, 0x8c,
    +	0xb6, 0xd9, 0x2e, 0x65, 0xa4, 0x85, 0x9e, 0x0e, 0x56, 0xc6, 0x86, 0x83, 0x15, 0x88, 0xd6, 0x70,
    +	0x88, 0x8a, 0xf6, 0x60, 0x42, 0xa0, 0x57, 0x04, 0xfa, 0xad, 0x42, 0x74, 0xb9, 0x69, 0x0d, 0x93,
    +	0xcf, 0xef, 0x3f, 0x61, 0xd4, 0xe2, 0xe6, 0xb5, 0x5e, 0x92, 0xd0, 0x13, 0x5b, 0x84, 0x11, 0x2c,
    +	0x80, 0xd0, 0x4d, 0xa8, 0xb9, 0xd2, 0xfc, 0xc6, 0xf8, 0xaa, 0xb2, 0x36, 0xde, 0xba, 0x22, 0xb9,
    +	0x6a, 0xc1, 0xb6, 0x70, 0xc8, 0xa1, 0x3e, 0x55, 0xe0, 0x5a, 0x76, 0xdf, 0x3b, 0xba, 0xc7, 0xd0,
    +	0x4f, 0x32, 0x7b, 0xd7, 0xca, 0xed, 0x9d, 0x4b, 0x8b, 0x9d, 0x87, 0x8a, 0x83, 0x95, 0xd8, 0xbe,
    +	0xf7, 0xa1, 0xaa, 0x33, 0x6a, 0x7a, 0x8d, 0xca, 0xea, 0xf8, 0xda, 0xcc, 0xfa, 0x9b, 0x5a, 0x41,
    +	0x00, 0x6b, 0x59, 0xeb, 0x5a, 0xb3, 0x12, 0xb7, 0xba, 0xcd, 0x11, 0xb0, 0x0f, 0xa4, 0xfe, 0xb2,
    +	0x02, 0xb0, 0x45, 0x1d, 0xc3, 0xee, 0x9b, 0xd4, 0x62, 0x97, 0x70, 0x74, 0xdb, 0x30, 0xe1, 0x39,
    +	0xb4, 0x2d, 0x8f, 0xee, 0x46, 0xe1, 0x0e, 0x22, 0xa3, 0x0e, 0x1c, 0xda, 0x8e, 0x0e, 0x8d, 0x7f,
    +	0x61, 0x01, 0x81, 0x3e, 0x86, 0x49, 0x8f, 0x11, 0xd6, 0xf3, 0xc4, 0x91, 0xcd, 0xac, 0xbf, 0x5e,
    +	0x06, 0x4c, 0x08, 0xb4, 0xea, 0x12, 0x6e, 0xd2, 0xff, 0xc6, 0x12, 0x48, 0xfd, 0xdb, 0x38, 0xcc,
    +	0x47, 0xcc, 0x9b, 0xb6, 0xd5, 0xd1, 0x19, 0x0f, 0xe9, 0xbb, 0x30, 0xc1, 0xfa, 0x0e, 0x15, 0x3e,
    +	0x99, 0x6e, 0xdd, 0x08, 0x8c, 0x39, 0xec, 0x3b, 0xf4, 0xf9, 0x60, 0x65, 0x31, 0x47, 0x84, 0x93,
    +	0xb0, 0x10, 0x42, 0x3b, 0xa1, 0x9d, 0x15, 0x21, 0xfe, 0x4e, 0x52, 0xf9, 0xf3, 0xc1, 0x4a, 0x4e,
    +	0x01, 0xd1, 0x42, 0xa4, 0xa4, 0x89, 0xe8, 0x33, 0xa8, 0x1b, 0xc4, 0x63, 0x47, 0x4e, 0x87, 0x30,
    +	0x7a, 0xa8, 0x9b, 0xb4, 0x31, 0x29, 0x76, 0xff, 0x46, 0xb9, 0x83, 0xe2, 0x12, 0xad, 0x6b, 0xd2,
    +	0x82, 0xfa, 0x4e, 0x02, 0x09, 0xa7, 0x90, 0xd1, 0x19, 0x20, 0xbe, 0x72, 0xe8, 0x12, 0xcb, 0xf3,
    +	0x77, 0xc5, 0xf5, 0x4d, 0x8d, 0xac, 0x6f, 0x49, 0xea, 0x43, 0x3b, 0x19, 0x34, 0x9c, 0xa3, 0x01,
    +	0xbd, 0x06, 0x93, 0x2e, 0x25, 0x9e, 0x6d, 0x35, 0x26, 0x84, 0xc7, 0xc2, 0xe3, 0xc2, 0x62, 0x15,
    +	0x4b, 0x2a, 0x7a, 0x1d, 0xa6, 0x4c, 0xea, 0x79, 0xa4, 0x4b, 0x1b, 0x55, 0xc1, 0x38, 0x27, 0x19,
    +	0xa7, 0x76, 0xfd, 0x65, 0x1c, 0xd0, 0xd5, 0x3f, 0x28, 0x50, 0x8f, 0x8e, 0xe9, 0x12, 0x72, 0xf5,
    +	0x41, 0x32, 0x57, 0x5f, 0x2d, 0x11, 0x9c, 0x05, 0x39, 0xfa, 0x8f, 0x0a, 0xa0, 0x88, 0x09, 0xdb,
    +	0x86, 0x71, 0x4c, 0xda, 0xa7, 0x68, 0x15, 0x26, 0x2c, 0x62, 0x06, 0x31, 0x19, 0x26, 0xc8, 0x47,
    +	0xc4, 0xa4, 0x58, 0x50, 0xd0, 0x17, 0x0a, 0xa0, 0x9e, 0x38, 0xcd, 0xce, 0x86, 0x65, 0xd9, 0x8c,
    +	0x70, 0x07, 0x07, 0x06, 0x6d, 0x96, 0x30, 0x28, 0xd0, 0xa5, 0x1d, 0x65, 0x50, 0xee, 0x5b, 0xcc,
    +	0xed, 0x47, 0x07, 0x9b, 0x65, 0xc0, 0x39, 0xaa, 0xd1, 0x8f, 0x01, 0x5c, 0x89, 0x79, 0x68, 0xcb,
    +	0xb4, 0x2d, 0xae, 0x01, 0x81, 0xfa, 0x4d, 0xdb, 0x7a, 0xac, 0x77, 0xa3, 0xc2, 0x82, 0x43, 0x08,
    +	0x1c, 0x83, 0x5b, 0xba, 0x0f, 0x8b, 0x05, 0x76, 0xa2, 0x2b, 0x30, 0x7e, 0x4a, 0xfb, 0xbe, 0xab,
    +	0x30, 0xff, 0x89, 0x16, 0xa0, 0x7a, 0x46, 0x8c, 0x1e, 0xf5, 0x73, 0x12, 0xfb, 0x1f, 0x77, 0x2a,
    +	0xef, 0x29, 0xea, 0x6f, 0xab, 0xf1, 0x48, 0xe1, 0xf5, 0x06, 0xad, 0xf1, 0xeb, 0xc1, 0x31, 0xf4,
    +	0x36, 0xf1, 0x04, 0x46, 0xb5, 0xf5, 0x92, 0x7f, 0x35, 0xf8, 0x6b, 0x38, 0xa4, 0xa2, 0x9f, 0x42,
    +	0xcd, 0xa3, 0x06, 0x6d, 0x33, 0xdb, 0x95, 0x25, 0xee, 0xed, 0x92, 0x31, 0x45, 0x8e, 0xa9, 0x71,
    +	0x20, 0x45, 0x7d, 0xf8, 0xe0, 0x0b, 0x87, 0x90, 0xe8, 0x63, 0xa8, 0x31, 0x6a, 0x3a, 0x06, 0x61,
    +	0x54, 0x7a, 0x2f, 0x11, 0x57, 0xbc, 0x76, 0x70, 0xb0, 0x7d, 0xbb, 0x73, 0x28, 0xd9, 0x44, 0xf5,
    +	0x0c, 0xe3, 0x34, 0x58, 0xc5, 0x21, 0x0c, 0xfa, 0x11, 0xd4, 0x3c, 0xc6, 0x6f, 0xf5, 0x6e, 0x5f,
    +	0x64, 0xdb, 0x79, 0xd7, 0x4a, 0xbc, 0x8e, 0xfa, 0x22, 0x11, 0x74, 0xb0, 0x82, 0x43, 0x38, 0xb4,
    +	0x01, 0x73, 0xa6, 0x6e, 0x61, 0x4a, 0x3a, 0xfd, 0x03, 0xda, 0xb6, 0xad, 0x8e, 0x27, 0xd2, 0xb4,
    +	0xda, 0x5a, 0x94, 0x42, 0x73, 0xbb, 0x49, 0x32, 0x4e, 0xf3, 0xa3, 0x1d, 0x58, 0x08, 0xae, 0xdd,
    +	0x07, 0xba, 0xc7, 0x6c, 0xb7, 0xbf, 0xa3, 0x9b, 0x3a, 0x13, 0x35, 0xaf, 0xda, 0x6a, 0x0c, 0x07,
    +	0x2b, 0x0b, 0x38, 0x87, 0x8e, 0x73, 0xa5, 0x78, 0x5d, 0x71, 0x48, 0xcf, 0xa3, 0x1d, 0x51, 0xc3,
    +	0x6a, 0x51, 0x5d, 0xd9, 0x17, 0xab, 0x58, 0x52, 0xd1, 0xa3, 0x44, 0x98, 0xd6, 0x46, 0x0b, 0xd3,
    +	0x7a, 0x71, 0x88, 0xa2, 0x23, 0x58, 0x74, 0x5c, 0xbb, 0xeb, 0x52, 0xcf, 0xdb, 0xa2, 0xa4, 0x63,
    +	0xe8, 0x16, 0x0d, 0x3c, 0x33, 0x2d, 0x76, 0xf4, 0xca, 0x70, 0xb0, 0xb2, 0xb8, 0x9f, 0xcf, 0x82,
    +	0x8b, 0x64, 0xd5, 0x5f, 0x55, 0xe1, 0x4a, 0xfa, 0x8e, 0x43, 0x1f, 0x02, 0xb2, 0x8f, 0x3d, 0xea,
    +	0x9e, 0xd1, 0xce, 0x07, 0x7e, 0xe3, 0xc6, 0xbb, 0x1b, 0x45, 0x74, 0x37, 0x61, 0xde, 0xee, 0x65,
    +	0x38, 0x70, 0x8e, 0x94, 0xdf, 0x1f, 0xc9, 0x04, 0xa8, 0x08, 0x43, 0x63, 0xfd, 0x51, 0x26, 0x09,
    +	0x36, 0x60, 0x4e, 0xe6, 0x7e, 0x40, 0x14, 0xc1, 0x1a, 0x3b, 0xf7, 0xa3, 0x24, 0x19, 0xa7, 0xf9,
    +	0xd1, 0x5d, 0x98, 0x75, 0x79, 0x1c, 0x84, 0x00, 0x53, 0x02, 0xe0, 0x5b, 0x12, 0x60, 0x16, 0xc7,
    +	0x89, 0x38, 0xc9, 0x8b, 0x3e, 0x80, 0xab, 0xe4, 0x8c, 0xe8, 0x06, 0x39, 0x36, 0x68, 0x08, 0x30,
    +	0x21, 0x00, 0x5e, 0x96, 0x00, 0x57, 0x37, 0xd2, 0x0c, 0x38, 0x2b, 0x83, 0x76, 0x61, 0xbe, 0x67,
    +	0x65, 0xa1, 0xfc, 0x20, 0x7e, 0x45, 0x42, 0xcd, 0x1f, 0x65, 0x59, 0x70, 0x9e, 0x1c, 0xda, 0x86,
    +	0x79, 0x46, 0x5d, 0x53, 0xb7, 0x08, 0xd3, 0xad, 0x6e, 0x08, 0xe7, 0x9f, 0xfc, 0x22, 0x87, 0x3a,
    +	0xcc, 0x92, 0x71, 0x9e, 0x0c, 0xfa, 0x14, 0xa0, 0x1d, 0x34, 0x08, 0x5e, 0x63, 0x52, 0x54, 0xf4,
    +	0x9b, 0x25, 0xf2, 0x36, 0xec, 0x2a, 0xa2, 0x6a, 0x1a, 0x2e, 0x79, 0x38, 0x86, 0x89, 0xee, 0x40,
    +	0xbd, 0x6d, 0x1b, 0x86, 0x48, 0xa2, 0x4d, 0xbb, 0x67, 0x31, 0x91, 0x07, 0xd5, 0x16, 0xe2, 0x7d,
    +	0xc3, 0x66, 0x82, 0x82, 0x53, 0x9c, 0xea, 0x9f, 0x94, 0xf8, 0x8d, 0x15, 0x54, 0x06, 0x74, 0x27,
    +	0xd1, 0x45, 0xbd, 0x96, 0xea, 0xa2, 0xae, 0x65, 0x25, 0x62, 0x4d, 0x94, 0x0e, 0xb3, 0x3c, 0x8f,
    +	0x74, 0xab, 0xeb, 0xc7, 0x8e, 0xac, 0xae, 0x6f, 0x9d, 0x9b, 0x95, 0x21, 0x77, 0xec, 0x8e, 0xbd,
    +	0x2a, 0xc2, 0x27, 0x4e, 0xc4, 0x49, 0x64, 0xf5, 0x1e, 0xd4, 0x93, 0x29, 0x9d, 0x18, 0x0f, 0x94,
    +	0x0b, 0xc7, 0x83, 0xaf, 0x15, 0x58, 0x2c, 0xd0, 0x8e, 0x0c, 0xa8, 0x9b, 0xe4, 0x49, 0x2c, 0x62,
    +	0x2e, 0x6c, 0xb3, 0xf9, 0x00, 0xa6, 0xf9, 0x03, 0x98, 0xb6, 0x6d, 0xb1, 0x3d, 0xf7, 0x80, 0xb9,
    +	0xba, 0xd5, 0xf5, 0xcf, 0x61, 0x37, 0x81, 0x85, 0x53, 0xd8, 0xe8, 0x13, 0xa8, 0x99, 0xe4, 0xc9,
    +	0x41, 0xcf, 0xed, 0xe6, 0xf9, 0xab, 0x9c, 0x1e, 0x71, 0x15, 0xed, 0x4a, 0x14, 0x1c, 0xe2, 0xa9,
    +	0x7f, 0x56, 0x60, 0x35, 0xb1, 0x4b, 0x5e, 0x76, 0xe8, 0xe3, 0x9e, 0x71, 0x40, 0xa3, 0x13, 0x7f,
    +	0x13, 0xa6, 0x1d, 0xe2, 0x32, 0x3d, 0x2c, 0x3d, 0xd5, 0xd6, 0xec, 0x70, 0xb0, 0x32, 0xbd, 0x1f,
    +	0x2c, 0xe2, 0x88, 0x9e, 0xe3, 0x9b, 0xca, 0x8b, 0xf3, 0x8d, 0xfa, 0x1f, 0x05, 0xaa, 0x07, 0x6d,
    +	0x62, 0xd0, 0x4b, 0x18, 0x7a, 0xb6, 0x12, 0x43, 0x8f, 0x5a, 0x18, 0xb3, 0xc2, 0x9e, 0xc2, 0x79,
    +	0x67, 0x27, 0x35, 0xef, 0x5c, 0xbf, 0x00, 0xe7, 0xfc, 0x51, 0xe7, 0x7d, 0x98, 0x0e, 0xd5, 0x25,
    +	0xea, 0xbb, 0x72, 0x51, 0x7d, 0x57, 0x7f, 0x53, 0x81, 0x99, 0x98, 0x8a, 0xd1, 0xa4, 0xb9, 0xbb,
    +	0x63, 0x2d, 0x12, 0x2f, 0x5c, 0xeb, 0x65, 0x36, 0xa2, 0x05, 0xed, 0x90, 0xdf, 0x79, 0x46, 0x7d,
    +	0x47, 0xb6, 0x4b, 0xba, 0x07, 0x75, 0x46, 0xdc, 0x2e, 0x65, 0x01, 0x4d, 0x38, 0x6c, 0x3a, 0x1a,
    +	0x7b, 0x0e, 0x13, 0x54, 0x9c, 0xe2, 0x5e, 0xba, 0x0b, 0xb3, 0x09, 0x65, 0x23, 0xb5, 0x8f, 0x5f,
    +	0x70, 0xe7, 0x44, 0xa9, 0x70, 0x09, 0xd1, 0xf5, 0x61, 0x22, 0xba, 0xd6, 0x8a, 0x9d, 0x19, 0x4b,
    +	0xd0, 0xa2, 0x18, 0xc3, 0xa9, 0x18, 0x7b, 0xa3, 0x14, 0xda, 0xf9, 0x91, 0xf6, 0xcf, 0x0a, 0x2c,
    +	0xc4, 0xb8, 0xa3, 0xa9, 0xfa, 0x7b, 0x89, 0xfb, 0x60, 0x2d, 0x75, 0x1f, 0x34, 0xf2, 0x64, 0x5e,
    +	0xd8, 0x58, 0x9d, 0x3f, 0xea, 0x8e, 0xff, 0x3f, 0x8e, 0xba, 0x7f, 0x54, 0x60, 0x2e, 0xe6, 0xbb,
    +	0x4b, 0x98, 0x75, 0xb7, 0x93, 0xb3, 0xee, 0xf5, 0x32, 0x41, 0x53, 0x30, 0xec, 0xde, 0x81, 0xf9,
    +	0x18, 0xd3, 0x9e, 0xdb, 0xd1, 0x2d, 0x62, 0x78, 0xe8, 0x55, 0xa8, 0x7a, 0x8c, 0xb8, 0x2c, 0xb8,
    +	0x44, 0x02, 0xd9, 0x03, 0xbe, 0x88, 0x7d, 0x9a, 0xfa, 0x2f, 0x05, 0x9a, 0x31, 0xe1, 0x7d, 0xea,
    +	0x7a, 0xba, 0xc7, 0xa8, 0xc5, 0x1e, 0xda, 0x46, 0xcf, 0xa4, 0x9b, 0x06, 0xd1, 0x4d, 0x4c, 0xf9,
    +	0x82, 0x6e, 0x5b, 0xfb, 0xb6, 0xa1, 0xb7, 0xfb, 0x88, 0xc0, 0xcc, 0xe7, 0x27, 0xd4, 0xda, 0xa2,
    +	0x06, 0x65, 0xb4, 0x23, 0x43, 0xf1, 0x07, 0x12, 0x7e, 0xe6, 0x51, 0x44, 0x7a, 0x3e, 0x58, 0x59,
    +	0x2b, 0x83, 0x28, 0x22, 0x34, 0x8e, 0x89, 0x7e, 0x06, 0xc0, 0x3f, 0x45, 0x2d, 0xeb, 0xc8, 0x60,
    +	0xbd, 0x17, 0x64, 0xf4, 0xa3, 0x90, 0x32, 0x92, 0x82, 0x18, 0xa2, 0xfa, 0xbb, 0x5a, 0xe2, 0xbc,
    +	0xbf, 0xf1, 0x13, 0xeb, 0xcf, 0x61, 0xe1, 0x2c, 0xf2, 0x4e, 0xc0, 0xc0, 0x3b, 0xfc, 0xf1, 0xf4,
    +	0x2b, 0x60, 0x08, 0x9f, 0xe7, 0xd7, 0xd6, 0xb7, 0xa5, 0x92, 0x85, 0x87, 0x39, 0x70, 0x38, 0x57,
    +	0x09, 0xfa, 0x2e, 0xcc, 0xf0, 0xe9, 0x48, 0x6f, 0xd3, 0x8f, 0x88, 0x19, 0xe4, 0xe2, 0x7c, 0x10,
    +	0x2f, 0x07, 0x11, 0x09, 0xc7, 0xf9, 0xd0, 0x09, 0xcc, 0x3b, 0x76, 0x67, 0x97, 0x58, 0xa4, 0x4b,
    +	0x79, 0x23, 0xe8, 0x1f, 0xa5, 0x18, 0x63, 0xa7, 0x5b, 0xef, 0x06, 0x93, 0xc4, 0x7e, 0x96, 0xe5,
    +	0x39, 0x9f, 0x07, 0xb3, 0xcb, 0x22, 0x08, 0xf2, 0x20, 0x91, 0x0b, 0xf5, 0x9e, 0xec, 0xc7, 0xe4,
    +	0x54, 0xef, 0xbf, 0xd7, 0xad, 0x97, 0x49, 0xca, 0xa3, 0x84, 0x64, 0x74, 0x61, 0x26, 0xd7, 0x71,
    +	0x4a, 0x43, 0xe1, 0x94, 0x5e, 0xfb, 0x9f, 0xa6, 0xf4, 0x9c, 0x67, 0x83, 0xe9, 0x11, 0x9f, 0x0d,
    +	0xfe, 0xa2, 0xc0, 0x75, 0xa7, 0x44, 0x2e, 0x35, 0x40, 0xf8, 0xe6, 0x41, 0x19, 0xdf, 0x94, 0xc9,
    +	0xcd, 0xd6, 0xda, 0x70, 0xb0, 0x72, 0xbd, 0x0c, 0x27, 0x2e, 0x65, 0x1f, 0x7a, 0x08, 0x35, 0x5b,
    +	0xd6, 0xc0, 0xc6, 0x8c, 0xb0, 0xf5, 0x66, 0x19, 0x5b, 0x83, 0xba, 0xe9, 0xa7, 0x65, 0xf0, 0x85,
    +	0x43, 0x2c, 0xf5, 0xf7, 0x55, 0xb8, 0x9a, 0xb9, 0xc1, 0xd1, 0x0f, 0xcf, 0x79, 0x32, 0xb8, 0xf6,
    +	0xc2, 0x9e, 0x0b, 0x32, 0xb3, 0xfe, 0xf8, 0x08, 0xb3, 0xfe, 0x06, 0xcc, 0xb5, 0x7b, 0xae, 0x4b,
    +	0x2d, 0x96, 0x9a, 0xf4, 0xc3, 0x60, 0xd9, 0x4c, 0x92, 0x71, 0x9a, 0x3f, 0xef, 0xb9, 0xa2, 0x3a,
    +	0xe2, 0x73, 0x45, 0xdc, 0x0a, 0x39, 0x27, 0xfa, 0xa9, 0x9d, 0xb5, 0x42, 0x8e, 0x8b, 0x69, 0x7e,
    +	0xde, 0xb4, 0xfa, 0xa8, 0x21, 0xc2, 0x54, 0xb2, 0x69, 0x3d, 0x4a, 0x50, 0x71, 0x8a, 0x3b, 0x67,
    +	0x5e, 0x9f, 0x2e, 0x3b, 0xaf, 0x23, 0x92, 0x78, 0x4d, 0x00, 0x51, 0x47, 0x6f, 0x95, 0x89, 0xb3,
    +	0xf2, 0xcf, 0x09, 0xb9, 0x6f, 0x32, 0x33, 0xa3, 0xbf, 0xc9, 0xa8, 0x7f, 0x55, 0xe0, 0xe5, 0xc2,
    +	0x8a, 0x85, 0x36, 0x12, 0x2d, 0xe5, 0xad, 0x54, 0x4b, 0xf9, 0x9d, 0x42, 0xc1, 0x58, 0x5f, 0xe9,
    +	0xe6, 0xbf, 0x34, 0xbc, 0x5f, 0xee, 0xa5, 0x21, 0x67, 0x0a, 0xbe, 0xf8, 0xc9, 0xa1, 0xf5, 0xfd,
    +	0xa7, 0xcf, 0x96, 0xc7, 0xbe, 0x7c, 0xb6, 0x3c, 0xf6, 0xd5, 0xb3, 0xe5, 0xb1, 0x5f, 0x0c, 0x97,
    +	0x95, 0xa7, 0xc3, 0x65, 0xe5, 0xcb, 0xe1, 0xb2, 0xf2, 0xd5, 0x70, 0x59, 0xf9, 0xfb, 0x70, 0x59,
    +	0xf9, 0xf5, 0xd7, 0xcb, 0x63, 0x9f, 0x2c, 0x16, 0xfc, 0xb1, 0xfd, 0xdf, 0x00, 0x00, 0x00, 0xff,
    +	0xff, 0x40, 0xa4, 0x4b, 0xb9, 0xf2, 0x1e, 0x00, 0x00,
     }
     
     func (m *ControllerRevision) Marshal() (dAtA []byte, err error) {
    @@ -1289,6 +1290,11 @@ func (m *DeploymentStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
     	_ = i
     	var l int
     	_ = l
    +	if m.TerminatingReplicas != nil {
    +		i = encodeVarintGenerated(dAtA, i, uint64(*m.TerminatingReplicas))
    +		i--
    +		dAtA[i] = 0x48
    +	}
     	if m.CollisionCount != nil {
     		i = encodeVarintGenerated(dAtA, i, uint64(*m.CollisionCount))
     		i--
    @@ -2225,6 +2231,9 @@ func (m *DeploymentStatus) Size() (n int) {
     	if m.CollisionCount != nil {
     		n += 1 + sovGenerated(uint64(*m.CollisionCount))
     	}
    +	if m.TerminatingReplicas != nil {
    +		n += 1 + sovGenerated(uint64(*m.TerminatingReplicas))
    +	}
     	return n
     }
     
    @@ -2627,6 +2636,7 @@ func (this *DeploymentStatus) String() string {
     		`Conditions:` + repeatedStringForConditions + `,`,
     		`ReadyReplicas:` + fmt.Sprintf("%v", this.ReadyReplicas) + `,`,
     		`CollisionCount:` + valueToStringGenerated(this.CollisionCount) + `,`,
    +		`TerminatingReplicas:` + valueToStringGenerated(this.TerminatingReplicas) + `,`,
     		`}`,
     	}, "")
     	return s
    @@ -4337,6 +4347,26 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error {
     				}
     			}
     			m.CollisionCount = &v
    +		case 9:
    +			if wireType != 0 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field TerminatingReplicas", wireType)
    +			}
    +			var v int32
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				v |= int32(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			m.TerminatingReplicas = &v
     		default:
     			iNdEx = preIndex
     			skippy, err := skipGenerated(dAtA[iNdEx:])
    diff --git a/vendor/k8s.io/api/apps/v1beta1/generated.proto b/vendor/k8s.io/api/apps/v1beta1/generated.proto
    index 46d7bfdf9..b61dc490d 100644
    --- a/vendor/k8s.io/api/apps/v1beta1/generated.proto
    +++ b/vendor/k8s.io/api/apps/v1beta1/generated.proto
    @@ -179,33 +179,40 @@ message DeploymentSpec {
     
     // DeploymentStatus is the most recently observed status of the Deployment.
     message DeploymentStatus {
    -  // observedGeneration is the generation observed by the deployment controller.
    +  // The generation observed by the deployment controller.
       // +optional
       optional int64 observedGeneration = 1;
     
    -  // replicas is the total number of non-terminated pods targeted by this deployment (their labels match the selector).
    +  // Total number of non-terminating pods targeted by this deployment (their labels match the selector).
       // +optional
       optional int32 replicas = 2;
     
    -  // updatedReplicas is the total number of non-terminated pods targeted by this deployment that have the desired template spec.
    +  // Total number of non-terminating pods targeted by this deployment that have the desired template spec.
       // +optional
       optional int32 updatedReplicas = 3;
     
    -  // readyReplicas is the number of pods targeted by this Deployment controller with a Ready Condition.
    +  // Total number of non-terminating pods targeted by this Deployment with a Ready Condition.
       // +optional
       optional int32 readyReplicas = 7;
     
    -  // Total number of available pods (ready for at least minReadySeconds) targeted by this deployment.
    +  // Total number of available non-terminating pods (ready for at least minReadySeconds) targeted by this deployment.
       // +optional
       optional int32 availableReplicas = 4;
     
    -  // unavailableReplicas is the total number of unavailable pods targeted by this deployment. This is the total number of
    +  // Total number of unavailable pods targeted by this deployment. This is the total number of
       // pods that are still required for the deployment to have 100% available capacity. They may
       // either be pods that are running but not yet available or pods that still have not been created.
       // +optional
       optional int32 unavailableReplicas = 5;
     
    -  // Conditions represent the latest available observations of a deployment's current state.
    +  // Total number of terminating pods targeted by this deployment. Terminating pods have a non-null
    +  // .metadata.deletionTimestamp and have not yet reached the Failed or Succeeded .status.phase.
    +  //
    +  // This is an alpha field. Enable DeploymentReplicaSetTerminatingReplicas to be able to use this field.
    +  // +optional
    +  optional int32 terminatingReplicas = 9;
    +
    +  // Represents the latest available observations of a deployment's current state.
       // +patchMergeKey=type
       // +patchStrategy=merge
       // +listType=map
    @@ -309,6 +316,9 @@ message Scale {
     message ScaleSpec {
       // replicas is the number of observed instances of the scaled object.
       // +optional
    +  // +k8s:optional
    +  // +default=0
    +  // +k8s:minimum=0
       optional int32 replicas = 1;
     }
     
    @@ -455,6 +465,7 @@ message StatefulSetSpec {
       // the network identity of the set. Pods get DNS/hostnames that follow the
       // pattern: pod-specific-string.serviceName.default.svc.cluster.local
       // where "pod-specific-string" is managed by the StatefulSet controller.
    +  // +optional
       optional string serviceName = 5;
     
       // podManagementPolicy controls how pods are created during initial scale up,
    diff --git a/vendor/k8s.io/api/apps/v1beta1/types.go b/vendor/k8s.io/api/apps/v1beta1/types.go
    index bc4851957..cd140be12 100644
    --- a/vendor/k8s.io/api/apps/v1beta1/types.go
    +++ b/vendor/k8s.io/api/apps/v1beta1/types.go
    @@ -33,6 +33,9 @@ const (
     type ScaleSpec struct {
     	// replicas is the number of observed instances of the scaled object.
     	// +optional
    +	// +k8s:optional
    +	// +default=0
    +	// +k8s:minimum=0
     	Replicas int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"`
     }
     
    @@ -60,6 +63,7 @@ type ScaleStatus struct {
     // +k8s:prerelease-lifecycle-gen:deprecated=1.8
     // +k8s:prerelease-lifecycle-gen:removed=1.16
     // +k8s:prerelease-lifecycle-gen:replacement=autoscaling,v1,Scale
    +// +k8s:isSubresource=/scale
     
     // Scale represents a scaling request for a resource.
     type Scale struct {
    @@ -259,6 +263,7 @@ type StatefulSetSpec struct {
     	// the network identity of the set. Pods get DNS/hostnames that follow the
     	// pattern: pod-specific-string.serviceName.default.svc.cluster.local
     	// where "pod-specific-string" is managed by the StatefulSet controller.
    +	// +optional
     	ServiceName string `json:"serviceName" protobuf:"bytes,5,opt,name=serviceName"`
     
     	// podManagementPolicy controls how pods are created during initial scale up,
    @@ -548,33 +553,40 @@ type RollingUpdateDeployment struct {
     
     // DeploymentStatus is the most recently observed status of the Deployment.
     type DeploymentStatus struct {
    -	// observedGeneration is the generation observed by the deployment controller.
    +	// The generation observed by the deployment controller.
     	// +optional
     	ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,1,opt,name=observedGeneration"`
     
    -	// replicas is the total number of non-terminated pods targeted by this deployment (their labels match the selector).
    +	// Total number of non-terminating pods targeted by this deployment (their labels match the selector).
     	// +optional
     	Replicas int32 `json:"replicas,omitempty" protobuf:"varint,2,opt,name=replicas"`
     
    -	// updatedReplicas is the total number of non-terminated pods targeted by this deployment that have the desired template spec.
    +	// Total number of non-terminating pods targeted by this deployment that have the desired template spec.
     	// +optional
     	UpdatedReplicas int32 `json:"updatedReplicas,omitempty" protobuf:"varint,3,opt,name=updatedReplicas"`
     
    -	// readyReplicas is the number of pods targeted by this Deployment controller with a Ready Condition.
    +	// Total number of non-terminating pods targeted by this Deployment with a Ready Condition.
     	// +optional
     	ReadyReplicas int32 `json:"readyReplicas,omitempty" protobuf:"varint,7,opt,name=readyReplicas"`
     
    -	// Total number of available pods (ready for at least minReadySeconds) targeted by this deployment.
    +	// Total number of available non-terminating pods (ready for at least minReadySeconds) targeted by this deployment.
     	// +optional
     	AvailableReplicas int32 `json:"availableReplicas,omitempty" protobuf:"varint,4,opt,name=availableReplicas"`
     
    -	// unavailableReplicas is the total number of unavailable pods targeted by this deployment. This is the total number of
    +	// Total number of unavailable pods targeted by this deployment. This is the total number of
     	// pods that are still required for the deployment to have 100% available capacity. They may
     	// either be pods that are running but not yet available or pods that still have not been created.
     	// +optional
     	UnavailableReplicas int32 `json:"unavailableReplicas,omitempty" protobuf:"varint,5,opt,name=unavailableReplicas"`
     
    -	// Conditions represent the latest available observations of a deployment's current state.
    +	// Total number of terminating pods targeted by this deployment. Terminating pods have a non-null
    +	// .metadata.deletionTimestamp and have not yet reached the Failed or Succeeded .status.phase.
    +	//
    +	// This is an alpha field. Enable DeploymentReplicaSetTerminatingReplicas to be able to use this field.
    +	// +optional
    +	TerminatingReplicas *int32 `json:"terminatingReplicas,omitempty" protobuf:"varint,9,opt,name=terminatingReplicas"`
    +
    +	// Represents the latest available observations of a deployment's current state.
     	// +patchMergeKey=type
     	// +patchStrategy=merge
     	// +listType=map
    diff --git a/vendor/k8s.io/api/apps/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/apps/v1beta1/types_swagger_doc_generated.go
    index 1381d75dc..02ea5f7f2 100644
    --- a/vendor/k8s.io/api/apps/v1beta1/types_swagger_doc_generated.go
    +++ b/vendor/k8s.io/api/apps/v1beta1/types_swagger_doc_generated.go
    @@ -113,13 +113,14 @@ func (DeploymentSpec) SwaggerDoc() map[string]string {
     
     var map_DeploymentStatus = map[string]string{
     	"":                    "DeploymentStatus is the most recently observed status of the Deployment.",
    -	"observedGeneration":  "observedGeneration is the generation observed by the deployment controller.",
    -	"replicas":            "replicas is the total number of non-terminated pods targeted by this deployment (their labels match the selector).",
    -	"updatedReplicas":     "updatedReplicas is the total number of non-terminated pods targeted by this deployment that have the desired template spec.",
    -	"readyReplicas":       "readyReplicas is the number of pods targeted by this Deployment controller with a Ready Condition.",
    -	"availableReplicas":   "Total number of available pods (ready for at least minReadySeconds) targeted by this deployment.",
    -	"unavailableReplicas": "unavailableReplicas is the total number of unavailable pods targeted by this deployment. This is the total number of pods that are still required for the deployment to have 100% available capacity. They may either be pods that are running but not yet available or pods that still have not been created.",
    -	"conditions":          "Conditions represent the latest available observations of a deployment's current state.",
    +	"observedGeneration":  "The generation observed by the deployment controller.",
    +	"replicas":            "Total number of non-terminating pods targeted by this deployment (their labels match the selector).",
    +	"updatedReplicas":     "Total number of non-terminating pods targeted by this deployment that have the desired template spec.",
    +	"readyReplicas":       "Total number of non-terminating pods targeted by this Deployment with a Ready Condition.",
    +	"availableReplicas":   "Total number of available non-terminating pods (ready for at least minReadySeconds) targeted by this deployment.",
    +	"unavailableReplicas": "Total number of unavailable pods targeted by this deployment. This is the total number of pods that are still required for the deployment to have 100% available capacity. They may either be pods that are running but not yet available or pods that still have not been created.",
    +	"terminatingReplicas": "Total number of terminating pods targeted by this deployment. Terminating pods have a non-null .metadata.deletionTimestamp and have not yet reached the Failed or Succeeded .status.phase.\n\nThis is an alpha field. Enable DeploymentReplicaSetTerminatingReplicas to be able to use this field.",
    +	"conditions":          "Represents the latest available observations of a deployment's current state.",
     	"collisionCount":      "collisionCount is the count of hash collisions for the Deployment. The Deployment controller uses this field as a collision avoidance mechanism when it needs to create the name for the newest ReplicaSet.",
     }
     
    diff --git a/vendor/k8s.io/api/apps/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/apps/v1beta1/zz_generated.deepcopy.go
    index dd73f1a5a..e8594766c 100644
    --- a/vendor/k8s.io/api/apps/v1beta1/zz_generated.deepcopy.go
    +++ b/vendor/k8s.io/api/apps/v1beta1/zz_generated.deepcopy.go
    @@ -246,6 +246,11 @@ func (in *DeploymentSpec) DeepCopy() *DeploymentSpec {
     // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
     func (in *DeploymentStatus) DeepCopyInto(out *DeploymentStatus) {
     	*out = *in
    +	if in.TerminatingReplicas != nil {
    +		in, out := &in.TerminatingReplicas, &out.TerminatingReplicas
    +		*out = new(int32)
    +		**out = **in
    +	}
     	if in.Conditions != nil {
     		in, out := &in.Conditions, &out.Conditions
     		*out = make([]DeploymentCondition, len(*in))
    diff --git a/vendor/k8s.io/api/apps/v1beta2/doc.go b/vendor/k8s.io/api/apps/v1beta2/doc.go
    index ac91fddfd..7d28fe42d 100644
    --- a/vendor/k8s.io/api/apps/v1beta2/doc.go
    +++ b/vendor/k8s.io/api/apps/v1beta2/doc.go
    @@ -19,4 +19,4 @@ limitations under the License.
     // +k8s:openapi-gen=true
     // +k8s:prerelease-lifecycle-gen=true
     
    -package v1beta2 // import "k8s.io/api/apps/v1beta2"
    +package v1beta2
    diff --git a/vendor/k8s.io/api/apps/v1beta2/generated.pb.go b/vendor/k8s.io/api/apps/v1beta2/generated.pb.go
    index 1c3d3be5b..9fcba6feb 100644
    --- a/vendor/k8s.io/api/apps/v1beta2/generated.pb.go
    +++ b/vendor/k8s.io/api/apps/v1beta2/generated.pb.go
    @@ -1017,153 +1017,155 @@ func init() {
     }
     
     var fileDescriptor_c423c016abf485d4 = []byte{
    -	// 2328 bytes of a gzipped FileDescriptorProto
    +	// 2359 bytes of a gzipped FileDescriptorProto
     	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x5a, 0xcd, 0x6f, 0x1b, 0xc7,
    -	0x15, 0xf7, 0xf2, 0x43, 0x26, 0x87, 0x96, 0x64, 0x8f, 0x54, 0x89, 0xb1, 0x5b, 0xd2, 0x58, 0x1b,
    -	0xb6, 0x12, 0xdb, 0xa4, 0xad, 0x7c, 0x20, 0xb1, 0xdb, 0x04, 0xa2, 0x94, 0xda, 0x0e, 0xf4, 0xc1,
    -	0x0c, 0x2d, 0x07, 0x0d, 0xfa, 0xe1, 0x11, 0x39, 0xa6, 0x36, 0xde, 0x2f, 0xec, 0x0e, 0x15, 0x13,
    -	0xbd, 0xf4, 0x5a, 0xa0, 0x40, 0xdb, 0x6b, 0xff, 0x89, 0xa2, 0x97, 0xa2, 0x68, 0xd0, 0x4b, 0x11,
    -	0x04, 0x3e, 0x06, 0xbd, 0x24, 0x27, 0xa2, 0x66, 0x4e, 0x45, 0xd1, 0x5b, 0x7b, 0x31, 0x50, 0xa0,
    -	0x98, 0xd9, 0xd9, 0xef, 0x5d, 0x73, 0xa9, 0xd8, 0x4a, 0x13, 0xe4, 0xc6, 0x9d, 0xf7, 0xde, 0x6f,
    -	0xde, 0xcc, 0xbc, 0x37, 0xef, 0x37, 0x33, 0x04, 0x17, 0x1f, 0xbc, 0x6e, 0x37, 0x14, 0xa3, 0x89,
    -	0x4d, 0xa5, 0x89, 0x4d, 0xd3, 0x6e, 0x1e, 0x5c, 0xdb, 0x23, 0x14, 0xaf, 0x36, 0xfb, 0x44, 0x27,
    -	0x16, 0xa6, 0xa4, 0xd7, 0x30, 0x2d, 0x83, 0x1a, 0x70, 0xd9, 0x51, 0x6c, 0x60, 0x53, 0x69, 0x30,
    -	0xc5, 0x86, 0x50, 0x3c, 0x7d, 0xa5, 0xaf, 0xd0, 0xfd, 0xc1, 0x5e, 0xa3, 0x6b, 0x68, 0xcd, 0xbe,
    -	0xd1, 0x37, 0x9a, 0x5c, 0x7f, 0x6f, 0x70, 0x9f, 0x7f, 0xf1, 0x0f, 0xfe, 0xcb, 0xc1, 0x39, 0x2d,
    -	0x07, 0x3a, 0xec, 0x1a, 0x16, 0x69, 0x1e, 0x5c, 0x8b, 0xf6, 0x75, 0xfa, 0x15, 0x5f, 0x47, 0xc3,
    -	0xdd, 0x7d, 0x45, 0x27, 0xd6, 0xb0, 0x69, 0x3e, 0xe8, 0xb3, 0x06, 0xbb, 0xa9, 0x11, 0x8a, 0x93,
    -	0xac, 0x9a, 0x69, 0x56, 0xd6, 0x40, 0xa7, 0x8a, 0x46, 0x62, 0x06, 0xaf, 0x4d, 0x32, 0xb0, 0xbb,
    -	0xfb, 0x44, 0xc3, 0x31, 0xbb, 0x97, 0xd3, 0xec, 0x06, 0x54, 0x51, 0x9b, 0x8a, 0x4e, 0x6d, 0x6a,
    -	0x45, 0x8d, 0xe4, 0xff, 0x48, 0x00, 0xae, 0x1b, 0x3a, 0xb5, 0x0c, 0x55, 0x25, 0x16, 0x22, 0x07,
    -	0x8a, 0xad, 0x18, 0x3a, 0xbc, 0x07, 0x4a, 0x6c, 0x3c, 0x3d, 0x4c, 0x71, 0x55, 0x3a, 0x2b, 0xad,
    -	0x54, 0x56, 0xaf, 0x36, 0xfc, 0x99, 0xf6, 0xe0, 0x1b, 0xe6, 0x83, 0x3e, 0x6b, 0xb0, 0x1b, 0x4c,
    -	0xbb, 0x71, 0x70, 0xad, 0xb1, 0xb3, 0xf7, 0x01, 0xe9, 0xd2, 0x2d, 0x42, 0x71, 0x0b, 0x3e, 0x1a,
    -	0xd5, 0x8f, 0x8d, 0x47, 0x75, 0xe0, 0xb7, 0x21, 0x0f, 0x15, 0xee, 0x80, 0x02, 0x47, 0xcf, 0x71,
    -	0xf4, 0x2b, 0xa9, 0xe8, 0x62, 0xd0, 0x0d, 0x84, 0x3f, 0x7c, 0xfb, 0x21, 0x25, 0x3a, 0x73, 0xaf,
    -	0x75, 0x42, 0x40, 0x17, 0x36, 0x30, 0xc5, 0x88, 0x03, 0xc1, 0xcb, 0xa0, 0x64, 0x09, 0xf7, 0xab,
    -	0xf9, 0xb3, 0xd2, 0x4a, 0xbe, 0x75, 0x52, 0x68, 0x95, 0xdc, 0x61, 0x21, 0x4f, 0x43, 0x7e, 0x24,
    -	0x81, 0xa5, 0xf8, 0xb8, 0x37, 0x15, 0x9b, 0xc2, 0x1f, 0xc7, 0xc6, 0xde, 0xc8, 0x36, 0x76, 0x66,
    -	0xcd, 0x47, 0xee, 0x75, 0xec, 0xb6, 0x04, 0xc6, 0xdd, 0x06, 0x45, 0x85, 0x12, 0xcd, 0xae, 0xe6,
    -	0xce, 0xe6, 0x57, 0x2a, 0xab, 0x97, 0x1a, 0x29, 0x01, 0xdc, 0x88, 0x7b, 0xd7, 0x9a, 0x15, 0xb8,
    -	0xc5, 0xdb, 0x0c, 0x01, 0x39, 0x40, 0xf2, 0x2f, 0x73, 0xa0, 0xbc, 0x81, 0x89, 0x66, 0xe8, 0x1d,
    -	0x42, 0x8f, 0x60, 0xe5, 0x6e, 0x81, 0x82, 0x6d, 0x92, 0xae, 0x58, 0xb9, 0x0b, 0xa9, 0x03, 0xf0,
    -	0x7c, 0xea, 0x98, 0xa4, 0xeb, 0x2f, 0x19, 0xfb, 0x42, 0x1c, 0x01, 0xb6, 0xc1, 0x8c, 0x4d, 0x31,
    -	0x1d, 0xd8, 0x7c, 0xc1, 0x2a, 0xab, 0x2b, 0x19, 0xb0, 0xb8, 0x7e, 0x6b, 0x4e, 0xa0, 0xcd, 0x38,
    -	0xdf, 0x48, 0xe0, 0xc8, 0xff, 0xc8, 0x01, 0xe8, 0xe9, 0xae, 0x1b, 0x7a, 0x4f, 0xa1, 0x2c, 0x9c,
    -	0xaf, 0x83, 0x02, 0x1d, 0x9a, 0x84, 0x4f, 0x48, 0xb9, 0x75, 0xc1, 0x75, 0xe5, 0xce, 0xd0, 0x24,
    -	0x4f, 0x46, 0xf5, 0xa5, 0xb8, 0x05, 0x93, 0x20, 0x6e, 0x03, 0x37, 0x3d, 0x27, 0x73, 0xdc, 0xfa,
    -	0x95, 0x70, 0xd7, 0x4f, 0x46, 0xf5, 0x84, 0xbd, 0xa3, 0xe1, 0x21, 0x85, 0x1d, 0x84, 0x07, 0x00,
    -	0xaa, 0xd8, 0xa6, 0x77, 0x2c, 0xac, 0xdb, 0x4e, 0x4f, 0x8a, 0x46, 0xc4, 0xf0, 0x5f, 0xca, 0xb6,
    -	0x50, 0xcc, 0xa2, 0x75, 0x5a, 0x78, 0x01, 0x37, 0x63, 0x68, 0x28, 0xa1, 0x07, 0x78, 0x01, 0xcc,
    -	0x58, 0x04, 0xdb, 0x86, 0x5e, 0x2d, 0xf0, 0x51, 0x78, 0x13, 0x88, 0x78, 0x2b, 0x12, 0x52, 0xf8,
    -	0x22, 0x38, 0xae, 0x11, 0xdb, 0xc6, 0x7d, 0x52, 0x2d, 0x72, 0xc5, 0x79, 0xa1, 0x78, 0x7c, 0xcb,
    -	0x69, 0x46, 0xae, 0x5c, 0xfe, 0xa3, 0x04, 0x66, 0xbd, 0x99, 0x3b, 0x82, 0xcc, 0xb9, 0x19, 0xce,
    -	0x1c, 0x79, 0x72, 0xb0, 0xa4, 0x24, 0xcc, 0xc7, 0xf9, 0x80, 0xe3, 0x2c, 0x1c, 0xe1, 0x4f, 0x40,
    -	0xc9, 0x26, 0x2a, 0xe9, 0x52, 0xc3, 0x12, 0x8e, 0xbf, 0x9c, 0xd1, 0x71, 0xbc, 0x47, 0xd4, 0x8e,
    -	0x30, 0x6d, 0x9d, 0x60, 0x9e, 0xbb, 0x5f, 0xc8, 0x83, 0x84, 0xef, 0x82, 0x12, 0x25, 0x9a, 0xa9,
    -	0x62, 0x4a, 0x44, 0xd6, 0x9c, 0x0b, 0x3a, 0xcf, 0x62, 0x86, 0x81, 0xb5, 0x8d, 0xde, 0x1d, 0xa1,
    -	0xc6, 0x53, 0xc6, 0x9b, 0x0c, 0xb7, 0x15, 0x79, 0x30, 0xd0, 0x04, 0x73, 0x03, 0xb3, 0xc7, 0x34,
    -	0x29, 0xdb, 0xce, 0xfb, 0x43, 0x11, 0x43, 0x57, 0x27, 0xcf, 0xca, 0x6e, 0xc8, 0xae, 0xb5, 0x24,
    -	0x7a, 0x99, 0x0b, 0xb7, 0xa3, 0x08, 0x3e, 0x5c, 0x03, 0xf3, 0x9a, 0xa2, 0x23, 0x82, 0x7b, 0xc3,
    -	0x0e, 0xe9, 0x1a, 0x7a, 0xcf, 0xe6, 0xa1, 0x54, 0x6c, 0x2d, 0x0b, 0x80, 0xf9, 0xad, 0xb0, 0x18,
    -	0x45, 0xf5, 0xe1, 0x26, 0x58, 0x74, 0x37, 0xe0, 0x5b, 0x8a, 0x4d, 0x0d, 0x6b, 0xb8, 0xa9, 0x68,
    -	0x0a, 0xad, 0xce, 0x70, 0x9c, 0xea, 0x78, 0x54, 0x5f, 0x44, 0x09, 0x72, 0x94, 0x68, 0x25, 0xff,
    -	0x76, 0x06, 0xcc, 0x47, 0xf6, 0x05, 0x78, 0x17, 0x2c, 0x75, 0x07, 0x96, 0x45, 0x74, 0xba, 0x3d,
    -	0xd0, 0xf6, 0x88, 0xd5, 0xe9, 0xee, 0x93, 0xde, 0x40, 0x25, 0x3d, 0xbe, 0xac, 0xc5, 0x56, 0x4d,
    -	0xf8, 0xba, 0xb4, 0x9e, 0xa8, 0x85, 0x52, 0xac, 0xe1, 0x3b, 0x00, 0xea, 0xbc, 0x69, 0x4b, 0xb1,
    -	0x6d, 0x0f, 0x33, 0xc7, 0x31, 0xbd, 0x54, 0xdc, 0x8e, 0x69, 0xa0, 0x04, 0x2b, 0xe6, 0x63, 0x8f,
    -	0xd8, 0x8a, 0x45, 0x7a, 0x51, 0x1f, 0xf3, 0x61, 0x1f, 0x37, 0x12, 0xb5, 0x50, 0x8a, 0x35, 0x7c,
    -	0x15, 0x54, 0x9c, 0xde, 0xf8, 0x9c, 0x8b, 0xc5, 0x59, 0x10, 0x60, 0x95, 0x6d, 0x5f, 0x84, 0x82,
    -	0x7a, 0x6c, 0x68, 0xc6, 0x9e, 0x4d, 0xac, 0x03, 0xd2, 0xbb, 0xe9, 0x90, 0x03, 0x56, 0x41, 0x8b,
    -	0xbc, 0x82, 0x7a, 0x43, 0xdb, 0x89, 0x69, 0xa0, 0x04, 0x2b, 0x36, 0x34, 0x27, 0x6a, 0x62, 0x43,
    -	0x9b, 0x09, 0x0f, 0x6d, 0x37, 0x51, 0x0b, 0xa5, 0x58, 0xb3, 0xd8, 0x73, 0x5c, 0x5e, 0x3b, 0xc0,
    -	0x8a, 0x8a, 0xf7, 0x54, 0x52, 0x3d, 0x1e, 0x8e, 0xbd, 0xed, 0xb0, 0x18, 0x45, 0xf5, 0xe1, 0x4d,
    -	0x70, 0xca, 0x69, 0xda, 0xd5, 0xb1, 0x07, 0x52, 0xe2, 0x20, 0x2f, 0x08, 0x90, 0x53, 0xdb, 0x51,
    -	0x05, 0x14, 0xb7, 0x81, 0xd7, 0xc1, 0x5c, 0xd7, 0x50, 0x55, 0x1e, 0x8f, 0xeb, 0xc6, 0x40, 0xa7,
    -	0xd5, 0x32, 0x47, 0x81, 0x2c, 0x87, 0xd6, 0x43, 0x12, 0x14, 0xd1, 0x84, 0x3f, 0x03, 0xa0, 0xeb,
    -	0x16, 0x06, 0xbb, 0x0a, 0x26, 0x30, 0x80, 0x78, 0x59, 0xf2, 0x2b, 0xb3, 0xd7, 0x64, 0xa3, 0x00,
    -	0xa4, 0xfc, 0xb1, 0x04, 0x96, 0x53, 0x12, 0x1d, 0xbe, 0x15, 0x2a, 0x82, 0x97, 0x22, 0x45, 0xf0,
    -	0x4c, 0x8a, 0x59, 0xa0, 0x12, 0xee, 0x83, 0x59, 0x46, 0x48, 0x14, 0xbd, 0xef, 0xa8, 0x88, 0xbd,
    -	0xac, 0x99, 0x3a, 0x00, 0x14, 0xd4, 0xf6, 0x77, 0xe5, 0x53, 0xe3, 0x51, 0x7d, 0x36, 0x24, 0x43,
    -	0x61, 0x60, 0xf9, 0x57, 0x39, 0x00, 0x36, 0x88, 0xa9, 0x1a, 0x43, 0x8d, 0xe8, 0x47, 0xc1, 0x69,
    -	0x6e, 0x87, 0x38, 0xcd, 0xc5, 0xf4, 0x25, 0xf1, 0x9c, 0x4a, 0x25, 0x35, 0xef, 0x46, 0x48, 0xcd,
    -	0x8b, 0x59, 0xc0, 0x9e, 0xce, 0x6a, 0x3e, 0xcb, 0x83, 0x05, 0x5f, 0xd9, 0xa7, 0x35, 0x37, 0x42,
    -	0x2b, 0x7a, 0x31, 0xb2, 0xa2, 0xcb, 0x09, 0x26, 0xcf, 0x8d, 0xd7, 0x7c, 0x00, 0xe6, 0x18, 0xeb,
    -	0x70, 0xd6, 0x8f, 0x73, 0x9a, 0x99, 0xa9, 0x39, 0x8d, 0x57, 0x89, 0x36, 0x43, 0x48, 0x28, 0x82,
    -	0x9c, 0xc2, 0xa1, 0x8e, 0x7f, 0x1d, 0x39, 0xd4, 0x9f, 0x24, 0x30, 0xe7, 0x2f, 0xd3, 0x11, 0x90,
    -	0xa8, 0x5b, 0x61, 0x12, 0x75, 0x2e, 0x43, 0x70, 0xa6, 0xb0, 0xa8, 0xcf, 0x0a, 0x41, 0xd7, 0x39,
    -	0x8d, 0x5a, 0x61, 0x47, 0x30, 0x53, 0x55, 0xba, 0xd8, 0x16, 0xf5, 0xf6, 0x84, 0x73, 0xfc, 0x72,
    -	0xda, 0x90, 0x27, 0x0d, 0x11, 0xae, 0xdc, 0xf3, 0x25, 0x5c, 0xf9, 0x67, 0x43, 0xb8, 0x7e, 0x04,
    -	0x4a, 0xb6, 0x4b, 0xb5, 0x0a, 0x1c, 0xf2, 0x52, 0xa6, 0xc4, 0x16, 0x2c, 0xcb, 0x83, 0xf6, 0xf8,
    -	0x95, 0x07, 0x97, 0xc4, 0xac, 0x8a, 0x5f, 0x25, 0xb3, 0x62, 0x81, 0x6e, 0xe2, 0x81, 0x4d, 0x7a,
    -	0x3c, 0xa9, 0x4a, 0x7e, 0xa0, 0xb7, 0x79, 0x2b, 0x12, 0x52, 0xb8, 0x0b, 0x96, 0x4d, 0xcb, 0xe8,
    -	0x5b, 0xc4, 0xb6, 0x37, 0x08, 0xee, 0xa9, 0x8a, 0x4e, 0xdc, 0x01, 0x38, 0x35, 0xf1, 0xcc, 0x78,
    -	0x54, 0x5f, 0x6e, 0x27, 0xab, 0xa0, 0x34, 0x5b, 0xf9, 0xaf, 0x05, 0x70, 0x32, 0xba, 0x37, 0xa6,
    -	0xd0, 0x14, 0xe9, 0x50, 0x34, 0xe5, 0x72, 0x20, 0x4e, 0x1d, 0x0e, 0x17, 0xb8, 0x2a, 0x88, 0xc5,
    -	0xea, 0x1a, 0x98, 0x17, 0xb4, 0xc4, 0x15, 0x0a, 0xa2, 0xe6, 0x2d, 0xcf, 0x6e, 0x58, 0x8c, 0xa2,
    -	0xfa, 0xf0, 0x06, 0x98, 0xb5, 0x38, 0xf3, 0x72, 0x01, 0x1c, 0xf6, 0xf2, 0x1d, 0x01, 0x30, 0x8b,
    -	0x82, 0x42, 0x14, 0xd6, 0x65, 0xcc, 0xc5, 0x27, 0x24, 0x2e, 0x40, 0x21, 0xcc, 0x5c, 0xd6, 0xa2,
    -	0x0a, 0x28, 0x6e, 0x03, 0xb7, 0xc0, 0xc2, 0x40, 0x8f, 0x43, 0x39, 0xb1, 0x76, 0x46, 0x40, 0x2d,
    -	0xec, 0xc6, 0x55, 0x50, 0x92, 0x1d, 0xbc, 0x17, 0x22, 0x33, 0x33, 0x7c, 0x3f, 0xb9, 0x9c, 0x21,
    -	0x27, 0x32, 0xb3, 0x99, 0x04, 0xaa, 0x55, 0xca, 0x4a, 0xb5, 0xe4, 0x8f, 0x24, 0x00, 0xe3, 0x79,
    -	0x38, 0xf1, 0x26, 0x20, 0x66, 0x11, 0xa8, 0x98, 0x4a, 0x32, 0xff, 0xb9, 0x9a, 0x91, 0xff, 0xf8,
    -	0x1b, 0x6a, 0x36, 0x02, 0x24, 0x26, 0xfa, 0x68, 0x2e, 0x75, 0xb2, 0x12, 0x20, 0xdf, 0xa9, 0x67,
    -	0x40, 0x80, 0x02, 0x60, 0x4f, 0x27, 0x40, 0xff, 0xcc, 0x81, 0x05, 0x5f, 0x39, 0x33, 0x01, 0x4a,
    -	0x30, 0xf9, 0xf6, 0x62, 0x27, 0x1b, 0x29, 0xf1, 0xa7, 0xee, 0xff, 0x89, 0x94, 0xf8, 0x5e, 0xa5,
    -	0x90, 0x92, 0xdf, 0xe7, 0x82, 0xae, 0x4f, 0x49, 0x4a, 0x9e, 0xc1, 0x0d, 0xc7, 0xd7, 0x8e, 0xd7,
    -	0xc8, 0x9f, 0xe4, 0xc1, 0xc9, 0x68, 0x1e, 0x86, 0x0a, 0xa4, 0x34, 0xb1, 0x40, 0xb6, 0xc1, 0xe2,
    -	0xfd, 0x81, 0xaa, 0x0e, 0xf9, 0x18, 0x02, 0x55, 0xd2, 0x29, 0xad, 0xdf, 0x15, 0x96, 0x8b, 0x3f,
    -	0x4c, 0xd0, 0x41, 0x89, 0x96, 0xf1, 0x7a, 0x59, 0xf8, 0xb2, 0xf5, 0xb2, 0x78, 0x88, 0x7a, 0x99,
    -	0x4c, 0x39, 0xf2, 0x87, 0xa2, 0x1c, 0xd3, 0x15, 0xcb, 0x84, 0x8d, 0x6b, 0xe2, 0xd1, 0x7f, 0x2c,
    -	0x81, 0xa5, 0xe4, 0x03, 0x37, 0x54, 0xc1, 0x9c, 0x86, 0x1f, 0x06, 0x2f, 0x3e, 0x26, 0x15, 0x91,
    -	0x01, 0x55, 0xd4, 0x86, 0xf3, 0x64, 0xd4, 0xb8, 0xad, 0xd3, 0x1d, 0xab, 0x43, 0x2d, 0x45, 0xef,
    -	0x3b, 0x95, 0x77, 0x2b, 0x84, 0x85, 0x22, 0xd8, 0xf0, 0x7d, 0x50, 0xd2, 0xf0, 0xc3, 0xce, 0xc0,
    -	0xea, 0x27, 0x55, 0xc8, 0x6c, 0xfd, 0xf0, 0x04, 0xd8, 0x12, 0x28, 0xc8, 0xc3, 0x93, 0xbf, 0x90,
    -	0xc0, 0x72, 0x4a, 0x55, 0xfd, 0x06, 0x8d, 0xf2, 0x2f, 0x12, 0x38, 0x1b, 0x1a, 0x25, 0x4b, 0x4b,
    -	0x72, 0x7f, 0xa0, 0xf2, 0x0c, 0x15, 0x4c, 0xe6, 0x12, 0x28, 0x9b, 0xd8, 0xa2, 0x8a, 0xc7, 0x83,
    -	0x8b, 0xad, 0xd9, 0xf1, 0xa8, 0x5e, 0x6e, 0xbb, 0x8d, 0xc8, 0x97, 0x27, 0xcc, 0x4d, 0xee, 0xf9,
    -	0xcd, 0x8d, 0xfc, 0x5f, 0x09, 0x14, 0x3b, 0x5d, 0xac, 0x92, 0x23, 0x20, 0x2e, 0x1b, 0x21, 0xe2,
    -	0x92, 0xfe, 0x28, 0xc0, 0xfd, 0x49, 0xe5, 0x2c, 0x9b, 0x11, 0xce, 0x72, 0x7e, 0x02, 0xce, 0xd3,
    -	0xe9, 0xca, 0x1b, 0xa0, 0xec, 0x75, 0x37, 0xdd, 0x5e, 0x2a, 0xff, 0x2e, 0x07, 0x2a, 0x81, 0x2e,
    -	0xa6, 0xdc, 0x89, 0xef, 0x85, 0xca, 0x0f, 0xdb, 0x63, 0x56, 0xb3, 0x0c, 0xa4, 0xe1, 0x96, 0x9a,
    -	0xb7, 0x75, 0x6a, 0x05, 0xcf, 0xaa, 0xf1, 0x0a, 0xf4, 0x26, 0x98, 0xa3, 0xd8, 0xea, 0x13, 0xea,
    -	0xca, 0xf8, 0x84, 0x95, 0xfd, 0xbb, 0x9b, 0x3b, 0x21, 0x29, 0x8a, 0x68, 0x9f, 0xbe, 0x01, 0x66,
    -	0x43, 0x9d, 0xc1, 0x93, 0x20, 0xff, 0x80, 0x0c, 0x1d, 0x06, 0x87, 0xd8, 0x4f, 0xb8, 0x08, 0x8a,
    -	0x07, 0x58, 0x1d, 0x38, 0x21, 0x5a, 0x46, 0xce, 0xc7, 0xf5, 0xdc, 0xeb, 0x92, 0xfc, 0x6b, 0x36,
    -	0x39, 0x7e, 0x2a, 0x1c, 0x41, 0x74, 0xbd, 0x13, 0x8a, 0xae, 0xf4, 0xf7, 0xc9, 0x60, 0x82, 0xa6,
    -	0xc5, 0x18, 0x8a, 0xc4, 0xd8, 0x4b, 0x99, 0xd0, 0x9e, 0x1e, 0x69, 0xff, 0xca, 0x81, 0xc5, 0x80,
    -	0xb6, 0xcf, 0x8c, 0xbf, 0x1f, 0x62, 0xc6, 0x2b, 0x11, 0x66, 0x5c, 0x4d, 0xb2, 0xf9, 0x96, 0x1a,
    -	0x4f, 0xa6, 0xc6, 0x7f, 0x96, 0xc0, 0x7c, 0x60, 0xee, 0x8e, 0x80, 0x1b, 0xdf, 0x0e, 0x73, 0xe3,
    -	0xf3, 0x59, 0x82, 0x26, 0x85, 0x1c, 0x5f, 0x07, 0x0b, 0x01, 0xa5, 0x1d, 0xab, 0xa7, 0xe8, 0x58,
    -	0xb5, 0xe1, 0x39, 0x50, 0xb4, 0x29, 0xb6, 0xa8, 0x5b, 0x44, 0x5c, 0xdb, 0x0e, 0x6b, 0x44, 0x8e,
    -	0x4c, 0xfe, 0xb7, 0x04, 0x9a, 0x01, 0xe3, 0x36, 0xb1, 0x6c, 0xc5, 0xa6, 0x44, 0xa7, 0x77, 0x0d,
    -	0x75, 0xa0, 0x91, 0x75, 0x15, 0x2b, 0x1a, 0x22, 0xac, 0x41, 0x31, 0xf4, 0xb6, 0xa1, 0x2a, 0xdd,
    -	0x21, 0xc4, 0xa0, 0xf2, 0xe1, 0x3e, 0xd1, 0x37, 0x88, 0x4a, 0xa8, 0x78, 0x81, 0x2b, 0xb7, 0xde,
    -	0x72, 0x1f, 0xa4, 0xde, 0xf3, 0x45, 0x4f, 0x46, 0xf5, 0x95, 0x2c, 0x88, 0x3c, 0x42, 0x83, 0x98,
    -	0xf0, 0xa7, 0x00, 0xb0, 0x4f, 0xbe, 0x97, 0xf5, 0x44, 0xb0, 0xbe, 0xe9, 0x66, 0xf4, 0x7b, 0x9e,
    -	0x64, 0xaa, 0x0e, 0x02, 0x88, 0xf2, 0x1f, 0x4a, 0xa1, 0xf5, 0xfe, 0xc6, 0xdf, 0x72, 0xfe, 0x1c,
    -	0x2c, 0x1e, 0xf8, 0xb3, 0xe3, 0x2a, 0x30, 0xfe, 0x9d, 0x8f, 0x9e, 0xe4, 0x3d, 0xf8, 0xa4, 0x79,
    -	0xf5, 0x59, 0xff, 0xdd, 0x04, 0x38, 0x94, 0xd8, 0x09, 0x7c, 0x15, 0x54, 0x18, 0x6f, 0x56, 0xba,
    -	0x64, 0x1b, 0x6b, 0x6e, 0x2e, 0x7a, 0x0f, 0x98, 0x1d, 0x5f, 0x84, 0x82, 0x7a, 0x70, 0x1f, 0x2c,
    -	0x98, 0x46, 0x6f, 0x0b, 0xeb, 0xb8, 0x4f, 0x18, 0x11, 0x74, 0x96, 0x92, 0x5f, 0x7d, 0x96, 0x5b,
    -	0xaf, 0xb9, 0xd7, 0x5a, 0xed, 0xb8, 0xca, 0x93, 0x51, 0x7d, 0x39, 0xa1, 0x99, 0x07, 0x41, 0x12,
    -	0x24, 0xb4, 0x62, 0x8f, 0xee, 0xce, 0xa3, 0xc3, 0x6a, 0x96, 0xa4, 0x3c, 0xe4, 0xb3, 0x7b, 0xda,
    -	0xcd, 0x6e, 0xe9, 0x50, 0x37, 0xbb, 0x09, 0x47, 0xdc, 0xf2, 0x94, 0x47, 0xdc, 0x4f, 0x24, 0x70,
    -	0xde, 0xcc, 0x90, 0x4b, 0x55, 0xc0, 0xe7, 0xe6, 0x56, 0x96, 0xb9, 0xc9, 0x92, 0x9b, 0xad, 0x95,
    -	0xf1, 0xa8, 0x7e, 0x3e, 0x8b, 0x26, 0xca, 0xe4, 0x1f, 0xbc, 0x0b, 0x4a, 0x86, 0xd8, 0x03, 0xab,
    -	0x15, 0xee, 0xeb, 0xe5, 0x2c, 0xbe, 0xba, 0xfb, 0xa6, 0x93, 0x96, 0xee, 0x17, 0xf2, 0xb0, 0xe4,
    -	0x8f, 0x8a, 0xe0, 0x54, 0xac, 0x82, 0x7f, 0x85, 0xf7, 0xd7, 0xb1, 0xc3, 0x74, 0x7e, 0x8a, 0xc3,
    -	0xf4, 0x1a, 0x98, 0x17, 0x7f, 0x89, 0x88, 0x9c, 0xc5, 0xbd, 0x80, 0x59, 0x0f, 0x8b, 0x51, 0x54,
    -	0x3f, 0xe9, 0xfe, 0xbc, 0x38, 0xe5, 0xfd, 0x79, 0xd0, 0x0b, 0xf1, 0x17, 0x3f, 0x27, 0xbd, 0xe3,
    -	0x5e, 0x88, 0x7f, 0xfa, 0x45, 0xf5, 0x19, 0x71, 0x75, 0x50, 0x3d, 0x84, 0xe3, 0x61, 0xe2, 0xba,
    -	0x1b, 0x92, 0xa2, 0x88, 0xf6, 0x97, 0x7a, 0xf6, 0xc7, 0x09, 0xcf, 0xfe, 0x57, 0xb2, 0xc4, 0x5a,
    -	0xf6, 0xab, 0xf2, 0xc4, 0x4b, 0x8f, 0xca, 0xf4, 0x97, 0x1e, 0xf2, 0xdf, 0x24, 0xf0, 0x42, 0xea,
    -	0xae, 0x05, 0xd7, 0x42, 0xb4, 0xf2, 0x4a, 0x84, 0x56, 0x7e, 0x2f, 0xd5, 0x30, 0xc0, 0x2d, 0xad,
    -	0xe4, 0x5b, 0xf4, 0x37, 0xb2, 0xdd, 0xa2, 0x27, 0x9c, 0x84, 0x27, 0x5f, 0xa7, 0xb7, 0x7e, 0xf0,
    -	0xe8, 0x71, 0xed, 0xd8, 0xa7, 0x8f, 0x6b, 0xc7, 0x3e, 0x7f, 0x5c, 0x3b, 0xf6, 0x8b, 0x71, 0x4d,
    -	0x7a, 0x34, 0xae, 0x49, 0x9f, 0x8e, 0x6b, 0xd2, 0xe7, 0xe3, 0x9a, 0xf4, 0xf7, 0x71, 0x4d, 0xfa,
    -	0xcd, 0x17, 0xb5, 0x63, 0xef, 0x2f, 0xa7, 0xfc, 0xe9, 0xf8, 0x7f, 0x01, 0x00, 0x00, 0xff, 0xff,
    -	0xa4, 0x79, 0xcd, 0x52, 0x8e, 0x2c, 0x00, 0x00,
    +	0x15, 0xf7, 0x92, 0xa2, 0x44, 0x0e, 0x2d, 0xc9, 0x1e, 0xa9, 0x22, 0x63, 0xb7, 0xa4, 0xb1, 0x36,
    +	0x6c, 0x25, 0xb6, 0x49, 0x5b, 0xf9, 0x40, 0x62, 0xb7, 0x09, 0x44, 0x29, 0xb5, 0x1d, 0x48, 0x32,
    +	0x33, 0xb4, 0x1c, 0x34, 0xe8, 0x87, 0x47, 0xe4, 0x98, 0xda, 0x78, 0xbf, 0xb0, 0x3b, 0x54, 0x4c,
    +	0xf4, 0xd2, 0x6b, 0x81, 0x16, 0x6d, 0xae, 0xfd, 0x27, 0x8a, 0x5e, 0x8a, 0xa2, 0x41, 0x6f, 0x41,
    +	0xe1, 0x63, 0xd0, 0x4b, 0x72, 0x22, 0x6a, 0xe6, 0x54, 0x14, 0xbd, 0xb5, 0x17, 0x03, 0x05, 0x8a,
    +	0x99, 0x9d, 0xfd, 0xde, 0x35, 0x97, 0x8a, 0xad, 0x34, 0x41, 0x6e, 0xdc, 0x79, 0xef, 0xfd, 0xe6,
    +	0xcd, 0xcc, 0x7b, 0xf3, 0x7e, 0xfb, 0xb8, 0xe0, 0xc2, 0x83, 0xd7, 0xed, 0x86, 0x62, 0x34, 0xb1,
    +	0xa9, 0x34, 0xb1, 0x69, 0xda, 0xcd, 0x83, 0xab, 0x7b, 0x84, 0xe2, 0xb5, 0x66, 0x9f, 0xe8, 0xc4,
    +	0xc2, 0x94, 0xf4, 0x1a, 0xa6, 0x65, 0x50, 0x03, 0x56, 0x1c, 0xc5, 0x06, 0x36, 0x95, 0x06, 0x53,
    +	0x6c, 0x08, 0xc5, 0x53, 0x97, 0xfb, 0x0a, 0xdd, 0x1f, 0xec, 0x35, 0xba, 0x86, 0xd6, 0xec, 0x1b,
    +	0x7d, 0xa3, 0xc9, 0xf5, 0xf7, 0x06, 0xf7, 0xf9, 0x13, 0x7f, 0xe0, 0xbf, 0x1c, 0x9c, 0x53, 0x72,
    +	0x60, 0xc2, 0xae, 0x61, 0x91, 0xe6, 0xc1, 0xd5, 0xe8, 0x5c, 0xa7, 0x5e, 0xf1, 0x75, 0x34, 0xdc,
    +	0xdd, 0x57, 0x74, 0x62, 0x0d, 0x9b, 0xe6, 0x83, 0x3e, 0x1b, 0xb0, 0x9b, 0x1a, 0xa1, 0x38, 0xc9,
    +	0xaa, 0x99, 0x66, 0x65, 0x0d, 0x74, 0xaa, 0x68, 0x24, 0x66, 0xf0, 0xda, 0x24, 0x03, 0xbb, 0xbb,
    +	0x4f, 0x34, 0x1c, 0xb3, 0x7b, 0x39, 0xcd, 0x6e, 0x40, 0x15, 0xb5, 0xa9, 0xe8, 0xd4, 0xa6, 0x56,
    +	0xd4, 0x48, 0xfe, 0x8f, 0x04, 0xe0, 0x86, 0xa1, 0x53, 0xcb, 0x50, 0x55, 0x62, 0x21, 0x72, 0xa0,
    +	0xd8, 0x8a, 0xa1, 0xc3, 0x7b, 0xa0, 0xc8, 0xd6, 0xd3, 0xc3, 0x14, 0x57, 0xa5, 0x33, 0xd2, 0x6a,
    +	0x79, 0xed, 0x4a, 0xc3, 0xdf, 0x69, 0x0f, 0xbe, 0x61, 0x3e, 0xe8, 0xb3, 0x01, 0xbb, 0xc1, 0xb4,
    +	0x1b, 0x07, 0x57, 0x1b, 0xb7, 0xf7, 0x3e, 0x20, 0x5d, 0xba, 0x4d, 0x28, 0x6e, 0xc1, 0x47, 0xa3,
    +	0xfa, 0xb1, 0xf1, 0xa8, 0x0e, 0xfc, 0x31, 0xe4, 0xa1, 0xc2, 0xdb, 0x60, 0x86, 0xa3, 0xe7, 0x38,
    +	0xfa, 0xe5, 0x54, 0x74, 0xb1, 0xe8, 0x06, 0xc2, 0x1f, 0xbe, 0xfd, 0x90, 0x12, 0x9d, 0xb9, 0xd7,
    +	0x3a, 0x2e, 0xa0, 0x67, 0x36, 0x31, 0xc5, 0x88, 0x03, 0xc1, 0x4b, 0xa0, 0x68, 0x09, 0xf7, 0xab,
    +	0xf9, 0x33, 0xd2, 0x6a, 0xbe, 0x75, 0x42, 0x68, 0x15, 0xdd, 0x65, 0x21, 0x4f, 0x43, 0x7e, 0x24,
    +	0x81, 0x95, 0xf8, 0xba, 0xb7, 0x14, 0x9b, 0xc2, 0x1f, 0xc7, 0xd6, 0xde, 0xc8, 0xb6, 0x76, 0x66,
    +	0xcd, 0x57, 0xee, 0x4d, 0xec, 0x8e, 0x04, 0xd6, 0xdd, 0x06, 0x05, 0x85, 0x12, 0xcd, 0xae, 0xe6,
    +	0xce, 0xe4, 0x57, 0xcb, 0x6b, 0x17, 0x1b, 0x29, 0x01, 0xdc, 0x88, 0x7b, 0xd7, 0x9a, 0x17, 0xb8,
    +	0x85, 0x5b, 0x0c, 0x01, 0x39, 0x40, 0xf2, 0x2f, 0x73, 0xa0, 0xb4, 0x89, 0x89, 0x66, 0xe8, 0x1d,
    +	0x42, 0x8f, 0xe0, 0xe4, 0x6e, 0x82, 0x19, 0xdb, 0x24, 0x5d, 0x71, 0x72, 0xe7, 0x53, 0x17, 0xe0,
    +	0xf9, 0xd4, 0x31, 0x49, 0xd7, 0x3f, 0x32, 0xf6, 0x84, 0x38, 0x02, 0x6c, 0x83, 0x59, 0x9b, 0x62,
    +	0x3a, 0xb0, 0xf9, 0x81, 0x95, 0xd7, 0x56, 0x33, 0x60, 0x71, 0xfd, 0xd6, 0x82, 0x40, 0x9b, 0x75,
    +	0x9e, 0x91, 0xc0, 0x91, 0xff, 0x91, 0x03, 0xd0, 0xd3, 0xdd, 0x30, 0xf4, 0x9e, 0x42, 0x59, 0x38,
    +	0x5f, 0x03, 0x33, 0x74, 0x68, 0x12, 0xbe, 0x21, 0xa5, 0xd6, 0x79, 0xd7, 0x95, 0x3b, 0x43, 0x93,
    +	0x3c, 0x19, 0xd5, 0x57, 0xe2, 0x16, 0x4c, 0x82, 0xb8, 0x0d, 0xdc, 0xf2, 0x9c, 0xcc, 0x71, 0xeb,
    +	0x57, 0xc2, 0x53, 0x3f, 0x19, 0xd5, 0x13, 0xee, 0x8e, 0x86, 0x87, 0x14, 0x76, 0x10, 0x1e, 0x00,
    +	0xa8, 0x62, 0x9b, 0xde, 0xb1, 0xb0, 0x6e, 0x3b, 0x33, 0x29, 0x1a, 0x11, 0xcb, 0x7f, 0x29, 0xdb,
    +	0x41, 0x31, 0x8b, 0xd6, 0x29, 0xe1, 0x05, 0xdc, 0x8a, 0xa1, 0xa1, 0x84, 0x19, 0xe0, 0x79, 0x30,
    +	0x6b, 0x11, 0x6c, 0x1b, 0x7a, 0x75, 0x86, 0xaf, 0xc2, 0xdb, 0x40, 0xc4, 0x47, 0x91, 0x90, 0xc2,
    +	0x17, 0xc1, 0x9c, 0x46, 0x6c, 0x1b, 0xf7, 0x49, 0xb5, 0xc0, 0x15, 0x17, 0x85, 0xe2, 0xdc, 0xb6,
    +	0x33, 0x8c, 0x5c, 0xb9, 0xfc, 0x47, 0x09, 0xcc, 0x7b, 0x3b, 0x77, 0x04, 0x99, 0x73, 0x23, 0x9c,
    +	0x39, 0xf2, 0xe4, 0x60, 0x49, 0x49, 0x98, 0x4f, 0xf2, 0x01, 0xc7, 0x59, 0x38, 0xc2, 0x9f, 0x80,
    +	0xa2, 0x4d, 0x54, 0xd2, 0xa5, 0x86, 0x25, 0x1c, 0x7f, 0x39, 0xa3, 0xe3, 0x78, 0x8f, 0xa8, 0x1d,
    +	0x61, 0xda, 0x3a, 0xce, 0x3c, 0x77, 0x9f, 0x90, 0x07, 0x09, 0xdf, 0x05, 0x45, 0x4a, 0x34, 0x53,
    +	0xc5, 0x94, 0x88, 0xac, 0x39, 0x1b, 0x74, 0x9e, 0xc5, 0x0c, 0x03, 0x6b, 0x1b, 0xbd, 0x3b, 0x42,
    +	0x8d, 0xa7, 0x8c, 0xb7, 0x19, 0xee, 0x28, 0xf2, 0x60, 0xa0, 0x09, 0x16, 0x06, 0x66, 0x8f, 0x69,
    +	0x52, 0x76, 0x9d, 0xf7, 0x87, 0x22, 0x86, 0xae, 0x4c, 0xde, 0x95, 0xdd, 0x90, 0x5d, 0x6b, 0x45,
    +	0xcc, 0xb2, 0x10, 0x1e, 0x47, 0x11, 0x7c, 0xb8, 0x0e, 0x16, 0x35, 0x45, 0x47, 0x04, 0xf7, 0x86,
    +	0x1d, 0xd2, 0x35, 0xf4, 0x9e, 0xcd, 0x43, 0xa9, 0xd0, 0xaa, 0x08, 0x80, 0xc5, 0xed, 0xb0, 0x18,
    +	0x45, 0xf5, 0xe1, 0x16, 0x58, 0x76, 0x2f, 0xe0, 0x9b, 0x8a, 0x4d, 0x0d, 0x6b, 0xb8, 0xa5, 0x68,
    +	0x0a, 0xad, 0xce, 0x72, 0x9c, 0xea, 0x78, 0x54, 0x5f, 0x46, 0x09, 0x72, 0x94, 0x68, 0x25, 0x7f,
    +	0x34, 0x0b, 0x16, 0x23, 0xf7, 0x02, 0xbc, 0x0b, 0x56, 0xba, 0x03, 0xcb, 0x22, 0x3a, 0xdd, 0x19,
    +	0x68, 0x7b, 0xc4, 0xea, 0x74, 0xf7, 0x49, 0x6f, 0xa0, 0x92, 0x1e, 0x3f, 0xd6, 0x42, 0xab, 0x26,
    +	0x7c, 0x5d, 0xd9, 0x48, 0xd4, 0x42, 0x29, 0xd6, 0xf0, 0x1d, 0x00, 0x75, 0x3e, 0xb4, 0xad, 0xd8,
    +	0xb6, 0x87, 0x99, 0xe3, 0x98, 0x5e, 0x2a, 0xee, 0xc4, 0x34, 0x50, 0x82, 0x15, 0xf3, 0xb1, 0x47,
    +	0x6c, 0xc5, 0x22, 0xbd, 0xa8, 0x8f, 0xf9, 0xb0, 0x8f, 0x9b, 0x89, 0x5a, 0x28, 0xc5, 0x1a, 0xbe,
    +	0x0a, 0xca, 0xce, 0x6c, 0x7c, 0xcf, 0xc5, 0xe1, 0x2c, 0x09, 0xb0, 0xf2, 0x8e, 0x2f, 0x42, 0x41,
    +	0x3d, 0xb6, 0x34, 0x63, 0xcf, 0x26, 0xd6, 0x01, 0xe9, 0xdd, 0x70, 0xc8, 0x01, 0xab, 0xa0, 0x05,
    +	0x5e, 0x41, 0xbd, 0xa5, 0xdd, 0x8e, 0x69, 0xa0, 0x04, 0x2b, 0xb6, 0x34, 0x27, 0x6a, 0x62, 0x4b,
    +	0x9b, 0x0d, 0x2f, 0x6d, 0x37, 0x51, 0x0b, 0xa5, 0x58, 0xb3, 0xd8, 0x73, 0x5c, 0x5e, 0x3f, 0xc0,
    +	0x8a, 0x8a, 0xf7, 0x54, 0x52, 0x9d, 0x0b, 0xc7, 0xde, 0x4e, 0x58, 0x8c, 0xa2, 0xfa, 0xf0, 0x06,
    +	0x38, 0xe9, 0x0c, 0xed, 0xea, 0xd8, 0x03, 0x29, 0x72, 0x90, 0x17, 0x04, 0xc8, 0xc9, 0x9d, 0xa8,
    +	0x02, 0x8a, 0xdb, 0xc0, 0x6b, 0x60, 0xa1, 0x6b, 0xa8, 0x2a, 0x8f, 0xc7, 0x0d, 0x63, 0xa0, 0xd3,
    +	0x6a, 0x89, 0xa3, 0x40, 0x96, 0x43, 0x1b, 0x21, 0x09, 0x8a, 0x68, 0xc2, 0x9f, 0x01, 0xd0, 0x75,
    +	0x0b, 0x83, 0x5d, 0x05, 0x13, 0x18, 0x40, 0xbc, 0x2c, 0xf9, 0x95, 0xd9, 0x1b, 0xb2, 0x51, 0x00,
    +	0x52, 0xfe, 0x44, 0x02, 0x95, 0x94, 0x44, 0x87, 0x6f, 0x85, 0x8a, 0xe0, 0xc5, 0x48, 0x11, 0x3c,
    +	0x9d, 0x62, 0x16, 0xa8, 0x84, 0xfb, 0x60, 0x9e, 0x11, 0x12, 0x45, 0xef, 0x3b, 0x2a, 0xe2, 0x2e,
    +	0x6b, 0xa6, 0x2e, 0x00, 0x05, 0xb5, 0xfd, 0x5b, 0xf9, 0xe4, 0x78, 0x54, 0x9f, 0x0f, 0xc9, 0x50,
    +	0x18, 0x58, 0xfe, 0x55, 0x0e, 0x80, 0x4d, 0x62, 0xaa, 0xc6, 0x50, 0x23, 0xfa, 0x51, 0x70, 0x9a,
    +	0x5b, 0x21, 0x4e, 0x73, 0x21, 0xfd, 0x48, 0x3c, 0xa7, 0x52, 0x49, 0xcd, 0xbb, 0x11, 0x52, 0xf3,
    +	0x62, 0x16, 0xb0, 0xa7, 0xb3, 0x9a, 0xcf, 0xf2, 0x60, 0xc9, 0x57, 0xf6, 0x69, 0xcd, 0xf5, 0xd0,
    +	0x89, 0x5e, 0x88, 0x9c, 0x68, 0x25, 0xc1, 0xe4, 0xb9, 0xf1, 0x9a, 0x0f, 0xc0, 0x02, 0x63, 0x1d,
    +	0xce, 0xf9, 0x71, 0x4e, 0x33, 0x3b, 0x35, 0xa7, 0xf1, 0x2a, 0xd1, 0x56, 0x08, 0x09, 0x45, 0x90,
    +	0x53, 0x38, 0xd4, 0xdc, 0xd7, 0x91, 0x43, 0xfd, 0x49, 0x02, 0x0b, 0xfe, 0x31, 0x1d, 0x01, 0x89,
    +	0xba, 0x19, 0x26, 0x51, 0x67, 0x33, 0x04, 0x67, 0x0a, 0x8b, 0xfa, 0x6c, 0x26, 0xe8, 0x3a, 0xa7,
    +	0x51, 0xab, 0xec, 0x15, 0xcc, 0x54, 0x95, 0x2e, 0xb6, 0x45, 0xbd, 0x3d, 0xee, 0xbc, 0x7e, 0x39,
    +	0x63, 0xc8, 0x93, 0x86, 0x08, 0x57, 0xee, 0xf9, 0x12, 0xae, 0xfc, 0xb3, 0x21, 0x5c, 0x3f, 0x02,
    +	0x45, 0xdb, 0xa5, 0x5a, 0x33, 0x1c, 0xf2, 0x62, 0xa6, 0xc4, 0x16, 0x2c, 0xcb, 0x83, 0xf6, 0xf8,
    +	0x95, 0x07, 0x97, 0xc4, 0xac, 0x0a, 0x5f, 0x25, 0xb3, 0x62, 0x81, 0x6e, 0xe2, 0x81, 0x4d, 0x7a,
    +	0x3c, 0xa9, 0x8a, 0x7e, 0xa0, 0xb7, 0xf9, 0x28, 0x12, 0x52, 0xb8, 0x0b, 0x2a, 0xa6, 0x65, 0xf4,
    +	0x2d, 0x62, 0xdb, 0x9b, 0x04, 0xf7, 0x54, 0x45, 0x27, 0xee, 0x02, 0x9c, 0x9a, 0x78, 0x7a, 0x3c,
    +	0xaa, 0x57, 0xda, 0xc9, 0x2a, 0x28, 0xcd, 0x56, 0xfe, 0x75, 0x01, 0x9c, 0x88, 0xde, 0x8d, 0x29,
    +	0x34, 0x45, 0x3a, 0x14, 0x4d, 0xb9, 0x14, 0x88, 0x53, 0x87, 0xc3, 0x05, 0x5a, 0x05, 0xb1, 0x58,
    +	0x5d, 0x07, 0x8b, 0x82, 0x96, 0xb8, 0x42, 0x41, 0xd4, 0xbc, 0xe3, 0xd9, 0x0d, 0x8b, 0x51, 0x54,
    +	0x1f, 0x5e, 0x07, 0xf3, 0x16, 0x67, 0x5e, 0x2e, 0x80, 0xc3, 0x5e, 0xbe, 0x23, 0x00, 0xe6, 0x51,
    +	0x50, 0x88, 0xc2, 0xba, 0x8c, 0xb9, 0xf8, 0x84, 0xc4, 0x05, 0x98, 0x09, 0x33, 0x97, 0xf5, 0xa8,
    +	0x02, 0x8a, 0xdb, 0xc0, 0x6d, 0xb0, 0x34, 0xd0, 0xe3, 0x50, 0x4e, 0xac, 0x9d, 0x16, 0x50, 0x4b,
    +	0xbb, 0x71, 0x15, 0x94, 0x64, 0x07, 0x6f, 0x81, 0x25, 0x4a, 0x2c, 0x4d, 0xd1, 0x31, 0x55, 0xf4,
    +	0xbe, 0x07, 0xe7, 0x9c, 0x7c, 0x85, 0x41, 0xdd, 0x89, 0x8b, 0x51, 0x92, 0x0d, 0xbc, 0x17, 0xe2,
    +	0x45, 0xb3, 0xfc, 0x6a, 0xba, 0x94, 0x21, 0xbd, 0x32, 0x13, 0xa3, 0x04, 0xd6, 0x56, 0xcc, 0xca,
    +	0xda, 0xe4, 0x8f, 0x25, 0x00, 0xe3, 0x29, 0x3d, 0xb1, 0xa9, 0x10, 0xb3, 0x08, 0x14, 0x5f, 0x25,
    +	0x99, 0x4a, 0x5d, 0xc9, 0x48, 0xa5, 0xfc, 0xbb, 0x39, 0x1b, 0x97, 0x12, 0x1b, 0x7d, 0x34, 0xfd,
    +	0xa1, 0xac, 0x5c, 0xca, 0x77, 0xea, 0x19, 0x70, 0xa9, 0x00, 0xd8, 0xd3, 0xb9, 0xd4, 0x3f, 0x73,
    +	0x60, 0xc9, 0x57, 0xce, 0xcc, 0xa5, 0x12, 0x4c, 0xbe, 0xed, 0x11, 0x65, 0xe3, 0x37, 0xfe, 0xd6,
    +	0xfd, 0x3f, 0xf1, 0x1b, 0xdf, 0xab, 0x14, 0x7e, 0xf3, 0xfb, 0x5c, 0xd0, 0xf5, 0x29, 0xf9, 0xcd,
    +	0x33, 0x68, 0x96, 0x7c, 0xed, 0x28, 0x92, 0xfc, 0xd1, 0x0c, 0x38, 0x11, 0xcd, 0xc3, 0x50, 0xad,
    +	0x95, 0x26, 0xd6, 0xda, 0x36, 0x58, 0xbe, 0x3f, 0x50, 0xd5, 0x21, 0x5f, 0x43, 0xa0, 0xe0, 0x3a,
    +	0x55, 0xfa, 0xbb, 0xc2, 0x72, 0xf9, 0x87, 0x09, 0x3a, 0x28, 0xd1, 0x32, 0x5e, 0x7a, 0x67, 0xbe,
    +	0x6c, 0xe9, 0x2d, 0x1c, 0xa2, 0xf4, 0xa6, 0xd4, 0xca, 0xb9, 0x43, 0xd4, 0xca, 0x64, 0x22, 0x94,
    +	0x3f, 0x14, 0x11, 0x9a, 0xae, 0xee, 0x26, 0xdc, 0x81, 0x13, 0x1b, 0x12, 0x63, 0x09, 0xac, 0x24,
    +	0xb7, 0x01, 0xa0, 0x0a, 0x16, 0x34, 0xfc, 0x30, 0xd8, 0x8e, 0x99, 0x54, 0x8f, 0x06, 0x54, 0x51,
    +	0x1b, 0xce, 0x1f, 0x59, 0x8d, 0x5b, 0x3a, 0xbd, 0x6d, 0x75, 0xa8, 0xa5, 0xe8, 0x7d, 0xa7, 0x88,
    +	0x6f, 0x87, 0xb0, 0x50, 0x04, 0x1b, 0xbe, 0x0f, 0x8a, 0x1a, 0x7e, 0xd8, 0x19, 0x58, 0xfd, 0xa4,
    +	0x62, 0x9b, 0x6d, 0x1e, 0x9e, 0x4b, 0xdb, 0x02, 0x05, 0x79, 0x78, 0xf2, 0x17, 0x12, 0xa8, 0xa4,
    +	0x14, 0xe8, 0x6f, 0xd0, 0x2a, 0xff, 0x22, 0x81, 0x33, 0xa1, 0x55, 0xb2, 0x0c, 0x27, 0xf7, 0x07,
    +	0x2a, 0x4f, 0x76, 0x41, 0x8a, 0x2e, 0x82, 0x92, 0x89, 0x2d, 0xaa, 0x78, 0xec, 0xbc, 0xd0, 0x9a,
    +	0x1f, 0x8f, 0xea, 0xa5, 0xb6, 0x3b, 0x88, 0x7c, 0x79, 0xc2, 0xde, 0xe4, 0x9e, 0xdf, 0xde, 0xc8,
    +	0xff, 0x95, 0x40, 0xa1, 0xd3, 0xc5, 0x2a, 0x39, 0x02, 0x0e, 0xb4, 0x19, 0xe2, 0x40, 0xe9, 0x7f,
    +	0x55, 0x70, 0x7f, 0x52, 0xe9, 0xcf, 0x56, 0x84, 0xfe, 0x9c, 0x9b, 0x80, 0xf3, 0x74, 0xe6, 0xf3,
    +	0x06, 0x28, 0x79, 0xd3, 0x4d, 0x77, 0x2d, 0xcb, 0xbf, 0xcb, 0x81, 0x72, 0x60, 0x8a, 0x29, 0x2f,
    +	0xf5, 0x7b, 0xa1, 0x4a, 0xc6, 0xee, 0x98, 0xb5, 0x2c, 0x0b, 0x69, 0xb8, 0x55, 0xeb, 0x6d, 0x9d,
    +	0x5a, 0xc1, 0x37, 0xe8, 0x78, 0x31, 0x7b, 0x13, 0x2c, 0x50, 0x6c, 0xf5, 0x09, 0x75, 0x65, 0x7c,
    +	0xc3, 0x4a, 0x7e, 0x47, 0xe9, 0x4e, 0x48, 0x8a, 0x22, 0xda, 0xa7, 0xae, 0x83, 0xf9, 0xd0, 0x64,
    +	0xf0, 0x04, 0xc8, 0x3f, 0x20, 0x43, 0x87, 0x0c, 0x22, 0xf6, 0x13, 0x2e, 0x83, 0xc2, 0x01, 0x56,
    +	0x07, 0x4e, 0x88, 0x96, 0x90, 0xf3, 0x70, 0x2d, 0xf7, 0xba, 0x24, 0xff, 0x86, 0x6d, 0x8e, 0x9f,
    +	0x0a, 0x47, 0x10, 0x5d, 0xef, 0x84, 0xa2, 0x2b, 0xfd, 0x5f, 0xd3, 0x60, 0x82, 0xa6, 0xc5, 0x18,
    +	0x8a, 0xc4, 0xd8, 0x4b, 0x99, 0xd0, 0x9e, 0x1e, 0x69, 0xff, 0xca, 0x81, 0xe5, 0x80, 0xb6, 0x4f,
    +	0xb2, 0xbf, 0x1f, 0x22, 0xd9, 0xab, 0x11, 0x92, 0x5d, 0x4d, 0xb2, 0xf9, 0x96, 0x65, 0x4f, 0x66,
    +	0xd9, 0x7f, 0x96, 0xc0, 0x62, 0x60, 0xef, 0x8e, 0x80, 0x66, 0xdf, 0x0a, 0xd3, 0xec, 0x73, 0x59,
    +	0x82, 0x26, 0x85, 0x67, 0x5f, 0x03, 0x4b, 0x01, 0xa5, 0xdb, 0x56, 0x4f, 0xd1, 0xb1, 0x6a, 0xc3,
    +	0xb3, 0xa0, 0x60, 0x53, 0x6c, 0x51, 0xb7, 0x88, 0xb8, 0xb6, 0x1d, 0x36, 0x88, 0x1c, 0x99, 0xfc,
    +	0x6f, 0x09, 0x34, 0x03, 0xc6, 0x6d, 0x62, 0xd9, 0x8a, 0x4d, 0x89, 0x4e, 0xef, 0x1a, 0xea, 0x40,
    +	0x23, 0x1b, 0x2a, 0x56, 0x34, 0x44, 0xd8, 0x80, 0x62, 0xe8, 0x6d, 0x43, 0x55, 0xba, 0x43, 0x88,
    +	0x41, 0xf9, 0xc3, 0x7d, 0xa2, 0x6f, 0x12, 0x95, 0x50, 0xf1, 0xbf, 0x60, 0xa9, 0xf5, 0x96, 0xfb,
    +	0x37, 0xd9, 0x7b, 0xbe, 0xe8, 0xc9, 0xa8, 0xbe, 0x9a, 0x05, 0x91, 0x47, 0x68, 0x10, 0x13, 0xfe,
    +	0x14, 0x00, 0xf6, 0xc8, 0xef, 0xb2, 0x9e, 0x08, 0xd6, 0x37, 0xdd, 0x8c, 0x7e, 0xcf, 0x93, 0x4c,
    +	0x35, 0x41, 0x00, 0x51, 0xfe, 0x43, 0x31, 0x74, 0xde, 0xdf, 0xf8, 0xde, 0xeb, 0xcf, 0xc1, 0xf2,
    +	0x81, 0xbf, 0x3b, 0xae, 0x02, 0xa3, 0xf2, 0xf9, 0x68, 0x53, 0xc0, 0x83, 0x4f, 0xda, 0x57, 0xff,
    +	0x05, 0xe2, 0x6e, 0x02, 0x1c, 0x4a, 0x9c, 0x04, 0xbe, 0x0a, 0xca, 0x8c, 0x37, 0x2b, 0x5d, 0xb2,
    +	0x83, 0x35, 0x37, 0x17, 0xbd, 0xbf, 0x55, 0x3b, 0xbe, 0x08, 0x05, 0xf5, 0xe0, 0x3e, 0x58, 0x32,
    +	0x8d, 0xde, 0x36, 0xd6, 0x71, 0x9f, 0x30, 0x22, 0xe8, 0x1c, 0x25, 0x6f, 0xc8, 0x96, 0x5a, 0xaf,
    +	0xb9, 0xcd, 0xb6, 0x76, 0x5c, 0xe5, 0xc9, 0xa8, 0x5e, 0x49, 0x18, 0xe6, 0x41, 0x90, 0x04, 0x09,
    +	0xad, 0xd8, 0xa7, 0x00, 0xce, 0x5f, 0x21, 0x6b, 0x59, 0x92, 0xf2, 0x90, 0x1f, 0x03, 0xa4, 0xf5,
    +	0x9b, 0x8b, 0x87, 0xea, 0x37, 0x27, 0xbc, 0x2d, 0x97, 0xa6, 0x7c, 0x5b, 0xfe, 0xab, 0x04, 0xce,
    +	0x99, 0x19, 0x72, 0xa9, 0x0a, 0xf8, 0xde, 0xdc, 0xcc, 0xb2, 0x37, 0x59, 0x72, 0xb3, 0xb5, 0x3a,
    +	0x1e, 0xd5, 0xcf, 0x65, 0xd1, 0x44, 0x99, 0xfc, 0x83, 0x77, 0x41, 0xd1, 0x10, 0x77, 0x60, 0xb5,
    +	0xcc, 0x7d, 0xbd, 0x94, 0xc5, 0x57, 0xf7, 0xde, 0x74, 0xd2, 0xd2, 0x7d, 0x42, 0x1e, 0x96, 0xfc,
    +	0x71, 0x01, 0x9c, 0x8c, 0x55, 0xf0, 0xaf, 0xb0, 0xab, 0x1e, 0x7b, 0x2f, 0xcf, 0x4f, 0xf1, 0x5e,
    +	0xbe, 0x0e, 0x16, 0xc5, 0x87, 0x1a, 0x91, 0xd7, 0x7a, 0x2f, 0x60, 0x36, 0xc2, 0x62, 0x14, 0xd5,
    +	0x4f, 0xea, 0xea, 0x17, 0xa6, 0xec, 0xea, 0x07, 0xbd, 0x10, 0x1f, 0x1e, 0x3a, 0xe9, 0x1d, 0xf7,
    +	0x42, 0x7c, 0x7f, 0x18, 0xd5, 0x67, 0xc4, 0xd5, 0x41, 0xf5, 0x10, 0xe6, 0xc2, 0xc4, 0x75, 0x37,
    +	0x24, 0x45, 0x11, 0xed, 0x2f, 0xf5, 0x31, 0x02, 0x4e, 0xf8, 0x18, 0xe1, 0x72, 0x96, 0x58, 0xcb,
    +	0xde, 0x75, 0x4f, 0xec, 0x9f, 0x94, 0xa7, 0xef, 0x9f, 0xc8, 0x7f, 0x93, 0xc0, 0x0b, 0xa9, 0xb7,
    +	0x16, 0x5c, 0x0f, 0xd1, 0xca, 0xcb, 0x11, 0x5a, 0xf9, 0xbd, 0x54, 0xc3, 0x00, 0xb7, 0xb4, 0x92,
    +	0x1b, 0xf2, 0x6f, 0x64, 0x6b, 0xc8, 0x27, 0xbc, 0x09, 0x4f, 0xee, 0xcc, 0xb7, 0x7e, 0xf0, 0xe8,
    +	0x71, 0xed, 0xd8, 0xa7, 0x8f, 0x6b, 0xc7, 0x3e, 0x7f, 0x5c, 0x3b, 0xf6, 0x8b, 0x71, 0x4d, 0x7a,
    +	0x34, 0xae, 0x49, 0x9f, 0x8e, 0x6b, 0xd2, 0xe7, 0xe3, 0x9a, 0xf4, 0xf7, 0x71, 0x4d, 0xfa, 0xed,
    +	0x17, 0xb5, 0x63, 0xef, 0x57, 0x52, 0x3e, 0x85, 0xfe, 0x5f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xd4,
    +	0x01, 0x82, 0xf5, 0x24, 0x2d, 0x00, 0x00,
     }
     
     func (m *ControllerRevision) Marshal() (dAtA []byte, err error) {
    @@ -1845,6 +1847,11 @@ func (m *DeploymentStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
     	_ = i
     	var l int
     	_ = l
    +	if m.TerminatingReplicas != nil {
    +		i = encodeVarintGenerated(dAtA, i, uint64(*m.TerminatingReplicas))
    +		i--
    +		dAtA[i] = 0x48
    +	}
     	if m.CollisionCount != nil {
     		i = encodeVarintGenerated(dAtA, i, uint64(*m.CollisionCount))
     		i--
    @@ -2151,6 +2158,11 @@ func (m *ReplicaSetStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
     	_ = i
     	var l int
     	_ = l
    +	if m.TerminatingReplicas != nil {
    +		i = encodeVarintGenerated(dAtA, i, uint64(*m.TerminatingReplicas))
    +		i--
    +		dAtA[i] = 0x38
    +	}
     	if len(m.Conditions) > 0 {
     		for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- {
     			{
    @@ -3146,6 +3158,9 @@ func (m *DeploymentStatus) Size() (n int) {
     	if m.CollisionCount != nil {
     		n += 1 + sovGenerated(uint64(*m.CollisionCount))
     	}
    +	if m.TerminatingReplicas != nil {
    +		n += 1 + sovGenerated(uint64(*m.TerminatingReplicas))
    +	}
     	return n
     }
     
    @@ -3251,6 +3266,9 @@ func (m *ReplicaSetStatus) Size() (n int) {
     			n += 1 + l + sovGenerated(uint64(l))
     		}
     	}
    +	if m.TerminatingReplicas != nil {
    +		n += 1 + sovGenerated(uint64(*m.TerminatingReplicas))
    +	}
     	return n
     }
     
    @@ -3711,6 +3729,7 @@ func (this *DeploymentStatus) String() string {
     		`Conditions:` + repeatedStringForConditions + `,`,
     		`ReadyReplicas:` + fmt.Sprintf("%v", this.ReadyReplicas) + `,`,
     		`CollisionCount:` + valueToStringGenerated(this.CollisionCount) + `,`,
    +		`TerminatingReplicas:` + valueToStringGenerated(this.TerminatingReplicas) + `,`,
     		`}`,
     	}, "")
     	return s
    @@ -3797,6 +3816,7 @@ func (this *ReplicaSetStatus) String() string {
     		`ReadyReplicas:` + fmt.Sprintf("%v", this.ReadyReplicas) + `,`,
     		`AvailableReplicas:` + fmt.Sprintf("%v", this.AvailableReplicas) + `,`,
     		`Conditions:` + repeatedStringForConditions + `,`,
    +		`TerminatingReplicas:` + valueToStringGenerated(this.TerminatingReplicas) + `,`,
     		`}`,
     	}, "")
     	return s
    @@ -6261,6 +6281,26 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error {
     				}
     			}
     			m.CollisionCount = &v
    +		case 9:
    +			if wireType != 0 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field TerminatingReplicas", wireType)
    +			}
    +			var v int32
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				v |= int32(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			m.TerminatingReplicas = &v
     		default:
     			iNdEx = preIndex
     			skippy, err := skipGenerated(dAtA[iNdEx:])
    @@ -7193,6 +7233,26 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error {
     				return err
     			}
     			iNdEx = postIndex
    +		case 7:
    +			if wireType != 0 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field TerminatingReplicas", wireType)
    +			}
    +			var v int32
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				v |= int32(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			m.TerminatingReplicas = &v
     		default:
     			iNdEx = preIndex
     			skippy, err := skipGenerated(dAtA[iNdEx:])
    diff --git a/vendor/k8s.io/api/apps/v1beta2/generated.proto b/vendor/k8s.io/api/apps/v1beta2/generated.proto
    index c08a4c78b..37c6d5ae1 100644
    --- a/vendor/k8s.io/api/apps/v1beta2/generated.proto
    +++ b/vendor/k8s.io/api/apps/v1beta2/generated.proto
    @@ -323,19 +323,19 @@ message DeploymentStatus {
       // +optional
       optional int64 observedGeneration = 1;
     
    -  // Total number of non-terminated pods targeted by this deployment (their labels match the selector).
    +  // Total number of non-terminating pods targeted by this deployment (their labels match the selector).
       // +optional
       optional int32 replicas = 2;
     
    -  // Total number of non-terminated pods targeted by this deployment that have the desired template spec.
    +  // Total number of non-terminating pods targeted by this deployment that have the desired template spec.
       // +optional
       optional int32 updatedReplicas = 3;
     
    -  // readyReplicas is the number of pods targeted by this Deployment controller with a Ready Condition.
    +  // Total number of non-terminating pods targeted by this Deployment with a Ready Condition.
       // +optional
       optional int32 readyReplicas = 7;
     
    -  // Total number of available pods (ready for at least minReadySeconds) targeted by this deployment.
    +  // Total number of available non-terminating pods (ready for at least minReadySeconds) targeted by this deployment.
       // +optional
       optional int32 availableReplicas = 4;
     
    @@ -345,6 +345,13 @@ message DeploymentStatus {
       // +optional
       optional int32 unavailableReplicas = 5;
     
    +  // Total number of terminating pods targeted by this deployment. Terminating pods have a non-null
    +  // .metadata.deletionTimestamp and have not yet reached the Failed or Succeeded .status.phase.
    +  //
    +  // This is an alpha field. Enable DeploymentReplicaSetTerminatingReplicas to be able to use this field.
    +  // +optional
    +  optional int32 terminatingReplicas = 9;
    +
       // Represents the latest available observations of a deployment's current state.
       // +patchMergeKey=type
       // +patchStrategy=merge
    @@ -427,16 +434,16 @@ message ReplicaSetList {
       optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
     
       // List of ReplicaSets.
    -  // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller
    +  // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset
       repeated ReplicaSet items = 2;
     }
     
     // ReplicaSetSpec is the specification of a ReplicaSet.
     message ReplicaSetSpec {
    -  // Replicas is the number of desired replicas.
    +  // Replicas is the number of desired pods.
       // This is a pointer to distinguish between explicit zero and unspecified.
       // Defaults to 1.
    -  // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller
    +  // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset
       // +optional
       optional int32 replicas = 1;
     
    @@ -454,29 +461,36 @@ message ReplicaSetSpec {
     
       // Template is the object that describes the pod that will be created if
       // insufficient replicas are detected.
    -  // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template
    +  // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset/#pod-template
       // +optional
       optional .k8s.io.api.core.v1.PodTemplateSpec template = 3;
     }
     
     // ReplicaSetStatus represents the current status of a ReplicaSet.
     message ReplicaSetStatus {
    -  // Replicas is the most recently observed number of replicas.
    -  // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller
    +  // Replicas is the most recently observed number of non-terminating pods.
    +  // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset
       optional int32 replicas = 1;
     
    -  // The number of pods that have labels matching the labels of the pod template of the replicaset.
    +  // The number of non-terminating pods that have labels matching the labels of the pod template of the replicaset.
       // +optional
       optional int32 fullyLabeledReplicas = 2;
     
    -  // readyReplicas is the number of pods targeted by this ReplicaSet controller with a Ready Condition.
    +  // The number of non-terminating pods targeted by this ReplicaSet with a Ready Condition.
       // +optional
       optional int32 readyReplicas = 4;
     
    -  // The number of available replicas (ready for at least minReadySeconds) for this replica set.
    +  // The number of available non-terminating pods (ready for at least minReadySeconds) for this replica set.
       // +optional
       optional int32 availableReplicas = 5;
     
    +  // The number of terminating pods for this replica set. Terminating pods have a non-null .metadata.deletionTimestamp
    +  // and have not yet reached the Failed or Succeeded .status.phase.
    +  //
    +  // This is an alpha field. Enable DeploymentReplicaSetTerminatingReplicas to be able to use this field.
    +  // +optional
    +  optional int32 terminatingReplicas = 7;
    +
       // ObservedGeneration reflects the generation of the most recently observed ReplicaSet.
       // +optional
       optional int64 observedGeneration = 3;
    @@ -522,7 +536,7 @@ message RollingUpdateDaemonSet {
       // pod is available (Ready for at least minReadySeconds) the old DaemonSet pod
       // on that node is marked deleted. If the old pod becomes unavailable for any
       // reason (Ready transitions to false, is evicted, or is drained) an updated
    -  // pod is immediatedly created on that node without considering surge limits.
    +  // pod is immediately created on that node without considering surge limits.
       // Allowing surge implies the possibility that the resources consumed by the
       // daemonset on any given node can double if the readiness check fails, and
       // so resource intensive daemonsets should take into account that they may
    @@ -600,6 +614,9 @@ message Scale {
     message ScaleSpec {
       // desired number of instances for the scaled object.
       // +optional
    +  // +k8s:optional
    +  // +default=0
    +  // +k8s:minimum=0
       optional int32 replicas = 1;
     }
     
    @@ -747,6 +764,7 @@ message StatefulSetSpec {
       // the network identity of the set. Pods get DNS/hostnames that follow the
       // pattern: pod-specific-string.serviceName.default.svc.cluster.local
       // where "pod-specific-string" is managed by the StatefulSet controller.
    +  // +optional
       optional string serviceName = 5;
     
       // podManagementPolicy controls how pods are created during initial scale up,
    diff --git a/vendor/k8s.io/api/apps/v1beta2/types.go b/vendor/k8s.io/api/apps/v1beta2/types.go
    index c2624a941..e9dc85df0 100644
    --- a/vendor/k8s.io/api/apps/v1beta2/types.go
    +++ b/vendor/k8s.io/api/apps/v1beta2/types.go
    @@ -35,6 +35,9 @@ const (
     type ScaleSpec struct {
     	// desired number of instances for the scaled object.
     	// +optional
    +	// +k8s:optional
    +	// +default=0
    +	// +k8s:minimum=0
     	Replicas int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"`
     }
     
    @@ -63,6 +66,7 @@ type ScaleStatus struct {
     // +k8s:prerelease-lifecycle-gen:deprecated=1.9
     // +k8s:prerelease-lifecycle-gen:removed=1.16
     // +k8s:prerelease-lifecycle-gen:replacement=autoscaling,v1,Scale
    +// +k8s:isSubresource=/scale
     
     // Scale represents a scaling request for a resource.
     type Scale struct {
    @@ -269,6 +273,7 @@ type StatefulSetSpec struct {
     	// the network identity of the set. Pods get DNS/hostnames that follow the
     	// pattern: pod-specific-string.serviceName.default.svc.cluster.local
     	// where "pod-specific-string" is managed by the StatefulSet controller.
    +	// +optional
     	ServiceName string `json:"serviceName" protobuf:"bytes,5,opt,name=serviceName"`
     
     	// podManagementPolicy controls how pods are created during initial scale up,
    @@ -530,19 +535,19 @@ type DeploymentStatus struct {
     	// +optional
     	ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,1,opt,name=observedGeneration"`
     
    -	// Total number of non-terminated pods targeted by this deployment (their labels match the selector).
    +	// Total number of non-terminating pods targeted by this deployment (their labels match the selector).
     	// +optional
     	Replicas int32 `json:"replicas,omitempty" protobuf:"varint,2,opt,name=replicas"`
     
    -	// Total number of non-terminated pods targeted by this deployment that have the desired template spec.
    +	// Total number of non-terminating pods targeted by this deployment that have the desired template spec.
     	// +optional
     	UpdatedReplicas int32 `json:"updatedReplicas,omitempty" protobuf:"varint,3,opt,name=updatedReplicas"`
     
    -	// readyReplicas is the number of pods targeted by this Deployment controller with a Ready Condition.
    +	// Total number of non-terminating pods targeted by this Deployment with a Ready Condition.
     	// +optional
     	ReadyReplicas int32 `json:"readyReplicas,omitempty" protobuf:"varint,7,opt,name=readyReplicas"`
     
    -	// Total number of available pods (ready for at least minReadySeconds) targeted by this deployment.
    +	// Total number of available non-terminating pods (ready for at least minReadySeconds) targeted by this deployment.
     	// +optional
     	AvailableReplicas int32 `json:"availableReplicas,omitempty" protobuf:"varint,4,opt,name=availableReplicas"`
     
    @@ -552,6 +557,13 @@ type DeploymentStatus struct {
     	// +optional
     	UnavailableReplicas int32 `json:"unavailableReplicas,omitempty" protobuf:"varint,5,opt,name=unavailableReplicas"`
     
    +	// Total number of terminating pods targeted by this deployment. Terminating pods have a non-null
    +	// .metadata.deletionTimestamp and have not yet reached the Failed or Succeeded .status.phase.
    +	//
    +	// This is an alpha field. Enable DeploymentReplicaSetTerminatingReplicas to be able to use this field.
    +	// +optional
    +	TerminatingReplicas *int32 `json:"terminatingReplicas,omitempty" protobuf:"varint,9,opt,name=terminatingReplicas"`
    +
     	// Represents the latest available observations of a deployment's current state.
     	// +patchMergeKey=type
     	// +patchStrategy=merge
    @@ -673,7 +685,7 @@ type RollingUpdateDaemonSet struct {
     	// pod is available (Ready for at least minReadySeconds) the old DaemonSet pod
     	// on that node is marked deleted. If the old pod becomes unavailable for any
     	// reason (Ready transitions to false, is evicted, or is drained) an updated
    -	// pod is immediatedly created on that node without considering surge limits.
    +	// pod is immediately created on that node without considering surge limits.
     	// Allowing surge implies the possibility that the resources consumed by the
     	// daemonset on any given node can double if the readiness check fails, and
     	// so resource intensive daemonsets should take into account that they may
    @@ -897,16 +909,16 @@ type ReplicaSetList struct {
     	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
     
     	// List of ReplicaSets.
    -	// More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller
    +	// More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset
     	Items []ReplicaSet `json:"items" protobuf:"bytes,2,rep,name=items"`
     }
     
     // ReplicaSetSpec is the specification of a ReplicaSet.
     type ReplicaSetSpec struct {
    -	// Replicas is the number of desired replicas.
    +	// Replicas is the number of desired pods.
     	// This is a pointer to distinguish between explicit zero and unspecified.
     	// Defaults to 1.
    -	// More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller
    +	// More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset
     	// +optional
     	Replicas *int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"`
     
    @@ -924,29 +936,36 @@ type ReplicaSetSpec struct {
     
     	// Template is the object that describes the pod that will be created if
     	// insufficient replicas are detected.
    -	// More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template
    +	// More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset/#pod-template
     	// +optional
     	Template v1.PodTemplateSpec `json:"template,omitempty" protobuf:"bytes,3,opt,name=template"`
     }
     
     // ReplicaSetStatus represents the current status of a ReplicaSet.
     type ReplicaSetStatus struct {
    -	// Replicas is the most recently observed number of replicas.
    -	// More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller
    +	// Replicas is the most recently observed number of non-terminating pods.
    +	// More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset
     	Replicas int32 `json:"replicas" protobuf:"varint,1,opt,name=replicas"`
     
    -	// The number of pods that have labels matching the labels of the pod template of the replicaset.
    +	// The number of non-terminating pods that have labels matching the labels of the pod template of the replicaset.
     	// +optional
     	FullyLabeledReplicas int32 `json:"fullyLabeledReplicas,omitempty" protobuf:"varint,2,opt,name=fullyLabeledReplicas"`
     
    -	// readyReplicas is the number of pods targeted by this ReplicaSet controller with a Ready Condition.
    +	// The number of non-terminating pods targeted by this ReplicaSet with a Ready Condition.
     	// +optional
     	ReadyReplicas int32 `json:"readyReplicas,omitempty" protobuf:"varint,4,opt,name=readyReplicas"`
     
    -	// The number of available replicas (ready for at least minReadySeconds) for this replica set.
    +	// The number of available non-terminating pods (ready for at least minReadySeconds) for this replica set.
     	// +optional
     	AvailableReplicas int32 `json:"availableReplicas,omitempty" protobuf:"varint,5,opt,name=availableReplicas"`
     
    +	// The number of terminating pods for this replica set. Terminating pods have a non-null .metadata.deletionTimestamp
    +	// and have not yet reached the Failed or Succeeded .status.phase.
    +	//
    +	// This is an alpha field. Enable DeploymentReplicaSetTerminatingReplicas to be able to use this field.
    +	// +optional
    +	TerminatingReplicas *int32 `json:"terminatingReplicas,omitempty" protobuf:"varint,7,opt,name=terminatingReplicas"`
    +
     	// ObservedGeneration reflects the generation of the most recently observed ReplicaSet.
     	// +optional
     	ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,3,opt,name=observedGeneration"`
    diff --git a/vendor/k8s.io/api/apps/v1beta2/types_swagger_doc_generated.go b/vendor/k8s.io/api/apps/v1beta2/types_swagger_doc_generated.go
    index beec4b755..34d80af58 100644
    --- a/vendor/k8s.io/api/apps/v1beta2/types_swagger_doc_generated.go
    +++ b/vendor/k8s.io/api/apps/v1beta2/types_swagger_doc_generated.go
    @@ -177,11 +177,12 @@ func (DeploymentSpec) SwaggerDoc() map[string]string {
     var map_DeploymentStatus = map[string]string{
     	"":                    "DeploymentStatus is the most recently observed status of the Deployment.",
     	"observedGeneration":  "The generation observed by the deployment controller.",
    -	"replicas":            "Total number of non-terminated pods targeted by this deployment (their labels match the selector).",
    -	"updatedReplicas":     "Total number of non-terminated pods targeted by this deployment that have the desired template spec.",
    -	"readyReplicas":       "readyReplicas is the number of pods targeted by this Deployment controller with a Ready Condition.",
    -	"availableReplicas":   "Total number of available pods (ready for at least minReadySeconds) targeted by this deployment.",
    +	"replicas":            "Total number of non-terminating pods targeted by this deployment (their labels match the selector).",
    +	"updatedReplicas":     "Total number of non-terminating pods targeted by this deployment that have the desired template spec.",
    +	"readyReplicas":       "Total number of non-terminating pods targeted by this Deployment with a Ready Condition.",
    +	"availableReplicas":   "Total number of available non-terminating pods (ready for at least minReadySeconds) targeted by this deployment.",
     	"unavailableReplicas": "Total number of unavailable pods targeted by this deployment. This is the total number of pods that are still required for the deployment to have 100% available capacity. They may either be pods that are running but not yet available or pods that still have not been created.",
    +	"terminatingReplicas": "Total number of terminating pods targeted by this deployment. Terminating pods have a non-null .metadata.deletionTimestamp and have not yet reached the Failed or Succeeded .status.phase.\n\nThis is an alpha field. Enable DeploymentReplicaSetTerminatingReplicas to be able to use this field.",
     	"conditions":          "Represents the latest available observations of a deployment's current state.",
     	"collisionCount":      "Count of hash collisions for the Deployment. The Deployment controller uses this field as a collision avoidance mechanism when it needs to create the name for the newest ReplicaSet.",
     }
    @@ -227,7 +228,7 @@ func (ReplicaSetCondition) SwaggerDoc() map[string]string {
     var map_ReplicaSetList = map[string]string{
     	"":         "ReplicaSetList is a collection of ReplicaSets.",
     	"metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
    -	"items":    "List of ReplicaSets. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller",
    +	"items":    "List of ReplicaSets. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset",
     }
     
     func (ReplicaSetList) SwaggerDoc() map[string]string {
    @@ -236,10 +237,10 @@ func (ReplicaSetList) SwaggerDoc() map[string]string {
     
     var map_ReplicaSetSpec = map[string]string{
     	"":                "ReplicaSetSpec is the specification of a ReplicaSet.",
    -	"replicas":        "Replicas is the number of desired replicas. This is a pointer to distinguish between explicit zero and unspecified. Defaults to 1. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller",
    +	"replicas":        "Replicas is the number of desired pods. This is a pointer to distinguish between explicit zero and unspecified. Defaults to 1. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset",
     	"minReadySeconds": "Minimum number of seconds for which a newly created pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)",
     	"selector":        "Selector is a label query over pods that should match the replica count. Label keys and values that must match in order to be controlled by this replica set. It must match the pod template's labels. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors",
    -	"template":        "Template is the object that describes the pod that will be created if insufficient replicas are detected. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template",
    +	"template":        "Template is the object that describes the pod that will be created if insufficient replicas are detected. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset/#pod-template",
     }
     
     func (ReplicaSetSpec) SwaggerDoc() map[string]string {
    @@ -248,10 +249,11 @@ func (ReplicaSetSpec) SwaggerDoc() map[string]string {
     
     var map_ReplicaSetStatus = map[string]string{
     	"":                     "ReplicaSetStatus represents the current status of a ReplicaSet.",
    -	"replicas":             "Replicas is the most recently observed number of replicas. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller",
    -	"fullyLabeledReplicas": "The number of pods that have labels matching the labels of the pod template of the replicaset.",
    -	"readyReplicas":        "readyReplicas is the number of pods targeted by this ReplicaSet controller with a Ready Condition.",
    -	"availableReplicas":    "The number of available replicas (ready for at least minReadySeconds) for this replica set.",
    +	"replicas":             "Replicas is the most recently observed number of non-terminating pods. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset",
    +	"fullyLabeledReplicas": "The number of non-terminating pods that have labels matching the labels of the pod template of the replicaset.",
    +	"readyReplicas":        "The number of non-terminating pods targeted by this ReplicaSet with a Ready Condition.",
    +	"availableReplicas":    "The number of available non-terminating pods (ready for at least minReadySeconds) for this replica set.",
    +	"terminatingReplicas":  "The number of terminating pods for this replica set. Terminating pods have a non-null .metadata.deletionTimestamp and have not yet reached the Failed or Succeeded .status.phase.\n\nThis is an alpha field. Enable DeploymentReplicaSetTerminatingReplicas to be able to use this field.",
     	"observedGeneration":   "ObservedGeneration reflects the generation of the most recently observed ReplicaSet.",
     	"conditions":           "Represents the latest available observations of a replica set's current state.",
     }
    @@ -263,7 +265,7 @@ func (ReplicaSetStatus) SwaggerDoc() map[string]string {
     var map_RollingUpdateDaemonSet = map[string]string{
     	"":               "Spec to control the desired behavior of daemon set rolling update.",
     	"maxUnavailable": "The maximum number of DaemonSet pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of total number of DaemonSet pods at the start of the update (ex: 10%). Absolute number is calculated from percentage by rounding up. This cannot be 0 if MaxSurge is 0 Default value is 1. Example: when this is set to 30%, at most 30% of the total number of nodes that should be running the daemon pod (i.e. status.desiredNumberScheduled) can have their pods stopped for an update at any given time. The update starts by stopping at most 30% of those DaemonSet pods and then brings up new DaemonSet pods in their place. Once the new pods are available, it then proceeds onto other DaemonSet pods, thus ensuring that at least 70% of original number of DaemonSet pods are available at all times during the update.",
    -	"maxSurge":       "The maximum number of nodes with an existing available DaemonSet pod that can have an updated DaemonSet pod during during an update. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). This can not be 0 if MaxUnavailable is 0. Absolute number is calculated from percentage by rounding up to a minimum of 1. Default value is 0. Example: when this is set to 30%, at most 30% of the total number of nodes that should be running the daemon pod (i.e. status.desiredNumberScheduled) can have their a new pod created before the old pod is marked as deleted. The update starts by launching new pods on 30% of nodes. Once an updated pod is available (Ready for at least minReadySeconds) the old DaemonSet pod on that node is marked deleted. If the old pod becomes unavailable for any reason (Ready transitions to false, is evicted, or is drained) an updated pod is immediatedly created on that node without considering surge limits. Allowing surge implies the possibility that the resources consumed by the daemonset on any given node can double if the readiness check fails, and so resource intensive daemonsets should take into account that they may cause evictions during disruption.",
    +	"maxSurge":       "The maximum number of nodes with an existing available DaemonSet pod that can have an updated DaemonSet pod during during an update. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). This can not be 0 if MaxUnavailable is 0. Absolute number is calculated from percentage by rounding up to a minimum of 1. Default value is 0. Example: when this is set to 30%, at most 30% of the total number of nodes that should be running the daemon pod (i.e. status.desiredNumberScheduled) can have their a new pod created before the old pod is marked as deleted. The update starts by launching new pods on 30% of nodes. Once an updated pod is available (Ready for at least minReadySeconds) the old DaemonSet pod on that node is marked deleted. If the old pod becomes unavailable for any reason (Ready transitions to false, is evicted, or is drained) an updated pod is immediately created on that node without considering surge limits. Allowing surge implies the possibility that the resources consumed by the daemonset on any given node can double if the readiness check fails, and so resource intensive daemonsets should take into account that they may cause evictions during disruption.",
     }
     
     func (RollingUpdateDaemonSet) SwaggerDoc() map[string]string {
    diff --git a/vendor/k8s.io/api/apps/v1beta2/zz_generated.deepcopy.go b/vendor/k8s.io/api/apps/v1beta2/zz_generated.deepcopy.go
    index cd92792db..917ad4a22 100644
    --- a/vendor/k8s.io/api/apps/v1beta2/zz_generated.deepcopy.go
    +++ b/vendor/k8s.io/api/apps/v1beta2/zz_generated.deepcopy.go
    @@ -363,6 +363,11 @@ func (in *DeploymentSpec) DeepCopy() *DeploymentSpec {
     // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
     func (in *DeploymentStatus) DeepCopyInto(out *DeploymentStatus) {
     	*out = *in
    +	if in.TerminatingReplicas != nil {
    +		in, out := &in.TerminatingReplicas, &out.TerminatingReplicas
    +		*out = new(int32)
    +		**out = **in
    +	}
     	if in.Conditions != nil {
     		in, out := &in.Conditions, &out.Conditions
     		*out = make([]DeploymentCondition, len(*in))
    @@ -517,6 +522,11 @@ func (in *ReplicaSetSpec) DeepCopy() *ReplicaSetSpec {
     // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
     func (in *ReplicaSetStatus) DeepCopyInto(out *ReplicaSetStatus) {
     	*out = *in
    +	if in.TerminatingReplicas != nil {
    +		in, out := &in.TerminatingReplicas, &out.TerminatingReplicas
    +		*out = new(int32)
    +		**out = **in
    +	}
     	if in.Conditions != nil {
     		in, out := &in.Conditions, &out.Conditions
     		*out = make([]ReplicaSetCondition, len(*in))
    diff --git a/vendor/k8s.io/api/authentication/v1/doc.go b/vendor/k8s.io/api/authentication/v1/doc.go
    index 3bdc89bad..dc3aed4e4 100644
    --- a/vendor/k8s.io/api/authentication/v1/doc.go
    +++ b/vendor/k8s.io/api/authentication/v1/doc.go
    @@ -20,4 +20,4 @@ limitations under the License.
     // +k8s:openapi-gen=true
     // +k8s:prerelease-lifecycle-gen=true
     
    -package v1 // import "k8s.io/api/authentication/v1"
    +package v1
    diff --git a/vendor/k8s.io/api/authentication/v1alpha1/doc.go b/vendor/k8s.io/api/authentication/v1alpha1/doc.go
    index eb32def90..c199ccd49 100644
    --- a/vendor/k8s.io/api/authentication/v1alpha1/doc.go
    +++ b/vendor/k8s.io/api/authentication/v1alpha1/doc.go
    @@ -20,4 +20,4 @@ limitations under the License.
     // +k8s:openapi-gen=true
     // +k8s:prerelease-lifecycle-gen=true
     
    -package v1alpha1 // import "k8s.io/api/authentication/v1alpha1"
    +package v1alpha1
    diff --git a/vendor/k8s.io/api/authentication/v1beta1/doc.go b/vendor/k8s.io/api/authentication/v1beta1/doc.go
    index 2a2b176e4..af63dc845 100644
    --- a/vendor/k8s.io/api/authentication/v1beta1/doc.go
    +++ b/vendor/k8s.io/api/authentication/v1beta1/doc.go
    @@ -20,4 +20,4 @@ limitations under the License.
     // +k8s:openapi-gen=true
     // +k8s:prerelease-lifecycle-gen=true
     
    -package v1beta1 // import "k8s.io/api/authentication/v1beta1"
    +package v1beta1
    diff --git a/vendor/k8s.io/api/authorization/v1/doc.go b/vendor/k8s.io/api/authorization/v1/doc.go
    index 77e5a19c4..40bf8006e 100644
    --- a/vendor/k8s.io/api/authorization/v1/doc.go
    +++ b/vendor/k8s.io/api/authorization/v1/doc.go
    @@ -20,4 +20,4 @@ limitations under the License.
     // +k8s:prerelease-lifecycle-gen=true
     // +groupName=authorization.k8s.io
     
    -package v1 // import "k8s.io/api/authorization/v1"
    +package v1
    diff --git a/vendor/k8s.io/api/authorization/v1/generated.proto b/vendor/k8s.io/api/authorization/v1/generated.proto
    index 37b05b855..ff529c969 100644
    --- a/vendor/k8s.io/api/authorization/v1/generated.proto
    +++ b/vendor/k8s.io/api/authorization/v1/generated.proto
    @@ -167,16 +167,10 @@ message ResourceAttributes {
       optional string name = 7;
     
       // fieldSelector describes the limitation on access based on field.  It can only limit access, not broaden it.
    -  //
    -  // This field  is alpha-level. To use this field, you must enable the
    -  // `AuthorizeWithSelectors` feature gate (disabled by default).
       // +optional
       optional FieldSelectorAttributes fieldSelector = 8;
     
       // labelSelector describes the limitation on access based on labels.  It can only limit access, not broaden it.
    -  //
    -  // This field  is alpha-level. To use this field, you must enable the
    -  // `AuthorizeWithSelectors` feature gate (disabled by default).
       // +optional
       optional LabelSelectorAttributes labelSelector = 9;
     }
    diff --git a/vendor/k8s.io/api/authorization/v1/types.go b/vendor/k8s.io/api/authorization/v1/types.go
    index 36f5fa410..251e776b0 100644
    --- a/vendor/k8s.io/api/authorization/v1/types.go
    +++ b/vendor/k8s.io/api/authorization/v1/types.go
    @@ -119,15 +119,9 @@ type ResourceAttributes struct {
     	// +optional
     	Name string `json:"name,omitempty" protobuf:"bytes,7,opt,name=name"`
     	// fieldSelector describes the limitation on access based on field.  It can only limit access, not broaden it.
    -	//
    -	// This field  is alpha-level. To use this field, you must enable the
    -	// `AuthorizeWithSelectors` feature gate (disabled by default).
     	// +optional
     	FieldSelector *FieldSelectorAttributes `json:"fieldSelector,omitempty" protobuf:"bytes,8,opt,name=fieldSelector"`
     	// labelSelector describes the limitation on access based on labels.  It can only limit access, not broaden it.
    -	//
    -	// This field  is alpha-level. To use this field, you must enable the
    -	// `AuthorizeWithSelectors` feature gate (disabled by default).
     	// +optional
     	LabelSelector *LabelSelectorAttributes `json:"labelSelector,omitempty" protobuf:"bytes,9,opt,name=labelSelector"`
     }
    diff --git a/vendor/k8s.io/api/authorization/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/authorization/v1/types_swagger_doc_generated.go
    index dc6b8a89e..29d0aa846 100644
    --- a/vendor/k8s.io/api/authorization/v1/types_swagger_doc_generated.go
    +++ b/vendor/k8s.io/api/authorization/v1/types_swagger_doc_generated.go
    @@ -87,8 +87,8 @@ var map_ResourceAttributes = map[string]string{
     	"resource":      "Resource is one of the existing resource types.  \"*\" means all.",
     	"subresource":   "Subresource is one of the existing resource types.  \"\" means none.",
     	"name":          "Name is the name of the resource being requested for a \"get\" or deleted for a \"delete\". \"\" (empty) means all.",
    -	"fieldSelector": "fieldSelector describes the limitation on access based on field.  It can only limit access, not broaden it.\n\nThis field  is alpha-level. To use this field, you must enable the `AuthorizeWithSelectors` feature gate (disabled by default).",
    -	"labelSelector": "labelSelector describes the limitation on access based on labels.  It can only limit access, not broaden it.\n\nThis field  is alpha-level. To use this field, you must enable the `AuthorizeWithSelectors` feature gate (disabled by default).",
    +	"fieldSelector": "fieldSelector describes the limitation on access based on field.  It can only limit access, not broaden it.",
    +	"labelSelector": "labelSelector describes the limitation on access based on labels.  It can only limit access, not broaden it.",
     }
     
     func (ResourceAttributes) SwaggerDoc() map[string]string {
    diff --git a/vendor/k8s.io/api/authorization/v1beta1/doc.go b/vendor/k8s.io/api/authorization/v1beta1/doc.go
    index c996e35cc..9f7332d49 100644
    --- a/vendor/k8s.io/api/authorization/v1beta1/doc.go
    +++ b/vendor/k8s.io/api/authorization/v1beta1/doc.go
    @@ -21,4 +21,4 @@ limitations under the License.
     
     // +groupName=authorization.k8s.io
     
    -package v1beta1 // import "k8s.io/api/authorization/v1beta1"
    +package v1beta1
    diff --git a/vendor/k8s.io/api/autoscaling/v1/doc.go b/vendor/k8s.io/api/autoscaling/v1/doc.go
    index d64c9cbc1..4ee085e16 100644
    --- a/vendor/k8s.io/api/autoscaling/v1/doc.go
    +++ b/vendor/k8s.io/api/autoscaling/v1/doc.go
    @@ -19,4 +19,4 @@ limitations under the License.
     // +k8s:openapi-gen=true
     // +k8s:prerelease-lifecycle-gen=true
     
    -package v1 // import "k8s.io/api/autoscaling/v1"
    +package v1
    diff --git a/vendor/k8s.io/api/autoscaling/v1/generated.proto b/vendor/k8s.io/api/autoscaling/v1/generated.proto
    index 68c35b6b2..a17d7989d 100644
    --- a/vendor/k8s.io/api/autoscaling/v1/generated.proto
    +++ b/vendor/k8s.io/api/autoscaling/v1/generated.proto
    @@ -472,6 +472,9 @@ message Scale {
     message ScaleSpec {
       // replicas is the desired number of instances for the scaled object.
       // +optional
    +  // +k8s:optional
    +  // +default=0
    +  // +k8s:minimum=0
       optional int32 replicas = 1;
     }
     
    diff --git a/vendor/k8s.io/api/autoscaling/v1/types.go b/vendor/k8s.io/api/autoscaling/v1/types.go
    index 85c609e5c..e1e8809fe 100644
    --- a/vendor/k8s.io/api/autoscaling/v1/types.go
    +++ b/vendor/k8s.io/api/autoscaling/v1/types.go
    @@ -117,6 +117,7 @@ type HorizontalPodAutoscalerList struct {
     
     // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
     // +k8s:prerelease-lifecycle-gen:introduced=1.2
    +// +k8s:isSubresource=/scale
     
     // Scale represents a scaling request for a resource.
     type Scale struct {
    @@ -138,6 +139,9 @@ type Scale struct {
     type ScaleSpec struct {
     	// replicas is the desired number of instances for the scaled object.
     	// +optional
    +	// +k8s:optional
    +	// +default=0
    +	// +k8s:minimum=0
     	Replicas int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"`
     }
     
    diff --git a/vendor/k8s.io/api/autoscaling/v2/doc.go b/vendor/k8s.io/api/autoscaling/v2/doc.go
    index aafa2d4de..8dea6339d 100644
    --- a/vendor/k8s.io/api/autoscaling/v2/doc.go
    +++ b/vendor/k8s.io/api/autoscaling/v2/doc.go
    @@ -19,4 +19,4 @@ limitations under the License.
     // +k8s:openapi-gen=true
     // +k8s:prerelease-lifecycle-gen=true
     
    -package v2 // import "k8s.io/api/autoscaling/v2"
    +package v2
    diff --git a/vendor/k8s.io/api/autoscaling/v2/generated.pb.go b/vendor/k8s.io/api/autoscaling/v2/generated.pb.go
    index ece6dedad..40b60ebec 100644
    --- a/vendor/k8s.io/api/autoscaling/v2/generated.pb.go
    +++ b/vendor/k8s.io/api/autoscaling/v2/generated.pb.go
    @@ -751,115 +751,116 @@ func init() {
     }
     
     var fileDescriptor_4d5f2c8767749221 = []byte{
    -	// 1722 bytes of a gzipped FileDescriptorProto
    -	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x59, 0xcb, 0x8f, 0x1b, 0x49,
    -	0x19, 0x9f, 0xb6, 0x3d, 0xaf, 0xf2, 0x3c, 0x2b, 0x2f, 0x67, 0xa2, 0xd8, 0xa3, 0x26, 0x90, 0x07,
    -	0xa4, 0x4d, 0x4c, 0x88, 0x22, 0x72, 0x40, 0xd3, 0x13, 0x20, 0xa3, 0xcc, 0x30, 0x4e, 0x39, 0xc9,
    -	0x00, 0x02, 0x94, 0x72, 0x77, 0x8d, 0xa7, 0x18, 0xbb, 0xdb, 0xea, 0x6e, 0x3b, 0x99, 0x48, 0x48,
    -	0x5c, 0xb8, 0x23, 0x50, 0x84, 0xf8, 0x1f, 0x22, 0x4e, 0xa0, 0x70, 0x00, 0x09, 0x69, 0xf7, 0x90,
    -	0xcb, 0x4a, 0x39, 0xec, 0x21, 0x27, 0x6b, 0xe3, 0x95, 0xf6, 0xb8, 0x7f, 0x40, 0x4e, 0xab, 0x7a,
    -	0xf4, 0xd3, 0xaf, 0x71, 0x76, 0x32, 0xd2, 0xdc, 0x5c, 0x55, 0xdf, 0xf7, 0xfb, 0x1e, 0xf5, 0xbd,
    -	0xaa, 0x0d, 0xae, 0xee, 0xdf, 0x76, 0x35, 0x6a, 0x17, 0x71, 0x93, 0x16, 0x71, 0xcb, 0xb3, 0x5d,
    -	0x03, 0xd7, 0xa9, 0x55, 0x2b, 0xb6, 0x4b, 0xc5, 0x1a, 0xb1, 0x88, 0x83, 0x3d, 0x62, 0x6a, 0x4d,
    -	0xc7, 0xf6, 0x6c, 0x78, 0x5e, 0x90, 0x6a, 0xb8, 0x49, 0xb5, 0x08, 0xa9, 0xd6, 0x2e, 0xad, 0x5c,
    -	0xaf, 0x51, 0x6f, 0xaf, 0x55, 0xd5, 0x0c, 0xbb, 0x51, 0xac, 0xd9, 0x35, 0xbb, 0xc8, 0x39, 0xaa,
    -	0xad, 0x5d, 0xbe, 0xe2, 0x0b, 0xfe, 0x4b, 0x20, 0xad, 0xa8, 0x11, 0xa1, 0x86, 0xed, 0x90, 0x62,
    -	0xfb, 0x46, 0x52, 0xda, 0xca, 0xcd, 0x90, 0xa6, 0x81, 0x8d, 0x3d, 0x6a, 0x11, 0xe7, 0xa0, 0xd8,
    -	0xdc, 0xaf, 0x71, 0x26, 0x87, 0xb8, 0x76, 0xcb, 0x31, 0xc8, 0x58, 0x5c, 0x6e, 0xb1, 0x41, 0x3c,
    -	0xdc, 0x4f, 0x56, 0x71, 0x10, 0x97, 0xd3, 0xb2, 0x3c, 0xda, 0xe8, 0x15, 0x73, 0x6b, 0x14, 0x83,
    -	0x6b, 0xec, 0x91, 0x06, 0x4e, 0xf2, 0xa9, 0x5f, 0x29, 0xe0, 0xe2, 0xba, 0x6d, 0x79, 0x98, 0x71,
    -	0x20, 0x69, 0xc4, 0x16, 0xf1, 0x1c, 0x6a, 0x54, 0xf8, 0x6f, 0xb8, 0x0e, 0x32, 0x16, 0x6e, 0x90,
    -	0x9c, 0xb2, 0xaa, 0x5c, 0x99, 0xd5, 0x8b, 0xaf, 0x3b, 0x85, 0x89, 0x6e, 0xa7, 0x90, 0xf9, 0x25,
    -	0x6e, 0x90, 0xf7, 0x9d, 0x42, 0xa1, 0xd7, 0x71, 0x9a, 0x0f, 0xc3, 0x48, 0x10, 0x67, 0x86, 0xdb,
    -	0x60, 0xca, 0xc3, 0x4e, 0x8d, 0x78, 0xb9, 0xd4, 0xaa, 0x72, 0x25, 0x5b, 0xba, 0xac, 0x0d, 0xbc,
    -	0x3a, 0x4d, 0x48, 0x7f, 0xc8, 0xc9, 0xf5, 0x05, 0x29, 0x6f, 0x4a, 0xac, 0x91, 0x84, 0x81, 0x45,
    -	0x30, 0x6b, 0xf8, 0x6a, 0xe7, 0xd2, 0x5c, 0xb5, 0x65, 0x49, 0x3a, 0x1b, 0xda, 0x13, 0xd2, 0xa8,
    -	0x5f, 0x0f, 0x31, 0xd4, 0xc3, 0x5e, 0xcb, 0x3d, 0x1a, 0x43, 0x77, 0xc0, 0xb4, 0xd1, 0x72, 0x1c,
    -	0x62, 0xf9, 0x96, 0xfe, 0x60, 0xa4, 0xa5, 0x8f, 0x71, 0xbd, 0x45, 0x84, 0x0e, 0xfa, 0xa2, 0x94,
    -	0x3a, 0xbd, 0x2e, 0x40, 0x90, 0x8f, 0x36, 0xbe, 0xc1, 0x2f, 0x14, 0x70, 0x61, 0xdd, 0xb1, 0x5d,
    -	0xf7, 0x31, 0x71, 0x5c, 0x6a, 0x5b, 0xdb, 0xd5, 0x3f, 0x10, 0xc3, 0x43, 0x64, 0x97, 0x38, 0xc4,
    -	0x32, 0x08, 0x5c, 0x05, 0x99, 0x7d, 0x6a, 0x99, 0xd2, 0xdc, 0x39, 0xdf, 0xdc, 0xfb, 0xd4, 0x32,
    -	0x11, 0x3f, 0x61, 0x14, 0xdc, 0x21, 0xa9, 0x38, 0x45, 0xc4, 0xda, 0x12, 0x00, 0xb8, 0x49, 0xa5,
    -	0x00, 0xa9, 0x15, 0x94, 0x74, 0x60, 0xad, 0xbc, 0x21, 0x4f, 0x50, 0x84, 0x4a, 0xfd, 0xaf, 0x02,
    -	0x4e, 0xff, 0xec, 0x99, 0x47, 0x1c, 0x0b, 0xd7, 0x63, 0x81, 0x56, 0x01, 0x53, 0x0d, 0xbe, 0xe6,
    -	0x2a, 0x65, 0x4b, 0xdf, 0x1f, 0xe9, 0xb9, 0x0d, 0x93, 0x58, 0x1e, 0xdd, 0xa5, 0xc4, 0x09, 0xe3,
    -	0x44, 0x9c, 0x20, 0x09, 0x75, 0xe4, 0x81, 0xa7, 0x7e, 0xda, 0xab, 0xbe, 0x08, 0x9f, 0x8f, 0xa2,
    -	0xfe, 0xc7, 0x0a, 0x27, 0xf5, 0x9f, 0x0a, 0x58, 0xba, 0x57, 0x5e, 0xab, 0x08, 0xee, 0xb2, 0x5d,
    -	0xa7, 0xc6, 0x01, 0xbc, 0x0d, 0x32, 0xde, 0x41, 0xd3, 0xcf, 0x80, 0x4b, 0xfe, 0x85, 0x3f, 0x3c,
    -	0x68, 0xb2, 0x0c, 0x38, 0x9d, 0xa4, 0x67, 0xfb, 0x88, 0x73, 0xc0, 0xef, 0x80, 0xc9, 0x36, 0x93,
    -	0xcb, 0xb5, 0x9c, 0xd4, 0xe7, 0x25, 0xeb, 0x24, 0x57, 0x06, 0x89, 0x33, 0x78, 0x07, 0xcc, 0x37,
    -	0x89, 0x43, 0x6d, 0xb3, 0x42, 0x0c, 0xdb, 0x32, 0x5d, 0x1e, 0x30, 0x93, 0xfa, 0x19, 0x49, 0x3c,
    -	0x5f, 0x8e, 0x1e, 0xa2, 0x38, 0xad, 0xfa, 0x8f, 0x14, 0x58, 0x0c, 0x15, 0x40, 0xad, 0x3a, 0x71,
    -	0xe1, 0xef, 0xc1, 0x8a, 0xeb, 0xe1, 0x2a, 0xad, 0xd3, 0xe7, 0xd8, 0xa3, 0xb6, 0xb5, 0x43, 0x2d,
    -	0xd3, 0x7e, 0x1a, 0x47, 0xcf, 0x77, 0x3b, 0x85, 0x95, 0xca, 0x40, 0x2a, 0x34, 0x04, 0x01, 0xde,
    -	0x07, 0x73, 0x2e, 0xa9, 0x13, 0xc3, 0x13, 0xf6, 0x4a, 0xbf, 0x5c, 0xee, 0x76, 0x0a, 0x73, 0x95,
    -	0xc8, 0xfe, 0xfb, 0x4e, 0xe1, 0x54, 0xcc, 0x31, 0xe2, 0x10, 0xc5, 0x98, 0xe1, 0xaf, 0xc1, 0x4c,
    -	0x93, 0xfd, 0xa2, 0xc4, 0xcd, 0xa5, 0x56, 0xd3, 0x23, 0x22, 0x24, 0xe9, 0x6b, 0x7d, 0x49, 0x7a,
    -	0x69, 0xa6, 0x2c, 0x41, 0x50, 0x00, 0xa7, 0xbe, 0x4a, 0x81, 0x73, 0xf7, 0x6c, 0x87, 0x3e, 0x67,
    -	0xc9, 0x5f, 0x2f, 0xdb, 0xe6, 0x9a, 0x04, 0x23, 0x0e, 0x7c, 0x02, 0x66, 0x58, 0x93, 0x31, 0xb1,
    -	0x87, 0x65, 0x60, 0xfe, 0x30, 0x22, 0x36, 0xe8, 0x15, 0x5a, 0x73, 0xbf, 0xc6, 0x36, 0x5c, 0x8d,
    -	0x51, 0x6b, 0xed, 0x1b, 0x9a, 0xa8, 0x17, 0x5b, 0xc4, 0xc3, 0x61, 0x4a, 0x87, 0x7b, 0x28, 0x40,
    -	0x85, 0xbf, 0x02, 0x19, 0xb7, 0x49, 0x0c, 0x19, 0xa0, 0xb7, 0x86, 0x19, 0xd5, 0x5f, 0xc7, 0x4a,
    -	0x93, 0x18, 0x61, 0x79, 0x61, 0x2b, 0xc4, 0x11, 0xe1, 0x13, 0x30, 0xe5, 0xf2, 0x40, 0xe6, 0x77,
    -	0x99, 0x2d, 0xdd, 0xfe, 0x00, 0x6c, 0x91, 0x08, 0x41, 0x7e, 0x89, 0x35, 0x92, 0xb8, 0xea, 0x67,
    -	0x0a, 0x28, 0x0c, 0xe0, 0xd4, 0xc9, 0x1e, 0x6e, 0x53, 0xdb, 0x81, 0x0f, 0xc0, 0x34, 0xdf, 0x79,
    -	0xd4, 0x94, 0x0e, 0xbc, 0x76, 0xa8, 0x7b, 0xe3, 0x21, 0xaa, 0x67, 0x59, 0xf6, 0x55, 0x04, 0x3b,
    -	0xf2, 0x71, 0xe0, 0x0e, 0x98, 0xe5, 0x3f, 0xef, 0xda, 0x4f, 0x2d, 0xe9, 0xb7, 0x71, 0x40, 0xe7,
    -	0x59, 0xd1, 0xaf, 0xf8, 0x00, 0x28, 0xc4, 0x52, 0xff, 0x9c, 0x06, 0xab, 0x03, 0xec, 0x59, 0xb7,
    -	0x2d, 0x93, 0xb2, 0x18, 0x87, 0xf7, 0x62, 0x69, 0x7e, 0x33, 0x91, 0xe6, 0x97, 0x46, 0xf1, 0x47,
    -	0xd2, 0x7e, 0x33, 0xb8, 0xa0, 0x54, 0x0c, 0x4b, 0xba, 0xf9, 0x7d, 0xa7, 0xd0, 0x67, 0xb0, 0xd2,
    -	0x02, 0xa4, 0xf8, 0x65, 0xc0, 0x36, 0x80, 0x75, 0xec, 0x7a, 0x0f, 0x1d, 0x6c, 0xb9, 0x42, 0x12,
    -	0x6d, 0x10, 0x79, 0xf5, 0xd7, 0x0e, 0x17, 0xb4, 0x8c, 0x43, 0x5f, 0x91, 0x5a, 0xc0, 0xcd, 0x1e,
    -	0x34, 0xd4, 0x47, 0x02, 0xfc, 0x1e, 0x98, 0x72, 0x08, 0x76, 0x6d, 0x2b, 0x97, 0xe1, 0x56, 0x04,
    -	0xc1, 0x82, 0xf8, 0x2e, 0x92, 0xa7, 0xf0, 0x2a, 0x98, 0x6e, 0x10, 0xd7, 0xc5, 0x35, 0x92, 0x9b,
    -	0xe4, 0x84, 0x41, 0x79, 0xdd, 0x12, 0xdb, 0xc8, 0x3f, 0x57, 0x3f, 0x57, 0xc0, 0x85, 0x01, 0x7e,
    -	0xdc, 0xa4, 0xae, 0x07, 0x7f, 0xdb, 0x93, 0x95, 0xda, 0xe1, 0x0c, 0x64, 0xdc, 0x3c, 0x27, 0x83,
    -	0x7a, 0xe0, 0xef, 0x44, 0x32, 0x72, 0x07, 0x4c, 0x52, 0x8f, 0x34, 0xfc, 0x3a, 0x53, 0x1a, 0x3f,
    -	0x6d, 0xc2, 0x0a, 0xbe, 0xc1, 0x80, 0x90, 0xc0, 0x53, 0x5f, 0xa5, 0x07, 0x9a, 0xc5, 0xd2, 0x16,
    -	0xb6, 0xc1, 0x02, 0x5f, 0xc9, 0x9e, 0x49, 0x76, 0xa5, 0x71, 0xc3, 0x8a, 0xc2, 0x90, 0x19, 0x45,
    -	0x3f, 0x2b, 0xb5, 0x58, 0xa8, 0xc4, 0x50, 0x51, 0x42, 0x0a, 0xbc, 0x01, 0xb2, 0x0d, 0x6a, 0x21,
    -	0xd2, 0xac, 0x53, 0x03, 0xbb, 0xb2, 0x09, 0x2d, 0x76, 0x3b, 0x85, 0xec, 0x56, 0xb8, 0x8d, 0xa2,
    -	0x34, 0xf0, 0xc7, 0x20, 0xdb, 0xc0, 0xcf, 0x02, 0x16, 0xd1, 0x2c, 0x4e, 0x49, 0x79, 0xd9, 0xad,
    -	0xf0, 0x08, 0x45, 0xe9, 0x60, 0x99, 0xc5, 0x00, 0x6b, 0xb3, 0x6e, 0x2e, 0xc3, 0x9d, 0xfb, 0xdd,
    -	0x91, 0x0d, 0x99, 0x97, 0xb7, 0x48, 0xa8, 0x70, 0x6e, 0xe4, 0xc3, 0x40, 0x13, 0xcc, 0x54, 0x65,
    -	0xa9, 0xe1, 0x61, 0x95, 0x2d, 0xfd, 0xe4, 0x03, 0xee, 0x4b, 0x22, 0xe8, 0x73, 0x2c, 0x24, 0xfc,
    -	0x15, 0x0a, 0x90, 0xd5, 0x97, 0x19, 0x70, 0x71, 0x68, 0x89, 0x84, 0x3f, 0x07, 0xd0, 0xae, 0xba,
    -	0xc4, 0x69, 0x13, 0xf3, 0x17, 0xe2, 0x91, 0xc0, 0x66, 0x3a, 0x76, 0x7f, 0x69, 0xfd, 0x2c, 0xcb,
    -	0xa6, 0xed, 0x9e, 0x53, 0xd4, 0x87, 0x03, 0x1a, 0x60, 0x9e, 0xe5, 0x98, 0xb8, 0x31, 0x2a, 0xc7,
    -	0xc7, 0xf1, 0x12, 0x78, 0x99, 0x4d, 0x03, 0x9b, 0x51, 0x10, 0x14, 0xc7, 0x84, 0x6b, 0x60, 0x51,
    -	0x4e, 0x32, 0x89, 0x1b, 0x3c, 0x27, 0xfd, 0xbc, 0xb8, 0x1e, 0x3f, 0x46, 0x49, 0x7a, 0x06, 0x61,
    -	0x12, 0x97, 0x3a, 0xc4, 0x0c, 0x20, 0x32, 0x71, 0x88, 0xbb, 0xf1, 0x63, 0x94, 0xa4, 0x87, 0x35,
    -	0xb0, 0x20, 0x51, 0xe5, 0xad, 0xe6, 0x26, 0x79, 0x4c, 0x8c, 0x1e, 0x32, 0x65, 0x5b, 0x0a, 0xe2,
    -	0x7b, 0x3d, 0x06, 0x83, 0x12, 0xb0, 0xd0, 0x06, 0xc0, 0xf0, 0x8b, 0xa6, 0x9b, 0x9b, 0xe2, 0x42,
    -	0xee, 0x8c, 0x1f, 0x25, 0x41, 0xe1, 0x0d, 0x3b, 0x7a, 0xb0, 0xe5, 0xa2, 0x88, 0x08, 0xf5, 0x6f,
    -	0x0a, 0x58, 0x4a, 0x0e, 0xa9, 0xc1, 0x7b, 0x40, 0x19, 0xf8, 0x1e, 0xf8, 0x1d, 0x98, 0x11, 0x33,
    -	0x8f, 0xed, 0xc8, 0x6b, 0xff, 0xd1, 0x21, 0xcb, 0x1a, 0xae, 0x92, 0x7a, 0x45, 0xb2, 0x8a, 0x20,
    -	0xf6, 0x57, 0x28, 0x80, 0x54, 0x5f, 0x64, 0x00, 0x08, 0x73, 0x0a, 0xde, 0x8c, 0xf5, 0xb1, 0xd5,
    -	0x44, 0x1f, 0x5b, 0x8a, 0x3e, 0x2e, 0x22, 0x3d, 0xeb, 0x01, 0x98, 0xb2, 0x79, 0x99, 0x91, 0x1a,
    -	0x5e, 0x1f, 0xe2, 0xc7, 0x60, 0xde, 0x09, 0x80, 0x74, 0xc0, 0x1a, 0x83, 0xac, 0x53, 0x12, 0x08,
    -	0x6e, 0x80, 0x4c, 0xd3, 0x36, 0xfd, 0x29, 0x65, 0xd8, 0x58, 0x57, 0xb6, 0x4d, 0x37, 0x06, 0x37,
    -	0xc3, 0x34, 0x66, 0xbb, 0x88, 0x43, 0xb0, 0x29, 0xd1, 0xff, 0x94, 0xc0, 0xc3, 0x31, 0x5b, 0x2a,
    -	0x0e, 0x81, 0xeb, 0xf7, 0x60, 0x17, 0xde, 0xf3, 0x4f, 0x50, 0x00, 0x07, 0xff, 0x08, 0x96, 0x8d,
    -	0xe4, 0x03, 0x38, 0x37, 0x3d, 0x72, 0xb0, 0x1a, 0xfa, 0x75, 0x40, 0x3f, 0xd3, 0xed, 0x14, 0x96,
    -	0x7b, 0x48, 0x50, 0xaf, 0x24, 0x66, 0x19, 0x91, 0xef, 0x26, 0x59, 0xe7, 0x86, 0x59, 0xd6, 0xef,
    -	0x85, 0x28, 0x2c, 0xf3, 0x4f, 0x50, 0x00, 0xa7, 0xfe, 0x3d, 0x03, 0xe6, 0x62, 0x6f, 0xb1, 0x63,
    -	0x8e, 0x0c, 0x91, 0xcc, 0x47, 0x16, 0x19, 0x02, 0xee, 0x48, 0x23, 0x43, 0x40, 0x1e, 0x53, 0x64,
    -	0x08, 0x61, 0xc7, 0x14, 0x19, 0x11, 0xcb, 0xfa, 0x44, 0xc6, 0x27, 0x29, 0x3f, 0x32, 0xc4, 0xb0,
    -	0x70, 0xb8, 0xc8, 0x10, 0xb4, 0x91, 0xc8, 0xd8, 0x8e, 0x3e, 0x6f, 0x47, 0xcc, 0x6a, 0x9a, 0xef,
    -	0x56, 0xed, 0x41, 0x0b, 0x5b, 0x1e, 0xf5, 0x0e, 0xf4, 0xd9, 0x9e, 0xa7, 0xb0, 0x09, 0xe6, 0x70,
    -	0x9b, 0x38, 0xb8, 0x46, 0xf8, 0xb6, 0x8c, 0x8f, 0x71, 0x71, 0x97, 0xd8, 0x4b, 0x74, 0x2d, 0x82,
    -	0x83, 0x62, 0xa8, 0xac, 0xa5, 0xcb, 0xf5, 0x23, 0x2f, 0x78, 0xe2, 0xca, 0x2e, 0xc7, 0x5b, 0xfa,
    -	0x5a, 0xcf, 0x29, 0xea, 0xc3, 0xa1, 0xfe, 0x35, 0x05, 0x96, 0x7b, 0x3e, 0x2e, 0x84, 0x4e, 0x51,
    -	0x3e, 0x92, 0x53, 0x52, 0xc7, 0xe8, 0x94, 0xf4, 0xd8, 0x4e, 0xf9, 0x77, 0x0a, 0xc0, 0xde, 0xfe,
    -	0x00, 0x0f, 0xf8, 0x58, 0x61, 0x38, 0xb4, 0x4a, 0x4c, 0x71, 0xfc, 0x2d, 0x67, 0xe0, 0xe8, 0x38,
    -	0x12, 0x85, 0x45, 0x49, 0x39, 0x47, 0xff, 0x91, 0x35, 0xfc, 0xa4, 0x95, 0x3e, 0xb2, 0x4f, 0x5a,
    -	0xea, 0xff, 0x92, 0x7e, 0x3b, 0x81, 0x9f, 0xcf, 0xfa, 0xdd, 0x72, 0xfa, 0x78, 0x6e, 0x59, 0xfd,
    -	0x8f, 0x02, 0x96, 0x92, 0x63, 0xc4, 0x09, 0xf9, 0x76, 0xfa, 0xff, 0xb8, 0xea, 0x27, 0xf1, 0xbb,
    -	0xe9, 0x4b, 0x05, 0x9c, 0x3e, 0x39, 0x7f, 0x93, 0xa8, 0xff, 0xea, 0x55, 0xf7, 0x04, 0xfc, 0xd9,
    -	0xa1, 0xff, 0xf4, 0xf5, 0xbb, 0xfc, 0xc4, 0x9b, 0x77, 0xf9, 0x89, 0xb7, 0xef, 0xf2, 0x13, 0x7f,
    -	0xea, 0xe6, 0x95, 0xd7, 0xdd, 0xbc, 0xf2, 0xa6, 0x9b, 0x57, 0xde, 0x76, 0xf3, 0xca, 0x17, 0xdd,
    -	0xbc, 0xf2, 0x97, 0x2f, 0xf3, 0x13, 0xbf, 0x39, 0x3f, 0xf0, 0x9f, 0xc2, 0x6f, 0x02, 0x00, 0x00,
    -	0xff, 0xff, 0xca, 0x8b, 0x47, 0xba, 0x45, 0x1c, 0x00, 0x00,
    +	// 1742 bytes of a gzipped FileDescriptorProto
    +	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x59, 0xc9, 0x8f, 0x1b, 0x4b,
    +	0x19, 0x9f, 0xb6, 0x3d, 0x5b, 0x79, 0xd6, 0xca, 0xe6, 0x4c, 0x14, 0x7b, 0xd4, 0x04, 0xb2, 0x40,
    +	0xda, 0xc4, 0x84, 0x28, 0x22, 0x07, 0x34, 0x3d, 0x01, 0x32, 0xca, 0x0c, 0xe3, 0x94, 0x27, 0x19,
    +	0x76, 0xa5, 0xdc, 0x5d, 0xe3, 0x29, 0xc6, 0xee, 0xb6, 0xba, 0xdb, 0x4e, 0x26, 0x12, 0x12, 0x17,
    +	0xee, 0x08, 0x14, 0xf1, 0x4f, 0x44, 0x9c, 0x40, 0xe1, 0x00, 0x12, 0x12, 0x1c, 0x72, 0x41, 0xca,
    +	0x81, 0x43, 0x4e, 0x16, 0x31, 0xd2, 0x3b, 0xbe, 0xe3, 0x3b, 0xe4, 0xf4, 0x54, 0x4b, 0xaf, 0xde,
    +	0xc6, 0x79, 0x93, 0x91, 0xe6, 0xe6, 0xaa, 0xfa, 0xbe, 0xdf, 0xb7, 0xd4, 0xb7, 0x55, 0x1b, 0x5c,
    +	0x3f, 0xb8, 0xeb, 0x6a, 0xd4, 0x2e, 0xe2, 0x26, 0x2d, 0xe2, 0x96, 0x67, 0xbb, 0x06, 0xae, 0x53,
    +	0xab, 0x56, 0x6c, 0x97, 0x8a, 0x35, 0x62, 0x11, 0x07, 0x7b, 0xc4, 0xd4, 0x9a, 0x8e, 0xed, 0xd9,
    +	0xf0, 0xa2, 0x20, 0xd5, 0x70, 0x93, 0x6a, 0x11, 0x52, 0xad, 0x5d, 0x5a, 0xb9, 0x59, 0xa3, 0xde,
    +	0x7e, 0xab, 0xaa, 0x19, 0x76, 0xa3, 0x58, 0xb3, 0x6b, 0x76, 0x91, 0x73, 0x54, 0x5b, 0x7b, 0x7c,
    +	0xc5, 0x17, 0xfc, 0x97, 0x40, 0x5a, 0x51, 0x23, 0x42, 0x0d, 0xdb, 0x21, 0xc5, 0xf6, 0xad, 0xa4,
    +	0xb4, 0x95, 0xdb, 0x21, 0x4d, 0x03, 0x1b, 0xfb, 0xd4, 0x22, 0xce, 0x61, 0xb1, 0x79, 0x50, 0xe3,
    +	0x4c, 0x0e, 0x71, 0xed, 0x96, 0x63, 0x90, 0xb1, 0xb8, 0xdc, 0x62, 0x83, 0x78, 0xb8, 0x9f, 0xac,
    +	0xe2, 0x20, 0x2e, 0xa7, 0x65, 0x79, 0xb4, 0xd1, 0x2b, 0xe6, 0xce, 0x28, 0x06, 0xd7, 0xd8, 0x27,
    +	0x0d, 0x9c, 0xe4, 0x53, 0x3f, 0x53, 0xc0, 0xe5, 0x75, 0xdb, 0xf2, 0x30, 0xe3, 0x40, 0xd2, 0x88,
    +	0x2d, 0xe2, 0x39, 0xd4, 0xa8, 0xf0, 0xdf, 0x70, 0x1d, 0x64, 0x2c, 0xdc, 0x20, 0x39, 0x65, 0x55,
    +	0xb9, 0x36, 0xab, 0x17, 0xdf, 0x74, 0x0a, 0x13, 0xdd, 0x4e, 0x21, 0xf3, 0x63, 0xdc, 0x20, 0x1f,
    +	0x3a, 0x85, 0x42, 0xaf, 0xe3, 0x34, 0x1f, 0x86, 0x91, 0x20, 0xce, 0x0c, 0xb7, 0xc1, 0x94, 0x87,
    +	0x9d, 0x1a, 0xf1, 0x72, 0xa9, 0x55, 0xe5, 0x5a, 0xb6, 0x74, 0x55, 0x1b, 0x78, 0x75, 0x9a, 0x90,
    +	0xbe, 0xc3, 0xc9, 0xf5, 0x05, 0x29, 0x6f, 0x4a, 0xac, 0x91, 0x84, 0x81, 0x45, 0x30, 0x6b, 0xf8,
    +	0x6a, 0xe7, 0xd2, 0x5c, 0xb5, 0x65, 0x49, 0x3a, 0x1b, 0xda, 0x13, 0xd2, 0xa8, 0x9f, 0x0f, 0x31,
    +	0xd4, 0xc3, 0x5e, 0xcb, 0x3d, 0x1e, 0x43, 0x77, 0xc1, 0xb4, 0xd1, 0x72, 0x1c, 0x62, 0xf9, 0x96,
    +	0x7e, 0x6b, 0xa4, 0xa5, 0x4f, 0x70, 0xbd, 0x45, 0x84, 0x0e, 0xfa, 0xa2, 0x94, 0x3a, 0xbd, 0x2e,
    +	0x40, 0x90, 0x8f, 0x36, 0xbe, 0xc1, 0x2f, 0x15, 0x70, 0x69, 0xdd, 0xb1, 0x5d, 0xf7, 0x09, 0x71,
    +	0x5c, 0x6a, 0x5b, 0xdb, 0xd5, 0x5f, 0x13, 0xc3, 0x43, 0x64, 0x8f, 0x38, 0xc4, 0x32, 0x08, 0x5c,
    +	0x05, 0x99, 0x03, 0x6a, 0x99, 0xd2, 0xdc, 0x39, 0xdf, 0xdc, 0x87, 0xd4, 0x32, 0x11, 0x3f, 0x61,
    +	0x14, 0xdc, 0x21, 0xa9, 0x38, 0x45, 0xc4, 0xda, 0x12, 0x00, 0xb8, 0x49, 0xa5, 0x00, 0xa9, 0x15,
    +	0x94, 0x74, 0x60, 0xad, 0xbc, 0x21, 0x4f, 0x50, 0x84, 0x4a, 0xfd, 0xbb, 0x02, 0xce, 0xfe, 0xe0,
    +	0xb9, 0x47, 0x1c, 0x0b, 0xd7, 0x63, 0x81, 0x56, 0x01, 0x53, 0x0d, 0xbe, 0xe6, 0x2a, 0x65, 0x4b,
    +	0xdf, 0x1c, 0xe9, 0xb9, 0x0d, 0x93, 0x58, 0x1e, 0xdd, 0xa3, 0xc4, 0x09, 0xe3, 0x44, 0x9c, 0x20,
    +	0x09, 0x75, 0xec, 0x81, 0xa7, 0xfe, 0xbb, 0x57, 0x7d, 0x11, 0x3e, 0x9f, 0x44, 0xfd, 0x4f, 0x15,
    +	0x4e, 0xea, 0x9f, 0x15, 0xb0, 0xf4, 0xa0, 0xbc, 0x56, 0x11, 0xdc, 0x65, 0xbb, 0x4e, 0x8d, 0x43,
    +	0x78, 0x17, 0x64, 0xbc, 0xc3, 0xa6, 0x9f, 0x01, 0x57, 0xfc, 0x0b, 0xdf, 0x39, 0x6c, 0xb2, 0x0c,
    +	0x38, 0x9b, 0xa4, 0x67, 0xfb, 0x88, 0x73, 0xc0, 0xaf, 0x81, 0xc9, 0x36, 0x93, 0xcb, 0xb5, 0x9c,
    +	0xd4, 0xe7, 0x25, 0xeb, 0x24, 0x57, 0x06, 0x89, 0x33, 0x78, 0x0f, 0xcc, 0x37, 0x89, 0x43, 0x6d,
    +	0xb3, 0x42, 0x0c, 0xdb, 0x32, 0x5d, 0x1e, 0x30, 0x93, 0xfa, 0x39, 0x49, 0x3c, 0x5f, 0x8e, 0x1e,
    +	0xa2, 0x38, 0xad, 0xfa, 0x45, 0x0a, 0x2c, 0x86, 0x0a, 0xa0, 0x56, 0x9d, 0xb8, 0xf0, 0x57, 0x60,
    +	0xc5, 0xf5, 0x70, 0x95, 0xd6, 0xe9, 0x0b, 0xec, 0x51, 0xdb, 0xda, 0xa5, 0x96, 0x69, 0x3f, 0x8b,
    +	0xa3, 0xe7, 0xbb, 0x9d, 0xc2, 0x4a, 0x65, 0x20, 0x15, 0x1a, 0x82, 0x00, 0x1f, 0x82, 0x39, 0x97,
    +	0xd4, 0x89, 0xe1, 0x09, 0x7b, 0xa5, 0x5f, 0xae, 0x76, 0x3b, 0x85, 0xb9, 0x4a, 0x64, 0xff, 0x43,
    +	0xa7, 0x70, 0x26, 0xe6, 0x18, 0x71, 0x88, 0x62, 0xcc, 0xf0, 0xa7, 0x60, 0xa6, 0xc9, 0x7e, 0x51,
    +	0xe2, 0xe6, 0x52, 0xab, 0xe9, 0x11, 0x11, 0x92, 0xf4, 0xb5, 0xbe, 0x24, 0xbd, 0x34, 0x53, 0x96,
    +	0x20, 0x28, 0x80, 0x83, 0x3f, 0x07, 0xb3, 0x9e, 0x5d, 0x27, 0x0e, 0xb6, 0x0c, 0x92, 0xcb, 0xf0,
    +	0x38, 0xd1, 0x22, 0xd8, 0x41, 0x43, 0xd0, 0x9a, 0x07, 0x35, 0x2e, 0xcc, 0xef, 0x56, 0xda, 0xa3,
    +	0x16, 0xb6, 0x3c, 0xea, 0x1d, 0xea, 0xf3, 0xac, 0x8e, 0xec, 0xf8, 0x20, 0x28, 0xc4, 0x53, 0x5f,
    +	0xa7, 0xc0, 0x85, 0x07, 0xb6, 0x43, 0x5f, 0xb0, 0xca, 0x52, 0x2f, 0xdb, 0xe6, 0x9a, 0xd4, 0x94,
    +	0x38, 0xf0, 0x29, 0x98, 0x61, 0x1d, 0xcc, 0xc4, 0x1e, 0x96, 0x51, 0xff, 0xed, 0x61, 0x72, 0x5d,
    +	0x8d, 0x51, 0x6b, 0xed, 0x5b, 0x9a, 0x28, 0x46, 0x5b, 0xc4, 0xc3, 0x61, 0xbd, 0x08, 0xf7, 0x50,
    +	0x80, 0x0a, 0x7f, 0x02, 0x32, 0x6e, 0x93, 0x18, 0x32, 0xfa, 0xef, 0x0c, 0xf3, 0x58, 0x7f, 0x1d,
    +	0x2b, 0x4d, 0x62, 0x84, 0xb5, 0x8b, 0xad, 0x10, 0x47, 0x84, 0x4f, 0xc1, 0x94, 0xcb, 0xb3, 0x84,
    +	0x07, 0x4a, 0xb6, 0x74, 0xf7, 0x23, 0xb0, 0x45, 0x96, 0x05, 0xc9, 0x2b, 0xd6, 0x48, 0xe2, 0xaa,
    +	0xff, 0x51, 0x40, 0x61, 0x00, 0xa7, 0x4e, 0xf6, 0x71, 0x9b, 0xda, 0x0e, 0x7c, 0x04, 0xa6, 0xf9,
    +	0xce, 0xe3, 0xa6, 0x74, 0xe0, 0x8d, 0x23, 0x05, 0x05, 0x8f, 0x7f, 0x3d, 0xcb, 0x52, 0xbb, 0x22,
    +	0xd8, 0x91, 0x8f, 0x03, 0x77, 0xc1, 0x2c, 0xff, 0x79, 0xdf, 0x7e, 0x66, 0x49, 0xbf, 0x8d, 0x03,
    +	0xca, 0x23, 0xa1, 0xe2, 0x03, 0xa0, 0x10, 0x4b, 0xfd, 0x5d, 0x1a, 0xac, 0x0e, 0xb0, 0x67, 0xdd,
    +	0xb6, 0x4c, 0xca, 0x12, 0x08, 0x3e, 0x88, 0xd5, 0x90, 0xdb, 0x89, 0x1a, 0x72, 0x65, 0x14, 0x7f,
    +	0xa4, 0xa6, 0x6c, 0x06, 0x17, 0x94, 0x8a, 0x61, 0x49, 0x37, 0x7f, 0xe8, 0x14, 0xfa, 0x4c, 0x6d,
    +	0x5a, 0x80, 0x14, 0xbf, 0x0c, 0xd8, 0x06, 0xb0, 0x8e, 0x5d, 0x6f, 0xc7, 0xc1, 0x96, 0x2b, 0x24,
    +	0xd1, 0x06, 0x91, 0x57, 0x7f, 0xe3, 0x68, 0x41, 0xcb, 0x38, 0xf4, 0x15, 0xa9, 0x05, 0xdc, 0xec,
    +	0x41, 0x43, 0x7d, 0x24, 0xc0, 0x6f, 0x80, 0x29, 0x87, 0x60, 0xd7, 0xb6, 0x78, 0x62, 0xce, 0x86,
    +	0xc1, 0x82, 0xf8, 0x2e, 0x92, 0xa7, 0xf0, 0x3a, 0x98, 0x6e, 0x10, 0xd7, 0xc5, 0x35, 0x92, 0x9b,
    +	0xe4, 0x84, 0x41, 0xed, 0xde, 0x12, 0xdb, 0xc8, 0x3f, 0x57, 0xff, 0xab, 0x80, 0x4b, 0x03, 0xfc,
    +	0xb8, 0x49, 0x5d, 0x0f, 0xfe, 0xa2, 0x27, 0x2b, 0xb5, 0xa3, 0x19, 0xc8, 0xb8, 0x79, 0x4e, 0x06,
    +	0xc5, 0xc6, 0xdf, 0x89, 0x64, 0xe4, 0x2e, 0x98, 0xa4, 0x1e, 0x69, 0xf8, 0x45, 0xac, 0x34, 0x7e,
    +	0xda, 0x84, 0xed, 0x61, 0x83, 0x01, 0x21, 0x81, 0xa7, 0xbe, 0x4e, 0x0f, 0x34, 0x8b, 0xa5, 0x2d,
    +	0x6c, 0x83, 0x05, 0xbe, 0x92, 0x0d, 0x99, 0xec, 0x49, 0xe3, 0x86, 0x15, 0x85, 0x21, 0x03, 0x90,
    +	0x7e, 0x5e, 0x6a, 0xb1, 0x50, 0x89, 0xa1, 0xa2, 0x84, 0x14, 0x78, 0x0b, 0x64, 0x1b, 0xd4, 0x42,
    +	0xa4, 0x59, 0xa7, 0x06, 0x76, 0x65, 0x87, 0x5b, 0xec, 0x76, 0x0a, 0xd9, 0xad, 0x70, 0x1b, 0x45,
    +	0x69, 0xe0, 0x77, 0x41, 0xb6, 0x81, 0x9f, 0x07, 0x2c, 0xa2, 0x13, 0x9d, 0x91, 0xf2, 0xb2, 0x5b,
    +	0xe1, 0x11, 0x8a, 0xd2, 0xc1, 0x32, 0x8b, 0x01, 0xd6, 0xc3, 0xdd, 0x5c, 0x86, 0x3b, 0xf7, 0xeb,
    +	0x23, 0xbb, 0x3d, 0x2f, 0x6f, 0x91, 0x50, 0xe1, 0xdc, 0xc8, 0x87, 0x81, 0x26, 0x98, 0xa9, 0xca,
    +	0x52, 0xc3, 0xc3, 0x2a, 0x5b, 0xfa, 0xde, 0x47, 0xdc, 0x97, 0x44, 0xd0, 0xe7, 0x58, 0x48, 0xf8,
    +	0x2b, 0x14, 0x20, 0xab, 0xaf, 0x32, 0xe0, 0xf2, 0xd0, 0x12, 0x09, 0x7f, 0x08, 0xa0, 0x5d, 0x75,
    +	0x89, 0xd3, 0x26, 0xe6, 0x8f, 0xc4, 0x0b, 0x84, 0x0d, 0x8c, 0xec, 0xfe, 0xd2, 0xfa, 0x79, 0x96,
    +	0x4d, 0xdb, 0x3d, 0xa7, 0xa8, 0x0f, 0x07, 0x34, 0xc0, 0x3c, 0xcb, 0x31, 0x71, 0x63, 0x54, 0xce,
    +	0xa6, 0xe3, 0x25, 0xf0, 0x32, 0x1b, 0x35, 0x36, 0xa3, 0x20, 0x28, 0x8e, 0x09, 0xd7, 0xc0, 0xa2,
    +	0x1c, 0x93, 0x12, 0x37, 0x78, 0x41, 0xfa, 0x79, 0x71, 0x3d, 0x7e, 0x8c, 0x92, 0xf4, 0x0c, 0xc2,
    +	0x24, 0x2e, 0x75, 0x88, 0x19, 0x40, 0x64, 0xe2, 0x10, 0xf7, 0xe3, 0xc7, 0x28, 0x49, 0x0f, 0x6b,
    +	0x60, 0x41, 0xa2, 0xca, 0x5b, 0xcd, 0x4d, 0xf2, 0x98, 0x18, 0x3d, 0xc1, 0xca, 0xb6, 0x14, 0xc4,
    +	0xf7, 0x7a, 0x0c, 0x06, 0x25, 0x60, 0xa1, 0x0d, 0x80, 0xe1, 0x17, 0x4d, 0x37, 0x37, 0xc5, 0x85,
    +	0xdc, 0x1b, 0x3f, 0x4a, 0x82, 0xc2, 0x1b, 0x76, 0xf4, 0x60, 0xcb, 0x45, 0x11, 0x11, 0xea, 0x1f,
    +	0x15, 0xb0, 0x94, 0x9c, 0x80, 0x83, 0xc7, 0x86, 0x32, 0xf0, 0xb1, 0xf1, 0x4b, 0x30, 0x23, 0x06,
    +	0x2a, 0xdb, 0x91, 0xd7, 0xfe, 0x9d, 0x23, 0x96, 0x35, 0x5c, 0x25, 0xf5, 0x8a, 0x64, 0x15, 0x41,
    +	0xec, 0xaf, 0x50, 0x00, 0xa9, 0xbe, 0xcc, 0x00, 0x10, 0xe6, 0x14, 0xbc, 0x1d, 0xeb, 0x63, 0xab,
    +	0x89, 0x3e, 0xb6, 0x14, 0x7d, 0xb9, 0x44, 0x7a, 0xd6, 0x23, 0x30, 0x65, 0xf3, 0x32, 0x23, 0x35,
    +	0xbc, 0x39, 0xc4, 0x8f, 0xc1, 0xbc, 0x13, 0x00, 0xe9, 0x80, 0x35, 0x06, 0x59, 0xa7, 0x24, 0x10,
    +	0xdc, 0x00, 0x99, 0xa6, 0x6d, 0xfa, 0x53, 0xca, 0xb0, 0x99, 0xb1, 0x6c, 0x9b, 0x6e, 0x0c, 0x6e,
    +	0x86, 0x69, 0xcc, 0x76, 0x11, 0x87, 0x60, 0x23, 0xa8, 0x3f, 0xf9, 0xc9, 0x31, 0xb1, 0x38, 0x04,
    +	0xae, 0xdf, 0xd7, 0x00, 0xe1, 0x3d, 0xff, 0x04, 0x05, 0x70, 0xf0, 0x37, 0x60, 0xd9, 0x48, 0xbe,
    +	0xae, 0x73, 0xd3, 0x23, 0x07, 0xab, 0xa1, 0x9f, 0x1e, 0xf4, 0x73, 0xdd, 0x4e, 0x61, 0xb9, 0x87,
    +	0x04, 0xf5, 0x4a, 0x62, 0x96, 0x11, 0xf9, 0x28, 0x93, 0x75, 0x6e, 0x98, 0x65, 0xfd, 0x9e, 0x9f,
    +	0xc2, 0x32, 0xff, 0x04, 0x05, 0x70, 0xea, 0x9f, 0x32, 0x60, 0x2e, 0xf6, 0xd0, 0x3b, 0xe1, 0xc8,
    +	0x10, 0xc9, 0x7c, 0x6c, 0x91, 0x21, 0xe0, 0x8e, 0x35, 0x32, 0x04, 0xe4, 0x09, 0x45, 0x86, 0x10,
    +	0x76, 0x42, 0x91, 0x11, 0xb1, 0xac, 0x4f, 0x64, 0xfc, 0x2b, 0xe5, 0x47, 0x86, 0x18, 0x16, 0x8e,
    +	0x16, 0x19, 0x82, 0x36, 0x12, 0x19, 0xdb, 0xd1, 0xb7, 0xf3, 0xf8, 0x2f, 0xb7, 0xd9, 0x9e, 0x77,
    +	0xb6, 0x09, 0xe6, 0x70, 0x9b, 0x38, 0xb8, 0x46, 0xf8, 0xb6, 0x8c, 0x8f, 0x71, 0x71, 0x97, 0xd8,
    +	0x33, 0x77, 0x2d, 0x82, 0x83, 0x62, 0xa8, 0xac, 0xa5, 0xcb, 0xf5, 0x63, 0x2f, 0x78, 0x3f, 0xcb,
    +	0x2e, 0xc7, 0x5b, 0xfa, 0x5a, 0xcf, 0x29, 0xea, 0xc3, 0xa1, 0xfe, 0x21, 0x05, 0x96, 0x7b, 0xbe,
    +	0x5c, 0x84, 0x4e, 0x51, 0x3e, 0x91, 0x53, 0x52, 0x27, 0xe8, 0x94, 0xf4, 0xd8, 0x4e, 0xf9, 0x6b,
    +	0x0a, 0xc0, 0xde, 0xfe, 0x00, 0x0f, 0xf9, 0x58, 0x61, 0x38, 0xb4, 0x4a, 0x4c, 0x71, 0xfc, 0x15,
    +	0x67, 0xe0, 0xe8, 0x38, 0x12, 0x85, 0x45, 0x49, 0x39, 0xc7, 0xff, 0x05, 0x37, 0xfc, 0x5e, 0x96,
    +	0x3e, 0xb6, 0xef, 0x65, 0xea, 0x3f, 0x92, 0x7e, 0x3b, 0x85, 0xdf, 0xe6, 0xfa, 0xdd, 0x72, 0xfa,
    +	0x64, 0x6e, 0x59, 0xfd, 0x9b, 0x02, 0x96, 0x92, 0x63, 0xc4, 0x29, 0xf9, 0x30, 0xfb, 0xcf, 0xb8,
    +	0xea, 0xa7, 0xf1, 0xa3, 0xec, 0x2b, 0x05, 0x9c, 0x3d, 0x3d, 0xff, 0xc1, 0xa8, 0x7f, 0xe9, 0x55,
    +	0xf7, 0x14, 0xfc, 0x93, 0xa2, 0x7f, 0xff, 0xcd, 0xfb, 0xfc, 0xc4, 0xdb, 0xf7, 0xf9, 0x89, 0x77,
    +	0xef, 0xf3, 0x13, 0xbf, 0xed, 0xe6, 0x95, 0x37, 0xdd, 0xbc, 0xf2, 0xb6, 0x9b, 0x57, 0xde, 0x75,
    +	0xf3, 0xca, 0xff, 0xba, 0x79, 0xe5, 0xf7, 0xff, 0xcf, 0x4f, 0xfc, 0xec, 0xe2, 0xc0, 0xbf, 0x21,
    +	0xbf, 0x0c, 0x00, 0x00, 0xff, 0xff, 0xbe, 0x23, 0xae, 0x54, 0xa2, 0x1c, 0x00, 0x00,
     }
     
     func (m *ContainerResourceMetricSource) Marshal() (dAtA []byte, err error) {
    @@ -1126,6 +1127,18 @@ func (m *HPAScalingRules) MarshalToSizedBuffer(dAtA []byte) (int, error) {
     	_ = i
     	var l int
     	_ = l
    +	if m.Tolerance != nil {
    +		{
    +			size, err := m.Tolerance.MarshalToSizedBuffer(dAtA[:i])
    +			if err != nil {
    +				return 0, err
    +			}
    +			i -= size
    +			i = encodeVarintGenerated(dAtA, i, uint64(size))
    +		}
    +		i--
    +		dAtA[i] = 0x22
    +	}
     	if m.StabilizationWindowSeconds != nil {
     		i = encodeVarintGenerated(dAtA, i, uint64(*m.StabilizationWindowSeconds))
     		i--
    @@ -2203,6 +2216,10 @@ func (m *HPAScalingRules) Size() (n int) {
     	if m.StabilizationWindowSeconds != nil {
     		n += 1 + sovGenerated(uint64(*m.StabilizationWindowSeconds))
     	}
    +	if m.Tolerance != nil {
    +		l = m.Tolerance.Size()
    +		n += 1 + l + sovGenerated(uint64(l))
    +	}
     	return n
     }
     
    @@ -2619,6 +2636,7 @@ func (this *HPAScalingRules) String() string {
     		`SelectPolicy:` + valueToStringGenerated(this.SelectPolicy) + `,`,
     		`Policies:` + repeatedStringForPolicies + `,`,
     		`StabilizationWindowSeconds:` + valueToStringGenerated(this.StabilizationWindowSeconds) + `,`,
    +		`Tolerance:` + strings.Replace(fmt.Sprintf("%v", this.Tolerance), "Quantity", "resource.Quantity", 1) + `,`,
     		`}`,
     	}, "")
     	return s
    @@ -3770,6 +3788,42 @@ func (m *HPAScalingRules) Unmarshal(dAtA []byte) error {
     				}
     			}
     			m.StabilizationWindowSeconds = &v
    +		case 4:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Tolerance", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			if m.Tolerance == nil {
    +				m.Tolerance = &resource.Quantity{}
    +			}
    +			if err := m.Tolerance.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
     		default:
     			iNdEx = preIndex
     			skippy, err := skipGenerated(dAtA[iNdEx:])
    diff --git a/vendor/k8s.io/api/autoscaling/v2/generated.proto b/vendor/k8s.io/api/autoscaling/v2/generated.proto
    index 4e6dc0592..04c34d6e1 100644
    --- a/vendor/k8s.io/api/autoscaling/v2/generated.proto
    +++ b/vendor/k8s.io/api/autoscaling/v2/generated.proto
    @@ -112,12 +112,18 @@ message HPAScalingPolicy {
       optional int32 periodSeconds = 3;
     }
     
    -// HPAScalingRules configures the scaling behavior for one direction.
    -// These Rules are applied after calculating DesiredReplicas from metrics for the HPA.
    +// HPAScalingRules configures the scaling behavior for one direction via
    +// scaling Policy Rules and a configurable metric tolerance.
    +//
    +// Scaling Policy Rules are applied after calculating DesiredReplicas from metrics for the HPA.
     // They can limit the scaling velocity by specifying scaling policies.
     // They can prevent flapping by specifying the stabilization window, so that the
     // number of replicas is not set instantly, instead, the safest value from the stabilization
     // window is chosen.
    +//
    +// The tolerance is applied to the metric values and prevents scaling too
    +// eagerly for small metric variations. (Note that setting a tolerance requires
    +// enabling the alpha HPAConfigurableTolerance feature gate.)
     message HPAScalingRules {
       // stabilizationWindowSeconds is the number of seconds for which past recommendations should be
       // considered while scaling up or scaling down.
    @@ -134,10 +140,28 @@ message HPAScalingRules {
       optional string selectPolicy = 1;
     
       // policies is a list of potential scaling polices which can be used during scaling.
    -  // At least one policy must be specified, otherwise the HPAScalingRules will be discarded as invalid
    +  // If not set, use the default values:
    +  // - For scale up: allow doubling the number of pods, or an absolute change of 4 pods in a 15s window.
    +  // - For scale down: allow all pods to be removed in a 15s window.
       // +listType=atomic
       // +optional
       repeated HPAScalingPolicy policies = 2;
    +
    +  // tolerance is the tolerance on the ratio between the current and desired
    +  // metric value under which no updates are made to the desired number of
    +  // replicas (e.g. 0.01 for 1%). Must be greater than or equal to zero. If not
    +  // set, the default cluster-wide tolerance is applied (by default 10%).
    +  //
    +  // For example, if autoscaling is configured with a memory consumption target of 100Mi,
    +  // and scale-down and scale-up tolerances of 5% and 1% respectively, scaling will be
    +  // triggered when the actual consumption falls below 95Mi or exceeds 101Mi.
    +  //
    +  // This is an alpha field and requires enabling the HPAConfigurableTolerance
    +  // feature gate.
    +  //
    +  // +featureGate=HPAConfigurableTolerance
    +  // +optional
    +  optional .k8s.io.apimachinery.pkg.api.resource.Quantity tolerance = 4;
     }
     
     // HorizontalPodAutoscaler is the configuration for a horizontal pod
    diff --git a/vendor/k8s.io/api/autoscaling/v2/types.go b/vendor/k8s.io/api/autoscaling/v2/types.go
    index 99e8db09d..9ce69b1ed 100644
    --- a/vendor/k8s.io/api/autoscaling/v2/types.go
    +++ b/vendor/k8s.io/api/autoscaling/v2/types.go
    @@ -171,12 +171,18 @@ const (
     	DisabledPolicySelect ScalingPolicySelect = "Disabled"
     )
     
    -// HPAScalingRules configures the scaling behavior for one direction.
    -// These Rules are applied after calculating DesiredReplicas from metrics for the HPA.
    +// HPAScalingRules configures the scaling behavior for one direction via
    +// scaling Policy Rules and a configurable metric tolerance.
    +//
    +// Scaling Policy Rules are applied after calculating DesiredReplicas from metrics for the HPA.
     // They can limit the scaling velocity by specifying scaling policies.
     // They can prevent flapping by specifying the stabilization window, so that the
     // number of replicas is not set instantly, instead, the safest value from the stabilization
     // window is chosen.
    +//
    +// The tolerance is applied to the metric values and prevents scaling too
    +// eagerly for small metric variations. (Note that setting a tolerance requires
    +// enabling the alpha HPAConfigurableTolerance feature gate.)
     type HPAScalingRules struct {
     	// stabilizationWindowSeconds is the number of seconds for which past recommendations should be
     	// considered while scaling up or scaling down.
    @@ -193,10 +199,28 @@ type HPAScalingRules struct {
     	SelectPolicy *ScalingPolicySelect `json:"selectPolicy,omitempty" protobuf:"bytes,1,opt,name=selectPolicy"`
     
     	// policies is a list of potential scaling polices which can be used during scaling.
    -	// At least one policy must be specified, otherwise the HPAScalingRules will be discarded as invalid
    +	// If not set, use the default values:
    +	// - For scale up: allow doubling the number of pods, or an absolute change of 4 pods in a 15s window.
    +	// - For scale down: allow all pods to be removed in a 15s window.
     	// +listType=atomic
     	// +optional
     	Policies []HPAScalingPolicy `json:"policies,omitempty" listType:"atomic" protobuf:"bytes,2,rep,name=policies"`
    +
    +	// tolerance is the tolerance on the ratio between the current and desired
    +	// metric value under which no updates are made to the desired number of
    +	// replicas (e.g. 0.01 for 1%). Must be greater than or equal to zero. If not
    +	// set, the default cluster-wide tolerance is applied (by default 10%).
    +	//
    +	// For example, if autoscaling is configured with a memory consumption target of 100Mi,
    +	// and scale-down and scale-up tolerances of 5% and 1% respectively, scaling will be
    +	// triggered when the actual consumption falls below 95Mi or exceeds 101Mi.
    +	//
    +	// This is an alpha field and requires enabling the HPAConfigurableTolerance
    +	// feature gate.
    +	//
    +	// +featureGate=HPAConfigurableTolerance
    +	// +optional
    +	Tolerance *resource.Quantity `json:"tolerance,omitempty" protobuf:"bytes,4,opt,name=tolerance"`
     }
     
     // HPAScalingPolicyType is the type of the policy which could be used while making scaling decisions.
    diff --git a/vendor/k8s.io/api/autoscaling/v2/types_swagger_doc_generated.go b/vendor/k8s.io/api/autoscaling/v2/types_swagger_doc_generated.go
    index 649cd04a0..017fefcde 100644
    --- a/vendor/k8s.io/api/autoscaling/v2/types_swagger_doc_generated.go
    +++ b/vendor/k8s.io/api/autoscaling/v2/types_swagger_doc_generated.go
    @@ -92,10 +92,11 @@ func (HPAScalingPolicy) SwaggerDoc() map[string]string {
     }
     
     var map_HPAScalingRules = map[string]string{
    -	"":                           "HPAScalingRules configures the scaling behavior for one direction. These Rules are applied after calculating DesiredReplicas from metrics for the HPA. They can limit the scaling velocity by specifying scaling policies. They can prevent flapping by specifying the stabilization window, so that the number of replicas is not set instantly, instead, the safest value from the stabilization window is chosen.",
    +	"":                           "HPAScalingRules configures the scaling behavior for one direction via scaling Policy Rules and a configurable metric tolerance.\n\nScaling Policy Rules are applied after calculating DesiredReplicas from metrics for the HPA. They can limit the scaling velocity by specifying scaling policies. They can prevent flapping by specifying the stabilization window, so that the number of replicas is not set instantly, instead, the safest value from the stabilization window is chosen.\n\nThe tolerance is applied to the metric values and prevents scaling too eagerly for small metric variations. (Note that setting a tolerance requires enabling the alpha HPAConfigurableTolerance feature gate.)",
     	"stabilizationWindowSeconds": "stabilizationWindowSeconds is the number of seconds for which past recommendations should be considered while scaling up or scaling down. StabilizationWindowSeconds must be greater than or equal to zero and less than or equal to 3600 (one hour). If not set, use the default values: - For scale up: 0 (i.e. no stabilization is done). - For scale down: 300 (i.e. the stabilization window is 300 seconds long).",
     	"selectPolicy":               "selectPolicy is used to specify which policy should be used. If not set, the default value Max is used.",
    -	"policies":                   "policies is a list of potential scaling polices which can be used during scaling. At least one policy must be specified, otherwise the HPAScalingRules will be discarded as invalid",
    +	"policies":                   "policies is a list of potential scaling polices which can be used during scaling. If not set, use the default values: - For scale up: allow doubling the number of pods, or an absolute change of 4 pods in a 15s window. - For scale down: allow all pods to be removed in a 15s window.",
    +	"tolerance":                  "tolerance is the tolerance on the ratio between the current and desired metric value under which no updates are made to the desired number of replicas (e.g. 0.01 for 1%). Must be greater than or equal to zero. If not set, the default cluster-wide tolerance is applied (by default 10%).\n\nFor example, if autoscaling is configured with a memory consumption target of 100Mi, and scale-down and scale-up tolerances of 5% and 1% respectively, scaling will be triggered when the actual consumption falls below 95Mi or exceeds 101Mi.\n\nThis is an alpha field and requires enabling the HPAConfigurableTolerance feature gate.",
     }
     
     func (HPAScalingRules) SwaggerDoc() map[string]string {
    diff --git a/vendor/k8s.io/api/autoscaling/v2/zz_generated.deepcopy.go b/vendor/k8s.io/api/autoscaling/v2/zz_generated.deepcopy.go
    index 125708d6f..5fbcf9f80 100644
    --- a/vendor/k8s.io/api/autoscaling/v2/zz_generated.deepcopy.go
    +++ b/vendor/k8s.io/api/autoscaling/v2/zz_generated.deepcopy.go
    @@ -146,6 +146,11 @@ func (in *HPAScalingRules) DeepCopyInto(out *HPAScalingRules) {
     		*out = make([]HPAScalingPolicy, len(*in))
     		copy(*out, *in)
     	}
    +	if in.Tolerance != nil {
    +		in, out := &in.Tolerance, &out.Tolerance
    +		x := (*in).DeepCopy()
    +		*out = &x
    +	}
     	return
     }
     
    diff --git a/vendor/k8s.io/api/autoscaling/v2beta1/doc.go b/vendor/k8s.io/api/autoscaling/v2beta1/doc.go
    index 25ca507bb..eac92e86e 100644
    --- a/vendor/k8s.io/api/autoscaling/v2beta1/doc.go
    +++ b/vendor/k8s.io/api/autoscaling/v2beta1/doc.go
    @@ -19,4 +19,4 @@ limitations under the License.
     // +k8s:openapi-gen=true
     // +k8s:prerelease-lifecycle-gen=true
     
    -package v2beta1 // import "k8s.io/api/autoscaling/v2beta1"
    +package v2beta1
    diff --git a/vendor/k8s.io/api/autoscaling/v2beta2/doc.go b/vendor/k8s.io/api/autoscaling/v2beta2/doc.go
    index 76fb0aff8..150037297 100644
    --- a/vendor/k8s.io/api/autoscaling/v2beta2/doc.go
    +++ b/vendor/k8s.io/api/autoscaling/v2beta2/doc.go
    @@ -19,4 +19,4 @@ limitations under the License.
     // +k8s:openapi-gen=true
     // +k8s:prerelease-lifecycle-gen=true
     
    -package v2beta2 // import "k8s.io/api/autoscaling/v2beta2"
    +package v2beta2
    diff --git a/vendor/k8s.io/api/batch/v1/doc.go b/vendor/k8s.io/api/batch/v1/doc.go
    index cb5cbb600..69088e2c5 100644
    --- a/vendor/k8s.io/api/batch/v1/doc.go
    +++ b/vendor/k8s.io/api/batch/v1/doc.go
    @@ -18,4 +18,4 @@ limitations under the License.
     // +k8s:protobuf-gen=package
     // +k8s:openapi-gen=true
     // +k8s:prerelease-lifecycle-gen=true
    -package v1 // import "k8s.io/api/batch/v1"
    +package v1
    diff --git a/vendor/k8s.io/api/batch/v1/generated.proto b/vendor/k8s.io/api/batch/v1/generated.proto
    index 361ebdca1..c0ce8cef2 100644
    --- a/vendor/k8s.io/api/batch/v1/generated.proto
    +++ b/vendor/k8s.io/api/batch/v1/generated.proto
    @@ -222,13 +222,12 @@ message JobSpec {
       // When the field is specified, it must be immutable and works only for the Indexed Jobs.
       // Once the Job meets the SuccessPolicy, the lingering pods are terminated.
       //
    -  // This field is beta-level. To use this field, you must enable the
    -  // `JobSuccessPolicy` feature gate (enabled by default).
       // +optional
       optional SuccessPolicy successPolicy = 16;
     
       // Specifies the number of retries before marking this job failed.
    -  // Defaults to 6
    +  // Defaults to 6, unless backoffLimitPerIndex (only Indexed Job) is specified.
    +  // When backoffLimitPerIndex is specified, backoffLimit defaults to 2147483647.
       // +optional
       optional int32 backoffLimit = 7;
     
    @@ -238,8 +237,6 @@ message JobSpec {
       // batch.kubernetes.io/job-index-failure-count annotation. It can only
       // be set when Job's completionMode=Indexed, and the Pod's restart
       // policy is Never. The field is immutable.
    -  // This field is beta-level. It can be used when the `JobBackoffLimitPerIndex`
    -  // feature gate is enabled (enabled by default).
       // +optional
       optional int32 backoffLimitPerIndex = 12;
     
    @@ -251,8 +248,6 @@ message JobSpec {
       // It can only be specified when backoffLimitPerIndex is set.
       // It can be null or up to completions. It is required and must be
       // less than or equal to 10^4 when is completions greater than 10^5.
    -  // This field is beta-level. It can be used when the `JobBackoffLimitPerIndex`
    -  // feature gate is enabled (enabled by default).
       // +optional
       optional int32 maxFailedIndexes = 13;
     
    @@ -335,8 +330,6 @@ message JobSpec {
       //
       // When using podFailurePolicy, Failed is the the only allowed value.
       // TerminatingOrFailed and Failed are allowed values when podFailurePolicy is not in use.
    -  // This is an beta field. To use this, enable the JobPodReplacementPolicy feature toggle.
    -  // This is on by default.
       // +optional
       optional string podReplacementPolicy = 14;
     
    @@ -442,8 +435,6 @@ message JobStatus {
       // represented as "1,3-5,7".
       // The set of failed indexes cannot overlap with the set of completed indexes.
       //
    -  // This field is beta-level. It can be used when the `JobBackoffLimitPerIndex`
    -  // feature gate is enabled (enabled by default).
       // +optional
       optional string failedIndexes = 10;
     
    @@ -554,8 +545,6 @@ message PodFailurePolicyRule {
       //   running pods are terminated.
       // - FailIndex: indicates that the pod's index is marked as Failed and will
       //   not be restarted.
    -  //   This value is beta-level. It can be used when the
    -  //   `JobBackoffLimitPerIndex` feature gate is enabled (enabled by default).
       // - Ignore: indicates that the counter towards the .backoffLimit is not
       //   incremented and a replacement pod is created.
       // - Count: indicates that the pod is handled in the default way - the
    @@ -580,7 +569,7 @@ message PodFailurePolicyRule {
     message SuccessPolicy {
       // rules represents the list of alternative rules for the declaring the Jobs
       // as successful before `.status.succeeded >= .spec.completions`. Once any of the rules are met,
    -  // the "SucceededCriteriaMet" condition is added, and the lingering pods are removed.
    +  // the "SuccessCriteriaMet" condition is added, and the lingering pods are removed.
       // The terminal state for such a Job has the "Complete" condition.
       // Additionally, these rules are evaluated in order; Once the Job meets one of the rules,
       // other rules are ignored. At most 20 elements are allowed.
    diff --git a/vendor/k8s.io/api/batch/v1/types.go b/vendor/k8s.io/api/batch/v1/types.go
    index 8e9a761b9..9183c073d 100644
    --- a/vendor/k8s.io/api/batch/v1/types.go
    +++ b/vendor/k8s.io/api/batch/v1/types.go
    @@ -128,7 +128,6 @@ const (
     	// This is an action which might be taken on a pod failure - mark the
     	// Job's index as failed to avoid restarts within this index. This action
     	// can only be used when backoffLimitPerIndex is set.
    -	// This value is beta-level.
     	PodFailurePolicyActionFailIndex PodFailurePolicyAction = "FailIndex"
     
     	// This is an action which might be taken on a pod failure - the counter towards
    @@ -223,8 +222,6 @@ type PodFailurePolicyRule struct {
     	//   running pods are terminated.
     	// - FailIndex: indicates that the pod's index is marked as Failed and will
     	//   not be restarted.
    -	//   This value is beta-level. It can be used when the
    -	//   `JobBackoffLimitPerIndex` feature gate is enabled (enabled by default).
     	// - Ignore: indicates that the counter towards the .backoffLimit is not
     	//   incremented and a replacement pod is created.
     	// - Count: indicates that the pod is handled in the default way - the
    @@ -260,7 +257,7 @@ type PodFailurePolicy struct {
     type SuccessPolicy struct {
     	// rules represents the list of alternative rules for the declaring the Jobs
     	// as successful before `.status.succeeded >= .spec.completions`. Once any of the rules are met,
    -	// the "SucceededCriteriaMet" condition is added, and the lingering pods are removed.
    +	// the "SuccessCriteriaMet" condition is added, and the lingering pods are removed.
     	// The terminal state for such a Job has the "Complete" condition.
     	// Additionally, these rules are evaluated in order; Once the Job meets one of the rules,
     	// other rules are ignored. At most 20 elements are allowed.
    @@ -346,13 +343,12 @@ type JobSpec struct {
     	// When the field is specified, it must be immutable and works only for the Indexed Jobs.
     	// Once the Job meets the SuccessPolicy, the lingering pods are terminated.
     	//
    -	// This field is beta-level. To use this field, you must enable the
    -	// `JobSuccessPolicy` feature gate (enabled by default).
     	// +optional
     	SuccessPolicy *SuccessPolicy `json:"successPolicy,omitempty" protobuf:"bytes,16,opt,name=successPolicy"`
     
     	// Specifies the number of retries before marking this job failed.
    -	// Defaults to 6
    +	// Defaults to 6, unless backoffLimitPerIndex (only Indexed Job) is specified.
    +	// When backoffLimitPerIndex is specified, backoffLimit defaults to 2147483647.
     	// +optional
     	BackoffLimit *int32 `json:"backoffLimit,omitempty" protobuf:"varint,7,opt,name=backoffLimit"`
     
    @@ -362,8 +358,6 @@ type JobSpec struct {
     	// batch.kubernetes.io/job-index-failure-count annotation. It can only
     	// be set when Job's completionMode=Indexed, and the Pod's restart
     	// policy is Never. The field is immutable.
    -	// This field is beta-level. It can be used when the `JobBackoffLimitPerIndex`
    -	// feature gate is enabled (enabled by default).
     	// +optional
     	BackoffLimitPerIndex *int32 `json:"backoffLimitPerIndex,omitempty" protobuf:"varint,12,opt,name=backoffLimitPerIndex"`
     
    @@ -375,8 +369,6 @@ type JobSpec struct {
     	// It can only be specified when backoffLimitPerIndex is set.
     	// It can be null or up to completions. It is required and must be
     	// less than or equal to 10^4 when is completions greater than 10^5.
    -	// This field is beta-level. It can be used when the `JobBackoffLimitPerIndex`
    -	// feature gate is enabled (enabled by default).
     	// +optional
     	MaxFailedIndexes *int32 `json:"maxFailedIndexes,omitempty" protobuf:"varint,13,opt,name=maxFailedIndexes"`
     
    @@ -464,8 +456,6 @@ type JobSpec struct {
     	//
     	// When using podFailurePolicy, Failed is the the only allowed value.
     	// TerminatingOrFailed and Failed are allowed values when podFailurePolicy is not in use.
    -	// This is an beta field. To use this, enable the JobPodReplacementPolicy feature toggle.
    -	// This is on by default.
     	// +optional
     	PodReplacementPolicy *PodReplacementPolicy `json:"podReplacementPolicy,omitempty" protobuf:"bytes,14,opt,name=podReplacementPolicy,casttype=podReplacementPolicy"`
     
    @@ -571,8 +561,6 @@ type JobStatus struct {
     	// represented as "1,3-5,7".
     	// The set of failed indexes cannot overlap with the set of completed indexes.
     	//
    -	// This field is beta-level. It can be used when the `JobBackoffLimitPerIndex`
    -	// feature gate is enabled (enabled by default).
     	// +optional
     	FailedIndexes *string `json:"failedIndexes,omitempty" protobuf:"bytes,10,opt,name=failedIndexes"`
     
    @@ -647,13 +635,9 @@ const (
     	JobReasonFailedIndexes string = "FailedIndexes"
     	// JobReasonSuccessPolicy reason indicates a SuccessCriteriaMet condition is added due to
     	// a Job met successPolicy.
    -	// https://kep.k8s.io/3998
    -	// This is currently a beta field.
     	JobReasonSuccessPolicy string = "SuccessPolicy"
     	// JobReasonCompletionsReached reason indicates a SuccessCriteriaMet condition is added due to
     	// a number of succeeded Job pods met completions.
    -	// - https://kep.k8s.io/3998
    -	// This is currently a beta field.
     	JobReasonCompletionsReached string = "CompletionsReached"
     )
     
    diff --git a/vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go
    index 893f3371f..451f4609f 100644
    --- a/vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go
    +++ b/vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go
    @@ -116,17 +116,17 @@ var map_JobSpec = map[string]string{
     	"completions":             "Specifies the desired number of successfully finished pods the job should be run with.  Setting to null means that the success of any pod signals the success of all pods, and allows parallelism to have any positive value.  Setting to 1 means that parallelism is limited to 1 and the success of that pod signals the success of the job. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/",
     	"activeDeadlineSeconds":   "Specifies the duration in seconds relative to the startTime that the job may be continuously active before the system tries to terminate it; value must be positive integer. If a Job is suspended (at creation or through an update), this timer will effectively be stopped and reset when the Job is resumed again.",
     	"podFailurePolicy":        "Specifies the policy of handling failed pods. In particular, it allows to specify the set of actions and conditions which need to be satisfied to take the associated action. If empty, the default behaviour applies - the counter of failed pods, represented by the jobs's .status.failed field, is incremented and it is checked against the backoffLimit. This field cannot be used in combination with restartPolicy=OnFailure.",
    -	"successPolicy":           "successPolicy specifies the policy when the Job can be declared as succeeded. If empty, the default behavior applies - the Job is declared as succeeded only when the number of succeeded pods equals to the completions. When the field is specified, it must be immutable and works only for the Indexed Jobs. Once the Job meets the SuccessPolicy, the lingering pods are terminated.\n\nThis field is beta-level. To use this field, you must enable the `JobSuccessPolicy` feature gate (enabled by default).",
    -	"backoffLimit":            "Specifies the number of retries before marking this job failed. Defaults to 6",
    -	"backoffLimitPerIndex":    "Specifies the limit for the number of retries within an index before marking this index as failed. When enabled the number of failures per index is kept in the pod's batch.kubernetes.io/job-index-failure-count annotation. It can only be set when Job's completionMode=Indexed, and the Pod's restart policy is Never. The field is immutable. This field is beta-level. It can be used when the `JobBackoffLimitPerIndex` feature gate is enabled (enabled by default).",
    -	"maxFailedIndexes":        "Specifies the maximal number of failed indexes before marking the Job as failed, when backoffLimitPerIndex is set. Once the number of failed indexes exceeds this number the entire Job is marked as Failed and its execution is terminated. When left as null the job continues execution of all of its indexes and is marked with the `Complete` Job condition. It can only be specified when backoffLimitPerIndex is set. It can be null or up to completions. It is required and must be less than or equal to 10^4 when is completions greater than 10^5. This field is beta-level. It can be used when the `JobBackoffLimitPerIndex` feature gate is enabled (enabled by default).",
    +	"successPolicy":           "successPolicy specifies the policy when the Job can be declared as succeeded. If empty, the default behavior applies - the Job is declared as succeeded only when the number of succeeded pods equals to the completions. When the field is specified, it must be immutable and works only for the Indexed Jobs. Once the Job meets the SuccessPolicy, the lingering pods are terminated.",
    +	"backoffLimit":            "Specifies the number of retries before marking this job failed. Defaults to 6, unless backoffLimitPerIndex (only Indexed Job) is specified. When backoffLimitPerIndex is specified, backoffLimit defaults to 2147483647.",
    +	"backoffLimitPerIndex":    "Specifies the limit for the number of retries within an index before marking this index as failed. When enabled the number of failures per index is kept in the pod's batch.kubernetes.io/job-index-failure-count annotation. It can only be set when Job's completionMode=Indexed, and the Pod's restart policy is Never. The field is immutable.",
    +	"maxFailedIndexes":        "Specifies the maximal number of failed indexes before marking the Job as failed, when backoffLimitPerIndex is set. Once the number of failed indexes exceeds this number the entire Job is marked as Failed and its execution is terminated. When left as null the job continues execution of all of its indexes and is marked with the `Complete` Job condition. It can only be specified when backoffLimitPerIndex is set. It can be null or up to completions. It is required and must be less than or equal to 10^4 when is completions greater than 10^5.",
     	"selector":                "A label query over pods that should match the pod count. Normally, the system sets this field for you. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors",
     	"manualSelector":          "manualSelector controls generation of pod labels and pod selectors. Leave `manualSelector` unset unless you are certain what you are doing. When false or unset, the system pick labels unique to this job and appends those labels to the pod template.  When true, the user is responsible for picking unique labels and specifying the selector.  Failure to pick a unique label may cause this and other jobs to not function correctly.  However, You may see `manualSelector=true` in jobs that were created with the old `extensions/v1beta1` API. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/#specifying-your-own-pod-selector",
     	"template":                "Describes the pod that will be created when executing a job. The only allowed template.spec.restartPolicy values are \"Never\" or \"OnFailure\". More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/",
     	"ttlSecondsAfterFinished": "ttlSecondsAfterFinished limits the lifetime of a Job that has finished execution (either Complete or Failed). If this field is set, ttlSecondsAfterFinished after the Job finishes, it is eligible to be automatically deleted. When the Job is being deleted, its lifecycle guarantees (e.g. finalizers) will be honored. If this field is unset, the Job won't be automatically deleted. If this field is set to zero, the Job becomes eligible to be deleted immediately after it finishes.",
     	"completionMode":          "completionMode specifies how Pod completions are tracked. It can be `NonIndexed` (default) or `Indexed`.\n\n`NonIndexed` means that the Job is considered complete when there have been .spec.completions successfully completed Pods. Each Pod completion is homologous to each other.\n\n`Indexed` means that the Pods of a Job get an associated completion index from 0 to (.spec.completions - 1), available in the annotation batch.kubernetes.io/job-completion-index. The Job is considered complete when there is one successfully completed Pod for each index. When value is `Indexed`, .spec.completions must be specified and `.spec.parallelism` must be less than or equal to 10^5. In addition, The Pod name takes the form `$(job-name)-$(index)-$(random-string)`, the Pod hostname takes the form `$(job-name)-$(index)`.\n\nMore completion modes can be added in the future. If the Job controller observes a mode that it doesn't recognize, which is possible during upgrades due to version skew, the controller skips updates for the Job.",
     	"suspend":                 "suspend specifies whether the Job controller should create Pods or not. If a Job is created with suspend set to true, no Pods are created by the Job controller. If a Job is suspended after creation (i.e. the flag goes from false to true), the Job controller will delete all active Pods associated with this Job. Users must design their workload to gracefully handle this. Suspending a Job will reset the StartTime field of the Job, effectively resetting the ActiveDeadlineSeconds timer too. Defaults to false.",
    -	"podReplacementPolicy":    "podReplacementPolicy specifies when to create replacement Pods. Possible values are: - TerminatingOrFailed means that we recreate pods\n  when they are terminating (has a metadata.deletionTimestamp) or failed.\n- Failed means to wait until a previously created Pod is fully terminated (has phase\n  Failed or Succeeded) before creating a replacement Pod.\n\nWhen using podFailurePolicy, Failed is the the only allowed value. TerminatingOrFailed and Failed are allowed values when podFailurePolicy is not in use. This is an beta field. To use this, enable the JobPodReplacementPolicy feature toggle. This is on by default.",
    +	"podReplacementPolicy":    "podReplacementPolicy specifies when to create replacement Pods. Possible values are: - TerminatingOrFailed means that we recreate pods\n  when they are terminating (has a metadata.deletionTimestamp) or failed.\n- Failed means to wait until a previously created Pod is fully terminated (has phase\n  Failed or Succeeded) before creating a replacement Pod.\n\nWhen using podFailurePolicy, Failed is the the only allowed value. TerminatingOrFailed and Failed are allowed values when podFailurePolicy is not in use.",
     	"managedBy":               "ManagedBy field indicates the controller that manages a Job. The k8s Job controller reconciles jobs which don't have this field at all or the field value is the reserved string `kubernetes.io/job-controller`, but skips reconciling Jobs with a custom value for this field. The value must be a valid domain-prefixed path (e.g. acme.io/foo) - all characters before the first \"/\" must be a valid subdomain as defined by RFC 1123. All characters trailing the first \"/\" must be valid HTTP Path characters as defined by RFC 3986. The value cannot exceed 63 characters. This field is immutable.\n\nThis field is beta-level. The job controller accepts setting the field when the feature gate JobManagedBy is enabled (enabled by default).",
     }
     
    @@ -144,7 +144,7 @@ var map_JobStatus = map[string]string{
     	"failed":                  "The number of pods which reached phase Failed. The value increases monotonically.",
     	"terminating":             "The number of pods which are terminating (in phase Pending or Running and have a deletionTimestamp).\n\nThis field is beta-level. The job controller populates the field when the feature gate JobPodReplacementPolicy is enabled (enabled by default).",
     	"completedIndexes":        "completedIndexes holds the completed indexes when .spec.completionMode = \"Indexed\" in a text format. The indexes are represented as decimal integers separated by commas. The numbers are listed in increasing order. Three or more consecutive numbers are compressed and represented by the first and last element of the series, separated by a hyphen. For example, if the completed indexes are 1, 3, 4, 5 and 7, they are represented as \"1,3-5,7\".",
    -	"failedIndexes":           "FailedIndexes holds the failed indexes when spec.backoffLimitPerIndex is set. The indexes are represented in the text format analogous as for the `completedIndexes` field, ie. they are kept as decimal integers separated by commas. The numbers are listed in increasing order. Three or more consecutive numbers are compressed and represented by the first and last element of the series, separated by a hyphen. For example, if the failed indexes are 1, 3, 4, 5 and 7, they are represented as \"1,3-5,7\". The set of failed indexes cannot overlap with the set of completed indexes.\n\nThis field is beta-level. It can be used when the `JobBackoffLimitPerIndex` feature gate is enabled (enabled by default).",
    +	"failedIndexes":           "FailedIndexes holds the failed indexes when spec.backoffLimitPerIndex is set. The indexes are represented in the text format analogous as for the `completedIndexes` field, ie. they are kept as decimal integers separated by commas. The numbers are listed in increasing order. Three or more consecutive numbers are compressed and represented by the first and last element of the series, separated by a hyphen. For example, if the failed indexes are 1, 3, 4, 5 and 7, they are represented as \"1,3-5,7\". The set of failed indexes cannot overlap with the set of completed indexes.",
     	"uncountedTerminatedPods": "uncountedTerminatedPods holds the UIDs of Pods that have terminated but the job controller hasn't yet accounted for in the status counters.\n\nThe job controller creates pods with a finalizer. When a pod terminates (succeeded or failed), the controller does three steps to account for it in the job status:\n\n1. Add the pod UID to the arrays in this field. 2. Remove the pod finalizer. 3. Remove the pod UID from the arrays while increasing the corresponding\n    counter.\n\nOld jobs might not be tracked using this field, in which case the field remains null. The structure is empty for finished jobs.",
     	"ready":                   "The number of active pods which have a Ready condition and are not terminating (without a deletionTimestamp).",
     }
    @@ -195,7 +195,7 @@ func (PodFailurePolicyOnPodConditionsPattern) SwaggerDoc() map[string]string {
     
     var map_PodFailurePolicyRule = map[string]string{
     	"":                "PodFailurePolicyRule describes how a pod failure is handled when the requirements are met. One of onExitCodes and onPodConditions, but not both, can be used in each rule.",
    -	"action":          "Specifies the action taken on a pod failure when the requirements are satisfied. Possible values are:\n\n- FailJob: indicates that the pod's job is marked as Failed and all\n  running pods are terminated.\n- FailIndex: indicates that the pod's index is marked as Failed and will\n  not be restarted.\n  This value is beta-level. It can be used when the\n  `JobBackoffLimitPerIndex` feature gate is enabled (enabled by default).\n- Ignore: indicates that the counter towards the .backoffLimit is not\n  incremented and a replacement pod is created.\n- Count: indicates that the pod is handled in the default way - the\n  counter towards the .backoffLimit is incremented.\nAdditional values are considered to be added in the future. Clients should react to an unknown action by skipping the rule.",
    +	"action":          "Specifies the action taken on a pod failure when the requirements are satisfied. Possible values are:\n\n- FailJob: indicates that the pod's job is marked as Failed and all\n  running pods are terminated.\n- FailIndex: indicates that the pod's index is marked as Failed and will\n  not be restarted.\n- Ignore: indicates that the counter towards the .backoffLimit is not\n  incremented and a replacement pod is created.\n- Count: indicates that the pod is handled in the default way - the\n  counter towards the .backoffLimit is incremented.\nAdditional values are considered to be added in the future. Clients should react to an unknown action by skipping the rule.",
     	"onExitCodes":     "Represents the requirement on the container exit codes.",
     	"onPodConditions": "Represents the requirement on the pod conditions. The requirement is represented as a list of pod condition patterns. The requirement is satisfied if at least one pattern matches an actual pod condition. At most 20 elements are allowed.",
     }
    @@ -206,7 +206,7 @@ func (PodFailurePolicyRule) SwaggerDoc() map[string]string {
     
     var map_SuccessPolicy = map[string]string{
     	"":      "SuccessPolicy describes when a Job can be declared as succeeded based on the success of some indexes.",
    -	"rules": "rules represents the list of alternative rules for the declaring the Jobs as successful before `.status.succeeded >= .spec.completions`. Once any of the rules are met, the \"SucceededCriteriaMet\" condition is added, and the lingering pods are removed. The terminal state for such a Job has the \"Complete\" condition. Additionally, these rules are evaluated in order; Once the Job meets one of the rules, other rules are ignored. At most 20 elements are allowed.",
    +	"rules": "rules represents the list of alternative rules for the declaring the Jobs as successful before `.status.succeeded >= .spec.completions`. Once any of the rules are met, the \"SuccessCriteriaMet\" condition is added, and the lingering pods are removed. The terminal state for such a Job has the \"Complete\" condition. Additionally, these rules are evaluated in order; Once the Job meets one of the rules, other rules are ignored. At most 20 elements are allowed.",
     }
     
     func (SuccessPolicy) SwaggerDoc() map[string]string {
    diff --git a/vendor/k8s.io/api/batch/v1beta1/doc.go b/vendor/k8s.io/api/batch/v1beta1/doc.go
    index cb2572f5d..3430d6939 100644
    --- a/vendor/k8s.io/api/batch/v1beta1/doc.go
    +++ b/vendor/k8s.io/api/batch/v1beta1/doc.go
    @@ -19,4 +19,4 @@ limitations under the License.
     // +k8s:openapi-gen=true
     // +k8s:prerelease-lifecycle-gen=true
     
    -package v1beta1 // import "k8s.io/api/batch/v1beta1"
    +package v1beta1
    diff --git a/vendor/k8s.io/api/certificates/v1/doc.go b/vendor/k8s.io/api/certificates/v1/doc.go
    index 78434478e..6c16fc29b 100644
    --- a/vendor/k8s.io/api/certificates/v1/doc.go
    +++ b/vendor/k8s.io/api/certificates/v1/doc.go
    @@ -20,4 +20,4 @@ limitations under the License.
     // +k8s:prerelease-lifecycle-gen=true
     // +groupName=certificates.k8s.io
     
    -package v1 // import "k8s.io/api/certificates/v1"
    +package v1
    diff --git a/vendor/k8s.io/api/certificates/v1/generated.proto b/vendor/k8s.io/api/certificates/v1/generated.proto
    index dac7c7f5f..24528fc8b 100644
    --- a/vendor/k8s.io/api/certificates/v1/generated.proto
    +++ b/vendor/k8s.io/api/certificates/v1/generated.proto
    @@ -39,6 +39,8 @@ option go_package = "k8s.io/api/certificates/v1";
     // This API can be used to request client certificates to authenticate to kube-apiserver
     // (with the "kubernetes.io/kube-apiserver-client" signerName),
     // or to obtain certificates from custom non-Kubernetes signers.
    +// +k8s:supportsSubresource=/status
    +// +k8s:supportsSubresource=/approval
     message CertificateSigningRequest {
       // +optional
       optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
    @@ -203,6 +205,11 @@ message CertificateSigningRequestStatus {
       // +listType=map
       // +listMapKey=type
       // +optional
    +  // +k8s:listType=map
    +  // +k8s:listMapKey=type
    +  // +k8s:optional
    +  // +k8s:item(type: "Approved")=+k8s:zeroOrOneOfMember
    +  // +k8s:item(type: "Denied")=+k8s:zeroOrOneOfMember
       repeated CertificateSigningRequestCondition conditions = 1;
     
       // certificate is populated with an issued certificate by the signer after an Approved condition is present.
    diff --git a/vendor/k8s.io/api/certificates/v1/types.go b/vendor/k8s.io/api/certificates/v1/types.go
    index ba8009840..71203e80d 100644
    --- a/vendor/k8s.io/api/certificates/v1/types.go
    +++ b/vendor/k8s.io/api/certificates/v1/types.go
    @@ -39,6 +39,8 @@ import (
     // This API can be used to request client certificates to authenticate to kube-apiserver
     // (with the "kubernetes.io/kube-apiserver-client" signerName),
     // or to obtain certificates from custom non-Kubernetes signers.
    +// +k8s:supportsSubresource=/status
    +// +k8s:supportsSubresource=/approval
     type CertificateSigningRequest struct {
     	metav1.TypeMeta `json:",inline"`
     	// +optional
    @@ -178,6 +180,11 @@ type CertificateSigningRequestStatus struct {
     	// +listType=map
     	// +listMapKey=type
     	// +optional
    +	// +k8s:listType=map
    +	// +k8s:listMapKey=type
    +	// +k8s:optional
    +	// +k8s:item(type: "Approved")=+k8s:zeroOrOneOfMember
    +	// +k8s:item(type: "Denied")=+k8s:zeroOrOneOfMember
     	Conditions []CertificateSigningRequestCondition `json:"conditions,omitempty" protobuf:"bytes,1,rep,name=conditions"`
     
     	// certificate is populated with an issued certificate by the signer after an Approved condition is present.
    diff --git a/vendor/k8s.io/api/certificates/v1alpha1/doc.go b/vendor/k8s.io/api/certificates/v1alpha1/doc.go
    index d83d0e820..01481df8e 100644
    --- a/vendor/k8s.io/api/certificates/v1alpha1/doc.go
    +++ b/vendor/k8s.io/api/certificates/v1alpha1/doc.go
    @@ -21,4 +21,4 @@ limitations under the License.
     
     // +groupName=certificates.k8s.io
     
    -package v1alpha1 // import "k8s.io/api/certificates/v1alpha1"
    +package v1alpha1
    diff --git a/vendor/k8s.io/api/certificates/v1alpha1/generated.pb.go b/vendor/k8s.io/api/certificates/v1alpha1/generated.pb.go
    index a62a40059..c260f0436 100644
    --- a/vendor/k8s.io/api/certificates/v1alpha1/generated.pb.go
    +++ b/vendor/k8s.io/api/certificates/v1alpha1/generated.pb.go
    @@ -25,11 +25,14 @@ import (
     	io "io"
     
     	proto "github.com/gogo/protobuf/proto"
    +	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     
     	math "math"
     	math_bits "math/bits"
     	reflect "reflect"
     	strings "strings"
    +
    +	k8s_io_apimachinery_pkg_types "k8s.io/apimachinery/pkg/types"
     )
     
     // Reference imports to suppress errors if they are not otherwise used.
    @@ -127,10 +130,126 @@ func (m *ClusterTrustBundleSpec) XXX_DiscardUnknown() {
     
     var xxx_messageInfo_ClusterTrustBundleSpec proto.InternalMessageInfo
     
    +func (m *PodCertificateRequest) Reset()      { *m = PodCertificateRequest{} }
    +func (*PodCertificateRequest) ProtoMessage() {}
    +func (*PodCertificateRequest) Descriptor() ([]byte, []int) {
    +	return fileDescriptor_f73d5fe56c015bb8, []int{3}
    +}
    +func (m *PodCertificateRequest) XXX_Unmarshal(b []byte) error {
    +	return m.Unmarshal(b)
    +}
    +func (m *PodCertificateRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    +	b = b[:cap(b)]
    +	n, err := m.MarshalToSizedBuffer(b)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return b[:n], nil
    +}
    +func (m *PodCertificateRequest) XXX_Merge(src proto.Message) {
    +	xxx_messageInfo_PodCertificateRequest.Merge(m, src)
    +}
    +func (m *PodCertificateRequest) XXX_Size() int {
    +	return m.Size()
    +}
    +func (m *PodCertificateRequest) XXX_DiscardUnknown() {
    +	xxx_messageInfo_PodCertificateRequest.DiscardUnknown(m)
    +}
    +
    +var xxx_messageInfo_PodCertificateRequest proto.InternalMessageInfo
    +
    +func (m *PodCertificateRequestList) Reset()      { *m = PodCertificateRequestList{} }
    +func (*PodCertificateRequestList) ProtoMessage() {}
    +func (*PodCertificateRequestList) Descriptor() ([]byte, []int) {
    +	return fileDescriptor_f73d5fe56c015bb8, []int{4}
    +}
    +func (m *PodCertificateRequestList) XXX_Unmarshal(b []byte) error {
    +	return m.Unmarshal(b)
    +}
    +func (m *PodCertificateRequestList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    +	b = b[:cap(b)]
    +	n, err := m.MarshalToSizedBuffer(b)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return b[:n], nil
    +}
    +func (m *PodCertificateRequestList) XXX_Merge(src proto.Message) {
    +	xxx_messageInfo_PodCertificateRequestList.Merge(m, src)
    +}
    +func (m *PodCertificateRequestList) XXX_Size() int {
    +	return m.Size()
    +}
    +func (m *PodCertificateRequestList) XXX_DiscardUnknown() {
    +	xxx_messageInfo_PodCertificateRequestList.DiscardUnknown(m)
    +}
    +
    +var xxx_messageInfo_PodCertificateRequestList proto.InternalMessageInfo
    +
    +func (m *PodCertificateRequestSpec) Reset()      { *m = PodCertificateRequestSpec{} }
    +func (*PodCertificateRequestSpec) ProtoMessage() {}
    +func (*PodCertificateRequestSpec) Descriptor() ([]byte, []int) {
    +	return fileDescriptor_f73d5fe56c015bb8, []int{5}
    +}
    +func (m *PodCertificateRequestSpec) XXX_Unmarshal(b []byte) error {
    +	return m.Unmarshal(b)
    +}
    +func (m *PodCertificateRequestSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    +	b = b[:cap(b)]
    +	n, err := m.MarshalToSizedBuffer(b)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return b[:n], nil
    +}
    +func (m *PodCertificateRequestSpec) XXX_Merge(src proto.Message) {
    +	xxx_messageInfo_PodCertificateRequestSpec.Merge(m, src)
    +}
    +func (m *PodCertificateRequestSpec) XXX_Size() int {
    +	return m.Size()
    +}
    +func (m *PodCertificateRequestSpec) XXX_DiscardUnknown() {
    +	xxx_messageInfo_PodCertificateRequestSpec.DiscardUnknown(m)
    +}
    +
    +var xxx_messageInfo_PodCertificateRequestSpec proto.InternalMessageInfo
    +
    +func (m *PodCertificateRequestStatus) Reset()      { *m = PodCertificateRequestStatus{} }
    +func (*PodCertificateRequestStatus) ProtoMessage() {}
    +func (*PodCertificateRequestStatus) Descriptor() ([]byte, []int) {
    +	return fileDescriptor_f73d5fe56c015bb8, []int{6}
    +}
    +func (m *PodCertificateRequestStatus) XXX_Unmarshal(b []byte) error {
    +	return m.Unmarshal(b)
    +}
    +func (m *PodCertificateRequestStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    +	b = b[:cap(b)]
    +	n, err := m.MarshalToSizedBuffer(b)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return b[:n], nil
    +}
    +func (m *PodCertificateRequestStatus) XXX_Merge(src proto.Message) {
    +	xxx_messageInfo_PodCertificateRequestStatus.Merge(m, src)
    +}
    +func (m *PodCertificateRequestStatus) XXX_Size() int {
    +	return m.Size()
    +}
    +func (m *PodCertificateRequestStatus) XXX_DiscardUnknown() {
    +	xxx_messageInfo_PodCertificateRequestStatus.DiscardUnknown(m)
    +}
    +
    +var xxx_messageInfo_PodCertificateRequestStatus proto.InternalMessageInfo
    +
     func init() {
     	proto.RegisterType((*ClusterTrustBundle)(nil), "k8s.io.api.certificates.v1alpha1.ClusterTrustBundle")
     	proto.RegisterType((*ClusterTrustBundleList)(nil), "k8s.io.api.certificates.v1alpha1.ClusterTrustBundleList")
     	proto.RegisterType((*ClusterTrustBundleSpec)(nil), "k8s.io.api.certificates.v1alpha1.ClusterTrustBundleSpec")
    +	proto.RegisterType((*PodCertificateRequest)(nil), "k8s.io.api.certificates.v1alpha1.PodCertificateRequest")
    +	proto.RegisterType((*PodCertificateRequestList)(nil), "k8s.io.api.certificates.v1alpha1.PodCertificateRequestList")
    +	proto.RegisterType((*PodCertificateRequestSpec)(nil), "k8s.io.api.certificates.v1alpha1.PodCertificateRequestSpec")
    +	proto.RegisterType((*PodCertificateRequestStatus)(nil), "k8s.io.api.certificates.v1alpha1.PodCertificateRequestStatus")
     }
     
     func init() {
    @@ -138,35 +257,65 @@ func init() {
     }
     
     var fileDescriptor_f73d5fe56c015bb8 = []byte{
    -	// 437 bytes of a gzipped FileDescriptorProto
    -	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x93, 0xcf, 0x6a, 0xdb, 0x40,
    -	0x10, 0xc6, 0xb5, 0x69, 0x02, 0xc9, 0xba, 0x85, 0xa2, 0x42, 0x31, 0x3e, 0x6c, 0x8c, 0x4f, 0xb9,
    -	0x74, 0x37, 0x36, 0x69, 0xc9, 0x59, 0x85, 0x42, 0xa1, 0x7f, 0x40, 0xe9, 0xa5, 0xa1, 0x87, 0xae,
    -	0xd7, 0x13, 0x79, 0x6b, 0x4b, 0x5a, 0x76, 0x57, 0x86, 0xde, 0x0a, 0x7d, 0x81, 0x3e, 0x96, 0x8f,
    -	0x69, 0x4f, 0x39, 0x85, 0x5a, 0x7d, 0x91, 0xb2, 0x6b, 0xd9, 0x12, 0x55, 0x8b, 0x4b, 0x6e, 0x9a,
    -	0xd1, 0xfc, 0xbe, 0x6f, 0xbe, 0x11, 0xc2, 0xa7, 0xb3, 0x73, 0x43, 0x65, 0xce, 0xb8, 0x92, 0x4c,
    -	0x80, 0xb6, 0xf2, 0x4a, 0x0a, 0x6e, 0xc1, 0xb0, 0xc5, 0x90, 0xcf, 0xd5, 0x94, 0x0f, 0x59, 0x02,
    -	0x19, 0x68, 0x6e, 0x61, 0x42, 0x95, 0xce, 0x6d, 0x1e, 0xf6, 0xd7, 0x04, 0xe5, 0x4a, 0xd2, 0x26,
    -	0x41, 0x37, 0x44, 0xef, 0x49, 0x22, 0xed, 0xb4, 0x18, 0x53, 0x91, 0xa7, 0x2c, 0xc9, 0x93, 0x9c,
    -	0x79, 0x70, 0x5c, 0x5c, 0xf9, 0xca, 0x17, 0xfe, 0x69, 0x2d, 0xd8, 0x3b, 0xab, 0x57, 0x48, 0xb9,
    -	0x98, 0xca, 0x0c, 0xf4, 0x67, 0xa6, 0x66, 0x89, 0x6b, 0x18, 0x96, 0x82, 0xe5, 0x6c, 0xd1, 0x5a,
    -	0xa3, 0xc7, 0xfe, 0x45, 0xe9, 0x22, 0xb3, 0x32, 0x85, 0x16, 0xf0, 0x6c, 0x17, 0x60, 0xc4, 0x14,
    -	0x52, 0xfe, 0x27, 0x37, 0xf8, 0x81, 0x70, 0xf8, 0x7c, 0x5e, 0x18, 0x0b, 0xfa, 0x9d, 0x2e, 0x8c,
    -	0x8d, 0x8a, 0x6c, 0x32, 0x87, 0xf0, 0x23, 0x3e, 0x74, 0xab, 0x4d, 0xb8, 0xe5, 0x5d, 0xd4, 0x47,
    -	0x27, 0x9d, 0xd1, 0x29, 0xad, 0x2f, 0xb3, 0x75, 0xa0, 0x6a, 0x96, 0xb8, 0x86, 0xa1, 0x6e, 0x9a,
    -	0x2e, 0x86, 0xf4, 0xed, 0xf8, 0x13, 0x08, 0xfb, 0x1a, 0x2c, 0x8f, 0xc2, 0xe5, 0xed, 0x71, 0x50,
    -	0xde, 0x1e, 0xe3, 0xba, 0x17, 0x6f, 0x55, 0xc3, 0x4b, 0xbc, 0x6f, 0x14, 0x88, 0xee, 0x9e, 0x57,
    -	0x3f, 0xa7, 0xbb, 0xee, 0x4e, 0xdb, 0x5b, 0x5e, 0x28, 0x10, 0xd1, 0xfd, 0xca, 0x65, 0xdf, 0x55,
    -	0xb1, 0xd7, 0x1c, 0x7c, 0x47, 0xf8, 0x71, 0x7b, 0xfc, 0x95, 0x34, 0x36, 0xfc, 0xd0, 0x0a, 0x46,
    -	0xff, 0x2f, 0x98, 0xa3, 0x7d, 0xac, 0x87, 0x95, 0xe1, 0xe1, 0xa6, 0xd3, 0x08, 0xf5, 0x1e, 0x1f,
    -	0x48, 0x0b, 0xa9, 0xe9, 0xee, 0xf5, 0xef, 0x9d, 0x74, 0x46, 0x67, 0x77, 0x49, 0x15, 0x3d, 0xa8,
    -	0x0c, 0x0e, 0x5e, 0x3a, 0xa9, 0x78, 0xad, 0x38, 0xf8, 0xfa, 0xd7, 0x4c, 0x2e, 0x74, 0x38, 0xc2,
    -	0xd8, 0xc8, 0x24, 0x03, 0xfd, 0x86, 0xa7, 0xe0, 0x53, 0x1d, 0xd5, 0xc7, 0xbf, 0xd8, 0xbe, 0x89,
    -	0x1b, 0x53, 0xe1, 0x53, 0xdc, 0xb1, 0xb5, 0x8c, 0xff, 0x0a, 0x47, 0xd1, 0xa3, 0x0a, 0xea, 0x34,
    -	0x1c, 0xe2, 0xe6, 0x5c, 0xf4, 0x62, 0xb9, 0x22, 0xc1, 0xf5, 0x8a, 0x04, 0x37, 0x2b, 0x12, 0x7c,
    -	0x29, 0x09, 0x5a, 0x96, 0x04, 0x5d, 0x97, 0x04, 0xdd, 0x94, 0x04, 0xfd, 0x2c, 0x09, 0xfa, 0xf6,
    -	0x8b, 0x04, 0x97, 0xfd, 0x5d, 0xbf, 0xdd, 0xef, 0x00, 0x00, 0x00, 0xff, 0xff, 0xd4, 0x1c, 0xcb,
    -	0xdd, 0x99, 0x03, 0x00, 0x00,
    +	// 918 bytes of a gzipped FileDescriptorProto
    +	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x96, 0xcf, 0x6f, 0xe3, 0x44,
    +	0x14, 0xc7, 0xe3, 0xb6, 0x69, 0x9b, 0x49, 0x5b, 0xda, 0x61, 0x17, 0x99, 0x22, 0x39, 0x21, 0x07,
    +	0x54, 0x90, 0xb0, 0xb7, 0xa5, 0xb0, 0x2b, 0x10, 0x48, 0x75, 0x0a, 0x52, 0xe9, 0x6e, 0x36, 0x9a,
    +	0x74, 0xf9, 0xb1, 0x5a, 0x24, 0x1c, 0xe7, 0x25, 0x19, 0x1a, 0x7b, 0x8c, 0x67, 0x5c, 0xb5, 0x37,
    +	0x24, 0xfe, 0x01, 0xfe, 0x23, 0xae, 0x3d, 0x2e, 0x5c, 0xd8, 0x53, 0xa0, 0xe6, 0x6f, 0xe0, 0xb2,
    +	0x27, 0xe4, 0xb1, 0x9d, 0x5f, 0x4e, 0xb6, 0xd9, 0x1e, 0x7a, 0xcb, 0xbc, 0x79, 0xdf, 0xcf, 0xfb,
    +	0xbe, 0x99, 0x37, 0x56, 0xd0, 0xbd, 0xd3, 0x07, 0x5c, 0xa7, 0xcc, 0xb0, 0x3c, 0x6a, 0xd8, 0xe0,
    +	0x0b, 0xda, 0xa6, 0xb6, 0x25, 0x80, 0x1b, 0x67, 0xbb, 0x56, 0xcf, 0xeb, 0x5a, 0xbb, 0x46, 0x07,
    +	0x5c, 0xf0, 0x2d, 0x01, 0x2d, 0xdd, 0xf3, 0x99, 0x60, 0xb8, 0x1c, 0x2b, 0x74, 0xcb, 0xa3, 0xfa,
    +	0xa8, 0x42, 0x4f, 0x15, 0xdb, 0x1f, 0x76, 0xa8, 0xe8, 0x06, 0x4d, 0xdd, 0x66, 0x8e, 0xd1, 0x61,
    +	0x1d, 0x66, 0x48, 0x61, 0x33, 0x68, 0xcb, 0x95, 0x5c, 0xc8, 0x5f, 0x31, 0x70, 0x7b, 0x7f, 0x68,
    +	0xc1, 0xb1, 0xec, 0x2e, 0x75, 0xc1, 0xbf, 0x30, 0xbc, 0xd3, 0x4e, 0x14, 0xe0, 0x86, 0x03, 0xc2,
    +	0x32, 0xce, 0x32, 0x36, 0xb6, 0x8d, 0x59, 0x2a, 0x3f, 0x70, 0x05, 0x75, 0x20, 0x23, 0xf8, 0xe4,
    +	0x3a, 0x01, 0xb7, 0xbb, 0xe0, 0x58, 0x93, 0xba, 0xca, 0x9f, 0x0a, 0xc2, 0xd5, 0x5e, 0xc0, 0x05,
    +	0xf8, 0x27, 0x7e, 0xc0, 0x85, 0x19, 0xb8, 0xad, 0x1e, 0xe0, 0x1f, 0xd1, 0x6a, 0x64, 0xad, 0x65,
    +	0x09, 0x4b, 0x55, 0xca, 0xca, 0x4e, 0x71, 0xef, 0x9e, 0x3e, 0x3c, 0x99, 0x41, 0x05, 0xdd, 0x3b,
    +	0xed, 0x44, 0x01, 0xae, 0x47, 0xd9, 0xfa, 0xd9, 0xae, 0xfe, 0xb8, 0xf9, 0x13, 0xd8, 0xe2, 0x11,
    +	0x08, 0xcb, 0xc4, 0x97, 0xfd, 0x52, 0x2e, 0xec, 0x97, 0xd0, 0x30, 0x46, 0x06, 0x54, 0xfc, 0x14,
    +	0x2d, 0x71, 0x0f, 0x6c, 0x75, 0x41, 0xd2, 0x1f, 0xe8, 0xd7, 0x9d, 0xbb, 0x9e, 0x75, 0xd9, 0xf0,
    +	0xc0, 0x36, 0xd7, 0x92, 0x2a, 0x4b, 0xd1, 0x8a, 0x48, 0x66, 0xe5, 0x0f, 0x05, 0xbd, 0x95, 0x4d,
    +	0x7f, 0x48, 0xb9, 0xc0, 0xcf, 0x32, 0x8d, 0xe9, 0xf3, 0x35, 0x16, 0xa9, 0x65, 0x5b, 0x9b, 0x49,
    +	0xc1, 0xd5, 0x34, 0x32, 0xd2, 0xd4, 0xf7, 0x28, 0x4f, 0x05, 0x38, 0x5c, 0x5d, 0x28, 0x2f, 0xee,
    +	0x14, 0xf7, 0xf6, 0x6f, 0xd2, 0x95, 0xb9, 0x9e, 0x14, 0xc8, 0x1f, 0x45, 0x28, 0x12, 0x13, 0x2b,
    +	0xbf, 0x4e, 0xed, 0x29, 0x6a, 0x1a, 0xef, 0x21, 0xc4, 0x69, 0xc7, 0x05, 0xbf, 0x66, 0x39, 0x20,
    +	0xbb, 0x2a, 0x0c, 0x0f, 0xbf, 0x31, 0xd8, 0x21, 0x23, 0x59, 0xf8, 0x63, 0x54, 0x14, 0x43, 0x8c,
    +	0xbc, 0x85, 0x82, 0xf9, 0x66, 0x22, 0x2a, 0x8e, 0x54, 0x20, 0xa3, 0x79, 0x95, 0xdf, 0x17, 0xd0,
    +	0xdd, 0x3a, 0x6b, 0x55, 0x87, 0xbd, 0x10, 0xf8, 0x39, 0x00, 0x2e, 0x6e, 0x61, 0x62, 0x7e, 0x18,
    +	0x9b, 0x98, 0xcf, 0xae, 0x3f, 0xdb, 0xa9, 0x46, 0x67, 0x0d, 0x0d, 0x06, 0xb4, 0xcc, 0x85, 0x25,
    +	0x02, 0xae, 0x2e, 0xca, 0x02, 0x9f, 0xdf, 0xb4, 0x80, 0x84, 0x98, 0x1b, 0x49, 0x89, 0xe5, 0x78,
    +	0x4d, 0x12, 0x78, 0xe5, 0x2f, 0x05, 0xbd, 0x3d, 0x55, 0x77, 0x0b, 0xe3, 0xf9, 0x6c, 0x7c, 0x3c,
    +	0xef, 0xdf, 0xb0, 0xc3, 0x19, 0x13, 0xfa, 0x5f, 0x7e, 0x46, 0x67, 0x37, 0x1e, 0xd2, 0xf7, 0xd1,
    +	0x8a, 0xc7, 0x5a, 0x52, 0x10, 0x0f, 0xe8, 0x1b, 0x89, 0x60, 0xa5, 0x1e, 0x87, 0x49, 0xba, 0x8f,
    +	0x8f, 0xd1, 0xb2, 0xc7, 0x5a, 0x4f, 0x8e, 0x0e, 0xe5, 0xed, 0x15, 0xcc, 0x8f, 0xd2, 0xe3, 0xaf,
    +	0xcb, 0xe8, 0xcb, 0x7e, 0xe9, 0xdd, 0x59, 0x5f, 0x48, 0x71, 0xe1, 0x01, 0xd7, 0x9f, 0x1c, 0x1d,
    +	0x92, 0x04, 0x81, 0xbf, 0x46, 0x98, 0x83, 0x7f, 0x46, 0x6d, 0x38, 0xb0, 0x6d, 0x16, 0xb8, 0x42,
    +	0x5a, 0x58, 0x92, 0xe0, 0xed, 0x04, 0x8c, 0x1b, 0x99, 0x0c, 0x32, 0x45, 0x85, 0x7b, 0x68, 0x6b,
    +	0x3c, 0x1a, 0x79, 0xcc, 0x4b, 0xd4, 0x17, 0x09, 0x6a, 0xab, 0x31, 0x99, 0x30, 0x9f, 0xdd, 0x2c,
    +	0x18, 0x7f, 0x83, 0x56, 0x5d, 0xd6, 0x02, 0xe9, 0x77, 0x59, 0x16, 0xf9, 0x34, 0x9d, 0x87, 0x5a,
    +	0x12, 0x7f, 0xd9, 0x2f, 0xbd, 0xf7, 0x6a, 0x76, 0x9a, 0x49, 0x06, 0x2c, 0x5c, 0x43, 0x2b, 0xd1,
    +	0xef, 0xc8, 0xfb, 0x8a, 0xc4, 0xee, 0xa7, 0x37, 0x51, 0x8b, 0xc3, 0xf3, 0x39, 0x4e, 0x21, 0xf8,
    +	0x21, 0xba, 0xe3, 0x58, 0xe7, 0x5f, 0x9e, 0x7b, 0xd4, 0xb7, 0x04, 0x65, 0x6e, 0x03, 0x6c, 0xe6,
    +	0xb6, 0xb8, 0xba, 0x5a, 0x56, 0x76, 0xf2, 0xa6, 0x1a, 0xf6, 0x4b, 0x77, 0x1e, 0x4d, 0xd9, 0x27,
    +	0x53, 0x55, 0xf8, 0x3e, 0x5a, 0xf7, 0x4e, 0xe9, 0x79, 0x3d, 0x68, 0xf6, 0xa8, 0x7d, 0x0c, 0x17,
    +	0x6a, 0xa1, 0xac, 0xec, 0xac, 0x99, 0x5b, 0x61, 0xbf, 0xb4, 0x5e, 0x3f, 0x3e, 0xfa, 0x6e, 0xb0,
    +	0x41, 0xc6, 0xf3, 0x70, 0x15, 0x6d, 0x79, 0x3e, 0x63, 0xed, 0xc7, 0xed, 0x3a, 0xe3, 0x1c, 0x38,
    +	0xa7, 0xcc, 0x55, 0x91, 0x14, 0xdf, 0x8d, 0x2e, 0xa6, 0x3e, 0xb9, 0x49, 0xb2, 0xf9, 0x95, 0xbf,
    +	0x17, 0xd1, 0x3b, 0xaf, 0xf8, 0x12, 0x60, 0x1b, 0xa1, 0xc8, 0x26, 0x8d, 0x1c, 0x73, 0x55, 0x91,
    +	0x4f, 0xcf, 0x98, 0xef, 0x55, 0x57, 0x53, 0xdd, 0xf0, 0xa9, 0x0c, 0x42, 0x9c, 0x8c, 0x60, 0xf1,
    +	0x21, 0xda, 0x1c, 0x79, 0xc1, 0xd5, 0xae, 0x45, 0xdd, 0xe4, 0xcd, 0xa8, 0x89, 0x72, 0xb3, 0x3a,
    +	0xb1, 0x4f, 0x32, 0x0a, 0xfc, 0x2d, 0x2a, 0xb8, 0x4c, 0x98, 0xd0, 0x66, 0x7e, 0x3c, 0xef, 0xc5,
    +	0xbd, 0x0f, 0xe6, 0x73, 0x7a, 0x42, 0x1d, 0x30, 0xd7, 0xc3, 0x7e, 0xa9, 0x50, 0x4b, 0x01, 0x64,
    +	0xc8, 0xc2, 0x6d, 0xb4, 0xd1, 0x84, 0x0e, 0x75, 0x09, 0xb4, 0x7d, 0xe0, 0xdd, 0x03, 0x21, 0x9f,
    +	0xc0, 0xeb, 0xd1, 0x71, 0xd8, 0x2f, 0x6d, 0x98, 0x63, 0x14, 0x32, 0x41, 0xc5, 0x27, 0xd1, 0xfc,
    +	0x8b, 0x83, 0xb6, 0x00, 0x5f, 0xce, 0xff, 0xeb, 0x55, 0x58, 0x8b, 0xdf, 0x49, 0xac, 0x27, 0x03,
    +	0x92, 0xf9, 0xd5, 0xe5, 0x95, 0x96, 0x7b, 0x7e, 0xa5, 0xe5, 0x5e, 0x5c, 0x69, 0xb9, 0x5f, 0x42,
    +	0x4d, 0xb9, 0x0c, 0x35, 0xe5, 0x79, 0xa8, 0x29, 0x2f, 0x42, 0x4d, 0xf9, 0x27, 0xd4, 0x94, 0xdf,
    +	0xfe, 0xd5, 0x72, 0x4f, 0xcb, 0xd7, 0xfd, 0xd9, 0xfc, 0x3f, 0x00, 0x00, 0xff, 0xff, 0xcf, 0x6c,
    +	0x5a, 0xc4, 0x8f, 0x0a, 0x00, 0x00,
     }
     
     func (m *ClusterTrustBundle) Marshal() (dAtA []byte, err error) {
    @@ -292,6 +441,261 @@ func (m *ClusterTrustBundleSpec) MarshalToSizedBuffer(dAtA []byte) (int, error)
     	return len(dAtA) - i, nil
     }
     
    +func (m *PodCertificateRequest) Marshal() (dAtA []byte, err error) {
    +	size := m.Size()
    +	dAtA = make([]byte, size)
    +	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    +	if err != nil {
    +		return nil, err
    +	}
    +	return dAtA[:n], nil
    +}
    +
    +func (m *PodCertificateRequest) MarshalTo(dAtA []byte) (int, error) {
    +	size := m.Size()
    +	return m.MarshalToSizedBuffer(dAtA[:size])
    +}
    +
    +func (m *PodCertificateRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    +	i := len(dAtA)
    +	_ = i
    +	var l int
    +	_ = l
    +	{
    +		size, err := m.Status.MarshalToSizedBuffer(dAtA[:i])
    +		if err != nil {
    +			return 0, err
    +		}
    +		i -= size
    +		i = encodeVarintGenerated(dAtA, i, uint64(size))
    +	}
    +	i--
    +	dAtA[i] = 0x1a
    +	{
    +		size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
    +		if err != nil {
    +			return 0, err
    +		}
    +		i -= size
    +		i = encodeVarintGenerated(dAtA, i, uint64(size))
    +	}
    +	i--
    +	dAtA[i] = 0x12
    +	{
    +		size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
    +		if err != nil {
    +			return 0, err
    +		}
    +		i -= size
    +		i = encodeVarintGenerated(dAtA, i, uint64(size))
    +	}
    +	i--
    +	dAtA[i] = 0xa
    +	return len(dAtA) - i, nil
    +}
    +
    +func (m *PodCertificateRequestList) Marshal() (dAtA []byte, err error) {
    +	size := m.Size()
    +	dAtA = make([]byte, size)
    +	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    +	if err != nil {
    +		return nil, err
    +	}
    +	return dAtA[:n], nil
    +}
    +
    +func (m *PodCertificateRequestList) MarshalTo(dAtA []byte) (int, error) {
    +	size := m.Size()
    +	return m.MarshalToSizedBuffer(dAtA[:size])
    +}
    +
    +func (m *PodCertificateRequestList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    +	i := len(dAtA)
    +	_ = i
    +	var l int
    +	_ = l
    +	if len(m.Items) > 0 {
    +		for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
    +			{
    +				size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
    +				if err != nil {
    +					return 0, err
    +				}
    +				i -= size
    +				i = encodeVarintGenerated(dAtA, i, uint64(size))
    +			}
    +			i--
    +			dAtA[i] = 0x12
    +		}
    +	}
    +	{
    +		size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
    +		if err != nil {
    +			return 0, err
    +		}
    +		i -= size
    +		i = encodeVarintGenerated(dAtA, i, uint64(size))
    +	}
    +	i--
    +	dAtA[i] = 0xa
    +	return len(dAtA) - i, nil
    +}
    +
    +func (m *PodCertificateRequestSpec) Marshal() (dAtA []byte, err error) {
    +	size := m.Size()
    +	dAtA = make([]byte, size)
    +	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    +	if err != nil {
    +		return nil, err
    +	}
    +	return dAtA[:n], nil
    +}
    +
    +func (m *PodCertificateRequestSpec) MarshalTo(dAtA []byte) (int, error) {
    +	size := m.Size()
    +	return m.MarshalToSizedBuffer(dAtA[:size])
    +}
    +
    +func (m *PodCertificateRequestSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    +	i := len(dAtA)
    +	_ = i
    +	var l int
    +	_ = l
    +	if m.ProofOfPossession != nil {
    +		i -= len(m.ProofOfPossession)
    +		copy(dAtA[i:], m.ProofOfPossession)
    +		i = encodeVarintGenerated(dAtA, i, uint64(len(m.ProofOfPossession)))
    +		i--
    +		dAtA[i] = 0x52
    +	}
    +	if m.PKIXPublicKey != nil {
    +		i -= len(m.PKIXPublicKey)
    +		copy(dAtA[i:], m.PKIXPublicKey)
    +		i = encodeVarintGenerated(dAtA, i, uint64(len(m.PKIXPublicKey)))
    +		i--
    +		dAtA[i] = 0x4a
    +	}
    +	if m.MaxExpirationSeconds != nil {
    +		i = encodeVarintGenerated(dAtA, i, uint64(*m.MaxExpirationSeconds))
    +		i--
    +		dAtA[i] = 0x40
    +	}
    +	i -= len(m.NodeUID)
    +	copy(dAtA[i:], m.NodeUID)
    +	i = encodeVarintGenerated(dAtA, i, uint64(len(m.NodeUID)))
    +	i--
    +	dAtA[i] = 0x3a
    +	i -= len(m.NodeName)
    +	copy(dAtA[i:], m.NodeName)
    +	i = encodeVarintGenerated(dAtA, i, uint64(len(m.NodeName)))
    +	i--
    +	dAtA[i] = 0x32
    +	i -= len(m.ServiceAccountUID)
    +	copy(dAtA[i:], m.ServiceAccountUID)
    +	i = encodeVarintGenerated(dAtA, i, uint64(len(m.ServiceAccountUID)))
    +	i--
    +	dAtA[i] = 0x2a
    +	i -= len(m.ServiceAccountName)
    +	copy(dAtA[i:], m.ServiceAccountName)
    +	i = encodeVarintGenerated(dAtA, i, uint64(len(m.ServiceAccountName)))
    +	i--
    +	dAtA[i] = 0x22
    +	i -= len(m.PodUID)
    +	copy(dAtA[i:], m.PodUID)
    +	i = encodeVarintGenerated(dAtA, i, uint64(len(m.PodUID)))
    +	i--
    +	dAtA[i] = 0x1a
    +	i -= len(m.PodName)
    +	copy(dAtA[i:], m.PodName)
    +	i = encodeVarintGenerated(dAtA, i, uint64(len(m.PodName)))
    +	i--
    +	dAtA[i] = 0x12
    +	i -= len(m.SignerName)
    +	copy(dAtA[i:], m.SignerName)
    +	i = encodeVarintGenerated(dAtA, i, uint64(len(m.SignerName)))
    +	i--
    +	dAtA[i] = 0xa
    +	return len(dAtA) - i, nil
    +}
    +
    +func (m *PodCertificateRequestStatus) Marshal() (dAtA []byte, err error) {
    +	size := m.Size()
    +	dAtA = make([]byte, size)
    +	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    +	if err != nil {
    +		return nil, err
    +	}
    +	return dAtA[:n], nil
    +}
    +
    +func (m *PodCertificateRequestStatus) MarshalTo(dAtA []byte) (int, error) {
    +	size := m.Size()
    +	return m.MarshalToSizedBuffer(dAtA[:size])
    +}
    +
    +func (m *PodCertificateRequestStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    +	i := len(dAtA)
    +	_ = i
    +	var l int
    +	_ = l
    +	if m.NotAfter != nil {
    +		{
    +			size, err := m.NotAfter.MarshalToSizedBuffer(dAtA[:i])
    +			if err != nil {
    +				return 0, err
    +			}
    +			i -= size
    +			i = encodeVarintGenerated(dAtA, i, uint64(size))
    +		}
    +		i--
    +		dAtA[i] = 0x32
    +	}
    +	if m.BeginRefreshAt != nil {
    +		{
    +			size, err := m.BeginRefreshAt.MarshalToSizedBuffer(dAtA[:i])
    +			if err != nil {
    +				return 0, err
    +			}
    +			i -= size
    +			i = encodeVarintGenerated(dAtA, i, uint64(size))
    +		}
    +		i--
    +		dAtA[i] = 0x2a
    +	}
    +	if m.NotBefore != nil {
    +		{
    +			size, err := m.NotBefore.MarshalToSizedBuffer(dAtA[:i])
    +			if err != nil {
    +				return 0, err
    +			}
    +			i -= size
    +			i = encodeVarintGenerated(dAtA, i, uint64(size))
    +		}
    +		i--
    +		dAtA[i] = 0x22
    +	}
    +	i -= len(m.CertificateChain)
    +	copy(dAtA[i:], m.CertificateChain)
    +	i = encodeVarintGenerated(dAtA, i, uint64(len(m.CertificateChain)))
    +	i--
    +	dAtA[i] = 0x12
    +	if len(m.Conditions) > 0 {
    +		for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- {
    +			{
    +				size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i])
    +				if err != nil {
    +					return 0, err
    +				}
    +				i -= size
    +				i = encodeVarintGenerated(dAtA, i, uint64(size))
    +			}
    +			i--
    +			dAtA[i] = 0xa
    +		}
    +	}
    +	return len(dAtA) - i, nil
    +}
    +
     func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int {
     	offset -= sovGenerated(v)
     	base := offset
    @@ -346,25 +750,120 @@ func (m *ClusterTrustBundleSpec) Size() (n int) {
     	return n
     }
     
    -func sovGenerated(x uint64) (n int) {
    -	return (math_bits.Len64(x|1) + 6) / 7
    -}
    -func sozGenerated(x uint64) (n int) {
    -	return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63))))
    -}
    -func (this *ClusterTrustBundle) String() string {
    -	if this == nil {
    -		return "nil"
    +func (m *PodCertificateRequest) Size() (n int) {
    +	if m == nil {
    +		return 0
     	}
    -	s := strings.Join([]string{`&ClusterTrustBundle{`,
    -		`ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
    -		`Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ClusterTrustBundleSpec", "ClusterTrustBundleSpec", 1), `&`, ``, 1) + `,`,
    -		`}`,
    -	}, "")
    -	return s
    -}
    -func (this *ClusterTrustBundleList) String() string {
    -	if this == nil {
    +	var l int
    +	_ = l
    +	l = m.ObjectMeta.Size()
    +	n += 1 + l + sovGenerated(uint64(l))
    +	l = m.Spec.Size()
    +	n += 1 + l + sovGenerated(uint64(l))
    +	l = m.Status.Size()
    +	n += 1 + l + sovGenerated(uint64(l))
    +	return n
    +}
    +
    +func (m *PodCertificateRequestList) Size() (n int) {
    +	if m == nil {
    +		return 0
    +	}
    +	var l int
    +	_ = l
    +	l = m.ListMeta.Size()
    +	n += 1 + l + sovGenerated(uint64(l))
    +	if len(m.Items) > 0 {
    +		for _, e := range m.Items {
    +			l = e.Size()
    +			n += 1 + l + sovGenerated(uint64(l))
    +		}
    +	}
    +	return n
    +}
    +
    +func (m *PodCertificateRequestSpec) Size() (n int) {
    +	if m == nil {
    +		return 0
    +	}
    +	var l int
    +	_ = l
    +	l = len(m.SignerName)
    +	n += 1 + l + sovGenerated(uint64(l))
    +	l = len(m.PodName)
    +	n += 1 + l + sovGenerated(uint64(l))
    +	l = len(m.PodUID)
    +	n += 1 + l + sovGenerated(uint64(l))
    +	l = len(m.ServiceAccountName)
    +	n += 1 + l + sovGenerated(uint64(l))
    +	l = len(m.ServiceAccountUID)
    +	n += 1 + l + sovGenerated(uint64(l))
    +	l = len(m.NodeName)
    +	n += 1 + l + sovGenerated(uint64(l))
    +	l = len(m.NodeUID)
    +	n += 1 + l + sovGenerated(uint64(l))
    +	if m.MaxExpirationSeconds != nil {
    +		n += 1 + sovGenerated(uint64(*m.MaxExpirationSeconds))
    +	}
    +	if m.PKIXPublicKey != nil {
    +		l = len(m.PKIXPublicKey)
    +		n += 1 + l + sovGenerated(uint64(l))
    +	}
    +	if m.ProofOfPossession != nil {
    +		l = len(m.ProofOfPossession)
    +		n += 1 + l + sovGenerated(uint64(l))
    +	}
    +	return n
    +}
    +
    +func (m *PodCertificateRequestStatus) Size() (n int) {
    +	if m == nil {
    +		return 0
    +	}
    +	var l int
    +	_ = l
    +	if len(m.Conditions) > 0 {
    +		for _, e := range m.Conditions {
    +			l = e.Size()
    +			n += 1 + l + sovGenerated(uint64(l))
    +		}
    +	}
    +	l = len(m.CertificateChain)
    +	n += 1 + l + sovGenerated(uint64(l))
    +	if m.NotBefore != nil {
    +		l = m.NotBefore.Size()
    +		n += 1 + l + sovGenerated(uint64(l))
    +	}
    +	if m.BeginRefreshAt != nil {
    +		l = m.BeginRefreshAt.Size()
    +		n += 1 + l + sovGenerated(uint64(l))
    +	}
    +	if m.NotAfter != nil {
    +		l = m.NotAfter.Size()
    +		n += 1 + l + sovGenerated(uint64(l))
    +	}
    +	return n
    +}
    +
    +func sovGenerated(x uint64) (n int) {
    +	return (math_bits.Len64(x|1) + 6) / 7
    +}
    +func sozGenerated(x uint64) (n int) {
    +	return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63))))
    +}
    +func (this *ClusterTrustBundle) String() string {
    +	if this == nil {
    +		return "nil"
    +	}
    +	s := strings.Join([]string{`&ClusterTrustBundle{`,
    +		`ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
    +		`Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ClusterTrustBundleSpec", "ClusterTrustBundleSpec", 1), `&`, ``, 1) + `,`,
    +		`}`,
    +	}, "")
    +	return s
    +}
    +func (this *ClusterTrustBundleList) String() string {
    +	if this == nil {
     		return "nil"
     	}
     	repeatedStringForItems := "[]ClusterTrustBundle{"
    @@ -390,6 +889,72 @@ func (this *ClusterTrustBundleSpec) String() string {
     	}, "")
     	return s
     }
    +func (this *PodCertificateRequest) String() string {
    +	if this == nil {
    +		return "nil"
    +	}
    +	s := strings.Join([]string{`&PodCertificateRequest{`,
    +		`ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
    +		`Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "PodCertificateRequestSpec", "PodCertificateRequestSpec", 1), `&`, ``, 1) + `,`,
    +		`Status:` + strings.Replace(strings.Replace(this.Status.String(), "PodCertificateRequestStatus", "PodCertificateRequestStatus", 1), `&`, ``, 1) + `,`,
    +		`}`,
    +	}, "")
    +	return s
    +}
    +func (this *PodCertificateRequestList) String() string {
    +	if this == nil {
    +		return "nil"
    +	}
    +	repeatedStringForItems := "[]PodCertificateRequest{"
    +	for _, f := range this.Items {
    +		repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "PodCertificateRequest", "PodCertificateRequest", 1), `&`, ``, 1) + ","
    +	}
    +	repeatedStringForItems += "}"
    +	s := strings.Join([]string{`&PodCertificateRequestList{`,
    +		`ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
    +		`Items:` + repeatedStringForItems + `,`,
    +		`}`,
    +	}, "")
    +	return s
    +}
    +func (this *PodCertificateRequestSpec) String() string {
    +	if this == nil {
    +		return "nil"
    +	}
    +	s := strings.Join([]string{`&PodCertificateRequestSpec{`,
    +		`SignerName:` + fmt.Sprintf("%v", this.SignerName) + `,`,
    +		`PodName:` + fmt.Sprintf("%v", this.PodName) + `,`,
    +		`PodUID:` + fmt.Sprintf("%v", this.PodUID) + `,`,
    +		`ServiceAccountName:` + fmt.Sprintf("%v", this.ServiceAccountName) + `,`,
    +		`ServiceAccountUID:` + fmt.Sprintf("%v", this.ServiceAccountUID) + `,`,
    +		`NodeName:` + fmt.Sprintf("%v", this.NodeName) + `,`,
    +		`NodeUID:` + fmt.Sprintf("%v", this.NodeUID) + `,`,
    +		`MaxExpirationSeconds:` + valueToStringGenerated(this.MaxExpirationSeconds) + `,`,
    +		`PKIXPublicKey:` + valueToStringGenerated(this.PKIXPublicKey) + `,`,
    +		`ProofOfPossession:` + valueToStringGenerated(this.ProofOfPossession) + `,`,
    +		`}`,
    +	}, "")
    +	return s
    +}
    +func (this *PodCertificateRequestStatus) String() string {
    +	if this == nil {
    +		return "nil"
    +	}
    +	repeatedStringForConditions := "[]Condition{"
    +	for _, f := range this.Conditions {
    +		repeatedStringForConditions += fmt.Sprintf("%v", f) + ","
    +	}
    +	repeatedStringForConditions += "}"
    +	s := strings.Join([]string{`&PodCertificateRequestStatus{`,
    +		`Conditions:` + repeatedStringForConditions + `,`,
    +		`CertificateChain:` + fmt.Sprintf("%v", this.CertificateChain) + `,`,
    +		`NotBefore:` + strings.Replace(fmt.Sprintf("%v", this.NotBefore), "Time", "v1.Time", 1) + `,`,
    +		`BeginRefreshAt:` + strings.Replace(fmt.Sprintf("%v", this.BeginRefreshAt), "Time", "v1.Time", 1) + `,`,
    +		`NotAfter:` + strings.Replace(fmt.Sprintf("%v", this.NotAfter), "Time", "v1.Time", 1) + `,`,
    +		`}`,
    +	}, "")
    +	return s
    +}
     func valueToStringGenerated(v interface{}) string {
     	rv := reflect.ValueOf(v)
     	if rv.IsNil() {
    @@ -745,6 +1310,858 @@ func (m *ClusterTrustBundleSpec) Unmarshal(dAtA []byte) error {
     	}
     	return nil
     }
    +func (m *PodCertificateRequest) Unmarshal(dAtA []byte) error {
    +	l := len(dAtA)
    +	iNdEx := 0
    +	for iNdEx < l {
    +		preIndex := iNdEx
    +		var wire uint64
    +		for shift := uint(0); ; shift += 7 {
    +			if shift >= 64 {
    +				return ErrIntOverflowGenerated
    +			}
    +			if iNdEx >= l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			b := dAtA[iNdEx]
    +			iNdEx++
    +			wire |= uint64(b&0x7F) << shift
    +			if b < 0x80 {
    +				break
    +			}
    +		}
    +		fieldNum := int32(wire >> 3)
    +		wireType := int(wire & 0x7)
    +		if wireType == 4 {
    +			return fmt.Errorf("proto: PodCertificateRequest: wiretype end group for non-group")
    +		}
    +		if fieldNum <= 0 {
    +			return fmt.Errorf("proto: PodCertificateRequest: illegal tag %d (wire type %d)", fieldNum, wire)
    +		}
    +		switch fieldNum {
    +		case 1:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		case 2:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		case 3:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		default:
    +			iNdEx = preIndex
    +			skippy, err := skipGenerated(dAtA[iNdEx:])
    +			if err != nil {
    +				return err
    +			}
    +			if (skippy < 0) || (iNdEx+skippy) < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if (iNdEx + skippy) > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			iNdEx += skippy
    +		}
    +	}
    +
    +	if iNdEx > l {
    +		return io.ErrUnexpectedEOF
    +	}
    +	return nil
    +}
    +func (m *PodCertificateRequestList) Unmarshal(dAtA []byte) error {
    +	l := len(dAtA)
    +	iNdEx := 0
    +	for iNdEx < l {
    +		preIndex := iNdEx
    +		var wire uint64
    +		for shift := uint(0); ; shift += 7 {
    +			if shift >= 64 {
    +				return ErrIntOverflowGenerated
    +			}
    +			if iNdEx >= l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			b := dAtA[iNdEx]
    +			iNdEx++
    +			wire |= uint64(b&0x7F) << shift
    +			if b < 0x80 {
    +				break
    +			}
    +		}
    +		fieldNum := int32(wire >> 3)
    +		wireType := int(wire & 0x7)
    +		if wireType == 4 {
    +			return fmt.Errorf("proto: PodCertificateRequestList: wiretype end group for non-group")
    +		}
    +		if fieldNum <= 0 {
    +			return fmt.Errorf("proto: PodCertificateRequestList: illegal tag %d (wire type %d)", fieldNum, wire)
    +		}
    +		switch fieldNum {
    +		case 1:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		case 2:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.Items = append(m.Items, PodCertificateRequest{})
    +			if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		default:
    +			iNdEx = preIndex
    +			skippy, err := skipGenerated(dAtA[iNdEx:])
    +			if err != nil {
    +				return err
    +			}
    +			if (skippy < 0) || (iNdEx+skippy) < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if (iNdEx + skippy) > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			iNdEx += skippy
    +		}
    +	}
    +
    +	if iNdEx > l {
    +		return io.ErrUnexpectedEOF
    +	}
    +	return nil
    +}
    +func (m *PodCertificateRequestSpec) Unmarshal(dAtA []byte) error {
    +	l := len(dAtA)
    +	iNdEx := 0
    +	for iNdEx < l {
    +		preIndex := iNdEx
    +		var wire uint64
    +		for shift := uint(0); ; shift += 7 {
    +			if shift >= 64 {
    +				return ErrIntOverflowGenerated
    +			}
    +			if iNdEx >= l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			b := dAtA[iNdEx]
    +			iNdEx++
    +			wire |= uint64(b&0x7F) << shift
    +			if b < 0x80 {
    +				break
    +			}
    +		}
    +		fieldNum := int32(wire >> 3)
    +		wireType := int(wire & 0x7)
    +		if wireType == 4 {
    +			return fmt.Errorf("proto: PodCertificateRequestSpec: wiretype end group for non-group")
    +		}
    +		if fieldNum <= 0 {
    +			return fmt.Errorf("proto: PodCertificateRequestSpec: illegal tag %d (wire type %d)", fieldNum, wire)
    +		}
    +		switch fieldNum {
    +		case 1:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field SignerName", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.SignerName = string(dAtA[iNdEx:postIndex])
    +			iNdEx = postIndex
    +		case 2:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field PodName", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.PodName = string(dAtA[iNdEx:postIndex])
    +			iNdEx = postIndex
    +		case 3:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field PodUID", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.PodUID = k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex])
    +			iNdEx = postIndex
    +		case 4:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field ServiceAccountName", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.ServiceAccountName = string(dAtA[iNdEx:postIndex])
    +			iNdEx = postIndex
    +		case 5:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field ServiceAccountUID", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.ServiceAccountUID = k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex])
    +			iNdEx = postIndex
    +		case 6:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field NodeName", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.NodeName = k8s_io_apimachinery_pkg_types.NodeName(dAtA[iNdEx:postIndex])
    +			iNdEx = postIndex
    +		case 7:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field NodeUID", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.NodeUID = k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex])
    +			iNdEx = postIndex
    +		case 8:
    +			if wireType != 0 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field MaxExpirationSeconds", wireType)
    +			}
    +			var v int32
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				v |= int32(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			m.MaxExpirationSeconds = &v
    +		case 9:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field PKIXPublicKey", wireType)
    +			}
    +			var byteLen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				byteLen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if byteLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + byteLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.PKIXPublicKey = append(m.PKIXPublicKey[:0], dAtA[iNdEx:postIndex]...)
    +			if m.PKIXPublicKey == nil {
    +				m.PKIXPublicKey = []byte{}
    +			}
    +			iNdEx = postIndex
    +		case 10:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field ProofOfPossession", wireType)
    +			}
    +			var byteLen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				byteLen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if byteLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + byteLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.ProofOfPossession = append(m.ProofOfPossession[:0], dAtA[iNdEx:postIndex]...)
    +			if m.ProofOfPossession == nil {
    +				m.ProofOfPossession = []byte{}
    +			}
    +			iNdEx = postIndex
    +		default:
    +			iNdEx = preIndex
    +			skippy, err := skipGenerated(dAtA[iNdEx:])
    +			if err != nil {
    +				return err
    +			}
    +			if (skippy < 0) || (iNdEx+skippy) < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if (iNdEx + skippy) > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			iNdEx += skippy
    +		}
    +	}
    +
    +	if iNdEx > l {
    +		return io.ErrUnexpectedEOF
    +	}
    +	return nil
    +}
    +func (m *PodCertificateRequestStatus) Unmarshal(dAtA []byte) error {
    +	l := len(dAtA)
    +	iNdEx := 0
    +	for iNdEx < l {
    +		preIndex := iNdEx
    +		var wire uint64
    +		for shift := uint(0); ; shift += 7 {
    +			if shift >= 64 {
    +				return ErrIntOverflowGenerated
    +			}
    +			if iNdEx >= l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			b := dAtA[iNdEx]
    +			iNdEx++
    +			wire |= uint64(b&0x7F) << shift
    +			if b < 0x80 {
    +				break
    +			}
    +		}
    +		fieldNum := int32(wire >> 3)
    +		wireType := int(wire & 0x7)
    +		if wireType == 4 {
    +			return fmt.Errorf("proto: PodCertificateRequestStatus: wiretype end group for non-group")
    +		}
    +		if fieldNum <= 0 {
    +			return fmt.Errorf("proto: PodCertificateRequestStatus: illegal tag %d (wire type %d)", fieldNum, wire)
    +		}
    +		switch fieldNum {
    +		case 1:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.Conditions = append(m.Conditions, v1.Condition{})
    +			if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		case 2:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field CertificateChain", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.CertificateChain = string(dAtA[iNdEx:postIndex])
    +			iNdEx = postIndex
    +		case 4:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field NotBefore", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			if m.NotBefore == nil {
    +				m.NotBefore = &v1.Time{}
    +			}
    +			if err := m.NotBefore.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		case 5:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field BeginRefreshAt", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			if m.BeginRefreshAt == nil {
    +				m.BeginRefreshAt = &v1.Time{}
    +			}
    +			if err := m.BeginRefreshAt.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		case 6:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field NotAfter", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			if m.NotAfter == nil {
    +				m.NotAfter = &v1.Time{}
    +			}
    +			if err := m.NotAfter.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		default:
    +			iNdEx = preIndex
    +			skippy, err := skipGenerated(dAtA[iNdEx:])
    +			if err != nil {
    +				return err
    +			}
    +			if (skippy < 0) || (iNdEx+skippy) < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if (iNdEx + skippy) > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			iNdEx += skippy
    +		}
    +	}
    +
    +	if iNdEx > l {
    +		return io.ErrUnexpectedEOF
    +	}
    +	return nil
    +}
     func skipGenerated(dAtA []byte) (n int, err error) {
     	l := len(dAtA)
     	iNdEx := 0
    diff --git a/vendor/k8s.io/api/certificates/v1alpha1/generated.proto b/vendor/k8s.io/api/certificates/v1alpha1/generated.proto
    index 7155f778c..194bdbc14 100644
    --- a/vendor/k8s.io/api/certificates/v1alpha1/generated.proto
    +++ b/vendor/k8s.io/api/certificates/v1alpha1/generated.proto
    @@ -101,3 +101,208 @@ message ClusterTrustBundleSpec {
       optional string trustBundle = 2;
     }
     
    +// PodCertificateRequest encodes a pod requesting a certificate from a given
    +// signer.
    +//
    +// Kubelets use this API to implement podCertificate projected volumes
    +message PodCertificateRequest {
    +  // metadata contains the object metadata.
    +  //
    +  // +optional
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
    +
    +  // spec contains the details about the certificate being requested.
    +  optional PodCertificateRequestSpec spec = 2;
    +
    +  // status contains the issued certificate, and a standard set of conditions.
    +  // +optional
    +  optional PodCertificateRequestStatus status = 3;
    +}
    +
    +// PodCertificateRequestList is a collection of PodCertificateRequest objects
    +message PodCertificateRequestList {
    +  // metadata contains the list metadata.
    +  //
    +  // +optional
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
    +
    +  // items is a collection of PodCertificateRequest objects
    +  repeated PodCertificateRequest items = 2;
    +}
    +
    +// PodCertificateRequestSpec describes the certificate request.  All fields are
    +// immutable after creation.
    +message PodCertificateRequestSpec {
    +  // signerName indicates the requested signer.
    +  //
    +  // All signer names beginning with `kubernetes.io` are reserved for use by
    +  // the Kubernetes project.  There is currently one well-known signer
    +  // documented by the Kubernetes project,
    +  // `kubernetes.io/kube-apiserver-client-pod`, which will issue client
    +  // certificates understood by kube-apiserver.  It is currently
    +  // unimplemented.
    +  //
    +  // +required
    +  optional string signerName = 1;
    +
    +  // podName is the name of the pod into which the certificate will be mounted.
    +  //
    +  // +required
    +  optional string podName = 2;
    +
    +  // podUID is the UID of the pod into which the certificate will be mounted.
    +  //
    +  // +required
    +  optional string podUID = 3;
    +
    +  // serviceAccountName is the name of the service account the pod is running as.
    +  //
    +  // +required
    +  optional string serviceAccountName = 4;
    +
    +  // serviceAccountUID is the UID of the service account the pod is running as.
    +  //
    +  // +required
    +  optional string serviceAccountUID = 5;
    +
    +  // nodeName is the name of the node the pod is assigned to.
    +  //
    +  // +required
    +  optional string nodeName = 6;
    +
    +  // nodeUID is the UID of the node the pod is assigned to.
    +  //
    +  // +required
    +  optional string nodeUID = 7;
    +
    +  // maxExpirationSeconds is the maximum lifetime permitted for the
    +  // certificate.
    +  //
    +  // If omitted, kube-apiserver will set it to 86400(24 hours). kube-apiserver
    +  // will reject values shorter than 3600 (1 hour).  The maximum allowable
    +  // value is 7862400 (91 days).
    +  //
    +  // The signer implementation is then free to issue a certificate with any
    +  // lifetime *shorter* than MaxExpirationSeconds, but no shorter than 3600
    +  // seconds (1 hour).  This constraint is enforced by kube-apiserver.
    +  // `kubernetes.io` signers will never issue certificates with a lifetime
    +  // longer than 24 hours.
    +  //
    +  // +optional
    +  // +default=86400
    +  optional int32 maxExpirationSeconds = 8;
    +
    +  // pkixPublicKey is the PKIX-serialized public key the signer will issue the
    +  // certificate to.
    +  //
    +  // The key must be one of RSA3072, RSA4096, ECDSAP256, ECDSAP384, ECDSAP521,
    +  // or ED25519. Note that this list may be expanded in the future.
    +  //
    +  // Signer implementations do not need to support all key types supported by
    +  // kube-apiserver and kubelet.  If a signer does not support the key type
    +  // used for a given PodCertificateRequest, it must deny the request by
    +  // setting a status.conditions entry with a type of "Denied" and a reason of
    +  // "UnsupportedKeyType". It may also suggest a key type that it does support
    +  // in the message field.
    +  //
    +  // +required
    +  optional bytes pkixPublicKey = 9;
    +
    +  // proofOfPossession proves that the requesting kubelet holds the private
    +  // key corresponding to pkixPublicKey.
    +  //
    +  // It is contructed by signing the ASCII bytes of the pod's UID using
    +  // `pkixPublicKey`.
    +  //
    +  // kube-apiserver validates the proof of possession during creation of the
    +  // PodCertificateRequest.
    +  //
    +  // If the key is an RSA key, then the signature is over the ASCII bytes of
    +  // the pod UID, using RSASSA-PSS from RFC 8017 (as implemented by the golang
    +  // function crypto/rsa.SignPSS with nil options).
    +  //
    +  // If the key is an ECDSA key, then the signature is as described by [SEC 1,
    +  // Version 2.0](https://www.secg.org/sec1-v2.pdf) (as implemented by the
    +  // golang library function crypto/ecdsa.SignASN1)
    +  //
    +  // If the key is an ED25519 key, the the signature is as described by the
    +  // [ED25519 Specification](https://ed25519.cr.yp.to/) (as implemented by
    +  // the golang library crypto/ed25519.Sign).
    +  //
    +  // +required
    +  optional bytes proofOfPossession = 10;
    +}
    +
    +// PodCertificateRequestStatus describes the status of the request, and holds
    +// the certificate data if the request is issued.
    +message PodCertificateRequestStatus {
    +  // conditions applied to the request.
    +  //
    +  // The types "Issued", "Denied", and "Failed" have special handling.  At
    +  // most one of these conditions may be present, and they must have status
    +  // "True".
    +  //
    +  // If the request is denied with `Reason=UnsupportedKeyType`, the signer may
    +  // suggest a key type that will work in the message field.
    +  //
    +  // +patchMergeKey=type
    +  // +patchStrategy=merge
    +  // +listType=map
    +  // +listMapKey=type
    +  // +optional
    +  repeated .k8s.io.apimachinery.pkg.apis.meta.v1.Condition conditions = 1;
    +
    +  // certificateChain is populated with an issued certificate by the signer.
    +  // This field is set via the /status subresource. Once populated, this field
    +  // is immutable.
    +  //
    +  // If the certificate signing request is denied, a condition of type
    +  // "Denied" is added and this field remains empty. If the signer cannot
    +  // issue the certificate, a condition of type "Failed" is added and this
    +  // field remains empty.
    +  //
    +  // Validation requirements:
    +  //  1. certificateChain must consist of one or more PEM-formatted certificates.
    +  //  2. Each entry must be a valid PEM-wrapped, DER-encoded ASN.1 Certificate as
    +  //     described in section 4 of RFC5280.
    +  //
    +  // If more than one block is present, and the definition of the requested
    +  // spec.signerName does not indicate otherwise, the first block is the
    +  // issued certificate, and subsequent blocks should be treated as
    +  // intermediate certificates and presented in TLS handshakes.  When
    +  // projecting the chain into a pod volume, kubelet will drop any data
    +  // in-between the PEM blocks, as well as any PEM block headers.
    +  //
    +  // +optional
    +  optional string certificateChain = 2;
    +
    +  // notBefore is the time at which the certificate becomes valid.  The value
    +  // must be the same as the notBefore value in the leaf certificate in
    +  // certificateChain.  This field is set via the /status subresource.  Once
    +  // populated, it is immutable. The signer must set this field at the same
    +  // time it sets certificateChain.
    +  //
    +  // +optional
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time notBefore = 4;
    +
    +  // beginRefreshAt is the time at which the kubelet should begin trying to
    +  // refresh the certificate.  This field is set via the /status subresource,
    +  // and must be set at the same time as certificateChain.  Once populated,
    +  // this field is immutable.
    +  //
    +  // This field is only a hint.  Kubelet may start refreshing before or after
    +  // this time if necessary.
    +  //
    +  // +optional
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time beginRefreshAt = 5;
    +
    +  // notAfter is the time at which the certificate expires.  The value must be
    +  // the same as the notAfter value in the leaf certificate in
    +  // certificateChain.  This field is set via the /status subresource.  Once
    +  // populated, it is immutable.  The signer must set this field at the same
    +  // time it sets certificateChain.
    +  //
    +  // +optional
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time notAfter = 6;
    +}
    +
    diff --git a/vendor/k8s.io/api/certificates/v1alpha1/register.go b/vendor/k8s.io/api/certificates/v1alpha1/register.go
    index 7288ed9a3..ae541e15c 100644
    --- a/vendor/k8s.io/api/certificates/v1alpha1/register.go
    +++ b/vendor/k8s.io/api/certificates/v1alpha1/register.go
    @@ -53,6 +53,8 @@ func addKnownTypes(scheme *runtime.Scheme) error {
     	scheme.AddKnownTypes(SchemeGroupVersion,
     		&ClusterTrustBundle{},
     		&ClusterTrustBundleList{},
    +		&PodCertificateRequest{},
    +		&PodCertificateRequestList{},
     	)
     
     	// Add the watch version that applies
    diff --git a/vendor/k8s.io/api/certificates/v1alpha1/types.go b/vendor/k8s.io/api/certificates/v1alpha1/types.go
    index beef02599..a5cb3809e 100644
    --- a/vendor/k8s.io/api/certificates/v1alpha1/types.go
    +++ b/vendor/k8s.io/api/certificates/v1alpha1/types.go
    @@ -18,6 +18,7 @@ package v1alpha1
     
     import (
     	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	"k8s.io/apimachinery/pkg/types"
     )
     
     // +genclient
    @@ -106,3 +107,233 @@ type ClusterTrustBundleList struct {
     	// items is a collection of ClusterTrustBundle objects
     	Items []ClusterTrustBundle `json:"items" protobuf:"bytes,2,rep,name=items"`
     }
    +
    +// +genclient
    +// +k8s:prerelease-lifecycle-gen:introduced=1.34
    +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    +
    +// PodCertificateRequest encodes a pod requesting a certificate from a given
    +// signer.
    +//
    +// Kubelets use this API to implement podCertificate projected volumes
    +type PodCertificateRequest struct {
    +	metav1.TypeMeta `json:",inline"`
    +
    +	// metadata contains the object metadata.
    +	//
    +	// +optional
    +	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
    +
    +	// spec contains the details about the certificate being requested.
    +	Spec PodCertificateRequestSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"`
    +
    +	// status contains the issued certificate, and a standard set of conditions.
    +	// +optional
    +	Status PodCertificateRequestStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
    +}
    +
    +// PodCertificateRequestSpec describes the certificate request.  All fields are
    +// immutable after creation.
    +type PodCertificateRequestSpec struct {
    +	// signerName indicates the requested signer.
    +	//
    +	// All signer names beginning with `kubernetes.io` are reserved for use by
    +	// the Kubernetes project.  There is currently one well-known signer
    +	// documented by the Kubernetes project,
    +	// `kubernetes.io/kube-apiserver-client-pod`, which will issue client
    +	// certificates understood by kube-apiserver.  It is currently
    +	// unimplemented.
    +	//
    +	// +required
    +	SignerName string `json:"signerName" protobuf:"bytes,1,opt,name=signerName"`
    +
    +	// podName is the name of the pod into which the certificate will be mounted.
    +	//
    +	// +required
    +	PodName string `json:"podName" protobuf:"bytes,2,opt,name=podName"`
    +	// podUID is the UID of the pod into which the certificate will be mounted.
    +	//
    +	// +required
    +	PodUID types.UID `json:"podUID" protobuf:"bytes,3,opt,name=podUID"`
    +
    +	// serviceAccountName is the name of the service account the pod is running as.
    +	//
    +	// +required
    +	ServiceAccountName string `json:"serviceAccountName" protobuf:"bytes,4,opt,name=serviceAccountName"`
    +	// serviceAccountUID is the UID of the service account the pod is running as.
    +	//
    +	// +required
    +	ServiceAccountUID types.UID `json:"serviceAccountUID" protobuf:"bytes,5,opt,name=serviceAccountUID"`
    +
    +	// nodeName is the name of the node the pod is assigned to.
    +	//
    +	// +required
    +	NodeName types.NodeName `json:"nodeName" protobuf:"bytes,6,opt,name=nodeName"`
    +	// nodeUID is the UID of the node the pod is assigned to.
    +	//
    +	// +required
    +	NodeUID types.UID `json:"nodeUID" protobuf:"bytes,7,opt,name=nodeUID"`
    +
    +	// maxExpirationSeconds is the maximum lifetime permitted for the
    +	// certificate.
    +	//
    +	// If omitted, kube-apiserver will set it to 86400(24 hours). kube-apiserver
    +	// will reject values shorter than 3600 (1 hour).  The maximum allowable
    +	// value is 7862400 (91 days).
    +	//
    +	// The signer implementation is then free to issue a certificate with any
    +	// lifetime *shorter* than MaxExpirationSeconds, but no shorter than 3600
    +	// seconds (1 hour).  This constraint is enforced by kube-apiserver.
    +	// `kubernetes.io` signers will never issue certificates with a lifetime
    +	// longer than 24 hours.
    +	//
    +	// +optional
    +	// +default=86400
    +	MaxExpirationSeconds *int32 `json:"maxExpirationSeconds,omitempty" protobuf:"varint,8,opt,name=maxExpirationSeconds"`
    +
    +	// pkixPublicKey is the PKIX-serialized public key the signer will issue the
    +	// certificate to.
    +	//
    +	// The key must be one of RSA3072, RSA4096, ECDSAP256, ECDSAP384, ECDSAP521,
    +	// or ED25519. Note that this list may be expanded in the future.
    +	//
    +	// Signer implementations do not need to support all key types supported by
    +	// kube-apiserver and kubelet.  If a signer does not support the key type
    +	// used for a given PodCertificateRequest, it must deny the request by
    +	// setting a status.conditions entry with a type of "Denied" and a reason of
    +	// "UnsupportedKeyType". It may also suggest a key type that it does support
    +	// in the message field.
    +	//
    +	// +required
    +	PKIXPublicKey []byte `json:"pkixPublicKey" protobuf:"bytes,9,opt,name=pkixPublicKey"`
    +
    +	// proofOfPossession proves that the requesting kubelet holds the private
    +	// key corresponding to pkixPublicKey.
    +	//
    +	// It is contructed by signing the ASCII bytes of the pod's UID using
    +	// `pkixPublicKey`.
    +	//
    +	// kube-apiserver validates the proof of possession during creation of the
    +	// PodCertificateRequest.
    +	//
    +	// If the key is an RSA key, then the signature is over the ASCII bytes of
    +	// the pod UID, using RSASSA-PSS from RFC 8017 (as implemented by the golang
    +	// function crypto/rsa.SignPSS with nil options).
    +	//
    +	// If the key is an ECDSA key, then the signature is as described by [SEC 1,
    +	// Version 2.0](https://www.secg.org/sec1-v2.pdf) (as implemented by the
    +	// golang library function crypto/ecdsa.SignASN1)
    +	//
    +	// If the key is an ED25519 key, the the signature is as described by the
    +	// [ED25519 Specification](https://ed25519.cr.yp.to/) (as implemented by
    +	// the golang library crypto/ed25519.Sign).
    +	//
    +	// +required
    +	ProofOfPossession []byte `json:"proofOfPossession" protobuf:"bytes,10,opt,name=proofOfPossession"`
    +}
    +
    +// PodCertificateRequestStatus describes the status of the request, and holds
    +// the certificate data if the request is issued.
    +type PodCertificateRequestStatus struct {
    +	// conditions applied to the request.
    +	//
    +	// The types "Issued", "Denied", and "Failed" have special handling.  At
    +	// most one of these conditions may be present, and they must have status
    +	// "True".
    +	//
    +	// If the request is denied with `Reason=UnsupportedKeyType`, the signer may
    +	// suggest a key type that will work in the message field.
    +	//
    +	// +patchMergeKey=type
    +	// +patchStrategy=merge
    +	// +listType=map
    +	// +listMapKey=type
    +	// +optional
    +	Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"`
    +
    +	// certificateChain is populated with an issued certificate by the signer.
    +	// This field is set via the /status subresource. Once populated, this field
    +	// is immutable.
    +	//
    +	// If the certificate signing request is denied, a condition of type
    +	// "Denied" is added and this field remains empty. If the signer cannot
    +	// issue the certificate, a condition of type "Failed" is added and this
    +	// field remains empty.
    +	//
    +	// Validation requirements:
    +	//  1. certificateChain must consist of one or more PEM-formatted certificates.
    +	//  2. Each entry must be a valid PEM-wrapped, DER-encoded ASN.1 Certificate as
    +	//     described in section 4 of RFC5280.
    +	//
    +	// If more than one block is present, and the definition of the requested
    +	// spec.signerName does not indicate otherwise, the first block is the
    +	// issued certificate, and subsequent blocks should be treated as
    +	// intermediate certificates and presented in TLS handshakes.  When
    +	// projecting the chain into a pod volume, kubelet will drop any data
    +	// in-between the PEM blocks, as well as any PEM block headers.
    +	//
    +	// +optional
    +	CertificateChain string `json:"certificateChain,omitempty" protobuf:"bytes,2,opt,name=certificateChain"`
    +
    +	// notBefore is the time at which the certificate becomes valid.  The value
    +	// must be the same as the notBefore value in the leaf certificate in
    +	// certificateChain.  This field is set via the /status subresource.  Once
    +	// populated, it is immutable. The signer must set this field at the same
    +	// time it sets certificateChain.
    +	//
    +	// +optional
    +	NotBefore *metav1.Time `json:"notBefore,omitempty" protobuf:"bytes,4,opt,name=notBefore"`
    +
    +	// beginRefreshAt is the time at which the kubelet should begin trying to
    +	// refresh the certificate.  This field is set via the /status subresource,
    +	// and must be set at the same time as certificateChain.  Once populated,
    +	// this field is immutable.
    +	//
    +	// This field is only a hint.  Kubelet may start refreshing before or after
    +	// this time if necessary.
    +	//
    +	// +optional
    +	BeginRefreshAt *metav1.Time `json:"beginRefreshAt,omitempty" protobuf:"bytes,5,opt,name=beginRefreshAt"`
    +
    +	// notAfter is the time at which the certificate expires.  The value must be
    +	// the same as the notAfter value in the leaf certificate in
    +	// certificateChain.  This field is set via the /status subresource.  Once
    +	// populated, it is immutable.  The signer must set this field at the same
    +	// time it sets certificateChain.
    +	//
    +	// +optional
    +	NotAfter *metav1.Time `json:"notAfter,omitempty" protobuf:"bytes,6,opt,name=notAfter"`
    +}
    +
    +// Well-known condition types for PodCertificateRequests
    +const (
    +	// Denied indicates the request was denied by the signer.
    +	PodCertificateRequestConditionTypeDenied string = "Denied"
    +	// Failed indicates the signer failed to issue the certificate.
    +	PodCertificateRequestConditionTypeFailed string = "Failed"
    +	// Issued indicates the certificate has been issued.
    +	PodCertificateRequestConditionTypeIssued string = "Issued"
    +)
    +
    +// Well-known condition reasons for PodCertificateRequests
    +const (
    +	// UnsupportedKeyType should be set on "Denied" conditions when the signer
    +	// doesn't support the key type of publicKey.
    +	PodCertificateRequestConditionUnsupportedKeyType string = "UnsupportedKeyType"
    +)
    +
    +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    +// +k8s:prerelease-lifecycle-gen:introduced=1.34
    +
    +// PodCertificateRequestList is a collection of PodCertificateRequest objects
    +type PodCertificateRequestList struct {
    +	metav1.TypeMeta `json:",inline"`
    +
    +	// metadata contains the list metadata.
    +	//
    +	// +optional
    +	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
    +
    +	// items is a collection of PodCertificateRequest objects
    +	Items []PodCertificateRequest `json:"items" protobuf:"bytes,2,rep,name=items"`
    +}
    diff --git a/vendor/k8s.io/api/certificates/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/certificates/v1alpha1/types_swagger_doc_generated.go
    index bff649e3c..d29f2d850 100644
    --- a/vendor/k8s.io/api/certificates/v1alpha1/types_swagger_doc_generated.go
    +++ b/vendor/k8s.io/api/certificates/v1alpha1/types_swagger_doc_generated.go
    @@ -57,4 +57,56 @@ func (ClusterTrustBundleSpec) SwaggerDoc() map[string]string {
     	return map_ClusterTrustBundleSpec
     }
     
    +var map_PodCertificateRequest = map[string]string{
    +	"":         "PodCertificateRequest encodes a pod requesting a certificate from a given signer.\n\nKubelets use this API to implement podCertificate projected volumes",
    +	"metadata": "metadata contains the object metadata.",
    +	"spec":     "spec contains the details about the certificate being requested.",
    +	"status":   "status contains the issued certificate, and a standard set of conditions.",
    +}
    +
    +func (PodCertificateRequest) SwaggerDoc() map[string]string {
    +	return map_PodCertificateRequest
    +}
    +
    +var map_PodCertificateRequestList = map[string]string{
    +	"":         "PodCertificateRequestList is a collection of PodCertificateRequest objects",
    +	"metadata": "metadata contains the list metadata.",
    +	"items":    "items is a collection of PodCertificateRequest objects",
    +}
    +
    +func (PodCertificateRequestList) SwaggerDoc() map[string]string {
    +	return map_PodCertificateRequestList
    +}
    +
    +var map_PodCertificateRequestSpec = map[string]string{
    +	"":                     "PodCertificateRequestSpec describes the certificate request.  All fields are immutable after creation.",
    +	"signerName":           "signerName indicates the requested signer.\n\nAll signer names beginning with `kubernetes.io` are reserved for use by the Kubernetes project.  There is currently one well-known signer documented by the Kubernetes project, `kubernetes.io/kube-apiserver-client-pod`, which will issue client certificates understood by kube-apiserver.  It is currently unimplemented.",
    +	"podName":              "podName is the name of the pod into which the certificate will be mounted.",
    +	"podUID":               "podUID is the UID of the pod into which the certificate will be mounted.",
    +	"serviceAccountName":   "serviceAccountName is the name of the service account the pod is running as.",
    +	"serviceAccountUID":    "serviceAccountUID is the UID of the service account the pod is running as.",
    +	"nodeName":             "nodeName is the name of the node the pod is assigned to.",
    +	"nodeUID":              "nodeUID is the UID of the node the pod is assigned to.",
    +	"maxExpirationSeconds": "maxExpirationSeconds is the maximum lifetime permitted for the certificate.\n\nIf omitted, kube-apiserver will set it to 86400(24 hours). kube-apiserver will reject values shorter than 3600 (1 hour).  The maximum allowable value is 7862400 (91 days).\n\nThe signer implementation is then free to issue a certificate with any lifetime *shorter* than MaxExpirationSeconds, but no shorter than 3600 seconds (1 hour).  This constraint is enforced by kube-apiserver. `kubernetes.io` signers will never issue certificates with a lifetime longer than 24 hours.",
    +	"pkixPublicKey":        "pkixPublicKey is the PKIX-serialized public key the signer will issue the certificate to.\n\nThe key must be one of RSA3072, RSA4096, ECDSAP256, ECDSAP384, ECDSAP521, or ED25519. Note that this list may be expanded in the future.\n\nSigner implementations do not need to support all key types supported by kube-apiserver and kubelet.  If a signer does not support the key type used for a given PodCertificateRequest, it must deny the request by setting a status.conditions entry with a type of \"Denied\" and a reason of \"UnsupportedKeyType\". It may also suggest a key type that it does support in the message field.",
    +	"proofOfPossession":    "proofOfPossession proves that the requesting kubelet holds the private key corresponding to pkixPublicKey.\n\nIt is contructed by signing the ASCII bytes of the pod's UID using `pkixPublicKey`.\n\nkube-apiserver validates the proof of possession during creation of the PodCertificateRequest.\n\nIf the key is an RSA key, then the signature is over the ASCII bytes of the pod UID, using RSASSA-PSS from RFC 8017 (as implemented by the golang function crypto/rsa.SignPSS with nil options).\n\nIf the key is an ECDSA key, then the signature is as described by [SEC 1, Version 2.0](https://www.secg.org/sec1-v2.pdf) (as implemented by the golang library function crypto/ecdsa.SignASN1)\n\nIf the key is an ED25519 key, the the signature is as described by the [ED25519 Specification](https://ed25519.cr.yp.to/) (as implemented by the golang library crypto/ed25519.Sign).",
    +}
    +
    +func (PodCertificateRequestSpec) SwaggerDoc() map[string]string {
    +	return map_PodCertificateRequestSpec
    +}
    +
    +var map_PodCertificateRequestStatus = map[string]string{
    +	"":                 "PodCertificateRequestStatus describes the status of the request, and holds the certificate data if the request is issued.",
    +	"conditions":       "conditions applied to the request.\n\nThe types \"Issued\", \"Denied\", and \"Failed\" have special handling.  At most one of these conditions may be present, and they must have status \"True\".\n\nIf the request is denied with `Reason=UnsupportedKeyType`, the signer may suggest a key type that will work in the message field.",
    +	"certificateChain": "certificateChain is populated with an issued certificate by the signer. This field is set via the /status subresource. Once populated, this field is immutable.\n\nIf the certificate signing request is denied, a condition of type \"Denied\" is added and this field remains empty. If the signer cannot issue the certificate, a condition of type \"Failed\" is added and this field remains empty.\n\nValidation requirements:\n 1. certificateChain must consist of one or more PEM-formatted certificates.\n 2. Each entry must be a valid PEM-wrapped, DER-encoded ASN.1 Certificate as\n    described in section 4 of RFC5280.\n\nIf more than one block is present, and the definition of the requested spec.signerName does not indicate otherwise, the first block is the issued certificate, and subsequent blocks should be treated as intermediate certificates and presented in TLS handshakes.  When projecting the chain into a pod volume, kubelet will drop any data in-between the PEM blocks, as well as any PEM block headers.",
    +	"notBefore":        "notBefore is the time at which the certificate becomes valid.  The value must be the same as the notBefore value in the leaf certificate in certificateChain.  This field is set via the /status subresource.  Once populated, it is immutable. The signer must set this field at the same time it sets certificateChain.",
    +	"beginRefreshAt":   "beginRefreshAt is the time at which the kubelet should begin trying to refresh the certificate.  This field is set via the /status subresource, and must be set at the same time as certificateChain.  Once populated, this field is immutable.\n\nThis field is only a hint.  Kubelet may start refreshing before or after this time if necessary.",
    +	"notAfter":         "notAfter is the time at which the certificate expires.  The value must be the same as the notAfter value in the leaf certificate in certificateChain.  This field is set via the /status subresource.  Once populated, it is immutable.  The signer must set this field at the same time it sets certificateChain.",
    +}
    +
    +func (PodCertificateRequestStatus) SwaggerDoc() map[string]string {
    +	return map_PodCertificateRequestStatus
    +}
    +
     // AUTO-GENERATED FUNCTIONS END HERE
    diff --git a/vendor/k8s.io/api/certificates/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/api/certificates/v1alpha1/zz_generated.deepcopy.go
    index 30a4dc1e8..25bc0ed6c 100644
    --- a/vendor/k8s.io/api/certificates/v1alpha1/zz_generated.deepcopy.go
    +++ b/vendor/k8s.io/api/certificates/v1alpha1/zz_generated.deepcopy.go
    @@ -22,6 +22,7 @@ limitations under the License.
     package v1alpha1
     
     import (
    +	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     	runtime "k8s.io/apimachinery/pkg/runtime"
     )
     
    @@ -100,3 +101,130 @@ func (in *ClusterTrustBundleSpec) DeepCopy() *ClusterTrustBundleSpec {
     	in.DeepCopyInto(out)
     	return out
     }
    +
    +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
    +func (in *PodCertificateRequest) DeepCopyInto(out *PodCertificateRequest) {
    +	*out = *in
    +	out.TypeMeta = in.TypeMeta
    +	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
    +	in.Spec.DeepCopyInto(&out.Spec)
    +	in.Status.DeepCopyInto(&out.Status)
    +	return
    +}
    +
    +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodCertificateRequest.
    +func (in *PodCertificateRequest) DeepCopy() *PodCertificateRequest {
    +	if in == nil {
    +		return nil
    +	}
    +	out := new(PodCertificateRequest)
    +	in.DeepCopyInto(out)
    +	return out
    +}
    +
    +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
    +func (in *PodCertificateRequest) DeepCopyObject() runtime.Object {
    +	if c := in.DeepCopy(); c != nil {
    +		return c
    +	}
    +	return nil
    +}
    +
    +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
    +func (in *PodCertificateRequestList) DeepCopyInto(out *PodCertificateRequestList) {
    +	*out = *in
    +	out.TypeMeta = in.TypeMeta
    +	in.ListMeta.DeepCopyInto(&out.ListMeta)
    +	if in.Items != nil {
    +		in, out := &in.Items, &out.Items
    +		*out = make([]PodCertificateRequest, len(*in))
    +		for i := range *in {
    +			(*in)[i].DeepCopyInto(&(*out)[i])
    +		}
    +	}
    +	return
    +}
    +
    +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodCertificateRequestList.
    +func (in *PodCertificateRequestList) DeepCopy() *PodCertificateRequestList {
    +	if in == nil {
    +		return nil
    +	}
    +	out := new(PodCertificateRequestList)
    +	in.DeepCopyInto(out)
    +	return out
    +}
    +
    +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
    +func (in *PodCertificateRequestList) DeepCopyObject() runtime.Object {
    +	if c := in.DeepCopy(); c != nil {
    +		return c
    +	}
    +	return nil
    +}
    +
    +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
    +func (in *PodCertificateRequestSpec) DeepCopyInto(out *PodCertificateRequestSpec) {
    +	*out = *in
    +	if in.MaxExpirationSeconds != nil {
    +		in, out := &in.MaxExpirationSeconds, &out.MaxExpirationSeconds
    +		*out = new(int32)
    +		**out = **in
    +	}
    +	if in.PKIXPublicKey != nil {
    +		in, out := &in.PKIXPublicKey, &out.PKIXPublicKey
    +		*out = make([]byte, len(*in))
    +		copy(*out, *in)
    +	}
    +	if in.ProofOfPossession != nil {
    +		in, out := &in.ProofOfPossession, &out.ProofOfPossession
    +		*out = make([]byte, len(*in))
    +		copy(*out, *in)
    +	}
    +	return
    +}
    +
    +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodCertificateRequestSpec.
    +func (in *PodCertificateRequestSpec) DeepCopy() *PodCertificateRequestSpec {
    +	if in == nil {
    +		return nil
    +	}
    +	out := new(PodCertificateRequestSpec)
    +	in.DeepCopyInto(out)
    +	return out
    +}
    +
    +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
    +func (in *PodCertificateRequestStatus) DeepCopyInto(out *PodCertificateRequestStatus) {
    +	*out = *in
    +	if in.Conditions != nil {
    +		in, out := &in.Conditions, &out.Conditions
    +		*out = make([]v1.Condition, len(*in))
    +		for i := range *in {
    +			(*in)[i].DeepCopyInto(&(*out)[i])
    +		}
    +	}
    +	if in.NotBefore != nil {
    +		in, out := &in.NotBefore, &out.NotBefore
    +		*out = (*in).DeepCopy()
    +	}
    +	if in.BeginRefreshAt != nil {
    +		in, out := &in.BeginRefreshAt, &out.BeginRefreshAt
    +		*out = (*in).DeepCopy()
    +	}
    +	if in.NotAfter != nil {
    +		in, out := &in.NotAfter, &out.NotAfter
    +		*out = (*in).DeepCopy()
    +	}
    +	return
    +}
    +
    +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodCertificateRequestStatus.
    +func (in *PodCertificateRequestStatus) DeepCopy() *PodCertificateRequestStatus {
    +	if in == nil {
    +		return nil
    +	}
    +	out := new(PodCertificateRequestStatus)
    +	in.DeepCopyInto(out)
    +	return out
    +}
    diff --git a/vendor/k8s.io/api/certificates/v1alpha1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/certificates/v1alpha1/zz_generated.prerelease-lifecycle.go
    index 3121a87d0..edbfce79b 100644
    --- a/vendor/k8s.io/api/certificates/v1alpha1/zz_generated.prerelease-lifecycle.go
    +++ b/vendor/k8s.io/api/certificates/v1alpha1/zz_generated.prerelease-lifecycle.go
    @@ -56,3 +56,39 @@ func (in *ClusterTrustBundleList) APILifecycleDeprecated() (major, minor int) {
     func (in *ClusterTrustBundleList) APILifecycleRemoved() (major, minor int) {
     	return 1, 37
     }
    +
    +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
    +func (in *PodCertificateRequest) APILifecycleIntroduced() (major, minor int) {
    +	return 1, 34
    +}
    +
    +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or  "k8s:prerelease-lifecycle-gen:introduced" plus three minor.
    +func (in *PodCertificateRequest) APILifecycleDeprecated() (major, minor int) {
    +	return 1, 37
    +}
    +
    +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or  "k8s:prerelease-lifecycle-gen:deprecated" plus three minor.
    +func (in *PodCertificateRequest) APILifecycleRemoved() (major, minor int) {
    +	return 1, 40
    +}
    +
    +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
    +func (in *PodCertificateRequestList) APILifecycleIntroduced() (major, minor int) {
    +	return 1, 34
    +}
    +
    +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or  "k8s:prerelease-lifecycle-gen:introduced" plus three minor.
    +func (in *PodCertificateRequestList) APILifecycleDeprecated() (major, minor int) {
    +	return 1, 37
    +}
    +
    +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or  "k8s:prerelease-lifecycle-gen:deprecated" plus three minor.
    +func (in *PodCertificateRequestList) APILifecycleRemoved() (major, minor int) {
    +	return 1, 40
    +}
    diff --git a/vendor/k8s.io/api/certificates/v1beta1/doc.go b/vendor/k8s.io/api/certificates/v1beta1/doc.go
    index 1165518c6..81608a554 100644
    --- a/vendor/k8s.io/api/certificates/v1beta1/doc.go
    +++ b/vendor/k8s.io/api/certificates/v1beta1/doc.go
    @@ -21,4 +21,4 @@ limitations under the License.
     
     // +groupName=certificates.k8s.io
     
    -package v1beta1 // import "k8s.io/api/certificates/v1beta1"
    +package v1beta1
    diff --git a/vendor/k8s.io/api/certificates/v1beta1/generated.pb.go b/vendor/k8s.io/api/certificates/v1beta1/generated.pb.go
    index b6d8ab3f5..199a54496 100644
    --- a/vendor/k8s.io/api/certificates/v1beta1/generated.pb.go
    +++ b/vendor/k8s.io/api/certificates/v1beta1/generated.pb.go
    @@ -186,10 +186,94 @@ func (m *CertificateSigningRequestStatus) XXX_DiscardUnknown() {
     
     var xxx_messageInfo_CertificateSigningRequestStatus proto.InternalMessageInfo
     
    +func (m *ClusterTrustBundle) Reset()      { *m = ClusterTrustBundle{} }
    +func (*ClusterTrustBundle) ProtoMessage() {}
    +func (*ClusterTrustBundle) Descriptor() ([]byte, []int) {
    +	return fileDescriptor_6529c11a462c48a5, []int{5}
    +}
    +func (m *ClusterTrustBundle) XXX_Unmarshal(b []byte) error {
    +	return m.Unmarshal(b)
    +}
    +func (m *ClusterTrustBundle) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    +	b = b[:cap(b)]
    +	n, err := m.MarshalToSizedBuffer(b)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return b[:n], nil
    +}
    +func (m *ClusterTrustBundle) XXX_Merge(src proto.Message) {
    +	xxx_messageInfo_ClusterTrustBundle.Merge(m, src)
    +}
    +func (m *ClusterTrustBundle) XXX_Size() int {
    +	return m.Size()
    +}
    +func (m *ClusterTrustBundle) XXX_DiscardUnknown() {
    +	xxx_messageInfo_ClusterTrustBundle.DiscardUnknown(m)
    +}
    +
    +var xxx_messageInfo_ClusterTrustBundle proto.InternalMessageInfo
    +
    +func (m *ClusterTrustBundleList) Reset()      { *m = ClusterTrustBundleList{} }
    +func (*ClusterTrustBundleList) ProtoMessage() {}
    +func (*ClusterTrustBundleList) Descriptor() ([]byte, []int) {
    +	return fileDescriptor_6529c11a462c48a5, []int{6}
    +}
    +func (m *ClusterTrustBundleList) XXX_Unmarshal(b []byte) error {
    +	return m.Unmarshal(b)
    +}
    +func (m *ClusterTrustBundleList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    +	b = b[:cap(b)]
    +	n, err := m.MarshalToSizedBuffer(b)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return b[:n], nil
    +}
    +func (m *ClusterTrustBundleList) XXX_Merge(src proto.Message) {
    +	xxx_messageInfo_ClusterTrustBundleList.Merge(m, src)
    +}
    +func (m *ClusterTrustBundleList) XXX_Size() int {
    +	return m.Size()
    +}
    +func (m *ClusterTrustBundleList) XXX_DiscardUnknown() {
    +	xxx_messageInfo_ClusterTrustBundleList.DiscardUnknown(m)
    +}
    +
    +var xxx_messageInfo_ClusterTrustBundleList proto.InternalMessageInfo
    +
    +func (m *ClusterTrustBundleSpec) Reset()      { *m = ClusterTrustBundleSpec{} }
    +func (*ClusterTrustBundleSpec) ProtoMessage() {}
    +func (*ClusterTrustBundleSpec) Descriptor() ([]byte, []int) {
    +	return fileDescriptor_6529c11a462c48a5, []int{7}
    +}
    +func (m *ClusterTrustBundleSpec) XXX_Unmarshal(b []byte) error {
    +	return m.Unmarshal(b)
    +}
    +func (m *ClusterTrustBundleSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    +	b = b[:cap(b)]
    +	n, err := m.MarshalToSizedBuffer(b)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return b[:n], nil
    +}
    +func (m *ClusterTrustBundleSpec) XXX_Merge(src proto.Message) {
    +	xxx_messageInfo_ClusterTrustBundleSpec.Merge(m, src)
    +}
    +func (m *ClusterTrustBundleSpec) XXX_Size() int {
    +	return m.Size()
    +}
    +func (m *ClusterTrustBundleSpec) XXX_DiscardUnknown() {
    +	xxx_messageInfo_ClusterTrustBundleSpec.DiscardUnknown(m)
    +}
    +
    +var xxx_messageInfo_ClusterTrustBundleSpec proto.InternalMessageInfo
    +
     func (m *ExtraValue) Reset()      { *m = ExtraValue{} }
     func (*ExtraValue) ProtoMessage() {}
     func (*ExtraValue) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6529c11a462c48a5, []int{5}
    +	return fileDescriptor_6529c11a462c48a5, []int{8}
     }
     func (m *ExtraValue) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -221,6 +305,9 @@ func init() {
     	proto.RegisterType((*CertificateSigningRequestSpec)(nil), "k8s.io.api.certificates.v1beta1.CertificateSigningRequestSpec")
     	proto.RegisterMapType((map[string]ExtraValue)(nil), "k8s.io.api.certificates.v1beta1.CertificateSigningRequestSpec.ExtraEntry")
     	proto.RegisterType((*CertificateSigningRequestStatus)(nil), "k8s.io.api.certificates.v1beta1.CertificateSigningRequestStatus")
    +	proto.RegisterType((*ClusterTrustBundle)(nil), "k8s.io.api.certificates.v1beta1.ClusterTrustBundle")
    +	proto.RegisterType((*ClusterTrustBundleList)(nil), "k8s.io.api.certificates.v1beta1.ClusterTrustBundleList")
    +	proto.RegisterType((*ClusterTrustBundleSpec)(nil), "k8s.io.api.certificates.v1beta1.ClusterTrustBundleSpec")
     	proto.RegisterType((*ExtraValue)(nil), "k8s.io.api.certificates.v1beta1.ExtraValue")
     }
     
    @@ -229,64 +316,69 @@ func init() {
     }
     
     var fileDescriptor_6529c11a462c48a5 = []byte{
    -	// 901 bytes of a gzipped FileDescriptorProto
    -	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x56, 0x4d, 0x6f, 0x1b, 0x45,
    -	0x18, 0xf6, 0xc6, 0x1f, 0xb1, 0xc7, 0x21, 0x6d, 0x47, 0x50, 0x2d, 0x96, 0xea, 0xb5, 0x56, 0x80,
    -	0xc2, 0xd7, 0x2c, 0xa9, 0x2a, 0x88, 0x72, 0x40, 0xb0, 0x21, 0x42, 0x11, 0x29, 0x48, 0x93, 0x84,
    -	0x03, 0x42, 0xa2, 0x93, 0xf5, 0xdb, 0xcd, 0x34, 0xdd, 0x0f, 0x76, 0x66, 0x4d, 0x7d, 0xeb, 0x4f,
    -	0xe0, 0xc8, 0x91, 0xff, 0xc0, 0x9f, 0x08, 0x07, 0xa4, 0x1e, 0x7b, 0x40, 0x16, 0x71, 0xff, 0x45,
    -	0x4e, 0x68, 0x66, 0xc7, 0x6b, 0xc7, 0x4e, 0x70, 0x69, 0x6f, 0x3b, 0xcf, 0xbc, 0xcf, 0xf3, 0xbc,
    -	0xf3, 0xce, 0xfb, 0x8e, 0x8d, 0xbc, 0xd3, 0x2d, 0x41, 0x78, 0xe2, 0xb1, 0x94, 0x7b, 0x01, 0x64,
    -	0x92, 0x3f, 0xe4, 0x01, 0x93, 0x20, 0xbc, 0xc1, 0xe6, 0x31, 0x48, 0xb6, 0xe9, 0x85, 0x10, 0x43,
    -	0xc6, 0x24, 0xf4, 0x49, 0x9a, 0x25, 0x32, 0xc1, 0x4e, 0x41, 0x20, 0x2c, 0xe5, 0x64, 0x96, 0x40,
    -	0x0c, 0xa1, 0xf3, 0x71, 0xc8, 0xe5, 0x49, 0x7e, 0x4c, 0x82, 0x24, 0xf2, 0xc2, 0x24, 0x4c, 0x3c,
    -	0xcd, 0x3b, 0xce, 0x1f, 0xea, 0x95, 0x5e, 0xe8, 0xaf, 0x42, 0xaf, 0xe3, 0xce, 0x26, 0x90, 0x64,
    -	0xe0, 0x0d, 0x16, 0x3c, 0x3b, 0xf7, 0xa6, 0x31, 0x11, 0x0b, 0x4e, 0x78, 0x0c, 0xd9, 0xd0, 0x4b,
    -	0x4f, 0x43, 0x05, 0x08, 0x2f, 0x02, 0xc9, 0xae, 0x62, 0x79, 0xd7, 0xb1, 0xb2, 0x3c, 0x96, 0x3c,
    -	0x82, 0x05, 0xc2, 0xa7, 0xcb, 0x08, 0x22, 0x38, 0x81, 0x88, 0xcd, 0xf3, 0xdc, 0x3f, 0x57, 0xd0,
    -	0xdb, 0x3b, 0xd3, 0x52, 0x1c, 0xf0, 0x30, 0xe6, 0x71, 0x48, 0xe1, 0xe7, 0x1c, 0x84, 0xc4, 0x0f,
    -	0x50, 0x53, 0x65, 0xd8, 0x67, 0x92, 0xd9, 0x56, 0xcf, 0xda, 0x68, 0xdf, 0xfd, 0x84, 0x4c, 0x6b,
    -	0x58, 0x1a, 0x91, 0xf4, 0x34, 0x54, 0x80, 0x20, 0x2a, 0x9a, 0x0c, 0x36, 0xc9, 0x77, 0xc7, 0x8f,
    -	0x20, 0x90, 0xf7, 0x41, 0x32, 0x1f, 0x9f, 0x8d, 0x9c, 0xca, 0x78, 0xe4, 0xa0, 0x29, 0x46, 0x4b,
    -	0x55, 0xfc, 0x00, 0xd5, 0x44, 0x0a, 0x81, 0xbd, 0xa2, 0xd5, 0x3f, 0x27, 0x4b, 0x6e, 0x88, 0x5c,
    -	0x9b, 0xeb, 0x41, 0x0a, 0x81, 0xbf, 0x66, 0xbc, 0x6a, 0x6a, 0x45, 0xb5, 0x32, 0x3e, 0x41, 0x0d,
    -	0x21, 0x99, 0xcc, 0x85, 0x5d, 0xd5, 0x1e, 0x5f, 0xbc, 0x86, 0x87, 0xd6, 0xf1, 0xd7, 0x8d, 0x4b,
    -	0xa3, 0x58, 0x53, 0xa3, 0xef, 0xbe, 0xa8, 0x22, 0xf7, 0x5a, 0xee, 0x4e, 0x12, 0xf7, 0xb9, 0xe4,
    -	0x49, 0x8c, 0xb7, 0x50, 0x4d, 0x0e, 0x53, 0xd0, 0x05, 0x6d, 0xf9, 0xef, 0x4c, 0x52, 0x3e, 0x1c,
    -	0xa6, 0x70, 0x31, 0x72, 0xde, 0x9c, 0x8f, 0x57, 0x38, 0xd5, 0x0c, 0xbc, 0x5f, 0x1e, 0xa5, 0xa1,
    -	0xb9, 0xf7, 0x2e, 0x27, 0x72, 0x31, 0x72, 0xae, 0xe8, 0x48, 0x52, 0x2a, 0x5d, 0x4e, 0x17, 0xbf,
    -	0x87, 0x1a, 0x19, 0x30, 0x91, 0xc4, 0xba, 0xf8, 0xad, 0xe9, 0xb1, 0xa8, 0x46, 0xa9, 0xd9, 0xc5,
    -	0xef, 0xa3, 0xd5, 0x08, 0x84, 0x60, 0x21, 0xe8, 0x0a, 0xb6, 0xfc, 0x1b, 0x26, 0x70, 0xf5, 0x7e,
    -	0x01, 0xd3, 0xc9, 0x3e, 0x7e, 0x84, 0xd6, 0x1f, 0x33, 0x21, 0x8f, 0xd2, 0x3e, 0x93, 0x70, 0xc8,
    -	0x23, 0xb0, 0x6b, 0xba, 0xe6, 0x1f, 0xbc, 0x5c, 0xd7, 0x28, 0x86, 0x7f, 0xdb, 0xa8, 0xaf, 0xef,
    -	0x5f, 0x52, 0xa2, 0x73, 0xca, 0x78, 0x80, 0xb0, 0x42, 0x0e, 0x33, 0x16, 0x8b, 0xa2, 0x50, 0xca,
    -	0xaf, 0xfe, 0xbf, 0xfd, 0x3a, 0xc6, 0x0f, 0xef, 0x2f, 0xa8, 0xd1, 0x2b, 0x1c, 0xdc, 0x91, 0x85,
    -	0xee, 0x5c, 0x7b, 0xcb, 0xfb, 0x5c, 0x48, 0xfc, 0xe3, 0xc2, 0xd4, 0x90, 0x97, 0xcb, 0x47, 0xb1,
    -	0xf5, 0xcc, 0xdc, 0x34, 0x39, 0x35, 0x27, 0xc8, 0xcc, 0xc4, 0xfc, 0x84, 0xea, 0x5c, 0x42, 0x24,
    -	0xec, 0x95, 0x5e, 0x75, 0xa3, 0x7d, 0x77, 0xfb, 0xd5, 0xdb, 0xd9, 0x7f, 0xc3, 0xd8, 0xd4, 0xf7,
    -	0x94, 0x20, 0x2d, 0x74, 0xdd, 0x3f, 0x6a, 0xff, 0x71, 0x40, 0x35, 0x58, 0xf8, 0x5d, 0xb4, 0x9a,
    -	0x15, 0x4b, 0x7d, 0xbe, 0x35, 0xbf, 0xad, 0xba, 0xc1, 0x44, 0xd0, 0xc9, 0x1e, 0x26, 0x08, 0x09,
    -	0x1e, 0xc6, 0x90, 0x7d, 0xcb, 0x22, 0xb0, 0x57, 0x8b, 0x26, 0x53, 0x2f, 0xc1, 0x41, 0x89, 0xd2,
    -	0x99, 0x08, 0xbc, 0x83, 0x6e, 0xc1, 0x93, 0x94, 0x67, 0x4c, 0x37, 0x2b, 0x04, 0x49, 0xdc, 0x17,
    -	0x76, 0xb3, 0x67, 0x6d, 0xd4, 0xfd, 0xb7, 0xc6, 0x23, 0xe7, 0xd6, 0xee, 0xfc, 0x26, 0x5d, 0x8c,
    -	0xc7, 0x04, 0x35, 0x72, 0xd5, 0x8b, 0xc2, 0xae, 0xf7, 0xaa, 0x1b, 0x2d, 0xff, 0xb6, 0xea, 0xe8,
    -	0x23, 0x8d, 0x5c, 0x8c, 0x9c, 0xe6, 0x37, 0x30, 0xd4, 0x0b, 0x6a, 0xa2, 0xf0, 0x47, 0xa8, 0x99,
    -	0x0b, 0xc8, 0x62, 0x95, 0x62, 0x31, 0x07, 0x65, 0xf1, 0x8f, 0x0c, 0x4e, 0xcb, 0x08, 0x7c, 0x07,
    -	0x55, 0x73, 0xde, 0x37, 0x73, 0xd0, 0x36, 0x81, 0xd5, 0xa3, 0xbd, 0xaf, 0xa8, 0xc2, 0xb1, 0x8b,
    -	0x1a, 0x61, 0x96, 0xe4, 0xa9, 0xb0, 0x6b, 0xda, 0x1c, 0x29, 0xf3, 0xaf, 0x35, 0x42, 0xcd, 0x0e,
    -	0x8e, 0x51, 0x1d, 0x9e, 0xc8, 0x8c, 0xd9, 0x0d, 0x7d, 0x7f, 0x7b, 0xaf, 0xf7, 0xe4, 0x91, 0x5d,
    -	0xa5, 0xb5, 0x1b, 0xcb, 0x6c, 0x38, 0xbd, 0x4e, 0x8d, 0xd1, 0xc2, 0xa6, 0x03, 0x08, 0x4d, 0x63,
    -	0xf0, 0x4d, 0x54, 0x3d, 0x85, 0x61, 0xf1, 0xf6, 0x50, 0xf5, 0x89, 0xbf, 0x44, 0xf5, 0x01, 0x7b,
    -	0x9c, 0x83, 0x79, 0x82, 0x3f, 0x5c, 0x9a, 0x8f, 0x56, 0xfb, 0x5e, 0x51, 0x68, 0xc1, 0xdc, 0x5e,
    -	0xd9, 0xb2, 0xdc, 0xbf, 0x2c, 0xe4, 0x2c, 0x79, 0x38, 0xf1, 0x2f, 0x08, 0x05, 0x93, 0xc7, 0x48,
    -	0xd8, 0x96, 0x3e, 0xff, 0xce, 0xab, 0x9f, 0xbf, 0x7c, 0xd8, 0xa6, 0xbf, 0x31, 0x25, 0x24, 0xe8,
    -	0x8c, 0x15, 0xde, 0x44, 0xed, 0x19, 0x69, 0x7d, 0xd2, 0x35, 0xff, 0xc6, 0x78, 0xe4, 0xb4, 0x67,
    -	0xc4, 0xe9, 0x6c, 0x8c, 0xfb, 0x99, 0x29, 0x9b, 0x3e, 0x28, 0x76, 0x26, 0x43, 0x67, 0xe9, 0x7b,
    -	0x6d, 0xcd, 0x0f, 0xcd, 0x76, 0xf3, 0xb7, 0xdf, 0x9d, 0xca, 0xd3, 0xbf, 0x7b, 0x15, 0x7f, 0xf7,
    -	0xec, 0xbc, 0x5b, 0x79, 0x76, 0xde, 0xad, 0x3c, 0x3f, 0xef, 0x56, 0x9e, 0x8e, 0xbb, 0xd6, 0xd9,
    -	0xb8, 0x6b, 0x3d, 0x1b, 0x77, 0xad, 0xe7, 0xe3, 0xae, 0xf5, 0xcf, 0xb8, 0x6b, 0xfd, 0xfa, 0xa2,
    -	0x5b, 0xf9, 0xc1, 0x59, 0xf2, 0xdf, 0xe5, 0xdf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x35, 0x2f, 0x11,
    -	0xe8, 0xdd, 0x08, 0x00, 0x00,
    +	// 991 bytes of a gzipped FileDescriptorProto
    +	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x56, 0x4f, 0x6f, 0xe3, 0x44,
    +	0x14, 0x8f, 0x9b, 0x3f, 0x4d, 0x26, 0xa5, 0xbb, 0x3b, 0x40, 0x65, 0x22, 0x6d, 0x1c, 0x59, 0x80,
    +	0xca, 0x3f, 0x9b, 0x96, 0x85, 0xad, 0x7a, 0x40, 0xe0, 0x50, 0xa1, 0x8a, 0x2e, 0x48, 0xd3, 0x16,
    +	0x01, 0x42, 0x62, 0xa7, 0xce, 0x5b, 0xd7, 0xdb, 0xc6, 0x36, 0x9e, 0x71, 0xd8, 0xdc, 0x56, 0xe2,
    +	0x0b, 0x70, 0xe4, 0xc8, 0x77, 0xe0, 0x4b, 0x94, 0x03, 0x52, 0xb9, 0xed, 0x01, 0x45, 0x34, 0xfb,
    +	0x2d, 0x7a, 0x42, 0x33, 0x9e, 0x38, 0x4e, 0xd2, 0x90, 0xa5, 0x2b, 0xed, 0x2d, 0xf3, 0xe6, 0xfd,
    +	0x7e, 0xbf, 0xf7, 0x9e, 0xdf, 0x7b, 0x13, 0x64, 0x9f, 0x6c, 0x31, 0xcb, 0x0f, 0x6d, 0x1a, 0xf9,
    +	0xb6, 0x0b, 0x31, 0xf7, 0x1f, 0xf8, 0x2e, 0xe5, 0xc0, 0xec, 0xde, 0xc6, 0x11, 0x70, 0xba, 0x61,
    +	0x7b, 0x10, 0x40, 0x4c, 0x39, 0x74, 0xac, 0x28, 0x0e, 0x79, 0x88, 0x8d, 0x14, 0x60, 0xd1, 0xc8,
    +	0xb7, 0xf2, 0x00, 0x4b, 0x01, 0x1a, 0xef, 0x79, 0x3e, 0x3f, 0x4e, 0x8e, 0x2c, 0x37, 0xec, 0xda,
    +	0x5e, 0xe8, 0x85, 0xb6, 0xc4, 0x1d, 0x25, 0x0f, 0xe4, 0x49, 0x1e, 0xe4, 0xaf, 0x94, 0xaf, 0x61,
    +	0xe6, 0x03, 0x08, 0x63, 0xb0, 0x7b, 0x33, 0x9a, 0x8d, 0x3b, 0x63, 0x9f, 0x2e, 0x75, 0x8f, 0xfd,
    +	0x00, 0xe2, 0xbe, 0x1d, 0x9d, 0x78, 0xc2, 0xc0, 0xec, 0x2e, 0x70, 0x7a, 0x15, 0xca, 0x9e, 0x87,
    +	0x8a, 0x93, 0x80, 0xfb, 0x5d, 0x98, 0x01, 0x7c, 0xb4, 0x08, 0xc0, 0xdc, 0x63, 0xe8, 0xd2, 0x69,
    +	0x9c, 0xf9, 0xc7, 0x12, 0x7a, 0xad, 0x3d, 0x2e, 0xc5, 0xbe, 0xef, 0x05, 0x7e, 0xe0, 0x11, 0xf8,
    +	0x31, 0x01, 0xc6, 0xf1, 0x7d, 0x54, 0x15, 0x11, 0x76, 0x28, 0xa7, 0xba, 0xd6, 0xd2, 0xd6, 0xeb,
    +	0x9b, 0xef, 0x5b, 0xe3, 0x1a, 0x66, 0x42, 0x56, 0x74, 0xe2, 0x09, 0x03, 0xb3, 0x84, 0xb7, 0xd5,
    +	0xdb, 0xb0, 0xbe, 0x3a, 0x7a, 0x08, 0x2e, 0xbf, 0x07, 0x9c, 0x3a, 0xf8, 0x6c, 0x60, 0x14, 0x86,
    +	0x03, 0x03, 0x8d, 0x6d, 0x24, 0x63, 0xc5, 0xf7, 0x51, 0x89, 0x45, 0xe0, 0xea, 0x4b, 0x92, 0xfd,
    +	0x63, 0x6b, 0xc1, 0x17, 0xb2, 0xe6, 0xc6, 0xba, 0x1f, 0x81, 0xeb, 0xac, 0x28, 0xad, 0x92, 0x38,
    +	0x11, 0xc9, 0x8c, 0x8f, 0x51, 0x85, 0x71, 0xca, 0x13, 0xa6, 0x17, 0xa5, 0xc6, 0x27, 0xcf, 0xa1,
    +	0x21, 0x79, 0x9c, 0x55, 0xa5, 0x52, 0x49, 0xcf, 0x44, 0xf1, 0x9b, 0x4f, 0x8b, 0xc8, 0x9c, 0x8b,
    +	0x6d, 0x87, 0x41, 0xc7, 0xe7, 0x7e, 0x18, 0xe0, 0x2d, 0x54, 0xe2, 0xfd, 0x08, 0x64, 0x41, 0x6b,
    +	0xce, 0xeb, 0xa3, 0x90, 0x0f, 0xfa, 0x11, 0x5c, 0x0e, 0x8c, 0x57, 0xa6, 0xfd, 0x85, 0x9d, 0x48,
    +	0x04, 0xde, 0xcb, 0x52, 0xa9, 0x48, 0xec, 0x9d, 0xc9, 0x40, 0x2e, 0x07, 0xc6, 0x15, 0x1d, 0x69,
    +	0x65, 0x4c, 0x93, 0xe1, 0xe2, 0x37, 0x51, 0x25, 0x06, 0xca, 0xc2, 0x40, 0x16, 0xbf, 0x36, 0x4e,
    +	0x8b, 0x48, 0x2b, 0x51, 0xb7, 0xf8, 0x2d, 0xb4, 0xdc, 0x05, 0xc6, 0xa8, 0x07, 0xb2, 0x82, 0x35,
    +	0xe7, 0x86, 0x72, 0x5c, 0xbe, 0x97, 0x9a, 0xc9, 0xe8, 0x1e, 0x3f, 0x44, 0xab, 0xa7, 0x94, 0xf1,
    +	0xc3, 0xa8, 0x43, 0x39, 0x1c, 0xf8, 0x5d, 0xd0, 0x4b, 0xb2, 0xe6, 0x6f, 0x3f, 0x5b, 0xd7, 0x08,
    +	0x84, 0xb3, 0xa6, 0xd8, 0x57, 0xf7, 0x26, 0x98, 0xc8, 0x14, 0x33, 0xee, 0x21, 0x2c, 0x2c, 0x07,
    +	0x31, 0x0d, 0x58, 0x5a, 0x28, 0xa1, 0x57, 0xfe, 0xdf, 0x7a, 0x0d, 0xa5, 0x87, 0xf7, 0x66, 0xd8,
    +	0xc8, 0x15, 0x0a, 0xe6, 0x40, 0x43, 0xb7, 0xe7, 0x7e, 0xe5, 0x3d, 0x9f, 0x71, 0xfc, 0xfd, 0xcc,
    +	0xd4, 0x58, 0xcf, 0x16, 0x8f, 0x40, 0xcb, 0x99, 0xb9, 0xa9, 0x62, 0xaa, 0x8e, 0x2c, 0xb9, 0x89,
    +	0xf9, 0x01, 0x95, 0x7d, 0x0e, 0x5d, 0xa6, 0x2f, 0xb5, 0x8a, 0xeb, 0xf5, 0xcd, 0xed, 0xeb, 0xb7,
    +	0xb3, 0xf3, 0x92, 0x92, 0x29, 0xef, 0x0a, 0x42, 0x92, 0xf2, 0x9a, 0xbf, 0x97, 0xfe, 0x23, 0x41,
    +	0x31, 0x58, 0xf8, 0x0d, 0xb4, 0x1c, 0xa7, 0x47, 0x99, 0xdf, 0x8a, 0x53, 0x17, 0xdd, 0xa0, 0x3c,
    +	0xc8, 0xe8, 0x0e, 0x5b, 0x08, 0x31, 0xdf, 0x0b, 0x20, 0xfe, 0x92, 0x76, 0x41, 0x5f, 0x4e, 0x9b,
    +	0x4c, 0x6c, 0x82, 0xfd, 0xcc, 0x4a, 0x72, 0x1e, 0xb8, 0x8d, 0x6e, 0xc1, 0xa3, 0xc8, 0x8f, 0xa9,
    +	0x6c, 0x56, 0x70, 0xc3, 0xa0, 0xc3, 0xf4, 0x6a, 0x4b, 0x5b, 0x2f, 0x3b, 0xaf, 0x0e, 0x07, 0xc6,
    +	0xad, 0x9d, 0xe9, 0x4b, 0x32, 0xeb, 0x8f, 0x2d, 0x54, 0x49, 0x44, 0x2f, 0x32, 0xbd, 0xdc, 0x2a,
    +	0xae, 0xd7, 0x9c, 0x35, 0xd1, 0xd1, 0x87, 0xd2, 0x72, 0x39, 0x30, 0xaa, 0x5f, 0x40, 0x5f, 0x1e,
    +	0x88, 0xf2, 0xc2, 0xef, 0xa2, 0x6a, 0xc2, 0x20, 0x0e, 0x44, 0x88, 0xe9, 0x1c, 0x64, 0xc5, 0x3f,
    +	0x54, 0x76, 0x92, 0x79, 0xe0, 0xdb, 0xa8, 0x98, 0xf8, 0x1d, 0x35, 0x07, 0x75, 0xe5, 0x58, 0x3c,
    +	0xdc, 0xfd, 0x8c, 0x08, 0x3b, 0x36, 0x51, 0xc5, 0x8b, 0xc3, 0x24, 0x62, 0x7a, 0x49, 0x8a, 0x23,
    +	0x21, 0xfe, 0xb9, 0xb4, 0x10, 0x75, 0x83, 0x03, 0x54, 0x86, 0x47, 0x3c, 0xa6, 0x7a, 0x45, 0x7e,
    +	0xbf, 0xdd, 0xe7, 0x5b, 0x79, 0xd6, 0x8e, 0xe0, 0xda, 0x09, 0x78, 0xdc, 0x1f, 0x7f, 0x4e, 0x69,
    +	0x23, 0xa9, 0x4c, 0x03, 0x10, 0x1a, 0xfb, 0xe0, 0x9b, 0xa8, 0x78, 0x02, 0xfd, 0x74, 0xf7, 0x10,
    +	0xf1, 0x13, 0x7f, 0x8a, 0xca, 0x3d, 0x7a, 0x9a, 0x80, 0x5a, 0xc1, 0xef, 0x2c, 0x8c, 0x47, 0xb2,
    +	0x7d, 0x2d, 0x20, 0x24, 0x45, 0x6e, 0x2f, 0x6d, 0x69, 0xe6, 0x9f, 0x1a, 0x32, 0x16, 0x2c, 0x4e,
    +	0xfc, 0x13, 0x42, 0xee, 0x68, 0x19, 0x31, 0x5d, 0x93, 0xf9, 0xb7, 0xaf, 0x9f, 0x7f, 0xb6, 0xd8,
    +	0xc6, 0x6f, 0x4c, 0x66, 0x62, 0x24, 0x27, 0x85, 0x37, 0x50, 0x3d, 0x47, 0x2d, 0x33, 0x5d, 0x71,
    +	0x6e, 0x0c, 0x07, 0x46, 0x3d, 0x47, 0x4e, 0xf2, 0x3e, 0xe6, 0x5f, 0x1a, 0xc2, 0xed, 0xd3, 0x84,
    +	0x71, 0x88, 0x0f, 0xe2, 0x84, 0x71, 0x27, 0x09, 0x3a, 0xa7, 0xf0, 0x02, 0x5e, 0xc4, 0x6f, 0x27,
    +	0x5e, 0xc4, 0xbb, 0x8b, 0xcb, 0x33, 0x13, 0xe4, 0xbc, 0xa7, 0xd0, 0x3c, 0xd7, 0xd0, 0xda, 0xac,
    +	0xfb, 0x0b, 0xd8, 0x59, 0xdf, 0x4c, 0xee, 0xac, 0x0f, 0xae, 0x91, 0xd4, 0x9c, 0x65, 0xf5, 0xf3,
    +	0x95, 0x29, 0xc9, 0x2d, 0xb5, 0x39, 0xb1, 0x7e, 0xd2, 0xd7, 0x36, 0x2b, 0xfd, 0x9c, 0x15, 0xf4,
    +	0x21, 0xaa, 0xf3, 0x31, 0x8d, 0x5a, 0x08, 0x2f, 0x2b, 0x50, 0x3d, 0xa7, 0x40, 0xf2, 0x7e, 0xe6,
    +	0x5d, 0x35, 0x63, 0x72, 0x2a, 0xb0, 0x31, 0xca, 0x56, 0x93, 0x4b, 0xa0, 0x36, 0x1d, 0xf4, 0x76,
    +	0xf5, 0xd7, 0xdf, 0x8c, 0xc2, 0xe3, 0xbf, 0x5b, 0x05, 0x67, 0xe7, 0xec, 0xa2, 0x59, 0x38, 0xbf,
    +	0x68, 0x16, 0x9e, 0x5c, 0x34, 0x0b, 0x8f, 0x87, 0x4d, 0xed, 0x6c, 0xd8, 0xd4, 0xce, 0x87, 0x4d,
    +	0xed, 0xc9, 0xb0, 0xa9, 0xfd, 0x33, 0x6c, 0x6a, 0xbf, 0x3c, 0x6d, 0x16, 0xbe, 0x33, 0x16, 0xfc,
    +	0xd1, 0xfd, 0x37, 0x00, 0x00, 0xff, 0xff, 0x17, 0xbe, 0xe3, 0x02, 0x0a, 0x0b, 0x00, 0x00,
     }
     
     func (m *CertificateSigningRequest) Marshal() (dAtA []byte, err error) {
    @@ -595,6 +687,129 @@ func (m *CertificateSigningRequestStatus) MarshalToSizedBuffer(dAtA []byte) (int
     	return len(dAtA) - i, nil
     }
     
    +func (m *ClusterTrustBundle) Marshal() (dAtA []byte, err error) {
    +	size := m.Size()
    +	dAtA = make([]byte, size)
    +	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    +	if err != nil {
    +		return nil, err
    +	}
    +	return dAtA[:n], nil
    +}
    +
    +func (m *ClusterTrustBundle) MarshalTo(dAtA []byte) (int, error) {
    +	size := m.Size()
    +	return m.MarshalToSizedBuffer(dAtA[:size])
    +}
    +
    +func (m *ClusterTrustBundle) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    +	i := len(dAtA)
    +	_ = i
    +	var l int
    +	_ = l
    +	{
    +		size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
    +		if err != nil {
    +			return 0, err
    +		}
    +		i -= size
    +		i = encodeVarintGenerated(dAtA, i, uint64(size))
    +	}
    +	i--
    +	dAtA[i] = 0x12
    +	{
    +		size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
    +		if err != nil {
    +			return 0, err
    +		}
    +		i -= size
    +		i = encodeVarintGenerated(dAtA, i, uint64(size))
    +	}
    +	i--
    +	dAtA[i] = 0xa
    +	return len(dAtA) - i, nil
    +}
    +
    +func (m *ClusterTrustBundleList) Marshal() (dAtA []byte, err error) {
    +	size := m.Size()
    +	dAtA = make([]byte, size)
    +	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    +	if err != nil {
    +		return nil, err
    +	}
    +	return dAtA[:n], nil
    +}
    +
    +func (m *ClusterTrustBundleList) MarshalTo(dAtA []byte) (int, error) {
    +	size := m.Size()
    +	return m.MarshalToSizedBuffer(dAtA[:size])
    +}
    +
    +func (m *ClusterTrustBundleList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    +	i := len(dAtA)
    +	_ = i
    +	var l int
    +	_ = l
    +	if len(m.Items) > 0 {
    +		for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
    +			{
    +				size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
    +				if err != nil {
    +					return 0, err
    +				}
    +				i -= size
    +				i = encodeVarintGenerated(dAtA, i, uint64(size))
    +			}
    +			i--
    +			dAtA[i] = 0x12
    +		}
    +	}
    +	{
    +		size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
    +		if err != nil {
    +			return 0, err
    +		}
    +		i -= size
    +		i = encodeVarintGenerated(dAtA, i, uint64(size))
    +	}
    +	i--
    +	dAtA[i] = 0xa
    +	return len(dAtA) - i, nil
    +}
    +
    +func (m *ClusterTrustBundleSpec) Marshal() (dAtA []byte, err error) {
    +	size := m.Size()
    +	dAtA = make([]byte, size)
    +	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    +	if err != nil {
    +		return nil, err
    +	}
    +	return dAtA[:n], nil
    +}
    +
    +func (m *ClusterTrustBundleSpec) MarshalTo(dAtA []byte) (int, error) {
    +	size := m.Size()
    +	return m.MarshalToSizedBuffer(dAtA[:size])
    +}
    +
    +func (m *ClusterTrustBundleSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    +	i := len(dAtA)
    +	_ = i
    +	var l int
    +	_ = l
    +	i -= len(m.TrustBundle)
    +	copy(dAtA[i:], m.TrustBundle)
    +	i = encodeVarintGenerated(dAtA, i, uint64(len(m.TrustBundle)))
    +	i--
    +	dAtA[i] = 0x12
    +	i -= len(m.SignerName)
    +	copy(dAtA[i:], m.SignerName)
    +	i = encodeVarintGenerated(dAtA, i, uint64(len(m.SignerName)))
    +	i--
    +	dAtA[i] = 0xa
    +	return len(dAtA) - i, nil
    +}
    +
     func (m ExtraValue) Marshal() (dAtA []byte, err error) {
     	size := m.Size()
     	dAtA = make([]byte, size)
    @@ -755,6 +970,49 @@ func (m *CertificateSigningRequestStatus) Size() (n int) {
     	return n
     }
     
    +func (m *ClusterTrustBundle) Size() (n int) {
    +	if m == nil {
    +		return 0
    +	}
    +	var l int
    +	_ = l
    +	l = m.ObjectMeta.Size()
    +	n += 1 + l + sovGenerated(uint64(l))
    +	l = m.Spec.Size()
    +	n += 1 + l + sovGenerated(uint64(l))
    +	return n
    +}
    +
    +func (m *ClusterTrustBundleList) Size() (n int) {
    +	if m == nil {
    +		return 0
    +	}
    +	var l int
    +	_ = l
    +	l = m.ListMeta.Size()
    +	n += 1 + l + sovGenerated(uint64(l))
    +	if len(m.Items) > 0 {
    +		for _, e := range m.Items {
    +			l = e.Size()
    +			n += 1 + l + sovGenerated(uint64(l))
    +		}
    +	}
    +	return n
    +}
    +
    +func (m *ClusterTrustBundleSpec) Size() (n int) {
    +	if m == nil {
    +		return 0
    +	}
    +	var l int
    +	_ = l
    +	l = len(m.SignerName)
    +	n += 1 + l + sovGenerated(uint64(l))
    +	l = len(m.TrustBundle)
    +	n += 1 + l + sovGenerated(uint64(l))
    +	return n
    +}
    +
     func (m ExtraValue) Size() (n int) {
     	if m == nil {
     		return 0
    @@ -862,6 +1120,44 @@ func (this *CertificateSigningRequestStatus) String() string {
     	}, "")
     	return s
     }
    +func (this *ClusterTrustBundle) String() string {
    +	if this == nil {
    +		return "nil"
    +	}
    +	s := strings.Join([]string{`&ClusterTrustBundle{`,
    +		`ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
    +		`Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ClusterTrustBundleSpec", "ClusterTrustBundleSpec", 1), `&`, ``, 1) + `,`,
    +		`}`,
    +	}, "")
    +	return s
    +}
    +func (this *ClusterTrustBundleList) String() string {
    +	if this == nil {
    +		return "nil"
    +	}
    +	repeatedStringForItems := "[]ClusterTrustBundle{"
    +	for _, f := range this.Items {
    +		repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ClusterTrustBundle", "ClusterTrustBundle", 1), `&`, ``, 1) + ","
    +	}
    +	repeatedStringForItems += "}"
    +	s := strings.Join([]string{`&ClusterTrustBundleList{`,
    +		`ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
    +		`Items:` + repeatedStringForItems + `,`,
    +		`}`,
    +	}, "")
    +	return s
    +}
    +func (this *ClusterTrustBundleSpec) String() string {
    +	if this == nil {
    +		return "nil"
    +	}
    +	s := strings.Join([]string{`&ClusterTrustBundleSpec{`,
    +		`SignerName:` + fmt.Sprintf("%v", this.SignerName) + `,`,
    +		`TrustBundle:` + fmt.Sprintf("%v", this.TrustBundle) + `,`,
    +		`}`,
    +	}, "")
    +	return s
    +}
     func valueToStringGenerated(v interface{}) string {
     	rv := reflect.ValueOf(v)
     	if rv.IsNil() {
    @@ -1892,6 +2188,353 @@ func (m *CertificateSigningRequestStatus) Unmarshal(dAtA []byte) error {
     	}
     	return nil
     }
    +func (m *ClusterTrustBundle) Unmarshal(dAtA []byte) error {
    +	l := len(dAtA)
    +	iNdEx := 0
    +	for iNdEx < l {
    +		preIndex := iNdEx
    +		var wire uint64
    +		for shift := uint(0); ; shift += 7 {
    +			if shift >= 64 {
    +				return ErrIntOverflowGenerated
    +			}
    +			if iNdEx >= l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			b := dAtA[iNdEx]
    +			iNdEx++
    +			wire |= uint64(b&0x7F) << shift
    +			if b < 0x80 {
    +				break
    +			}
    +		}
    +		fieldNum := int32(wire >> 3)
    +		wireType := int(wire & 0x7)
    +		if wireType == 4 {
    +			return fmt.Errorf("proto: ClusterTrustBundle: wiretype end group for non-group")
    +		}
    +		if fieldNum <= 0 {
    +			return fmt.Errorf("proto: ClusterTrustBundle: illegal tag %d (wire type %d)", fieldNum, wire)
    +		}
    +		switch fieldNum {
    +		case 1:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		case 2:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		default:
    +			iNdEx = preIndex
    +			skippy, err := skipGenerated(dAtA[iNdEx:])
    +			if err != nil {
    +				return err
    +			}
    +			if (skippy < 0) || (iNdEx+skippy) < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if (iNdEx + skippy) > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			iNdEx += skippy
    +		}
    +	}
    +
    +	if iNdEx > l {
    +		return io.ErrUnexpectedEOF
    +	}
    +	return nil
    +}
    +func (m *ClusterTrustBundleList) Unmarshal(dAtA []byte) error {
    +	l := len(dAtA)
    +	iNdEx := 0
    +	for iNdEx < l {
    +		preIndex := iNdEx
    +		var wire uint64
    +		for shift := uint(0); ; shift += 7 {
    +			if shift >= 64 {
    +				return ErrIntOverflowGenerated
    +			}
    +			if iNdEx >= l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			b := dAtA[iNdEx]
    +			iNdEx++
    +			wire |= uint64(b&0x7F) << shift
    +			if b < 0x80 {
    +				break
    +			}
    +		}
    +		fieldNum := int32(wire >> 3)
    +		wireType := int(wire & 0x7)
    +		if wireType == 4 {
    +			return fmt.Errorf("proto: ClusterTrustBundleList: wiretype end group for non-group")
    +		}
    +		if fieldNum <= 0 {
    +			return fmt.Errorf("proto: ClusterTrustBundleList: illegal tag %d (wire type %d)", fieldNum, wire)
    +		}
    +		switch fieldNum {
    +		case 1:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		case 2:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.Items = append(m.Items, ClusterTrustBundle{})
    +			if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		default:
    +			iNdEx = preIndex
    +			skippy, err := skipGenerated(dAtA[iNdEx:])
    +			if err != nil {
    +				return err
    +			}
    +			if (skippy < 0) || (iNdEx+skippy) < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if (iNdEx + skippy) > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			iNdEx += skippy
    +		}
    +	}
    +
    +	if iNdEx > l {
    +		return io.ErrUnexpectedEOF
    +	}
    +	return nil
    +}
    +func (m *ClusterTrustBundleSpec) Unmarshal(dAtA []byte) error {
    +	l := len(dAtA)
    +	iNdEx := 0
    +	for iNdEx < l {
    +		preIndex := iNdEx
    +		var wire uint64
    +		for shift := uint(0); ; shift += 7 {
    +			if shift >= 64 {
    +				return ErrIntOverflowGenerated
    +			}
    +			if iNdEx >= l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			b := dAtA[iNdEx]
    +			iNdEx++
    +			wire |= uint64(b&0x7F) << shift
    +			if b < 0x80 {
    +				break
    +			}
    +		}
    +		fieldNum := int32(wire >> 3)
    +		wireType := int(wire & 0x7)
    +		if wireType == 4 {
    +			return fmt.Errorf("proto: ClusterTrustBundleSpec: wiretype end group for non-group")
    +		}
    +		if fieldNum <= 0 {
    +			return fmt.Errorf("proto: ClusterTrustBundleSpec: illegal tag %d (wire type %d)", fieldNum, wire)
    +		}
    +		switch fieldNum {
    +		case 1:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field SignerName", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.SignerName = string(dAtA[iNdEx:postIndex])
    +			iNdEx = postIndex
    +		case 2:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field TrustBundle", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.TrustBundle = string(dAtA[iNdEx:postIndex])
    +			iNdEx = postIndex
    +		default:
    +			iNdEx = preIndex
    +			skippy, err := skipGenerated(dAtA[iNdEx:])
    +			if err != nil {
    +				return err
    +			}
    +			if (skippy < 0) || (iNdEx+skippy) < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if (iNdEx + skippy) > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			iNdEx += skippy
    +		}
    +	}
    +
    +	if iNdEx > l {
    +		return io.ErrUnexpectedEOF
    +	}
    +	return nil
    +}
     func (m *ExtraValue) Unmarshal(dAtA []byte) error {
     	l := len(dAtA)
     	iNdEx := 0
    diff --git a/vendor/k8s.io/api/certificates/v1beta1/generated.proto b/vendor/k8s.io/api/certificates/v1beta1/generated.proto
    index f3ec4c06e..4c9385c19 100644
    --- a/vendor/k8s.io/api/certificates/v1beta1/generated.proto
    +++ b/vendor/k8s.io/api/certificates/v1beta1/generated.proto
    @@ -30,6 +30,8 @@ import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto";
     option go_package = "k8s.io/api/certificates/v1beta1";
     
     // Describes a certificate signing request
    +// +k8s:supportsSubresource=/status
    +// +k8s:supportsSubresource=/approval
     message CertificateSigningRequest {
       // +optional
       optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
    @@ -182,6 +184,11 @@ message CertificateSigningRequestStatus {
       // +listType=map
       // +listMapKey=type
       // +optional
    +  // +k8s:listType=map
    +  // +k8s:listMapKey=type
    +  // +k8s:optional
    +  // +k8s:item(type: "Approved")=+k8s:zeroOrOneOfMember
    +  // +k8s:item(type: "Denied")=+k8s:zeroOrOneOfMember
       repeated CertificateSigningRequestCondition conditions = 1;
     
       // If request was approved, the controller will place the issued certificate here.
    @@ -190,6 +197,79 @@ message CertificateSigningRequestStatus {
       optional bytes certificate = 2;
     }
     
    +// ClusterTrustBundle is a cluster-scoped container for X.509 trust anchors
    +// (root certificates).
    +//
    +// ClusterTrustBundle objects are considered to be readable by any authenticated
    +// user in the cluster, because they can be mounted by pods using the
    +// `clusterTrustBundle` projection.  All service accounts have read access to
    +// ClusterTrustBundles by default.  Users who only have namespace-level access
    +// to a cluster can read ClusterTrustBundles by impersonating a serviceaccount
    +// that they have access to.
    +//
    +// It can be optionally associated with a particular assigner, in which case it
    +// contains one valid set of trust anchors for that signer. Signers may have
    +// multiple associated ClusterTrustBundles; each is an independent set of trust
    +// anchors for that signer. Admission control is used to enforce that only users
    +// with permissions on the signer can create or modify the corresponding bundle.
    +message ClusterTrustBundle {
    +  // metadata contains the object metadata.
    +  // +optional
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
    +
    +  // spec contains the signer (if any) and trust anchors.
    +  optional ClusterTrustBundleSpec spec = 2;
    +}
    +
    +// ClusterTrustBundleList is a collection of ClusterTrustBundle objects
    +message ClusterTrustBundleList {
    +  // metadata contains the list metadata.
    +  //
    +  // +optional
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
    +
    +  // items is a collection of ClusterTrustBundle objects
    +  repeated ClusterTrustBundle items = 2;
    +}
    +
    +// ClusterTrustBundleSpec contains the signer and trust anchors.
    +message ClusterTrustBundleSpec {
    +  // signerName indicates the associated signer, if any.
    +  //
    +  // In order to create or update a ClusterTrustBundle that sets signerName,
    +  // you must have the following cluster-scoped permission:
    +  // group=certificates.k8s.io resource=signers resourceName=
    +  // verb=attest.
    +  //
    +  // If signerName is not empty, then the ClusterTrustBundle object must be
    +  // named with the signer name as a prefix (translating slashes to colons).
    +  // For example, for the signer name `example.com/foo`, valid
    +  // ClusterTrustBundle object names include `example.com:foo:abc` and
    +  // `example.com:foo:v1`.
    +  //
    +  // If signerName is empty, then the ClusterTrustBundle object's name must
    +  // not have such a prefix.
    +  //
    +  // List/watch requests for ClusterTrustBundles can filter on this field
    +  // using a `spec.signerName=NAME` field selector.
    +  //
    +  // +optional
    +  optional string signerName = 1;
    +
    +  // trustBundle contains the individual X.509 trust anchors for this
    +  // bundle, as PEM bundle of PEM-wrapped, DER-formatted X.509 certificates.
    +  //
    +  // The data must consist only of PEM certificate blocks that parse as valid
    +  // X.509 certificates.  Each certificate must include a basic constraints
    +  // extension with the CA bit set.  The API server will reject objects that
    +  // contain duplicate certificates, or that use PEM block headers.
    +  //
    +  // Users of ClusterTrustBundles, including Kubelet, are free to reorder and
    +  // deduplicate certificate blocks in this file according to their own logic,
    +  // as well as to drop PEM block headers and inter-block data.
    +  optional string trustBundle = 2;
    +}
    +
     // ExtraValue masks the value so protobuf can generate
     // +protobuf.nullable=true
     // +protobuf.options.(gogoproto.goproto_stringer)=false
    diff --git a/vendor/k8s.io/api/certificates/v1beta1/register.go b/vendor/k8s.io/api/certificates/v1beta1/register.go
    index b4f3af9b9..800dccd07 100644
    --- a/vendor/k8s.io/api/certificates/v1beta1/register.go
    +++ b/vendor/k8s.io/api/certificates/v1beta1/register.go
    @@ -51,6 +51,8 @@ func addKnownTypes(scheme *runtime.Scheme) error {
     	scheme.AddKnownTypes(SchemeGroupVersion,
     		&CertificateSigningRequest{},
     		&CertificateSigningRequestList{},
    +		&ClusterTrustBundle{},
    +		&ClusterTrustBundleList{},
     	)
     
     	// Add the watch version that applies
    diff --git a/vendor/k8s.io/api/certificates/v1beta1/types.go b/vendor/k8s.io/api/certificates/v1beta1/types.go
    index 7e5a5c198..fadb7e082 100644
    --- a/vendor/k8s.io/api/certificates/v1beta1/types.go
    +++ b/vendor/k8s.io/api/certificates/v1beta1/types.go
    @@ -31,6 +31,8 @@ import (
     // +k8s:prerelease-lifecycle-gen:replacement=certificates.k8s.io,v1,CertificateSigningRequest
     
     // Describes a certificate signing request
    +// +k8s:supportsSubresource=/status
    +// +k8s:supportsSubresource=/approval
     type CertificateSigningRequest struct {
     	metav1.TypeMeta `json:",inline"`
     	// +optional
    @@ -175,6 +177,11 @@ type CertificateSigningRequestStatus struct {
     	// +listType=map
     	// +listMapKey=type
     	// +optional
    +	// +k8s:listType=map
    +	// +k8s:listMapKey=type
    +	// +k8s:optional
    +	// +k8s:item(type: "Approved")=+k8s:zeroOrOneOfMember
    +	// +k8s:item(type: "Denied")=+k8s:zeroOrOneOfMember
     	Conditions []CertificateSigningRequestCondition `json:"conditions,omitempty" protobuf:"bytes,1,rep,name=conditions"`
     
     	// If request was approved, the controller will place the issued certificate here.
    @@ -262,3 +269,88 @@ const (
     	UsageMicrosoftSGC      KeyUsage = "microsoft sgc"
     	UsageNetscapeSGC       KeyUsage = "netscape sgc"
     )
    +
    +// +genclient
    +// +genclient:nonNamespaced
    +// +k8s:prerelease-lifecycle-gen:introduced=1.33
    +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    +
    +// ClusterTrustBundle is a cluster-scoped container for X.509 trust anchors
    +// (root certificates).
    +//
    +// ClusterTrustBundle objects are considered to be readable by any authenticated
    +// user in the cluster, because they can be mounted by pods using the
    +// `clusterTrustBundle` projection.  All service accounts have read access to
    +// ClusterTrustBundles by default.  Users who only have namespace-level access
    +// to a cluster can read ClusterTrustBundles by impersonating a serviceaccount
    +// that they have access to.
    +//
    +// It can be optionally associated with a particular assigner, in which case it
    +// contains one valid set of trust anchors for that signer. Signers may have
    +// multiple associated ClusterTrustBundles; each is an independent set of trust
    +// anchors for that signer. Admission control is used to enforce that only users
    +// with permissions on the signer can create or modify the corresponding bundle.
    +type ClusterTrustBundle struct {
    +	metav1.TypeMeta `json:",inline"`
    +
    +	// metadata contains the object metadata.
    +	// +optional
    +	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
    +
    +	// spec contains the signer (if any) and trust anchors.
    +	Spec ClusterTrustBundleSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"`
    +}
    +
    +// ClusterTrustBundleSpec contains the signer and trust anchors.
    +type ClusterTrustBundleSpec struct {
    +	// signerName indicates the associated signer, if any.
    +	//
    +	// In order to create or update a ClusterTrustBundle that sets signerName,
    +	// you must have the following cluster-scoped permission:
    +	// group=certificates.k8s.io resource=signers resourceName=
    +	// verb=attest.
    +	//
    +	// If signerName is not empty, then the ClusterTrustBundle object must be
    +	// named with the signer name as a prefix (translating slashes to colons).
    +	// For example, for the signer name `example.com/foo`, valid
    +	// ClusterTrustBundle object names include `example.com:foo:abc` and
    +	// `example.com:foo:v1`.
    +	//
    +	// If signerName is empty, then the ClusterTrustBundle object's name must
    +	// not have such a prefix.
    +	//
    +	// List/watch requests for ClusterTrustBundles can filter on this field
    +	// using a `spec.signerName=NAME` field selector.
    +	//
    +	// +optional
    +	SignerName string `json:"signerName,omitempty" protobuf:"bytes,1,opt,name=signerName"`
    +
    +	// trustBundle contains the individual X.509 trust anchors for this
    +	// bundle, as PEM bundle of PEM-wrapped, DER-formatted X.509 certificates.
    +	//
    +	// The data must consist only of PEM certificate blocks that parse as valid
    +	// X.509 certificates.  Each certificate must include a basic constraints
    +	// extension with the CA bit set.  The API server will reject objects that
    +	// contain duplicate certificates, or that use PEM block headers.
    +	//
    +	// Users of ClusterTrustBundles, including Kubelet, are free to reorder and
    +	// deduplicate certificate blocks in this file according to their own logic,
    +	// as well as to drop PEM block headers and inter-block data.
    +	TrustBundle string `json:"trustBundle" protobuf:"bytes,2,opt,name=trustBundle"`
    +}
    +
    +// +k8s:prerelease-lifecycle-gen:introduced=1.33
    +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    +
    +// ClusterTrustBundleList is a collection of ClusterTrustBundle objects
    +type ClusterTrustBundleList struct {
    +	metav1.TypeMeta `json:",inline"`
    +
    +	// metadata contains the list metadata.
    +	//
    +	// +optional
    +	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
    +
    +	// items is a collection of ClusterTrustBundle objects
    +	Items []ClusterTrustBundle `json:"items" protobuf:"bytes,2,rep,name=items"`
    +}
    diff --git a/vendor/k8s.io/api/certificates/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/certificates/v1beta1/types_swagger_doc_generated.go
    index f9ab1f13d..58c69e54d 100644
    --- a/vendor/k8s.io/api/certificates/v1beta1/types_swagger_doc_generated.go
    +++ b/vendor/k8s.io/api/certificates/v1beta1/types_swagger_doc_generated.go
    @@ -75,4 +75,34 @@ func (CertificateSigningRequestStatus) SwaggerDoc() map[string]string {
     	return map_CertificateSigningRequestStatus
     }
     
    +var map_ClusterTrustBundle = map[string]string{
    +	"":         "ClusterTrustBundle is a cluster-scoped container for X.509 trust anchors (root certificates).\n\nClusterTrustBundle objects are considered to be readable by any authenticated user in the cluster, because they can be mounted by pods using the `clusterTrustBundle` projection.  All service accounts have read access to ClusterTrustBundles by default.  Users who only have namespace-level access to a cluster can read ClusterTrustBundles by impersonating a serviceaccount that they have access to.\n\nIt can be optionally associated with a particular assigner, in which case it contains one valid set of trust anchors for that signer. Signers may have multiple associated ClusterTrustBundles; each is an independent set of trust anchors for that signer. Admission control is used to enforce that only users with permissions on the signer can create or modify the corresponding bundle.",
    +	"metadata": "metadata contains the object metadata.",
    +	"spec":     "spec contains the signer (if any) and trust anchors.",
    +}
    +
    +func (ClusterTrustBundle) SwaggerDoc() map[string]string {
    +	return map_ClusterTrustBundle
    +}
    +
    +var map_ClusterTrustBundleList = map[string]string{
    +	"":         "ClusterTrustBundleList is a collection of ClusterTrustBundle objects",
    +	"metadata": "metadata contains the list metadata.",
    +	"items":    "items is a collection of ClusterTrustBundle objects",
    +}
    +
    +func (ClusterTrustBundleList) SwaggerDoc() map[string]string {
    +	return map_ClusterTrustBundleList
    +}
    +
    +var map_ClusterTrustBundleSpec = map[string]string{
    +	"":            "ClusterTrustBundleSpec contains the signer and trust anchors.",
    +	"signerName":  "signerName indicates the associated signer, if any.\n\nIn order to create or update a ClusterTrustBundle that sets signerName, you must have the following cluster-scoped permission: group=certificates.k8s.io resource=signers resourceName= verb=attest.\n\nIf signerName is not empty, then the ClusterTrustBundle object must be named with the signer name as a prefix (translating slashes to colons). For example, for the signer name `example.com/foo`, valid ClusterTrustBundle object names include `example.com:foo:abc` and `example.com:foo:v1`.\n\nIf signerName is empty, then the ClusterTrustBundle object's name must not have such a prefix.\n\nList/watch requests for ClusterTrustBundles can filter on this field using a `spec.signerName=NAME` field selector.",
    +	"trustBundle": "trustBundle contains the individual X.509 trust anchors for this bundle, as PEM bundle of PEM-wrapped, DER-formatted X.509 certificates.\n\nThe data must consist only of PEM certificate blocks that parse as valid X.509 certificates.  Each certificate must include a basic constraints extension with the CA bit set.  The API server will reject objects that contain duplicate certificates, or that use PEM block headers.\n\nUsers of ClusterTrustBundles, including Kubelet, are free to reorder and deduplicate certificate blocks in this file according to their own logic, as well as to drop PEM block headers and inter-block data.",
    +}
    +
    +func (ClusterTrustBundleSpec) SwaggerDoc() map[string]string {
    +	return map_ClusterTrustBundleSpec
    +}
    +
     // AUTO-GENERATED FUNCTIONS END HERE
    diff --git a/vendor/k8s.io/api/certificates/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/certificates/v1beta1/zz_generated.deepcopy.go
    index a315e2ac6..854e83473 100644
    --- a/vendor/k8s.io/api/certificates/v1beta1/zz_generated.deepcopy.go
    +++ b/vendor/k8s.io/api/certificates/v1beta1/zz_generated.deepcopy.go
    @@ -188,6 +188,82 @@ func (in *CertificateSigningRequestStatus) DeepCopy() *CertificateSigningRequest
     	return out
     }
     
    +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
    +func (in *ClusterTrustBundle) DeepCopyInto(out *ClusterTrustBundle) {
    +	*out = *in
    +	out.TypeMeta = in.TypeMeta
    +	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
    +	out.Spec = in.Spec
    +	return
    +}
    +
    +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterTrustBundle.
    +func (in *ClusterTrustBundle) DeepCopy() *ClusterTrustBundle {
    +	if in == nil {
    +		return nil
    +	}
    +	out := new(ClusterTrustBundle)
    +	in.DeepCopyInto(out)
    +	return out
    +}
    +
    +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
    +func (in *ClusterTrustBundle) DeepCopyObject() runtime.Object {
    +	if c := in.DeepCopy(); c != nil {
    +		return c
    +	}
    +	return nil
    +}
    +
    +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
    +func (in *ClusterTrustBundleList) DeepCopyInto(out *ClusterTrustBundleList) {
    +	*out = *in
    +	out.TypeMeta = in.TypeMeta
    +	in.ListMeta.DeepCopyInto(&out.ListMeta)
    +	if in.Items != nil {
    +		in, out := &in.Items, &out.Items
    +		*out = make([]ClusterTrustBundle, len(*in))
    +		for i := range *in {
    +			(*in)[i].DeepCopyInto(&(*out)[i])
    +		}
    +	}
    +	return
    +}
    +
    +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterTrustBundleList.
    +func (in *ClusterTrustBundleList) DeepCopy() *ClusterTrustBundleList {
    +	if in == nil {
    +		return nil
    +	}
    +	out := new(ClusterTrustBundleList)
    +	in.DeepCopyInto(out)
    +	return out
    +}
    +
    +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
    +func (in *ClusterTrustBundleList) DeepCopyObject() runtime.Object {
    +	if c := in.DeepCopy(); c != nil {
    +		return c
    +	}
    +	return nil
    +}
    +
    +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
    +func (in *ClusterTrustBundleSpec) DeepCopyInto(out *ClusterTrustBundleSpec) {
    +	*out = *in
    +	return
    +}
    +
    +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterTrustBundleSpec.
    +func (in *ClusterTrustBundleSpec) DeepCopy() *ClusterTrustBundleSpec {
    +	if in == nil {
    +		return nil
    +	}
    +	out := new(ClusterTrustBundleSpec)
    +	in.DeepCopyInto(out)
    +	return out
    +}
    +
     // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
     func (in ExtraValue) DeepCopyInto(out *ExtraValue) {
     	{
    diff --git a/vendor/k8s.io/api/certificates/v1beta1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/certificates/v1beta1/zz_generated.prerelease-lifecycle.go
    index 480a32936..062b46f16 100644
    --- a/vendor/k8s.io/api/certificates/v1beta1/zz_generated.prerelease-lifecycle.go
    +++ b/vendor/k8s.io/api/certificates/v1beta1/zz_generated.prerelease-lifecycle.go
    @@ -72,3 +72,39 @@ func (in *CertificateSigningRequestList) APILifecycleReplacement() schema.GroupV
     func (in *CertificateSigningRequestList) APILifecycleRemoved() (major, minor int) {
     	return 1, 22
     }
    +
    +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
    +func (in *ClusterTrustBundle) APILifecycleIntroduced() (major, minor int) {
    +	return 1, 33
    +}
    +
    +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or  "k8s:prerelease-lifecycle-gen:introduced" plus three minor.
    +func (in *ClusterTrustBundle) APILifecycleDeprecated() (major, minor int) {
    +	return 1, 36
    +}
    +
    +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or  "k8s:prerelease-lifecycle-gen:deprecated" plus three minor.
    +func (in *ClusterTrustBundle) APILifecycleRemoved() (major, minor int) {
    +	return 1, 39
    +}
    +
    +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
    +func (in *ClusterTrustBundleList) APILifecycleIntroduced() (major, minor int) {
    +	return 1, 33
    +}
    +
    +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or  "k8s:prerelease-lifecycle-gen:introduced" plus three minor.
    +func (in *ClusterTrustBundleList) APILifecycleDeprecated() (major, minor int) {
    +	return 1, 36
    +}
    +
    +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or  "k8s:prerelease-lifecycle-gen:deprecated" plus three minor.
    +func (in *ClusterTrustBundleList) APILifecycleRemoved() (major, minor int) {
    +	return 1, 39
    +}
    diff --git a/vendor/k8s.io/api/coordination/v1/doc.go b/vendor/k8s.io/api/coordination/v1/doc.go
    index 9b2fbbda3..82ae6340c 100644
    --- a/vendor/k8s.io/api/coordination/v1/doc.go
    +++ b/vendor/k8s.io/api/coordination/v1/doc.go
    @@ -21,4 +21,4 @@ limitations under the License.
     
     // +groupName=coordination.k8s.io
     
    -package v1 // import "k8s.io/api/coordination/v1"
    +package v1
    diff --git a/vendor/k8s.io/api/coordination/v1alpha2/doc.go b/vendor/k8s.io/api/coordination/v1alpha2/doc.go
    index 5e6d65530..dff7df47f 100644
    --- a/vendor/k8s.io/api/coordination/v1alpha2/doc.go
    +++ b/vendor/k8s.io/api/coordination/v1alpha2/doc.go
    @@ -21,4 +21,4 @@ limitations under the License.
     
     // +groupName=coordination.k8s.io
     
    -package v1alpha2 // import "k8s.io/api/coordination/v1alpha2"
    +package v1alpha2
    diff --git a/vendor/k8s.io/api/coordination/v1alpha2/generated.proto b/vendor/k8s.io/api/coordination/v1alpha2/generated.proto
    index 7e56cd7f9..250c6113e 100644
    --- a/vendor/k8s.io/api/coordination/v1alpha2/generated.proto
    +++ b/vendor/k8s.io/api/coordination/v1alpha2/generated.proto
    @@ -92,8 +92,6 @@ message LeaseCandidateSpec {
       // If multiple candidates for the same Lease return different strategies, the strategy provided
       // by the candidate with the latest BinaryVersion will be used. If there is still conflict,
       // this is a user error and coordinated leader election will not operate the Lease until resolved.
    -  // (Alpha) Using this field requires the CoordinatedLeaderElection feature gate to be enabled.
    -  // +featureGate=CoordinatedLeaderElection
       // +required
       optional string strategy = 6;
     }
    diff --git a/vendor/k8s.io/api/coordination/v1alpha2/types.go b/vendor/k8s.io/api/coordination/v1alpha2/types.go
    index 2f53b097a..13e1deb06 100644
    --- a/vendor/k8s.io/api/coordination/v1alpha2/types.go
    +++ b/vendor/k8s.io/api/coordination/v1alpha2/types.go
    @@ -73,8 +73,6 @@ type LeaseCandidateSpec struct {
     	// If multiple candidates for the same Lease return different strategies, the strategy provided
     	// by the candidate with the latest BinaryVersion will be used. If there is still conflict,
     	// this is a user error and coordinated leader election will not operate the Lease until resolved.
    -	// (Alpha) Using this field requires the CoordinatedLeaderElection feature gate to be enabled.
    -	// +featureGate=CoordinatedLeaderElection
     	// +required
     	Strategy v1.CoordinatedLeaseStrategy `json:"strategy,omitempty" protobuf:"bytes,6,opt,name=strategy"`
     }
    diff --git a/vendor/k8s.io/api/coordination/v1alpha2/types_swagger_doc_generated.go b/vendor/k8s.io/api/coordination/v1alpha2/types_swagger_doc_generated.go
    index 39534e6ad..f7e29849e 100644
    --- a/vendor/k8s.io/api/coordination/v1alpha2/types_swagger_doc_generated.go
    +++ b/vendor/k8s.io/api/coordination/v1alpha2/types_swagger_doc_generated.go
    @@ -54,7 +54,7 @@ var map_LeaseCandidateSpec = map[string]string{
     	"renewTime":        "RenewTime is the time that the LeaseCandidate was last updated. Any time a Lease needs to do leader election, the PingTime field is updated to signal to the LeaseCandidate that they should update the RenewTime. Old LeaseCandidate objects are also garbage collected if it has been hours since the last renew. The PingTime field is updated regularly to prevent garbage collection for still active LeaseCandidates.",
     	"binaryVersion":    "BinaryVersion is the binary version. It must be in a semver format without leading `v`. This field is required.",
     	"emulationVersion": "EmulationVersion is the emulation version. It must be in a semver format without leading `v`. EmulationVersion must be less than or equal to BinaryVersion. This field is required when strategy is \"OldestEmulationVersion\"",
    -	"strategy":         "Strategy is the strategy that coordinated leader election will use for picking the leader. If multiple candidates for the same Lease return different strategies, the strategy provided by the candidate with the latest BinaryVersion will be used. If there is still conflict, this is a user error and coordinated leader election will not operate the Lease until resolved. (Alpha) Using this field requires the CoordinatedLeaderElection feature gate to be enabled.",
    +	"strategy":         "Strategy is the strategy that coordinated leader election will use for picking the leader. If multiple candidates for the same Lease return different strategies, the strategy provided by the candidate with the latest BinaryVersion will be used. If there is still conflict, this is a user error and coordinated leader election will not operate the Lease until resolved.",
     }
     
     func (LeaseCandidateSpec) SwaggerDoc() map[string]string {
    diff --git a/vendor/k8s.io/api/coordination/v1beta1/doc.go b/vendor/k8s.io/api/coordination/v1beta1/doc.go
    index e733411aa..cab8becf6 100644
    --- a/vendor/k8s.io/api/coordination/v1beta1/doc.go
    +++ b/vendor/k8s.io/api/coordination/v1beta1/doc.go
    @@ -21,4 +21,4 @@ limitations under the License.
     
     // +groupName=coordination.k8s.io
     
    -package v1beta1 // import "k8s.io/api/coordination/v1beta1"
    +package v1beta1
    diff --git a/vendor/k8s.io/api/coordination/v1beta1/generated.pb.go b/vendor/k8s.io/api/coordination/v1beta1/generated.pb.go
    index bea9b8146..52fd4167f 100644
    --- a/vendor/k8s.io/api/coordination/v1beta1/generated.pb.go
    +++ b/vendor/k8s.io/api/coordination/v1beta1/generated.pb.go
    @@ -74,10 +74,94 @@ func (m *Lease) XXX_DiscardUnknown() {
     
     var xxx_messageInfo_Lease proto.InternalMessageInfo
     
    +func (m *LeaseCandidate) Reset()      { *m = LeaseCandidate{} }
    +func (*LeaseCandidate) ProtoMessage() {}
    +func (*LeaseCandidate) Descriptor() ([]byte, []int) {
    +	return fileDescriptor_8d4e223b8bb23da3, []int{1}
    +}
    +func (m *LeaseCandidate) XXX_Unmarshal(b []byte) error {
    +	return m.Unmarshal(b)
    +}
    +func (m *LeaseCandidate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    +	b = b[:cap(b)]
    +	n, err := m.MarshalToSizedBuffer(b)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return b[:n], nil
    +}
    +func (m *LeaseCandidate) XXX_Merge(src proto.Message) {
    +	xxx_messageInfo_LeaseCandidate.Merge(m, src)
    +}
    +func (m *LeaseCandidate) XXX_Size() int {
    +	return m.Size()
    +}
    +func (m *LeaseCandidate) XXX_DiscardUnknown() {
    +	xxx_messageInfo_LeaseCandidate.DiscardUnknown(m)
    +}
    +
    +var xxx_messageInfo_LeaseCandidate proto.InternalMessageInfo
    +
    +func (m *LeaseCandidateList) Reset()      { *m = LeaseCandidateList{} }
    +func (*LeaseCandidateList) ProtoMessage() {}
    +func (*LeaseCandidateList) Descriptor() ([]byte, []int) {
    +	return fileDescriptor_8d4e223b8bb23da3, []int{2}
    +}
    +func (m *LeaseCandidateList) XXX_Unmarshal(b []byte) error {
    +	return m.Unmarshal(b)
    +}
    +func (m *LeaseCandidateList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    +	b = b[:cap(b)]
    +	n, err := m.MarshalToSizedBuffer(b)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return b[:n], nil
    +}
    +func (m *LeaseCandidateList) XXX_Merge(src proto.Message) {
    +	xxx_messageInfo_LeaseCandidateList.Merge(m, src)
    +}
    +func (m *LeaseCandidateList) XXX_Size() int {
    +	return m.Size()
    +}
    +func (m *LeaseCandidateList) XXX_DiscardUnknown() {
    +	xxx_messageInfo_LeaseCandidateList.DiscardUnknown(m)
    +}
    +
    +var xxx_messageInfo_LeaseCandidateList proto.InternalMessageInfo
    +
    +func (m *LeaseCandidateSpec) Reset()      { *m = LeaseCandidateSpec{} }
    +func (*LeaseCandidateSpec) ProtoMessage() {}
    +func (*LeaseCandidateSpec) Descriptor() ([]byte, []int) {
    +	return fileDescriptor_8d4e223b8bb23da3, []int{3}
    +}
    +func (m *LeaseCandidateSpec) XXX_Unmarshal(b []byte) error {
    +	return m.Unmarshal(b)
    +}
    +func (m *LeaseCandidateSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    +	b = b[:cap(b)]
    +	n, err := m.MarshalToSizedBuffer(b)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return b[:n], nil
    +}
    +func (m *LeaseCandidateSpec) XXX_Merge(src proto.Message) {
    +	xxx_messageInfo_LeaseCandidateSpec.Merge(m, src)
    +}
    +func (m *LeaseCandidateSpec) XXX_Size() int {
    +	return m.Size()
    +}
    +func (m *LeaseCandidateSpec) XXX_DiscardUnknown() {
    +	xxx_messageInfo_LeaseCandidateSpec.DiscardUnknown(m)
    +}
    +
    +var xxx_messageInfo_LeaseCandidateSpec proto.InternalMessageInfo
    +
     func (m *LeaseList) Reset()      { *m = LeaseList{} }
     func (*LeaseList) ProtoMessage() {}
     func (*LeaseList) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_8d4e223b8bb23da3, []int{1}
    +	return fileDescriptor_8d4e223b8bb23da3, []int{4}
     }
     func (m *LeaseList) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -105,7 +189,7 @@ var xxx_messageInfo_LeaseList proto.InternalMessageInfo
     func (m *LeaseSpec) Reset()      { *m = LeaseSpec{} }
     func (*LeaseSpec) ProtoMessage() {}
     func (*LeaseSpec) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_8d4e223b8bb23da3, []int{2}
    +	return fileDescriptor_8d4e223b8bb23da3, []int{5}
     }
     func (m *LeaseSpec) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -132,6 +216,9 @@ var xxx_messageInfo_LeaseSpec proto.InternalMessageInfo
     
     func init() {
     	proto.RegisterType((*Lease)(nil), "k8s.io.api.coordination.v1beta1.Lease")
    +	proto.RegisterType((*LeaseCandidate)(nil), "k8s.io.api.coordination.v1beta1.LeaseCandidate")
    +	proto.RegisterType((*LeaseCandidateList)(nil), "k8s.io.api.coordination.v1beta1.LeaseCandidateList")
    +	proto.RegisterType((*LeaseCandidateSpec)(nil), "k8s.io.api.coordination.v1beta1.LeaseCandidateSpec")
     	proto.RegisterType((*LeaseList)(nil), "k8s.io.api.coordination.v1beta1.LeaseList")
     	proto.RegisterType((*LeaseSpec)(nil), "k8s.io.api.coordination.v1beta1.LeaseSpec")
     }
    @@ -141,45 +228,54 @@ func init() {
     }
     
     var fileDescriptor_8d4e223b8bb23da3 = []byte{
    -	// 600 bytes of a gzipped FileDescriptorProto
    -	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x94, 0xdf, 0x4e, 0xd4, 0x4e,
    -	0x14, 0xc7, 0xb7, 0xb0, 0xfb, 0xfb, 0xb1, 0xb3, 0xf2, 0x27, 0x23, 0x17, 0x0d, 0x17, 0x2d, 0xe1,
    -	0xc2, 0x10, 0x12, 0xa7, 0x82, 0xc6, 0x18, 0x13, 0x13, 0x2d, 0x9a, 0x48, 0x2c, 0xd1, 0x14, 0xae,
    -	0x0c, 0x89, 0xce, 0xb6, 0x87, 0xee, 0x08, 0xed, 0xd4, 0x99, 0x59, 0x0c, 0x77, 0x3e, 0x82, 0x4f,
    -	0xa3, 0xf1, 0x0d, 0xb8, 0xe4, 0x92, 0xab, 0x46, 0xc6, 0xb7, 0xf0, 0xca, 0xcc, 0x6c, 0x61, 0x61,
    -	0x81, 0xb0, 0xf1, 0x6e, 0xe7, 0x9c, 0xf3, 0xfd, 0x9c, 0xef, 0x9c, 0xb3, 0x53, 0x14, 0xec, 0x3d,
    -	0x91, 0x84, 0xf1, 0x80, 0x96, 0x2c, 0x48, 0x38, 0x17, 0x29, 0x2b, 0xa8, 0x62, 0xbc, 0x08, 0x0e,
    -	0x56, 0xbb, 0xa0, 0xe8, 0x6a, 0x90, 0x41, 0x01, 0x82, 0x2a, 0x48, 0x49, 0x29, 0xb8, 0xe2, 0xd8,
    -	0x1f, 0x08, 0x08, 0x2d, 0x19, 0xb9, 0x28, 0x20, 0xb5, 0x60, 0xe1, 0x7e, 0xc6, 0x54, 0xaf, 0xdf,
    -	0x25, 0x09, 0xcf, 0x83, 0x8c, 0x67, 0x3c, 0xb0, 0xba, 0x6e, 0x7f, 0xd7, 0x9e, 0xec, 0xc1, 0xfe,
    -	0x1a, 0xf0, 0x16, 0x56, 0x6e, 0x36, 0x30, 0xda, 0x7b, 0xe1, 0xd1, 0xb0, 0x36, 0xa7, 0x49, 0x8f,
    -	0x15, 0x20, 0x0e, 0x83, 0x72, 0x2f, 0x33, 0x01, 0x19, 0xe4, 0xa0, 0xe8, 0x75, 0xaa, 0xe0, 0x26,
    -	0x95, 0xe8, 0x17, 0x8a, 0xe5, 0x70, 0x45, 0xf0, 0xf8, 0x36, 0x81, 0x4c, 0x7a, 0x90, 0xd3, 0x51,
    -	0xdd, 0xd2, 0x0f, 0x07, 0xb5, 0x22, 0xa0, 0x12, 0xf0, 0x47, 0x34, 0x65, 0xdc, 0xa4, 0x54, 0x51,
    -	0xd7, 0x59, 0x74, 0x96, 0x3b, 0x6b, 0x0f, 0xc8, 0x70, 0x6e, 0xe7, 0x50, 0x52, 0xee, 0x65, 0x26,
    -	0x20, 0x89, 0xa9, 0x26, 0x07, 0xab, 0xe4, 0x6d, 0xf7, 0x13, 0x24, 0x6a, 0x13, 0x14, 0x0d, 0xf1,
    -	0x51, 0xe5, 0x37, 0x74, 0xe5, 0xa3, 0x61, 0x2c, 0x3e, 0xa7, 0xe2, 0x08, 0x35, 0x65, 0x09, 0x89,
    -	0x3b, 0x61, 0xe9, 0x2b, 0xe4, 0x96, 0xad, 0x10, 0xeb, 0x6b, 0xab, 0x84, 0x24, 0xbc, 0x53, 0x73,
    -	0x9b, 0xe6, 0x14, 0x5b, 0xca, 0xd2, 0x77, 0x07, 0xb5, 0x6d, 0x45, 0xc4, 0xa4, 0xc2, 0x3b, 0x57,
    -	0xdc, 0x93, 0xf1, 0xdc, 0x1b, 0xb5, 0xf5, 0x3e, 0x57, 0xf7, 0x98, 0x3a, 0x8b, 0x5c, 0x70, 0xfe,
    -	0x06, 0xb5, 0x98, 0x82, 0x5c, 0xba, 0x13, 0x8b, 0x93, 0xcb, 0x9d, 0xb5, 0x7b, 0xe3, 0x59, 0x0f,
    -	0xa7, 0x6b, 0x64, 0x6b, 0xc3, 0x88, 0xe3, 0x01, 0x63, 0xe9, 0x67, 0xb3, 0x36, 0x6e, 0x2e, 0x83,
    -	0x9f, 0xa2, 0x99, 0x1e, 0xdf, 0x4f, 0x41, 0x6c, 0xa4, 0x50, 0x28, 0xa6, 0x0e, 0xad, 0xfd, 0x76,
    -	0x88, 0x75, 0xe5, 0xcf, 0xbc, 0xbe, 0x94, 0x89, 0x47, 0x2a, 0x71, 0x84, 0xe6, 0xf7, 0x0d, 0xe8,
    -	0x65, 0x5f, 0xd8, 0xf6, 0x5b, 0x90, 0xf0, 0x22, 0x95, 0x76, 0xc0, 0xad, 0xd0, 0xd5, 0x95, 0x3f,
    -	0x1f, 0x5d, 0x93, 0x8f, 0xaf, 0x55, 0xe1, 0x2e, 0xea, 0xd0, 0xe4, 0x73, 0x9f, 0x09, 0xd8, 0x66,
    -	0x39, 0xb8, 0x93, 0x76, 0x8a, 0xc1, 0x78, 0x53, 0xdc, 0x64, 0x89, 0xe0, 0x46, 0x16, 0xce, 0xea,
    -	0xca, 0xef, 0xbc, 0x18, 0x72, 0xe2, 0x8b, 0x50, 0xbc, 0x83, 0xda, 0x02, 0x0a, 0xf8, 0x62, 0x3b,
    -	0x34, 0xff, 0xad, 0xc3, 0xb4, 0xae, 0xfc, 0x76, 0x7c, 0x46, 0x89, 0x87, 0x40, 0xfc, 0x1c, 0xcd,
    -	0xd9, 0x9b, 0x6d, 0x0b, 0x5a, 0x48, 0x66, 0xee, 0x26, 0xdd, 0x96, 0x9d, 0xc5, 0xbc, 0xae, 0xfc,
    -	0xb9, 0x68, 0x24, 0x17, 0x5f, 0xa9, 0xc6, 0x1f, 0xd0, 0x94, 0x54, 0xe6, 0x7d, 0x64, 0x87, 0xee,
    -	0x7f, 0x76, 0x0f, 0xeb, 0xe6, 0x2f, 0xb1, 0x55, 0xc7, 0xfe, 0x54, 0xfe, 0xc3, 0x9b, 0xdf, 0x3e,
    -	0x59, 0x3f, 0x3b, 0x43, 0x3a, 0x58, 0x70, 0x2d, 0x8b, 0xcf, 0xa1, 0xf8, 0x19, 0x9a, 0x2d, 0x05,
    -	0xec, 0x82, 0x10, 0x90, 0x0e, 0xb6, 0xeb, 0xfe, 0x6f, 0xfb, 0xdc, 0xd5, 0x95, 0x3f, 0xfb, 0xee,
    -	0x72, 0x2a, 0x1e, 0xad, 0x0d, 0x5f, 0x1d, 0x9d, 0x7a, 0x8d, 0xe3, 0x53, 0xaf, 0x71, 0x72, 0xea,
    -	0x35, 0xbe, 0x6a, 0xcf, 0x39, 0xd2, 0x9e, 0x73, 0xac, 0x3d, 0xe7, 0x44, 0x7b, 0xce, 0x2f, 0xed,
    -	0x39, 0xdf, 0x7e, 0x7b, 0x8d, 0xf7, 0xfe, 0x2d, 0x1f, 0xc8, 0xbf, 0x01, 0x00, 0x00, 0xff, 0xff,
    -	0x57, 0x93, 0xf3, 0xef, 0x42, 0x05, 0x00, 0x00,
    +	// 750 bytes of a gzipped FileDescriptorProto
    +	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x56, 0xdd, 0x4e, 0x1b, 0x39,
    +	0x18, 0xcd, 0x40, 0xb2, 0x9b, 0x38, 0x04, 0xb2, 0x5e, 0x56, 0x1a, 0x71, 0x31, 0x83, 0x72, 0xb1,
    +	0x42, 0x48, 0xeb, 0x59, 0x60, 0xb5, 0x5a, 0x6d, 0x55, 0xa9, 0x1d, 0x40, 0x2d, 0x6a, 0x68, 0x91,
    +	0xa1, 0x95, 0x5a, 0x21, 0xb5, 0xce, 0x8c, 0x99, 0xb8, 0x30, 0x3f, 0xf5, 0x38, 0x54, 0xb9, 0xeb,
    +	0x23, 0xf4, 0x69, 0x5a, 0xf5, 0x0d, 0xd2, 0x3b, 0x2e, 0xb9, 0x8a, 0xca, 0x54, 0xea, 0x43, 0xf4,
    +	0xaa, 0xb2, 0x33, 0xf9, 0x27, 0x22, 0x6d, 0x11, 0x77, 0xf1, 0xf7, 0x9d, 0x73, 0xfc, 0x1d, 0xfb,
    +	0x38, 0x1a, 0x60, 0x1d, 0xff, 0x17, 0x23, 0x16, 0x5a, 0x24, 0x62, 0x96, 0x13, 0x86, 0xdc, 0x65,
    +	0x01, 0x11, 0x2c, 0x0c, 0xac, 0xd3, 0xb5, 0x1a, 0x15, 0x64, 0xcd, 0xf2, 0x68, 0x40, 0x39, 0x11,
    +	0xd4, 0x45, 0x11, 0x0f, 0x45, 0x08, 0xcd, 0x0e, 0x01, 0x91, 0x88, 0xa1, 0x41, 0x02, 0x4a, 0x09,
    +	0x4b, 0x7f, 0x79, 0x4c, 0xd4, 0x1b, 0x35, 0xe4, 0x84, 0xbe, 0xe5, 0x85, 0x5e, 0x68, 0x29, 0x5e,
    +	0xad, 0x71, 0xa4, 0x56, 0x6a, 0xa1, 0x7e, 0x75, 0xf4, 0x96, 0x56, 0x27, 0x0f, 0x30, 0xba, 0xf7,
    +	0xd2, 0x3f, 0x7d, 0xac, 0x4f, 0x9c, 0x3a, 0x0b, 0x28, 0x6f, 0x5a, 0xd1, 0xb1, 0x27, 0x0b, 0xb1,
    +	0xe5, 0x53, 0x41, 0x2e, 0x63, 0x59, 0x93, 0x58, 0xbc, 0x11, 0x08, 0xe6, 0xd3, 0x31, 0xc2, 0xbf,
    +	0x57, 0x11, 0x62, 0xa7, 0x4e, 0x7d, 0x32, 0xca, 0xab, 0xbc, 0xd7, 0x40, 0xae, 0x4a, 0x49, 0x4c,
    +	0xe1, 0x0b, 0x90, 0x97, 0xd3, 0xb8, 0x44, 0x10, 0x5d, 0x5b, 0xd6, 0x56, 0x8a, 0xeb, 0x7f, 0xa3,
    +	0xfe, 0xb9, 0xf5, 0x44, 0x51, 0x74, 0xec, 0xc9, 0x42, 0x8c, 0x24, 0x1a, 0x9d, 0xae, 0xa1, 0x47,
    +	0xb5, 0x97, 0xd4, 0x11, 0xbb, 0x54, 0x10, 0x1b, 0xb6, 0xda, 0x66, 0x26, 0x69, 0x9b, 0xa0, 0x5f,
    +	0xc3, 0x3d, 0x55, 0x58, 0x05, 0xd9, 0x38, 0xa2, 0x8e, 0x3e, 0xa3, 0xd4, 0x57, 0xd1, 0x15, 0xb7,
    +	0x82, 0xd4, 0x5c, 0xfb, 0x11, 0x75, 0xec, 0xb9, 0x54, 0x37, 0x2b, 0x57, 0x58, 0xa9, 0x54, 0x3e,
    +	0x6a, 0x60, 0x5e, 0x21, 0x36, 0x49, 0xe0, 0x32, 0x97, 0x88, 0x9b, 0xb0, 0xf0, 0x78, 0xc8, 0xc2,
    +	0xc6, 0x74, 0x16, 0x7a, 0x03, 0x4e, 0xf4, 0xd2, 0xd2, 0x00, 0x1c, 0x86, 0x56, 0x59, 0x2c, 0xe0,
    +	0xe1, 0x98, 0x1f, 0x34, 0x9d, 0x1f, 0xc9, 0x56, 0x6e, 0xca, 0xe9, 0x66, 0xf9, 0x6e, 0x65, 0xc0,
    +	0xcb, 0x01, 0xc8, 0x31, 0x41, 0xfd, 0x58, 0x9f, 0x59, 0x9e, 0x5d, 0x29, 0xae, 0x5b, 0xdf, 0x69,
    +	0xc6, 0x2e, 0xa5, 0xda, 0xb9, 0x1d, 0xa9, 0x82, 0x3b, 0x62, 0x95, 0x2f, 0xb3, 0xa3, 0x56, 0xa4,
    +	0x4f, 0x68, 0x81, 0xc2, 0x89, 0xac, 0x3e, 0x24, 0x3e, 0x55, 0x5e, 0x0a, 0xf6, 0x6f, 0x29, 0xbf,
    +	0x50, 0xed, 0x36, 0x70, 0x1f, 0x03, 0x9f, 0x82, 0x7c, 0xc4, 0x02, 0xef, 0x80, 0xf9, 0x34, 0x3d,
    +	0x6d, 0x6b, 0x3a, 0xef, 0xbb, 0xcc, 0xe1, 0xa1, 0xa4, 0xd9, 0x73, 0xd2, 0xf8, 0x5e, 0x2a, 0x82,
    +	0x7b, 0x72, 0xf0, 0x10, 0x14, 0x38, 0x0d, 0xe8, 0x6b, 0xa5, 0x3d, 0xfb, 0x63, 0xda, 0x25, 0x39,
    +	0x38, 0xee, 0xaa, 0xe0, 0xbe, 0x20, 0xbc, 0x05, 0x4a, 0x35, 0x16, 0x10, 0xde, 0x7c, 0x42, 0x79,
    +	0xcc, 0xc2, 0x40, 0xcf, 0x2a, 0xb7, 0x7f, 0xa4, 0x6e, 0x4b, 0xf6, 0x60, 0x13, 0x0f, 0x63, 0xe1,
    +	0x16, 0x28, 0x53, 0xbf, 0x71, 0xa2, 0xce, 0xbd, 0xcb, 0xcf, 0x29, 0xbe, 0x9e, 0xf2, 0xcb, 0xdb,
    +	0x23, 0x7d, 0x3c, 0xc6, 0x80, 0x0e, 0xc8, 0xc7, 0x42, 0xbe, 0x72, 0xaf, 0xa9, 0xff, 0xa2, 0xd8,
    +	0xf7, 0xba, 0x39, 0xd8, 0x4f, 0xeb, 0x5f, 0xdb, 0xe6, 0xc6, 0xe4, 0x7f, 0x31, 0xb4, 0xd9, 0x5d,
    +	0x53, 0xb7, 0xf3, 0x0a, 0x53, 0x1a, 0xee, 0x09, 0x57, 0xde, 0x69, 0xa0, 0x73, 0x73, 0x37, 0x10,
    +	0xd5, 0x07, 0xc3, 0x51, 0xfd, 0x73, 0xba, 0xa8, 0x4e, 0x48, 0xe8, 0x87, 0x6c, 0x3a, 0xb8, 0x0a,
    +	0xe6, 0xff, 0x60, 0xbe, 0x1e, 0x9e, 0xb8, 0x94, 0xef, 0xb8, 0x34, 0x10, 0x4c, 0x34, 0xd3, 0x74,
    +	0xc2, 0xa4, 0x6d, 0xce, 0xdf, 0x1f, 0xea, 0xe0, 0x11, 0x24, 0xac, 0x82, 0x45, 0x15, 0xd8, 0xad,
    +	0x06, 0x57, 0xdb, 0xef, 0x53, 0x27, 0x0c, 0xdc, 0x58, 0xe5, 0x35, 0x67, 0xeb, 0x49, 0xdb, 0x5c,
    +	0xac, 0x5e, 0xd2, 0xc7, 0x97, 0xb2, 0x60, 0x0d, 0x14, 0x89, 0xf3, 0xaa, 0xc1, 0x38, 0xfd, 0x99,
    +	0x60, 0x2e, 0x24, 0x6d, 0xb3, 0x78, 0xb7, 0xaf, 0x83, 0x07, 0x45, 0x87, 0xa3, 0x9f, 0xbd, 0xee,
    +	0xe8, 0xdf, 0x01, 0x65, 0xe5, 0xec, 0x80, 0x93, 0x20, 0x66, 0xd2, 0x5b, 0xac, 0xd2, 0x9b, 0xb3,
    +	0x17, 0x65, 0x72, 0xab, 0x23, 0x3d, 0x3c, 0x86, 0x86, 0xcf, 0xc7, 0x92, 0xbb, 0x79, 0xad, 0xa9,
    +	0x85, 0xb7, 0xc1, 0x42, 0xc4, 0xe9, 0x11, 0xe5, 0x9c, 0xba, 0x9d, 0xdb, 0xd5, 0x7f, 0x55, 0xfb,
    +	0xfc, 0x9e, 0xb4, 0xcd, 0x85, 0xbd, 0xe1, 0x16, 0x1e, 0xc5, 0xda, 0xdb, 0xad, 0x0b, 0x23, 0x73,
    +	0x76, 0x61, 0x64, 0xce, 0x2f, 0x8c, 0xcc, 0x9b, 0xc4, 0xd0, 0x5a, 0x89, 0xa1, 0x9d, 0x25, 0x86,
    +	0x76, 0x9e, 0x18, 0xda, 0xa7, 0xc4, 0xd0, 0xde, 0x7e, 0x36, 0x32, 0xcf, 0xcc, 0x2b, 0x3e, 0x50,
    +	0xbe, 0x05, 0x00, 0x00, 0xff, 0xff, 0xff, 0x56, 0x51, 0x57, 0xc2, 0x08, 0x00, 0x00,
     }
     
     func (m *Lease) Marshal() (dAtA []byte, err error) {
    @@ -225,6 +321,163 @@ func (m *Lease) MarshalToSizedBuffer(dAtA []byte) (int, error) {
     	return len(dAtA) - i, nil
     }
     
    +func (m *LeaseCandidate) Marshal() (dAtA []byte, err error) {
    +	size := m.Size()
    +	dAtA = make([]byte, size)
    +	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    +	if err != nil {
    +		return nil, err
    +	}
    +	return dAtA[:n], nil
    +}
    +
    +func (m *LeaseCandidate) MarshalTo(dAtA []byte) (int, error) {
    +	size := m.Size()
    +	return m.MarshalToSizedBuffer(dAtA[:size])
    +}
    +
    +func (m *LeaseCandidate) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    +	i := len(dAtA)
    +	_ = i
    +	var l int
    +	_ = l
    +	{
    +		size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
    +		if err != nil {
    +			return 0, err
    +		}
    +		i -= size
    +		i = encodeVarintGenerated(dAtA, i, uint64(size))
    +	}
    +	i--
    +	dAtA[i] = 0x12
    +	{
    +		size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
    +		if err != nil {
    +			return 0, err
    +		}
    +		i -= size
    +		i = encodeVarintGenerated(dAtA, i, uint64(size))
    +	}
    +	i--
    +	dAtA[i] = 0xa
    +	return len(dAtA) - i, nil
    +}
    +
    +func (m *LeaseCandidateList) Marshal() (dAtA []byte, err error) {
    +	size := m.Size()
    +	dAtA = make([]byte, size)
    +	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    +	if err != nil {
    +		return nil, err
    +	}
    +	return dAtA[:n], nil
    +}
    +
    +func (m *LeaseCandidateList) MarshalTo(dAtA []byte) (int, error) {
    +	size := m.Size()
    +	return m.MarshalToSizedBuffer(dAtA[:size])
    +}
    +
    +func (m *LeaseCandidateList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    +	i := len(dAtA)
    +	_ = i
    +	var l int
    +	_ = l
    +	if len(m.Items) > 0 {
    +		for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
    +			{
    +				size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
    +				if err != nil {
    +					return 0, err
    +				}
    +				i -= size
    +				i = encodeVarintGenerated(dAtA, i, uint64(size))
    +			}
    +			i--
    +			dAtA[i] = 0x12
    +		}
    +	}
    +	{
    +		size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
    +		if err != nil {
    +			return 0, err
    +		}
    +		i -= size
    +		i = encodeVarintGenerated(dAtA, i, uint64(size))
    +	}
    +	i--
    +	dAtA[i] = 0xa
    +	return len(dAtA) - i, nil
    +}
    +
    +func (m *LeaseCandidateSpec) Marshal() (dAtA []byte, err error) {
    +	size := m.Size()
    +	dAtA = make([]byte, size)
    +	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    +	if err != nil {
    +		return nil, err
    +	}
    +	return dAtA[:n], nil
    +}
    +
    +func (m *LeaseCandidateSpec) MarshalTo(dAtA []byte) (int, error) {
    +	size := m.Size()
    +	return m.MarshalToSizedBuffer(dAtA[:size])
    +}
    +
    +func (m *LeaseCandidateSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    +	i := len(dAtA)
    +	_ = i
    +	var l int
    +	_ = l
    +	i -= len(m.Strategy)
    +	copy(dAtA[i:], m.Strategy)
    +	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Strategy)))
    +	i--
    +	dAtA[i] = 0x32
    +	i -= len(m.EmulationVersion)
    +	copy(dAtA[i:], m.EmulationVersion)
    +	i = encodeVarintGenerated(dAtA, i, uint64(len(m.EmulationVersion)))
    +	i--
    +	dAtA[i] = 0x2a
    +	i -= len(m.BinaryVersion)
    +	copy(dAtA[i:], m.BinaryVersion)
    +	i = encodeVarintGenerated(dAtA, i, uint64(len(m.BinaryVersion)))
    +	i--
    +	dAtA[i] = 0x22
    +	if m.RenewTime != nil {
    +		{
    +			size, err := m.RenewTime.MarshalToSizedBuffer(dAtA[:i])
    +			if err != nil {
    +				return 0, err
    +			}
    +			i -= size
    +			i = encodeVarintGenerated(dAtA, i, uint64(size))
    +		}
    +		i--
    +		dAtA[i] = 0x1a
    +	}
    +	if m.PingTime != nil {
    +		{
    +			size, err := m.PingTime.MarshalToSizedBuffer(dAtA[:i])
    +			if err != nil {
    +				return 0, err
    +			}
    +			i -= size
    +			i = encodeVarintGenerated(dAtA, i, uint64(size))
    +		}
    +		i--
    +		dAtA[i] = 0x12
    +	}
    +	i -= len(m.LeaseName)
    +	copy(dAtA[i:], m.LeaseName)
    +	i = encodeVarintGenerated(dAtA, i, uint64(len(m.LeaseName)))
    +	i--
    +	dAtA[i] = 0xa
    +	return len(dAtA) - i, nil
    +}
    +
     func (m *LeaseList) Marshal() (dAtA []byte, err error) {
     	size := m.Size()
     	dAtA = make([]byte, size)
    @@ -374,6 +627,61 @@ func (m *Lease) Size() (n int) {
     	return n
     }
     
    +func (m *LeaseCandidate) Size() (n int) {
    +	if m == nil {
    +		return 0
    +	}
    +	var l int
    +	_ = l
    +	l = m.ObjectMeta.Size()
    +	n += 1 + l + sovGenerated(uint64(l))
    +	l = m.Spec.Size()
    +	n += 1 + l + sovGenerated(uint64(l))
    +	return n
    +}
    +
    +func (m *LeaseCandidateList) Size() (n int) {
    +	if m == nil {
    +		return 0
    +	}
    +	var l int
    +	_ = l
    +	l = m.ListMeta.Size()
    +	n += 1 + l + sovGenerated(uint64(l))
    +	if len(m.Items) > 0 {
    +		for _, e := range m.Items {
    +			l = e.Size()
    +			n += 1 + l + sovGenerated(uint64(l))
    +		}
    +	}
    +	return n
    +}
    +
    +func (m *LeaseCandidateSpec) Size() (n int) {
    +	if m == nil {
    +		return 0
    +	}
    +	var l int
    +	_ = l
    +	l = len(m.LeaseName)
    +	n += 1 + l + sovGenerated(uint64(l))
    +	if m.PingTime != nil {
    +		l = m.PingTime.Size()
    +		n += 1 + l + sovGenerated(uint64(l))
    +	}
    +	if m.RenewTime != nil {
    +		l = m.RenewTime.Size()
    +		n += 1 + l + sovGenerated(uint64(l))
    +	}
    +	l = len(m.BinaryVersion)
    +	n += 1 + l + sovGenerated(uint64(l))
    +	l = len(m.EmulationVersion)
    +	n += 1 + l + sovGenerated(uint64(l))
    +	l = len(m.Strategy)
    +	n += 1 + l + sovGenerated(uint64(l))
    +	return n
    +}
    +
     func (m *LeaseList) Size() (n int) {
     	if m == nil {
     		return 0
    @@ -443,6 +751,48 @@ func (this *Lease) String() string {
     	}, "")
     	return s
     }
    +func (this *LeaseCandidate) String() string {
    +	if this == nil {
    +		return "nil"
    +	}
    +	s := strings.Join([]string{`&LeaseCandidate{`,
    +		`ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
    +		`Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "LeaseCandidateSpec", "LeaseCandidateSpec", 1), `&`, ``, 1) + `,`,
    +		`}`,
    +	}, "")
    +	return s
    +}
    +func (this *LeaseCandidateList) String() string {
    +	if this == nil {
    +		return "nil"
    +	}
    +	repeatedStringForItems := "[]LeaseCandidate{"
    +	for _, f := range this.Items {
    +		repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "LeaseCandidate", "LeaseCandidate", 1), `&`, ``, 1) + ","
    +	}
    +	repeatedStringForItems += "}"
    +	s := strings.Join([]string{`&LeaseCandidateList{`,
    +		`ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
    +		`Items:` + repeatedStringForItems + `,`,
    +		`}`,
    +	}, "")
    +	return s
    +}
    +func (this *LeaseCandidateSpec) String() string {
    +	if this == nil {
    +		return "nil"
    +	}
    +	s := strings.Join([]string{`&LeaseCandidateSpec{`,
    +		`LeaseName:` + fmt.Sprintf("%v", this.LeaseName) + `,`,
    +		`PingTime:` + strings.Replace(fmt.Sprintf("%v", this.PingTime), "MicroTime", "v1.MicroTime", 1) + `,`,
    +		`RenewTime:` + strings.Replace(fmt.Sprintf("%v", this.RenewTime), "MicroTime", "v1.MicroTime", 1) + `,`,
    +		`BinaryVersion:` + fmt.Sprintf("%v", this.BinaryVersion) + `,`,
    +		`EmulationVersion:` + fmt.Sprintf("%v", this.EmulationVersion) + `,`,
    +		`Strategy:` + fmt.Sprintf("%v", this.Strategy) + `,`,
    +		`}`,
    +	}, "")
    +	return s
    +}
     func (this *LeaseList) String() string {
     	if this == nil {
     		return "nil"
    @@ -599,6 +949,489 @@ func (m *Lease) Unmarshal(dAtA []byte) error {
     	}
     	return nil
     }
    +func (m *LeaseCandidate) Unmarshal(dAtA []byte) error {
    +	l := len(dAtA)
    +	iNdEx := 0
    +	for iNdEx < l {
    +		preIndex := iNdEx
    +		var wire uint64
    +		for shift := uint(0); ; shift += 7 {
    +			if shift >= 64 {
    +				return ErrIntOverflowGenerated
    +			}
    +			if iNdEx >= l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			b := dAtA[iNdEx]
    +			iNdEx++
    +			wire |= uint64(b&0x7F) << shift
    +			if b < 0x80 {
    +				break
    +			}
    +		}
    +		fieldNum := int32(wire >> 3)
    +		wireType := int(wire & 0x7)
    +		if wireType == 4 {
    +			return fmt.Errorf("proto: LeaseCandidate: wiretype end group for non-group")
    +		}
    +		if fieldNum <= 0 {
    +			return fmt.Errorf("proto: LeaseCandidate: illegal tag %d (wire type %d)", fieldNum, wire)
    +		}
    +		switch fieldNum {
    +		case 1:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		case 2:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		default:
    +			iNdEx = preIndex
    +			skippy, err := skipGenerated(dAtA[iNdEx:])
    +			if err != nil {
    +				return err
    +			}
    +			if (skippy < 0) || (iNdEx+skippy) < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if (iNdEx + skippy) > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			iNdEx += skippy
    +		}
    +	}
    +
    +	if iNdEx > l {
    +		return io.ErrUnexpectedEOF
    +	}
    +	return nil
    +}
    +func (m *LeaseCandidateList) Unmarshal(dAtA []byte) error {
    +	l := len(dAtA)
    +	iNdEx := 0
    +	for iNdEx < l {
    +		preIndex := iNdEx
    +		var wire uint64
    +		for shift := uint(0); ; shift += 7 {
    +			if shift >= 64 {
    +				return ErrIntOverflowGenerated
    +			}
    +			if iNdEx >= l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			b := dAtA[iNdEx]
    +			iNdEx++
    +			wire |= uint64(b&0x7F) << shift
    +			if b < 0x80 {
    +				break
    +			}
    +		}
    +		fieldNum := int32(wire >> 3)
    +		wireType := int(wire & 0x7)
    +		if wireType == 4 {
    +			return fmt.Errorf("proto: LeaseCandidateList: wiretype end group for non-group")
    +		}
    +		if fieldNum <= 0 {
    +			return fmt.Errorf("proto: LeaseCandidateList: illegal tag %d (wire type %d)", fieldNum, wire)
    +		}
    +		switch fieldNum {
    +		case 1:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		case 2:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.Items = append(m.Items, LeaseCandidate{})
    +			if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		default:
    +			iNdEx = preIndex
    +			skippy, err := skipGenerated(dAtA[iNdEx:])
    +			if err != nil {
    +				return err
    +			}
    +			if (skippy < 0) || (iNdEx+skippy) < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if (iNdEx + skippy) > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			iNdEx += skippy
    +		}
    +	}
    +
    +	if iNdEx > l {
    +		return io.ErrUnexpectedEOF
    +	}
    +	return nil
    +}
    +func (m *LeaseCandidateSpec) Unmarshal(dAtA []byte) error {
    +	l := len(dAtA)
    +	iNdEx := 0
    +	for iNdEx < l {
    +		preIndex := iNdEx
    +		var wire uint64
    +		for shift := uint(0); ; shift += 7 {
    +			if shift >= 64 {
    +				return ErrIntOverflowGenerated
    +			}
    +			if iNdEx >= l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			b := dAtA[iNdEx]
    +			iNdEx++
    +			wire |= uint64(b&0x7F) << shift
    +			if b < 0x80 {
    +				break
    +			}
    +		}
    +		fieldNum := int32(wire >> 3)
    +		wireType := int(wire & 0x7)
    +		if wireType == 4 {
    +			return fmt.Errorf("proto: LeaseCandidateSpec: wiretype end group for non-group")
    +		}
    +		if fieldNum <= 0 {
    +			return fmt.Errorf("proto: LeaseCandidateSpec: illegal tag %d (wire type %d)", fieldNum, wire)
    +		}
    +		switch fieldNum {
    +		case 1:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field LeaseName", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.LeaseName = string(dAtA[iNdEx:postIndex])
    +			iNdEx = postIndex
    +		case 2:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field PingTime", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			if m.PingTime == nil {
    +				m.PingTime = &v1.MicroTime{}
    +			}
    +			if err := m.PingTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		case 3:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field RenewTime", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			if m.RenewTime == nil {
    +				m.RenewTime = &v1.MicroTime{}
    +			}
    +			if err := m.RenewTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		case 4:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field BinaryVersion", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.BinaryVersion = string(dAtA[iNdEx:postIndex])
    +			iNdEx = postIndex
    +		case 5:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field EmulationVersion", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.EmulationVersion = string(dAtA[iNdEx:postIndex])
    +			iNdEx = postIndex
    +		case 6:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Strategy", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.Strategy = k8s_io_api_coordination_v1.CoordinatedLeaseStrategy(dAtA[iNdEx:postIndex])
    +			iNdEx = postIndex
    +		default:
    +			iNdEx = preIndex
    +			skippy, err := skipGenerated(dAtA[iNdEx:])
    +			if err != nil {
    +				return err
    +			}
    +			if (skippy < 0) || (iNdEx+skippy) < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if (iNdEx + skippy) > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			iNdEx += skippy
    +		}
    +	}
    +
    +	if iNdEx > l {
    +		return io.ErrUnexpectedEOF
    +	}
    +	return nil
    +}
     func (m *LeaseList) Unmarshal(dAtA []byte) error {
     	l := len(dAtA)
     	iNdEx := 0
    diff --git a/vendor/k8s.io/api/coordination/v1beta1/generated.proto b/vendor/k8s.io/api/coordination/v1beta1/generated.proto
    index 088811a74..7ca043f52 100644
    --- a/vendor/k8s.io/api/coordination/v1beta1/generated.proto
    +++ b/vendor/k8s.io/api/coordination/v1beta1/generated.proto
    @@ -41,6 +41,75 @@ message Lease {
       optional LeaseSpec spec = 2;
     }
     
    +// LeaseCandidate defines a candidate for a Lease object.
    +// Candidates are created such that coordinated leader election will pick the best leader from the list of candidates.
    +message LeaseCandidate {
    +  // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
    +  // +optional
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
    +
    +  // spec contains the specification of the Lease.
    +  // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
    +  // +optional
    +  optional LeaseCandidateSpec spec = 2;
    +}
    +
    +// LeaseCandidateList is a list of Lease objects.
    +message LeaseCandidateList {
    +  // Standard list metadata.
    +  // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
    +  // +optional
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
    +
    +  // items is a list of schema objects.
    +  repeated LeaseCandidate items = 2;
    +}
    +
    +// LeaseCandidateSpec is a specification of a Lease.
    +message LeaseCandidateSpec {
    +  // LeaseName is the name of the lease for which this candidate is contending.
    +  // The limits on this field are the same as on Lease.name. Multiple lease candidates
    +  // may reference the same Lease.name.
    +  // This field is immutable.
    +  // +required
    +  optional string leaseName = 1;
    +
    +  // PingTime is the last time that the server has requested the LeaseCandidate
    +  // to renew. It is only done during leader election to check if any
    +  // LeaseCandidates have become ineligible. When PingTime is updated, the
    +  // LeaseCandidate will respond by updating RenewTime.
    +  // +optional
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime pingTime = 2;
    +
    +  // RenewTime is the time that the LeaseCandidate was last updated.
    +  // Any time a Lease needs to do leader election, the PingTime field
    +  // is updated to signal to the LeaseCandidate that they should update
    +  // the RenewTime.
    +  // Old LeaseCandidate objects are also garbage collected if it has been hours
    +  // since the last renew. The PingTime field is updated regularly to prevent
    +  // garbage collection for still active LeaseCandidates.
    +  // +optional
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime renewTime = 3;
    +
    +  // BinaryVersion is the binary version. It must be in a semver format without leading `v`.
    +  // This field is required.
    +  // +required
    +  optional string binaryVersion = 4;
    +
    +  // EmulationVersion is the emulation version. It must be in a semver format without leading `v`.
    +  // EmulationVersion must be less than or equal to BinaryVersion.
    +  // This field is required when strategy is "OldestEmulationVersion"
    +  // +optional
    +  optional string emulationVersion = 5;
    +
    +  // Strategy is the strategy that coordinated leader election will use for picking the leader.
    +  // If multiple candidates for the same Lease return different strategies, the strategy provided
    +  // by the candidate with the latest BinaryVersion will be used. If there is still conflict,
    +  // this is a user error and coordinated leader election will not operate the Lease until resolved.
    +  // +required
    +  optional string strategy = 6;
    +}
    +
     // LeaseList is a list of Lease objects.
     message LeaseList {
       // Standard list metadata.
    diff --git a/vendor/k8s.io/api/coordination/v1beta1/register.go b/vendor/k8s.io/api/coordination/v1beta1/register.go
    index 85efaa64e..bd0016423 100644
    --- a/vendor/k8s.io/api/coordination/v1beta1/register.go
    +++ b/vendor/k8s.io/api/coordination/v1beta1/register.go
    @@ -46,6 +46,8 @@ func addKnownTypes(scheme *runtime.Scheme) error {
     	scheme.AddKnownTypes(SchemeGroupVersion,
     		&Lease{},
     		&LeaseList{},
    +		&LeaseCandidate{},
    +		&LeaseCandidateList{},
     	)
     
     	metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
    diff --git a/vendor/k8s.io/api/coordination/v1beta1/types.go b/vendor/k8s.io/api/coordination/v1beta1/types.go
    index d63fc30a9..781d29efc 100644
    --- a/vendor/k8s.io/api/coordination/v1beta1/types.go
    +++ b/vendor/k8s.io/api/coordination/v1beta1/types.go
    @@ -91,3 +91,76 @@ type LeaseList struct {
     	// items is a list of schema objects.
     	Items []Lease `json:"items" protobuf:"bytes,2,rep,name=items"`
     }
    +
    +// +genclient
    +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    +// +k8s:prerelease-lifecycle-gen:introduced=1.33
    +
    +// LeaseCandidate defines a candidate for a Lease object.
    +// Candidates are created such that coordinated leader election will pick the best leader from the list of candidates.
    +type LeaseCandidate struct {
    +	metav1.TypeMeta `json:",inline"`
    +	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
    +	// +optional
    +	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
    +
    +	// spec contains the specification of the Lease.
    +	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
    +	// +optional
    +	Spec LeaseCandidateSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
    +}
    +
    +// LeaseCandidateSpec is a specification of a Lease.
    +type LeaseCandidateSpec struct {
    +	// LeaseName is the name of the lease for which this candidate is contending.
    +	// The limits on this field are the same as on Lease.name. Multiple lease candidates
    +	// may reference the same Lease.name.
    +	// This field is immutable.
    +	// +required
    +	LeaseName string `json:"leaseName" protobuf:"bytes,1,name=leaseName"`
    +	// PingTime is the last time that the server has requested the LeaseCandidate
    +	// to renew. It is only done during leader election to check if any
    +	// LeaseCandidates have become ineligible. When PingTime is updated, the
    +	// LeaseCandidate will respond by updating RenewTime.
    +	// +optional
    +	PingTime *metav1.MicroTime `json:"pingTime,omitempty" protobuf:"bytes,2,opt,name=pingTime"`
    +	// RenewTime is the time that the LeaseCandidate was last updated.
    +	// Any time a Lease needs to do leader election, the PingTime field
    +	// is updated to signal to the LeaseCandidate that they should update
    +	// the RenewTime.
    +	// Old LeaseCandidate objects are also garbage collected if it has been hours
    +	// since the last renew. The PingTime field is updated regularly to prevent
    +	// garbage collection for still active LeaseCandidates.
    +	// +optional
    +	RenewTime *metav1.MicroTime `json:"renewTime,omitempty" protobuf:"bytes,3,opt,name=renewTime"`
    +	// BinaryVersion is the binary version. It must be in a semver format without leading `v`.
    +	// This field is required.
    +	// +required
    +	BinaryVersion string `json:"binaryVersion" protobuf:"bytes,4,name=binaryVersion"`
    +	// EmulationVersion is the emulation version. It must be in a semver format without leading `v`.
    +	// EmulationVersion must be less than or equal to BinaryVersion.
    +	// This field is required when strategy is "OldestEmulationVersion"
    +	// +optional
    +	EmulationVersion string `json:"emulationVersion,omitempty" protobuf:"bytes,5,opt,name=emulationVersion"`
    +	// Strategy is the strategy that coordinated leader election will use for picking the leader.
    +	// If multiple candidates for the same Lease return different strategies, the strategy provided
    +	// by the candidate with the latest BinaryVersion will be used. If there is still conflict,
    +	// this is a user error and coordinated leader election will not operate the Lease until resolved.
    +	// +required
    +	Strategy v1.CoordinatedLeaseStrategy `json:"strategy,omitempty" protobuf:"bytes,6,opt,name=strategy"`
    +}
    +
    +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    +// +k8s:prerelease-lifecycle-gen:introduced=1.33
    +
    +// LeaseCandidateList is a list of Lease objects.
    +type LeaseCandidateList struct {
    +	metav1.TypeMeta `json:",inline"`
    +	// Standard list metadata.
    +	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
    +	// +optional
    +	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
    +
    +	// items is a list of schema objects.
    +	Items []LeaseCandidate `json:"items" protobuf:"bytes,2,rep,name=items"`
    +}
    diff --git a/vendor/k8s.io/api/coordination/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/coordination/v1beta1/types_swagger_doc_generated.go
    index 50fe8ea18..35812b77f 100644
    --- a/vendor/k8s.io/api/coordination/v1beta1/types_swagger_doc_generated.go
    +++ b/vendor/k8s.io/api/coordination/v1beta1/types_swagger_doc_generated.go
    @@ -37,6 +37,40 @@ func (Lease) SwaggerDoc() map[string]string {
     	return map_Lease
     }
     
    +var map_LeaseCandidate = map[string]string{
    +	"":         "LeaseCandidate defines a candidate for a Lease object. Candidates are created such that coordinated leader election will pick the best leader from the list of candidates.",
    +	"metadata": "More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
    +	"spec":     "spec contains the specification of the Lease. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status",
    +}
    +
    +func (LeaseCandidate) SwaggerDoc() map[string]string {
    +	return map_LeaseCandidate
    +}
    +
    +var map_LeaseCandidateList = map[string]string{
    +	"":         "LeaseCandidateList is a list of Lease objects.",
    +	"metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
    +	"items":    "items is a list of schema objects.",
    +}
    +
    +func (LeaseCandidateList) SwaggerDoc() map[string]string {
    +	return map_LeaseCandidateList
    +}
    +
    +var map_LeaseCandidateSpec = map[string]string{
    +	"":                 "LeaseCandidateSpec is a specification of a Lease.",
    +	"leaseName":        "LeaseName is the name of the lease for which this candidate is contending. The limits on this field are the same as on Lease.name. Multiple lease candidates may reference the same Lease.name. This field is immutable.",
    +	"pingTime":         "PingTime is the last time that the server has requested the LeaseCandidate to renew. It is only done during leader election to check if any LeaseCandidates have become ineligible. When PingTime is updated, the LeaseCandidate will respond by updating RenewTime.",
    +	"renewTime":        "RenewTime is the time that the LeaseCandidate was last updated. Any time a Lease needs to do leader election, the PingTime field is updated to signal to the LeaseCandidate that they should update the RenewTime. Old LeaseCandidate objects are also garbage collected if it has been hours since the last renew. The PingTime field is updated regularly to prevent garbage collection for still active LeaseCandidates.",
    +	"binaryVersion":    "BinaryVersion is the binary version. It must be in a semver format without leading `v`. This field is required.",
    +	"emulationVersion": "EmulationVersion is the emulation version. It must be in a semver format without leading `v`. EmulationVersion must be less than or equal to BinaryVersion. This field is required when strategy is \"OldestEmulationVersion\"",
    +	"strategy":         "Strategy is the strategy that coordinated leader election will use for picking the leader. If multiple candidates for the same Lease return different strategies, the strategy provided by the candidate with the latest BinaryVersion will be used. If there is still conflict, this is a user error and coordinated leader election will not operate the Lease until resolved.",
    +}
    +
    +func (LeaseCandidateSpec) SwaggerDoc() map[string]string {
    +	return map_LeaseCandidateSpec
    +}
    +
     var map_LeaseList = map[string]string{
     	"":         "LeaseList is a list of Lease objects.",
     	"metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
    diff --git a/vendor/k8s.io/api/coordination/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/coordination/v1beta1/zz_generated.deepcopy.go
    index dcef1e346..b990ee247 100644
    --- a/vendor/k8s.io/api/coordination/v1beta1/zz_generated.deepcopy.go
    +++ b/vendor/k8s.io/api/coordination/v1beta1/zz_generated.deepcopy.go
    @@ -53,6 +53,90 @@ func (in *Lease) DeepCopyObject() runtime.Object {
     	return nil
     }
     
    +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
    +func (in *LeaseCandidate) DeepCopyInto(out *LeaseCandidate) {
    +	*out = *in
    +	out.TypeMeta = in.TypeMeta
    +	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
    +	in.Spec.DeepCopyInto(&out.Spec)
    +	return
    +}
    +
    +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LeaseCandidate.
    +func (in *LeaseCandidate) DeepCopy() *LeaseCandidate {
    +	if in == nil {
    +		return nil
    +	}
    +	out := new(LeaseCandidate)
    +	in.DeepCopyInto(out)
    +	return out
    +}
    +
    +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
    +func (in *LeaseCandidate) DeepCopyObject() runtime.Object {
    +	if c := in.DeepCopy(); c != nil {
    +		return c
    +	}
    +	return nil
    +}
    +
    +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
    +func (in *LeaseCandidateList) DeepCopyInto(out *LeaseCandidateList) {
    +	*out = *in
    +	out.TypeMeta = in.TypeMeta
    +	in.ListMeta.DeepCopyInto(&out.ListMeta)
    +	if in.Items != nil {
    +		in, out := &in.Items, &out.Items
    +		*out = make([]LeaseCandidate, len(*in))
    +		for i := range *in {
    +			(*in)[i].DeepCopyInto(&(*out)[i])
    +		}
    +	}
    +	return
    +}
    +
    +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LeaseCandidateList.
    +func (in *LeaseCandidateList) DeepCopy() *LeaseCandidateList {
    +	if in == nil {
    +		return nil
    +	}
    +	out := new(LeaseCandidateList)
    +	in.DeepCopyInto(out)
    +	return out
    +}
    +
    +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
    +func (in *LeaseCandidateList) DeepCopyObject() runtime.Object {
    +	if c := in.DeepCopy(); c != nil {
    +		return c
    +	}
    +	return nil
    +}
    +
    +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
    +func (in *LeaseCandidateSpec) DeepCopyInto(out *LeaseCandidateSpec) {
    +	*out = *in
    +	if in.PingTime != nil {
    +		in, out := &in.PingTime, &out.PingTime
    +		*out = (*in).DeepCopy()
    +	}
    +	if in.RenewTime != nil {
    +		in, out := &in.RenewTime, &out.RenewTime
    +		*out = (*in).DeepCopy()
    +	}
    +	return
    +}
    +
    +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LeaseCandidateSpec.
    +func (in *LeaseCandidateSpec) DeepCopy() *LeaseCandidateSpec {
    +	if in == nil {
    +		return nil
    +	}
    +	out := new(LeaseCandidateSpec)
    +	in.DeepCopyInto(out)
    +	return out
    +}
    +
     // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
     func (in *LeaseList) DeepCopyInto(out *LeaseList) {
     	*out = *in
    diff --git a/vendor/k8s.io/api/coordination/v1beta1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/coordination/v1beta1/zz_generated.prerelease-lifecycle.go
    index 18926aa10..73636edfa 100644
    --- a/vendor/k8s.io/api/coordination/v1beta1/zz_generated.prerelease-lifecycle.go
    +++ b/vendor/k8s.io/api/coordination/v1beta1/zz_generated.prerelease-lifecycle.go
    @@ -49,6 +49,42 @@ func (in *Lease) APILifecycleRemoved() (major, minor int) {
     	return 1, 22
     }
     
    +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
    +func (in *LeaseCandidate) APILifecycleIntroduced() (major, minor int) {
    +	return 1, 33
    +}
    +
    +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or  "k8s:prerelease-lifecycle-gen:introduced" plus three minor.
    +func (in *LeaseCandidate) APILifecycleDeprecated() (major, minor int) {
    +	return 1, 36
    +}
    +
    +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or  "k8s:prerelease-lifecycle-gen:deprecated" plus three minor.
    +func (in *LeaseCandidate) APILifecycleRemoved() (major, minor int) {
    +	return 1, 39
    +}
    +
    +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
    +func (in *LeaseCandidateList) APILifecycleIntroduced() (major, minor int) {
    +	return 1, 33
    +}
    +
    +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or  "k8s:prerelease-lifecycle-gen:introduced" plus three minor.
    +func (in *LeaseCandidateList) APILifecycleDeprecated() (major, minor int) {
    +	return 1, 36
    +}
    +
    +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or  "k8s:prerelease-lifecycle-gen:deprecated" plus three minor.
    +func (in *LeaseCandidateList) APILifecycleRemoved() (major, minor int) {
    +	return 1, 39
    +}
    +
     // APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
     // It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
     func (in *LeaseList) APILifecycleIntroduced() (major, minor int) {
    diff --git a/vendor/k8s.io/api/core/v1/doc.go b/vendor/k8s.io/api/core/v1/doc.go
    index bc0041b33..e4e9196ae 100644
    --- a/vendor/k8s.io/api/core/v1/doc.go
    +++ b/vendor/k8s.io/api/core/v1/doc.go
    @@ -21,4 +21,4 @@ limitations under the License.
     // +groupName=
     
     // Package v1 is the v1 version of the core API.
    -package v1 // import "k8s.io/api/core/v1"
    +package v1
    diff --git a/vendor/k8s.io/api/core/v1/generated.pb.go b/vendor/k8s.io/api/core/v1/generated.pb.go
    index 9d466c6d7..e1a297b98 100644
    --- a/vendor/k8s.io/api/core/v1/generated.pb.go
    +++ b/vendor/k8s.io/api/core/v1/generated.pb.go
    @@ -861,10 +861,38 @@ func (m *Container) XXX_DiscardUnknown() {
     
     var xxx_messageInfo_Container proto.InternalMessageInfo
     
    +func (m *ContainerExtendedResourceRequest) Reset()      { *m = ContainerExtendedResourceRequest{} }
    +func (*ContainerExtendedResourceRequest) ProtoMessage() {}
    +func (*ContainerExtendedResourceRequest) Descriptor() ([]byte, []int) {
    +	return fileDescriptor_6c07b07c062484ab, []int{29}
    +}
    +func (m *ContainerExtendedResourceRequest) XXX_Unmarshal(b []byte) error {
    +	return m.Unmarshal(b)
    +}
    +func (m *ContainerExtendedResourceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    +	b = b[:cap(b)]
    +	n, err := m.MarshalToSizedBuffer(b)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return b[:n], nil
    +}
    +func (m *ContainerExtendedResourceRequest) XXX_Merge(src proto.Message) {
    +	xxx_messageInfo_ContainerExtendedResourceRequest.Merge(m, src)
    +}
    +func (m *ContainerExtendedResourceRequest) XXX_Size() int {
    +	return m.Size()
    +}
    +func (m *ContainerExtendedResourceRequest) XXX_DiscardUnknown() {
    +	xxx_messageInfo_ContainerExtendedResourceRequest.DiscardUnknown(m)
    +}
    +
    +var xxx_messageInfo_ContainerExtendedResourceRequest proto.InternalMessageInfo
    +
     func (m *ContainerImage) Reset()      { *m = ContainerImage{} }
     func (*ContainerImage) ProtoMessage() {}
     func (*ContainerImage) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{29}
    +	return fileDescriptor_6c07b07c062484ab, []int{30}
     }
     func (m *ContainerImage) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -892,7 +920,7 @@ var xxx_messageInfo_ContainerImage proto.InternalMessageInfo
     func (m *ContainerPort) Reset()      { *m = ContainerPort{} }
     func (*ContainerPort) ProtoMessage() {}
     func (*ContainerPort) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{30}
    +	return fileDescriptor_6c07b07c062484ab, []int{31}
     }
     func (m *ContainerPort) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -920,7 +948,7 @@ var xxx_messageInfo_ContainerPort proto.InternalMessageInfo
     func (m *ContainerResizePolicy) Reset()      { *m = ContainerResizePolicy{} }
     func (*ContainerResizePolicy) ProtoMessage() {}
     func (*ContainerResizePolicy) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{31}
    +	return fileDescriptor_6c07b07c062484ab, []int{32}
     }
     func (m *ContainerResizePolicy) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -945,10 +973,66 @@ func (m *ContainerResizePolicy) XXX_DiscardUnknown() {
     
     var xxx_messageInfo_ContainerResizePolicy proto.InternalMessageInfo
     
    +func (m *ContainerRestartRule) Reset()      { *m = ContainerRestartRule{} }
    +func (*ContainerRestartRule) ProtoMessage() {}
    +func (*ContainerRestartRule) Descriptor() ([]byte, []int) {
    +	return fileDescriptor_6c07b07c062484ab, []int{33}
    +}
    +func (m *ContainerRestartRule) XXX_Unmarshal(b []byte) error {
    +	return m.Unmarshal(b)
    +}
    +func (m *ContainerRestartRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    +	b = b[:cap(b)]
    +	n, err := m.MarshalToSizedBuffer(b)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return b[:n], nil
    +}
    +func (m *ContainerRestartRule) XXX_Merge(src proto.Message) {
    +	xxx_messageInfo_ContainerRestartRule.Merge(m, src)
    +}
    +func (m *ContainerRestartRule) XXX_Size() int {
    +	return m.Size()
    +}
    +func (m *ContainerRestartRule) XXX_DiscardUnknown() {
    +	xxx_messageInfo_ContainerRestartRule.DiscardUnknown(m)
    +}
    +
    +var xxx_messageInfo_ContainerRestartRule proto.InternalMessageInfo
    +
    +func (m *ContainerRestartRuleOnExitCodes) Reset()      { *m = ContainerRestartRuleOnExitCodes{} }
    +func (*ContainerRestartRuleOnExitCodes) ProtoMessage() {}
    +func (*ContainerRestartRuleOnExitCodes) Descriptor() ([]byte, []int) {
    +	return fileDescriptor_6c07b07c062484ab, []int{34}
    +}
    +func (m *ContainerRestartRuleOnExitCodes) XXX_Unmarshal(b []byte) error {
    +	return m.Unmarshal(b)
    +}
    +func (m *ContainerRestartRuleOnExitCodes) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    +	b = b[:cap(b)]
    +	n, err := m.MarshalToSizedBuffer(b)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return b[:n], nil
    +}
    +func (m *ContainerRestartRuleOnExitCodes) XXX_Merge(src proto.Message) {
    +	xxx_messageInfo_ContainerRestartRuleOnExitCodes.Merge(m, src)
    +}
    +func (m *ContainerRestartRuleOnExitCodes) XXX_Size() int {
    +	return m.Size()
    +}
    +func (m *ContainerRestartRuleOnExitCodes) XXX_DiscardUnknown() {
    +	xxx_messageInfo_ContainerRestartRuleOnExitCodes.DiscardUnknown(m)
    +}
    +
    +var xxx_messageInfo_ContainerRestartRuleOnExitCodes proto.InternalMessageInfo
    +
     func (m *ContainerState) Reset()      { *m = ContainerState{} }
     func (*ContainerState) ProtoMessage() {}
     func (*ContainerState) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{32}
    +	return fileDescriptor_6c07b07c062484ab, []int{35}
     }
     func (m *ContainerState) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -976,7 +1060,7 @@ var xxx_messageInfo_ContainerState proto.InternalMessageInfo
     func (m *ContainerStateRunning) Reset()      { *m = ContainerStateRunning{} }
     func (*ContainerStateRunning) ProtoMessage() {}
     func (*ContainerStateRunning) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{33}
    +	return fileDescriptor_6c07b07c062484ab, []int{36}
     }
     func (m *ContainerStateRunning) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -1004,7 +1088,7 @@ var xxx_messageInfo_ContainerStateRunning proto.InternalMessageInfo
     func (m *ContainerStateTerminated) Reset()      { *m = ContainerStateTerminated{} }
     func (*ContainerStateTerminated) ProtoMessage() {}
     func (*ContainerStateTerminated) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{34}
    +	return fileDescriptor_6c07b07c062484ab, []int{37}
     }
     func (m *ContainerStateTerminated) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -1032,7 +1116,7 @@ var xxx_messageInfo_ContainerStateTerminated proto.InternalMessageInfo
     func (m *ContainerStateWaiting) Reset()      { *m = ContainerStateWaiting{} }
     func (*ContainerStateWaiting) ProtoMessage() {}
     func (*ContainerStateWaiting) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{35}
    +	return fileDescriptor_6c07b07c062484ab, []int{38}
     }
     func (m *ContainerStateWaiting) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -1060,7 +1144,7 @@ var xxx_messageInfo_ContainerStateWaiting proto.InternalMessageInfo
     func (m *ContainerStatus) Reset()      { *m = ContainerStatus{} }
     func (*ContainerStatus) ProtoMessage() {}
     func (*ContainerStatus) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{36}
    +	return fileDescriptor_6c07b07c062484ab, []int{39}
     }
     func (m *ContainerStatus) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -1088,7 +1172,7 @@ var xxx_messageInfo_ContainerStatus proto.InternalMessageInfo
     func (m *ContainerUser) Reset()      { *m = ContainerUser{} }
     func (*ContainerUser) ProtoMessage() {}
     func (*ContainerUser) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{37}
    +	return fileDescriptor_6c07b07c062484ab, []int{40}
     }
     func (m *ContainerUser) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -1116,7 +1200,7 @@ var xxx_messageInfo_ContainerUser proto.InternalMessageInfo
     func (m *DaemonEndpoint) Reset()      { *m = DaemonEndpoint{} }
     func (*DaemonEndpoint) ProtoMessage() {}
     func (*DaemonEndpoint) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{38}
    +	return fileDescriptor_6c07b07c062484ab, []int{41}
     }
     func (m *DaemonEndpoint) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -1144,7 +1228,7 @@ var xxx_messageInfo_DaemonEndpoint proto.InternalMessageInfo
     func (m *DownwardAPIProjection) Reset()      { *m = DownwardAPIProjection{} }
     func (*DownwardAPIProjection) ProtoMessage() {}
     func (*DownwardAPIProjection) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{39}
    +	return fileDescriptor_6c07b07c062484ab, []int{42}
     }
     func (m *DownwardAPIProjection) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -1172,7 +1256,7 @@ var xxx_messageInfo_DownwardAPIProjection proto.InternalMessageInfo
     func (m *DownwardAPIVolumeFile) Reset()      { *m = DownwardAPIVolumeFile{} }
     func (*DownwardAPIVolumeFile) ProtoMessage() {}
     func (*DownwardAPIVolumeFile) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{40}
    +	return fileDescriptor_6c07b07c062484ab, []int{43}
     }
     func (m *DownwardAPIVolumeFile) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -1200,7 +1284,7 @@ var xxx_messageInfo_DownwardAPIVolumeFile proto.InternalMessageInfo
     func (m *DownwardAPIVolumeSource) Reset()      { *m = DownwardAPIVolumeSource{} }
     func (*DownwardAPIVolumeSource) ProtoMessage() {}
     func (*DownwardAPIVolumeSource) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{41}
    +	return fileDescriptor_6c07b07c062484ab, []int{44}
     }
     func (m *DownwardAPIVolumeSource) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -1228,7 +1312,7 @@ var xxx_messageInfo_DownwardAPIVolumeSource proto.InternalMessageInfo
     func (m *EmptyDirVolumeSource) Reset()      { *m = EmptyDirVolumeSource{} }
     func (*EmptyDirVolumeSource) ProtoMessage() {}
     func (*EmptyDirVolumeSource) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{42}
    +	return fileDescriptor_6c07b07c062484ab, []int{45}
     }
     func (m *EmptyDirVolumeSource) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -1256,7 +1340,7 @@ var xxx_messageInfo_EmptyDirVolumeSource proto.InternalMessageInfo
     func (m *EndpointAddress) Reset()      { *m = EndpointAddress{} }
     func (*EndpointAddress) ProtoMessage() {}
     func (*EndpointAddress) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{43}
    +	return fileDescriptor_6c07b07c062484ab, []int{46}
     }
     func (m *EndpointAddress) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -1284,7 +1368,7 @@ var xxx_messageInfo_EndpointAddress proto.InternalMessageInfo
     func (m *EndpointPort) Reset()      { *m = EndpointPort{} }
     func (*EndpointPort) ProtoMessage() {}
     func (*EndpointPort) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{44}
    +	return fileDescriptor_6c07b07c062484ab, []int{47}
     }
     func (m *EndpointPort) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -1312,7 +1396,7 @@ var xxx_messageInfo_EndpointPort proto.InternalMessageInfo
     func (m *EndpointSubset) Reset()      { *m = EndpointSubset{} }
     func (*EndpointSubset) ProtoMessage() {}
     func (*EndpointSubset) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{45}
    +	return fileDescriptor_6c07b07c062484ab, []int{48}
     }
     func (m *EndpointSubset) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -1340,7 +1424,7 @@ var xxx_messageInfo_EndpointSubset proto.InternalMessageInfo
     func (m *Endpoints) Reset()      { *m = Endpoints{} }
     func (*Endpoints) ProtoMessage() {}
     func (*Endpoints) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{46}
    +	return fileDescriptor_6c07b07c062484ab, []int{49}
     }
     func (m *Endpoints) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -1368,7 +1452,7 @@ var xxx_messageInfo_Endpoints proto.InternalMessageInfo
     func (m *EndpointsList) Reset()      { *m = EndpointsList{} }
     func (*EndpointsList) ProtoMessage() {}
     func (*EndpointsList) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{47}
    +	return fileDescriptor_6c07b07c062484ab, []int{50}
     }
     func (m *EndpointsList) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -1396,7 +1480,7 @@ var xxx_messageInfo_EndpointsList proto.InternalMessageInfo
     func (m *EnvFromSource) Reset()      { *m = EnvFromSource{} }
     func (*EnvFromSource) ProtoMessage() {}
     func (*EnvFromSource) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{48}
    +	return fileDescriptor_6c07b07c062484ab, []int{51}
     }
     func (m *EnvFromSource) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -1424,7 +1508,7 @@ var xxx_messageInfo_EnvFromSource proto.InternalMessageInfo
     func (m *EnvVar) Reset()      { *m = EnvVar{} }
     func (*EnvVar) ProtoMessage() {}
     func (*EnvVar) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{49}
    +	return fileDescriptor_6c07b07c062484ab, []int{52}
     }
     func (m *EnvVar) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -1452,7 +1536,7 @@ var xxx_messageInfo_EnvVar proto.InternalMessageInfo
     func (m *EnvVarSource) Reset()      { *m = EnvVarSource{} }
     func (*EnvVarSource) ProtoMessage() {}
     func (*EnvVarSource) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{50}
    +	return fileDescriptor_6c07b07c062484ab, []int{53}
     }
     func (m *EnvVarSource) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -1480,7 +1564,7 @@ var xxx_messageInfo_EnvVarSource proto.InternalMessageInfo
     func (m *EphemeralContainer) Reset()      { *m = EphemeralContainer{} }
     func (*EphemeralContainer) ProtoMessage() {}
     func (*EphemeralContainer) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{51}
    +	return fileDescriptor_6c07b07c062484ab, []int{54}
     }
     func (m *EphemeralContainer) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -1508,7 +1592,7 @@ var xxx_messageInfo_EphemeralContainer proto.InternalMessageInfo
     func (m *EphemeralContainerCommon) Reset()      { *m = EphemeralContainerCommon{} }
     func (*EphemeralContainerCommon) ProtoMessage() {}
     func (*EphemeralContainerCommon) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{52}
    +	return fileDescriptor_6c07b07c062484ab, []int{55}
     }
     func (m *EphemeralContainerCommon) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -1536,7 +1620,7 @@ var xxx_messageInfo_EphemeralContainerCommon proto.InternalMessageInfo
     func (m *EphemeralVolumeSource) Reset()      { *m = EphemeralVolumeSource{} }
     func (*EphemeralVolumeSource) ProtoMessage() {}
     func (*EphemeralVolumeSource) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{53}
    +	return fileDescriptor_6c07b07c062484ab, []int{56}
     }
     func (m *EphemeralVolumeSource) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -1564,7 +1648,7 @@ var xxx_messageInfo_EphemeralVolumeSource proto.InternalMessageInfo
     func (m *Event) Reset()      { *m = Event{} }
     func (*Event) ProtoMessage() {}
     func (*Event) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{54}
    +	return fileDescriptor_6c07b07c062484ab, []int{57}
     }
     func (m *Event) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -1592,7 +1676,7 @@ var xxx_messageInfo_Event proto.InternalMessageInfo
     func (m *EventList) Reset()      { *m = EventList{} }
     func (*EventList) ProtoMessage() {}
     func (*EventList) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{55}
    +	return fileDescriptor_6c07b07c062484ab, []int{58}
     }
     func (m *EventList) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -1620,7 +1704,7 @@ var xxx_messageInfo_EventList proto.InternalMessageInfo
     func (m *EventSeries) Reset()      { *m = EventSeries{} }
     func (*EventSeries) ProtoMessage() {}
     func (*EventSeries) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{56}
    +	return fileDescriptor_6c07b07c062484ab, []int{59}
     }
     func (m *EventSeries) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -1648,7 +1732,7 @@ var xxx_messageInfo_EventSeries proto.InternalMessageInfo
     func (m *EventSource) Reset()      { *m = EventSource{} }
     func (*EventSource) ProtoMessage() {}
     func (*EventSource) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{57}
    +	return fileDescriptor_6c07b07c062484ab, []int{60}
     }
     func (m *EventSource) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -1676,7 +1760,7 @@ var xxx_messageInfo_EventSource proto.InternalMessageInfo
     func (m *ExecAction) Reset()      { *m = ExecAction{} }
     func (*ExecAction) ProtoMessage() {}
     func (*ExecAction) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{58}
    +	return fileDescriptor_6c07b07c062484ab, []int{61}
     }
     func (m *ExecAction) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -1704,7 +1788,7 @@ var xxx_messageInfo_ExecAction proto.InternalMessageInfo
     func (m *FCVolumeSource) Reset()      { *m = FCVolumeSource{} }
     func (*FCVolumeSource) ProtoMessage() {}
     func (*FCVolumeSource) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{59}
    +	return fileDescriptor_6c07b07c062484ab, []int{62}
     }
     func (m *FCVolumeSource) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -1729,10 +1813,38 @@ func (m *FCVolumeSource) XXX_DiscardUnknown() {
     
     var xxx_messageInfo_FCVolumeSource proto.InternalMessageInfo
     
    +func (m *FileKeySelector) Reset()      { *m = FileKeySelector{} }
    +func (*FileKeySelector) ProtoMessage() {}
    +func (*FileKeySelector) Descriptor() ([]byte, []int) {
    +	return fileDescriptor_6c07b07c062484ab, []int{63}
    +}
    +func (m *FileKeySelector) XXX_Unmarshal(b []byte) error {
    +	return m.Unmarshal(b)
    +}
    +func (m *FileKeySelector) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    +	b = b[:cap(b)]
    +	n, err := m.MarshalToSizedBuffer(b)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return b[:n], nil
    +}
    +func (m *FileKeySelector) XXX_Merge(src proto.Message) {
    +	xxx_messageInfo_FileKeySelector.Merge(m, src)
    +}
    +func (m *FileKeySelector) XXX_Size() int {
    +	return m.Size()
    +}
    +func (m *FileKeySelector) XXX_DiscardUnknown() {
    +	xxx_messageInfo_FileKeySelector.DiscardUnknown(m)
    +}
    +
    +var xxx_messageInfo_FileKeySelector proto.InternalMessageInfo
    +
     func (m *FlexPersistentVolumeSource) Reset()      { *m = FlexPersistentVolumeSource{} }
     func (*FlexPersistentVolumeSource) ProtoMessage() {}
     func (*FlexPersistentVolumeSource) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{60}
    +	return fileDescriptor_6c07b07c062484ab, []int{64}
     }
     func (m *FlexPersistentVolumeSource) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -1760,7 +1872,7 @@ var xxx_messageInfo_FlexPersistentVolumeSource proto.InternalMessageInfo
     func (m *FlexVolumeSource) Reset()      { *m = FlexVolumeSource{} }
     func (*FlexVolumeSource) ProtoMessage() {}
     func (*FlexVolumeSource) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{61}
    +	return fileDescriptor_6c07b07c062484ab, []int{65}
     }
     func (m *FlexVolumeSource) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -1788,7 +1900,7 @@ var xxx_messageInfo_FlexVolumeSource proto.InternalMessageInfo
     func (m *FlockerVolumeSource) Reset()      { *m = FlockerVolumeSource{} }
     func (*FlockerVolumeSource) ProtoMessage() {}
     func (*FlockerVolumeSource) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{62}
    +	return fileDescriptor_6c07b07c062484ab, []int{66}
     }
     func (m *FlockerVolumeSource) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -1816,7 +1928,7 @@ var xxx_messageInfo_FlockerVolumeSource proto.InternalMessageInfo
     func (m *GCEPersistentDiskVolumeSource) Reset()      { *m = GCEPersistentDiskVolumeSource{} }
     func (*GCEPersistentDiskVolumeSource) ProtoMessage() {}
     func (*GCEPersistentDiskVolumeSource) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{63}
    +	return fileDescriptor_6c07b07c062484ab, []int{67}
     }
     func (m *GCEPersistentDiskVolumeSource) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -1844,7 +1956,7 @@ var xxx_messageInfo_GCEPersistentDiskVolumeSource proto.InternalMessageInfo
     func (m *GRPCAction) Reset()      { *m = GRPCAction{} }
     func (*GRPCAction) ProtoMessage() {}
     func (*GRPCAction) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{64}
    +	return fileDescriptor_6c07b07c062484ab, []int{68}
     }
     func (m *GRPCAction) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -1872,7 +1984,7 @@ var xxx_messageInfo_GRPCAction proto.InternalMessageInfo
     func (m *GitRepoVolumeSource) Reset()      { *m = GitRepoVolumeSource{} }
     func (*GitRepoVolumeSource) ProtoMessage() {}
     func (*GitRepoVolumeSource) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{65}
    +	return fileDescriptor_6c07b07c062484ab, []int{69}
     }
     func (m *GitRepoVolumeSource) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -1900,7 +2012,7 @@ var xxx_messageInfo_GitRepoVolumeSource proto.InternalMessageInfo
     func (m *GlusterfsPersistentVolumeSource) Reset()      { *m = GlusterfsPersistentVolumeSource{} }
     func (*GlusterfsPersistentVolumeSource) ProtoMessage() {}
     func (*GlusterfsPersistentVolumeSource) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{66}
    +	return fileDescriptor_6c07b07c062484ab, []int{70}
     }
     func (m *GlusterfsPersistentVolumeSource) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -1928,7 +2040,7 @@ var xxx_messageInfo_GlusterfsPersistentVolumeSource proto.InternalMessageInfo
     func (m *GlusterfsVolumeSource) Reset()      { *m = GlusterfsVolumeSource{} }
     func (*GlusterfsVolumeSource) ProtoMessage() {}
     func (*GlusterfsVolumeSource) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{67}
    +	return fileDescriptor_6c07b07c062484ab, []int{71}
     }
     func (m *GlusterfsVolumeSource) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -1956,7 +2068,7 @@ var xxx_messageInfo_GlusterfsVolumeSource proto.InternalMessageInfo
     func (m *HTTPGetAction) Reset()      { *m = HTTPGetAction{} }
     func (*HTTPGetAction) ProtoMessage() {}
     func (*HTTPGetAction) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{68}
    +	return fileDescriptor_6c07b07c062484ab, []int{72}
     }
     func (m *HTTPGetAction) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -1984,7 +2096,7 @@ var xxx_messageInfo_HTTPGetAction proto.InternalMessageInfo
     func (m *HTTPHeader) Reset()      { *m = HTTPHeader{} }
     func (*HTTPHeader) ProtoMessage() {}
     func (*HTTPHeader) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{69}
    +	return fileDescriptor_6c07b07c062484ab, []int{73}
     }
     func (m *HTTPHeader) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -2012,7 +2124,7 @@ var xxx_messageInfo_HTTPHeader proto.InternalMessageInfo
     func (m *HostAlias) Reset()      { *m = HostAlias{} }
     func (*HostAlias) ProtoMessage() {}
     func (*HostAlias) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{70}
    +	return fileDescriptor_6c07b07c062484ab, []int{74}
     }
     func (m *HostAlias) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -2040,7 +2152,7 @@ var xxx_messageInfo_HostAlias proto.InternalMessageInfo
     func (m *HostIP) Reset()      { *m = HostIP{} }
     func (*HostIP) ProtoMessage() {}
     func (*HostIP) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{71}
    +	return fileDescriptor_6c07b07c062484ab, []int{75}
     }
     func (m *HostIP) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -2068,7 +2180,7 @@ var xxx_messageInfo_HostIP proto.InternalMessageInfo
     func (m *HostPathVolumeSource) Reset()      { *m = HostPathVolumeSource{} }
     func (*HostPathVolumeSource) ProtoMessage() {}
     func (*HostPathVolumeSource) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{72}
    +	return fileDescriptor_6c07b07c062484ab, []int{76}
     }
     func (m *HostPathVolumeSource) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -2096,7 +2208,7 @@ var xxx_messageInfo_HostPathVolumeSource proto.InternalMessageInfo
     func (m *ISCSIPersistentVolumeSource) Reset()      { *m = ISCSIPersistentVolumeSource{} }
     func (*ISCSIPersistentVolumeSource) ProtoMessage() {}
     func (*ISCSIPersistentVolumeSource) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{73}
    +	return fileDescriptor_6c07b07c062484ab, []int{77}
     }
     func (m *ISCSIPersistentVolumeSource) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -2124,7 +2236,7 @@ var xxx_messageInfo_ISCSIPersistentVolumeSource proto.InternalMessageInfo
     func (m *ISCSIVolumeSource) Reset()      { *m = ISCSIVolumeSource{} }
     func (*ISCSIVolumeSource) ProtoMessage() {}
     func (*ISCSIVolumeSource) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{74}
    +	return fileDescriptor_6c07b07c062484ab, []int{78}
     }
     func (m *ISCSIVolumeSource) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -2152,7 +2264,7 @@ var xxx_messageInfo_ISCSIVolumeSource proto.InternalMessageInfo
     func (m *ImageVolumeSource) Reset()      { *m = ImageVolumeSource{} }
     func (*ImageVolumeSource) ProtoMessage() {}
     func (*ImageVolumeSource) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{75}
    +	return fileDescriptor_6c07b07c062484ab, []int{79}
     }
     func (m *ImageVolumeSource) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -2180,7 +2292,7 @@ var xxx_messageInfo_ImageVolumeSource proto.InternalMessageInfo
     func (m *KeyToPath) Reset()      { *m = KeyToPath{} }
     func (*KeyToPath) ProtoMessage() {}
     func (*KeyToPath) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{76}
    +	return fileDescriptor_6c07b07c062484ab, []int{80}
     }
     func (m *KeyToPath) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -2208,7 +2320,7 @@ var xxx_messageInfo_KeyToPath proto.InternalMessageInfo
     func (m *Lifecycle) Reset()      { *m = Lifecycle{} }
     func (*Lifecycle) ProtoMessage() {}
     func (*Lifecycle) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{77}
    +	return fileDescriptor_6c07b07c062484ab, []int{81}
     }
     func (m *Lifecycle) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -2236,7 +2348,7 @@ var xxx_messageInfo_Lifecycle proto.InternalMessageInfo
     func (m *LifecycleHandler) Reset()      { *m = LifecycleHandler{} }
     func (*LifecycleHandler) ProtoMessage() {}
     func (*LifecycleHandler) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{78}
    +	return fileDescriptor_6c07b07c062484ab, []int{82}
     }
     func (m *LifecycleHandler) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -2264,7 +2376,7 @@ var xxx_messageInfo_LifecycleHandler proto.InternalMessageInfo
     func (m *LimitRange) Reset()      { *m = LimitRange{} }
     func (*LimitRange) ProtoMessage() {}
     func (*LimitRange) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{79}
    +	return fileDescriptor_6c07b07c062484ab, []int{83}
     }
     func (m *LimitRange) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -2292,7 +2404,7 @@ var xxx_messageInfo_LimitRange proto.InternalMessageInfo
     func (m *LimitRangeItem) Reset()      { *m = LimitRangeItem{} }
     func (*LimitRangeItem) ProtoMessage() {}
     func (*LimitRangeItem) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{80}
    +	return fileDescriptor_6c07b07c062484ab, []int{84}
     }
     func (m *LimitRangeItem) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -2320,7 +2432,7 @@ var xxx_messageInfo_LimitRangeItem proto.InternalMessageInfo
     func (m *LimitRangeList) Reset()      { *m = LimitRangeList{} }
     func (*LimitRangeList) ProtoMessage() {}
     func (*LimitRangeList) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{81}
    +	return fileDescriptor_6c07b07c062484ab, []int{85}
     }
     func (m *LimitRangeList) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -2348,7 +2460,7 @@ var xxx_messageInfo_LimitRangeList proto.InternalMessageInfo
     func (m *LimitRangeSpec) Reset()      { *m = LimitRangeSpec{} }
     func (*LimitRangeSpec) ProtoMessage() {}
     func (*LimitRangeSpec) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{82}
    +	return fileDescriptor_6c07b07c062484ab, []int{86}
     }
     func (m *LimitRangeSpec) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -2376,7 +2488,7 @@ var xxx_messageInfo_LimitRangeSpec proto.InternalMessageInfo
     func (m *LinuxContainerUser) Reset()      { *m = LinuxContainerUser{} }
     func (*LinuxContainerUser) ProtoMessage() {}
     func (*LinuxContainerUser) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{83}
    +	return fileDescriptor_6c07b07c062484ab, []int{87}
     }
     func (m *LinuxContainerUser) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -2404,7 +2516,7 @@ var xxx_messageInfo_LinuxContainerUser proto.InternalMessageInfo
     func (m *List) Reset()      { *m = List{} }
     func (*List) ProtoMessage() {}
     func (*List) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{84}
    +	return fileDescriptor_6c07b07c062484ab, []int{88}
     }
     func (m *List) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -2432,7 +2544,7 @@ var xxx_messageInfo_List proto.InternalMessageInfo
     func (m *LoadBalancerIngress) Reset()      { *m = LoadBalancerIngress{} }
     func (*LoadBalancerIngress) ProtoMessage() {}
     func (*LoadBalancerIngress) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{85}
    +	return fileDescriptor_6c07b07c062484ab, []int{89}
     }
     func (m *LoadBalancerIngress) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -2460,7 +2572,7 @@ var xxx_messageInfo_LoadBalancerIngress proto.InternalMessageInfo
     func (m *LoadBalancerStatus) Reset()      { *m = LoadBalancerStatus{} }
     func (*LoadBalancerStatus) ProtoMessage() {}
     func (*LoadBalancerStatus) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{86}
    +	return fileDescriptor_6c07b07c062484ab, []int{90}
     }
     func (m *LoadBalancerStatus) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -2488,7 +2600,7 @@ var xxx_messageInfo_LoadBalancerStatus proto.InternalMessageInfo
     func (m *LocalObjectReference) Reset()      { *m = LocalObjectReference{} }
     func (*LocalObjectReference) ProtoMessage() {}
     func (*LocalObjectReference) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{87}
    +	return fileDescriptor_6c07b07c062484ab, []int{91}
     }
     func (m *LocalObjectReference) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -2516,7 +2628,7 @@ var xxx_messageInfo_LocalObjectReference proto.InternalMessageInfo
     func (m *LocalVolumeSource) Reset()      { *m = LocalVolumeSource{} }
     func (*LocalVolumeSource) ProtoMessage() {}
     func (*LocalVolumeSource) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{88}
    +	return fileDescriptor_6c07b07c062484ab, []int{92}
     }
     func (m *LocalVolumeSource) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -2544,7 +2656,7 @@ var xxx_messageInfo_LocalVolumeSource proto.InternalMessageInfo
     func (m *ModifyVolumeStatus) Reset()      { *m = ModifyVolumeStatus{} }
     func (*ModifyVolumeStatus) ProtoMessage() {}
     func (*ModifyVolumeStatus) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{89}
    +	return fileDescriptor_6c07b07c062484ab, []int{93}
     }
     func (m *ModifyVolumeStatus) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -2572,7 +2684,7 @@ var xxx_messageInfo_ModifyVolumeStatus proto.InternalMessageInfo
     func (m *NFSVolumeSource) Reset()      { *m = NFSVolumeSource{} }
     func (*NFSVolumeSource) ProtoMessage() {}
     func (*NFSVolumeSource) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{90}
    +	return fileDescriptor_6c07b07c062484ab, []int{94}
     }
     func (m *NFSVolumeSource) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -2600,7 +2712,7 @@ var xxx_messageInfo_NFSVolumeSource proto.InternalMessageInfo
     func (m *Namespace) Reset()      { *m = Namespace{} }
     func (*Namespace) ProtoMessage() {}
     func (*Namespace) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{91}
    +	return fileDescriptor_6c07b07c062484ab, []int{95}
     }
     func (m *Namespace) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -2628,7 +2740,7 @@ var xxx_messageInfo_Namespace proto.InternalMessageInfo
     func (m *NamespaceCondition) Reset()      { *m = NamespaceCondition{} }
     func (*NamespaceCondition) ProtoMessage() {}
     func (*NamespaceCondition) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{92}
    +	return fileDescriptor_6c07b07c062484ab, []int{96}
     }
     func (m *NamespaceCondition) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -2656,7 +2768,7 @@ var xxx_messageInfo_NamespaceCondition proto.InternalMessageInfo
     func (m *NamespaceList) Reset()      { *m = NamespaceList{} }
     func (*NamespaceList) ProtoMessage() {}
     func (*NamespaceList) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{93}
    +	return fileDescriptor_6c07b07c062484ab, []int{97}
     }
     func (m *NamespaceList) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -2684,7 +2796,7 @@ var xxx_messageInfo_NamespaceList proto.InternalMessageInfo
     func (m *NamespaceSpec) Reset()      { *m = NamespaceSpec{} }
     func (*NamespaceSpec) ProtoMessage() {}
     func (*NamespaceSpec) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{94}
    +	return fileDescriptor_6c07b07c062484ab, []int{98}
     }
     func (m *NamespaceSpec) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -2712,7 +2824,7 @@ var xxx_messageInfo_NamespaceSpec proto.InternalMessageInfo
     func (m *NamespaceStatus) Reset()      { *m = NamespaceStatus{} }
     func (*NamespaceStatus) ProtoMessage() {}
     func (*NamespaceStatus) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{95}
    +	return fileDescriptor_6c07b07c062484ab, []int{99}
     }
     func (m *NamespaceStatus) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -2740,7 +2852,7 @@ var xxx_messageInfo_NamespaceStatus proto.InternalMessageInfo
     func (m *Node) Reset()      { *m = Node{} }
     func (*Node) ProtoMessage() {}
     func (*Node) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{96}
    +	return fileDescriptor_6c07b07c062484ab, []int{100}
     }
     func (m *Node) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -2768,7 +2880,7 @@ var xxx_messageInfo_Node proto.InternalMessageInfo
     func (m *NodeAddress) Reset()      { *m = NodeAddress{} }
     func (*NodeAddress) ProtoMessage() {}
     func (*NodeAddress) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{97}
    +	return fileDescriptor_6c07b07c062484ab, []int{101}
     }
     func (m *NodeAddress) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -2796,7 +2908,7 @@ var xxx_messageInfo_NodeAddress proto.InternalMessageInfo
     func (m *NodeAffinity) Reset()      { *m = NodeAffinity{} }
     func (*NodeAffinity) ProtoMessage() {}
     func (*NodeAffinity) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{98}
    +	return fileDescriptor_6c07b07c062484ab, []int{102}
     }
     func (m *NodeAffinity) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -2824,7 +2936,7 @@ var xxx_messageInfo_NodeAffinity proto.InternalMessageInfo
     func (m *NodeCondition) Reset()      { *m = NodeCondition{} }
     func (*NodeCondition) ProtoMessage() {}
     func (*NodeCondition) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{99}
    +	return fileDescriptor_6c07b07c062484ab, []int{103}
     }
     func (m *NodeCondition) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -2852,7 +2964,7 @@ var xxx_messageInfo_NodeCondition proto.InternalMessageInfo
     func (m *NodeConfigSource) Reset()      { *m = NodeConfigSource{} }
     func (*NodeConfigSource) ProtoMessage() {}
     func (*NodeConfigSource) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{100}
    +	return fileDescriptor_6c07b07c062484ab, []int{104}
     }
     func (m *NodeConfigSource) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -2880,7 +2992,7 @@ var xxx_messageInfo_NodeConfigSource proto.InternalMessageInfo
     func (m *NodeConfigStatus) Reset()      { *m = NodeConfigStatus{} }
     func (*NodeConfigStatus) ProtoMessage() {}
     func (*NodeConfigStatus) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{101}
    +	return fileDescriptor_6c07b07c062484ab, []int{105}
     }
     func (m *NodeConfigStatus) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -2908,7 +3020,7 @@ var xxx_messageInfo_NodeConfigStatus proto.InternalMessageInfo
     func (m *NodeDaemonEndpoints) Reset()      { *m = NodeDaemonEndpoints{} }
     func (*NodeDaemonEndpoints) ProtoMessage() {}
     func (*NodeDaemonEndpoints) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{102}
    +	return fileDescriptor_6c07b07c062484ab, []int{106}
     }
     func (m *NodeDaemonEndpoints) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -2936,7 +3048,7 @@ var xxx_messageInfo_NodeDaemonEndpoints proto.InternalMessageInfo
     func (m *NodeFeatures) Reset()      { *m = NodeFeatures{} }
     func (*NodeFeatures) ProtoMessage() {}
     func (*NodeFeatures) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{103}
    +	return fileDescriptor_6c07b07c062484ab, []int{107}
     }
     func (m *NodeFeatures) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -2964,7 +3076,7 @@ var xxx_messageInfo_NodeFeatures proto.InternalMessageInfo
     func (m *NodeList) Reset()      { *m = NodeList{} }
     func (*NodeList) ProtoMessage() {}
     func (*NodeList) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{104}
    +	return fileDescriptor_6c07b07c062484ab, []int{108}
     }
     func (m *NodeList) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -2992,7 +3104,7 @@ var xxx_messageInfo_NodeList proto.InternalMessageInfo
     func (m *NodeProxyOptions) Reset()      { *m = NodeProxyOptions{} }
     func (*NodeProxyOptions) ProtoMessage() {}
     func (*NodeProxyOptions) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{105}
    +	return fileDescriptor_6c07b07c062484ab, []int{109}
     }
     func (m *NodeProxyOptions) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -3020,7 +3132,7 @@ var xxx_messageInfo_NodeProxyOptions proto.InternalMessageInfo
     func (m *NodeRuntimeHandler) Reset()      { *m = NodeRuntimeHandler{} }
     func (*NodeRuntimeHandler) ProtoMessage() {}
     func (*NodeRuntimeHandler) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{106}
    +	return fileDescriptor_6c07b07c062484ab, []int{110}
     }
     func (m *NodeRuntimeHandler) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -3048,7 +3160,7 @@ var xxx_messageInfo_NodeRuntimeHandler proto.InternalMessageInfo
     func (m *NodeRuntimeHandlerFeatures) Reset()      { *m = NodeRuntimeHandlerFeatures{} }
     func (*NodeRuntimeHandlerFeatures) ProtoMessage() {}
     func (*NodeRuntimeHandlerFeatures) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{107}
    +	return fileDescriptor_6c07b07c062484ab, []int{111}
     }
     func (m *NodeRuntimeHandlerFeatures) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -3076,7 +3188,7 @@ var xxx_messageInfo_NodeRuntimeHandlerFeatures proto.InternalMessageInfo
     func (m *NodeSelector) Reset()      { *m = NodeSelector{} }
     func (*NodeSelector) ProtoMessage() {}
     func (*NodeSelector) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{108}
    +	return fileDescriptor_6c07b07c062484ab, []int{112}
     }
     func (m *NodeSelector) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -3104,7 +3216,7 @@ var xxx_messageInfo_NodeSelector proto.InternalMessageInfo
     func (m *NodeSelectorRequirement) Reset()      { *m = NodeSelectorRequirement{} }
     func (*NodeSelectorRequirement) ProtoMessage() {}
     func (*NodeSelectorRequirement) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{109}
    +	return fileDescriptor_6c07b07c062484ab, []int{113}
     }
     func (m *NodeSelectorRequirement) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -3132,7 +3244,7 @@ var xxx_messageInfo_NodeSelectorRequirement proto.InternalMessageInfo
     func (m *NodeSelectorTerm) Reset()      { *m = NodeSelectorTerm{} }
     func (*NodeSelectorTerm) ProtoMessage() {}
     func (*NodeSelectorTerm) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{110}
    +	return fileDescriptor_6c07b07c062484ab, []int{114}
     }
     func (m *NodeSelectorTerm) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -3160,7 +3272,7 @@ var xxx_messageInfo_NodeSelectorTerm proto.InternalMessageInfo
     func (m *NodeSpec) Reset()      { *m = NodeSpec{} }
     func (*NodeSpec) ProtoMessage() {}
     func (*NodeSpec) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{111}
    +	return fileDescriptor_6c07b07c062484ab, []int{115}
     }
     func (m *NodeSpec) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -3188,7 +3300,7 @@ var xxx_messageInfo_NodeSpec proto.InternalMessageInfo
     func (m *NodeStatus) Reset()      { *m = NodeStatus{} }
     func (*NodeStatus) ProtoMessage() {}
     func (*NodeStatus) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{112}
    +	return fileDescriptor_6c07b07c062484ab, []int{116}
     }
     func (m *NodeStatus) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -3213,10 +3325,38 @@ func (m *NodeStatus) XXX_DiscardUnknown() {
     
     var xxx_messageInfo_NodeStatus proto.InternalMessageInfo
     
    +func (m *NodeSwapStatus) Reset()      { *m = NodeSwapStatus{} }
    +func (*NodeSwapStatus) ProtoMessage() {}
    +func (*NodeSwapStatus) Descriptor() ([]byte, []int) {
    +	return fileDescriptor_6c07b07c062484ab, []int{117}
    +}
    +func (m *NodeSwapStatus) XXX_Unmarshal(b []byte) error {
    +	return m.Unmarshal(b)
    +}
    +func (m *NodeSwapStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    +	b = b[:cap(b)]
    +	n, err := m.MarshalToSizedBuffer(b)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return b[:n], nil
    +}
    +func (m *NodeSwapStatus) XXX_Merge(src proto.Message) {
    +	xxx_messageInfo_NodeSwapStatus.Merge(m, src)
    +}
    +func (m *NodeSwapStatus) XXX_Size() int {
    +	return m.Size()
    +}
    +func (m *NodeSwapStatus) XXX_DiscardUnknown() {
    +	xxx_messageInfo_NodeSwapStatus.DiscardUnknown(m)
    +}
    +
    +var xxx_messageInfo_NodeSwapStatus proto.InternalMessageInfo
    +
     func (m *NodeSystemInfo) Reset()      { *m = NodeSystemInfo{} }
     func (*NodeSystemInfo) ProtoMessage() {}
     func (*NodeSystemInfo) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{113}
    +	return fileDescriptor_6c07b07c062484ab, []int{118}
     }
     func (m *NodeSystemInfo) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -3244,7 +3384,7 @@ var xxx_messageInfo_NodeSystemInfo proto.InternalMessageInfo
     func (m *ObjectFieldSelector) Reset()      { *m = ObjectFieldSelector{} }
     func (*ObjectFieldSelector) ProtoMessage() {}
     func (*ObjectFieldSelector) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{114}
    +	return fileDescriptor_6c07b07c062484ab, []int{119}
     }
     func (m *ObjectFieldSelector) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -3272,7 +3412,7 @@ var xxx_messageInfo_ObjectFieldSelector proto.InternalMessageInfo
     func (m *ObjectReference) Reset()      { *m = ObjectReference{} }
     func (*ObjectReference) ProtoMessage() {}
     func (*ObjectReference) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{115}
    +	return fileDescriptor_6c07b07c062484ab, []int{120}
     }
     func (m *ObjectReference) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -3300,7 +3440,7 @@ var xxx_messageInfo_ObjectReference proto.InternalMessageInfo
     func (m *PersistentVolume) Reset()      { *m = PersistentVolume{} }
     func (*PersistentVolume) ProtoMessage() {}
     func (*PersistentVolume) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{116}
    +	return fileDescriptor_6c07b07c062484ab, []int{121}
     }
     func (m *PersistentVolume) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -3328,7 +3468,7 @@ var xxx_messageInfo_PersistentVolume proto.InternalMessageInfo
     func (m *PersistentVolumeClaim) Reset()      { *m = PersistentVolumeClaim{} }
     func (*PersistentVolumeClaim) ProtoMessage() {}
     func (*PersistentVolumeClaim) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{117}
    +	return fileDescriptor_6c07b07c062484ab, []int{122}
     }
     func (m *PersistentVolumeClaim) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -3356,7 +3496,7 @@ var xxx_messageInfo_PersistentVolumeClaim proto.InternalMessageInfo
     func (m *PersistentVolumeClaimCondition) Reset()      { *m = PersistentVolumeClaimCondition{} }
     func (*PersistentVolumeClaimCondition) ProtoMessage() {}
     func (*PersistentVolumeClaimCondition) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{118}
    +	return fileDescriptor_6c07b07c062484ab, []int{123}
     }
     func (m *PersistentVolumeClaimCondition) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -3384,7 +3524,7 @@ var xxx_messageInfo_PersistentVolumeClaimCondition proto.InternalMessageInfo
     func (m *PersistentVolumeClaimList) Reset()      { *m = PersistentVolumeClaimList{} }
     func (*PersistentVolumeClaimList) ProtoMessage() {}
     func (*PersistentVolumeClaimList) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{119}
    +	return fileDescriptor_6c07b07c062484ab, []int{124}
     }
     func (m *PersistentVolumeClaimList) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -3412,7 +3552,7 @@ var xxx_messageInfo_PersistentVolumeClaimList proto.InternalMessageInfo
     func (m *PersistentVolumeClaimSpec) Reset()      { *m = PersistentVolumeClaimSpec{} }
     func (*PersistentVolumeClaimSpec) ProtoMessage() {}
     func (*PersistentVolumeClaimSpec) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{120}
    +	return fileDescriptor_6c07b07c062484ab, []int{125}
     }
     func (m *PersistentVolumeClaimSpec) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -3440,7 +3580,7 @@ var xxx_messageInfo_PersistentVolumeClaimSpec proto.InternalMessageInfo
     func (m *PersistentVolumeClaimStatus) Reset()      { *m = PersistentVolumeClaimStatus{} }
     func (*PersistentVolumeClaimStatus) ProtoMessage() {}
     func (*PersistentVolumeClaimStatus) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{121}
    +	return fileDescriptor_6c07b07c062484ab, []int{126}
     }
     func (m *PersistentVolumeClaimStatus) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -3468,7 +3608,7 @@ var xxx_messageInfo_PersistentVolumeClaimStatus proto.InternalMessageInfo
     func (m *PersistentVolumeClaimTemplate) Reset()      { *m = PersistentVolumeClaimTemplate{} }
     func (*PersistentVolumeClaimTemplate) ProtoMessage() {}
     func (*PersistentVolumeClaimTemplate) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{122}
    +	return fileDescriptor_6c07b07c062484ab, []int{127}
     }
     func (m *PersistentVolumeClaimTemplate) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -3496,7 +3636,7 @@ var xxx_messageInfo_PersistentVolumeClaimTemplate proto.InternalMessageInfo
     func (m *PersistentVolumeClaimVolumeSource) Reset()      { *m = PersistentVolumeClaimVolumeSource{} }
     func (*PersistentVolumeClaimVolumeSource) ProtoMessage() {}
     func (*PersistentVolumeClaimVolumeSource) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{123}
    +	return fileDescriptor_6c07b07c062484ab, []int{128}
     }
     func (m *PersistentVolumeClaimVolumeSource) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -3524,7 +3664,7 @@ var xxx_messageInfo_PersistentVolumeClaimVolumeSource proto.InternalMessageInfo
     func (m *PersistentVolumeList) Reset()      { *m = PersistentVolumeList{} }
     func (*PersistentVolumeList) ProtoMessage() {}
     func (*PersistentVolumeList) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{124}
    +	return fileDescriptor_6c07b07c062484ab, []int{129}
     }
     func (m *PersistentVolumeList) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -3552,7 +3692,7 @@ var xxx_messageInfo_PersistentVolumeList proto.InternalMessageInfo
     func (m *PersistentVolumeSource) Reset()      { *m = PersistentVolumeSource{} }
     func (*PersistentVolumeSource) ProtoMessage() {}
     func (*PersistentVolumeSource) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{125}
    +	return fileDescriptor_6c07b07c062484ab, []int{130}
     }
     func (m *PersistentVolumeSource) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -3580,7 +3720,7 @@ var xxx_messageInfo_PersistentVolumeSource proto.InternalMessageInfo
     func (m *PersistentVolumeSpec) Reset()      { *m = PersistentVolumeSpec{} }
     func (*PersistentVolumeSpec) ProtoMessage() {}
     func (*PersistentVolumeSpec) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{126}
    +	return fileDescriptor_6c07b07c062484ab, []int{131}
     }
     func (m *PersistentVolumeSpec) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -3608,7 +3748,7 @@ var xxx_messageInfo_PersistentVolumeSpec proto.InternalMessageInfo
     func (m *PersistentVolumeStatus) Reset()      { *m = PersistentVolumeStatus{} }
     func (*PersistentVolumeStatus) ProtoMessage() {}
     func (*PersistentVolumeStatus) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{127}
    +	return fileDescriptor_6c07b07c062484ab, []int{132}
     }
     func (m *PersistentVolumeStatus) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -3636,7 +3776,7 @@ var xxx_messageInfo_PersistentVolumeStatus proto.InternalMessageInfo
     func (m *PhotonPersistentDiskVolumeSource) Reset()      { *m = PhotonPersistentDiskVolumeSource{} }
     func (*PhotonPersistentDiskVolumeSource) ProtoMessage() {}
     func (*PhotonPersistentDiskVolumeSource) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{128}
    +	return fileDescriptor_6c07b07c062484ab, []int{133}
     }
     func (m *PhotonPersistentDiskVolumeSource) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -3664,7 +3804,7 @@ var xxx_messageInfo_PhotonPersistentDiskVolumeSource proto.InternalMessageInfo
     func (m *Pod) Reset()      { *m = Pod{} }
     func (*Pod) ProtoMessage() {}
     func (*Pod) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{129}
    +	return fileDescriptor_6c07b07c062484ab, []int{134}
     }
     func (m *Pod) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -3692,7 +3832,7 @@ var xxx_messageInfo_Pod proto.InternalMessageInfo
     func (m *PodAffinity) Reset()      { *m = PodAffinity{} }
     func (*PodAffinity) ProtoMessage() {}
     func (*PodAffinity) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{130}
    +	return fileDescriptor_6c07b07c062484ab, []int{135}
     }
     func (m *PodAffinity) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -3720,7 +3860,7 @@ var xxx_messageInfo_PodAffinity proto.InternalMessageInfo
     func (m *PodAffinityTerm) Reset()      { *m = PodAffinityTerm{} }
     func (*PodAffinityTerm) ProtoMessage() {}
     func (*PodAffinityTerm) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{131}
    +	return fileDescriptor_6c07b07c062484ab, []int{136}
     }
     func (m *PodAffinityTerm) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -3748,7 +3888,7 @@ var xxx_messageInfo_PodAffinityTerm proto.InternalMessageInfo
     func (m *PodAntiAffinity) Reset()      { *m = PodAntiAffinity{} }
     func (*PodAntiAffinity) ProtoMessage() {}
     func (*PodAntiAffinity) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{132}
    +	return fileDescriptor_6c07b07c062484ab, []int{137}
     }
     func (m *PodAntiAffinity) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -3776,7 +3916,7 @@ var xxx_messageInfo_PodAntiAffinity proto.InternalMessageInfo
     func (m *PodAttachOptions) Reset()      { *m = PodAttachOptions{} }
     func (*PodAttachOptions) ProtoMessage() {}
     func (*PodAttachOptions) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{133}
    +	return fileDescriptor_6c07b07c062484ab, []int{138}
     }
     func (m *PodAttachOptions) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -3801,10 +3941,38 @@ func (m *PodAttachOptions) XXX_DiscardUnknown() {
     
     var xxx_messageInfo_PodAttachOptions proto.InternalMessageInfo
     
    +func (m *PodCertificateProjection) Reset()      { *m = PodCertificateProjection{} }
    +func (*PodCertificateProjection) ProtoMessage() {}
    +func (*PodCertificateProjection) Descriptor() ([]byte, []int) {
    +	return fileDescriptor_6c07b07c062484ab, []int{139}
    +}
    +func (m *PodCertificateProjection) XXX_Unmarshal(b []byte) error {
    +	return m.Unmarshal(b)
    +}
    +func (m *PodCertificateProjection) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    +	b = b[:cap(b)]
    +	n, err := m.MarshalToSizedBuffer(b)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return b[:n], nil
    +}
    +func (m *PodCertificateProjection) XXX_Merge(src proto.Message) {
    +	xxx_messageInfo_PodCertificateProjection.Merge(m, src)
    +}
    +func (m *PodCertificateProjection) XXX_Size() int {
    +	return m.Size()
    +}
    +func (m *PodCertificateProjection) XXX_DiscardUnknown() {
    +	xxx_messageInfo_PodCertificateProjection.DiscardUnknown(m)
    +}
    +
    +var xxx_messageInfo_PodCertificateProjection proto.InternalMessageInfo
    +
     func (m *PodCondition) Reset()      { *m = PodCondition{} }
     func (*PodCondition) ProtoMessage() {}
     func (*PodCondition) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{134}
    +	return fileDescriptor_6c07b07c062484ab, []int{140}
     }
     func (m *PodCondition) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -3832,7 +4000,7 @@ var xxx_messageInfo_PodCondition proto.InternalMessageInfo
     func (m *PodDNSConfig) Reset()      { *m = PodDNSConfig{} }
     func (*PodDNSConfig) ProtoMessage() {}
     func (*PodDNSConfig) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{135}
    +	return fileDescriptor_6c07b07c062484ab, []int{141}
     }
     func (m *PodDNSConfig) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -3860,7 +4028,7 @@ var xxx_messageInfo_PodDNSConfig proto.InternalMessageInfo
     func (m *PodDNSConfigOption) Reset()      { *m = PodDNSConfigOption{} }
     func (*PodDNSConfigOption) ProtoMessage() {}
     func (*PodDNSConfigOption) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{136}
    +	return fileDescriptor_6c07b07c062484ab, []int{142}
     }
     func (m *PodDNSConfigOption) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -3888,7 +4056,7 @@ var xxx_messageInfo_PodDNSConfigOption proto.InternalMessageInfo
     func (m *PodExecOptions) Reset()      { *m = PodExecOptions{} }
     func (*PodExecOptions) ProtoMessage() {}
     func (*PodExecOptions) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{137}
    +	return fileDescriptor_6c07b07c062484ab, []int{143}
     }
     func (m *PodExecOptions) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -3913,10 +4081,38 @@ func (m *PodExecOptions) XXX_DiscardUnknown() {
     
     var xxx_messageInfo_PodExecOptions proto.InternalMessageInfo
     
    +func (m *PodExtendedResourceClaimStatus) Reset()      { *m = PodExtendedResourceClaimStatus{} }
    +func (*PodExtendedResourceClaimStatus) ProtoMessage() {}
    +func (*PodExtendedResourceClaimStatus) Descriptor() ([]byte, []int) {
    +	return fileDescriptor_6c07b07c062484ab, []int{144}
    +}
    +func (m *PodExtendedResourceClaimStatus) XXX_Unmarshal(b []byte) error {
    +	return m.Unmarshal(b)
    +}
    +func (m *PodExtendedResourceClaimStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    +	b = b[:cap(b)]
    +	n, err := m.MarshalToSizedBuffer(b)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return b[:n], nil
    +}
    +func (m *PodExtendedResourceClaimStatus) XXX_Merge(src proto.Message) {
    +	xxx_messageInfo_PodExtendedResourceClaimStatus.Merge(m, src)
    +}
    +func (m *PodExtendedResourceClaimStatus) XXX_Size() int {
    +	return m.Size()
    +}
    +func (m *PodExtendedResourceClaimStatus) XXX_DiscardUnknown() {
    +	xxx_messageInfo_PodExtendedResourceClaimStatus.DiscardUnknown(m)
    +}
    +
    +var xxx_messageInfo_PodExtendedResourceClaimStatus proto.InternalMessageInfo
    +
     func (m *PodIP) Reset()      { *m = PodIP{} }
     func (*PodIP) ProtoMessage() {}
     func (*PodIP) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{138}
    +	return fileDescriptor_6c07b07c062484ab, []int{145}
     }
     func (m *PodIP) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -3944,7 +4140,7 @@ var xxx_messageInfo_PodIP proto.InternalMessageInfo
     func (m *PodList) Reset()      { *m = PodList{} }
     func (*PodList) ProtoMessage() {}
     func (*PodList) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{139}
    +	return fileDescriptor_6c07b07c062484ab, []int{146}
     }
     func (m *PodList) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -3972,7 +4168,7 @@ var xxx_messageInfo_PodList proto.InternalMessageInfo
     func (m *PodLogOptions) Reset()      { *m = PodLogOptions{} }
     func (*PodLogOptions) ProtoMessage() {}
     func (*PodLogOptions) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{140}
    +	return fileDescriptor_6c07b07c062484ab, []int{147}
     }
     func (m *PodLogOptions) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -4000,7 +4196,7 @@ var xxx_messageInfo_PodLogOptions proto.InternalMessageInfo
     func (m *PodOS) Reset()      { *m = PodOS{} }
     func (*PodOS) ProtoMessage() {}
     func (*PodOS) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{141}
    +	return fileDescriptor_6c07b07c062484ab, []int{148}
     }
     func (m *PodOS) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -4028,7 +4224,7 @@ var xxx_messageInfo_PodOS proto.InternalMessageInfo
     func (m *PodPortForwardOptions) Reset()      { *m = PodPortForwardOptions{} }
     func (*PodPortForwardOptions) ProtoMessage() {}
     func (*PodPortForwardOptions) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{142}
    +	return fileDescriptor_6c07b07c062484ab, []int{149}
     }
     func (m *PodPortForwardOptions) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -4056,7 +4252,7 @@ var xxx_messageInfo_PodPortForwardOptions proto.InternalMessageInfo
     func (m *PodProxyOptions) Reset()      { *m = PodProxyOptions{} }
     func (*PodProxyOptions) ProtoMessage() {}
     func (*PodProxyOptions) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{143}
    +	return fileDescriptor_6c07b07c062484ab, []int{150}
     }
     func (m *PodProxyOptions) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -4084,7 +4280,7 @@ var xxx_messageInfo_PodProxyOptions proto.InternalMessageInfo
     func (m *PodReadinessGate) Reset()      { *m = PodReadinessGate{} }
     func (*PodReadinessGate) ProtoMessage() {}
     func (*PodReadinessGate) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{144}
    +	return fileDescriptor_6c07b07c062484ab, []int{151}
     }
     func (m *PodReadinessGate) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -4112,7 +4308,7 @@ var xxx_messageInfo_PodReadinessGate proto.InternalMessageInfo
     func (m *PodResourceClaim) Reset()      { *m = PodResourceClaim{} }
     func (*PodResourceClaim) ProtoMessage() {}
     func (*PodResourceClaim) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{145}
    +	return fileDescriptor_6c07b07c062484ab, []int{152}
     }
     func (m *PodResourceClaim) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -4140,7 +4336,7 @@ var xxx_messageInfo_PodResourceClaim proto.InternalMessageInfo
     func (m *PodResourceClaimStatus) Reset()      { *m = PodResourceClaimStatus{} }
     func (*PodResourceClaimStatus) ProtoMessage() {}
     func (*PodResourceClaimStatus) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{146}
    +	return fileDescriptor_6c07b07c062484ab, []int{153}
     }
     func (m *PodResourceClaimStatus) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -4168,7 +4364,7 @@ var xxx_messageInfo_PodResourceClaimStatus proto.InternalMessageInfo
     func (m *PodSchedulingGate) Reset()      { *m = PodSchedulingGate{} }
     func (*PodSchedulingGate) ProtoMessage() {}
     func (*PodSchedulingGate) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{147}
    +	return fileDescriptor_6c07b07c062484ab, []int{154}
     }
     func (m *PodSchedulingGate) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -4196,7 +4392,7 @@ var xxx_messageInfo_PodSchedulingGate proto.InternalMessageInfo
     func (m *PodSecurityContext) Reset()      { *m = PodSecurityContext{} }
     func (*PodSecurityContext) ProtoMessage() {}
     func (*PodSecurityContext) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{148}
    +	return fileDescriptor_6c07b07c062484ab, []int{155}
     }
     func (m *PodSecurityContext) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -4224,7 +4420,7 @@ var xxx_messageInfo_PodSecurityContext proto.InternalMessageInfo
     func (m *PodSignature) Reset()      { *m = PodSignature{} }
     func (*PodSignature) ProtoMessage() {}
     func (*PodSignature) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{149}
    +	return fileDescriptor_6c07b07c062484ab, []int{156}
     }
     func (m *PodSignature) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -4252,7 +4448,7 @@ var xxx_messageInfo_PodSignature proto.InternalMessageInfo
     func (m *PodSpec) Reset()      { *m = PodSpec{} }
     func (*PodSpec) ProtoMessage() {}
     func (*PodSpec) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{150}
    +	return fileDescriptor_6c07b07c062484ab, []int{157}
     }
     func (m *PodSpec) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -4280,7 +4476,7 @@ var xxx_messageInfo_PodSpec proto.InternalMessageInfo
     func (m *PodStatus) Reset()      { *m = PodStatus{} }
     func (*PodStatus) ProtoMessage() {}
     func (*PodStatus) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{151}
    +	return fileDescriptor_6c07b07c062484ab, []int{158}
     }
     func (m *PodStatus) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -4308,7 +4504,7 @@ var xxx_messageInfo_PodStatus proto.InternalMessageInfo
     func (m *PodStatusResult) Reset()      { *m = PodStatusResult{} }
     func (*PodStatusResult) ProtoMessage() {}
     func (*PodStatusResult) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{152}
    +	return fileDescriptor_6c07b07c062484ab, []int{159}
     }
     func (m *PodStatusResult) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -4336,7 +4532,7 @@ var xxx_messageInfo_PodStatusResult proto.InternalMessageInfo
     func (m *PodTemplate) Reset()      { *m = PodTemplate{} }
     func (*PodTemplate) ProtoMessage() {}
     func (*PodTemplate) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{153}
    +	return fileDescriptor_6c07b07c062484ab, []int{160}
     }
     func (m *PodTemplate) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -4364,7 +4560,7 @@ var xxx_messageInfo_PodTemplate proto.InternalMessageInfo
     func (m *PodTemplateList) Reset()      { *m = PodTemplateList{} }
     func (*PodTemplateList) ProtoMessage() {}
     func (*PodTemplateList) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{154}
    +	return fileDescriptor_6c07b07c062484ab, []int{161}
     }
     func (m *PodTemplateList) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -4392,7 +4588,7 @@ var xxx_messageInfo_PodTemplateList proto.InternalMessageInfo
     func (m *PodTemplateSpec) Reset()      { *m = PodTemplateSpec{} }
     func (*PodTemplateSpec) ProtoMessage() {}
     func (*PodTemplateSpec) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{155}
    +	return fileDescriptor_6c07b07c062484ab, []int{162}
     }
     func (m *PodTemplateSpec) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -4420,7 +4616,7 @@ var xxx_messageInfo_PodTemplateSpec proto.InternalMessageInfo
     func (m *PortStatus) Reset()      { *m = PortStatus{} }
     func (*PortStatus) ProtoMessage() {}
     func (*PortStatus) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{156}
    +	return fileDescriptor_6c07b07c062484ab, []int{163}
     }
     func (m *PortStatus) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -4448,7 +4644,7 @@ var xxx_messageInfo_PortStatus proto.InternalMessageInfo
     func (m *PortworxVolumeSource) Reset()      { *m = PortworxVolumeSource{} }
     func (*PortworxVolumeSource) ProtoMessage() {}
     func (*PortworxVolumeSource) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{157}
    +	return fileDescriptor_6c07b07c062484ab, []int{164}
     }
     func (m *PortworxVolumeSource) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -4476,7 +4672,7 @@ var xxx_messageInfo_PortworxVolumeSource proto.InternalMessageInfo
     func (m *Preconditions) Reset()      { *m = Preconditions{} }
     func (*Preconditions) ProtoMessage() {}
     func (*Preconditions) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{158}
    +	return fileDescriptor_6c07b07c062484ab, []int{165}
     }
     func (m *Preconditions) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -4504,7 +4700,7 @@ var xxx_messageInfo_Preconditions proto.InternalMessageInfo
     func (m *PreferAvoidPodsEntry) Reset()      { *m = PreferAvoidPodsEntry{} }
     func (*PreferAvoidPodsEntry) ProtoMessage() {}
     func (*PreferAvoidPodsEntry) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{159}
    +	return fileDescriptor_6c07b07c062484ab, []int{166}
     }
     func (m *PreferAvoidPodsEntry) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -4532,7 +4728,7 @@ var xxx_messageInfo_PreferAvoidPodsEntry proto.InternalMessageInfo
     func (m *PreferredSchedulingTerm) Reset()      { *m = PreferredSchedulingTerm{} }
     func (*PreferredSchedulingTerm) ProtoMessage() {}
     func (*PreferredSchedulingTerm) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{160}
    +	return fileDescriptor_6c07b07c062484ab, []int{167}
     }
     func (m *PreferredSchedulingTerm) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -4560,7 +4756,7 @@ var xxx_messageInfo_PreferredSchedulingTerm proto.InternalMessageInfo
     func (m *Probe) Reset()      { *m = Probe{} }
     func (*Probe) ProtoMessage() {}
     func (*Probe) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{161}
    +	return fileDescriptor_6c07b07c062484ab, []int{168}
     }
     func (m *Probe) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -4588,7 +4784,7 @@ var xxx_messageInfo_Probe proto.InternalMessageInfo
     func (m *ProbeHandler) Reset()      { *m = ProbeHandler{} }
     func (*ProbeHandler) ProtoMessage() {}
     func (*ProbeHandler) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{162}
    +	return fileDescriptor_6c07b07c062484ab, []int{169}
     }
     func (m *ProbeHandler) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -4616,7 +4812,7 @@ var xxx_messageInfo_ProbeHandler proto.InternalMessageInfo
     func (m *ProjectedVolumeSource) Reset()      { *m = ProjectedVolumeSource{} }
     func (*ProjectedVolumeSource) ProtoMessage() {}
     func (*ProjectedVolumeSource) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{163}
    +	return fileDescriptor_6c07b07c062484ab, []int{170}
     }
     func (m *ProjectedVolumeSource) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -4644,7 +4840,7 @@ var xxx_messageInfo_ProjectedVolumeSource proto.InternalMessageInfo
     func (m *QuobyteVolumeSource) Reset()      { *m = QuobyteVolumeSource{} }
     func (*QuobyteVolumeSource) ProtoMessage() {}
     func (*QuobyteVolumeSource) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{164}
    +	return fileDescriptor_6c07b07c062484ab, []int{171}
     }
     func (m *QuobyteVolumeSource) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -4672,7 +4868,7 @@ var xxx_messageInfo_QuobyteVolumeSource proto.InternalMessageInfo
     func (m *RBDPersistentVolumeSource) Reset()      { *m = RBDPersistentVolumeSource{} }
     func (*RBDPersistentVolumeSource) ProtoMessage() {}
     func (*RBDPersistentVolumeSource) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{165}
    +	return fileDescriptor_6c07b07c062484ab, []int{172}
     }
     func (m *RBDPersistentVolumeSource) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -4700,7 +4896,7 @@ var xxx_messageInfo_RBDPersistentVolumeSource proto.InternalMessageInfo
     func (m *RBDVolumeSource) Reset()      { *m = RBDVolumeSource{} }
     func (*RBDVolumeSource) ProtoMessage() {}
     func (*RBDVolumeSource) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{166}
    +	return fileDescriptor_6c07b07c062484ab, []int{173}
     }
     func (m *RBDVolumeSource) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -4728,7 +4924,7 @@ var xxx_messageInfo_RBDVolumeSource proto.InternalMessageInfo
     func (m *RangeAllocation) Reset()      { *m = RangeAllocation{} }
     func (*RangeAllocation) ProtoMessage() {}
     func (*RangeAllocation) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{167}
    +	return fileDescriptor_6c07b07c062484ab, []int{174}
     }
     func (m *RangeAllocation) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -4756,7 +4952,7 @@ var xxx_messageInfo_RangeAllocation proto.InternalMessageInfo
     func (m *ReplicationController) Reset()      { *m = ReplicationController{} }
     func (*ReplicationController) ProtoMessage() {}
     func (*ReplicationController) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{168}
    +	return fileDescriptor_6c07b07c062484ab, []int{175}
     }
     func (m *ReplicationController) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -4784,7 +4980,7 @@ var xxx_messageInfo_ReplicationController proto.InternalMessageInfo
     func (m *ReplicationControllerCondition) Reset()      { *m = ReplicationControllerCondition{} }
     func (*ReplicationControllerCondition) ProtoMessage() {}
     func (*ReplicationControllerCondition) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{169}
    +	return fileDescriptor_6c07b07c062484ab, []int{176}
     }
     func (m *ReplicationControllerCondition) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -4812,7 +5008,7 @@ var xxx_messageInfo_ReplicationControllerCondition proto.InternalMessageInfo
     func (m *ReplicationControllerList) Reset()      { *m = ReplicationControllerList{} }
     func (*ReplicationControllerList) ProtoMessage() {}
     func (*ReplicationControllerList) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{170}
    +	return fileDescriptor_6c07b07c062484ab, []int{177}
     }
     func (m *ReplicationControllerList) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -4840,7 +5036,7 @@ var xxx_messageInfo_ReplicationControllerList proto.InternalMessageInfo
     func (m *ReplicationControllerSpec) Reset()      { *m = ReplicationControllerSpec{} }
     func (*ReplicationControllerSpec) ProtoMessage() {}
     func (*ReplicationControllerSpec) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{171}
    +	return fileDescriptor_6c07b07c062484ab, []int{178}
     }
     func (m *ReplicationControllerSpec) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -4868,7 +5064,7 @@ var xxx_messageInfo_ReplicationControllerSpec proto.InternalMessageInfo
     func (m *ReplicationControllerStatus) Reset()      { *m = ReplicationControllerStatus{} }
     func (*ReplicationControllerStatus) ProtoMessage() {}
     func (*ReplicationControllerStatus) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{172}
    +	return fileDescriptor_6c07b07c062484ab, []int{179}
     }
     func (m *ReplicationControllerStatus) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -4896,7 +5092,7 @@ var xxx_messageInfo_ReplicationControllerStatus proto.InternalMessageInfo
     func (m *ResourceClaim) Reset()      { *m = ResourceClaim{} }
     func (*ResourceClaim) ProtoMessage() {}
     func (*ResourceClaim) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{173}
    +	return fileDescriptor_6c07b07c062484ab, []int{180}
     }
     func (m *ResourceClaim) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -4924,7 +5120,7 @@ var xxx_messageInfo_ResourceClaim proto.InternalMessageInfo
     func (m *ResourceFieldSelector) Reset()      { *m = ResourceFieldSelector{} }
     func (*ResourceFieldSelector) ProtoMessage() {}
     func (*ResourceFieldSelector) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{174}
    +	return fileDescriptor_6c07b07c062484ab, []int{181}
     }
     func (m *ResourceFieldSelector) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -4952,7 +5148,7 @@ var xxx_messageInfo_ResourceFieldSelector proto.InternalMessageInfo
     func (m *ResourceHealth) Reset()      { *m = ResourceHealth{} }
     func (*ResourceHealth) ProtoMessage() {}
     func (*ResourceHealth) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{175}
    +	return fileDescriptor_6c07b07c062484ab, []int{182}
     }
     func (m *ResourceHealth) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -4980,7 +5176,7 @@ var xxx_messageInfo_ResourceHealth proto.InternalMessageInfo
     func (m *ResourceQuota) Reset()      { *m = ResourceQuota{} }
     func (*ResourceQuota) ProtoMessage() {}
     func (*ResourceQuota) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{176}
    +	return fileDescriptor_6c07b07c062484ab, []int{183}
     }
     func (m *ResourceQuota) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -5008,7 +5204,7 @@ var xxx_messageInfo_ResourceQuota proto.InternalMessageInfo
     func (m *ResourceQuotaList) Reset()      { *m = ResourceQuotaList{} }
     func (*ResourceQuotaList) ProtoMessage() {}
     func (*ResourceQuotaList) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{177}
    +	return fileDescriptor_6c07b07c062484ab, []int{184}
     }
     func (m *ResourceQuotaList) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -5036,7 +5232,7 @@ var xxx_messageInfo_ResourceQuotaList proto.InternalMessageInfo
     func (m *ResourceQuotaSpec) Reset()      { *m = ResourceQuotaSpec{} }
     func (*ResourceQuotaSpec) ProtoMessage() {}
     func (*ResourceQuotaSpec) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{178}
    +	return fileDescriptor_6c07b07c062484ab, []int{185}
     }
     func (m *ResourceQuotaSpec) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -5064,7 +5260,7 @@ var xxx_messageInfo_ResourceQuotaSpec proto.InternalMessageInfo
     func (m *ResourceQuotaStatus) Reset()      { *m = ResourceQuotaStatus{} }
     func (*ResourceQuotaStatus) ProtoMessage() {}
     func (*ResourceQuotaStatus) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{179}
    +	return fileDescriptor_6c07b07c062484ab, []int{186}
     }
     func (m *ResourceQuotaStatus) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -5092,7 +5288,7 @@ var xxx_messageInfo_ResourceQuotaStatus proto.InternalMessageInfo
     func (m *ResourceRequirements) Reset()      { *m = ResourceRequirements{} }
     func (*ResourceRequirements) ProtoMessage() {}
     func (*ResourceRequirements) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{180}
    +	return fileDescriptor_6c07b07c062484ab, []int{187}
     }
     func (m *ResourceRequirements) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -5120,7 +5316,7 @@ var xxx_messageInfo_ResourceRequirements proto.InternalMessageInfo
     func (m *ResourceStatus) Reset()      { *m = ResourceStatus{} }
     func (*ResourceStatus) ProtoMessage() {}
     func (*ResourceStatus) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{181}
    +	return fileDescriptor_6c07b07c062484ab, []int{188}
     }
     func (m *ResourceStatus) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -5148,7 +5344,7 @@ var xxx_messageInfo_ResourceStatus proto.InternalMessageInfo
     func (m *SELinuxOptions) Reset()      { *m = SELinuxOptions{} }
     func (*SELinuxOptions) ProtoMessage() {}
     func (*SELinuxOptions) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{182}
    +	return fileDescriptor_6c07b07c062484ab, []int{189}
     }
     func (m *SELinuxOptions) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -5176,7 +5372,7 @@ var xxx_messageInfo_SELinuxOptions proto.InternalMessageInfo
     func (m *ScaleIOPersistentVolumeSource) Reset()      { *m = ScaleIOPersistentVolumeSource{} }
     func (*ScaleIOPersistentVolumeSource) ProtoMessage() {}
     func (*ScaleIOPersistentVolumeSource) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{183}
    +	return fileDescriptor_6c07b07c062484ab, []int{190}
     }
     func (m *ScaleIOPersistentVolumeSource) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -5204,7 +5400,7 @@ var xxx_messageInfo_ScaleIOPersistentVolumeSource proto.InternalMessageInfo
     func (m *ScaleIOVolumeSource) Reset()      { *m = ScaleIOVolumeSource{} }
     func (*ScaleIOVolumeSource) ProtoMessage() {}
     func (*ScaleIOVolumeSource) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{184}
    +	return fileDescriptor_6c07b07c062484ab, []int{191}
     }
     func (m *ScaleIOVolumeSource) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -5232,7 +5428,7 @@ var xxx_messageInfo_ScaleIOVolumeSource proto.InternalMessageInfo
     func (m *ScopeSelector) Reset()      { *m = ScopeSelector{} }
     func (*ScopeSelector) ProtoMessage() {}
     func (*ScopeSelector) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{185}
    +	return fileDescriptor_6c07b07c062484ab, []int{192}
     }
     func (m *ScopeSelector) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -5260,7 +5456,7 @@ var xxx_messageInfo_ScopeSelector proto.InternalMessageInfo
     func (m *ScopedResourceSelectorRequirement) Reset()      { *m = ScopedResourceSelectorRequirement{} }
     func (*ScopedResourceSelectorRequirement) ProtoMessage() {}
     func (*ScopedResourceSelectorRequirement) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{186}
    +	return fileDescriptor_6c07b07c062484ab, []int{193}
     }
     func (m *ScopedResourceSelectorRequirement) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -5288,7 +5484,7 @@ var xxx_messageInfo_ScopedResourceSelectorRequirement proto.InternalMessageInfo
     func (m *SeccompProfile) Reset()      { *m = SeccompProfile{} }
     func (*SeccompProfile) ProtoMessage() {}
     func (*SeccompProfile) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{187}
    +	return fileDescriptor_6c07b07c062484ab, []int{194}
     }
     func (m *SeccompProfile) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -5316,7 +5512,7 @@ var xxx_messageInfo_SeccompProfile proto.InternalMessageInfo
     func (m *Secret) Reset()      { *m = Secret{} }
     func (*Secret) ProtoMessage() {}
     func (*Secret) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{188}
    +	return fileDescriptor_6c07b07c062484ab, []int{195}
     }
     func (m *Secret) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -5344,7 +5540,7 @@ var xxx_messageInfo_Secret proto.InternalMessageInfo
     func (m *SecretEnvSource) Reset()      { *m = SecretEnvSource{} }
     func (*SecretEnvSource) ProtoMessage() {}
     func (*SecretEnvSource) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{189}
    +	return fileDescriptor_6c07b07c062484ab, []int{196}
     }
     func (m *SecretEnvSource) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -5372,7 +5568,7 @@ var xxx_messageInfo_SecretEnvSource proto.InternalMessageInfo
     func (m *SecretKeySelector) Reset()      { *m = SecretKeySelector{} }
     func (*SecretKeySelector) ProtoMessage() {}
     func (*SecretKeySelector) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{190}
    +	return fileDescriptor_6c07b07c062484ab, []int{197}
     }
     func (m *SecretKeySelector) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -5400,7 +5596,7 @@ var xxx_messageInfo_SecretKeySelector proto.InternalMessageInfo
     func (m *SecretList) Reset()      { *m = SecretList{} }
     func (*SecretList) ProtoMessage() {}
     func (*SecretList) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{191}
    +	return fileDescriptor_6c07b07c062484ab, []int{198}
     }
     func (m *SecretList) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -5428,7 +5624,7 @@ var xxx_messageInfo_SecretList proto.InternalMessageInfo
     func (m *SecretProjection) Reset()      { *m = SecretProjection{} }
     func (*SecretProjection) ProtoMessage() {}
     func (*SecretProjection) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{192}
    +	return fileDescriptor_6c07b07c062484ab, []int{199}
     }
     func (m *SecretProjection) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -5456,7 +5652,7 @@ var xxx_messageInfo_SecretProjection proto.InternalMessageInfo
     func (m *SecretReference) Reset()      { *m = SecretReference{} }
     func (*SecretReference) ProtoMessage() {}
     func (*SecretReference) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{193}
    +	return fileDescriptor_6c07b07c062484ab, []int{200}
     }
     func (m *SecretReference) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -5484,7 +5680,7 @@ var xxx_messageInfo_SecretReference proto.InternalMessageInfo
     func (m *SecretVolumeSource) Reset()      { *m = SecretVolumeSource{} }
     func (*SecretVolumeSource) ProtoMessage() {}
     func (*SecretVolumeSource) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{194}
    +	return fileDescriptor_6c07b07c062484ab, []int{201}
     }
     func (m *SecretVolumeSource) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -5512,7 +5708,7 @@ var xxx_messageInfo_SecretVolumeSource proto.InternalMessageInfo
     func (m *SecurityContext) Reset()      { *m = SecurityContext{} }
     func (*SecurityContext) ProtoMessage() {}
     func (*SecurityContext) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{195}
    +	return fileDescriptor_6c07b07c062484ab, []int{202}
     }
     func (m *SecurityContext) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -5540,7 +5736,7 @@ var xxx_messageInfo_SecurityContext proto.InternalMessageInfo
     func (m *SerializedReference) Reset()      { *m = SerializedReference{} }
     func (*SerializedReference) ProtoMessage() {}
     func (*SerializedReference) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{196}
    +	return fileDescriptor_6c07b07c062484ab, []int{203}
     }
     func (m *SerializedReference) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -5568,7 +5764,7 @@ var xxx_messageInfo_SerializedReference proto.InternalMessageInfo
     func (m *Service) Reset()      { *m = Service{} }
     func (*Service) ProtoMessage() {}
     func (*Service) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{197}
    +	return fileDescriptor_6c07b07c062484ab, []int{204}
     }
     func (m *Service) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -5596,7 +5792,7 @@ var xxx_messageInfo_Service proto.InternalMessageInfo
     func (m *ServiceAccount) Reset()      { *m = ServiceAccount{} }
     func (*ServiceAccount) ProtoMessage() {}
     func (*ServiceAccount) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{198}
    +	return fileDescriptor_6c07b07c062484ab, []int{205}
     }
     func (m *ServiceAccount) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -5624,7 +5820,7 @@ var xxx_messageInfo_ServiceAccount proto.InternalMessageInfo
     func (m *ServiceAccountList) Reset()      { *m = ServiceAccountList{} }
     func (*ServiceAccountList) ProtoMessage() {}
     func (*ServiceAccountList) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{199}
    +	return fileDescriptor_6c07b07c062484ab, []int{206}
     }
     func (m *ServiceAccountList) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -5652,7 +5848,7 @@ var xxx_messageInfo_ServiceAccountList proto.InternalMessageInfo
     func (m *ServiceAccountTokenProjection) Reset()      { *m = ServiceAccountTokenProjection{} }
     func (*ServiceAccountTokenProjection) ProtoMessage() {}
     func (*ServiceAccountTokenProjection) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{200}
    +	return fileDescriptor_6c07b07c062484ab, []int{207}
     }
     func (m *ServiceAccountTokenProjection) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -5680,7 +5876,7 @@ var xxx_messageInfo_ServiceAccountTokenProjection proto.InternalMessageInfo
     func (m *ServiceList) Reset()      { *m = ServiceList{} }
     func (*ServiceList) ProtoMessage() {}
     func (*ServiceList) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{201}
    +	return fileDescriptor_6c07b07c062484ab, []int{208}
     }
     func (m *ServiceList) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -5708,7 +5904,7 @@ var xxx_messageInfo_ServiceList proto.InternalMessageInfo
     func (m *ServicePort) Reset()      { *m = ServicePort{} }
     func (*ServicePort) ProtoMessage() {}
     func (*ServicePort) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{202}
    +	return fileDescriptor_6c07b07c062484ab, []int{209}
     }
     func (m *ServicePort) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -5736,7 +5932,7 @@ var xxx_messageInfo_ServicePort proto.InternalMessageInfo
     func (m *ServiceProxyOptions) Reset()      { *m = ServiceProxyOptions{} }
     func (*ServiceProxyOptions) ProtoMessage() {}
     func (*ServiceProxyOptions) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{203}
    +	return fileDescriptor_6c07b07c062484ab, []int{210}
     }
     func (m *ServiceProxyOptions) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -5764,7 +5960,7 @@ var xxx_messageInfo_ServiceProxyOptions proto.InternalMessageInfo
     func (m *ServiceSpec) Reset()      { *m = ServiceSpec{} }
     func (*ServiceSpec) ProtoMessage() {}
     func (*ServiceSpec) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{204}
    +	return fileDescriptor_6c07b07c062484ab, []int{211}
     }
     func (m *ServiceSpec) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -5792,7 +5988,7 @@ var xxx_messageInfo_ServiceSpec proto.InternalMessageInfo
     func (m *ServiceStatus) Reset()      { *m = ServiceStatus{} }
     func (*ServiceStatus) ProtoMessage() {}
     func (*ServiceStatus) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{205}
    +	return fileDescriptor_6c07b07c062484ab, []int{212}
     }
     func (m *ServiceStatus) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -5820,7 +6016,7 @@ var xxx_messageInfo_ServiceStatus proto.InternalMessageInfo
     func (m *SessionAffinityConfig) Reset()      { *m = SessionAffinityConfig{} }
     func (*SessionAffinityConfig) ProtoMessage() {}
     func (*SessionAffinityConfig) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{206}
    +	return fileDescriptor_6c07b07c062484ab, []int{213}
     }
     func (m *SessionAffinityConfig) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -5848,7 +6044,7 @@ var xxx_messageInfo_SessionAffinityConfig proto.InternalMessageInfo
     func (m *SleepAction) Reset()      { *m = SleepAction{} }
     func (*SleepAction) ProtoMessage() {}
     func (*SleepAction) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{207}
    +	return fileDescriptor_6c07b07c062484ab, []int{214}
     }
     func (m *SleepAction) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -5876,7 +6072,7 @@ var xxx_messageInfo_SleepAction proto.InternalMessageInfo
     func (m *StorageOSPersistentVolumeSource) Reset()      { *m = StorageOSPersistentVolumeSource{} }
     func (*StorageOSPersistentVolumeSource) ProtoMessage() {}
     func (*StorageOSPersistentVolumeSource) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{208}
    +	return fileDescriptor_6c07b07c062484ab, []int{215}
     }
     func (m *StorageOSPersistentVolumeSource) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -5904,7 +6100,7 @@ var xxx_messageInfo_StorageOSPersistentVolumeSource proto.InternalMessageInfo
     func (m *StorageOSVolumeSource) Reset()      { *m = StorageOSVolumeSource{} }
     func (*StorageOSVolumeSource) ProtoMessage() {}
     func (*StorageOSVolumeSource) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{209}
    +	return fileDescriptor_6c07b07c062484ab, []int{216}
     }
     func (m *StorageOSVolumeSource) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -5932,7 +6128,7 @@ var xxx_messageInfo_StorageOSVolumeSource proto.InternalMessageInfo
     func (m *Sysctl) Reset()      { *m = Sysctl{} }
     func (*Sysctl) ProtoMessage() {}
     func (*Sysctl) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{210}
    +	return fileDescriptor_6c07b07c062484ab, []int{217}
     }
     func (m *Sysctl) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -5960,7 +6156,7 @@ var xxx_messageInfo_Sysctl proto.InternalMessageInfo
     func (m *TCPSocketAction) Reset()      { *m = TCPSocketAction{} }
     func (*TCPSocketAction) ProtoMessage() {}
     func (*TCPSocketAction) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{211}
    +	return fileDescriptor_6c07b07c062484ab, []int{218}
     }
     func (m *TCPSocketAction) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -5988,7 +6184,7 @@ var xxx_messageInfo_TCPSocketAction proto.InternalMessageInfo
     func (m *Taint) Reset()      { *m = Taint{} }
     func (*Taint) ProtoMessage() {}
     func (*Taint) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{212}
    +	return fileDescriptor_6c07b07c062484ab, []int{219}
     }
     func (m *Taint) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -6016,7 +6212,7 @@ var xxx_messageInfo_Taint proto.InternalMessageInfo
     func (m *Toleration) Reset()      { *m = Toleration{} }
     func (*Toleration) ProtoMessage() {}
     func (*Toleration) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{213}
    +	return fileDescriptor_6c07b07c062484ab, []int{220}
     }
     func (m *Toleration) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -6044,7 +6240,7 @@ var xxx_messageInfo_Toleration proto.InternalMessageInfo
     func (m *TopologySelectorLabelRequirement) Reset()      { *m = TopologySelectorLabelRequirement{} }
     func (*TopologySelectorLabelRequirement) ProtoMessage() {}
     func (*TopologySelectorLabelRequirement) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{214}
    +	return fileDescriptor_6c07b07c062484ab, []int{221}
     }
     func (m *TopologySelectorLabelRequirement) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -6072,7 +6268,7 @@ var xxx_messageInfo_TopologySelectorLabelRequirement proto.InternalMessageInfo
     func (m *TopologySelectorTerm) Reset()      { *m = TopologySelectorTerm{} }
     func (*TopologySelectorTerm) ProtoMessage() {}
     func (*TopologySelectorTerm) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{215}
    +	return fileDescriptor_6c07b07c062484ab, []int{222}
     }
     func (m *TopologySelectorTerm) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -6100,7 +6296,7 @@ var xxx_messageInfo_TopologySelectorTerm proto.InternalMessageInfo
     func (m *TopologySpreadConstraint) Reset()      { *m = TopologySpreadConstraint{} }
     func (*TopologySpreadConstraint) ProtoMessage() {}
     func (*TopologySpreadConstraint) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{216}
    +	return fileDescriptor_6c07b07c062484ab, []int{223}
     }
     func (m *TopologySpreadConstraint) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -6128,7 +6324,7 @@ var xxx_messageInfo_TopologySpreadConstraint proto.InternalMessageInfo
     func (m *TypedLocalObjectReference) Reset()      { *m = TypedLocalObjectReference{} }
     func (*TypedLocalObjectReference) ProtoMessage() {}
     func (*TypedLocalObjectReference) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{217}
    +	return fileDescriptor_6c07b07c062484ab, []int{224}
     }
     func (m *TypedLocalObjectReference) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -6156,7 +6352,7 @@ var xxx_messageInfo_TypedLocalObjectReference proto.InternalMessageInfo
     func (m *TypedObjectReference) Reset()      { *m = TypedObjectReference{} }
     func (*TypedObjectReference) ProtoMessage() {}
     func (*TypedObjectReference) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{218}
    +	return fileDescriptor_6c07b07c062484ab, []int{225}
     }
     func (m *TypedObjectReference) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -6184,7 +6380,7 @@ var xxx_messageInfo_TypedObjectReference proto.InternalMessageInfo
     func (m *Volume) Reset()      { *m = Volume{} }
     func (*Volume) ProtoMessage() {}
     func (*Volume) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{219}
    +	return fileDescriptor_6c07b07c062484ab, []int{226}
     }
     func (m *Volume) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -6212,7 +6408,7 @@ var xxx_messageInfo_Volume proto.InternalMessageInfo
     func (m *VolumeDevice) Reset()      { *m = VolumeDevice{} }
     func (*VolumeDevice) ProtoMessage() {}
     func (*VolumeDevice) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{220}
    +	return fileDescriptor_6c07b07c062484ab, []int{227}
     }
     func (m *VolumeDevice) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -6240,7 +6436,7 @@ var xxx_messageInfo_VolumeDevice proto.InternalMessageInfo
     func (m *VolumeMount) Reset()      { *m = VolumeMount{} }
     func (*VolumeMount) ProtoMessage() {}
     func (*VolumeMount) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{221}
    +	return fileDescriptor_6c07b07c062484ab, []int{228}
     }
     func (m *VolumeMount) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -6268,7 +6464,7 @@ var xxx_messageInfo_VolumeMount proto.InternalMessageInfo
     func (m *VolumeMountStatus) Reset()      { *m = VolumeMountStatus{} }
     func (*VolumeMountStatus) ProtoMessage() {}
     func (*VolumeMountStatus) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{222}
    +	return fileDescriptor_6c07b07c062484ab, []int{229}
     }
     func (m *VolumeMountStatus) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -6296,7 +6492,7 @@ var xxx_messageInfo_VolumeMountStatus proto.InternalMessageInfo
     func (m *VolumeNodeAffinity) Reset()      { *m = VolumeNodeAffinity{} }
     func (*VolumeNodeAffinity) ProtoMessage() {}
     func (*VolumeNodeAffinity) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{223}
    +	return fileDescriptor_6c07b07c062484ab, []int{230}
     }
     func (m *VolumeNodeAffinity) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -6324,7 +6520,7 @@ var xxx_messageInfo_VolumeNodeAffinity proto.InternalMessageInfo
     func (m *VolumeProjection) Reset()      { *m = VolumeProjection{} }
     func (*VolumeProjection) ProtoMessage() {}
     func (*VolumeProjection) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{224}
    +	return fileDescriptor_6c07b07c062484ab, []int{231}
     }
     func (m *VolumeProjection) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -6352,7 +6548,7 @@ var xxx_messageInfo_VolumeProjection proto.InternalMessageInfo
     func (m *VolumeResourceRequirements) Reset()      { *m = VolumeResourceRequirements{} }
     func (*VolumeResourceRequirements) ProtoMessage() {}
     func (*VolumeResourceRequirements) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{225}
    +	return fileDescriptor_6c07b07c062484ab, []int{232}
     }
     func (m *VolumeResourceRequirements) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -6380,7 +6576,7 @@ var xxx_messageInfo_VolumeResourceRequirements proto.InternalMessageInfo
     func (m *VolumeSource) Reset()      { *m = VolumeSource{} }
     func (*VolumeSource) ProtoMessage() {}
     func (*VolumeSource) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{226}
    +	return fileDescriptor_6c07b07c062484ab, []int{233}
     }
     func (m *VolumeSource) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -6408,7 +6604,7 @@ var xxx_messageInfo_VolumeSource proto.InternalMessageInfo
     func (m *VsphereVirtualDiskVolumeSource) Reset()      { *m = VsphereVirtualDiskVolumeSource{} }
     func (*VsphereVirtualDiskVolumeSource) ProtoMessage() {}
     func (*VsphereVirtualDiskVolumeSource) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{227}
    +	return fileDescriptor_6c07b07c062484ab, []int{234}
     }
     func (m *VsphereVirtualDiskVolumeSource) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -6436,7 +6632,7 @@ var xxx_messageInfo_VsphereVirtualDiskVolumeSource proto.InternalMessageInfo
     func (m *WeightedPodAffinityTerm) Reset()      { *m = WeightedPodAffinityTerm{} }
     func (*WeightedPodAffinityTerm) ProtoMessage() {}
     func (*WeightedPodAffinityTerm) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{228}
    +	return fileDescriptor_6c07b07c062484ab, []int{235}
     }
     func (m *WeightedPodAffinityTerm) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -6464,7 +6660,7 @@ var xxx_messageInfo_WeightedPodAffinityTerm proto.InternalMessageInfo
     func (m *WindowsSecurityContextOptions) Reset()      { *m = WindowsSecurityContextOptions{} }
     func (*WindowsSecurityContextOptions) ProtoMessage() {}
     func (*WindowsSecurityContextOptions) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{229}
    +	return fileDescriptor_6c07b07c062484ab, []int{236}
     }
     func (m *WindowsSecurityContextOptions) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -6523,9 +6719,12 @@ func init() {
     	proto.RegisterType((*ConfigMapProjection)(nil), "k8s.io.api.core.v1.ConfigMapProjection")
     	proto.RegisterType((*ConfigMapVolumeSource)(nil), "k8s.io.api.core.v1.ConfigMapVolumeSource")
     	proto.RegisterType((*Container)(nil), "k8s.io.api.core.v1.Container")
    +	proto.RegisterType((*ContainerExtendedResourceRequest)(nil), "k8s.io.api.core.v1.ContainerExtendedResourceRequest")
     	proto.RegisterType((*ContainerImage)(nil), "k8s.io.api.core.v1.ContainerImage")
     	proto.RegisterType((*ContainerPort)(nil), "k8s.io.api.core.v1.ContainerPort")
     	proto.RegisterType((*ContainerResizePolicy)(nil), "k8s.io.api.core.v1.ContainerResizePolicy")
    +	proto.RegisterType((*ContainerRestartRule)(nil), "k8s.io.api.core.v1.ContainerRestartRule")
    +	proto.RegisterType((*ContainerRestartRuleOnExitCodes)(nil), "k8s.io.api.core.v1.ContainerRestartRuleOnExitCodes")
     	proto.RegisterType((*ContainerState)(nil), "k8s.io.api.core.v1.ContainerState")
     	proto.RegisterType((*ContainerStateRunning)(nil), "k8s.io.api.core.v1.ContainerStateRunning")
     	proto.RegisterType((*ContainerStateTerminated)(nil), "k8s.io.api.core.v1.ContainerStateTerminated")
    @@ -6555,6 +6754,7 @@ func init() {
     	proto.RegisterType((*EventSource)(nil), "k8s.io.api.core.v1.EventSource")
     	proto.RegisterType((*ExecAction)(nil), "k8s.io.api.core.v1.ExecAction")
     	proto.RegisterType((*FCVolumeSource)(nil), "k8s.io.api.core.v1.FCVolumeSource")
    +	proto.RegisterType((*FileKeySelector)(nil), "k8s.io.api.core.v1.FileKeySelector")
     	proto.RegisterType((*FlexPersistentVolumeSource)(nil), "k8s.io.api.core.v1.FlexPersistentVolumeSource")
     	proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.core.v1.FlexPersistentVolumeSource.OptionsEntry")
     	proto.RegisterType((*FlexVolumeSource)(nil), "k8s.io.api.core.v1.FlexVolumeSource")
    @@ -6617,6 +6817,7 @@ func init() {
     	proto.RegisterType((*NodeStatus)(nil), "k8s.io.api.core.v1.NodeStatus")
     	proto.RegisterMapType((ResourceList)(nil), "k8s.io.api.core.v1.NodeStatus.AllocatableEntry")
     	proto.RegisterMapType((ResourceList)(nil), "k8s.io.api.core.v1.NodeStatus.CapacityEntry")
    +	proto.RegisterType((*NodeSwapStatus)(nil), "k8s.io.api.core.v1.NodeSwapStatus")
     	proto.RegisterType((*NodeSystemInfo)(nil), "k8s.io.api.core.v1.NodeSystemInfo")
     	proto.RegisterType((*ObjectFieldSelector)(nil), "k8s.io.api.core.v1.ObjectFieldSelector")
     	proto.RegisterType((*ObjectReference)(nil), "k8s.io.api.core.v1.ObjectReference")
    @@ -6642,10 +6843,12 @@ func init() {
     	proto.RegisterType((*PodAffinityTerm)(nil), "k8s.io.api.core.v1.PodAffinityTerm")
     	proto.RegisterType((*PodAntiAffinity)(nil), "k8s.io.api.core.v1.PodAntiAffinity")
     	proto.RegisterType((*PodAttachOptions)(nil), "k8s.io.api.core.v1.PodAttachOptions")
    +	proto.RegisterType((*PodCertificateProjection)(nil), "k8s.io.api.core.v1.PodCertificateProjection")
     	proto.RegisterType((*PodCondition)(nil), "k8s.io.api.core.v1.PodCondition")
     	proto.RegisterType((*PodDNSConfig)(nil), "k8s.io.api.core.v1.PodDNSConfig")
     	proto.RegisterType((*PodDNSConfigOption)(nil), "k8s.io.api.core.v1.PodDNSConfigOption")
     	proto.RegisterType((*PodExecOptions)(nil), "k8s.io.api.core.v1.PodExecOptions")
    +	proto.RegisterType((*PodExtendedResourceClaimStatus)(nil), "k8s.io.api.core.v1.PodExtendedResourceClaimStatus")
     	proto.RegisterType((*PodIP)(nil), "k8s.io.api.core.v1.PodIP")
     	proto.RegisterType((*PodList)(nil), "k8s.io.api.core.v1.PodList")
     	proto.RegisterType((*PodLogOptions)(nil), "k8s.io.api.core.v1.PodLogOptions")
    @@ -6758,1015 +6961,1049 @@ func init() {
     }
     
     var fileDescriptor_6c07b07c062484ab = []byte{
    -	// 16114 bytes of a gzipped FileDescriptorProto
    -	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0xbd, 0x69, 0x90, 0x64, 0xd9,
    -	0x59, 0x28, 0xa6, 0x9b, 0x59, 0xeb, 0x57, 0xfb, 0xa9, 0x5e, 0xaa, 0x6b, 0xba, 0x3b, 0x7b, 0xee,
    -	0xcc, 0xf4, 0xf4, 0x6c, 0xd5, 0xea, 0x59, 0x34, 0xad, 0x99, 0xd1, 0x30, 0xb5, 0x76, 0xd7, 0x74,
    -	0x57, 0x75, 0xce, 0xc9, 0xaa, 0x6e, 0x69, 0x34, 0x12, 0xba, 0x9d, 0x79, 0xaa, 0xea, 0xaa, 0x32,
    -	0xef, 0xcd, 0xb9, 0xf7, 0x66, 0x75, 0x57, 0x5b, 0x04, 0x20, 0x8c, 0x40, 0x02, 0x47, 0x28, 0x08,
    -	0x6c, 0x1c, 0x82, 0xe0, 0x07, 0x60, 0x16, 0xcb, 0x60, 0x64, 0x61, 0xc0, 0x88, 0xcd, 0x36, 0x8e,
    -	0x00, 0xff, 0xc0, 0x98, 0x08, 0x4b, 0x84, 0x09, 0x17, 0x56, 0xe1, 0x08, 0x82, 0x1f, 0x06, 0x82,
    -	0xf7, 0x7e, 0xbc, 0x57, 0xc1, 0x7b, 0xbc, 0x38, 0xeb, 0x3d, 0xe7, 0x2e, 0x99, 0x59, 0x3d, 0xdd,
    -	0xa5, 0x91, 0x62, 0xfe, 0x65, 0x9e, 0xef, 0x3b, 0xdf, 0x39, 0xf7, 0xac, 0xdf, 0xf9, 0x56, 0xb0,
    -	0xb7, 0x2f, 0x87, 0x33, 0xae, 0x7f, 0xd1, 0x69, 0xba, 0x17, 0xab, 0x7e, 0x40, 0x2e, 0xee, 0x5c,
    -	0xba, 0xb8, 0x49, 0x3c, 0x12, 0x38, 0x11, 0xa9, 0xcd, 0x34, 0x03, 0x3f, 0xf2, 0x11, 0xe2, 0x38,
    -	0x33, 0x4e, 0xd3, 0x9d, 0xa1, 0x38, 0x33, 0x3b, 0x97, 0xa6, 0x9f, 0xdb, 0x74, 0xa3, 0xad, 0xd6,
    -	0xed, 0x99, 0xaa, 0xdf, 0xb8, 0xb8, 0xe9, 0x6f, 0xfa, 0x17, 0x19, 0xea, 0xed, 0xd6, 0x06, 0xfb,
    -	0xc7, 0xfe, 0xb0, 0x5f, 0x9c, 0xc4, 0xf4, 0x8b, 0x71, 0x33, 0x0d, 0xa7, 0xba, 0xe5, 0x7a, 0x24,
    -	0xd8, 0xbd, 0xd8, 0xdc, 0xde, 0x64, 0xed, 0x06, 0x24, 0xf4, 0x5b, 0x41, 0x95, 0x24, 0x1b, 0x6e,
    -	0x5b, 0x2b, 0xbc, 0xd8, 0x20, 0x91, 0x93, 0xd1, 0xdd, 0xe9, 0x8b, 0x79, 0xb5, 0x82, 0x96, 0x17,
    -	0xb9, 0x8d, 0x74, 0x33, 0x1f, 0xe9, 0x54, 0x21, 0xac, 0x6e, 0x91, 0x86, 0x93, 0xaa, 0xf7, 0x42,
    -	0x5e, 0xbd, 0x56, 0xe4, 0xd6, 0x2f, 0xba, 0x5e, 0x14, 0x46, 0x41, 0xb2, 0x92, 0xfd, 0x2d, 0x0b,
    -	0xce, 0xcd, 0xde, 0xaa, 0x2c, 0xd6, 0x9d, 0x30, 0x72, 0xab, 0x73, 0x75, 0xbf, 0xba, 0x5d, 0x89,
    -	0xfc, 0x80, 0xdc, 0xf4, 0xeb, 0xad, 0x06, 0xa9, 0xb0, 0x81, 0x40, 0xcf, 0xc2, 0xc0, 0x0e, 0xfb,
    -	0xbf, 0xbc, 0x30, 0x65, 0x9d, 0xb3, 0x2e, 0x0c, 0xce, 0x8d, 0xff, 0xe9, 0x5e, 0xe9, 0x43, 0xfb,
    -	0x7b, 0xa5, 0x81, 0x9b, 0xa2, 0x1c, 0x2b, 0x0c, 0x74, 0x1e, 0xfa, 0x36, 0xc2, 0xb5, 0xdd, 0x26,
    -	0x99, 0x2a, 0x30, 0xdc, 0x51, 0x81, 0xdb, 0xb7, 0x54, 0xa1, 0xa5, 0x58, 0x40, 0xd1, 0x45, 0x18,
    -	0x6c, 0x3a, 0x41, 0xe4, 0x46, 0xae, 0xef, 0x4d, 0x15, 0xcf, 0x59, 0x17, 0x7a, 0xe7, 0x26, 0x04,
    -	0xea, 0x60, 0x59, 0x02, 0x70, 0x8c, 0x43, 0xbb, 0x11, 0x10, 0xa7, 0x76, 0xc3, 0xab, 0xef, 0x4e,
    -	0xf5, 0x9c, 0xb3, 0x2e, 0x0c, 0xc4, 0xdd, 0xc0, 0xa2, 0x1c, 0x2b, 0x0c, 0xfb, 0x2b, 0x05, 0x18,
    -	0x98, 0xdd, 0xd8, 0x70, 0x3d, 0x37, 0xda, 0x45, 0x37, 0x61, 0xd8, 0xf3, 0x6b, 0x44, 0xfe, 0x67,
    -	0x5f, 0x31, 0xf4, 0xfc, 0xb9, 0x99, 0xf4, 0x52, 0x9a, 0x59, 0xd5, 0xf0, 0xe6, 0xc6, 0xf7, 0xf7,
    -	0x4a, 0xc3, 0x7a, 0x09, 0x36, 0xe8, 0x20, 0x0c, 0x43, 0x4d, 0xbf, 0xa6, 0xc8, 0x16, 0x18, 0xd9,
    -	0x52, 0x16, 0xd9, 0x72, 0x8c, 0x36, 0x37, 0xb6, 0xbf, 0x57, 0x1a, 0xd2, 0x0a, 0xb0, 0x4e, 0x04,
    -	0xdd, 0x86, 0x31, 0xfa, 0xd7, 0x8b, 0x5c, 0x45, 0xb7, 0xc8, 0xe8, 0x3e, 0x96, 0x47, 0x57, 0x43,
    -	0x9d, 0x9b, 0xdc, 0xdf, 0x2b, 0x8d, 0x25, 0x0a, 0x71, 0x92, 0xa0, 0xfd, 0x93, 0x16, 0x8c, 0xcd,
    -	0x36, 0x9b, 0xb3, 0x41, 0xc3, 0x0f, 0xca, 0x81, 0xbf, 0xe1, 0xd6, 0x09, 0x7a, 0x19, 0x7a, 0x22,
    -	0x3a, 0x6b, 0x7c, 0x86, 0x1f, 0x13, 0x43, 0xdb, 0x43, 0xe7, 0xea, 0x60, 0xaf, 0x34, 0x99, 0x40,
    -	0x67, 0x53, 0xc9, 0x2a, 0xa0, 0x37, 0x60, 0xbc, 0xee, 0x57, 0x9d, 0xfa, 0x96, 0x1f, 0x46, 0x02,
    -	0x2a, 0xa6, 0xfe, 0xd8, 0xfe, 0x5e, 0x69, 0xfc, 0x7a, 0x02, 0x86, 0x53, 0xd8, 0xf6, 0x3d, 0x18,
    -	0x9d, 0x8d, 0x22, 0xa7, 0xba, 0x45, 0x6a, 0x7c, 0x41, 0xa1, 0x17, 0xa1, 0xc7, 0x73, 0x1a, 0xb2,
    -	0x33, 0xe7, 0x64, 0x67, 0x56, 0x9d, 0x06, 0xed, 0xcc, 0xf8, 0xba, 0xe7, 0xbe, 0xdb, 0x12, 0x8b,
    -	0x94, 0x96, 0x61, 0x86, 0x8d, 0x9e, 0x07, 0xa8, 0x91, 0x1d, 0xb7, 0x4a, 0xca, 0x4e, 0xb4, 0x25,
    -	0xfa, 0x80, 0x44, 0x5d, 0x58, 0x50, 0x10, 0xac, 0x61, 0xd9, 0x77, 0x61, 0x70, 0x76, 0xc7, 0x77,
    -	0x6b, 0x65, 0xbf, 0x16, 0xa2, 0x6d, 0x18, 0x6b, 0x06, 0x64, 0x83, 0x04, 0xaa, 0x68, 0xca, 0x3a,
    -	0x57, 0xbc, 0x30, 0xf4, 0xfc, 0x85, 0xcc, 0xb1, 0x37, 0x51, 0x17, 0xbd, 0x28, 0xd8, 0x9d, 0x3b,
    -	0x29, 0xda, 0x1b, 0x4b, 0x40, 0x71, 0x92, 0xb2, 0xfd, 0x27, 0x05, 0x38, 0x3e, 0x7b, 0xaf, 0x15,
    -	0x90, 0x05, 0x37, 0xdc, 0x4e, 0x6e, 0xb8, 0x9a, 0x1b, 0x6e, 0xaf, 0xc6, 0x23, 0xa0, 0x56, 0xfa,
    -	0x82, 0x28, 0xc7, 0x0a, 0x03, 0x3d, 0x07, 0xfd, 0xf4, 0xf7, 0x3a, 0x5e, 0x16, 0x9f, 0x3c, 0x29,
    -	0x90, 0x87, 0x16, 0x9c, 0xc8, 0x59, 0xe0, 0x20, 0x2c, 0x71, 0xd0, 0x0a, 0x0c, 0x55, 0xd9, 0xf9,
    -	0xb0, 0xb9, 0xe2, 0xd7, 0x08, 0x5b, 0x5b, 0x83, 0x73, 0xcf, 0x50, 0xf4, 0xf9, 0xb8, 0xf8, 0x60,
    -	0xaf, 0x34, 0xc5, 0xfb, 0x26, 0x48, 0x68, 0x30, 0xac, 0xd7, 0x47, 0xb6, 0xda, 0xee, 0x3d, 0x8c,
    -	0x12, 0x64, 0x6c, 0xf5, 0x0b, 0xda, 0xce, 0xed, 0x65, 0x3b, 0x77, 0x38, 0x7b, 0xd7, 0xa2, 0x4b,
    -	0xd0, 0xb3, 0xed, 0x7a, 0xb5, 0xa9, 0x3e, 0x46, 0xeb, 0x0c, 0x9d, 0xf3, 0x6b, 0xae, 0x57, 0x3b,
    -	0xd8, 0x2b, 0x4d, 0x18, 0xdd, 0xa1, 0x85, 0x98, 0xa1, 0xda, 0xff, 0xc6, 0x82, 0x12, 0x83, 0x2d,
    -	0xb9, 0x75, 0x52, 0x26, 0x41, 0xe8, 0x86, 0x11, 0xf1, 0x22, 0x63, 0x40, 0x9f, 0x07, 0x08, 0x49,
    -	0x35, 0x20, 0x91, 0x36, 0xa4, 0x6a, 0x61, 0x54, 0x14, 0x04, 0x6b, 0x58, 0xf4, 0x7c, 0x0a, 0xb7,
    -	0x9c, 0x80, 0xad, 0x2f, 0x31, 0xb0, 0xea, 0x7c, 0xaa, 0x48, 0x00, 0x8e, 0x71, 0x8c, 0xf3, 0xa9,
    -	0xd8, 0xe9, 0x7c, 0x42, 0x1f, 0x83, 0xb1, 0xb8, 0xb1, 0xb0, 0xe9, 0x54, 0xe5, 0x00, 0xb2, 0x1d,
    -	0x5c, 0x31, 0x41, 0x38, 0x89, 0x6b, 0xff, 0xb7, 0x96, 0x58, 0x3c, 0xf4, 0xab, 0xdf, 0xe7, 0xdf,
    -	0x6a, 0xff, 0xae, 0x05, 0xfd, 0x73, 0xae, 0x57, 0x73, 0xbd, 0x4d, 0xf4, 0x19, 0x18, 0xa0, 0x57,
    -	0x65, 0xcd, 0x89, 0x1c, 0x71, 0x0c, 0x7f, 0x58, 0xdb, 0x5b, 0xea, 0xe6, 0x9a, 0x69, 0x6e, 0x6f,
    -	0xd2, 0x82, 0x70, 0x86, 0x62, 0xd3, 0xdd, 0x76, 0xe3, 0xf6, 0x67, 0x49, 0x35, 0x5a, 0x21, 0x91,
    -	0x13, 0x7f, 0x4e, 0x5c, 0x86, 0x15, 0x55, 0x74, 0x0d, 0xfa, 0x22, 0x27, 0xd8, 0x24, 0x91, 0x38,
    -	0x8f, 0x33, 0xcf, 0x4d, 0x5e, 0x13, 0xd3, 0x1d, 0x49, 0xbc, 0x2a, 0x89, 0x6f, 0xa9, 0x35, 0x56,
    -	0x15, 0x0b, 0x12, 0xf6, 0x7f, 0xe8, 0x87, 0x53, 0xf3, 0x95, 0xe5, 0x9c, 0x75, 0x75, 0x1e, 0xfa,
    -	0x6a, 0x81, 0xbb, 0x43, 0x02, 0x31, 0xce, 0x8a, 0xca, 0x02, 0x2b, 0xc5, 0x02, 0x8a, 0x2e, 0xc3,
    -	0x30, 0xbf, 0x1f, 0xaf, 0x3a, 0x5e, 0x2d, 0x3e, 0x1e, 0x05, 0xf6, 0xf0, 0x4d, 0x0d, 0x86, 0x0d,
    -	0xcc, 0x43, 0x2e, 0xaa, 0xf3, 0x89, 0xcd, 0x98, 0x77, 0xf7, 0x7e, 0xd1, 0x82, 0x71, 0xde, 0xcc,
    -	0x6c, 0x14, 0x05, 0xee, 0xed, 0x56, 0x44, 0xc2, 0xa9, 0x5e, 0x76, 0xd2, 0xcd, 0x67, 0x8d, 0x56,
    -	0xee, 0x08, 0xcc, 0xdc, 0x4c, 0x50, 0xe1, 0x87, 0xe0, 0x94, 0x68, 0x77, 0x3c, 0x09, 0xc6, 0xa9,
    -	0x66, 0xd1, 0x8f, 0x58, 0x30, 0x5d, 0xf5, 0xbd, 0x28, 0xf0, 0xeb, 0x75, 0x12, 0x94, 0x5b, 0xb7,
    -	0xeb, 0x6e, 0xb8, 0xc5, 0xd7, 0x29, 0x26, 0x1b, 0xec, 0x24, 0xc8, 0x99, 0x43, 0x85, 0x24, 0xe6,
    -	0xf0, 0xec, 0xfe, 0x5e, 0x69, 0x7a, 0x3e, 0x97, 0x14, 0x6e, 0xd3, 0x0c, 0xda, 0x06, 0x44, 0x6f,
    -	0xf6, 0x4a, 0xe4, 0x6c, 0x92, 0xb8, 0xf1, 0xfe, 0xee, 0x1b, 0x3f, 0xb1, 0xbf, 0x57, 0x42, 0xab,
    -	0x29, 0x12, 0x38, 0x83, 0x2c, 0x7a, 0x17, 0x8e, 0xd1, 0xd2, 0xd4, 0xb7, 0x0e, 0x74, 0xdf, 0xdc,
    -	0xd4, 0xfe, 0x5e, 0xe9, 0xd8, 0x6a, 0x06, 0x11, 0x9c, 0x49, 0x1a, 0xfd, 0x90, 0x05, 0xa7, 0xe2,
    -	0xcf, 0x5f, 0xbc, 0xdb, 0x74, 0xbc, 0x5a, 0xdc, 0xf0, 0x60, 0xf7, 0x0d, 0xd3, 0x33, 0xf9, 0xd4,
    -	0x7c, 0x1e, 0x25, 0x9c, 0xdf, 0x08, 0xf2, 0x60, 0x92, 0x76, 0x2d, 0xd9, 0x36, 0x74, 0xdf, 0xf6,
    -	0xc9, 0xfd, 0xbd, 0xd2, 0xe4, 0x6a, 0x9a, 0x06, 0xce, 0x22, 0x3c, 0x3d, 0x0f, 0xc7, 0x33, 0x57,
    -	0x27, 0x1a, 0x87, 0xe2, 0x36, 0xe1, 0x4c, 0xe0, 0x20, 0xa6, 0x3f, 0xd1, 0x31, 0xe8, 0xdd, 0x71,
    -	0xea, 0x2d, 0xb1, 0x31, 0x31, 0xff, 0xf3, 0x4a, 0xe1, 0xb2, 0x65, 0xff, 0x6f, 0x45, 0x18, 0x9b,
    -	0xaf, 0x2c, 0xdf, 0xd7, 0xae, 0xd7, 0xaf, 0xbd, 0x42, 0xdb, 0x6b, 0x2f, 0xbe, 0x44, 0x8b, 0xb9,
    -	0x97, 0xe8, 0x0f, 0x66, 0x6c, 0xd9, 0x1e, 0xb6, 0x65, 0x3f, 0x9a, 0xb3, 0x65, 0x1f, 0xf0, 0x46,
    -	0xdd, 0xc9, 0x59, 0xb5, 0xbd, 0x6c, 0x02, 0x33, 0x39, 0x24, 0xc6, 0xfb, 0x25, 0x8f, 0xda, 0x43,
    -	0x2e, 0xdd, 0x07, 0x33, 0x8f, 0x55, 0x18, 0x9e, 0x77, 0x9a, 0xce, 0x6d, 0xb7, 0xee, 0x46, 0x2e,
    -	0x09, 0xd1, 0x93, 0x50, 0x74, 0x6a, 0x35, 0xc6, 0xdd, 0x0d, 0xce, 0x1d, 0xdf, 0xdf, 0x2b, 0x15,
    -	0x67, 0x6b, 0x94, 0xcd, 0x00, 0x85, 0xb5, 0x8b, 0x29, 0x06, 0x7a, 0x1a, 0x7a, 0x6a, 0x81, 0xdf,
    -	0x9c, 0x2a, 0x30, 0x4c, 0xba, 0xcb, 0x7b, 0x16, 0x02, 0xbf, 0x99, 0x40, 0x65, 0x38, 0xf6, 0x1f,
    -	0x17, 0xe0, 0xf4, 0x3c, 0x69, 0x6e, 0x2d, 0x55, 0x72, 0xee, 0x8b, 0x0b, 0x30, 0xd0, 0xf0, 0x3d,
    -	0x37, 0xf2, 0x83, 0x50, 0x34, 0xcd, 0x56, 0xc4, 0x8a, 0x28, 0xc3, 0x0a, 0x8a, 0xce, 0x41, 0x4f,
    -	0x33, 0x66, 0x62, 0x87, 0x25, 0x03, 0xcc, 0xd8, 0x57, 0x06, 0xa1, 0x18, 0xad, 0x90, 0x04, 0x62,
    -	0xc5, 0x28, 0x8c, 0xf5, 0x90, 0x04, 0x98, 0x41, 0x62, 0x4e, 0x80, 0xf2, 0x08, 0xe2, 0x46, 0x48,
    -	0x70, 0x02, 0x14, 0x82, 0x35, 0x2c, 0x54, 0x86, 0xc1, 0x30, 0x31, 0xb3, 0x5d, 0x6d, 0xcd, 0x11,
    -	0xc6, 0x2a, 0xa8, 0x99, 0x8c, 0x89, 0x18, 0x37, 0x58, 0x5f, 0x47, 0x56, 0xe1, 0x1b, 0x05, 0x40,
    -	0x7c, 0x08, 0xbf, 0xcb, 0x06, 0x6e, 0x3d, 0x3d, 0x70, 0xdd, 0x6f, 0x89, 0x07, 0x35, 0x7a, 0xff,
    -	0xd6, 0x82, 0xd3, 0xf3, 0xae, 0x57, 0x23, 0x41, 0xce, 0x02, 0x7c, 0x38, 0x4f, 0xf9, 0xc3, 0x31,
    -	0x29, 0xc6, 0x12, 0xeb, 0x79, 0x00, 0x4b, 0xcc, 0xfe, 0x47, 0x0b, 0x10, 0xff, 0xec, 0xf7, 0xdd,
    -	0xc7, 0xae, 0xa7, 0x3f, 0xf6, 0x01, 0x2c, 0x0b, 0xfb, 0x3a, 0x8c, 0xce, 0xd7, 0x5d, 0xe2, 0x45,
    -	0xcb, 0xe5, 0x79, 0xdf, 0xdb, 0x70, 0x37, 0xd1, 0x2b, 0x30, 0x1a, 0xb9, 0x0d, 0xe2, 0xb7, 0xa2,
    -	0x0a, 0xa9, 0xfa, 0x1e, 0x7b, 0xb9, 0x5a, 0x17, 0x7a, 0xe7, 0xd0, 0xfe, 0x5e, 0x69, 0x74, 0xcd,
    -	0x80, 0xe0, 0x04, 0xa6, 0xfd, 0xcb, 0xf4, 0xdc, 0xaa, 0xb7, 0xc2, 0x88, 0x04, 0x6b, 0x41, 0x2b,
    -	0x8c, 0xe6, 0x5a, 0x94, 0xf7, 0x2c, 0x07, 0x3e, 0xed, 0x8e, 0xeb, 0x7b, 0xe8, 0xb4, 0xf1, 0x1c,
    -	0x1f, 0x90, 0x4f, 0x71, 0xf1, 0xec, 0x9e, 0x01, 0x08, 0xdd, 0x4d, 0x8f, 0x04, 0xda, 0xf3, 0x61,
    -	0x94, 0x6d, 0x15, 0x55, 0x8a, 0x35, 0x0c, 0x54, 0x87, 0x91, 0xba, 0x73, 0x9b, 0xd4, 0x2b, 0xa4,
    -	0x4e, 0xaa, 0x91, 0x1f, 0x08, 0xf9, 0xc6, 0x0b, 0xdd, 0xbd, 0x03, 0xae, 0xeb, 0x55, 0xe7, 0x26,
    -	0xf6, 0xf7, 0x4a, 0x23, 0x46, 0x11, 0x36, 0x89, 0xd3, 0xa3, 0xc3, 0x6f, 0xd2, 0xaf, 0x70, 0xea,
    -	0xfa, 0xe3, 0xf3, 0x86, 0x28, 0xc3, 0x0a, 0xaa, 0x8e, 0x8e, 0x9e, 0xbc, 0xa3, 0xc3, 0xfe, 0x6b,
    -	0xba, 0xd0, 0xfc, 0x46, 0xd3, 0xf7, 0x88, 0x17, 0xcd, 0xfb, 0x5e, 0x8d, 0x4b, 0xa6, 0x5e, 0x31,
    -	0x44, 0x27, 0xe7, 0x13, 0xa2, 0x93, 0x13, 0xe9, 0x1a, 0x9a, 0xf4, 0xe4, 0xa3, 0xd0, 0x17, 0x46,
    -	0x4e, 0xd4, 0x0a, 0xc5, 0xc0, 0x3d, 0x2a, 0x97, 0x5d, 0x85, 0x95, 0x1e, 0xec, 0x95, 0xc6, 0x54,
    -	0x35, 0x5e, 0x84, 0x45, 0x05, 0xf4, 0x14, 0xf4, 0x37, 0x48, 0x18, 0x3a, 0x9b, 0x92, 0x6d, 0x18,
    -	0x13, 0x75, 0xfb, 0x57, 0x78, 0x31, 0x96, 0x70, 0xf4, 0x18, 0xf4, 0x92, 0x20, 0xf0, 0x03, 0xf1,
    -	0x6d, 0x23, 0x02, 0xb1, 0x77, 0x91, 0x16, 0x62, 0x0e, 0xb3, 0xff, 0x0f, 0x0b, 0xc6, 0x54, 0x5f,
    -	0x79, 0x5b, 0x47, 0xf0, 0x5c, 0x7b, 0x1b, 0xa0, 0x2a, 0x3f, 0x30, 0x64, 0xd7, 0xec, 0xd0, 0xf3,
    -	0xe7, 0x33, 0x39, 0x9a, 0xd4, 0x30, 0xc6, 0x94, 0x55, 0x51, 0x88, 0x35, 0x6a, 0xf6, 0x1f, 0x58,
    -	0x30, 0x99, 0xf8, 0xa2, 0xeb, 0x6e, 0x18, 0xa1, 0x77, 0x52, 0x5f, 0x35, 0xd3, 0xe5, 0xe2, 0x73,
    -	0x43, 0xfe, 0x4d, 0x6a, 0xcf, 0xcb, 0x12, 0xed, 0x8b, 0xae, 0x42, 0xaf, 0x1b, 0x91, 0x86, 0xfc,
    -	0x98, 0xc7, 0xda, 0x7e, 0x0c, 0xef, 0x55, 0x3c, 0x23, 0xcb, 0xb4, 0x26, 0xe6, 0x04, 0xec, 0x3f,
    -	0x2e, 0xc2, 0x20, 0xdf, 0xdf, 0x2b, 0x4e, 0xf3, 0x08, 0xe6, 0xe2, 0x19, 0x18, 0x74, 0x1b, 0x8d,
    -	0x56, 0xe4, 0xdc, 0x16, 0xf7, 0xde, 0x00, 0x3f, 0x83, 0x96, 0x65, 0x21, 0x8e, 0xe1, 0x68, 0x19,
    -	0x7a, 0x58, 0x57, 0xf8, 0x57, 0x3e, 0x99, 0xfd, 0x95, 0xa2, 0xef, 0x33, 0x0b, 0x4e, 0xe4, 0x70,
    -	0x96, 0x53, 0xed, 0x2b, 0x5a, 0x84, 0x19, 0x09, 0xe4, 0x00, 0xdc, 0x76, 0x3d, 0x27, 0xd8, 0xa5,
    -	0x65, 0x53, 0x45, 0x46, 0xf0, 0xb9, 0xf6, 0x04, 0xe7, 0x14, 0x3e, 0x27, 0xab, 0x3e, 0x2c, 0x06,
    -	0x60, 0x8d, 0xe8, 0xf4, 0xcb, 0x30, 0xa8, 0x90, 0x0f, 0xc3, 0x39, 0x4e, 0x7f, 0x0c, 0xc6, 0x12,
    -	0x6d, 0x75, 0xaa, 0x3e, 0xac, 0x33, 0x9e, 0xbf, 0xc7, 0x8e, 0x0c, 0xd1, 0xeb, 0x45, 0x6f, 0x47,
    -	0xdc, 0x4d, 0xf7, 0xe0, 0x58, 0x3d, 0xe3, 0xc8, 0x17, 0xf3, 0xda, 0xfd, 0x15, 0x71, 0x5a, 0x7c,
    -	0xf6, 0xb1, 0x2c, 0x28, 0xce, 0x6c, 0xc3, 0x38, 0x11, 0x0b, 0xed, 0x4e, 0x44, 0x7a, 0xde, 0x1d,
    -	0x53, 0x9d, 0xbf, 0x46, 0x76, 0xd5, 0xa1, 0xfa, 0x9d, 0xec, 0xfe, 0x19, 0x3e, 0xfa, 0xfc, 0xb8,
    -	0x1c, 0x12, 0x04, 0x8a, 0xd7, 0xc8, 0x2e, 0x9f, 0x0a, 0xfd, 0xeb, 0x8a, 0x6d, 0xbf, 0xee, 0x6b,
    -	0x16, 0x8c, 0xa8, 0xaf, 0x3b, 0x82, 0x73, 0x61, 0xce, 0x3c, 0x17, 0xce, 0xb4, 0x5d, 0xe0, 0x39,
    -	0x27, 0xc2, 0x37, 0x0a, 0x70, 0x4a, 0xe1, 0xd0, 0x47, 0x14, 0xff, 0x23, 0x56, 0xd5, 0x45, 0x18,
    -	0xf4, 0x94, 0x38, 0xd1, 0x32, 0xe5, 0x78, 0xb1, 0x30, 0x31, 0xc6, 0xa1, 0x57, 0x9e, 0x17, 0x5f,
    -	0xda, 0xc3, 0xba, 0x9c, 0x5d, 0x5c, 0xee, 0x73, 0x50, 0x6c, 0xb9, 0x35, 0x71, 0xc1, 0x7c, 0x58,
    -	0x8e, 0xf6, 0xfa, 0xf2, 0xc2, 0xc1, 0x5e, 0xe9, 0xd1, 0x3c, 0x95, 0x13, 0xbd, 0xd9, 0xc2, 0x99,
    -	0xf5, 0xe5, 0x05, 0x4c, 0x2b, 0xa3, 0x59, 0x18, 0x93, 0x5a, 0xb5, 0x9b, 0x94, 0x2f, 0xf5, 0x3d,
    -	0x71, 0x0f, 0x29, 0x61, 0x39, 0x36, 0xc1, 0x38, 0x89, 0x8f, 0x16, 0x60, 0x7c, 0xbb, 0x75, 0x9b,
    -	0xd4, 0x49, 0xc4, 0x3f, 0xf8, 0x1a, 0xe1, 0xa2, 0xe4, 0xc1, 0xf8, 0x09, 0x7b, 0x2d, 0x01, 0xc7,
    -	0xa9, 0x1a, 0xf6, 0xbf, 0xb2, 0xfb, 0x40, 0x8c, 0x9e, 0xc6, 0xdf, 0x7c, 0x27, 0x97, 0x73, 0x37,
    -	0xab, 0xe2, 0x1a, 0xd9, 0x5d, 0xf3, 0x29, 0x1f, 0x92, 0xbd, 0x2a, 0x8c, 0x35, 0xdf, 0xd3, 0x76,
    -	0xcd, 0xff, 0x56, 0x01, 0x8e, 0xab, 0x11, 0x30, 0xb8, 0xe5, 0xef, 0xf6, 0x31, 0xb8, 0x04, 0x43,
    -	0x35, 0xb2, 0xe1, 0xb4, 0xea, 0x91, 0xd2, 0x6b, 0xf4, 0x72, 0x55, 0xdb, 0x42, 0x5c, 0x8c, 0x75,
    -	0x9c, 0x43, 0x0c, 0xdb, 0xaf, 0x8f, 0xb0, 0x8b, 0x38, 0x72, 0xe8, 0x1a, 0x57, 0xbb, 0xc6, 0xca,
    -	0xdd, 0x35, 0x8f, 0x41, 0xaf, 0xdb, 0xa0, 0x8c, 0x59, 0xc1, 0xe4, 0xb7, 0x96, 0x69, 0x21, 0xe6,
    -	0x30, 0xf4, 0x04, 0xf4, 0x57, 0xfd, 0x46, 0xc3, 0xf1, 0x6a, 0xec, 0xca, 0x1b, 0x9c, 0x1b, 0xa2,
    -	0xbc, 0xdb, 0x3c, 0x2f, 0xc2, 0x12, 0x46, 0x99, 0x6f, 0x27, 0xd8, 0xe4, 0xc2, 0x1e, 0xc1, 0x7c,
    -	0xcf, 0x06, 0x9b, 0x21, 0x66, 0xa5, 0xf4, 0xad, 0x7a, 0xc7, 0x0f, 0xb6, 0x5d, 0x6f, 0x73, 0xc1,
    -	0x0d, 0xc4, 0x96, 0x50, 0x77, 0xe1, 0x2d, 0x05, 0xc1, 0x1a, 0x16, 0x5a, 0x82, 0xde, 0xa6, 0x1f,
    -	0x44, 0xe1, 0x54, 0x1f, 0x1b, 0xee, 0x47, 0x73, 0x0e, 0x22, 0xfe, 0xb5, 0x65, 0x3f, 0x88, 0xe2,
    -	0x0f, 0xa0, 0xff, 0x42, 0xcc, 0xab, 0xa3, 0xeb, 0xd0, 0x4f, 0xbc, 0x9d, 0xa5, 0xc0, 0x6f, 0x4c,
    -	0x4d, 0xe6, 0x53, 0x5a, 0xe4, 0x28, 0x7c, 0x99, 0xc5, 0x3c, 0xaa, 0x28, 0xc6, 0x92, 0x04, 0xfa,
    -	0x28, 0x14, 0x89, 0xb7, 0x33, 0xd5, 0xcf, 0x28, 0x4d, 0xe7, 0x50, 0xba, 0xe9, 0x04, 0xf1, 0x99,
    -	0xbf, 0xe8, 0xed, 0x60, 0x5a, 0x07, 0x7d, 0x02, 0x06, 0xe5, 0x81, 0x11, 0x0a, 0x29, 0x6a, 0xe6,
    -	0x82, 0x95, 0xc7, 0x0c, 0x26, 0xef, 0xb6, 0xdc, 0x80, 0x34, 0x88, 0x17, 0x85, 0xf1, 0x09, 0x29,
    -	0xa1, 0x21, 0x8e, 0xa9, 0xa1, 0x2a, 0x0c, 0x07, 0x24, 0x74, 0xef, 0x91, 0xb2, 0x5f, 0x77, 0xab,
    -	0xbb, 0x53, 0x27, 0x59, 0xf7, 0x9e, 0x6a, 0x3b, 0x64, 0x58, 0xab, 0x10, 0x4b, 0xf9, 0xf5, 0x52,
    -	0x6c, 0x10, 0x45, 0x6f, 0xc1, 0x48, 0x40, 0xc2, 0xc8, 0x09, 0x22, 0xd1, 0xca, 0x94, 0xd2, 0xca,
    -	0x8d, 0x60, 0x1d, 0xc0, 0x9f, 0x13, 0x71, 0x33, 0x31, 0x04, 0x9b, 0x14, 0xd0, 0x27, 0xa4, 0xca,
    -	0x61, 0xc5, 0x6f, 0x79, 0x51, 0x38, 0x35, 0xc8, 0xfa, 0x9d, 0xa9, 0x9b, 0xbe, 0x19, 0xe3, 0x25,
    -	0x75, 0x12, 0xbc, 0x32, 0x36, 0x48, 0xa1, 0x4f, 0xc1, 0x08, 0xff, 0xcf, 0x55, 0xaa, 0xe1, 0xd4,
    -	0x71, 0x46, 0xfb, 0x5c, 0x3e, 0x6d, 0x8e, 0x38, 0x77, 0x5c, 0x10, 0x1f, 0xd1, 0x4b, 0x43, 0x6c,
    -	0x52, 0x43, 0x18, 0x46, 0xea, 0xee, 0x0e, 0xf1, 0x48, 0x18, 0x96, 0x03, 0xff, 0x36, 0x11, 0x12,
    -	0xe2, 0x53, 0xd9, 0x2a, 0x58, 0xff, 0x36, 0x11, 0x8f, 0x40, 0xbd, 0x0e, 0x36, 0x49, 0xa0, 0x75,
    -	0x18, 0xa5, 0x4f, 0x72, 0x37, 0x26, 0x3a, 0xd4, 0x89, 0x28, 0x7b, 0x38, 0x63, 0xa3, 0x12, 0x4e,
    -	0x10, 0x41, 0x37, 0x60, 0x98, 0x8d, 0x79, 0xab, 0xc9, 0x89, 0x9e, 0xe8, 0x44, 0x94, 0x19, 0x14,
    -	0x54, 0xb4, 0x2a, 0xd8, 0x20, 0x80, 0xde, 0x84, 0xc1, 0xba, 0xbb, 0x41, 0xaa, 0xbb, 0xd5, 0x3a,
    -	0x99, 0x1a, 0x66, 0xd4, 0x32, 0x0f, 0xc3, 0xeb, 0x12, 0x89, 0xf3, 0xe7, 0xea, 0x2f, 0x8e, 0xab,
    -	0xa3, 0x9b, 0x70, 0x22, 0x22, 0x41, 0xc3, 0xf5, 0x1c, 0x7a, 0x88, 0x89, 0x27, 0x21, 0xd3, 0x8c,
    -	0x8f, 0xb0, 0xd5, 0x75, 0x56, 0xcc, 0xc6, 0x89, 0xb5, 0x4c, 0x2c, 0x9c, 0x53, 0x1b, 0xdd, 0x85,
    -	0xa9, 0x0c, 0x08, 0x5f, 0xb7, 0xc7, 0x18, 0xe5, 0xd7, 0x04, 0xe5, 0xa9, 0xb5, 0x1c, 0xbc, 0x83,
    -	0x36, 0x30, 0x9c, 0x4b, 0x1d, 0xdd, 0x80, 0x31, 0x76, 0x72, 0x96, 0x5b, 0xf5, 0xba, 0x68, 0x70,
    -	0x94, 0x35, 0xf8, 0x84, 0xe4, 0x23, 0x96, 0x4d, 0xf0, 0xc1, 0x5e, 0x09, 0xe2, 0x7f, 0x38, 0x59,
    -	0x1b, 0xdd, 0x66, 0x4a, 0xd8, 0x56, 0xe0, 0x46, 0xbb, 0x74, 0x57, 0x91, 0xbb, 0xd1, 0xd4, 0x58,
    -	0x5b, 0x81, 0x94, 0x8e, 0xaa, 0x34, 0xb5, 0x7a, 0x21, 0x4e, 0x12, 0xa4, 0x57, 0x41, 0x18, 0xd5,
    -	0x5c, 0x6f, 0x6a, 0x9c, 0xbf, 0xa7, 0xe4, 0x49, 0x5a, 0xa1, 0x85, 0x98, 0xc3, 0x98, 0x02, 0x96,
    -	0xfe, 0xb8, 0x41, 0x6f, 0xdc, 0x09, 0x86, 0x18, 0x2b, 0x60, 0x25, 0x00, 0xc7, 0x38, 0x94, 0x09,
    -	0x8e, 0xa2, 0xdd, 0x29, 0xc4, 0x50, 0xd5, 0x81, 0xb8, 0xb6, 0xf6, 0x09, 0x4c, 0xcb, 0xed, 0xdb,
    -	0x30, 0xaa, 0x8e, 0x09, 0x36, 0x26, 0xa8, 0x04, 0xbd, 0x8c, 0xed, 0x13, 0xe2, 0xd3, 0x41, 0xda,
    -	0x05, 0xc6, 0x12, 0x62, 0x5e, 0xce, 0xba, 0xe0, 0xde, 0x23, 0x73, 0xbb, 0x11, 0xe1, 0xb2, 0x88,
    -	0xa2, 0xd6, 0x05, 0x09, 0xc0, 0x31, 0x8e, 0xfd, 0x1f, 0x39, 0xfb, 0x1c, 0xdf, 0x12, 0x5d, 0xdc,
    -	0x8b, 0xcf, 0xc2, 0x00, 0x33, 0xfc, 0xf0, 0x03, 0xae, 0x9d, 0xed, 0x8d, 0x19, 0xe6, 0xab, 0xa2,
    -	0x1c, 0x2b, 0x0c, 0xf4, 0x2a, 0x8c, 0x54, 0xf5, 0x06, 0xc4, 0xa5, 0xae, 0x8e, 0x11, 0xa3, 0x75,
    -	0x6c, 0xe2, 0xa2, 0xcb, 0x30, 0xc0, 0x6c, 0x9c, 0xaa, 0x7e, 0x5d, 0x70, 0x9b, 0x92, 0x33, 0x19,
    -	0x28, 0x8b, 0xf2, 0x03, 0xed, 0x37, 0x56, 0xd8, 0xe8, 0x3c, 0xf4, 0xd1, 0x2e, 0x2c, 0x97, 0xc5,
    -	0x75, 0xaa, 0x24, 0x81, 0x57, 0x59, 0x29, 0x16, 0x50, 0xfb, 0x0f, 0x2c, 0xc6, 0x4b, 0xa5, 0xcf,
    -	0x7c, 0x74, 0x95, 0x5d, 0x1a, 0xec, 0x06, 0xd1, 0xb4, 0xf0, 0x8f, 0x6b, 0x37, 0x81, 0x82, 0x1d,
    -	0x24, 0xfe, 0x63, 0xa3, 0x26, 0x7a, 0x3b, 0x79, 0x33, 0x70, 0x86, 0xe2, 0x45, 0x39, 0x04, 0xc9,
    -	0xdb, 0xe1, 0x91, 0xf8, 0x8a, 0xa3, 0xfd, 0x69, 0x77, 0x45, 0xd8, 0x3f, 0x55, 0xd0, 0x56, 0x49,
    -	0x25, 0x72, 0x22, 0x82, 0xca, 0xd0, 0x7f, 0xc7, 0x71, 0x23, 0xd7, 0xdb, 0x14, 0x7c, 0x5f, 0xfb,
    -	0x8b, 0x8e, 0x55, 0xba, 0xc5, 0x2b, 0x70, 0xee, 0x45, 0xfc, 0xc1, 0x92, 0x0c, 0xa5, 0x18, 0xb4,
    -	0x3c, 0x8f, 0x52, 0x2c, 0x74, 0x4b, 0x11, 0xf3, 0x0a, 0x9c, 0xa2, 0xf8, 0x83, 0x25, 0x19, 0xf4,
    -	0x0e, 0x80, 0x3c, 0x21, 0x48, 0x4d, 0xc8, 0x0e, 0x9f, 0xed, 0x4c, 0x74, 0x4d, 0xd5, 0xe1, 0xc2,
    -	0xc9, 0xf8, 0x3f, 0xd6, 0xe8, 0xd9, 0x91, 0x36, 0xa7, 0x7a, 0x67, 0xd0, 0x27, 0xe9, 0x16, 0x75,
    -	0x82, 0x88, 0xd4, 0x66, 0x23, 0x31, 0x38, 0x4f, 0x77, 0xf7, 0x38, 0x5c, 0x73, 0x1b, 0x44, 0xdf,
    -	0xce, 0x82, 0x08, 0x8e, 0xe9, 0xd9, 0xbf, 0x53, 0x84, 0xa9, 0xbc, 0xee, 0xd2, 0x4d, 0x43, 0xee,
    -	0xba, 0xd1, 0x3c, 0x65, 0x6b, 0x2d, 0x73, 0xd3, 0x2c, 0x8a, 0x72, 0xac, 0x30, 0xe8, 0xea, 0x0d,
    -	0xdd, 0x4d, 0xf9, 0xb6, 0xef, 0x8d, 0x57, 0x6f, 0x85, 0x95, 0x62, 0x01, 0xa5, 0x78, 0x01, 0x71,
    -	0x42, 0x61, 0x7c, 0xa7, 0xad, 0x72, 0xcc, 0x4a, 0xb1, 0x80, 0xea, 0x52, 0xc6, 0x9e, 0x0e, 0x52,
    -	0x46, 0x63, 0x88, 0x7a, 0x1f, 0xec, 0x10, 0xa1, 0x4f, 0x03, 0x6c, 0xb8, 0x9e, 0x1b, 0x6e, 0x31,
    -	0xea, 0x7d, 0x87, 0xa6, 0xae, 0x98, 0xe2, 0x25, 0x45, 0x05, 0x6b, 0x14, 0xd1, 0x4b, 0x30, 0xa4,
    -	0x0e, 0x90, 0xe5, 0x05, 0xa6, 0xfa, 0xd7, 0x4c, 0xa9, 0xe2, 0xd3, 0x74, 0x01, 0xeb, 0x78, 0xf6,
    -	0x67, 0x93, 0xeb, 0x45, 0xec, 0x00, 0x6d, 0x7c, 0xad, 0x6e, 0xc7, 0xb7, 0xd0, 0x7e, 0x7c, 0xed,
    -	0x9f, 0x19, 0x84, 0x31, 0xa3, 0xb1, 0x56, 0xd8, 0xc5, 0x99, 0x7b, 0x85, 0x5e, 0x40, 0x4e, 0x44,
    -	0xc4, 0xfe, 0xb3, 0x3b, 0x6f, 0x15, 0xfd, 0x92, 0xa2, 0x3b, 0x80, 0xd7, 0x47, 0x9f, 0x86, 0xc1,
    -	0xba, 0x13, 0x32, 0x89, 0x25, 0x11, 0xfb, 0xae, 0x1b, 0x62, 0xf1, 0x83, 0xd0, 0x09, 0x23, 0xed,
    -	0xd6, 0xe7, 0xb4, 0x63, 0x92, 0xf4, 0xa6, 0xa4, 0xfc, 0x95, 0xb4, 0xee, 0x54, 0x9d, 0xa0, 0x4c,
    -	0xd8, 0x2e, 0xe6, 0x30, 0x74, 0x99, 0x1d, 0xad, 0x74, 0x55, 0xcc, 0x53, 0x6e, 0x94, 0x2d, 0xb3,
    -	0x5e, 0x83, 0xc9, 0x56, 0x30, 0x6c, 0x60, 0xc6, 0x6f, 0xb2, 0xbe, 0x36, 0x6f, 0xb2, 0xa7, 0xa0,
    -	0x9f, 0xfd, 0x50, 0x2b, 0x40, 0xcd, 0xc6, 0x32, 0x2f, 0xc6, 0x12, 0x9e, 0x5c, 0x30, 0x03, 0xdd,
    -	0x2d, 0x18, 0xfa, 0xea, 0x13, 0x8b, 0x9a, 0x99, 0x5d, 0x0c, 0xf0, 0x53, 0x4e, 0x2c, 0x79, 0x2c,
    -	0x61, 0xe8, 0x57, 0x2c, 0x40, 0x4e, 0x9d, 0xbe, 0x96, 0x69, 0xb1, 0x7a, 0xdc, 0x00, 0x63, 0xb5,
    -	0x5f, 0xed, 0x38, 0xec, 0xad, 0x70, 0x66, 0x36, 0x55, 0x9b, 0x4b, 0x4a, 0x5f, 0x11, 0x5d, 0x44,
    -	0x69, 0x04, 0xfd, 0x32, 0xba, 0xee, 0x86, 0xd1, 0xe7, 0xff, 0x26, 0x71, 0x39, 0x65, 0x74, 0x09,
    -	0xad, 0xeb, 0x8f, 0xaf, 0xa1, 0x43, 0x3e, 0xbe, 0x46, 0x72, 0x1f, 0x5e, 0xdf, 0x9f, 0x78, 0xc0,
    -	0x0c, 0xb3, 0x2f, 0x7f, 0xa2, 0xc3, 0x03, 0x46, 0x88, 0xd3, 0xbb, 0x79, 0xc6, 0x94, 0x85, 0x1e,
    -	0x78, 0x84, 0x75, 0xb9, 0xfd, 0x23, 0x78, 0x3d, 0x24, 0xc1, 0xdc, 0x29, 0xa9, 0x26, 0x3e, 0xd0,
    -	0x79, 0x0f, 0x4d, 0x6f, 0xfc, 0x43, 0x16, 0x4c, 0xa5, 0x07, 0x88, 0x77, 0x69, 0x6a, 0x94, 0xf5,
    -	0xdf, 0x6e, 0x37, 0x32, 0xa2, 0xf3, 0xd2, 0xdc, 0x75, 0x6a, 0x36, 0x87, 0x16, 0xce, 0x6d, 0x65,
    -	0xba, 0x05, 0x27, 0x73, 0xe6, 0x3d, 0x43, 0x6a, 0xbd, 0xa0, 0x4b, 0xad, 0x3b, 0xc8, 0x3a, 0x67,
    -	0xe4, 0xcc, 0xcc, 0xbc, 0xd5, 0x72, 0xbc, 0xc8, 0x8d, 0x76, 0x75, 0x29, 0xb7, 0x07, 0xe6, 0x80,
    -	0xa0, 0x4f, 0x41, 0x6f, 0xdd, 0xf5, 0x5a, 0x77, 0xc5, 0x4d, 0x79, 0x3e, 0xfb, 0x11, 0xe3, 0xb5,
    -	0xee, 0x9a, 0x43, 0x5c, 0xa2, 0x1b, 0x92, 0x95, 0x1f, 0xec, 0x95, 0x50, 0x1a, 0x01, 0x73, 0xaa,
    -	0xf6, 0xd3, 0x30, 0xba, 0xe0, 0x90, 0x86, 0xef, 0x2d, 0x7a, 0xb5, 0xa6, 0xef, 0x7a, 0x11, 0x9a,
    -	0x82, 0x1e, 0xc6, 0x22, 0xf2, 0x0b, 0xb2, 0x87, 0x0e, 0x21, 0x66, 0x25, 0xf6, 0x26, 0x1c, 0x5f,
    -	0xf0, 0xef, 0x78, 0x77, 0x9c, 0xa0, 0x36, 0x5b, 0x5e, 0xd6, 0xa4, 0x7e, 0xab, 0x52, 0xea, 0x64,
    -	0xe5, 0xbf, 0xe9, 0xb5, 0x9a, 0x7c, 0x29, 0x2d, 0xb9, 0x75, 0x92, 0x23, 0x9b, 0xfd, 0x99, 0x82,
    -	0xd1, 0x52, 0x8c, 0xaf, 0x34, 0x8b, 0x56, 0xae, 0x51, 0xc2, 0x5b, 0x30, 0xb0, 0xe1, 0x92, 0x7a,
    -	0x0d, 0x93, 0x0d, 0x31, 0x1b, 0x4f, 0xe6, 0x9b, 0x2d, 0x2e, 0x51, 0x4c, 0xa5, 0x02, 0x65, 0x32,
    -	0xab, 0x25, 0x51, 0x19, 0x2b, 0x32, 0x68, 0x1b, 0xc6, 0xe5, 0x9c, 0x49, 0xa8, 0x38, 0xb5, 0x9f,
    -	0x6a, 0xb7, 0x08, 0x4d, 0xe2, 0xcc, 0x84, 0x1b, 0x27, 0xc8, 0xe0, 0x14, 0x61, 0x74, 0x1a, 0x7a,
    -	0x1a, 0x94, 0x3f, 0xe9, 0x61, 0xc3, 0xcf, 0x84, 0x54, 0x4c, 0xde, 0xc6, 0x4a, 0xed, 0x9f, 0xb3,
    -	0xe0, 0x64, 0x6a, 0x64, 0x84, 0xdc, 0xf1, 0x01, 0xcf, 0x42, 0x52, 0x0e, 0x58, 0xe8, 0x2c, 0x07,
    -	0xb4, 0xff, 0x3b, 0x0b, 0x8e, 0x2d, 0x36, 0x9a, 0xd1, 0xee, 0x82, 0x6b, 0x5a, 0x10, 0xbc, 0x0c,
    -	0x7d, 0x0d, 0x52, 0x73, 0x5b, 0x0d, 0x31, 0x73, 0x25, 0x79, 0x87, 0xaf, 0xb0, 0x52, 0x7a, 0x0e,
    -	0x54, 0x22, 0x3f, 0x70, 0x36, 0x09, 0x2f, 0xc0, 0x02, 0x9d, 0x71, 0x42, 0xee, 0x3d, 0x72, 0xdd,
    -	0x6d, 0xb8, 0xd1, 0xfd, 0xed, 0x2e, 0xa1, 0xfc, 0x97, 0x44, 0x70, 0x4c, 0xcf, 0xfe, 0x96, 0x05,
    -	0x63, 0x72, 0xdd, 0xcf, 0xd6, 0x6a, 0x01, 0x09, 0x43, 0x34, 0x0d, 0x05, 0xb7, 0x29, 0x7a, 0x09,
    -	0xa2, 0x97, 0x85, 0xe5, 0x32, 0x2e, 0xb8, 0x4d, 0xf9, 0xe8, 0x62, 0x6c, 0x42, 0xd1, 0xb4, 0x83,
    -	0xb8, 0x2a, 0xca, 0xb1, 0xc2, 0x40, 0x17, 0x60, 0xc0, 0xf3, 0x6b, 0xfc, 0xdd, 0x22, 0x34, 0xe1,
    -	0x14, 0x73, 0x55, 0x94, 0x61, 0x05, 0x45, 0x65, 0x18, 0xe4, 0x56, 0xb2, 0xf1, 0xa2, 0xed, 0xca,
    -	0xd6, 0x96, 0x7d, 0xd9, 0x9a, 0xac, 0x89, 0x63, 0x22, 0xf6, 0x1f, 0x59, 0x30, 0x2c, 0xbf, 0xac,
    -	0xcb, 0x17, 0x25, 0xdd, 0x5a, 0xf1, 0x6b, 0x32, 0xde, 0x5a, 0xf4, 0x45, 0xc8, 0x20, 0xc6, 0x43,
    -	0xb0, 0x78, 0xa8, 0x87, 0xe0, 0x25, 0x18, 0x72, 0x9a, 0xcd, 0xb2, 0xf9, 0x8a, 0x64, 0x4b, 0x69,
    -	0x36, 0x2e, 0xc6, 0x3a, 0x8e, 0xfd, 0xb3, 0x05, 0x18, 0x95, 0x5f, 0x50, 0x69, 0xdd, 0x0e, 0x49,
    -	0x84, 0xd6, 0x60, 0xd0, 0xe1, 0xb3, 0x44, 0xe4, 0x22, 0x7f, 0x2c, 0x5b, 0xba, 0x69, 0x4c, 0x69,
    -	0xcc, 0x0e, 0xcf, 0xca, 0xda, 0x38, 0x26, 0x84, 0xea, 0x30, 0xe1, 0xf9, 0x11, 0x63, 0x8d, 0x14,
    -	0xbc, 0x9d, 0xc2, 0x39, 0x49, 0xfd, 0x94, 0xa0, 0x3e, 0xb1, 0x9a, 0xa4, 0x82, 0xd3, 0x84, 0xd1,
    -	0xa2, 0x94, 0x18, 0x17, 0xf3, 0x45, 0x7d, 0xfa, 0xc4, 0x65, 0x0b, 0x8c, 0xed, 0xdf, 0xb7, 0x60,
    -	0x50, 0xa2, 0x1d, 0x85, 0x6d, 0xc1, 0x0a, 0xf4, 0x87, 0x6c, 0x12, 0xe4, 0xd0, 0xd8, 0xed, 0x3a,
    -	0xce, 0xe7, 0x2b, 0xe6, 0xf8, 0xf8, 0xff, 0x10, 0x4b, 0x1a, 0x4c, 0x61, 0xa8, 0xba, 0xff, 0x3e,
    -	0x51, 0x18, 0xaa, 0xfe, 0xe4, 0x5c, 0x4a, 0x7f, 0xc7, 0xfa, 0xac, 0x49, 0xe0, 0xe9, 0xc3, 0xa4,
    -	0x19, 0x90, 0x0d, 0xf7, 0x6e, 0xf2, 0x61, 0x52, 0x66, 0xa5, 0x58, 0x40, 0xd1, 0x3b, 0x30, 0x5c,
    -	0x95, 0x9a, 0xa2, 0x78, 0x87, 0x9f, 0x6f, 0xab, 0xb5, 0x54, 0x0a, 0x6e, 0x2e, 0xe9, 0x9c, 0xd7,
    -	0xea, 0x63, 0x83, 0x9a, 0x69, 0x05, 0x56, 0xec, 0x64, 0x05, 0x16, 0xd3, 0xcd, 0xb7, 0x89, 0xfa,
    -	0x79, 0x0b, 0xfa, 0xb8, 0x86, 0xa0, 0x3b, 0x05, 0x8d, 0xa6, 0xef, 0x8f, 0xc7, 0xee, 0x26, 0x2d,
    -	0x14, 0x9c, 0x0d, 0x5a, 0x81, 0x41, 0xf6, 0x83, 0x69, 0x38, 0x8a, 0xf9, 0x3e, 0x63, 0xbc, 0x55,
    -	0xbd, 0x83, 0x37, 0x65, 0x35, 0x1c, 0x53, 0xb0, 0x7f, 0xba, 0x48, 0x4f, 0xb7, 0x18, 0xd5, 0xb8,
    -	0xf4, 0xad, 0x87, 0x77, 0xe9, 0x17, 0x1e, 0xd6, 0xa5, 0xbf, 0x09, 0x63, 0x55, 0xcd, 0x3a, 0x20,
    -	0x9e, 0xc9, 0x0b, 0x6d, 0x17, 0x89, 0x66, 0x48, 0xc0, 0x65, 0xa8, 0xf3, 0x26, 0x11, 0x9c, 0xa4,
    -	0x8a, 0x3e, 0x09, 0xc3, 0x7c, 0x9e, 0x45, 0x2b, 0xdc, 0x90, 0xee, 0x89, 0xfc, 0xf5, 0xa2, 0x37,
    -	0xc1, 0x65, 0xee, 0x5a, 0x75, 0x6c, 0x10, 0xb3, 0xff, 0xc9, 0x02, 0xb4, 0xd8, 0xdc, 0x22, 0x0d,
    -	0x12, 0x38, 0xf5, 0x58, 0xc9, 0xf7, 0x25, 0x0b, 0xa6, 0x48, 0xaa, 0x78, 0xde, 0x6f, 0x34, 0xc4,
    -	0x93, 0x3e, 0x47, 0xea, 0xb4, 0x98, 0x53, 0x27, 0x66, 0xeb, 0xf3, 0x30, 0x70, 0x6e, 0x7b, 0x68,
    -	0x05, 0x26, 0xf9, 0x2d, 0xa9, 0x00, 0x9a, 0xad, 0xdd, 0x23, 0x82, 0xf0, 0xe4, 0x5a, 0x1a, 0x05,
    -	0x67, 0xd5, 0xb3, 0x7f, 0x7f, 0x04, 0x72, 0x7b, 0xf1, 0x81, 0x76, 0xf3, 0x03, 0xed, 0xe6, 0x07,
    -	0xda, 0xcd, 0x0f, 0xb4, 0x9b, 0x1f, 0x68, 0x37, 0x3f, 0xd0, 0x6e, 0xbe, 0x4f, 0xb5, 0x9b, 0xff,
    -	0xa5, 0x05, 0xc7, 0xd5, 0xf5, 0x65, 0x3c, 0xd8, 0x3f, 0x07, 0x93, 0x7c, 0xbb, 0xcd, 0xd7, 0x1d,
    -	0xb7, 0xb1, 0x46, 0x1a, 0xcd, 0xba, 0x13, 0x49, 0x1b, 0xa6, 0x4b, 0x99, 0x2b, 0x37, 0xe1, 0x28,
    -	0x61, 0x54, 0xe4, 0x1e, 0x67, 0x19, 0x00, 0x9c, 0xd5, 0x8c, 0xfd, 0x3b, 0x03, 0xd0, 0xbb, 0xb8,
    -	0x43, 0xbc, 0xe8, 0x08, 0x9e, 0x36, 0x55, 0x18, 0x75, 0xbd, 0x1d, 0xbf, 0xbe, 0x43, 0x6a, 0x1c,
    -	0x7e, 0x98, 0x17, 0xf8, 0x09, 0x41, 0x7a, 0x74, 0xd9, 0x20, 0x81, 0x13, 0x24, 0x1f, 0x86, 0x8e,
    -	0xe8, 0x0a, 0xf4, 0xf1, 0xcb, 0x47, 0x28, 0x88, 0x32, 0xcf, 0x6c, 0x36, 0x88, 0xe2, 0x4a, 0x8d,
    -	0xf5, 0x57, 0xfc, 0x72, 0x13, 0xd5, 0xd1, 0x67, 0x61, 0x74, 0xc3, 0x0d, 0xc2, 0x68, 0xcd, 0x6d,
    -	0xd0, 0xab, 0xa1, 0xd1, 0xbc, 0x0f, 0x9d, 0x90, 0x1a, 0x87, 0x25, 0x83, 0x12, 0x4e, 0x50, 0x46,
    -	0x9b, 0x30, 0x52, 0x77, 0xf4, 0xa6, 0xfa, 0x0f, 0xdd, 0x94, 0xba, 0x1d, 0xae, 0xeb, 0x84, 0xb0,
    -	0x49, 0x97, 0x6e, 0xa7, 0x2a, 0x53, 0x6b, 0x0c, 0x30, 0x71, 0x86, 0xda, 0x4e, 0x5c, 0x9f, 0xc1,
    -	0x61, 0x94, 0x41, 0x63, 0xee, 0x06, 0x83, 0x26, 0x83, 0xa6, 0x39, 0x15, 0x7c, 0x06, 0x06, 0x09,
    -	0x1d, 0x42, 0x4a, 0x58, 0x5c, 0x30, 0x17, 0xbb, 0xeb, 0xeb, 0x8a, 0x5b, 0x0d, 0x7c, 0x53, 0x1b,
    -	0xb7, 0x28, 0x29, 0xe1, 0x98, 0x28, 0x9a, 0x87, 0xbe, 0x90, 0x04, 0xae, 0x92, 0xf8, 0xb7, 0x99,
    -	0x46, 0x86, 0xc6, 0x5d, 0x1a, 0xf9, 0x6f, 0x2c, 0xaa, 0xd2, 0xe5, 0xe5, 0x30, 0x51, 0x2c, 0xbb,
    -	0x0c, 0xb4, 0xe5, 0x35, 0xcb, 0x4a, 0xb1, 0x80, 0xa2, 0x37, 0xa1, 0x3f, 0x20, 0x75, 0xa6, 0xee,
    -	0x1d, 0xe9, 0x7e, 0x91, 0x73, 0xed, 0x31, 0xaf, 0x87, 0x25, 0x01, 0x74, 0x0d, 0x50, 0x40, 0x28,
    -	0x83, 0xe7, 0x7a, 0x9b, 0xca, 0x08, 0x5f, 0x1c, 0xb4, 0x8a, 0x91, 0xc6, 0x31, 0x86, 0xf4, 0x66,
    -	0xc5, 0x19, 0xd5, 0xd0, 0x15, 0x98, 0x50, 0xa5, 0xcb, 0x5e, 0x18, 0x39, 0xf4, 0x80, 0x1b, 0x63,
    -	0xb4, 0x94, 0x7c, 0x05, 0x27, 0x11, 0x70, 0xba, 0x8e, 0xfd, 0x6b, 0x16, 0xf0, 0x71, 0x3e, 0x02,
    -	0xa9, 0xc2, 0xeb, 0xa6, 0x54, 0xe1, 0x54, 0xee, 0xcc, 0xe5, 0x48, 0x14, 0x7e, 0xcd, 0x82, 0x21,
    -	0x6d, 0x66, 0xe3, 0x35, 0x6b, 0xb5, 0x59, 0xb3, 0x2d, 0x18, 0xa7, 0x2b, 0xfd, 0xc6, 0xed, 0x90,
    -	0x04, 0x3b, 0xa4, 0xc6, 0x16, 0x66, 0xe1, 0xfe, 0x16, 0xa6, 0x32, 0xf8, 0xbd, 0x9e, 0x20, 0x88,
    -	0x53, 0x4d, 0xd8, 0x9f, 0x91, 0x5d, 0x55, 0xf6, 0xd1, 0x55, 0x35, 0xe7, 0x09, 0xfb, 0x68, 0x35,
    -	0xab, 0x38, 0xc6, 0xa1, 0x5b, 0x6d, 0xcb, 0x0f, 0xa3, 0xa4, 0x7d, 0xf4, 0x55, 0x3f, 0x8c, 0x30,
    -	0x83, 0xd8, 0x2f, 0x00, 0x2c, 0xde, 0x25, 0x55, 0xbe, 0x62, 0xf5, 0x47, 0x8f, 0x95, 0xff, 0xe8,
    -	0xb1, 0xff, 0xd2, 0x82, 0xd1, 0xa5, 0x79, 0xe3, 0xe6, 0x9a, 0x01, 0xe0, 0x2f, 0xb5, 0x5b, 0xb7,
    -	0x56, 0xa5, 0x91, 0x0e, 0xb7, 0x53, 0x50, 0xa5, 0x58, 0xc3, 0x40, 0xa7, 0xa0, 0x58, 0x6f, 0x79,
    -	0x42, 0xec, 0xd9, 0x4f, 0xaf, 0xc7, 0xeb, 0x2d, 0x0f, 0xd3, 0x32, 0xcd, 0x93, 0xad, 0xd8, 0xb5,
    -	0x27, 0x5b, 0xc7, 0x80, 0x3a, 0xa8, 0x04, 0xbd, 0x77, 0xee, 0xb8, 0x35, 0x1e, 0x27, 0x40, 0x18,
    -	0x10, 0xdd, 0xba, 0xb5, 0xbc, 0x10, 0x62, 0x5e, 0x6e, 0x7f, 0xb9, 0x08, 0xd3, 0x4b, 0x75, 0x72,
    -	0xf7, 0x3d, 0xc6, 0x4a, 0xe8, 0xd6, 0x0f, 0xef, 0x70, 0x02, 0xa4, 0xc3, 0xfa, 0x5a, 0x76, 0x1e,
    -	0x8f, 0x0d, 0xe8, 0xe7, 0xe6, 0xc1, 0x32, 0x72, 0x42, 0xa6, 0x52, 0x36, 0x7f, 0x40, 0x66, 0xb8,
    -	0x99, 0xb1, 0x50, 0xca, 0xaa, 0x0b, 0x53, 0x94, 0x62, 0x49, 0x7c, 0xfa, 0x15, 0x18, 0xd6, 0x31,
    -	0x0f, 0xe5, 0xf5, 0xfc, 0xc3, 0x45, 0x18, 0xa7, 0x3d, 0x78, 0xa8, 0x13, 0xb1, 0x9e, 0x9e, 0x88,
    -	0x07, 0xed, 0xf9, 0xda, 0x79, 0x36, 0xde, 0x49, 0xce, 0xc6, 0xa5, 0xbc, 0xd9, 0x38, 0xea, 0x39,
    -	0xf8, 0x11, 0x0b, 0x26, 0x97, 0xea, 0x7e, 0x75, 0x3b, 0xe1, 0x9d, 0xfa, 0x12, 0x0c, 0xd1, 0xe3,
    -	0x38, 0x34, 0x02, 0xb5, 0x18, 0xa1, 0x7b, 0x04, 0x08, 0xeb, 0x78, 0x5a, 0xb5, 0xf5, 0xf5, 0xe5,
    -	0x85, 0xac, 0x88, 0x3f, 0x02, 0x84, 0x75, 0x3c, 0xfb, 0xcf, 0x2d, 0x38, 0x73, 0x65, 0x7e, 0x31,
    -	0x5e, 0x8a, 0xa9, 0xa0, 0x43, 0xe7, 0xa1, 0xaf, 0x59, 0xd3, 0xba, 0x12, 0x8b, 0x85, 0x17, 0x58,
    -	0x2f, 0x04, 0xf4, 0xfd, 0x12, 0xdf, 0x6b, 0x1d, 0xe0, 0x0a, 0x2e, 0xcf, 0x8b, 0x73, 0x57, 0x6a,
    -	0x81, 0xac, 0x5c, 0x2d, 0xd0, 0x13, 0xd0, 0x4f, 0xef, 0x05, 0xb7, 0x2a, 0xfb, 0xcd, 0xcd, 0x2e,
    -	0x78, 0x11, 0x96, 0x30, 0xfb, 0x57, 0x2d, 0x98, 0xbc, 0xe2, 0x46, 0xf4, 0xd2, 0x4e, 0x46, 0xd5,
    -	0xa1, 0xb7, 0x76, 0xe8, 0x46, 0x7e, 0xb0, 0x9b, 0x8c, 0xaa, 0x83, 0x15, 0x04, 0x6b, 0x58, 0xfc,
    -	0x83, 0x76, 0x5c, 0xe6, 0xef, 0x52, 0x30, 0xf5, 0x6e, 0x58, 0x94, 0x63, 0x85, 0x41, 0xc7, 0xab,
    -	0xe6, 0x06, 0x4c, 0x64, 0xb9, 0x2b, 0x0e, 0x6e, 0x35, 0x5e, 0x0b, 0x12, 0x80, 0x63, 0x1c, 0xfb,
    -	0x1f, 0x2c, 0x28, 0x5d, 0xe1, 0x5e, 0xbb, 0x1b, 0x61, 0xce, 0xa1, 0xfb, 0x02, 0x0c, 0x12, 0xa9,
    -	0x20, 0x10, 0xbd, 0x56, 0x8c, 0xa8, 0xd2, 0x1c, 0xf0, 0xe0, 0x3e, 0x0a, 0xaf, 0x0b, 0x17, 0xfa,
    -	0xc3, 0xf9, 0x40, 0x2f, 0x01, 0x22, 0x7a, 0x5b, 0x7a, 0xb4, 0x23, 0x16, 0x36, 0x65, 0x31, 0x05,
    -	0xc5, 0x19, 0x35, 0xec, 0x9f, 0xb3, 0xe0, 0xb8, 0xfa, 0xe0, 0xf7, 0xdd, 0x67, 0xda, 0x5f, 0x2f,
    -	0xc0, 0xc8, 0xd5, 0xb5, 0xb5, 0xf2, 0x15, 0x12, 0x69, 0xab, 0xb2, 0xbd, 0xda, 0x1f, 0x6b, 0xda,
    -	0xcb, 0x76, 0x6f, 0xc4, 0x56, 0xe4, 0xd6, 0x67, 0x78, 0x0c, 0xbf, 0x99, 0x65, 0x2f, 0xba, 0x11,
    -	0x54, 0xa2, 0xc0, 0xf5, 0x36, 0x33, 0x57, 0xba, 0xe4, 0x59, 0x8a, 0x79, 0x3c, 0x0b, 0x7a, 0x01,
    -	0xfa, 0x58, 0x10, 0x41, 0x39, 0x09, 0x8f, 0xa8, 0x27, 0x16, 0x2b, 0x3d, 0xd8, 0x2b, 0x0d, 0xae,
    -	0xe3, 0x65, 0xfe, 0x07, 0x0b, 0x54, 0xb4, 0x0e, 0x43, 0x5b, 0x51, 0xd4, 0xbc, 0x4a, 0x9c, 0x1a,
    -	0x09, 0xe4, 0x29, 0x7b, 0x36, 0xeb, 0x94, 0xa5, 0x83, 0xc0, 0xd1, 0xe2, 0x83, 0x29, 0x2e, 0x0b,
    -	0xb1, 0x4e, 0xc7, 0xae, 0x00, 0xc4, 0xb0, 0x07, 0xa4, 0xb8, 0xb1, 0xd7, 0x60, 0x90, 0x7e, 0xee,
    -	0x6c, 0xdd, 0x75, 0xda, 0xab, 0xc6, 0x9f, 0x81, 0x41, 0xa9, 0xf8, 0x0e, 0x45, 0x88, 0x0f, 0x76,
    -	0x23, 0x49, 0xbd, 0x78, 0x88, 0x63, 0xb8, 0xfd, 0x38, 0x08, 0x0b, 0xe0, 0x76, 0x24, 0xed, 0x0d,
    -	0x38, 0xc6, 0x4c, 0x99, 0x9d, 0x68, 0xcb, 0x58, 0xa3, 0x9d, 0x17, 0xc3, 0xb3, 0xe2, 0x5d, 0xc7,
    -	0xbf, 0x6c, 0x4a, 0x73, 0x21, 0x1f, 0x96, 0x14, 0xe3, 0x37, 0x9e, 0xfd, 0xf7, 0x3d, 0xf0, 0xc8,
    -	0x72, 0x25, 0x3f, 0x36, 0xd5, 0x65, 0x18, 0xe6, 0xec, 0x22, 0x5d, 0x1a, 0x4e, 0x5d, 0xb4, 0xab,
    -	0x24, 0xa0, 0x6b, 0x1a, 0x0c, 0x1b, 0x98, 0xe8, 0x0c, 0x14, 0xdd, 0x77, 0xbd, 0xa4, 0x83, 0xe5,
    -	0xf2, 0x5b, 0xab, 0x98, 0x96, 0x53, 0x30, 0xe5, 0x3c, 0xf9, 0x91, 0xae, 0xc0, 0x8a, 0xfb, 0x7c,
    -	0x1d, 0x46, 0xdd, 0xb0, 0x1a, 0xba, 0xcb, 0x1e, 0xdd, 0xa7, 0xda, 0x4e, 0x57, 0x32, 0x07, 0xda,
    -	0x69, 0x05, 0xc5, 0x09, 0x6c, 0xed, 0x7e, 0xe9, 0xed, 0x9a, 0x7b, 0xed, 0x18, 0x19, 0x83, 0x1e,
    -	0xff, 0x4d, 0xf6, 0x75, 0x21, 0x13, 0xc1, 0x8b, 0xe3, 0x9f, 0x7f, 0x70, 0x88, 0x25, 0x8c, 0x3e,
    -	0xe8, 0xaa, 0x5b, 0x4e, 0x73, 0xb6, 0x15, 0x6d, 0x2d, 0xb8, 0x61, 0xd5, 0xdf, 0x21, 0xc1, 0x2e,
    -	0x7b, 0x8b, 0x0f, 0xc4, 0x0f, 0x3a, 0x05, 0x98, 0xbf, 0x3a, 0x5b, 0xa6, 0x98, 0x38, 0x5d, 0x07,
    -	0xcd, 0xc2, 0x98, 0x2c, 0xac, 0x90, 0x90, 0x5d, 0x01, 0x43, 0x8c, 0x8c, 0x72, 0x79, 0x14, 0xc5,
    -	0x8a, 0x48, 0x12, 0xdf, 0x64, 0x70, 0xe1, 0x41, 0x30, 0xb8, 0x2f, 0xc3, 0x88, 0xeb, 0xb9, 0x91,
    -	0xeb, 0x44, 0x3e, 0xd7, 0x1f, 0xf1, 0x67, 0x37, 0x13, 0x30, 0x2f, 0xeb, 0x00, 0x6c, 0xe2, 0xd9,
    -	0xff, 0x5f, 0x0f, 0x4c, 0xb0, 0x69, 0xfb, 0x60, 0x85, 0x7d, 0x2f, 0xad, 0xb0, 0xf5, 0xf4, 0x0a,
    -	0x7b, 0x10, 0x9c, 0xfb, 0x7d, 0x2f, 0xb3, 0x2f, 0x58, 0x30, 0xc1, 0x64, 0xdc, 0xc6, 0x32, 0xbb,
    -	0x08, 0x83, 0x81, 0xe1, 0x8d, 0x3a, 0xa8, 0x2b, 0xb5, 0xa4, 0x63, 0x69, 0x8c, 0x83, 0xde, 0x00,
    -	0x68, 0xc6, 0x32, 0xf4, 0x82, 0x11, 0x42, 0x14, 0x72, 0xc5, 0xe7, 0x5a, 0x1d, 0xfb, 0xb3, 0x30,
    -	0xa8, 0xdc, 0x4d, 0xa5, 0xbf, 0xb9, 0x95, 0xe3, 0x6f, 0xde, 0x99, 0x8d, 0x90, 0xb6, 0x71, 0xc5,
    -	0x4c, 0xdb, 0xb8, 0xaf, 0x5a, 0x10, 0x6b, 0x38, 0xd0, 0x5b, 0x30, 0xd8, 0xf4, 0x99, 0x41, 0x74,
    -	0x20, 0xbd, 0x0c, 0x1e, 0x6f, 0xab, 0x22, 0xe1, 0x71, 0x02, 0x03, 0x3e, 0x1d, 0x65, 0x59, 0x15,
    -	0xc7, 0x54, 0xd0, 0x35, 0xe8, 0x6f, 0x06, 0xa4, 0x12, 0xb1, 0x20, 0x56, 0xdd, 0x13, 0xe4, 0xcb,
    -	0x97, 0x57, 0xc4, 0x92, 0x82, 0xfd, 0x1b, 0x05, 0x18, 0x4f, 0xa2, 0xa2, 0xd7, 0xa0, 0x87, 0xdc,
    -	0x25, 0x55, 0xd1, 0xdf, 0x4c, 0x9e, 0x20, 0x96, 0x91, 0xf0, 0x01, 0xa0, 0xff, 0x31, 0xab, 0x85,
    -	0xae, 0x42, 0x3f, 0x65, 0x08, 0xae, 0xa8, 0x80, 0x8d, 0x8f, 0xe6, 0x31, 0x15, 0x8a, 0xb3, 0xe2,
    -	0x9d, 0x13, 0x45, 0x58, 0x56, 0x67, 0x06, 0x69, 0xd5, 0x66, 0x85, 0xbe, 0xb5, 0xa2, 0x76, 0x22,
    -	0x81, 0xb5, 0xf9, 0x32, 0x47, 0x12, 0xd4, 0xb8, 0x41, 0x9a, 0x2c, 0xc4, 0x31, 0x11, 0xf4, 0x06,
    -	0xf4, 0x86, 0x75, 0x42, 0x9a, 0xc2, 0xe2, 0x20, 0x53, 0xca, 0x59, 0xa1, 0x08, 0x82, 0x12, 0x93,
    -	0x8a, 0xb0, 0x02, 0xcc, 0x2b, 0xda, 0xbf, 0x65, 0x01, 0x70, 0x0b, 0x3e, 0xc7, 0xdb, 0x24, 0x47,
    -	0xa0, 0x18, 0x58, 0x80, 0x9e, 0xb0, 0x49, 0xaa, 0xed, 0xac, 0xfd, 0xe3, 0xfe, 0x54, 0x9a, 0xa4,
    -	0x1a, 0xaf, 0x59, 0xfa, 0x0f, 0xb3, 0xda, 0xf6, 0x8f, 0x02, 0x8c, 0xc6, 0x68, 0xcb, 0x11, 0x69,
    -	0xa0, 0xe7, 0x8c, 0x28, 0x37, 0xa7, 0x12, 0x51, 0x6e, 0x06, 0x19, 0xb6, 0x26, 0x83, 0xfe, 0x2c,
    -	0x14, 0x1b, 0xce, 0x5d, 0x21, 0x64, 0x7c, 0xa6, 0x7d, 0x37, 0x28, 0xfd, 0x99, 0x15, 0xe7, 0x2e,
    -	0x7f, 0x87, 0x3f, 0x23, 0xf7, 0xd8, 0x8a, 0x73, 0xb7, 0xa3, 0x45, 0x3a, 0x6d, 0x84, 0xb5, 0xe5,
    -	0x7a, 0xc2, 0x38, 0xad, 0xab, 0xb6, 0x5c, 0x2f, 0xd9, 0x96, 0xeb, 0x75, 0xd1, 0x96, 0xeb, 0xa1,
    -	0x7b, 0xd0, 0x2f, 0x6c, 0x47, 0x45, 0xf8, 0xbd, 0x8b, 0x5d, 0xb4, 0x27, 0x4c, 0x4f, 0x79, 0x9b,
    -	0x17, 0xa5, 0x9c, 0x41, 0x94, 0x76, 0x6c, 0x57, 0x36, 0x88, 0xfe, 0x2b, 0x0b, 0x46, 0xc5, 0x6f,
    -	0x4c, 0xde, 0x6d, 0x91, 0x30, 0x12, 0x7c, 0xf8, 0x47, 0xba, 0xef, 0x83, 0xa8, 0xc8, 0xbb, 0xf2,
    -	0x11, 0x79, 0x65, 0x9a, 0xc0, 0x8e, 0x3d, 0x4a, 0xf4, 0x02, 0xfd, 0x86, 0x05, 0xc7, 0x1a, 0xce,
    -	0x5d, 0xde, 0x22, 0x2f, 0xc3, 0x4e, 0xe4, 0xfa, 0xc2, 0x06, 0xe3, 0xb5, 0xee, 0xa6, 0x3f, 0x55,
    -	0x9d, 0x77, 0x52, 0x2a, 0x5c, 0x8f, 0x65, 0xa1, 0x74, 0xec, 0x6a, 0x66, 0xbf, 0xa6, 0x37, 0x60,
    -	0x40, 0xae, 0xb7, 0x87, 0x69, 0x18, 0xcf, 0xda, 0x11, 0x6b, 0xed, 0xa1, 0xb6, 0xf3, 0x59, 0x18,
    -	0xd6, 0xd7, 0xd8, 0x43, 0x6d, 0xeb, 0x5d, 0x98, 0xcc, 0x58, 0x4b, 0x0f, 0xb5, 0xc9, 0x3b, 0x70,
    -	0x2a, 0x77, 0x7d, 0x3c, 0x54, 0xc7, 0x86, 0xaf, 0x5b, 0xfa, 0x39, 0x78, 0x04, 0xda, 0x99, 0x79,
    -	0x53, 0x3b, 0x73, 0xb6, 0xfd, 0xce, 0xc9, 0x51, 0xd1, 0xbc, 0xa3, 0x77, 0x9a, 0x9e, 0xea, 0xe8,
    -	0x4d, 0xe8, 0xab, 0xd3, 0x12, 0x69, 0x81, 0x6c, 0x77, 0xde, 0x91, 0x31, 0x5f, 0xcc, 0xca, 0x43,
    -	0x2c, 0x28, 0xd8, 0x5f, 0xb1, 0x20, 0xc3, 0x35, 0x83, 0xf2, 0x49, 0x2d, 0xb7, 0xc6, 0x86, 0xa4,
    -	0x18, 0xf3, 0x49, 0x2a, 0x08, 0xcc, 0x19, 0x28, 0x6e, 0xba, 0x35, 0xe1, 0x59, 0xac, 0xc0, 0x57,
    -	0x28, 0x78, 0xd3, 0xad, 0xa1, 0x25, 0x40, 0x61, 0xab, 0xd9, 0xac, 0x33, 0xb3, 0x25, 0xa7, 0x7e,
    -	0x25, 0xf0, 0x5b, 0x4d, 0x6e, 0x6e, 0x5c, 0xe4, 0x42, 0xa2, 0x4a, 0x0a, 0x8a, 0x33, 0x6a, 0xd8,
    -	0xbf, 0x6b, 0x41, 0xcf, 0x11, 0x4c, 0x13, 0x36, 0xa7, 0xe9, 0xb9, 0x5c, 0xd2, 0x22, 0x6b, 0xc3,
    -	0x0c, 0x76, 0xee, 0x2c, 0xde, 0x8d, 0x88, 0x17, 0x32, 0x86, 0x23, 0x73, 0xd6, 0xf6, 0x2c, 0x98,
    -	0xbc, 0xee, 0x3b, 0xb5, 0x39, 0xa7, 0xee, 0x78, 0x55, 0x12, 0x2c, 0x7b, 0x9b, 0x87, 0xb2, 0xed,
    -	0x2f, 0x74, 0xb4, 0xed, 0xbf, 0x0c, 0x7d, 0x6e, 0x53, 0x0b, 0xfb, 0x7e, 0x8e, 0xce, 0xee, 0x72,
    -	0x59, 0x44, 0x7c, 0x47, 0x46, 0xe3, 0xac, 0x14, 0x0b, 0x7c, 0xba, 0x2c, 0xb9, 0x51, 0x5d, 0x4f,
    -	0xfe, 0xb2, 0xa4, 0x6f, 0x9d, 0x64, 0x38, 0x33, 0xc3, 0xfc, 0x7b, 0x0b, 0x8c, 0x26, 0x84, 0x07,
    -	0x23, 0x86, 0x7e, 0x97, 0x7f, 0xa9, 0x58, 0x9b, 0x4f, 0x66, 0xbf, 0x41, 0x52, 0x03, 0xa3, 0xf9,
    -	0xe6, 0xf1, 0x02, 0x2c, 0x09, 0xd9, 0x97, 0x21, 0x33, 0xfc, 0x4c, 0x67, 0xf9, 0x92, 0xfd, 0x09,
    -	0x98, 0x60, 0x35, 0x0f, 0x29, 0xbb, 0xb1, 0x13, 0x52, 0xf1, 0x8c, 0x08, 0xbe, 0xf6, 0xff, 0x6d,
    -	0x01, 0x5a, 0xf1, 0x6b, 0xee, 0xc6, 0xae, 0x20, 0xce, 0xbf, 0xff, 0x5d, 0x28, 0xf1, 0xc7, 0x71,
    -	0x32, 0xca, 0xed, 0x7c, 0xdd, 0x09, 0x43, 0x4d, 0x22, 0xff, 0xa4, 0x68, 0xb7, 0xb4, 0xd6, 0x1e,
    -	0x1d, 0x77, 0xa2, 0x87, 0xde, 0x4a, 0x04, 0x1d, 0xfc, 0x68, 0x2a, 0xe8, 0xe0, 0x93, 0x99, 0x76,
    -	0x31, 0xe9, 0xde, 0xcb, 0x60, 0x84, 0xf6, 0x17, 0x2d, 0x18, 0x5b, 0x4d, 0x44, 0x6d, 0x3d, 0xcf,
    -	0x8c, 0x04, 0x32, 0x34, 0x4d, 0x15, 0x56, 0x8a, 0x05, 0xf4, 0x81, 0x4b, 0x62, 0xff, 0xd5, 0x82,
    -	0x38, 0xdc, 0xd5, 0x11, 0xb0, 0xdc, 0xf3, 0x06, 0xcb, 0x9d, 0xf9, 0x7c, 0x51, 0xdd, 0xc9, 0xe3,
    -	0xb8, 0xd1, 0x35, 0x35, 0x27, 0x6d, 0x5e, 0x2e, 0x31, 0x19, 0xbe, 0xcf, 0x46, 0xcd, 0x89, 0x53,
    -	0xb3, 0xf1, 0xcd, 0x02, 0x20, 0x85, 0xdb, 0x75, 0xa0, 0xca, 0x74, 0x8d, 0x07, 0x13, 0xa8, 0x72,
    -	0x07, 0x10, 0x33, 0x73, 0x09, 0x1c, 0x2f, 0xe4, 0x64, 0x5d, 0x21, 0x7b, 0x3e, 0x9c, 0x0d, 0xcd,
    -	0xb4, 0xf4, 0x5c, 0xbd, 0x9e, 0xa2, 0x86, 0x33, 0x5a, 0xd0, 0xcc, 0x97, 0x7a, 0xbb, 0x35, 0x5f,
    -	0xea, 0xeb, 0xe0, 0x82, 0xfd, 0x35, 0x0b, 0x46, 0xd4, 0x30, 0xbd, 0x4f, 0x5c, 0x40, 0x54, 0x7f,
    -	0x72, 0xee, 0x95, 0xb2, 0xd6, 0x65, 0xc6, 0x0c, 0x7c, 0x1f, 0x73, 0xa5, 0x77, 0xea, 0xee, 0x3d,
    -	0xa2, 0xe2, 0x29, 0x97, 0x84, 0x6b, 0xbc, 0x28, 0x3d, 0xd8, 0x2b, 0x8d, 0xa8, 0x7f, 0x3c, 0x82,
    -	0x6b, 0x5c, 0xc5, 0xfe, 0x25, 0xba, 0xd9, 0xcd, 0xa5, 0x88, 0x5e, 0x82, 0xde, 0xe6, 0x96, 0x13,
    -	0x92, 0x84, 0xab, 0x5c, 0x6f, 0x99, 0x16, 0x1e, 0xec, 0x95, 0x46, 0x55, 0x05, 0x56, 0x82, 0x39,
    -	0x76, 0xf7, 0xe1, 0x3f, 0xd3, 0x8b, 0xb3, 0x63, 0xf8, 0xcf, 0x7f, 0xb2, 0xa0, 0x67, 0x95, 0xde,
    -	0x5e, 0x0f, 0xff, 0x08, 0x78, 0xdd, 0x38, 0x02, 0x4e, 0xe7, 0x65, 0x16, 0xca, 0xdd, 0xfd, 0x4b,
    -	0x89, 0xdd, 0x7f, 0x36, 0x97, 0x42, 0xfb, 0x8d, 0xdf, 0x80, 0x21, 0x96, 0xaf, 0x48, 0xb8, 0x05,
    -	0xbe, 0x60, 0x6c, 0xf8, 0x52, 0x62, 0xc3, 0x8f, 0x69, 0xa8, 0xda, 0x4e, 0x7f, 0x0a, 0xfa, 0x85,
    -	0x9f, 0x59, 0x32, 0x22, 0x81, 0xc0, 0xc5, 0x12, 0x6e, 0xff, 0x7c, 0x11, 0x8c, 0xfc, 0x48, 0xe8,
    -	0xf7, 0x2d, 0x98, 0x09, 0xb8, 0xfd, 0x79, 0x6d, 0xa1, 0x15, 0xb8, 0xde, 0x66, 0xa5, 0xba, 0x45,
    -	0x6a, 0xad, 0xba, 0xeb, 0x6d, 0x2e, 0x6f, 0x7a, 0xbe, 0x2a, 0x5e, 0xbc, 0x4b, 0xaa, 0x2d, 0xa6,
    -	0x1b, 0xee, 0x90, 0x8c, 0x49, 0xf9, 0x71, 0x3c, 0xbf, 0xbf, 0x57, 0x9a, 0xc1, 0x87, 0xa2, 0x8d,
    -	0x0f, 0xd9, 0x17, 0xf4, 0xe7, 0x16, 0x5c, 0xe4, 0x79, 0x7a, 0xba, 0xef, 0x7f, 0x1b, 0x09, 0x47,
    -	0x59, 0x92, 0x8a, 0x89, 0xac, 0x91, 0xa0, 0x31, 0xf7, 0xb2, 0x18, 0xd0, 0x8b, 0xe5, 0xc3, 0xb5,
    -	0x85, 0x0f, 0xdb, 0x39, 0xfb, 0x7f, 0x2e, 0xc2, 0x88, 0x08, 0x13, 0x29, 0xee, 0x80, 0x97, 0x8c,
    -	0x25, 0xf1, 0x68, 0x62, 0x49, 0x4c, 0x18, 0xc8, 0x0f, 0xe6, 0xf8, 0x0f, 0x61, 0x82, 0x1e, 0xce,
    -	0x57, 0x89, 0x13, 0x44, 0xb7, 0x89, 0xc3, 0xad, 0x12, 0x8b, 0x87, 0x3e, 0xfd, 0x95, 0x78, 0xfc,
    -	0x7a, 0x92, 0x18, 0x4e, 0xd3, 0xff, 0x5e, 0xba, 0x73, 0x3c, 0x18, 0x4f, 0x45, 0xfa, 0x7c, 0x1b,
    -	0x06, 0x95, 0x93, 0x94, 0x38, 0x74, 0xda, 0x07, 0xcc, 0x4d, 0x52, 0xe0, 0x42, 0xcf, 0xd8, 0x41,
    -	0x2f, 0x26, 0x67, 0xff, 0x66, 0xc1, 0x68, 0x90, 0x4f, 0xe2, 0x2a, 0x0c, 0x38, 0x21, 0x0b, 0xe2,
    -	0x5d, 0x6b, 0x27, 0x97, 0x4e, 0x35, 0xc3, 0x1c, 0xd5, 0x66, 0x45, 0x4d, 0xac, 0x68, 0xa0, 0xab,
    -	0xdc, 0xf6, 0x73, 0x87, 0xb4, 0x13, 0x4a, 0xa7, 0xa8, 0x81, 0xb4, 0x0e, 0xdd, 0x21, 0x58, 0xd4,
    -	0x47, 0x9f, 0xe2, 0xc6, 0xb9, 0xd7, 0x3c, 0xff, 0x8e, 0x77, 0xc5, 0xf7, 0x65, 0x48, 0xa0, 0xee,
    -	0x08, 0x4e, 0x48, 0x93, 0x5c, 0x55, 0x1d, 0x9b, 0xd4, 0xba, 0x0b, 0x9d, 0xfd, 0x39, 0x60, 0x79,
    -	0x49, 0xcc, 0x98, 0x04, 0x21, 0x22, 0x30, 0x26, 0x62, 0x90, 0xca, 0x32, 0x31, 0x76, 0x99, 0xcf,
    -	0x6f, 0xb3, 0x76, 0xac, 0xc7, 0xb9, 0x66, 0x92, 0xc0, 0x49, 0x9a, 0xf6, 0x16, 0x3f, 0x84, 0x97,
    -	0x88, 0x13, 0xb5, 0x02, 0x12, 0xa2, 0x8f, 0xc3, 0x54, 0xfa, 0x65, 0x2c, 0xd4, 0x21, 0x16, 0xe3,
    -	0x9e, 0x4f, 0xef, 0xef, 0x95, 0xa6, 0x2a, 0x39, 0x38, 0x38, 0xb7, 0xb6, 0xfd, 0x2b, 0x16, 0x30,
    -	0x4f, 0xf0, 0x23, 0xe0, 0x7c, 0x3e, 0x66, 0x72, 0x3e, 0x53, 0x79, 0xd3, 0x99, 0xc3, 0xf4, 0xbc,
    -	0xc8, 0xd7, 0x70, 0x39, 0xf0, 0xef, 0xee, 0x0a, 0xdb, 0xad, 0xce, 0xcf, 0x38, 0xfb, 0xcb, 0x16,
    -	0xb0, 0x24, 0x3e, 0x98, 0xbf, 0xda, 0xa5, 0x82, 0xa3, 0xb3, 0x59, 0xc2, 0xc7, 0x61, 0x60, 0x43,
    -	0x0c, 0x7f, 0x86, 0xd0, 0xc9, 0xe8, 0xb0, 0x49, 0x5b, 0x4e, 0x9a, 0xf0, 0xe8, 0x14, 0xff, 0xb0,
    -	0xa2, 0x66, 0xff, 0xf7, 0x16, 0x4c, 0xe7, 0x57, 0x43, 0xeb, 0x70, 0x32, 0x20, 0xd5, 0x56, 0x10,
    -	0xd2, 0x2d, 0x21, 0x1e, 0x40, 0xc2, 0x29, 0x8a, 0x4f, 0xf5, 0x23, 0xfb, 0x7b, 0xa5, 0x93, 0x38,
    -	0x1b, 0x05, 0xe7, 0xd5, 0x45, 0xaf, 0xc0, 0x68, 0x2b, 0xe4, 0x9c, 0x1f, 0x63, 0xba, 0x42, 0x11,
    -	0x29, 0x9a, 0xf9, 0x0d, 0xad, 0x1b, 0x10, 0x9c, 0xc0, 0xb4, 0x7f, 0x80, 0x2f, 0x47, 0x15, 0x2c,
    -	0xba, 0x01, 0x13, 0x9e, 0xf6, 0x9f, 0xde, 0x80, 0xf2, 0xa9, 0xff, 0x78, 0xa7, 0x5b, 0x9f, 0x5d,
    -	0x97, 0x9a, 0xaf, 0x7a, 0x82, 0x0c, 0x4e, 0x53, 0xb6, 0x7f, 0xc1, 0x82, 0x93, 0x3a, 0xa2, 0xe6,
    -	0x0e, 0xd7, 0x49, 0x97, 0xb7, 0x00, 0x03, 0x7e, 0x93, 0x04, 0x4e, 0xe4, 0x07, 0xe2, 0x9a, 0xbb,
    -	0x20, 0x57, 0xe8, 0x0d, 0x51, 0x7e, 0x20, 0x92, 0xd7, 0x48, 0xea, 0xb2, 0x1c, 0xab, 0x9a, 0xc8,
    -	0x86, 0x3e, 0x26, 0x40, 0x0c, 0x85, 0xe3, 0x23, 0x3b, 0xb4, 0x98, 0x7d, 0x4a, 0x88, 0x05, 0xc4,
    -	0xfe, 0x7b, 0x8b, 0xaf, 0x4f, 0xbd, 0xeb, 0xe8, 0x5d, 0x18, 0x6f, 0x38, 0x51, 0x75, 0x6b, 0xf1,
    -	0x6e, 0x33, 0xe0, 0x2a, 0x5a, 0x39, 0x4e, 0xcf, 0x74, 0x1a, 0x27, 0xed, 0x23, 0x63, 0x03, 0xe9,
    -	0x95, 0x04, 0x31, 0x9c, 0x22, 0x8f, 0x6e, 0xc3, 0x10, 0x2b, 0x63, 0x3e, 0xbd, 0x61, 0x3b, 0x5e,
    -	0x26, 0xaf, 0x35, 0x65, 0xe2, 0xb3, 0x12, 0xd3, 0xc1, 0x3a, 0x51, 0xfb, 0xab, 0x45, 0x7e, 0x68,
    -	0xb0, 0xb7, 0xc7, 0x53, 0xd0, 0xdf, 0xf4, 0x6b, 0xf3, 0xcb, 0x0b, 0x58, 0xcc, 0x82, 0xba, 0xf7,
    -	0xca, 0xbc, 0x18, 0x4b, 0x38, 0xba, 0x00, 0x03, 0xe2, 0xa7, 0x54, 0xa9, 0xb3, 0x3d, 0x22, 0xf0,
    -	0x42, 0xac, 0xa0, 0xe8, 0x79, 0x80, 0x66, 0xe0, 0xef, 0xb8, 0x35, 0x16, 0x89, 0xa9, 0x68, 0x5a,
    -	0xe7, 0x95, 0x15, 0x04, 0x6b, 0x58, 0xe8, 0x55, 0x18, 0x69, 0x79, 0x21, 0xe7, 0x9f, 0xb4, 0x78,
    -	0xf7, 0xca, 0x6e, 0x6c, 0x5d, 0x07, 0x62, 0x13, 0x17, 0xcd, 0x42, 0x5f, 0xe4, 0x30, 0x6b, 0xb3,
    -	0xde, 0x7c, 0x23, 0xfa, 0x35, 0x8a, 0xa1, 0x67, 0x96, 0xa3, 0x15, 0xb0, 0xa8, 0x88, 0xde, 0x96,
    -	0xee, 0xf5, 0xfc, 0x26, 0x12, 0xde, 0x2b, 0xdd, 0xdd, 0x5a, 0x9a, 0x73, 0xbd, 0xf0, 0x8a, 0x31,
    -	0x68, 0xa1, 0x57, 0x00, 0xc8, 0xdd, 0x88, 0x04, 0x9e, 0x53, 0x57, 0x36, 0xa2, 0x8a, 0x91, 0x59,
    -	0xf0, 0x57, 0xfd, 0x68, 0x3d, 0x24, 0x8b, 0x0a, 0x03, 0x6b, 0xd8, 0xf6, 0x8f, 0x0e, 0x01, 0xc4,
    -	0x0f, 0x0d, 0x74, 0x0f, 0x06, 0xaa, 0x4e, 0xd3, 0xa9, 0xf2, 0xb4, 0xa9, 0xc5, 0x3c, 0xaf, 0xe7,
    -	0xb8, 0xc6, 0xcc, 0xbc, 0x40, 0xe7, 0xca, 0x1b, 0x19, 0x32, 0x7c, 0x40, 0x16, 0x77, 0x54, 0xd8,
    -	0xa8, 0xf6, 0xd0, 0x17, 0x2c, 0x18, 0x12, 0x91, 0x8e, 0xd8, 0x0c, 0x15, 0xf2, 0xf5, 0x6d, 0x5a,
    -	0xfb, 0xb3, 0x71, 0x0d, 0xde, 0x85, 0x17, 0xe4, 0x0a, 0xd5, 0x20, 0x1d, 0x7b, 0xa1, 0x37, 0x8c,
    -	0x3e, 0x2c, 0xdf, 0xb6, 0x45, 0x63, 0x28, 0xd5, 0xdb, 0x76, 0x90, 0x5d, 0x35, 0xfa, 0xb3, 0x76,
    -	0xdd, 0x78, 0xd6, 0xf6, 0xe4, 0xfb, 0x0f, 0x1b, 0xfc, 0x76, 0xa7, 0x17, 0x2d, 0x2a, 0xeb, 0xb1,
    -	0x44, 0x7a, 0xf3, 0x9d, 0x5e, 0xb5, 0x87, 0x5d, 0x87, 0x38, 0x22, 0x9f, 0x85, 0xb1, 0x9a, 0xc9,
    -	0xb5, 0x88, 0x95, 0xf8, 0x64, 0x1e, 0xdd, 0x04, 0x93, 0x13, 0xf3, 0x29, 0x09, 0x00, 0x4e, 0x12,
    -	0x46, 0x65, 0x1e, 0x5a, 0x66, 0xd9, 0xdb, 0xf0, 0x85, 0x07, 0x95, 0x9d, 0x3b, 0x97, 0xbb, 0x61,
    -	0x44, 0x1a, 0x14, 0x33, 0x66, 0x12, 0x56, 0x45, 0x5d, 0xac, 0xa8, 0xa0, 0x37, 0xa1, 0x8f, 0x79,
    -	0x3d, 0x86, 0x53, 0x03, 0xf9, 0x6a, 0x0d, 0x33, 0x12, 0x6a, 0xbc, 0x21, 0xd9, 0xdf, 0x10, 0x0b,
    -	0x0a, 0xe8, 0xaa, 0xf4, 0x29, 0x0e, 0x97, 0xbd, 0xf5, 0x90, 0x30, 0x9f, 0xe2, 0xc1, 0xb9, 0xc7,
    -	0x63, 0x77, 0x61, 0x5e, 0x9e, 0x99, 0x7f, 0xd6, 0xa8, 0x49, 0xd9, 0x3e, 0xf1, 0x5f, 0xa6, 0xb5,
    -	0x15, 0x71, 0xdb, 0x32, 0xbb, 0x67, 0xa6, 0xbe, 0x8d, 0x87, 0xf3, 0xa6, 0x49, 0x02, 0x27, 0x69,
    -	0x52, 0x16, 0x9a, 0xef, 0x7a, 0xe1, 0x83, 0xd5, 0xe9, 0xec, 0xe0, 0x92, 0x03, 0x76, 0x1b, 0xf1,
    -	0x12, 0x2c, 0xea, 0x23, 0x17, 0xc6, 0x02, 0x83, 0xbd, 0x90, 0xe1, 0xd6, 0xce, 0x77, 0xc7, 0xc4,
    -	0x68, 0x81, 0xfc, 0x4d, 0x32, 0x38, 0x49, 0x17, 0xbd, 0xa9, 0x31, 0x4a, 0x23, 0xed, 0x5f, 0xfe,
    -	0x9d, 0x58, 0xa3, 0xe9, 0x6d, 0x18, 0x31, 0x0e, 0x9b, 0x87, 0xaa, 0x82, 0xf4, 0x60, 0x3c, 0x79,
    -	0xb2, 0x3c, 0x54, 0xcd, 0xe3, 0xdf, 0xf6, 0xc0, 0xa8, 0xb9, 0x13, 0xd0, 0x45, 0x18, 0x14, 0x44,
    -	0x54, 0x46, 0x2b, 0xb5, 0xb9, 0x57, 0x24, 0x00, 0xc7, 0x38, 0x2c, 0x91, 0x19, 0xab, 0xae, 0xf9,
    -	0x0a, 0xc4, 0x89, 0xcc, 0x14, 0x04, 0x6b, 0x58, 0xf4, 0x01, 0x7b, 0xdb, 0xf7, 0x23, 0x75, 0x8f,
    -	0xaa, 0xed, 0x32, 0xc7, 0x4a, 0xb1, 0x80, 0xd2, 0xfb, 0x73, 0x9b, 0x04, 0x1e, 0xa9, 0x9b, 0x29,
    -	0x1d, 0xd4, 0xfd, 0x79, 0x4d, 0x07, 0x62, 0x13, 0x97, 0x72, 0x01, 0x7e, 0xc8, 0xf6, 0x9f, 0x78,
    -	0x26, 0xc7, 0xbe, 0x17, 0x15, 0x1e, 0x45, 0x42, 0xc2, 0xd1, 0x27, 0xe0, 0xa4, 0x0a, 0x9f, 0x28,
    -	0x56, 0x97, 0x6c, 0xb1, 0xcf, 0x90, 0x6a, 0x9d, 0x9c, 0xcf, 0x46, 0xc3, 0x79, 0xf5, 0xd1, 0xeb,
    -	0x30, 0x2a, 0x9e, 0x52, 0x92, 0x62, 0xbf, 0x69, 0x48, 0x78, 0xcd, 0x80, 0xe2, 0x04, 0xb6, 0x4c,
    -	0x4a, 0xc1, 0xde, 0x18, 0x92, 0xc2, 0x40, 0x3a, 0x29, 0x85, 0x0e, 0xc7, 0xa9, 0x1a, 0x68, 0x16,
    -	0xc6, 0x38, 0xeb, 0xe8, 0x7a, 0x9b, 0x7c, 0x4e, 0x84, 0x67, 0xa7, 0xda, 0x54, 0x37, 0x4c, 0x30,
    -	0x4e, 0xe2, 0xa3, 0xcb, 0x30, 0xec, 0x04, 0xd5, 0x2d, 0x37, 0x22, 0x55, 0xba, 0x33, 0x98, 0x2d,
    -	0x9f, 0x66, 0x89, 0x39, 0xab, 0xc1, 0xb0, 0x81, 0x69, 0xdf, 0x83, 0xc9, 0x8c, 0xf0, 0x32, 0x74,
    -	0xe1, 0x38, 0x4d, 0x57, 0x7e, 0x53, 0xc2, 0xdd, 0x61, 0xb6, 0xbc, 0x2c, 0xbf, 0x46, 0xc3, 0xa2,
    -	0xab, 0x93, 0x85, 0xa1, 0xd1, 0x92, 0x6f, 0xab, 0xd5, 0xb9, 0x24, 0x01, 0x38, 0xc6, 0xb1, 0xff,
    -	0xb9, 0x00, 0x63, 0x19, 0x0a, 0x3a, 0x96, 0x00, 0x3a, 0xf1, 0xd2, 0x8a, 0xf3, 0x3d, 0x9b, 0x39,
    -	0x4e, 0x0a, 0x87, 0xc8, 0x71, 0x52, 0xec, 0x94, 0xe3, 0xa4, 0xe7, 0xbd, 0xe4, 0x38, 0x31, 0x47,
    -	0xac, 0xb7, 0xab, 0x11, 0xcb, 0xc8, 0x8b, 0xd2, 0x77, 0xc8, 0xbc, 0x28, 0xc6, 0xa0, 0xf7, 0x77,
    -	0x31, 0xe8, 0x3f, 0x5d, 0x80, 0xf1, 0xa4, 0x6e, 0xef, 0x08, 0xe4, 0xe3, 0x6f, 0x1a, 0xf2, 0xf1,
    -	0x0b, 0xdd, 0x78, 0xe2, 0xe7, 0xca, 0xca, 0x71, 0x42, 0x56, 0xfe, 0x74, 0x57, 0xd4, 0xda, 0xcb,
    -	0xcd, 0x7f, 0xb1, 0x00, 0xc7, 0x33, 0x55, 0x9e, 0x47, 0x30, 0x36, 0x37, 0x8c, 0xb1, 0x79, 0xae,
    -	0xeb, 0x28, 0x05, 0xb9, 0x03, 0x74, 0x2b, 0x31, 0x40, 0x17, 0xbb, 0x27, 0xd9, 0x7e, 0x94, 0xbe,
    -	0x55, 0x84, 0xb3, 0x99, 0xf5, 0x62, 0xf1, 0xf2, 0x92, 0x21, 0x5e, 0x7e, 0x3e, 0x21, 0x5e, 0xb6,
    -	0xdb, 0xd7, 0x7e, 0x30, 0xf2, 0x66, 0xe1, 0xad, 0xcf, 0x62, 0x8e, 0xdc, 0xa7, 0xac, 0xd9, 0xf0,
    -	0xd6, 0x57, 0x84, 0xb0, 0x49, 0xf7, 0x7b, 0x49, 0xc6, 0xfc, 0x67, 0x16, 0x9c, 0xca, 0x9c, 0x9b,
    -	0x23, 0x90, 0xf4, 0xad, 0x9a, 0x92, 0xbe, 0xa7, 0xba, 0x5e, 0xad, 0x39, 0xa2, 0xbf, 0x2f, 0xf6,
    -	0xe5, 0x7c, 0x0b, 0x13, 0x40, 0xdc, 0x80, 0x21, 0xa7, 0x5a, 0x25, 0x61, 0xb8, 0xe2, 0xd7, 0x54,
    -	0x3a, 0x84, 0xe7, 0xd8, 0xf3, 0x30, 0x2e, 0x3e, 0xd8, 0x2b, 0x4d, 0x27, 0x49, 0xc4, 0x60, 0xac,
    -	0x53, 0x40, 0x9f, 0x82, 0x81, 0x50, 0x66, 0xb2, 0xec, 0xb9, 0xff, 0x4c, 0x96, 0x8c, 0xc9, 0x55,
    -	0x02, 0x16, 0x45, 0x12, 0x7d, 0xbf, 0x1e, 0xfd, 0xa9, 0x8d, 0x68, 0x91, 0x77, 0xf2, 0x3e, 0x62,
    -	0x40, 0x3d, 0x0f, 0xb0, 0xa3, 0x5e, 0x32, 0x49, 0xe1, 0x89, 0xf6, 0xc6, 0xd1, 0xb0, 0xd0, 0x1b,
    -	0x30, 0x1e, 0xf2, 0xc0, 0xa7, 0xb1, 0x91, 0x0a, 0x5f, 0x8b, 0x2c, 0x76, 0x5c, 0x25, 0x01, 0xc3,
    -	0x29, 0x6c, 0xb4, 0x24, 0x5b, 0x65, 0xe6, 0x48, 0x7c, 0x79, 0x9e, 0x8f, 0x5b, 0x14, 0x26, 0x49,
    -	0xc7, 0x92, 0x93, 0xc0, 0x86, 0x5f, 0xab, 0x89, 0x3e, 0x05, 0x40, 0x17, 0x91, 0x10, 0xa2, 0xf4,
    -	0xe7, 0x1f, 0xa1, 0xf4, 0x6c, 0xa9, 0x65, 0x7a, 0x32, 0x30, 0x37, 0xfb, 0x05, 0x45, 0x04, 0x6b,
    -	0x04, 0x91, 0x03, 0x23, 0xf1, 0xbf, 0x38, 0x47, 0xfb, 0x85, 0xdc, 0x16, 0x92, 0xc4, 0x99, 0x82,
    -	0x61, 0x41, 0x27, 0x81, 0x4d, 0x8a, 0xe8, 0x93, 0x70, 0x6a, 0x27, 0xd7, 0xf2, 0x87, 0x73, 0x82,
    -	0x2c, 0xe9, 0x7a, 0xbe, 0xbd, 0x4f, 0x7e, 0x7d, 0xfb, 0x7f, 0x07, 0x78, 0xa4, 0xcd, 0x49, 0x8f,
    -	0x66, 0x4d, 0xad, 0xfd, 0x33, 0x49, 0xc9, 0xc6, 0x74, 0x66, 0x65, 0x43, 0xd4, 0x91, 0xd8, 0x50,
    -	0x85, 0xf7, 0xbc, 0xa1, 0x7e, 0xc2, 0xd2, 0x64, 0x4e, 0xdc, 0xa6, 0xfb, 0x63, 0x87, 0xbc, 0xc1,
    -	0x1e, 0xa0, 0x10, 0x6a, 0x23, 0x43, 0x92, 0xf3, 0x7c, 0xd7, 0xdd, 0xe9, 0x5e, 0xb4, 0xf3, 0xf5,
    -	0xec, 0x80, 0xef, 0x5c, 0xc8, 0x73, 0xe5, 0xb0, 0xdf, 0x7f, 0x54, 0xc1, 0xdf, 0xbf, 0x69, 0xc1,
    -	0xa9, 0x54, 0x31, 0xef, 0x03, 0x09, 0x45, 0xb4, 0xbb, 0xd5, 0xf7, 0xdc, 0x79, 0x49, 0x90, 0x7f,
    -	0xc3, 0x55, 0xf1, 0x0d, 0xa7, 0x72, 0xf1, 0x92, 0x5d, 0xff, 0xd2, 0xdf, 0x94, 0x26, 0x59, 0x03,
    -	0x26, 0x22, 0xce, 0xef, 0x3a, 0x6a, 0xc2, 0xb9, 0x6a, 0x2b, 0x08, 0xe2, 0xc5, 0x9a, 0xb1, 0x39,
    -	0xf9, 0x5b, 0xef, 0xf1, 0xfd, 0xbd, 0xd2, 0xb9, 0xf9, 0x0e, 0xb8, 0xb8, 0x23, 0x35, 0xe4, 0x01,
    -	0x6a, 0xa4, 0xec, 0xeb, 0xd8, 0x01, 0x90, 0x23, 0x87, 0x49, 0x5b, 0xe3, 0x71, 0x4b, 0xd9, 0x0c,
    -	0x2b, 0xbd, 0x0c, 0xca, 0x47, 0x2b, 0x3d, 0xf9, 0xce, 0xc4, 0xa5, 0x9f, 0xbe, 0x0e, 0x67, 0xdb,
    -	0x2f, 0xa6, 0x43, 0x85, 0x72, 0xf8, 0x4b, 0x0b, 0xce, 0xb4, 0x8d, 0x17, 0xf6, 0x5d, 0xf8, 0x58,
    -	0xb0, 0x3f, 0x6f, 0xc1, 0xa3, 0x99, 0x35, 0x92, 0x4e, 0x78, 0x55, 0x5a, 0xa8, 0x99, 0xa3, 0xc6,
    -	0x91, 0x73, 0x24, 0x00, 0xc7, 0x38, 0x86, 0xc5, 0x66, 0xa1, 0xa3, 0xc5, 0xe6, 0x1f, 0x59, 0x90,
    -	0xba, 0xea, 0x8f, 0x80, 0xf3, 0x5c, 0x36, 0x39, 0xcf, 0xc7, 0xbb, 0x19, 0xcd, 0x1c, 0xa6, 0xf3,
    -	0x1f, 0xc7, 0xe0, 0x44, 0x8e, 0x27, 0xf6, 0x0e, 0x4c, 0x6c, 0x56, 0x89, 0x19, 0x7a, 0xa3, 0x5d,
    -	0x48, 0xba, 0xb6, 0x71, 0x3a, 0xe6, 0x8e, 0xef, 0xef, 0x95, 0x26, 0x52, 0x28, 0x38, 0xdd, 0x04,
    -	0xfa, 0xbc, 0x05, 0xc7, 0x9c, 0x3b, 0xe1, 0x22, 0x7d, 0x41, 0xb8, 0xd5, 0xb9, 0xba, 0x5f, 0xdd,
    -	0xa6, 0x8c, 0x99, 0xdc, 0x56, 0x2f, 0x66, 0x0a, 0xa3, 0x6f, 0x55, 0x52, 0xf8, 0x46, 0xf3, 0x53,
    -	0xfb, 0x7b, 0xa5, 0x63, 0x59, 0x58, 0x38, 0xb3, 0x2d, 0x84, 0x45, 0xc6, 0x2f, 0x27, 0xda, 0x6a,
    -	0x17, 0x1c, 0x26, 0xcb, 0x65, 0x9e, 0xb3, 0xc4, 0x12, 0x82, 0x15, 0x1d, 0xf4, 0x19, 0x18, 0xdc,
    -	0x94, 0x71, 0x20, 0x32, 0x58, 0xee, 0x78, 0x20, 0xdb, 0x47, 0xc7, 0xe0, 0x26, 0x30, 0x0a, 0x09,
    -	0xc7, 0x44, 0xd1, 0xeb, 0x50, 0xf4, 0x36, 0x42, 0x11, 0xa2, 0x2e, 0xdb, 0x12, 0xd7, 0xb4, 0x75,
    -	0xe6, 0x21, 0x98, 0x56, 0x97, 0x2a, 0x98, 0x56, 0x44, 0x57, 0xa1, 0x18, 0xdc, 0xae, 0x09, 0x4d,
    -	0x4a, 0xe6, 0x26, 0xc5, 0x73, 0x0b, 0x39, 0xbd, 0x62, 0x94, 0xf0, 0xdc, 0x02, 0xa6, 0x24, 0x50,
    -	0x19, 0x7a, 0x99, 0xfb, 0xb2, 0x60, 0x6d, 0x33, 0x9f, 0xf2, 0x6d, 0xc2, 0x00, 0x70, 0x8f, 0x44,
    -	0x86, 0x80, 0x39, 0x21, 0xb4, 0x06, 0x7d, 0x55, 0xd7, 0xab, 0x91, 0x40, 0xf0, 0xb2, 0x1f, 0xce,
    -	0xd4, 0x99, 0x30, 0x8c, 0x1c, 0x9a, 0x5c, 0x85, 0xc0, 0x30, 0xb0, 0xa0, 0xc5, 0xa8, 0x92, 0xe6,
    -	0xd6, 0x86, 0xbc, 0xb1, 0xb2, 0xa9, 0x92, 0xe6, 0xd6, 0x52, 0xa5, 0x2d, 0x55, 0x86, 0x81, 0x05,
    -	0x2d, 0xf4, 0x0a, 0x14, 0x36, 0xaa, 0xc2, 0x35, 0x39, 0x53, 0x79, 0x62, 0x46, 0xd1, 0x9a, 0xeb,
    -	0xdb, 0xdf, 0x2b, 0x15, 0x96, 0xe6, 0x71, 0x61, 0xa3, 0x8a, 0x56, 0xa1, 0x7f, 0x83, 0xc7, 0xdd,
    -	0x11, 0xfa, 0x91, 0x27, 0xb3, 0x43, 0x02, 0xa5, 0x42, 0xf3, 0x70, 0xef, 0x52, 0x01, 0xc0, 0x92,
    -	0x08, 0x4b, 0x40, 0xa5, 0xe2, 0x07, 0x89, 0xf0, 0xa5, 0x33, 0x87, 0x8b, 0xf9, 0xc4, 0x9f, 0x1a,
    -	0x71, 0x14, 0x22, 0xac, 0x51, 0xa4, 0xab, 0xda, 0xb9, 0xd7, 0x0a, 0x58, 0x6e, 0x0b, 0xa1, 0x1a,
    -	0xc9, 0x5c, 0xd5, 0xb3, 0x12, 0xa9, 0xdd, 0xaa, 0x56, 0x48, 0x38, 0x26, 0x8a, 0xb6, 0x61, 0x64,
    -	0x27, 0x6c, 0x6e, 0x11, 0xb9, 0xa5, 0x59, 0xd8, 0xbb, 0x1c, 0x6e, 0xf6, 0xa6, 0x40, 0x74, 0x83,
    -	0xa8, 0xe5, 0xd4, 0x53, 0xa7, 0x10, 0x7b, 0xd6, 0xdc, 0xd4, 0x89, 0x61, 0x93, 0x36, 0x1d, 0xfe,
    -	0x77, 0x5b, 0xfe, 0xed, 0xdd, 0x88, 0x88, 0xa8, 0xa3, 0x99, 0xc3, 0xff, 0x16, 0x47, 0x49, 0x0f,
    -	0xbf, 0x00, 0x60, 0x49, 0x04, 0xdd, 0x14, 0xc3, 0xc3, 0x4e, 0xcf, 0xf1, 0xfc, 0x90, 0xe6, 0xb3,
    -	0x12, 0x29, 0x67, 0x50, 0xd8, 0x69, 0x19, 0x93, 0x62, 0xa7, 0x64, 0x73, 0xcb, 0x8f, 0x7c, 0x2f,
    -	0x71, 0x42, 0x4f, 0xe4, 0x9f, 0x92, 0xe5, 0x0c, 0xfc, 0xf4, 0x29, 0x99, 0x85, 0x85, 0x33, 0xdb,
    -	0x42, 0x35, 0x18, 0x6d, 0xfa, 0x41, 0x74, 0xc7, 0x0f, 0xe4, 0xfa, 0x42, 0x6d, 0x04, 0xa5, 0x06,
    -	0xa6, 0x68, 0x91, 0x19, 0xe6, 0x98, 0x10, 0x9c, 0xa0, 0x89, 0x3e, 0x0e, 0xfd, 0x61, 0xd5, 0xa9,
    -	0x93, 0xe5, 0x1b, 0x53, 0x93, 0xf9, 0xd7, 0x4f, 0x85, 0xa3, 0xe4, 0xac, 0x2e, 0x1e, 0x36, 0x89,
    -	0xa3, 0x60, 0x49, 0x0e, 0x2d, 0x41, 0x2f, 0x4b, 0xec, 0xcc, 0x42, 0xe4, 0xe6, 0x44, 0x66, 0x4f,
    -	0xb9, 0xd5, 0xf0, 0xb3, 0x89, 0x15, 0x63, 0x5e, 0x9d, 0xee, 0x01, 0x21, 0x29, 0xf0, 0xc3, 0xa9,
    -	0xe3, 0xf9, 0x7b, 0x40, 0x08, 0x18, 0x6e, 0x54, 0xda, 0xed, 0x01, 0x85, 0x84, 0x63, 0xa2, 0xf4,
    -	0x64, 0xa6, 0xa7, 0xe9, 0x89, 0x36, 0x26, 0x93, 0xb9, 0x67, 0x29, 0x3b, 0x99, 0xe9, 0x49, 0x4a,
    -	0x49, 0xd8, 0x7f, 0x30, 0x90, 0xe6, 0x59, 0x98, 0x84, 0xe9, 0x3f, 0xb7, 0x52, 0x36, 0x13, 0x1f,
    -	0xe9, 0x56, 0xe0, 0xfd, 0x00, 0x1f, 0xae, 0x9f, 0xb7, 0xe0, 0x44, 0x33, 0xf3, 0x43, 0x04, 0x03,
    -	0xd0, 0x9d, 0xdc, 0x9c, 0x7f, 0xba, 0x0a, 0xa7, 0x9c, 0x0d, 0xc7, 0x39, 0x2d, 0x25, 0x85, 0x03,
    -	0xc5, 0xf7, 0x2c, 0x1c, 0x58, 0x81, 0x81, 0x2a, 0x7f, 0xc9, 0xc9, 0x34, 0x00, 0x5d, 0x05, 0x03,
    -	0x65, 0xac, 0x84, 0x78, 0x02, 0x6e, 0x60, 0x45, 0x02, 0xfd, 0xa4, 0x05, 0x67, 0x92, 0x5d, 0xc7,
    -	0x84, 0x81, 0x85, 0xc1, 0x24, 0x17, 0x6b, 0x2d, 0x89, 0xef, 0x4f, 0xf1, 0xff, 0x06, 0xf2, 0x41,
    -	0x27, 0x04, 0xdc, 0xbe, 0x31, 0xb4, 0x90, 0x21, 0x57, 0xeb, 0x33, 0x35, 0x8a, 0x5d, 0xc8, 0xd6,
    -	0x5e, 0x84, 0xe1, 0x86, 0xdf, 0xf2, 0x22, 0x61, 0xf7, 0x28, 0x8c, 0xa7, 0x98, 0xd1, 0xd0, 0x8a,
    -	0x56, 0x8e, 0x0d, 0xac, 0x84, 0x44, 0x6e, 0xe0, 0xbe, 0x25, 0x72, 0xef, 0xc0, 0xb0, 0xa7, 0xb9,
    -	0x04, 0xb4, 0x7b, 0xc1, 0x0a, 0xe9, 0xa2, 0x86, 0xcd, 0x7b, 0xa9, 0x97, 0x60, 0x83, 0x5a, 0x7b,
    -	0x69, 0x19, 0xbc, 0x37, 0x69, 0xd9, 0x91, 0x3e, 0x89, 0xed, 0x5f, 0x2f, 0x64, 0xbc, 0x18, 0xb8,
    -	0x54, 0xee, 0x35, 0x53, 0x2a, 0x77, 0x3e, 0x29, 0x95, 0x4b, 0xa9, 0xaa, 0x0c, 0x81, 0x5c, 0xf7,
    -	0x19, 0x25, 0xbb, 0x0e, 0xf0, 0xfc, 0xc3, 0x16, 0x9c, 0x64, 0xba, 0x0f, 0xda, 0xc0, 0x7b, 0xd6,
    -	0x77, 0x30, 0x93, 0xd4, 0xeb, 0xd9, 0xe4, 0x70, 0x5e, 0x3b, 0x76, 0x1d, 0xce, 0x75, 0xba, 0x77,
    -	0x99, 0x85, 0x6f, 0x4d, 0x19, 0x47, 0xc4, 0x16, 0xbe, 0xb5, 0xe5, 0x05, 0xcc, 0x20, 0xdd, 0x86,
    -	0x2f, 0xb4, 0xff, 0x7f, 0x0b, 0x8a, 0x65, 0xbf, 0x76, 0x04, 0x2f, 0xfa, 0x8f, 0x19, 0x2f, 0xfa,
    -	0x47, 0xb2, 0x6f, 0xfc, 0x5a, 0xae, 0xb2, 0x6f, 0x31, 0xa1, 0xec, 0x3b, 0x93, 0x47, 0xa0, 0xbd,
    -	0x6a, 0xef, 0x97, 0x8a, 0x30, 0x54, 0xf6, 0x6b, 0x6a, 0x9f, 0xfd, 0xaf, 0xf7, 0xe3, 0xc8, 0x93,
    -	0x9b, 0x7d, 0x4a, 0xa3, 0xcc, 0x2c, 0x7a, 0x65, 0xdc, 0x89, 0xef, 0x32, 0x7f, 0x9e, 0x5b, 0xc4,
    -	0xdd, 0xdc, 0x8a, 0x48, 0x2d, 0xf9, 0x39, 0x47, 0xe7, 0xcf, 0xf3, 0xed, 0x22, 0x8c, 0x25, 0x5a,
    -	0x47, 0x75, 0x18, 0xa9, 0xeb, 0xaa, 0x24, 0xb1, 0x4e, 0xef, 0x4b, 0x0b, 0x25, 0xfc, 0x21, 0xb4,
    -	0x22, 0x6c, 0x12, 0x47, 0x33, 0x00, 0x9e, 0x6e, 0x15, 0xae, 0x02, 0x15, 0x6b, 0x16, 0xe1, 0x1a,
    -	0x06, 0x7a, 0x09, 0x86, 0x22, 0xbf, 0xe9, 0xd7, 0xfd, 0xcd, 0xdd, 0x6b, 0x44, 0x46, 0xb6, 0x54,
    -	0x46, 0xc3, 0x6b, 0x31, 0x08, 0xeb, 0x78, 0xe8, 0x2e, 0x4c, 0x28, 0x22, 0x95, 0x07, 0xa0, 0x5e,
    -	0x63, 0x62, 0x93, 0xd5, 0x24, 0x45, 0x9c, 0x6e, 0x04, 0xbd, 0x02, 0xa3, 0xcc, 0x7a, 0x99, 0xd5,
    -	0xbf, 0x46, 0x76, 0x65, 0xc4, 0x63, 0xc6, 0x61, 0xaf, 0x18, 0x10, 0x9c, 0xc0, 0x44, 0xf3, 0x30,
    -	0xd1, 0x70, 0xc3, 0x44, 0xf5, 0x3e, 0x56, 0x9d, 0x75, 0x60, 0x25, 0x09, 0xc4, 0x69, 0x7c, 0xfb,
    -	0x57, 0xc5, 0x1c, 0x7b, 0x91, 0xfb, 0xc1, 0x76, 0x7c, 0x7f, 0x6f, 0xc7, 0x6f, 0x59, 0x30, 0x4e,
    -	0x5b, 0x67, 0x26, 0x99, 0x92, 0x91, 0x52, 0x39, 0x31, 0xac, 0x36, 0x39, 0x31, 0xce, 0xd3, 0x63,
    -	0xbb, 0xe6, 0xb7, 0x22, 0x21, 0x1d, 0xd5, 0xce, 0x65, 0x5a, 0x8a, 0x05, 0x54, 0xe0, 0x91, 0x20,
    -	0x10, 0x7e, 0xef, 0x3a, 0x1e, 0x09, 0x02, 0x2c, 0xa0, 0x32, 0x65, 0x46, 0x4f, 0x76, 0xca, 0x0c,
    -	0x1e, 0xf9, 0x5c, 0x58, 0xc1, 0x09, 0x96, 0x56, 0x8b, 0x7c, 0x2e, 0xcd, 0xe3, 0x62, 0x1c, 0xfb,
    -	0xeb, 0x45, 0x18, 0x2e, 0xfb, 0xb5, 0xd8, 0xb0, 0xe3, 0x45, 0xc3, 0xb0, 0xe3, 0x5c, 0xc2, 0xb0,
    -	0x63, 0x5c, 0xc7, 0xfd, 0xc0, 0x8c, 0xe3, 0x3b, 0x65, 0xc6, 0xf1, 0x87, 0x16, 0x9b, 0xb5, 0x85,
    -	0xd5, 0x0a, 0xb7, 0xf0, 0x45, 0x97, 0x60, 0x88, 0x9d, 0x70, 0x2c, 0xd0, 0x82, 0xb4, 0x76, 0x60,
    -	0x29, 0x2c, 0x57, 0xe3, 0x62, 0xac, 0xe3, 0xa0, 0x0b, 0x30, 0x10, 0x12, 0x27, 0xa8, 0x6e, 0xa9,
    -	0xe3, 0x5d, 0x98, 0x26, 0xf0, 0x32, 0xac, 0xa0, 0xe8, 0xad, 0x38, 0xe8, 0x76, 0x31, 0xdf, 0x5c,
    -	0x58, 0xef, 0x0f, 0xdf, 0x22, 0xf9, 0x91, 0xb6, 0xed, 0x5b, 0x80, 0xd2, 0xf8, 0x5d, 0xf8, 0x5f,
    -	0x95, 0xcc, 0xb0, 0xb0, 0x83, 0xa9, 0x90, 0xb0, 0xff, 0x62, 0xc1, 0x68, 0xd9, 0xaf, 0xd1, 0xad,
    -	0xfb, 0xbd, 0xb4, 0x4f, 0xf5, 0x8c, 0x03, 0x7d, 0x6d, 0x32, 0x0e, 0x3c, 0x06, 0xbd, 0x65, 0xbf,
    -	0xd6, 0x21, 0x74, 0xed, 0x7f, 0x63, 0x41, 0x7f, 0xd9, 0xaf, 0x1d, 0x81, 0xe2, 0xe5, 0x35, 0x53,
    -	0xf1, 0x72, 0x32, 0x67, 0xdd, 0xe4, 0xe8, 0x5a, 0xfe, 0xa4, 0x07, 0x46, 0x68, 0x3f, 0xfd, 0x4d,
    -	0x39, 0x95, 0xc6, 0xb0, 0x59, 0x5d, 0x0c, 0x1b, 0x7d, 0x06, 0xf8, 0xf5, 0xba, 0x7f, 0x27, 0x39,
    -	0xad, 0x4b, 0xac, 0x14, 0x0b, 0x28, 0x7a, 0x16, 0x06, 0x9a, 0x01, 0xd9, 0x71, 0x7d, 0xc1, 0x5f,
    -	0x6b, 0x6a, 0xac, 0xb2, 0x28, 0xc7, 0x0a, 0x83, 0x3e, 0xbc, 0x43, 0xd7, 0xa3, 0xbc, 0x44, 0xd5,
    -	0xf7, 0x6a, 0x5c, 0x37, 0x51, 0x14, 0x69, 0xb1, 0xb4, 0x72, 0x6c, 0x60, 0xa1, 0x5b, 0x30, 0xc8,
    -	0xfe, 0xb3, 0x63, 0xa7, 0xf7, 0xd0, 0xc7, 0x8e, 0x48, 0x14, 0x2c, 0x08, 0xe0, 0x98, 0x16, 0x7a,
    -	0x1e, 0x20, 0x92, 0xa9, 0x65, 0x42, 0x11, 0xc2, 0x54, 0xbd, 0x45, 0x54, 0xd2, 0x99, 0x10, 0x6b,
    -	0x58, 0xe8, 0x19, 0x18, 0x8c, 0x1c, 0xb7, 0x7e, 0xdd, 0xf5, 0x98, 0xfe, 0x9e, 0xf6, 0x5f, 0xe4,
    -	0xeb, 0x15, 0x85, 0x38, 0x86, 0x53, 0x5e, 0x90, 0xc5, 0x84, 0x9a, 0xdb, 0x8d, 0x44, 0x6a, 0xba,
    -	0x22, 0xe7, 0x05, 0xaf, 0xab, 0x52, 0xac, 0x61, 0xa0, 0x2d, 0x38, 0xed, 0x7a, 0x2c, 0x85, 0x14,
    -	0xa9, 0x6c, 0xbb, 0xcd, 0xb5, 0xeb, 0x95, 0x9b, 0x24, 0x70, 0x37, 0x76, 0xe7, 0x9c, 0xea, 0x36,
    -	0xf1, 0x64, 0x42, 0xfc, 0xc7, 0x45, 0x17, 0x4f, 0x2f, 0xb7, 0xc1, 0xc5, 0x6d, 0x29, 0x21, 0x9b,
    -	0x6e, 0xc7, 0x80, 0x38, 0x0d, 0x21, 0x13, 0xe0, 0xe9, 0x67, 0x58, 0x09, 0x16, 0x10, 0xfb, 0x05,
    -	0xb6, 0x27, 0x6e, 0x54, 0xd0, 0xd3, 0xc6, 0xf1, 0x72, 0x42, 0x3f, 0x5e, 0x0e, 0xf6, 0x4a, 0x7d,
    -	0x37, 0x2a, 0x5a, 0x7c, 0xa0, 0xcb, 0x70, 0xbc, 0xec, 0xd7, 0xca, 0x7e, 0x10, 0x2d, 0xf9, 0xc1,
    -	0x1d, 0x27, 0xa8, 0xc9, 0x25, 0x58, 0x92, 0x11, 0x92, 0xe8, 0x19, 0xdb, 0xcb, 0x4f, 0x20, 0x23,
    -	0xfa, 0xd1, 0x0b, 0x8c, 0xab, 0x3b, 0xa4, 0x43, 0x6a, 0x95, 0xf1, 0x17, 0x2a, 0x51, 0xdb, 0x15,
    -	0x27, 0x22, 0xe8, 0x06, 0x8c, 0x54, 0xf5, 0xab, 0x56, 0x54, 0x7f, 0x4a, 0x5e, 0x76, 0xc6, 0x3d,
    -	0x9c, 0x79, 0x37, 0x9b, 0xf5, 0xed, 0x6f, 0x5a, 0xa2, 0x15, 0x2e, 0xad, 0xe0, 0x76, 0xaf, 0x9d,
    -	0xcf, 0xdc, 0x79, 0x98, 0x08, 0xf4, 0x2a, 0x9a, 0xfd, 0xd8, 0x71, 0x9e, 0xf9, 0x26, 0x01, 0xc4,
    -	0x69, 0x7c, 0xf4, 0x49, 0x38, 0x65, 0x14, 0x4a, 0x55, 0xba, 0x96, 0x7f, 0x9a, 0xc9, 0x73, 0x70,
    -	0x1e, 0x12, 0xce, 0xaf, 0x6f, 0xff, 0x20, 0x9c, 0x48, 0x7e, 0x97, 0x90, 0xb0, 0xdc, 0xe7, 0xd7,
    -	0x15, 0x0e, 0xf7, 0x75, 0xf6, 0x4b, 0x30, 0x41, 0x9f, 0xde, 0x8a, 0x8d, 0x64, 0xf3, 0xd7, 0x39,
    -	0x08, 0xd5, 0x6f, 0x0e, 0xb0, 0x6b, 0x30, 0x91, 0x7d, 0x0d, 0x7d, 0x1a, 0x46, 0x43, 0xc2, 0x22,
    -	0xaf, 0x49, 0xc9, 0x5e, 0x1b, 0x6f, 0xf2, 0xca, 0xa2, 0x8e, 0xc9, 0x5f, 0x2f, 0x66, 0x19, 0x4e,
    -	0x50, 0x43, 0x0d, 0x18, 0xbd, 0xe3, 0x7a, 0x35, 0xff, 0x4e, 0x28, 0xe9, 0x0f, 0xe4, 0xab, 0x09,
    -	0x6e, 0x71, 0xcc, 0x44, 0x1f, 0x8d, 0xe6, 0x6e, 0x19, 0xc4, 0x70, 0x82, 0x38, 0x3d, 0x6a, 0x82,
    -	0x96, 0x37, 0x1b, 0xae, 0x87, 0x24, 0x10, 0x71, 0xe1, 0xd8, 0x51, 0x83, 0x65, 0x21, 0x8e, 0xe1,
    -	0xf4, 0xa8, 0x61, 0x7f, 0x98, 0x3b, 0x3a, 0x3b, 0xcb, 0xc4, 0x51, 0x83, 0x55, 0x29, 0xd6, 0x30,
    -	0xe8, 0x51, 0xcc, 0xfe, 0xad, 0xfa, 0x1e, 0xf6, 0xfd, 0x48, 0x1e, 0xde, 0x2c, 0x55, 0xa5, 0x56,
    -	0x8e, 0x0d, 0xac, 0x9c, 0x28, 0x74, 0x3d, 0x87, 0x8d, 0x42, 0x87, 0xa2, 0x36, 0x1e, 0xf8, 0x3c,
    -	0x1a, 0xf2, 0xe5, 0x76, 0x1e, 0xf8, 0x07, 0xf7, 0xe5, 0x9d, 0x4f, 0x79, 0x81, 0x0d, 0x31, 0x40,
    -	0xbd, 0x3c, 0xcc, 0x1e, 0x53, 0x64, 0x56, 0xf8, 0xe8, 0x48, 0x18, 0x5a, 0x84, 0xfe, 0x70, 0x37,
    -	0xac, 0x46, 0xf5, 0xb0, 0x5d, 0x3a, 0xd2, 0x0a, 0x43, 0xd1, 0xb2, 0x61, 0xf3, 0x2a, 0x58, 0xd6,
    -	0x45, 0x55, 0x98, 0x14, 0x14, 0xe7, 0xb7, 0x1c, 0x4f, 0x25, 0x49, 0xe4, 0x16, 0x8b, 0x97, 0xf6,
    -	0xf7, 0x4a, 0x93, 0xa2, 0x65, 0x1d, 0x7c, 0xb0, 0x57, 0xa2, 0x5b, 0x32, 0x03, 0x82, 0xb3, 0xa8,
    -	0xf1, 0x25, 0x5f, 0xad, 0xfa, 0x8d, 0x66, 0x39, 0xf0, 0x37, 0xdc, 0x3a, 0x69, 0xa7, 0x0c, 0xae,
    -	0x18, 0x98, 0x62, 0xc9, 0x1b, 0x65, 0x38, 0x41, 0x0d, 0xdd, 0x86, 0x31, 0xa7, 0xd9, 0x9c, 0x0d,
    -	0x1a, 0x7e, 0x20, 0x1b, 0x18, 0xca, 0xd7, 0x2a, 0xcc, 0x9a, 0xa8, 0x3c, 0x47, 0x62, 0xa2, 0x10,
    -	0x27, 0x09, 0xd2, 0x81, 0x12, 0x1b, 0xcd, 0x18, 0xa8, 0x91, 0x78, 0xa0, 0xc4, 0xbe, 0xcc, 0x18,
    -	0xa8, 0x0c, 0x08, 0xce, 0xa2, 0x66, 0xff, 0x00, 0x63, 0xfc, 0x2b, 0xee, 0xa6, 0xc7, 0x9c, 0xe3,
    -	0x50, 0x03, 0x46, 0x9a, 0xec, 0xd8, 0x17, 0xf9, 0xcb, 0xc4, 0x51, 0xf1, 0x62, 0x97, 0xc2, 0xcb,
    -	0x3b, 0x2c, 0x03, 0xab, 0x61, 0xc4, 0x5a, 0xd6, 0xc9, 0x61, 0x93, 0xba, 0xfd, 0x8b, 0xd3, 0x8c,
    -	0x75, 0xac, 0x70, 0x89, 0x64, 0xbf, 0x70, 0x55, 0x14, 0x32, 0x88, 0xe9, 0x7c, 0xd9, 0x7f, 0xbc,
    -	0xbe, 0x84, 0xbb, 0x23, 0x96, 0x75, 0xd1, 0xa7, 0x60, 0x94, 0x3e, 0xe9, 0x15, 0xfb, 0x16, 0x4e,
    -	0x1d, 0xcb, 0x8f, 0x81, 0xa5, 0xb0, 0xf4, 0xdc, 0x86, 0x7a, 0x65, 0x9c, 0x20, 0x86, 0xde, 0x62,
    -	0x76, 0x9d, 0x92, 0x74, 0xa1, 0x1b, 0xd2, 0xba, 0x09, 0xa7, 0x24, 0xab, 0x11, 0x41, 0x2d, 0x98,
    -	0x4c, 0x67, 0x70, 0x0e, 0xa7, 0xec, 0xfc, 0xb7, 0x51, 0x3a, 0x09, 0x73, 0x9c, 0x84, 0x2e, 0x0d,
    -	0x0b, 0x71, 0x16, 0x7d, 0x74, 0x3d, 0x99, 0x5f, 0xb7, 0x68, 0x68, 0x0d, 0x52, 0x39, 0x76, 0x47,
    -	0xda, 0xa6, 0xd6, 0xdd, 0x84, 0x33, 0x5a, 0x8a, 0xd2, 0x2b, 0x81, 0xc3, 0xec, 0x8a, 0x5c, 0x76,
    -	0x1b, 0x69, 0x4c, 0xed, 0xa3, 0xfb, 0x7b, 0xa5, 0x33, 0x6b, 0xed, 0x10, 0x71, 0x7b, 0x3a, 0xe8,
    -	0x06, 0x1c, 0xe7, 0x11, 0x5c, 0x16, 0x88, 0x53, 0xab, 0xbb, 0x9e, 0xe2, 0x9a, 0xf9, 0xd9, 0x75,
    -	0x6a, 0x7f, 0xaf, 0x74, 0x7c, 0x36, 0x0b, 0x01, 0x67, 0xd7, 0x43, 0xaf, 0xc1, 0x60, 0xcd, 0x93,
    -	0xa7, 0x6c, 0x9f, 0x91, 0x05, 0x76, 0x70, 0x61, 0xb5, 0xa2, 0xbe, 0x3f, 0xfe, 0x83, 0xe3, 0x0a,
    -	0x68, 0x93, 0xab, 0xad, 0x94, 0xac, 0xb1, 0x3f, 0x15, 0xd8, 0x33, 0x29, 0x8e, 0x37, 0x42, 0x22,
    -	0x70, 0x7d, 0xad, 0x72, 0xb9, 0x33, 0xa2, 0x25, 0x18, 0x84, 0xd1, 0x9b, 0x80, 0x44, 0xb6, 0xa1,
    -	0xd9, 0x2a, 0x4b, 0x8e, 0xa7, 0xd9, 0x92, 0x2a, 0x11, 0x42, 0x25, 0x85, 0x81, 0x33, 0x6a, 0xa1,
    -	0xab, 0xf4, 0x78, 0xd4, 0x4b, 0xc5, 0xf1, 0xab, 0x72, 0x8d, 0x2f, 0x90, 0x66, 0x40, 0x98, 0xf9,
    -	0xa3, 0x49, 0x11, 0x27, 0xea, 0xa1, 0x1a, 0x9c, 0x76, 0x5a, 0x91, 0xcf, 0x34, 0x82, 0x26, 0xea,
    -	0x9a, 0xbf, 0x4d, 0x3c, 0xa6, 0x8c, 0x1f, 0x60, 0x01, 0x43, 0x4f, 0xcf, 0xb6, 0xc1, 0xc3, 0x6d,
    -	0xa9, 0xd0, 0xe7, 0x14, 0x1d, 0x0b, 0x4d, 0x59, 0x67, 0x78, 0x77, 0x73, 0x0d, 0xb6, 0xc4, 0x40,
    -	0x2f, 0xc1, 0xd0, 0x96, 0x1f, 0x46, 0xab, 0x24, 0xba, 0xe3, 0x07, 0xdb, 0x22, 0xbd, 0x41, 0x9c,
    -	0x52, 0x26, 0x06, 0x61, 0x1d, 0x0f, 0x3d, 0x05, 0xfd, 0xcc, 0x54, 0x6c, 0x79, 0x81, 0xdd, 0xb5,
    -	0x03, 0xf1, 0x19, 0x73, 0x95, 0x17, 0x63, 0x09, 0x97, 0xa8, 0xcb, 0xe5, 0x79, 0x76, 0x1c, 0x27,
    -	0x50, 0x97, 0xcb, 0xf3, 0x58, 0xc2, 0xe9, 0x72, 0x0d, 0xb7, 0x9c, 0x80, 0x94, 0x03, 0xbf, 0x4a,
    -	0x42, 0x2d, 0x91, 0xd1, 0x23, 0x3c, 0x79, 0x03, 0x5d, 0xae, 0x95, 0x2c, 0x04, 0x9c, 0x5d, 0x0f,
    -	0x91, 0x74, 0x7a, 0xde, 0xd1, 0x7c, 0x55, 0x69, 0x9a, 0x1d, 0xec, 0x32, 0x43, 0xaf, 0x07, 0xe3,
    -	0x2a, 0x31, 0x30, 0x4f, 0xd7, 0x10, 0x4e, 0x8d, 0xb1, 0xb5, 0xdd, 0x7d, 0xae, 0x07, 0xa5, 0x7c,
    -	0x5e, 0x4e, 0x50, 0xc2, 0x29, 0xda, 0x46, 0x44, 0xda, 0xf1, 0x8e, 0x11, 0x69, 0x2f, 0xc2, 0x60,
    -	0xd8, 0xba, 0x5d, 0xf3, 0x1b, 0x8e, 0xeb, 0x31, 0x8b, 0x1b, 0xed, 0xe1, 0x5e, 0x91, 0x00, 0x1c,
    -	0xe3, 0xa0, 0x25, 0x18, 0x70, 0xa4, 0x66, 0x19, 0xe5, 0x07, 0xdb, 0x53, 0xfa, 0x64, 0x1e, 0x7f,
    -	0x4a, 0xea, 0x92, 0x55, 0x5d, 0xf4, 0x2a, 0x8c, 0x88, 0x80, 0x1e, 0x22, 0x97, 0xfe, 0xa4, 0xe9,
    -	0xbe, 0x5c, 0xd1, 0x81, 0xd8, 0xc4, 0x45, 0xeb, 0x30, 0x14, 0xf9, 0x75, 0xe6, 0x83, 0x4b, 0xb9,
    -	0xe4, 0x13, 0xf9, 0x31, 0x71, 0xd7, 0x14, 0x9a, 0xae, 0xf3, 0x50, 0x55, 0xb1, 0x4e, 0x07, 0xad,
    -	0xf1, 0xf5, 0xce, 0xd2, 0x16, 0x91, 0x50, 0x24, 0x63, 0x3f, 0x93, 0x67, 0x2e, 0xc9, 0xd0, 0xcc,
    -	0xed, 0x20, 0x6a, 0x62, 0x9d, 0x0c, 0xba, 0x02, 0x13, 0xcd, 0xc0, 0xf5, 0xd9, 0x9a, 0x50, 0x9a,
    -	0xf2, 0x29, 0x33, 0x49, 0x69, 0x39, 0x89, 0x80, 0xd3, 0x75, 0x58, 0x3c, 0x16, 0x51, 0x38, 0x75,
    -	0x8a, 0x27, 0x5a, 0xe3, 0x72, 0x10, 0x5e, 0x86, 0x15, 0x14, 0xad, 0xb0, 0x93, 0x98, 0x8b, 0xf0,
    -	0xa6, 0xa6, 0xf3, 0xbd, 0xfc, 0x75, 0x51, 0x1f, 0xe7, 0xfd, 0xd5, 0x5f, 0x1c, 0x53, 0x40, 0x35,
    -	0x2d, 0xbf, 0x39, 0x7d, 0x41, 0x85, 0x53, 0xa7, 0xdb, 0xd8, 0xeb, 0x26, 0x9e, 0xcb, 0x31, 0x43,
    -	0x60, 0x14, 0x87, 0x38, 0x41, 0x13, 0xbd, 0x01, 0xe3, 0x22, 0x58, 0x41, 0x3c, 0x4c, 0x67, 0x62,
    -	0x9f, 0x26, 0x9c, 0x80, 0xe1, 0x14, 0x36, 0x4f, 0x74, 0xe6, 0xdc, 0xae, 0x13, 0x71, 0xf4, 0x5d,
    -	0x77, 0xbd, 0xed, 0x70, 0xea, 0x2c, 0x3b, 0x1f, 0x44, 0xa2, 0xb3, 0x24, 0x14, 0x67, 0xd4, 0x40,
    -	0x6b, 0x30, 0xde, 0x0c, 0x08, 0x69, 0xb0, 0x77, 0x92, 0xb8, 0xcf, 0x4a, 0x3c, 0x1c, 0x11, 0xed,
    -	0x49, 0x39, 0x01, 0x3b, 0xc8, 0x28, 0xc3, 0x29, 0x0a, 0xe8, 0x0e, 0x0c, 0xf8, 0x3b, 0x24, 0xd8,
    -	0x22, 0x4e, 0x6d, 0xea, 0x5c, 0x1b, 0x4f, 0x3b, 0x71, 0xb9, 0xdd, 0x10, 0xb8, 0x09, 0x43, 0x24,
    -	0x59, 0xdc, 0xd9, 0x10, 0x49, 0x36, 0x86, 0xfe, 0x0b, 0x0b, 0x4e, 0x49, 0xd5, 0x5e, 0xa5, 0x49,
    -	0x47, 0x7d, 0xde, 0xf7, 0xc2, 0x28, 0xe0, 0x01, 0x74, 0x1e, 0xcd, 0x0f, 0x2a, 0xb3, 0x96, 0x53,
    -	0x49, 0x69, 0x11, 0x4e, 0xe5, 0x61, 0x84, 0x38, 0xbf, 0x45, 0xfa, 0xb2, 0x0f, 0x49, 0x24, 0x0f,
    -	0xa3, 0xd9, 0x70, 0xe9, 0xad, 0x85, 0xd5, 0xa9, 0xc7, 0x78, 0xf4, 0x1f, 0xba, 0x19, 0x2a, 0x49,
    -	0x20, 0x4e, 0xe3, 0xa3, 0x4b, 0x50, 0xf0, 0xc3, 0xa9, 0xc7, 0xdb, 0xa4, 0xc4, 0xf7, 0x6b, 0x37,
    -	0x2a, 0xdc, 0x20, 0xf5, 0x46, 0x05, 0x17, 0xfc, 0x50, 0x26, 0x1b, 0xa3, 0xcf, 0xd9, 0x70, 0xea,
    -	0x09, 0x2e, 0x73, 0x96, 0xc9, 0xc6, 0x58, 0x21, 0x8e, 0xe1, 0x68, 0x0b, 0xc6, 0x42, 0x43, 0x6c,
    -	0x10, 0x4e, 0x9d, 0x67, 0x23, 0xf5, 0x44, 0xde, 0xa4, 0x19, 0xd8, 0x5a, 0x16, 0x20, 0x93, 0x0a,
    -	0x4e, 0x92, 0xe5, 0xbb, 0x4b, 0x13, 0x5c, 0x84, 0x53, 0x4f, 0x76, 0xd8, 0x5d, 0x1a, 0xb2, 0xbe,
    -	0xbb, 0x74, 0x1a, 0x38, 0x41, 0x13, 0xad, 0xeb, 0x6e, 0x8c, 0x17, 0xf2, 0x8d, 0x1b, 0x33, 0x1d,
    -	0x18, 0x47, 0xf2, 0x9c, 0x17, 0xa7, 0xbf, 0x0f, 0x26, 0x52, 0x5c, 0xd8, 0x61, 0x7c, 0x3a, 0xa6,
    -	0xb7, 0x61, 0xc4, 0x58, 0xe9, 0x0f, 0xd5, 0xe4, 0xe7, 0xcf, 0x06, 0x61, 0x50, 0x99, 0x62, 0xa0,
    -	0x8b, 0xa6, 0x95, 0xcf, 0xa9, 0xa4, 0x95, 0xcf, 0x40, 0xd9, 0xaf, 0x19, 0x86, 0x3d, 0x6b, 0x19,
    -	0xb1, 0x72, 0xf3, 0xce, 0xd5, 0xee, 0x1d, 0xcf, 0x34, 0xf5, 0x52, 0xb1, 0x6b, 0x73, 0xa1, 0x9e,
    -	0xb6, 0x1a, 0xab, 0x2b, 0x30, 0xe1, 0xf9, 0x8c, 0xf5, 0x27, 0x35, 0xc9, 0xd7, 0x31, 0xf6, 0x6d,
    -	0x50, 0x8f, 0xe5, 0x96, 0x40, 0xc0, 0xe9, 0x3a, 0xb4, 0x41, 0xce, 0x7f, 0x25, 0x55, 0x64, 0x9c,
    -	0x3d, 0xc3, 0x02, 0x4a, 0x9f, 0x9c, 0xfc, 0x57, 0x38, 0x35, 0x9e, 0xff, 0xe4, 0xe4, 0x95, 0x92,
    -	0x3c, 0x5e, 0x28, 0x79, 0x3c, 0xa6, 0x11, 0x6a, 0xfa, 0xb5, 0xe5, 0xb2, 0x78, 0x3d, 0x68, 0x51,
    -	0xec, 0x6b, 0xcb, 0x65, 0xcc, 0x61, 0x68, 0x16, 0xfa, 0xd8, 0x0f, 0x19, 0x23, 0x27, 0x6f, 0xf7,
    -	0x2f, 0x97, 0xb5, 0x1c, 0xaa, 0xac, 0x02, 0x16, 0x15, 0x99, 0xc4, 0x9f, 0x3e, 0xb9, 0x98, 0xc4,
    -	0xbf, 0xff, 0x3e, 0x25, 0xfe, 0x92, 0x00, 0x8e, 0x69, 0xa1, 0xbb, 0x70, 0xdc, 0x78, 0xe6, 0x2a,
    -	0x4f, 0x3c, 0xc8, 0x37, 0x06, 0x48, 0x20, 0xcf, 0x9d, 0x11, 0x9d, 0x3e, 0xbe, 0x9c, 0x45, 0x09,
    -	0x67, 0x37, 0x80, 0xea, 0x30, 0x51, 0x4d, 0xb5, 0x3a, 0xd0, 0x7d, 0xab, 0x6a, 0x5d, 0xa4, 0x5b,
    -	0x4c, 0x13, 0x46, 0xaf, 0xc2, 0xc0, 0xbb, 0x3e, 0x37, 0xdc, 0x13, 0x2f, 0x1e, 0x19, 0x05, 0x66,
    -	0xe0, 0xad, 0x1b, 0x15, 0x56, 0x7e, 0xb0, 0x57, 0x1a, 0x2a, 0xfb, 0x35, 0xf9, 0x17, 0xab, 0x0a,
    -	0xe8, 0xc7, 0x2c, 0x98, 0x4e, 0xbf, 0xa3, 0x55, 0xa7, 0x47, 0xba, 0xef, 0xb4, 0x2d, 0x1a, 0x9d,
    -	0x5e, 0xcc, 0x25, 0x87, 0xdb, 0x34, 0x85, 0x3e, 0x4a, 0xf7, 0x53, 0xe8, 0xde, 0x23, 0x22, 0x01,
    -	0xfd, 0xa3, 0xf1, 0x7e, 0xa2, 0xa5, 0x07, 0x7b, 0xa5, 0x31, 0x7e, 0xe0, 0xba, 0xf7, 0x54, 0xbc,
    -	0x7d, 0x5e, 0x01, 0xfd, 0x20, 0x1c, 0x0f, 0xd2, 0x72, 0x6d, 0x22, 0x79, 0xfb, 0xa7, 0xbb, 0x39,
    -	0xbc, 0x93, 0x13, 0x8e, 0xb3, 0x08, 0xe2, 0xec, 0x76, 0xec, 0xdf, 0xb3, 0x98, 0x3e, 0x43, 0x74,
    -	0x8b, 0x84, 0xad, 0x7a, 0x74, 0x04, 0xc6, 0x72, 0x8b, 0x86, 0x3d, 0xc1, 0x7d, 0x5b, 0xbb, 0xfd,
    -	0x2f, 0x16, 0xb3, 0x76, 0x3b, 0x42, 0xbf, 0xbd, 0xb7, 0x60, 0x20, 0x12, 0xad, 0x89, 0xae, 0xe7,
    -	0x59, 0xe6, 0xc8, 0x4e, 0x31, 0x8b, 0x3f, 0xf5, 0x76, 0x92, 0xa5, 0x58, 0x91, 0xb1, 0xff, 0x47,
    -	0x3e, 0x03, 0x12, 0x72, 0x04, 0x6a, 0xdb, 0x05, 0x53, 0x6d, 0x5b, 0xea, 0xf0, 0x05, 0x39, 0xea,
    -	0xdb, 0xff, 0xc1, 0xec, 0x37, 0x93, 0x19, 0xbe, 0xdf, 0xcd, 0x2c, 0xed, 0x2f, 0x5a, 0x00, 0x71,
    -	0x82, 0x93, 0x2e, 0x12, 0x4e, 0x5f, 0xa6, 0xaf, 0x25, 0x3f, 0xf2, 0xab, 0x7e, 0x5d, 0xa8, 0x8d,
    -	0x4e, 0xc7, 0x9a, 0x63, 0x5e, 0x7e, 0xa0, 0xfd, 0xc6, 0x0a, 0x1b, 0x95, 0x64, 0xc4, 0xe1, 0x62,
    -	0x6c, 0xcb, 0x60, 0x44, 0x1b, 0xfe, 0x8a, 0x05, 0xc7, 0xb2, 0x9c, 0x40, 0xe8, 0xdb, 0x9b, 0x4b,
    -	0x4f, 0x95, 0x09, 0xac, 0x9a, 0xcd, 0x9b, 0xa2, 0x1c, 0x2b, 0x8c, 0xae, 0x33, 0x79, 0x1f, 0x2e,
    -	0xf9, 0xc6, 0x0d, 0x18, 0x29, 0x07, 0x44, 0xe3, 0x2f, 0x5e, 0x8f, 0xf3, 0x02, 0x0d, 0xce, 0x3d,
    -	0x7b, 0xe8, 0xc8, 0x4a, 0xf6, 0x57, 0x0b, 0x70, 0x8c, 0x1b, 0x72, 0xcd, 0xee, 0xf8, 0x6e, 0xad,
    -	0xec, 0xd7, 0x84, 0xeb, 0xee, 0xdb, 0x30, 0xdc, 0xd4, 0x44, 0xde, 0xed, 0x02, 0xc9, 0xeb, 0xa2,
    -	0xf1, 0x58, 0x48, 0xa7, 0x97, 0x62, 0x83, 0x16, 0xaa, 0xc1, 0x30, 0xd9, 0x71, 0xab, 0xca, 0x1a,
    -	0xa8, 0x70, 0xe8, 0x4b, 0x5a, 0xb5, 0xb2, 0xa8, 0xd1, 0xc1, 0x06, 0xd5, 0xae, 0xcd, 0xaf, 0x35,
    -	0x16, 0xad, 0xa7, 0x83, 0x05, 0xd0, 0xcf, 0x5a, 0x70, 0x32, 0x27, 0xec, 0x3c, 0x6d, 0xee, 0x0e,
    -	0x33, 0x99, 0x13, 0xcb, 0x56, 0x35, 0xc7, 0x0d, 0xe9, 0xb0, 0x80, 0xa2, 0x8f, 0x03, 0x34, 0xe3,
    -	0x94, 0x9b, 0x1d, 0xe2, 0x73, 0x1b, 0x91, 0x7a, 0xb5, 0xa0, 0xab, 0x2a, 0x33, 0xa7, 0x46, 0xcb,
    -	0xfe, 0x4a, 0x0f, 0xf4, 0x32, 0xc3, 0x2b, 0x54, 0x86, 0xfe, 0x2d, 0x1e, 0x13, 0xb0, 0xed, 0xbc,
    -	0x51, 0x5c, 0x19, 0x64, 0x30, 0x9e, 0x37, 0xad, 0x14, 0x4b, 0x32, 0x68, 0x05, 0x26, 0x79, 0x3a,
    -	0xd1, 0xfa, 0x02, 0xa9, 0x3b, 0xbb, 0x52, 0x9a, 0x5c, 0x60, 0x9f, 0xaa, 0xa4, 0xea, 0xcb, 0x69,
    -	0x14, 0x9c, 0x55, 0x0f, 0xbd, 0x0e, 0xa3, 0xf4, 0x75, 0xef, 0xb7, 0x22, 0x49, 0x89, 0xe7, 0xef,
    -	0x54, 0x0f, 0x9e, 0x35, 0x03, 0x8a, 0x13, 0xd8, 0xe8, 0x55, 0x18, 0x69, 0xa6, 0xe4, 0xe6, 0xbd,
    -	0xb1, 0x80, 0xc9, 0x94, 0x95, 0x9b, 0xb8, 0xcc, 0x0f, 0xa4, 0xc5, 0xbc, 0x5e, 0xd6, 0xb6, 0x02,
    -	0x12, 0x6e, 0xf9, 0xf5, 0x1a, 0xe3, 0x80, 0x7b, 0x35, 0x3f, 0x90, 0x04, 0x1c, 0xa7, 0x6a, 0x50,
    -	0x2a, 0x1b, 0x8e, 0x5b, 0x6f, 0x05, 0x24, 0xa6, 0xd2, 0x67, 0x52, 0x59, 0x4a, 0xc0, 0x71, 0xaa,
    -	0x46, 0x67, 0x85, 0x40, 0xff, 0x83, 0x51, 0x08, 0xd8, 0xbf, 0x5c, 0x00, 0x63, 0x6a, 0xbf, 0x87,
    -	0xf3, 0x8a, 0xbe, 0x06, 0x3d, 0x9b, 0x41, 0xb3, 0x2a, 0x8c, 0x0c, 0x33, 0xbf, 0xec, 0x0a, 0x2e,
    -	0xcf, 0xeb, 0x5f, 0x46, 0xff, 0x63, 0x56, 0x8b, 0xee, 0xf1, 0xe3, 0xe5, 0xc0, 0xa7, 0x97, 0x9c,
    -	0x0c, 0x1b, 0xaa, 0xdc, 0xad, 0xfa, 0xe5, 0x1b, 0xbb, 0x4d, 0x80, 0x6d, 0xe1, 0x33, 0xc2, 0x29,
    -	0x18, 0xf6, 0x78, 0x15, 0xf1, 0xc2, 0x96, 0x54, 0xd0, 0x25, 0x18, 0x12, 0xa9, 0x1e, 0x99, 0x57,
    -	0x10, 0xdf, 0x4c, 0xcc, 0x7e, 0x70, 0x21, 0x2e, 0xc6, 0x3a, 0x8e, 0xfd, 0xe3, 0x05, 0x98, 0xcc,
    -	0x70, 0xeb, 0xe4, 0xd7, 0xc8, 0xa6, 0x1b, 0x46, 0xc1, 0x6e, 0xf2, 0x72, 0xc2, 0xa2, 0x1c, 0x2b,
    -	0x0c, 0x7a, 0x56, 0xf1, 0x8b, 0x2a, 0x79, 0x39, 0x09, 0xb7, 0x29, 0x01, 0x3d, 0xdc, 0xe5, 0x44,
    -	0xaf, 0xed, 0x56, 0x48, 0x64, 0x2c, 0x7f, 0x75, 0x6d, 0x33, 0x63, 0x03, 0x06, 0xa1, 0x4f, 0xc0,
    -	0x4d, 0xa5, 0x41, 0xd7, 0x9e, 0x80, 0x5c, 0x87, 0xce, 0x61, 0xb4, 0x73, 0x11, 0xf1, 0x1c, 0x2f,
    -	0x12, 0x0f, 0xc5, 0x38, 0xc6, 0x33, 0x2b, 0xc5, 0x02, 0x6a, 0x7f, 0xb9, 0x08, 0xa7, 0x72, 0x1d,
    -	0xbd, 0x69, 0xd7, 0x1b, 0xbe, 0xe7, 0x46, 0xbe, 0x32, 0xcc, 0xe4, 0x71, 0x9d, 0x49, 0x73, 0x6b,
    -	0x45, 0x94, 0x63, 0x85, 0x81, 0xce, 0x43, 0x2f, 0x93, 0xb5, 0x27, 0xd3, 0xbc, 0xe1, 0xb9, 0x05,
    -	0x1e, 0x31, 0x93, 0x83, 0xb5, 0x5b, 0xbd, 0xd8, 0xf6, 0x56, 0x7f, 0x8c, 0x72, 0x30, 0x7e, 0x3d,
    -	0x79, 0xa1, 0xd0, 0xee, 0xfa, 0x7e, 0x1d, 0x33, 0x20, 0x7a, 0x42, 0x8c, 0x57, 0xc2, 0x12, 0x11,
    -	0x3b, 0x35, 0x3f, 0xd4, 0x06, 0xed, 0x29, 0xe8, 0xdf, 0x26, 0xbb, 0x81, 0xeb, 0x6d, 0x26, 0x2d,
    -	0x54, 0xaf, 0xf1, 0x62, 0x2c, 0xe1, 0x66, 0x56, 0xf3, 0xfe, 0x07, 0x91, 0xd5, 0x5c, 0x5f, 0x01,
    -	0x03, 0x1d, 0xd9, 0x93, 0x9f, 0x28, 0xc2, 0x18, 0x9e, 0x5b, 0xf8, 0x60, 0x22, 0xd6, 0xd3, 0x13,
    -	0xf1, 0x20, 0x92, 0x7f, 0x1f, 0x6e, 0x36, 0x7e, 0xdb, 0x82, 0x31, 0x96, 0x70, 0x52, 0x44, 0x69,
    -	0x71, 0x7d, 0xef, 0x08, 0x9e, 0x02, 0x8f, 0x41, 0x6f, 0x40, 0x1b, 0x15, 0x33, 0xa8, 0xf6, 0x38,
    -	0xeb, 0x09, 0xe6, 0x30, 0x74, 0x1a, 0x7a, 0x58, 0x17, 0xe8, 0xe4, 0x0d, 0xf3, 0x23, 0x78, 0xc1,
    -	0x89, 0x1c, 0xcc, 0x4a, 0x59, 0xbc, 0x48, 0x4c, 0x9a, 0x75, 0x97, 0x77, 0x3a, 0xb6, 0x84, 0x78,
    -	0x7f, 0x84, 0x80, 0xc9, 0xec, 0xda, 0x7b, 0x8b, 0x17, 0x99, 0x4d, 0xb2, 0xfd, 0x33, 0xfb, 0x1f,
    -	0x0a, 0x70, 0x36, 0xb3, 0x5e, 0xd7, 0xf1, 0x22, 0xdb, 0xd7, 0x7e, 0x98, 0xe9, 0xe9, 0x8a, 0x47,
    -	0x68, 0xff, 0xdf, 0xd3, 0x2d, 0xf7, 0xdf, 0xdb, 0x45, 0x18, 0xc7, 0xcc, 0x21, 0x7b, 0x9f, 0x84,
    -	0x71, 0xcc, 0xec, 0x5b, 0x8e, 0x98, 0xe0, 0x5f, 0x0b, 0x39, 0xdf, 0xc2, 0x04, 0x06, 0x17, 0xe8,
    -	0x39, 0xc3, 0x80, 0xa1, 0x7c, 0x84, 0xf3, 0x33, 0x86, 0x97, 0x61, 0x05, 0x45, 0xb3, 0x30, 0xd6,
    -	0x70, 0x3d, 0x7a, 0xf8, 0xec, 0x9a, 0xac, 0xb8, 0x52, 0x91, 0xac, 0x98, 0x60, 0x9c, 0xc4, 0x47,
    -	0xae, 0x16, 0xe2, 0x91, 0x7f, 0xdd, 0xab, 0x87, 0xda, 0x75, 0x33, 0xa6, 0x95, 0x88, 0x1a, 0xc5,
    -	0x8c, 0x70, 0x8f, 0x2b, 0x9a, 0x9c, 0xa8, 0xd8, 0xbd, 0x9c, 0x68, 0x38, 0x5b, 0x46, 0x34, 0xfd,
    -	0x2a, 0x8c, 0xdc, 0xb7, 0x6e, 0xc4, 0xfe, 0x56, 0x11, 0x1e, 0x69, 0xb3, 0xed, 0xf9, 0x59, 0x6f,
    -	0xcc, 0x81, 0x76, 0xd6, 0xa7, 0xe6, 0xa1, 0x0c, 0xc7, 0x36, 0x5a, 0xf5, 0xfa, 0x2e, 0x73, 0x74,
    -	0x23, 0x35, 0x89, 0x21, 0x78, 0x4a, 0x29, 0x1c, 0x39, 0xb6, 0x94, 0x81, 0x83, 0x33, 0x6b, 0xd2,
    -	0x27, 0x16, 0xbd, 0x49, 0x76, 0x15, 0xa9, 0xc4, 0x13, 0x0b, 0xeb, 0x40, 0x6c, 0xe2, 0xa2, 0x2b,
    -	0x30, 0xe1, 0xec, 0x38, 0x2e, 0x4f, 0xef, 0x21, 0x09, 0xf0, 0x37, 0x96, 0x92, 0x45, 0xcf, 0x26,
    -	0x11, 0x70, 0xba, 0x0e, 0x7a, 0x13, 0x90, 0x7f, 0x9b, 0x39, 0xcf, 0xd4, 0xae, 0x10, 0x4f, 0x28,
    -	0xf3, 0xd9, 0xdc, 0x15, 0xe3, 0x23, 0xe1, 0x46, 0x0a, 0x03, 0x67, 0xd4, 0x4a, 0x04, 0x1b, 0xec,
    -	0xcb, 0x0f, 0x36, 0xd8, 0xfe, 0x5c, 0xec, 0x98, 0x19, 0xf1, 0x1d, 0x18, 0x39, 0xac, 0xb5, 0xf7,
    -	0x53, 0xd0, 0x1f, 0x88, 0x9c, 0xf3, 0x09, 0xaf, 0x72, 0x99, 0x91, 0x5b, 0xc2, 0xed, 0xff, 0xc7,
    -	0x02, 0x25, 0x4b, 0x36, 0xe3, 0x8a, 0xbf, 0xca, 0x4c, 0xd7, 0xb9, 0x14, 0x5c, 0x0b, 0x25, 0x76,
    -	0x5c, 0x33, 0x5d, 0x8f, 0x81, 0xd8, 0xc4, 0xe5, 0xcb, 0x2d, 0x8c, 0x23, 0x58, 0x18, 0x0f, 0x08,
    -	0xa1, 0x35, 0x54, 0x18, 0xe8, 0x13, 0xd0, 0x5f, 0x73, 0x77, 0xdc, 0x50, 0xc8, 0xd1, 0x0e, 0xad,
    -	0xb7, 0x8b, 0xbf, 0x6f, 0x81, 0x93, 0xc1, 0x92, 0x9e, 0xfd, 0x53, 0x16, 0x28, 0x75, 0xe7, 0x55,
    -	0xe2, 0xd4, 0xa3, 0x2d, 0xf4, 0x06, 0x80, 0xa4, 0xa0, 0x64, 0x6f, 0xd2, 0x08, 0x0b, 0xb0, 0x82,
    -	0x1c, 0x18, 0xff, 0xb0, 0x56, 0x07, 0xbd, 0x0e, 0x7d, 0x5b, 0x8c, 0x96, 0xf8, 0xb6, 0xf3, 0x4a,
    -	0xd5, 0xc5, 0x4a, 0x0f, 0xf6, 0x4a, 0xc7, 0xcc, 0x36, 0xe5, 0x2d, 0xc6, 0x6b, 0xd9, 0x3f, 0x51,
    -	0x88, 0xe7, 0xf4, 0xad, 0x96, 0x1f, 0x39, 0x47, 0xc0, 0x89, 0x5c, 0x31, 0x38, 0x91, 0x27, 0xda,
    -	0xe9, 0x73, 0x59, 0x97, 0x72, 0x39, 0x90, 0x1b, 0x09, 0x0e, 0xe4, 0xc9, 0xce, 0xa4, 0xda, 0x73,
    -	0x1e, 0xff, 0x93, 0x05, 0x13, 0x06, 0xfe, 0x11, 0x5c, 0x80, 0x4b, 0xe6, 0x05, 0xf8, 0x68, 0xc7,
    -	0x6f, 0xc8, 0xb9, 0xf8, 0x7e, 0xb4, 0x98, 0xe8, 0x3b, 0xbb, 0xf0, 0xde, 0x85, 0x9e, 0x2d, 0x27,
    -	0xa8, 0x89, 0x77, 0xfd, 0xc5, 0xae, 0xc6, 0x7a, 0xe6, 0xaa, 0x13, 0x08, 0x03, 0x8e, 0x67, 0xe5,
    -	0xa8, 0xd3, 0xa2, 0x8e, 0xc6, 0x1b, 0xac, 0x29, 0x74, 0x19, 0xfa, 0xc2, 0xaa, 0xdf, 0x54, 0x7e,
    -	0x80, 0x2c, 0x5d, 0x78, 0x85, 0x95, 0x1c, 0xec, 0x95, 0x90, 0xd9, 0x1c, 0x2d, 0xc6, 0x02, 0x1f,
    -	0xbd, 0x0d, 0x23, 0xec, 0x97, 0xb2, 0xa6, 0x2c, 0xe6, 0x4b, 0x60, 0x2a, 0x3a, 0x22, 0x37, 0x35,
    -	0x36, 0x8a, 0xb0, 0x49, 0x6a, 0x7a, 0x13, 0x06, 0xd5, 0x67, 0x3d, 0x54, 0x6d, 0xfd, 0xff, 0x59,
    -	0x84, 0xc9, 0x8c, 0x35, 0x87, 0x42, 0x63, 0x26, 0x2e, 0x75, 0xb9, 0x54, 0xdf, 0xe3, 0x5c, 0x84,
    -	0xec, 0x01, 0x58, 0x13, 0x6b, 0xab, 0xeb, 0x46, 0xd7, 0x43, 0x92, 0x6c, 0x94, 0x16, 0x75, 0x6e,
    -	0x94, 0x36, 0x76, 0x64, 0x43, 0x4d, 0x1b, 0x52, 0x3d, 0x7d, 0xa8, 0x73, 0xfa, 0x87, 0x3d, 0x70,
    -	0x2c, 0xcb, 0xc4, 0x04, 0x7d, 0x0e, 0xfa, 0x98, 0xa3, 0x9a, 0x14, 0x9c, 0xbd, 0xd8, 0xad, 0x71,
    -	0xca, 0x0c, 0xf3, 0x75, 0x13, 0xa1, 0x69, 0x67, 0xe4, 0x71, 0xc4, 0x0b, 0x3b, 0x0e, 0xb3, 0x68,
    -	0x93, 0x85, 0x8c, 0x12, 0xb7, 0xa7, 0x3c, 0x3e, 0x3e, 0xd2, 0x75, 0x07, 0xc4, 0xfd, 0x1b, 0x26,
    -	0x2c, 0xb5, 0x64, 0x71, 0x67, 0x4b, 0x2d, 0xd9, 0x32, 0x5a, 0x86, 0xbe, 0x2a, 0x37, 0x01, 0x2a,
    -	0x76, 0x3e, 0xc2, 0xb8, 0xfd, 0x8f, 0x3a, 0x80, 0x85, 0xdd, 0x8f, 0x20, 0x30, 0xed, 0xc2, 0x90,
    -	0x36, 0x30, 0x0f, 0x75, 0xf1, 0x6c, 0xd3, 0x8b, 0x4f, 0x1b, 0x82, 0x87, 0xba, 0x80, 0x7e, 0x46,
    -	0xbb, 0xfb, 0xc5, 0x79, 0xf0, 0x61, 0x83, 0x77, 0x3a, 0x9d, 0x70, 0x1f, 0x4c, 0xec, 0x2b, 0xc6,
    -	0x4b, 0x55, 0xcc, 0x98, 0xee, 0xb9, 0xa9, 0xa1, 0xcc, 0x0b, 0xbf, 0x7d, 0x1c, 0x77, 0xfb, 0x67,
    -	0x2d, 0x48, 0x38, 0x78, 0x29, 0x71, 0xa7, 0x95, 0x2b, 0xee, 0x3c, 0x07, 0x3d, 0x81, 0x5f, 0x27,
    -	0xc9, 0xd4, 0xfb, 0xd8, 0xaf, 0x13, 0xcc, 0x20, 0x14, 0x23, 0x8a, 0x85, 0x58, 0xc3, 0xfa, 0x03,
    -	0x5d, 0x3c, 0xbd, 0x1f, 0x83, 0xde, 0x3a, 0xd9, 0x21, 0xf5, 0x64, 0x86, 0xd4, 0xeb, 0xb4, 0x10,
    -	0x73, 0x98, 0xfd, 0xdb, 0x3d, 0x70, 0xa6, 0x6d, 0x64, 0x39, 0xca, 0x60, 0x6e, 0x3a, 0x11, 0xb9,
    -	0xe3, 0xec, 0x26, 0x33, 0x03, 0x5e, 0xe1, 0xc5, 0x58, 0xc2, 0x99, 0xb3, 0x35, 0xcf, 0x94, 0x93,
    -	0x10, 0x0e, 0x8b, 0x04, 0x39, 0x02, 0x6a, 0x0a, 0x1b, 0x8b, 0x0f, 0x42, 0xd8, 0xf8, 0x3c, 0x40,
    -	0x18, 0xd6, 0xb9, 0x1d, 0x67, 0x4d, 0x78, 0x71, 0xc7, 0x19, 0x95, 0x2a, 0xd7, 0x05, 0x04, 0x6b,
    -	0x58, 0x68, 0x01, 0xc6, 0x9b, 0x81, 0x1f, 0x71, 0x59, 0xfb, 0x02, 0x37, 0x75, 0xee, 0x35, 0x83,
    -	0x7a, 0x95, 0x13, 0x70, 0x9c, 0xaa, 0x81, 0x5e, 0x82, 0x21, 0x11, 0xe8, 0xab, 0xec, 0xfb, 0x75,
    -	0x21, 0xde, 0x53, 0xd6, 0xbf, 0x95, 0x18, 0x84, 0x75, 0x3c, 0xad, 0x1a, 0x13, 0xe0, 0xf7, 0x67,
    -	0x56, 0xe3, 0x42, 0x7c, 0x0d, 0x2f, 0x91, 0x14, 0x60, 0xa0, 0xab, 0xa4, 0x00, 0xb1, 0xc0, 0x73,
    -	0xb0, 0x6b, 0x7d, 0x32, 0x74, 0x14, 0x11, 0x7e, 0xad, 0x07, 0x26, 0xc5, 0xc2, 0x79, 0xd8, 0xcb,
    -	0x65, 0x3d, 0xbd, 0x5c, 0x1e, 0x84, 0x48, 0xf4, 0x83, 0x35, 0x73, 0xd4, 0x6b, 0xe6, 0x27, 0x2d,
    -	0x30, 0x79, 0x48, 0xf4, 0x9f, 0xe5, 0xa6, 0x56, 0x7d, 0x29, 0x97, 0x27, 0x8d, 0x23, 0x86, 0xbf,
    -	0xb7, 0x24, 0xab, 0xf6, 0xff, 0x65, 0xc1, 0xa3, 0x1d, 0x29, 0xa2, 0x45, 0x18, 0x64, 0x8c, 0xae,
    -	0xf6, 0x2e, 0x7e, 0x52, 0xb9, 0x42, 0x48, 0x40, 0x0e, 0xdf, 0x1d, 0xd7, 0x44, 0x8b, 0xa9, 0x1c,
    -	0xb6, 0x4f, 0x65, 0xe4, 0xb0, 0x3d, 0x6e, 0x0c, 0xcf, 0x7d, 0x26, 0xb1, 0xfd, 0x12, 0xbd, 0x71,
    -	0x4c, 0x7f, 0xca, 0x8f, 0x18, 0xe2, 0x5c, 0x3b, 0x21, 0xce, 0x45, 0x26, 0xb6, 0x76, 0x87, 0xbc,
    -	0x01, 0xe3, 0x2c, 0x02, 0x28, 0x73, 0xcc, 0x11, 0x8e, 0x98, 0x85, 0xd8, 0xf8, 0xfe, 0x7a, 0x02,
    -	0x86, 0x53, 0xd8, 0xf6, 0xdf, 0x15, 0xa1, 0x8f, 0x6f, 0xbf, 0x23, 0x78, 0xf8, 0x3e, 0x03, 0x83,
    -	0x6e, 0xa3, 0xd1, 0xe2, 0x69, 0x49, 0x7b, 0x63, 0x53, 0xee, 0x65, 0x59, 0x88, 0x63, 0x38, 0x5a,
    -	0x12, 0x9a, 0x84, 0x36, 0x41, 0xc6, 0x79, 0xc7, 0x67, 0x16, 0x9c, 0xc8, 0xe1, 0x5c, 0x9c, 0xba,
    -	0x67, 0x63, 0x9d, 0x03, 0xfa, 0x34, 0x40, 0x18, 0x05, 0xae, 0xb7, 0x49, 0xcb, 0x44, 0x26, 0x8a,
    -	0xa7, 0xdb, 0x50, 0xab, 0x28, 0x64, 0x4e, 0x33, 0x3e, 0x73, 0x14, 0x00, 0x6b, 0x14, 0xd1, 0x8c,
    -	0x71, 0xd3, 0x4f, 0x27, 0xe6, 0x0e, 0x38, 0xd5, 0x78, 0xce, 0xa6, 0x5f, 0x86, 0x41, 0x45, 0xbc,
    -	0x93, 0x5c, 0x71, 0x58, 0x67, 0xd8, 0x3e, 0x06, 0x63, 0x89, 0xbe, 0x1d, 0x4a, 0x2c, 0xf9, 0x3b,
    -	0x16, 0x8c, 0xf1, 0xce, 0x2c, 0x7a, 0x3b, 0xe2, 0x36, 0xb8, 0x07, 0xc7, 0xea, 0x19, 0xa7, 0xb2,
    -	0x98, 0xfe, 0xee, 0x4f, 0x71, 0x25, 0x86, 0xcc, 0x82, 0xe2, 0xcc, 0x36, 0xd0, 0x05, 0xba, 0xe3,
    -	0xe8, 0xa9, 0xeb, 0xd4, 0x45, 0x34, 0x91, 0x61, 0xbe, 0xdb, 0x78, 0x19, 0x56, 0x50, 0xfb, 0xaf,
    -	0x2c, 0x98, 0xe0, 0x3d, 0xbf, 0x46, 0x76, 0xd5, 0xd9, 0xf4, 0x9d, 0xec, 0xbb, 0x48, 0x88, 0x5d,
    -	0xc8, 0x49, 0x88, 0xad, 0x7f, 0x5a, 0xb1, 0xed, 0xa7, 0x7d, 0xd5, 0x02, 0xb1, 0x42, 0x8e, 0x40,
    -	0xd2, 0xf2, 0x7d, 0xa6, 0xa4, 0x65, 0x3a, 0x7f, 0x13, 0xe4, 0x88, 0x58, 0xfe, 0xc5, 0x82, 0x71,
    -	0x8e, 0x10, 0x5b, 0x41, 0x7c, 0x47, 0xe7, 0x61, 0xce, 0xfc, 0xa2, 0x4c, 0xb3, 0xd6, 0x6b, 0x64,
    -	0x77, 0xcd, 0x2f, 0x3b, 0xd1, 0x56, 0xf6, 0x47, 0x19, 0x93, 0xd5, 0xd3, 0x76, 0xb2, 0x6a, 0x72,
    -	0x03, 0x19, 0x89, 0x17, 0x3b, 0x08, 0x80, 0x0f, 0x9b, 0x78, 0xd1, 0xfe, 0x7b, 0x0b, 0x10, 0x6f,
    -	0xc6, 0x60, 0xdc, 0x28, 0x3b, 0xc4, 0x4a, 0xb5, 0x8b, 0x2e, 0x3e, 0x9a, 0x14, 0x04, 0x6b, 0x58,
    -	0x0f, 0x64, 0x78, 0x12, 0xa6, 0x2c, 0xc5, 0xce, 0xa6, 0x2c, 0x87, 0x18, 0xd1, 0xaf, 0xf6, 0x43,
    -	0xd2, 0x15, 0x13, 0xdd, 0x84, 0xe1, 0xaa, 0xd3, 0x74, 0x6e, 0xbb, 0x75, 0x37, 0x72, 0x49, 0xd8,
    -	0xce, 0xce, 0x6d, 0x5e, 0xc3, 0x13, 0xc6, 0x07, 0x5a, 0x09, 0x36, 0xe8, 0xa0, 0x19, 0x80, 0x66,
    -	0xe0, 0xee, 0xb8, 0x75, 0xb2, 0xc9, 0x04, 0x42, 0x2c, 0x7e, 0x11, 0x37, 0xba, 0x93, 0xa5, 0x58,
    -	0xc3, 0xc8, 0x08, 0x1b, 0x52, 0x7c, 0xc8, 0x61, 0x43, 0xe0, 0xc8, 0xc2, 0x86, 0xf4, 0x1c, 0x2a,
    -	0x6c, 0xc8, 0xc0, 0xa1, 0xc3, 0x86, 0xf4, 0x76, 0x15, 0x36, 0x04, 0xc3, 0x09, 0xc9, 0x7b, 0xd2,
    -	0xff, 0x4b, 0x6e, 0x9d, 0x88, 0x07, 0x07, 0x0f, 0xba, 0x34, 0xbd, 0xbf, 0x57, 0x3a, 0x81, 0x33,
    -	0x31, 0x70, 0x4e, 0x4d, 0xf4, 0x71, 0x98, 0x72, 0xea, 0x75, 0xff, 0x8e, 0x9a, 0xd4, 0xc5, 0xb0,
    -	0xea, 0xd4, 0xb9, 0x72, 0xa9, 0x9f, 0x51, 0x3d, 0xbd, 0xbf, 0x57, 0x9a, 0x9a, 0xcd, 0xc1, 0xc1,
    -	0xb9, 0xb5, 0xd1, 0x6b, 0x30, 0xd8, 0x0c, 0xfc, 0xea, 0x8a, 0xe6, 0x2f, 0x7e, 0x96, 0x0e, 0x60,
    -	0x59, 0x16, 0x1e, 0xec, 0x95, 0x46, 0xd4, 0x1f, 0x76, 0xe1, 0xc7, 0x15, 0x32, 0x22, 0x72, 0x0c,
    -	0x3d, 0xec, 0x88, 0x1c, 0xc3, 0x0f, 0x38, 0x22, 0x87, 0xbd, 0x0d, 0x93, 0x15, 0x12, 0xb8, 0x4e,
    -	0xdd, 0xbd, 0x47, 0x79, 0x72, 0x79, 0x06, 0xae, 0xc1, 0x60, 0x90, 0x38, 0xf5, 0xbb, 0x0a, 0x2e,
    -	0xae, 0xc9, 0x65, 0xe4, 0x29, 0x1f, 0x13, 0xb2, 0xff, 0xbd, 0x05, 0xfd, 0xc2, 0xbd, 0xf3, 0x08,
    -	0x38, 0xd3, 0x59, 0x43, 0x25, 0x53, 0xca, 0x9e, 0x14, 0xd6, 0x99, 0x5c, 0x65, 0xcc, 0x72, 0x42,
    -	0x19, 0xf3, 0x68, 0x3b, 0x22, 0xed, 0xd5, 0x30, 0xff, 0x75, 0x91, 0xbe, 0x10, 0x8c, 0x40, 0x03,
    -	0x0f, 0x7f, 0x08, 0x56, 0xa1, 0x3f, 0x14, 0x8e, 0xee, 0x85, 0x7c, 0x5f, 0x9e, 0xe4, 0x24, 0xc6,
    -	0x36, 0x90, 0xc2, 0xb5, 0x5d, 0x12, 0xc9, 0xf4, 0xa0, 0x2f, 0x3e, 0x44, 0x0f, 0xfa, 0x4e, 0xa1,
    -	0x18, 0x7a, 0x1e, 0x44, 0x28, 0x06, 0xfb, 0x1b, 0xec, 0x76, 0xd6, 0xcb, 0x8f, 0x80, 0x71, 0xbb,
    -	0x62, 0xde, 0xe3, 0x76, 0x9b, 0x95, 0x25, 0x3a, 0x95, 0xc3, 0xc0, 0xfd, 0x96, 0x05, 0x67, 0x32,
    -	0xbe, 0x4a, 0xe3, 0xe6, 0x9e, 0x85, 0x01, 0xa7, 0x55, 0x73, 0xd5, 0x5e, 0xd6, 0xb4, 0xc5, 0xb3,
    -	0xa2, 0x1c, 0x2b, 0x0c, 0x34, 0x0f, 0x13, 0xe4, 0x6e, 0xd3, 0xe5, 0x6a, 0x78, 0xdd, 0x74, 0xbc,
    -	0xc8, 0x7d, 0x82, 0x17, 0x93, 0x40, 0x9c, 0xc6, 0x57, 0xe1, 0xdc, 0x8a, 0xb9, 0xe1, 0xdc, 0x7e,
    -	0xdd, 0x82, 0x21, 0xe5, 0xea, 0xfd, 0xd0, 0x47, 0xfb, 0x0d, 0x73, 0xb4, 0x1f, 0x69, 0x33, 0xda,
    -	0x39, 0xc3, 0xfc, 0x97, 0x05, 0xd5, 0xdf, 0xb2, 0x1f, 0x44, 0x5d, 0x70, 0x89, 0xf7, 0xef, 0xf6,
    -	0x72, 0x09, 0x86, 0x9c, 0x66, 0x53, 0x02, 0xa4, 0xfd, 0x22, 0x4b, 0x15, 0x11, 0x17, 0x63, 0x1d,
    -	0x47, 0x79, 0xe1, 0x14, 0x73, 0xbd, 0x70, 0x6a, 0x00, 0x91, 0x13, 0x6c, 0x92, 0x88, 0x96, 0x09,
    -	0x73, 0xeb, 0xfc, 0xf3, 0xa6, 0x15, 0xb9, 0xf5, 0x19, 0xd7, 0x8b, 0xc2, 0x28, 0x98, 0x59, 0xf6,
    -	0xa2, 0x1b, 0x01, 0x7f, 0xa6, 0x6a, 0x41, 0x13, 0x15, 0x2d, 0xac, 0xd1, 0x95, 0x61, 0x4d, 0x58,
    -	0x1b, 0xbd, 0xa6, 0x21, 0xcc, 0xaa, 0x28, 0xc7, 0x0a, 0xc3, 0x7e, 0x99, 0xdd, 0x3e, 0x6c, 0x4c,
    -	0x0f, 0x17, 0x0c, 0xf0, 0x1f, 0x86, 0xd5, 0x6c, 0x30, 0x95, 0xf0, 0x82, 0x1e, 0x72, 0xb0, 0xfd,
    -	0x61, 0x4f, 0x1b, 0xd6, 0xfd, 0x59, 0xe3, 0xb8, 0x84, 0xe8, 0x93, 0x29, 0xe3, 0xa6, 0xe7, 0x3a,
    -	0xdc, 0x1a, 0x87, 0x30, 0x67, 0x62, 0x79, 0xe3, 0x58, 0x56, 0xad, 0xe5, 0xb2, 0xd8, 0x17, 0x5a,
    -	0xde, 0x38, 0x01, 0xc0, 0x31, 0x0e, 0x65, 0xd8, 0xd4, 0x9f, 0x70, 0x0a, 0xc5, 0xe1, 0xc5, 0x15,
    -	0x76, 0x88, 0x35, 0x0c, 0x74, 0x51, 0x08, 0x2d, 0xb8, 0xee, 0xe1, 0x91, 0x84, 0xd0, 0x42, 0x0e,
    -	0x97, 0x26, 0x69, 0xba, 0x04, 0x43, 0xe4, 0x6e, 0x44, 0x02, 0xcf, 0xa9, 0xd3, 0x16, 0x7a, 0xe3,
    -	0x88, 0xb8, 0x8b, 0x71, 0x31, 0xd6, 0x71, 0xd0, 0x1a, 0x8c, 0x85, 0x5c, 0x96, 0xa7, 0x92, 0x5a,
    -	0x70, 0x99, 0xe8, 0xd3, 0xca, 0xc9, 0xde, 0x04, 0x1f, 0xb0, 0x22, 0x7e, 0x3a, 0xc9, 0xd0, 0x23,
    -	0x49, 0x12, 0xe8, 0x75, 0x18, 0xad, 0xfb, 0x4e, 0x6d, 0xce, 0xa9, 0x3b, 0x5e, 0x95, 0x8d, 0xcf,
    -	0x80, 0x11, 0x7f, 0x72, 0xf4, 0xba, 0x01, 0xc5, 0x09, 0x6c, 0xca, 0x20, 0xea, 0x25, 0x22, 0x11,
    -	0x8b, 0xe3, 0x6d, 0x92, 0x70, 0x6a, 0x90, 0x7d, 0x15, 0x63, 0x10, 0xaf, 0xe7, 0xe0, 0xe0, 0xdc,
    -	0xda, 0xe8, 0x32, 0x0c, 0xcb, 0xcf, 0xd7, 0x22, 0xf5, 0xc4, 0x0e, 0x4d, 0x1a, 0x0c, 0x1b, 0x98,
    -	0x28, 0x84, 0xe3, 0xf2, 0xff, 0x5a, 0xe0, 0x6c, 0x6c, 0xb8, 0x55, 0x11, 0xbe, 0x82, 0x3b, 0x7f,
    -	0x7f, 0x4c, 0x7a, 0x9a, 0x2e, 0x66, 0x21, 0x1d, 0xec, 0x95, 0x4e, 0x8b, 0x51, 0xcb, 0x84, 0xe3,
    -	0x6c, 0xda, 0x68, 0x05, 0x26, 0xb9, 0x0d, 0xcc, 0xfc, 0x16, 0xa9, 0x6e, 0xcb, 0x0d, 0xc7, 0xb8,
    -	0x46, 0xcd, 0xf1, 0xe7, 0x6a, 0x1a, 0x05, 0x67, 0xd5, 0x43, 0xef, 0xc0, 0x54, 0xb3, 0x75, 0xbb,
    -	0xee, 0x86, 0x5b, 0xab, 0x7e, 0xc4, 0x4c, 0xc8, 0x66, 0x6b, 0xb5, 0x80, 0x84, 0xdc, 0x37, 0x98,
    -	0x5d, 0xbd, 0x32, 0xba, 0x52, 0x39, 0x07, 0x0f, 0xe7, 0x52, 0x40, 0xf7, 0xe0, 0x78, 0x62, 0x21,
    -	0x88, 0x30, 0x29, 0xa3, 0xf9, 0x29, 0xad, 0x2a, 0x59, 0x15, 0x44, 0xc4, 0xa1, 0x2c, 0x10, 0xce,
    -	0x6e, 0x02, 0xbd, 0x02, 0xe0, 0x36, 0x97, 0x9c, 0x86, 0x5b, 0xa7, 0xcf, 0xd1, 0x49, 0xb6, 0x46,
    -	0xe8, 0xd3, 0x04, 0x96, 0xcb, 0xb2, 0x94, 0x9e, 0xcd, 0xe2, 0xdf, 0x2e, 0xd6, 0xb0, 0xd1, 0x75,
    -	0x18, 0x15, 0xff, 0x76, 0xc5, 0x94, 0x4e, 0xa8, 0xec, 0xa7, 0xa3, 0xb2, 0x86, 0x9a, 0xc7, 0x44,
    -	0x09, 0x4e, 0xd4, 0x45, 0x9b, 0x70, 0x46, 0xa6, 0x5e, 0xd5, 0xd7, 0xa7, 0x9c, 0x83, 0x90, 0xe5,
    -	0x91, 0x1a, 0xe0, 0x3e, 0x45, 0xb3, 0xed, 0x10, 0x71, 0x7b, 0x3a, 0xf4, 0x5e, 0xd7, 0x97, 0x39,
    -	0xf7, 0x18, 0x3f, 0x1e, 0x47, 0xf1, 0xbc, 0x9e, 0x04, 0xe2, 0x34, 0x3e, 0xf2, 0xe1, 0xb8, 0xeb,
    -	0x65, 0xad, 0xea, 0x13, 0x8c, 0xd0, 0x47, 0xb9, 0xb3, 0x7c, 0xfb, 0x15, 0x9d, 0x09, 0xc7, 0xd9,
    -	0x74, 0xd1, 0x32, 0x4c, 0x46, 0xbc, 0x60, 0xc1, 0x0d, 0x79, 0x9a, 0x1a, 0xfa, 0xec, 0x3b, 0xc9,
    -	0x9a, 0x3b, 0x49, 0x57, 0xf3, 0x5a, 0x1a, 0x8c, 0xb3, 0xea, 0xbc, 0x37, 0x03, 0xd0, 0x6f, 0x5a,
    -	0xb4, 0xb6, 0xc6, 0xe8, 0xa3, 0xcf, 0xc0, 0xb0, 0x3e, 0x3e, 0x82, 0x69, 0x39, 0x9f, 0xcd, 0x07,
    -	0x6b, 0xc7, 0x0b, 0x7f, 0x26, 0xa8, 0x23, 0x44, 0x87, 0x61, 0x83, 0x22, 0xaa, 0x66, 0x04, 0xb9,
    -	0xb8, 0xd8, 0x1d, 0x53, 0xd4, 0xbd, 0xfd, 0x23, 0x81, 0xec, 0x9d, 0x83, 0xae, 0xc3, 0x40, 0xb5,
    -	0xee, 0x12, 0x2f, 0x5a, 0x2e, 0xb7, 0x0b, 0xae, 0x3a, 0x2f, 0x70, 0xc4, 0x56, 0x14, 0xd9, 0xa5,
    -	0x78, 0x19, 0x56, 0x14, 0xec, 0xcb, 0x30, 0x54, 0xa9, 0x13, 0xd2, 0xe4, 0x7e, 0x5c, 0xe8, 0x29,
    -	0xf6, 0x30, 0x61, 0xac, 0xa5, 0xc5, 0x58, 0x4b, 0xfd, 0xcd, 0xc1, 0x98, 0x4a, 0x09, 0xb7, 0xff,
    -	0xb8, 0x00, 0xa5, 0x0e, 0x49, 0xce, 0x12, 0xfa, 0x36, 0xab, 0x2b, 0x7d, 0xdb, 0x2c, 0x8c, 0xc5,
    -	0xff, 0x74, 0x51, 0x9e, 0x32, 0x86, 0xbe, 0x69, 0x82, 0x71, 0x12, 0xbf, 0x6b, 0xbf, 0x16, 0x5d,
    -	0x65, 0xd7, 0xd3, 0xd1, 0x33, 0xcb, 0x50, 0xd5, 0xf7, 0x76, 0xff, 0xf6, 0xce, 0x55, 0xbb, 0xda,
    -	0xdf, 0x28, 0xc0, 0x71, 0x35, 0x84, 0xdf, 0xbb, 0x03, 0xb7, 0x9e, 0x1e, 0xb8, 0x07, 0xa0, 0xb4,
    -	0xb6, 0x6f, 0x40, 0x1f, 0x8f, 0xf8, 0xda, 0x05, 0xcf, 0xff, 0x98, 0x19, 0x7c, 0x5f, 0xb1, 0x99,
    -	0x46, 0x00, 0xfe, 0x1f, 0xb3, 0x60, 0x2c, 0xe1, 0x20, 0x89, 0xb0, 0xe6, 0x45, 0x7f, 0x3f, 0x7c,
    -	0x79, 0x16, 0xc7, 0x7f, 0x0e, 0x7a, 0xb6, 0x7c, 0x65, 0xa4, 0xac, 0x30, 0xae, 0xfa, 0x61, 0x84,
    -	0x19, 0xc4, 0xfe, 0x6b, 0x0b, 0x7a, 0xd7, 0x1c, 0xd7, 0x8b, 0xa4, 0xf6, 0xc3, 0xca, 0xd1, 0x7e,
    -	0x74, 0xf3, 0x5d, 0xe8, 0x25, 0xe8, 0x23, 0x1b, 0x1b, 0xa4, 0x1a, 0x89, 0x59, 0x95, 0xd1, 0x34,
    -	0xfa, 0x16, 0x59, 0x29, 0x65, 0x42, 0x59, 0x63, 0xfc, 0x2f, 0x16, 0xc8, 0xe8, 0x16, 0x0c, 0x46,
    -	0x6e, 0x83, 0xcc, 0xd6, 0x6a, 0xc2, 0x26, 0xe0, 0x3e, 0x42, 0xc0, 0xac, 0x49, 0x02, 0x38, 0xa6,
    -	0x65, 0x7f, 0xb9, 0x00, 0x10, 0x47, 0x98, 0xeb, 0xf4, 0x89, 0x73, 0x29, 0x6d, 0xf1, 0xf9, 0x0c,
    -	0x6d, 0x31, 0x8a, 0x09, 0x66, 0xa8, 0x8a, 0xd5, 0x30, 0x15, 0xbb, 0x1a, 0xa6, 0x9e, 0xc3, 0x0c,
    -	0xd3, 0x3c, 0x4c, 0xc4, 0x11, 0xf2, 0xcc, 0x00, 0xa1, 0xec, 0xfe, 0x5e, 0x4b, 0x02, 0x71, 0x1a,
    -	0xdf, 0x26, 0x70, 0x4e, 0x05, 0x0a, 0x13, 0x77, 0x21, 0x73, 0x25, 0xd0, 0xb5, 0xef, 0x1d, 0xc6,
    -	0x29, 0x56, 0x87, 0x17, 0x72, 0xd5, 0xe1, 0xbf, 0x60, 0xc1, 0xb1, 0x64, 0x3b, 0xcc, 0xef, 0xfe,
    -	0x8b, 0x16, 0x1c, 0x8f, 0x73, 0xfc, 0xa4, 0x4d, 0x10, 0x5e, 0x6c, 0x1b, 0xfc, 0x2c, 0xa7, 0xc7,
    -	0x71, 0xd8, 0x96, 0x95, 0x2c, 0xd2, 0x38, 0xbb, 0x45, 0xfb, 0xdf, 0xf5, 0xc0, 0x54, 0x5e, 0xd4,
    -	0x34, 0xe6, 0x69, 0xe4, 0xdc, 0xad, 0x6c, 0x93, 0x3b, 0xc2, 0x9f, 0x23, 0xf6, 0x34, 0xe2, 0xc5,
    -	0x58, 0xc2, 0x93, 0x69, 0x9d, 0x0a, 0x5d, 0xa6, 0x75, 0xda, 0x82, 0x89, 0x3b, 0x5b, 0xc4, 0x5b,
    -	0xf7, 0x42, 0x27, 0x72, 0xc3, 0x0d, 0x97, 0x29, 0xd0, 0xf9, 0xba, 0x79, 0x45, 0x7a, 0x5d, 0xdc,
    -	0x4a, 0x22, 0x1c, 0xec, 0x95, 0xce, 0x18, 0x05, 0x71, 0x97, 0xf9, 0x41, 0x82, 0xd3, 0x44, 0xd3,
    -	0x59, 0xb1, 0x7a, 0x1e, 0x72, 0x56, 0xac, 0x86, 0x2b, 0xcc, 0x6e, 0xa4, 0x1b, 0x09, 0x7b, 0xb6,
    -	0xae, 0xa8, 0x52, 0xac, 0x61, 0xa0, 0x4f, 0x01, 0xd2, 0xd3, 0x1a, 0x1a, 0x41, 0x6b, 0x9f, 0xdb,
    -	0xdf, 0x2b, 0xa1, 0xd5, 0x14, 0xf4, 0x60, 0xaf, 0x34, 0x49, 0x4b, 0x97, 0x3d, 0xfa, 0xfc, 0x8d,
    -	0x23, 0xfd, 0x65, 0x10, 0x42, 0xb7, 0x60, 0x9c, 0x96, 0xb2, 0x1d, 0x25, 0x23, 0xe2, 0xf2, 0x27,
    -	0xeb, 0x33, 0xfb, 0x7b, 0xa5, 0xf1, 0xd5, 0x04, 0x2c, 0x8f, 0x74, 0x8a, 0x48, 0x46, 0x72, 0xac,
    -	0x81, 0x6e, 0x93, 0x63, 0xd9, 0x5f, 0xb4, 0xe0, 0x14, 0xbd, 0xe0, 0x6a, 0xd7, 0x73, 0xb4, 0xe8,
    -	0x4e, 0xd3, 0xe5, 0x7a, 0x1a, 0x71, 0xd5, 0x30, 0x59, 0x5d, 0x79, 0x99, 0x6b, 0x69, 0x14, 0x94,
    -	0x9e, 0xf0, 0xdb, 0xae, 0x57, 0x4b, 0x9e, 0xf0, 0xd7, 0x5c, 0xaf, 0x86, 0x19, 0x44, 0x5d, 0x59,
    -	0xc5, 0xdc, 0x08, 0xfb, 0x5f, 0xa3, 0x7b, 0x95, 0xf6, 0xe5, 0x3b, 0xda, 0x0d, 0xf4, 0x8c, 0xae,
    -	0x53, 0x15, 0xe6, 0x93, 0xb9, 0xfa, 0xd4, 0x2f, 0x58, 0x20, 0xbc, 0xdf, 0xbb, 0xb8, 0x93, 0xdf,
    -	0x86, 0xe1, 0x9d, 0x74, 0xca, 0xd7, 0x73, 0xf9, 0xe1, 0x00, 0x44, 0xa2, 0x57, 0xc5, 0xa2, 0x1b,
    -	0xe9, 0x5d, 0x0d, 0x5a, 0x76, 0x0d, 0x04, 0x74, 0x81, 0x30, 0xad, 0x46, 0xe7, 0xde, 0x3c, 0x0f,
    -	0x50, 0x63, 0xb8, 0x2c, 0x0f, 0x7c, 0xc1, 0xe4, 0xb8, 0x16, 0x14, 0x04, 0x6b, 0x58, 0xf6, 0xaf,
    -	0x16, 0x61, 0x48, 0xa6, 0x18, 0x6d, 0x79, 0xdd, 0xc8, 0x1e, 0x75, 0xc6, 0xa9, 0xd0, 0x91, 0x71,
    -	0x7a, 0x07, 0x26, 0x02, 0x52, 0x6d, 0x05, 0xa1, 0xbb, 0x43, 0x24, 0x58, 0x6c, 0x92, 0x19, 0x9e,
    -	0xe0, 0x21, 0x01, 0x3c, 0x60, 0x21, 0xb2, 0x12, 0x85, 0x4c, 0x69, 0x9c, 0x26, 0x84, 0x2e, 0xc2,
    -	0x20, 0x13, 0xbd, 0x97, 0x63, 0x81, 0xb0, 0x12, 0x7c, 0xad, 0x48, 0x00, 0x8e, 0x71, 0xd8, 0xe3,
    -	0xa0, 0x75, 0x9b, 0xa1, 0x27, 0x3c, 0xc1, 0x2b, 0xbc, 0x18, 0x4b, 0x38, 0xfa, 0x38, 0x8c, 0xf3,
    -	0x7a, 0x81, 0xdf, 0x74, 0x36, 0xb9, 0x4a, 0xb0, 0x57, 0x85, 0xd7, 0x19, 0x5f, 0x49, 0xc0, 0x0e,
    -	0xf6, 0x4a, 0xc7, 0x92, 0x65, 0xac, 0xdb, 0x29, 0x2a, 0xcc, 0xf2, 0x8f, 0x37, 0x42, 0xef, 0x8c,
    -	0x94, 0xc1, 0x60, 0x0c, 0xc2, 0x3a, 0x9e, 0xfd, 0xcf, 0x16, 0x4c, 0x68, 0x53, 0xd5, 0x75, 0x8e,
    -	0x0d, 0x63, 0x90, 0x0a, 0x5d, 0x0c, 0xd2, 0xe1, 0xa2, 0x3d, 0x64, 0xce, 0x70, 0xcf, 0x03, 0x9a,
    -	0x61, 0xfb, 0x33, 0x80, 0xd2, 0xf9, 0x6b, 0xd1, 0x9b, 0xdc, 0x90, 0xdf, 0x0d, 0x48, 0xad, 0x9d,
    -	0xc2, 0x5f, 0x8f, 0x9c, 0x23, 0x3d, 0x57, 0x79, 0x2d, 0xac, 0xea, 0xdb, 0x3f, 0xde, 0x03, 0xe3,
    -	0xc9, 0x58, 0x1d, 0xe8, 0x2a, 0xf4, 0x71, 0x2e, 0x5d, 0x90, 0x6f, 0x63, 0x4f, 0xa6, 0x45, 0xf8,
    -	0xe0, 0xf9, 0x6f, 0x38, 0x77, 0x2f, 0xea, 0xa3, 0x77, 0x60, 0xa8, 0xe6, 0xdf, 0xf1, 0xee, 0x38,
    -	0x41, 0x6d, 0xb6, 0xbc, 0x2c, 0x4e, 0x88, 0x4c, 0x01, 0xd4, 0x42, 0x8c, 0xa6, 0x47, 0x0d, 0x61,
    -	0xb6, 0x13, 0x31, 0x08, 0xeb, 0xe4, 0xd0, 0x1a, 0x4b, 0xc9, 0xb4, 0xe1, 0x6e, 0xae, 0x38, 0xcd,
    -	0x76, 0x5e, 0x5d, 0xf3, 0x12, 0x49, 0xa3, 0x3c, 0x22, 0xf2, 0x36, 0x71, 0x00, 0x8e, 0x09, 0xa1,
    -	0xcf, 0xc1, 0x64, 0x98, 0xa3, 0x12, 0xcb, 0x4b, 0x67, 0xde, 0x4e, 0x4b, 0xc4, 0x85, 0x29, 0x59,
    -	0xca, 0xb3, 0xac, 0x66, 0xd0, 0x5d, 0x40, 0x42, 0xf4, 0xbc, 0x16, 0xb4, 0xc2, 0x68, 0xae, 0xe5,
    -	0xd5, 0xea, 0x32, 0x65, 0xd3, 0x87, 0xb3, 0xe5, 0x04, 0x49, 0x6c, 0xad, 0x6d, 0x16, 0x12, 0x38,
    -	0x8d, 0x81, 0x33, 0xda, 0xb0, 0xbf, 0xd0, 0x03, 0xd3, 0x32, 0x61, 0x74, 0x86, 0xf7, 0xca, 0xe7,
    -	0xad, 0x84, 0xfb, 0xca, 0x2b, 0xf9, 0x07, 0xfd, 0x43, 0x73, 0x62, 0xf9, 0x52, 0xda, 0x89, 0xe5,
    -	0xb5, 0x43, 0x76, 0xe3, 0x81, 0xb9, 0xb2, 0x7c, 0xcf, 0xfa, 0x9f, 0xec, 0x1f, 0x03, 0xe3, 0x6a,
    -	0x46, 0x98, 0xc7, 0x5b, 0x2f, 0x4b, 0xd5, 0x51, 0xce, 0xf3, 0xff, 0xaa, 0xc0, 0x31, 0x2e, 0xfb,
    -	0x61, 0x19, 0x95, 0x9d, 0x9d, 0xb3, 0x8a, 0x0e, 0xa5, 0x49, 0x1a, 0xcd, 0x68, 0x77, 0xc1, 0x0d,
    -	0x44, 0x8f, 0x33, 0x69, 0x2e, 0x0a, 0x9c, 0x34, 0x4d, 0x09, 0xc1, 0x8a, 0x0e, 0xda, 0x81, 0x89,
    -	0x4d, 0x16, 0xf1, 0x49, 0xcb, 0xdd, 0x2c, 0xce, 0x85, 0xcc, 0x7d, 0x7b, 0x65, 0x7e, 0x31, 0x3f,
    -	0xd1, 0x33, 0x7f, 0xfc, 0xa5, 0x50, 0x70, 0xba, 0x09, 0xba, 0x35, 0x8e, 0x39, 0x77, 0xc2, 0xc5,
    -	0xba, 0x13, 0x46, 0x6e, 0x75, 0xae, 0xee, 0x57, 0xb7, 0x2b, 0x91, 0x1f, 0xc8, 0x04, 0x8f, 0x99,
    -	0x6f, 0xaf, 0xd9, 0x5b, 0x95, 0x14, 0xbe, 0xd1, 0xfc, 0xd4, 0xfe, 0x5e, 0xe9, 0x58, 0x16, 0x16,
    -	0xce, 0x6c, 0x0b, 0xad, 0x42, 0xff, 0xa6, 0x1b, 0x61, 0xd2, 0xf4, 0xc5, 0x69, 0x91, 0x79, 0x14,
    -	0x5e, 0xe1, 0x28, 0x46, 0x4b, 0x2c, 0x22, 0x95, 0x00, 0x60, 0x49, 0x04, 0xbd, 0xa9, 0x2e, 0x81,
    -	0xbe, 0x7c, 0x01, 0x6c, 0xda, 0xf6, 0x2e, 0xf3, 0x1a, 0x78, 0x1d, 0x8a, 0xde, 0x46, 0xd8, 0x2e,
    -	0x16, 0xcf, 0xea, 0x92, 0x21, 0x3f, 0x9b, 0xeb, 0xa7, 0x4f, 0xe3, 0xd5, 0xa5, 0x0a, 0xa6, 0x15,
    -	0x99, 0xdb, 0x6b, 0x58, 0x0d, 0x5d, 0x91, 0x2c, 0x2a, 0xd3, 0x0b, 0x78, 0xb9, 0x32, 0x5f, 0x59,
    -	0x36, 0x68, 0xb0, 0xa8, 0x86, 0xac, 0x18, 0xf3, 0xea, 0xe8, 0x26, 0x0c, 0x6e, 0xf2, 0x83, 0x6f,
    -	0x23, 0x14, 0x49, 0xe3, 0x33, 0x2f, 0xa3, 0x2b, 0x12, 0xc9, 0xa0, 0xc7, 0xae, 0x0c, 0x05, 0xc2,
    -	0x31, 0x29, 0xf4, 0x05, 0x0b, 0x8e, 0x27, 0xb3, 0xee, 0x33, 0x67, 0x35, 0x61, 0xa6, 0x96, 0xe9,
    -	0x00, 0x50, 0xce, 0xaa, 0x60, 0x34, 0xc8, 0xd4, 0x2f, 0x99, 0x68, 0x38, 0xbb, 0x39, 0x3a, 0xd0,
    -	0xc1, 0xed, 0x5a, 0xbb, 0xfc, 0x42, 0x89, 0xc0, 0x44, 0x7c, 0xa0, 0xf1, 0xdc, 0x02, 0xa6, 0x15,
    -	0xd1, 0x1a, 0xc0, 0x46, 0x9d, 0x88, 0x88, 0x8f, 0xc2, 0x28, 0x2a, 0xf3, 0xf6, 0x5f, 0x52, 0x58,
    -	0x82, 0x0e, 0x7b, 0x89, 0xc6, 0xa5, 0x58, 0xa3, 0x43, 0x97, 0x52, 0xd5, 0xf5, 0x6a, 0x24, 0x60,
    -	0xca, 0xad, 0x9c, 0xa5, 0x34, 0xcf, 0x30, 0xd2, 0x4b, 0x89, 0x97, 0x63, 0x41, 0x81, 0xd1, 0x22,
    -	0xcd, 0xad, 0x8d, 0xb0, 0x5d, 0x26, 0x8b, 0x79, 0xd2, 0xdc, 0x4a, 0x2c, 0x28, 0x4e, 0x8b, 0x95,
    -	0x63, 0x41, 0x81, 0x6e, 0x99, 0x0d, 0xba, 0x81, 0x48, 0x30, 0x35, 0x96, 0xbf, 0x65, 0x96, 0x38,
    -	0x4a, 0x7a, 0xcb, 0x08, 0x00, 0x96, 0x44, 0xd0, 0xa7, 0x4d, 0x6e, 0x67, 0x9c, 0xd1, 0x7c, 0xa6,
    -	0x03, 0xb7, 0x63, 0xd0, 0x6d, 0xcf, 0xef, 0xbc, 0x02, 0x85, 0x8d, 0x2a, 0x53, 0x8a, 0xe5, 0xe8,
    -	0x0c, 0x96, 0xe6, 0x0d, 0x6a, 0x2c, 0x32, 0xfc, 0xd2, 0x3c, 0x2e, 0x6c, 0x54, 0xe9, 0xd2, 0x77,
    -	0xee, 0xb5, 0x02, 0xb2, 0xe4, 0xd6, 0x89, 0xc8, 0x6a, 0x91, 0xb9, 0xf4, 0x67, 0x25, 0x52, 0x7a,
    -	0xe9, 0x2b, 0x10, 0x8e, 0x49, 0x51, 0xba, 0x31, 0x0f, 0x36, 0x99, 0x4f, 0x57, 0xb1, 0x5a, 0x69,
    -	0xba, 0x99, 0x5c, 0xd8, 0x36, 0x8c, 0xec, 0x84, 0xcd, 0x2d, 0x22, 0x4f, 0x45, 0xa6, 0xae, 0xcb,
    -	0x89, 0x54, 0x71, 0x53, 0x20, 0xba, 0x41, 0xd4, 0x72, 0xea, 0xa9, 0x83, 0x9c, 0x89, 0x56, 0x6e,
    -	0xea, 0xc4, 0xb0, 0x49, 0x9b, 0x2e, 0x84, 0x77, 0x79, 0x38, 0x39, 0xa6, 0xb8, 0xcb, 0x59, 0x08,
    -	0x19, 0x11, 0xe7, 0xf8, 0x42, 0x10, 0x00, 0x2c, 0x89, 0xa8, 0xc1, 0x66, 0x17, 0xd0, 0x89, 0x0e,
    -	0x83, 0x9d, 0xea, 0x6f, 0x3c, 0xd8, 0xec, 0xc2, 0x89, 0x49, 0xb1, 0x8b, 0xa6, 0xb9, 0xe5, 0x47,
    -	0xbe, 0x97, 0xb8, 0xe4, 0x4e, 0xe6, 0x5f, 0x34, 0xe5, 0x0c, 0xfc, 0xf4, 0x45, 0x93, 0x85, 0x85,
    -	0x33, 0xdb, 0xa2, 0x1f, 0xd7, 0x94, 0x91, 0x01, 0x45, 0xe6, 0x8d, 0xa7, 0x72, 0x02, 0x6b, 0xa6,
    -	0xc3, 0x07, 0xf2, 0x8f, 0x53, 0x20, 0x1c, 0x93, 0x42, 0x35, 0x18, 0x6d, 0x1a, 0x11, 0x67, 0x59,
    -	0x06, 0x91, 0x1c, 0xbe, 0x20, 0x2b, 0x36, 0x2d, 0x97, 0x10, 0x99, 0x10, 0x9c, 0xa0, 0xc9, 0x2c,
    -	0xf7, 0xb8, 0xab, 0x1f, 0x4b, 0x30, 0x92, 0x33, 0xd5, 0x19, 0xde, 0x80, 0x7c, 0xaa, 0x05, 0x00,
    -	0x4b, 0x22, 0x74, 0x34, 0x84, 0x83, 0x9a, 0x1f, 0xb2, 0x3c, 0x3d, 0x79, 0x0a, 0xf6, 0x2c, 0x35,
    -	0x91, 0x0c, 0xb3, 0x2e, 0x40, 0x38, 0x26, 0x45, 0x4f, 0x72, 0x7a, 0xe1, 0x9d, 0xce, 0x3f, 0xc9,
    -	0x93, 0xd7, 0x1d, 0x3b, 0xc9, 0xe9, 0x65, 0x57, 0x14, 0x57, 0x9d, 0x8a, 0x0a, 0xce, 0x72, 0x8c,
    -	0xe4, 0xf4, 0x4b, 0x85, 0x15, 0x4f, 0xf7, 0x4b, 0x81, 0x70, 0x4c, 0x8a, 0x5d, 0xc5, 0x2c, 0x34,
    -	0xdd, 0xd9, 0x36, 0x57, 0x31, 0x45, 0xc8, 0xb8, 0x8a, 0xb5, 0xd0, 0x75, 0xf6, 0x8f, 0x17, 0xe0,
    -	0x6c, 0xfb, 0x7d, 0x1b, 0xeb, 0xd0, 0xca, 0xb1, 0xcd, 0x52, 0x42, 0x87, 0xc6, 0x25, 0x3a, 0x31,
    -	0x56, 0xd7, 0x01, 0x87, 0xaf, 0xc0, 0x84, 0x72, 0x47, 0xac, 0xbb, 0xd5, 0x5d, 0x2d, 0xb1, 0xa8,
    -	0x0a, 0xcd, 0x53, 0x49, 0x22, 0xe0, 0x74, 0x1d, 0x34, 0x0b, 0x63, 0x46, 0xe1, 0xf2, 0x82, 0x78,
    -	0xfe, 0xc7, 0xd9, 0x31, 0x4c, 0x30, 0x4e, 0xe2, 0xdb, 0xbf, 0x66, 0xc1, 0xc9, 0x9c, 0x3c, 0xf3,
    -	0x5d, 0xc7, 0xd3, 0xdd, 0x80, 0xb1, 0xa6, 0x59, 0xb5, 0x43, 0x08, 0x70, 0x23, 0x9b, 0xbd, 0xea,
    -	0x6b, 0x02, 0x80, 0x93, 0x44, 0xed, 0x5f, 0x29, 0xc0, 0x99, 0xb6, 0xf6, 0xf5, 0x08, 0xc3, 0x89,
    -	0xcd, 0x46, 0xe8, 0xcc, 0x07, 0xa4, 0x46, 0xbc, 0xc8, 0x75, 0xea, 0x95, 0x26, 0xa9, 0x6a, 0x5a,
    -	0x50, 0x66, 0xa8, 0x7e, 0x65, 0xa5, 0x32, 0x9b, 0xc6, 0xc0, 0x39, 0x35, 0xd1, 0x12, 0xa0, 0x34,
    -	0x44, 0xcc, 0x30, 0x7b, 0xe2, 0xa6, 0xe9, 0xe1, 0x8c, 0x1a, 0xe8, 0x65, 0x18, 0x51, 0x76, 0xfb,
    -	0xda, 0x8c, 0xb3, 0x0b, 0x02, 0xeb, 0x00, 0x6c, 0xe2, 0xa1, 0x4b, 0x3c, 0x6d, 0x92, 0x48, 0xb0,
    -	0x25, 0x54, 0xa6, 0x63, 0x32, 0x27, 0x92, 0x28, 0xc6, 0x3a, 0xce, 0xdc, 0xe5, 0x3f, 0xfd, 0xf6,
    -	0xd9, 0x0f, 0xfd, 0xc5, 0xb7, 0xcf, 0x7e, 0xe8, 0xaf, 0xbe, 0x7d, 0xf6, 0x43, 0x3f, 0xb4, 0x7f,
    -	0xd6, 0xfa, 0xd3, 0xfd, 0xb3, 0xd6, 0x5f, 0xec, 0x9f, 0xb5, 0xfe, 0x6a, 0xff, 0xac, 0xf5, 0xff,
    -	0xee, 0x9f, 0xb5, 0xbe, 0xfc, 0xb7, 0x67, 0x3f, 0xf4, 0x36, 0x8a, 0x23, 0x54, 0x5f, 0xa4, 0xb3,
    -	0x73, 0x71, 0xe7, 0xd2, 0x7f, 0x0a, 0x00, 0x00, 0xff, 0xff, 0x60, 0x45, 0x7a, 0xd6, 0xa3, 0x24,
    -	0x01, 0x00,
    +	// 16665 bytes of a gzipped FileDescriptorProto
    +	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0xbd, 0x5b, 0x90, 0x5c, 0x49,
    +	0x76, 0x18, 0xb6, 0xb7, 0xaa, 0x9f, 0xa7, 0xdf, 0x89, 0x57, 0xa1, 0x07, 0x40, 0x61, 0xee, 0xcc,
    +	0x60, 0x30, 0x3b, 0x33, 0x8d, 0xc5, 0x3c, 0x76, 0xb1, 0x33, 0xb3, 0xc3, 0xe9, 0x27, 0xd0, 0x03,
    +	0x74, 0xa3, 0x26, 0xab, 0x01, 0xec, 0x63, 0x76, 0xb5, 0x17, 0x55, 0xd9, 0xdd, 0x77, 0xbb, 0xea,
    +	0xde, 0x9a, 0x7b, 0x6f, 0x35, 0xd0, 0x30, 0x15, 0xa4, 0x56, 0xe6, 0x4a, 0x4b, 0xd2, 0x11, 0x1b,
    +	0x0a, 0x4b, 0x72, 0x90, 0x0a, 0x7e, 0xe8, 0x45, 0xd2, 0xb4, 0x64, 0x52, 0xa4, 0x45, 0x59, 0x14,
    +	0x29, 0xda, 0x96, 0x23, 0x68, 0x7f, 0xc8, 0x14, 0x23, 0xcc, 0x65, 0x58, 0xe1, 0x96, 0xd9, 0xb6,
    +	0x42, 0xc1, 0x0f, 0x53, 0x0a, 0xda, 0x1f, 0x76, 0x87, 0x6c, 0x2a, 0xf2, 0x79, 0x33, 0xef, 0xab,
    +	0xaa, 0x31, 0x40, 0xef, 0x70, 0x63, 0xfe, 0xaa, 0xf2, 0x9c, 0x3c, 0x99, 0x37, 0x1f, 0x27, 0x4f,
    +	0x9e, 0x73, 0xf2, 0x1c, 0xb0, 0x77, 0xae, 0x85, 0x73, 0xae, 0x7f, 0xc5, 0xe9, 0xb8, 0x57, 0x1a,
    +	0x7e, 0x40, 0xae, 0xec, 0x5e, 0xbd, 0xb2, 0x45, 0x3c, 0x12, 0x38, 0x11, 0x69, 0xce, 0x75, 0x02,
    +	0x3f, 0xf2, 0x11, 0xe2, 0x38, 0x73, 0x4e, 0xc7, 0x9d, 0xa3, 0x38, 0x73, 0xbb, 0x57, 0x67, 0x5f,
    +	0xdd, 0x72, 0xa3, 0xed, 0xee, 0xfd, 0xb9, 0x86, 0xdf, 0xbe, 0xb2, 0xe5, 0x6f, 0xf9, 0x57, 0x18,
    +	0xea, 0xfd, 0xee, 0x26, 0xfb, 0xc7, 0xfe, 0xb0, 0x5f, 0x9c, 0xc4, 0xec, 0x1b, 0x71, 0x33, 0x6d,
    +	0xa7, 0xb1, 0xed, 0x7a, 0x24, 0xd8, 0xbb, 0xd2, 0xd9, 0xd9, 0x62, 0xed, 0x06, 0x24, 0xf4, 0xbb,
    +	0x41, 0x83, 0x24, 0x1b, 0x2e, 0xac, 0x15, 0x5e, 0x69, 0x93, 0xc8, 0xc9, 0xe8, 0xee, 0xec, 0x95,
    +	0xbc, 0x5a, 0x41, 0xd7, 0x8b, 0xdc, 0x76, 0xba, 0x99, 0xcf, 0xf7, 0xaa, 0x10, 0x36, 0xb6, 0x49,
    +	0xdb, 0x49, 0xd5, 0x7b, 0x3d, 0xaf, 0x5e, 0x37, 0x72, 0x5b, 0x57, 0x5c, 0x2f, 0x0a, 0xa3, 0x20,
    +	0x59, 0xc9, 0xfe, 0xbe, 0x05, 0x17, 0xe7, 0xef, 0xd5, 0x97, 0x5b, 0x4e, 0x18, 0xb9, 0x8d, 0x85,
    +	0x96, 0xdf, 0xd8, 0xa9, 0x47, 0x7e, 0x40, 0xee, 0xfa, 0xad, 0x6e, 0x9b, 0xd4, 0xd9, 0x40, 0xa0,
    +	0x57, 0x60, 0x64, 0x97, 0xfd, 0x5f, 0x5d, 0xaa, 0x58, 0x17, 0xad, 0xcb, 0xa3, 0x0b, 0xd3, 0xbf,
    +	0xb3, 0x5f, 0xfd, 0xcc, 0xc1, 0x7e, 0x75, 0xe4, 0xae, 0x28, 0xc7, 0x0a, 0x03, 0x5d, 0x82, 0xa1,
    +	0xcd, 0x70, 0x63, 0xaf, 0x43, 0x2a, 0x25, 0x86, 0x3b, 0x29, 0x70, 0x87, 0x56, 0xea, 0xb4, 0x14,
    +	0x0b, 0x28, 0xba, 0x02, 0xa3, 0x1d, 0x27, 0x88, 0xdc, 0xc8, 0xf5, 0xbd, 0x4a, 0xf9, 0xa2, 0x75,
    +	0x79, 0x70, 0x61, 0x46, 0xa0, 0x8e, 0xd6, 0x24, 0x00, 0xc7, 0x38, 0xb4, 0x1b, 0x01, 0x71, 0x9a,
    +	0xb7, 0xbd, 0xd6, 0x5e, 0x65, 0xe0, 0xa2, 0x75, 0x79, 0x24, 0xee, 0x06, 0x16, 0xe5, 0x58, 0x61,
    +	0xd8, 0x3f, 0x53, 0x82, 0x91, 0xf9, 0xcd, 0x4d, 0xd7, 0x73, 0xa3, 0x3d, 0x74, 0x17, 0xc6, 0x3d,
    +	0xbf, 0x49, 0xe4, 0x7f, 0xf6, 0x15, 0x63, 0xaf, 0x5d, 0x9c, 0x4b, 0x2f, 0xa5, 0xb9, 0x75, 0x0d,
    +	0x6f, 0x61, 0xfa, 0x60, 0xbf, 0x3a, 0xae, 0x97, 0x60, 0x83, 0x0e, 0xc2, 0x30, 0xd6, 0xf1, 0x9b,
    +	0x8a, 0x6c, 0x89, 0x91, 0xad, 0x66, 0x91, 0xad, 0xc5, 0x68, 0x0b, 0x53, 0x07, 0xfb, 0xd5, 0x31,
    +	0xad, 0x00, 0xeb, 0x44, 0xd0, 0x7d, 0x98, 0xa2, 0x7f, 0xbd, 0xc8, 0x55, 0x74, 0xcb, 0x8c, 0xee,
    +	0x73, 0x79, 0x74, 0x35, 0xd4, 0x85, 0x13, 0x07, 0xfb, 0xd5, 0xa9, 0x44, 0x21, 0x4e, 0x12, 0xb4,
    +	0x7f, 0xda, 0x82, 0xa9, 0xf9, 0x4e, 0x67, 0x3e, 0x68, 0xfb, 0x41, 0x2d, 0xf0, 0x37, 0xdd, 0x16,
    +	0x41, 0x5f, 0x80, 0x81, 0x88, 0xce, 0x1a, 0x9f, 0xe1, 0xe7, 0xc4, 0xd0, 0x0e, 0xd0, 0xb9, 0x3a,
    +	0xdc, 0xaf, 0x9e, 0x48, 0xa0, 0xb3, 0xa9, 0x64, 0x15, 0xd0, 0x7b, 0x30, 0xdd, 0xf2, 0x1b, 0x4e,
    +	0x6b, 0xdb, 0x0f, 0x23, 0x01, 0x15, 0x53, 0x7f, 0xf2, 0x60, 0xbf, 0x3a, 0x7d, 0x2b, 0x01, 0xc3,
    +	0x29, 0x6c, 0xfb, 0x11, 0x4c, 0xce, 0x47, 0x91, 0xd3, 0xd8, 0x26, 0x4d, 0xbe, 0xa0, 0xd0, 0x1b,
    +	0x30, 0xe0, 0x39, 0x6d, 0xd9, 0x99, 0x8b, 0xb2, 0x33, 0xeb, 0x4e, 0x9b, 0x76, 0x66, 0xfa, 0x8e,
    +	0xe7, 0x7e, 0xd4, 0x15, 0x8b, 0x94, 0x96, 0x61, 0x86, 0x8d, 0x5e, 0x03, 0x68, 0x92, 0x5d, 0xb7,
    +	0x41, 0x6a, 0x4e, 0xb4, 0x2d, 0xfa, 0x80, 0x44, 0x5d, 0x58, 0x52, 0x10, 0xac, 0x61, 0xd9, 0x0f,
    +	0x61, 0x74, 0x7e, 0xd7, 0x77, 0x9b, 0x35, 0xbf, 0x19, 0xa2, 0x1d, 0x98, 0xea, 0x04, 0x64, 0x93,
    +	0x04, 0xaa, 0xa8, 0x62, 0x5d, 0x2c, 0x5f, 0x1e, 0x7b, 0xed, 0x72, 0xe6, 0xd8, 0x9b, 0xa8, 0xcb,
    +	0x5e, 0x14, 0xec, 0x2d, 0x9c, 0x11, 0xed, 0x4d, 0x25, 0xa0, 0x38, 0x49, 0xd9, 0xfe, 0x67, 0x25,
    +	0x38, 0x35, 0xff, 0xa8, 0x1b, 0x90, 0x25, 0x37, 0xdc, 0x49, 0x6e, 0xb8, 0xa6, 0x1b, 0xee, 0xac,
    +	0xc7, 0x23, 0xa0, 0x56, 0xfa, 0x92, 0x28, 0xc7, 0x0a, 0x03, 0xbd, 0x0a, 0xc3, 0xf4, 0xf7, 0x1d,
    +	0xbc, 0x2a, 0x3e, 0xf9, 0x84, 0x40, 0x1e, 0x5b, 0x72, 0x22, 0x67, 0x89, 0x83, 0xb0, 0xc4, 0x41,
    +	0x6b, 0x30, 0xd6, 0x60, 0xfc, 0x61, 0x6b, 0xcd, 0x6f, 0x12, 0xb6, 0xb6, 0x46, 0x17, 0x5e, 0xa6,
    +	0xe8, 0x8b, 0x71, 0xf1, 0xe1, 0x7e, 0xb5, 0xc2, 0xfb, 0x26, 0x48, 0x68, 0x30, 0xac, 0xd7, 0x47,
    +	0xb6, 0xda, 0xee, 0x03, 0x8c, 0x12, 0x64, 0x6c, 0xf5, 0xcb, 0xda, 0xce, 0x1d, 0x64, 0x3b, 0x77,
    +	0x3c, 0x7b, 0xd7, 0xa2, 0xab, 0x30, 0xb0, 0xe3, 0x7a, 0xcd, 0xca, 0x10, 0xa3, 0x75, 0x9e, 0xce,
    +	0xf9, 0x4d, 0xd7, 0x6b, 0x1e, 0xee, 0x57, 0x67, 0x8c, 0xee, 0xd0, 0x42, 0xcc, 0x50, 0xed, 0xff,
    +	0xcb, 0x82, 0x2a, 0x83, 0xad, 0xb8, 0x2d, 0x52, 0x23, 0x41, 0xe8, 0x86, 0x11, 0xf1, 0x22, 0x63,
    +	0x40, 0x5f, 0x03, 0x08, 0x49, 0x23, 0x20, 0x91, 0x36, 0xa4, 0x6a, 0x61, 0xd4, 0x15, 0x04, 0x6b,
    +	0x58, 0x94, 0x3f, 0x85, 0xdb, 0x4e, 0xc0, 0xd6, 0x97, 0x18, 0x58, 0xc5, 0x9f, 0xea, 0x12, 0x80,
    +	0x63, 0x1c, 0x83, 0x3f, 0x95, 0x7b, 0xf1, 0x27, 0xf4, 0x25, 0x98, 0x8a, 0x1b, 0x0b, 0x3b, 0x4e,
    +	0x43, 0x0e, 0x20, 0xdb, 0xc1, 0x75, 0x13, 0x84, 0x93, 0xb8, 0xf6, 0x7f, 0x6e, 0x89, 0xc5, 0x43,
    +	0xbf, 0xfa, 0x13, 0xfe, 0xad, 0xf6, 0x3f, 0xb2, 0x60, 0x78, 0xc1, 0xf5, 0x9a, 0xae, 0xb7, 0x85,
    +	0xbe, 0x09, 0x23, 0xf4, 0xa8, 0x6c, 0x3a, 0x91, 0x23, 0xd8, 0xf0, 0xe7, 0xb4, 0xbd, 0xa5, 0x4e,
    +	0xae, 0xb9, 0xce, 0xce, 0x16, 0x2d, 0x08, 0xe7, 0x28, 0x36, 0xdd, 0x6d, 0xb7, 0xef, 0x7f, 0x8b,
    +	0x34, 0xa2, 0x35, 0x12, 0x39, 0xf1, 0xe7, 0xc4, 0x65, 0x58, 0x51, 0x45, 0x37, 0x61, 0x28, 0x72,
    +	0x82, 0x2d, 0x12, 0x09, 0x7e, 0x9c, 0xc9, 0x37, 0x79, 0x4d, 0x4c, 0x77, 0x24, 0xf1, 0x1a, 0x24,
    +	0x3e, 0xa5, 0x36, 0x58, 0x55, 0x2c, 0x48, 0xd8, 0xff, 0xdf, 0x30, 0x9c, 0x5d, 0xac, 0xaf, 0xe6,
    +	0xac, 0xab, 0x4b, 0x30, 0xd4, 0x0c, 0xdc, 0x5d, 0x12, 0x88, 0x71, 0x56, 0x54, 0x96, 0x58, 0x29,
    +	0x16, 0x50, 0x74, 0x0d, 0xc6, 0xf9, 0xf9, 0x78, 0xc3, 0xf1, 0x9a, 0x31, 0x7b, 0x14, 0xd8, 0xe3,
    +	0x77, 0x35, 0x18, 0x36, 0x30, 0x8f, 0xb8, 0xa8, 0x2e, 0x25, 0x36, 0x63, 0xde, 0xd9, 0xfb, 0x5d,
    +	0x0b, 0xa6, 0x79, 0x33, 0xf3, 0x51, 0x14, 0xb8, 0xf7, 0xbb, 0x11, 0x09, 0x2b, 0x83, 0x8c, 0xd3,
    +	0x2d, 0x66, 0x8d, 0x56, 0xee, 0x08, 0xcc, 0xdd, 0x4d, 0x50, 0xe1, 0x4c, 0xb0, 0x22, 0xda, 0x9d,
    +	0x4e, 0x82, 0x71, 0xaa, 0x59, 0xf4, 0x17, 0x2d, 0x98, 0x6d, 0xf8, 0x5e, 0x14, 0xf8, 0xad, 0x16,
    +	0x09, 0x6a, 0xdd, 0xfb, 0x2d, 0x37, 0xdc, 0xe6, 0xeb, 0x14, 0x93, 0x4d, 0xc6, 0x09, 0x72, 0xe6,
    +	0x50, 0x21, 0x89, 0x39, 0xbc, 0x70, 0xb0, 0x5f, 0x9d, 0x5d, 0xcc, 0x25, 0x85, 0x0b, 0x9a, 0x41,
    +	0x3b, 0x80, 0xe8, 0xc9, 0x5e, 0x8f, 0x9c, 0x2d, 0x12, 0x37, 0x3e, 0xdc, 0x7f, 0xe3, 0xa7, 0x0f,
    +	0xf6, 0xab, 0x68, 0x3d, 0x45, 0x02, 0x67, 0x90, 0x45, 0x1f, 0xc1, 0x49, 0x5a, 0x9a, 0xfa, 0xd6,
    +	0x91, 0xfe, 0x9b, 0xab, 0x1c, 0xec, 0x57, 0x4f, 0xae, 0x67, 0x10, 0xc1, 0x99, 0xa4, 0xd1, 0x8f,
    +	0x5b, 0x70, 0x36, 0xfe, 0xfc, 0xe5, 0x87, 0x1d, 0xc7, 0x6b, 0xc6, 0x0d, 0x8f, 0xf6, 0xdf, 0x30,
    +	0xe5, 0xc9, 0x67, 0x17, 0xf3, 0x28, 0xe1, 0xfc, 0x46, 0x90, 0x07, 0x27, 0x68, 0xd7, 0x92, 0x6d,
    +	0x43, 0xff, 0x6d, 0x9f, 0x39, 0xd8, 0xaf, 0x9e, 0x58, 0x4f, 0xd3, 0xc0, 0x59, 0x84, 0x67, 0x17,
    +	0xe1, 0x54, 0xe6, 0xea, 0x44, 0xd3, 0x50, 0xde, 0x21, 0x5c, 0x08, 0x1c, 0xc5, 0xf4, 0x27, 0x3a,
    +	0x09, 0x83, 0xbb, 0x4e, 0xab, 0x2b, 0x36, 0x26, 0xe6, 0x7f, 0xde, 0x2a, 0x5d, 0xb3, 0xec, 0xff,
    +	0xbe, 0x0c, 0x53, 0x8b, 0xf5, 0xd5, 0xc7, 0xda, 0xf5, 0xfa, 0xb1, 0x57, 0x2a, 0x3c, 0xf6, 0xe2,
    +	0x43, 0xb4, 0x9c, 0x7b, 0x88, 0xfe, 0x58, 0xc6, 0x96, 0x1d, 0x60, 0x5b, 0xf6, 0x8b, 0x39, 0x5b,
    +	0xf6, 0x09, 0x6f, 0xd4, 0xdd, 0x9c, 0x55, 0x3b, 0xc8, 0x26, 0x30, 0x53, 0x42, 0x62, 0xb2, 0x5f,
    +	0x92, 0xd5, 0x1e, 0x71, 0xe9, 0x3e, 0x99, 0x79, 0x6c, 0xc0, 0xf8, 0xa2, 0xd3, 0x71, 0xee, 0xbb,
    +	0x2d, 0x37, 0x72, 0x49, 0x88, 0x5e, 0x84, 0xb2, 0xd3, 0x6c, 0x32, 0xe9, 0x6e, 0x74, 0xe1, 0xd4,
    +	0xc1, 0x7e, 0xb5, 0x3c, 0xdf, 0xa4, 0x62, 0x06, 0x28, 0xac, 0x3d, 0x4c, 0x31, 0xd0, 0x67, 0x61,
    +	0xa0, 0x19, 0xf8, 0x9d, 0x4a, 0x89, 0x61, 0xd2, 0x5d, 0x3e, 0xb0, 0x14, 0xf8, 0x9d, 0x04, 0x2a,
    +	0xc3, 0xb1, 0x7f, 0xbb, 0x04, 0xe7, 0x16, 0x49, 0x67, 0x7b, 0xa5, 0x9e, 0x73, 0x5e, 0x5c, 0x86,
    +	0x91, 0xb6, 0xef, 0xb9, 0x91, 0x1f, 0x84, 0xa2, 0x69, 0xb6, 0x22, 0xd6, 0x44, 0x19, 0x56, 0x50,
    +	0x74, 0x11, 0x06, 0x3a, 0xb1, 0x10, 0x3b, 0x2e, 0x05, 0x60, 0x26, 0xbe, 0x32, 0x08, 0xc5, 0xe8,
    +	0x86, 0x24, 0x10, 0x2b, 0x46, 0x61, 0xdc, 0x09, 0x49, 0x80, 0x19, 0x24, 0x96, 0x04, 0xa8, 0x8c,
    +	0x20, 0x4e, 0x84, 0x84, 0x24, 0x40, 0x21, 0x58, 0xc3, 0x42, 0x35, 0x18, 0x0d, 0x13, 0x33, 0xdb,
    +	0xd7, 0xd6, 0x9c, 0x60, 0xa2, 0x82, 0x9a, 0xc9, 0x98, 0x88, 0x71, 0x82, 0x0d, 0xf5, 0x14, 0x15,
    +	0x7e, 0xa3, 0x04, 0x88, 0x0f, 0xe1, 0x9f, 0xb1, 0x81, 0xbb, 0x93, 0x1e, 0xb8, 0xfe, 0xb7, 0xc4,
    +	0x93, 0x1a, 0xbd, 0xff, 0xdb, 0x82, 0x73, 0x8b, 0xae, 0xd7, 0x24, 0x41, 0xce, 0x02, 0x7c, 0x3a,
    +	0x57, 0xf9, 0xa3, 0x09, 0x29, 0xc6, 0x12, 0x1b, 0x78, 0x02, 0x4b, 0xcc, 0xfe, 0xb7, 0x16, 0x20,
    +	0xfe, 0xd9, 0x9f, 0xb8, 0x8f, 0xbd, 0x93, 0xfe, 0xd8, 0x27, 0xb0, 0x2c, 0xec, 0x5b, 0x30, 0xb9,
    +	0xd8, 0x72, 0x89, 0x17, 0xad, 0xd6, 0x16, 0x7d, 0x6f, 0xd3, 0xdd, 0x42, 0x6f, 0xc1, 0x64, 0xe4,
    +	0xb6, 0x89, 0xdf, 0x8d, 0xea, 0xa4, 0xe1, 0x7b, 0xec, 0xe6, 0x6a, 0x5d, 0x1e, 0x5c, 0x40, 0x07,
    +	0xfb, 0xd5, 0xc9, 0x0d, 0x03, 0x82, 0x13, 0x98, 0xf6, 0xdf, 0xa5, 0x7c, 0xab, 0xd5, 0x0d, 0x23,
    +	0x12, 0x6c, 0x04, 0xdd, 0x30, 0x5a, 0xe8, 0x52, 0xd9, 0xb3, 0x16, 0xf8, 0xb4, 0x3b, 0xae, 0xef,
    +	0xa1, 0x73, 0xc6, 0x75, 0x7c, 0x44, 0x5e, 0xc5, 0xc5, 0xb5, 0x7b, 0x0e, 0x20, 0x74, 0xb7, 0x3c,
    +	0x12, 0x68, 0xd7, 0x87, 0x49, 0xb6, 0x55, 0x54, 0x29, 0xd6, 0x30, 0x50, 0x0b, 0x26, 0x5a, 0xce,
    +	0x7d, 0xd2, 0xaa, 0x93, 0x16, 0x69, 0x44, 0x7e, 0x20, 0xf4, 0x1b, 0xaf, 0xf7, 0x77, 0x0f, 0xb8,
    +	0xa5, 0x57, 0x5d, 0x98, 0x39, 0xd8, 0xaf, 0x4e, 0x18, 0x45, 0xd8, 0x24, 0x4e, 0x59, 0x87, 0xdf,
    +	0xa1, 0x5f, 0xe1, 0xb4, 0xf4, 0xcb, 0xe7, 0x6d, 0x51, 0x86, 0x15, 0x54, 0xb1, 0x8e, 0x81, 0x3c,
    +	0xd6, 0x61, 0xff, 0x4b, 0xba, 0xd0, 0xfc, 0x76, 0xc7, 0xf7, 0x88, 0x17, 0x2d, 0xfa, 0x5e, 0x93,
    +	0x6b, 0xa6, 0xde, 0x32, 0x54, 0x27, 0x97, 0x12, 0xaa, 0x93, 0xd3, 0xe9, 0x1a, 0x9a, 0xf6, 0xe4,
    +	0x8b, 0x30, 0x14, 0x46, 0x4e, 0xd4, 0x0d, 0xc5, 0xc0, 0x3d, 0x2b, 0x97, 0x5d, 0x9d, 0x95, 0x1e,
    +	0xee, 0x57, 0xa7, 0x54, 0x35, 0x5e, 0x84, 0x45, 0x05, 0xf4, 0x12, 0x0c, 0xb7, 0x49, 0x18, 0x3a,
    +	0x5b, 0x52, 0x6c, 0x98, 0x12, 0x75, 0x87, 0xd7, 0x78, 0x31, 0x96, 0x70, 0xf4, 0x1c, 0x0c, 0x92,
    +	0x20, 0xf0, 0x03, 0xf1, 0x6d, 0x13, 0x02, 0x71, 0x70, 0x99, 0x16, 0x62, 0x0e, 0xb3, 0xff, 0x27,
    +	0x0b, 0xa6, 0x54, 0x5f, 0x79, 0x5b, 0xc7, 0x70, 0x5d, 0xfb, 0x2a, 0x40, 0x43, 0x7e, 0x60, 0xc8,
    +	0x8e, 0xd9, 0xb1, 0xd7, 0x2e, 0x65, 0x4a, 0x34, 0xa9, 0x61, 0x8c, 0x29, 0xab, 0xa2, 0x10, 0x6b,
    +	0xd4, 0xec, 0xdf, 0xb4, 0xe0, 0x44, 0xe2, 0x8b, 0x6e, 0xb9, 0x61, 0x84, 0x3e, 0x4c, 0x7d, 0xd5,
    +	0x5c, 0x9f, 0x8b, 0xcf, 0x0d, 0xf9, 0x37, 0xa9, 0x3d, 0x2f, 0x4b, 0xb4, 0x2f, 0xba, 0x01, 0x83,
    +	0x6e, 0x44, 0xda, 0xf2, 0x63, 0x9e, 0x2b, 0xfc, 0x18, 0xde, 0xab, 0x78, 0x46, 0x56, 0x69, 0x4d,
    +	0xcc, 0x09, 0xd8, 0xbf, 0x5d, 0x86, 0x51, 0xbe, 0xbf, 0xd7, 0x9c, 0xce, 0x31, 0xcc, 0xc5, 0xcb,
    +	0x30, 0xea, 0xb6, 0xdb, 0xdd, 0xc8, 0xb9, 0x2f, 0xce, 0xbd, 0x11, 0xce, 0x83, 0x56, 0x65, 0x21,
    +	0x8e, 0xe1, 0x68, 0x15, 0x06, 0x58, 0x57, 0xf8, 0x57, 0xbe, 0x98, 0xfd, 0x95, 0xa2, 0xef, 0x73,
    +	0x4b, 0x4e, 0xe4, 0x70, 0x91, 0x53, 0xed, 0x2b, 0x5a, 0x84, 0x19, 0x09, 0xe4, 0x00, 0xdc, 0x77,
    +	0x3d, 0x27, 0xd8, 0xa3, 0x65, 0x95, 0x32, 0x23, 0xf8, 0x6a, 0x31, 0xc1, 0x05, 0x85, 0xcf, 0xc9,
    +	0xaa, 0x0f, 0x8b, 0x01, 0x58, 0x23, 0x3a, 0xfb, 0x05, 0x18, 0x55, 0xc8, 0x47, 0x91, 0x1c, 0x67,
    +	0xbf, 0x04, 0x53, 0x89, 0xb6, 0x7a, 0x55, 0x1f, 0xd7, 0x05, 0xcf, 0x7f, 0xcc, 0x58, 0x86, 0xe8,
    +	0xf5, 0xb2, 0xb7, 0x2b, 0xce, 0xa6, 0x47, 0x70, 0xb2, 0x95, 0xc1, 0xf2, 0xc5, 0xbc, 0xf6, 0x7f,
    +	0x44, 0x9c, 0x13, 0x9f, 0x7d, 0x32, 0x0b, 0x8a, 0x33, 0xdb, 0x30, 0x38, 0x62, 0xa9, 0x88, 0x23,
    +	0x52, 0x7e, 0x77, 0x52, 0x75, 0xfe, 0x26, 0xd9, 0x53, 0x4c, 0xf5, 0x07, 0xd9, 0xfd, 0xf3, 0x7c,
    +	0xf4, 0x39, 0xbb, 0x1c, 0x13, 0x04, 0xca, 0x37, 0xc9, 0x1e, 0x9f, 0x0a, 0xfd, 0xeb, 0xca, 0x85,
    +	0x5f, 0xf7, 0x2b, 0x16, 0x4c, 0xa8, 0xaf, 0x3b, 0x06, 0xbe, 0xb0, 0x60, 0xf2, 0x85, 0xf3, 0x85,
    +	0x0b, 0x3c, 0x87, 0x23, 0xfc, 0x46, 0x09, 0xce, 0x2a, 0x1c, 0x7a, 0x89, 0xe2, 0x7f, 0xc4, 0xaa,
    +	0xba, 0x02, 0xa3, 0x9e, 0x52, 0x27, 0x5a, 0xa6, 0x1e, 0x2f, 0x56, 0x26, 0xc6, 0x38, 0xf4, 0xc8,
    +	0xf3, 0xe2, 0x43, 0x7b, 0x5c, 0xd7, 0xb3, 0x8b, 0xc3, 0x7d, 0x01, 0xca, 0x5d, 0xb7, 0x29, 0x0e,
    +	0x98, 0xcf, 0xc9, 0xd1, 0xbe, 0xb3, 0xba, 0x74, 0xb8, 0x5f, 0x7d, 0x36, 0xcf, 0xe4, 0x44, 0x4f,
    +	0xb6, 0x70, 0xee, 0xce, 0xea, 0x12, 0xa6, 0x95, 0xd1, 0x3c, 0x4c, 0x49, 0xab, 0xda, 0x5d, 0x2a,
    +	0x97, 0xfa, 0x9e, 0x38, 0x87, 0x94, 0xb2, 0x1c, 0x9b, 0x60, 0x9c, 0xc4, 0x47, 0x4b, 0x30, 0xbd,
    +	0xd3, 0xbd, 0x4f, 0x5a, 0x24, 0xe2, 0x1f, 0x7c, 0x93, 0x70, 0x55, 0xf2, 0x68, 0x7c, 0x85, 0xbd,
    +	0x99, 0x80, 0xe3, 0x54, 0x0d, 0xfb, 0x4f, 0xd9, 0x79, 0x20, 0x46, 0x4f, 0x93, 0x6f, 0x7e, 0x90,
    +	0xcb, 0xb9, 0x9f, 0x55, 0x71, 0x93, 0xec, 0x6d, 0xf8, 0x54, 0x0e, 0xc9, 0x5e, 0x15, 0xc6, 0x9a,
    +	0x1f, 0x28, 0x5c, 0xf3, 0xbf, 0x56, 0x82, 0x53, 0x6a, 0x04, 0x0c, 0x69, 0xf9, 0xcf, 0xfa, 0x18,
    +	0x5c, 0x85, 0xb1, 0x26, 0xd9, 0x74, 0xba, 0xad, 0x48, 0xd9, 0x35, 0x06, 0xb9, 0xa9, 0x6d, 0x29,
    +	0x2e, 0xc6, 0x3a, 0xce, 0x11, 0x86, 0xed, 0x6f, 0x4d, 0xb2, 0x83, 0x38, 0x72, 0xe8, 0x1a, 0x57,
    +	0xbb, 0xc6, 0xca, 0xdd, 0x35, 0xcf, 0xc1, 0xa0, 0xdb, 0xa6, 0x82, 0x59, 0xc9, 0x94, 0xb7, 0x56,
    +	0x69, 0x21, 0xe6, 0x30, 0xf4, 0x02, 0x0c, 0x37, 0xfc, 0x76, 0xdb, 0xf1, 0x9a, 0xec, 0xc8, 0x1b,
    +	0x5d, 0x18, 0xa3, 0xb2, 0xdb, 0x22, 0x2f, 0xc2, 0x12, 0x46, 0x85, 0x6f, 0x27, 0xd8, 0xe2, 0xca,
    +	0x1e, 0x21, 0x7c, 0xcf, 0x07, 0x5b, 0x21, 0x66, 0xa5, 0xf4, 0xae, 0xfa, 0xc0, 0x0f, 0x76, 0x5c,
    +	0x6f, 0x6b, 0xc9, 0x0d, 0xc4, 0x96, 0x50, 0x67, 0xe1, 0x3d, 0x05, 0xc1, 0x1a, 0x16, 0x5a, 0x81,
    +	0xc1, 0x8e, 0x1f, 0x44, 0x61, 0x65, 0x88, 0x0d, 0xf7, 0xb3, 0x39, 0x8c, 0x88, 0x7f, 0x6d, 0xcd,
    +	0x0f, 0xa2, 0xf8, 0x03, 0xe8, 0xbf, 0x10, 0xf3, 0xea, 0xe8, 0x16, 0x0c, 0x13, 0x6f, 0x77, 0x25,
    +	0xf0, 0xdb, 0x95, 0x13, 0xf9, 0x94, 0x96, 0x39, 0x0a, 0x5f, 0x66, 0xb1, 0x8c, 0x2a, 0x8a, 0xb1,
    +	0x24, 0x81, 0xbe, 0x08, 0x65, 0xe2, 0xed, 0x56, 0x86, 0x19, 0xa5, 0xd9, 0x1c, 0x4a, 0x77, 0x9d,
    +	0x20, 0xe6, 0xf9, 0xcb, 0xde, 0x2e, 0xa6, 0x75, 0xd0, 0x57, 0x60, 0x54, 0x32, 0x8c, 0x50, 0x68,
    +	0x51, 0x33, 0x17, 0xac, 0x64, 0x33, 0x98, 0x7c, 0xd4, 0x75, 0x03, 0xd2, 0x26, 0x5e, 0x14, 0xc6,
    +	0x1c, 0x52, 0x42, 0x43, 0x1c, 0x53, 0x43, 0x0d, 0x18, 0x0f, 0x48, 0xe8, 0x3e, 0x22, 0x35, 0xbf,
    +	0xe5, 0x36, 0xf6, 0x2a, 0x67, 0x58, 0xf7, 0x5e, 0x2a, 0x1c, 0x32, 0xac, 0x55, 0x88, 0xb5, 0xfc,
    +	0x7a, 0x29, 0x36, 0x88, 0xa2, 0x0f, 0x60, 0x22, 0x20, 0x61, 0xe4, 0x04, 0x91, 0x68, 0xa5, 0xa2,
    +	0xac, 0x72, 0x13, 0x58, 0x07, 0xf0, 0xeb, 0x44, 0xdc, 0x4c, 0x0c, 0xc1, 0x26, 0x05, 0x14, 0x01,
    +	0x32, 0x0a, 0x70, 0xb7, 0x45, 0xc2, 0xca, 0xd9, 0x7c, 0x6b, 0x66, 0x92, 0x2c, 0xad, 0xb0, 0x30,
    +	0x2b, 0x3a, 0x8f, 0x70, 0x8a, 0x16, 0xce, 0xa0, 0x8f, 0xbe, 0x22, 0x0d, 0x1d, 0x6b, 0x7e, 0xd7,
    +	0x8b, 0xc2, 0xca, 0x28, 0x6b, 0x2f, 0xd3, 0x22, 0x7e, 0x37, 0xc6, 0x4b, 0x5a, 0x42, 0x78, 0x65,
    +	0x6c, 0x90, 0x42, 0x5f, 0x87, 0x09, 0xfe, 0x9f, 0x1b, 0x72, 0xc3, 0xca, 0x29, 0x46, 0xfb, 0x62,
    +	0x3e, 0x6d, 0x8e, 0xb8, 0x70, 0x4a, 0x10, 0x9f, 0xd0, 0x4b, 0x43, 0x6c, 0x52, 0x43, 0x18, 0x26,
    +	0x5a, 0xee, 0x2e, 0xf1, 0x48, 0x18, 0xd6, 0x02, 0xff, 0x3e, 0x11, 0x7a, 0xe9, 0xb3, 0xd9, 0x86,
    +	0x5f, 0xff, 0x3e, 0x11, 0x57, 0x4f, 0xbd, 0x0e, 0x36, 0x49, 0xa0, 0x3b, 0x30, 0x19, 0x10, 0xa7,
    +	0xe9, 0xc6, 0x44, 0xc7, 0x7a, 0x11, 0x65, 0xd7, 0x75, 0x6c, 0x54, 0xc2, 0x09, 0x22, 0xe8, 0x36,
    +	0x8c, 0xb3, 0x81, 0xef, 0x76, 0x38, 0xd1, 0xd3, 0xbd, 0x88, 0x32, 0x37, 0x86, 0xba, 0x56, 0x05,
    +	0x1b, 0x04, 0xd0, 0xfb, 0x30, 0xda, 0x72, 0x37, 0x49, 0x63, 0xaf, 0xd1, 0x22, 0x95, 0x71, 0x46,
    +	0x2d, 0x93, 0x05, 0xdf, 0x92, 0x48, 0xfc, 0x56, 0xa0, 0xfe, 0xe2, 0xb8, 0x3a, 0xba, 0x0b, 0xa7,
    +	0x23, 0x12, 0xb4, 0x5d, 0xcf, 0xa1, 0xac, 0x53, 0x5c, 0x44, 0x99, 0x3d, 0x7e, 0x82, 0xad, 0xe9,
    +	0x0b, 0x62, 0x36, 0x4e, 0x6f, 0x64, 0x62, 0xe1, 0x9c, 0xda, 0xe8, 0x21, 0x54, 0x32, 0x20, 0x7c,
    +	0xb7, 0x9c, 0x64, 0x94, 0xdf, 0x11, 0x94, 0x2b, 0x1b, 0x39, 0x78, 0x87, 0x05, 0x30, 0x9c, 0x4b,
    +	0x1d, 0xdd, 0x86, 0x29, 0xc6, 0xaf, 0x6b, 0xdd, 0x56, 0x4b, 0x34, 0x38, 0xc9, 0x1a, 0x7c, 0x41,
    +	0x4a, 0x2f, 0xab, 0x26, 0xf8, 0x70, 0xbf, 0x0a, 0xf1, 0x3f, 0x9c, 0xac, 0x8d, 0xee, 0x33, 0xd3,
    +	0x6f, 0x37, 0x70, 0xa3, 0x3d, 0xba, 0xe9, 0xc8, 0xc3, 0xa8, 0x32, 0x55, 0xa8, 0x06, 0xd3, 0x51,
    +	0x95, 0x7d, 0x58, 0x2f, 0xc4, 0x49, 0x82, 0xf4, 0x00, 0x0a, 0xa3, 0xa6, 0xeb, 0x55, 0xa6, 0xf9,
    +	0x2d, 0x4e, 0xf2, 0xef, 0x3a, 0x2d, 0xc4, 0x1c, 0xc6, 0xcc, 0xbe, 0xf4, 0xc7, 0x6d, 0x7a, 0xce,
    +	0xcf, 0x30, 0xc4, 0xd8, 0xec, 0x2b, 0x01, 0x38, 0xc6, 0xa1, 0xa2, 0x77, 0x14, 0xed, 0x55, 0x10,
    +	0x43, 0x55, 0x6c, 0x78, 0x63, 0xe3, 0x2b, 0x98, 0x96, 0xdb, 0xbf, 0x6b, 0xc1, 0x45, 0xc5, 0x46,
    +	0x96, 0x1f, 0x46, 0xc4, 0x6b, 0x92, 0xa6, 0xce, 0x73, 0x49, 0x18, 0xa1, 0xb7, 0x61, 0xa2, 0x21,
    +	0x71, 0x34, 0x13, 0xb5, 0xda, 0xa5, 0x8b, 0x3a, 0x10, 0x9b, 0xb8, 0xe8, 0x1a, 0xe3, 0xc6, 0x8c,
    +	0x9e, 0xa6, 0x6c, 0xd2, 0x59, 0xac, 0x82, 0x61, 0x03, 0x13, 0xbd, 0x09, 0x63, 0x01, 0xef, 0x01,
    +	0xab, 0x58, 0x36, 0x3d, 0x25, 0x70, 0x0c, 0xc2, 0x3a, 0x9e, 0x7d, 0x1f, 0x26, 0x55, 0x87, 0xd8,
    +	0x34, 0xa3, 0x2a, 0x0c, 0x32, 0xf9, 0x59, 0xe8, 0xa1, 0x47, 0xe9, 0xa8, 0x32, 0xd9, 0x1a, 0xf3,
    +	0x72, 0x36, 0xaa, 0xee, 0x23, 0xb2, 0xb0, 0x17, 0x11, 0xae, 0xd4, 0x29, 0x6b, 0xa3, 0x2a, 0x01,
    +	0x38, 0xc6, 0xb1, 0xff, 0x7f, 0x7e, 0x0f, 0x89, 0x8f, 0xdb, 0x3e, 0x04, 0x8c, 0x57, 0x60, 0x84,
    +	0x79, 0xd0, 0xf8, 0x01, 0x37, 0x73, 0x0f, 0xc6, 0x37, 0x8f, 0x1b, 0xa2, 0x1c, 0x2b, 0x0c, 0x63,
    +	0xcc, 0x59, 0x15, 0x2e, 0x1d, 0xa5, 0xc7, 0x9c, 0xd5, 0x33, 0x71, 0xd1, 0x35, 0x18, 0x61, 0xce,
    +	0x62, 0x0d, 0xbf, 0x25, 0xc4, 0x76, 0x29, 0xe2, 0x8d, 0xd4, 0x44, 0xf9, 0xa1, 0xf6, 0x1b, 0x2b,
    +	0x6c, 0x74, 0x09, 0x86, 0x68, 0x17, 0x56, 0x6b, 0x42, 0x2e, 0x51, 0x2a, 0xd5, 0x1b, 0xac, 0x14,
    +	0x0b, 0xa8, 0xfd, 0x9b, 0x16, 0x13, 0x4a, 0xd3, 0x87, 0x27, 0xba, 0x91, 0x98, 0x6f, 0x3e, 0x20,
    +	0xcf, 0x67, 0xcd, 0xf7, 0x61, 0xf1, 0xfc, 0x7f, 0x35, 0x79, 0xc4, 0xf2, 0xa5, 0xf3, 0x86, 0x1c,
    +	0x82, 0xe4, 0x31, 0xfb, 0x4c, 0xbc, 0x6e, 0x69, 0x7f, 0x8a, 0xce, 0x5a, 0xfb, 0xb7, 0xf8, 0x35,
    +	0x39, 0x75, 0x7c, 0xa2, 0x25, 0x18, 0x72, 0xd8, 0x0d, 0x43, 0x74, 0xfc, 0x15, 0x39, 0x00, 0xf3,
    +	0xac, 0xf4, 0x50, 0xd8, 0xab, 0x93, 0xf5, 0x38, 0x14, 0x8b, 0xba, 0xe8, 0x9b, 0x30, 0x4a, 0x1e,
    +	0xba, 0xd1, 0xa2, 0xdf, 0x14, 0x0b, 0xca, 0xd4, 0x95, 0x16, 0x9e, 0xe0, 0xb7, 0xbd, 0x65, 0x59,
    +	0x95, 0x33, 0x6d, 0xf5, 0x17, 0xc7, 0x44, 0xed, 0x9f, 0xb3, 0xa0, 0xda, 0xa3, 0x36, 0xba, 0x47,
    +	0x85, 0x65, 0x12, 0x38, 0x91, 0x2f, 0xed, 0x9e, 0x6f, 0xcb, 0x65, 0x70, 0x5b, 0x94, 0x1f, 0xee,
    +	0x57, 0x5f, 0xec, 0x41, 0x46, 0xa2, 0x62, 0x45, 0x0c, 0xd9, 0x30, 0xc4, 0xd4, 0x25, 0x5c, 0xfa,
    +	0x1f, 0xe4, 0xc6, 0xcf, 0xbb, 0xac, 0x04, 0x0b, 0x88, 0xfd, 0x57, 0x4a, 0xda, 0x3e, 0xac, 0x47,
    +	0x4e, 0x44, 0x50, 0x0d, 0x86, 0x1f, 0x38, 0x6e, 0xe4, 0x7a, 0x5b, 0xe2, 0x8a, 0x52, 0x2c, 0x93,
    +	0xb1, 0x4a, 0xf7, 0x78, 0x05, 0x2e, 0x68, 0x8b, 0x3f, 0x58, 0x92, 0xa1, 0x14, 0x83, 0xae, 0xe7,
    +	0x51, 0x8a, 0xa5, 0x7e, 0x29, 0x62, 0x5e, 0x81, 0x53, 0x14, 0x7f, 0xb0, 0x24, 0x83, 0x3e, 0x04,
    +	0x90, 0xc7, 0x0a, 0x69, 0x0a, 0x35, 0xf7, 0x2b, 0xbd, 0x89, 0x6e, 0xa8, 0x3a, 0x5c, 0x8f, 0x1e,
    +	0xff, 0xc7, 0x1a, 0x3d, 0x3b, 0xd2, 0x76, 0x8d, 0xde, 0x19, 0xf4, 0x35, 0xca, 0xd7, 0x9d, 0x20,
    +	0x22, 0xcd, 0xf9, 0x48, 0x0c, 0xce, 0x67, 0xfb, 0xd3, 0x63, 0x6c, 0xb8, 0x6d, 0xa2, 0x9f, 0x01,
    +	0x82, 0x08, 0x8e, 0xe9, 0xd9, 0xbf, 0x5e, 0x86, 0x4a, 0x5e, 0x77, 0x29, 0x5b, 0x92, 0xab, 0x4a,
    +	0xd8, 0x1f, 0x14, 0x5b, 0x92, 0x4b, 0x00, 0x2b, 0x0c, 0xca, 0x1f, 0x42, 0x77, 0x4b, 0xaa, 0xa1,
    +	0x06, 0x63, 0xfe, 0x50, 0x67, 0xa5, 0x58, 0x40, 0x29, 0x5e, 0x40, 0x9c, 0x50, 0xf8, 0x89, 0x6a,
    +	0x7c, 0x04, 0xb3, 0x52, 0x2c, 0xa0, 0xba, 0x42, 0x7c, 0xa0, 0x87, 0x42, 0xdc, 0x18, 0xa2, 0xc1,
    +	0x27, 0x3b, 0x44, 0xe8, 0x1b, 0x00, 0x9b, 0xae, 0xe7, 0x86, 0xdb, 0x8c, 0xfa, 0xd0, 0x91, 0xa9,
    +	0xab, 0xfb, 0xdb, 0x8a, 0xa2, 0x82, 0x35, 0x8a, 0xf4, 0x2c, 0x53, 0x2c, 0x7a, 0x75, 0x89, 0x79,
    +	0xa9, 0x68, 0x67, 0x59, 0x7c, 0x5e, 0x2d, 0x61, 0x1d, 0xcf, 0xfe, 0x56, 0x72, 0xbd, 0x88, 0x1d,
    +	0xa0, 0x8d, 0xaf, 0xd5, 0xef, 0xf8, 0x96, 0x8a, 0xc7, 0xd7, 0xfe, 0x17, 0xa3, 0x30, 0x65, 0x34,
    +	0xd6, 0x0d, 0xfb, 0x38, 0xd5, 0xae, 0x53, 0xa9, 0xc5, 0x89, 0x88, 0xd8, 0x7f, 0x76, 0xef, 0xad,
    +	0xa2, 0x4b, 0x36, 0x74, 0x07, 0xf0, 0xfa, 0xe8, 0x1b, 0x30, 0xda, 0x72, 0x42, 0xa6, 0x5c, 0x27,
    +	0x62, 0xdf, 0xf5, 0x43, 0x2c, 0xd6, 0x5d, 0x38, 0x61, 0xa4, 0x89, 0x8a, 0x9c, 0x76, 0x4c, 0x92,
    +	0x8a, 0x57, 0x54, 0x28, 0x97, 0x8e, 0xc8, 0xaa, 0x13, 0x54, 0x72, 0xdf, 0xc3, 0x1c, 0x26, 0x84,
    +	0x15, 0xba, 0x2a, 0x16, 0xe9, 0x15, 0x86, 0x2d, 0xb3, 0x41, 0x43, 0x58, 0x51, 0x30, 0x6c, 0x60,
    +	0xc6, 0xea, 0x83, 0xa1, 0x02, 0xf5, 0xc1, 0x4b, 0x30, 0xcc, 0x7e, 0xa8, 0x15, 0xa0, 0x66, 0x63,
    +	0x95, 0x17, 0x63, 0x09, 0x4f, 0x2e, 0x98, 0x91, 0xfe, 0x16, 0x0c, 0x7a, 0x01, 0x86, 0xc5, 0xa2,
    +	0x66, 0x1e, 0x42, 0x23, 0x9c, 0xcb, 0x89, 0x25, 0x8f, 0x25, 0x0c, 0xfd, 0xbc, 0x05, 0xc8, 0x69,
    +	0xb5, 0xfc, 0x06, 0xe3, 0x50, 0xea, 0x1e, 0x0e, 0xec, 0x7e, 0xf6, 0x76, 0xcf, 0x61, 0xef, 0x86,
    +	0x73, 0xf3, 0xa9, 0xda, 0x5c, 0xa9, 0xff, 0x96, 0xbc, 0x7e, 0xa6, 0x11, 0xf4, 0xe3, 0xfe, 0x96,
    +	0x1b, 0x46, 0xdf, 0xfe, 0x57, 0x89, 0xe3, 0x3f, 0xa3, 0x4b, 0xe8, 0x8e, 0xae, 0x27, 0x18, 0x3b,
    +	0xa2, 0x9e, 0x60, 0x22, 0x57, 0x47, 0xf0, 0xe7, 0x12, 0xb7, 0xde, 0x71, 0xf6, 0xe5, 0x2f, 0xf4,
    +	0xb8, 0xf5, 0x0a, 0xcb, 0x4f, 0x3f, 0x77, 0xdf, 0x9a, 0x70, 0x59, 0x98, 0x60, 0x5d, 0x2e, 0xd6,
    +	0xd7, 0xdc, 0x09, 0x49, 0xb0, 0x70, 0x56, 0x7a, 0x34, 0x1c, 0xea, 0xd2, 0x9d, 0xe6, 0xe2, 0xf0,
    +	0xe3, 0x16, 0x54, 0xd2, 0x03, 0xc4, 0xbb, 0x54, 0x99, 0x64, 0xfd, 0xb7, 0x8b, 0x46, 0x46, 0x74,
    +	0x5e, 0x7a, 0x66, 0x57, 0xe6, 0x73, 0x68, 0xe1, 0xdc, 0x56, 0xd0, 0x35, 0x80, 0x30, 0xf2, 0x3b,
    +	0x9c, 0xd7, 0xb3, 0x1b, 0xd0, 0x28, 0xf3, 0x0d, 0x82, 0xba, 0x2a, 0x3d, 0x8c, 0xcf, 0x02, 0x0d,
    +	0x77, 0xb6, 0x0b, 0x67, 0x72, 0x56, 0x4c, 0x86, 0x69, 0x66, 0x49, 0x37, 0xcd, 0xf4, 0x50, 0xe8,
    +	0xcf, 0xc9, 0x39, 0x9d, 0xfb, 0xa0, 0xeb, 0x78, 0x91, 0x1b, 0xed, 0xe9, 0xa6, 0x1c, 0x0f, 0xcc,
    +	0xa1, 0x44, 0x5f, 0x87, 0xc1, 0x96, 0xeb, 0x75, 0x1f, 0x8a, 0x33, 0xf6, 0x52, 0xf6, 0x9d, 0xd9,
    +	0xeb, 0x3e, 0x34, 0x27, 0xa7, 0x4a, 0xb7, 0x32, 0x2b, 0x3f, 0xdc, 0xaf, 0xa2, 0x34, 0x02, 0xe6,
    +	0x54, 0xed, 0xcf, 0xc2, 0xe4, 0x92, 0x43, 0xda, 0xbe, 0xb7, 0xec, 0x35, 0x3b, 0xbe, 0xeb, 0x45,
    +	0xa8, 0x02, 0x03, 0x4c, 0x7c, 0xe7, 0x47, 0xeb, 0x00, 0x1d, 0x7c, 0xcc, 0x4a, 0xec, 0x2d, 0x38,
    +	0xb5, 0xe4, 0x3f, 0xf0, 0x1e, 0x38, 0x41, 0x73, 0xbe, 0xb6, 0xaa, 0xa9, 0xb6, 0xd7, 0xa5, 0x6a,
    +	0xd5, 0xca, 0x57, 0x5c, 0x69, 0x35, 0xf9, 0x22, 0x5c, 0x71, 0x5b, 0x24, 0xc7, 0x00, 0xf1, 0xd7,
    +	0x4b, 0x46, 0x4b, 0x31, 0xbe, 0x32, 0x9f, 0x5b, 0xb9, 0x9e, 0x37, 0x1f, 0xc0, 0xc8, 0xa6, 0x4b,
    +	0x5a, 0x4d, 0x4c, 0x36, 0xc5, 0x6c, 0xbc, 0x98, 0xef, 0x9b, 0xbb, 0x42, 0x31, 0x95, 0x9d, 0x9f,
    +	0x29, 0x66, 0x57, 0x44, 0x65, 0xac, 0xc8, 0xa0, 0x1d, 0x98, 0x96, 0x73, 0x26, 0xa1, 0x82, 0xdf,
    +	0xbf, 0x54, 0xb4, 0x7c, 0x4d, 0xe2, 0xec, 0x9d, 0x02, 0x4e, 0x90, 0xc1, 0x29, 0xc2, 0xe8, 0x1c,
    +	0x0c, 0xb4, 0xa9, 0x64, 0x33, 0xc0, 0x86, 0x9f, 0x69, 0x62, 0x99, 0x52, 0x99, 0x95, 0xda, 0x7f,
    +	0xc3, 0x82, 0x33, 0xa9, 0x91, 0x11, 0xca, 0xf5, 0x27, 0x3c, 0x0b, 0x49, 0x65, 0x77, 0xa9, 0xb7,
    +	0xb2, 0xdb, 0xfe, 0x2f, 0x2c, 0x38, 0xb9, 0xdc, 0xee, 0x44, 0x7b, 0x4b, 0xae, 0xe9, 0x26, 0xf3,
    +	0x05, 0x18, 0x6a, 0x93, 0xa6, 0xdb, 0x6d, 0x8b, 0x99, 0xab, 0xca, 0xd3, 0x7f, 0x8d, 0x95, 0x52,
    +	0x0e, 0x52, 0x8f, 0xfc, 0xc0, 0xd9, 0x22, 0xbc, 0x00, 0x0b, 0x74, 0x26, 0x43, 0xb9, 0x8f, 0xc8,
    +	0x2d, 0xb7, 0xed, 0x46, 0x8f, 0xb7, 0xbb, 0x84, 0x87, 0x8b, 0x24, 0x82, 0x63, 0x7a, 0xf6, 0xf7,
    +	0x2d, 0x98, 0x92, 0xeb, 0x7e, 0xbe, 0xd9, 0x0c, 0x48, 0x18, 0xa2, 0x59, 0x28, 0xb9, 0x1d, 0xd1,
    +	0x4b, 0x10, 0xbd, 0x2c, 0xad, 0xd6, 0x70, 0xc9, 0xed, 0xc8, 0x0b, 0xb1, 0x17, 0x5f, 0xee, 0x8d,
    +	0x0b, 0xb1, 0xc7, 0xde, 0x4c, 0x48, 0x0c, 0x74, 0x19, 0x46, 0x3c, 0xbf, 0xc9, 0xef, 0x94, 0xc2,
    +	0xdd, 0x83, 0x62, 0xae, 0x8b, 0x32, 0xac, 0xa0, 0xa8, 0x06, 0xa3, 0xdc, 0x15, 0x3c, 0x5e, 0xb4,
    +	0x7d, 0x39, 0x94, 0xb3, 0x2f, 0xdb, 0x90, 0x35, 0x71, 0x4c, 0xc4, 0xfe, 0xa7, 0x16, 0x8c, 0xcb,
    +	0x2f, 0xeb, 0xf3, 0xb6, 0x4f, 0xb7, 0x56, 0x7c, 0xd3, 0x8f, 0xb7, 0x16, 0xbd, 0xad, 0x33, 0x88,
    +	0x71, 0x49, 0x2f, 0x1f, 0xe9, 0x92, 0x7e, 0x15, 0xc6, 0x9c, 0x4e, 0xa7, 0x66, 0xde, 0xf0, 0xd9,
    +	0x52, 0x9a, 0x8f, 0x8b, 0xb1, 0x8e, 0x63, 0xff, 0x6c, 0x09, 0x26, 0xe5, 0x17, 0xd4, 0xbb, 0xf7,
    +	0x43, 0x12, 0xa1, 0x0d, 0x18, 0x75, 0xf8, 0x2c, 0x11, 0xb9, 0xc8, 0x9f, 0xcb, 0x56, 0xe1, 0x1b,
    +	0x53, 0x1a, 0x0b, 0xd2, 0xf3, 0xb2, 0x36, 0x8e, 0x09, 0xa1, 0x16, 0xcc, 0x78, 0x7e, 0xc4, 0x84,
    +	0x2a, 0x05, 0x2f, 0xf2, 0xaa, 0x48, 0x52, 0x3f, 0x2b, 0xa8, 0xcf, 0xac, 0x27, 0xa9, 0xe0, 0x34,
    +	0x61, 0xb4, 0x2c, 0xcd, 0x22, 0xe5, 0x7c, 0xcd, 0xb2, 0x3e, 0x71, 0xd9, 0x56, 0x11, 0xfb, 0x9f,
    +	0x58, 0x30, 0x2a, 0xd1, 0x8e, 0xc3, 0x81, 0x66, 0x0d, 0x86, 0x43, 0x36, 0x09, 0x72, 0x68, 0xec,
    +	0xa2, 0x8e, 0xf3, 0xf9, 0x8a, 0x65, 0x45, 0xfe, 0x3f, 0xc4, 0x92, 0x06, 0xb3, 0x8a, 0xab, 0xee,
    +	0x7f, 0x42, 0xac, 0xe2, 0xaa, 0x3f, 0x39, 0x87, 0xd2, 0xbf, 0x61, 0x7d, 0xd6, 0xcc, 0x4c, 0xf4,
    +	0x4a, 0xd3, 0x09, 0xc8, 0xa6, 0xfb, 0x30, 0x79, 0xa5, 0xa9, 0xb1, 0x52, 0x2c, 0xa0, 0xe8, 0x43,
    +	0x18, 0x6f, 0x48, 0x73, 0x68, 0xbc, 0xc3, 0x2f, 0x15, 0x9a, 0xe6, 0x95, 0x17, 0x07, 0x57, 0xac,
    +	0x2f, 0x6a, 0xf5, 0xb1, 0x41, 0xcd, 0x74, 0x75, 0x2c, 0xf7, 0x72, 0x75, 0x8c, 0xe9, 0xe6, 0x3b,
    +	0xfe, 0xfd, 0x9c, 0x05, 0x43, 0xdc, 0x0c, 0xd6, 0x9f, 0x15, 0x52, 0x73, 0x6a, 0x89, 0xc7, 0x8e,
    +	0x29, 0x57, 0x84, 0x64, 0x83, 0xd6, 0x60, 0x94, 0xfd, 0x60, 0x66, 0xbc, 0x72, 0xfe, 0xc3, 0x48,
    +	0xde, 0xaa, 0xde, 0xc1, 0xbb, 0xb2, 0x1a, 0x8e, 0x29, 0xd8, 0x7f, 0x54, 0xa6, 0xdc, 0x2d, 0x46,
    +	0x35, 0x0e, 0x7d, 0xeb, 0xe9, 0x1d, 0xfa, 0xa5, 0xa7, 0x75, 0xe8, 0x6f, 0xc1, 0x54, 0x43, 0x73,
    +	0x81, 0x89, 0x67, 0xf2, 0x72, 0xe1, 0x22, 0xd1, 0xbc, 0x65, 0xb8, 0xca, 0x7e, 0xd1, 0x24, 0x82,
    +	0x93, 0x54, 0xd1, 0xd7, 0x60, 0x9c, 0xcf, 0xb3, 0x68, 0x85, 0x7b, 0x8b, 0xbe, 0x90, 0xbf, 0x5e,
    +	0xf4, 0x26, 0xb8, 0x89, 0x47, 0xab, 0x8e, 0x0d, 0x62, 0xa8, 0x0e, 0xb0, 0xe9, 0xb6, 0x88, 0x20,
    +	0x5d, 0xe0, 0xd8, 0xbd, 0xc2, 0xb1, 0x14, 0xe1, 0x49, 0xae, 0x87, 0x90, 0x55, 0xb1, 0x46, 0xc6,
    +	0xfe, 0x77, 0x16, 0xa0, 0xe5, 0xce, 0x36, 0x69, 0x93, 0xc0, 0x69, 0xc5, 0xe6, 0xf1, 0x9f, 0xb4,
    +	0xa0, 0x42, 0x52, 0xc5, 0x8b, 0x7e, 0xbb, 0x2d, 0x34, 0x0c, 0x39, 0x4a, 0xb0, 0xe5, 0x9c, 0x3a,
    +	0xf1, 0x2d, 0x23, 0x0f, 0x03, 0xe7, 0xb6, 0x87, 0xd6, 0xe0, 0x04, 0x3f, 0x7a, 0x0d, 0xbb, 0x82,
    +	0xd8, 0x11, 0xcf, 0x08, 0xc2, 0x27, 0x36, 0xd2, 0x28, 0x38, 0xab, 0x9e, 0xfd, 0x0f, 0x26, 0x21,
    +	0xb7, 0x17, 0x9f, 0xfa, 0x05, 0x7c, 0xea, 0x17, 0xf0, 0xa9, 0x5f, 0xc0, 0xa7, 0x7e, 0x01, 0x9f,
    +	0xfa, 0x05, 0x7c, 0xea, 0x17, 0xf0, 0xa9, 0x5f, 0x80, 0xe6, 0x17, 0xf0, 0x57, 0x2d, 0x38, 0xa5,
    +	0x0e, 0x4d, 0x43, 0xf7, 0xf0, 0xa3, 0x70, 0x82, 0x6f, 0xb7, 0xc5, 0x96, 0xe3, 0xb6, 0x37, 0x48,
    +	0xbb, 0xd3, 0x72, 0x22, 0xe9, 0x73, 0x78, 0x35, 0x73, 0xe5, 0x26, 0x1e, 0x36, 0x19, 0x15, 0xf9,
    +	0x0b, 0xd1, 0x0c, 0x00, 0xce, 0x6a, 0xc6, 0xfe, 0xf5, 0x11, 0x18, 0x5c, 0xde, 0x25, 0x5e, 0x74,
    +	0x0c, 0xb7, 0xb4, 0x06, 0x4c, 0xba, 0xde, 0xae, 0xdf, 0xda, 0x25, 0x4d, 0x0e, 0x3f, 0x8a, 0x32,
    +	0xe1, 0xb4, 0x20, 0x3d, 0xb9, 0x6a, 0x90, 0xc0, 0x09, 0x92, 0x4f, 0xc3, 0x50, 0x76, 0x1d, 0x86,
    +	0xf8, 0x91, 0x27, 0x84, 0xc6, 0x4c, 0x9e, 0xcd, 0x06, 0x51, 0x1c, 0xe4, 0xb1, 0x11, 0x8f, 0x1f,
    +	0xa9, 0xa2, 0x3a, 0xfa, 0x16, 0x4c, 0x6e, 0xba, 0x41, 0x18, 0x6d, 0xb8, 0x6d, 0x7a, 0x3e, 0xb4,
    +	0x3b, 0x8f, 0x61, 0x18, 0x53, 0xe3, 0xb0, 0x62, 0x50, 0xc2, 0x09, 0xca, 0x68, 0x0b, 0x26, 0x5a,
    +	0x8e, 0xde, 0xd4, 0xf0, 0x91, 0x9b, 0x52, 0xa7, 0xc3, 0x2d, 0x9d, 0x10, 0x36, 0xe9, 0xd2, 0xed,
    +	0xd4, 0x60, 0xb6, 0x9d, 0x11, 0xa6, 0x99, 0x51, 0xdb, 0x89, 0x1b, 0x75, 0x38, 0x8c, 0x8a, 0x85,
    +	0xec, 0x79, 0xd0, 0xa8, 0x29, 0x16, 0x6a, 0x8f, 0x80, 0xbe, 0x09, 0xa3, 0x84, 0x0e, 0x21, 0x25,
    +	0x2c, 0x0e, 0x98, 0x2b, 0xfd, 0xf5, 0x75, 0xcd, 0x6d, 0x04, 0xbe, 0x69, 0x92, 0x5c, 0x96, 0x94,
    +	0x70, 0x4c, 0x14, 0x2d, 0xc2, 0x50, 0x48, 0x02, 0x57, 0x99, 0x3d, 0x0a, 0xa6, 0x91, 0xa1, 0x71,
    +	0x2b, 0x3c, 0xff, 0x8d, 0x45, 0x55, 0xba, 0xbc, 0x84, 0x3b, 0xc3, 0xb8, 0xb9, 0xbc, 0x12, 0x0e,
    +	0x0b, 0xef, 0xc3, 0x70, 0x40, 0x5a, 0xcc, 0xe6, 0x3d, 0xd1, 0xff, 0x22, 0xe7, 0x26, 0x74, 0x5e,
    +	0x0f, 0x4b, 0x02, 0xe8, 0x26, 0x95, 0x57, 0xa8, 0x58, 0xe9, 0x7a, 0x5b, 0xea, 0xd1, 0x8c, 0x60,
    +	0xb4, 0x4a, 0x7c, 0xc7, 0x31, 0x86, 0x7c, 0x7d, 0x8e, 0x33, 0xaa, 0xa1, 0xeb, 0x30, 0xa3, 0x4a,
    +	0x57, 0xbd, 0x30, 0x72, 0x28, 0x83, 0xe3, 0x96, 0x07, 0xa5, 0x2a, 0xc2, 0x49, 0x04, 0x9c, 0xae,
    +	0x63, 0xff, 0xa2, 0x05, 0x7c, 0x9c, 0x8f, 0x41, 0x41, 0xf2, 0xae, 0xa9, 0x20, 0x39, 0x9b, 0x3b,
    +	0x73, 0x39, 0xca, 0x91, 0x5f, 0xb4, 0x60, 0x4c, 0x9b, 0xd9, 0x78, 0xcd, 0x5a, 0x05, 0x6b, 0xb6,
    +	0x0b, 0xd3, 0x74, 0xa5, 0xdf, 0xbe, 0x1f, 0x92, 0x60, 0x97, 0x34, 0xd9, 0xc2, 0x2c, 0x3d, 0xde,
    +	0xc2, 0x54, 0x0e, 0xfa, 0xb7, 0x12, 0x04, 0x71, 0xaa, 0x09, 0xfb, 0x9b, 0xb2, 0xab, 0xea, 0x3d,
    +	0x43, 0x43, 0xcd, 0x79, 0xe2, 0x3d, 0x83, 0x9a, 0x55, 0x1c, 0xe3, 0xd0, 0xad, 0xb6, 0xed, 0x87,
    +	0x51, 0xf2, 0x3d, 0xc3, 0x0d, 0x3f, 0x8c, 0x30, 0x83, 0xd8, 0xaf, 0x03, 0x2c, 0x3f, 0x24, 0x0d,
    +	0xbe, 0x62, 0xf5, 0xab, 0x96, 0x95, 0x7f, 0xd5, 0xb2, 0x7f, 0xcf, 0x82, 0xc9, 0x95, 0x45, 0xe3,
    +	0xe4, 0x9a, 0x03, 0xe0, 0xf7, 0xc3, 0x7b, 0xf7, 0xd6, 0xa5, 0x2f, 0x18, 0x77, 0xd6, 0x50, 0xa5,
    +	0x58, 0xc3, 0x40, 0x67, 0xa1, 0xdc, 0xea, 0x7a, 0x42, 0x83, 0x3b, 0x4c, 0x8f, 0xc7, 0x5b, 0x5d,
    +	0x0f, 0xd3, 0x32, 0xed, 0xe5, 0x69, 0xb9, 0xef, 0x97, 0xa7, 0x3d, 0x03, 0x60, 0xa1, 0x2a, 0x0c,
    +	0x3e, 0x78, 0xe0, 0x36, 0x79, 0x5c, 0x0f, 0xe1, 0xa7, 0x76, 0xef, 0xde, 0xea, 0x52, 0x88, 0x79,
    +	0xb9, 0xfd, 0xcb, 0x16, 0x4c, 0x25, 0x6e, 0xfb, 0xf4, 0xd6, 0xb8, 0xab, 0xa2, 0x2a, 0x25, 0x83,
    +	0xc7, 0x68, 0xf1, 0x96, 0x34, 0xac, 0x3e, 0x5e, 0x5c, 0x8b, 0x17, 0x3b, 0xe5, 0x3e, 0x5e, 0xec,
    +	0x14, 0xbb, 0xe1, 0x7f, 0xaf, 0x0c, 0xb3, 0x2b, 0x2d, 0xf2, 0xf0, 0x63, 0x86, 0x63, 0xe9, 0xf7,
    +	0xa9, 0xef, 0xd1, 0xd4, 0x77, 0x47, 0x7d, 0xce, 0xdd, 0x7b, 0x0a, 0x37, 0x61, 0x98, 0x7f, 0xba,
    +	0x0c, 0xce, 0x92, 0x69, 0x4c, 0xcf, 0x1f, 0x90, 0x39, 0x3e, 0x84, 0xc2, 0x98, 0xae, 0xce, 0x78,
    +	0x51, 0x8a, 0x25, 0xf1, 0xd9, 0xb7, 0x60, 0x5c, 0xc7, 0x3c, 0x52, 0x60, 0x85, 0xbf, 0x50, 0x86,
    +	0x69, 0xda, 0x83, 0xa7, 0x3a, 0x11, 0x77, 0xd2, 0x13, 0xf1, 0xa4, 0x1f, 0xd7, 0xf7, 0x9e, 0x8d,
    +	0x0f, 0x93, 0xb3, 0x71, 0x35, 0x6f, 0x36, 0x8e, 0x7b, 0x0e, 0xfe, 0xa2, 0x05, 0x27, 0x56, 0x5a,
    +	0x7e, 0x63, 0x27, 0xf1, 0x00, 0xfe, 0x4d, 0x18, 0xa3, 0x27, 0x48, 0x68, 0xc4, 0x82, 0x32, 0xa2,
    +	0x83, 0x09, 0x10, 0xd6, 0xf1, 0xb4, 0x6a, 0x77, 0xee, 0xac, 0x2e, 0x65, 0x05, 0x15, 0x13, 0x20,
    +	0xac, 0xe3, 0xd9, 0xff, 0xdc, 0x82, 0xf3, 0xd7, 0x17, 0x97, 0xe3, 0xa5, 0x98, 0x8a, 0x6b, 0x76,
    +	0x09, 0x86, 0x3a, 0x4d, 0xad, 0x2b, 0xb1, 0x52, 0x7e, 0x89, 0xf5, 0x42, 0x40, 0x3f, 0x29, 0x21,
    +	0x04, 0xef, 0x00, 0x5c, 0xc7, 0xb5, 0x45, 0x71, 0x54, 0x48, 0x1b, 0x9c, 0x95, 0x6b, 0x83, 0x7b,
    +	0x01, 0x86, 0xe9, 0x51, 0xe6, 0x36, 0x64, 0xbf, 0xb9, 0xbb, 0x0c, 0x2f, 0xc2, 0x12, 0x66, 0xff,
    +	0x82, 0x05, 0x27, 0xae, 0xbb, 0x11, 0x95, 0x33, 0x92, 0x81, 0xbb, 0xa8, 0xa0, 0x11, 0xba, 0x91,
    +	0x1f, 0xec, 0x25, 0x79, 0x2f, 0x56, 0x10, 0xac, 0x61, 0xf1, 0x0f, 0xda, 0x75, 0xd9, 0x93, 0xba,
    +	0x92, 0x69, 0xf5, 0xc4, 0xa2, 0x1c, 0x2b, 0x0c, 0x3a, 0x5e, 0x4d, 0x37, 0x60, 0x9c, 0x5e, 0x72,
    +	0x63, 0x35, 0x5e, 0x4b, 0x12, 0x80, 0x63, 0x1c, 0xfb, 0x8f, 0x2d, 0xa8, 0x5e, 0xe7, 0x81, 0x01,
    +	0x36, 0xc3, 0x1c, 0xa6, 0xfb, 0x3a, 0x8c, 0x12, 0x69, 0x9e, 0x49, 0xfa, 0x72, 0x2b, 0xbb, 0x0d,
    +	0x8f, 0x1f, 0xa6, 0xf0, 0xfa, 0x38, 0x33, 0x8e, 0x16, 0x66, 0x61, 0x05, 0x10, 0xd1, 0xdb, 0xd2,
    +	0x03, 0xaa, 0xb1, 0xc8, 0x4c, 0xcb, 0x29, 0x28, 0xce, 0xa8, 0x61, 0xff, 0x0d, 0x0b, 0x4e, 0xa9,
    +	0x0f, 0xfe, 0xc4, 0x7d, 0xa6, 0xfd, 0xab, 0x25, 0x98, 0xb8, 0xb1, 0xb1, 0x51, 0xbb, 0x4e, 0x22,
    +	0x6d, 0x55, 0x16, 0x3b, 0x5d, 0x60, 0xcd, 0x76, 0x5c, 0x74, 0xad, 0xed, 0x46, 0x6e, 0x6b, 0x8e,
    +	0x87, 0x09, 0x9d, 0x5b, 0xf5, 0xa2, 0xdb, 0x41, 0x3d, 0x0a, 0x5c, 0x6f, 0x2b, 0x73, 0xa5, 0x4b,
    +	0x31, 0xab, 0x9c, 0x27, 0x66, 0xa1, 0xd7, 0x61, 0x88, 0xc5, 0x29, 0x95, 0x93, 0xf0, 0x8c, 0xba,
    +	0x15, 0xb2, 0xd2, 0xc3, 0xfd, 0xea, 0xe8, 0x1d, 0xbc, 0xca, 0xff, 0x60, 0x81, 0x8a, 0xee, 0xc0,
    +	0xd8, 0x76, 0x14, 0x75, 0x6e, 0x10, 0xa7, 0x49, 0x02, 0xc9, 0x65, 0x2f, 0x64, 0x71, 0x59, 0x3a,
    +	0x08, 0x1c, 0x2d, 0x66, 0x4c, 0x71, 0x59, 0x88, 0x75, 0x3a, 0x76, 0x1d, 0x20, 0x86, 0x3d, 0x21,
    +	0xb3, 0x99, 0xbd, 0x01, 0xa3, 0xf4, 0x73, 0xe7, 0x5b, 0xae, 0x53, 0xec, 0x98, 0xf0, 0x32, 0x8c,
    +	0x4a, 0xb7, 0x83, 0x50, 0x44, 0x11, 0x62, 0x27, 0x92, 0xf4, 0x4a, 0x08, 0x71, 0x0c, 0xb7, 0x9f,
    +	0x07, 0xe1, 0x1b, 0x5f, 0x44, 0xd2, 0xde, 0x84, 0x93, 0xcc, 0xc9, 0xdf, 0x89, 0xb6, 0x8d, 0x35,
    +	0xda, 0x7b, 0x31, 0xbc, 0x22, 0xae, 0xa2, 0x25, 0xe5, 0x6d, 0x25, 0xa3, 0x54, 0x8c, 0x4b, 0x8a,
    +	0xf1, 0xb5, 0xd4, 0xfe, 0xa3, 0x01, 0x78, 0x66, 0xb5, 0x9e, 0x1f, 0xfe, 0xee, 0x1a, 0x8c, 0x73,
    +	0x09, 0x97, 0x2e, 0x0d, 0xa7, 0x25, 0xda, 0x55, 0x4a, 0xdb, 0x0d, 0x0d, 0x86, 0x0d, 0x4c, 0x2a,
    +	0x11, 0xba, 0x1f, 0x79, 0xc9, 0x37, 0xdc, 0xab, 0x1f, 0xac, 0x63, 0x5a, 0x4e, 0xc1, 0x54, 0x58,
    +	0xe6, 0x2c, 0x5d, 0x81, 0x95, 0xc0, 0xfc, 0x2e, 0x4c, 0xba, 0x61, 0x23, 0x74, 0x57, 0x3d, 0xba,
    +	0x4f, 0xb5, 0x9d, 0xae, 0xd4, 0x24, 0xb4, 0xd3, 0x0a, 0x8a, 0x13, 0xd8, 0xda, 0xf9, 0x32, 0xd8,
    +	0xb7, 0xc0, 0xdd, 0x33, 0xf8, 0x0e, 0x65, 0xff, 0x1d, 0xf6, 0x75, 0x21, 0xb3, 0x55, 0x08, 0xf6,
    +	0xcf, 0x3f, 0x38, 0xc4, 0x12, 0x46, 0xef, 0xa0, 0x8d, 0x6d, 0xa7, 0x33, 0xdf, 0x8d, 0xb6, 0x97,
    +	0xdc, 0xb0, 0xe1, 0xef, 0x92, 0x60, 0x8f, 0xa9, 0x0f, 0x46, 0xe2, 0x3b, 0xa8, 0x02, 0x2c, 0xde,
    +	0x98, 0xaf, 0x51, 0x4c, 0x9c, 0xae, 0x83, 0xe6, 0x61, 0x4a, 0x16, 0xd6, 0x49, 0xc8, 0x8e, 0x80,
    +	0x31, 0x46, 0x46, 0xbd, 0xaa, 0x16, 0xc5, 0x8a, 0x48, 0x12, 0xdf, 0x14, 0x70, 0xe1, 0x49, 0x08,
    +	0xb8, 0x5f, 0x80, 0x09, 0xd7, 0x73, 0x23, 0xd7, 0x89, 0x7c, 0x6e, 0x68, 0xe3, 0x9a, 0x02, 0xa6,
    +	0x13, 0x5f, 0xd5, 0x01, 0xd8, 0xc4, 0xb3, 0xff, 0x8f, 0x01, 0x98, 0x61, 0xd3, 0xf6, 0xe9, 0x0a,
    +	0xfb, 0x61, 0x5a, 0x61, 0x77, 0xd2, 0x2b, 0xec, 0x49, 0x48, 0xee, 0x8f, 0xbd, 0xcc, 0xbe, 0x63,
    +	0xc1, 0x0c, 0x53, 0xcb, 0x1b, 0xcb, 0xec, 0x0a, 0x8c, 0x06, 0xc6, 0x83, 0xf7, 0x51, 0xdd, 0xfa,
    +	0x27, 0xdf, 0xae, 0xc7, 0x38, 0xe8, 0x3d, 0x80, 0x4e, 0xac, 0xf6, 0x2f, 0x19, 0x51, 0x8a, 0x21,
    +	0x57, 0xe3, 0xaf, 0xd5, 0xb1, 0xbf, 0x05, 0xa3, 0xea, 0x45, 0xbb, 0xbc, 0x20, 0x5b, 0x39, 0x17,
    +	0xe4, 0xde, 0x62, 0x84, 0xf4, 0x4c, 0x2c, 0x67, 0x7a, 0x26, 0xfe, 0x6b, 0x0b, 0x62, 0xa3, 0x0c,
    +	0xfa, 0x00, 0x46, 0x3b, 0x3e, 0x73, 0x64, 0x0f, 0xe4, 0xeb, 0x90, 0xe7, 0x0b, 0xad, 0x3a, 0x3c,
    +	0x14, 0x69, 0xc0, 0xa7, 0xa3, 0x26, 0xab, 0xe2, 0x98, 0x0a, 0xba, 0x09, 0xc3, 0x9d, 0x80, 0xd4,
    +	0x23, 0x16, 0x27, 0xaf, 0x7f, 0x82, 0x7c, 0xf9, 0xf2, 0x8a, 0x58, 0x52, 0x48, 0xf8, 0x05, 0x97,
    +	0xfb, 0xf7, 0x0b, 0xb6, 0xff, 0x7e, 0x09, 0xa6, 0x93, 0x8d, 0xa0, 0x77, 0x60, 0x80, 0x3c, 0x24,
    +	0x0d, 0xf1, 0xa5, 0x99, 0xd2, 0x44, 0xac, 0x10, 0xe2, 0x43, 0x47, 0xff, 0x63, 0x56, 0x0b, 0xdd,
    +	0x80, 0x61, 0x2a, 0x4a, 0x5c, 0x57, 0xd1, 0x64, 0x9f, 0xcd, 0x13, 0x47, 0x94, 0x4c, 0xc6, 0x3f,
    +	0x4b, 0x14, 0x61, 0x59, 0x9d, 0x39, 0x12, 0x36, 0x3a, 0x75, 0x7a, 0x4b, 0x8b, 0x8a, 0x94, 0x09,
    +	0x1b, 0x8b, 0x35, 0x8e, 0x24, 0xa8, 0x71, 0x47, 0x42, 0x59, 0x88, 0x63, 0x22, 0xe8, 0x3d, 0x18,
    +	0x0c, 0x5b, 0x84, 0x74, 0x84, 0xa7, 0x48, 0xa6, 0x4a, 0xb7, 0x4e, 0x11, 0x04, 0x25, 0xa6, 0x02,
    +	0x62, 0x05, 0x98, 0x57, 0xb4, 0x7f, 0xcd, 0x02, 0xe0, 0x9e, 0x97, 0x8e, 0xb7, 0x45, 0x8e, 0xc1,
    +	0x0a, 0xb2, 0x04, 0x03, 0x61, 0x87, 0x34, 0x8a, 0xde, 0x77, 0xc4, 0xfd, 0xa9, 0x77, 0x48, 0x23,
    +	0x5e, 0xed, 0xf4, 0x1f, 0x66, 0xb5, 0xed, 0x9f, 0x00, 0x98, 0x8c, 0xd1, 0x56, 0x23, 0xd2, 0x46,
    +	0xaf, 0x1a, 0x21, 0xb8, 0xce, 0x26, 0x42, 0x70, 0x8d, 0x32, 0x6c, 0x4d, 0xe1, 0xfe, 0x2d, 0x28,
    +	0xb7, 0x9d, 0x87, 0x42, 0xa3, 0xfa, 0x72, 0x71, 0x37, 0x28, 0xfd, 0xb9, 0x35, 0xe7, 0x21, 0xbf,
    +	0xc1, 0xbf, 0x2c, 0x77, 0xe7, 0x9a, 0xf3, 0xb0, 0xe7, 0x1b, 0x04, 0xda, 0x08, 0x6b, 0xcb, 0xf5,
    +	0x84, 0x53, 0x61, 0x5f, 0x6d, 0xb9, 0x5e, 0xb2, 0x2d, 0xd7, 0xeb, 0xa3, 0x2d, 0xd7, 0x43, 0x8f,
    +	0x60, 0x58, 0xf8, 0xfc, 0x8a, 0xd8, 0xa0, 0x57, 0xfa, 0x68, 0x4f, 0xb8, 0x0c, 0xf3, 0x36, 0xaf,
    +	0x48, 0x0d, 0x85, 0x28, 0xed, 0xd9, 0xae, 0x6c, 0x10, 0xfd, 0x35, 0x0b, 0x26, 0xc5, 0x6f, 0xf1,
    +	0x9c, 0x56, 0x48, 0xf0, 0x9f, 0xef, 0xbf, 0x0f, 0xa2, 0x22, 0xef, 0xca, 0xe7, 0xe5, 0x61, 0x6b,
    +	0x02, 0x7b, 0xf6, 0x28, 0xd1, 0x0b, 0xf4, 0xf7, 0x2d, 0x38, 0xd9, 0x76, 0x1e, 0xf2, 0x16, 0x79,
    +	0x19, 0x76, 0x22, 0xd7, 0x17, 0x6e, 0x2e, 0xef, 0xf4, 0x37, 0xfd, 0xa9, 0xea, 0xbc, 0x93, 0xd2,
    +	0xba, 0x7c, 0x32, 0x0b, 0xa5, 0x67, 0x57, 0x33, 0xfb, 0x35, 0xbb, 0x09, 0x23, 0x72, 0xbd, 0x3d,
    +	0xcd, 0x07, 0x0d, 0xac, 0x1d, 0xb1, 0xd6, 0x9e, 0x6a, 0x3b, 0xdf, 0x82, 0x71, 0x7d, 0x8d, 0x3d,
    +	0xd5, 0xb6, 0x3e, 0x82, 0x13, 0x19, 0x6b, 0xe9, 0xa9, 0x36, 0xf9, 0x00, 0xce, 0xe6, 0xae, 0x8f,
    +	0xa7, 0xfa, 0x20, 0xe5, 0x57, 0x2d, 0x9d, 0x0f, 0x1e, 0x83, 0x29, 0x6a, 0xd1, 0x34, 0x45, 0x5d,
    +	0x28, 0xde, 0x39, 0x39, 0xf6, 0xa8, 0x0f, 0xf5, 0x4e, 0x53, 0xae, 0x8e, 0xde, 0x87, 0xa1, 0x16,
    +	0x2d, 0x91, 0x9e, 0xe3, 0x76, 0xef, 0x1d, 0x19, 0x4b, 0xd4, 0xac, 0x3c, 0xc4, 0x82, 0x82, 0xfd,
    +	0x33, 0x16, 0x64, 0x3c, 0xa9, 0xa1, 0x12, 0x56, 0xd7, 0x6d, 0xb2, 0x21, 0x29, 0xc7, 0x12, 0x96,
    +	0x8a, 0x50, 0x75, 0x1e, 0xca, 0x5b, 0x6e, 0x53, 0xbc, 0xd6, 0x57, 0xe0, 0xeb, 0x14, 0xbc, 0xe5,
    +	0x36, 0xd1, 0x0a, 0xa0, 0xb0, 0xdb, 0xe9, 0xb4, 0x98, 0x67, 0x98, 0xd3, 0xba, 0x1e, 0xf8, 0xdd,
    +	0x0e, 0x77, 0x13, 0x2f, 0x73, 0xf5, 0x52, 0x3d, 0x05, 0xc5, 0x19, 0x35, 0xec, 0x7f, 0x64, 0xc1,
    +	0xc0, 0x31, 0x4c, 0x13, 0x36, 0xa7, 0xe9, 0xd5, 0x5c, 0xd2, 0x22, 0xa5, 0xcc, 0x1c, 0x76, 0x1e,
    +	0xb0, 0x70, 0x0d, 0x21, 0x13, 0x38, 0x32, 0x67, 0x6d, 0xdf, 0x82, 0x13, 0xb7, 0x7c, 0xa7, 0xb9,
    +	0xe0, 0xb4, 0x1c, 0xaf, 0x41, 0x82, 0x55, 0x6f, 0xeb, 0x48, 0x6f, 0x32, 0x4a, 0x3d, 0xdf, 0x64,
    +	0x5c, 0x83, 0x21, 0xb7, 0xa3, 0xe5, 0xa4, 0xb8, 0x48, 0x67, 0x77, 0xb5, 0x26, 0xd2, 0x51, 0x20,
    +	0xa3, 0x71, 0x56, 0x8a, 0x05, 0x3e, 0x5d, 0x96, 0xdc, 0x6f, 0x71, 0x20, 0x7f, 0x59, 0xd2, 0x5b,
    +	0x52, 0x32, 0xd6, 0xa2, 0xe1, 0xb6, 0xbf, 0x0d, 0x46, 0x13, 0xe2, 0x91, 0x1a, 0x86, 0x61, 0x97,
    +	0x7f, 0xa9, 0x58, 0x9b, 0x2f, 0x66, 0xdf, 0x5e, 0x52, 0x03, 0xa3, 0xbd, 0xc6, 0xe4, 0x05, 0x58,
    +	0x12, 0xb2, 0xaf, 0x41, 0x66, 0x6c, 0xac, 0xde, 0x9a, 0x29, 0xfb, 0x2b, 0x30, 0xc3, 0x6a, 0x1e,
    +	0x51, 0xeb, 0x63, 0x27, 0xf4, 0xe9, 0x19, 0xe1, 0xc5, 0xed, 0xff, 0xc5, 0x02, 0xb4, 0xe6, 0x37,
    +	0xdd, 0xcd, 0x3d, 0x41, 0x9c, 0x7f, 0xff, 0x47, 0x50, 0xe5, 0xd7, 0xea, 0x64, 0x08, 0xee, 0xc5,
    +	0x96, 0x13, 0x86, 0x9a, 0x2e, 0xff, 0x45, 0xd1, 0x6e, 0x75, 0xa3, 0x18, 0x1d, 0xf7, 0xa2, 0x87,
    +	0x3e, 0x48, 0x44, 0x44, 0xfd, 0x62, 0x2a, 0x22, 0xea, 0x8b, 0x99, 0x4e, 0x40, 0xe9, 0xde, 0xcb,
    +	0x48, 0xa9, 0xf6, 0x77, 0x2d, 0x98, 0x5a, 0x4f, 0x84, 0x94, 0xbe, 0xc4, 0x3c, 0x22, 0x32, 0x6c,
    +	0x54, 0x75, 0x56, 0x8a, 0x05, 0xf4, 0x89, 0xeb, 0x70, 0xff, 0xd4, 0x82, 0x38, 0x16, 0xdf, 0x31,
    +	0x88, 0xdc, 0x8b, 0x86, 0xc8, 0x9d, 0x79, 0x7d, 0x51, 0xdd, 0xc9, 0x93, 0xb8, 0xd1, 0x4d, 0x35,
    +	0x27, 0x05, 0x37, 0x97, 0x98, 0x0c, 0xdf, 0x67, 0x93, 0xe6, 0xc4, 0xa9, 0xd9, 0xf8, 0xfd, 0x12,
    +	0x20, 0x85, 0xdb, 0x77, 0x14, 0xdd, 0x74, 0x8d, 0x27, 0x13, 0x45, 0x77, 0x17, 0x10, 0xf3, 0xe9,
    +	0x09, 0x1c, 0x2f, 0xe4, 0x64, 0x5d, 0xa1, 0xb5, 0x3e, 0x9a, 0xc3, 0x90, 0x72, 0x89, 0xbd, 0x95,
    +	0xa2, 0x86, 0x33, 0x5a, 0xd0, 0x7c, 0xb5, 0x06, 0xfb, 0xf5, 0xd5, 0x1a, 0xea, 0xf1, 0xe8, 0xfe,
    +	0x57, 0x2c, 0x98, 0x50, 0xc3, 0xf4, 0x09, 0x79, 0xba, 0xa3, 0xfa, 0x93, 0x73, 0xae, 0xd4, 0xb4,
    +	0x2e, 0x33, 0x61, 0xe0, 0x47, 0x58, 0xf0, 0x04, 0xa7, 0xe5, 0x3e, 0x22, 0x2a, 0xd8, 0x7b, 0x55,
    +	0x04, 0x43, 0x10, 0xa5, 0x87, 0xfb, 0xd5, 0x09, 0xf5, 0x8f, 0xfb, 0x23, 0xc4, 0x55, 0xec, 0xbf,
    +	0x4d, 0x37, 0xbb, 0xb9, 0x14, 0xd1, 0x9b, 0x30, 0xd8, 0xd9, 0x76, 0x42, 0x92, 0x78, 0xe2, 0x38,
    +	0x58, 0xa3, 0x85, 0x87, 0xfb, 0xd5, 0x49, 0x55, 0x81, 0x95, 0x60, 0x8e, 0xdd, 0x7f, 0x6c, 0xe2,
    +	0xf4, 0xe2, 0xec, 0x19, 0x9b, 0xf8, 0xdf, 0x59, 0x30, 0xb0, 0x4e, 0x4f, 0xaf, 0xa7, 0xcf, 0x02,
    +	0xde, 0x35, 0x58, 0xc0, 0xb9, 0xbc, 0xb4, 0x67, 0xb9, 0xbb, 0x7f, 0x25, 0xb1, 0xfb, 0x2f, 0xe4,
    +	0x52, 0x28, 0xde, 0xf8, 0x6d, 0x18, 0x63, 0xc9, 0xd4, 0xc4, 0x73, 0xce, 0xd7, 0x8d, 0x0d, 0x5f,
    +	0x4d, 0x6c, 0xf8, 0x29, 0x0d, 0x55, 0xdb, 0xe9, 0x2f, 0xc1, 0xb0, 0x78, 0x1f, 0x98, 0x8c, 0x41,
    +	0x21, 0x70, 0xb1, 0x84, 0xdb, 0x3f, 0x57, 0x06, 0x23, 0x79, 0x1b, 0xfa, 0x27, 0x16, 0xcc, 0x05,
    +	0xdc, 0xc5, 0xbf, 0xb9, 0xd4, 0x0d, 0x5c, 0x6f, 0xab, 0xde, 0xd8, 0x26, 0xcd, 0x6e, 0xcb, 0xf5,
    +	0xb6, 0x56, 0xb7, 0x3c, 0x5f, 0x15, 0x2f, 0x3f, 0x24, 0x8d, 0xae, 0x8a, 0xdb, 0x53, 0x90, 0x29,
    +	0x4e, 0x3d, 0x93, 0x79, 0xed, 0x60, 0xbf, 0x3a, 0x87, 0x8f, 0x44, 0x1b, 0x1f, 0xb1, 0x2f, 0xe8,
    +	0x9f, 0x5b, 0x70, 0x85, 0x27, 0x11, 0xeb, 0xbf, 0xff, 0x05, 0x1a, 0x8e, 0x9a, 0x24, 0x15, 0x13,
    +	0xd9, 0x20, 0x41, 0x7b, 0xe1, 0x0b, 0x62, 0x40, 0xaf, 0xd4, 0x8e, 0xd6, 0x16, 0x3e, 0x6a, 0xe7,
    +	0xec, 0xff, 0xa6, 0x0c, 0x13, 0x22, 0x86, 0xad, 0x38, 0x03, 0xde, 0x34, 0x96, 0xc4, 0xb3, 0x89,
    +	0x25, 0x31, 0x63, 0x20, 0x3f, 0x19, 0xf6, 0x1f, 0xc2, 0x0c, 0x65, 0xce, 0x37, 0x88, 0x13, 0x44,
    +	0xf7, 0x89, 0xc3, 0x5d, 0x30, 0xcb, 0x47, 0xe6, 0xfe, 0x4a, 0xb1, 0x7e, 0x2b, 0x49, 0x0c, 0xa7,
    +	0xe9, 0xff, 0x30, 0x9d, 0x39, 0x1e, 0x4c, 0xa7, 0xc2, 0x10, 0x7f, 0x15, 0x46, 0xd5, 0xe3, 0x36,
    +	0xc1, 0x74, 0x8a, 0xa3, 0x79, 0x27, 0x29, 0x70, 0xa5, 0x67, 0xfc, 0xb0, 0x32, 0x26, 0x67, 0xff,
    +	0x72, 0xc9, 0x68, 0x90, 0x4f, 0xe2, 0x3a, 0x8c, 0x38, 0x21, 0xcb, 0x30, 0xd0, 0x2c, 0xd2, 0x68,
    +	0xa7, 0x9a, 0x61, 0x7e, 0x66, 0xf3, 0xa2, 0x26, 0x56, 0x34, 0xd0, 0x0d, 0xee, 0xe8, 0xba, 0x4b,
    +	0x8a, 0xd4, 0xd9, 0x29, 0x6a, 0x20, 0x5d, 0x61, 0x77, 0x09, 0x16, 0xf5, 0xd1, 0xd7, 0xb9, 0x27,
    +	0xf2, 0x4d, 0xcf, 0x7f, 0xe0, 0x5d, 0xf7, 0x7d, 0x19, 0x04, 0xaa, 0x3f, 0x82, 0x33, 0xd2, 0xff,
    +	0x58, 0x55, 0xc7, 0x26, 0xb5, 0xfe, 0xe2, 0xfa, 0xff, 0x28, 0xb0, 0xa4, 0x49, 0x66, 0x2c, 0x89,
    +	0x10, 0x11, 0x98, 0x12, 0x01, 0x92, 0x65, 0x99, 0x18, 0xbb, 0xcc, 0xeb, 0xb7, 0x59, 0x3b, 0xb6,
    +	0x00, 0xdd, 0x34, 0x49, 0xe0, 0x24, 0x4d, 0x7b, 0x9b, 0x33, 0xe1, 0x15, 0xe2, 0x44, 0xdd, 0x80,
    +	0x84, 0xe8, 0xcb, 0x50, 0x49, 0xdf, 0x8c, 0x85, 0x21, 0xc5, 0x62, 0xd2, 0xf3, 0xb9, 0x83, 0xfd,
    +	0x6a, 0xa5, 0x9e, 0x83, 0x83, 0x73, 0x6b, 0xdb, 0x3f, 0x6f, 0x01, 0x7b, 0xc1, 0x7f, 0x0c, 0x92,
    +	0xcf, 0x97, 0x4c, 0xc9, 0xa7, 0x92, 0x37, 0x9d, 0x39, 0x42, 0xcf, 0x1b, 0x7c, 0x0d, 0xd7, 0x02,
    +	0xff, 0xe1, 0x9e, 0xf0, 0xfa, 0xea, 0x7d, 0x8d, 0xb3, 0xbf, 0x67, 0x01, 0xcb, 0x30, 0x86, 0xf9,
    +	0xad, 0x5d, 0x1a, 0x38, 0x7a, 0x3b, 0x34, 0x7c, 0x19, 0x46, 0x36, 0xc5, 0xf0, 0x67, 0x28, 0x9d,
    +	0x8c, 0x0e, 0x9b, 0xb4, 0xe5, 0xa4, 0x89, 0x97, 0xb8, 0xe2, 0x1f, 0x56, 0xd4, 0xec, 0xff, 0xd2,
    +	0x82, 0xd9, 0xfc, 0x6a, 0xe8, 0x0e, 0x9c, 0x09, 0x48, 0xa3, 0x1b, 0x84, 0x74, 0x4b, 0x88, 0x0b,
    +	0x90, 0x78, 0x01, 0xc6, 0xa7, 0xfa, 0x99, 0x83, 0xfd, 0xea, 0x19, 0x9c, 0x8d, 0x82, 0xf3, 0xea,
    +	0xa2, 0xb7, 0x60, 0xb2, 0x1b, 0x72, 0xc9, 0x8f, 0x09, 0x5d, 0xa1, 0x08, 0x63, 0xcf, 0x1e, 0x49,
    +	0xdd, 0x31, 0x20, 0x38, 0x81, 0x69, 0xff, 0x79, 0xbe, 0x1c, 0x95, 0xc7, 0x6b, 0x1b, 0x66, 0x3c,
    +	0xed, 0x3f, 0x3d, 0x01, 0xe5, 0x55, 0xff, 0xf9, 0x5e, 0xa7, 0x3e, 0x3b, 0x2e, 0xb5, 0x18, 0x03,
    +	0x09, 0x32, 0x38, 0x4d, 0xd9, 0xfe, 0x9b, 0x16, 0x9c, 0xd1, 0x11, 0xb5, 0x17, 0x87, 0xbd, 0xac,
    +	0x80, 0x4b, 0x5a, 0x00, 0x3e, 0x7e, 0xcc, 0x5d, 0xce, 0x08, 0xc0, 0x77, 0x52, 0xa7, 0x5e, 0x18,
    +	0x6d, 0x8f, 0xbf, 0x2d, 0xcd, 0x8a, 0xb6, 0xf7, 0x47, 0x16, 0x5f, 0x9f, 0x7a, 0xd7, 0xd1, 0x47,
    +	0x30, 0xdd, 0x76, 0xa2, 0xc6, 0xf6, 0xf2, 0xc3, 0x4e, 0xc0, 0x8d, 0xbb, 0x72, 0x9c, 0x5e, 0xee,
    +	0x35, 0x4e, 0xda, 0x47, 0xc6, 0xde, 0xe0, 0x6b, 0x09, 0x62, 0x38, 0x45, 0x1e, 0xdd, 0x87, 0x31,
    +	0x56, 0xc6, 0xde, 0x62, 0x87, 0x45, 0xb2, 0x4c, 0x5e, 0x6b, 0xca, 0x39, 0x68, 0x2d, 0xa6, 0x83,
    +	0x75, 0xa2, 0xf6, 0x2f, 0x95, 0x39, 0xd3, 0x60, 0x77, 0x8f, 0x97, 0x60, 0xb8, 0xe3, 0x37, 0x17,
    +	0x57, 0x97, 0xb0, 0x98, 0x05, 0x75, 0xee, 0xd5, 0x78, 0x31, 0x96, 0x70, 0x74, 0x19, 0x46, 0xc4,
    +	0x4f, 0x69, 0x8c, 0x67, 0x7b, 0x44, 0xe0, 0x85, 0x58, 0x41, 0xd1, 0x6b, 0x00, 0x9d, 0xc0, 0xdf,
    +	0x75, 0x9b, 0x2c, 0xf6, 0x56, 0xd9, 0xf4, 0xeb, 0xab, 0x29, 0x08, 0xd6, 0xb0, 0xd0, 0xdb, 0x30,
    +	0xd1, 0xf5, 0x42, 0x2e, 0x3f, 0x69, 0xc9, 0x38, 0x94, 0xc7, 0xd9, 0x1d, 0x1d, 0x88, 0x4d, 0x5c,
    +	0x34, 0x0f, 0x43, 0x91, 0xc3, 0xfc, 0xd4, 0x06, 0xf3, 0x5f, 0x0c, 0x6c, 0x50, 0x0c, 0x3d, 0xed,
    +	0x25, 0xad, 0x80, 0x45, 0x45, 0xf4, 0x55, 0x19, 0x16, 0x81, 0x9f, 0x44, 0xe2, 0xa9, 0x4e, 0x7f,
    +	0xa7, 0x96, 0x16, 0x14, 0x41, 0x3c, 0x01, 0x32, 0x68, 0xa1, 0xb7, 0x00, 0xc8, 0xc3, 0x88, 0x04,
    +	0x9e, 0xd3, 0x52, 0xde, 0xa5, 0x4a, 0x90, 0x59, 0xf2, 0xd7, 0xfd, 0xe8, 0x4e, 0x48, 0x96, 0x15,
    +	0x06, 0xd6, 0xb0, 0xed, 0x9f, 0x18, 0x03, 0x88, 0x2f, 0x1a, 0xe8, 0x11, 0x8c, 0x34, 0x9c, 0x8e,
    +	0xd3, 0xe0, 0x39, 0x9d, 0xcb, 0x79, 0x0f, 0xcb, 0xe3, 0x1a, 0x73, 0x8b, 0x02, 0x9d, 0x1b, 0x6f,
    +	0x64, 0x3e, 0x83, 0x11, 0x59, 0xdc, 0xd3, 0x60, 0xa3, 0xda, 0x43, 0xdf, 0xb1, 0x60, 0x4c, 0xc4,
    +	0xb6, 0x62, 0x33, 0x54, 0xca, 0xb7, 0xb7, 0x69, 0xed, 0xcf, 0xc7, 0x35, 0x78, 0x17, 0x5e, 0x97,
    +	0x2b, 0x54, 0x83, 0xf4, 0xec, 0x85, 0xde, 0x30, 0xfa, 0x9c, 0xbc, 0xdb, 0x96, 0x8d, 0xa1, 0x54,
    +	0x77, 0xdb, 0x51, 0x76, 0xd4, 0xe8, 0xd7, 0xda, 0x3b, 0xc6, 0xb5, 0x76, 0x20, 0xff, 0x89, 0xb6,
    +	0x21, 0x6f, 0xf7, 0xba, 0xd1, 0xa2, 0x9a, 0x1e, 0x03, 0x66, 0x30, 0xff, 0x85, 0xaf, 0x76, 0xb1,
    +	0xeb, 0x11, 0xff, 0xe5, 0x5b, 0x30, 0xd5, 0x34, 0xa5, 0x16, 0xb1, 0x12, 0x5f, 0xcc, 0xa3, 0x9b,
    +	0x10, 0x72, 0x62, 0x39, 0x25, 0x01, 0xc0, 0x49, 0xc2, 0xa8, 0xc6, 0x43, 0x02, 0xad, 0x7a, 0x9b,
    +	0xbe, 0x78, 0x2e, 0x66, 0xe7, 0xce, 0xe5, 0x5e, 0x18, 0x91, 0x36, 0xc5, 0x8c, 0x85, 0x84, 0x75,
    +	0x51, 0x17, 0x2b, 0x2a, 0xe8, 0x7d, 0x18, 0x62, 0x4f, 0x3c, 0xc3, 0xca, 0x48, 0xbe, 0x59, 0xc3,
    +	0x8c, 0x2e, 0x1c, 0x6f, 0x48, 0xf6, 0x37, 0xc4, 0x82, 0x02, 0xba, 0x21, 0x1f, 0x50, 0x87, 0xab,
    +	0xde, 0x9d, 0x90, 0xb0, 0x07, 0xd4, 0xa3, 0x0b, 0xcf, 0xc7, 0x6f, 0xa3, 0x79, 0x79, 0x66, 0x72,
    +	0x6c, 0xa3, 0x26, 0x15, 0xfb, 0xc4, 0x7f, 0x99, 0x73, 0x5b, 0x44, 0xea, 0xcb, 0xec, 0x9e, 0x99,
    +	0x97, 0x3b, 0x1e, 0xce, 0xbb, 0x26, 0x09, 0x9c, 0xa4, 0x49, 0x45, 0x68, 0xbe, 0xeb, 0xc5, 0x83,
    +	0xb3, 0x5e, 0xbc, 0x83, 0x6b, 0x0e, 0xd8, 0x69, 0xc4, 0x4b, 0xb0, 0xa8, 0x8f, 0x5c, 0x98, 0x0a,
    +	0x0c, 0xf1, 0x42, 0x06, 0xd8, 0xbb, 0xd4, 0x9f, 0x10, 0xa3, 0x65, 0x19, 0x31, 0xc9, 0xe0, 0x24,
    +	0x5d, 0xf4, 0xbe, 0x26, 0x28, 0x4d, 0x14, 0xdf, 0xfc, 0x7b, 0x89, 0x46, 0xb3, 0x3b, 0x30, 0x61,
    +	0x30, 0x9b, 0xa7, 0x6a, 0x82, 0xf4, 0x60, 0x3a, 0xc9, 0x59, 0x9e, 0xaa, 0xe5, 0xf1, 0x2d, 0x98,
    +	0x64, 0x1b, 0xe1, 0x81, 0xd3, 0x11, 0xac, 0xf8, 0xb2, 0xc1, 0x8a, 0xad, 0xcb, 0x65, 0x3e, 0x30,
    +	0x72, 0x08, 0x62, 0xc6, 0x69, 0xff, 0x9d, 0x41, 0x51, 0x59, 0xed, 0x22, 0x74, 0x05, 0x46, 0x45,
    +	0x07, 0x54, 0xaa, 0x3e, 0xc5, 0x18, 0xd6, 0x24, 0x00, 0xc7, 0x38, 0x2c, 0x43, 0x23, 0xab, 0xae,
    +	0xbd, 0x50, 0x88, 0x33, 0x34, 0x2a, 0x08, 0xd6, 0xb0, 0xe8, 0xe5, 0xf7, 0xbe, 0xef, 0x47, 0xea,
    +	0x0c, 0x56, 0x5b, 0x6d, 0x81, 0x95, 0x62, 0x01, 0xa5, 0x67, 0xef, 0x0e, 0x09, 0x3c, 0xd2, 0x32,
    +	0x73, 0xd5, 0xa8, 0xb3, 0xf7, 0xa6, 0x0e, 0xc4, 0x26, 0x2e, 0x95, 0x20, 0xfc, 0x90, 0xed, 0x5d,
    +	0x71, 0xc5, 0x8e, 0x5f, 0x7c, 0xd4, 0x79, 0x90, 0x0f, 0x09, 0x47, 0x5f, 0x81, 0x33, 0x2a, 0xd8,
    +	0xa6, 0x58, 0x99, 0xb2, 0xc5, 0x21, 0x43, 0x23, 0x76, 0x66, 0x31, 0x1b, 0x0d, 0xe7, 0xd5, 0x47,
    +	0xef, 0xc2, 0xa4, 0xb8, 0x86, 0x49, 0x8a, 0xc3, 0xa6, 0xfb, 0xe2, 0x4d, 0x03, 0x8a, 0x13, 0xd8,
    +	0x32, 0xdb, 0x0e, 0xbb, 0x9f, 0x48, 0x0a, 0x23, 0xe9, 0x6c, 0x3b, 0x3a, 0x1c, 0xa7, 0x6a, 0xa0,
    +	0x79, 0x98, 0xe2, 0x62, 0xa7, 0xeb, 0x6d, 0xf1, 0x39, 0x11, 0x4f, 0x60, 0xd5, 0x86, 0xbc, 0x6d,
    +	0x82, 0x71, 0x12, 0x1f, 0x5d, 0x83, 0x71, 0x27, 0x68, 0x6c, 0xbb, 0x11, 0x69, 0xd0, 0x5d, 0xc5,
    +	0x3c, 0x08, 0x35, 0xff, 0xcf, 0x79, 0x0d, 0x86, 0x0d, 0x4c, 0xf4, 0x1e, 0x0c, 0x84, 0x0f, 0x9c,
    +	0x8e, 0xe0, 0x3e, 0xf9, 0xac, 0x5c, 0xad, 0x60, 0xee, 0xfa, 0x45, 0xff, 0x63, 0x56, 0xd3, 0x7e,
    +	0x04, 0x27, 0x32, 0x82, 0x12, 0xd1, 0xa5, 0xe7, 0x74, 0x5c, 0x39, 0x2a, 0x89, 0x67, 0x1a, 0xf3,
    +	0xb5, 0x55, 0x39, 0x1e, 0x1a, 0x16, 0x5d, 0xdf, 0x2c, 0x78, 0x51, 0x2d, 0x36, 0x24, 0xa9, 0xf5,
    +	0xbd, 0x22, 0x01, 0x38, 0xc6, 0xb1, 0xff, 0xa4, 0x04, 0x53, 0x19, 0xe6, 0x41, 0x96, 0x1b, 0x3f,
    +	0x71, 0xcf, 0x8b, 0x53, 0xe1, 0x9b, 0xe9, 0x9f, 0x4a, 0x47, 0x48, 0xff, 0x54, 0xee, 0x95, 0xfe,
    +	0x69, 0xe0, 0xe3, 0xa4, 0x7f, 0x32, 0x47, 0x6c, 0xb0, 0xaf, 0x11, 0xcb, 0x48, 0x19, 0x35, 0x74,
    +	0xc4, 0x94, 0x51, 0xc6, 0xa0, 0x0f, 0xf7, 0x31, 0xe8, 0xff, 0x69, 0x09, 0xa6, 0x93, 0x96, 0xc5,
    +	0x63, 0xd0, 0xce, 0xbf, 0x6f, 0x68, 0xe7, 0x2f, 0xf7, 0x13, 0xf4, 0x20, 0x57, 0x53, 0x8f, 0x13,
    +	0x9a, 0xfa, 0xcf, 0xf6, 0x45, 0xad, 0x58, 0x6b, 0xff, 0xb7, 0x4a, 0x70, 0x2a, 0xd3, 0xe0, 0x7a,
    +	0x0c, 0x63, 0x73, 0xdb, 0x18, 0x9b, 0x57, 0xfb, 0x0e, 0x08, 0x91, 0x3b, 0x40, 0xf7, 0x12, 0x03,
    +	0x74, 0xa5, 0x7f, 0x92, 0xc5, 0xa3, 0xf4, 0xfd, 0x32, 0x5c, 0xc8, 0xac, 0x17, 0x2b, 0xb7, 0x57,
    +	0x0c, 0xe5, 0xf6, 0x6b, 0x09, 0xe5, 0xb6, 0x5d, 0x5c, 0xfb, 0xc9, 0x68, 0xbb, 0x45, 0x60, 0x04,
    +	0x16, 0xde, 0xe5, 0x31, 0x35, 0xdd, 0x46, 0x60, 0x04, 0x45, 0x08, 0x9b, 0x74, 0x7f, 0x98, 0x34,
    +	0xdc, 0xff, 0x83, 0x05, 0x67, 0x33, 0xe7, 0xe6, 0x18, 0xf4, 0x8c, 0xeb, 0xa6, 0x9e, 0xf1, 0xa5,
    +	0xbe, 0x57, 0x6b, 0x8e, 0xe2, 0xf1, 0xbb, 0x43, 0x39, 0xdf, 0xc2, 0xd4, 0x1f, 0xb7, 0x61, 0xcc,
    +	0x69, 0x34, 0x48, 0x18, 0xae, 0xb1, 0x54, 0x13, 0xdc, 0xf6, 0xfa, 0x2a, 0xbb, 0x9c, 0xc6, 0xc5,
    +	0x87, 0xfb, 0xd5, 0xd9, 0x24, 0x89, 0x18, 0x8c, 0x75, 0x0a, 0xe8, 0xeb, 0x30, 0x12, 0xca, 0x24,
    +	0xbf, 0x03, 0x8f, 0x9f, 0xe4, 0x97, 0x49, 0x92, 0x4a, 0xbd, 0xa3, 0x48, 0xa2, 0x3f, 0xa7, 0x87,
    +	0xf7, 0x2a, 0x50, 0x6c, 0xf2, 0x4e, 0x3e, 0x46, 0x90, 0x2f, 0xf3, 0x39, 0x7c, 0xb9, 0xaf, 0xe7,
    +	0xf0, 0xef, 0xc1, 0x74, 0xc8, 0xc3, 0xe5, 0xc6, 0x2e, 0x32, 0x7c, 0x2d, 0xb2, 0x88, 0x83, 0xf5,
    +	0x04, 0x0c, 0xa7, 0xb0, 0xd1, 0x8a, 0x6c, 0x95, 0x39, 0x43, 0xf1, 0xe5, 0x79, 0x29, 0x6e, 0x51,
    +	0x38, 0x44, 0x9d, 0x4c, 0x4e, 0x02, 0x1b, 0x7e, 0xad, 0x26, 0xfa, 0x3a, 0x00, 0x5d, 0x44, 0x42,
    +	0x85, 0x33, 0x9c, 0xcf, 0x42, 0x29, 0x6f, 0x69, 0x66, 0xbe, 0xc0, 0x60, 0x11, 0x0d, 0x96, 0x14,
    +	0x11, 0xac, 0x11, 0x44, 0x0e, 0x4c, 0xc4, 0xff, 0x30, 0xd9, 0x2c, 0x0a, 0xb0, 0xc6, 0x5a, 0x48,
    +	0x12, 0x67, 0xe6, 0x8d, 0x25, 0x9d, 0x04, 0x36, 0x29, 0xa2, 0xaf, 0xc1, 0xd9, 0xdd, 0x5c, 0xbf,
    +	0x23, 0x2e, 0x4b, 0x9e, 0x3f, 0xd8, 0xaf, 0x9e, 0xcd, 0xf7, 0x36, 0xca, 0xaf, 0x6f, 0xff, 0x8f,
    +	0x00, 0xcf, 0x14, 0x70, 0x7a, 0x34, 0x6f, 0xfa, 0x0c, 0xbc, 0x9c, 0xd4, 0xab, 0xcc, 0x66, 0x56,
    +	0x36, 0x14, 0x2d, 0x89, 0x0d, 0x55, 0xfa, 0xd8, 0x1b, 0xea, 0xa7, 0x2c, 0xed, 0x9a, 0xc5, 0x3d,
    +	0xca, 0xbf, 0x74, 0xc4, 0x13, 0xec, 0x09, 0xaa, 0xc0, 0x36, 0x33, 0xf4, 0x48, 0xaf, 0xf5, 0xdd,
    +	0x9d, 0xfe, 0x15, 0x4b, 0xbf, 0x9a, 0x9d, 0x60, 0x80, 0xab, 0x98, 0xae, 0x1f, 0xf5, 0xfb, 0x8f,
    +	0x2b, 0xd9, 0xc0, 0xef, 0x5b, 0x70, 0x36, 0x55, 0xcc, 0xfb, 0x40, 0x42, 0x11, 0xce, 0x70, 0xfd,
    +	0x63, 0x77, 0x5e, 0x12, 0xe4, 0xdf, 0x70, 0x43, 0x7c, 0xc3, 0xd9, 0x5c, 0xbc, 0x64, 0xd7, 0x7f,
    +	0xf2, 0x5f, 0x55, 0x4f, 0xb0, 0x06, 0x4c, 0x44, 0x9c, 0xdf, 0x75, 0xd4, 0x81, 0x8b, 0x8d, 0x6e,
    +	0x10, 0xc4, 0x8b, 0x35, 0x63, 0x73, 0xf2, 0xdb, 0xe2, 0xf3, 0x07, 0xfb, 0xd5, 0x8b, 0x8b, 0x3d,
    +	0x70, 0x71, 0x4f, 0x6a, 0xc8, 0x03, 0xd4, 0x4e, 0x79, 0xf7, 0x31, 0x06, 0x90, 0xa3, 0x05, 0x4a,
    +	0xfb, 0x02, 0x72, 0x3f, 0xdd, 0x0c, 0x1f, 0xc1, 0x0c, 0xca, 0xc7, 0xab, 0xbb, 0xf9, 0xc1, 0x64,
    +	0x33, 0x98, 0xbd, 0x05, 0x17, 0x8a, 0x17, 0xd3, 0x91, 0x42, 0x50, 0xfc, 0x9e, 0x05, 0xe7, 0x0b,
    +	0x43, 0xb3, 0xfd, 0x19, 0xbc, 0x2c, 0xd8, 0xdf, 0xb6, 0xe0, 0xd9, 0xcc, 0x1a, 0xc9, 0xc7, 0x83,
    +	0x0d, 0x5a, 0xa8, 0x39, 0xc3, 0xc6, 0x41, 0x8a, 0x24, 0x00, 0xc7, 0x38, 0x86, 0xbf, 0x68, 0xa9,
    +	0xa7, 0xbf, 0xe8, 0x3f, 0xb5, 0x20, 0x75, 0xd4, 0x1f, 0x83, 0xe4, 0xb9, 0x6a, 0x4a, 0x9e, 0xcf,
    +	0xf7, 0x33, 0x9a, 0x39, 0x42, 0xe7, 0xbf, 0x9d, 0x82, 0xd3, 0x39, 0x2f, 0xc8, 0x77, 0x61, 0x66,
    +	0xab, 0x41, 0xcc, 0x90, 0x21, 0x45, 0xd1, 0xff, 0x0a, 0xe3, 0x8b, 0x2c, 0x9c, 0x3a, 0xd8, 0xaf,
    +	0xce, 0xa4, 0x50, 0x70, 0xba, 0x09, 0xf4, 0x6d, 0x0b, 0x4e, 0x3a, 0x0f, 0xc2, 0x65, 0x7a, 0x83,
    +	0x70, 0x1b, 0x0b, 0x2d, 0xbf, 0xb1, 0x43, 0x05, 0x33, 0xb9, 0xad, 0xde, 0xc8, 0x54, 0x85, 0xdf,
    +	0xab, 0xa7, 0xf0, 0x8d, 0xe6, 0x2b, 0x07, 0xfb, 0xd5, 0x93, 0x59, 0x58, 0x38, 0xb3, 0x2d, 0x84,
    +	0x45, 0x0e, 0x3f, 0x27, 0xda, 0x2e, 0x0a, 0x6a, 0x93, 0xf5, 0xd4, 0x9f, 0x8b, 0xc4, 0x12, 0x82,
    +	0x15, 0x1d, 0xf4, 0x4d, 0x18, 0xdd, 0x92, 0xf1, 0x2b, 0x32, 0x44, 0xee, 0x78, 0x20, 0x8b, 0xa3,
    +	0x7a, 0x70, 0x07, 0x1c, 0x85, 0x84, 0x63, 0xa2, 0xe8, 0x5d, 0x28, 0x7b, 0x9b, 0x61, 0x51, 0x08,
    +	0xe9, 0x84, 0xa7, 0x35, 0x8f, 0x76, 0xb5, 0xbe, 0x52, 0xc7, 0xb4, 0x22, 0xba, 0x01, 0xe5, 0xe0,
    +	0x7e, 0x53, 0xd8, 0x71, 0x32, 0x37, 0x29, 0x5e, 0x58, 0xca, 0xe9, 0x15, 0xa3, 0x84, 0x17, 0x96,
    +	0x30, 0x25, 0x81, 0x6a, 0x30, 0xc8, 0x9e, 0x5d, 0x0b, 0xd1, 0x36, 0xf3, 0x2a, 0x5f, 0x10, 0xbe,
    +	0x80, 0xbf, 0x87, 0x64, 0x08, 0x98, 0x13, 0x42, 0x1b, 0x30, 0xd4, 0x70, 0xbd, 0x26, 0x09, 0x84,
    +	0x2c, 0xfb, 0xb9, 0x4c, 0x8b, 0x0d, 0xc3, 0xc8, 0xa1, 0xc9, 0x0d, 0x18, 0x0c, 0x03, 0x0b, 0x5a,
    +	0x8c, 0x2a, 0xe9, 0x6c, 0x6f, 0xca, 0x13, 0x2b, 0x9b, 0x2a, 0xe9, 0x6c, 0xaf, 0xd4, 0x0b, 0xa9,
    +	0x32, 0x0c, 0x2c, 0x68, 0xa1, 0xb7, 0xa0, 0xb4, 0xd9, 0x10, 0x4f, 0xaa, 0x33, 0xd5, 0x9b, 0x66,
    +	0xc0, 0xb2, 0x85, 0xa1, 0x83, 0xfd, 0x6a, 0x69, 0x65, 0x11, 0x97, 0x36, 0x1b, 0x68, 0x1d, 0x86,
    +	0x37, 0x79, 0xbc, 0x20, 0xa1, 0x1f, 0x7d, 0x31, 0x3b, 0x94, 0x51, 0x2a, 0xa4, 0x10, 0x7f, 0xdb,
    +	0x2a, 0x00, 0x58, 0x12, 0x61, 0x09, 0xcf, 0x54, 0xdc, 0x23, 0x11, 0x29, 0x76, 0xee, 0x68, 0xb1,
    +	0xaa, 0x44, 0xa0, 0x71, 0x45, 0x05, 0x6b, 0x14, 0xe9, 0xaa, 0x76, 0x1e, 0x75, 0x03, 0x96, 0x11,
    +	0x45, 0x18, 0x66, 0x32, 0x57, 0xf5, 0xbc, 0x44, 0x2a, 0x5a, 0xd5, 0x0a, 0x09, 0xc7, 0x44, 0xd1,
    +	0x0e, 0x4c, 0xec, 0x86, 0x9d, 0x6d, 0x22, 0xb7, 0x34, 0x8b, 0x30, 0x98, 0x23, 0xcd, 0xde, 0x15,
    +	0x88, 0x6e, 0x10, 0x75, 0x9d, 0x56, 0x8a, 0x0b, 0xb1, 0x6b, 0xcd, 0x5d, 0x9d, 0x18, 0x36, 0x69,
    +	0xd3, 0xe1, 0xff, 0xa8, 0xeb, 0xdf, 0xdf, 0x8b, 0x88, 0x08, 0xf0, 0x9a, 0x39, 0xfc, 0x1f, 0x70,
    +	0x94, 0xf4, 0xf0, 0x0b, 0x00, 0x96, 0x44, 0xd0, 0x5d, 0x31, 0x3c, 0x8c, 0x7b, 0x4e, 0xe7, 0x07,
    +	0xc2, 0x9f, 0x97, 0x48, 0x39, 0x83, 0xc2, 0xb8, 0x65, 0x4c, 0x8a, 0x71, 0xc9, 0xce, 0xb6, 0x1f,
    +	0xf9, 0x5e, 0x82, 0x43, 0xcf, 0xe4, 0x73, 0xc9, 0x5a, 0x06, 0x7e, 0x9a, 0x4b, 0x66, 0x61, 0xe1,
    +	0xcc, 0xb6, 0x50, 0x13, 0x26, 0x3b, 0x7e, 0x10, 0x3d, 0xf0, 0x03, 0xb9, 0xbe, 0x50, 0x81, 0xa2,
    +	0xd4, 0xc0, 0x14, 0x2d, 0x32, 0xb7, 0x20, 0x13, 0x82, 0x13, 0x34, 0xd1, 0x97, 0x61, 0x38, 0x6c,
    +	0x38, 0x2d, 0xb2, 0x7a, 0xbb, 0x72, 0x22, 0xff, 0xf8, 0xa9, 0x73, 0x94, 0x9c, 0xd5, 0xc5, 0xc3,
    +	0x3d, 0x71, 0x14, 0x2c, 0xc9, 0xa1, 0x15, 0x18, 0x64, 0x39, 0xef, 0x59, 0x34, 0xe2, 0x9c, 0x78,
    +	0xfe, 0xa9, 0x47, 0x3d, 0x9c, 0x37, 0xb1, 0x62, 0xcc, 0xab, 0xd3, 0x3d, 0x20, 0x34, 0x05, 0x7e,
    +	0x58, 0x39, 0x95, 0xbf, 0x07, 0x84, 0x82, 0xe1, 0x76, 0xbd, 0x68, 0x0f, 0x28, 0x24, 0x1c, 0x13,
    +	0xa5, 0x9c, 0x99, 0x72, 0xd3, 0xd3, 0x05, 0x0e, 0x9b, 0xb9, 0xbc, 0x94, 0x71, 0x66, 0xca, 0x49,
    +	0x29, 0x09, 0xfb, 0x37, 0x47, 0xd2, 0x32, 0x0b, 0xd3, 0x30, 0xfd, 0xc7, 0x56, 0xca, 0x63, 0xe3,
    +	0xf3, 0xfd, 0x2a, 0xbc, 0x9f, 0xe0, 0xc5, 0xf5, 0xdb, 0x16, 0x9c, 0xee, 0x64, 0x7e, 0x88, 0x10,
    +	0x00, 0xfa, 0xd3, 0x9b, 0xf3, 0x4f, 0x57, 0x91, 0xab, 0xb3, 0xe1, 0x38, 0xa7, 0xa5, 0xa4, 0x72,
    +	0xa0, 0xfc, 0xb1, 0x95, 0x03, 0x6b, 0x30, 0xd2, 0xe0, 0x37, 0x39, 0x99, 0x3c, 0xa2, 0xaf, 0xb8,
    +	0xab, 0xdc, 0x4e, 0x2b, 0x2a, 0x62, 0x45, 0x02, 0xfd, 0xb4, 0x05, 0xe7, 0x93, 0x5d, 0xc7, 0x84,
    +	0x81, 0x85, 0xbb, 0x26, 0x57, 0x6b, 0xad, 0x88, 0xef, 0x4f, 0xc9, 0xff, 0x06, 0xf2, 0x61, 0x2f,
    +	0x04, 0x5c, 0xdc, 0x18, 0x5a, 0xca, 0xd0, 0xab, 0x0d, 0x99, 0x36, 0xc9, 0x3e, 0x74, 0x6b, 0x6f,
    +	0xc0, 0x78, 0xdb, 0xef, 0x7a, 0x91, 0xf0, 0xba, 0x14, 0xae, 0x5b, 0xcc, 0x65, 0x69, 0x4d, 0x2b,
    +	0xc7, 0x06, 0x56, 0x42, 0x23, 0x37, 0xf2, 0xd8, 0x1a, 0xb9, 0x0f, 0x61, 0xdc, 0xd3, 0x1e, 0x24,
    +	0x14, 0xdd, 0x60, 0x85, 0x76, 0x51, 0xc3, 0xe6, 0xbd, 0xd4, 0x4b, 0xb0, 0x41, 0xad, 0x58, 0x5b,
    +	0x06, 0x1f, 0x4f, 0x5b, 0x76, 0xac, 0x57, 0x62, 0xfb, 0xef, 0x95, 0x32, 0x6e, 0x0c, 0x5c, 0x2b,
    +	0xf7, 0x8e, 0xa9, 0x95, 0xbb, 0x94, 0xd4, 0xca, 0xa5, 0x4c, 0x55, 0x86, 0x42, 0xae, 0xff, 0x0c,
    +	0xa6, 0x7d, 0xc7, 0xd2, 0xfe, 0x0b, 0x16, 0x9c, 0x61, 0xb6, 0x0f, 0xda, 0xc0, 0xc7, 0xb6, 0x77,
    +	0x30, 0x87, 0xd8, 0x5b, 0xd9, 0xe4, 0x70, 0x5e, 0x3b, 0x76, 0x0b, 0x2e, 0xf6, 0x3a, 0x77, 0x99,
    +	0x7f, 0x71, 0x53, 0xb9, 0x57, 0xc4, 0xfe, 0xc5, 0xcd, 0xd5, 0x25, 0xcc, 0x20, 0xfd, 0x86, 0x5d,
    +	0xb4, 0xff, 0x4f, 0x0b, 0xca, 0x35, 0xbf, 0x79, 0x0c, 0x37, 0xfa, 0x2f, 0x19, 0x37, 0xfa, 0x67,
    +	0xb2, 0x4f, 0xfc, 0x66, 0xae, 0xb1, 0x6f, 0x39, 0x61, 0xec, 0x3b, 0x9f, 0x47, 0xa0, 0xd8, 0xb4,
    +	0xf7, 0xb7, 0xcb, 0x30, 0x56, 0xf3, 0x9b, 0x6a, 0x9f, 0xfd, 0x77, 0x8f, 0xf3, 0x8c, 0x28, 0x37,
    +	0x67, 0x99, 0x46, 0x99, 0xf9, 0x13, 0xcb, 0xa8, 0x17, 0x7f, 0xc6, 0x5e, 0x13, 0xdd, 0x23, 0xee,
    +	0xd6, 0x76, 0x44, 0x9a, 0xc9, 0xcf, 0x39, 0xbe, 0xd7, 0x44, 0x7f, 0x58, 0x86, 0xa9, 0x44, 0xeb,
    +	0xa8, 0x05, 0x13, 0x2d, 0xdd, 0x94, 0x24, 0xd6, 0xe9, 0x63, 0x59, 0xa1, 0xc4, 0x6b, 0x0c, 0xad,
    +	0x08, 0x9b, 0xc4, 0xd1, 0x1c, 0x80, 0xa7, 0xfb, 0xa4, 0xab, 0x98, 0xd0, 0x9a, 0x3f, 0xba, 0x86,
    +	0x81, 0xde, 0x84, 0xb1, 0xc8, 0xef, 0xf8, 0x2d, 0x7f, 0x6b, 0xef, 0xa6, 0x8a, 0x8f, 0xac, 0x5c,
    +	0x96, 0x37, 0x62, 0x10, 0xd6, 0xf1, 0xd0, 0x43, 0x98, 0x51, 0x44, 0xea, 0x4f, 0xc0, 0xbc, 0xc6,
    +	0xd4, 0x26, 0xeb, 0x49, 0x8a, 0x38, 0xdd, 0x08, 0x7a, 0x0b, 0x26, 0x99, 0xef, 0x34, 0xab, 0x7f,
    +	0x93, 0xec, 0xc9, 0xe0, 0xd2, 0x4c, 0xc2, 0x5e, 0x33, 0x20, 0x38, 0x81, 0x89, 0x16, 0x61, 0xa6,
    +	0xed, 0x86, 0x89, 0xea, 0x43, 0xac, 0x3a, 0xeb, 0xc0, 0x5a, 0x12, 0x88, 0xd3, 0xf8, 0xf6, 0x2f,
    +	0x88, 0x39, 0xf6, 0x22, 0xf7, 0xd3, 0xed, 0xf8, 0xc9, 0xde, 0x8e, 0xdf, 0xb7, 0x60, 0x9a, 0xb6,
    +	0xce, 0x1c, 0x42, 0xa5, 0x20, 0xa5, 0xd2, 0x8f, 0x58, 0x05, 0xe9, 0x47, 0x2e, 0x51, 0xb6, 0xdd,
    +	0xf4, 0xbb, 0x91, 0xd0, 0x8e, 0x6a, 0x7c, 0x99, 0x96, 0x62, 0x01, 0x15, 0x78, 0x24, 0x08, 0xc4,
    +	0xab, 0x7b, 0x1d, 0x8f, 0x04, 0x01, 0x16, 0x50, 0x99, 0x9d, 0x64, 0x20, 0x3b, 0x3b, 0x09, 0x0f,
    +	0x32, 0x2f, 0xfc, 0xe8, 0x84, 0x48, 0xab, 0x05, 0x99, 0x97, 0x0e, 0x76, 0x31, 0x8e, 0xfd, 0xd7,
    +	0xca, 0x50, 0xa9, 0xf9, 0xcd, 0x45, 0x12, 0x44, 0xee, 0xa6, 0xdb, 0x70, 0x22, 0xa2, 0xe5, 0xdb,
    +	0x7d, 0x0d, 0x80, 0x3d, 0x22, 0x0b, 0xb2, 0x22, 0xa8, 0xd7, 0x15, 0x04, 0x6b, 0x58, 0x54, 0x2a,
    +	0xd9, 0x21, 0x7b, 0xda, 0xc9, 0xab, 0xa4, 0x92, 0x9b, 0xbc, 0x18, 0x4b, 0x38, 0xba, 0xc5, 0x42,
    +	0x19, 0x2d, 0x3f, 0xec, 0xb8, 0x01, 0xcf, 0x4c, 0x4e, 0x1a, 0xbe, 0xd7, 0x0c, 0x45, 0xe0, 0xb7,
    +	0x8a, 0x08, 0x44, 0x94, 0x82, 0xe3, 0xcc, 0x5a, 0xa8, 0x06, 0x27, 0x1b, 0x01, 0x69, 0x12, 0x2f,
    +	0x72, 0x9d, 0xd6, 0x42, 0xd7, 0x6b, 0xb6, 0x78, 0x4a, 0x9e, 0x01, 0x23, 0x83, 0xe8, 0xc9, 0xc5,
    +	0x0c, 0x1c, 0x9c, 0x59, 0x53, 0x7c, 0x0a, 0x23, 0x32, 0x98, 0xfa, 0x14, 0x56, 0x4f, 0xc2, 0x59,
    +	0xe3, 0xf1, 0x10, 0x2e, 0x6e, 0x3b, 0xae, 0xc7, 0xea, 0x0d, 0x25, 0x1a, 0xcf, 0xc0, 0xc1, 0x99,
    +	0x35, 0xed, 0x3f, 0x2d, 0xc3, 0x38, 0x9d, 0x18, 0xe5, 0x71, 0xf3, 0x86, 0xe1, 0x71, 0x73, 0x31,
    +	0xe1, 0x71, 0x33, 0xad, 0xe3, 0x6a, 0xfe, 0x35, 0xef, 0x03, 0xf2, 0x45, 0x52, 0x82, 0xeb, 0xc4,
    +	0x23, 0x7c, 0xc8, 0x98, 0x92, 0xb1, 0x1c, 0xfb, 0xa3, 0xdc, 0x4e, 0x61, 0xe0, 0x8c, 0x5a, 0x9f,
    +	0xfa, 0xea, 0x1c, 0xaf, 0xaf, 0xce, 0x6f, 0x59, 0x6c, 0x05, 0x2c, 0xad, 0xd7, 0xb9, 0x13, 0x39,
    +	0xba, 0x0a, 0x63, 0xec, 0x18, 0x63, 0xb1, 0x3c, 0xa4, 0x4b, 0x0b, 0xcb, 0x6e, 0xbb, 0x1e, 0x17,
    +	0x63, 0x1d, 0x07, 0x5d, 0x86, 0x91, 0x90, 0x38, 0x41, 0x63, 0x5b, 0x9d, 0xe1, 0xc2, 0xff, 0x84,
    +	0x97, 0x61, 0x05, 0x45, 0x1f, 0xc4, 0x11, 0xe1, 0xcb, 0xf9, 0x1e, 0xe9, 0x7a, 0x7f, 0x38, 0x1f,
    +	0xcc, 0x0f, 0x03, 0x6f, 0xdf, 0x03, 0x94, 0xc6, 0xef, 0xe3, 0x89, 0x5f, 0xd5, 0x8c, 0x59, 0x3c,
    +	0x9a, 0x8a, 0x57, 0xfc, 0xef, 0x2d, 0x98, 0xac, 0xf9, 0x4d, 0xca, 0x9f, 0x7f, 0x98, 0x98, 0xb1,
    +	0x9e, 0xc1, 0x63, 0xa8, 0x20, 0x83, 0xc7, 0x81, 0x05, 0x17, 0xd8, 0xe7, 0x47, 0xc4, 0x6b, 0xc6,
    +	0x06, 0x4f, 0xdd, 0xdf, 0xe3, 0x01, 0x4c, 0x05, 0x3c, 0x7c, 0xd7, 0x9a, 0xd3, 0xe9, 0xb8, 0xde,
    +	0x96, 0x7c, 0xdf, 0xf6, 0x46, 0xe1, 0xbb, 0x8d, 0x24, 0x49, 0x11, 0x02, 0x4c, 0x77, 0x54, 0x35,
    +	0x88, 0xe2, 0x64, 0x2b, 0x3c, 0x2b, 0x8d, 0xd6, 0x1f, 0x2d, 0x41, 0xa5, 0x96, 0x95, 0x26, 0x81,
    +	0x80, 0xd3, 0x75, 0xec, 0xe7, 0x60, 0xb0, 0xe6, 0x37, 0x7b, 0x04, 0x8f, 0xfe, 0x3b, 0x16, 0x0c,
    +	0xd7, 0xfc, 0xe6, 0x31, 0x98, 0x10, 0xdf, 0x31, 0x4d, 0x88, 0x67, 0x72, 0x36, 0x47, 0x8e, 0xd5,
    +	0xf0, 0x9f, 0x0d, 0xc0, 0x04, 0xed, 0xa7, 0xbf, 0x25, 0xd7, 0xab, 0xb1, 0x36, 0xac, 0x3e, 0xd6,
    +	0x06, 0xbd, 0xd0, 0xfa, 0xad, 0x96, 0xff, 0x20, 0xb9, 0x76, 0x57, 0x58, 0x29, 0x16, 0x50, 0xf4,
    +	0x0a, 0x8c, 0x74, 0x02, 0xb2, 0xeb, 0xfa, 0xe2, 0xa6, 0xa8, 0x19, 0x64, 0x6b, 0xa2, 0x1c, 0x2b,
    +	0x0c, 0xf4, 0x06, 0x8c, 0x87, 0xae, 0x47, 0xa5, 0x62, 0x7e, 0xf4, 0x0e, 0xb0, 0x83, 0x81, 0xe7,
    +	0xd2, 0xd3, 0xca, 0xb1, 0x81, 0x85, 0xee, 0xc1, 0x28, 0xfb, 0xcf, 0x78, 0xeb, 0xe0, 0x91, 0x79,
    +	0xab, 0x48, 0x94, 0x2e, 0x08, 0xe0, 0x98, 0x16, 0x15, 0x38, 0x22, 0x99, 0x8f, 0x2a, 0x14, 0x41,
    +	0x84, 0x95, 0xc0, 0xa1, 0x32, 0x55, 0x85, 0x58, 0xc3, 0x42, 0x2f, 0xc3, 0x68, 0xe4, 0xb8, 0xad,
    +	0x5b, 0xae, 0xc7, 0x3c, 0x51, 0x68, 0xff, 0x45, 0xbe, 0x72, 0x51, 0x88, 0x63, 0x38, 0xbd, 0xd5,
    +	0xb0, 0xd8, 0x6a, 0x0b, 0x7b, 0x91, 0xc8, 0xa2, 0x59, 0xe6, 0xb7, 0x9a, 0x5b, 0xaa, 0x14, 0x6b,
    +	0x18, 0x68, 0x1b, 0xce, 0xb9, 0x1e, 0xcb, 0x3b, 0x47, 0xea, 0x3b, 0x6e, 0x67, 0xe3, 0x56, 0xfd,
    +	0x2e, 0x09, 0xdc, 0xcd, 0xbd, 0x05, 0xa7, 0xb1, 0x43, 0xbc, 0x26, 0x53, 0x7a, 0x8d, 0x2c, 0x3c,
    +	0x2f, 0xba, 0x78, 0x6e, 0xb5, 0x00, 0x17, 0x17, 0x52, 0x42, 0x36, 0xe5, 0x39, 0x01, 0x71, 0xda,
    +	0x42, 0xbb, 0xc5, 0x73, 0x56, 0xb1, 0x12, 0x2c, 0x20, 0xf6, 0xeb, 0x6c, 0x4f, 0xdc, 0xae, 0xa3,
    +	0xcf, 0x1a, 0x3c, 0xf4, 0xb4, 0xce, 0x43, 0x0f, 0xf7, 0xab, 0x43, 0xb7, 0xeb, 0x5a, 0x9c, 0xad,
    +	0x6b, 0x70, 0xaa, 0xe6, 0x37, 0x6b, 0x7e, 0x10, 0xad, 0xf8, 0xc1, 0x03, 0x27, 0x68, 0xca, 0x25,
    +	0x58, 0x95, 0x91, 0xc6, 0x28, 0x67, 0x18, 0xe4, 0x6c, 0xd6, 0x88, 0x22, 0xf6, 0x3a, 0xbb, 0x9f,
    +	0x1c, 0xf1, 0x61, 0x77, 0x83, 0x49, 0xca, 0x2a, 0xbb, 0xe3, 0x75, 0x27, 0x22, 0xe8, 0x36, 0x4c,
    +	0x34, 0x74, 0xd9, 0x44, 0x54, 0x7f, 0x49, 0x9e, 0xe8, 0x86, 0xe0, 0x92, 0x29, 0xcc, 0x98, 0xf5,
    +	0xed, 0xdf, 0xb7, 0x44, 0x2b, 0x1a, 0xd7, 0xe8, 0xe3, 0x60, 0x59, 0xcc, 0x62, 0x4e, 0xfc, 0xa6,
    +	0x7a, 0xaa, 0x5f, 0xc6, 0x84, 0xbe, 0x06, 0x67, 0x8d, 0x42, 0xe9, 0x14, 0xa2, 0xe5, 0xdf, 0x67,
    +	0x9a, 0x49, 0x9c, 0x87, 0x84, 0xf3, 0xeb, 0xdb, 0x3f, 0x06, 0xa7, 0x93, 0xdf, 0x25, 0x38, 0xfa,
    +	0x63, 0x7e, 0x5d, 0xe9, 0x68, 0x5f, 0x67, 0xbf, 0x09, 0x33, 0x35, 0x5f, 0x8b, 0xa2, 0xc2, 0xe6,
    +	0xaf, 0x77, 0x30, 0xb7, 0x5f, 0x1e, 0x61, 0x67, 0x7d, 0x22, 0x65, 0x23, 0xfa, 0x06, 0x4c, 0x86,
    +	0x84, 0x45, 0x30, 0x94, 0x3a, 0xea, 0x82, 0xa8, 0x0c, 0xf5, 0x65, 0x1d, 0x93, 0xdf, 0xc3, 0xcd,
    +	0x32, 0x9c, 0xa0, 0x86, 0xda, 0x30, 0xf9, 0xc0, 0xf5, 0x9a, 0xfe, 0x83, 0x50, 0xd2, 0x1f, 0xc9,
    +	0x37, 0x78, 0xdd, 0xe3, 0x98, 0x89, 0x3e, 0x1a, 0xcd, 0xdd, 0x33, 0x88, 0xe1, 0x04, 0x71, 0xca,
    +	0x6a, 0x82, 0xae, 0x37, 0x1f, 0xde, 0x09, 0x49, 0x20, 0xe2, 0x2b, 0x32, 0x56, 0x83, 0x65, 0x21,
    +	0x8e, 0xe1, 0x94, 0xd5, 0xb0, 0x3f, 0x2c, 0xac, 0x03, 0xe3, 0x65, 0x82, 0xd5, 0x60, 0x55, 0x8a,
    +	0x35, 0x0c, 0xca, 0x8a, 0xd9, 0xbf, 0x75, 0xdf, 0xc3, 0xbe, 0x1f, 0x49, 0xe6, 0xcd, 0xb2, 0xea,
    +	0x6a, 0xe5, 0xd8, 0xc0, 0xca, 0x89, 0xe6, 0x38, 0x70, 0xd4, 0x68, 0x8e, 0x28, 0x2a, 0x88, 0x64,
    +	0xc1, 0xe3, 0x91, 0x5f, 0x2b, 0x8a, 0x64, 0x71, 0xf8, 0x58, 0x51, 0x2e, 0xa8, 0xc0, 0xb3, 0x29,
    +	0x06, 0x68, 0x90, 0x87, 0xab, 0x64, 0x26, 0xf9, 0x3a, 0x1f, 0x1d, 0x09, 0x43, 0xcb, 0x30, 0x1c,
    +	0xee, 0x85, 0x8d, 0xa8, 0x15, 0x16, 0x65, 0x4e, 0xae, 0x33, 0x94, 0x58, 0x1e, 0xe5, 0xff, 0x43,
    +	0x2c, 0xeb, 0xa2, 0x06, 0x9c, 0x10, 0x14, 0x17, 0xb7, 0x1d, 0x4f, 0x65, 0x56, 0xe5, 0xbe, 0xb7,
    +	0x57, 0x0f, 0xf6, 0xab, 0x27, 0x44, 0xcb, 0x3a, 0xf8, 0x70, 0xbf, 0x4a, 0xb7, 0x64, 0x06, 0x04,
    +	0x67, 0x51, 0xe3, 0x4b, 0xbe, 0xd1, 0xf0, 0xdb, 0x9d, 0x5a, 0xe0, 0x6f, 0xba, 0x2d, 0x52, 0xe4,
    +	0xd6, 0x50, 0x37, 0x30, 0xc5, 0x92, 0x37, 0xca, 0x70, 0x82, 0x1a, 0xba, 0x0f, 0x53, 0x4e, 0xa7,
    +	0x33, 0x1f, 0xb4, 0xfd, 0x40, 0x36, 0x30, 0x96, 0x6f, 0x1f, 0x9b, 0x37, 0x51, 0x79, 0x62, 0xd5,
    +	0x44, 0x21, 0x4e, 0x12, 0xa4, 0x03, 0x25, 0x36, 0x9a, 0x31, 0x50, 0x13, 0xf1, 0x40, 0x89, 0x7d,
    +	0x99, 0x31, 0x50, 0x19, 0x10, 0x9c, 0x45, 0xcd, 0xfe, 0xf3, 0xec, 0x76, 0xc3, 0xa2, 0x9d, 0xb3,
    +	0x47, 0x6e, 0x6d, 0x98, 0xe8, 0x30, 0xb6, 0x2f, 0x92, 0x1e, 0x0a, 0x56, 0xf1, 0x46, 0x9f, 0x6a,
    +	0xf8, 0x07, 0x2c, 0xab, 0xb3, 0xe1, 0x8e, 0x5d, 0xd3, 0xc9, 0x61, 0x93, 0xba, 0xfd, 0xaf, 0x67,
    +	0x99, 0xe8, 0x58, 0xe7, 0xba, 0xf5, 0x61, 0xf1, 0xe4, 0x57, 0x48, 0xc9, 0xb3, 0xf9, 0x56, 0xac,
    +	0x78, 0x7d, 0x89, 0x67, 0xc3, 0x58, 0xd6, 0x45, 0x5f, 0x87, 0x49, 0xd7, 0x73, 0xe3, 0x24, 0xeb,
    +	0x61, 0xe5, 0x64, 0x7e, 0x2c, 0x39, 0x85, 0xa5, 0x27, 0x44, 0xd5, 0x2b, 0xe3, 0x04, 0x31, 0xf4,
    +	0x01, 0xf3, 0x50, 0x96, 0xa4, 0x4b, 0xfd, 0x90, 0xd6, 0x9d, 0x91, 0x25, 0x59, 0x8d, 0x08, 0xea,
    +	0xc2, 0x89, 0x74, 0xb2, 0xf9, 0xb0, 0x62, 0xe7, 0x5f, 0x00, 0xd3, 0xf9, 0xe2, 0xe3, 0xcc, 0x95,
    +	0x69, 0x58, 0x88, 0xb3, 0xe8, 0xa3, 0x5b, 0xc9, 0x54, 0xe0, 0x65, 0xc3, 0xfe, 0x95, 0x4a, 0x07,
    +	0x3e, 0x51, 0x98, 0x05, 0x7c, 0x0b, 0xce, 0x6b, 0x79, 0x8d, 0xaf, 0x07, 0x0e, 0xf3, 0x90, 0x73,
    +	0xd9, 0x69, 0xa4, 0x09, 0xb5, 0xcf, 0x1e, 0xec, 0x57, 0xcf, 0x6f, 0x14, 0x21, 0xe2, 0x62, 0x3a,
    +	0xe8, 0x36, 0x9c, 0xe2, 0x91, 0x90, 0x96, 0x88, 0xd3, 0x6c, 0xb9, 0x9e, 0x92, 0x9a, 0x39, 0xef,
    +	0x3a, 0x7b, 0xb0, 0x5f, 0x3d, 0x35, 0x9f, 0x85, 0x80, 0xb3, 0xeb, 0xa1, 0x77, 0x60, 0xb4, 0xe9,
    +	0x49, 0x2e, 0x3b, 0x64, 0xa4, 0x8e, 0x1e, 0x5d, 0x5a, 0xaf, 0xab, 0xef, 0x8f, 0xff, 0xe0, 0xb8,
    +	0x02, 0xda, 0xe2, 0x06, 0x58, 0xa5, 0x35, 0x1f, 0x4e, 0x05, 0xc8, 0x4d, 0x1a, 0x96, 0x8c, 0xd0,
    +	0x22, 0xdc, 0xf3, 0x40, 0x3d, 0x3f, 0x35, 0xa2, 0x8e, 0x18, 0x84, 0xd1, 0xfb, 0x80, 0x44, 0xbe,
    +	0xaf, 0xf9, 0x06, 0xcb, 0xa8, 0xa9, 0x79, 0x45, 0x2b, 0x3d, 0x49, 0x3d, 0x85, 0x81, 0x33, 0x6a,
    +	0xa1, 0x1b, 0x94, 0x3d, 0xea, 0xa5, 0x82, 0xfd, 0x4a, 0x7d, 0x56, 0x65, 0x89, 0x74, 0x02, 0xc2,
    +	0x1c, 0x79, 0x4d, 0x8a, 0x38, 0x51, 0x0f, 0x35, 0xe1, 0x9c, 0xd3, 0x8d, 0x7c, 0x66, 0xdb, 0x36,
    +	0x51, 0x37, 0xfc, 0x1d, 0xe2, 0x31, 0xb7, 0x92, 0x11, 0x16, 0x78, 0xf7, 0xdc, 0x7c, 0x01, 0x1e,
    +	0x2e, 0xa4, 0x42, 0xaf, 0x53, 0x74, 0x2c, 0x34, 0xb3, 0xb3, 0x11, 0x25, 0x81, 0xfb, 0x62, 0x48,
    +	0x0c, 0xf4, 0x26, 0x8c, 0x6d, 0xfb, 0x61, 0xb4, 0x4e, 0xa2, 0x07, 0x7e, 0xb0, 0x23, 0x12, 0x8c,
    +	0xc4, 0x49, 0x9d, 0x62, 0x10, 0xd6, 0xf1, 0xd0, 0x4b, 0x30, 0xcc, 0x9c, 0x1e, 0x57, 0x97, 0xd8,
    +	0x59, 0x3b, 0x12, 0xf3, 0x98, 0x1b, 0xbc, 0x18, 0x4b, 0xb8, 0x44, 0x5d, 0xad, 0x2d, 0x32, 0x76,
    +	0x9c, 0x40, 0x5d, 0xad, 0x2d, 0x62, 0x09, 0xa7, 0xcb, 0x35, 0xdc, 0x76, 0x02, 0x52, 0x0b, 0xfc,
    +	0x06, 0x09, 0xb5, 0x54, 0x62, 0xcf, 0xf0, 0xf4, 0x29, 0x74, 0xb9, 0xd6, 0xb3, 0x10, 0x70, 0x76,
    +	0x3d, 0x44, 0xd2, 0x39, 0xbd, 0x27, 0xf3, 0x8d, 0xfe, 0x69, 0x71, 0xb0, 0xcf, 0xb4, 0xde, 0x1e,
    +	0x4c, 0xab, 0x6c, 0xe2, 0x3c, 0x61, 0x4a, 0x58, 0x99, 0xca, 0xcf, 0xe9, 0x9f, 0xf9, 0xd6, 0x47,
    +	0xb9, 0x51, 0xac, 0x26, 0x28, 0xe1, 0x14, 0x6d, 0x23, 0xb2, 0xf3, 0x74, 0xcf, 0xc8, 0xce, 0x57,
    +	0x60, 0x34, 0xec, 0xde, 0x6f, 0xfa, 0x6d, 0xc7, 0xf5, 0x98, 0xef, 0x98, 0x76, 0x71, 0xaf, 0x4b,
    +	0x00, 0x8e, 0x71, 0xd0, 0x0a, 0x8c, 0x38, 0xd2, 0x47, 0x02, 0xe5, 0x07, 0xad, 0x54, 0x9e, 0x11,
    +	0x3c, 0x8e, 0x9b, 0xf4, 0x8a, 0x50, 0x75, 0xd1, 0xdb, 0x30, 0x21, 0x02, 0xe3, 0x08, 0x7d, 0xfc,
    +	0x09, 0xf3, 0x29, 0x7f, 0x5d, 0x07, 0x62, 0x13, 0x17, 0xdd, 0x81, 0xb1, 0xc8, 0x6f, 0x09, 0x45,
    +	0x6e, 0x58, 0x39, 0x9d, 0x1f, 0x5b, 0x7a, 0x43, 0xa1, 0xe9, 0xd6, 0x3b, 0x55, 0x15, 0xeb, 0x74,
    +	0xd0, 0x06, 0x5f, 0xef, 0x2c, 0x71, 0x18, 0x09, 0x2b, 0x67, 0xf2, 0xcf, 0x24, 0x95, 0x5f, 0xcc,
    +	0xdc, 0x0e, 0xa2, 0x26, 0xd6, 0xc9, 0xa0, 0xeb, 0x30, 0xd3, 0x09, 0x5c, 0x9f, 0xad, 0x09, 0xe5,
    +	0xf3, 0x51, 0x31, 0x75, 0x48, 0xb5, 0x24, 0x02, 0x4e, 0xd7, 0x61, 0x71, 0x8d, 0x44, 0x61, 0xe5,
    +	0x2c, 0x4f, 0x75, 0xc8, 0xf5, 0x20, 0xbc, 0x0c, 0x2b, 0x28, 0x5a, 0x63, 0x9c, 0x98, 0xeb, 0x29,
    +	0x2b, 0xb3, 0xf9, 0xd1, 0x32, 0x74, 0x7d, 0x26, 0x97, 0xfd, 0xd5, 0x5f, 0x1c, 0x53, 0x40, 0x4d,
    +	0x98, 0x0c, 0xf4, 0x1b, 0x70, 0x58, 0x39, 0x57, 0xe0, 0x79, 0x9e, 0xb8, 0x2e, 0xc7, 0x02, 0x81,
    +	0x51, 0x1c, 0xe2, 0x04, 0x4d, 0xf4, 0x1e, 0x4c, 0x8b, 0xa0, 0x1f, 0xf1, 0x30, 0x9d, 0x8f, 0x5f,
    +	0xe7, 0xe1, 0x04, 0x0c, 0xa7, 0xb0, 0x79, 0xaa, 0x41, 0xe7, 0x7e, 0x8b, 0x08, 0xd6, 0x77, 0xcb,
    +	0xf5, 0x76, 0xc2, 0xca, 0x05, 0xc6, 0x1f, 0x44, 0xaa, 0xc1, 0x24, 0x14, 0x67, 0xd4, 0x40, 0x1b,
    +	0x30, 0xdd, 0x09, 0x08, 0x69, 0xb3, 0x7b, 0x92, 0x38, 0xcf, 0xaa, 0x3c, 0xac, 0x17, 0xed, 0x49,
    +	0x2d, 0x01, 0x3b, 0xcc, 0x28, 0xc3, 0x29, 0x0a, 0xe8, 0x01, 0x8c, 0xf8, 0xbb, 0x24, 0xd8, 0x26,
    +	0x4e, 0xb3, 0x72, 0xb1, 0xe0, 0xcd, 0xa8, 0x38, 0xdc, 0x6e, 0x0b, 0xdc, 0x84, 0x4b, 0x9d, 0x2c,
    +	0xee, 0xed, 0x52, 0x27, 0x1b, 0x43, 0xff, 0x89, 0x05, 0x67, 0xa5, 0x91, 0xba, 0xde, 0xa1, 0xa3,
    +	0xbe, 0xe8, 0x7b, 0x61, 0x14, 0xf0, 0x40, 0x54, 0xcf, 0xe6, 0x07, 0x67, 0xda, 0xc8, 0xa9, 0xa4,
    +	0x4c, 0x25, 0x67, 0xf3, 0x30, 0x42, 0x9c, 0xdf, 0x22, 0xbd, 0xd9, 0x87, 0x24, 0x92, 0xcc, 0x68,
    +	0x3e, 0x5c, 0xf9, 0x60, 0x69, 0xbd, 0xf2, 0x1c, 0x8f, 0xa2, 0x45, 0x37, 0x43, 0x3d, 0x09, 0xc4,
    +	0x69, 0x7c, 0x74, 0x15, 0x4a, 0x7e, 0x58, 0x79, 0x9e, 0xad, 0xed, 0xb3, 0x39, 0xe3, 0x78, 0xbb,
    +	0xce, 0x5d, 0xab, 0x6f, 0xd7, 0x71, 0xc9, 0x0f, 0x65, 0xba, 0x3f, 0x7a, 0x9d, 0x0d, 0x2b, 0x2f,
    +	0x70, 0xc5, 0xba, 0x4c, 0xf7, 0xc7, 0x0a, 0x71, 0x0c, 0x47, 0xdb, 0x30, 0x15, 0x1a, 0x6a, 0x83,
    +	0xb0, 0x72, 0x89, 0x8d, 0xd4, 0x0b, 0x79, 0x93, 0x66, 0x60, 0x6b, 0x79, 0xb8, 0x4c, 0x2a, 0x38,
    +	0x49, 0x96, 0xef, 0x2e, 0x4d, 0x71, 0x11, 0x56, 0x5e, 0xec, 0xb1, 0xbb, 0x34, 0x64, 0x7d, 0x77,
    +	0xe9, 0x34, 0x70, 0x82, 0x26, 0xba, 0xa3, 0x3f, 0xc8, 0xbd, 0x9c, 0xef, 0xa6, 0x9b, 0xf9, 0x14,
    +	0x77, 0x22, 0xf7, 0x19, 0xee, 0x7b, 0x30, 0x2d, 0xcf, 0x12, 0xba, 0x32, 0x03, 0xb7, 0x49, 0x2a,
    +	0x2f, 0xc5, 0x9b, 0xf6, 0x46, 0x02, 0x86, 0x53, 0xd8, 0xb3, 0x3f, 0x02, 0x33, 0x29, 0x39, 0xee,
    +	0x28, 0xef, 0x9b, 0x66, 0x77, 0x60, 0xc2, 0xd8, 0x2b, 0x4f, 0xd7, 0xfd, 0x6d, 0x0c, 0x46, 0x95,
    +	0x5b, 0x52, 0x8e, 0x39, 0x72, 0xe6, 0xb1, 0xcc, 0x91, 0x57, 0x4c, 0xef, 0xb9, 0xb3, 0x49, 0xef,
    +	0xb9, 0x91, 0x9a, 0xdf, 0x34, 0x1c, 0xe6, 0x36, 0x32, 0x22, 0x60, 0xe7, 0x71, 0xf9, 0xfe, 0x1f,
    +	0x74, 0x6a, 0x16, 0xbd, 0x72, 0xdf, 0x6e, 0x78, 0x03, 0x85, 0x46, 0xc2, 0xeb, 0x30, 0xe3, 0xf9,
    +	0xec, 0x22, 0x42, 0x9a, 0x52, 0xca, 0x64, 0xc2, 0xe4, 0xa8, 0x1e, 0xa1, 0x31, 0x81, 0x80, 0xd3,
    +	0x75, 0x68, 0x83, 0x5c, 0x1a, 0x4c, 0x5a, 0x25, 0xb9, 0xb0, 0x88, 0x05, 0x94, 0x5e, 0x80, 0xf9,
    +	0xaf, 0xb0, 0x32, 0x9d, 0x7f, 0x01, 0xe6, 0x95, 0x92, 0x12, 0x67, 0x28, 0x25, 0x4e, 0x66, 0x84,
    +	0xeb, 0xf8, 0xcd, 0xd5, 0x9a, 0xb8, 0xcb, 0x68, 0xb9, 0x29, 0x9a, 0xab, 0x35, 0xcc, 0x61, 0x68,
    +	0x1e, 0x86, 0xd8, 0x0f, 0x19, 0xf9, 0x2a, 0x8f, 0x17, 0xad, 0xd6, 0xb4, 0x9c, 0xca, 0xac, 0x02,
    +	0x16, 0x15, 0x99, 0xfd, 0x81, 0x5e, 0x00, 0x99, 0xfd, 0x61, 0xf8, 0x31, 0xed, 0x0f, 0x92, 0x00,
    +	0x8e, 0x69, 0xa1, 0x87, 0x70, 0xca, 0xb8, 0x74, 0xab, 0x17, 0xae, 0x90, 0xef, 0x64, 0x93, 0x40,
    +	0x5e, 0x38, 0x2f, 0x3a, 0x7d, 0x6a, 0x35, 0x8b, 0x12, 0xce, 0x6e, 0x00, 0xb5, 0x60, 0xa6, 0x91,
    +	0x6a, 0x75, 0xa4, 0xff, 0x56, 0xd5, 0xba, 0x48, 0xb7, 0x98, 0x26, 0x8c, 0xde, 0x86, 0x91, 0x8f,
    +	0x7c, 0xee, 0x10, 0x2b, 0xee, 0x5f, 0x32, 0x3e, 0xd3, 0xc8, 0x07, 0xb7, 0xeb, 0xac, 0xfc, 0x70,
    +	0xbf, 0x3a, 0x56, 0xf3, 0x9b, 0xf2, 0x2f, 0x56, 0x15, 0xd0, 0x5f, 0xb2, 0x60, 0x36, 0x7d, 0xab,
    +	0x57, 0x9d, 0x9e, 0xe8, 0xbf, 0xd3, 0xb6, 0x68, 0x74, 0x76, 0x39, 0x97, 0x1c, 0x2e, 0x68, 0x0a,
    +	0x7d, 0x91, 0xee, 0xa7, 0xd0, 0x7d, 0xc4, 0x5f, 0xb8, 0x68, 0x0e, 0x09, 0x98, 0x95, 0x1e, 0xee,
    +	0x57, 0xa7, 0x38, 0xfb, 0x77, 0x1f, 0xa9, 0x2c, 0x1a, 0xbc, 0x02, 0xfa, 0x31, 0x38, 0x15, 0xa4,
    +	0xb5, 0xec, 0x44, 0xde, 0x34, 0x3e, 0xdb, 0xcf, 0x51, 0x92, 0x9c, 0x70, 0x9c, 0x45, 0x10, 0x67,
    +	0xb7, 0x83, 0xfe, 0xaa, 0x05, 0xcf, 0x90, 0x7c, 0x0b, 0xae, 0xb8, 0x2a, 0xbc, 0x96, 0xd3, 0x8f,
    +	0x02, 0xdb, 0x2f, 0x4b, 0x30, 0xf0, 0x4c, 0x01, 0x02, 0x2e, 0x6a, 0xd7, 0xfe, 0xc7, 0x16, 0xb3,
    +	0xfa, 0x08, 0x54, 0x12, 0x76, 0x5b, 0xd1, 0x31, 0x38, 0xc7, 0x2e, 0x1b, 0xae, 0x25, 0x8f, 0xed,
    +	0xdd, 0xfa, 0xdf, 0x5a, 0xcc, 0xbb, 0xf5, 0x18, 0xdf, 0xe9, 0x7e, 0x00, 0x23, 0x91, 0x68, 0x4d,
    +	0x74, 0x3d, 0xcf, 0x13, 0x4f, 0x76, 0x8a, 0x79, 0xf8, 0xaa, 0x1b, 0xa6, 0x2c, 0xc5, 0x8a, 0x8c,
    +	0xfd, 0x5f, 0xf1, 0x19, 0x90, 0x90, 0x63, 0x30, 0x6e, 0x2f, 0x99, 0xc6, 0xed, 0x6a, 0x8f, 0x2f,
    +	0xc8, 0x31, 0x72, 0xff, 0x03, 0xb3, 0xdf, 0x4c, 0xb3, 0xfa, 0x49, 0x77, 0xab, 0xb6, 0xbf, 0x6b,
    +	0x01, 0xc4, 0xe9, 0x94, 0xfa, 0x48, 0x8c, 0x7f, 0x8d, 0xde, 0x29, 0xfd, 0xc8, 0x6f, 0xf8, 0x2d,
    +	0x61, 0x5c, 0x3b, 0x17, 0xdb, 0xd7, 0x79, 0xf9, 0xa1, 0xf6, 0x1b, 0x2b, 0x6c, 0x54, 0x95, 0xf1,
    +	0xcd, 0xcb, 0xb1, 0x5b, 0x8b, 0x11, 0xdb, 0xfc, 0x67, 0x2c, 0x38, 0x99, 0xf5, 0xe8, 0x0b, 0xbd,
    +	0x02, 0x23, 0x5c, 0xc7, 0xac, 0x5c, 0xde, 0xd5, 0x6c, 0xde, 0x15, 0xe5, 0x58, 0x61, 0xf4, 0xeb,
    +	0xfa, 0x7e, 0xc4, 0x54, 0x3f, 0xb7, 0x61, 0xa2, 0x16, 0x10, 0x4d, 0xee, 0x79, 0x37, 0xce, 0x42,
    +	0x36, 0xba, 0xf0, 0xca, 0x91, 0x23, 0xa9, 0xd9, 0xbf, 0x54, 0x82, 0x93, 0xdc, 0x71, 0x73, 0x7e,
    +	0xd7, 0x77, 0x9b, 0x35, 0xbf, 0x29, 0x9e, 0xea, 0x7f, 0x15, 0xc6, 0x3b, 0x9a, 0x61, 0xa0, 0x28,
    +	0x6d, 0x85, 0x6e, 0x40, 0x88, 0x55, 0x99, 0x7a, 0x29, 0x36, 0x68, 0xa1, 0x26, 0x8c, 0x93, 0x5d,
    +	0xb7, 0xa1, 0x1c, 0xc3, 0x4a, 0x47, 0x16, 0x1e, 0x54, 0x2b, 0xcb, 0x1a, 0x1d, 0x6c, 0x50, 0xed,
    +	0xfb, 0xb9, 0x85, 0x26, 0x3a, 0x0e, 0xf4, 0x70, 0x06, 0xfb, 0x59, 0x0b, 0xce, 0xe4, 0x24, 0xb9,
    +	0xa0, 0xcd, 0x3d, 0x60, 0x2e, 0xb2, 0x62, 0xd9, 0xaa, 0xe6, 0xb8, 0xe3, 0x2c, 0x16, 0x50, 0xf4,
    +	0x65, 0x80, 0x4e, 0x9c, 0x1a, 0xb8, 0x47, 0x36, 0x00, 0x23, 0x2e, 0xb8, 0x16, 0xe2, 0x59, 0x65,
    +	0x10, 0xd6, 0x68, 0xd9, 0x3f, 0x33, 0x00, 0x83, 0xcc, 0x07, 0x0f, 0xd5, 0x60, 0x78, 0x9b, 0x47,
    +	0x20, 0x2d, 0x9c, 0x37, 0x8a, 0x2b, 0x43, 0x9a, 0xc6, 0xf3, 0xa6, 0x95, 0x62, 0x49, 0x06, 0xad,
    +	0xc1, 0x09, 0x9e, 0xf6, 0xb8, 0xb5, 0x44, 0x5a, 0xce, 0x9e, 0xd4, 0xb9, 0x97, 0xd8, 0xa7, 0x2a,
    +	0xdb, 0xc3, 0x6a, 0x1a, 0x05, 0x67, 0xd5, 0x43, 0xef, 0xc2, 0x64, 0xe4, 0xb6, 0x89, 0xdf, 0x8d,
    +	0x4c, 0x77, 0x53, 0x75, 0x2d, 0xdc, 0x30, 0xa0, 0x38, 0x81, 0x8d, 0xde, 0x86, 0x89, 0x4e, 0xca,
    +	0xba, 0x30, 0x18, 0xab, 0xe1, 0x4c, 0x8b, 0x82, 0x89, 0xcb, 0xde, 0x7d, 0x75, 0xd9, 0x2b, 0xb7,
    +	0x8d, 0xed, 0x80, 0x84, 0xdb, 0x7e, 0xab, 0xc9, 0x24, 0xf3, 0x41, 0xed, 0xdd, 0x57, 0x02, 0x8e,
    +	0x53, 0x35, 0x28, 0x95, 0x4d, 0xc7, 0x6d, 0x75, 0x03, 0x12, 0x53, 0x19, 0x32, 0xa9, 0xac, 0x24,
    +	0xe0, 0x38, 0x55, 0xa3, 0xb7, 0xd9, 0x64, 0xf8, 0xc9, 0x98, 0x4d, 0xec, 0xbf, 0x5b, 0x02, 0x63,
    +	0x6a, 0x7f, 0x88, 0xb3, 0x18, 0xbf, 0x03, 0x03, 0x5b, 0x41, 0xa7, 0x21, 0xfc, 0x4d, 0x33, 0xbf,
    +	0xec, 0x3a, 0xae, 0x2d, 0xea, 0x5f, 0x46, 0xff, 0x63, 0x56, 0x8b, 0xee, 0xf1, 0x53, 0xc2, 0xfb,
    +	0x5a, 0x06, 0x29, 0x56, 0xcf, 0x2b, 0x87, 0xa5, 0x26, 0xa2, 0x20, 0x9c, 0xbf, 0x78, 0x23, 0xa6,
    +	0xfc, 0xb7, 0x35, 0x53, 0xb8, 0xd0, 0x43, 0x48, 0x2a, 0xe8, 0x2a, 0x8c, 0x89, 0xc4, 0xb2, 0xec,
    +	0x15, 0x20, 0xdf, 0x4c, 0xcc, 0x95, 0x74, 0x29, 0x2e, 0xc6, 0x3a, 0x8e, 0xfd, 0x97, 0x4b, 0x70,
    +	0x22, 0xe3, 0x19, 0x37, 0x3f, 0x46, 0xb6, 0xdc, 0x30, 0x0a, 0xf6, 0x92, 0x87, 0x13, 0x16, 0xe5,
    +	0x58, 0x61, 0x50, 0x5e, 0xc5, 0x0f, 0xaa, 0xe4, 0xe1, 0x24, 0x9e, 0x49, 0x0a, 0xe8, 0xd1, 0x0e,
    +	0x27, 0x7a, 0x6c, 0x77, 0x43, 0x22, 0x33, 0x87, 0xa8, 0x63, 0x9b, 0xb9, 0x64, 0x30, 0x08, 0xbd,
    +	0x9a, 0x6e, 0x29, 0x3f, 0x03, 0xed, 0x6a, 0xca, 0x3d, 0x0d, 0x38, 0x8c, 0x76, 0x2e, 0x22, 0x9e,
    +	0xe3, 0x45, 0xe2, 0x02, 0x1b, 0x47, 0x94, 0x67, 0xa5, 0x58, 0x40, 0xed, 0xef, 0x95, 0xe1, 0x6c,
    +	0x6e, 0x60, 0x07, 0xda, 0xf5, 0xb6, 0xef, 0xb9, 0x91, 0xaf, 0x7c, 0x74, 0x79, 0x14, 0x79, 0xd2,
    +	0xd9, 0x5e, 0x13, 0xe5, 0x58, 0x61, 0xa0, 0x4b, 0x30, 0xc8, 0x2c, 0x12, 0xc9, 0xa4, 0x92, 0x78,
    +	0x61, 0x89, 0xc7, 0xd8, 0xe5, 0x60, 0xed, 0x54, 0x2f, 0x17, 0x9e, 0xea, 0xcf, 0x51, 0x09, 0xc6,
    +	0x6f, 0x25, 0x0f, 0x14, 0xda, 0x5d, 0xdf, 0x6f, 0x61, 0x06, 0x44, 0x2f, 0x88, 0xf1, 0x4a, 0x38,
    +	0xa5, 0x62, 0xa7, 0xe9, 0x87, 0xda, 0xa0, 0x71, 0x07, 0xf8, 0xc0, 0xf5, 0xb6, 0x92, 0xce, 0xca,
    +	0x37, 0x79, 0x31, 0x96, 0x70, 0xba, 0x97, 0xe2, 0xdc, 0xf8, 0xc3, 0xf9, 0x7b, 0x49, 0x65, 0xc0,
    +	0xef, 0x99, 0x16, 0x5f, 0x5f, 0x01, 0x23, 0x3d, 0xc5, 0x93, 0x9f, 0x2a, 0xc3, 0x14, 0x5e, 0x58,
    +	0xfa, 0x74, 0x22, 0xee, 0xa4, 0x27, 0xa2, 0x7f, 0xb3, 0xd9, 0x93, 0x9a, 0x8d, 0x7f, 0x68, 0xc1,
    +	0x14, 0x4b, 0x6f, 0x2b, 0xa2, 0x32, 0xb9, 0xbe, 0x77, 0x0c, 0x57, 0x81, 0xe7, 0x60, 0x30, 0xa0,
    +	0x8d, 0x8a, 0x19, 0x54, 0x7b, 0x9c, 0xf5, 0x04, 0x73, 0x18, 0x3a, 0x07, 0x03, 0xac, 0x0b, 0x74,
    +	0xf2, 0xc6, 0x39, 0x0b, 0x5e, 0x72, 0x22, 0x07, 0xb3, 0x52, 0x16, 0x1f, 0x16, 0x93, 0x4e, 0xcb,
    +	0xe5, 0x9d, 0x8e, 0xfd, 0x45, 0x3e, 0x19, 0x21, 0x9f, 0x32, 0xbb, 0xf6, 0xf1, 0xe2, 0xc3, 0x66,
    +	0x93, 0x2c, 0xbe, 0x66, 0xff, 0x71, 0x09, 0x2e, 0x64, 0xd6, 0xeb, 0x3b, 0x3e, 0x6c, 0x71, 0xed,
    +	0xa7, 0x99, 0x0c, 0xb3, 0x7c, 0x8c, 0x4f, 0x41, 0x06, 0xfa, 0x95, 0xfe, 0x07, 0xfb, 0x08, 0xdb,
    +	0x9a, 0x39, 0x64, 0x9f, 0x90, 0xb0, 0xad, 0x99, 0x7d, 0xcb, 0x51, 0x13, 0xfc, 0x69, 0x29, 0xe7,
    +	0x5b, 0x98, 0xc2, 0xe0, 0x32, 0xe5, 0x33, 0x0c, 0x18, 0xca, 0x4b, 0x38, 0xe7, 0x31, 0xbc, 0x0c,
    +	0x2b, 0x28, 0x9a, 0x87, 0xa9, 0xb6, 0xeb, 0x51, 0xe6, 0xb3, 0x67, 0x8a, 0xe2, 0xca, 0x90, 0xb4,
    +	0x66, 0x82, 0x71, 0x12, 0x1f, 0xb9, 0x5a, 0x48, 0x57, 0xfe, 0x75, 0x6f, 0x1f, 0x69, 0xd7, 0xcd,
    +	0x99, 0xbe, 0x34, 0x6a, 0x14, 0x33, 0xc2, 0xbb, 0xae, 0x69, 0x7a, 0xa2, 0x72, 0xff, 0x7a, 0xa2,
    +	0xf1, 0x6c, 0x1d, 0xd1, 0xec, 0xdb, 0x30, 0xf1, 0xd8, 0xf6, 0x1f, 0xfb, 0xfb, 0x65, 0x78, 0xa6,
    +	0x60, 0xdb, 0x73, 0x5e, 0x6f, 0xcc, 0x81, 0xc6, 0xeb, 0x53, 0xf3, 0x50, 0x83, 0x93, 0x9b, 0xdd,
    +	0x56, 0x6b, 0x8f, 0x3d, 0x6c, 0x25, 0x4d, 0x89, 0x21, 0x64, 0x4a, 0xf5, 0xf4, 0x6d, 0x25, 0x03,
    +	0x07, 0x67, 0xd6, 0xa4, 0x57, 0x2c, 0x7a, 0x92, 0xec, 0x29, 0x52, 0x89, 0x2b, 0x16, 0xd6, 0x81,
    +	0xd8, 0xc4, 0x45, 0xd7, 0x61, 0xc6, 0xd9, 0x75, 0x5c, 0x9e, 0x4c, 0x48, 0x12, 0xe0, 0x77, 0x2c,
    +	0xa5, 0x23, 0x9f, 0x4f, 0x22, 0xe0, 0x74, 0x9d, 0x1c, 0x53, 0x55, 0xf9, 0xb1, 0x4c, 0x55, 0x66,
    +	0x70, 0xd1, 0xa1, 0xfc, 0xe0, 0xa2, 0xc5, 0x7c, 0xb1, 0x67, 0x1e, 0xd6, 0x0f, 0x61, 0xe2, 0xa8,
    +	0x3e, 0xf1, 0x2f, 0xc1, 0xb0, 0x78, 0xc3, 0x93, 0x7c, 0xaf, 0x29, 0xf3, 0xff, 0x4b, 0xb8, 0xfd,
    +	0xbf, 0x5a, 0xa0, 0x74, 0xdc, 0x66, 0x1e, 0x81, 0xb7, 0x99, 0x83, 0x3f, 0xd7, 0xce, 0x6b, 0x6f,
    +	0x45, 0x4f, 0x69, 0x0e, 0xfe, 0x31, 0x10, 0x9b, 0xb8, 0x7c, 0xb9, 0x85, 0x71, 0xc4, 0x1a, 0xe3,
    +	0x02, 0x21, 0x6c, 0xab, 0x0a, 0x03, 0x7d, 0x05, 0x86, 0x9b, 0xee, 0xae, 0x1b, 0x0a, 0x3d, 0xda,
    +	0x91, 0x6d, 0x93, 0xf1, 0xf7, 0x2d, 0x71, 0x32, 0x58, 0xd2, 0xb3, 0xff, 0x8a, 0x05, 0xca, 0x28,
    +	0x7c, 0x83, 0x38, 0xad, 0x68, 0x1b, 0xbd, 0x07, 0x20, 0x29, 0x28, 0xdd, 0x9b, 0x74, 0x55, 0x03,
    +	0xac, 0x20, 0x87, 0xc6, 0x3f, 0xac, 0xd5, 0x41, 0xef, 0xc2, 0xd0, 0x36, 0xa3, 0x25, 0xbe, 0xed,
    +	0x92, 0x32, 0xc1, 0xb1, 0xd2, 0xc3, 0xfd, 0xea, 0x49, 0xb3, 0x4d, 0x79, 0x8a, 0xf1, 0x5a, 0xf6,
    +	0x4f, 0x95, 0xe2, 0x39, 0xfd, 0xa0, 0xeb, 0x47, 0xce, 0x31, 0x48, 0x22, 0xd7, 0x0d, 0x49, 0xe4,
    +	0x85, 0x22, 0xab, 0x37, 0xeb, 0x52, 0xae, 0x04, 0x72, 0x3b, 0x21, 0x81, 0xbc, 0xd8, 0x9b, 0x54,
    +	0xb1, 0xe4, 0xf1, 0x5f, 0x5b, 0x30, 0x63, 0xe0, 0x1f, 0xc3, 0x01, 0xb8, 0x62, 0x1e, 0x80, 0xcf,
    +	0xf6, 0xfc, 0x86, 0x9c, 0x83, 0xef, 0x27, 0xca, 0x89, 0xbe, 0xb3, 0x03, 0xef, 0x23, 0x18, 0xd8,
    +	0x76, 0x82, 0xa6, 0xb8, 0xd7, 0x5f, 0xe9, 0x6b, 0xac, 0xe7, 0x6e, 0x38, 0x81, 0x70, 0x73, 0x79,
    +	0x45, 0x8e, 0x3a, 0x2d, 0xea, 0xe9, 0xe2, 0xc2, 0x9a, 0x42, 0xd7, 0x60, 0x28, 0x6c, 0xf8, 0x1d,
    +	0xf5, 0x24, 0xf4, 0x22, 0x1b, 0x68, 0x56, 0x72, 0xb8, 0x5f, 0x45, 0x66, 0x73, 0xb4, 0x18, 0x0b,
    +	0x7c, 0xf4, 0x55, 0x98, 0x60, 0xbf, 0x94, 0xcf, 0x69, 0x39, 0x5f, 0x03, 0x53, 0xd7, 0x11, 0xb9,
    +	0x43, 0xb6, 0x51, 0x84, 0x4d, 0x52, 0xb3, 0x5b, 0x30, 0xaa, 0x3e, 0xeb, 0xa9, 0x7a, 0x24, 0xfc,
    +	0x8b, 0x32, 0x9c, 0xc8, 0x58, 0x73, 0x28, 0x34, 0x66, 0xe2, 0x6a, 0x9f, 0x4b, 0xf5, 0x63, 0xce,
    +	0x45, 0xc8, 0x2e, 0x80, 0x4d, 0xb1, 0xb6, 0xfa, 0x6e, 0xf4, 0x4e, 0x48, 0x92, 0x8d, 0xd2, 0xa2,
    +	0xde, 0x8d, 0xd2, 0xc6, 0x8e, 0x6d, 0xa8, 0x69, 0x43, 0xaa, 0xa7, 0x4f, 0x75, 0x4e, 0x7f, 0x6b,
    +	0x00, 0x4e, 0x66, 0x39, 0xe2, 0xa0, 0x1f, 0x85, 0x21, 0xf6, 0x9c, 0xaf, 0xf0, 0xfd, 0x6b, 0x56,
    +	0xcd, 0x39, 0xf6, 0x22, 0x50, 0x84, 0xa2, 0x9e, 0x93, 0xec, 0x88, 0x17, 0xf6, 0x1c, 0x66, 0xd1,
    +	0x26, 0x0b, 0x11, 0x27, 0x4e, 0x4f, 0xc9, 0x3e, 0x3e, 0xdf, 0x77, 0x07, 0xc4, 0xf9, 0x1b, 0x26,
    +	0xfc, 0xd9, 0x64, 0x71, 0x6f, 0x7f, 0x36, 0xd9, 0x32, 0x5a, 0x85, 0xa1, 0x06, 0x77, 0x94, 0x2a,
    +	0xf7, 0x66, 0x61, 0xdc, 0x4b, 0x4a, 0x31, 0x60, 0xe1, 0x1d, 0x25, 0x08, 0xcc, 0xba, 0x30, 0xa6,
    +	0x0d, 0xcc, 0x53, 0x5d, 0x3c, 0x3b, 0xf4, 0xe0, 0xd3, 0x86, 0xe0, 0xa9, 0x2e, 0xa0, 0xbf, 0xae,
    +	0x9d, 0xfd, 0x82, 0x1f, 0x7c, 0xce, 0x90, 0x9d, 0xce, 0x25, 0x1e, 0x59, 0x26, 0xf6, 0x15, 0x93,
    +	0xa5, 0xea, 0x66, 0x0e, 0x87, 0xdc, 0x44, 0x74, 0xe6, 0x81, 0x5f, 0x9c, 0xb7, 0xc1, 0xfe, 0x59,
    +	0x0b, 0x12, 0xcf, 0xe0, 0x94, 0xba, 0xd3, 0xca, 0x55, 0x77, 0x5e, 0x84, 0x81, 0xc0, 0x6f, 0x49,
    +	0x79, 0x4a, 0x61, 0x60, 0xbf, 0x45, 0x30, 0x83, 0x50, 0x8c, 0x28, 0x56, 0x62, 0x8d, 0xeb, 0x17,
    +	0x74, 0x71, 0xf5, 0x7e, 0x0e, 0x06, 0x5b, 0x64, 0x97, 0xb4, 0x92, 0xf9, 0x98, 0x6f, 0xd1, 0x42,
    +	0xcc, 0x61, 0xf6, 0x3f, 0x1c, 0x80, 0xf3, 0x85, 0x91, 0x24, 0xa9, 0x80, 0xb9, 0xe5, 0x44, 0xe4,
    +	0x81, 0xb3, 0x97, 0xcc, 0x43, 0x7a, 0x9d, 0x17, 0x63, 0x09, 0x67, 0xef, 0xee, 0x79, 0x6e, 0xad,
    +	0x84, 0x72, 0x58, 0xa4, 0xd4, 0x12, 0x50, 0x53, 0xd9, 0x58, 0x7e, 0x12, 0xca, 0xc6, 0xd7, 0x00,
    +	0xc2, 0xb0, 0xc5, 0xbd, 0x5d, 0x9b, 0xe2, 0x41, 0x7f, 0x1c, 0xe9, 0xa4, 0x7e, 0x4b, 0x40, 0xb0,
    +	0x86, 0x85, 0x96, 0x60, 0xba, 0x13, 0xf8, 0x11, 0xd7, 0xb5, 0x2f, 0x71, 0x87, 0xf0, 0x41, 0x33,
    +	0x88, 0x5f, 0x2d, 0x01, 0xc7, 0xa9, 0x1a, 0xe8, 0x4d, 0x18, 0x13, 0x81, 0xfd, 0x6a, 0xbe, 0xdf,
    +	0x12, 0xea, 0x3d, 0xe5, 0x23, 0x5d, 0x8f, 0x41, 0x58, 0xc7, 0xd3, 0xaa, 0x31, 0x05, 0xfe, 0x70,
    +	0x66, 0x35, 0xae, 0xc4, 0xd7, 0xf0, 0x12, 0x49, 0x40, 0x46, 0xfa, 0x4a, 0x02, 0x12, 0x2b, 0x3c,
    +	0x47, 0xfb, 0xb6, 0x27, 0x43, 0x4f, 0x15, 0xe1, 0xaf, 0x0c, 0xc0, 0x09, 0xb1, 0x70, 0x9e, 0xf6,
    +	0x72, 0xb9, 0x93, 0x5e, 0x2e, 0x4f, 0x42, 0x25, 0xfa, 0xe9, 0x9a, 0x39, 0xee, 0x35, 0xf3, 0xd3,
    +	0x16, 0x98, 0x32, 0x24, 0xfa, 0x8f, 0x72, 0x13, 0x39, 0xbf, 0x99, 0x2b, 0x93, 0xc6, 0x19, 0x02,
    +	0x3e, 0x5e, 0x4a, 0x67, 0xfb, 0x7f, 0xb6, 0xe0, 0xd9, 0x9e, 0x14, 0xd1, 0x32, 0x8c, 0x32, 0x41,
    +	0x57, 0xbb, 0x17, 0xbf, 0xa8, 0x1e, 0x8c, 0x48, 0x40, 0x8e, 0xdc, 0x1d, 0xd7, 0x44, 0xcb, 0xa9,
    +	0x8c, 0xd9, 0x2f, 0x65, 0x64, 0xcc, 0x3e, 0x65, 0x0c, 0xcf, 0x63, 0xa6, 0xcc, 0xfe, 0x49, 0x7a,
    +	0xe2, 0x98, 0xaf, 0x4e, 0x3f, 0x6f, 0xa8, 0x73, 0xed, 0x84, 0x3a, 0x17, 0x99, 0xd8, 0xda, 0x19,
    +	0xf2, 0x1e, 0x4c, 0xb3, 0x88, 0xbf, 0xec, 0xf9, 0x92, 0x78, 0xae, 0x5a, 0x8a, 0xbd, 0x9d, 0x6f,
    +	0x25, 0x60, 0x38, 0x85, 0x6d, 0xff, 0x9b, 0x32, 0x0c, 0xf1, 0xed, 0x77, 0x0c, 0x17, 0xdf, 0x97,
    +	0x61, 0xd4, 0x6d, 0xb7, 0xbb, 0x3c, 0x09, 0xf2, 0x60, 0xec, 0xf0, 0xbe, 0x2a, 0x0b, 0x71, 0x0c,
    +	0x47, 0x2b, 0xc2, 0x92, 0x50, 0x90, 0x54, 0x80, 0x77, 0x7c, 0x6e, 0xc9, 0x89, 0x1c, 0x2e, 0xc5,
    +	0xa9, 0x73, 0x36, 0xb6, 0x39, 0xa0, 0x6f, 0x00, 0x84, 0x51, 0xe0, 0x7a, 0x5b, 0xb4, 0x4c, 0x64,
    +	0x9e, 0xf9, 0x6c, 0x01, 0xb5, 0xba, 0x42, 0xe6, 0x34, 0x63, 0x9e, 0xa3, 0x00, 0x58, 0xa3, 0x88,
    +	0xe6, 0x8c, 0x93, 0x7e, 0x36, 0x31, 0x77, 0xc0, 0xa9, 0xc6, 0x73, 0x36, 0xfb, 0x05, 0x18, 0x55,
    +	0xc4, 0x7b, 0xe9, 0x15, 0xc7, 0x75, 0x81, 0xed, 0x4b, 0x30, 0x95, 0xe8, 0xdb, 0x91, 0xd4, 0x92,
    +	0xbf, 0x6e, 0xc1, 0x14, 0xef, 0xcc, 0xb2, 0xb7, 0x2b, 0x4e, 0x83, 0x47, 0x70, 0xb2, 0x95, 0xc1,
    +	0x95, 0xc5, 0xf4, 0xf7, 0xcf, 0xc5, 0x95, 0x1a, 0x32, 0x0b, 0x8a, 0x33, 0xdb, 0x40, 0x97, 0xe9,
    +	0x8e, 0xa3, 0x5c, 0xd7, 0x69, 0x89, 0x98, 0x2b, 0xe3, 0x7c, 0xb7, 0xf1, 0x32, 0xac, 0xa0, 0xf6,
    +	0x1f, 0x58, 0x30, 0xc3, 0x7b, 0x7e, 0x93, 0xec, 0x29, 0xde, 0xf4, 0x83, 0xec, 0xbb, 0x48, 0xbf,
    +	0x5f, 0xca, 0x49, 0xbf, 0xaf, 0x7f, 0x5a, 0xb9, 0xf0, 0xd3, 0x7e, 0xc9, 0x02, 0xb1, 0x42, 0x8e,
    +	0x41, 0xd3, 0xf2, 0x23, 0xa6, 0xa6, 0x65, 0x36, 0x7f, 0x13, 0xe4, 0xa8, 0x58, 0xfe, 0xbd, 0x05,
    +	0xd3, 0x1c, 0x41, 0x8b, 0x62, 0xf7, 0x83, 0x9c, 0x87, 0x05, 0xf3, 0x8b, 0x32, 0xdd, 0x5a, 0x6f,
    +	0x92, 0xbd, 0x0d, 0xbf, 0xe6, 0x44, 0xdb, 0xd9, 0x1f, 0x65, 0x4c, 0xd6, 0x40, 0xe1, 0x64, 0x35,
    +	0xe5, 0x06, 0x32, 0x12, 0xad, 0xf6, 0x50, 0x00, 0x1f, 0x35, 0xd1, 0xaa, 0xfd, 0x47, 0x16, 0x20,
    +	0xde, 0x8c, 0x21, 0xb8, 0x51, 0x71, 0x88, 0x95, 0x66, 0x06, 0x0b, 0x54, 0x10, 0xac, 0x61, 0x3d,
    +	0x91, 0xe1, 0x49, 0xb8, 0xb2, 0x94, 0x7b, 0xbb, 0xb2, 0x1c, 0x61, 0x44, 0x7f, 0x69, 0x18, 0x92,
    +	0x0f, 0x56, 0xd1, 0x5d, 0x18, 0x6f, 0x38, 0x1d, 0xe7, 0xbe, 0xdb, 0x72, 0x23, 0x97, 0x84, 0x45,
    +	0x7e, 0x6e, 0x8b, 0x1a, 0x9e, 0x70, 0x3e, 0xd0, 0x4a, 0xb0, 0x41, 0x07, 0xcd, 0x01, 0x74, 0x02,
    +	0x77, 0xd7, 0x6d, 0x91, 0x2d, 0xa6, 0x10, 0x62, 0x51, 0x9e, 0xb8, 0xd3, 0x9d, 0x2c, 0xc5, 0x1a,
    +	0x46, 0x46, 0x70, 0x95, 0xf2, 0x53, 0x0e, 0xae, 0x02, 0xc7, 0x16, 0x5c, 0x65, 0xe0, 0x48, 0xc1,
    +	0x55, 0x46, 0x8e, 0x1c, 0x5c, 0x65, 0xb0, 0xaf, 0xe0, 0x2a, 0x18, 0x4e, 0x4b, 0xd9, 0x93, 0xfe,
    +	0x5f, 0x71, 0x5b, 0x44, 0x5c, 0x38, 0x78, 0x68, 0xaa, 0xd9, 0x83, 0xfd, 0xea, 0x69, 0x9c, 0x89,
    +	0x81, 0x73, 0x6a, 0xa2, 0x2f, 0x43, 0xc5, 0x69, 0xb5, 0xfc, 0x07, 0x6a, 0x52, 0x97, 0xc3, 0x86,
    +	0xd3, 0x8a, 0xc3, 0x32, 0x8e, 0x2c, 0x9c, 0x3b, 0xd8, 0xaf, 0x56, 0xe6, 0x73, 0x70, 0x70, 0x6e,
    +	0x6d, 0xf4, 0x0e, 0x8c, 0x76, 0x02, 0xbf, 0xb1, 0xa6, 0xbd, 0xaa, 0xbf, 0x40, 0x07, 0xb0, 0x26,
    +	0x0b, 0x0f, 0xf7, 0xab, 0x13, 0xea, 0x0f, 0x3b, 0xf0, 0xe3, 0x0a, 0x19, 0x71, 0x4b, 0xc6, 0x9e,
    +	0x76, 0xdc, 0x92, 0xf1, 0x27, 0x1c, 0xb7, 0xc4, 0xde, 0x81, 0x13, 0x75, 0x12, 0xb8, 0x4e, 0xcb,
    +	0x7d, 0x44, 0x65, 0x72, 0xc9, 0x03, 0x37, 0x60, 0x34, 0x48, 0x70, 0xfd, 0xbe, 0x92, 0x09, 0x68,
    +	0x7a, 0x19, 0xc9, 0xe5, 0x63, 0x42, 0xf6, 0xff, 0x6b, 0xc1, 0xb0, 0x78, 0x04, 0x7b, 0x0c, 0x92,
    +	0xe9, 0xbc, 0x61, 0x92, 0xa9, 0x66, 0x4f, 0x0a, 0xeb, 0x4c, 0xae, 0x31, 0x66, 0x35, 0x61, 0x8c,
    +	0x79, 0xb6, 0x88, 0x48, 0xb1, 0x19, 0xe6, 0x3f, 0x2b, 0xd3, 0x1b, 0x82, 0x11, 0x8e, 0xe1, 0xe9,
    +	0x0f, 0xc1, 0x3a, 0x0c, 0x87, 0x22, 0x1c, 0x40, 0x29, 0xff, 0x8d, 0x51, 0x72, 0x12, 0x63, 0x1f,
    +	0x48, 0x11, 0x00, 0x40, 0x12, 0xc9, 0x8c, 0x33, 0x50, 0x7e, 0x8a, 0x71, 0x06, 0x7a, 0x05, 0xac,
    +	0x18, 0x78, 0x12, 0x01, 0x2b, 0xec, 0xdf, 0x60, 0xa7, 0xb3, 0x5e, 0x7e, 0x0c, 0x82, 0xdb, 0x75,
    +	0xf3, 0x1c, 0xb7, 0x0b, 0x56, 0x96, 0xe8, 0x54, 0x8e, 0x00, 0xf7, 0x6b, 0x16, 0x9c, 0xcf, 0xf8,
    +	0x2a, 0x4d, 0x9a, 0x7b, 0x05, 0x46, 0x9c, 0x6e, 0xd3, 0x55, 0x7b, 0x59, 0xb3, 0x16, 0xcf, 0x8b,
    +	0x72, 0xac, 0x30, 0xd0, 0x22, 0xcc, 0x90, 0x54, 0x7c, 0x61, 0x1e, 0xb9, 0x8b, 0xbd, 0x9c, 0x4e,
    +	0x07, 0x17, 0x4e, 0xe3, 0xab, 0xa0, 0x77, 0xe5, 0xdc, 0xa0, 0x77, 0x7f, 0xcf, 0x82, 0x31, 0xf5,
    +	0x20, 0xfe, 0xa9, 0x8f, 0xf6, 0x7b, 0xe6, 0x68, 0x3f, 0x53, 0x30, 0xda, 0x39, 0xc3, 0xfc, 0x7b,
    +	0x25, 0xd5, 0xdf, 0x9a, 0x1f, 0x44, 0x7d, 0x48, 0x89, 0x8f, 0xff, 0xec, 0xe5, 0x2a, 0x8c, 0x39,
    +	0x9d, 0x8e, 0x04, 0x48, 0xff, 0x45, 0x96, 0x1a, 0x26, 0x2e, 0xc6, 0x3a, 0x8e, 0x7a, 0x85, 0x53,
    +	0xce, 0x7d, 0x85, 0xd3, 0x04, 0x88, 0x9c, 0x60, 0x8b, 0x44, 0xb4, 0x4c, 0xb8, 0x5b, 0xe7, 0xf3,
    +	0x9b, 0x6e, 0xe4, 0xb6, 0xe6, 0x5c, 0x2f, 0x0a, 0xa3, 0x60, 0x6e, 0xd5, 0x8b, 0x6e, 0x07, 0xfc,
    +	0x9a, 0xaa, 0x85, 0x96, 0x54, 0xb4, 0xb0, 0x46, 0x57, 0x06, 0x7f, 0x61, 0x6d, 0x0c, 0x9a, 0x8e,
    +	0x30, 0xeb, 0xa2, 0x1c, 0x2b, 0x0c, 0xfb, 0x0b, 0xec, 0xf4, 0x61, 0x63, 0x7a, 0xb4, 0x90, 0x89,
    +	0x7f, 0x3c, 0xae, 0x66, 0x83, 0x99, 0x84, 0x97, 0xf4, 0xc0, 0x8c, 0xc5, 0xcc, 0x9e, 0x36, 0xac,
    +	0xbf, 0xb3, 0x8d, 0xa3, 0x37, 0xa2, 0xaf, 0xa5, 0x9c, 0x9b, 0x5e, 0xed, 0x71, 0x6a, 0x1c, 0xc1,
    +	0x9d, 0x89, 0xe5, 0x89, 0x64, 0x59, 0xf4, 0x56, 0x6b, 0x62, 0x5f, 0x68, 0x79, 0x22, 0x05, 0x00,
    +	0xc7, 0x38, 0x54, 0x60, 0x53, 0x7f, 0xc2, 0x0a, 0x8a, 0xd3, 0x09, 0x28, 0xec, 0x10, 0x6b, 0x18,
    +	0xe8, 0x8a, 0x50, 0x5a, 0x70, 0xdb, 0xc3, 0x33, 0x09, 0xa5, 0x85, 0x1c, 0x2e, 0x4d, 0xd3, 0x74,
    +	0x15, 0xc6, 0xc8, 0xc3, 0x88, 0x04, 0x9e, 0xd3, 0xa2, 0x2d, 0x0c, 0xc6, 0xc1, 0x91, 0x97, 0xe3,
    +	0x62, 0xac, 0xe3, 0xa0, 0x0d, 0x98, 0x0a, 0xb9, 0x2e, 0x4f, 0x25, 0xb1, 0xe1, 0x3a, 0xd1, 0xcf,
    +	0xaa, 0x50, 0x04, 0x26, 0xf8, 0x90, 0x15, 0x71, 0xee, 0x24, 0x03, 0xb4, 0x24, 0x49, 0xa0, 0x77,
    +	0x61, 0xb2, 0xe5, 0x3b, 0xcd, 0x05, 0xa7, 0xe5, 0x78, 0x0d, 0x36, 0x3e, 0x23, 0x46, 0x94, 0xce,
    +	0xc9, 0x5b, 0x06, 0x14, 0x27, 0xb0, 0xa9, 0x80, 0xa8, 0x97, 0x88, 0xc4, 0x4b, 0x8e, 0xb7, 0x45,
    +	0xc2, 0xca, 0x28, 0xfb, 0x2a, 0x26, 0x20, 0xde, 0xca, 0xc1, 0xc1, 0xb9, 0xb5, 0xd1, 0x35, 0x18,
    +	0x97, 0x9f, 0xaf, 0xc5, 0x33, 0x8a, 0x1f, 0x34, 0x69, 0x30, 0x6c, 0x60, 0xa2, 0x10, 0x4e, 0xc9,
    +	0xff, 0x1b, 0x81, 0xb3, 0xb9, 0xe9, 0x36, 0x44, 0x90, 0x0f, 0xfe, 0x28, 0xfd, 0x4b, 0xf2, 0x05,
    +	0xec, 0x72, 0x16, 0xd2, 0xe1, 0x7e, 0xf5, 0x9c, 0x18, 0xb5, 0x4c, 0x38, 0xce, 0xa6, 0x8d, 0xd6,
    +	0xe0, 0x04, 0xf7, 0x81, 0x59, 0xdc, 0x26, 0x8d, 0x1d, 0xb9, 0xe1, 0x98, 0xd4, 0xa8, 0x3d, 0xfc,
    +	0xb9, 0x91, 0x46, 0xc1, 0x59, 0xf5, 0xd0, 0x87, 0x50, 0xe9, 0x74, 0xef, 0xb7, 0xdc, 0x70, 0x7b,
    +	0xdd, 0x8f, 0x98, 0x0b, 0xd9, 0x7c, 0xb3, 0x19, 0x90, 0x90, 0xbf, 0x59, 0x66, 0x47, 0xaf, 0x8c,
    +	0x41, 0x55, 0xcb, 0xc1, 0xc3, 0xb9, 0x14, 0xd0, 0x23, 0x38, 0x95, 0x58, 0x08, 0x22, 0x98, 0xcc,
    +	0x64, 0x7e, 0x0a, 0xbb, 0x7a, 0x56, 0x05, 0x11, 0x97, 0x29, 0x0b, 0x84, 0xb3, 0x9b, 0x40, 0x6f,
    +	0x01, 0xb8, 0x9d, 0x15, 0xa7, 0xed, 0xb6, 0xe8, 0x75, 0xf4, 0x04, 0x5b, 0x23, 0xf4, 0x6a, 0x02,
    +	0xab, 0x35, 0x59, 0x4a, 0x79, 0xb3, 0xf8, 0xb7, 0x87, 0x35, 0x6c, 0x74, 0x0b, 0x26, 0xc5, 0xbf,
    +	0x3d, 0x31, 0xa5, 0x33, 0x2a, 0xdb, 0xf1, 0xa4, 0xac, 0xa1, 0xe6, 0x31, 0x51, 0x82, 0x13, 0x75,
    +	0xd1, 0x16, 0x9c, 0x97, 0xa9, 0x96, 0xf5, 0xf5, 0x29, 0xe7, 0x20, 0x64, 0x79, 0xe3, 0x46, 0xf8,
    +	0x9b, 0xa2, 0xf9, 0x22, 0x44, 0x5c, 0x4c, 0x87, 0x9e, 0xeb, 0xfa, 0x32, 0xe7, 0x2f, 0xd9, 0x4f,
    +	0xc5, 0xb1, 0x4e, 0x6f, 0x25, 0x81, 0x38, 0x8d, 0x8f, 0x7c, 0x38, 0xe5, 0x7a, 0x59, 0xab, 0xfa,
    +	0x34, 0x23, 0xf4, 0x45, 0xfe, 0x88, 0xbf, 0x78, 0x45, 0x67, 0xc2, 0x71, 0x36, 0x5d, 0xb4, 0x0a,
    +	0x27, 0x22, 0x5e, 0xb0, 0xe4, 0x86, 0x3c, 0x2d, 0x15, 0xbd, 0xf6, 0x9d, 0x61, 0xcd, 0x9d, 0xa1,
    +	0xab, 0x79, 0x23, 0x0d, 0xc6, 0x59, 0x75, 0x3e, 0x9e, 0x03, 0xe8, 0xef, 0x5b, 0xb4, 0xb6, 0x26,
    +	0xe8, 0xa3, 0x6f, 0xc2, 0xb8, 0x3e, 0x3e, 0x42, 0x68, 0xb9, 0x94, 0x2d, 0x07, 0x6b, 0xec, 0x85,
    +	0x5f, 0x13, 0x14, 0x0b, 0xd1, 0x61, 0xd8, 0xa0, 0x88, 0x1a, 0x19, 0xc1, 0x37, 0xae, 0xf4, 0x27,
    +	0x14, 0xf5, 0xef, 0xff, 0x48, 0x20, 0x7b, 0xe7, 0xa0, 0x5b, 0x30, 0xd2, 0x68, 0xb9, 0xc4, 0x8b,
    +	0x56, 0x6b, 0x45, 0x21, 0x68, 0x17, 0x05, 0x8e, 0xd8, 0x8a, 0x22, 0x9b, 0x1c, 0x2f, 0xc3, 0x8a,
    +	0x82, 0x7d, 0x0d, 0xc6, 0xea, 0x2d, 0x42, 0x3a, 0xfc, 0x1d, 0x17, 0x7a, 0x89, 0x5d, 0x4c, 0x98,
    +	0x68, 0x69, 0x31, 0xd1, 0x52, 0xbf, 0x73, 0x30, 0xa1, 0x52, 0xc2, 0xed, 0xdf, 0x2e, 0x41, 0xb5,
    +	0x47, 0x52, 0xc3, 0x84, 0xbd, 0xcd, 0xea, 0xcb, 0xde, 0x36, 0x0f, 0x53, 0xf1, 0x3f, 0x5d, 0x95,
    +	0xa7, 0x9c, 0xa1, 0xef, 0x9a, 0x60, 0x9c, 0xc4, 0xef, 0xfb, 0x5d, 0x8b, 0x6e, 0xb2, 0x1b, 0xe8,
    +	0xf9, 0x32, 0xcb, 0x30, 0xd5, 0x0f, 0xf6, 0x7f, 0xf7, 0xce, 0x35, 0xbb, 0xda, 0xbf, 0x51, 0x82,
    +	0x53, 0x6a, 0x08, 0x7f, 0x78, 0x07, 0xee, 0x4e, 0x7a, 0xe0, 0x9e, 0x80, 0xd1, 0xda, 0xbe, 0x0d,
    +	0x43, 0x3c, 0x2e, 0x6e, 0x1f, 0x32, 0xff, 0x73, 0x66, 0x1e, 0x06, 0x25, 0x66, 0x1a, 0xb9, 0x18,
    +	0xfe, 0x92, 0x05, 0x53, 0x89, 0x07, 0x92, 0x08, 0x6b, 0xaf, 0xe8, 0x1f, 0x47, 0x2e, 0xcf, 0x92,
    +	0xf8, 0x2f, 0xc2, 0xc0, 0xb6, 0xaf, 0x9c, 0x94, 0x15, 0xc6, 0x0d, 0x3f, 0x8c, 0x30, 0x83, 0xd8,
    +	0xff, 0xd2, 0x82, 0xc1, 0x0d, 0xc7, 0xf5, 0x22, 0x69, 0xfd, 0xb0, 0x72, 0xac, 0x1f, 0xfd, 0x7c,
    +	0x17, 0x7a, 0x13, 0x86, 0xc8, 0xe6, 0x26, 0x69, 0x44, 0x62, 0x56, 0x65, 0x94, 0x8f, 0xa1, 0x65,
    +	0x56, 0x4a, 0x85, 0x50, 0xd6, 0x18, 0xff, 0x8b, 0x05, 0x32, 0xba, 0x07, 0xa3, 0x91, 0xdb, 0x26,
    +	0xf3, 0xcd, 0xa6, 0xf0, 0x09, 0x78, 0x8c, 0xd0, 0x34, 0x1b, 0x92, 0x00, 0x8e, 0x69, 0xd9, 0xdf,
    +	0x2b, 0x01, 0xc4, 0x71, 0xf8, 0x7a, 0x7d, 0xe2, 0x42, 0xca, 0x5a, 0x7c, 0x29, 0xc3, 0x5a, 0x8c,
    +	0x62, 0x82, 0x19, 0xa6, 0x62, 0x35, 0x4c, 0xe5, 0xbe, 0x86, 0x69, 0xe0, 0x28, 0xc3, 0xb4, 0x08,
    +	0x33, 0x71, 0x1c, 0x41, 0x33, 0x8c, 0x2a, 0x3b, 0xbf, 0x37, 0x92, 0x40, 0x9c, 0xc6, 0xb7, 0x09,
    +	0x5c, 0x54, 0xe1, 0xd4, 0xc4, 0x59, 0xc8, 0x9e, 0x12, 0xe8, 0xd6, 0xf7, 0x1e, 0xe3, 0x14, 0x9b,
    +	0xc3, 0x4b, 0xb9, 0xe6, 0xf0, 0xbf, 0x69, 0xc1, 0xc9, 0x64, 0x3b, 0xec, 0xdd, 0xfd, 0x77, 0x2d,
    +	0x38, 0x15, 0xe7, 0xf4, 0x4a, 0xbb, 0x20, 0xbc, 0x51, 0x18, 0x22, 0x2e, 0xa7, 0xc7, 0x71, 0x38,
    +	0x99, 0xb5, 0x2c, 0xd2, 0x38, 0xbb, 0x45, 0xfb, 0xff, 0x19, 0x80, 0x4a, 0x5e, 0x6c, 0x39, 0xf6,
    +	0xd2, 0xc8, 0x79, 0x58, 0xdf, 0x21, 0x0f, 0xc4, 0x7b, 0x8e, 0xf8, 0xa5, 0x11, 0x2f, 0xc6, 0x12,
    +	0x9e, 0x4c, 0xe3, 0x56, 0xea, 0x33, 0x8d, 0xdb, 0x36, 0xcc, 0x3c, 0xd8, 0x26, 0xde, 0x1d, 0x2f,
    +	0x74, 0x22, 0x37, 0xdc, 0x74, 0x99, 0x01, 0x9d, 0xaf, 0x9b, 0xb7, 0xe4, 0xab, 0x8b, 0x7b, 0x49,
    +	0x84, 0xc3, 0xfd, 0xea, 0x79, 0xa3, 0x20, 0xee, 0x32, 0x67, 0x24, 0x38, 0x4d, 0x34, 0x9d, 0x05,
    +	0x6f, 0xe0, 0x29, 0x67, 0xc1, 0x6b, 0xbb, 0xc2, 0xed, 0x46, 0x3e, 0x23, 0x61, 0xd7, 0xd6, 0x35,
    +	0x55, 0x8a, 0x35, 0x0c, 0xf4, 0x75, 0x40, 0x7a, 0x1a, 0x53, 0x23, 0xb4, 0xef, 0xab, 0x07, 0xfb,
    +	0x55, 0xb4, 0x9e, 0x82, 0x1e, 0xee, 0x57, 0x4f, 0xd0, 0xd2, 0x55, 0x8f, 0x5e, 0x7f, 0xe3, 0x78,
    +	0x88, 0x19, 0x84, 0xd0, 0x3d, 0x98, 0xa6, 0xa5, 0x6c, 0x47, 0xc9, 0xb8, 0xc1, 0xfc, 0xca, 0xfa,
    +	0xf2, 0xc1, 0x7e, 0x75, 0x7a, 0x3d, 0x01, 0xcb, 0x23, 0x9d, 0x22, 0x92, 0x91, 0x0c, 0x6f, 0xa4,
    +	0xdf, 0x64, 0x78, 0xf6, 0x77, 0x2d, 0x38, 0x4b, 0x0f, 0xb8, 0xe6, 0xad, 0x1c, 0x2b, 0xba, 0xd3,
    +	0x71, 0xb9, 0x9d, 0x46, 0x1c, 0x35, 0x4c, 0x57, 0x57, 0x5b, 0xe5, 0x56, 0x1a, 0x05, 0xa5, 0x1c,
    +	0x7e, 0xc7, 0xf5, 0x9a, 0x49, 0x0e, 0x7f, 0xd3, 0xf5, 0x9a, 0x98, 0x41, 0xd4, 0x91, 0x55, 0xce,
    +	0xcd, 0x43, 0xf0, 0x2b, 0x74, 0xaf, 0xd2, 0xbe, 0xfc, 0x40, 0xbb, 0x81, 0x5e, 0xd6, 0x6d, 0xaa,
    +	0xc2, 0x7d, 0x32, 0xd7, 0x9e, 0xfa, 0x1d, 0x0b, 0xc4, 0xeb, 0xf7, 0x3e, 0xce, 0xe4, 0xaf, 0xc2,
    +	0xf8, 0x6e, 0x3a, 0xc5, 0xf3, 0xc5, 0xfc, 0x70, 0x00, 0x22, 0xb1, 0xb3, 0x12, 0xd1, 0x8d, 0x74,
    +	0xce, 0x06, 0x2d, 0xbb, 0x09, 0x02, 0xba, 0x44, 0x98, 0x55, 0xa3, 0x77, 0x6f, 0x5e, 0x03, 0x68,
    +	0x32, 0x5c, 0x96, 0xec, 0xac, 0x64, 0x4a, 0x5c, 0x4b, 0x0a, 0x82, 0x35, 0x2c, 0xfb, 0x17, 0xca,
    +	0x30, 0x26, 0x53, 0x0a, 0x77, 0xbd, 0x7e, 0x74, 0x8f, 0xba, 0xe0, 0x54, 0xea, 0x29, 0x38, 0x7d,
    +	0x08, 0x33, 0x01, 0x69, 0x74, 0x83, 0xd0, 0xdd, 0x25, 0x12, 0x2c, 0x36, 0xc9, 0x1c, 0x4f, 0x83,
    +	0x91, 0x00, 0x1e, 0xb2, 0xd0, 0x5d, 0x89, 0x42, 0x66, 0x34, 0x4e, 0x13, 0x42, 0x57, 0x60, 0x94,
    +	0xa9, 0xde, 0x6b, 0xb1, 0x42, 0x58, 0x29, 0xbe, 0xd6, 0x24, 0x00, 0xc7, 0x38, 0xec, 0x72, 0xd0,
    +	0xbd, 0xaf, 0x65, 0xa2, 0x8b, 0x2f, 0x07, 0xbc, 0x18, 0x4b, 0x38, 0xfa, 0x32, 0x4c, 0xf3, 0x7a,
    +	0x81, 0xdf, 0x71, 0xb6, 0xb8, 0x49, 0x70, 0x50, 0x85, 0xd7, 0x99, 0x5e, 0x4b, 0xc0, 0x0e, 0xf7,
    +	0xab, 0x27, 0x93, 0x65, 0xac, 0xdb, 0x29, 0x2a, 0xcc, 0xf3, 0x8f, 0x37, 0x42, 0xcf, 0x8c, 0x94,
    +	0xc3, 0x60, 0x0c, 0xc2, 0x3a, 0x9e, 0xfd, 0x27, 0x16, 0xcc, 0x68, 0x53, 0xd5, 0x77, 0x26, 0x12,
    +	0x63, 0x90, 0x4a, 0x7d, 0x0c, 0xd2, 0xd1, 0xa2, 0x3d, 0x64, 0xce, 0xf0, 0xc0, 0x13, 0x9a, 0x61,
    +	0xfb, 0x9b, 0x80, 0xd2, 0xf9, 0xaa, 0xd1, 0xfb, 0xdc, 0x91, 0xdf, 0x0d, 0x48, 0xb3, 0xc8, 0xe0,
    +	0xaf, 0x47, 0xce, 0x91, 0x2f, 0x57, 0x79, 0x2d, 0xac, 0xea, 0xdb, 0x7f, 0x32, 0x00, 0xd3, 0xc9,
    +	0x58, 0x1d, 0xe8, 0x06, 0x0c, 0x71, 0x29, 0x5d, 0x90, 0x2f, 0xf0, 0x27, 0xd3, 0x22, 0x7c, 0xf0,
    +	0x2c, 0x41, 0x5c, 0xba, 0x17, 0xf5, 0xd1, 0x87, 0x30, 0xd6, 0xf4, 0x1f, 0x78, 0x0f, 0x9c, 0xa0,
    +	0x39, 0x5f, 0x5b, 0x15, 0x1c, 0x22, 0x53, 0x01, 0xb5, 0x14, 0xa3, 0xe9, 0x51, 0x43, 0x98, 0xef,
    +	0x44, 0x0c, 0xc2, 0x3a, 0x39, 0xb4, 0xc1, 0x12, 0x57, 0x6d, 0xba, 0x5b, 0x6b, 0x4e, 0xa7, 0xe8,
    +	0x55, 0xd7, 0xa2, 0x44, 0xd2, 0x28, 0x4f, 0x88, 0xec, 0x56, 0x1c, 0x80, 0x63, 0x42, 0xe8, 0x47,
    +	0xe1, 0x44, 0x98, 0x63, 0x12, 0xcb, 0x71, 0x38, 0x28, 0xb4, 0x12, 0x71, 0x65, 0x4a, 0x96, 0xf1,
    +	0x2c, 0xab, 0x19, 0xf4, 0x10, 0x90, 0x50, 0x3d, 0x6f, 0x04, 0xdd, 0x30, 0xe2, 0x29, 0x20, 0xc5,
    +	0xa5, 0xeb, 0x73, 0xd9, 0x7a, 0x82, 0x24, 0xb6, 0xd6, 0x36, 0x0b, 0x9c, 0x9c, 0xc6, 0xc0, 0x19,
    +	0x6d, 0xa0, 0x6d, 0x98, 0xec, 0x18, 0xd9, 0x37, 0xd9, 0xde, 0xcc, 0x89, 0x2e, 0x9c, 0x97, 0xa7,
    +	0x93, 0x9f, 0xd2, 0x26, 0x14, 0x27, 0xe8, 0xda, 0xdf, 0x19, 0x80, 0x59, 0x99, 0x8a, 0x3e, 0xe3,
    +	0x9d, 0xcc, 0xb7, 0xad, 0xc4, 0x43, 0x99, 0xb7, 0xf2, 0x8f, 0x94, 0xa7, 0xf6, 0x5c, 0xe6, 0x27,
    +	0xd3, 0xcf, 0x65, 0xde, 0x39, 0x62, 0x37, 0x9e, 0xd8, 0xa3, 0x99, 0x1f, 0xda, 0x97, 0x2e, 0x07,
    +	0x27, 0xc1, 0x10, 0x02, 0x10, 0xe6, 0xf1, 0xef, 0x6b, 0xd2, 0x48, 0x95, 0xa3, 0x68, 0xb8, 0x21,
    +	0x70, 0x0c, 0xb1, 0x62, 0x5c, 0x46, 0xc9, 0x67, 0x1c, 0x5d, 0xd1, 0xa1, 0x34, 0x49, 0xbb, 0x13,
    +	0xed, 0x2d, 0xb9, 0x81, 0xe8, 0x71, 0x26, 0xcd, 0x65, 0x81, 0x93, 0xa6, 0x29, 0x21, 0x58, 0xd1,
    +	0x41, 0xbb, 0x30, 0xb3, 0xc5, 0x62, 0x4b, 0x69, 0x59, 0xe1, 0x05, 0x07, 0xca, 0xe4, 0x10, 0xd7,
    +	0x17, 0x97, 0xf3, 0x53, 0xc8, 0xf3, 0x6b, 0x66, 0x0a, 0x05, 0xa7, 0x9b, 0xa0, 0x5b, 0xe3, 0xa4,
    +	0xf3, 0x20, 0x5c, 0x6e, 0x39, 0x61, 0xe4, 0x36, 0x16, 0x5a, 0x7e, 0x63, 0xa7, 0x1e, 0xf9, 0x81,
    +	0xcc, 0x2a, 0x9a, 0x79, 0xcb, 0x9b, 0xbf, 0x57, 0x4f, 0xe1, 0x1b, 0xcd, 0xb3, 0xec, 0xb6, 0x59,
    +	0x58, 0x38, 0xb3, 0x2d, 0xb4, 0x0e, 0xc3, 0x5b, 0x6e, 0x84, 0x49, 0xc7, 0x17, 0x7c, 0x29, 0x93,
    +	0xe9, 0x5e, 0xe7, 0x28, 0x46, 0x4b, 0x2c, 0xf6, 0x95, 0x00, 0x60, 0x49, 0x04, 0xbd, 0xaf, 0x8e,
    +	0x9b, 0xa1, 0x7c, 0x55, 0x6f, 0xda, 0xcb, 0x2f, 0xf3, 0xc0, 0x79, 0x17, 0xca, 0xde, 0x66, 0x58,
    +	0x14, 0xf5, 0x67, 0x7d, 0xc5, 0xd0, 0xd4, 0x2d, 0x0c, 0xd3, 0x4b, 0xf8, 0xfa, 0x4a, 0x1d, 0xd3,
    +	0x8a, 0xec, 0x81, 0x6d, 0xd8, 0x08, 0x5d, 0x91, 0xbc, 0x2b, 0xf3, 0xbd, 0xf1, 0x6a, 0x7d, 0xb1,
    +	0xbe, 0x6a, 0xd0, 0x60, 0xf1, 0x13, 0x59, 0x31, 0xe6, 0xd5, 0xd1, 0x5d, 0x18, 0xdd, 0xe2, 0x2c,
    +	0x76, 0x93, 0x87, 0xb5, 0xcd, 0x39, 0xf6, 0xae, 0x4b, 0x24, 0x83, 0x1e, 0x3b, 0x9c, 0x14, 0x08,
    +	0xc7, 0xa4, 0xd0, 0x77, 0x2c, 0x38, 0xd5, 0x49, 0xe8, 0x6a, 0xd9, 0xb3, 0x38, 0xe1, 0x10, 0x97,
    +	0xf9, 0xd4, 0xa0, 0x96, 0x55, 0xc1, 0x68, 0x90, 0x19, 0x7a, 0x32, 0xd1, 0x70, 0x76, 0x73, 0x74,
    +	0xa0, 0x83, 0xfb, 0xcd, 0xa2, 0x7c, 0x4f, 0x89, 0x10, 0x48, 0x7c, 0xa0, 0xf1, 0xc2, 0x12, 0xa6,
    +	0x15, 0xd1, 0x06, 0xc0, 0x66, 0x8b, 0x88, 0xd8, 0x92, 0xc2, 0xfd, 0x2a, 0x53, 0xce, 0x58, 0x51,
    +	0x58, 0x82, 0x0e, 0xbb, 0xf3, 0xc6, 0xa5, 0x58, 0xa3, 0x43, 0x97, 0x52, 0xc3, 0xf5, 0x9a, 0x24,
    +	0x60, 0x66, 0xb4, 0x9c, 0xa5, 0xb4, 0xc8, 0x30, 0xd2, 0x4b, 0x89, 0x97, 0x63, 0x41, 0x81, 0xd1,
    +	0x22, 0x9d, 0xed, 0xcd, 0xb0, 0x28, 0xb3, 0xc8, 0x22, 0xe9, 0x6c, 0x27, 0x16, 0x14, 0xa7, 0xc5,
    +	0xca, 0xb1, 0xa0, 0x40, 0xb7, 0xcc, 0x26, 0xdd, 0x40, 0x24, 0xa8, 0x4c, 0xe5, 0x6f, 0x99, 0x15,
    +	0x8e, 0x92, 0xde, 0x32, 0x02, 0x80, 0x25, 0x11, 0xf4, 0x0d, 0x53, 0xae, 0x9a, 0x66, 0x34, 0x5f,
    +	0xee, 0x21, 0x57, 0x19, 0x74, 0x8b, 0x25, 0xab, 0xb7, 0xa0, 0xb4, 0xd9, 0x60, 0xe6, 0xb7, 0x1c,
    +	0xeb, 0xc4, 0xca, 0xa2, 0x41, 0x8d, 0x45, 0xea, 0x5f, 0x59, 0xc4, 0xa5, 0xcd, 0x06, 0x5d, 0xfa,
    +	0xce, 0xa3, 0x6e, 0x40, 0x56, 0xdc, 0x16, 0x11, 0xa1, 0x83, 0x33, 0x97, 0xfe, 0xbc, 0x44, 0x4a,
    +	0x2f, 0x7d, 0x05, 0xc2, 0x31, 0x29, 0x4a, 0x37, 0x96, 0xf6, 0x4e, 0xe4, 0xd3, 0x55, 0x42, 0x5d,
    +	0x9a, 0x6e, 0xa6, 0xbc, 0xb7, 0x03, 0x13, 0xbb, 0x61, 0x67, 0x9b, 0x48, 0xae, 0xc8, 0x0c, 0x83,
    +	0x39, 0x31, 0x31, 0xee, 0x0a, 0x44, 0x37, 0x88, 0xba, 0x4e, 0x2b, 0xc5, 0xc8, 0x99, 0x12, 0xe7,
    +	0xae, 0x4e, 0x0c, 0x9b, 0xb4, 0xe9, 0x42, 0xf8, 0x88, 0x07, 0xae, 0x63, 0x26, 0xc2, 0x9c, 0x85,
    +	0x90, 0x11, 0xdb, 0x8e, 0x2f, 0x04, 0x01, 0xc0, 0x92, 0x88, 0x1a, 0x6c, 0x76, 0x00, 0x9d, 0xee,
    +	0x31, 0xd8, 0xa9, 0xfe, 0xc6, 0x83, 0xcd, 0x0e, 0x9c, 0x98, 0x14, 0x3b, 0x68, 0x3a, 0xdb, 0x7e,
    +	0xe4, 0x7b, 0x89, 0x43, 0xee, 0x4c, 0xfe, 0x41, 0x53, 0xcb, 0xc0, 0x4f, 0x1f, 0x34, 0x59, 0x58,
    +	0x38, 0xb3, 0x2d, 0xfa, 0x71, 0x1d, 0x19, 0x83, 0x50, 0x64, 0x42, 0x79, 0x29, 0x27, 0x84, 0x67,
    +	0x3a, 0x50, 0x21, 0xff, 0x38, 0x05, 0xc2, 0x31, 0x29, 0xd4, 0xa4, 0x92, 0xae, 0x1e, 0xdb, 0x96,
    +	0x65, 0x74, 0xc9, 0x91, 0x0b, 0xb2, 0xa2, 0xe0, 0x4a, 0x29, 0x57, 0x87, 0xe0, 0x04, 0x4d, 0xe6,
    +	0x23, 0xc8, 0x1f, 0x15, 0xb2, 0x84, 0x2f, 0x39, 0x53, 0x9d, 0xf1, 0xee, 0x90, 0x4f, 0xb5, 0x00,
    +	0x60, 0x49, 0x84, 0x8e, 0x86, 0x78, 0x0a, 0xe7, 0x87, 0x2c, 0x6f, 0x52, 0x9e, 0x29, 0x3f, 0xcb,
    +	0x20, 0x25, 0x03, 0xcd, 0x0b, 0x10, 0x8e, 0x49, 0x51, 0x4e, 0x4e, 0x0f, 0xbc, 0x73, 0xf9, 0x9c,
    +	0x3c, 0x79, 0xdc, 0x31, 0x4e, 0x4e, 0x0f, 0xbb, 0xb2, 0x38, 0xea, 0x54, 0x5c, 0x74, 0x96, 0xf3,
    +	0x25, 0xa7, 0x5f, 0x2a, 0xb0, 0x7a, 0xba, 0x5f, 0x0a, 0x84, 0x63, 0x52, 0xec, 0x28, 0x66, 0x41,
    +	0xf0, 0x2e, 0x14, 0x1c, 0xc5, 0x14, 0x21, 0xe3, 0x28, 0xd6, 0x82, 0xe4, 0xd9, 0x7f, 0xb9, 0x04,
    +	0x17, 0x8a, 0xf7, 0x6d, 0x6c, 0xad, 0xab, 0xc5, 0xde, 0x51, 0x09, 0x6b, 0x1d, 0xd7, 0x1d, 0xc5,
    +	0x58, 0x7d, 0x87, 0x36, 0xbe, 0x0e, 0x33, 0xea, 0xe1, 0x63, 0xcb, 0x6d, 0xec, 0x69, 0x89, 0x5e,
    +	0x55, 0x10, 0xa0, 0x7a, 0x12, 0x01, 0xa7, 0xeb, 0xa0, 0x79, 0x98, 0x32, 0x0a, 0x57, 0x97, 0x84,
    +	0xa2, 0x21, 0xce, 0x56, 0x62, 0x82, 0x71, 0x12, 0xdf, 0xfe, 0x45, 0x0b, 0xce, 0xf0, 0x40, 0xbc,
    +	0xa4, 0x59, 0xf3, 0x9b, 0x52, 0xa3, 0x70, 0xa4, 0xc8, 0xbd, 0x9b, 0x30, 0xd5, 0x31, 0xab, 0xf6,
    +	0x08, 0x36, 0xae, 0xa3, 0xc6, 0x7d, 0x4d, 0x00, 0x70, 0x92, 0xa8, 0xfd, 0xf3, 0x25, 0x38, 0x5f,
    +	0xe8, 0xc9, 0x8f, 0x30, 0x9c, 0xde, 0x6a, 0x87, 0xce, 0x62, 0x40, 0x9a, 0xc4, 0x8b, 0x5c, 0xa7,
    +	0x55, 0xef, 0x90, 0x86, 0x66, 0x6f, 0x65, 0x2e, 0xf1, 0xd7, 0xd7, 0xea, 0xf3, 0x69, 0x0c, 0x9c,
    +	0x53, 0x13, 0xad, 0x00, 0x4a, 0x43, 0xc4, 0x0c, 0xb3, 0xcb, 0x74, 0x9a, 0x1e, 0xce, 0xa8, 0x81,
    +	0xbe, 0x00, 0x13, 0xea, 0x85, 0x80, 0x36, 0xe3, 0xec, 0x80, 0xc0, 0x3a, 0x00, 0x9b, 0x78, 0xe8,
    +	0x2a, 0x4f, 0x63, 0x25, 0x12, 0x9e, 0x09, 0xe3, 0xec, 0x94, 0xcc, 0x51, 0x25, 0x8a, 0xb1, 0x8e,
    +	0xb3, 0x70, 0xed, 0x77, 0xfe, 0xf0, 0xc2, 0x67, 0x7e, 0xf7, 0x0f, 0x2f, 0x7c, 0xe6, 0x0f, 0xfe,
    +	0xf0, 0xc2, 0x67, 0x7e, 0xfc, 0xe0, 0x82, 0xf5, 0x3b, 0x07, 0x17, 0xac, 0xdf, 0x3d, 0xb8, 0x60,
    +	0xfd, 0xc1, 0xc1, 0x05, 0xeb, 0x7f, 0x3b, 0xb8, 0x60, 0x7d, 0xef, 0x7f, 0xbf, 0xf0, 0x99, 0xaf,
    +	0xa2, 0x38, 0x16, 0xf6, 0x15, 0x3a, 0x3b, 0x57, 0x76, 0xaf, 0xfe, 0x87, 0x00, 0x00, 0x00, 0xff,
    +	0xff, 0xba, 0xfb, 0xfc, 0xdd, 0x18, 0x2e, 0x01, 0x00,
     }
     
     func (m *AWSElasticBlockStoreVolumeSource) Marshal() (dAtA []byte, err error) {
    @@ -9312,6 +9549,22 @@ func (m *Container) MarshalToSizedBuffer(dAtA []byte) (int, error) {
     	_ = i
     	var l int
     	_ = l
    +	if len(m.RestartPolicyRules) > 0 {
    +		for iNdEx := len(m.RestartPolicyRules) - 1; iNdEx >= 0; iNdEx-- {
    +			{
    +				size, err := m.RestartPolicyRules[iNdEx].MarshalToSizedBuffer(dAtA[:i])
    +				if err != nil {
    +					return 0, err
    +				}
    +				i -= size
    +				i = encodeVarintGenerated(dAtA, i, uint64(size))
    +			}
    +			i--
    +			dAtA[i] = 0x1
    +			i--
    +			dAtA[i] = 0xca
    +		}
    +	}
     	if m.RestartPolicy != nil {
     		i -= len(*m.RestartPolicy)
     		copy(dAtA[i:], *m.RestartPolicy)
    @@ -9566,6 +9819,44 @@ func (m *Container) MarshalToSizedBuffer(dAtA []byte) (int, error) {
     	return len(dAtA) - i, nil
     }
     
    +func (m *ContainerExtendedResourceRequest) Marshal() (dAtA []byte, err error) {
    +	size := m.Size()
    +	dAtA = make([]byte, size)
    +	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    +	if err != nil {
    +		return nil, err
    +	}
    +	return dAtA[:n], nil
    +}
    +
    +func (m *ContainerExtendedResourceRequest) MarshalTo(dAtA []byte) (int, error) {
    +	size := m.Size()
    +	return m.MarshalToSizedBuffer(dAtA[:size])
    +}
    +
    +func (m *ContainerExtendedResourceRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    +	i := len(dAtA)
    +	_ = i
    +	var l int
    +	_ = l
    +	i -= len(m.RequestName)
    +	copy(dAtA[i:], m.RequestName)
    +	i = encodeVarintGenerated(dAtA, i, uint64(len(m.RequestName)))
    +	i--
    +	dAtA[i] = 0x1a
    +	i -= len(m.ResourceName)
    +	copy(dAtA[i:], m.ResourceName)
    +	i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceName)))
    +	i--
    +	dAtA[i] = 0x12
    +	i -= len(m.ContainerName)
    +	copy(dAtA[i:], m.ContainerName)
    +	i = encodeVarintGenerated(dAtA, i, uint64(len(m.ContainerName)))
    +	i--
    +	dAtA[i] = 0xa
    +	return len(dAtA) - i, nil
    +}
    +
     func (m *ContainerImage) Marshal() (dAtA []byte, err error) {
     	size := m.Size()
     	dAtA = make([]byte, size)
    @@ -9678,6 +9969,81 @@ func (m *ContainerResizePolicy) MarshalToSizedBuffer(dAtA []byte) (int, error) {
     	return len(dAtA) - i, nil
     }
     
    +func (m *ContainerRestartRule) Marshal() (dAtA []byte, err error) {
    +	size := m.Size()
    +	dAtA = make([]byte, size)
    +	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    +	if err != nil {
    +		return nil, err
    +	}
    +	return dAtA[:n], nil
    +}
    +
    +func (m *ContainerRestartRule) MarshalTo(dAtA []byte) (int, error) {
    +	size := m.Size()
    +	return m.MarshalToSizedBuffer(dAtA[:size])
    +}
    +
    +func (m *ContainerRestartRule) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    +	i := len(dAtA)
    +	_ = i
    +	var l int
    +	_ = l
    +	if m.ExitCodes != nil {
    +		{
    +			size, err := m.ExitCodes.MarshalToSizedBuffer(dAtA[:i])
    +			if err != nil {
    +				return 0, err
    +			}
    +			i -= size
    +			i = encodeVarintGenerated(dAtA, i, uint64(size))
    +		}
    +		i--
    +		dAtA[i] = 0x12
    +	}
    +	i -= len(m.Action)
    +	copy(dAtA[i:], m.Action)
    +	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Action)))
    +	i--
    +	dAtA[i] = 0xa
    +	return len(dAtA) - i, nil
    +}
    +
    +func (m *ContainerRestartRuleOnExitCodes) Marshal() (dAtA []byte, err error) {
    +	size := m.Size()
    +	dAtA = make([]byte, size)
    +	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    +	if err != nil {
    +		return nil, err
    +	}
    +	return dAtA[:n], nil
    +}
    +
    +func (m *ContainerRestartRuleOnExitCodes) MarshalTo(dAtA []byte) (int, error) {
    +	size := m.Size()
    +	return m.MarshalToSizedBuffer(dAtA[:size])
    +}
    +
    +func (m *ContainerRestartRuleOnExitCodes) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    +	i := len(dAtA)
    +	_ = i
    +	var l int
    +	_ = l
    +	if len(m.Values) > 0 {
    +		for iNdEx := len(m.Values) - 1; iNdEx >= 0; iNdEx-- {
    +			i = encodeVarintGenerated(dAtA, i, uint64(m.Values[iNdEx]))
    +			i--
    +			dAtA[i] = 0x10
    +		}
    +	}
    +	i -= len(m.Operator)
    +	copy(dAtA[i:], m.Operator)
    +	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Operator)))
    +	i--
    +	dAtA[i] = 0xa
    +	return len(dAtA) - i, nil
    +}
    +
     func (m *ContainerState) Marshal() (dAtA []byte, err error) {
     	size := m.Size()
     	dAtA = make([]byte, size)
    @@ -9887,6 +10253,13 @@ func (m *ContainerStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
     	_ = i
     	var l int
     	_ = l
    +	if m.StopSignal != nil {
    +		i -= len(*m.StopSignal)
    +		copy(dAtA[i:], *m.StopSignal)
    +		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.StopSignal)))
    +		i--
    +		dAtA[i] = 0x7a
    +	}
     	if len(m.AllocatedResourcesStatus) > 0 {
     		for iNdEx := len(m.AllocatedResourcesStatus) - 1; iNdEx >= 0; iNdEx-- {
     			{
    @@ -10640,6 +11013,18 @@ func (m *EnvVarSource) MarshalToSizedBuffer(dAtA []byte) (int, error) {
     	_ = i
     	var l int
     	_ = l
    +	if m.FileKeyRef != nil {
    +		{
    +			size, err := m.FileKeyRef.MarshalToSizedBuffer(dAtA[:i])
    +			if err != nil {
    +				return 0, err
    +			}
    +			i -= size
    +			i = encodeVarintGenerated(dAtA, i, uint64(size))
    +		}
    +		i--
    +		dAtA[i] = 0x2a
    +	}
     	if m.SecretKeyRef != nil {
     		{
     			size, err := m.SecretKeyRef.MarshalToSizedBuffer(dAtA[:i])
    @@ -10749,6 +11134,22 @@ func (m *EphemeralContainerCommon) MarshalToSizedBuffer(dAtA []byte) (int, error
     	_ = i
     	var l int
     	_ = l
    +	if len(m.RestartPolicyRules) > 0 {
    +		for iNdEx := len(m.RestartPolicyRules) - 1; iNdEx >= 0; iNdEx-- {
    +			{
    +				size, err := m.RestartPolicyRules[iNdEx].MarshalToSizedBuffer(dAtA[:i])
    +				if err != nil {
    +					return 0, err
    +				}
    +				i -= size
    +				i = encodeVarintGenerated(dAtA, i, uint64(size))
    +			}
    +			i--
    +			dAtA[i] = 0x1
    +			i--
    +			dAtA[i] = 0xca
    +		}
    +	}
     	if m.RestartPolicy != nil {
     		i -= len(*m.RestartPolicy)
     		copy(dAtA[i:], *m.RestartPolicy)
    @@ -11385,6 +11786,54 @@ func (m *FCVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) {
     	return len(dAtA) - i, nil
     }
     
    +func (m *FileKeySelector) Marshal() (dAtA []byte, err error) {
    +	size := m.Size()
    +	dAtA = make([]byte, size)
    +	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    +	if err != nil {
    +		return nil, err
    +	}
    +	return dAtA[:n], nil
    +}
    +
    +func (m *FileKeySelector) MarshalTo(dAtA []byte) (int, error) {
    +	size := m.Size()
    +	return m.MarshalToSizedBuffer(dAtA[:size])
    +}
    +
    +func (m *FileKeySelector) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    +	i := len(dAtA)
    +	_ = i
    +	var l int
    +	_ = l
    +	if m.Optional != nil {
    +		i--
    +		if *m.Optional {
    +			dAtA[i] = 1
    +		} else {
    +			dAtA[i] = 0
    +		}
    +		i--
    +		dAtA[i] = 0x20
    +	}
    +	i -= len(m.Key)
    +	copy(dAtA[i:], m.Key)
    +	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key)))
    +	i--
    +	dAtA[i] = 0x1a
    +	i -= len(m.Path)
    +	copy(dAtA[i:], m.Path)
    +	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path)))
    +	i--
    +	dAtA[i] = 0x12
    +	i -= len(m.VolumeName)
    +	copy(dAtA[i:], m.VolumeName)
    +	i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeName)))
    +	i--
    +	dAtA[i] = 0xa
    +	return len(dAtA) - i, nil
    +}
    +
     func (m *FlexPersistentVolumeSource) Marshal() (dAtA []byte, err error) {
     	size := m.Size()
     	dAtA = make([]byte, size)
    @@ -12258,6 +12707,13 @@ func (m *Lifecycle) MarshalToSizedBuffer(dAtA []byte) (int, error) {
     	_ = i
     	var l int
     	_ = l
    +	if m.StopSignal != nil {
    +		i -= len(*m.StopSignal)
    +		copy(dAtA[i:], *m.StopSignal)
    +		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.StopSignal)))
    +		i--
    +		dAtA[i] = 0x1a
    +	}
     	if m.PreStop != nil {
     		{
     			size, err := m.PreStop.MarshalToSizedBuffer(dAtA[:i])
    @@ -14135,6 +14591,34 @@ func (m *NodeStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
     	return len(dAtA) - i, nil
     }
     
    +func (m *NodeSwapStatus) Marshal() (dAtA []byte, err error) {
    +	size := m.Size()
    +	dAtA = make([]byte, size)
    +	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    +	if err != nil {
    +		return nil, err
    +	}
    +	return dAtA[:n], nil
    +}
    +
    +func (m *NodeSwapStatus) MarshalTo(dAtA []byte) (int, error) {
    +	size := m.Size()
    +	return m.MarshalToSizedBuffer(dAtA[:size])
    +}
    +
    +func (m *NodeSwapStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    +	i := len(dAtA)
    +	_ = i
    +	var l int
    +	_ = l
    +	if m.Capacity != nil {
    +		i = encodeVarintGenerated(dAtA, i, uint64(*m.Capacity))
    +		i--
    +		dAtA[i] = 0x8
    +	}
    +	return len(dAtA) - i, nil
    +}
    +
     func (m *NodeSystemInfo) Marshal() (dAtA []byte, err error) {
     	size := m.Size()
     	dAtA = make([]byte, size)
    @@ -14155,6 +14639,18 @@ func (m *NodeSystemInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) {
     	_ = i
     	var l int
     	_ = l
    +	if m.Swap != nil {
    +		{
    +			size, err := m.Swap.MarshalToSizedBuffer(dAtA[:i])
    +			if err != nil {
    +				return 0, err
    +			}
    +			i -= size
    +			i = encodeVarintGenerated(dAtA, i, uint64(size))
    +		}
    +		i--
    +		dAtA[i] = 0x5a
    +	}
     	i -= len(m.Architecture)
     	copy(dAtA[i:], m.Architecture)
     	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Architecture)))
    @@ -15703,6 +16199,59 @@ func (m *PodAttachOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) {
     	return len(dAtA) - i, nil
     }
     
    +func (m *PodCertificateProjection) Marshal() (dAtA []byte, err error) {
    +	size := m.Size()
    +	dAtA = make([]byte, size)
    +	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    +	if err != nil {
    +		return nil, err
    +	}
    +	return dAtA[:n], nil
    +}
    +
    +func (m *PodCertificateProjection) MarshalTo(dAtA []byte) (int, error) {
    +	size := m.Size()
    +	return m.MarshalToSizedBuffer(dAtA[:size])
    +}
    +
    +func (m *PodCertificateProjection) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    +	i := len(dAtA)
    +	_ = i
    +	var l int
    +	_ = l
    +	i -= len(m.CertificateChainPath)
    +	copy(dAtA[i:], m.CertificateChainPath)
    +	i = encodeVarintGenerated(dAtA, i, uint64(len(m.CertificateChainPath)))
    +	i--
    +	dAtA[i] = 0x32
    +	i -= len(m.KeyPath)
    +	copy(dAtA[i:], m.KeyPath)
    +	i = encodeVarintGenerated(dAtA, i, uint64(len(m.KeyPath)))
    +	i--
    +	dAtA[i] = 0x2a
    +	i -= len(m.CredentialBundlePath)
    +	copy(dAtA[i:], m.CredentialBundlePath)
    +	i = encodeVarintGenerated(dAtA, i, uint64(len(m.CredentialBundlePath)))
    +	i--
    +	dAtA[i] = 0x22
    +	if m.MaxExpirationSeconds != nil {
    +		i = encodeVarintGenerated(dAtA, i, uint64(*m.MaxExpirationSeconds))
    +		i--
    +		dAtA[i] = 0x18
    +	}
    +	i -= len(m.KeyType)
    +	copy(dAtA[i:], m.KeyType)
    +	i = encodeVarintGenerated(dAtA, i, uint64(len(m.KeyType)))
    +	i--
    +	dAtA[i] = 0x12
    +	i -= len(m.SignerName)
    +	copy(dAtA[i:], m.SignerName)
    +	i = encodeVarintGenerated(dAtA, i, uint64(len(m.SignerName)))
    +	i--
    +	dAtA[i] = 0xa
    +	return len(dAtA) - i, nil
    +}
    +
     func (m *PodCondition) Marshal() (dAtA []byte, err error) {
     	size := m.Size()
     	dAtA = make([]byte, size)
    @@ -15723,6 +16272,9 @@ func (m *PodCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) {
     	_ = i
     	var l int
     	_ = l
    +	i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration))
    +	i--
    +	dAtA[i] = 0x38
     	i -= len(m.Message)
     	copy(dAtA[i:], m.Message)
     	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message)))
    @@ -15925,6 +16477,48 @@ func (m *PodExecOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) {
     	return len(dAtA) - i, nil
     }
     
    +func (m *PodExtendedResourceClaimStatus) Marshal() (dAtA []byte, err error) {
    +	size := m.Size()
    +	dAtA = make([]byte, size)
    +	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    +	if err != nil {
    +		return nil, err
    +	}
    +	return dAtA[:n], nil
    +}
    +
    +func (m *PodExtendedResourceClaimStatus) MarshalTo(dAtA []byte) (int, error) {
    +	size := m.Size()
    +	return m.MarshalToSizedBuffer(dAtA[:size])
    +}
    +
    +func (m *PodExtendedResourceClaimStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    +	i := len(dAtA)
    +	_ = i
    +	var l int
    +	_ = l
    +	i -= len(m.ResourceClaimName)
    +	copy(dAtA[i:], m.ResourceClaimName)
    +	i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceClaimName)))
    +	i--
    +	dAtA[i] = 0x12
    +	if len(m.RequestMappings) > 0 {
    +		for iNdEx := len(m.RequestMappings) - 1; iNdEx >= 0; iNdEx-- {
    +			{
    +				size, err := m.RequestMappings[iNdEx].MarshalToSizedBuffer(dAtA[:i])
    +				if err != nil {
    +					return 0, err
    +				}
    +				i -= size
    +				i = encodeVarintGenerated(dAtA, i, uint64(size))
    +			}
    +			i--
    +			dAtA[i] = 0xa
    +		}
    +	}
    +	return len(dAtA) - i, nil
    +}
    +
     func (m *PodIP) Marshal() (dAtA []byte, err error) {
     	size := m.Size()
     	dAtA = make([]byte, size)
    @@ -16506,6 +17100,15 @@ func (m *PodSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
     	_ = i
     	var l int
     	_ = l
    +	if m.HostnameOverride != nil {
    +		i -= len(*m.HostnameOverride)
    +		copy(dAtA[i:], *m.HostnameOverride)
    +		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.HostnameOverride)))
    +		i--
    +		dAtA[i] = 0x2
    +		i--
    +		dAtA[i] = 0xca
    +	}
     	if m.Resources != nil {
     		{
     			size, err := m.Resources.MarshalToSizedBuffer(dAtA[:i])
    @@ -16994,6 +17597,25 @@ func (m *PodStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
     	_ = i
     	var l int
     	_ = l
    +	if m.ExtendedResourceClaimStatus != nil {
    +		{
    +			size, err := m.ExtendedResourceClaimStatus.MarshalToSizedBuffer(dAtA[:i])
    +			if err != nil {
    +				return 0, err
    +			}
    +			i -= size
    +			i = encodeVarintGenerated(dAtA, i, uint64(size))
    +		}
    +		i--
    +		dAtA[i] = 0x1
    +		i--
    +		dAtA[i] = 0x92
    +	}
    +	i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration))
    +	i--
    +	dAtA[i] = 0x1
    +	i--
    +	dAtA[i] = 0x88
     	if len(m.HostIPs) > 0 {
     		for iNdEx := len(m.HostIPs) - 1; iNdEx >= 0; iNdEx-- {
     			{
    @@ -21012,6 +21634,18 @@ func (m *VolumeProjection) MarshalToSizedBuffer(dAtA []byte) (int, error) {
     	_ = i
     	var l int
     	_ = l
    +	if m.PodCertificate != nil {
    +		{
    +			size, err := m.PodCertificate.MarshalToSizedBuffer(dAtA[:i])
    +			if err != nil {
    +				return 0, err
    +			}
    +			i -= size
    +			i = encodeVarintGenerated(dAtA, i, uint64(size))
    +		}
    +		i--
    +		dAtA[i] = 0x32
    +	}
     	if m.ClusterTrustBundle != nil {
     		{
     			size, err := m.ClusterTrustBundle.MarshalToSizedBuffer(dAtA[:i])
    @@ -22375,6 +23009,27 @@ func (m *Container) Size() (n int) {
     		l = len(*m.RestartPolicy)
     		n += 2 + l + sovGenerated(uint64(l))
     	}
    +	if len(m.RestartPolicyRules) > 0 {
    +		for _, e := range m.RestartPolicyRules {
    +			l = e.Size()
    +			n += 2 + l + sovGenerated(uint64(l))
    +		}
    +	}
    +	return n
    +}
    +
    +func (m *ContainerExtendedResourceRequest) Size() (n int) {
    +	if m == nil {
    +		return 0
    +	}
    +	var l int
    +	_ = l
    +	l = len(m.ContainerName)
    +	n += 1 + l + sovGenerated(uint64(l))
    +	l = len(m.ResourceName)
    +	n += 1 + l + sovGenerated(uint64(l))
    +	l = len(m.RequestName)
    +	n += 1 + l + sovGenerated(uint64(l))
     	return n
     }
     
    @@ -22424,6 +23079,37 @@ func (m *ContainerResizePolicy) Size() (n int) {
     	return n
     }
     
    +func (m *ContainerRestartRule) Size() (n int) {
    +	if m == nil {
    +		return 0
    +	}
    +	var l int
    +	_ = l
    +	l = len(m.Action)
    +	n += 1 + l + sovGenerated(uint64(l))
    +	if m.ExitCodes != nil {
    +		l = m.ExitCodes.Size()
    +		n += 1 + l + sovGenerated(uint64(l))
    +	}
    +	return n
    +}
    +
    +func (m *ContainerRestartRuleOnExitCodes) Size() (n int) {
    +	if m == nil {
    +		return 0
    +	}
    +	var l int
    +	_ = l
    +	l = len(m.Operator)
    +	n += 1 + l + sovGenerated(uint64(l))
    +	if len(m.Values) > 0 {
    +		for _, e := range m.Values {
    +			n += 1 + sovGenerated(uint64(e))
    +		}
    +	}
    +	return n
    +}
    +
     func (m *ContainerState) Size() (n int) {
     	if m == nil {
     		return 0
    @@ -22542,6 +23228,10 @@ func (m *ContainerStatus) Size() (n int) {
     			n += 1 + l + sovGenerated(uint64(l))
     		}
     	}
    +	if m.StopSignal != nil {
    +		l = len(*m.StopSignal)
    +		n += 1 + l + sovGenerated(uint64(l))
    +	}
     	return n
     }
     
    @@ -22796,6 +23486,10 @@ func (m *EnvVarSource) Size() (n int) {
     		l = m.SecretKeyRef.Size()
     		n += 1 + l + sovGenerated(uint64(l))
     	}
    +	if m.FileKeyRef != nil {
    +		l = m.FileKeyRef.Size()
    +		n += 1 + l + sovGenerated(uint64(l))
    +	}
     	return n
     }
     
    @@ -22907,6 +23601,12 @@ func (m *EphemeralContainerCommon) Size() (n int) {
     		l = len(*m.RestartPolicy)
     		n += 2 + l + sovGenerated(uint64(l))
     	}
    +	if len(m.RestartPolicyRules) > 0 {
    +		for _, e := range m.RestartPolicyRules {
    +			l = e.Size()
    +			n += 2 + l + sovGenerated(uint64(l))
    +		}
    +	}
     	return n
     }
     
    @@ -23049,6 +23749,24 @@ func (m *FCVolumeSource) Size() (n int) {
     	return n
     }
     
    +func (m *FileKeySelector) Size() (n int) {
    +	if m == nil {
    +		return 0
    +	}
    +	var l int
    +	_ = l
    +	l = len(m.VolumeName)
    +	n += 1 + l + sovGenerated(uint64(l))
    +	l = len(m.Path)
    +	n += 1 + l + sovGenerated(uint64(l))
    +	l = len(m.Key)
    +	n += 1 + l + sovGenerated(uint64(l))
    +	if m.Optional != nil {
    +		n += 2
    +	}
    +	return n
    +}
    +
     func (m *FlexPersistentVolumeSource) Size() (n int) {
     	if m == nil {
     		return 0
    @@ -23382,6 +24100,10 @@ func (m *Lifecycle) Size() (n int) {
     		l = m.PreStop.Size()
     		n += 1 + l + sovGenerated(uint64(l))
     	}
    +	if m.StopSignal != nil {
    +		l = len(*m.StopSignal)
    +		n += 1 + l + sovGenerated(uint64(l))
    +	}
     	return n
     }
     
    @@ -24067,6 +24789,18 @@ func (m *NodeStatus) Size() (n int) {
     	return n
     }
     
    +func (m *NodeSwapStatus) Size() (n int) {
    +	if m == nil {
    +		return 0
    +	}
    +	var l int
    +	_ = l
    +	if m.Capacity != nil {
    +		n += 1 + sovGenerated(uint64(*m.Capacity))
    +	}
    +	return n
    +}
    +
     func (m *NodeSystemInfo) Size() (n int) {
     	if m == nil {
     		return 0
    @@ -24093,6 +24827,10 @@ func (m *NodeSystemInfo) Size() (n int) {
     	n += 1 + l + sovGenerated(uint64(l))
     	l = len(m.Architecture)
     	n += 1 + l + sovGenerated(uint64(l))
    +	if m.Swap != nil {
    +		l = m.Swap.Size()
    +		n += 1 + l + sovGenerated(uint64(l))
    +	}
     	return n
     }
     
    @@ -24632,6 +25370,28 @@ func (m *PodAttachOptions) Size() (n int) {
     	return n
     }
     
    +func (m *PodCertificateProjection) Size() (n int) {
    +	if m == nil {
    +		return 0
    +	}
    +	var l int
    +	_ = l
    +	l = len(m.SignerName)
    +	n += 1 + l + sovGenerated(uint64(l))
    +	l = len(m.KeyType)
    +	n += 1 + l + sovGenerated(uint64(l))
    +	if m.MaxExpirationSeconds != nil {
    +		n += 1 + sovGenerated(uint64(*m.MaxExpirationSeconds))
    +	}
    +	l = len(m.CredentialBundlePath)
    +	n += 1 + l + sovGenerated(uint64(l))
    +	l = len(m.KeyPath)
    +	n += 1 + l + sovGenerated(uint64(l))
    +	l = len(m.CertificateChainPath)
    +	n += 1 + l + sovGenerated(uint64(l))
    +	return n
    +}
    +
     func (m *PodCondition) Size() (n int) {
     	if m == nil {
     		return 0
    @@ -24650,6 +25410,7 @@ func (m *PodCondition) Size() (n int) {
     	n += 1 + l + sovGenerated(uint64(l))
     	l = len(m.Message)
     	n += 1 + l + sovGenerated(uint64(l))
    +	n += 1 + sovGenerated(uint64(m.ObservedGeneration))
     	return n
     }
     
    @@ -24716,6 +25477,23 @@ func (m *PodExecOptions) Size() (n int) {
     	return n
     }
     
    +func (m *PodExtendedResourceClaimStatus) Size() (n int) {
    +	if m == nil {
    +		return 0
    +	}
    +	var l int
    +	_ = l
    +	if len(m.RequestMappings) > 0 {
    +		for _, e := range m.RequestMappings {
    +			l = e.Size()
    +			n += 1 + l + sovGenerated(uint64(l))
    +		}
    +	}
    +	l = len(m.ResourceClaimName)
    +	n += 1 + l + sovGenerated(uint64(l))
    +	return n
    +}
    +
     func (m *PodIP) Size() (n int) {
     	if m == nil {
     		return 0
    @@ -25103,6 +25881,10 @@ func (m *PodSpec) Size() (n int) {
     		l = m.Resources.Size()
     		n += 2 + l + sovGenerated(uint64(l))
     	}
    +	if m.HostnameOverride != nil {
    +		l = len(*m.HostnameOverride)
    +		n += 2 + l + sovGenerated(uint64(l))
    +	}
     	return n
     }
     
    @@ -25174,6 +25956,11 @@ func (m *PodStatus) Size() (n int) {
     			n += 2 + l + sovGenerated(uint64(l))
     		}
     	}
    +	n += 2 + sovGenerated(uint64(m.ObservedGeneration))
    +	if m.ExtendedResourceClaimStatus != nil {
    +		l = m.ExtendedResourceClaimStatus.Size()
    +		n += 2 + l + sovGenerated(uint64(l))
    +	}
     	return n
     }
     
    @@ -26629,6 +27416,10 @@ func (m *VolumeProjection) Size() (n int) {
     		l = m.ClusterTrustBundle.Size()
     		n += 1 + l + sovGenerated(uint64(l))
     	}
    +	if m.PodCertificate != nil {
    +		l = m.PodCertificate.Size()
    +		n += 1 + l + sovGenerated(uint64(l))
    +	}
     	return n
     }
     
    @@ -27304,6 +28095,11 @@ func (this *Container) String() string {
     		repeatedStringForResizePolicy += strings.Replace(strings.Replace(f.String(), "ContainerResizePolicy", "ContainerResizePolicy", 1), `&`, ``, 1) + ","
     	}
     	repeatedStringForResizePolicy += "}"
    +	repeatedStringForRestartPolicyRules := "[]ContainerRestartRule{"
    +	for _, f := range this.RestartPolicyRules {
    +		repeatedStringForRestartPolicyRules += strings.Replace(strings.Replace(f.String(), "ContainerRestartRule", "ContainerRestartRule", 1), `&`, ``, 1) + ","
    +	}
    +	repeatedStringForRestartPolicyRules += "}"
     	s := strings.Join([]string{`&Container{`,
     		`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
     		`Image:` + fmt.Sprintf("%v", this.Image) + `,`,
    @@ -27329,6 +28125,19 @@ func (this *Container) String() string {
     		`StartupProbe:` + strings.Replace(this.StartupProbe.String(), "Probe", "Probe", 1) + `,`,
     		`ResizePolicy:` + repeatedStringForResizePolicy + `,`,
     		`RestartPolicy:` + valueToStringGenerated(this.RestartPolicy) + `,`,
    +		`RestartPolicyRules:` + repeatedStringForRestartPolicyRules + `,`,
    +		`}`,
    +	}, "")
    +	return s
    +}
    +func (this *ContainerExtendedResourceRequest) String() string {
    +	if this == nil {
    +		return "nil"
    +	}
    +	s := strings.Join([]string{`&ContainerExtendedResourceRequest{`,
    +		`ContainerName:` + fmt.Sprintf("%v", this.ContainerName) + `,`,
    +		`ResourceName:` + fmt.Sprintf("%v", this.ResourceName) + `,`,
    +		`RequestName:` + fmt.Sprintf("%v", this.RequestName) + `,`,
     		`}`,
     	}, "")
     	return s
    @@ -27369,6 +28178,28 @@ func (this *ContainerResizePolicy) String() string {
     	}, "")
     	return s
     }
    +func (this *ContainerRestartRule) String() string {
    +	if this == nil {
    +		return "nil"
    +	}
    +	s := strings.Join([]string{`&ContainerRestartRule{`,
    +		`Action:` + fmt.Sprintf("%v", this.Action) + `,`,
    +		`ExitCodes:` + strings.Replace(this.ExitCodes.String(), "ContainerRestartRuleOnExitCodes", "ContainerRestartRuleOnExitCodes", 1) + `,`,
    +		`}`,
    +	}, "")
    +	return s
    +}
    +func (this *ContainerRestartRuleOnExitCodes) String() string {
    +	if this == nil {
    +		return "nil"
    +	}
    +	s := strings.Join([]string{`&ContainerRestartRuleOnExitCodes{`,
    +		`Operator:` + fmt.Sprintf("%v", this.Operator) + `,`,
    +		`Values:` + fmt.Sprintf("%v", this.Values) + `,`,
    +		`}`,
    +	}, "")
    +	return s
    +}
     func (this *ContainerState) String() string {
     	if this == nil {
     		return "nil"
    @@ -27457,6 +28288,7 @@ func (this *ContainerStatus) String() string {
     		`VolumeMounts:` + repeatedStringForVolumeMounts + `,`,
     		`User:` + strings.Replace(this.User.String(), "ContainerUser", "ContainerUser", 1) + `,`,
     		`AllocatedResourcesStatus:` + repeatedStringForAllocatedResourcesStatus + `,`,
    +		`StopSignal:` + valueToStringGenerated(this.StopSignal) + `,`,
     		`}`,
     	}, "")
     	return s
    @@ -27654,6 +28486,7 @@ func (this *EnvVarSource) String() string {
     		`ResourceFieldRef:` + strings.Replace(this.ResourceFieldRef.String(), "ResourceFieldSelector", "ResourceFieldSelector", 1) + `,`,
     		`ConfigMapKeyRef:` + strings.Replace(this.ConfigMapKeyRef.String(), "ConfigMapKeySelector", "ConfigMapKeySelector", 1) + `,`,
     		`SecretKeyRef:` + strings.Replace(this.SecretKeyRef.String(), "SecretKeySelector", "SecretKeySelector", 1) + `,`,
    +		`FileKeyRef:` + strings.Replace(this.FileKeyRef.String(), "FileKeySelector", "FileKeySelector", 1) + `,`,
     		`}`,
     	}, "")
     	return s
    @@ -27703,6 +28536,11 @@ func (this *EphemeralContainerCommon) String() string {
     		repeatedStringForResizePolicy += strings.Replace(strings.Replace(f.String(), "ContainerResizePolicy", "ContainerResizePolicy", 1), `&`, ``, 1) + ","
     	}
     	repeatedStringForResizePolicy += "}"
    +	repeatedStringForRestartPolicyRules := "[]ContainerRestartRule{"
    +	for _, f := range this.RestartPolicyRules {
    +		repeatedStringForRestartPolicyRules += strings.Replace(strings.Replace(f.String(), "ContainerRestartRule", "ContainerRestartRule", 1), `&`, ``, 1) + ","
    +	}
    +	repeatedStringForRestartPolicyRules += "}"
     	s := strings.Join([]string{`&EphemeralContainerCommon{`,
     		`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
     		`Image:` + fmt.Sprintf("%v", this.Image) + `,`,
    @@ -27728,6 +28566,7 @@ func (this *EphemeralContainerCommon) String() string {
     		`StartupProbe:` + strings.Replace(this.StartupProbe.String(), "Probe", "Probe", 1) + `,`,
     		`ResizePolicy:` + repeatedStringForResizePolicy + `,`,
     		`RestartPolicy:` + valueToStringGenerated(this.RestartPolicy) + `,`,
    +		`RestartPolicyRules:` + repeatedStringForRestartPolicyRules + `,`,
     		`}`,
     	}, "")
     	return s
    @@ -27828,6 +28667,19 @@ func (this *FCVolumeSource) String() string {
     	}, "")
     	return s
     }
    +func (this *FileKeySelector) String() string {
    +	if this == nil {
    +		return "nil"
    +	}
    +	s := strings.Join([]string{`&FileKeySelector{`,
    +		`VolumeName:` + fmt.Sprintf("%v", this.VolumeName) + `,`,
    +		`Path:` + fmt.Sprintf("%v", this.Path) + `,`,
    +		`Key:` + fmt.Sprintf("%v", this.Key) + `,`,
    +		`Optional:` + valueToStringGenerated(this.Optional) + `,`,
    +		`}`,
    +	}, "")
    +	return s
    +}
     func (this *FlexPersistentVolumeSource) String() string {
     	if this == nil {
     		return "nil"
    @@ -28080,6 +28932,7 @@ func (this *Lifecycle) String() string {
     	s := strings.Join([]string{`&Lifecycle{`,
     		`PostStart:` + strings.Replace(this.PostStart.String(), "LifecycleHandler", "LifecycleHandler", 1) + `,`,
     		`PreStop:` + strings.Replace(this.PreStop.String(), "LifecycleHandler", "LifecycleHandler", 1) + `,`,
    +		`StopSignal:` + valueToStringGenerated(this.StopSignal) + `,`,
     		`}`,
     	}, "")
     	return s
    @@ -28658,6 +29511,16 @@ func (this *NodeStatus) String() string {
     	}, "")
     	return s
     }
    +func (this *NodeSwapStatus) String() string {
    +	if this == nil {
    +		return "nil"
    +	}
    +	s := strings.Join([]string{`&NodeSwapStatus{`,
    +		`Capacity:` + valueToStringGenerated(this.Capacity) + `,`,
    +		`}`,
    +	}, "")
    +	return s
    +}
     func (this *NodeSystemInfo) String() string {
     	if this == nil {
     		return "nil"
    @@ -28673,6 +29536,7 @@ func (this *NodeSystemInfo) String() string {
     		`KubeProxyVersion:` + fmt.Sprintf("%v", this.KubeProxyVersion) + `,`,
     		`OperatingSystem:` + fmt.Sprintf("%v", this.OperatingSystem) + `,`,
     		`Architecture:` + fmt.Sprintf("%v", this.Architecture) + `,`,
    +		`Swap:` + strings.Replace(this.Swap.String(), "NodeSwapStatus", "NodeSwapStatus", 1) + `,`,
     		`}`,
     	}, "")
     	return s
    @@ -29034,6 +29898,21 @@ func (this *PodAttachOptions) String() string {
     	}, "")
     	return s
     }
    +func (this *PodCertificateProjection) String() string {
    +	if this == nil {
    +		return "nil"
    +	}
    +	s := strings.Join([]string{`&PodCertificateProjection{`,
    +		`SignerName:` + fmt.Sprintf("%v", this.SignerName) + `,`,
    +		`KeyType:` + fmt.Sprintf("%v", this.KeyType) + `,`,
    +		`MaxExpirationSeconds:` + valueToStringGenerated(this.MaxExpirationSeconds) + `,`,
    +		`CredentialBundlePath:` + fmt.Sprintf("%v", this.CredentialBundlePath) + `,`,
    +		`KeyPath:` + fmt.Sprintf("%v", this.KeyPath) + `,`,
    +		`CertificateChainPath:` + fmt.Sprintf("%v", this.CertificateChainPath) + `,`,
    +		`}`,
    +	}, "")
    +	return s
    +}
     func (this *PodCondition) String() string {
     	if this == nil {
     		return "nil"
    @@ -29045,6 +29924,7 @@ func (this *PodCondition) String() string {
     		`LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`,
     		`Reason:` + fmt.Sprintf("%v", this.Reason) + `,`,
     		`Message:` + fmt.Sprintf("%v", this.Message) + `,`,
    +		`ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`,
     		`}`,
     	}, "")
     	return s
    @@ -29092,6 +29972,22 @@ func (this *PodExecOptions) String() string {
     	}, "")
     	return s
     }
    +func (this *PodExtendedResourceClaimStatus) String() string {
    +	if this == nil {
    +		return "nil"
    +	}
    +	repeatedStringForRequestMappings := "[]ContainerExtendedResourceRequest{"
    +	for _, f := range this.RequestMappings {
    +		repeatedStringForRequestMappings += strings.Replace(strings.Replace(f.String(), "ContainerExtendedResourceRequest", "ContainerExtendedResourceRequest", 1), `&`, ``, 1) + ","
    +	}
    +	repeatedStringForRequestMappings += "}"
    +	s := strings.Join([]string{`&PodExtendedResourceClaimStatus{`,
    +		`RequestMappings:` + repeatedStringForRequestMappings + `,`,
    +		`ResourceClaimName:` + fmt.Sprintf("%v", this.ResourceClaimName) + `,`,
    +		`}`,
    +	}, "")
    +	return s
    +}
     func (this *PodIP) String() string {
     	if this == nil {
     		return "nil"
    @@ -29367,6 +30263,7 @@ func (this *PodSpec) String() string {
     		`SchedulingGates:` + repeatedStringForSchedulingGates + `,`,
     		`ResourceClaims:` + repeatedStringForResourceClaims + `,`,
     		`Resources:` + strings.Replace(this.Resources.String(), "ResourceRequirements", "ResourceRequirements", 1) + `,`,
    +		`HostnameOverride:` + valueToStringGenerated(this.HostnameOverride) + `,`,
     		`}`,
     	}, "")
     	return s
    @@ -29427,6 +30324,8 @@ func (this *PodStatus) String() string {
     		`Resize:` + fmt.Sprintf("%v", this.Resize) + `,`,
     		`ResourceClaimStatuses:` + repeatedStringForResourceClaimStatuses + `,`,
     		`HostIPs:` + repeatedStringForHostIPs + `,`,
    +		`ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`,
    +		`ExtendedResourceClaimStatus:` + strings.Replace(this.ExtendedResourceClaimStatus.String(), "PodExtendedResourceClaimStatus", "PodExtendedResourceClaimStatus", 1) + `,`,
     		`}`,
     	}, "")
     	return s
    @@ -30536,6 +31435,7 @@ func (this *VolumeProjection) String() string {
     		`ConfigMap:` + strings.Replace(this.ConfigMap.String(), "ConfigMapProjection", "ConfigMapProjection", 1) + `,`,
     		`ServiceAccountToken:` + strings.Replace(this.ServiceAccountToken.String(), "ServiceAccountTokenProjection", "ServiceAccountTokenProjection", 1) + `,`,
     		`ClusterTrustBundle:` + strings.Replace(this.ClusterTrustBundle.String(), "ClusterTrustBundleProjection", "ClusterTrustBundleProjection", 1) + `,`,
    +		`PodCertificate:` + strings.Replace(this.PodCertificate.String(), "PodCertificateProjection", "PodCertificateProjection", 1) + `,`,
     		`}`,
     	}, "")
     	return s
    @@ -36328,6 +37228,186 @@ func (m *Container) Unmarshal(dAtA []byte) error {
     			s := ContainerRestartPolicy(dAtA[iNdEx:postIndex])
     			m.RestartPolicy = &s
     			iNdEx = postIndex
    +		case 25:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field RestartPolicyRules", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.RestartPolicyRules = append(m.RestartPolicyRules, ContainerRestartRule{})
    +			if err := m.RestartPolicyRules[len(m.RestartPolicyRules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		default:
    +			iNdEx = preIndex
    +			skippy, err := skipGenerated(dAtA[iNdEx:])
    +			if err != nil {
    +				return err
    +			}
    +			if (skippy < 0) || (iNdEx+skippy) < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if (iNdEx + skippy) > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			iNdEx += skippy
    +		}
    +	}
    +
    +	if iNdEx > l {
    +		return io.ErrUnexpectedEOF
    +	}
    +	return nil
    +}
    +func (m *ContainerExtendedResourceRequest) Unmarshal(dAtA []byte) error {
    +	l := len(dAtA)
    +	iNdEx := 0
    +	for iNdEx < l {
    +		preIndex := iNdEx
    +		var wire uint64
    +		for shift := uint(0); ; shift += 7 {
    +			if shift >= 64 {
    +				return ErrIntOverflowGenerated
    +			}
    +			if iNdEx >= l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			b := dAtA[iNdEx]
    +			iNdEx++
    +			wire |= uint64(b&0x7F) << shift
    +			if b < 0x80 {
    +				break
    +			}
    +		}
    +		fieldNum := int32(wire >> 3)
    +		wireType := int(wire & 0x7)
    +		if wireType == 4 {
    +			return fmt.Errorf("proto: ContainerExtendedResourceRequest: wiretype end group for non-group")
    +		}
    +		if fieldNum <= 0 {
    +			return fmt.Errorf("proto: ContainerExtendedResourceRequest: illegal tag %d (wire type %d)", fieldNum, wire)
    +		}
    +		switch fieldNum {
    +		case 1:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field ContainerName", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.ContainerName = string(dAtA[iNdEx:postIndex])
    +			iNdEx = postIndex
    +		case 2:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field ResourceName", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.ResourceName = string(dAtA[iNdEx:postIndex])
    +			iNdEx = postIndex
    +		case 3:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field RequestName", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.RequestName = string(dAtA[iNdEx:postIndex])
    +			iNdEx = postIndex
     		default:
     			iNdEx = preIndex
     			skippy, err := skipGenerated(dAtA[iNdEx:])
    @@ -36408,27 +37488,211 @@ func (m *ContainerImage) Unmarshal(dAtA []byte) error {
     			if postIndex > l {
     				return io.ErrUnexpectedEOF
     			}
    -			m.Names = append(m.Names, string(dAtA[iNdEx:postIndex]))
    +			m.Names = append(m.Names, string(dAtA[iNdEx:postIndex]))
    +			iNdEx = postIndex
    +		case 2:
    +			if wireType != 0 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field SizeBytes", wireType)
    +			}
    +			m.SizeBytes = 0
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				m.SizeBytes |= int64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +		default:
    +			iNdEx = preIndex
    +			skippy, err := skipGenerated(dAtA[iNdEx:])
    +			if err != nil {
    +				return err
    +			}
    +			if (skippy < 0) || (iNdEx+skippy) < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if (iNdEx + skippy) > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			iNdEx += skippy
    +		}
    +	}
    +
    +	if iNdEx > l {
    +		return io.ErrUnexpectedEOF
    +	}
    +	return nil
    +}
    +func (m *ContainerPort) Unmarshal(dAtA []byte) error {
    +	l := len(dAtA)
    +	iNdEx := 0
    +	for iNdEx < l {
    +		preIndex := iNdEx
    +		var wire uint64
    +		for shift := uint(0); ; shift += 7 {
    +			if shift >= 64 {
    +				return ErrIntOverflowGenerated
    +			}
    +			if iNdEx >= l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			b := dAtA[iNdEx]
    +			iNdEx++
    +			wire |= uint64(b&0x7F) << shift
    +			if b < 0x80 {
    +				break
    +			}
    +		}
    +		fieldNum := int32(wire >> 3)
    +		wireType := int(wire & 0x7)
    +		if wireType == 4 {
    +			return fmt.Errorf("proto: ContainerPort: wiretype end group for non-group")
    +		}
    +		if fieldNum <= 0 {
    +			return fmt.Errorf("proto: ContainerPort: illegal tag %d (wire type %d)", fieldNum, wire)
    +		}
    +		switch fieldNum {
    +		case 1:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.Name = string(dAtA[iNdEx:postIndex])
    +			iNdEx = postIndex
    +		case 2:
    +			if wireType != 0 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field HostPort", wireType)
    +			}
    +			m.HostPort = 0
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				m.HostPort |= int32(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +		case 3:
    +			if wireType != 0 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field ContainerPort", wireType)
    +			}
    +			m.ContainerPort = 0
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				m.ContainerPort |= int32(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +		case 4:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.Protocol = Protocol(dAtA[iNdEx:postIndex])
    +			iNdEx = postIndex
    +		case 5:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field HostIP", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.HostIP = string(dAtA[iNdEx:postIndex])
     			iNdEx = postIndex
    -		case 2:
    -			if wireType != 0 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field SizeBytes", wireType)
    -			}
    -			m.SizeBytes = 0
    -			for shift := uint(0); ; shift += 7 {
    -				if shift >= 64 {
    -					return ErrIntOverflowGenerated
    -				}
    -				if iNdEx >= l {
    -					return io.ErrUnexpectedEOF
    -				}
    -				b := dAtA[iNdEx]
    -				iNdEx++
    -				m.SizeBytes |= int64(b&0x7F) << shift
    -				if b < 0x80 {
    -					break
    -				}
    -			}
     		default:
     			iNdEx = preIndex
     			skippy, err := skipGenerated(dAtA[iNdEx:])
    @@ -36450,7 +37714,7 @@ func (m *ContainerImage) Unmarshal(dAtA []byte) error {
     	}
     	return nil
     }
    -func (m *ContainerPort) Unmarshal(dAtA []byte) error {
    +func (m *ContainerResizePolicy) Unmarshal(dAtA []byte) error {
     	l := len(dAtA)
     	iNdEx := 0
     	for iNdEx < l {
    @@ -36473,15 +37737,15 @@ func (m *ContainerPort) Unmarshal(dAtA []byte) error {
     		fieldNum := int32(wire >> 3)
     		wireType := int(wire & 0x7)
     		if wireType == 4 {
    -			return fmt.Errorf("proto: ContainerPort: wiretype end group for non-group")
    +			return fmt.Errorf("proto: ContainerResizePolicy: wiretype end group for non-group")
     		}
     		if fieldNum <= 0 {
    -			return fmt.Errorf("proto: ContainerPort: illegal tag %d (wire type %d)", fieldNum, wire)
    +			return fmt.Errorf("proto: ContainerResizePolicy: illegal tag %d (wire type %d)", fieldNum, wire)
     		}
     		switch fieldNum {
     		case 1:
     			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
    +				return fmt.Errorf("proto: wrong wireType = %d for field ResourceName", wireType)
     			}
     			var stringLen uint64
     			for shift := uint(0); ; shift += 7 {
    @@ -36509,13 +37773,13 @@ func (m *ContainerPort) Unmarshal(dAtA []byte) error {
     			if postIndex > l {
     				return io.ErrUnexpectedEOF
     			}
    -			m.Name = string(dAtA[iNdEx:postIndex])
    +			m.ResourceName = ResourceName(dAtA[iNdEx:postIndex])
     			iNdEx = postIndex
     		case 2:
    -			if wireType != 0 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field HostPort", wireType)
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field RestartPolicy", wireType)
     			}
    -			m.HostPort = 0
    +			var stringLen uint64
     			for shift := uint(0); ; shift += 7 {
     				if shift >= 64 {
     					return ErrIntOverflowGenerated
    @@ -36525,33 +37789,77 @@ func (m *ContainerPort) Unmarshal(dAtA []byte) error {
     				}
     				b := dAtA[iNdEx]
     				iNdEx++
    -				m.HostPort |= int32(b&0x7F) << shift
    +				stringLen |= uint64(b&0x7F) << shift
     				if b < 0x80 {
     					break
     				}
     			}
    -		case 3:
    -			if wireType != 0 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field ContainerPort", wireType)
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
     			}
    -			m.ContainerPort = 0
    -			for shift := uint(0); ; shift += 7 {
    -				if shift >= 64 {
    -					return ErrIntOverflowGenerated
    -				}
    -				if iNdEx >= l {
    -					return io.ErrUnexpectedEOF
    -				}
    -				b := dAtA[iNdEx]
    -				iNdEx++
    -				m.ContainerPort |= int32(b&0x7F) << shift
    -				if b < 0x80 {
    -					break
    -				}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
     			}
    -		case 4:
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.RestartPolicy = ResourceResizeRestartPolicy(dAtA[iNdEx:postIndex])
    +			iNdEx = postIndex
    +		default:
    +			iNdEx = preIndex
    +			skippy, err := skipGenerated(dAtA[iNdEx:])
    +			if err != nil {
    +				return err
    +			}
    +			if (skippy < 0) || (iNdEx+skippy) < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if (iNdEx + skippy) > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			iNdEx += skippy
    +		}
    +	}
    +
    +	if iNdEx > l {
    +		return io.ErrUnexpectedEOF
    +	}
    +	return nil
    +}
    +func (m *ContainerRestartRule) Unmarshal(dAtA []byte) error {
    +	l := len(dAtA)
    +	iNdEx := 0
    +	for iNdEx < l {
    +		preIndex := iNdEx
    +		var wire uint64
    +		for shift := uint(0); ; shift += 7 {
    +			if shift >= 64 {
    +				return ErrIntOverflowGenerated
    +			}
    +			if iNdEx >= l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			b := dAtA[iNdEx]
    +			iNdEx++
    +			wire |= uint64(b&0x7F) << shift
    +			if b < 0x80 {
    +				break
    +			}
    +		}
    +		fieldNum := int32(wire >> 3)
    +		wireType := int(wire & 0x7)
    +		if wireType == 4 {
    +			return fmt.Errorf("proto: ContainerRestartRule: wiretype end group for non-group")
    +		}
    +		if fieldNum <= 0 {
    +			return fmt.Errorf("proto: ContainerRestartRule: illegal tag %d (wire type %d)", fieldNum, wire)
    +		}
    +		switch fieldNum {
    +		case 1:
     			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType)
    +				return fmt.Errorf("proto: wrong wireType = %d for field Action", wireType)
     			}
     			var stringLen uint64
     			for shift := uint(0); ; shift += 7 {
    @@ -36579,13 +37887,13 @@ func (m *ContainerPort) Unmarshal(dAtA []byte) error {
     			if postIndex > l {
     				return io.ErrUnexpectedEOF
     			}
    -			m.Protocol = Protocol(dAtA[iNdEx:postIndex])
    +			m.Action = ContainerRestartRuleAction(dAtA[iNdEx:postIndex])
     			iNdEx = postIndex
    -		case 5:
    +		case 2:
     			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field HostIP", wireType)
    +				return fmt.Errorf("proto: wrong wireType = %d for field ExitCodes", wireType)
     			}
    -			var stringLen uint64
    +			var msglen int
     			for shift := uint(0); ; shift += 7 {
     				if shift >= 64 {
     					return ErrIntOverflowGenerated
    @@ -36595,23 +37903,27 @@ func (m *ContainerPort) Unmarshal(dAtA []byte) error {
     				}
     				b := dAtA[iNdEx]
     				iNdEx++
    -				stringLen |= uint64(b&0x7F) << shift
    +				msglen |= int(b&0x7F) << shift
     				if b < 0x80 {
     					break
     				}
     			}
    -			intStringLen := int(stringLen)
    -			if intStringLen < 0 {
    +			if msglen < 0 {
     				return ErrInvalidLengthGenerated
     			}
    -			postIndex := iNdEx + intStringLen
    +			postIndex := iNdEx + msglen
     			if postIndex < 0 {
     				return ErrInvalidLengthGenerated
     			}
     			if postIndex > l {
     				return io.ErrUnexpectedEOF
     			}
    -			m.HostIP = string(dAtA[iNdEx:postIndex])
    +			if m.ExitCodes == nil {
    +				m.ExitCodes = &ContainerRestartRuleOnExitCodes{}
    +			}
    +			if err := m.ExitCodes.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
     			iNdEx = postIndex
     		default:
     			iNdEx = preIndex
    @@ -36634,7 +37946,7 @@ func (m *ContainerPort) Unmarshal(dAtA []byte) error {
     	}
     	return nil
     }
    -func (m *ContainerResizePolicy) Unmarshal(dAtA []byte) error {
    +func (m *ContainerRestartRuleOnExitCodes) Unmarshal(dAtA []byte) error {
     	l := len(dAtA)
     	iNdEx := 0
     	for iNdEx < l {
    @@ -36657,15 +37969,15 @@ func (m *ContainerResizePolicy) Unmarshal(dAtA []byte) error {
     		fieldNum := int32(wire >> 3)
     		wireType := int(wire & 0x7)
     		if wireType == 4 {
    -			return fmt.Errorf("proto: ContainerResizePolicy: wiretype end group for non-group")
    +			return fmt.Errorf("proto: ContainerRestartRuleOnExitCodes: wiretype end group for non-group")
     		}
     		if fieldNum <= 0 {
    -			return fmt.Errorf("proto: ContainerResizePolicy: illegal tag %d (wire type %d)", fieldNum, wire)
    +			return fmt.Errorf("proto: ContainerRestartRuleOnExitCodes: illegal tag %d (wire type %d)", fieldNum, wire)
     		}
     		switch fieldNum {
     		case 1:
     			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field ResourceName", wireType)
    +				return fmt.Errorf("proto: wrong wireType = %d for field Operator", wireType)
     			}
     			var stringLen uint64
     			for shift := uint(0); ; shift += 7 {
    @@ -36693,40 +38005,84 @@ func (m *ContainerResizePolicy) Unmarshal(dAtA []byte) error {
     			if postIndex > l {
     				return io.ErrUnexpectedEOF
     			}
    -			m.ResourceName = ResourceName(dAtA[iNdEx:postIndex])
    +			m.Operator = ContainerRestartRuleOnExitCodesOperator(dAtA[iNdEx:postIndex])
     			iNdEx = postIndex
     		case 2:
    -			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field RestartPolicy", wireType)
    -			}
    -			var stringLen uint64
    -			for shift := uint(0); ; shift += 7 {
    -				if shift >= 64 {
    -					return ErrIntOverflowGenerated
    +			if wireType == 0 {
    +				var v int32
    +				for shift := uint(0); ; shift += 7 {
    +					if shift >= 64 {
    +						return ErrIntOverflowGenerated
    +					}
    +					if iNdEx >= l {
    +						return io.ErrUnexpectedEOF
    +					}
    +					b := dAtA[iNdEx]
    +					iNdEx++
    +					v |= int32(b&0x7F) << shift
    +					if b < 0x80 {
    +						break
    +					}
     				}
    -				if iNdEx >= l {
    +				m.Values = append(m.Values, v)
    +			} else if wireType == 2 {
    +				var packedLen int
    +				for shift := uint(0); ; shift += 7 {
    +					if shift >= 64 {
    +						return ErrIntOverflowGenerated
    +					}
    +					if iNdEx >= l {
    +						return io.ErrUnexpectedEOF
    +					}
    +					b := dAtA[iNdEx]
    +					iNdEx++
    +					packedLen |= int(b&0x7F) << shift
    +					if b < 0x80 {
    +						break
    +					}
    +				}
    +				if packedLen < 0 {
    +					return ErrInvalidLengthGenerated
    +				}
    +				postIndex := iNdEx + packedLen
    +				if postIndex < 0 {
    +					return ErrInvalidLengthGenerated
    +				}
    +				if postIndex > l {
     					return io.ErrUnexpectedEOF
     				}
    -				b := dAtA[iNdEx]
    -				iNdEx++
    -				stringLen |= uint64(b&0x7F) << shift
    -				if b < 0x80 {
    -					break
    +				var elementCount int
    +				var count int
    +				for _, integer := range dAtA[iNdEx:postIndex] {
    +					if integer < 128 {
    +						count++
    +					}
     				}
    +				elementCount = count
    +				if elementCount != 0 && len(m.Values) == 0 {
    +					m.Values = make([]int32, 0, elementCount)
    +				}
    +				for iNdEx < postIndex {
    +					var v int32
    +					for shift := uint(0); ; shift += 7 {
    +						if shift >= 64 {
    +							return ErrIntOverflowGenerated
    +						}
    +						if iNdEx >= l {
    +							return io.ErrUnexpectedEOF
    +						}
    +						b := dAtA[iNdEx]
    +						iNdEx++
    +						v |= int32(b&0x7F) << shift
    +						if b < 0x80 {
    +							break
    +						}
    +					}
    +					m.Values = append(m.Values, v)
    +				}
    +			} else {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Values", wireType)
     			}
    -			intStringLen := int(stringLen)
    -			if intStringLen < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			postIndex := iNdEx + intStringLen
    -			if postIndex < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if postIndex > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			m.RestartPolicy = ResourceResizeRestartPolicy(dAtA[iNdEx:postIndex])
    -			iNdEx = postIndex
     		default:
     			iNdEx = preIndex
     			skippy, err := skipGenerated(dAtA[iNdEx:])
    @@ -37864,18 +39220,52 @@ func (m *ContainerStatus) Unmarshal(dAtA []byte) error {
     			if postIndex > l {
     				return io.ErrUnexpectedEOF
     			}
    -			if m.User == nil {
    -				m.User = &ContainerUser{}
    -			}
    -			if err := m.User.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +			if m.User == nil {
    +				m.User = &ContainerUser{}
    +			}
    +			if err := m.User.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		case 14:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field AllocatedResourcesStatus", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.AllocatedResourcesStatus = append(m.AllocatedResourcesStatus, ResourceStatus{})
    +			if err := m.AllocatedResourcesStatus[len(m.AllocatedResourcesStatus)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
     				return err
     			}
     			iNdEx = postIndex
    -		case 14:
    +		case 15:
     			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field AllocatedResourcesStatus", wireType)
    +				return fmt.Errorf("proto: wrong wireType = %d for field StopSignal", wireType)
     			}
    -			var msglen int
    +			var stringLen uint64
     			for shift := uint(0); ; shift += 7 {
     				if shift >= 64 {
     					return ErrIntOverflowGenerated
    @@ -37885,25 +39275,24 @@ func (m *ContainerStatus) Unmarshal(dAtA []byte) error {
     				}
     				b := dAtA[iNdEx]
     				iNdEx++
    -				msglen |= int(b&0x7F) << shift
    +				stringLen |= uint64(b&0x7F) << shift
     				if b < 0x80 {
     					break
     				}
     			}
    -			if msglen < 0 {
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
     				return ErrInvalidLengthGenerated
     			}
    -			postIndex := iNdEx + msglen
    +			postIndex := iNdEx + intStringLen
     			if postIndex < 0 {
     				return ErrInvalidLengthGenerated
     			}
     			if postIndex > l {
     				return io.ErrUnexpectedEOF
     			}
    -			m.AllocatedResourcesStatus = append(m.AllocatedResourcesStatus, ResourceStatus{})
    -			if err := m.AllocatedResourcesStatus[len(m.AllocatedResourcesStatus)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    -				return err
    -			}
    +			s := Signal(dAtA[iNdEx:postIndex])
    +			m.StopSignal = &s
     			iNdEx = postIndex
     		default:
     			iNdEx = preIndex
    @@ -39773,6 +41162,42 @@ func (m *EnvVarSource) Unmarshal(dAtA []byte) error {
     				return err
     			}
     			iNdEx = postIndex
    +		case 5:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field FileKeyRef", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			if m.FileKeyRef == nil {
    +				m.FileKeyRef = &FileKeySelector{}
    +			}
    +			if err := m.FileKeyRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
     		default:
     			iNdEx = preIndex
     			skippy, err := skipGenerated(dAtA[iNdEx:])
    @@ -40704,6 +42129,40 @@ func (m *EphemeralContainerCommon) Unmarshal(dAtA []byte) error {
     			s := ContainerRestartPolicy(dAtA[iNdEx:postIndex])
     			m.RestartPolicy = &s
     			iNdEx = postIndex
    +		case 25:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field RestartPolicyRules", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.RestartPolicyRules = append(m.RestartPolicyRules, ContainerRestartRule{})
    +			if err := m.RestartPolicyRules[len(m.RestartPolicyRules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
     		default:
     			iNdEx = preIndex
     			skippy, err := skipGenerated(dAtA[iNdEx:])
    @@ -41816,13 +43275,199 @@ func (m *FCVolumeSource) Unmarshal(dAtA []byte) error {
     			if postIndex > l {
     				return io.ErrUnexpectedEOF
     			}
    -			m.TargetWWNs = append(m.TargetWWNs, string(dAtA[iNdEx:postIndex]))
    +			m.TargetWWNs = append(m.TargetWWNs, string(dAtA[iNdEx:postIndex]))
    +			iNdEx = postIndex
    +		case 2:
    +			if wireType != 0 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Lun", wireType)
    +			}
    +			var v int32
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				v |= int32(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			m.Lun = &v
    +		case 3:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.FSType = string(dAtA[iNdEx:postIndex])
    +			iNdEx = postIndex
    +		case 4:
    +			if wireType != 0 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType)
    +			}
    +			var v int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				v |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			m.ReadOnly = bool(v != 0)
    +		case 5:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field WWIDs", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.WWIDs = append(m.WWIDs, string(dAtA[iNdEx:postIndex]))
    +			iNdEx = postIndex
    +		default:
    +			iNdEx = preIndex
    +			skippy, err := skipGenerated(dAtA[iNdEx:])
    +			if err != nil {
    +				return err
    +			}
    +			if (skippy < 0) || (iNdEx+skippy) < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if (iNdEx + skippy) > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			iNdEx += skippy
    +		}
    +	}
    +
    +	if iNdEx > l {
    +		return io.ErrUnexpectedEOF
    +	}
    +	return nil
    +}
    +func (m *FileKeySelector) Unmarshal(dAtA []byte) error {
    +	l := len(dAtA)
    +	iNdEx := 0
    +	for iNdEx < l {
    +		preIndex := iNdEx
    +		var wire uint64
    +		for shift := uint(0); ; shift += 7 {
    +			if shift >= 64 {
    +				return ErrIntOverflowGenerated
    +			}
    +			if iNdEx >= l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			b := dAtA[iNdEx]
    +			iNdEx++
    +			wire |= uint64(b&0x7F) << shift
    +			if b < 0x80 {
    +				break
    +			}
    +		}
    +		fieldNum := int32(wire >> 3)
    +		wireType := int(wire & 0x7)
    +		if wireType == 4 {
    +			return fmt.Errorf("proto: FileKeySelector: wiretype end group for non-group")
    +		}
    +		if fieldNum <= 0 {
    +			return fmt.Errorf("proto: FileKeySelector: illegal tag %d (wire type %d)", fieldNum, wire)
    +		}
    +		switch fieldNum {
    +		case 1:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field VolumeName", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.VolumeName = string(dAtA[iNdEx:postIndex])
     			iNdEx = postIndex
     		case 2:
    -			if wireType != 0 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field Lun", wireType)
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType)
     			}
    -			var v int32
    +			var stringLen uint64
     			for shift := uint(0); ; shift += 7 {
     				if shift >= 64 {
     					return ErrIntOverflowGenerated
    @@ -41832,15 +43477,27 @@ func (m *FCVolumeSource) Unmarshal(dAtA []byte) error {
     				}
     				b := dAtA[iNdEx]
     				iNdEx++
    -				v |= int32(b&0x7F) << shift
    +				stringLen |= uint64(b&0x7F) << shift
     				if b < 0x80 {
     					break
     				}
     			}
    -			m.Lun = &v
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.Path = string(dAtA[iNdEx:postIndex])
    +			iNdEx = postIndex
     		case 3:
     			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType)
    +				return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
     			}
     			var stringLen uint64
     			for shift := uint(0); ; shift += 7 {
    @@ -41868,11 +43525,11 @@ func (m *FCVolumeSource) Unmarshal(dAtA []byte) error {
     			if postIndex > l {
     				return io.ErrUnexpectedEOF
     			}
    -			m.FSType = string(dAtA[iNdEx:postIndex])
    +			m.Key = string(dAtA[iNdEx:postIndex])
     			iNdEx = postIndex
     		case 4:
     			if wireType != 0 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType)
    +				return fmt.Errorf("proto: wrong wireType = %d for field Optional", wireType)
     			}
     			var v int
     			for shift := uint(0); ; shift += 7 {
    @@ -41889,39 +43546,8 @@ func (m *FCVolumeSource) Unmarshal(dAtA []byte) error {
     					break
     				}
     			}
    -			m.ReadOnly = bool(v != 0)
    -		case 5:
    -			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field WWIDs", wireType)
    -			}
    -			var stringLen uint64
    -			for shift := uint(0); ; shift += 7 {
    -				if shift >= 64 {
    -					return ErrIntOverflowGenerated
    -				}
    -				if iNdEx >= l {
    -					return io.ErrUnexpectedEOF
    -				}
    -				b := dAtA[iNdEx]
    -				iNdEx++
    -				stringLen |= uint64(b&0x7F) << shift
    -				if b < 0x80 {
    -					break
    -				}
    -			}
    -			intStringLen := int(stringLen)
    -			if intStringLen < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			postIndex := iNdEx + intStringLen
    -			if postIndex < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if postIndex > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			m.WWIDs = append(m.WWIDs, string(dAtA[iNdEx:postIndex]))
    -			iNdEx = postIndex
    +			b := bool(v != 0)
    +			m.Optional = &b
     		default:
     			iNdEx = preIndex
     			skippy, err := skipGenerated(dAtA[iNdEx:])
    @@ -45056,6 +46682,39 @@ func (m *Lifecycle) Unmarshal(dAtA []byte) error {
     				return err
     			}
     			iNdEx = postIndex
    +		case 3:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field StopSignal", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			s := Signal(dAtA[iNdEx:postIndex])
    +			m.StopSignal = &s
    +			iNdEx = postIndex
     		default:
     			iNdEx = preIndex
     			skippy, err := skipGenerated(dAtA[iNdEx:])
    @@ -50743,6 +52402,76 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error {
     	}
     	return nil
     }
    +func (m *NodeSwapStatus) Unmarshal(dAtA []byte) error {
    +	l := len(dAtA)
    +	iNdEx := 0
    +	for iNdEx < l {
    +		preIndex := iNdEx
    +		var wire uint64
    +		for shift := uint(0); ; shift += 7 {
    +			if shift >= 64 {
    +				return ErrIntOverflowGenerated
    +			}
    +			if iNdEx >= l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			b := dAtA[iNdEx]
    +			iNdEx++
    +			wire |= uint64(b&0x7F) << shift
    +			if b < 0x80 {
    +				break
    +			}
    +		}
    +		fieldNum := int32(wire >> 3)
    +		wireType := int(wire & 0x7)
    +		if wireType == 4 {
    +			return fmt.Errorf("proto: NodeSwapStatus: wiretype end group for non-group")
    +		}
    +		if fieldNum <= 0 {
    +			return fmt.Errorf("proto: NodeSwapStatus: illegal tag %d (wire type %d)", fieldNum, wire)
    +		}
    +		switch fieldNum {
    +		case 1:
    +			if wireType != 0 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Capacity", wireType)
    +			}
    +			var v int64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				v |= int64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			m.Capacity = &v
    +		default:
    +			iNdEx = preIndex
    +			skippy, err := skipGenerated(dAtA[iNdEx:])
    +			if err != nil {
    +				return err
    +			}
    +			if (skippy < 0) || (iNdEx+skippy) < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if (iNdEx + skippy) > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			iNdEx += skippy
    +		}
    +	}
    +
    +	if iNdEx > l {
    +		return io.ErrUnexpectedEOF
    +	}
    +	return nil
    +}
     func (m *NodeSystemInfo) Unmarshal(dAtA []byte) error {
     	l := len(dAtA)
     	iNdEx := 0
    @@ -50898,11 +52627,107 @@ func (m *NodeSystemInfo) Unmarshal(dAtA []byte) error {
     			if postIndex > l {
     				return io.ErrUnexpectedEOF
     			}
    -			m.KernelVersion = string(dAtA[iNdEx:postIndex])
    +			m.KernelVersion = string(dAtA[iNdEx:postIndex])
    +			iNdEx = postIndex
    +		case 5:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field OSImage", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.OSImage = string(dAtA[iNdEx:postIndex])
    +			iNdEx = postIndex
    +		case 6:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field ContainerRuntimeVersion", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.ContainerRuntimeVersion = string(dAtA[iNdEx:postIndex])
    +			iNdEx = postIndex
    +		case 7:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field KubeletVersion", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.KubeletVersion = string(dAtA[iNdEx:postIndex])
     			iNdEx = postIndex
    -		case 5:
    +		case 8:
     			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field OSImage", wireType)
    +				return fmt.Errorf("proto: wrong wireType = %d for field KubeProxyVersion", wireType)
     			}
     			var stringLen uint64
     			for shift := uint(0); ; shift += 7 {
    @@ -50930,11 +52755,11 @@ func (m *NodeSystemInfo) Unmarshal(dAtA []byte) error {
     			if postIndex > l {
     				return io.ErrUnexpectedEOF
     			}
    -			m.OSImage = string(dAtA[iNdEx:postIndex])
    +			m.KubeProxyVersion = string(dAtA[iNdEx:postIndex])
     			iNdEx = postIndex
    -		case 6:
    +		case 9:
     			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field ContainerRuntimeVersion", wireType)
    +				return fmt.Errorf("proto: wrong wireType = %d for field OperatingSystem", wireType)
     			}
     			var stringLen uint64
     			for shift := uint(0); ; shift += 7 {
    @@ -50962,11 +52787,11 @@ func (m *NodeSystemInfo) Unmarshal(dAtA []byte) error {
     			if postIndex > l {
     				return io.ErrUnexpectedEOF
     			}
    -			m.ContainerRuntimeVersion = string(dAtA[iNdEx:postIndex])
    +			m.OperatingSystem = string(dAtA[iNdEx:postIndex])
     			iNdEx = postIndex
    -		case 7:
    +		case 10:
     			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field KubeletVersion", wireType)
    +				return fmt.Errorf("proto: wrong wireType = %d for field Architecture", wireType)
     			}
     			var stringLen uint64
     			for shift := uint(0); ; shift += 7 {
    @@ -50994,13 +52819,13 @@ func (m *NodeSystemInfo) Unmarshal(dAtA []byte) error {
     			if postIndex > l {
     				return io.ErrUnexpectedEOF
     			}
    -			m.KubeletVersion = string(dAtA[iNdEx:postIndex])
    +			m.Architecture = string(dAtA[iNdEx:postIndex])
     			iNdEx = postIndex
    -		case 8:
    +		case 11:
     			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field KubeProxyVersion", wireType)
    +				return fmt.Errorf("proto: wrong wireType = %d for field Swap", wireType)
     			}
    -			var stringLen uint64
    +			var msglen int
     			for shift := uint(0); ; shift += 7 {
     				if shift >= 64 {
     					return ErrIntOverflowGenerated
    @@ -51010,87 +52835,27 @@ func (m *NodeSystemInfo) Unmarshal(dAtA []byte) error {
     				}
     				b := dAtA[iNdEx]
     				iNdEx++
    -				stringLen |= uint64(b&0x7F) << shift
    +				msglen |= int(b&0x7F) << shift
     				if b < 0x80 {
     					break
     				}
     			}
    -			intStringLen := int(stringLen)
    -			if intStringLen < 0 {
    +			if msglen < 0 {
     				return ErrInvalidLengthGenerated
     			}
    -			postIndex := iNdEx + intStringLen
    +			postIndex := iNdEx + msglen
     			if postIndex < 0 {
     				return ErrInvalidLengthGenerated
     			}
     			if postIndex > l {
     				return io.ErrUnexpectedEOF
     			}
    -			m.KubeProxyVersion = string(dAtA[iNdEx:postIndex])
    -			iNdEx = postIndex
    -		case 9:
    -			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field OperatingSystem", wireType)
    -			}
    -			var stringLen uint64
    -			for shift := uint(0); ; shift += 7 {
    -				if shift >= 64 {
    -					return ErrIntOverflowGenerated
    -				}
    -				if iNdEx >= l {
    -					return io.ErrUnexpectedEOF
    -				}
    -				b := dAtA[iNdEx]
    -				iNdEx++
    -				stringLen |= uint64(b&0x7F) << shift
    -				if b < 0x80 {
    -					break
    -				}
    -			}
    -			intStringLen := int(stringLen)
    -			if intStringLen < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			postIndex := iNdEx + intStringLen
    -			if postIndex < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if postIndex > l {
    -				return io.ErrUnexpectedEOF
    +			if m.Swap == nil {
    +				m.Swap = &NodeSwapStatus{}
     			}
    -			m.OperatingSystem = string(dAtA[iNdEx:postIndex])
    -			iNdEx = postIndex
    -		case 10:
    -			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field Architecture", wireType)
    -			}
    -			var stringLen uint64
    -			for shift := uint(0); ; shift += 7 {
    -				if shift >= 64 {
    -					return ErrIntOverflowGenerated
    -				}
    -				if iNdEx >= l {
    -					return io.ErrUnexpectedEOF
    -				}
    -				b := dAtA[iNdEx]
    -				iNdEx++
    -				stringLen |= uint64(b&0x7F) << shift
    -				if b < 0x80 {
    -					break
    -				}
    -			}
    -			intStringLen := int(stringLen)
    -			if intStringLen < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			postIndex := iNdEx + intStringLen
    -			if postIndex < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if postIndex > l {
    -				return io.ErrUnexpectedEOF
    +			if err := m.Swap.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
     			}
    -			m.Architecture = string(dAtA[iNdEx:postIndex])
     			iNdEx = postIndex
     		default:
     			iNdEx = preIndex
    @@ -55725,17 +57490,179 @@ func (m *PodAttachOptions) Unmarshal(dAtA []byte) error {
     		fieldNum := int32(wire >> 3)
     		wireType := int(wire & 0x7)
     		if wireType == 4 {
    -			return fmt.Errorf("proto: PodAttachOptions: wiretype end group for non-group")
    +			return fmt.Errorf("proto: PodAttachOptions: wiretype end group for non-group")
    +		}
    +		if fieldNum <= 0 {
    +			return fmt.Errorf("proto: PodAttachOptions: illegal tag %d (wire type %d)", fieldNum, wire)
    +		}
    +		switch fieldNum {
    +		case 1:
    +			if wireType != 0 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Stdin", wireType)
    +			}
    +			var v int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				v |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			m.Stdin = bool(v != 0)
    +		case 2:
    +			if wireType != 0 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Stdout", wireType)
    +			}
    +			var v int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				v |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			m.Stdout = bool(v != 0)
    +		case 3:
    +			if wireType != 0 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Stderr", wireType)
    +			}
    +			var v int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				v |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			m.Stderr = bool(v != 0)
    +		case 4:
    +			if wireType != 0 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field TTY", wireType)
    +			}
    +			var v int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				v |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			m.TTY = bool(v != 0)
    +		case 5:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Container", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.Container = string(dAtA[iNdEx:postIndex])
    +			iNdEx = postIndex
    +		default:
    +			iNdEx = preIndex
    +			skippy, err := skipGenerated(dAtA[iNdEx:])
    +			if err != nil {
    +				return err
    +			}
    +			if (skippy < 0) || (iNdEx+skippy) < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if (iNdEx + skippy) > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			iNdEx += skippy
    +		}
    +	}
    +
    +	if iNdEx > l {
    +		return io.ErrUnexpectedEOF
    +	}
    +	return nil
    +}
    +func (m *PodCertificateProjection) Unmarshal(dAtA []byte) error {
    +	l := len(dAtA)
    +	iNdEx := 0
    +	for iNdEx < l {
    +		preIndex := iNdEx
    +		var wire uint64
    +		for shift := uint(0); ; shift += 7 {
    +			if shift >= 64 {
    +				return ErrIntOverflowGenerated
    +			}
    +			if iNdEx >= l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			b := dAtA[iNdEx]
    +			iNdEx++
    +			wire |= uint64(b&0x7F) << shift
    +			if b < 0x80 {
    +				break
    +			}
    +		}
    +		fieldNum := int32(wire >> 3)
    +		wireType := int(wire & 0x7)
    +		if wireType == 4 {
    +			return fmt.Errorf("proto: PodCertificateProjection: wiretype end group for non-group")
     		}
     		if fieldNum <= 0 {
    -			return fmt.Errorf("proto: PodAttachOptions: illegal tag %d (wire type %d)", fieldNum, wire)
    +			return fmt.Errorf("proto: PodCertificateProjection: illegal tag %d (wire type %d)", fieldNum, wire)
     		}
     		switch fieldNum {
     		case 1:
    -			if wireType != 0 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field Stdin", wireType)
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field SignerName", wireType)
     			}
    -			var v int
    +			var stringLen uint64
     			for shift := uint(0); ; shift += 7 {
     				if shift >= 64 {
     					return ErrIntOverflowGenerated
    @@ -55745,17 +57672,29 @@ func (m *PodAttachOptions) Unmarshal(dAtA []byte) error {
     				}
     				b := dAtA[iNdEx]
     				iNdEx++
    -				v |= int(b&0x7F) << shift
    +				stringLen |= uint64(b&0x7F) << shift
     				if b < 0x80 {
     					break
     				}
     			}
    -			m.Stdin = bool(v != 0)
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.SignerName = string(dAtA[iNdEx:postIndex])
    +			iNdEx = postIndex
     		case 2:
    -			if wireType != 0 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field Stdout", wireType)
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field KeyType", wireType)
     			}
    -			var v int
    +			var stringLen uint64
     			for shift := uint(0); ; shift += 7 {
     				if shift >= 64 {
     					return ErrIntOverflowGenerated
    @@ -55765,17 +57704,29 @@ func (m *PodAttachOptions) Unmarshal(dAtA []byte) error {
     				}
     				b := dAtA[iNdEx]
     				iNdEx++
    -				v |= int(b&0x7F) << shift
    +				stringLen |= uint64(b&0x7F) << shift
     				if b < 0x80 {
     					break
     				}
     			}
    -			m.Stdout = bool(v != 0)
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.KeyType = string(dAtA[iNdEx:postIndex])
    +			iNdEx = postIndex
     		case 3:
     			if wireType != 0 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field Stderr", wireType)
    +				return fmt.Errorf("proto: wrong wireType = %d for field MaxExpirationSeconds", wireType)
     			}
    -			var v int
    +			var v int32
     			for shift := uint(0); ; shift += 7 {
     				if shift >= 64 {
     					return ErrIntOverflowGenerated
    @@ -55785,17 +57736,17 @@ func (m *PodAttachOptions) Unmarshal(dAtA []byte) error {
     				}
     				b := dAtA[iNdEx]
     				iNdEx++
    -				v |= int(b&0x7F) << shift
    +				v |= int32(b&0x7F) << shift
     				if b < 0x80 {
     					break
     				}
     			}
    -			m.Stderr = bool(v != 0)
    +			m.MaxExpirationSeconds = &v
     		case 4:
    -			if wireType != 0 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field TTY", wireType)
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field CredentialBundlePath", wireType)
     			}
    -			var v int
    +			var stringLen uint64
     			for shift := uint(0); ; shift += 7 {
     				if shift >= 64 {
     					return ErrIntOverflowGenerated
    @@ -55805,15 +57756,27 @@ func (m *PodAttachOptions) Unmarshal(dAtA []byte) error {
     				}
     				b := dAtA[iNdEx]
     				iNdEx++
    -				v |= int(b&0x7F) << shift
    +				stringLen |= uint64(b&0x7F) << shift
     				if b < 0x80 {
     					break
     				}
     			}
    -			m.TTY = bool(v != 0)
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.CredentialBundlePath = string(dAtA[iNdEx:postIndex])
    +			iNdEx = postIndex
     		case 5:
     			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field Container", wireType)
    +				return fmt.Errorf("proto: wrong wireType = %d for field KeyPath", wireType)
     			}
     			var stringLen uint64
     			for shift := uint(0); ; shift += 7 {
    @@ -55841,7 +57804,39 @@ func (m *PodAttachOptions) Unmarshal(dAtA []byte) error {
     			if postIndex > l {
     				return io.ErrUnexpectedEOF
     			}
    -			m.Container = string(dAtA[iNdEx:postIndex])
    +			m.KeyPath = string(dAtA[iNdEx:postIndex])
    +			iNdEx = postIndex
    +		case 6:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field CertificateChainPath", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.CertificateChainPath = string(dAtA[iNdEx:postIndex])
     			iNdEx = postIndex
     		default:
     			iNdEx = preIndex
    @@ -56087,6 +58082,25 @@ func (m *PodCondition) Unmarshal(dAtA []byte) error {
     			}
     			m.Message = string(dAtA[iNdEx:postIndex])
     			iNdEx = postIndex
    +		case 7:
    +			if wireType != 0 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType)
    +			}
    +			m.ObservedGeneration = 0
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				m.ObservedGeneration |= int64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
     		default:
     			iNdEx = preIndex
     			skippy, err := skipGenerated(dAtA[iNdEx:])
    @@ -56510,39 +58524,155 @@ func (m *PodExecOptions) Unmarshal(dAtA []byte) error {
     			if postIndex > l {
     				return io.ErrUnexpectedEOF
     			}
    -			m.Container = string(dAtA[iNdEx:postIndex])
    -			iNdEx = postIndex
    -		case 6:
    -			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field Command", wireType)
    -			}
    -			var stringLen uint64
    -			for shift := uint(0); ; shift += 7 {
    -				if shift >= 64 {
    -					return ErrIntOverflowGenerated
    -				}
    -				if iNdEx >= l {
    -					return io.ErrUnexpectedEOF
    -				}
    -				b := dAtA[iNdEx]
    -				iNdEx++
    -				stringLen |= uint64(b&0x7F) << shift
    -				if b < 0x80 {
    -					break
    -				}
    -			}
    -			intStringLen := int(stringLen)
    -			if intStringLen < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			postIndex := iNdEx + intStringLen
    -			if postIndex < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if postIndex > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			m.Command = append(m.Command, string(dAtA[iNdEx:postIndex]))
    +			m.Container = string(dAtA[iNdEx:postIndex])
    +			iNdEx = postIndex
    +		case 6:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Command", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.Command = append(m.Command, string(dAtA[iNdEx:postIndex]))
    +			iNdEx = postIndex
    +		default:
    +			iNdEx = preIndex
    +			skippy, err := skipGenerated(dAtA[iNdEx:])
    +			if err != nil {
    +				return err
    +			}
    +			if (skippy < 0) || (iNdEx+skippy) < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if (iNdEx + skippy) > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			iNdEx += skippy
    +		}
    +	}
    +
    +	if iNdEx > l {
    +		return io.ErrUnexpectedEOF
    +	}
    +	return nil
    +}
    +func (m *PodExtendedResourceClaimStatus) Unmarshal(dAtA []byte) error {
    +	l := len(dAtA)
    +	iNdEx := 0
    +	for iNdEx < l {
    +		preIndex := iNdEx
    +		var wire uint64
    +		for shift := uint(0); ; shift += 7 {
    +			if shift >= 64 {
    +				return ErrIntOverflowGenerated
    +			}
    +			if iNdEx >= l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			b := dAtA[iNdEx]
    +			iNdEx++
    +			wire |= uint64(b&0x7F) << shift
    +			if b < 0x80 {
    +				break
    +			}
    +		}
    +		fieldNum := int32(wire >> 3)
    +		wireType := int(wire & 0x7)
    +		if wireType == 4 {
    +			return fmt.Errorf("proto: PodExtendedResourceClaimStatus: wiretype end group for non-group")
    +		}
    +		if fieldNum <= 0 {
    +			return fmt.Errorf("proto: PodExtendedResourceClaimStatus: illegal tag %d (wire type %d)", fieldNum, wire)
    +		}
    +		switch fieldNum {
    +		case 1:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field RequestMappings", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.RequestMappings = append(m.RequestMappings, ContainerExtendedResourceRequest{})
    +			if err := m.RequestMappings[len(m.RequestMappings)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		case 2:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field ResourceClaimName", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.ResourceClaimName = string(dAtA[iNdEx:postIndex])
     			iNdEx = postIndex
     		default:
     			iNdEx = preIndex
    @@ -59760,6 +61890,39 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error {
     				return err
     			}
     			iNdEx = postIndex
    +		case 41:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field HostnameOverride", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			s := string(dAtA[iNdEx:postIndex])
    +			m.HostnameOverride = &s
    +			iNdEx = postIndex
     		default:
     			iNdEx = preIndex
     			skippy, err := skipGenerated(dAtA[iNdEx:])
    @@ -59871,14 +62034,212 @@ func (m *PodStatus) Unmarshal(dAtA []byte) error {
     			if postIndex > l {
     				return io.ErrUnexpectedEOF
     			}
    -			m.Conditions = append(m.Conditions, PodCondition{})
    -			if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +			m.Conditions = append(m.Conditions, PodCondition{})
    +			if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		case 3:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.Message = string(dAtA[iNdEx:postIndex])
    +			iNdEx = postIndex
    +		case 4:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.Reason = string(dAtA[iNdEx:postIndex])
    +			iNdEx = postIndex
    +		case 5:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field HostIP", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.HostIP = string(dAtA[iNdEx:postIndex])
    +			iNdEx = postIndex
    +		case 6:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field PodIP", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.PodIP = string(dAtA[iNdEx:postIndex])
    +			iNdEx = postIndex
    +		case 7:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field StartTime", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			if m.StartTime == nil {
    +				m.StartTime = &v1.Time{}
    +			}
    +			if err := m.StartTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		case 8:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field ContainerStatuses", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.ContainerStatuses = append(m.ContainerStatuses, ContainerStatus{})
    +			if err := m.ContainerStatuses[len(m.ContainerStatuses)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
     				return err
     			}
     			iNdEx = postIndex
    -		case 3:
    +		case 9:
     			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType)
    +				return fmt.Errorf("proto: wrong wireType = %d for field QOSClass", wireType)
     			}
     			var stringLen uint64
     			for shift := uint(0); ; shift += 7 {
    @@ -59906,13 +62267,13 @@ func (m *PodStatus) Unmarshal(dAtA []byte) error {
     			if postIndex > l {
     				return io.ErrUnexpectedEOF
     			}
    -			m.Message = string(dAtA[iNdEx:postIndex])
    +			m.QOSClass = PodQOSClass(dAtA[iNdEx:postIndex])
     			iNdEx = postIndex
    -		case 4:
    +		case 10:
     			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType)
    +				return fmt.Errorf("proto: wrong wireType = %d for field InitContainerStatuses", wireType)
     			}
    -			var stringLen uint64
    +			var msglen int
     			for shift := uint(0); ; shift += 7 {
     				if shift >= 64 {
     					return ErrIntOverflowGenerated
    @@ -59922,59 +62283,29 @@ func (m *PodStatus) Unmarshal(dAtA []byte) error {
     				}
     				b := dAtA[iNdEx]
     				iNdEx++
    -				stringLen |= uint64(b&0x7F) << shift
    +				msglen |= int(b&0x7F) << shift
     				if b < 0x80 {
     					break
     				}
     			}
    -			intStringLen := int(stringLen)
    -			if intStringLen < 0 {
    +			if msglen < 0 {
     				return ErrInvalidLengthGenerated
     			}
    -			postIndex := iNdEx + intStringLen
    +			postIndex := iNdEx + msglen
     			if postIndex < 0 {
     				return ErrInvalidLengthGenerated
     			}
     			if postIndex > l {
     				return io.ErrUnexpectedEOF
     			}
    -			m.Reason = string(dAtA[iNdEx:postIndex])
    -			iNdEx = postIndex
    -		case 5:
    -			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field HostIP", wireType)
    -			}
    -			var stringLen uint64
    -			for shift := uint(0); ; shift += 7 {
    -				if shift >= 64 {
    -					return ErrIntOverflowGenerated
    -				}
    -				if iNdEx >= l {
    -					return io.ErrUnexpectedEOF
    -				}
    -				b := dAtA[iNdEx]
    -				iNdEx++
    -				stringLen |= uint64(b&0x7F) << shift
    -				if b < 0x80 {
    -					break
    -				}
    -			}
    -			intStringLen := int(stringLen)
    -			if intStringLen < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			postIndex := iNdEx + intStringLen
    -			if postIndex < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if postIndex > l {
    -				return io.ErrUnexpectedEOF
    +			m.InitContainerStatuses = append(m.InitContainerStatuses, ContainerStatus{})
    +			if err := m.InitContainerStatuses[len(m.InitContainerStatuses)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
     			}
    -			m.HostIP = string(dAtA[iNdEx:postIndex])
     			iNdEx = postIndex
    -		case 6:
    +		case 11:
     			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field PodIP", wireType)
    +				return fmt.Errorf("proto: wrong wireType = %d for field NominatedNodeName", wireType)
     			}
     			var stringLen uint64
     			for shift := uint(0); ; shift += 7 {
    @@ -60002,47 +62333,11 @@ func (m *PodStatus) Unmarshal(dAtA []byte) error {
     			if postIndex > l {
     				return io.ErrUnexpectedEOF
     			}
    -			m.PodIP = string(dAtA[iNdEx:postIndex])
    -			iNdEx = postIndex
    -		case 7:
    -			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field StartTime", wireType)
    -			}
    -			var msglen int
    -			for shift := uint(0); ; shift += 7 {
    -				if shift >= 64 {
    -					return ErrIntOverflowGenerated
    -				}
    -				if iNdEx >= l {
    -					return io.ErrUnexpectedEOF
    -				}
    -				b := dAtA[iNdEx]
    -				iNdEx++
    -				msglen |= int(b&0x7F) << shift
    -				if b < 0x80 {
    -					break
    -				}
    -			}
    -			if msglen < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			postIndex := iNdEx + msglen
    -			if postIndex < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if postIndex > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			if m.StartTime == nil {
    -				m.StartTime = &v1.Time{}
    -			}
    -			if err := m.StartTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    -				return err
    -			}
    +			m.NominatedNodeName = string(dAtA[iNdEx:postIndex])
     			iNdEx = postIndex
    -		case 8:
    +		case 12:
     			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field ContainerStatuses", wireType)
    +				return fmt.Errorf("proto: wrong wireType = %d for field PodIPs", wireType)
     			}
     			var msglen int
     			for shift := uint(0); ; shift += 7 {
    @@ -60069,46 +62364,14 @@ func (m *PodStatus) Unmarshal(dAtA []byte) error {
     			if postIndex > l {
     				return io.ErrUnexpectedEOF
     			}
    -			m.ContainerStatuses = append(m.ContainerStatuses, ContainerStatus{})
    -			if err := m.ContainerStatuses[len(m.ContainerStatuses)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +			m.PodIPs = append(m.PodIPs, PodIP{})
    +			if err := m.PodIPs[len(m.PodIPs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
     				return err
     			}
     			iNdEx = postIndex
    -		case 9:
    -			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field QOSClass", wireType)
    -			}
    -			var stringLen uint64
    -			for shift := uint(0); ; shift += 7 {
    -				if shift >= 64 {
    -					return ErrIntOverflowGenerated
    -				}
    -				if iNdEx >= l {
    -					return io.ErrUnexpectedEOF
    -				}
    -				b := dAtA[iNdEx]
    -				iNdEx++
    -				stringLen |= uint64(b&0x7F) << shift
    -				if b < 0x80 {
    -					break
    -				}
    -			}
    -			intStringLen := int(stringLen)
    -			if intStringLen < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			postIndex := iNdEx + intStringLen
    -			if postIndex < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if postIndex > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			m.QOSClass = PodQOSClass(dAtA[iNdEx:postIndex])
    -			iNdEx = postIndex
    -		case 10:
    +		case 13:
     			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field InitContainerStatuses", wireType)
    +				return fmt.Errorf("proto: wrong wireType = %d for field EphemeralContainerStatuses", wireType)
     			}
     			var msglen int
     			for shift := uint(0); ; shift += 7 {
    @@ -60135,14 +62398,14 @@ func (m *PodStatus) Unmarshal(dAtA []byte) error {
     			if postIndex > l {
     				return io.ErrUnexpectedEOF
     			}
    -			m.InitContainerStatuses = append(m.InitContainerStatuses, ContainerStatus{})
    -			if err := m.InitContainerStatuses[len(m.InitContainerStatuses)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +			m.EphemeralContainerStatuses = append(m.EphemeralContainerStatuses, ContainerStatus{})
    +			if err := m.EphemeralContainerStatuses[len(m.EphemeralContainerStatuses)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
     				return err
     			}
     			iNdEx = postIndex
    -		case 11:
    +		case 14:
     			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field NominatedNodeName", wireType)
    +				return fmt.Errorf("proto: wrong wireType = %d for field Resize", wireType)
     			}
     			var stringLen uint64
     			for shift := uint(0); ; shift += 7 {
    @@ -60170,11 +62433,11 @@ func (m *PodStatus) Unmarshal(dAtA []byte) error {
     			if postIndex > l {
     				return io.ErrUnexpectedEOF
     			}
    -			m.NominatedNodeName = string(dAtA[iNdEx:postIndex])
    +			m.Resize = PodResizeStatus(dAtA[iNdEx:postIndex])
     			iNdEx = postIndex
    -		case 12:
    +		case 15:
     			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field PodIPs", wireType)
    +				return fmt.Errorf("proto: wrong wireType = %d for field ResourceClaimStatuses", wireType)
     			}
     			var msglen int
     			for shift := uint(0); ; shift += 7 {
    @@ -60201,14 +62464,14 @@ func (m *PodStatus) Unmarshal(dAtA []byte) error {
     			if postIndex > l {
     				return io.ErrUnexpectedEOF
     			}
    -			m.PodIPs = append(m.PodIPs, PodIP{})
    -			if err := m.PodIPs[len(m.PodIPs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +			m.ResourceClaimStatuses = append(m.ResourceClaimStatuses, PodResourceClaimStatus{})
    +			if err := m.ResourceClaimStatuses[len(m.ResourceClaimStatuses)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
     				return err
     			}
     			iNdEx = postIndex
    -		case 13:
    +		case 16:
     			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field EphemeralContainerStatuses", wireType)
    +				return fmt.Errorf("proto: wrong wireType = %d for field HostIPs", wireType)
     			}
     			var msglen int
     			for shift := uint(0); ; shift += 7 {
    @@ -60235,16 +62498,16 @@ func (m *PodStatus) Unmarshal(dAtA []byte) error {
     			if postIndex > l {
     				return io.ErrUnexpectedEOF
     			}
    -			m.EphemeralContainerStatuses = append(m.EphemeralContainerStatuses, ContainerStatus{})
    -			if err := m.EphemeralContainerStatuses[len(m.EphemeralContainerStatuses)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +			m.HostIPs = append(m.HostIPs, HostIP{})
    +			if err := m.HostIPs[len(m.HostIPs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
     				return err
     			}
     			iNdEx = postIndex
    -		case 14:
    -			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field Resize", wireType)
    +		case 17:
    +			if wireType != 0 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType)
     			}
    -			var stringLen uint64
    +			m.ObservedGeneration = 0
     			for shift := uint(0); ; shift += 7 {
     				if shift >= 64 {
     					return ErrIntOverflowGenerated
    @@ -60254,27 +62517,14 @@ func (m *PodStatus) Unmarshal(dAtA []byte) error {
     				}
     				b := dAtA[iNdEx]
     				iNdEx++
    -				stringLen |= uint64(b&0x7F) << shift
    +				m.ObservedGeneration |= int64(b&0x7F) << shift
     				if b < 0x80 {
     					break
     				}
     			}
    -			intStringLen := int(stringLen)
    -			if intStringLen < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			postIndex := iNdEx + intStringLen
    -			if postIndex < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if postIndex > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			m.Resize = PodResizeStatus(dAtA[iNdEx:postIndex])
    -			iNdEx = postIndex
    -		case 15:
    +		case 18:
     			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field ResourceClaimStatuses", wireType)
    +				return fmt.Errorf("proto: wrong wireType = %d for field ExtendedResourceClaimStatus", wireType)
     			}
     			var msglen int
     			for shift := uint(0); ; shift += 7 {
    @@ -60301,42 +62551,10 @@ func (m *PodStatus) Unmarshal(dAtA []byte) error {
     			if postIndex > l {
     				return io.ErrUnexpectedEOF
     			}
    -			m.ResourceClaimStatuses = append(m.ResourceClaimStatuses, PodResourceClaimStatus{})
    -			if err := m.ResourceClaimStatuses[len(m.ResourceClaimStatuses)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    -				return err
    +			if m.ExtendedResourceClaimStatus == nil {
    +				m.ExtendedResourceClaimStatus = &PodExtendedResourceClaimStatus{}
     			}
    -			iNdEx = postIndex
    -		case 16:
    -			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field HostIPs", wireType)
    -			}
    -			var msglen int
    -			for shift := uint(0); ; shift += 7 {
    -				if shift >= 64 {
    -					return ErrIntOverflowGenerated
    -				}
    -				if iNdEx >= l {
    -					return io.ErrUnexpectedEOF
    -				}
    -				b := dAtA[iNdEx]
    -				iNdEx++
    -				msglen |= int(b&0x7F) << shift
    -				if b < 0x80 {
    -					break
    -				}
    -			}
    -			if msglen < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			postIndex := iNdEx + msglen
    -			if postIndex < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if postIndex > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			m.HostIPs = append(m.HostIPs, HostIP{})
    -			if err := m.HostIPs[len(m.HostIPs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +			if err := m.ExtendedResourceClaimStatus.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
     				return err
     			}
     			iNdEx = postIndex
    @@ -73142,6 +75360,42 @@ func (m *VolumeProjection) Unmarshal(dAtA []byte) error {
     				return err
     			}
     			iNdEx = postIndex
    +		case 6:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field PodCertificate", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			if m.PodCertificate == nil {
    +				m.PodCertificate = &PodCertificateProjection{}
    +			}
    +			if err := m.PodCertificate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
     		default:
     			iNdEx = preIndex
     			skippy, err := skipGenerated(dAtA[iNdEx:])
    diff --git a/vendor/k8s.io/api/core/v1/generated.proto b/vendor/k8s.io/api/core/v1/generated.proto
    index 08706987c..fb2695314 100644
    --- a/vendor/k8s.io/api/core/v1/generated.proto
    +++ b/vendor/k8s.io/api/core/v1/generated.proto
    @@ -737,8 +737,8 @@ message Container {
       repeated ContainerPort ports = 6;
     
       // List of sources to populate environment variables in the container.
    -  // The keys defined within a source must be a C_IDENTIFIER. All invalid keys
    -  // will be reported as an event when the container is starting. When a key exists in multiple
    +  // The keys defined within a source may consist of any printable ASCII characters except '='.
    +  // When a key exists in multiple
       // sources, the value associated with the last source will take precedence.
       // Values defined by an Env with a duplicate key will take precedence.
       // Cannot be updated.
    @@ -768,10 +768,10 @@ message Container {
       repeated ContainerResizePolicy resizePolicy = 23;
     
       // RestartPolicy defines the restart behavior of individual containers in a pod.
    -  // This field may only be set for init containers, and the only allowed value is "Always".
    -  // For non-init containers or when this field is not specified,
    +  // This overrides the pod-level restart policy. When this field is not specified,
       // the restart behavior is defined by the Pod's restart policy and the container type.
    -  // Setting the RestartPolicy as "Always" for the init container will have the following effect:
    +  // Additionally, setting the RestartPolicy as "Always" for the init container will
    +  // have the following effect:
       // this init container will be continually restarted on
       // exit until all regular containers have terminated. Once all regular
       // containers have completed, all init containers with restartPolicy "Always"
    @@ -786,6 +786,22 @@ message Container {
       // +optional
       optional string restartPolicy = 24;
     
    +  // Represents a list of rules to be checked to determine if the
    +  // container should be restarted on exit. The rules are evaluated in
    +  // order. Once a rule matches a container exit condition, the remaining
    +  // rules are ignored. If no rule matches the container exit condition,
    +  // the Container-level restart policy determines the whether the container
    +  // is restarted or not. Constraints on the rules:
    +  // - At most 20 rules are allowed.
    +  // - Rules can have the same action.
    +  // - Identical rules are not forbidden in validations.
    +  // When rules are specified, container MUST set RestartPolicy explicitly
    +  // even it if matches the Pod's RestartPolicy.
    +  // +featureGate=ContainerRestartRules
    +  // +optional
    +  // +listType=atomic
    +  repeated ContainerRestartRule restartPolicyRules = 25;
    +
       // Pod volumes to mount into the container's filesystem.
       // Cannot be updated.
       // +optional
    @@ -888,6 +904,19 @@ message Container {
       optional bool tty = 18;
     }
     
    +// ContainerExtendedResourceRequest has the mapping of container name,
    +// extended resource name to the device request name.
    +message ContainerExtendedResourceRequest {
    +  // The name of the container requesting resources.
    +  optional string containerName = 1;
    +
    +  // The name of the extended resource in that container which gets backed by DRA.
    +  optional string resourceName = 2;
    +
    +  // The name of the request in the special ResourceClaim which corresponds to the extended resource.
    +  optional string requestName = 3;
    +}
    +
     // Describe a container image
     message ContainerImage {
       // Names by which this image is known.
    @@ -942,6 +971,39 @@ message ContainerResizePolicy {
       optional string restartPolicy = 2;
     }
     
    +// ContainerRestartRule describes how a container exit is handled.
    +message ContainerRestartRule {
    +  // Specifies the action taken on a container exit if the requirements
    +  // are satisfied. The only possible value is "Restart" to restart the
    +  // container.
    +  // +required
    +  optional string action = 1;
    +
    +  // Represents the exit codes to check on container exits.
    +  // +optional
    +  // +oneOf=when
    +  optional ContainerRestartRuleOnExitCodes exitCodes = 2;
    +}
    +
    +// ContainerRestartRuleOnExitCodes describes the condition
    +// for handling an exited container based on its exit codes.
    +message ContainerRestartRuleOnExitCodes {
    +  // Represents the relationship between the container exit code(s) and the
    +  // specified values. Possible values are:
    +  // - In: the requirement is satisfied if the container exit code is in the
    +  //   set of specified values.
    +  // - NotIn: the requirement is satisfied if the container exit code is
    +  //   not in the set of specified values.
    +  // +required
    +  optional string operator = 1;
    +
    +  // Specifies the set of values to check for container exit codes.
    +  // At most 255 elements are allowed.
    +  // +optional
    +  // +listType=set
    +  repeated int32 values = 2;
    +}
    +
     // ContainerState holds a possible state of container.
     // Only one of its members may be specified.
     // If none of them is specified, the default one is ContainerStateWaiting.
    @@ -1103,6 +1165,11 @@ message ContainerStatus {
       // +listType=map
       // +listMapKey=name
       repeated ResourceStatus allocatedResourcesStatus = 14;
    +
    +  // StopSignal reports the effective stop signal for this container
    +  // +featureGate=ContainerStopSignals
    +  // +optional
    +  optional string stopSignal = 15;
     }
     
     // ContainerUser represents user identity information
    @@ -1194,6 +1261,7 @@ message EmptyDirVolumeSource {
     }
     
     // EndpointAddress is a tuple that describes single IP address.
    +// Deprecated: This API is deprecated in v1.33+.
     // +structType=atomic
     message EndpointAddress {
       // The IP of this endpoint.
    @@ -1215,6 +1283,7 @@ message EndpointAddress {
     }
     
     // EndpointPort is a tuple that describes a single port.
    +// Deprecated: This API is deprecated in v1.33+.
     // +structType=atomic
     message EndpointPort {
       // The name of this port.  This must match the 'name' field in the
    @@ -1265,6 +1334,8 @@ message EndpointPort {
     //
     // 	a: [ 10.10.1.1:8675, 10.10.2.2:8675 ],
     // 	b: [ 10.10.1.1:309, 10.10.2.2:309 ]
    +//
    +// Deprecated: This API is deprecated in v1.33+.
     message EndpointSubset {
       // IP addresses which offer the related ports that are marked as ready. These endpoints
       // should be considered safe for load balancers and clients to utilize.
    @@ -1298,6 +1369,11 @@ message EndpointSubset {
     // 	     Ports: [{"name": "a", "port": 93}, {"name": "b", "port": 76}]
     // 	   },
     // 	]
    +//
    +// Endpoints is a legacy API and does not contain information about all Service features.
    +// Use discoveryv1.EndpointSlice for complete information about Service endpoints.
    +//
    +// Deprecated: This API is deprecated in v1.33+. Use discoveryv1.EndpointSlice.
     message Endpoints {
       // Standard object's metadata.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
    @@ -1317,6 +1393,7 @@ message Endpoints {
     }
     
     // EndpointsList is a list of endpoints.
    +// Deprecated: This API is deprecated in v1.33+.
     message EndpointsList {
       // Standard list metadata.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
    @@ -1327,9 +1404,10 @@ message EndpointsList {
       repeated Endpoints items = 2;
     }
     
    -// EnvFromSource represents the source of a set of ConfigMaps
    +// EnvFromSource represents the source of a set of ConfigMaps or Secrets
     message EnvFromSource {
    -  // An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER.
    +  // Optional text to prepend to the name of each environment variable.
    +  // May consist of any printable ASCII characters except '='.
       // +optional
       optional string prefix = 1;
     
    @@ -1344,7 +1422,8 @@ message EnvFromSource {
     
     // EnvVar represents an environment variable present in a Container.
     message EnvVar {
    -  // Name of the environment variable. Must be a C_IDENTIFIER.
    +  // Name of the environment variable.
    +  // May consist of any printable ASCII characters except '='.
       optional string name = 1;
     
       // Variable references $(VAR_NAME) are expanded
    @@ -1383,6 +1462,13 @@ message EnvVarSource {
       // Selects a key of a secret in the pod's namespace
       // +optional
       optional SecretKeySelector secretKeyRef = 4;
    +
    +  // FileKeyRef selects a key of the env file.
    +  // Requires the EnvFiles feature gate to be enabled.
    +  //
    +  // +featureGate=EnvFiles
    +  // +optional
    +  optional FileKeySelector fileKeyRef = 5;
     }
     
     // An EphemeralContainer is a temporary container that you may add to an existing Pod for
    @@ -1464,8 +1550,8 @@ message EphemeralContainerCommon {
       repeated ContainerPort ports = 6;
     
       // List of sources to populate environment variables in the container.
    -  // The keys defined within a source must be a C_IDENTIFIER. All invalid keys
    -  // will be reported as an event when the container is starting. When a key exists in multiple
    +  // The keys defined within a source may consist of any printable ASCII characters except '='.
    +  // When a key exists in multiple
       // sources, the value associated with the last source will take precedence.
       // Values defined by an Env with a duplicate key will take precedence.
       // Cannot be updated.
    @@ -1495,12 +1581,19 @@ message EphemeralContainerCommon {
     
       // Restart policy for the container to manage the restart behavior of each
       // container within a pod.
    -  // This may only be set for init containers. You cannot set this field on
    -  // ephemeral containers.
    +  // You cannot set this field on ephemeral containers.
       // +featureGate=SidecarContainers
       // +optional
       optional string restartPolicy = 24;
     
    +  // Represents a list of rules to be checked to determine if the
    +  // container should be restarted on exit. You cannot set this field on
    +  // ephemeral containers.
    +  // +featureGate=ContainerRestartRules
    +  // +optional
    +  // +listType=atomic
    +  repeated ContainerRestartRule restartPolicyRules = 25;
    +
       // Pod volumes to mount into the container's filesystem. Subpath mounts are not allowed for ephemeral containers.
       // Cannot be updated.
       // +optional
    @@ -1761,6 +1854,36 @@ message FCVolumeSource {
       repeated string wwids = 5;
     }
     
    +// FileKeySelector selects a key of the env file.
    +// +structType=atomic
    +message FileKeySelector {
    +  // The name of the volume mount containing the env file.
    +  // +required
    +  optional string volumeName = 1;
    +
    +  // The path within the volume from which to select the file.
    +  // Must be relative and may not contain the '..' path or start with '..'.
    +  // +required
    +  optional string path = 2;
    +
    +  // The key within the env file. An invalid key will prevent the pod from starting.
    +  // The keys defined within a source may consist of any printable ASCII characters except '='.
    +  // During Alpha stage of the EnvFiles feature gate, the key size is limited to 128 characters.
    +  // +required
    +  optional string key = 3;
    +
    +  // Specify whether the file or its key must be defined. If the file or key
    +  // does not exist, then the env var is not published.
    +  // If optional is set to true and the specified key does not exist,
    +  // the environment variable will not be set in the Pod's containers.
    +  //
    +  // If optional is set to false and the specified key does not exist,
    +  // an error will be returned during Pod creation.
    +  // +optional
    +  // +default=false
    +  optional bool optional = 4;
    +}
    +
     // FlexPersistentVolumeSource represents a generic persistent volume resource that is
     // provisioned/attached using an exec based plugin.
     message FlexPersistentVolumeSource {
    @@ -1934,7 +2057,6 @@ message GlusterfsPersistentVolumeSource {
     // Glusterfs volumes do not support ownership management or SELinux relabeling.
     message GlusterfsVolumeSource {
       // endpoints is the endpoint name that details Glusterfs topology.
    -  // More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod
       optional string endpoints = 1;
     
       // path is the Glusterfs volume path.
    @@ -2198,6 +2320,12 @@ message Lifecycle {
       // More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks
       // +optional
       optional LifecycleHandler preStop = 2;
    +
    +  // StopSignal defines which signal will be sent to a container when it is being stopped.
    +  // If not specified, the default is defined by the container runtime in use.
    +  // StopSignal can only be set for Pods with a non-empty .spec.os.name
    +  // +optional
    +  optional string stopSignal = 3;
     }
     
     // LifecycleHandler defines a specific action that should be taken in a lifecycle
    @@ -2862,6 +2990,13 @@ message NodeStatus {
       optional NodeFeatures features = 13;
     }
     
    +// NodeSwapStatus represents swap memory information.
    +message NodeSwapStatus {
    +  // Total amount of swap memory in bytes.
    +  // +optional
    +  optional int64 capacity = 1;
    +}
    +
     // NodeSystemInfo is a set of ids/uuids to uniquely identify the node.
     message NodeSystemInfo {
       // MachineID reported by the node. For unique machine identification
    @@ -2897,6 +3032,9 @@ message NodeSystemInfo {
     
       // The Architecture reported by the node
       optional string architecture = 10;
    +
    +  // Swap Info reported by the node.
    +  optional NodeSwapStatus swap = 11;
     }
     
     // ObjectFieldSelector selects an APIVersioned field of an object.
    @@ -3129,15 +3267,13 @@ message PersistentVolumeClaimSpec {
       // volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim.
       // If specified, the CSI driver will create or update the volume with the attributes defined
       // in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName,
    -  // it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass
    -  // will be applied to the claim but it's not allowed to reset this field to empty string once it is set.
    -  // If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass
    -  // will be set by the persistentvolume controller if it exists.
    +  // it can be changed after the claim is created. An empty string or nil value indicates that no
    +  // VolumeAttributesClass will be applied to the claim. If the claim enters an Infeasible error state,
    +  // this field can be reset to its previous value (including nil) to cancel the modification.
       // If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be
       // set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource
       // exists.
       // More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/
    -  // (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default).
       // +featureGate=VolumeAttributesClass
       // +optional
       optional string volumeAttributesClassName = 9;
    @@ -3236,14 +3372,12 @@ message PersistentVolumeClaimStatus {
     
       // currentVolumeAttributesClassName is the current name of the VolumeAttributesClass the PVC is using.
       // When unset, there is no VolumeAttributeClass applied to this PersistentVolumeClaim
    -  // This is a beta field and requires enabling VolumeAttributesClass feature (off by default).
       // +featureGate=VolumeAttributesClass
       // +optional
       optional string currentVolumeAttributesClassName = 8;
     
       // ModifyVolumeStatus represents the status object of ControllerModifyVolume operation.
       // When this is unset, there is no ModifyVolume operation being attempted.
    -  // This is a beta field and requires enabling VolumeAttributesClass feature (off by default).
       // +featureGate=VolumeAttributesClass
       // +optional
       optional ModifyVolumeStatus modifyVolumeStatus = 9;
    @@ -3484,7 +3618,6 @@ message PersistentVolumeSpec {
       // after a volume has been updated successfully to a new class.
       // For an unbound PersistentVolume, the volumeAttributesClassName will be matched with unbound
       // PersistentVolumeClaims during the binding process.
    -  // This is a beta field and requires enabling VolumeAttributesClass feature (off by default).
       // +featureGate=VolumeAttributesClass
       // +optional
       optional string volumeAttributesClassName = 10;
    @@ -3615,7 +3748,6 @@ message PodAffinityTerm {
       // pod labels will be ignored. The default value is empty.
       // The same key is forbidden to exist in both matchLabelKeys and labelSelector.
       // Also, matchLabelKeys cannot be set when labelSelector isn't set.
    -  // This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
       //
       // +listType=atomic
       // +optional
    @@ -3629,7 +3761,6 @@ message PodAffinityTerm {
       // pod labels will be ignored. The default value is empty.
       // The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
       // Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
    -  // This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
       //
       // +listType=atomic
       // +optional
    @@ -3655,8 +3786,8 @@ message PodAntiAffinity {
       // most preferred is the one with the greatest sum of weights, i.e.
       // for each node that meets all of the scheduling requirements (resource
       // request, requiredDuringScheduling anti-affinity expressions, etc.),
    -  // compute a sum by iterating through the elements of this field and adding
    -  // "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the
    +  // compute a sum by iterating through the elements of this field and subtracting
    +  // "weight" from the sum if the node has pods which matches the corresponding podAffinityTerm; the
       // node(s) with the highest sum are the most preferred.
       // +optional
       // +listType=atomic
    @@ -3696,12 +3827,91 @@ message PodAttachOptions {
       optional string container = 5;
     }
     
    +// PodCertificateProjection provides a private key and X.509 certificate in the
    +// pod filesystem.
    +message PodCertificateProjection {
    +  // Kubelet's generated CSRs will be addressed to this signer.
    +  //
    +  // +required
    +  optional string signerName = 1;
    +
    +  // The type of keypair Kubelet will generate for the pod.
    +  //
    +  // Valid values are "RSA3072", "RSA4096", "ECDSAP256", "ECDSAP384",
    +  // "ECDSAP521", and "ED25519".
    +  //
    +  // +required
    +  optional string keyType = 2;
    +
    +  // maxExpirationSeconds is the maximum lifetime permitted for the
    +  // certificate.
    +  //
    +  // Kubelet copies this value verbatim into the PodCertificateRequests it
    +  // generates for this projection.
    +  //
    +  // If omitted, kube-apiserver will set it to 86400(24 hours). kube-apiserver
    +  // will reject values shorter than 3600 (1 hour).  The maximum allowable
    +  // value is 7862400 (91 days).
    +  //
    +  // The signer implementation is then free to issue a certificate with any
    +  // lifetime *shorter* than MaxExpirationSeconds, but no shorter than 3600
    +  // seconds (1 hour).  This constraint is enforced by kube-apiserver.
    +  // `kubernetes.io` signers will never issue certificates with a lifetime
    +  // longer than 24 hours.
    +  //
    +  // +optional
    +  optional int32 maxExpirationSeconds = 3;
    +
    +  // Write the credential bundle at this path in the projected volume.
    +  //
    +  // The credential bundle is a single file that contains multiple PEM blocks.
    +  // The first PEM block is a PRIVATE KEY block, containing a PKCS#8 private
    +  // key.
    +  //
    +  // The remaining blocks are CERTIFICATE blocks, containing the issued
    +  // certificate chain from the signer (leaf and any intermediates).
    +  //
    +  // Using credentialBundlePath lets your Pod's application code make a single
    +  // atomic read that retrieves a consistent key and certificate chain.  If you
    +  // project them to separate files, your application code will need to
    +  // additionally check that the leaf certificate was issued to the key.
    +  //
    +  // +optional
    +  optional string credentialBundlePath = 4;
    +
    +  // Write the key at this path in the projected volume.
    +  //
    +  // Most applications should use credentialBundlePath.  When using keyPath
    +  // and certificateChainPath, your application needs to check that the key
    +  // and leaf certificate are consistent, because it is possible to read the
    +  // files mid-rotation.
    +  //
    +  // +optional
    +  optional string keyPath = 5;
    +
    +  // Write the certificate chain at this path in the projected volume.
    +  //
    +  // Most applications should use credentialBundlePath.  When using keyPath
    +  // and certificateChainPath, your application needs to check that the key
    +  // and leaf certificate are consistent, because it is possible to read the
    +  // files mid-rotation.
    +  //
    +  // +optional
    +  optional string certificateChainPath = 6;
    +}
    +
     // PodCondition contains details for the current condition of this pod.
     message PodCondition {
       // Type is the type of the condition.
       // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions
       optional string type = 1;
     
    +  // If set, this represents the .metadata.generation that the pod condition was set based upon.
    +  // This is an alpha field. Enable PodObservedGenerationTracking to be able to use this field.
    +  // +featureGate=PodObservedGenerationTracking
    +  // +optional
    +  optional int64 observedGeneration = 7;
    +
       // Status is the status of the condition.
       // Can be True, False, Unknown.
       // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions
    @@ -3794,6 +4004,20 @@ message PodExecOptions {
       repeated string command = 6;
     }
     
    +// PodExtendedResourceClaimStatus is stored in the PodStatus for the extended
    +// resource requests backed by DRA. It stores the generated name for
    +// the corresponding special ResourceClaim created by the scheduler.
    +message PodExtendedResourceClaimStatus {
    +  // RequestMappings identifies the mapping of  to  device request
    +  // in the generated ResourceClaim.
    +  // +listType=atomic
    +  repeated ContainerExtendedResourceRequest requestMappings = 1;
    +
    +  // ResourceClaimName is the name of the ResourceClaim that was
    +  // generated for the Pod in the namespace of the Pod.
    +  optional string resourceClaimName = 2;
    +}
    +
     // PodIP represents a single IP address allocated to the pod.
     message PodIP {
       // IP is the IP address assigned to the pod
    @@ -4138,7 +4362,7 @@ message PodSpec {
       // Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes.
       // The resourceRequirements of an init container are taken into account during scheduling
       // by finding the highest request/limit for each resource type, and then using the max of
    -  // of that value or the sum of the normal containers. Limits are applied to init containers
    +  // that value or the sum of the normal containers. Limits are applied to init containers
       // in a similar fashion.
       // Init containers cannot currently be added or removed.
       // Cannot be updated.
    @@ -4234,7 +4458,9 @@ message PodSpec {
       optional string nodeName = 10;
     
       // Host networking requested for this pod. Use the host's network namespace.
    -  // If this option is set, the ports that will be used must be specified.
    +  // When using HostNetwork you should specify ports so the scheduler is aware.
    +  // When `hostNetwork` is true, specified `hostPort` fields in port definitions must match `containerPort`,
    +  // and unspecified `hostPort` fields in port definitions are defaulted to match `containerPort`.
       // Default to false.
       // +k8s:conversion-gen=false
       // +optional
    @@ -4399,6 +4625,7 @@ message PodSpec {
       // - spec.hostPID
       // - spec.hostIPC
       // - spec.hostUsers
    +  // - spec.resources
       // - spec.securityContext.appArmorProfile
       // - spec.securityContext.seLinuxOptions
       // - spec.securityContext.seccompProfile
    @@ -4469,7 +4696,7 @@ message PodSpec {
     
       // Resources is the total amount of CPU and Memory resources required by all
       // containers in the pod. It supports specifying Requests and Limits for
    -  // "cpu" and "memory" resource names only. ResourceClaims are not supported.
    +  // "cpu", "memory" and "hugepages-" resource names only. ResourceClaims are not supported.
       //
       // This field enables fine-grained control over resource allocation for the
       // entire pod, allowing resource sharing among containers in a pod.
    @@ -4481,12 +4708,33 @@ message PodSpec {
       // +featureGate=PodLevelResources
       // +optional
       optional ResourceRequirements resources = 40;
    +
    +  // HostnameOverride specifies an explicit override for the pod's hostname as perceived by the pod.
    +  // This field only specifies the pod's hostname and does not affect its DNS records.
    +  // When this field is set to a non-empty string:
    +  // - It takes precedence over the values set in `hostname` and `subdomain`.
    +  // - The Pod's hostname will be set to this value.
    +  // - `setHostnameAsFQDN` must be nil or set to false.
    +  // - `hostNetwork` must be set to false.
    +  //
    +  // This field must be a valid DNS subdomain as defined in RFC 1123 and contain at most 64 characters.
    +  // Requires the HostnameOverride feature gate to be enabled.
    +  //
    +  // +featureGate=HostnameOverride
    +  // +optional
    +  optional string hostnameOverride = 41;
     }
     
     // PodStatus represents information about the status of a pod. Status may trail the actual
     // state of a system, especially if the node that hosts the pod cannot contact the control
     // plane.
     message PodStatus {
    +  // If set, this represents the .metadata.generation that the pod status was set based upon.
    +  // This is an alpha field. Enable PodObservedGenerationTracking to be able to use this field.
    +  // +featureGate=PodObservedGenerationTracking
    +  // +optional
    +  optional int64 observedGeneration = 17;
    +
       // The phase of a Pod is a simple, high-level summary of where the Pod is in its lifecycle.
       // The conditions array, the reason and message fields, and the individual container status
       // arrays contain more detail about the pod's status.
    @@ -4618,6 +4866,9 @@ message PodStatus {
       // Status of resources resize desired for pod's containers.
       // It is empty if no resources resize is pending.
       // Any changes to container resources will automatically set this to "Proposed"
    +  // Deprecated: Resize status is moved to two pod conditions PodResizePending and PodResizeInProgress.
    +  // PodResizePending will track states where the spec has been resized, but the Kubelet has not yet allocated the resources.
    +  // PodResizeInProgress will track in-progress resizes, and should be present whenever allocated resources != acknowledged resources.
       // +featureGate=InPlacePodVerticalScaling
       // +optional
       optional string resize = 14;
    @@ -4630,6 +4881,11 @@ message PodStatus {
       // +featureGate=DynamicResourceAllocation
       // +optional
       repeated PodResourceClaimStatus resourceClaimStatuses = 15;
    +
    +  // Status of extended resource claim backed by DRA.
    +  // +featureGate=DRAExtendedResource
    +  // +optional
    +  optional PodExtendedResourceClaimStatus extendedResourceClaimStatus = 18;
     }
     
     // PodStatusResult is a wrapper for PodStatus returned by kubelet that can be encode/decoded
    @@ -5063,12 +5319,18 @@ message ReplicationControllerSpec {
       // Defaults to 1.
       // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#what-is-a-replicationcontroller
       // +optional
    +  // +k8s:optional
    +  // +default=1
    +  // +k8s:minimum=0
       optional int32 replicas = 1;
     
       // Minimum number of seconds for which a newly created pod should be ready
       // without any of its container crashing, for it to be considered available.
       // Defaults to 0 (pod will be considered available as soon as it is ready)
       // +optional
    +  // +k8s:optional
    +  // +default=0
    +  // +k8s:minimum=0
       optional int32 minReadySeconds = 4;
     
       // Selector is a label query over pods that should match the Replicas count.
    @@ -5248,7 +5510,7 @@ message ResourceRequirements {
       // Claims lists the names of resources, defined in spec.resourceClaims,
       // that are used by this container.
       //
    -  // This is an alpha field and requires enabling the
    +  // This field depends on the
       // DynamicResourceAllocation feature gate.
       //
       // This field is immutable. It can only be set for containers.
    @@ -6110,13 +6372,12 @@ message ServiceSpec {
       // +optional
       optional string internalTrafficPolicy = 22;
     
    -  // TrafficDistribution offers a way to express preferences for how traffic is
    -  // distributed to Service endpoints. Implementations can use this field as a
    -  // hint, but are not required to guarantee strict adherence. If the field is
    -  // not set, the implementation will apply its default routing strategy. If set
    -  // to "PreferClose", implementations should prioritize endpoints that are
    -  // topologically close (e.g., same zone).
    -  // This is a beta field and requires enabling ServiceTrafficDistribution feature.
    +  // TrafficDistribution offers a way to express preferences for how traffic
    +  // is distributed to Service endpoints. Implementations can use this field
    +  // as a hint, but are not required to guarantee strict adherence. If the
    +  // field is not set, the implementation will apply its default routing
    +  // strategy. If set to "PreferClose", implementations should prioritize
    +  // endpoints that are in the same zone.
       // +featureGate=ServiceTrafficDistribution
       // +optional
       optional string trafficDistribution = 23;
    @@ -6252,7 +6513,6 @@ message Taint {
       optional string effect = 3;
     
       // TimeAdded represents the time at which the taint was added.
    -  // It is only written for NoExecute taints.
       // +optional
       optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time timeAdded = 4;
     }
    @@ -6411,7 +6671,6 @@ message TopologySpreadConstraint {
       // - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations.
       //
       // If this value is nil, the behavior is equivalent to the Honor policy.
    -  // This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.
       // +optional
       optional string nodeAffinityPolicy = 6;
     
    @@ -6422,7 +6681,6 @@ message TopologySpreadConstraint {
       // - Ignore: node taints are ignored. All nodes are included.
       //
       // If this value is nil, the behavior is equivalent to the Ignore policy.
    -  // This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.
       // +optional
       optional string nodeTaintsPolicy = 7;
     
    @@ -6635,6 +6893,44 @@ message VolumeProjection {
       // +featureGate=ClusterTrustBundleProjection
       // +optional
       optional ClusterTrustBundleProjection clusterTrustBundle = 5;
    +
    +  // Projects an auto-rotating credential bundle (private key and certificate
    +  // chain) that the pod can use either as a TLS client or server.
    +  //
    +  // Kubelet generates a private key and uses it to send a
    +  // PodCertificateRequest to the named signer.  Once the signer approves the
    +  // request and issues a certificate chain, Kubelet writes the key and
    +  // certificate chain to the pod filesystem.  The pod does not start until
    +  // certificates have been issued for each podCertificate projected volume
    +  // source in its spec.
    +  //
    +  // Kubelet will begin trying to rotate the certificate at the time indicated
    +  // by the signer using the PodCertificateRequest.Status.BeginRefreshAt
    +  // timestamp.
    +  //
    +  // Kubelet can write a single file, indicated by the credentialBundlePath
    +  // field, or separate files, indicated by the keyPath and
    +  // certificateChainPath fields.
    +  //
    +  // The credential bundle is a single file in PEM format.  The first PEM
    +  // entry is the private key (in PKCS#8 format), and the remaining PEM
    +  // entries are the certificate chain issued by the signer (typically,
    +  // signers will return their certificate chain in leaf-to-root order).
    +  //
    +  // Prefer using the credential bundle format, since your application code
    +  // can read it atomically.  If you use keyPath and certificateChainPath,
    +  // your application must make two separate file reads. If these coincide
    +  // with a certificate rotation, it is possible that the private key and leaf
    +  // certificate you read may not correspond to each other.  Your application
    +  // will need to check for this condition, and re-read until they are
    +  // consistent.
    +  //
    +  // The named signer controls chooses the format of the certificate it
    +  // issues; consult the signer implementation's documentation to learn how to
    +  // use the certificates it issues.
    +  //
    +  // +featureGate=PodCertificateProjection +optional
    +  optional PodCertificateProjection podCertificate = 6;
     }
     
     // VolumeResourceRequirements describes the storage resource requirements for a volume.
    @@ -6706,13 +7002,12 @@ message VolumeSource {
     
       // iscsi represents an ISCSI Disk resource that is attached to a
       // kubelet's host machine and then exposed to the pod.
    -  // More info: https://examples.k8s.io/volumes/iscsi/README.md
    +  // More info: https://kubernetes.io/docs/concepts/storage/volumes/#iscsi
       // +optional
       optional ISCSIVolumeSource iscsi = 8;
     
       // glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime.
       // Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported.
    -  // More info: https://examples.k8s.io/volumes/glusterfs/README.md
       // +optional
       optional GlusterfsVolumeSource glusterfs = 9;
     
    @@ -6724,7 +7019,6 @@ message VolumeSource {
     
       // rbd represents a Rados Block Device mount on the host that shares a pod's lifetime.
       // Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported.
    -  // More info: https://examples.k8s.io/volumes/rbd/README.md
       // +optional
       optional RBDVolumeSource rbd = 11;
     
    @@ -6854,7 +7148,7 @@ message VolumeSource {
       // The types of objects that may be mounted by this volume are defined by the container runtime implementation on a host machine and at minimum must include all valid types supported by the container image field.
       // The OCI object gets mounted in a single directory (spec.containers[*].volumeMounts.mountPath) by merging the manifest layers in the same way as for container images.
       // The volume will be mounted read-only (ro) and non-executable files (noexec).
    -  // Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath).
    +  // Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath) before 1.33.
       // The field spec.securityContext.fsGroupChangePolicy has no effect on this volume type.
       // +featureGate=ImageVolume
       // +optional
    diff --git a/vendor/k8s.io/api/core/v1/lifecycle.go b/vendor/k8s.io/api/core/v1/lifecycle.go
    index 21ca90e81..21b931b67 100644
    --- a/vendor/k8s.io/api/core/v1/lifecycle.go
    +++ b/vendor/k8s.io/api/core/v1/lifecycle.go
    @@ -16,6 +16,10 @@ limitations under the License.
     
     package v1
     
    +import (
    +	"k8s.io/apimachinery/pkg/runtime/schema"
    +)
    +
     // APILifecycleIntroduced returns the release in which the API struct was introduced as int versions of major and minor for comparison.
     func (in *ComponentStatus) APILifecycleIntroduced() (major, minor int) {
     	return 1, 0
    @@ -35,3 +39,23 @@ func (in *ComponentStatusList) APILifecycleIntroduced() (major, minor int) {
     func (in *ComponentStatusList) APILifecycleDeprecated() (major, minor int) {
     	return 1, 19
     }
    +
    +// APILifecycleDeprecated returns the release in which the API struct was or will be deprecated as int versions of major and minor for comparison.
    +func (in *Endpoints) APILifecycleDeprecated() (major, minor int) {
    +	return 1, 33
    +}
    +
    +// APILifecycleReplacement returns the GVK of the replacement for the given API
    +func (in *Endpoints) APILifecycleReplacement() schema.GroupVersionKind {
    +	return schema.GroupVersionKind{Group: "discovery.k8s.io", Version: "v1", Kind: "EndpointSlice"}
    +}
    +
    +// APILifecycleDeprecated returns the release in which the API struct was or will be deprecated as int versions of major and minor for comparison.
    +func (in *EndpointsList) APILifecycleDeprecated() (major, minor int) {
    +	return 1, 33
    +}
    +
    +// APILifecycleReplacement returns the GVK of the replacement for the given API
    +func (in *EndpointsList) APILifecycleReplacement() schema.GroupVersionKind {
    +	return schema.GroupVersionKind{Group: "discovery.k8s.io", Version: "v1", Kind: "EndpointSliceList"}
    +}
    diff --git a/vendor/k8s.io/api/core/v1/types.go b/vendor/k8s.io/api/core/v1/types.go
    index fb2c1c745..08b6d351c 100644
    --- a/vendor/k8s.io/api/core/v1/types.go
    +++ b/vendor/k8s.io/api/core/v1/types.go
    @@ -91,12 +91,11 @@ type VolumeSource struct {
     	NFS *NFSVolumeSource `json:"nfs,omitempty" protobuf:"bytes,7,opt,name=nfs"`
     	// iscsi represents an ISCSI Disk resource that is attached to a
     	// kubelet's host machine and then exposed to the pod.
    -	// More info: https://examples.k8s.io/volumes/iscsi/README.md
    +	// More info: https://kubernetes.io/docs/concepts/storage/volumes/#iscsi
     	// +optional
     	ISCSI *ISCSIVolumeSource `json:"iscsi,omitempty" protobuf:"bytes,8,opt,name=iscsi"`
     	// glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime.
     	// Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported.
    -	// More info: https://examples.k8s.io/volumes/glusterfs/README.md
     	// +optional
     	Glusterfs *GlusterfsVolumeSource `json:"glusterfs,omitempty" protobuf:"bytes,9,opt,name=glusterfs"`
     	// persistentVolumeClaimVolumeSource represents a reference to a
    @@ -106,7 +105,6 @@ type VolumeSource struct {
     	PersistentVolumeClaim *PersistentVolumeClaimVolumeSource `json:"persistentVolumeClaim,omitempty" protobuf:"bytes,10,opt,name=persistentVolumeClaim"`
     	// rbd represents a Rados Block Device mount on the host that shares a pod's lifetime.
     	// Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported.
    -	// More info: https://examples.k8s.io/volumes/rbd/README.md
     	// +optional
     	RBD *RBDVolumeSource `json:"rbd,omitempty" protobuf:"bytes,11,opt,name=rbd"`
     	// flexVolume represents a generic volume resource that is
    @@ -217,7 +215,7 @@ type VolumeSource struct {
     	// The types of objects that may be mounted by this volume are defined by the container runtime implementation on a host machine and at minimum must include all valid types supported by the container image field.
     	// The OCI object gets mounted in a single directory (spec.containers[*].volumeMounts.mountPath) by merging the manifest layers in the same way as for container images.
     	// The volume will be mounted read-only (ro) and non-executable files (noexec).
    -	// Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath).
    +	// Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath) before 1.33.
     	// The field spec.securityContext.fsGroupChangePolicy has no effect on this volume type.
     	// +featureGate=ImageVolume
     	// +optional
    @@ -437,7 +435,6 @@ type PersistentVolumeSpec struct {
     	// after a volume has been updated successfully to a new class.
     	// For an unbound PersistentVolume, the volumeAttributesClassName will be matched with unbound
     	// PersistentVolumeClaims during the binding process.
    -	// This is a beta field and requires enabling VolumeAttributesClass feature (off by default).
     	// +featureGate=VolumeAttributesClass
     	// +optional
     	VolumeAttributesClassName *string `json:"volumeAttributesClassName,omitempty" protobuf:"bytes,10,opt,name=volumeAttributesClassName"`
    @@ -616,15 +613,13 @@ type PersistentVolumeClaimSpec struct {
     	// volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim.
     	// If specified, the CSI driver will create or update the volume with the attributes defined
     	// in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName,
    -	// it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass
    -	// will be applied to the claim but it's not allowed to reset this field to empty string once it is set.
    -	// If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass
    -	// will be set by the persistentvolume controller if it exists.
    +	// it can be changed after the claim is created. An empty string or nil value indicates that no
    +	// VolumeAttributesClass will be applied to the claim. If the claim enters an Infeasible error state,
    +	// this field can be reset to its previous value (including nil) to cancel the modification.
     	// If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be
     	// set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource
     	// exists.
     	// More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/
    -	// (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default).
     	// +featureGate=VolumeAttributesClass
     	// +optional
     	VolumeAttributesClassName *string `json:"volumeAttributesClassName,omitempty" protobuf:"bytes,9,opt,name=volumeAttributesClassName"`
    @@ -851,13 +846,11 @@ type PersistentVolumeClaimStatus struct {
     	AllocatedResourceStatuses map[ResourceName]ClaimResourceStatus `json:"allocatedResourceStatuses,omitempty" protobuf:"bytes,7,rep,name=allocatedResourceStatuses"`
     	// currentVolumeAttributesClassName is the current name of the VolumeAttributesClass the PVC is using.
     	// When unset, there is no VolumeAttributeClass applied to this PersistentVolumeClaim
    -	// This is a beta field and requires enabling VolumeAttributesClass feature (off by default).
     	// +featureGate=VolumeAttributesClass
     	// +optional
     	CurrentVolumeAttributesClassName *string `json:"currentVolumeAttributesClassName,omitempty" protobuf:"bytes,8,opt,name=currentVolumeAttributesClassName"`
     	// ModifyVolumeStatus represents the status object of ControllerModifyVolume operation.
     	// When this is unset, there is no ModifyVolume operation being attempted.
    -	// This is a beta field and requires enabling VolumeAttributesClass feature (off by default).
     	// +featureGate=VolumeAttributesClass
     	// +optional
     	ModifyVolumeStatus *ModifyVolumeStatus `json:"modifyVolumeStatus,omitempty" protobuf:"bytes,9,opt,name=modifyVolumeStatus"`
    @@ -972,7 +965,6 @@ type EmptyDirVolumeSource struct {
     // Glusterfs volumes do not support ownership management or SELinux relabeling.
     type GlusterfsVolumeSource struct {
     	// endpoints is the endpoint name that details Glusterfs topology.
    -	// More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod
     	EndpointsName string `json:"endpoints" protobuf:"bytes,1,opt,name=endpoints"`
     
     	// path is the Glusterfs volume path.
    @@ -1993,6 +1985,79 @@ type ClusterTrustBundleProjection struct {
     	Path string `json:"path" protobuf:"bytes,4,rep,name=path"`
     }
     
    +// PodCertificateProjection provides a private key and X.509 certificate in the
    +// pod filesystem.
    +type PodCertificateProjection struct {
    +	// Kubelet's generated CSRs will be addressed to this signer.
    +	//
    +	// +required
    +	SignerName string `json:"signerName,omitempty" protobuf:"bytes,1,rep,name=signerName"`
    +
    +	// The type of keypair Kubelet will generate for the pod.
    +	//
    +	// Valid values are "RSA3072", "RSA4096", "ECDSAP256", "ECDSAP384",
    +	// "ECDSAP521", and "ED25519".
    +	//
    +	// +required
    +	KeyType string `json:"keyType,omitempty" protobuf:"bytes,2,rep,name=keyType"`
    +
    +	// maxExpirationSeconds is the maximum lifetime permitted for the
    +	// certificate.
    +	//
    +	// Kubelet copies this value verbatim into the PodCertificateRequests it
    +	// generates for this projection.
    +	//
    +	// If omitted, kube-apiserver will set it to 86400(24 hours). kube-apiserver
    +	// will reject values shorter than 3600 (1 hour).  The maximum allowable
    +	// value is 7862400 (91 days).
    +	//
    +	// The signer implementation is then free to issue a certificate with any
    +	// lifetime *shorter* than MaxExpirationSeconds, but no shorter than 3600
    +	// seconds (1 hour).  This constraint is enforced by kube-apiserver.
    +	// `kubernetes.io` signers will never issue certificates with a lifetime
    +	// longer than 24 hours.
    +	//
    +	// +optional
    +	MaxExpirationSeconds *int32 `json:"maxExpirationSeconds,omitempty" protobuf:"varint,3,opt,name=maxExpirationSeconds"`
    +
    +	// Write the credential bundle at this path in the projected volume.
    +	//
    +	// The credential bundle is a single file that contains multiple PEM blocks.
    +	// The first PEM block is a PRIVATE KEY block, containing a PKCS#8 private
    +	// key.
    +	//
    +	// The remaining blocks are CERTIFICATE blocks, containing the issued
    +	// certificate chain from the signer (leaf and any intermediates).
    +	//
    +	// Using credentialBundlePath lets your Pod's application code make a single
    +	// atomic read that retrieves a consistent key and certificate chain.  If you
    +	// project them to separate files, your application code will need to
    +	// additionally check that the leaf certificate was issued to the key.
    +	//
    +	// +optional
    +	CredentialBundlePath string `json:"credentialBundlePath,omitempty" protobuf:"bytes,4,rep,name=credentialBundlePath"`
    +
    +	// Write the key at this path in the projected volume.
    +	//
    +	// Most applications should use credentialBundlePath.  When using keyPath
    +	// and certificateChainPath, your application needs to check that the key
    +	// and leaf certificate are consistent, because it is possible to read the
    +	// files mid-rotation.
    +	//
    +	// +optional
    +	KeyPath string `json:"keyPath,omitempty" protobuf:"bytes,5,rep,name=keyPath"`
    +
    +	// Write the certificate chain at this path in the projected volume.
    +	//
    +	// Most applications should use credentialBundlePath.  When using keyPath
    +	// and certificateChainPath, your application needs to check that the key
    +	// and leaf certificate are consistent, because it is possible to read the
    +	// files mid-rotation.
    +	//
    +	// +optional
    +	CertificateChainPath string `json:"certificateChainPath,omitempty" protobuf:"bytes,6,rep,name=certificateChainPath"`
    +}
    +
     // Represents a projected volume source
     type ProjectedVolumeSource struct {
     	// sources is the list of volume projections. Each entry in this list
    @@ -2043,6 +2108,44 @@ type VolumeProjection struct {
     	// +featureGate=ClusterTrustBundleProjection
     	// +optional
     	ClusterTrustBundle *ClusterTrustBundleProjection `json:"clusterTrustBundle,omitempty" protobuf:"bytes,5,opt,name=clusterTrustBundle"`
    +
    +	// Projects an auto-rotating credential bundle (private key and certificate
    +	// chain) that the pod can use either as a TLS client or server.
    +	//
    +	// Kubelet generates a private key and uses it to send a
    +	// PodCertificateRequest to the named signer.  Once the signer approves the
    +	// request and issues a certificate chain, Kubelet writes the key and
    +	// certificate chain to the pod filesystem.  The pod does not start until
    +	// certificates have been issued for each podCertificate projected volume
    +	// source in its spec.
    +	//
    +	// Kubelet will begin trying to rotate the certificate at the time indicated
    +	// by the signer using the PodCertificateRequest.Status.BeginRefreshAt
    +	// timestamp.
    +	//
    +	// Kubelet can write a single file, indicated by the credentialBundlePath
    +	// field, or separate files, indicated by the keyPath and
    +	// certificateChainPath fields.
    +	//
    +	// The credential bundle is a single file in PEM format.  The first PEM
    +	// entry is the private key (in PKCS#8 format), and the remaining PEM
    +	// entries are the certificate chain issued by the signer (typically,
    +	// signers will return their certificate chain in leaf-to-root order).
    +	//
    +	// Prefer using the credential bundle format, since your application code
    +	// can read it atomically.  If you use keyPath and certificateChainPath,
    +	// your application must make two separate file reads. If these coincide
    +	// with a certificate rotation, it is possible that the private key and leaf
    +	// certificate you read may not correspond to each other.  Your application
    +	// will need to check for this condition, and re-read until they are
    +	// consistent.
    +	//
    +	// The named signer controls chooses the format of the certificate it
    +	// issues; consult the signer implementation's documentation to learn how to
    +	// use the certificates it issues.
    +	//
     +	// +featureGate=PodCertificateProjection
     +	// +optional
    +	PodCertificate *PodCertificateProjection `json:"podCertificate,omitempty" protobuf:"bytes,6,opt,name=podCertificate"`
     }
     
     const (
    @@ -2351,7 +2454,8 @@ type VolumeDevice struct {
     
     // EnvVar represents an environment variable present in a Container.
     type EnvVar struct {
    -	// Name of the environment variable. Must be a C_IDENTIFIER.
    +	// Name of the environment variable.
    +	// May consist of any printable ASCII characters except '='.
     	Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
     
     	// Optional: no more than one of the following may be specified.
    @@ -2388,6 +2492,39 @@ type EnvVarSource struct {
     	// Selects a key of a secret in the pod's namespace
     	// +optional
     	SecretKeyRef *SecretKeySelector `json:"secretKeyRef,omitempty" protobuf:"bytes,4,opt,name=secretKeyRef"`
    +	// FileKeyRef selects a key of the env file.
    +	// Requires the EnvFiles feature gate to be enabled.
    +	//
    +	// +featureGate=EnvFiles
    +	// +optional
    +	FileKeyRef *FileKeySelector `json:"fileKeyRef,omitempty" protobuf:"bytes,5,opt,name=fileKeyRef"`
    +}
    +
    +// FileKeySelector selects a key of the env file.
    +// +structType=atomic
    +type FileKeySelector struct {
    +	// The name of the volume mount containing the env file.
    +	// +required
    +	VolumeName string `json:"volumeName" protobuf:"bytes,1,opt,name=volumeName"`
    +	// The path within the volume from which to select the file.
    +	// Must be relative and may not contain the '..' path or start with '..'.
    +	// +required
    +	Path string `json:"path" protobuf:"bytes,2,opt,name=path"`
    +	// The key within the env file. An invalid key will prevent the pod from starting.
    +	// The keys defined within a source may consist of any printable ASCII characters except '='.
    +	// During Alpha stage of the EnvFiles feature gate, the key size is limited to 128 characters.
    +	// +required
    +	Key string `json:"key" protobuf:"bytes,3,opt,name=key"`
    +	// Specify whether the file or its key must be defined. If the file or key
    +	// does not exist, then the env var is not published.
    +	// If optional is set to true and the specified key does not exist,
    +	// the environment variable will not be set in the Pod's containers.
    +	//
    +	// If optional is set to false and the specified key does not exist,
    +	// an error will be returned during Pod creation.
    +	// +optional
    +	// +default=false
    +	Optional *bool `json:"optional,omitempty" protobuf:"varint,4,opt,name=optional"`
     }
     
     // ObjectFieldSelector selects an APIVersioned field of an object.
    @@ -2437,9 +2574,10 @@ type SecretKeySelector struct {
     	Optional *bool `json:"optional,omitempty" protobuf:"varint,3,opt,name=optional"`
     }
     
    -// EnvFromSource represents the source of a set of ConfigMaps
    +// EnvFromSource represents the source of a set of ConfigMaps or Secrets
     type EnvFromSource struct {
    -	// An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER.
    +	// Optional text to prepend to the name of each environment variable.
    +	// May consist of any printable ASCII characters except '='.
     	// +optional
     	Prefix string `json:"prefix,omitempty" protobuf:"bytes,1,opt,name=prefix"`
     	// The ConfigMap to select from
    @@ -2697,7 +2835,7 @@ type ResourceRequirements struct {
     	// Claims lists the names of resources, defined in spec.resourceClaims,
     	// that are used by this container.
     	//
    -	// This is an alpha field and requires enabling the
    +	// This field depends on the
     	// DynamicResourceAllocation feature gate.
     	//
     	// This field is immutable. It can only be set for containers.
    @@ -2805,8 +2943,8 @@ type Container struct {
     	// +listMapKey=protocol
     	Ports []ContainerPort `json:"ports,omitempty" patchStrategy:"merge" patchMergeKey:"containerPort" protobuf:"bytes,6,rep,name=ports"`
     	// List of sources to populate environment variables in the container.
    -	// The keys defined within a source must be a C_IDENTIFIER. All invalid keys
    -	// will be reported as an event when the container is starting. When a key exists in multiple
    +	// The keys defined within a source may consist of any printable ASCII characters except '='.
    +	// When a key exists in multiple
     	// sources, the value associated with the last source will take precedence.
     	// Values defined by an Env with a duplicate key will take precedence.
     	// Cannot be updated.
    @@ -2832,10 +2970,10 @@ type Container struct {
     	// +listType=atomic
     	ResizePolicy []ContainerResizePolicy `json:"resizePolicy,omitempty" protobuf:"bytes,23,rep,name=resizePolicy"`
     	// RestartPolicy defines the restart behavior of individual containers in a pod.
    -	// This field may only be set for init containers, and the only allowed value is "Always".
    -	// For non-init containers or when this field is not specified,
    +	// This overrides the pod-level restart policy. When this field is not specified,
     	// the restart behavior is defined by the Pod's restart policy and the container type.
    -	// Setting the RestartPolicy as "Always" for the init container will have the following effect:
    +	// Additionally, setting the RestartPolicy as "Always" for the init container will
    +	// have the following effect:
     	// this init container will be continually restarted on
     	// exit until all regular containers have terminated. Once all regular
     	// containers have completed, all init containers with restartPolicy "Always"
    @@ -2849,6 +2987,21 @@ type Container struct {
     	// +featureGate=SidecarContainers
     	// +optional
     	RestartPolicy *ContainerRestartPolicy `json:"restartPolicy,omitempty" protobuf:"bytes,24,opt,name=restartPolicy,casttype=ContainerRestartPolicy"`
    +	// Represents a list of rules to be checked to determine if the
    +	// container should be restarted on exit. The rules are evaluated in
    +	// order. Once a rule matches a container exit condition, the remaining
    +	// rules are ignored. If no rule matches the container exit condition,
    +	// the Container-level restart policy determines the whether the container
    +	// is restarted or not. Constraints on the rules:
    +	// - At most 20 rules are allowed.
    +	// - Rules can have the same action.
    +	// - Identical rules are not forbidden in validations.
    +	// When rules are specified, container MUST set RestartPolicy explicitly
    +	// even it if matches the Pod's RestartPolicy.
    +	// +featureGate=ContainerRestartRules
    +	// +optional
    +	// +listType=atomic
    +	RestartPolicyRules []ContainerRestartRule `json:"restartPolicyRules,omitempty" protobuf:"bytes,25,rep,name=restartPolicyRules"`
     	// Pod volumes to mount into the container's filesystem.
     	// Cannot be updated.
     	// +optional
    @@ -2980,6 +3133,78 @@ type LifecycleHandler struct {
     	Sleep *SleepAction `json:"sleep,omitempty" protobuf:"bytes,4,opt,name=sleep"`
     }
     
    +// Signal defines the stop signal of containers
    +// +enum
    +type Signal string
    +
    +const (
    +	SIGABRT         Signal = "SIGABRT"
    +	SIGALRM         Signal = "SIGALRM"
    +	SIGBUS          Signal = "SIGBUS"
    +	SIGCHLD         Signal = "SIGCHLD"
    +	SIGCLD          Signal = "SIGCLD"
    +	SIGCONT         Signal = "SIGCONT"
    +	SIGFPE          Signal = "SIGFPE"
    +	SIGHUP          Signal = "SIGHUP"
    +	SIGILL          Signal = "SIGILL"
    +	SIGINT          Signal = "SIGINT"
    +	SIGIO           Signal = "SIGIO"
    +	SIGIOT          Signal = "SIGIOT"
    +	SIGKILL         Signal = "SIGKILL"
    +	SIGPIPE         Signal = "SIGPIPE"
    +	SIGPOLL         Signal = "SIGPOLL"
    +	SIGPROF         Signal = "SIGPROF"
    +	SIGPWR          Signal = "SIGPWR"
    +	SIGQUIT         Signal = "SIGQUIT"
    +	SIGSEGV         Signal = "SIGSEGV"
    +	SIGSTKFLT       Signal = "SIGSTKFLT"
    +	SIGSTOP         Signal = "SIGSTOP"
    +	SIGSYS          Signal = "SIGSYS"
    +	SIGTERM         Signal = "SIGTERM"
    +	SIGTRAP         Signal = "SIGTRAP"
    +	SIGTSTP         Signal = "SIGTSTP"
    +	SIGTTIN         Signal = "SIGTTIN"
    +	SIGTTOU         Signal = "SIGTTOU"
    +	SIGURG          Signal = "SIGURG"
    +	SIGUSR1         Signal = "SIGUSR1"
    +	SIGUSR2         Signal = "SIGUSR2"
    +	SIGVTALRM       Signal = "SIGVTALRM"
    +	SIGWINCH        Signal = "SIGWINCH"
    +	SIGXCPU         Signal = "SIGXCPU"
    +	SIGXFSZ         Signal = "SIGXFSZ"
    +	SIGRTMIN        Signal = "SIGRTMIN"
    +	SIGRTMINPLUS1   Signal = "SIGRTMIN+1"
    +	SIGRTMINPLUS2   Signal = "SIGRTMIN+2"
    +	SIGRTMINPLUS3   Signal = "SIGRTMIN+3"
    +	SIGRTMINPLUS4   Signal = "SIGRTMIN+4"
    +	SIGRTMINPLUS5   Signal = "SIGRTMIN+5"
    +	SIGRTMINPLUS6   Signal = "SIGRTMIN+6"
    +	SIGRTMINPLUS7   Signal = "SIGRTMIN+7"
    +	SIGRTMINPLUS8   Signal = "SIGRTMIN+8"
    +	SIGRTMINPLUS9   Signal = "SIGRTMIN+9"
    +	SIGRTMINPLUS10  Signal = "SIGRTMIN+10"
    +	SIGRTMINPLUS11  Signal = "SIGRTMIN+11"
    +	SIGRTMINPLUS12  Signal = "SIGRTMIN+12"
    +	SIGRTMINPLUS13  Signal = "SIGRTMIN+13"
    +	SIGRTMINPLUS14  Signal = "SIGRTMIN+14"
    +	SIGRTMINPLUS15  Signal = "SIGRTMIN+15"
    +	SIGRTMAXMINUS14 Signal = "SIGRTMAX-14"
    +	SIGRTMAXMINUS13 Signal = "SIGRTMAX-13"
    +	SIGRTMAXMINUS12 Signal = "SIGRTMAX-12"
    +	SIGRTMAXMINUS11 Signal = "SIGRTMAX-11"
    +	SIGRTMAXMINUS10 Signal = "SIGRTMAX-10"
    +	SIGRTMAXMINUS9  Signal = "SIGRTMAX-9"
    +	SIGRTMAXMINUS8  Signal = "SIGRTMAX-8"
    +	SIGRTMAXMINUS7  Signal = "SIGRTMAX-7"
    +	SIGRTMAXMINUS6  Signal = "SIGRTMAX-6"
    +	SIGRTMAXMINUS5  Signal = "SIGRTMAX-5"
    +	SIGRTMAXMINUS4  Signal = "SIGRTMAX-4"
    +	SIGRTMAXMINUS3  Signal = "SIGRTMAX-3"
    +	SIGRTMAXMINUS2  Signal = "SIGRTMAX-2"
    +	SIGRTMAXMINUS1  Signal = "SIGRTMAX-1"
    +	SIGRTMAX        Signal = "SIGRTMAX"
    +)
    +
     // Lifecycle describes actions that the management system should take in response to container lifecycle
     // events. For the PostStart and PreStop lifecycle handlers, management of the container blocks
     // until the action is complete, unless the container process fails, in which case the handler is aborted.
    @@ -3001,6 +3226,11 @@ type Lifecycle struct {
     	// More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks
     	// +optional
     	PreStop *LifecycleHandler `json:"preStop,omitempty" protobuf:"bytes,2,opt,name=preStop"`
    +	// StopSignal defines which signal will be sent to a container when it is being stopped.
    +	// If not specified, the default is defined by the container runtime in use.
    +	// StopSignal can only be set for Pods with a non-empty .spec.os.name
    +	// +optional
    +	StopSignal *Signal `json:"stopSignal,omitempty" protobuf:"bytes,3,opt,name=stopSignal"`
     }
     
     type ConditionStatus string
    @@ -3154,6 +3384,10 @@ type ContainerStatus struct {
     	// +listType=map
     	// +listMapKey=name
     	AllocatedResourcesStatus []ResourceStatus `json:"allocatedResourcesStatus,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,14,rep,name=allocatedResourcesStatus"`
    +	// StopSignal reports the effective stop signal for this container
    +	// +featureGate=ContainerStopSignals
    +	// +optional
    +	StopSignal *Signal `json:"stopSignal,omitempty" protobuf:"bytes,15,opt,name=stopSignal"`
     }
     
     // ResourceStatus represents the status of a single resource allocated to a Pod.
    @@ -3278,6 +3512,17 @@ const (
     	// PodReadyToStartContainers pod sandbox is successfully configured and
     	// the pod is ready to launch containers.
     	PodReadyToStartContainers PodConditionType = "PodReadyToStartContainers"
    +	// PodResizePending indicates that the pod has been resized, but kubelet has not
    +	// yet allocated the resources. If both PodResizePending and PodResizeInProgress
    +	// are set, it means that a new resize was requested in the middle of a previous
    +	// pod resize that is still in progress.
    +	PodResizePending PodConditionType = "PodResizePending"
    +	// PodResizeInProgress indicates that a resize is in progress, and is present whenever
    +	// the Kubelet has allocated resources for the resize, but has not yet actuated all of
    +	// the required changes.
    +	// If both PodResizePending and PodResizeInProgress are set, it means that a new resize was
    +	// requested in the middle of a previous pod resize that is still in progress.
    +	PodResizeInProgress PodConditionType = "PodResizeInProgress"
     )
     
     // These are reasons for a pod's transition to a condition.
    @@ -3301,6 +3546,18 @@ const (
     	// PodReasonPreemptionByScheduler reason in DisruptionTarget pod condition indicates that the
     	// disruption was initiated by scheduler's preemption.
     	PodReasonPreemptionByScheduler = "PreemptionByScheduler"
    +
    +	// PodReasonDeferred reason in PodResizePending pod condition indicates the proposed resize is feasible in
    +	// theory (it fits on this node) but is not possible right now.
    +	PodReasonDeferred = "Deferred"
    +
    +	// PodReasonInfeasible reason in PodResizePending pod condition indicates the proposed resize is not
    +	// feasible and is rejected; it may not be re-evaluated
    +	PodReasonInfeasible = "Infeasible"
    +
    +	// PodReasonError reason in PodResizeInProgress pod condition indicates that an error occurred while
    +	// actuating the resize.
    +	PodReasonError = "Error"
     )
     
     // PodCondition contains details for the current condition of this pod.
    @@ -3308,6 +3565,11 @@ type PodCondition struct {
     	// Type is the type of the condition.
     	// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions
     	Type PodConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=PodConditionType"`
    +	// If set, this represents the .metadata.generation that the pod condition was set based upon.
    +	// This is an alpha field. Enable PodObservedGenerationTracking to be able to use this field.
    +	// +featureGate=PodObservedGenerationTracking
    +	// +optional
    +	ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,7,opt,name=observedGeneration"`
     	// Status is the status of the condition.
     	// Can be True, False, Unknown.
     	// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions
    @@ -3326,12 +3588,10 @@ type PodCondition struct {
     	Message string `json:"message,omitempty" protobuf:"bytes,6,opt,name=message"`
     }
     
    -// PodResizeStatus shows status of desired resize of a pod's containers.
    +// Deprecated: PodResizeStatus shows status of desired resize of a pod's containers.
     type PodResizeStatus string
     
     const (
    -	// Pod resources resize has been requested and will be evaluated by node.
    -	PodResizeStatusProposed PodResizeStatus = "Proposed"
     	// Pod resources resize has been accepted by node and is being actuated.
     	PodResizeStatusInProgress PodResizeStatus = "InProgress"
     	// Node cannot resize the pod at this time and will keep retrying.
    @@ -3371,11 +3631,64 @@ const (
     )
     
     // ContainerRestartPolicy is the restart policy for a single container.
    -// This may only be set for init containers and only allowed value is "Always".
    +// The only allowed values are "Always", "Never", and "OnFailure".
     type ContainerRestartPolicy string
     
     const (
    -	ContainerRestartPolicyAlways ContainerRestartPolicy = "Always"
    +	ContainerRestartPolicyAlways    ContainerRestartPolicy = "Always"
    +	ContainerRestartPolicyNever     ContainerRestartPolicy = "Never"
    +	ContainerRestartPolicyOnFailure ContainerRestartPolicy = "OnFailure"
    +)
    +
    +// ContainerRestartRule describes how a container exit is handled.
    +type ContainerRestartRule struct {
    +	// Specifies the action taken on a container exit if the requirements
    +	// are satisfied. The only possible value is "Restart" to restart the
    +	// container.
    +	// +required
    +	Action ContainerRestartRuleAction `json:"action,omitempty" proto:"bytes,1,opt,name=action" protobuf:"bytes,1,opt,name=action,casttype=ContainerRestartRuleAction"`
    +
    +	// Represents the exit codes to check on container exits.
    +	// +optional
    +	// +oneOf=when
    +	ExitCodes *ContainerRestartRuleOnExitCodes `json:"exitCodes,omitempty" proto:"bytes,2,opt,name=exitCodes" protobuf:"bytes,2,opt,name=exitCodes"`
    +}
    +
    +// ContainerRestartRuleAction describes the action to take when the
    +// container exits.
    +type ContainerRestartRuleAction string
    +
    +// The only valid action is Restart.
    +const (
    +	ContainerRestartRuleActionRestart ContainerRestartRuleAction = "Restart"
    +)
    +
    +// ContainerRestartRuleOnExitCodes describes the condition
    +// for handling an exited container based on its exit codes.
    +type ContainerRestartRuleOnExitCodes struct {
    +	// Represents the relationship between the container exit code(s) and the
    +	// specified values. Possible values are:
    +	// - In: the requirement is satisfied if the container exit code is in the
    +	//   set of specified values.
    +	// - NotIn: the requirement is satisfied if the container exit code is
    +	//   not in the set of specified values.
    +	// +required
    +	Operator ContainerRestartRuleOnExitCodesOperator `json:"operator,omitempty" proto:"bytes,1,opt,name=operator" protobuf:"bytes,1,opt,name=operator,casttype=ContainerRestartRuleOnExitCodesOperator"`
    +
    +	// Specifies the set of values to check for container exit codes.
    +	// At most 255 elements are allowed.
    +	// +optional
    +	// +listType=set
    +	Values []int32 `json:"values,omitempty" proto:"varint,2,rep,name=values" protobuf:"varint,2,rep,name=values"`
    +}
    +
    +// ContainerRestartRuleOnExitCodesOperator describes the operator
    +// to take for the exit codes.
    +type ContainerRestartRuleOnExitCodesOperator string
    +
    +const (
    +	ContainerRestartRuleOnExitCodesOpIn    ContainerRestartRuleOnExitCodesOperator = "In"
    +	ContainerRestartRuleOnExitCodesOpNotIn ContainerRestartRuleOnExitCodesOperator = "NotIn"
     )
     
     // DNSPolicy defines how a pod's DNS will be configured.
    @@ -3571,8 +3884,8 @@ type PodAntiAffinity struct {
     	// most preferred is the one with the greatest sum of weights, i.e.
     	// for each node that meets all of the scheduling requirements (resource
     	// request, requiredDuringScheduling anti-affinity expressions, etc.),
    -	// compute a sum by iterating through the elements of this field and adding
    -	// "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the
    +	// compute a sum by iterating through the elements of this field and subtracting
    +	// "weight" from the sum if the node has pods which matches the corresponding podAffinityTerm; the
     	// node(s) with the highest sum are the most preferred.
     	// +optional
     	// +listType=atomic
    @@ -3627,7 +3940,6 @@ type PodAffinityTerm struct {
     	// pod labels will be ignored. The default value is empty.
     	// The same key is forbidden to exist in both matchLabelKeys and labelSelector.
     	// Also, matchLabelKeys cannot be set when labelSelector isn't set.
    -	// This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
     	//
     	// +listType=atomic
     	// +optional
    @@ -3640,7 +3952,6 @@ type PodAffinityTerm struct {
     	// pod labels will be ignored. The default value is empty.
     	// The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
     	// Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
    -	// This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
     	//
     	// +listType=atomic
     	// +optional
    @@ -3701,7 +4012,6 @@ type Taint struct {
     	// Valid effects are NoSchedule, PreferNoSchedule and NoExecute.
     	Effect TaintEffect `json:"effect" protobuf:"bytes,3,opt,name=effect,casttype=TaintEffect"`
     	// TimeAdded represents the time at which the taint was added.
    -	// It is only written for NoExecute taints.
     	// +optional
     	TimeAdded *metav1.Time `json:"timeAdded,omitempty" protobuf:"bytes,4,opt,name=timeAdded"`
     }
    @@ -3792,7 +4102,7 @@ type PodSpec struct {
     	// Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes.
     	// The resourceRequirements of an init container are taken into account during scheduling
     	// by finding the highest request/limit for each resource type, and then using the max of
    -	// of that value or the sum of the normal containers. Limits are applied to init containers
    +	// that value or the sum of the normal containers. Limits are applied to init containers
     	// in a similar fashion.
     	// Init containers cannot currently be added or removed.
     	// Cannot be updated.
    @@ -3878,7 +4188,9 @@ type PodSpec struct {
     	// +optional
     	NodeName string `json:"nodeName,omitempty" protobuf:"bytes,10,opt,name=nodeName"`
     	// Host networking requested for this pod. Use the host's network namespace.
    -	// If this option is set, the ports that will be used must be specified.
    +	// When using HostNetwork you should specify ports so the scheduler is aware.
    +	// When `hostNetwork` is true, specified `hostPort` fields in port definitions must match `containerPort`,
    +	// and unspecified `hostPort` fields in port definitions are defaulted to match `containerPort`.
     	// Default to false.
     	// +k8s:conversion-gen=false
     	// +optional
    @@ -4021,6 +4333,7 @@ type PodSpec struct {
     	// - spec.hostPID
     	// - spec.hostIPC
     	// - spec.hostUsers
    +	// - spec.resources
     	// - spec.securityContext.appArmorProfile
     	// - spec.securityContext.seLinuxOptions
     	// - spec.securityContext.seccompProfile
    @@ -4089,7 +4402,7 @@ type PodSpec struct {
     	ResourceClaims []PodResourceClaim `json:"resourceClaims,omitempty" patchStrategy:"merge,retainKeys" patchMergeKey:"name" protobuf:"bytes,39,rep,name=resourceClaims"`
     	// Resources is the total amount of CPU and Memory resources required by all
     	// containers in the pod. It supports specifying Requests and Limits for
    -	// "cpu" and "memory" resource names only. ResourceClaims are not supported.
    +	// "cpu", "memory" and "hugepages-" resource names only. ResourceClaims are not supported.
     	//
     	// This field enables fine-grained control over resource allocation for the
     	// entire pod, allowing resource sharing among containers in a pod.
    @@ -4101,6 +4414,20 @@ type PodSpec struct {
     	// +featureGate=PodLevelResources
     	// +optional
     	Resources *ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,40,opt,name=resources"`
    +	// HostnameOverride specifies an explicit override for the pod's hostname as perceived by the pod.
    +	// This field only specifies the pod's hostname and does not affect its DNS records.
    +	// When this field is set to a non-empty string:
    +	// - It takes precedence over the values set in `hostname` and `subdomain`.
    +	// - The Pod's hostname will be set to this value.
    +	// - `setHostnameAsFQDN` must be nil or set to false.
    +	// - `hostNetwork` must be set to false.
    +	//
    +	// This field must be a valid DNS subdomain as defined in RFC 1123 and contain at most 64 characters.
    +	// Requires the HostnameOverride feature gate to be enabled.
    +	//
    +	// +featureGate=HostnameOverride
    +	// +optional
    +	HostnameOverride *string `json:"hostnameOverride,omitempty" protobuf:"bytes,41,opt,name=hostnameOverride"`
     }
     
     // PodResourceClaim references exactly one ResourceClaim, either directly
    @@ -4162,6 +4489,31 @@ type PodResourceClaimStatus struct {
     	ResourceClaimName *string `json:"resourceClaimName,omitempty" protobuf:"bytes,2,opt,name=resourceClaimName"`
     }
     
    +// PodExtendedResourceClaimStatus is stored in the PodStatus for the extended
    +// resource requests backed by DRA. It stores the generated name for
    +// the corresponding special ResourceClaim created by the scheduler.
    +type PodExtendedResourceClaimStatus struct {
    +	// RequestMappings identifies the mapping of <container, extended resource backed by DRA> to  device request
    +	// in the generated ResourceClaim.
    +	// +listType=atomic
    +	RequestMappings []ContainerExtendedResourceRequest `json:"requestMappings" protobuf:"bytes,1,rep,name=requestMappings"`
    +
    +	// ResourceClaimName is the name of the ResourceClaim that was
    +	// generated for the Pod in the namespace of the Pod.
    +	ResourceClaimName string `json:"resourceClaimName" protobuf:"bytes,2,name=resourceClaimName"`
    +}
    +
    +// ContainerExtendedResourceRequest has the mapping of container name,
    +// extended resource name to the device request name.
    +type ContainerExtendedResourceRequest struct {
    +	// The name of the container requesting resources.
    +	ContainerName string `json:"containerName" protobuf:"bytes,1,name=containerName"`
    +	// The name of the extended resource in that container which gets backed by DRA.
    +	ResourceName string `json:"resourceName" protobuf:"bytes,2,name=resourceName"`
    +	// The name of the request in the special ResourceClaim which corresponds to the extended resource.
    +	RequestName string `json:"requestName" protobuf:"bytes,3,name=requestName"`
    +}
    +
     // OSName is the set of OS'es that can be used in OS.
     type OSName string
     
    @@ -4301,7 +4653,6 @@ type TopologySpreadConstraint struct {
     	// - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations.
     	//
     	// If this value is nil, the behavior is equivalent to the Honor policy.
    -	// This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.
     	// +optional
     	NodeAffinityPolicy *NodeInclusionPolicy `json:"nodeAffinityPolicy,omitempty" protobuf:"bytes,6,opt,name=nodeAffinityPolicy"`
     	// NodeTaintsPolicy indicates how we will treat node taints when calculating
    @@ -4311,7 +4662,6 @@ type TopologySpreadConstraint struct {
     	// - Ignore: node taints are ignored. All nodes are included.
     	//
     	// If this value is nil, the behavior is equivalent to the Ignore policy.
    -	// This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.
     	// +optional
     	NodeTaintsPolicy *NodeInclusionPolicy `json:"nodeTaintsPolicy,omitempty" protobuf:"bytes,7,opt,name=nodeTaintsPolicy"`
     	// MatchLabelKeys is a set of pod label keys to select the pods over which
    @@ -4696,8 +5046,8 @@ type EphemeralContainerCommon struct {
     	// +listMapKey=protocol
     	Ports []ContainerPort `json:"ports,omitempty" patchStrategy:"merge" patchMergeKey:"containerPort" protobuf:"bytes,6,rep,name=ports"`
     	// List of sources to populate environment variables in the container.
    -	// The keys defined within a source must be a C_IDENTIFIER. All invalid keys
    -	// will be reported as an event when the container is starting. When a key exists in multiple
    +	// The keys defined within a source may consist of any printable ASCII characters except '='.
    +	// When a key exists in multiple
     	// sources, the value associated with the last source will take precedence.
     	// Values defined by an Env with a duplicate key will take precedence.
     	// Cannot be updated.
    @@ -4723,11 +5073,17 @@ type EphemeralContainerCommon struct {
     	ResizePolicy []ContainerResizePolicy `json:"resizePolicy,omitempty" protobuf:"bytes,23,rep,name=resizePolicy"`
     	// Restart policy for the container to manage the restart behavior of each
     	// container within a pod.
    -	// This may only be set for init containers. You cannot set this field on
    -	// ephemeral containers.
    +	// You cannot set this field on ephemeral containers.
     	// +featureGate=SidecarContainers
     	// +optional
     	RestartPolicy *ContainerRestartPolicy `json:"restartPolicy,omitempty" protobuf:"bytes,24,opt,name=restartPolicy,casttype=ContainerRestartPolicy"`
    +	// Represents a list of rules to be checked to determine if the
    +	// container should be restarted on exit. You cannot set this field on
    +	// ephemeral containers.
    +	// +featureGate=ContainerRestartRules
    +	// +optional
    +	// +listType=atomic
    +	RestartPolicyRules []ContainerRestartRule `json:"restartPolicyRules,omitempty" protobuf:"bytes,25,rep,name=restartPolicyRules"`
     	// Pod volumes to mount into the container's filesystem. Subpath mounts are not allowed for ephemeral containers.
     	// Cannot be updated.
     	// +optional
    @@ -4841,6 +5197,11 @@ type EphemeralContainer struct {
     // state of a system, especially if the node that hosts the pod cannot contact the control
     // plane.
     type PodStatus struct {
    +	// If set, this represents the .metadata.generation that the pod status was set based upon.
    +	// This is an alpha field. Enable PodObservedGenerationTracking to be able to use this field.
    +	// +featureGate=PodObservedGenerationTracking
    +	// +optional
    +	ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,17,opt,name=observedGeneration"`
     	// The phase of a Pod is a simple, high-level summary of where the Pod is in its lifecycle.
     	// The conditions array, the reason and message fields, and the individual container status
     	// arrays contain more detail about the pod's status.
    @@ -4968,6 +5329,9 @@ type PodStatus struct {
     	// Status of resources resize desired for pod's containers.
     	// It is empty if no resources resize is pending.
     	// Any changes to container resources will automatically set this to "Proposed"
    +	// Deprecated: Resize status is moved to two pod conditions PodResizePending and PodResizeInProgress.
    +	// PodResizePending will track states where the spec has been resized, but the Kubelet has not yet allocated the resources.
    +	// PodResizeInProgress will track in-progress resizes, and should be present whenever allocated resources != acknowledged resources.
     	// +featureGate=InPlacePodVerticalScaling
     	// +optional
     	Resize PodResizeStatus `json:"resize,omitempty" protobuf:"bytes,14,opt,name=resize,casttype=PodResizeStatus"`
    @@ -4980,6 +5344,10 @@ type PodStatus struct {
     	// +featureGate=DynamicResourceAllocation
     	// +optional
     	ResourceClaimStatuses []PodResourceClaimStatus `json:"resourceClaimStatuses,omitempty" patchStrategy:"merge,retainKeys" patchMergeKey:"name" protobuf:"bytes,15,rep,name=resourceClaimStatuses"`
    +	// Status of extended resource claim backed by DRA.
    +	// +featureGate=DRAExtendedResource
    +	// +optional
    +	ExtendedResourceClaimStatus *PodExtendedResourceClaimStatus `json:"extendedResourceClaimStatus,omitempty" protobuf:"bytes,18,opt,name=extendedResourceClaimStatus"`
     }
     
     // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    @@ -5099,12 +5467,18 @@ type ReplicationControllerSpec struct {
     	// Defaults to 1.
     	// More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#what-is-a-replicationcontroller
     	// +optional
    +	// +k8s:optional
    +	// +default=1
    +	// +k8s:minimum=0
     	Replicas *int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"`
     
     	// Minimum number of seconds for which a newly created pod should be ready
     	// without any of its container crashing, for it to be considered available.
     	// Defaults to 0 (pod will be considered available as soon as it is ready)
     	// +optional
    +	// +k8s:optional
    +	// +default=0
    +	// +k8s:minimum=0
     	MinReadySeconds int32 `json:"minReadySeconds,omitempty" protobuf:"varint,4,opt,name=minReadySeconds"`
     
     	// Selector is a label query over pods that should match the Replicas count.
    @@ -5194,6 +5568,7 @@ type ReplicationControllerCondition struct {
     // +genclient:method=UpdateScale,verb=update,subresource=scale,input=k8s.io/api/autoscaling/v1.Scale,result=k8s.io/api/autoscaling/v1.Scale
     // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
     // +k8s:prerelease-lifecycle-gen:introduced=1.0
    +// +k8s:supportsSubresource=/scale
     
     // ReplicationController represents the configuration of a replication controller.
     type ReplicationController struct {
    @@ -5334,14 +5709,27 @@ const (
     
     // These are valid values for the TrafficDistribution field of a Service.
     const (
    -	// Indicates a preference for routing traffic to endpoints that are
    -	// topologically proximate to the client. The interpretation of "topologically
    -	// proximate" may vary across implementations and could encompass endpoints
    -	// within the same node, rack, zone, or even region. Setting this value gives
    -	// implementations permission to make different tradeoffs, e.g. optimizing for
    -	// proximity rather than equal distribution of load. Users should not set this
    -	// value if such tradeoffs are not acceptable.
    +	// Indicates a preference for routing traffic to endpoints that are in the same
    +	// zone as the client. Users should not set this value unless they have ensured
    +	// that clients and endpoints are distributed in such a way that the "same zone"
    +	// preference will not result in endpoints getting overloaded.
     	ServiceTrafficDistributionPreferClose = "PreferClose"
    +
    +	// Indicates a preference for routing traffic to endpoints that are in the same
    +	// zone as the client. Users should not set this value unless they have ensured
    +	// that clients and endpoints are distributed in such a way that the "same zone"
    +	// preference will not result in endpoints getting overloaded.
    +	// This is an alias for "PreferClose", but it is an Alpha feature and is only
    +	// recognized if the PreferSameTrafficDistribution feature gate is enabled.
    +	ServiceTrafficDistributionPreferSameZone = "PreferSameZone"
    +
    +	// Indicates a preference for routing traffic to endpoints that are on the same
    +	// node as the client. Users should not set this value unless they have ensured
    +	// that clients and endpoints are distributed in such a way that the "same node"
    +	// preference will not result in endpoints getting overloaded.
    +	// This is an Alpha feature and is only recognized if the
    +	// PreferSameTrafficDistribution feature gate is enabled.
    +	ServiceTrafficDistributionPreferSameNode = "PreferSameNode"
     )
     
     // These are the valid conditions of a service.
    @@ -5689,13 +6077,12 @@ type ServiceSpec struct {
     	// +optional
     	InternalTrafficPolicy *ServiceInternalTrafficPolicy `json:"internalTrafficPolicy,omitempty" protobuf:"bytes,22,opt,name=internalTrafficPolicy"`
     
    -	// TrafficDistribution offers a way to express preferences for how traffic is
    -	// distributed to Service endpoints. Implementations can use this field as a
    -	// hint, but are not required to guarantee strict adherence. If the field is
    -	// not set, the implementation will apply its default routing strategy. If set
    -	// to "PreferClose", implementations should prioritize endpoints that are
    -	// topologically close (e.g., same zone).
    -	// This is a beta field and requires enabling ServiceTrafficDistribution feature.
    +	// TrafficDistribution offers a way to express preferences for how traffic
    +	// is distributed to Service endpoints. Implementations can use this field
    +	// as a hint, but are not required to guarantee strict adherence. If the
    +	// field is not set, the implementation will apply its default routing
    +	// strategy. If set to "PreferClose", implementations should prioritize
    +	// endpoints that are in the same zone.
     	// +featureGate=ServiceTrafficDistribution
     	// +optional
     	TrafficDistribution *string `json:"trafficDistribution,omitempty" protobuf:"bytes,23,opt,name=trafficDistribution"`
    @@ -5888,6 +6275,11 @@ type ServiceAccountList struct {
     //	     Ports: [{"name": "a", "port": 93}, {"name": "b", "port": 76}]
     //	   },
     //	]
    +//
    +// Endpoints is a legacy API and does not contain information about all Service features.
    +// Use discoveryv1.EndpointSlice for complete information about Service endpoints.
    +//
    +// Deprecated: This API is deprecated in v1.33+. Use discoveryv1.EndpointSlice.
     type Endpoints struct {
     	metav1.TypeMeta `json:",inline"`
     	// Standard object's metadata.
    @@ -5920,6 +6312,8 @@ type Endpoints struct {
     //
     //	a: [ 10.10.1.1:8675, 10.10.2.2:8675 ],
     //	b: [ 10.10.1.1:309, 10.10.2.2:309 ]
    +//
    +// Deprecated: This API is deprecated in v1.33+.
     type EndpointSubset struct {
     	// IP addresses which offer the related ports that are marked as ready. These endpoints
     	// should be considered safe for load balancers and clients to utilize.
    @@ -5939,6 +6333,7 @@ type EndpointSubset struct {
     }
     
     // EndpointAddress is a tuple that describes single IP address.
    +// Deprecated: This API is deprecated in v1.33+.
     // +structType=atomic
     type EndpointAddress struct {
     	// The IP of this endpoint.
    @@ -5957,6 +6352,7 @@ type EndpointAddress struct {
     }
     
     // EndpointPort is a tuple that describes a single port.
    +// Deprecated: This API is deprecated in v1.33+.
     // +structType=atomic
     type EndpointPort struct {
     	// The name of this port.  This must match the 'name' field in the
    @@ -5998,6 +6394,7 @@ type EndpointPort struct {
     // +k8s:prerelease-lifecycle-gen:introduced=1.0
     
     // EndpointsList is a list of endpoints.
    +// Deprecated: This API is deprecated in v1.33+.
     type EndpointsList struct {
     	metav1.TypeMeta `json:",inline"`
     	// Standard list metadata.
    @@ -6166,6 +6563,15 @@ type NodeSystemInfo struct {
     	OperatingSystem string `json:"operatingSystem" protobuf:"bytes,9,opt,name=operatingSystem"`
     	// The Architecture reported by the node
     	Architecture string `json:"architecture" protobuf:"bytes,10,opt,name=architecture"`
    +	// Swap Info reported by the node.
    +	Swap *NodeSwapStatus `json:"swap,omitempty" protobuf:"bytes,11,opt,name=swap"`
    +}
    +
    +// NodeSwapStatus represents swap memory information.
    +type NodeSwapStatus struct {
    +	// Total amount of swap memory in bytes.
    +	// +optional
    +	Capacity *int64 `json:"capacity,omitempty" protobuf:"varint,1,opt,name=capacity"`
     }
     
     // NodeConfigStatus describes the status of the config assigned by Node.Spec.ConfigSource.
    @@ -7267,6 +7673,9 @@ const (
     	ResourceQuotaScopePriorityClass ResourceQuotaScope = "PriorityClass"
     	// Match all pod objects that have cross-namespace pod (anti)affinity mentioned.
     	ResourceQuotaScopeCrossNamespacePodAffinity ResourceQuotaScope = "CrossNamespacePodAffinity"
    +
    +	// Match all pvc objects that have volume attributes class mentioned.
    +	ResourceQuotaScopeVolumeAttributesClass ResourceQuotaScope = "VolumeAttributesClass"
     )
     
     // ResourceQuotaSpec defines the desired hard limits to enforce for Quota.
    diff --git a/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go
    index 89ce3d230..120430766 100644
    --- a/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go
    +++ b/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go
    @@ -356,11 +356,12 @@ var map_Container = map[string]string{
     	"args":                     "Arguments to the entrypoint. The container image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell",
     	"workingDir":               "Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated.",
     	"ports":                    "List of ports to expose from the container. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default \"0.0.0.0\" address inside a container will be accessible from the network. Modifying this array with strategic merge patch may corrupt the data. For more information See https://github.com/kubernetes/kubernetes/issues/108255. Cannot be updated.",
    -	"envFrom":                  "List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated.",
    +	"envFrom":                  "List of sources to populate environment variables in the container. The keys defined within a source may consist of any printable ASCII characters except '='. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated.",
     	"env":                      "List of environment variables to set in the container. Cannot be updated.",
     	"resources":                "Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/",
     	"resizePolicy":             "Resources resize policy for the container.",
    -	"restartPolicy":            "RestartPolicy defines the restart behavior of individual containers in a pod. This field may only be set for init containers, and the only allowed value is \"Always\". For non-init containers or when this field is not specified, the restart behavior is defined by the Pod's restart policy and the container type. Setting the RestartPolicy as \"Always\" for the init container will have the following effect: this init container will be continually restarted on exit until all regular containers have terminated. Once all regular containers have completed, all init containers with restartPolicy \"Always\" will be shut down. This lifecycle differs from normal init containers and is often referred to as a \"sidecar\" container. Although this init container still starts in the init container sequence, it does not wait for the container to complete before proceeding to the next init container. Instead, the next init container starts immediately after this init container is started, or after any startupProbe has successfully completed.",
    +	"restartPolicy":            "RestartPolicy defines the restart behavior of individual containers in a pod. This overrides the pod-level restart policy. When this field is not specified, the restart behavior is defined by the Pod's restart policy and the container type. Additionally, setting the RestartPolicy as \"Always\" for the init container will have the following effect: this init container will be continually restarted on exit until all regular containers have terminated. Once all regular containers have completed, all init containers with restartPolicy \"Always\" will be shut down. This lifecycle differs from normal init containers and is often referred to as a \"sidecar\" container. Although this init container still starts in the init container sequence, it does not wait for the container to complete before proceeding to the next init container. Instead, the next init container starts immediately after this init container is started, or after any startupProbe has successfully completed.",
    +	"restartPolicyRules":       "Represents a list of rules to be checked to determine if the container should be restarted on exit. The rules are evaluated in order. Once a rule matches a container exit condition, the remaining rules are ignored. If no rule matches the container exit condition, the Container-level restart policy determines the whether the container is restarted or not. Constraints on the rules: - At most 20 rules are allowed. - Rules can have the same action. - Identical rules are not forbidden in validations. When rules are specified, container MUST set RestartPolicy explicitly even it if matches the Pod's RestartPolicy.",
     	"volumeMounts":             "Pod volumes to mount into the container's filesystem. Cannot be updated.",
     	"volumeDevices":            "volumeDevices is the list of block devices to be used by the container.",
     	"livenessProbe":            "Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes",
    @@ -380,6 +381,17 @@ func (Container) SwaggerDoc() map[string]string {
     	return map_Container
     }
     
    +var map_ContainerExtendedResourceRequest = map[string]string{
    +	"":              "ContainerExtendedResourceRequest has the mapping of container name, extended resource name to the device request name.",
    +	"containerName": "The name of the container requesting resources.",
    +	"resourceName":  "The name of the extended resource in that container which gets backed by DRA.",
    +	"requestName":   "The name of the request in the special ResourceClaim which corresponds to the extended resource.",
    +}
    +
    +func (ContainerExtendedResourceRequest) SwaggerDoc() map[string]string {
    +	return map_ContainerExtendedResourceRequest
    +}
    +
     var map_ContainerImage = map[string]string{
     	"":          "Describe a container image",
     	"names":     "Names by which this image is known. e.g. [\"kubernetes.example/hyperkube:v1.0.7\", \"cloud-vendor.registry.example/cloud-vendor/hyperkube:v1.0.7\"]",
    @@ -413,6 +425,26 @@ func (ContainerResizePolicy) SwaggerDoc() map[string]string {
     	return map_ContainerResizePolicy
     }
     
    +var map_ContainerRestartRule = map[string]string{
    +	"":          "ContainerRestartRule describes how a container exit is handled.",
    +	"action":    "Specifies the action taken on a container exit if the requirements are satisfied. The only possible value is \"Restart\" to restart the container.",
    +	"exitCodes": "Represents the exit codes to check on container exits.",
    +}
    +
    +func (ContainerRestartRule) SwaggerDoc() map[string]string {
    +	return map_ContainerRestartRule
    +}
    +
    +var map_ContainerRestartRuleOnExitCodes = map[string]string{
    +	"":         "ContainerRestartRuleOnExitCodes describes the condition for handling an exited container based on its exit codes.",
    +	"operator": "Represents the relationship between the container exit code(s) and the specified values. Possible values are: - In: the requirement is satisfied if the container exit code is in the\n  set of specified values.\n- NotIn: the requirement is satisfied if the container exit code is\n  not in the set of specified values.",
    +	"values":   "Specifies the set of values to check for container exit codes. At most 255 elements are allowed.",
    +}
    +
    +func (ContainerRestartRuleOnExitCodes) SwaggerDoc() map[string]string {
    +	return map_ContainerRestartRuleOnExitCodes
    +}
    +
     var map_ContainerState = map[string]string{
     	"":           "ContainerState holds a possible state of container. Only one of its members may be specified. If none of them is specified, the default one is ContainerStateWaiting.",
     	"waiting":    "Details about a waiting container",
    @@ -474,6 +506,7 @@ var map_ContainerStatus = map[string]string{
     	"volumeMounts":             "Status of volume mounts.",
     	"user":                     "User represents user identity information initially attached to the first process of the container",
     	"allocatedResourcesStatus": "AllocatedResourcesStatus represents the status of various resources allocated for this Pod.",
    +	"stopSignal":               "StopSignal reports the effective stop signal for this container",
     }
     
     func (ContainerStatus) SwaggerDoc() map[string]string {
    @@ -540,7 +573,7 @@ func (EmptyDirVolumeSource) SwaggerDoc() map[string]string {
     }
     
     var map_EndpointAddress = map[string]string{
    -	"":          "EndpointAddress is a tuple that describes single IP address.",
    +	"":          "EndpointAddress is a tuple that describes single IP address. Deprecated: This API is deprecated in v1.33+.",
     	"ip":        "The IP of this endpoint. May not be loopback (127.0.0.0/8 or ::1), link-local (169.254.0.0/16 or fe80::/10), or link-local multicast (224.0.0.0/24 or ff02::/16).",
     	"hostname":  "The Hostname of this endpoint",
     	"nodeName":  "Optional: Node hosting this endpoint. This can be used to determine endpoints local to a node.",
    @@ -552,7 +585,7 @@ func (EndpointAddress) SwaggerDoc() map[string]string {
     }
     
     var map_EndpointPort = map[string]string{
    -	"":            "EndpointPort is a tuple that describes a single port.",
    +	"":            "EndpointPort is a tuple that describes a single port. Deprecated: This API is deprecated in v1.33+.",
     	"name":        "The name of this port.  This must match the 'name' field in the corresponding ServicePort. Must be a DNS_LABEL. Optional only if one port is defined.",
     	"port":        "The port number of the endpoint.",
     	"protocol":    "The IP protocol for this port. Must be UDP, TCP, or SCTP. Default is TCP.",
    @@ -564,7 +597,7 @@ func (EndpointPort) SwaggerDoc() map[string]string {
     }
     
     var map_EndpointSubset = map[string]string{
    -	"":                  "EndpointSubset is a group of addresses with a common set of ports. The expanded set of endpoints is the Cartesian product of Addresses x Ports. For example, given:\n\n\t{\n\t  Addresses: [{\"ip\": \"10.10.1.1\"}, {\"ip\": \"10.10.2.2\"}],\n\t  Ports:     [{\"name\": \"a\", \"port\": 8675}, {\"name\": \"b\", \"port\": 309}]\n\t}\n\nThe resulting set of endpoints can be viewed as:\n\n\ta: [ 10.10.1.1:8675, 10.10.2.2:8675 ],\n\tb: [ 10.10.1.1:309, 10.10.2.2:309 ]",
    +	"":                  "EndpointSubset is a group of addresses with a common set of ports. The expanded set of endpoints is the Cartesian product of Addresses x Ports. For example, given:\n\n\t{\n\t  Addresses: [{\"ip\": \"10.10.1.1\"}, {\"ip\": \"10.10.2.2\"}],\n\t  Ports:     [{\"name\": \"a\", \"port\": 8675}, {\"name\": \"b\", \"port\": 309}]\n\t}\n\nThe resulting set of endpoints can be viewed as:\n\n\ta: [ 10.10.1.1:8675, 10.10.2.2:8675 ],\n\tb: [ 10.10.1.1:309, 10.10.2.2:309 ]\n\nDeprecated: This API is deprecated in v1.33+.",
     	"addresses":         "IP addresses which offer the related ports that are marked as ready. These endpoints should be considered safe for load balancers and clients to utilize.",
     	"notReadyAddresses": "IP addresses which offer the related ports but are not currently marked as ready because they have not yet finished starting, have recently failed a readiness check, or have recently failed a liveness check.",
     	"ports":             "Port numbers available on the related IP addresses.",
    @@ -575,7 +608,7 @@ func (EndpointSubset) SwaggerDoc() map[string]string {
     }
     
     var map_Endpoints = map[string]string{
    -	"":         "Endpoints is a collection of endpoints that implement the actual service. Example:\n\n\t Name: \"mysvc\",\n\t Subsets: [\n\t   {\n\t     Addresses: [{\"ip\": \"10.10.1.1\"}, {\"ip\": \"10.10.2.2\"}],\n\t     Ports: [{\"name\": \"a\", \"port\": 8675}, {\"name\": \"b\", \"port\": 309}]\n\t   },\n\t   {\n\t     Addresses: [{\"ip\": \"10.10.3.3\"}],\n\t     Ports: [{\"name\": \"a\", \"port\": 93}, {\"name\": \"b\", \"port\": 76}]\n\t   },\n\t]",
    +	"":         "Endpoints is a collection of endpoints that implement the actual service. Example:\n\n\t Name: \"mysvc\",\n\t Subsets: [\n\t   {\n\t     Addresses: [{\"ip\": \"10.10.1.1\"}, {\"ip\": \"10.10.2.2\"}],\n\t     Ports: [{\"name\": \"a\", \"port\": 8675}, {\"name\": \"b\", \"port\": 309}]\n\t   },\n\t   {\n\t     Addresses: [{\"ip\": \"10.10.3.3\"}],\n\t     Ports: [{\"name\": \"a\", \"port\": 93}, {\"name\": \"b\", \"port\": 76}]\n\t   },\n\t]\n\nEndpoints is a legacy API and does not contain information about all Service features. Use discoveryv1.EndpointSlice for complete information about Service endpoints.\n\nDeprecated: This API is deprecated in v1.33+. Use discoveryv1.EndpointSlice.",
     	"metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
     	"subsets":  "The set of all endpoints is the union of all subsets. Addresses are placed into subsets according to the IPs they share. A single address with multiple ports, some of which are ready and some of which are not (because they come from different containers) will result in the address being displayed in different subsets for the different ports. No address will appear in both Addresses and NotReadyAddresses in the same subset. Sets of addresses and ports that comprise a service.",
     }
    @@ -585,7 +618,7 @@ func (Endpoints) SwaggerDoc() map[string]string {
     }
     
     var map_EndpointsList = map[string]string{
    -	"":         "EndpointsList is a list of endpoints.",
    +	"":         "EndpointsList is a list of endpoints. Deprecated: This API is deprecated in v1.33+.",
     	"metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
     	"items":    "List of endpoints.",
     }
    @@ -595,8 +628,8 @@ func (EndpointsList) SwaggerDoc() map[string]string {
     }
     
     var map_EnvFromSource = map[string]string{
    -	"":             "EnvFromSource represents the source of a set of ConfigMaps",
    -	"prefix":       "An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER.",
    +	"":             "EnvFromSource represents the source of a set of ConfigMaps or Secrets",
    +	"prefix":       "Optional text to prepend to the name of each environment variable. May consist of any printable ASCII characters except '='.",
     	"configMapRef": "The ConfigMap to select from",
     	"secretRef":    "The Secret to select from",
     }
    @@ -607,7 +640,7 @@ func (EnvFromSource) SwaggerDoc() map[string]string {
     
     var map_EnvVar = map[string]string{
     	"":          "EnvVar represents an environment variable present in a Container.",
    -	"name":      "Name of the environment variable. Must be a C_IDENTIFIER.",
    +	"name":      "Name of the environment variable. May consist of any printable ASCII characters except '='.",
     	"value":     "Variable references $(VAR_NAME) are expanded using the previously defined environment variables in the container and any service environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Defaults to \"\".",
     	"valueFrom": "Source for the environment variable's value. Cannot be used if value is not empty.",
     }
    @@ -622,6 +655,7 @@ var map_EnvVarSource = map[string]string{
     	"resourceFieldRef": "Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.",
     	"configMapKeyRef":  "Selects a key of a ConfigMap.",
     	"secretKeyRef":     "Selects a key of a secret in the pod's namespace",
    +	"fileKeyRef":       "FileKeyRef selects a key of the env file. Requires the EnvFiles feature gate to be enabled.",
     }
     
     func (EnvVarSource) SwaggerDoc() map[string]string {
    @@ -645,11 +679,12 @@ var map_EphemeralContainerCommon = map[string]string{
     	"args":                     "Arguments to the entrypoint. The image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell",
     	"workingDir":               "Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated.",
     	"ports":                    "Ports are not allowed for ephemeral containers.",
    -	"envFrom":                  "List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated.",
    +	"envFrom":                  "List of sources to populate environment variables in the container. The keys defined within a source may consist of any printable ASCII characters except '='. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated.",
     	"env":                      "List of environment variables to set in the container. Cannot be updated.",
     	"resources":                "Resources are not allowed for ephemeral containers. Ephemeral containers use spare resources already allocated to the pod.",
     	"resizePolicy":             "Resources resize policy for the container.",
    -	"restartPolicy":            "Restart policy for the container to manage the restart behavior of each container within a pod. This may only be set for init containers. You cannot set this field on ephemeral containers.",
    +	"restartPolicy":            "Restart policy for the container to manage the restart behavior of each container within a pod. You cannot set this field on ephemeral containers.",
    +	"restartPolicyRules":       "Represents a list of rules to be checked to determine if the container should be restarted on exit. You cannot set this field on ephemeral containers.",
     	"volumeMounts":             "Pod volumes to mount into the container's filesystem. Subpath mounts are not allowed for ephemeral containers. Cannot be updated.",
     	"volumeDevices":            "volumeDevices is the list of block devices to be used by the container.",
     	"livenessProbe":            "Probes are not allowed for ephemeral containers.",
    @@ -753,6 +788,18 @@ func (FCVolumeSource) SwaggerDoc() map[string]string {
     	return map_FCVolumeSource
     }
     
    +var map_FileKeySelector = map[string]string{
    +	"":           "FileKeySelector selects a key of the env file.",
    +	"volumeName": "The name of the volume mount containing the env file.",
    +	"path":       "The path within the volume from which to select the file. Must be relative and may not contain the '..' path or start with '..'.",
    +	"key":        "The key within the env file. An invalid key will prevent the pod from starting. The keys defined within a source may consist of any printable ASCII characters except '='. During Alpha stage of the EnvFiles feature gate, the key size is limited to 128 characters.",
    +	"optional":   "Specify whether the file or its key must be defined. If the file or key does not exist, then the env var is not published. If optional is set to true and the specified key does not exist, the environment variable will not be set in the Pod's containers.\n\nIf optional is set to false and the specified key does not exist, an error will be returned during Pod creation.",
    +}
    +
    +func (FileKeySelector) SwaggerDoc() map[string]string {
    +	return map_FileKeySelector
    +}
    +
     var map_FlexPersistentVolumeSource = map[string]string{
     	"":          "FlexPersistentVolumeSource represents a generic persistent volume resource that is provisioned/attached using an exec based plugin.",
     	"driver":    "driver is the name of the driver to use for this volume.",
    @@ -836,7 +883,7 @@ func (GlusterfsPersistentVolumeSource) SwaggerDoc() map[string]string {
     
     var map_GlusterfsVolumeSource = map[string]string{
     	"":          "Represents a Glusterfs mount that lasts the lifetime of a pod. Glusterfs volumes do not support ownership management or SELinux relabeling.",
    -	"endpoints": "endpoints is the endpoint name that details Glusterfs topology. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod",
    +	"endpoints": "endpoints is the endpoint name that details Glusterfs topology.",
     	"path":      "path is the Glusterfs volume path. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod",
     	"readOnly":  "readOnly here will force the Glusterfs volume to be mounted with read-only permissions. Defaults to false. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod",
     }
    @@ -957,9 +1004,10 @@ func (KeyToPath) SwaggerDoc() map[string]string {
     }
     
     var map_Lifecycle = map[string]string{
    -	"":          "Lifecycle describes actions that the management system should take in response to container lifecycle events. For the PostStart and PreStop lifecycle handlers, management of the container blocks until the action is complete, unless the container process fails, in which case the handler is aborted.",
    -	"postStart": "PostStart is called immediately after a container is created. If the handler fails, the container is terminated and restarted according to its restart policy. Other management of the container blocks until the hook completes. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks",
    -	"preStop":   "PreStop is called immediately before a container is terminated due to an API request or management event such as liveness/startup probe failure, preemption, resource contention, etc. The handler is not called if the container crashes or exits. The Pod's termination grace period countdown begins before the PreStop hook is executed. Regardless of the outcome of the handler, the container will eventually terminate within the Pod's termination grace period (unless delayed by finalizers). Other management of the container blocks until the hook completes or until the termination grace period is reached. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks",
    +	"":           "Lifecycle describes actions that the management system should take in response to container lifecycle events. For the PostStart and PreStop lifecycle handlers, management of the container blocks until the action is complete, unless the container process fails, in which case the handler is aborted.",
    +	"postStart":  "PostStart is called immediately after a container is created. If the handler fails, the container is terminated and restarted according to its restart policy. Other management of the container blocks until the hook completes. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks",
    +	"preStop":    "PreStop is called immediately before a container is terminated due to an API request or management event such as liveness/startup probe failure, preemption, resource contention, etc. The handler is not called if the container crashes or exits. The Pod's termination grace period countdown begins before the PreStop hook is executed. Regardless of the outcome of the handler, the container will eventually terminate within the Pod's termination grace period (unless delayed by finalizers). Other management of the container blocks until the hook completes or until the termination grace period is reached. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks",
    +	"stopSignal": "StopSignal defines which signal will be sent to a container when it is being stopped. If not specified, the default is defined by the container runtime in use. StopSignal can only be set for Pods with a non-empty .spec.os.name",
     }
     
     func (Lifecycle) SwaggerDoc() map[string]string {
    @@ -1335,6 +1383,15 @@ func (NodeStatus) SwaggerDoc() map[string]string {
     	return map_NodeStatus
     }
     
    +var map_NodeSwapStatus = map[string]string{
    +	"":         "NodeSwapStatus represents swap memory information.",
    +	"capacity": "Total amount of swap memory in bytes.",
    +}
    +
    +func (NodeSwapStatus) SwaggerDoc() map[string]string {
    +	return map_NodeSwapStatus
    +}
    +
     var map_NodeSystemInfo = map[string]string{
     	"":                        "NodeSystemInfo is a set of ids/uuids to uniquely identify the node.",
     	"machineID":               "MachineID reported by the node. For unique machine identification in the cluster this field is preferred. Learn more from man(5) machine-id: http://man7.org/linux/man-pages/man5/machine-id.5.html",
    @@ -1347,6 +1404,7 @@ var map_NodeSystemInfo = map[string]string{
     	"kubeProxyVersion":        "Deprecated: KubeProxy Version reported by the node.",
     	"operatingSystem":         "The Operating System reported by the node",
     	"architecture":            "The Architecture reported by the node",
    +	"swap":                    "Swap Info reported by the node.",
     }
     
     func (NodeSystemInfo) SwaggerDoc() map[string]string {
    @@ -1434,7 +1492,7 @@ var map_PersistentVolumeClaimSpec = map[string]string{
     	"volumeMode":                "volumeMode defines what type of volume is required by the claim. Value of Filesystem is implied when not included in claim spec.",
     	"dataSource":                "dataSource field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. If the namespace is specified, then dataSourceRef will not be copied to dataSource.",
     	"dataSourceRef":             "dataSourceRef specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. This field will replace the functionality of the dataSource field and as such if both fields are non-empty, they must have the same value. For backwards compatibility, when namespace isn't specified in dataSourceRef, both fields (dataSource and dataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. When namespace is specified in dataSourceRef, dataSource isn't set to the same value and must be empty. There are three important differences between dataSource and dataSourceRef: * While dataSource only allows two specific types of objects, dataSourceRef\n  allows any non-core object, as well as PersistentVolumeClaim objects.\n* While dataSource ignores disallowed values (dropping them), dataSourceRef\n  preserves all values, and generates an error if a disallowed value is\n  specified.\n* While dataSource only allows local objects, dataSourceRef allows objects\n  in any namespaces.\n(Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled.",
    -	"volumeAttributesClassName": "volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. If specified, the CSI driver will create or update the volume with the attributes defined in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass will be applied to the claim but it's not allowed to reset this field to empty string once it is set. If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass will be set by the persistentvolume controller if it exists. If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource exists. More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default).",
    +	"volumeAttributesClassName": "volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. If specified, the CSI driver will create or update the volume with the attributes defined in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, it can be changed after the claim is created. An empty string or nil value indicates that no VolumeAttributesClass will be applied to the claim. If the claim enters an Infeasible error state, this field can be reset to its previous value (including nil) to cancel the modification. If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource exists. More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/",
     }
     
     func (PersistentVolumeClaimSpec) SwaggerDoc() map[string]string {
    @@ -1449,8 +1507,8 @@ var map_PersistentVolumeClaimStatus = map[string]string{
     	"conditions":                       "conditions is the current Condition of persistent volume claim. If underlying persistent volume is being resized then the Condition will be set to 'Resizing'.",
     	"allocatedResources":               "allocatedResources tracks the resources allocated to a PVC including its capacity. Key names follow standard Kubernetes label syntax. Valid values are either:\n\t* Un-prefixed keys:\n\t\t- storage - the capacity of the volume.\n\t* Custom resources must use implementation-defined prefixed names such as \"example.com/my-custom-resource\"\nApart from above values - keys that are unprefixed or have kubernetes.io prefix are considered reserved and hence may not be used.\n\nCapacity reported here may be larger than the actual capacity when a volume expansion operation is requested. For storage quota, the larger value from allocatedResources and PVC.spec.resources is used. If allocatedResources is not set, PVC.spec.resources alone is used for quota calculation. If a volume expansion capacity request is lowered, allocatedResources is only lowered if there are no expansion operations in progress and if the actual volume capacity is equal or lower than the requested capacity.\n\nA controller that receives PVC update with previously unknown resourceName should ignore the update for the purpose it was designed. For example - a controller that only is responsible for resizing capacity of the volume, should ignore PVC updates that change other valid resources associated with PVC.\n\nThis is an alpha field and requires enabling RecoverVolumeExpansionFailure feature.",
     	"allocatedResourceStatuses":        "allocatedResourceStatuses stores status of resource being resized for the given PVC. Key names follow standard Kubernetes label syntax. Valid values are either:\n\t* Un-prefixed keys:\n\t\t- storage - the capacity of the volume.\n\t* Custom resources must use implementation-defined prefixed names such as \"example.com/my-custom-resource\"\nApart from above values - keys that are unprefixed or have kubernetes.io prefix are considered reserved and hence may not be used.\n\nClaimResourceStatus can be in any of following states:\n\t- ControllerResizeInProgress:\n\t\tState set when resize controller starts resizing the volume in control-plane.\n\t- ControllerResizeFailed:\n\t\tState set when resize has failed in resize controller with a terminal error.\n\t- NodeResizePending:\n\t\tState set when resize controller has finished resizing the volume but further resizing of\n\t\tvolume is needed on the node.\n\t- NodeResizeInProgress:\n\t\tState set when kubelet starts resizing the volume.\n\t- NodeResizeFailed:\n\t\tState set when resizing has failed in kubelet with a terminal error. Transient errors don't set\n\t\tNodeResizeFailed.\nFor example: if expanding a PVC for more capacity - this field can be one of the following states:\n\t- pvc.status.allocatedResourceStatus['storage'] = \"ControllerResizeInProgress\"\n     - pvc.status.allocatedResourceStatus['storage'] = \"ControllerResizeFailed\"\n     - pvc.status.allocatedResourceStatus['storage'] = \"NodeResizePending\"\n     - pvc.status.allocatedResourceStatus['storage'] = \"NodeResizeInProgress\"\n     - pvc.status.allocatedResourceStatus['storage'] = \"NodeResizeFailed\"\nWhen this field is not set, it means that no resize operation is in progress for the given PVC.\n\nA controller that receives PVC update with previously unknown resourceName or ClaimResourceStatus should ignore the update for the purpose it was designed. 
For example - a controller that only is responsible for resizing capacity of the volume, should ignore PVC updates that change other valid resources associated with PVC.\n\nThis is an alpha field and requires enabling RecoverVolumeExpansionFailure feature.",
    -	"currentVolumeAttributesClassName": "currentVolumeAttributesClassName is the current name of the VolumeAttributesClass the PVC is using. When unset, there is no VolumeAttributeClass applied to this PersistentVolumeClaim This is a beta field and requires enabling VolumeAttributesClass feature (off by default).",
    -	"modifyVolumeStatus":               "ModifyVolumeStatus represents the status object of ControllerModifyVolume operation. When this is unset, there is no ModifyVolume operation being attempted. This is a beta field and requires enabling VolumeAttributesClass feature (off by default).",
    +	"currentVolumeAttributesClassName": "currentVolumeAttributesClassName is the current name of the VolumeAttributesClass the PVC is using. When unset, there is no VolumeAttributeClass applied to this PersistentVolumeClaim",
    +	"modifyVolumeStatus":               "ModifyVolumeStatus represents the status object of ControllerModifyVolume operation. When this is unset, there is no ModifyVolume operation being attempted.",
     }
     
     func (PersistentVolumeClaimStatus) SwaggerDoc() map[string]string {
    @@ -1527,7 +1585,7 @@ var map_PersistentVolumeSpec = map[string]string{
     	"mountOptions":                  "mountOptions is the list of mount options, e.g. [\"ro\", \"soft\"]. Not validated - mount will simply fail if one is invalid. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#mount-options",
     	"volumeMode":                    "volumeMode defines if a volume is intended to be used with a formatted filesystem or to remain in raw block state. Value of Filesystem is implied when not included in spec.",
     	"nodeAffinity":                  "nodeAffinity defines constraints that limit what nodes this volume can be accessed from. This field influences the scheduling of pods that use this volume.",
    -	"volumeAttributesClassName":     "Name of VolumeAttributesClass to which this persistent volume belongs. Empty value is not allowed. When this field is not set, it indicates that this volume does not belong to any VolumeAttributesClass. This field is mutable and can be changed by the CSI driver after a volume has been updated successfully to a new class. For an unbound PersistentVolume, the volumeAttributesClassName will be matched with unbound PersistentVolumeClaims during the binding process. This is a beta field and requires enabling VolumeAttributesClass feature (off by default).",
    +	"volumeAttributesClassName":     "Name of VolumeAttributesClass to which this persistent volume belongs. Empty value is not allowed. When this field is not set, it indicates that this volume does not belong to any VolumeAttributesClass. This field is mutable and can be changed by the CSI driver after a volume has been updated successfully to a new class. For an unbound PersistentVolume, the volumeAttributesClassName will be matched with unbound PersistentVolumeClaims during the binding process.",
     }
     
     func (PersistentVolumeSpec) SwaggerDoc() map[string]string {
    @@ -1583,8 +1641,8 @@ var map_PodAffinityTerm = map[string]string{
     	"namespaces":        "namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means \"this pod's namespace\".",
     	"topologyKey":       "This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.",
     	"namespaceSelector": "A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means \"this pod's namespace\". An empty selector ({}) matches all namespaces.",
    -	"matchLabelKeys":    "MatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).",
    -	"mismatchLabelKeys": "MismatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).",
    +	"matchLabelKeys":    "MatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set.",
    +	"mismatchLabelKeys": "MismatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set.",
     }
     
     func (PodAffinityTerm) SwaggerDoc() map[string]string {
    @@ -1594,7 +1652,7 @@ func (PodAffinityTerm) SwaggerDoc() map[string]string {
     var map_PodAntiAffinity = map[string]string{
     	"": "Pod anti affinity is a group of inter pod anti affinity scheduling rules.",
     	"requiredDuringSchedulingIgnoredDuringExecution":  "If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied.",
    -	"preferredDuringSchedulingIgnoredDuringExecution": "The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding \"weight\" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.",
    +	"preferredDuringSchedulingIgnoredDuringExecution": "The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and subtracting \"weight\" from the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.",
     }
     
     func (PodAntiAffinity) SwaggerDoc() map[string]string {
    @@ -1614,9 +1672,24 @@ func (PodAttachOptions) SwaggerDoc() map[string]string {
     	return map_PodAttachOptions
     }
     
    +var map_PodCertificateProjection = map[string]string{
    +	"":                     "PodCertificateProjection provides a private key and X.509 certificate in the pod filesystem.",
    +	"signerName":           "Kubelet's generated CSRs will be addressed to this signer.",
    +	"keyType":              "The type of keypair Kubelet will generate for the pod.\n\nValid values are \"RSA3072\", \"RSA4096\", \"ECDSAP256\", \"ECDSAP384\", \"ECDSAP521\", and \"ED25519\".",
    +	"maxExpirationSeconds": "maxExpirationSeconds is the maximum lifetime permitted for the certificate.\n\nKubelet copies this value verbatim into the PodCertificateRequests it generates for this projection.\n\nIf omitted, kube-apiserver will set it to 86400(24 hours). kube-apiserver will reject values shorter than 3600 (1 hour).  The maximum allowable value is 7862400 (91 days).\n\nThe signer implementation is then free to issue a certificate with any lifetime *shorter* than MaxExpirationSeconds, but no shorter than 3600 seconds (1 hour).  This constraint is enforced by kube-apiserver. `kubernetes.io` signers will never issue certificates with a lifetime longer than 24 hours.",
    +	"credentialBundlePath": "Write the credential bundle at this path in the projected volume.\n\nThe credential bundle is a single file that contains multiple PEM blocks. The first PEM block is a PRIVATE KEY block, containing a PKCS#8 private key.\n\nThe remaining blocks are CERTIFICATE blocks, containing the issued certificate chain from the signer (leaf and any intermediates).\n\nUsing credentialBundlePath lets your Pod's application code make a single atomic read that retrieves a consistent key and certificate chain.  If you project them to separate files, your application code will need to additionally check that the leaf certificate was issued to the key.",
    +	"keyPath":              "Write the key at this path in the projected volume.\n\nMost applications should use credentialBundlePath.  When using keyPath and certificateChainPath, your application needs to check that the key and leaf certificate are consistent, because it is possible to read the files mid-rotation.",
    +	"certificateChainPath": "Write the certificate chain at this path in the projected volume.\n\nMost applications should use credentialBundlePath.  When using keyPath and certificateChainPath, your application needs to check that the key and leaf certificate are consistent, because it is possible to read the files mid-rotation.",
    +}
    +
    +func (PodCertificateProjection) SwaggerDoc() map[string]string {
    +	return map_PodCertificateProjection
    +}
    +
     var map_PodCondition = map[string]string{
     	"":                   "PodCondition contains details for the current condition of this pod.",
     	"type":               "Type is the type of the condition. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions",
    +	"observedGeneration": "If set, this represents the .metadata.generation that the pod condition was set based upon. This is an alpha field. Enable PodObservedGenerationTracking to be able to use this field.",
     	"status":             "Status is the status of the condition. Can be True, False, Unknown. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions",
     	"lastProbeTime":      "Last time we probed the condition.",
     	"lastTransitionTime": "Last time the condition transitioned from one status to another.",
    @@ -1663,6 +1736,16 @@ func (PodExecOptions) SwaggerDoc() map[string]string {
     	return map_PodExecOptions
     }
     
    +var map_PodExtendedResourceClaimStatus = map[string]string{
    +	"":                  "PodExtendedResourceClaimStatus is stored in the PodStatus for the extended resource requests backed by DRA. It stores the generated name for the corresponding special ResourceClaim created by the scheduler.",
    +	"requestMappings":   "RequestMappings identifies the mapping of  to  device request in the generated ResourceClaim.",
    +	"resourceClaimName": "ResourceClaimName is the name of the ResourceClaim that was generated for the Pod in the namespace of the Pod.",
    +}
    +
    +func (PodExtendedResourceClaimStatus) SwaggerDoc() map[string]string {
    +	return map_PodExtendedResourceClaimStatus
    +}
    +
     var map_PodIP = map[string]string{
     	"":   "PodIP represents a single IP address allocated to the pod.",
     	"ip": "IP is the IP address assigned to the pod",
    @@ -1799,7 +1882,7 @@ func (PodSignature) SwaggerDoc() map[string]string {
     var map_PodSpec = map[string]string{
     	"":                              "PodSpec is a description of a pod.",
     	"volumes":                       "List of volumes that can be mounted by containers belonging to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes",
    -	"initContainers":                "List of initialization containers belonging to the pod. Init containers are executed in order prior to containers being started. If any init container fails, the pod is considered to have failed and is handled according to its restartPolicy. The name for an init container or normal container must be unique among all containers. Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes. The resourceRequirements of an init container are taken into account during scheduling by finding the highest request/limit for each resource type, and then using the max of of that value or the sum of the normal containers. Limits are applied to init containers in a similar fashion. Init containers cannot currently be added or removed. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/",
    +	"initContainers":                "List of initialization containers belonging to the pod. Init containers are executed in order prior to containers being started. If any init container fails, the pod is considered to have failed and is handled according to its restartPolicy. The name for an init container or normal container must be unique among all containers. Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes. The resourceRequirements of an init container are taken into account during scheduling by finding the highest request/limit for each resource type, and then using the max of that value or the sum of the normal containers. Limits are applied to init containers in a similar fashion. Init containers cannot currently be added or removed. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/",
     	"containers":                    "List of containers belonging to the pod. Containers cannot currently be added or removed. There must be at least one container in a Pod. Cannot be updated.",
     	"ephemeralContainers":           "List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource.",
     	"restartPolicy":                 "Restart policy for all containers within the pod. One of Always, OnFailure, Never. In some contexts, only a subset of those values may be permitted. Default to Always. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy",
    @@ -1811,7 +1894,7 @@ var map_PodSpec = map[string]string{
     	"serviceAccount":                "DeprecatedServiceAccount is a deprecated alias for ServiceAccountName. Deprecated: Use serviceAccountName instead.",
     	"automountServiceAccountToken":  "AutomountServiceAccountToken indicates whether a service account token should be automatically mounted.",
     	"nodeName":                      "NodeName indicates in which node this pod is scheduled. If empty, this pod is a candidate for scheduling by the scheduler defined in schedulerName. Once this field is set, the kubelet for this node becomes responsible for the lifecycle of this pod. This field should not be used to express a desire for the pod to be scheduled on a specific node. https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodename",
    -	"hostNetwork":                   "Host networking requested for this pod. Use the host's network namespace. If this option is set, the ports that will be used must be specified. Default to false.",
    +	"hostNetwork":                   "Host networking requested for this pod. Use the host's network namespace. When using HostNetwork you should specify ports so the scheduler is aware. When `hostNetwork` is true, specified `hostPort` fields in port definitions must match `containerPort`, and unspecified `hostPort` fields in port definitions are defaulted to match `containerPort`. Default to false.",
     	"hostPID":                       "Use the host's pid namespace. Optional: Default to false.",
     	"hostIPC":                       "Use the host's ipc namespace. Optional: Default to false.",
     	"shareProcessNamespace":         "Share a single process namespace between all of the containers in a pod. When this is set containers will be able to view and signal processes from other containers in the same pod, and the first process in each container will not be assigned PID 1. HostPID and ShareProcessNamespace cannot both be set. Optional: Default to false.",
    @@ -1833,11 +1916,12 @@ var map_PodSpec = map[string]string{
     	"overhead":                      "Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md",
     	"topologySpreadConstraints":     "TopologySpreadConstraints describes how a group of pods ought to spread across topology domains. Scheduler will schedule pods in a way which abides by the constraints. All topologySpreadConstraints are ANDed.",
     	"setHostnameAsFQDN":             "If true the pod's hostname will be configured as the pod's FQDN, rather than the leaf name (the default). In Linux containers, this means setting the FQDN in the hostname field of the kernel (the nodename field of struct utsname). In Windows containers, this means setting the registry value of hostname for the registry key HKEY_LOCAL_MACHINE\\SYSTEM\\CurrentControlSet\\Services\\Tcpip\\Parameters to FQDN. If a pod does not have FQDN, this has no effect. Default to false.",
    -	"os":                            "Specifies the OS of the containers in the pod. Some pod and container fields are restricted if this is set.\n\nIf the OS field is set to linux, the following fields must be unset: -securityContext.windowsOptions\n\nIf the OS field is set to windows, following fields must be unset: - spec.hostPID - spec.hostIPC - spec.hostUsers - spec.securityContext.appArmorProfile - spec.securityContext.seLinuxOptions - spec.securityContext.seccompProfile - spec.securityContext.fsGroup - spec.securityContext.fsGroupChangePolicy - spec.securityContext.sysctls - spec.shareProcessNamespace - spec.securityContext.runAsUser - spec.securityContext.runAsGroup - spec.securityContext.supplementalGroups - spec.securityContext.supplementalGroupsPolicy - spec.containers[*].securityContext.appArmorProfile - spec.containers[*].securityContext.seLinuxOptions - spec.containers[*].securityContext.seccompProfile - spec.containers[*].securityContext.capabilities - spec.containers[*].securityContext.readOnlyRootFilesystem - spec.containers[*].securityContext.privileged - spec.containers[*].securityContext.allowPrivilegeEscalation - spec.containers[*].securityContext.procMount - spec.containers[*].securityContext.runAsUser - spec.containers[*].securityContext.runAsGroup",
    +	"os":                            "Specifies the OS of the containers in the pod. Some pod and container fields are restricted if this is set.\n\nIf the OS field is set to linux, the following fields must be unset: -securityContext.windowsOptions\n\nIf the OS field is set to windows, following fields must be unset: - spec.hostPID - spec.hostIPC - spec.hostUsers - spec.resources - spec.securityContext.appArmorProfile - spec.securityContext.seLinuxOptions - spec.securityContext.seccompProfile - spec.securityContext.fsGroup - spec.securityContext.fsGroupChangePolicy - spec.securityContext.sysctls - spec.shareProcessNamespace - spec.securityContext.runAsUser - spec.securityContext.runAsGroup - spec.securityContext.supplementalGroups - spec.securityContext.supplementalGroupsPolicy - spec.containers[*].securityContext.appArmorProfile - spec.containers[*].securityContext.seLinuxOptions - spec.containers[*].securityContext.seccompProfile - spec.containers[*].securityContext.capabilities - spec.containers[*].securityContext.readOnlyRootFilesystem - spec.containers[*].securityContext.privileged - spec.containers[*].securityContext.allowPrivilegeEscalation - spec.containers[*].securityContext.procMount - spec.containers[*].securityContext.runAsUser - spec.containers[*].securityContext.runAsGroup",
     	"hostUsers":                     "Use the host's user namespace. Optional: Default to true. If set to true or not present, the pod will be run in the host user namespace, useful for when the pod needs a feature only available to the host user namespace, such as loading a kernel module with CAP_SYS_MODULE. When set to false, a new userns is created for the pod. Setting false is useful for mitigating container breakout vulnerabilities even allowing users to run their containers as root without actually having root privileges on the host. This field is alpha-level and is only honored by servers that enable the UserNamespacesSupport feature.",
     	"schedulingGates":               "SchedulingGates is an opaque list of values that if specified will block scheduling the pod. If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the scheduler will not attempt to schedule the pod.\n\nSchedulingGates can only be set at pod creation time, and be removed only afterwards.",
     	"resourceClaims":                "ResourceClaims defines which ResourceClaims must be allocated and reserved before the Pod is allowed to start. The resources will be made available to those containers which consume them by name.\n\nThis is an alpha field and requires enabling the DynamicResourceAllocation feature gate.\n\nThis field is immutable.",
    -	"resources":                     "Resources is the total amount of CPU and Memory resources required by all containers in the pod. It supports specifying Requests and Limits for \"cpu\" and \"memory\" resource names only. ResourceClaims are not supported.\n\nThis field enables fine-grained control over resource allocation for the entire pod, allowing resource sharing among containers in a pod.\n\nThis is an alpha field and requires enabling the PodLevelResources feature gate.",
    +	"resources":                     "Resources is the total amount of CPU and Memory resources required by all containers in the pod. It supports specifying Requests and Limits for \"cpu\", \"memory\" and \"hugepages-\" resource names only. ResourceClaims are not supported.\n\nThis field enables fine-grained control over resource allocation for the entire pod, allowing resource sharing among containers in a pod.\n\nThis is an alpha field and requires enabling the PodLevelResources feature gate.",
    +	"hostnameOverride":              "HostnameOverride specifies an explicit override for the pod's hostname as perceived by the pod. This field only specifies the pod's hostname and does not affect its DNS records. When this field is set to a non-empty string: - It takes precedence over the values set in `hostname` and `subdomain`. - The Pod's hostname will be set to this value. - `setHostnameAsFQDN` must be nil or set to false. - `hostNetwork` must be set to false.\n\nThis field must be a valid DNS subdomain as defined in RFC 1123 and contain at most 64 characters. Requires the HostnameOverride feature gate to be enabled.",
     }
     
     func (PodSpec) SwaggerDoc() map[string]string {
    @@ -1845,23 +1929,25 @@ func (PodSpec) SwaggerDoc() map[string]string {
     }
     
     var map_PodStatus = map[string]string{
    -	"":                           "PodStatus represents information about the status of a pod. Status may trail the actual state of a system, especially if the node that hosts the pod cannot contact the control plane.",
    -	"phase":                      "The phase of a Pod is a simple, high-level summary of where the Pod is in its lifecycle. The conditions array, the reason and message fields, and the individual container status arrays contain more detail about the pod's status. There are five possible phase values:\n\nPending: The pod has been accepted by the Kubernetes system, but one or more of the container images has not been created. This includes time before being scheduled as well as time spent downloading images over the network, which could take a while. Running: The pod has been bound to a node, and all of the containers have been created. At least one container is still running, or is in the process of starting or restarting. Succeeded: All containers in the pod have terminated in success, and will not be restarted. Failed: All containers in the pod have terminated, and at least one container has terminated in failure. The container either exited with non-zero status or was terminated by the system. Unknown: For some reason the state of the pod could not be obtained, typically due to an error in communicating with the host of the pod.\n\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-phase",
    -	"conditions":                 "Current service state of pod. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions",
    -	"message":                    "A human readable message indicating details about why the pod is in this condition.",
    -	"reason":                     "A brief CamelCase message indicating details about why the pod is in this state. e.g. 'Evicted'",
    -	"nominatedNodeName":          "nominatedNodeName is set only when this pod preempts other pods on the node, but it cannot be scheduled right away as preemption victims receive their graceful termination periods. This field does not guarantee that the pod will be scheduled on this node. Scheduler may decide to place the pod elsewhere if other nodes become available sooner. Scheduler may also decide to give the resources on this node to a higher priority pod that is created after preemption. As a result, this field may be different than PodSpec.nodeName when the pod is scheduled.",
    -	"hostIP":                     "hostIP holds the IP address of the host to which the pod is assigned. Empty if the pod has not started yet. A pod can be assigned to a node that has a problem in kubelet which in turns mean that HostIP will not be updated even if there is a node is assigned to pod",
    -	"hostIPs":                    "hostIPs holds the IP addresses allocated to the host. If this field is specified, the first entry must match the hostIP field. This list is empty if the pod has not started yet. A pod can be assigned to a node that has a problem in kubelet which in turns means that HostIPs will not be updated even if there is a node is assigned to this pod.",
    -	"podIP":                      "podIP address allocated to the pod. Routable at least within the cluster. Empty if not yet allocated.",
    -	"podIPs":                     "podIPs holds the IP addresses allocated to the pod. If this field is specified, the 0th entry must match the podIP field. Pods may be allocated at most 1 value for each of IPv4 and IPv6. This list is empty if no IPs have been allocated yet.",
    -	"startTime":                  "RFC 3339 date and time at which the object was acknowledged by the Kubelet. This is before the Kubelet pulled the container image(s) for the pod.",
    -	"initContainerStatuses":      "Statuses of init containers in this pod. The most recent successful non-restartable init container will have ready = true, the most recently started container will have startTime set. Each init container in the pod should have at most one status in this list, and all statuses should be for containers in the pod. However this is not enforced. If a status for a non-existent container is present in the list, or the list has duplicate names, the behavior of various Kubernetes components is not defined and those statuses might be ignored. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#pod-and-container-status",
    -	"containerStatuses":          "Statuses of containers in this pod. Each container in the pod should have at most one status in this list, and all statuses should be for containers in the pod. However this is not enforced. If a status for a non-existent container is present in the list, or the list has duplicate names, the behavior of various Kubernetes components is not defined and those statuses might be ignored. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status",
    -	"qosClass":                   "The Quality of Service (QOS) classification assigned to the pod based on resource requirements See PodQOSClass type for available QOS classes More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-qos/#quality-of-service-classes",
    -	"ephemeralContainerStatuses": "Statuses for any ephemeral containers that have run in this pod. Each ephemeral container in the pod should have at most one status in this list, and all statuses should be for containers in the pod. However this is not enforced. If a status for a non-existent container is present in the list, or the list has duplicate names, the behavior of various Kubernetes components is not defined and those statuses might be ignored. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status",
    -	"resize":                     "Status of resources resize desired for pod's containers. It is empty if no resources resize is pending. Any changes to container resources will automatically set this to \"Proposed\"",
    -	"resourceClaimStatuses":      "Status of resource claims.",
    +	"":                            "PodStatus represents information about the status of a pod. Status may trail the actual state of a system, especially if the node that hosts the pod cannot contact the control plane.",
    +	"observedGeneration":          "If set, this represents the .metadata.generation that the pod status was set based upon. This is an alpha field. Enable PodObservedGenerationTracking to be able to use this field.",
    +	"phase":                       "The phase of a Pod is a simple, high-level summary of where the Pod is in its lifecycle. The conditions array, the reason and message fields, and the individual container status arrays contain more detail about the pod's status. There are five possible phase values:\n\nPending: The pod has been accepted by the Kubernetes system, but one or more of the container images has not been created. This includes time before being scheduled as well as time spent downloading images over the network, which could take a while. Running: The pod has been bound to a node, and all of the containers have been created. At least one container is still running, or is in the process of starting or restarting. Succeeded: All containers in the pod have terminated in success, and will not be restarted. Failed: All containers in the pod have terminated, and at least one container has terminated in failure. The container either exited with non-zero status or was terminated by the system. Unknown: For some reason the state of the pod could not be obtained, typically due to an error in communicating with the host of the pod.\n\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-phase",
    +	"conditions":                  "Current service state of pod. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions",
    +	"message":                     "A human readable message indicating details about why the pod is in this condition.",
    +	"reason":                      "A brief CamelCase message indicating details about why the pod is in this state. e.g. 'Evicted'",
    +	"nominatedNodeName":           "nominatedNodeName is set only when this pod preempts other pods on the node, but it cannot be scheduled right away as preemption victims receive their graceful termination periods. This field does not guarantee that the pod will be scheduled on this node. Scheduler may decide to place the pod elsewhere if other nodes become available sooner. Scheduler may also decide to give the resources on this node to a higher priority pod that is created after preemption. As a result, this field may be different than PodSpec.nodeName when the pod is scheduled.",
    +	"hostIP":                      "hostIP holds the IP address of the host to which the pod is assigned. Empty if the pod has not started yet. A pod can be assigned to a node that has a problem in kubelet which in turns mean that HostIP will not be updated even if there is a node is assigned to pod",
    +	"hostIPs":                     "hostIPs holds the IP addresses allocated to the host. If this field is specified, the first entry must match the hostIP field. This list is empty if the pod has not started yet. A pod can be assigned to a node that has a problem in kubelet which in turns means that HostIPs will not be updated even if there is a node is assigned to this pod.",
    +	"podIP":                       "podIP address allocated to the pod. Routable at least within the cluster. Empty if not yet allocated.",
    +	"podIPs":                      "podIPs holds the IP addresses allocated to the pod. If this field is specified, the 0th entry must match the podIP field. Pods may be allocated at most 1 value for each of IPv4 and IPv6. This list is empty if no IPs have been allocated yet.",
    +	"startTime":                   "RFC 3339 date and time at which the object was acknowledged by the Kubelet. This is before the Kubelet pulled the container image(s) for the pod.",
    +	"initContainerStatuses":       "Statuses of init containers in this pod. The most recent successful non-restartable init container will have ready = true, the most recently started container will have startTime set. Each init container in the pod should have at most one status in this list, and all statuses should be for containers in the pod. However this is not enforced. If a status for a non-existent container is present in the list, or the list has duplicate names, the behavior of various Kubernetes components is not defined and those statuses might be ignored. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#pod-and-container-status",
    +	"containerStatuses":           "Statuses of containers in this pod. Each container in the pod should have at most one status in this list, and all statuses should be for containers in the pod. However this is not enforced. If a status for a non-existent container is present in the list, or the list has duplicate names, the behavior of various Kubernetes components is not defined and those statuses might be ignored. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status",
    +	"qosClass":                    "The Quality of Service (QOS) classification assigned to the pod based on resource requirements See PodQOSClass type for available QOS classes More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-qos/#quality-of-service-classes",
    +	"ephemeralContainerStatuses":  "Statuses for any ephemeral containers that have run in this pod. Each ephemeral container in the pod should have at most one status in this list, and all statuses should be for containers in the pod. However this is not enforced. If a status for a non-existent container is present in the list, or the list has duplicate names, the behavior of various Kubernetes components is not defined and those statuses might be ignored. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status",
    +	"resize":                      "Status of resources resize desired for pod's containers. It is empty if no resources resize is pending. Any changes to container resources will automatically set this to \"Proposed\" Deprecated: Resize status is moved to two pod conditions PodResizePending and PodResizeInProgress. PodResizePending will track states where the spec has been resized, but the Kubelet has not yet allocated the resources. PodResizeInProgress will track in-progress resizes, and should be present whenever allocated resources != acknowledged resources.",
    +	"resourceClaimStatuses":       "Status of resource claims.",
    +	"extendedResourceClaimStatus": "Status of extended resource claim backed by DRA.",
     }
     
     func (PodStatus) SwaggerDoc() map[string]string {
    @@ -2191,7 +2277,7 @@ var map_ResourceRequirements = map[string]string{
     	"":         "ResourceRequirements describes the compute resource requirements.",
     	"limits":   "Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/",
     	"requests": "Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/",
    -	"claims":   "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container.\n\nThis is an alpha field and requires enabling the DynamicResourceAllocation feature gate.\n\nThis field is immutable. It can only be set for containers.",
    +	"claims":   "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container.\n\nThis field depends on the DynamicResourceAllocation feature gate.\n\nThis field is immutable. It can only be set for containers.",
     }
     
     func (ResourceRequirements) SwaggerDoc() map[string]string {
    @@ -2487,7 +2573,7 @@ var map_ServiceSpec = map[string]string{
     	"allocateLoadBalancerNodePorts": "allocateLoadBalancerNodePorts defines if NodePorts will be automatically allocated for services with type LoadBalancer.  Default is \"true\". It may be set to \"false\" if the cluster load-balancer does not rely on NodePorts.  If the caller requests specific NodePorts (by specifying a value), those requests will be respected, regardless of this field. This field may only be set for services with type LoadBalancer and will be cleared if the type is changed to any other type.",
     	"loadBalancerClass":             "loadBalancerClass is the class of the load balancer implementation this Service belongs to. If specified, the value of this field must be a label-style identifier, with an optional prefix, e.g. \"internal-vip\" or \"example.com/internal-vip\". Unprefixed names are reserved for end-users. This field can only be set when the Service type is 'LoadBalancer'. If not set, the default load balancer implementation is used, today this is typically done through the cloud provider integration, but should apply for any default implementation. If set, it is assumed that a load balancer implementation is watching for Services with a matching class. Any default load balancer implementation (e.g. cloud providers) should ignore Services that set this field. This field can only be set when creating or updating a Service to type 'LoadBalancer'. Once set, it can not be changed. This field will be wiped when a service is updated to a non 'LoadBalancer' type.",
     	"internalTrafficPolicy":         "InternalTrafficPolicy describes how nodes distribute service traffic they receive on the ClusterIP. If set to \"Local\", the proxy will assume that pods only want to talk to endpoints of the service on the same node as the pod, dropping the traffic if there are no local endpoints. The default value, \"Cluster\", uses the standard behavior of routing to all endpoints evenly (possibly modified by topology and other features).",
    -	"trafficDistribution":           "TrafficDistribution offers a way to express preferences for how traffic is distributed to Service endpoints. Implementations can use this field as a hint, but are not required to guarantee strict adherence. If the field is not set, the implementation will apply its default routing strategy. If set to \"PreferClose\", implementations should prioritize endpoints that are topologically close (e.g., same zone). This is a beta field and requires enabling ServiceTrafficDistribution feature.",
    +	"trafficDistribution":           "TrafficDistribution offers a way to express preferences for how traffic is distributed to Service endpoints. Implementations can use this field as a hint, but are not required to guarantee strict adherence. If the field is not set, the implementation will apply its default routing strategy. If set to \"PreferClose\", implementations should prioritize endpoints that are in the same zone.",
     }
     
     func (ServiceSpec) SwaggerDoc() map[string]string {
    @@ -2573,7 +2659,7 @@ var map_Taint = map[string]string{
     	"key":       "Required. The taint key to be applied to a node.",
     	"value":     "The taint value corresponding to the taint key.",
     	"effect":    "Required. The effect of the taint on pods that do not tolerate the taint. Valid effects are NoSchedule, PreferNoSchedule and NoExecute.",
    -	"timeAdded": "TimeAdded represents the time at which the taint was added. It is only written for NoExecute taints.",
    +	"timeAdded": "TimeAdded represents the time at which the taint was added.",
     }
     
     func (Taint) SwaggerDoc() map[string]string {
    @@ -2619,8 +2705,8 @@ var map_TopologySpreadConstraint = map[string]string{
     	"whenUnsatisfiable":  "WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy the spread constraint. - DoNotSchedule (default) tells the scheduler not to schedule it. - ScheduleAnyway tells the scheduler to schedule the pod in any location,\n  but giving higher precedence to topologies that would help reduce the\n  skew.\nA constraint is considered \"Unsatisfiable\" for an incoming pod if and only if every possible node assignment for that pod would violate \"MaxSkew\" on some topology. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 3/1/1: ",
     	"labelSelector":      "LabelSelector is used to find matching pods. Pods that match this label selector are counted to determine the number of pods in their corresponding topology domain.",
     	"minDomains":         "MinDomains indicates a minimum number of eligible domains. When the number of eligible domains with matching topology keys is less than minDomains, Pod Topology Spread treats \"global minimum\" as 0, and then the calculation of Skew is performed. And when the number of eligible domains with matching topology keys equals or greater than minDomains, this value has no effect on scheduling. As a result, when the number of eligible domains is less than minDomains, scheduler won't schedule more than maxSkew Pods to those domains. If value is nil, the constraint behaves as if MinDomains is equal to 1. Valid values are integers greater than 0. When value is not nil, WhenUnsatisfiable must be DoNotSchedule.\n\nFor example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same labelSelector spread as 2/2/2: ",
    -	"nodeAffinityPolicy": "NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector when calculating pod topology spread skew. Options are: - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations.\n\nIf this value is nil, the behavior is equivalent to the Honor policy. This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.",
    -	"nodeTaintsPolicy":   "NodeTaintsPolicy indicates how we will treat node taints when calculating pod topology spread skew. Options are: - Honor: nodes without taints, along with tainted nodes for which the incoming pod has a toleration, are included. - Ignore: node taints are ignored. All nodes are included.\n\nIf this value is nil, the behavior is equivalent to the Ignore policy. This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.",
    +	"nodeAffinityPolicy": "NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector when calculating pod topology spread skew. Options are: - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations.\n\nIf this value is nil, the behavior is equivalent to the Honor policy.",
    +	"nodeTaintsPolicy":   "NodeTaintsPolicy indicates how we will treat node taints when calculating pod topology spread skew. Options are: - Honor: nodes without taints, along with tainted nodes for which the incoming pod has a toleration, are included. - Ignore: node taints are ignored. All nodes are included.\n\nIf this value is nil, the behavior is equivalent to the Ignore policy.",
     	"matchLabelKeys":     "MatchLabelKeys is a set of pod label keys to select the pods over which spreading will be calculated. The keys are used to lookup values from the incoming pod labels, those key-value labels are ANDed with labelSelector to select the group of existing pods over which spreading will be calculated for the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. MatchLabelKeys cannot be set when LabelSelector isn't set. Keys that don't exist in the incoming pod labels will be ignored. A null or empty list means only match against labelSelector.\n\nThis is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default).",
     }
     
    @@ -2713,6 +2799,7 @@ var map_VolumeProjection = map[string]string{
     	"configMap":           "configMap information about the configMap data to project",
     	"serviceAccountToken": "serviceAccountToken is information about the serviceAccountToken data to project",
     	"clusterTrustBundle":  "ClusterTrustBundle allows a pod to access the `.spec.trustBundle` field of ClusterTrustBundle objects in an auto-updating file.\n\nAlpha, gated by the ClusterTrustBundleProjection feature gate.\n\nClusterTrustBundle objects can either be selected by name, or by the combination of signer name and a label selector.\n\nKubelet performs aggressive normalization of the PEM contents written into the pod filesystem.  Esoteric PEM features such as inter-block comments and block headers are stripped.  Certificates are deduplicated. The ordering of certificates within the file is arbitrary, and Kubelet may change the order over time.",
    +	"podCertificate":      "Projects an auto-rotating credential bundle (private key and certificate chain) that the pod can use either as a TLS client or server.\n\nKubelet generates a private key and uses it to send a PodCertificateRequest to the named signer.  Once the signer approves the request and issues a certificate chain, Kubelet writes the key and certificate chain to the pod filesystem.  The pod does not start until certificates have been issued for each podCertificate projected volume source in its spec.\n\nKubelet will begin trying to rotate the certificate at the time indicated by the signer using the PodCertificateRequest.Status.BeginRefreshAt timestamp.\n\nKubelet can write a single file, indicated by the credentialBundlePath field, or separate files, indicated by the keyPath and certificateChainPath fields.\n\nThe credential bundle is a single file in PEM format.  The first PEM entry is the private key (in PKCS#8 format), and the remaining PEM entries are the certificate chain issued by the signer (typically, signers will return their certificate chain in leaf-to-root order).\n\nPrefer using the credential bundle format, since your application code can read it atomically.  If you use keyPath and certificateChainPath, your application must make two separate file reads. If these coincide with a certificate rotation, it is possible that the private key and leaf certificate you read may not correspond to each other.  Your application will need to check for this condition, and re-read until they are consistent.\n\nThe named signer controls chooses the format of the certificate it issues; consult the signer implementation's documentation to learn how to use the certificates it issues.",
     }
     
     func (VolumeProjection) SwaggerDoc() map[string]string {
    @@ -2738,10 +2825,10 @@ var map_VolumeSource = map[string]string{
     	"gitRepo":               "gitRepo represents a git repository at a particular revision. Deprecated: GitRepo is deprecated. To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod's container.",
     	"secret":                "secret represents a secret that should populate this volume. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret",
     	"nfs":                   "nfs represents an NFS mount on the host that shares a pod's lifetime More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs",
    -	"iscsi":                 "iscsi represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://examples.k8s.io/volumes/iscsi/README.md",
    -	"glusterfs":             "glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported. More info: https://examples.k8s.io/volumes/glusterfs/README.md",
    +	"iscsi":                 "iscsi represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes/#iscsi",
    +	"glusterfs":             "glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported.",
     	"persistentVolumeClaim": "persistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims",
    -	"rbd":                   "rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported. More info: https://examples.k8s.io/volumes/rbd/README.md",
    +	"rbd":                   "rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported.",
     	"flexVolume":            "flexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin. Deprecated: FlexVolume is deprecated. Consider using a CSIDriver instead.",
     	"cinder":                "cinder represents a cinder volume attached and mounted on kubelets host machine. Deprecated: Cinder is deprecated. All operations for the in-tree cinder type are redirected to the cinder.csi.openstack.org CSI driver. More info: https://examples.k8s.io/mysql-cinder-pd/README.md",
     	"cephfs":                "cephFS represents a Ceph FS mount on the host that shares a pod's lifetime. Deprecated: CephFS is deprecated and the in-tree cephfs type is no longer supported.",
    @@ -2760,7 +2847,7 @@ var map_VolumeSource = map[string]string{
     	"storageos":             "storageOS represents a StorageOS volume attached and mounted on Kubernetes nodes. Deprecated: StorageOS is deprecated and the in-tree storageos type is no longer supported.",
     	"csi":                   "csi (Container Storage Interface) represents ephemeral storage that is handled by certain external CSI drivers.",
     	"ephemeral":             "ephemeral represents a volume that is handled by a cluster storage driver. The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts, and deleted when the pod is removed.\n\nUse this if: a) the volume is only needed while the pod runs, b) features of normal volumes like restoring from snapshot or capacity\n   tracking are needed,\nc) the storage driver is specified through a storage class, and d) the storage driver supports dynamic volume provisioning through\n   a PersistentVolumeClaim (see EphemeralVolumeSource for more\n   information on the connection between this volume type\n   and PersistentVolumeClaim).\n\nUse PersistentVolumeClaim or one of the vendor-specific APIs for volumes that persist for longer than the lifecycle of an individual pod.\n\nUse CSI for light-weight local ephemeral volumes if the CSI driver is meant to be used that way - see the documentation of the driver for more information.\n\nA pod can use both types of ephemeral volumes and persistent volumes at the same time.",
    -	"image":                 "image represents an OCI object (a container image or artifact) pulled and mounted on the kubelet's host machine. The volume is resolved at pod startup depending on which PullPolicy value is provided:\n\n- Always: the kubelet always attempts to pull the reference. Container creation will fail If the pull fails. - Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present. - IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails.\n\nThe volume gets re-resolved if the pod gets deleted and recreated, which means that new remote content will become available on pod recreation. A failure to resolve or pull the image during pod startup will block containers from starting and may add significant latency. Failures will be retried using normal volume backoff and will be reported on the pod reason and message. The types of objects that may be mounted by this volume are defined by the container runtime implementation on a host machine and at minimum must include all valid types supported by the container image field. The OCI object gets mounted in a single directory (spec.containers[*].volumeMounts.mountPath) by merging the manifest layers in the same way as for container images. The volume will be mounted read-only (ro) and non-executable files (noexec). Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath). The field spec.securityContext.fsGroupChangePolicy has no effect on this volume type.",
    +	"image":                 "image represents an OCI object (a container image or artifact) pulled and mounted on the kubelet's host machine. The volume is resolved at pod startup depending on which PullPolicy value is provided:\n\n- Always: the kubelet always attempts to pull the reference. Container creation will fail If the pull fails. - Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present. - IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails.\n\nThe volume gets re-resolved if the pod gets deleted and recreated, which means that new remote content will become available on pod recreation. A failure to resolve or pull the image during pod startup will block containers from starting and may add significant latency. Failures will be retried using normal volume backoff and will be reported on the pod reason and message. The types of objects that may be mounted by this volume are defined by the container runtime implementation on a host machine and at minimum must include all valid types supported by the container image field. The OCI object gets mounted in a single directory (spec.containers[*].volumeMounts.mountPath) by merging the manifest layers in the same way as for container images. The volume will be mounted read-only (ro) and non-executable files (noexec). Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath) before 1.33. The field spec.securityContext.fsGroupChangePolicy has no effect on this volume type.",
     }
     
     func (VolumeSource) SwaggerDoc() map[string]string {
    diff --git a/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go
    index 3f669092e..bcd91bd01 100644
    --- a/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go
    +++ b/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go
    @@ -829,6 +829,13 @@ func (in *Container) DeepCopyInto(out *Container) {
     		*out = new(ContainerRestartPolicy)
     		**out = **in
     	}
    +	if in.RestartPolicyRules != nil {
    +		in, out := &in.RestartPolicyRules, &out.RestartPolicyRules
    +		*out = make([]ContainerRestartRule, len(*in))
    +		for i := range *in {
    +			(*in)[i].DeepCopyInto(&(*out)[i])
    +		}
    +	}
     	if in.VolumeMounts != nil {
     		in, out := &in.VolumeMounts, &out.VolumeMounts
     		*out = make([]VolumeMount, len(*in))
    @@ -879,6 +886,22 @@ func (in *Container) DeepCopy() *Container {
     	return out
     }
     
    +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
    +func (in *ContainerExtendedResourceRequest) DeepCopyInto(out *ContainerExtendedResourceRequest) {
    +	*out = *in
    +	return
    +}
    +
    +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerExtendedResourceRequest.
    +func (in *ContainerExtendedResourceRequest) DeepCopy() *ContainerExtendedResourceRequest {
    +	if in == nil {
    +		return nil
    +	}
    +	out := new(ContainerExtendedResourceRequest)
    +	in.DeepCopyInto(out)
    +	return out
    +}
    +
     // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
     func (in *ContainerImage) DeepCopyInto(out *ContainerImage) {
     	*out = *in
    @@ -932,6 +955,48 @@ func (in *ContainerResizePolicy) DeepCopy() *ContainerResizePolicy {
     	return out
     }
     
    +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
    +func (in *ContainerRestartRule) DeepCopyInto(out *ContainerRestartRule) {
    +	*out = *in
    +	if in.ExitCodes != nil {
    +		in, out := &in.ExitCodes, &out.ExitCodes
    +		*out = new(ContainerRestartRuleOnExitCodes)
    +		(*in).DeepCopyInto(*out)
    +	}
    +	return
    +}
    +
    +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerRestartRule.
    +func (in *ContainerRestartRule) DeepCopy() *ContainerRestartRule {
    +	if in == nil {
    +		return nil
    +	}
    +	out := new(ContainerRestartRule)
    +	in.DeepCopyInto(out)
    +	return out
    +}
    +
    +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
    +func (in *ContainerRestartRuleOnExitCodes) DeepCopyInto(out *ContainerRestartRuleOnExitCodes) {
    +	*out = *in
    +	if in.Values != nil {
    +		in, out := &in.Values, &out.Values
    +		*out = make([]int32, len(*in))
    +		copy(*out, *in)
    +	}
    +	return
    +}
    +
    +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerRestartRuleOnExitCodes.
    +func (in *ContainerRestartRuleOnExitCodes) DeepCopy() *ContainerRestartRuleOnExitCodes {
    +	if in == nil {
    +		return nil
    +	}
    +	out := new(ContainerRestartRuleOnExitCodes)
    +	in.DeepCopyInto(out)
    +	return out
    +}
    +
     // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
     func (in *ContainerState) DeepCopyInto(out *ContainerState) {
     	*out = *in
    @@ -1055,6 +1120,11 @@ func (in *ContainerStatus) DeepCopyInto(out *ContainerStatus) {
     			(*in)[i].DeepCopyInto(&(*out)[i])
     		}
     	}
    +	if in.StopSignal != nil {
    +		in, out := &in.StopSignal, &out.StopSignal
    +		*out = new(Signal)
    +		**out = **in
    +	}
     	return
     }
     
    @@ -1428,6 +1498,11 @@ func (in *EnvVarSource) DeepCopyInto(out *EnvVarSource) {
     		*out = new(SecretKeySelector)
     		(*in).DeepCopyInto(*out)
     	}
    +	if in.FileKeyRef != nil {
    +		in, out := &in.FileKeyRef, &out.FileKeyRef
    +		*out = new(FileKeySelector)
    +		(*in).DeepCopyInto(*out)
    +	}
     	return
     }
     
    @@ -1501,6 +1576,13 @@ func (in *EphemeralContainerCommon) DeepCopyInto(out *EphemeralContainerCommon)
     		*out = new(ContainerRestartPolicy)
     		**out = **in
     	}
    +	if in.RestartPolicyRules != nil {
    +		in, out := &in.RestartPolicyRules, &out.RestartPolicyRules
    +		*out = make([]ContainerRestartRule, len(*in))
    +		for i := range *in {
    +			(*in)[i].DeepCopyInto(&(*out)[i])
    +		}
    +	}
     	if in.VolumeMounts != nil {
     		in, out := &in.VolumeMounts, &out.VolumeMounts
     		*out = make([]VolumeMount, len(*in))
    @@ -1731,6 +1813,27 @@ func (in *FCVolumeSource) DeepCopy() *FCVolumeSource {
     	return out
     }
     
    +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
    +func (in *FileKeySelector) DeepCopyInto(out *FileKeySelector) {
    +	*out = *in
    +	if in.Optional != nil {
    +		in, out := &in.Optional, &out.Optional
    +		*out = new(bool)
    +		**out = **in
    +	}
    +	return
    +}
    +
    +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FileKeySelector.
    +func (in *FileKeySelector) DeepCopy() *FileKeySelector {
    +	if in == nil {
    +		return nil
    +	}
    +	out := new(FileKeySelector)
    +	in.DeepCopyInto(out)
    +	return out
    +}
    +
     // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
     func (in *FlexPersistentVolumeSource) DeepCopyInto(out *FlexPersistentVolumeSource) {
     	*out = *in
    @@ -2101,6 +2204,11 @@ func (in *Lifecycle) DeepCopyInto(out *Lifecycle) {
     		*out = new(LifecycleHandler)
     		(*in).DeepCopyInto(*out)
     	}
    +	if in.StopSignal != nil {
    +		in, out := &in.StopSignal, &out.StopSignal
    +		*out = new(Signal)
    +		**out = **in
    +	}
     	return
     }
     
    @@ -3002,7 +3110,7 @@ func (in *NodeStatus) DeepCopyInto(out *NodeStatus) {
     		copy(*out, *in)
     	}
     	out.DaemonEndpoints = in.DaemonEndpoints
    -	out.NodeInfo = in.NodeInfo
    +	in.NodeInfo.DeepCopyInto(&out.NodeInfo)
     	if in.Images != nil {
     		in, out := &in.Images, &out.Images
     		*out = make([]ContainerImage, len(*in))
    @@ -3050,9 +3158,35 @@ func (in *NodeStatus) DeepCopy() *NodeStatus {
     	return out
     }
     
    +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
    +func (in *NodeSwapStatus) DeepCopyInto(out *NodeSwapStatus) {
    +	*out = *in
    +	if in.Capacity != nil {
    +		in, out := &in.Capacity, &out.Capacity
    +		*out = new(int64)
    +		**out = **in
    +	}
    +	return
    +}
    +
    +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeSwapStatus.
    +func (in *NodeSwapStatus) DeepCopy() *NodeSwapStatus {
    +	if in == nil {
    +		return nil
    +	}
    +	out := new(NodeSwapStatus)
    +	in.DeepCopyInto(out)
    +	return out
    +}
    +
     // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
     func (in *NodeSystemInfo) DeepCopyInto(out *NodeSystemInfo) {
     	*out = *in
    +	if in.Swap != nil {
    +		in, out := &in.Swap, &out.Swap
    +		*out = new(NodeSwapStatus)
    +		(*in).DeepCopyInto(*out)
    +	}
     	return
     }
     
    @@ -3761,6 +3895,27 @@ func (in *PodAttachOptions) DeepCopyObject() runtime.Object {
     	return nil
     }
     
    +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
    +func (in *PodCertificateProjection) DeepCopyInto(out *PodCertificateProjection) {
    +	*out = *in
    +	if in.MaxExpirationSeconds != nil {
    +		in, out := &in.MaxExpirationSeconds, &out.MaxExpirationSeconds
    +		*out = new(int32)
    +		**out = **in
    +	}
    +	return
    +}
    +
    +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodCertificateProjection.
    +func (in *PodCertificateProjection) DeepCopy() *PodCertificateProjection {
    +	if in == nil {
    +		return nil
    +	}
    +	out := new(PodCertificateProjection)
    +	in.DeepCopyInto(out)
    +	return out
    +}
    +
     // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
     func (in *PodCondition) DeepCopyInto(out *PodCondition) {
     	*out = *in
    @@ -3863,6 +4018,27 @@ func (in *PodExecOptions) DeepCopyObject() runtime.Object {
     	return nil
     }
     
    +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
    +func (in *PodExtendedResourceClaimStatus) DeepCopyInto(out *PodExtendedResourceClaimStatus) {
    +	*out = *in
    +	if in.RequestMappings != nil {
    +		in, out := &in.RequestMappings, &out.RequestMappings
    +		*out = make([]ContainerExtendedResourceRequest, len(*in))
    +		copy(*out, *in)
    +	}
    +	return
    +}
    +
    +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodExtendedResourceClaimStatus.
    +func (in *PodExtendedResourceClaimStatus) DeepCopy() *PodExtendedResourceClaimStatus {
    +	if in == nil {
    +		return nil
    +	}
    +	out := new(PodExtendedResourceClaimStatus)
    +	in.DeepCopyInto(out)
    +	return out
    +}
    +
     // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
     func (in *PodIP) DeepCopyInto(out *PodIP) {
     	*out = *in
    @@ -4376,6 +4552,11 @@ func (in *PodSpec) DeepCopyInto(out *PodSpec) {
     		*out = new(ResourceRequirements)
     		(*in).DeepCopyInto(*out)
     	}
    +	if in.HostnameOverride != nil {
    +		in, out := &in.HostnameOverride, &out.HostnameOverride
    +		*out = new(string)
    +		**out = **in
    +	}
     	return
     }
     
    @@ -4441,6 +4622,11 @@ func (in *PodStatus) DeepCopyInto(out *PodStatus) {
     			(*in)[i].DeepCopyInto(&(*out)[i])
     		}
     	}
    +	if in.ExtendedResourceClaimStatus != nil {
    +		in, out := &in.ExtendedResourceClaimStatus, &out.ExtendedResourceClaimStatus
    +		*out = new(PodExtendedResourceClaimStatus)
    +		(*in).DeepCopyInto(*out)
    +	}
     	return
     }
     
    @@ -6376,6 +6562,11 @@ func (in *VolumeProjection) DeepCopyInto(out *VolumeProjection) {
     		*out = new(ClusterTrustBundleProjection)
     		(*in).DeepCopyInto(*out)
     	}
    +	if in.PodCertificate != nil {
    +		in, out := &in.PodCertificate, &out.PodCertificate
    +		*out = new(PodCertificateProjection)
    +		(*in).DeepCopyInto(*out)
    +	}
     	return
     }
     
    diff --git a/vendor/k8s.io/api/discovery/v1/doc.go b/vendor/k8s.io/api/discovery/v1/doc.go
    index 01913669f..43e30b7f4 100644
    --- a/vendor/k8s.io/api/discovery/v1/doc.go
    +++ b/vendor/k8s.io/api/discovery/v1/doc.go
    @@ -20,4 +20,4 @@ limitations under the License.
     // +k8s:prerelease-lifecycle-gen=true
     // +groupName=discovery.k8s.io
     
    -package v1 // import "k8s.io/api/discovery/v1"
    +package v1
    diff --git a/vendor/k8s.io/api/discovery/v1/generated.pb.go b/vendor/k8s.io/api/discovery/v1/generated.pb.go
    index 5792481dc..443ff8f8f 100644
    --- a/vendor/k8s.io/api/discovery/v1/generated.pb.go
    +++ b/vendor/k8s.io/api/discovery/v1/generated.pb.go
    @@ -214,10 +214,38 @@ func (m *EndpointSliceList) XXX_DiscardUnknown() {
     
     var xxx_messageInfo_EndpointSliceList proto.InternalMessageInfo
     
    +func (m *ForNode) Reset()      { *m = ForNode{} }
    +func (*ForNode) ProtoMessage() {}
    +func (*ForNode) Descriptor() ([]byte, []int) {
    +	return fileDescriptor_2237b452324cf77e, []int{6}
    +}
    +func (m *ForNode) XXX_Unmarshal(b []byte) error {
    +	return m.Unmarshal(b)
    +}
    +func (m *ForNode) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    +	b = b[:cap(b)]
    +	n, err := m.MarshalToSizedBuffer(b)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return b[:n], nil
    +}
    +func (m *ForNode) XXX_Merge(src proto.Message) {
    +	xxx_messageInfo_ForNode.Merge(m, src)
    +}
    +func (m *ForNode) XXX_Size() int {
    +	return m.Size()
    +}
    +func (m *ForNode) XXX_DiscardUnknown() {
    +	xxx_messageInfo_ForNode.DiscardUnknown(m)
    +}
    +
    +var xxx_messageInfo_ForNode proto.InternalMessageInfo
    +
     func (m *ForZone) Reset()      { *m = ForZone{} }
     func (*ForZone) ProtoMessage() {}
     func (*ForZone) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_2237b452324cf77e, []int{6}
    +	return fileDescriptor_2237b452324cf77e, []int{7}
     }
     func (m *ForZone) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -250,6 +278,7 @@ func init() {
     	proto.RegisterType((*EndpointPort)(nil), "k8s.io.api.discovery.v1.EndpointPort")
     	proto.RegisterType((*EndpointSlice)(nil), "k8s.io.api.discovery.v1.EndpointSlice")
     	proto.RegisterType((*EndpointSliceList)(nil), "k8s.io.api.discovery.v1.EndpointSliceList")
    +	proto.RegisterType((*ForNode)(nil), "k8s.io.api.discovery.v1.ForNode")
     	proto.RegisterType((*ForZone)(nil), "k8s.io.api.discovery.v1.ForZone")
     }
     
    @@ -258,62 +287,64 @@ func init() {
     }
     
     var fileDescriptor_2237b452324cf77e = []byte{
    -	// 877 bytes of a gzipped FileDescriptorProto
    -	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x55, 0x4d, 0x6f, 0xdc, 0x44,
    -	0x18, 0x5e, 0x67, 0x63, 0x62, 0x8f, 0x13, 0xd1, 0x8e, 0x90, 0x62, 0x2d, 0xc8, 0x5e, 0x8c, 0x0a,
    -	0x2b, 0x45, 0x78, 0x49, 0x84, 0x50, 0x41, 0xe2, 0x10, 0xd3, 0xd0, 0xf2, 0x15, 0xa2, 0x69, 0x4e,
    -	0x15, 0x52, 0x71, 0xec, 0x37, 0x5e, 0x93, 0xd8, 0x63, 0x79, 0x26, 0x2b, 0x2d, 0x27, 0x2e, 0x9c,
    -	0xe1, 0x17, 0x71, 0x44, 0x39, 0xf6, 0x46, 0x4f, 0x16, 0x31, 0x7f, 0x81, 0x53, 0x4f, 0x68, 0xc6,
    -	0x9f, 0x61, 0xb3, 0xda, 0xde, 0x3c, 0xcf, 0x3c, 0xcf, 0xfb, 0xf1, 0xcc, 0xcc, 0x6b, 0xf4, 0xc1,
    -	0xc5, 0x43, 0xe6, 0xc6, 0x74, 0xea, 0x67, 0xf1, 0x34, 0x8c, 0x59, 0x40, 0xe7, 0x90, 0x2f, 0xa6,
    -	0xf3, 0xfd, 0x69, 0x04, 0x29, 0xe4, 0x3e, 0x87, 0xd0, 0xcd, 0x72, 0xca, 0x29, 0xde, 0xad, 0x88,
    -	0xae, 0x9f, 0xc5, 0x6e, 0x4b, 0x74, 0xe7, 0xfb, 0xa3, 0x0f, 0xa3, 0x98, 0xcf, 0xae, 0xce, 0xdc,
    -	0x80, 0x26, 0xd3, 0x88, 0x46, 0x74, 0x2a, 0xf9, 0x67, 0x57, 0xe7, 0x72, 0x25, 0x17, 0xf2, 0xab,
    -	0x8a, 0x33, 0x72, 0x7a, 0x09, 0x03, 0x9a, 0xc3, 0x1d, 0xb9, 0x46, 0x1f, 0x77, 0x9c, 0xc4, 0x0f,
    -	0x66, 0x71, 0x2a, 0x6a, 0xca, 0x2e, 0x22, 0x01, 0xb0, 0x69, 0x02, 0xdc, 0xbf, 0x4b, 0x35, 0x5d,
    -	0xa5, 0xca, 0xaf, 0x52, 0x1e, 0x27, 0xb0, 0x24, 0xf8, 0x64, 0x9d, 0x80, 0x05, 0x33, 0x48, 0xfc,
    -	0xff, 0xeb, 0x9c, 0x7f, 0x37, 0x91, 0x76, 0x94, 0x86, 0x19, 0x8d, 0x53, 0x8e, 0xf7, 0x90, 0xee,
    -	0x87, 0x61, 0x0e, 0x8c, 0x01, 0x33, 0x95, 0xf1, 0x70, 0xa2, 0x7b, 0x3b, 0x65, 0x61, 0xeb, 0x87,
    -	0x0d, 0x48, 0xba, 0x7d, 0xfc, 0x1c, 0xa1, 0x80, 0xa6, 0x61, 0xcc, 0x63, 0x9a, 0x32, 0x73, 0x63,
    -	0xac, 0x4c, 0x8c, 0x83, 0x3d, 0x77, 0x85, 0xb3, 0x6e, 0x93, 0xe3, 0x8b, 0x56, 0xe2, 0xe1, 0xeb,
    -	0xc2, 0x1e, 0x94, 0x85, 0x8d, 0x3a, 0x8c, 0xf4, 0x42, 0xe2, 0x09, 0xd2, 0x66, 0x94, 0xf1, 0xd4,
    -	0x4f, 0xc0, 0x1c, 0x8e, 0x95, 0x89, 0xee, 0x6d, 0x97, 0x85, 0xad, 0x3d, 0xa9, 0x31, 0xd2, 0xee,
    -	0xe2, 0x13, 0xa4, 0x73, 0x3f, 0x8f, 0x80, 0x13, 0x38, 0x37, 0x37, 0x65, 0x25, 0xef, 0xf5, 0x2b,
    -	0x11, 0x67, 0x23, 0x8a, 0xf8, 0xfe, 0xec, 0x27, 0x08, 0x04, 0x09, 0x72, 0x48, 0x03, 0xa8, 0x9a,
    -	0x3b, 0x6d, 0x94, 0xa4, 0x0b, 0x82, 0x7f, 0x55, 0x10, 0x0e, 0x21, 0xcb, 0x21, 0x10, 0x5e, 0x9d,
    -	0xd2, 0x8c, 0x5e, 0xd2, 0x68, 0x61, 0xaa, 0xe3, 0xe1, 0xc4, 0x38, 0xf8, 0x74, 0x6d, 0x97, 0xee,
    -	0xa3, 0x25, 0xed, 0x51, 0xca, 0xf3, 0x85, 0x37, 0xaa, 0x7b, 0xc6, 0xcb, 0x04, 0x72, 0x47, 0x42,
    -	0xe1, 0x41, 0x4a, 0x43, 0x38, 0x16, 0x1e, 0xbc, 0xd1, 0x79, 0x70, 0x5c, 0x63, 0xa4, 0xdd, 0xc5,
    -	0xef, 0xa0, 0xcd, 0x9f, 0x69, 0x0a, 0xe6, 0x96, 0x64, 0x69, 0x65, 0x61, 0x6f, 0x3e, 0xa3, 0x29,
    -	0x10, 0x89, 0xe2, 0xc7, 0x48, 0x9d, 0xc5, 0x29, 0x67, 0xa6, 0x26, 0xdd, 0x79, 0x7f, 0x6d, 0x07,
    -	0x4f, 0x04, 0xdb, 0xd3, 0xcb, 0xc2, 0x56, 0xe5, 0x27, 0xa9, 0xf4, 0xa3, 0x23, 0xb4, 0xbb, 0xa2,
    -	0x37, 0x7c, 0x0f, 0x0d, 0x2f, 0x60, 0x61, 0x2a, 0xa2, 0x00, 0x22, 0x3e, 0xf1, 0x5b, 0x48, 0x9d,
    -	0xfb, 0x97, 0x57, 0x20, 0x6f, 0x87, 0x4e, 0xaa, 0xc5, 0x67, 0x1b, 0x0f, 0x15, 0xe7, 0x37, 0x05,
    -	0xe1, 0xe5, 0x2b, 0x81, 0x6d, 0xa4, 0xe6, 0xe0, 0x87, 0x55, 0x10, 0xad, 0x4a, 0x4f, 0x04, 0x40,
    -	0x2a, 0x1c, 0x3f, 0x40, 0x5b, 0x0c, 0xf2, 0x79, 0x9c, 0x46, 0x32, 0xa6, 0xe6, 0x19, 0x65, 0x61,
    -	0x6f, 0x3d, 0xad, 0x20, 0xd2, 0xec, 0xe1, 0x7d, 0x64, 0x70, 0xc8, 0x93, 0x38, 0xf5, 0xb9, 0xa0,
    -	0x0e, 0x25, 0xf5, 0xcd, 0xb2, 0xb0, 0x8d, 0xd3, 0x0e, 0x26, 0x7d, 0x8e, 0xf3, 0x1c, 0xed, 0xdc,
    -	0xea, 0x1d, 0x1f, 0x23, 0xed, 0x9c, 0xe6, 0xc2, 0xc3, 0xea, 0x2d, 0x18, 0x07, 0xe3, 0x95, 0xae,
    -	0x7d, 0x59, 0x11, 0xbd, 0x7b, 0xf5, 0xf1, 0x6a, 0x35, 0xc0, 0x48, 0x1b, 0xc3, 0xf9, 0x53, 0x41,
    -	0xdb, 0x4d, 0x86, 0x13, 0x9a, 0x73, 0x71, 0x62, 0xf2, 0x6e, 0x2b, 0xdd, 0x89, 0xc9, 0x33, 0x95,
    -	0x28, 0x7e, 0x8c, 0x34, 0xf9, 0x42, 0x03, 0x7a, 0x59, 0xd9, 0xe7, 0xed, 0x89, 0xc0, 0x27, 0x35,
    -	0xf6, 0xaa, 0xb0, 0xdf, 0x5e, 0x9e, 0x3e, 0x6e, 0xb3, 0x4d, 0x5a, 0xb1, 0x48, 0x93, 0xd1, 0x9c,
    -	0x4b, 0x13, 0xd4, 0x2a, 0x8d, 0x48, 0x4f, 0x24, 0x2a, 0x9c, 0xf2, 0xb3, 0xac, 0x91, 0xc9, 0xc7,
    -	0xa3, 0x57, 0x4e, 0x1d, 0x76, 0x30, 0xe9, 0x73, 0x9c, 0xbf, 0x36, 0x3a, 0xab, 0x9e, 0x5e, 0xc6,
    -	0x01, 0xe0, 0x1f, 0x91, 0x26, 0x06, 0x59, 0xe8, 0x73, 0x5f, 0x76, 0x63, 0x1c, 0x7c, 0xd4, 0xb3,
    -	0xaa, 0x9d, 0x47, 0x6e, 0x76, 0x11, 0x09, 0x80, 0xb9, 0x82, 0xdd, 0x3d, 0xc8, 0xef, 0x80, 0xfb,
    -	0xdd, 0x34, 0xe8, 0x30, 0xd2, 0x46, 0xc5, 0x8f, 0x90, 0x51, 0x4f, 0x9e, 0xd3, 0x45, 0x06, 0x75,
    -	0x99, 0x4e, 0x2d, 0x31, 0x0e, 0xbb, 0xad, 0x57, 0xb7, 0x97, 0xa4, 0x2f, 0xc3, 0x04, 0xe9, 0x50,
    -	0x17, 0x2e, 0x26, 0x96, 0x38, 0xd3, 0x77, 0xd7, 0xbe, 0x04, 0xef, 0x7e, 0x9d, 0x46, 0x6f, 0x10,
    -	0x46, 0xba, 0x30, 0xf8, 0x6b, 0xa4, 0x0a, 0x23, 0x99, 0x39, 0x94, 0xf1, 0x1e, 0xac, 0x8d, 0x27,
    -	0xcc, 0xf7, 0x76, 0xea, 0x98, 0xaa, 0x58, 0x31, 0x52, 0x85, 0x70, 0xfe, 0x50, 0xd0, 0xfd, 0x5b,
    -	0xce, 0x7e, 0x1b, 0x33, 0x8e, 0x7f, 0x58, 0x72, 0xd7, 0x7d, 0x3d, 0x77, 0x85, 0x5a, 0x7a, 0xdb,
    -	0x5e, 0xcb, 0x06, 0xe9, 0x39, 0xfb, 0x0d, 0x52, 0x63, 0x0e, 0x49, 0xe3, 0xc7, 0xfa, 0xc9, 0x20,
    -	0x0b, 0xeb, 0x1a, 0xf8, 0x4a, 0x88, 0x49, 0x15, 0xc3, 0xd9, 0x43, 0x5b, 0xf5, 0xcd, 0xc7, 0xe3,
    -	0x5b, 0xb7, 0x7b, 0xbb, 0xa6, 0xf7, 0x6e, 0xb8, 0xf7, 0xf9, 0xf5, 0x8d, 0x35, 0x78, 0x71, 0x63,
    -	0x0d, 0x5e, 0xde, 0x58, 0x83, 0x5f, 0x4a, 0x4b, 0xb9, 0x2e, 0x2d, 0xe5, 0x45, 0x69, 0x29, 0x2f,
    -	0x4b, 0x4b, 0xf9, 0xbb, 0xb4, 0x94, 0xdf, 0xff, 0xb1, 0x06, 0xcf, 0x76, 0x57, 0xfc, 0xd4, 0xff,
    -	0x0b, 0x00, 0x00, 0xff, 0xff, 0x76, 0x4b, 0x26, 0xe3, 0xee, 0x07, 0x00, 0x00,
    +	// 902 bytes of a gzipped FileDescriptorProto
    +	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x56, 0xcf, 0x6f, 0xe3, 0x44,
    +	0x14, 0x8e, 0x9b, 0x9a, 0xda, 0xe3, 0x56, 0xec, 0x8e, 0x90, 0x6a, 0x05, 0x64, 0x07, 0xa3, 0x85,
    +	0x48, 0x15, 0x0e, 0xad, 0x10, 0x5a, 0x90, 0x38, 0xd4, 0x6c, 0xd9, 0xe5, 0x57, 0xa9, 0x66, 0x7b,
    +	0x5a, 0x21, 0x81, 0x6b, 0xbf, 0x3a, 0xa6, 0x8d, 0xc7, 0xf2, 0x4c, 0x22, 0x85, 0x13, 0x17, 0xce,
    +	0xf0, 0x9f, 0xf0, 0x1f, 0x70, 0x44, 0x3d, 0xee, 0x8d, 0x3d, 0x59, 0xd4, 0xfc, 0x0b, 0x9c, 0xf6,
    +	0x84, 0x66, 0xfc, 0x33, 0xa4, 0x51, 0xf6, 0xe6, 0xf9, 0xe6, 0x7b, 0xdf, 0x7b, 0xf3, 0xcd, 0x7b,
    +	0x23, 0xa3, 0xf7, 0xae, 0x1e, 0x32, 0x37, 0xa6, 0x63, 0x3f, 0x8d, 0xc7, 0x61, 0xcc, 0x02, 0x3a,
    +	0x87, 0x6c, 0x31, 0x9e, 0x1f, 0x8e, 0x23, 0x48, 0x20, 0xf3, 0x39, 0x84, 0x6e, 0x9a, 0x51, 0x4e,
    +	0xf1, 0x7e, 0x49, 0x74, 0xfd, 0x34, 0x76, 0x1b, 0xa2, 0x3b, 0x3f, 0x1c, 0xbc, 0x1f, 0xc5, 0x7c,
    +	0x32, 0xbb, 0x70, 0x03, 0x3a, 0x1d, 0x47, 0x34, 0xa2, 0x63, 0xc9, 0xbf, 0x98, 0x5d, 0xca, 0x95,
    +	0x5c, 0xc8, 0xaf, 0x52, 0x67, 0xe0, 0x74, 0x12, 0x06, 0x34, 0x83, 0x3b, 0x72, 0x0d, 0x3e, 0x6c,
    +	0x39, 0x53, 0x3f, 0x98, 0xc4, 0x89, 0xa8, 0x29, 0xbd, 0x8a, 0x04, 0xc0, 0xc6, 0x53, 0xe0, 0xfe,
    +	0x5d, 0x51, 0xe3, 0x75, 0x51, 0xd9, 0x2c, 0xe1, 0xf1, 0x14, 0x56, 0x02, 0x3e, 0xda, 0x14, 0xc0,
    +	0x82, 0x09, 0x4c, 0xfd, 0xff, 0xc7, 0x39, 0xff, 0x6e, 0x23, 0xed, 0x24, 0x09, 0x53, 0x1a, 0x27,
    +	0x1c, 0x1f, 0x20, 0xdd, 0x0f, 0xc3, 0x0c, 0x18, 0x03, 0x66, 0x2a, 0xc3, 0xfe, 0x48, 0xf7, 0xf6,
    +	0x8a, 0xdc, 0xd6, 0x8f, 0x6b, 0x90, 0xb4, 0xfb, 0xf8, 0x7b, 0x84, 0x02, 0x9a, 0x84, 0x31, 0x8f,
    +	0x69, 0xc2, 0xcc, 0xad, 0xa1, 0x32, 0x32, 0x8e, 0x0e, 0xdc, 0x35, 0xce, 0xba, 0x75, 0x8e, 0xcf,
    +	0x9a, 0x10, 0x0f, 0xdf, 0xe4, 0x76, 0xaf, 0xc8, 0x6d, 0xd4, 0x62, 0xa4, 0x23, 0x89, 0x47, 0x48,
    +	0x9b, 0x50, 0xc6, 0x13, 0x7f, 0x0a, 0x66, 0x7f, 0xa8, 0x8c, 0x74, 0x6f, 0xb7, 0xc8, 0x6d, 0xed,
    +	0x49, 0x85, 0x91, 0x66, 0x17, 0x9f, 0x21, 0x9d, 0xfb, 0x59, 0x04, 0x9c, 0xc0, 0xa5, 0xb9, 0x2d,
    +	0x2b, 0x79, 0xa7, 0x5b, 0x89, 0xb8, 0x1b, 0x51, 0xc4, 0xb7, 0x17, 0x3f, 0x42, 0x20, 0x48, 0x90,
    +	0x41, 0x12, 0x40, 0x79, 0xb8, 0xf3, 0x3a, 0x92, 0xb4, 0x22, 0xf8, 0x17, 0x05, 0xe1, 0x10, 0xd2,
    +	0x0c, 0x02, 0xe1, 0xd5, 0x39, 0x4d, 0xe9, 0x35, 0x8d, 0x16, 0xa6, 0x3a, 0xec, 0x8f, 0x8c, 0xa3,
    +	0x8f, 0x37, 0x9e, 0xd2, 0x7d, 0xb4, 0x12, 0x7b, 0x92, 0xf0, 0x6c, 0xe1, 0x0d, 0xaa, 0x33, 0xe3,
    +	0x55, 0x02, 0xb9, 0x23, 0xa1, 0xf0, 0x20, 0xa1, 0x21, 0x9c, 0x0a, 0x0f, 0x5e, 0x6b, 0x3d, 0x38,
    +	0xad, 0x30, 0xd2, 0xec, 0xe2, 0xb7, 0xd0, 0xf6, 0x4f, 0x34, 0x01, 0x73, 0x47, 0xb2, 0xb4, 0x22,
    +	0xb7, 0xb7, 0x9f, 0xd1, 0x04, 0x88, 0x44, 0xf1, 0x63, 0xa4, 0x4e, 0xe2, 0x84, 0x33, 0x53, 0x93,
    +	0xee, 0xbc, 0xbb, 0xf1, 0x04, 0x4f, 0x04, 0xdb, 0xd3, 0x8b, 0xdc, 0x56, 0xe5, 0x27, 0x29, 0xe3,
    +	0x07, 0x27, 0x68, 0x7f, 0xcd, 0xd9, 0xf0, 0x3d, 0xd4, 0xbf, 0x82, 0x85, 0xa9, 0x88, 0x02, 0x88,
    +	0xf8, 0xc4, 0x6f, 0x20, 0x75, 0xee, 0x5f, 0xcf, 0x40, 0x76, 0x87, 0x4e, 0xca, 0xc5, 0x27, 0x5b,
    +	0x0f, 0x15, 0xe7, 0x57, 0x05, 0xe1, 0xd5, 0x96, 0xc0, 0x36, 0x52, 0x33, 0xf0, 0xc3, 0x52, 0x44,
    +	0x2b, 0xd3, 0x13, 0x01, 0x90, 0x12, 0xc7, 0x0f, 0xd0, 0x0e, 0x83, 0x6c, 0x1e, 0x27, 0x91, 0xd4,
    +	0xd4, 0x3c, 0xa3, 0xc8, 0xed, 0x9d, 0xa7, 0x25, 0x44, 0xea, 0x3d, 0x7c, 0x88, 0x0c, 0x0e, 0xd9,
    +	0x34, 0x4e, 0x7c, 0x2e, 0xa8, 0x7d, 0x49, 0x7d, 0xbd, 0xc8, 0x6d, 0xe3, 0xbc, 0x85, 0x49, 0x97,
    +	0xe3, 0xfc, 0xae, 0xa0, 0xbd, 0xa5, 0xc3, 0xe3, 0x53, 0xa4, 0x5d, 0xd2, 0x4c, 0x98, 0x58, 0x0e,
    +	0x83, 0x71, 0x34, 0x5c, 0x6b, 0xdb, 0xe7, 0x25, 0xd1, 0xbb, 0x57, 0xdd, 0xaf, 0x56, 0x01, 0x8c,
    +	0x34, 0x1a, 0x95, 0x9e, 0xb8, 0x3a, 0x31, 0x2e, 0x1b, 0xf5, 0x04, 0x71, 0x49, 0x4f, 0x46, 0x92,
    +	0x46, 0xc3, 0xf9, 0x53, 0x41, 0xbb, 0x75, 0xc5, 0x67, 0x34, 0xe3, 0xa2, 0x05, 0xe4, 0xb0, 0x28,
    +	0x6d, 0x0b, 0xc8, 0x26, 0x91, 0x28, 0x7e, 0x8c, 0x34, 0x39, 0xf2, 0x01, 0xbd, 0x2e, 0xef, 0xc3,
    +	0x3b, 0x10, 0xc2, 0x67, 0x15, 0xf6, 0x32, 0xb7, 0xdf, 0x5c, 0x7d, 0xce, 0xdc, 0x7a, 0x9b, 0x34,
    +	0xc1, 0x22, 0x4d, 0x4a, 0x33, 0x2e, 0x5d, 0x55, 0xcb, 0x34, 0x22, 0x3d, 0x91, 0xa8, 0xb0, 0xde,
    +	0x4f, 0xd3, 0x3a, 0x4c, 0x4e, 0xa3, 0x5e, 0x5a, 0x7f, 0xdc, 0xc2, 0xa4, 0xcb, 0x71, 0xfe, 0xda,
    +	0x6a, 0xad, 0x7f, 0x7a, 0x1d, 0x07, 0x80, 0x7f, 0x40, 0x9a, 0x78, 0x19, 0x43, 0x9f, 0xfb, 0xf2,
    +	0x34, 0xc6, 0xd1, 0x07, 0x1d, 0xab, 0x9a, 0x07, 0xce, 0x4d, 0xaf, 0x22, 0x01, 0x30, 0x57, 0xb0,
    +	0xdb, 0x09, 0xff, 0x06, 0xb8, 0xdf, 0x3e, 0x2f, 0x2d, 0x46, 0x1a, 0x55, 0xfc, 0x08, 0x19, 0xd5,
    +	0x53, 0x76, 0xbe, 0x48, 0xa1, 0x2a, 0xd3, 0xa9, 0x42, 0x8c, 0xe3, 0x76, 0xeb, 0xe5, 0xf2, 0x92,
    +	0x74, 0xc3, 0x30, 0x41, 0x3a, 0x54, 0x85, 0xd7, 0x77, 0xfa, 0xf6, 0xc6, 0xd1, 0xf2, 0xee, 0x57,
    +	0x69, 0xf4, 0x1a, 0x61, 0xa4, 0x95, 0xc1, 0x5f, 0x22, 0x55, 0x18, 0xc9, 0xcc, 0xbe, 0xd4, 0x7b,
    +	0xb0, 0x51, 0x4f, 0x98, 0xef, 0xed, 0x55, 0x9a, 0xaa, 0x58, 0x31, 0x52, 0x4a, 0x38, 0x7f, 0x28,
    +	0xe8, 0xfe, 0x92, 0xb3, 0x5f, 0xc7, 0x8c, 0xe3, 0xef, 0x56, 0xdc, 0x75, 0x5f, 0xcd, 0x5d, 0x11,
    +	0x2d, 0xbd, 0x6d, 0xda, 0xb2, 0x46, 0x3a, 0xce, 0x7e, 0x85, 0xd4, 0x98, 0xc3, 0xb4, 0xf6, 0x63,
    +	0xf3, 0x53, 0x23, 0x0b, 0x6b, 0x0f, 0xf0, 0x85, 0x08, 0x26, 0xa5, 0x86, 0x73, 0x80, 0x76, 0xaa,
    +	0xce, 0xc7, 0xc3, 0xa5, 0xee, 0xde, 0xad, 0xe8, 0x9d, 0x0e, 0xaf, 0xc8, 0x62, 0xd8, 0x36, 0x93,
    +	0xbd, 0x4f, 0x6f, 0x6e, 0xad, 0xde, 0xf3, 0x5b, 0xab, 0xf7, 0xe2, 0xd6, 0xea, 0xfd, 0x5c, 0x58,
    +	0xca, 0x4d, 0x61, 0x29, 0xcf, 0x0b, 0x4b, 0x79, 0x51, 0x58, 0xca, 0xdf, 0x85, 0xa5, 0xfc, 0xf6,
    +	0x8f, 0xd5, 0x7b, 0xb6, 0xbf, 0xe6, 0x97, 0xe2, 0xbf, 0x00, 0x00, 0x00, 0xff, 0xff, 0xf4, 0xfc,
    +	0xbe, 0xad, 0x6c, 0x08, 0x00, 0x00,
     }
     
     func (m *Endpoint) Marshal() (dAtA []byte, err error) {
    @@ -500,6 +531,20 @@ func (m *EndpointHints) MarshalToSizedBuffer(dAtA []byte) (int, error) {
     	_ = i
     	var l int
     	_ = l
    +	if len(m.ForNodes) > 0 {
    +		for iNdEx := len(m.ForNodes) - 1; iNdEx >= 0; iNdEx-- {
    +			{
    +				size, err := m.ForNodes[iNdEx].MarshalToSizedBuffer(dAtA[:i])
    +				if err != nil {
    +					return 0, err
    +				}
    +				i -= size
    +				i = encodeVarintGenerated(dAtA, i, uint64(size))
    +			}
    +			i--
    +			dAtA[i] = 0x12
    +		}
    +	}
     	if len(m.ForZones) > 0 {
     		for iNdEx := len(m.ForZones) - 1; iNdEx >= 0; iNdEx-- {
     			{
    @@ -679,6 +724,34 @@ func (m *EndpointSliceList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
     	return len(dAtA) - i, nil
     }
     
    +func (m *ForNode) Marshal() (dAtA []byte, err error) {
    +	size := m.Size()
    +	dAtA = make([]byte, size)
    +	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    +	if err != nil {
    +		return nil, err
    +	}
    +	return dAtA[:n], nil
    +}
    +
    +func (m *ForNode) MarshalTo(dAtA []byte) (int, error) {
    +	size := m.Size()
    +	return m.MarshalToSizedBuffer(dAtA[:size])
    +}
    +
    +func (m *ForNode) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    +	i := len(dAtA)
    +	_ = i
    +	var l int
    +	_ = l
    +	i -= len(m.Name)
    +	copy(dAtA[i:], m.Name)
    +	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
    +	i--
    +	dAtA[i] = 0xa
    +	return len(dAtA) - i, nil
    +}
    +
     func (m *ForZone) Marshal() (dAtA []byte, err error) {
     	size := m.Size()
     	dAtA = make([]byte, size)
    @@ -793,6 +866,12 @@ func (m *EndpointHints) Size() (n int) {
     			n += 1 + l + sovGenerated(uint64(l))
     		}
     	}
    +	if len(m.ForNodes) > 0 {
    +		for _, e := range m.ForNodes {
    +			l = e.Size()
    +			n += 1 + l + sovGenerated(uint64(l))
    +		}
    +	}
     	return n
     }
     
    @@ -862,6 +941,17 @@ func (m *EndpointSliceList) Size() (n int) {
     	return n
     }
     
    +func (m *ForNode) Size() (n int) {
    +	if m == nil {
    +		return 0
    +	}
    +	var l int
    +	_ = l
    +	l = len(m.Name)
    +	n += 1 + l + sovGenerated(uint64(l))
    +	return n
    +}
    +
     func (m *ForZone) Size() (n int) {
     	if m == nil {
     		return 0
    @@ -927,8 +1017,14 @@ func (this *EndpointHints) String() string {
     		repeatedStringForForZones += strings.Replace(strings.Replace(f.String(), "ForZone", "ForZone", 1), `&`, ``, 1) + ","
     	}
     	repeatedStringForForZones += "}"
    +	repeatedStringForForNodes := "[]ForNode{"
    +	for _, f := range this.ForNodes {
    +		repeatedStringForForNodes += strings.Replace(strings.Replace(f.String(), "ForNode", "ForNode", 1), `&`, ``, 1) + ","
    +	}
    +	repeatedStringForForNodes += "}"
     	s := strings.Join([]string{`&EndpointHints{`,
     		`ForZones:` + repeatedStringForForZones + `,`,
    +		`ForNodes:` + repeatedStringForForNodes + `,`,
     		`}`,
     	}, "")
     	return s
    @@ -985,6 +1081,16 @@ func (this *EndpointSliceList) String() string {
     	}, "")
     	return s
     }
    +func (this *ForNode) String() string {
    +	if this == nil {
    +		return "nil"
    +	}
    +	s := strings.Join([]string{`&ForNode{`,
    +		`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
    +		`}`,
    +	}, "")
    +	return s
    +}
     func (this *ForZone) String() string {
     	if this == nil {
     		return "nil"
    @@ -1592,6 +1698,40 @@ func (m *EndpointHints) Unmarshal(dAtA []byte) error {
     				return err
     			}
     			iNdEx = postIndex
    +		case 2:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field ForNodes", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.ForNodes = append(m.ForNodes, ForNode{})
    +			if err := m.ForNodes[len(m.ForNodes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
     		default:
     			iNdEx = preIndex
     			skippy, err := skipGenerated(dAtA[iNdEx:])
    @@ -2082,6 +2222,88 @@ func (m *EndpointSliceList) Unmarshal(dAtA []byte) error {
     	}
     	return nil
     }
    +func (m *ForNode) Unmarshal(dAtA []byte) error {
    +	l := len(dAtA)
    +	iNdEx := 0
    +	for iNdEx < l {
    +		preIndex := iNdEx
    +		var wire uint64
    +		for shift := uint(0); ; shift += 7 {
    +			if shift >= 64 {
    +				return ErrIntOverflowGenerated
    +			}
    +			if iNdEx >= l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			b := dAtA[iNdEx]
    +			iNdEx++
    +			wire |= uint64(b&0x7F) << shift
    +			if b < 0x80 {
    +				break
    +			}
    +		}
    +		fieldNum := int32(wire >> 3)
    +		wireType := int(wire & 0x7)
    +		if wireType == 4 {
    +			return fmt.Errorf("proto: ForNode: wiretype end group for non-group")
    +		}
    +		if fieldNum <= 0 {
    +			return fmt.Errorf("proto: ForNode: illegal tag %d (wire type %d)", fieldNum, wire)
    +		}
    +		switch fieldNum {
    +		case 1:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.Name = string(dAtA[iNdEx:postIndex])
    +			iNdEx = postIndex
    +		default:
    +			iNdEx = preIndex
    +			skippy, err := skipGenerated(dAtA[iNdEx:])
    +			if err != nil {
    +				return err
    +			}
    +			if (skippy < 0) || (iNdEx+skippy) < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if (iNdEx + skippy) > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			iNdEx += skippy
    +		}
    +	}
    +
    +	if iNdEx > l {
    +		return io.ErrUnexpectedEOF
    +	}
    +	return nil
    +}
     func (m *ForZone) Unmarshal(dAtA []byte) error {
     	l := len(dAtA)
     	iNdEx := 0
    diff --git a/vendor/k8s.io/api/discovery/v1/generated.proto b/vendor/k8s.io/api/discovery/v1/generated.proto
    index 8ddf0dc5d..569d8a916 100644
    --- a/vendor/k8s.io/api/discovery/v1/generated.proto
    +++ b/vendor/k8s.io/api/discovery/v1/generated.proto
    @@ -31,12 +31,12 @@ option go_package = "k8s.io/api/discovery/v1";
     
     // Endpoint represents a single logical "backend" implementing a service.
     message Endpoint {
    -  // addresses of this endpoint. The contents of this field are interpreted
    -  // according to the corresponding EndpointSlice addressType field. Consumers
    -  // must handle different types of addresses in the context of their own
    -  // capabilities. This must contain at least one address but no more than
    -  // 100. These are all assumed to be fungible and clients may choose to only
    -  // use the first element. Refer to: https://issue.k8s.io/106267
    +  // addresses of this endpoint. For EndpointSlices of addressType "IPv4" or "IPv6",
    +  // the values are IP addresses in canonical form. The syntax and semantics of
    +  // other addressType values are not defined. This must contain at least one
    +  // address but no more than 100. EndpointSlices generated by the EndpointSlice
    +  // controller will always have exactly 1 address. No semantics are defined for
    +  // additional addresses beyond the first, and kube-proxy does not look at them.
       // +listType=set
       repeated string addresses = 1;
     
    @@ -82,36 +82,42 @@ message Endpoint {
     
     // EndpointConditions represents the current condition of an endpoint.
     message EndpointConditions {
    -  // ready indicates that this endpoint is prepared to receive traffic,
    +  // ready indicates that this endpoint is ready to receive traffic,
       // according to whatever system is managing the endpoint. A nil value
    -  // indicates an unknown state. In most cases consumers should interpret this
    -  // unknown state as ready. For compatibility reasons, ready should never be
    -  // "true" for terminating endpoints, except when the normal readiness
    -  // behavior is being explicitly overridden, for example when the associated
    -  // Service has set the publishNotReadyAddresses flag.
    +  // should be interpreted as "true". In general, an endpoint should be
    +  // marked ready if it is serving and not terminating, though this can
    +  // be overridden in some cases, such as when the associated Service has
    +  // set the publishNotReadyAddresses flag.
       // +optional
       optional bool ready = 1;
     
    -  // serving is identical to ready except that it is set regardless of the
    -  // terminating state of endpoints. This condition should be set to true for
    -  // a ready endpoint that is terminating. If nil, consumers should defer to
    -  // the ready condition.
    +  // serving indicates that this endpoint is able to receive traffic,
    +  // according to whatever system is managing the endpoint. For endpoints
    +  // backed by pods, the EndpointSlice controller will mark the endpoint
    +  // as serving if the pod's Ready condition is True. A nil value should be
    +  // interpreted as "true".
       // +optional
       optional bool serving = 2;
     
       // terminating indicates that this endpoint is terminating. A nil value
    -  // indicates an unknown state. Consumers should interpret this unknown state
    -  // to mean that the endpoint is not terminating.
    +  // should be interpreted as "false".
       // +optional
       optional bool terminating = 3;
     }
     
     // EndpointHints provides hints describing how an endpoint should be consumed.
     message EndpointHints {
    -  // forZones indicates the zone(s) this endpoint should be consumed by to
    -  // enable topology aware routing.
    +  // forZones indicates the zone(s) this endpoint should be consumed by when
    +  // using topology aware routing. May contain a maximum of 8 entries.
       // +listType=atomic
       repeated ForZone forZones = 1;
    +
    +  // forNodes indicates the node(s) this endpoint should be consumed by when
    +  // using topology aware routing. May contain a maximum of 8 entries.
    +  // This is an Alpha feature and is only used when the PreferSameTrafficDistribution
    +  // feature gate is enabled.
    +  // +listType=atomic
    +  repeated ForNode forNodes = 2;
     }
     
     // EndpointPort represents a Port used by an EndpointSlice
    @@ -132,8 +138,9 @@ message EndpointPort {
       optional string protocol = 2;
     
       // port represents the port number of the endpoint.
    -  // If this is not specified, ports are not restricted and must be
    -  // interpreted in the context of the specific consumer.
    +  // If the EndpointSlice is derived from a Kubernetes service, this must be set
    +  // to the service's target port. EndpointSlices used for other purposes may have
    +  // a nil port.
       optional int32 port = 3;
     
       // The application protocol for this port.
    @@ -155,9 +162,12 @@ message EndpointPort {
       optional string appProtocol = 4;
     }
     
    -// EndpointSlice represents a subset of the endpoints that implement a service.
    -// For a given service there may be multiple EndpointSlice objects, selected by
    -// labels, which must be joined to produce the full set of endpoints.
    +// EndpointSlice represents a set of service endpoints. Most EndpointSlices are created by
    +// the EndpointSlice controller to represent the Pods selected by Service objects. For a
    +// given service there may be multiple EndpointSlice objects which must be joined to
    +// produce the full set of endpoints; you can find all of the slices for a given service
    +// by listing EndpointSlices in the service's namespace whose `kubernetes.io/service-name`
    +// label contains the service's name.
     message EndpointSlice {
       // Standard object's metadata.
       // +optional
    @@ -169,7 +179,10 @@ message EndpointSlice {
       // supported:
       // * IPv4: Represents an IPv4 Address.
       // * IPv6: Represents an IPv6 Address.
    -  // * FQDN: Represents a Fully Qualified Domain Name.
    +  // * FQDN: Represents a Fully Qualified Domain Name. (Deprecated)
    +  // The EndpointSlice controller only generates, and kube-proxy only processes,
    +  // slices of addressType "IPv4" and "IPv6". No semantics are defined for
    +  // the "FQDN" type.
       optional string addressType = 4;
     
       // endpoints is a list of unique endpoints in this slice. Each slice may
    @@ -178,10 +191,11 @@ message EndpointSlice {
       repeated Endpoint endpoints = 2;
     
       // ports specifies the list of network ports exposed by each endpoint in
    -  // this slice. Each port must have a unique name. When ports is empty, it
    -  // indicates that there are no defined ports. When a port is defined with a
    -  // nil port value, it indicates "all ports". Each slice may include a
    +  // this slice. Each port must have a unique name. Each slice may include a
       // maximum of 100 ports.
    +  // Services always have at least 1 port, so EndpointSlices generated by the
    +  // EndpointSlice controller will likewise always have at least 1 port.
    +  // EndpointSlices used for other purposes may have an empty ports list.
       // +optional
       // +listType=atomic
       repeated EndpointPort ports = 3;
    @@ -197,6 +211,12 @@ message EndpointSliceList {
       repeated EndpointSlice items = 2;
     }
     
    +// ForNode provides information about which nodes should consume this endpoint.
    +message ForNode {
    +  // name represents the name of the node.
    +  optional string name = 1;
    +}
    +
     // ForZone provides information about which zones should consume this endpoint.
     message ForZone {
       // name represents the name of the zone.
    diff --git a/vendor/k8s.io/api/discovery/v1/types.go b/vendor/k8s.io/api/discovery/v1/types.go
    index d6a9d0fce..6f2695316 100644
    --- a/vendor/k8s.io/api/discovery/v1/types.go
    +++ b/vendor/k8s.io/api/discovery/v1/types.go
    @@ -25,9 +25,12 @@ import (
     // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
     // +k8s:prerelease-lifecycle-gen:introduced=1.21
     
    -// EndpointSlice represents a subset of the endpoints that implement a service.
    -// For a given service there may be multiple EndpointSlice objects, selected by
    -// labels, which must be joined to produce the full set of endpoints.
    +// EndpointSlice represents a set of service endpoints. Most EndpointSlices are created by
    +// the EndpointSlice controller to represent the Pods selected by Service objects. For a
    +// given service there may be multiple EndpointSlice objects which must be joined to
    +// produce the full set of endpoints; you can find all of the slices for a given service
    +// by listing EndpointSlices in the service's namespace whose `kubernetes.io/service-name`
    +// label contains the service's name.
     type EndpointSlice struct {
     	metav1.TypeMeta `json:",inline"`
     
    @@ -41,7 +44,10 @@ type EndpointSlice struct {
     	// supported:
     	// * IPv4: Represents an IPv4 Address.
     	// * IPv6: Represents an IPv6 Address.
    -	// * FQDN: Represents a Fully Qualified Domain Name.
    +	// * FQDN: Represents a Fully Qualified Domain Name. (Deprecated)
    +	// The EndpointSlice controller only generates, and kube-proxy only processes,
    +	// slices of addressType "IPv4" and "IPv6". No semantics are defined for
    +	// the "FQDN" type.
     	AddressType AddressType `json:"addressType" protobuf:"bytes,4,rep,name=addressType"`
     
     	// endpoints is a list of unique endpoints in this slice. Each slice may
    @@ -50,10 +56,11 @@ type EndpointSlice struct {
     	Endpoints []Endpoint `json:"endpoints" protobuf:"bytes,2,rep,name=endpoints"`
     
     	// ports specifies the list of network ports exposed by each endpoint in
    -	// this slice. Each port must have a unique name. When ports is empty, it
    -	// indicates that there are no defined ports. When a port is defined with a
    -	// nil port value, it indicates "all ports". Each slice may include a
    +	// this slice. Each port must have a unique name. Each slice may include a
     	// maximum of 100 ports.
    +	// Services always have at least 1 port, so EndpointSlices generated by the
    +	// EndpointSlice controller will likewise always have at least 1 port.
    +	// EndpointSlices used for other purposes may have an empty ports list.
     	// +optional
     	// +listType=atomic
     	Ports []EndpointPort `json:"ports" protobuf:"bytes,3,rep,name=ports"`
    @@ -76,12 +83,12 @@ const (
     
     // Endpoint represents a single logical "backend" implementing a service.
     type Endpoint struct {
    -	// addresses of this endpoint. The contents of this field are interpreted
    -	// according to the corresponding EndpointSlice addressType field. Consumers
    -	// must handle different types of addresses in the context of their own
    -	// capabilities. This must contain at least one address but no more than
    -	// 100. These are all assumed to be fungible and clients may choose to only
    -	// use the first element. Refer to: https://issue.k8s.io/106267
    +	// addresses of this endpoint. For EndpointSlices of addressType "IPv4" or "IPv6",
    +	// the values are IP addresses in canonical form. The syntax and semantics of
    +	// other addressType values are not defined. This must contain at least one
    +	// address but no more than 100. EndpointSlices generated by the EndpointSlice
    +	// controller will always have exactly 1 address. No semantics are defined for
    +	// additional addresses beyond the first, and kube-proxy does not look at them.
     	// +listType=set
     	Addresses []string `json:"addresses" protobuf:"bytes,1,rep,name=addresses"`
     
    @@ -127,36 +134,42 @@ type Endpoint struct {
     
     // EndpointConditions represents the current condition of an endpoint.
     type EndpointConditions struct {
    -	// ready indicates that this endpoint is prepared to receive traffic,
    +	// ready indicates that this endpoint is ready to receive traffic,
     	// according to whatever system is managing the endpoint. A nil value
    -	// indicates an unknown state. In most cases consumers should interpret this
    -	// unknown state as ready. For compatibility reasons, ready should never be
    -	// "true" for terminating endpoints, except when the normal readiness
    -	// behavior is being explicitly overridden, for example when the associated
    -	// Service has set the publishNotReadyAddresses flag.
    +	// should be interpreted as "true". In general, an endpoint should be
    +	// marked ready if it is serving and not terminating, though this can
    +	// be overridden in some cases, such as when the associated Service has
    +	// set the publishNotReadyAddresses flag.
     	// +optional
     	Ready *bool `json:"ready,omitempty" protobuf:"bytes,1,name=ready"`
     
    -	// serving is identical to ready except that it is set regardless of the
    -	// terminating state of endpoints. This condition should be set to true for
    -	// a ready endpoint that is terminating. If nil, consumers should defer to
    -	// the ready condition.
    +	// serving indicates that this endpoint is able to receive traffic,
    +	// according to whatever system is managing the endpoint. For endpoints
    +	// backed by pods, the EndpointSlice controller will mark the endpoint
    +	// as serving if the pod's Ready condition is True. A nil value should be
    +	// interpreted as "true".
     	// +optional
     	Serving *bool `json:"serving,omitempty" protobuf:"bytes,2,name=serving"`
     
     	// terminating indicates that this endpoint is terminating. A nil value
    -	// indicates an unknown state. Consumers should interpret this unknown state
    -	// to mean that the endpoint is not terminating.
    +	// should be interpreted as "false".
     	// +optional
     	Terminating *bool `json:"terminating,omitempty" protobuf:"bytes,3,name=terminating"`
     }
     
     // EndpointHints provides hints describing how an endpoint should be consumed.
     type EndpointHints struct {
    -	// forZones indicates the zone(s) this endpoint should be consumed by to
    -	// enable topology aware routing.
    +	// forZones indicates the zone(s) this endpoint should be consumed by when
    +	// using topology aware routing. May contain a maximum of 8 entries.
     	// +listType=atomic
     	ForZones []ForZone `json:"forZones,omitempty" protobuf:"bytes,1,name=forZones"`
    +
    +	// forNodes indicates the node(s) this endpoint should be consumed by when
    +	// using topology aware routing. May contain a maximum of 8 entries.
    +	// This is an Alpha feature and is only used when the PreferSameTrafficDistribution
    +	// feature gate is enabled.
    +	// +listType=atomic
    +	ForNodes []ForNode `json:"forNodes,omitempty" protobuf:"bytes,2,name=forNodes"`
     }
     
     // ForZone provides information about which zones should consume this endpoint.
    @@ -165,6 +178,12 @@ type ForZone struct {
     	Name string `json:"name" protobuf:"bytes,1,name=name"`
     }
     
    +// ForNode provides information about which nodes should consume this endpoint.
    +type ForNode struct {
    +	// name represents the name of the node.
    +	Name string `json:"name" protobuf:"bytes,1,name=name"`
    +}
    +
     // EndpointPort represents a Port used by an EndpointSlice
     // +structType=atomic
     type EndpointPort struct {
    @@ -183,8 +202,9 @@ type EndpointPort struct {
     	Protocol *v1.Protocol `json:"protocol,omitempty" protobuf:"bytes,2,name=protocol"`
     
     	// port represents the port number of the endpoint.
    -	// If this is not specified, ports are not restricted and must be
    -	// interpreted in the context of the specific consumer.
    +	// If the EndpointSlice is derived from a Kubernetes service, this must be set
    +	// to the service's target port. EndpointSlices used for other purposes may have
    +	// a nil port.
     	Port *int32 `json:"port,omitempty" protobuf:"bytes,3,opt,name=port"`
     
     	// The application protocol for this port.
    diff --git a/vendor/k8s.io/api/discovery/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/discovery/v1/types_swagger_doc_generated.go
    index 41c306056..ac5b853b9 100644
    --- a/vendor/k8s.io/api/discovery/v1/types_swagger_doc_generated.go
    +++ b/vendor/k8s.io/api/discovery/v1/types_swagger_doc_generated.go
    @@ -29,7 +29,7 @@ package v1
     // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT.
     var map_Endpoint = map[string]string{
     	"":                   "Endpoint represents a single logical \"backend\" implementing a service.",
    -	"addresses":          "addresses of this endpoint. The contents of this field are interpreted according to the corresponding EndpointSlice addressType field. Consumers must handle different types of addresses in the context of their own capabilities. This must contain at least one address but no more than 100. These are all assumed to be fungible and clients may choose to only use the first element. Refer to: https://issue.k8s.io/106267",
    +	"addresses":          "addresses of this endpoint. For EndpointSlices of addressType \"IPv4\" or \"IPv6\", the values are IP addresses in canonical form. The syntax and semantics of other addressType values are not defined. This must contain at least one address but no more than 100. EndpointSlices generated by the EndpointSlice controller will always have exactly 1 address. No semantics are defined for additional addresses beyond the first, and kube-proxy does not look at them.",
     	"conditions":         "conditions contains information about the current status of the endpoint.",
     	"hostname":           "hostname of this endpoint. This field may be used by consumers of endpoints to distinguish endpoints from each other (e.g. in DNS names). Multiple endpoints which use the same hostname should be considered fungible (e.g. multiple A values in DNS). Must be lowercase and pass DNS Label (RFC 1123) validation.",
     	"targetRef":          "targetRef is a reference to a Kubernetes object that represents this endpoint.",
    @@ -45,9 +45,9 @@ func (Endpoint) SwaggerDoc() map[string]string {
     
     var map_EndpointConditions = map[string]string{
     	"":            "EndpointConditions represents the current condition of an endpoint.",
    -	"ready":       "ready indicates that this endpoint is prepared to receive traffic, according to whatever system is managing the endpoint. A nil value indicates an unknown state. In most cases consumers should interpret this unknown state as ready. For compatibility reasons, ready should never be \"true\" for terminating endpoints, except when the normal readiness behavior is being explicitly overridden, for example when the associated Service has set the publishNotReadyAddresses flag.",
    -	"serving":     "serving is identical to ready except that it is set regardless of the terminating state of endpoints. This condition should be set to true for a ready endpoint that is terminating. If nil, consumers should defer to the ready condition.",
    -	"terminating": "terminating indicates that this endpoint is terminating. A nil value indicates an unknown state. Consumers should interpret this unknown state to mean that the endpoint is not terminating.",
    +	"ready":       "ready indicates that this endpoint is ready to receive traffic, according to whatever system is managing the endpoint. A nil value should be interpreted as \"true\". In general, an endpoint should be marked ready if it is serving and not terminating, though this can be overridden in some cases, such as when the associated Service has set the publishNotReadyAddresses flag.",
    +	"serving":     "serving indicates that this endpoint is able to receive traffic, according to whatever system is managing the endpoint. For endpoints backed by pods, the EndpointSlice controller will mark the endpoint as serving if the pod's Ready condition is True. A nil value should be interpreted as \"true\".",
    +	"terminating": "terminating indicates that this endpoint is terminating. A nil value should be interpreted as \"false\".",
     }
     
     func (EndpointConditions) SwaggerDoc() map[string]string {
    @@ -56,7 +56,8 @@ func (EndpointConditions) SwaggerDoc() map[string]string {
     
     var map_EndpointHints = map[string]string{
     	"":         "EndpointHints provides hints describing how an endpoint should be consumed.",
    -	"forZones": "forZones indicates the zone(s) this endpoint should be consumed by to enable topology aware routing.",
    +	"forZones": "forZones indicates the zone(s) this endpoint should be consumed by when using topology aware routing. May contain a maximum of 8 entries.",
    +	"forNodes": "forNodes indicates the node(s) this endpoint should be consumed by when using topology aware routing. May contain a maximum of 8 entries. This is an Alpha feature and is only used when the PreferSameTrafficDistribution feature gate is enabled.",
     }
     
     func (EndpointHints) SwaggerDoc() map[string]string {
    @@ -67,7 +68,7 @@ var map_EndpointPort = map[string]string{
     	"":            "EndpointPort represents a Port used by an EndpointSlice",
     	"name":        "name represents the name of this port. All ports in an EndpointSlice must have a unique name. If the EndpointSlice is derived from a Kubernetes service, this corresponds to the Service.ports[].name. Name must either be an empty string or pass DNS_LABEL validation: * must be no more than 63 characters long. * must consist of lower case alphanumeric characters or '-'. * must start and end with an alphanumeric character. Default is empty string.",
     	"protocol":    "protocol represents the IP protocol for this port. Must be UDP, TCP, or SCTP. Default is TCP.",
    -	"port":        "port represents the port number of the endpoint. If this is not specified, ports are not restricted and must be interpreted in the context of the specific consumer.",
    +	"port":        "port represents the port number of the endpoint. If the EndpointSlice is derived from a Kubernetes service, this must be set to the service's target port. EndpointSlices used for other purposes may have a nil port.",
     	"appProtocol": "The application protocol for this port. This is used as a hint for implementations to offer richer behavior for protocols that they understand. This field follows standard Kubernetes label syntax. Valid values are either:\n\n* Un-prefixed protocol names - reserved for IANA standard service names (as per RFC-6335 and https://www.iana.org/assignments/service-names).\n\n* Kubernetes-defined prefixed names:\n  * 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior-\n  * 'kubernetes.io/ws'  - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455\n  * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455\n\n* Other protocols should use implementation-defined prefixed names such as mycompany.com/my-custom-protocol.",
     }
     
    @@ -76,11 +77,11 @@ func (EndpointPort) SwaggerDoc() map[string]string {
     }
     
     var map_EndpointSlice = map[string]string{
    -	"":            "EndpointSlice represents a subset of the endpoints that implement a service. For a given service there may be multiple EndpointSlice objects, selected by labels, which must be joined to produce the full set of endpoints.",
    +	"":            "EndpointSlice represents a set of service endpoints. Most EndpointSlices are created by the EndpointSlice controller to represent the Pods selected by Service objects. For a given service there may be multiple EndpointSlice objects which must be joined to produce the full set of endpoints; you can find all of the slices for a given service by listing EndpointSlices in the service's namespace whose `kubernetes.io/service-name` label contains the service's name.",
     	"metadata":    "Standard object's metadata.",
    -	"addressType": "addressType specifies the type of address carried by this EndpointSlice. All addresses in this slice must be the same type. This field is immutable after creation. The following address types are currently supported: * IPv4: Represents an IPv4 Address. * IPv6: Represents an IPv6 Address. * FQDN: Represents a Fully Qualified Domain Name.",
    +	"addressType": "addressType specifies the type of address carried by this EndpointSlice. All addresses in this slice must be the same type. This field is immutable after creation. The following address types are currently supported: * IPv4: Represents an IPv4 Address. * IPv6: Represents an IPv6 Address. * FQDN: Represents a Fully Qualified Domain Name. (Deprecated) The EndpointSlice controller only generates, and kube-proxy only processes, slices of addressType \"IPv4\" and \"IPv6\". No semantics are defined for the \"FQDN\" type.",
     	"endpoints":   "endpoints is a list of unique endpoints in this slice. Each slice may include a maximum of 1000 endpoints.",
    -	"ports":       "ports specifies the list of network ports exposed by each endpoint in this slice. Each port must have a unique name. When ports is empty, it indicates that there are no defined ports. When a port is defined with a nil port value, it indicates \"all ports\". Each slice may include a maximum of 100 ports.",
    +	"ports":       "ports specifies the list of network ports exposed by each endpoint in this slice. Each port must have a unique name. Each slice may include a maximum of 100 ports. Services always have at least 1 port, so EndpointSlices generated by the EndpointSlice controller will likewise always have at least 1 port. EndpointSlices used for other purposes may have an empty ports list.",
     }
     
     func (EndpointSlice) SwaggerDoc() map[string]string {
    @@ -97,6 +98,15 @@ func (EndpointSliceList) SwaggerDoc() map[string]string {
     	return map_EndpointSliceList
     }
     
    +var map_ForNode = map[string]string{
    +	"":     "ForNode provides information about which nodes should consume this endpoint.",
    +	"name": "name represents the name of the node.",
    +}
    +
    +func (ForNode) SwaggerDoc() map[string]string {
    +	return map_ForNode
    +}
    +
     var map_ForZone = map[string]string{
     	"":     "ForZone provides information about which zones should consume this endpoint.",
     	"name": "name represents the name of the zone.",
    diff --git a/vendor/k8s.io/api/discovery/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/discovery/v1/zz_generated.deepcopy.go
    index caa872af0..60eada3b9 100644
    --- a/vendor/k8s.io/api/discovery/v1/zz_generated.deepcopy.go
    +++ b/vendor/k8s.io/api/discovery/v1/zz_generated.deepcopy.go
    @@ -119,6 +119,11 @@ func (in *EndpointHints) DeepCopyInto(out *EndpointHints) {
     		*out = make([]ForZone, len(*in))
     		copy(*out, *in)
     	}
    +	if in.ForNodes != nil {
    +		in, out := &in.ForNodes, &out.ForNodes
    +		*out = make([]ForNode, len(*in))
    +		copy(*out, *in)
    +	}
     	return
     }
     
    @@ -241,6 +246,22 @@ func (in *EndpointSliceList) DeepCopyObject() runtime.Object {
     	return nil
     }
     
    +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
    +func (in *ForNode) DeepCopyInto(out *ForNode) {
    +	*out = *in
    +	return
    +}
    +
    +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ForNode.
    +func (in *ForNode) DeepCopy() *ForNode {
    +	if in == nil {
    +		return nil
    +	}
    +	out := new(ForNode)
    +	in.DeepCopyInto(out)
    +	return out
    +}
    +
     // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
     func (in *ForZone) DeepCopyInto(out *ForZone) {
     	*out = *in
    diff --git a/vendor/k8s.io/api/discovery/v1beta1/doc.go b/vendor/k8s.io/api/discovery/v1beta1/doc.go
    index 7d7084802..f12087eff 100644
    --- a/vendor/k8s.io/api/discovery/v1beta1/doc.go
    +++ b/vendor/k8s.io/api/discovery/v1beta1/doc.go
    @@ -20,4 +20,4 @@ limitations under the License.
     // +k8s:prerelease-lifecycle-gen=true
     // +groupName=discovery.k8s.io
     
    -package v1beta1 // import "k8s.io/api/discovery/v1beta1"
    +package v1beta1
    diff --git a/vendor/k8s.io/api/discovery/v1beta1/generated.pb.go b/vendor/k8s.io/api/discovery/v1beta1/generated.pb.go
    index 46935574b..de3257786 100644
    --- a/vendor/k8s.io/api/discovery/v1beta1/generated.pb.go
    +++ b/vendor/k8s.io/api/discovery/v1beta1/generated.pb.go
    @@ -214,10 +214,38 @@ func (m *EndpointSliceList) XXX_DiscardUnknown() {
     
     var xxx_messageInfo_EndpointSliceList proto.InternalMessageInfo
     
    +func (m *ForNode) Reset()      { *m = ForNode{} }
    +func (*ForNode) ProtoMessage() {}
    +func (*ForNode) Descriptor() ([]byte, []int) {
    +	return fileDescriptor_6555bad15de200e0, []int{6}
    +}
    +func (m *ForNode) XXX_Unmarshal(b []byte) error {
    +	return m.Unmarshal(b)
    +}
    +func (m *ForNode) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    +	b = b[:cap(b)]
    +	n, err := m.MarshalToSizedBuffer(b)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return b[:n], nil
    +}
    +func (m *ForNode) XXX_Merge(src proto.Message) {
    +	xxx_messageInfo_ForNode.Merge(m, src)
    +}
    +func (m *ForNode) XXX_Size() int {
    +	return m.Size()
    +}
    +func (m *ForNode) XXX_DiscardUnknown() {
    +	xxx_messageInfo_ForNode.DiscardUnknown(m)
    +}
    +
    +var xxx_messageInfo_ForNode proto.InternalMessageInfo
    +
     func (m *ForZone) Reset()      { *m = ForZone{} }
     func (*ForZone) ProtoMessage() {}
     func (*ForZone) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6555bad15de200e0, []int{6}
    +	return fileDescriptor_6555bad15de200e0, []int{7}
     }
     func (m *ForZone) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -250,6 +278,7 @@ func init() {
     	proto.RegisterType((*EndpointPort)(nil), "k8s.io.api.discovery.v1beta1.EndpointPort")
     	proto.RegisterType((*EndpointSlice)(nil), "k8s.io.api.discovery.v1beta1.EndpointSlice")
     	proto.RegisterType((*EndpointSliceList)(nil), "k8s.io.api.discovery.v1beta1.EndpointSliceList")
    +	proto.RegisterType((*ForNode)(nil), "k8s.io.api.discovery.v1beta1.ForNode")
     	proto.RegisterType((*ForZone)(nil), "k8s.io.api.discovery.v1beta1.ForZone")
     }
     
    @@ -258,61 +287,62 @@ func init() {
     }
     
     var fileDescriptor_6555bad15de200e0 = []byte{
    -	// 857 bytes of a gzipped FileDescriptorProto
    -	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x55, 0x4f, 0x6f, 0xe4, 0x34,
    -	0x14, 0x9f, 0x74, 0x1a, 0x9a, 0x78, 0x5a, 0xb1, 0x6b, 0x71, 0x18, 0x95, 0x2a, 0x19, 0x05, 0x2d,
    +	// 877 bytes of a gzipped FileDescriptorProto
    +	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x56, 0x4f, 0x6f, 0xe4, 0x34,
    +	0x1c, 0x9d, 0x74, 0x1a, 0x9a, 0x78, 0x5a, 0xb1, 0x6b, 0x71, 0x18, 0x95, 0x2a, 0x19, 0x05, 0x2d,
     	0x1a, 0x51, 0x48, 0x68, 0xb5, 0x42, 0x2b, 0x38, 0x35, 0xb0, 0xb0, 0x48, 0xcb, 0x6e, 0xe5, 0x56,
     	0x42, 0x5a, 0x71, 0xc0, 0x93, 0xb8, 0x19, 0xd3, 0x26, 0x8e, 0x62, 0x77, 0xa4, 0xb9, 0xf1, 0x0d,
    -	0xe0, 0xb3, 0xf0, 0x15, 0x90, 0x50, 0x8f, 0x7b, 0xdc, 0x53, 0xc4, 0x84, 0x6f, 0xb1, 0x27, 0x64,
    -	0xc7, 0xf9, 0x33, 0x0c, 0x94, 0xb9, 0xc5, 0x3f, 0xbf, 0xdf, 0xef, 0xbd, 0xf7, 0x7b, 0xb6, 0x03,
    -	0x3e, 0xbe, 0x7e, 0xc2, 0x7d, 0xca, 0x02, 0x9c, 0xd3, 0x20, 0xa6, 0x3c, 0x62, 0x0b, 0x52, 0x2c,
    -	0x83, 0xc5, 0xc9, 0x8c, 0x08, 0x7c, 0x12, 0x24, 0x24, 0x23, 0x05, 0x16, 0x24, 0xf6, 0xf3, 0x82,
    -	0x09, 0x06, 0x8f, 0xea, 0x68, 0x1f, 0xe7, 0xd4, 0x6f, 0xa3, 0x7d, 0x1d, 0x7d, 0xf8, 0x49, 0x42,
    -	0xc5, 0xfc, 0x76, 0xe6, 0x47, 0x2c, 0x0d, 0x12, 0x96, 0xb0, 0x40, 0x91, 0x66, 0xb7, 0x57, 0x6a,
    -	0xa5, 0x16, 0xea, 0xab, 0x16, 0x3b, 0xf4, 0x7a, 0xa9, 0x23, 0x56, 0x90, 0x60, 0xb1, 0x91, 0xf0,
    -	0xf0, 0x71, 0x17, 0x93, 0xe2, 0x68, 0x4e, 0x33, 0x59, 0x5d, 0x7e, 0x9d, 0x48, 0x80, 0x07, 0x29,
    -	0x11, 0xf8, 0xdf, 0x58, 0xc1, 0x7f, 0xb1, 0x8a, 0xdb, 0x4c, 0xd0, 0x94, 0x6c, 0x10, 0x3e, 0xfb,
    -	0x3f, 0x02, 0x8f, 0xe6, 0x24, 0xc5, 0xff, 0xe4, 0x79, 0xbf, 0xed, 0x02, 0xeb, 0x69, 0x16, 0xe7,
    -	0x8c, 0x66, 0x02, 0x1e, 0x03, 0x1b, 0xc7, 0x71, 0x41, 0x38, 0x27, 0x7c, 0x6c, 0x4c, 0x86, 0x53,
    -	0x3b, 0x3c, 0xa8, 0x4a, 0xd7, 0x3e, 0x6b, 0x40, 0xd4, 0xed, 0xc3, 0x18, 0x80, 0x88, 0x65, 0x31,
    -	0x15, 0x94, 0x65, 0x7c, 0xbc, 0x33, 0x31, 0xa6, 0xa3, 0xd3, 0x4f, 0xfd, 0xfb, 0xec, 0xf5, 0x9b,
    -	0x44, 0x5f, 0xb6, 0xbc, 0x10, 0xde, 0x95, 0xee, 0xa0, 0x2a, 0x5d, 0xd0, 0x61, 0xa8, 0xa7, 0x0b,
    -	0xa7, 0xc0, 0x9a, 0x33, 0x2e, 0x32, 0x9c, 0x92, 0xf1, 0x70, 0x62, 0x4c, 0xed, 0x70, 0xbf, 0x2a,
    -	0x5d, 0xeb, 0x99, 0xc6, 0x50, 0xbb, 0x0b, 0xcf, 0x81, 0x2d, 0x70, 0x91, 0x10, 0x81, 0xc8, 0xd5,
    -	0x78, 0x57, 0x95, 0xf3, 0x41, 0xbf, 0x1c, 0x39, 0x20, 0x7f, 0x71, 0xe2, 0xbf, 0x9c, 0xfd, 0x44,
    -	0x22, 0x19, 0x44, 0x0a, 0x92, 0x45, 0xa4, 0xee, 0xf0, 0xb2, 0x61, 0xa2, 0x4e, 0x04, 0xce, 0x80,
    -	0x25, 0x58, 0xce, 0x6e, 0x58, 0xb2, 0x1c, 0x9b, 0x93, 0xe1, 0x74, 0x74, 0xfa, 0x78, 0xbb, 0xfe,
    -	0xfc, 0x4b, 0x4d, 0x7b, 0x9a, 0x89, 0x62, 0x19, 0x3e, 0xd0, 0x3d, 0x5a, 0x0d, 0x8c, 0x5a, 0x5d,
    -	0xd9, 0x5f, 0xc6, 0x62, 0xf2, 0x42, 0xf6, 0xf7, 0x4e, 0xd7, 0xdf, 0x0b, 0x8d, 0xa1, 0x76, 0x17,
    -	0x3e, 0x07, 0xe6, 0x9c, 0x66, 0x82, 0x8f, 0xf7, 0x54, 0x6f, 0xc7, 0xdb, 0x95, 0xf2, 0x4c, 0x52,
    -	0x42, 0xbb, 0x2a, 0x5d, 0x53, 0x7d, 0xa2, 0x5a, 0xe4, 0xf0, 0x0b, 0x70, 0xb0, 0x56, 0x24, 0x7c,
    -	0x00, 0x86, 0xd7, 0x64, 0x39, 0x36, 0x64, 0x0d, 0x48, 0x7e, 0xc2, 0xf7, 0x80, 0xb9, 0xc0, 0x37,
    -	0xb7, 0x44, 0xcd, 0xd6, 0x46, 0xf5, 0xe2, 0xf3, 0x9d, 0x27, 0x86, 0xf7, 0x8b, 0x01, 0xe0, 0xe6,
    -	0x2c, 0xa1, 0x0b, 0xcc, 0x82, 0xe0, 0xb8, 0x16, 0xb1, 0xea, 0xa4, 0x48, 0x02, 0xa8, 0xc6, 0xe1,
    -	0x23, 0xb0, 0xc7, 0x49, 0xb1, 0xa0, 0x59, 0xa2, 0x34, 0xad, 0x70, 0x54, 0x95, 0xee, 0xde, 0x45,
    -	0x0d, 0xa1, 0x66, 0x0f, 0x9e, 0x80, 0x91, 0x20, 0x45, 0x4a, 0x33, 0x2c, 0x64, 0xe8, 0x50, 0x85,
    -	0xbe, 0x5b, 0x95, 0xee, 0xe8, 0xb2, 0x83, 0x51, 0x3f, 0xc6, 0x8b, 0xc1, 0xc1, 0x5a, 0xc7, 0xf0,
    -	0x02, 0x58, 0x57, 0xac, 0x78, 0xc5, 0x32, 0x7d, 0x92, 0x47, 0xa7, 0x8f, 0xee, 0x37, 0xec, 0xeb,
    -	0x3a, 0xba, 0x1b, 0x96, 0x06, 0x38, 0x6a, 0x85, 0xbc, 0x3f, 0x0c, 0xb0, 0xdf, 0xa4, 0x39, 0x67,
    -	0x85, 0x80, 0x47, 0x60, 0x57, 0x9d, 0x4c, 0xe5, 0x5a, 0x68, 0x55, 0xa5, 0xbb, 0xab, 0xa6, 0xa6,
    -	0x50, 0xf8, 0x0d, 0xb0, 0xd4, 0x25, 0x8b, 0xd8, 0x4d, 0xed, 0x61, 0x78, 0x2c, 0x85, 0xcf, 0x35,
    -	0xf6, 0xb6, 0x74, 0xdf, 0xdf, 0x7c, 0x40, 0xfc, 0x66, 0x1b, 0xb5, 0x64, 0x99, 0x26, 0x67, 0x85,
    -	0x50, 0x4e, 0x98, 0x75, 0x1a, 0x99, 0x1e, 0x29, 0x54, 0xda, 0x85, 0xf3, 0xbc, 0xa1, 0xa9, 0xa3,
    -	0x6f, 0xd7, 0x76, 0x9d, 0x75, 0x30, 0xea, 0xc7, 0x78, 0xab, 0x9d, 0xce, 0xaf, 0x8b, 0x1b, 0x1a,
    -	0x11, 0xf8, 0x23, 0xb0, 0xe4, 0x5b, 0x14, 0x63, 0x81, 0x55, 0x37, 0xeb, 0x77, 0xb9, 0x7d, 0x52,
    -	0xfc, 0xfc, 0x3a, 0x91, 0x00, 0xf7, 0x65, 0x74, 0x77, 0x9d, 0xbe, 0x23, 0x02, 0x77, 0x77, 0xb9,
    -	0xc3, 0x50, 0xab, 0x0a, 0xbf, 0x02, 0x23, 0xfd, 0x78, 0x5c, 0x2e, 0x73, 0xa2, 0xcb, 0xf4, 0x34,
    -	0x65, 0x74, 0xd6, 0x6d, 0xbd, 0x5d, 0x5f, 0xa2, 0x3e, 0x0d, 0x7e, 0x0f, 0x6c, 0xa2, 0x0b, 0x97,
    -	0x8f, 0x8e, 0x1c, 0xec, 0x87, 0xdb, 0xdd, 0x84, 0xf0, 0xa1, 0xce, 0x65, 0x37, 0x08, 0x47, 0x9d,
    -	0x16, 0x7c, 0x09, 0x4c, 0xe9, 0x26, 0x1f, 0x0f, 0x95, 0xe8, 0x47, 0xdb, 0x89, 0xca, 0x31, 0x84,
    -	0x07, 0x5a, 0xd8, 0x94, 0x2b, 0x8e, 0x6a, 0x1d, 0xef, 0x77, 0x03, 0x3c, 0x5c, 0xf3, 0xf8, 0x39,
    -	0xe5, 0x02, 0xfe, 0xb0, 0xe1, 0xb3, 0xbf, 0x9d, 0xcf, 0x92, 0xad, 0x5c, 0x6e, 0x0f, 0x68, 0x83,
    -	0xf4, 0x3c, 0x3e, 0x07, 0x26, 0x15, 0x24, 0x6d, 0x9c, 0xd9, 0xf2, 0x8d, 0x50, 0xd5, 0x75, 0x5d,
    -	0x7c, 0x2b, 0x15, 0x50, 0x2d, 0xe4, 0x1d, 0x83, 0x3d, 0x7d, 0x11, 0xe0, 0x64, 0xed, 0xb0, 0xef,
    -	0xeb, 0xf0, 0xde, 0x81, 0x0f, 0xc3, 0xbb, 0x95, 0x33, 0x78, 0xbd, 0x72, 0x06, 0x6f, 0x56, 0xce,
    -	0xe0, 0xe7, 0xca, 0x31, 0xee, 0x2a, 0xc7, 0x78, 0x5d, 0x39, 0xc6, 0x9b, 0xca, 0x31, 0xfe, 0xac,
    -	0x1c, 0xe3, 0xd7, 0xbf, 0x9c, 0xc1, 0xab, 0xa3, 0xfb, 0x7e, 0xd8, 0x7f, 0x07, 0x00, 0x00, 0xff,
    -	0xff, 0x1c, 0xe6, 0x20, 0x06, 0xcf, 0x07, 0x00, 0x00,
    +	0xe0, 0xb3, 0x70, 0xe3, 0x8c, 0x84, 0x7a, 0xdc, 0xe3, 0x9e, 0x22, 0x1a, 0xbe, 0xc5, 0x9e, 0x90,
    +	0x1d, 0xe7, 0xcf, 0x30, 0xd0, 0xce, 0x2d, 0x7e, 0x7e, 0xef, 0xfd, 0xfe, 0xd9, 0x56, 0xc0, 0xc7,
    +	0x97, 0x4f, 0xb8, 0x4f, 0x59, 0x80, 0x73, 0x1a, 0xc4, 0x94, 0x47, 0x6c, 0x41, 0x8a, 0x65, 0xb0,
    +	0x38, 0x9a, 0x11, 0x81, 0x8f, 0x82, 0x84, 0x64, 0xa4, 0xc0, 0x82, 0xc4, 0x7e, 0x5e, 0x30, 0xc1,
    +	0xe0, 0x41, 0xcd, 0xf6, 0x71, 0x4e, 0xfd, 0x96, 0xed, 0x6b, 0xf6, 0xfe, 0x27, 0x09, 0x15, 0xf3,
    +	0xeb, 0x99, 0x1f, 0xb1, 0x34, 0x48, 0x58, 0xc2, 0x02, 0x25, 0x9a, 0x5d, 0x5f, 0xa8, 0x95, 0x5a,
    +	0xa8, 0xaf, 0xda, 0x6c, 0xdf, 0xeb, 0x85, 0x8e, 0x58, 0x41, 0x82, 0xc5, 0x5a, 0xc0, 0xfd, 0xc7,
    +	0x1d, 0x27, 0xc5, 0xd1, 0x9c, 0x66, 0x32, 0xbb, 0xfc, 0x32, 0x91, 0x00, 0x0f, 0x52, 0x22, 0xf0,
    +	0x7f, 0xa9, 0x82, 0xff, 0x53, 0x15, 0xd7, 0x99, 0xa0, 0x29, 0x59, 0x13, 0x7c, 0x76, 0x9f, 0x80,
    +	0x47, 0x73, 0x92, 0xe2, 0x7f, 0xeb, 0xbc, 0xdf, 0xb6, 0x81, 0xf5, 0x34, 0x8b, 0x73, 0x46, 0x33,
    +	0x01, 0x0f, 0x81, 0x8d, 0xe3, 0xb8, 0x20, 0x9c, 0x13, 0x3e, 0x36, 0x26, 0xc3, 0xa9, 0x1d, 0xee,
    +	0x55, 0xa5, 0x6b, 0x9f, 0x34, 0x20, 0xea, 0xf6, 0x61, 0x0c, 0x40, 0xc4, 0xb2, 0x98, 0x0a, 0xca,
    +	0x32, 0x3e, 0xde, 0x9a, 0x18, 0xd3, 0xd1, 0xf1, 0xa7, 0xfe, 0x5d, 0xed, 0xf5, 0x9b, 0x40, 0x5f,
    +	0xb6, 0xba, 0x10, 0xde, 0x94, 0xee, 0xa0, 0x2a, 0x5d, 0xd0, 0x61, 0xa8, 0xe7, 0x0b, 0xa7, 0xc0,
    +	0x9a, 0x33, 0x2e, 0x32, 0x9c, 0x92, 0xf1, 0x70, 0x62, 0x4c, 0xed, 0x70, 0xb7, 0x2a, 0x5d, 0xeb,
    +	0x99, 0xc6, 0x50, 0xbb, 0x0b, 0x4f, 0x81, 0x2d, 0x70, 0x91, 0x10, 0x81, 0xc8, 0xc5, 0x78, 0x5b,
    +	0xa5, 0xf3, 0x41, 0x3f, 0x1d, 0x39, 0x20, 0x7f, 0x71, 0xe4, 0xbf, 0x9c, 0xfd, 0x44, 0x22, 0x49,
    +	0x22, 0x05, 0xc9, 0x22, 0x52, 0x57, 0x78, 0xde, 0x28, 0x51, 0x67, 0x02, 0x67, 0xc0, 0x12, 0x2c,
    +	0x67, 0x57, 0x2c, 0x59, 0x8e, 0xcd, 0xc9, 0x70, 0x3a, 0x3a, 0x7e, 0xbc, 0x59, 0x7d, 0xfe, 0xb9,
    +	0x96, 0x3d, 0xcd, 0x44, 0xb1, 0x0c, 0x1f, 0xe8, 0x1a, 0xad, 0x06, 0x46, 0xad, 0xaf, 0xac, 0x2f,
    +	0x63, 0x31, 0x79, 0x21, 0xeb, 0x7b, 0xa7, 0xab, 0xef, 0x85, 0xc6, 0x50, 0xbb, 0x0b, 0x9f, 0x03,
    +	0x73, 0x4e, 0x33, 0xc1, 0xc7, 0x3b, 0xaa, 0xb6, 0xc3, 0xcd, 0x52, 0x79, 0x26, 0x25, 0xa1, 0x5d,
    +	0x95, 0xae, 0xa9, 0x3e, 0x51, 0x6d, 0xb2, 0xff, 0x05, 0xd8, 0x5b, 0x49, 0x12, 0x3e, 0x00, 0xc3,
    +	0x4b, 0xb2, 0x1c, 0x1b, 0x32, 0x07, 0x24, 0x3f, 0xe1, 0x7b, 0xc0, 0x5c, 0xe0, 0xab, 0x6b, 0xa2,
    +	0x66, 0x6b, 0xa3, 0x7a, 0xf1, 0xf9, 0xd6, 0x13, 0xc3, 0xfb, 0xc5, 0x00, 0x70, 0x7d, 0x96, 0xd0,
    +	0x05, 0x66, 0x41, 0x70, 0x5c, 0x9b, 0x58, 0x75, 0x50, 0x24, 0x01, 0x54, 0xe3, 0xf0, 0x11, 0xd8,
    +	0xe1, 0xa4, 0x58, 0xd0, 0x2c, 0x51, 0x9e, 0x56, 0x38, 0xaa, 0x4a, 0x77, 0xe7, 0xac, 0x86, 0x50,
    +	0xb3, 0x07, 0x8f, 0xc0, 0x48, 0x90, 0x22, 0xa5, 0x19, 0x16, 0x92, 0x3a, 0x54, 0xd4, 0x77, 0xab,
    +	0xd2, 0x1d, 0x9d, 0x77, 0x30, 0xea, 0x73, 0xbc, 0xdf, 0x0d, 0xb0, 0xb7, 0x52, 0x32, 0x3c, 0x03,
    +	0xd6, 0x05, 0x2b, 0x5e, 0xb1, 0x4c, 0x1f, 0xe5, 0xd1, 0xf1, 0xa3, 0xbb, 0x3b, 0xf6, 0x75, 0xcd,
    +	0xee, 0xa6, 0xa5, 0x01, 0x8e, 0x5a, 0x23, 0x6d, 0x2a, 0x87, 0x23, 0x4f, 0xfc, 0x66, 0xa6, 0x92,
    +	0xbd, 0x62, 0xaa, 0xe4, 0xa8, 0x35, 0xf2, 0xfe, 0x34, 0xc0, 0x6e, 0x93, 0xfb, 0x29, 0x2b, 0x04,
    +	0x3c, 0x00, 0xdb, 0xea, 0xbc, 0xab, 0x59, 0x84, 0x56, 0x55, 0xba, 0xdb, 0xea, 0x2c, 0x28, 0x14,
    +	0x7e, 0x03, 0x2c, 0x75, 0x75, 0x23, 0x76, 0x55, 0x4f, 0x26, 0x3c, 0x94, 0xc6, 0xa7, 0x1a, 0x7b,
    +	0x5b, 0xba, 0xef, 0xaf, 0x3f, 0x4b, 0x7e, 0xb3, 0x8d, 0x5a, 0xb1, 0x0c, 0x93, 0xb3, 0x42, 0xa8,
    +	0xfe, 0x9a, 0x75, 0x18, 0x19, 0x1e, 0x29, 0x54, 0x0e, 0x01, 0xe7, 0x79, 0x23, 0x53, 0x17, 0xca,
    +	0xae, 0x87, 0x70, 0xd2, 0xc1, 0xa8, 0xcf, 0xf1, 0x6e, 0xb7, 0xba, 0x21, 0x9c, 0x5d, 0xd1, 0x88,
    +	0xc0, 0x1f, 0x81, 0x25, 0x5f, 0xb8, 0x18, 0x0b, 0xac, 0xaa, 0x59, 0x7d, 0x21, 0xda, 0x87, 0xca,
    +	0xcf, 0x2f, 0x13, 0x09, 0x70, 0x5f, 0xb2, 0xbb, 0x4b, 0xfa, 0x1d, 0x11, 0xb8, 0x7b, 0x21, 0x3a,
    +	0x0c, 0xb5, 0xae, 0xf0, 0x2b, 0x30, 0xd2, 0x4f, 0xd2, 0xf9, 0x32, 0x27, 0x3a, 0x4d, 0x4f, 0x4b,
    +	0x46, 0x27, 0xdd, 0xd6, 0xdb, 0xd5, 0x25, 0xea, 0xcb, 0xe0, 0xf7, 0xc0, 0x26, 0x3a, 0xf1, 0x66,
    +	0xb0, 0x1f, 0x6e, 0x76, 0xbf, 0xc2, 0x87, 0x3a, 0x96, 0xdd, 0x20, 0x1c, 0x75, 0x5e, 0xf0, 0x25,
    +	0x30, 0x65, 0x37, 0xf9, 0x78, 0xa8, 0x4c, 0x3f, 0xda, 0xcc, 0x54, 0x8e, 0x21, 0xdc, 0xd3, 0xc6,
    +	0xa6, 0x5c, 0x71, 0x54, 0xfb, 0x78, 0x7f, 0x18, 0xe0, 0xe1, 0x4a, 0x8f, 0x9f, 0x53, 0x2e, 0xe0,
    +	0x0f, 0x6b, 0x7d, 0xf6, 0x37, 0xeb, 0xb3, 0x54, 0xab, 0x2e, 0xb7, 0x07, 0xb4, 0x41, 0x7a, 0x3d,
    +	0x3e, 0x05, 0x26, 0x15, 0x24, 0x6d, 0x3a, 0xb3, 0xe1, 0xcb, 0xa3, 0xb2, 0xeb, 0xaa, 0xf8, 0x56,
    +	0x3a, 0xa0, 0xda, 0xc8, 0x3b, 0x04, 0x3b, 0xfa, 0x22, 0xc0, 0xc9, 0xca, 0x61, 0xdf, 0xd5, 0xf4,
    +	0xde, 0x81, 0xd7, 0x64, 0x79, 0x01, 0xef, 0x27, 0x87, 0xe1, 0xcd, 0xad, 0x33, 0x78, 0x7d, 0xeb,
    +	0x0c, 0xde, 0xdc, 0x3a, 0x83, 0x9f, 0x2b, 0xc7, 0xb8, 0xa9, 0x1c, 0xe3, 0x75, 0xe5, 0x18, 0x6f,
    +	0x2a, 0xc7, 0xf8, 0xab, 0x72, 0x8c, 0x5f, 0xff, 0x76, 0x06, 0xaf, 0x0e, 0xee, 0xfa, 0x67, 0xf8,
    +	0x27, 0x00, 0x00, 0xff, 0xff, 0x76, 0x8e, 0x48, 0x7e, 0x52, 0x08, 0x00, 0x00,
     }
     
     func (m *Endpoint) Marshal() (dAtA []byte, err error) {
    @@ -492,6 +522,20 @@ func (m *EndpointHints) MarshalToSizedBuffer(dAtA []byte) (int, error) {
     	_ = i
     	var l int
     	_ = l
    +	if len(m.ForNodes) > 0 {
    +		for iNdEx := len(m.ForNodes) - 1; iNdEx >= 0; iNdEx-- {
    +			{
    +				size, err := m.ForNodes[iNdEx].MarshalToSizedBuffer(dAtA[:i])
    +				if err != nil {
    +					return 0, err
    +				}
    +				i -= size
    +				i = encodeVarintGenerated(dAtA, i, uint64(size))
    +			}
    +			i--
    +			dAtA[i] = 0x12
    +		}
    +	}
     	if len(m.ForZones) > 0 {
     		for iNdEx := len(m.ForZones) - 1; iNdEx >= 0; iNdEx-- {
     			{
    @@ -671,6 +715,34 @@ func (m *EndpointSliceList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
     	return len(dAtA) - i, nil
     }
     
    +func (m *ForNode) Marshal() (dAtA []byte, err error) {
    +	size := m.Size()
    +	dAtA = make([]byte, size)
    +	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    +	if err != nil {
    +		return nil, err
    +	}
    +	return dAtA[:n], nil
    +}
    +
    +func (m *ForNode) MarshalTo(dAtA []byte) (int, error) {
    +	size := m.Size()
    +	return m.MarshalToSizedBuffer(dAtA[:size])
    +}
    +
    +func (m *ForNode) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    +	i := len(dAtA)
    +	_ = i
    +	var l int
    +	_ = l
    +	i -= len(m.Name)
    +	copy(dAtA[i:], m.Name)
    +	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
    +	i--
    +	dAtA[i] = 0xa
    +	return len(dAtA) - i, nil
    +}
    +
     func (m *ForZone) Marshal() (dAtA []byte, err error) {
     	size := m.Size()
     	dAtA = make([]byte, size)
    @@ -781,6 +853,12 @@ func (m *EndpointHints) Size() (n int) {
     			n += 1 + l + sovGenerated(uint64(l))
     		}
     	}
    +	if len(m.ForNodes) > 0 {
    +		for _, e := range m.ForNodes {
    +			l = e.Size()
    +			n += 1 + l + sovGenerated(uint64(l))
    +		}
    +	}
     	return n
     }
     
    @@ -850,6 +928,17 @@ func (m *EndpointSliceList) Size() (n int) {
     	return n
     }
     
    +func (m *ForNode) Size() (n int) {
    +	if m == nil {
    +		return 0
    +	}
    +	var l int
    +	_ = l
    +	l = len(m.Name)
    +	n += 1 + l + sovGenerated(uint64(l))
    +	return n
    +}
    +
     func (m *ForZone) Size() (n int) {
     	if m == nil {
     		return 0
    @@ -914,8 +1003,14 @@ func (this *EndpointHints) String() string {
     		repeatedStringForForZones += strings.Replace(strings.Replace(f.String(), "ForZone", "ForZone", 1), `&`, ``, 1) + ","
     	}
     	repeatedStringForForZones += "}"
    +	repeatedStringForForNodes := "[]ForNode{"
    +	for _, f := range this.ForNodes {
    +		repeatedStringForForNodes += strings.Replace(strings.Replace(f.String(), "ForNode", "ForNode", 1), `&`, ``, 1) + ","
    +	}
    +	repeatedStringForForNodes += "}"
     	s := strings.Join([]string{`&EndpointHints{`,
     		`ForZones:` + repeatedStringForForZones + `,`,
    +		`ForNodes:` + repeatedStringForForNodes + `,`,
     		`}`,
     	}, "")
     	return s
    @@ -972,6 +1067,16 @@ func (this *EndpointSliceList) String() string {
     	}, "")
     	return s
     }
    +func (this *ForNode) String() string {
    +	if this == nil {
    +		return "nil"
    +	}
    +	s := strings.Join([]string{`&ForNode{`,
    +		`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
    +		`}`,
    +	}, "")
    +	return s
    +}
     func (this *ForZone) String() string {
     	if this == nil {
     		return "nil"
    @@ -1546,6 +1651,40 @@ func (m *EndpointHints) Unmarshal(dAtA []byte) error {
     				return err
     			}
     			iNdEx = postIndex
    +		case 2:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field ForNodes", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.ForNodes = append(m.ForNodes, ForNode{})
    +			if err := m.ForNodes[len(m.ForNodes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
     		default:
     			iNdEx = preIndex
     			skippy, err := skipGenerated(dAtA[iNdEx:])
    @@ -2036,6 +2175,88 @@ func (m *EndpointSliceList) Unmarshal(dAtA []byte) error {
     	}
     	return nil
     }
    +func (m *ForNode) Unmarshal(dAtA []byte) error {
    +	l := len(dAtA)
    +	iNdEx := 0
    +	for iNdEx < l {
    +		preIndex := iNdEx
    +		var wire uint64
    +		for shift := uint(0); ; shift += 7 {
    +			if shift >= 64 {
    +				return ErrIntOverflowGenerated
    +			}
    +			if iNdEx >= l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			b := dAtA[iNdEx]
    +			iNdEx++
    +			wire |= uint64(b&0x7F) << shift
    +			if b < 0x80 {
    +				break
    +			}
    +		}
    +		fieldNum := int32(wire >> 3)
    +		wireType := int(wire & 0x7)
    +		if wireType == 4 {
    +			return fmt.Errorf("proto: ForNode: wiretype end group for non-group")
    +		}
    +		if fieldNum <= 0 {
    +			return fmt.Errorf("proto: ForNode: illegal tag %d (wire type %d)", fieldNum, wire)
    +		}
    +		switch fieldNum {
    +		case 1:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.Name = string(dAtA[iNdEx:postIndex])
    +			iNdEx = postIndex
    +		default:
    +			iNdEx = preIndex
    +			skippy, err := skipGenerated(dAtA[iNdEx:])
    +			if err != nil {
    +				return err
    +			}
    +			if (skippy < 0) || (iNdEx+skippy) < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if (iNdEx + skippy) > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			iNdEx += skippy
    +		}
    +	}
    +
    +	if iNdEx > l {
    +		return io.ErrUnexpectedEOF
    +	}
    +	return nil
    +}
     func (m *ForZone) Unmarshal(dAtA []byte) error {
     	l := len(dAtA)
     	iNdEx := 0
    diff --git a/vendor/k8s.io/api/discovery/v1beta1/generated.proto b/vendor/k8s.io/api/discovery/v1beta1/generated.proto
    index 55828dd97..907050da1 100644
    --- a/vendor/k8s.io/api/discovery/v1beta1/generated.proto
    +++ b/vendor/k8s.io/api/discovery/v1beta1/generated.proto
    @@ -114,6 +114,13 @@ message EndpointHints {
       // enable topology aware routing. May contain a maximum of 8 entries.
       // +listType=atomic
       repeated ForZone forZones = 1;
    +
    +  // forNodes indicates the node(s) this endpoint should be consumed by when
    +  // using topology aware routing. May contain a maximum of 8 entries.
    +  // This is an Alpha feature and is only used when the PreferSameTrafficDistribution
    +  // feature gate is enabled.
    +  // +listType=atomic
    +  repeated ForNode forNodes = 2;
     }
     
     // EndpointPort represents a Port used by an EndpointSlice
    @@ -189,6 +196,12 @@ message EndpointSliceList {
       repeated EndpointSlice items = 2;
     }
     
    +// ForNode provides information about which nodes should consume this endpoint.
    +message ForNode {
    +  // name represents the name of the node.
    +  optional string name = 1;
    +}
    +
     // ForZone provides information about which zones should consume this endpoint.
     message ForZone {
       // name represents the name of the zone.
    diff --git a/vendor/k8s.io/api/discovery/v1beta1/types.go b/vendor/k8s.io/api/discovery/v1beta1/types.go
    index defd8e2ce..fa9d1eae4 100644
    --- a/vendor/k8s.io/api/discovery/v1beta1/types.go
    +++ b/vendor/k8s.io/api/discovery/v1beta1/types.go
    @@ -161,6 +161,13 @@ type EndpointHints struct {
     	// enable topology aware routing. May contain a maximum of 8 entries.
     	// +listType=atomic
     	ForZones []ForZone `json:"forZones,omitempty" protobuf:"bytes,1,name=forZones"`
    +
    +	// forNodes indicates the node(s) this endpoint should be consumed by when
    +	// using topology aware routing. May contain a maximum of 8 entries.
    +	// This is an Alpha feature and is only used when the PreferSameTrafficDistribution
    +	// feature gate is enabled.
    +	// +listType=atomic
    +	ForNodes []ForNode `json:"forNodes,omitempty" protobuf:"bytes,2,name=forNodes"`
     }
     
     // ForZone provides information about which zones should consume this endpoint.
    @@ -169,6 +176,12 @@ type ForZone struct {
     	Name string `json:"name" protobuf:"bytes,1,name=name"`
     }
     
    +// ForNode provides information about which nodes should consume this endpoint.
    +type ForNode struct {
    +	// name represents the name of the node.
    +	Name string `json:"name" protobuf:"bytes,1,name=name"`
    +}
    +
     // EndpointPort represents a Port used by an EndpointSlice
     type EndpointPort struct {
     	// name represents the name of this port. All ports in an EndpointSlice must have a unique name.
    diff --git a/vendor/k8s.io/api/discovery/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/discovery/v1beta1/types_swagger_doc_generated.go
    index 847d4d58e..72aa0cb9b 100644
    --- a/vendor/k8s.io/api/discovery/v1beta1/types_swagger_doc_generated.go
    +++ b/vendor/k8s.io/api/discovery/v1beta1/types_swagger_doc_generated.go
    @@ -56,6 +56,7 @@ func (EndpointConditions) SwaggerDoc() map[string]string {
     var map_EndpointHints = map[string]string{
     	"":         "EndpointHints provides hints describing how an endpoint should be consumed.",
     	"forZones": "forZones indicates the zone(s) this endpoint should be consumed by to enable topology aware routing. May contain a maximum of 8 entries.",
    +	"forNodes": "forNodes indicates the node(s) this endpoint should be consumed by when using topology aware routing. May contain a maximum of 8 entries. This is an Alpha feature and is only used when the PreferSameTrafficDistribution feature gate is enabled.",
     }
     
     func (EndpointHints) SwaggerDoc() map[string]string {
    @@ -96,6 +97,15 @@ func (EndpointSliceList) SwaggerDoc() map[string]string {
     	return map_EndpointSliceList
     }
     
    +var map_ForNode = map[string]string{
    +	"":     "ForNode provides information about which nodes should consume this endpoint.",
    +	"name": "name represents the name of the node.",
    +}
    +
    +func (ForNode) SwaggerDoc() map[string]string {
    +	return map_ForNode
    +}
    +
     var map_ForZone = map[string]string{
     	"":     "ForZone provides information about which zones should consume this endpoint.",
     	"name": "name represents the name of the zone.",
    diff --git a/vendor/k8s.io/api/discovery/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/discovery/v1beta1/zz_generated.deepcopy.go
    index 13b9544b0..72490d6ad 100644
    --- a/vendor/k8s.io/api/discovery/v1beta1/zz_generated.deepcopy.go
    +++ b/vendor/k8s.io/api/discovery/v1beta1/zz_generated.deepcopy.go
    @@ -114,6 +114,11 @@ func (in *EndpointHints) DeepCopyInto(out *EndpointHints) {
     		*out = make([]ForZone, len(*in))
     		copy(*out, *in)
     	}
    +	if in.ForNodes != nil {
    +		in, out := &in.ForNodes, &out.ForNodes
    +		*out = make([]ForNode, len(*in))
    +		copy(*out, *in)
    +	}
     	return
     }
     
    @@ -236,6 +241,22 @@ func (in *EndpointSliceList) DeepCopyObject() runtime.Object {
     	return nil
     }
     
    +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
    +func (in *ForNode) DeepCopyInto(out *ForNode) {
    +	*out = *in
    +	return
    +}
    +
    +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ForNode.
    +func (in *ForNode) DeepCopy() *ForNode {
    +	if in == nil {
    +		return nil
    +	}
    +	out := new(ForNode)
    +	in.DeepCopyInto(out)
    +	return out
    +}
    +
     // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
     func (in *ForZone) DeepCopyInto(out *ForZone) {
     	*out = *in
    diff --git a/vendor/k8s.io/api/events/v1/doc.go b/vendor/k8s.io/api/events/v1/doc.go
    index 5fe700ffc..911639044 100644
    --- a/vendor/k8s.io/api/events/v1/doc.go
    +++ b/vendor/k8s.io/api/events/v1/doc.go
    @@ -20,4 +20,4 @@ limitations under the License.
     // +k8s:prerelease-lifecycle-gen=true
     // +groupName=events.k8s.io
     
    -package v1 // import "k8s.io/api/events/v1"
    +package v1
    diff --git a/vendor/k8s.io/api/events/v1beta1/doc.go b/vendor/k8s.io/api/events/v1beta1/doc.go
    index 46048a65b..e4864294f 100644
    --- a/vendor/k8s.io/api/events/v1beta1/doc.go
    +++ b/vendor/k8s.io/api/events/v1beta1/doc.go
    @@ -21,4 +21,4 @@ limitations under the License.
     
     // +groupName=events.k8s.io
     
    -package v1beta1 // import "k8s.io/api/events/v1beta1"
    +package v1beta1
    diff --git a/vendor/k8s.io/api/extensions/v1beta1/doc.go b/vendor/k8s.io/api/extensions/v1beta1/doc.go
    index c9af49d55..be710973c 100644
    --- a/vendor/k8s.io/api/extensions/v1beta1/doc.go
    +++ b/vendor/k8s.io/api/extensions/v1beta1/doc.go
    @@ -18,5 +18,7 @@ limitations under the License.
     // +k8s:protobuf-gen=package
     // +k8s:openapi-gen=true
     // +k8s:prerelease-lifecycle-gen=true
    +// +k8s:validation-gen=TypeMeta
    +// +k8s:validation-gen-input=k8s.io/api/extensions/v1beta1
     
    -package v1beta1 // import "k8s.io/api/extensions/v1beta1"
    +package v1beta1
    diff --git a/vendor/k8s.io/api/extensions/v1beta1/generated.pb.go b/vendor/k8s.io/api/extensions/v1beta1/generated.pb.go
    index 818486f39..35b9a4ff2 100644
    --- a/vendor/k8s.io/api/extensions/v1beta1/generated.pb.go
    +++ b/vendor/k8s.io/api/extensions/v1beta1/generated.pb.go
    @@ -1364,185 +1364,187 @@ func init() {
     }
     
     var fileDescriptor_90a532284de28347 = []byte{
    -	// 2842 bytes of a gzipped FileDescriptorProto
    -	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x5b, 0xcd, 0x6f, 0x24, 0x47,
    -	0x15, 0xdf, 0x9e, 0xf1, 0xd8, 0xe3, 0xe7, 0xb5, 0xbd, 0x5b, 0xeb, 0xac, 0x1d, 0x2f, 0xb1, 0xa3,
    -	0x46, 0x84, 0x4d, 0xd8, 0x9d, 0x61, 0x37, 0xc9, 0x92, 0x0f, 0x29, 0x61, 0xc7, 0xbb, 0xc9, 0x3a,
    -	0xb1, 0xc7, 0x93, 0x9a, 0x71, 0x82, 0x22, 0x02, 0xb4, 0x7b, 0xca, 0xe3, 0x8e, 0x7b, 0xba, 0x47,
    -	0xdd, 0x35, 0x66, 0x7d, 0x03, 0xc1, 0x25, 0x27, 0xb8, 0x04, 0x38, 0x22, 0x21, 0x71, 0xe5, 0xca,
    -	0x21, 0x44, 0x20, 0x82, 0xb4, 0x42, 0x1c, 0x22, 0x71, 0x20, 0x27, 0x8b, 0x38, 0x27, 0xc4, 0x3f,
    -	0x80, 0xf6, 0x84, 0xea, 0xa3, 0xab, 0xbf, 0xed, 0x1e, 0xe3, 0x58, 0x04, 0x71, 0x5a, 0x4f, 0xbd,
    -	0xf7, 0x7e, 0xf5, 0xaa, 0xea, 0xd5, 0x7b, 0xbf, 0xaa, 0xea, 0x85, 0xeb, 0xbb, 0xcf, 0xf9, 0x35,
    -	0xcb, 0xad, 0x1b, 0x03, 0xab, 0x4e, 0xee, 0x53, 0xe2, 0xf8, 0x96, 0xeb, 0xf8, 0xf5, 0xbd, 0x1b,
    -	0x5b, 0x84, 0x1a, 0x37, 0xea, 0x3d, 0xe2, 0x10, 0xcf, 0xa0, 0xa4, 0x5b, 0x1b, 0x78, 0x2e, 0x75,
    -	0xd1, 0x63, 0x42, 0xbd, 0x66, 0x0c, 0xac, 0x5a, 0xa8, 0x5e, 0x93, 0xea, 0x8b, 0xd7, 0x7b, 0x16,
    -	0xdd, 0x19, 0x6e, 0xd5, 0x4c, 0xb7, 0x5f, 0xef, 0xb9, 0x3d, 0xb7, 0xce, 0xad, 0xb6, 0x86, 0xdb,
    -	0xfc, 0x17, 0xff, 0xc1, 0xff, 0x12, 0x68, 0x8b, 0x7a, 0xa4, 0x73, 0xd3, 0xf5, 0x48, 0x7d, 0x2f,
    -	0xd5, 0xe3, 0xe2, 0x33, 0xa1, 0x4e, 0xdf, 0x30, 0x77, 0x2c, 0x87, 0x78, 0xfb, 0xf5, 0xc1, 0x6e,
    -	0x8f, 0x35, 0xf8, 0xf5, 0x3e, 0xa1, 0x46, 0x96, 0x55, 0x3d, 0xcf, 0xca, 0x1b, 0x3a, 0xd4, 0xea,
    -	0x93, 0x94, 0xc1, 0xad, 0xe3, 0x0c, 0x7c, 0x73, 0x87, 0xf4, 0x8d, 0x94, 0xdd, 0xd3, 0x79, 0x76,
    -	0x43, 0x6a, 0xd9, 0x75, 0xcb, 0xa1, 0x3e, 0xf5, 0x92, 0x46, 0xfa, 0xfb, 0x25, 0x98, 0xbc, 0x63,
    -	0x90, 0xbe, 0xeb, 0xb4, 0x09, 0x45, 0xdf, 0x83, 0x2a, 0x1b, 0x46, 0xd7, 0xa0, 0xc6, 0x82, 0xf6,
    -	0xb8, 0x76, 0x75, 0xea, 0xe6, 0xd7, 0x6b, 0xe1, 0x34, 0x2b, 0xd4, 0xda, 0x60, 0xb7, 0xc7, 0x1a,
    -	0xfc, 0x1a, 0xd3, 0xae, 0xed, 0xdd, 0xa8, 0x6d, 0x6c, 0xbd, 0x4b, 0x4c, 0xba, 0x4e, 0xa8, 0xd1,
    -	0x40, 0x0f, 0x0e, 0x96, 0xcf, 0x1d, 0x1e, 0x2c, 0x43, 0xd8, 0x86, 0x15, 0x2a, 0x6a, 0xc2, 0x98,
    -	0x3f, 0x20, 0xe6, 0x42, 0x89, 0xa3, 0x5f, 0xab, 0x1d, 0xb9, 0x88, 0x35, 0xe5, 0x59, 0x7b, 0x40,
    -	0xcc, 0xc6, 0x79, 0x89, 0x3c, 0xc6, 0x7e, 0x61, 0x8e, 0x83, 0xde, 0x84, 0x71, 0x9f, 0x1a, 0x74,
    -	0xe8, 0x2f, 0x94, 0x39, 0x62, 0xad, 0x30, 0x22, 0xb7, 0x6a, 0xcc, 0x48, 0xcc, 0x71, 0xf1, 0x1b,
    -	0x4b, 0x34, 0xfd, 0x1f, 0x25, 0x40, 0x4a, 0x77, 0xc5, 0x75, 0xba, 0x16, 0xb5, 0x5c, 0x07, 0xbd,
    -	0x00, 0x63, 0x74, 0x7f, 0x40, 0xf8, 0xe4, 0x4c, 0x36, 0x9e, 0x08, 0x1c, 0xea, 0xec, 0x0f, 0xc8,
    -	0xc3, 0x83, 0xe5, 0xcb, 0x69, 0x0b, 0x26, 0xc1, 0xdc, 0x06, 0xad, 0x29, 0x57, 0x4b, 0xdc, 0xfa,
    -	0x99, 0x78, 0xd7, 0x0f, 0x0f, 0x96, 0x33, 0x82, 0xb0, 0xa6, 0x90, 0xe2, 0x0e, 0xa2, 0x3d, 0x40,
    -	0xb6, 0xe1, 0xd3, 0x8e, 0x67, 0x38, 0xbe, 0xe8, 0xc9, 0xea, 0x13, 0x39, 0x09, 0x4f, 0x15, 0x5b,
    -	0x34, 0x66, 0xd1, 0x58, 0x94, 0x5e, 0xa0, 0xb5, 0x14, 0x1a, 0xce, 0xe8, 0x01, 0x3d, 0x01, 0xe3,
    -	0x1e, 0x31, 0x7c, 0xd7, 0x59, 0x18, 0xe3, 0xa3, 0x50, 0x13, 0x88, 0x79, 0x2b, 0x96, 0x52, 0xf4,
    -	0x24, 0x4c, 0xf4, 0x89, 0xef, 0x1b, 0x3d, 0xb2, 0x50, 0xe1, 0x8a, 0xb3, 0x52, 0x71, 0x62, 0x5d,
    -	0x34, 0xe3, 0x40, 0xae, 0x7f, 0xa0, 0xc1, 0xb4, 0x9a, 0xb9, 0x35, 0xcb, 0xa7, 0xe8, 0xdb, 0xa9,
    -	0x38, 0xac, 0x15, 0x1b, 0x12, 0xb3, 0xe6, 0x51, 0x78, 0x41, 0xf6, 0x56, 0x0d, 0x5a, 0x22, 0x31,
    -	0xb8, 0x0e, 0x15, 0x8b, 0x92, 0x3e, 0x5b, 0x87, 0xf2, 0xd5, 0xa9, 0x9b, 0x57, 0x8b, 0x86, 0x4c,
    -	0x63, 0x5a, 0x82, 0x56, 0x56, 0x99, 0x39, 0x16, 0x28, 0xfa, 0xcf, 0xc6, 0x22, 0xee, 0xb3, 0xd0,
    -	0x44, 0xef, 0x40, 0xd5, 0x27, 0x36, 0x31, 0xa9, 0xeb, 0x49, 0xf7, 0x9f, 0x2e, 0xe8, 0xbe, 0xb1,
    -	0x45, 0xec, 0xb6, 0x34, 0x6d, 0x9c, 0x67, 0xfe, 0x07, 0xbf, 0xb0, 0x82, 0x44, 0x6f, 0x40, 0x95,
    -	0x92, 0xfe, 0xc0, 0x36, 0x28, 0x91, 0xfb, 0xe8, 0xcb, 0xd1, 0x21, 0xb0, 0xc8, 0x61, 0x60, 0x2d,
    -	0xb7, 0xdb, 0x91, 0x6a, 0x7c, 0xfb, 0xa8, 0x29, 0x09, 0x5a, 0xb1, 0x82, 0x41, 0x7b, 0x30, 0x33,
    -	0x1c, 0x74, 0x99, 0x26, 0x65, 0xd9, 0xa1, 0xb7, 0x2f, 0x23, 0xe9, 0x56, 0xd1, 0xb9, 0xd9, 0x8c,
    -	0x59, 0x37, 0x2e, 0xcb, 0xbe, 0x66, 0xe2, 0xed, 0x38, 0xd1, 0x0b, 0xba, 0x0d, 0xb3, 0x7d, 0xcb,
    -	0xc1, 0xc4, 0xe8, 0xee, 0xb7, 0x89, 0xe9, 0x3a, 0x5d, 0x9f, 0x87, 0x55, 0xa5, 0x31, 0x2f, 0x01,
    -	0x66, 0xd7, 0xe3, 0x62, 0x9c, 0xd4, 0x47, 0xaf, 0x01, 0x0a, 0x86, 0xf1, 0xaa, 0x48, 0x6e, 0x96,
    -	0xeb, 0xf0, 0x98, 0x2b, 0x87, 0xc1, 0xdd, 0x49, 0x69, 0xe0, 0x0c, 0x2b, 0xb4, 0x06, 0x73, 0x1e,
    -	0xd9, 0xb3, 0xd8, 0x18, 0xef, 0x59, 0x3e, 0x75, 0xbd, 0xfd, 0x35, 0xab, 0x6f, 0xd1, 0x85, 0x71,
    -	0xee, 0xd3, 0xc2, 0xe1, 0xc1, 0xf2, 0x1c, 0xce, 0x90, 0xe3, 0x4c, 0x2b, 0xfd, 0xe7, 0xe3, 0x30,
    -	0x9b, 0xc8, 0x37, 0xe8, 0x4d, 0xb8, 0x6c, 0x0e, 0x3d, 0x8f, 0x38, 0xb4, 0x39, 0xec, 0x6f, 0x11,
    -	0xaf, 0x6d, 0xee, 0x90, 0xee, 0xd0, 0x26, 0x5d, 0x1e, 0x28, 0x95, 0xc6, 0x92, 0xf4, 0xf8, 0xf2,
    -	0x4a, 0xa6, 0x16, 0xce, 0xb1, 0x66, 0xb3, 0xe0, 0xf0, 0xa6, 0x75, 0xcb, 0xf7, 0x15, 0x66, 0x89,
    -	0x63, 0xaa, 0x59, 0x68, 0xa6, 0x34, 0x70, 0x86, 0x15, 0xf3, 0xb1, 0x4b, 0x7c, 0xcb, 0x23, 0xdd,
    -	0xa4, 0x8f, 0xe5, 0xb8, 0x8f, 0x77, 0x32, 0xb5, 0x70, 0x8e, 0x35, 0x7a, 0x16, 0xa6, 0x44, 0x6f,
    -	0x7c, 0xfd, 0xe4, 0x42, 0x5f, 0x92, 0x60, 0x53, 0xcd, 0x50, 0x84, 0xa3, 0x7a, 0x6c, 0x68, 0xee,
    -	0x96, 0x4f, 0xbc, 0x3d, 0xd2, 0xcd, 0x5f, 0xe0, 0x8d, 0x94, 0x06, 0xce, 0xb0, 0x62, 0x43, 0x13,
    -	0x11, 0x98, 0x1a, 0xda, 0x78, 0x7c, 0x68, 0x9b, 0x99, 0x5a, 0x38, 0xc7, 0x9a, 0xc5, 0xb1, 0x70,
    -	0xf9, 0xf6, 0x9e, 0x61, 0xd9, 0xc6, 0x96, 0x4d, 0x16, 0x26, 0xe2, 0x71, 0xdc, 0x8c, 0x8b, 0x71,
    -	0x52, 0x1f, 0xbd, 0x0a, 0x17, 0x45, 0xd3, 0xa6, 0x63, 0x28, 0x90, 0x2a, 0x07, 0x79, 0x54, 0x82,
    -	0x5c, 0x6c, 0x26, 0x15, 0x70, 0xda, 0x06, 0xbd, 0x00, 0x33, 0xa6, 0x6b, 0xdb, 0x3c, 0x1e, 0x57,
    -	0xdc, 0xa1, 0x43, 0x17, 0x26, 0x39, 0x0a, 0x62, 0xfb, 0x71, 0x25, 0x26, 0xc1, 0x09, 0x4d, 0x44,
    -	0x00, 0xcc, 0xa0, 0xe0, 0xf8, 0x0b, 0xc0, 0xf3, 0xe3, 0x8d, 0xa2, 0x39, 0x40, 0x95, 0xaa, 0x90,
    -	0x03, 0xa8, 0x26, 0x1f, 0x47, 0x80, 0xf5, 0x3f, 0x6b, 0x30, 0x9f, 0x93, 0x3a, 0xd0, 0xcb, 0xb1,
    -	0x12, 0xfb, 0xb5, 0x44, 0x89, 0xbd, 0x92, 0x63, 0x16, 0xa9, 0xb3, 0x0e, 0x4c, 0x7b, 0x6c, 0x54,
    -	0x4e, 0x4f, 0xa8, 0xc8, 0x1c, 0xf9, 0xec, 0x31, 0xc3, 0xc0, 0x51, 0x9b, 0x30, 0xe7, 0x5f, 0x3c,
    -	0x3c, 0x58, 0x9e, 0x8e, 0xc9, 0x70, 0x1c, 0x5e, 0xff, 0x45, 0x09, 0xe0, 0x0e, 0x19, 0xd8, 0xee,
    -	0x7e, 0x9f, 0x38, 0x67, 0xc1, 0xa1, 0x36, 0x62, 0x1c, 0xea, 0xfa, 0x71, 0xcb, 0xa3, 0x5c, 0xcb,
    -	0x25, 0x51, 0x6f, 0x25, 0x48, 0x54, 0xbd, 0x38, 0xe4, 0xd1, 0x2c, 0xea, 0x6f, 0x65, 0xb8, 0x14,
    -	0x2a, 0x87, 0x34, 0xea, 0xc5, 0xd8, 0x1a, 0x7f, 0x35, 0xb1, 0xc6, 0xf3, 0x19, 0x26, 0x9f, 0x1b,
    -	0x8f, 0x7a, 0x17, 0x66, 0x18, 0xcb, 0x11, 0x6b, 0xc9, 0x39, 0xd4, 0xf8, 0xc8, 0x1c, 0x4a, 0x55,
    -	0xbb, 0xb5, 0x18, 0x12, 0x4e, 0x20, 0xe7, 0x70, 0xb6, 0x89, 0x2f, 0x22, 0x67, 0xfb, 0x50, 0x83,
    -	0x99, 0x70, 0x99, 0xce, 0x80, 0xb4, 0x35, 0xe3, 0xa4, 0xed, 0xc9, 0xc2, 0x21, 0x9a, 0xc3, 0xda,
    -	0xfe, 0xc5, 0x08, 0xbe, 0x52, 0x62, 0x1b, 0x7c, 0xcb, 0x30, 0x77, 0xd1, 0xe3, 0x30, 0xe6, 0x18,
    -	0xfd, 0x20, 0x32, 0xd5, 0x66, 0x69, 0x1a, 0x7d, 0x82, 0xb9, 0x04, 0xbd, 0xaf, 0x01, 0x92, 0x55,
    -	0xe0, 0xb6, 0xe3, 0xb8, 0xd4, 0x10, 0xb9, 0x52, 0xb8, 0xb5, 0x5a, 0xd8, 0xad, 0xa0, 0xc7, 0xda,
    -	0x66, 0x0a, 0xeb, 0xae, 0x43, 0xbd, 0xfd, 0x70, 0x91, 0xd3, 0x0a, 0x38, 0xc3, 0x01, 0x64, 0x00,
    -	0x78, 0x12, 0xb3, 0xe3, 0xca, 0x8d, 0x7c, 0xbd, 0x40, 0xce, 0x63, 0x06, 0x2b, 0xae, 0xb3, 0x6d,
    -	0xf5, 0xc2, 0xb4, 0x83, 0x15, 0x10, 0x8e, 0x80, 0x2e, 0xde, 0x85, 0xf9, 0x1c, 0x6f, 0xd1, 0x05,
    -	0x28, 0xef, 0x92, 0x7d, 0x31, 0x6d, 0x98, 0xfd, 0x89, 0xe6, 0xa0, 0xb2, 0x67, 0xd8, 0x43, 0x91,
    -	0x7e, 0x27, 0xb1, 0xf8, 0xf1, 0x42, 0xe9, 0x39, 0x4d, 0xff, 0xa0, 0x12, 0x8d, 0x1d, 0xce, 0x98,
    -	0xaf, 0x42, 0xd5, 0x23, 0x03, 0xdb, 0x32, 0x0d, 0x5f, 0x12, 0x21, 0x4e, 0x7e, 0xb1, 0x6c, 0xc3,
    -	0x4a, 0x1a, 0xe3, 0xd6, 0xa5, 0xcf, 0x97, 0x5b, 0x97, 0x4f, 0x87, 0x5b, 0x7f, 0x17, 0xaa, 0x7e,
    -	0xc0, 0xaa, 0xc7, 0x38, 0xe4, 0x8d, 0x11, 0xf2, 0xab, 0x24, 0xd4, 0xaa, 0x03, 0x45, 0xa5, 0x15,
    -	0x68, 0x16, 0x89, 0xae, 0x8c, 0x48, 0xa2, 0x4f, 0x95, 0xf8, 0xb2, 0x7c, 0x33, 0x30, 0x86, 0x3e,
    -	0xe9, 0xf2, 0xdc, 0x56, 0x0d, 0xf3, 0x4d, 0x8b, 0xb7, 0x62, 0x29, 0x45, 0xef, 0xc4, 0x42, 0xb6,
    -	0x7a, 0x92, 0x90, 0x9d, 0xc9, 0x0f, 0x57, 0xb4, 0x09, 0xf3, 0x03, 0xcf, 0xed, 0x79, 0xc4, 0xf7,
    -	0xef, 0x10, 0xa3, 0x6b, 0x5b, 0x0e, 0x09, 0xe6, 0x47, 0x30, 0xa2, 0x2b, 0x87, 0x07, 0xcb, 0xf3,
    -	0xad, 0x6c, 0x15, 0x9c, 0x67, 0xab, 0x3f, 0x18, 0x83, 0x0b, 0xc9, 0x0a, 0x98, 0x43, 0x52, 0xb5,
    -	0x13, 0x91, 0xd4, 0x6b, 0x91, 0xcd, 0x20, 0x18, 0xbc, 0x5a, 0xfd, 0x8c, 0x0d, 0x71, 0x1b, 0x66,
    -	0x65, 0x36, 0x08, 0x84, 0x92, 0xa6, 0xab, 0xd5, 0xdf, 0x8c, 0x8b, 0x71, 0x52, 0x1f, 0xbd, 0x08,
    -	0xd3, 0x1e, 0xe7, 0xdd, 0x01, 0x80, 0xe0, 0xae, 0x8f, 0x48, 0x80, 0x69, 0x1c, 0x15, 0xe2, 0xb8,
    -	0x2e, 0xe3, 0xad, 0x21, 0x1d, 0x0d, 0x00, 0xc6, 0xe2, 0xbc, 0xf5, 0x76, 0x52, 0x01, 0xa7, 0x6d,
    -	0xd0, 0x3a, 0x5c, 0x1a, 0x3a, 0x69, 0x28, 0x11, 0xca, 0x57, 0x24, 0xd4, 0xa5, 0xcd, 0xb4, 0x0a,
    -	0xce, 0xb2, 0x43, 0xdb, 0x31, 0x2a, 0x3b, 0xce, 0xd3, 0xf3, 0xcd, 0xc2, 0x1b, 0xaf, 0x30, 0x97,
    -	0xcd, 0xa0, 0xdb, 0xd5, 0xa2, 0x74, 0x5b, 0xff, 0x83, 0x16, 0x2d, 0x42, 0x8a, 0x02, 0x1f, 0x77,
    -	0xcb, 0x94, 0xb2, 0x88, 0xb0, 0x23, 0x37, 0x9b, 0xfd, 0xde, 0x1a, 0x89, 0xfd, 0x86, 0xc5, 0xf3,
    -	0x78, 0xfa, 0xfb, 0x47, 0x0d, 0x66, 0xef, 0x75, 0x3a, 0xad, 0x55, 0x87, 0xef, 0x96, 0x96, 0x41,
    -	0x77, 0x58, 0x15, 0x1d, 0x18, 0x74, 0x27, 0x59, 0x45, 0x99, 0x0c, 0x73, 0x09, 0x7a, 0x06, 0xaa,
    -	0xec, 0x5f, 0xe6, 0x38, 0x0f, 0xd7, 0x49, 0x9e, 0x64, 0xaa, 0x2d, 0xd9, 0xf6, 0x30, 0xf2, 0x37,
    -	0x56, 0x9a, 0xe8, 0x5b, 0x30, 0xc1, 0xf6, 0x36, 0x71, 0xba, 0x05, 0xc9, 0xaf, 0x74, 0xaa, 0x21,
    -	0x8c, 0x42, 0x3e, 0x23, 0x1b, 0x70, 0x00, 0xa7, 0xef, 0xc2, 0x5c, 0x64, 0x10, 0x78, 0x68, 0x93,
    -	0x37, 0x59, 0xbd, 0x42, 0x6d, 0xa8, 0xb0, 0xde, 0x59, 0x55, 0x2a, 0x17, 0xb8, 0x5e, 0x4c, 0x4c,
    -	0x44, 0xc8, 0x3d, 0xd8, 0x2f, 0x1f, 0x0b, 0x2c, 0x7d, 0x03, 0x26, 0x56, 0x5b, 0x0d, 0xdb, 0x15,
    -	0x7c, 0xc3, 0xb4, 0xba, 0x5e, 0x72, 0xa6, 0x56, 0x56, 0xef, 0x60, 0xcc, 0x25, 0x48, 0x87, 0x71,
    -	0x72, 0xdf, 0x24, 0x03, 0xca, 0x29, 0xc6, 0x64, 0x03, 0x58, 0x22, 0xbd, 0xcb, 0x5b, 0xb0, 0x94,
    -	0xe8, 0x3f, 0x29, 0xc1, 0x84, 0xec, 0xf6, 0x0c, 0xce, 0x1f, 0x6b, 0xb1, 0xf3, 0xc7, 0x53, 0xc5,
    -	0x96, 0x20, 0xf7, 0xf0, 0xd1, 0x49, 0x1c, 0x3e, 0xae, 0x15, 0xc4, 0x3b, 0xfa, 0xe4, 0xf1, 0x5e,
    -	0x09, 0x66, 0xe2, 0x8b, 0x8f, 0x9e, 0x85, 0x29, 0x96, 0x6a, 0x2d, 0x93, 0x34, 0x43, 0x86, 0xa7,
    -	0xae, 0x1f, 0xda, 0xa1, 0x08, 0x47, 0xf5, 0x50, 0x4f, 0x99, 0xb5, 0x5c, 0x8f, 0xca, 0x41, 0xe7,
    -	0x4f, 0xe9, 0x90, 0x5a, 0x76, 0x4d, 0x5c, 0xb6, 0xd7, 0x56, 0x1d, 0xba, 0xe1, 0xb5, 0xa9, 0x67,
    -	0x39, 0xbd, 0x54, 0x47, 0x0c, 0x0c, 0x47, 0x91, 0xd1, 0x5b, 0x2c, 0xed, 0xfb, 0xee, 0xd0, 0x33,
    -	0x49, 0x16, 0x7d, 0x0b, 0xa8, 0x07, 0xdb, 0x08, 0xdd, 0x35, 0xd7, 0x34, 0x6c, 0xb1, 0x38, 0x98,
    -	0x6c, 0x13, 0x8f, 0x38, 0x26, 0x09, 0x28, 0x93, 0x80, 0xc0, 0x0a, 0x4c, 0xff, 0xad, 0x06, 0x53,
    -	0x72, 0x2e, 0xce, 0x80, 0xa8, 0xbf, 0x1e, 0x27, 0xea, 0x4f, 0x14, 0xdc, 0xa1, 0xd9, 0x2c, 0xfd,
    -	0x77, 0x1a, 0x2c, 0x06, 0xae, 0xbb, 0x46, 0xb7, 0x61, 0xd8, 0x86, 0x63, 0x12, 0x2f, 0x88, 0xf5,
    -	0x45, 0x28, 0x59, 0x03, 0xb9, 0x92, 0x20, 0x01, 0x4a, 0xab, 0x2d, 0x5c, 0xb2, 0x06, 0xac, 0x8a,
    -	0xee, 0xb8, 0x3e, 0xe5, 0x6c, 0x5e, 0x1c, 0x14, 0x95, 0xd7, 0xf7, 0x64, 0x3b, 0x56, 0x1a, 0x68,
    -	0x13, 0x2a, 0x03, 0xd7, 0xa3, 0xac, 0x72, 0x95, 0x13, 0xeb, 0x7b, 0x84, 0xd7, 0x6c, 0xdd, 0x64,
    -	0x20, 0x86, 0x3b, 0x9d, 0xc1, 0x60, 0x81, 0xa6, 0xff, 0x50, 0x83, 0x47, 0x33, 0xfc, 0x97, 0xa4,
    -	0xa1, 0x0b, 0x13, 0x96, 0x10, 0xca, 0xf4, 0xf2, 0x7c, 0xb1, 0x6e, 0x33, 0xa6, 0x22, 0x4c, 0x6d,
    -	0x41, 0x0a, 0x0b, 0xa0, 0xf5, 0x5f, 0x69, 0x70, 0x31, 0xe5, 0x2f, 0x4f, 0xd1, 0x2c, 0x9e, 0x25,
    -	0xdb, 0x56, 0x29, 0x9a, 0x85, 0x25, 0x97, 0xa0, 0xd7, 0xa1, 0xca, 0xdf, 0x88, 0x4c, 0xd7, 0x96,
    -	0x13, 0x58, 0x0f, 0x26, 0xb0, 0x25, 0xdb, 0x1f, 0x1e, 0x2c, 0x5f, 0xc9, 0x38, 0x6b, 0x07, 0x62,
    -	0xac, 0x00, 0xd0, 0x32, 0x54, 0x88, 0xe7, 0xb9, 0x9e, 0x4c, 0xf6, 0x93, 0x6c, 0xa6, 0xee, 0xb2,
    -	0x06, 0x2c, 0xda, 0xf5, 0x5f, 0x87, 0x41, 0xca, 0xb2, 0x2f, 0xf3, 0x8f, 0x2d, 0x4e, 0x32, 0x31,
    -	0xb2, 0xa5, 0xc3, 0x5c, 0x82, 0x86, 0x70, 0xc1, 0x4a, 0xa4, 0x6b, 0xb9, 0x3b, 0xeb, 0xc5, 0xa6,
    -	0x51, 0x99, 0x35, 0x16, 0x24, 0xfc, 0x85, 0xa4, 0x04, 0xa7, 0xba, 0xd0, 0x09, 0xa4, 0xb4, 0xd0,
    -	0x1b, 0x30, 0xb6, 0x43, 0xe9, 0x20, 0xe3, 0xb2, 0xff, 0x98, 0x22, 0x11, 0xba, 0x50, 0xe5, 0xa3,
    -	0xeb, 0x74, 0x5a, 0x98, 0x43, 0xe9, 0xbf, 0x2f, 0xa9, 0xf9, 0xe0, 0x27, 0xa4, 0x6f, 0xaa, 0xd1,
    -	0xae, 0xd8, 0x86, 0xef, 0xf3, 0x14, 0x26, 0x4e, 0xf3, 0x73, 0x11, 0xc7, 0x95, 0x0c, 0xa7, 0xb4,
    -	0x51, 0x27, 0x2c, 0x9e, 0xda, 0x49, 0x8a, 0xe7, 0x54, 0x56, 0xe1, 0x44, 0xf7, 0xa0, 0x4c, 0xed,
    -	0xa2, 0xa7, 0x72, 0x89, 0xd8, 0x59, 0x6b, 0x37, 0xa6, 0xe4, 0x94, 0x97, 0x3b, 0x6b, 0x6d, 0xcc,
    -	0x20, 0xd0, 0x06, 0x54, 0xbc, 0xa1, 0x4d, 0x58, 0x1d, 0x28, 0x17, 0xaf, 0x2b, 0x6c, 0x06, 0xc3,
    -	0xcd, 0xc7, 0x7e, 0xf9, 0x58, 0xe0, 0xe8, 0x3f, 0xd2, 0x60, 0x3a, 0x56, 0x2d, 0x90, 0x07, 0xe7,
    -	0xed, 0xc8, 0xde, 0x91, 0xf3, 0xf0, 0xdc, 0xe8, 0xbb, 0x4e, 0x6e, 0xfa, 0x39, 0xd9, 0xef, 0xf9,
    -	0xa8, 0x0c, 0xc7, 0xfa, 0xd0, 0x0d, 0x80, 0x70, 0xd8, 0x6c, 0x1f, 0xb0, 0xe0, 0x15, 0x1b, 0x5e,
    -	0xee, 0x03, 0x16, 0xd3, 0x3e, 0x16, 0xed, 0xe8, 0x26, 0x80, 0x4f, 0x4c, 0x8f, 0xd0, 0x66, 0x98,
    -	0xb8, 0x54, 0x39, 0x6e, 0x2b, 0x09, 0x8e, 0x68, 0xe9, 0x7f, 0xd2, 0x60, 0xba, 0x49, 0xe8, 0xf7,
    -	0x5d, 0x6f, 0xb7, 0xe5, 0xda, 0x96, 0xb9, 0x7f, 0x06, 0x24, 0x00, 0xc7, 0x48, 0xc0, 0x71, 0xf9,
    -	0x32, 0xe6, 0x5d, 0x1e, 0x15, 0xd0, 0x3f, 0xd4, 0x60, 0x3e, 0xa6, 0x79, 0x37, 0xcc, 0x07, 0x2a,
    -	0x41, 0x6b, 0x85, 0x12, 0x74, 0x0c, 0x86, 0x25, 0xb5, 0xec, 0x04, 0x8d, 0xd6, 0xa0, 0x44, 0x5d,
    -	0x19, 0xbd, 0xa3, 0x61, 0x12, 0xe2, 0x85, 0x35, 0xa7, 0xe3, 0xe2, 0x12, 0x75, 0xd9, 0x42, 0x2c,
    -	0xc4, 0xb4, 0xa2, 0x19, 0xed, 0x73, 0x1a, 0x01, 0x86, 0xb1, 0x6d, 0xcf, 0xed, 0x9f, 0x78, 0x0c,
    -	0x6a, 0x21, 0x5e, 0xf1, 0xdc, 0x3e, 0xe6, 0x58, 0xfa, 0x47, 0x1a, 0x5c, 0x8c, 0x69, 0x9e, 0x01,
    -	0x6f, 0x78, 0x23, 0xce, 0x1b, 0xae, 0x8d, 0x32, 0x90, 0x1c, 0xf6, 0xf0, 0x51, 0x29, 0x31, 0x0c,
    -	0x36, 0x60, 0xb4, 0x0d, 0x53, 0x03, 0xb7, 0xdb, 0x3e, 0x85, 0x07, 0xda, 0x59, 0xc6, 0xe7, 0x5a,
    -	0x21, 0x16, 0x8e, 0x02, 0xa3, 0xfb, 0x70, 0x91, 0x51, 0x0b, 0x7f, 0x60, 0x98, 0xa4, 0x7d, 0x0a,
    -	0x57, 0x56, 0x8f, 0xf0, 0x17, 0xa0, 0x24, 0x22, 0x4e, 0x77, 0x82, 0xd6, 0x61, 0xc2, 0x1a, 0xf0,
    -	0xf3, 0x85, 0x24, 0x92, 0xc7, 0x92, 0x30, 0x71, 0x1a, 0x11, 0x29, 0x5e, 0xfe, 0xc0, 0x01, 0x86,
    -	0xfe, 0xd7, 0x64, 0x34, 0x70, 0xba, 0xfa, 0x6a, 0x84, 0x1e, 0xc8, 0xb7, 0x9a, 0x93, 0x51, 0x83,
    -	0xa6, 0x64, 0x22, 0x27, 0x65, 0xd6, 0xd5, 0x04, 0x6f, 0xf9, 0x0a, 0x4c, 0x10, 0xa7, 0xcb, 0xc9,
    -	0xba, 0xb8, 0x08, 0xe1, 0xa3, 0xba, 0x2b, 0x9a, 0x70, 0x20, 0xd3, 0x7f, 0x5c, 0x4e, 0x8c, 0x8a,
    -	0x97, 0xd9, 0x77, 0x4f, 0x2d, 0x38, 0x14, 0xe1, 0xcf, 0x0d, 0x90, 0xad, 0x90, 0xfe, 0x89, 0x98,
    -	0xff, 0xc6, 0x28, 0x31, 0x1f, 0xad, 0x7f, 0xb9, 0xe4, 0x0f, 0x7d, 0x07, 0xc6, 0x89, 0xe8, 0x42,
    -	0x54, 0xd5, 0x5b, 0xa3, 0x74, 0x11, 0xa6, 0xdf, 0xf0, 0x9c, 0x25, 0xdb, 0x24, 0x2a, 0x7a, 0x99,
    -	0xcd, 0x17, 0xd3, 0x65, 0xc7, 0x12, 0xc1, 0x9e, 0x27, 0x1b, 0x8f, 0x89, 0x61, 0xab, 0xe6, 0x87,
    -	0x07, 0xcb, 0x10, 0xfe, 0xc4, 0x51, 0x0b, 0xfe, 0x7a, 0x26, 0xef, 0x6c, 0xce, 0xe6, 0x0b, 0xa4,
    -	0xd1, 0x5e, 0xcf, 0x42, 0xd7, 0x4e, 0xed, 0xf5, 0x2c, 0x02, 0x79, 0xf4, 0x19, 0xf6, 0x9f, 0x25,
    -	0xb8, 0x14, 0x2a, 0x17, 0x7e, 0x3d, 0xcb, 0x30, 0xf9, 0xff, 0x57, 0x48, 0xc5, 0x5e, 0xb4, 0xc2,
    -	0xa9, 0xfb, 0xef, 0x7b, 0xd1, 0x0a, 0x7d, 0xcb, 0xa9, 0x76, 0xbf, 0x29, 0x45, 0x07, 0x30, 0xe2,
    -	0xb3, 0xca, 0x29, 0x7c, 0x88, 0xf3, 0x85, 0x7b, 0x99, 0xd1, 0xff, 0x52, 0x86, 0x0b, 0xc9, 0xdd,
    -	0x18, 0xbb, 0x7d, 0xd7, 0x8e, 0xbd, 0x7d, 0x6f, 0xc1, 0xdc, 0xf6, 0xd0, 0xb6, 0xf7, 0xf9, 0x18,
    -	0x22, 0x57, 0xf0, 0xe2, 0xde, 0xfe, 0x4b, 0xd2, 0x72, 0xee, 0x95, 0x0c, 0x1d, 0x9c, 0x69, 0x99,
    -	0xbe, 0x8c, 0x1f, 0xfb, 0x4f, 0x2f, 0xe3, 0x2b, 0x27, 0xb8, 0x8c, 0xcf, 0x7e, 0xcf, 0x28, 0x9f,
    -	0xe8, 0x3d, 0xe3, 0x24, 0x37, 0xf1, 0x19, 0x49, 0xec, 0xd8, 0xaf, 0x4a, 0x5e, 0x82, 0x99, 0xf8,
    -	0xeb, 0x90, 0x58, 0x4b, 0xf1, 0x40, 0x25, 0xdf, 0x62, 0x22, 0x6b, 0x29, 0xda, 0xb1, 0xd2, 0xd0,
    -	0x0f, 0x35, 0xb8, 0x9c, 0xfd, 0x15, 0x08, 0xb2, 0x61, 0xa6, 0x6f, 0xdc, 0x8f, 0x7e, 0x99, 0xa3,
    -	0x9d, 0x90, 0xad, 0xf0, 0x67, 0x81, 0xf5, 0x18, 0x16, 0x4e, 0x60, 0xa3, 0xb7, 0xa1, 0xda, 0x37,
    -	0xee, 0xb7, 0x87, 0x5e, 0x8f, 0x9c, 0x98, 0x15, 0xf1, 0x6d, 0xb4, 0x2e, 0x51, 0xb0, 0xc2, 0xd3,
    -	0x3f, 0xd3, 0x60, 0x3e, 0xe7, 0xb2, 0xff, 0x7f, 0x68, 0x94, 0xef, 0x95, 0xa0, 0xd2, 0x36, 0x0d,
    -	0x9b, 0x9c, 0x01, 0xa1, 0x78, 0x2d, 0x46, 0x28, 0x8e, 0xfb, 0x9a, 0x94, 0x7b, 0x95, 0xcb, 0x25,
    -	0x70, 0x82, 0x4b, 0x3c, 0x55, 0x08, 0xed, 0x68, 0x1a, 0xf1, 0x3c, 0x4c, 0xaa, 0x4e, 0x47, 0xcb,
    -	0x6e, 0xfa, 0x2f, 0x4b, 0x30, 0x15, 0xe9, 0x62, 0xc4, 0xdc, 0xb8, 0x1d, 0x2b, 0x08, 0xe5, 0x02,
    -	0x37, 0x2d, 0x91, 0xbe, 0x6a, 0x41, 0x09, 0x10, 0x5f, 0x43, 0x84, 0xef, 0xdf, 0xe9, 0xca, 0xf0,
    -	0x12, 0xcc, 0x50, 0xc3, 0xeb, 0x11, 0xaa, 0x68, 0xbb, 0xb8, 0x64, 0x54, 0x9f, 0xe5, 0x74, 0x62,
    -	0x52, 0x9c, 0xd0, 0x5e, 0x7c, 0x11, 0xa6, 0x63, 0x9d, 0x8d, 0xf2, 0x31, 0x43, 0x63, 0xe5, 0xc1,
    -	0xa7, 0x4b, 0xe7, 0x3e, 0xfe, 0x74, 0xe9, 0xdc, 0x27, 0x9f, 0x2e, 0x9d, 0xfb, 0xc1, 0xe1, 0x92,
    -	0xf6, 0xe0, 0x70, 0x49, 0xfb, 0xf8, 0x70, 0x49, 0xfb, 0xe4, 0x70, 0x49, 0xfb, 0xfb, 0xe1, 0x92,
    -	0xf6, 0xd3, 0xcf, 0x96, 0xce, 0xbd, 0xfd, 0xd8, 0x91, 0xff, 0xb7, 0xe1, 0xdf, 0x01, 0x00, 0x00,
    -	0xff, 0xff, 0x5f, 0xd8, 0x14, 0x50, 0xfb, 0x30, 0x00, 0x00,
    +	// 2875 bytes of a gzipped FileDescriptorProto
    +	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x5b, 0xcf, 0x6f, 0x24, 0x47,
    +	0xf5, 0xdf, 0x9e, 0xf1, 0xd8, 0xe3, 0xe7, 0xb5, 0xbd, 0x5b, 0xeb, 0xac, 0x1d, 0xef, 0x37, 0x76,
    +	0xd4, 0x5f, 0x11, 0x36, 0x61, 0x77, 0x86, 0xdd, 0x24, 0x4b, 0x7e, 0x48, 0x09, 0x3b, 0xde, 0x4d,
    +	0xd6, 0x89, 0x7f, 0x4c, 0x6a, 0xc6, 0x09, 0x8a, 0x08, 0xd0, 0xee, 0x29, 0x8f, 0x3b, 0xee, 0xe9,
    +	0x1e, 0x75, 0xd7, 0x98, 0xf5, 0x0d, 0x04, 0x97, 0x9c, 0x40, 0x42, 0x21, 0x1c, 0x91, 0x90, 0xb8,
    +	0x72, 0xe5, 0x10, 0x22, 0x10, 0x41, 0x8a, 0x38, 0x45, 0xe2, 0x40, 0x4e, 0x16, 0x71, 0x4e, 0x88,
    +	0x7f, 0x00, 0xed, 0x09, 0xd5, 0x8f, 0xae, 0xfe, 0x6d, 0xf7, 0x0c, 0x5e, 0x8b, 0x20, 0x4e, 0xeb,
    +	0xa9, 0xf7, 0xde, 0xa7, 0x5e, 0x55, 0xbd, 0x7a, 0xef, 0x53, 0x55, 0xbd, 0x70, 0x7d, 0xef, 0x39,
    +	0xbf, 0x66, 0xb9, 0x75, 0xa3, 0x6f, 0xd5, 0xc9, 0x7d, 0x4a, 0x1c, 0xdf, 0x72, 0x1d, 0xbf, 0xbe,
    +	0x7f, 0x63, 0x9b, 0x50, 0xe3, 0x46, 0xbd, 0x4b, 0x1c, 0xe2, 0x19, 0x94, 0x74, 0x6a, 0x7d, 0xcf,
    +	0xa5, 0x2e, 0x7a, 0x4c, 0xa8, 0xd7, 0x8c, 0xbe, 0x55, 0x0b, 0xd5, 0x6b, 0x52, 0x7d, 0xf1, 0x7a,
    +	0xd7, 0xa2, 0xbb, 0x83, 0xed, 0x9a, 0xe9, 0xf6, 0xea, 0x5d, 0xb7, 0xeb, 0xd6, 0xb9, 0xd5, 0xf6,
    +	0x60, 0x87, 0xff, 0xe2, 0x3f, 0xf8, 0x5f, 0x02, 0x6d, 0x51, 0x8f, 0x74, 0x6e, 0xba, 0x1e, 0xa9,
    +	0xef, 0xa7, 0x7a, 0x5c, 0x7c, 0x26, 0xd4, 0xe9, 0x19, 0xe6, 0xae, 0xe5, 0x10, 0xef, 0xa0, 0xde,
    +	0xdf, 0xeb, 0xb2, 0x06, 0xbf, 0xde, 0x23, 0xd4, 0xc8, 0xb2, 0xaa, 0xe7, 0x59, 0x79, 0x03, 0x87,
    +	0x5a, 0x3d, 0x92, 0x32, 0xb8, 0x75, 0x92, 0x81, 0x6f, 0xee, 0x92, 0x9e, 0x91, 0xb2, 0x7b, 0x3a,
    +	0xcf, 0x6e, 0x40, 0x2d, 0xbb, 0x6e, 0x39, 0xd4, 0xa7, 0x5e, 0xd2, 0x48, 0x7f, 0xbf, 0x04, 0x93,
    +	0x77, 0x0c, 0xd2, 0x73, 0x9d, 0x16, 0xa1, 0xe8, 0x7b, 0x50, 0x65, 0xc3, 0xe8, 0x18, 0xd4, 0x58,
    +	0xd0, 0x1e, 0xd7, 0xae, 0x4e, 0xdd, 0xfc, 0x7a, 0x2d, 0x9c, 0x66, 0x85, 0x5a, 0xeb, 0xef, 0x75,
    +	0x59, 0x83, 0x5f, 0x63, 0xda, 0xb5, 0xfd, 0x1b, 0xb5, 0xcd, 0xed, 0x77, 0x89, 0x49, 0xd7, 0x09,
    +	0x35, 0x1a, 0xe8, 0x93, 0xc3, 0xe5, 0x73, 0x47, 0x87, 0xcb, 0x10, 0xb6, 0x61, 0x85, 0x8a, 0x36,
    +	0x60, 0xcc, 0xef, 0x13, 0x73, 0xa1, 0xc4, 0xd1, 0xaf, 0xd5, 0x8e, 0x5d, 0xc4, 0x9a, 0xf2, 0xac,
    +	0xd5, 0x27, 0x66, 0xe3, 0xbc, 0x44, 0x1e, 0x63, 0xbf, 0x30, 0xc7, 0x41, 0x6f, 0xc2, 0xb8, 0x4f,
    +	0x0d, 0x3a, 0xf0, 0x17, 0xca, 0x1c, 0xb1, 0x56, 0x18, 0x91, 0x5b, 0x35, 0x66, 0x24, 0xe6, 0xb8,
    +	0xf8, 0x8d, 0x25, 0x9a, 0xfe, 0xf7, 0x12, 0x20, 0xa5, 0xbb, 0xe2, 0x3a, 0x1d, 0x8b, 0x5a, 0xae,
    +	0x83, 0x5e, 0x80, 0x31, 0x7a, 0xd0, 0x27, 0x7c, 0x72, 0x26, 0x1b, 0x4f, 0x04, 0x0e, 0xb5, 0x0f,
    +	0xfa, 0xe4, 0xc1, 0xe1, 0xf2, 0xe5, 0xb4, 0x05, 0x93, 0x60, 0x6e, 0x83, 0xd6, 0x94, 0xab, 0x25,
    +	0x6e, 0xfd, 0x4c, 0xbc, 0xeb, 0x07, 0x87, 0xcb, 0x19, 0x41, 0x58, 0x53, 0x48, 0x71, 0x07, 0xd1,
    +	0x3e, 0x20, 0xdb, 0xf0, 0x69, 0xdb, 0x33, 0x1c, 0x5f, 0xf4, 0x64, 0xf5, 0x88, 0x9c, 0x84, 0xa7,
    +	0x8a, 0x2d, 0x1a, 0xb3, 0x68, 0x2c, 0x4a, 0x2f, 0xd0, 0x5a, 0x0a, 0x0d, 0x67, 0xf4, 0x80, 0x9e,
    +	0x80, 0x71, 0x8f, 0x18, 0xbe, 0xeb, 0x2c, 0x8c, 0xf1, 0x51, 0xa8, 0x09, 0xc4, 0xbc, 0x15, 0x4b,
    +	0x29, 0x7a, 0x12, 0x26, 0x7a, 0xc4, 0xf7, 0x8d, 0x2e, 0x59, 0xa8, 0x70, 0xc5, 0x59, 0xa9, 0x38,
    +	0xb1, 0x2e, 0x9a, 0x71, 0x20, 0xd7, 0x3f, 0xd4, 0x60, 0x5a, 0xcd, 0xdc, 0x9a, 0xe5, 0x53, 0xf4,
    +	0xed, 0x54, 0x1c, 0xd6, 0x8a, 0x0d, 0x89, 0x59, 0xf3, 0x28, 0xbc, 0x20, 0x7b, 0xab, 0x06, 0x2d,
    +	0x91, 0x18, 0x5c, 0x87, 0x8a, 0x45, 0x49, 0x8f, 0xad, 0x43, 0xf9, 0xea, 0xd4, 0xcd, 0xab, 0x45,
    +	0x43, 0xa6, 0x31, 0x2d, 0x41, 0x2b, 0xab, 0xcc, 0x1c, 0x0b, 0x14, 0xfd, 0xe7, 0x63, 0x11, 0xf7,
    +	0x59, 0x68, 0xa2, 0x77, 0xa0, 0xea, 0x13, 0x9b, 0x98, 0xd4, 0xf5, 0xa4, 0xfb, 0x4f, 0x17, 0x74,
    +	0xdf, 0xd8, 0x26, 0x76, 0x4b, 0x9a, 0x36, 0xce, 0x33, 0xff, 0x83, 0x5f, 0x58, 0x41, 0xa2, 0x37,
    +	0xa0, 0x4a, 0x49, 0xaf, 0x6f, 0x1b, 0x94, 0xc8, 0x7d, 0xf4, 0xff, 0xd1, 0x21, 0xb0, 0xc8, 0x61,
    +	0x60, 0x4d, 0xb7, 0xd3, 0x96, 0x6a, 0x7c, 0xfb, 0xa8, 0x29, 0x09, 0x5a, 0xb1, 0x82, 0x41, 0xfb,
    +	0x30, 0x33, 0xe8, 0x77, 0x98, 0x26, 0x65, 0xd9, 0xa1, 0x7b, 0x20, 0x23, 0xe9, 0x56, 0xd1, 0xb9,
    +	0xd9, 0x8a, 0x59, 0x37, 0x2e, 0xcb, 0xbe, 0x66, 0xe2, 0xed, 0x38, 0xd1, 0x0b, 0xba, 0x0d, 0xb3,
    +	0x3d, 0xcb, 0xc1, 0xc4, 0xe8, 0x1c, 0xb4, 0x88, 0xe9, 0x3a, 0x1d, 0x9f, 0x87, 0x55, 0xa5, 0x31,
    +	0x2f, 0x01, 0x66, 0xd7, 0xe3, 0x62, 0x9c, 0xd4, 0x47, 0xaf, 0x01, 0x0a, 0x86, 0xf1, 0xaa, 0x48,
    +	0x6e, 0x96, 0xeb, 0xf0, 0x98, 0x2b, 0x87, 0xc1, 0xdd, 0x4e, 0x69, 0xe0, 0x0c, 0x2b, 0xb4, 0x06,
    +	0x73, 0x1e, 0xd9, 0xb7, 0xd8, 0x18, 0xef, 0x59, 0x3e, 0x75, 0xbd, 0x83, 0x35, 0xab, 0x67, 0xd1,
    +	0x85, 0x71, 0xee, 0xd3, 0xc2, 0xd1, 0xe1, 0xf2, 0x1c, 0xce, 0x90, 0xe3, 0x4c, 0x2b, 0xfd, 0x83,
    +	0x71, 0x98, 0x4d, 0xe4, 0x1b, 0xf4, 0x26, 0x5c, 0x36, 0x07, 0x9e, 0x47, 0x1c, 0xba, 0x31, 0xe8,
    +	0x6d, 0x13, 0xaf, 0x65, 0xee, 0x92, 0xce, 0xc0, 0x26, 0x1d, 0x1e, 0x28, 0x95, 0xc6, 0x92, 0xf4,
    +	0xf8, 0xf2, 0x4a, 0xa6, 0x16, 0xce, 0xb1, 0x66, 0xb3, 0xe0, 0xf0, 0xa6, 0x75, 0xcb, 0xf7, 0x15,
    +	0x66, 0x89, 0x63, 0xaa, 0x59, 0xd8, 0x48, 0x69, 0xe0, 0x0c, 0x2b, 0xe6, 0x63, 0x87, 0xf8, 0x96,
    +	0x47, 0x3a, 0x49, 0x1f, 0xcb, 0x71, 0x1f, 0xef, 0x64, 0x6a, 0xe1, 0x1c, 0x6b, 0xf4, 0x2c, 0x4c,
    +	0x89, 0xde, 0xf8, 0xfa, 0xc9, 0x85, 0xbe, 0x24, 0xc1, 0xa6, 0x36, 0x42, 0x11, 0x8e, 0xea, 0xb1,
    +	0xa1, 0xb9, 0xdb, 0x3e, 0xf1, 0xf6, 0x49, 0x27, 0x7f, 0x81, 0x37, 0x53, 0x1a, 0x38, 0xc3, 0x8a,
    +	0x0d, 0x4d, 0x44, 0x60, 0x6a, 0x68, 0xe3, 0xf1, 0xa1, 0x6d, 0x65, 0x6a, 0xe1, 0x1c, 0x6b, 0x16,
    +	0xc7, 0xc2, 0xe5, 0xdb, 0xfb, 0x86, 0x65, 0x1b, 0xdb, 0x36, 0x59, 0x98, 0x88, 0xc7, 0xf1, 0x46,
    +	0x5c, 0x8c, 0x93, 0xfa, 0xe8, 0x55, 0xb8, 0x28, 0x9a, 0xb6, 0x1c, 0x43, 0x81, 0x54, 0x39, 0xc8,
    +	0xa3, 0x12, 0xe4, 0xe2, 0x46, 0x52, 0x01, 0xa7, 0x6d, 0xd0, 0x0b, 0x30, 0x63, 0xba, 0xb6, 0xcd,
    +	0xe3, 0x71, 0xc5, 0x1d, 0x38, 0x74, 0x61, 0x92, 0xa3, 0x20, 0xb6, 0x1f, 0x57, 0x62, 0x12, 0x9c,
    +	0xd0, 0x44, 0x04, 0xc0, 0x0c, 0x0a, 0x8e, 0xbf, 0x00, 0x3c, 0x3f, 0xde, 0x28, 0x9a, 0x03, 0x54,
    +	0xa9, 0x0a, 0x39, 0x80, 0x6a, 0xf2, 0x71, 0x04, 0x58, 0xff, 0xb3, 0x06, 0xf3, 0x39, 0xa9, 0x03,
    +	0xbd, 0x1c, 0x2b, 0xb1, 0x5f, 0x4b, 0x94, 0xd8, 0x2b, 0x39, 0x66, 0x91, 0x3a, 0xeb, 0xc0, 0xb4,
    +	0xc7, 0x46, 0xe5, 0x74, 0x85, 0x8a, 0xcc, 0x91, 0xcf, 0x9e, 0x30, 0x0c, 0x1c, 0xb5, 0x09, 0x73,
    +	0xfe, 0xc5, 0xa3, 0xc3, 0xe5, 0xe9, 0x98, 0x0c, 0xc7, 0xe1, 0xf5, 0x5f, 0x94, 0x00, 0xee, 0x90,
    +	0xbe, 0xed, 0x1e, 0xf4, 0x88, 0x73, 0x16, 0x1c, 0x6a, 0x33, 0xc6, 0xa1, 0xae, 0x9f, 0xb4, 0x3c,
    +	0xca, 0xb5, 0x5c, 0x12, 0xf5, 0x56, 0x82, 0x44, 0xd5, 0x8b, 0x43, 0x1e, 0xcf, 0xa2, 0xfe, 0x5a,
    +	0x86, 0x4b, 0xa1, 0x72, 0x48, 0xa3, 0x5e, 0x8c, 0xad, 0xf1, 0x57, 0x13, 0x6b, 0x3c, 0x9f, 0x61,
    +	0xf2, 0xd0, 0x78, 0xd4, 0xbb, 0x30, 0xc3, 0x58, 0x8e, 0x58, 0x4b, 0xce, 0xa1, 0xc6, 0x87, 0xe6,
    +	0x50, 0xaa, 0xda, 0xad, 0xc5, 0x90, 0x70, 0x02, 0x39, 0x87, 0xb3, 0x4d, 0x7c, 0x19, 0x39, 0xdb,
    +	0x47, 0x1a, 0xcc, 0x84, 0xcb, 0x74, 0x06, 0xa4, 0x6d, 0x23, 0x4e, 0xda, 0x9e, 0x2c, 0x1c, 0xa2,
    +	0x39, 0xac, 0xed, 0x9f, 0x8c, 0xe0, 0x2b, 0x25, 0xb6, 0xc1, 0xb7, 0x0d, 0x73, 0x0f, 0x3d, 0x0e,
    +	0x63, 0x8e, 0xd1, 0x0b, 0x22, 0x53, 0x6d, 0x96, 0x0d, 0xa3, 0x47, 0x30, 0x97, 0xa0, 0xf7, 0x35,
    +	0x40, 0xb2, 0x0a, 0xdc, 0x76, 0x1c, 0x97, 0x1a, 0x22, 0x57, 0x0a, 0xb7, 0x56, 0x0b, 0xbb, 0x15,
    +	0xf4, 0x58, 0xdb, 0x4a, 0x61, 0xdd, 0x75, 0xa8, 0x77, 0x10, 0x2e, 0x72, 0x5a, 0x01, 0x67, 0x38,
    +	0x80, 0x0c, 0x00, 0x4f, 0x62, 0xb6, 0x5d, 0xb9, 0x91, 0xaf, 0x17, 0xc8, 0x79, 0xcc, 0x60, 0xc5,
    +	0x75, 0x76, 0xac, 0x6e, 0x98, 0x76, 0xb0, 0x02, 0xc2, 0x11, 0xd0, 0xc5, 0xbb, 0x30, 0x9f, 0xe3,
    +	0x2d, 0xba, 0x00, 0xe5, 0x3d, 0x72, 0x20, 0xa6, 0x0d, 0xb3, 0x3f, 0xd1, 0x1c, 0x54, 0xf6, 0x0d,
    +	0x7b, 0x20, 0xd2, 0xef, 0x24, 0x16, 0x3f, 0x5e, 0x28, 0x3d, 0xa7, 0xe9, 0x1f, 0x56, 0xa2, 0xb1,
    +	0xc3, 0x19, 0xf3, 0x55, 0xa8, 0x7a, 0xa4, 0x6f, 0x5b, 0xa6, 0xe1, 0x4b, 0x22, 0xc4, 0xc9, 0x2f,
    +	0x96, 0x6d, 0x58, 0x49, 0x63, 0xdc, 0xba, 0xf4, 0x70, 0xb9, 0x75, 0xf9, 0x74, 0xb8, 0xf5, 0x77,
    +	0xa1, 0xea, 0x07, 0xac, 0x7a, 0x8c, 0x43, 0xde, 0x18, 0x22, 0xbf, 0x4a, 0x42, 0xad, 0x3a, 0x50,
    +	0x54, 0x5a, 0x81, 0x66, 0x91, 0xe8, 0xca, 0x90, 0x24, 0xfa, 0x54, 0x89, 0x2f, 0xcb, 0x37, 0x7d,
    +	0x63, 0xe0, 0x93, 0x0e, 0xcf, 0x6d, 0xd5, 0x30, 0xdf, 0x34, 0x79, 0x2b, 0x96, 0x52, 0xf4, 0x4e,
    +	0x2c, 0x64, 0xab, 0xa3, 0x84, 0xec, 0x4c, 0x7e, 0xb8, 0xa2, 0x2d, 0x98, 0xef, 0x7b, 0x6e, 0xd7,
    +	0x23, 0xbe, 0x7f, 0x87, 0x18, 0x1d, 0xdb, 0x72, 0x48, 0x30, 0x3f, 0x82, 0x11, 0x5d, 0x39, 0x3a,
    +	0x5c, 0x9e, 0x6f, 0x66, 0xab, 0xe0, 0x3c, 0x5b, 0xfd, 0x67, 0x15, 0xb8, 0x90, 0xac, 0x80, 0x39,
    +	0x24, 0x55, 0x1b, 0x89, 0xa4, 0x5e, 0x8b, 0x6c, 0x06, 0xc1, 0xe0, 0xd5, 0xea, 0x67, 0x6c, 0x88,
    +	0xdb, 0x30, 0x2b, 0xb3, 0x41, 0x20, 0x94, 0x34, 0x5d, 0xad, 0xfe, 0x56, 0x5c, 0x8c, 0x93, 0xfa,
    +	0xe8, 0x45, 0x98, 0xf6, 0x38, 0xef, 0x0e, 0x00, 0x04, 0x77, 0x7d, 0x44, 0x02, 0x4c, 0xe3, 0xa8,
    +	0x10, 0xc7, 0x75, 0x19, 0x6f, 0x0d, 0xe9, 0x68, 0x00, 0x30, 0x16, 0xe7, 0xad, 0xb7, 0x93, 0x0a,
    +	0x38, 0x6d, 0x83, 0xd6, 0xe1, 0xd2, 0xc0, 0x49, 0x43, 0x89, 0x50, 0xbe, 0x22, 0xa1, 0x2e, 0x6d,
    +	0xa5, 0x55, 0x70, 0x96, 0x1d, 0x5a, 0x85, 0x4b, 0x94, 0x78, 0x3d, 0xcb, 0x31, 0xa8, 0xe5, 0x74,
    +	0x15, 0x9c, 0x58, 0xf9, 0x79, 0x06, 0xd5, 0x4e, 0x8b, 0x71, 0x96, 0x0d, 0xda, 0x89, 0xb1, 0xe2,
    +	0x71, 0x9e, 0xe9, 0x6f, 0x16, 0xde, 0xc3, 0x85, 0x69, 0x71, 0x06, 0x73, 0xaf, 0x16, 0x65, 0xee,
    +	0xfa, 0x1f, 0xb4, 0x68, 0x3d, 0x53, 0x6c, 0xfa, 0xa4, 0x0b, 0xab, 0x94, 0x45, 0x84, 0x68, 0xb9,
    +	0xd9, 0x44, 0xfa, 0xd6, 0x50, 0x44, 0x3a, 0xac, 0xc3, 0x27, 0x33, 0xe9, 0x3f, 0x6a, 0x30, 0x7b,
    +	0xaf, 0xdd, 0x6e, 0xae, 0x3a, 0x7c, 0xe3, 0x35, 0x0d, 0xba, 0xcb, 0x0a, 0x72, 0xdf, 0xa0, 0xbb,
    +	0xc9, 0x82, 0xcc, 0x64, 0x98, 0x4b, 0xd0, 0x33, 0x50, 0x65, 0xff, 0x32, 0xc7, 0x79, 0xe4, 0x4f,
    +	0xf2, 0x7c, 0x55, 0x6d, 0xca, 0xb6, 0x07, 0x91, 0xbf, 0xb1, 0xd2, 0x44, 0xdf, 0x82, 0x09, 0x96,
    +	0x26, 0x88, 0xd3, 0x29, 0xc8, 0xa3, 0xa5, 0x53, 0x0d, 0x61, 0x14, 0x52, 0x23, 0xd9, 0x80, 0x03,
    +	0x38, 0x7d, 0x0f, 0xe6, 0x22, 0x83, 0xc0, 0x03, 0x9b, 0xbc, 0xc9, 0x4a, 0x1f, 0x6a, 0x41, 0x85,
    +	0xf5, 0xce, 0x0a, 0x5c, 0xb9, 0xc0, 0x4d, 0x65, 0x62, 0x22, 0x42, 0x1a, 0xc3, 0x7e, 0xf9, 0x58,
    +	0x60, 0xe9, 0x9b, 0x30, 0xb1, 0xda, 0x6c, 0xd8, 0xae, 0xa0, 0x2e, 0xa6, 0xd5, 0xf1, 0x92, 0x33,
    +	0xb5, 0xb2, 0x7a, 0x07, 0x63, 0x2e, 0x41, 0x3a, 0x8c, 0x93, 0xfb, 0x26, 0xe9, 0x53, 0xce, 0x56,
    +	0x26, 0x1b, 0xc0, 0x72, 0xf2, 0x5d, 0xde, 0x82, 0xa5, 0x44, 0xff, 0x49, 0x09, 0x26, 0x64, 0xb7,
    +	0x67, 0x70, 0x94, 0x59, 0x8b, 0x1d, 0x65, 0x9e, 0x2a, 0xb6, 0x04, 0xb9, 0xe7, 0x98, 0x76, 0xe2,
    +	0x1c, 0x73, 0xad, 0x20, 0xde, 0xf1, 0x87, 0x98, 0xf7, 0x4a, 0x30, 0x13, 0x5f, 0x7c, 0xf4, 0x2c,
    +	0x4c, 0xb1, 0xac, 0x6d, 0x99, 0x64, 0x23, 0x24, 0x8b, 0xea, 0x26, 0xa3, 0x15, 0x8a, 0x70, 0x54,
    +	0x0f, 0x75, 0x95, 0x59, 0xd3, 0xf5, 0xa8, 0x1c, 0x74, 0xfe, 0x94, 0x0e, 0xa8, 0x65, 0xd7, 0xc4,
    +	0xbd, 0x7d, 0x6d, 0xd5, 0xa1, 0x9b, 0x5e, 0x8b, 0x7a, 0x96, 0xd3, 0x4d, 0x75, 0xc4, 0xc0, 0x70,
    +	0x14, 0x19, 0xbd, 0xc5, 0x2a, 0x88, 0xef, 0x0e, 0x3c, 0x93, 0x64, 0x31, 0xc1, 0x80, 0xc5, 0xb0,
    +	0x8d, 0xd0, 0x59, 0x73, 0x4d, 0xc3, 0x16, 0x8b, 0x83, 0xc9, 0x0e, 0xf1, 0x88, 0x63, 0x92, 0x80,
    +	0x7d, 0x09, 0x08, 0xac, 0xc0, 0xf4, 0xdf, 0x6a, 0x30, 0x25, 0xe7, 0xe2, 0x0c, 0x38, 0xff, 0xeb,
    +	0x71, 0xce, 0xff, 0x44, 0xc1, 0x1d, 0x9a, 0x4d, 0xf8, 0x7f, 0xa7, 0xc1, 0x62, 0xe0, 0xba, 0x6b,
    +	0x74, 0x1a, 0x86, 0x6d, 0x38, 0x26, 0xf1, 0x82, 0x58, 0x5f, 0x84, 0x92, 0xd5, 0x97, 0x2b, 0x09,
    +	0x12, 0xa0, 0xb4, 0xda, 0xc4, 0x25, 0xab, 0xcf, 0x0a, 0xf2, 0xae, 0xeb, 0x53, 0x7e, 0x30, 0x10,
    +	0x67, 0x4e, 0xe5, 0xf5, 0x3d, 0xd9, 0x8e, 0x95, 0x06, 0xda, 0x82, 0x4a, 0xdf, 0xf5, 0x28, 0x2b,
    +	0x82, 0xe5, 0xc4, 0xfa, 0x1e, 0xe3, 0x35, 0x5b, 0x37, 0x19, 0x88, 0xe1, 0x4e, 0x67, 0x30, 0x58,
    +	0xa0, 0xe9, 0x3f, 0xd4, 0xe0, 0xd1, 0x0c, 0xff, 0x25, 0xff, 0xe8, 0xc0, 0x84, 0x25, 0x84, 0x32,
    +	0xbd, 0x3c, 0x5f, 0xac, 0xdb, 0x8c, 0xa9, 0x08, 0x53, 0x5b, 0x90, 0xc2, 0x02, 0x68, 0xfd, 0x57,
    +	0x1a, 0x5c, 0x4c, 0xf9, 0xcb, 0x53, 0x34, 0x8b, 0x67, 0x49, 0xdc, 0x55, 0x8a, 0x66, 0x61, 0xc9,
    +	0x25, 0xe8, 0x75, 0xa8, 0xf2, 0xe7, 0x26, 0xd3, 0xb5, 0xe5, 0x04, 0xd6, 0x83, 0x09, 0x6c, 0xca,
    +	0xf6, 0x07, 0x87, 0xcb, 0x57, 0x32, 0x8e, 0xed, 0x81, 0x18, 0x2b, 0x00, 0xb4, 0x0c, 0x15, 0xe2,
    +	0x79, 0xae, 0x27, 0x93, 0xfd, 0x24, 0x9b, 0xa9, 0xbb, 0xac, 0x01, 0x8b, 0x76, 0xfd, 0xd7, 0x61,
    +	0x90, 0xb2, 0xec, 0xcb, 0xfc, 0x63, 0x8b, 0x93, 0x4c, 0x8c, 0x6c, 0xe9, 0x30, 0x97, 0xa0, 0x01,
    +	0x5c, 0xb0, 0x12, 0xe9, 0x5a, 0xee, 0xce, 0x7a, 0xb1, 0x69, 0x54, 0x66, 0x8d, 0x05, 0x09, 0x7f,
    +	0x21, 0x29, 0xc1, 0xa9, 0x2e, 0x74, 0x02, 0x29, 0x2d, 0xf4, 0x06, 0x8c, 0xed, 0x52, 0xda, 0xcf,
    +	0x78, 0x37, 0x38, 0xa1, 0x48, 0x84, 0x2e, 0x54, 0xf9, 0xe8, 0xda, 0xed, 0x26, 0xe6, 0x50, 0xfa,
    +	0xef, 0x4b, 0x6a, 0x3e, 0xf8, 0x61, 0xeb, 0x9b, 0x6a, 0xb4, 0x2b, 0xb6, 0xe1, 0xfb, 0x3c, 0x85,
    +	0x89, 0x8b, 0x81, 0xb9, 0x88, 0xe3, 0x4a, 0x86, 0x53, 0xda, 0xa8, 0x1d, 0x16, 0x4f, 0x6d, 0x94,
    +	0xe2, 0x39, 0x95, 0x55, 0x38, 0xd1, 0x3d, 0x28, 0x53, 0xbb, 0xe8, 0x01, 0x5f, 0x22, 0xb6, 0xd7,
    +	0x5a, 0x8d, 0x29, 0x39, 0xe5, 0xe5, 0xf6, 0x5a, 0x0b, 0x33, 0x08, 0xb4, 0x09, 0x15, 0x6f, 0x60,
    +	0x13, 0x56, 0x07, 0xca, 0xc5, 0xeb, 0x0a, 0x9b, 0xc1, 0x70, 0xf3, 0xb1, 0x5f, 0x3e, 0x16, 0x38,
    +	0xfa, 0x8f, 0x34, 0x98, 0x8e, 0x55, 0x0b, 0xe4, 0xc1, 0x79, 0x3b, 0xb2, 0x77, 0xe4, 0x3c, 0x3c,
    +	0x37, 0xfc, 0xae, 0x93, 0x9b, 0x7e, 0x4e, 0xf6, 0x7b, 0x3e, 0x2a, 0xc3, 0xb1, 0x3e, 0x74, 0x03,
    +	0x20, 0x1c, 0x36, 0xdb, 0x07, 0x2c, 0x78, 0xc5, 0x86, 0x97, 0xfb, 0x80, 0xc5, 0xb4, 0x8f, 0x45,
    +	0x3b, 0xba, 0x09, 0xe0, 0x13, 0xd3, 0x23, 0x74, 0x23, 0x4c, 0x5c, 0xaa, 0x1c, 0xb7, 0x94, 0x04,
    +	0x47, 0xb4, 0xf4, 0x3f, 0x69, 0x30, 0xbd, 0x41, 0xe8, 0xf7, 0x5d, 0x6f, 0xaf, 0xe9, 0xda, 0x96,
    +	0x79, 0x70, 0x06, 0x24, 0x00, 0xc7, 0x48, 0xc0, 0x49, 0xf9, 0x32, 0xe6, 0x5d, 0x1e, 0x15, 0xd0,
    +	0x3f, 0xd2, 0x60, 0x3e, 0xa6, 0x79, 0x37, 0xcc, 0x07, 0x2a, 0x41, 0x6b, 0x85, 0x12, 0x74, 0x0c,
    +	0x86, 0x25, 0xb5, 0xec, 0x04, 0x8d, 0xd6, 0xa0, 0x44, 0x5d, 0x19, 0xbd, 0xc3, 0x61, 0x12, 0xe2,
    +	0x85, 0x35, 0xa7, 0xed, 0xe2, 0x12, 0x75, 0xd9, 0x42, 0x2c, 0xc4, 0xb4, 0xa2, 0x19, 0xed, 0x21,
    +	0x8d, 0x00, 0xc3, 0xd8, 0x8e, 0xe7, 0xf6, 0x46, 0x1e, 0x83, 0x5a, 0x88, 0x57, 0x3c, 0xb7, 0x87,
    +	0x39, 0x96, 0xfe, 0xb1, 0x06, 0x17, 0x63, 0x9a, 0x67, 0xc0, 0x1b, 0xde, 0x88, 0xf3, 0x86, 0x6b,
    +	0xc3, 0x0c, 0x24, 0x87, 0x3d, 0x7c, 0x5c, 0x4a, 0x0c, 0x83, 0x0d, 0x18, 0xed, 0xc0, 0x54, 0xdf,
    +	0xed, 0xb4, 0x4e, 0xe1, 0xad, 0x77, 0x96, 0xf1, 0xb9, 0x66, 0x88, 0x85, 0xa3, 0xc0, 0xe8, 0x3e,
    +	0x5c, 0x64, 0xd4, 0xc2, 0xef, 0x1b, 0x26, 0x69, 0x9d, 0xc2, 0xed, 0xd7, 0x23, 0xfc, 0x31, 0x29,
    +	0x89, 0x88, 0xd3, 0x9d, 0xa0, 0x75, 0x98, 0xb0, 0xfa, 0xfc, 0x7c, 0x21, 0x89, 0xe4, 0x89, 0x24,
    +	0x4c, 0x9c, 0x46, 0x44, 0x8a, 0x97, 0x3f, 0x70, 0x80, 0xa1, 0xff, 0x25, 0x19, 0x0d, 0x9c, 0xae,
    +	0xbe, 0x1a, 0xa1, 0x07, 0xf2, 0xd9, 0x67, 0x34, 0x6a, 0xb0, 0x21, 0x99, 0xc8, 0xa8, 0xcc, 0xba,
    +	0x9a, 0xe0, 0x2d, 0x5f, 0x81, 0x09, 0xe2, 0x74, 0x38, 0x59, 0x17, 0x77, 0x2a, 0x7c, 0x54, 0x77,
    +	0x45, 0x13, 0x0e, 0x64, 0xfa, 0x8f, 0xcb, 0x89, 0x51, 0xf1, 0x32, 0xfb, 0xee, 0xa9, 0x05, 0x87,
    +	0x22, 0xfc, 0xb9, 0x01, 0xb2, 0x1d, 0xd2, 0x3f, 0x11, 0xf3, 0xdf, 0x18, 0x26, 0xe6, 0xa3, 0xf5,
    +	0x2f, 0x97, 0xfc, 0xa1, 0xef, 0xc0, 0x38, 0x11, 0x5d, 0x88, 0xaa, 0x7a, 0x6b, 0x98, 0x2e, 0xc2,
    +	0xf4, 0x1b, 0x9e, 0xb3, 0x64, 0x9b, 0x44, 0x45, 0x2f, 0xb3, 0xf9, 0x62, 0xba, 0xec, 0x58, 0x22,
    +	0xd8, 0xf3, 0x64, 0xe3, 0x31, 0x31, 0x6c, 0xd5, 0xfc, 0xe0, 0x70, 0x19, 0xc2, 0x9f, 0x38, 0x6a,
    +	0xc1, 0x1f, 0xe2, 0xe4, 0x9d, 0xcd, 0xd9, 0x7c, 0xcc, 0x34, 0xdc, 0x43, 0x5c, 0xe8, 0xda, 0xa9,
    +	0x3d, 0xc4, 0x45, 0x20, 0x8f, 0x3f, 0xc3, 0xfe, 0xa3, 0x04, 0x97, 0x42, 0xe5, 0xc2, 0x0f, 0x71,
    +	0x19, 0x26, 0xff, 0xfb, 0xa0, 0xa9, 0xd8, 0xe3, 0x58, 0x38, 0x75, 0xff, 0x79, 0x8f, 0x63, 0xa1,
    +	0x6f, 0x39, 0xd5, 0xee, 0x37, 0xa5, 0xe8, 0x00, 0x86, 0x7c, 0xa1, 0x39, 0x85, 0x6f, 0x7a, 0xbe,
    +	0x74, 0x8f, 0x3c, 0xfa, 0x07, 0x63, 0x70, 0x21, 0xb9, 0x1b, 0x63, 0x17, 0xf9, 0xda, 0x89, 0x17,
    +	0xf9, 0x4d, 0x98, 0xdb, 0x19, 0xd8, 0xf6, 0x01, 0x1f, 0x43, 0xe4, 0x36, 0x5f, 0x3c, 0x01, 0xfc,
    +	0x9f, 0xb4, 0x9c, 0x7b, 0x25, 0x43, 0x07, 0x67, 0x5a, 0xa6, 0xef, 0xf5, 0xc7, 0xfe, 0xdd, 0x7b,
    +	0xfd, 0xca, 0x08, 0xf7, 0xfa, 0x39, 0x17, 0xf1, 0x13, 0x23, 0x5c, 0xc4, 0x67, 0xbf, 0xb2, 0x94,
    +	0x47, 0x7a, 0x65, 0x19, 0xe5, 0x52, 0x3f, 0x23, 0x1f, 0x9e, 0xf8, 0xad, 0xcb, 0x4b, 0x30, 0x13,
    +	0x7f, 0xb3, 0x12, 0x61, 0x21, 0x9e, 0xcd, 0xe4, 0x0b, 0x51, 0x24, 0x2c, 0x44, 0x3b, 0x56, 0x1a,
    +	0xfa, 0x91, 0x06, 0x97, 0xb3, 0xbf, 0x4d, 0x41, 0x36, 0xcc, 0xf4, 0x8c, 0xfb, 0xd1, 0xef, 0x85,
    +	0xb4, 0x11, 0x89, 0x0f, 0x7f, 0x61, 0x58, 0x8f, 0x61, 0xe1, 0x04, 0x36, 0x7a, 0x1b, 0xaa, 0x3d,
    +	0xe3, 0x7e, 0x6b, 0xe0, 0x75, 0xc9, 0xc8, 0x04, 0x8b, 0xef, 0xc8, 0x75, 0x89, 0x82, 0x15, 0x9e,
    +	0xfe, 0x85, 0x06, 0xf3, 0x39, 0xef, 0x06, 0xff, 0x45, 0xa3, 0x7c, 0xaf, 0x04, 0x95, 0x96, 0x69,
    +	0xd8, 0xe4, 0x0c, 0xb8, 0xc9, 0x6b, 0x31, 0x6e, 0x72, 0xd2, 0x37, 0xae, 0xdc, 0xab, 0x5c, 0x5a,
    +	0x82, 0x13, 0xb4, 0xe4, 0xa9, 0x42, 0x68, 0xc7, 0x33, 0x92, 0xe7, 0x61, 0x52, 0x75, 0x3a, 0x5c,
    +	0xa2, 0xd4, 0x7f, 0x59, 0x82, 0xa9, 0x48, 0x17, 0x43, 0xa6, 0xd9, 0x9d, 0x58, 0x6d, 0x29, 0x17,
    +	0xb8, 0xb4, 0x89, 0xf4, 0x55, 0x0b, 0xaa, 0x89, 0xf8, 0x46, 0x23, 0x7c, 0x95, 0x4f, 0x17, 0x99,
    +	0x97, 0x60, 0x86, 0x1a, 0x5e, 0x97, 0x50, 0x75, 0x02, 0x10, 0xf7, 0x95, 0xea, 0x63, 0xa1, 0x76,
    +	0x4c, 0x8a, 0x13, 0xda, 0x8b, 0x2f, 0xc2, 0x74, 0xac, 0xb3, 0x61, 0x3e, 0xb1, 0x68, 0xac, 0x7c,
    +	0xf2, 0xf9, 0xd2, 0xb9, 0x4f, 0x3f, 0x5f, 0x3a, 0xf7, 0xd9, 0xe7, 0x4b, 0xe7, 0x7e, 0x70, 0xb4,
    +	0xa4, 0x7d, 0x72, 0xb4, 0xa4, 0x7d, 0x7a, 0xb4, 0xa4, 0x7d, 0x76, 0xb4, 0xa4, 0xfd, 0xed, 0x68,
    +	0x49, 0xfb, 0xe9, 0x17, 0x4b, 0xe7, 0xde, 0x7e, 0xec, 0xd8, 0xff, 0x71, 0xf1, 0xaf, 0x00, 0x00,
    +	0x00, 0xff, 0xff, 0x6a, 0x79, 0xb9, 0xab, 0x91, 0x31, 0x00, 0x00,
     }
     
     func (m *DaemonSet) Marshal() (dAtA []byte, err error) {
    @@ -2208,6 +2210,11 @@ func (m *DeploymentStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
     	_ = i
     	var l int
     	_ = l
    +	if m.TerminatingReplicas != nil {
    +		i = encodeVarintGenerated(dAtA, i, uint64(*m.TerminatingReplicas))
    +		i--
    +		dAtA[i] = 0x48
    +	}
     	if m.CollisionCount != nil {
     		i = encodeVarintGenerated(dAtA, i, uint64(*m.CollisionCount))
     		i--
    @@ -3486,6 +3493,11 @@ func (m *ReplicaSetStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
     	_ = i
     	var l int
     	_ = l
    +	if m.TerminatingReplicas != nil {
    +		i = encodeVarintGenerated(dAtA, i, uint64(*m.TerminatingReplicas))
    +		i--
    +		dAtA[i] = 0x38
    +	}
     	if len(m.Conditions) > 0 {
     		for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- {
     			{
    @@ -4024,6 +4036,9 @@ func (m *DeploymentStatus) Size() (n int) {
     	if m.CollisionCount != nil {
     		n += 1 + sovGenerated(uint64(*m.CollisionCount))
     	}
    +	if m.TerminatingReplicas != nil {
    +		n += 1 + sovGenerated(uint64(*m.TerminatingReplicas))
    +	}
     	return n
     }
     
    @@ -4502,6 +4517,9 @@ func (m *ReplicaSetStatus) Size() (n int) {
     			n += 1 + l + sovGenerated(uint64(l))
     		}
     	}
    +	if m.TerminatingReplicas != nil {
    +		n += 1 + sovGenerated(uint64(*m.TerminatingReplicas))
    +	}
     	return n
     }
     
    @@ -4793,6 +4811,7 @@ func (this *DeploymentStatus) String() string {
     		`Conditions:` + repeatedStringForConditions + `,`,
     		`ReadyReplicas:` + fmt.Sprintf("%v", this.ReadyReplicas) + `,`,
     		`CollisionCount:` + valueToStringGenerated(this.CollisionCount) + `,`,
    +		`TerminatingReplicas:` + valueToStringGenerated(this.TerminatingReplicas) + `,`,
     		`}`,
     	}, "")
     	return s
    @@ -5182,6 +5201,7 @@ func (this *ReplicaSetStatus) String() string {
     		`ReadyReplicas:` + fmt.Sprintf("%v", this.ReadyReplicas) + `,`,
     		`AvailableReplicas:` + fmt.Sprintf("%v", this.AvailableReplicas) + `,`,
     		`Conditions:` + repeatedStringForConditions + `,`,
    +		`TerminatingReplicas:` + valueToStringGenerated(this.TerminatingReplicas) + `,`,
     		`}`,
     	}, "")
     	return s
    @@ -7567,6 +7587,26 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error {
     				}
     			}
     			m.CollisionCount = &v
    +		case 9:
    +			if wireType != 0 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field TerminatingReplicas", wireType)
    +			}
    +			var v int32
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				v |= int32(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			m.TerminatingReplicas = &v
     		default:
     			iNdEx = preIndex
     			skippy, err := skipGenerated(dAtA[iNdEx:])
    @@ -11162,6 +11202,26 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error {
     				return err
     			}
     			iNdEx = postIndex
    +		case 7:
    +			if wireType != 0 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field TerminatingReplicas", wireType)
    +			}
    +			var v int32
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				v |= int32(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			m.TerminatingReplicas = &v
     		default:
     			iNdEx = preIndex
     			skippy, err := skipGenerated(dAtA[iNdEx:])
    diff --git a/vendor/k8s.io/api/extensions/v1beta1/generated.proto b/vendor/k8s.io/api/extensions/v1beta1/generated.proto
    index 9bbcaa0e2..fed0b4835 100644
    --- a/vendor/k8s.io/api/extensions/v1beta1/generated.proto
    +++ b/vendor/k8s.io/api/extensions/v1beta1/generated.proto
    @@ -320,19 +320,19 @@ message DeploymentStatus {
       // +optional
       optional int64 observedGeneration = 1;
     
    -  // Total number of non-terminated pods targeted by this deployment (their labels match the selector).
    +  // Total number of non-terminating pods targeted by this deployment (their labels match the selector).
       // +optional
       optional int32 replicas = 2;
     
    -  // Total number of non-terminated pods targeted by this deployment that have the desired template spec.
    +  // Total number of non-terminating pods targeted by this deployment that have the desired template spec.
       // +optional
       optional int32 updatedReplicas = 3;
     
    -  // Total number of ready pods targeted by this deployment.
    +  // Total number of non-terminating pods targeted by this Deployment with a Ready Condition.
       // +optional
       optional int32 readyReplicas = 7;
     
    -  // Total number of available pods (ready for at least minReadySeconds) targeted by this deployment.
    +  // Total number of available non-terminating pods (ready for at least minReadySeconds) targeted by this deployment.
       // +optional
       optional int32 availableReplicas = 4;
     
    @@ -342,6 +342,13 @@ message DeploymentStatus {
       // +optional
       optional int32 unavailableReplicas = 5;
     
    +  // Total number of terminating pods targeted by this deployment. Terminating pods have a non-null
    +  // .metadata.deletionTimestamp and have not yet reached the Failed or Succeeded .status.phase.
    +  //
    +  // This is an alpha field. Enable DeploymentReplicaSetTerminatingReplicas to be able to use this field.
    +  // +optional
    +  optional int32 terminatingReplicas = 9;
    +
       // Represents the latest available observations of a deployment's current state.
       // +patchMergeKey=type
       // +patchStrategy=merge
    @@ -863,16 +870,16 @@ message ReplicaSetList {
       optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
     
       // List of ReplicaSets.
    -  // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller
    +  // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset
       repeated ReplicaSet items = 2;
     }
     
     // ReplicaSetSpec is the specification of a ReplicaSet.
     message ReplicaSetSpec {
    -  // Replicas is the number of desired replicas.
    +  // Replicas is the number of desired pods.
       // This is a pointer to distinguish between explicit zero and unspecified.
       // Defaults to 1.
    -  // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller
    +  // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset
       // +optional
       optional int32 replicas = 1;
     
    @@ -891,29 +898,36 @@ message ReplicaSetSpec {
     
       // Template is the object that describes the pod that will be created if
       // insufficient replicas are detected.
    -  // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template
    +  // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset/#pod-template
       // +optional
       optional .k8s.io.api.core.v1.PodTemplateSpec template = 3;
     }
     
     // ReplicaSetStatus represents the current status of a ReplicaSet.
     message ReplicaSetStatus {
    -  // Replicas is the most recently observed number of replicas.
    -  // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller
    +  // Replicas is the most recently observed number of non-terminating pods.
    +  // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset
       optional int32 replicas = 1;
     
    -  // The number of pods that have labels matching the labels of the pod template of the replicaset.
    +  // The number of non-terminating pods that have labels matching the labels of the pod template of the replicaset.
       // +optional
       optional int32 fullyLabeledReplicas = 2;
     
    -  // The number of ready replicas for this replica set.
    +  // The number of non-terminating pods targeted by this ReplicaSet with a Ready Condition.
       // +optional
       optional int32 readyReplicas = 4;
     
    -  // The number of available replicas (ready for at least minReadySeconds) for this replica set.
    +  // The number of available non-terminating pods (ready for at least minReadySeconds) for this replica set.
       // +optional
       optional int32 availableReplicas = 5;
     
    +  // The number of terminating pods for this replica set. Terminating pods have a non-null .metadata.deletionTimestamp
    +  // and have not yet reached the Failed or Succeeded .status.phase.
    +  //
    +  // This is an alpha field. Enable DeploymentReplicaSetTerminatingReplicas to be able to use this field.
    +  // +optional
    +  optional int32 terminatingReplicas = 7;
    +
       // ObservedGeneration reflects the generation of the most recently observed ReplicaSet.
       // +optional
       optional int64 observedGeneration = 3;
    @@ -966,7 +980,7 @@ message RollingUpdateDaemonSet {
       // pod is available (Ready for at least minReadySeconds) the old DaemonSet pod
       // on that node is marked deleted. If the old pod becomes unavailable for any
       // reason (Ready transitions to false, is evicted, or is drained) an updated
    -  // pod is immediatedly created on that node without considering surge limits.
    +  // pod is immediately created on that node without considering surge limits.
       // Allowing surge implies the possibility that the resources consumed by the
       // daemonset on any given node can double if the readiness check fails, and
       // so resource intensive daemonsets should take into account that they may
    @@ -1025,6 +1039,9 @@ message Scale {
     message ScaleSpec {
       // desired number of instances for the scaled object.
       // +optional
    +  // +k8s:optional
    +  // +default=0
    +  // +k8s:minimum=0
       optional int32 replicas = 1;
     }
     
    diff --git a/vendor/k8s.io/api/extensions/v1beta1/types.go b/vendor/k8s.io/api/extensions/v1beta1/types.go
    index 09f58692f..c7b50e059 100644
    --- a/vendor/k8s.io/api/extensions/v1beta1/types.go
    +++ b/vendor/k8s.io/api/extensions/v1beta1/types.go
    @@ -27,6 +27,9 @@ import (
     type ScaleSpec struct {
     	// desired number of instances for the scaled object.
     	// +optional
    +	// +k8s:optional
    +	// +default=0
    +	// +k8s:minimum=0
     	Replicas int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"`
     }
     
    @@ -54,6 +57,7 @@ type ScaleStatus struct {
     // +k8s:prerelease-lifecycle-gen:introduced=1.1
     // +k8s:prerelease-lifecycle-gen:deprecated=1.2
     // +k8s:prerelease-lifecycle-gen:removed=1.16
    +// +k8s:isSubresource=/scale
     
     // represents a scaling request for a resource.
     type Scale struct {
    @@ -245,19 +249,19 @@ type DeploymentStatus struct {
     	// +optional
     	ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,1,opt,name=observedGeneration"`
     
    -	// Total number of non-terminated pods targeted by this deployment (their labels match the selector).
    +	// Total number of non-terminating pods targeted by this deployment (their labels match the selector).
     	// +optional
     	Replicas int32 `json:"replicas,omitempty" protobuf:"varint,2,opt,name=replicas"`
     
    -	// Total number of non-terminated pods targeted by this deployment that have the desired template spec.
    +	// Total number of non-terminating pods targeted by this deployment that have the desired template spec.
     	// +optional
     	UpdatedReplicas int32 `json:"updatedReplicas,omitempty" protobuf:"varint,3,opt,name=updatedReplicas"`
     
    -	// Total number of ready pods targeted by this deployment.
    +	// Total number of non-terminating pods targeted by this Deployment with a Ready Condition.
     	// +optional
     	ReadyReplicas int32 `json:"readyReplicas,omitempty" protobuf:"varint,7,opt,name=readyReplicas"`
     
    -	// Total number of available pods (ready for at least minReadySeconds) targeted by this deployment.
    +	// Total number of available non-terminating pods (ready for at least minReadySeconds) targeted by this deployment.
     	// +optional
     	AvailableReplicas int32 `json:"availableReplicas,omitempty" protobuf:"varint,4,opt,name=availableReplicas"`
     
    @@ -267,6 +271,13 @@ type DeploymentStatus struct {
     	// +optional
     	UnavailableReplicas int32 `json:"unavailableReplicas,omitempty" protobuf:"varint,5,opt,name=unavailableReplicas"`
     
    +	// Total number of terminating pods targeted by this deployment. Terminating pods have a non-null
    +	// .metadata.deletionTimestamp and have not yet reached the Failed or Succeeded .status.phase.
    +	//
    +	// This is an alpha field. Enable DeploymentReplicaSetTerminatingReplicas to be able to use this field.
    +	// +optional
    +	TerminatingReplicas *int32 `json:"terminatingReplicas,omitempty" protobuf:"varint,9,opt,name=terminatingReplicas"`
    +
     	// Represents the latest available observations of a deployment's current state.
     	// +patchMergeKey=type
     	// +patchStrategy=merge
    @@ -391,7 +402,7 @@ type RollingUpdateDaemonSet struct {
     	// pod is available (Ready for at least minReadySeconds) the old DaemonSet pod
     	// on that node is marked deleted. If the old pod becomes unavailable for any
     	// reason (Ready transitions to false, is evicted, or is drained) an updated
    -	// pod is immediatedly created on that node without considering surge limits.
    +	// pod is immediately created on that node without considering surge limits.
     	// Allowing surge implies the possibility that the resources consumed by the
     	// daemonset on any given node can double if the readiness check fails, and
     	// so resource intensive daemonsets should take into account that they may
    @@ -941,16 +952,16 @@ type ReplicaSetList struct {
     	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
     
     	// List of ReplicaSets.
    -	// More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller
    +	// More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset
     	Items []ReplicaSet `json:"items" protobuf:"bytes,2,rep,name=items"`
     }
     
     // ReplicaSetSpec is the specification of a ReplicaSet.
     type ReplicaSetSpec struct {
    -	// Replicas is the number of desired replicas.
    +	// Replicas is the number of desired pods.
     	// This is a pointer to distinguish between explicit zero and unspecified.
     	// Defaults to 1.
    -	// More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller
    +	// More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset
     	// +optional
     	Replicas *int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"`
     
    @@ -969,29 +980,36 @@ type ReplicaSetSpec struct {
     
     	// Template is the object that describes the pod that will be created if
     	// insufficient replicas are detected.
    -	// More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template
    +	// More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset/#pod-template
     	// +optional
     	Template v1.PodTemplateSpec `json:"template,omitempty" protobuf:"bytes,3,opt,name=template"`
     }
     
     // ReplicaSetStatus represents the current status of a ReplicaSet.
     type ReplicaSetStatus struct {
    -	// Replicas is the most recently observed number of replicas.
    -	// More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller
    +	// Replicas is the most recently observed number of non-terminating pods.
    +	// More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset
     	Replicas int32 `json:"replicas" protobuf:"varint,1,opt,name=replicas"`
     
    -	// The number of pods that have labels matching the labels of the pod template of the replicaset.
    +	// The number of non-terminating pods that have labels matching the labels of the pod template of the replicaset.
     	// +optional
     	FullyLabeledReplicas int32 `json:"fullyLabeledReplicas,omitempty" protobuf:"varint,2,opt,name=fullyLabeledReplicas"`
     
    -	// The number of ready replicas for this replica set.
    +	// The number of non-terminating pods targeted by this ReplicaSet with a Ready Condition.
     	// +optional
     	ReadyReplicas int32 `json:"readyReplicas,omitempty" protobuf:"varint,4,opt,name=readyReplicas"`
     
    -	// The number of available replicas (ready for at least minReadySeconds) for this replica set.
    +	// The number of available non-terminating pods (ready for at least minReadySeconds) for this replica set.
     	// +optional
     	AvailableReplicas int32 `json:"availableReplicas,omitempty" protobuf:"varint,5,opt,name=availableReplicas"`
     
    +	// The number of terminating pods for this replica set. Terminating pods have a non-null .metadata.deletionTimestamp
    +	// and have not yet reached the Failed or Succeeded .status.phase.
    +	//
    +	// This is an alpha field. Enable DeploymentReplicaSetTerminatingReplicas to be able to use this field.
    +	// +optional
    +	TerminatingReplicas *int32 `json:"terminatingReplicas,omitempty" protobuf:"varint,7,opt,name=terminatingReplicas"`
    +
     	// ObservedGeneration reflects the generation of the most recently observed ReplicaSet.
     	// +optional
     	ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,3,opt,name=observedGeneration"`
    diff --git a/vendor/k8s.io/api/extensions/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/extensions/v1beta1/types_swagger_doc_generated.go
    index 408022c9d..8a158233e 100644
    --- a/vendor/k8s.io/api/extensions/v1beta1/types_swagger_doc_generated.go
    +++ b/vendor/k8s.io/api/extensions/v1beta1/types_swagger_doc_generated.go
    @@ -169,11 +169,12 @@ func (DeploymentSpec) SwaggerDoc() map[string]string {
     var map_DeploymentStatus = map[string]string{
     	"":                    "DeploymentStatus is the most recently observed status of the Deployment.",
     	"observedGeneration":  "The generation observed by the deployment controller.",
    -	"replicas":            "Total number of non-terminated pods targeted by this deployment (their labels match the selector).",
    -	"updatedReplicas":     "Total number of non-terminated pods targeted by this deployment that have the desired template spec.",
    -	"readyReplicas":       "Total number of ready pods targeted by this deployment.",
    -	"availableReplicas":   "Total number of available pods (ready for at least minReadySeconds) targeted by this deployment.",
    +	"replicas":            "Total number of non-terminating pods targeted by this deployment (their labels match the selector).",
    +	"updatedReplicas":     "Total number of non-terminating pods targeted by this deployment that have the desired template spec.",
    +	"readyReplicas":       "Total number of non-terminating pods targeted by this Deployment with a Ready Condition.",
    +	"availableReplicas":   "Total number of available non-terminating pods (ready for at least minReadySeconds) targeted by this deployment.",
     	"unavailableReplicas": "Total number of unavailable pods targeted by this deployment. This is the total number of pods that are still required for the deployment to have 100% available capacity. They may either be pods that are running but not yet available or pods that still have not been created.",
    +	"terminatingReplicas": "Total number of terminating pods targeted by this deployment. Terminating pods have a non-null .metadata.deletionTimestamp and have not yet reached the Failed or Succeeded .status.phase.\n\nThis is an alpha field. Enable DeploymentReplicaSetTerminatingReplicas to be able to use this field.",
     	"conditions":          "Represents the latest available observations of a deployment's current state.",
     	"collisionCount":      "Count of hash collisions for the Deployment. The Deployment controller uses this field as a collision avoidance mechanism when it needs to create the name for the newest ReplicaSet.",
     }
    @@ -435,7 +436,7 @@ func (ReplicaSetCondition) SwaggerDoc() map[string]string {
     var map_ReplicaSetList = map[string]string{
     	"":         "ReplicaSetList is a collection of ReplicaSets.",
     	"metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
    -	"items":    "List of ReplicaSets. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller",
    +	"items":    "List of ReplicaSets. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset",
     }
     
     func (ReplicaSetList) SwaggerDoc() map[string]string {
    @@ -444,10 +445,10 @@ func (ReplicaSetList) SwaggerDoc() map[string]string {
     
     var map_ReplicaSetSpec = map[string]string{
     	"":                "ReplicaSetSpec is the specification of a ReplicaSet.",
    -	"replicas":        "Replicas is the number of desired replicas. This is a pointer to distinguish between explicit zero and unspecified. Defaults to 1. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller",
    +	"replicas":        "Replicas is the number of desired pods. This is a pointer to distinguish between explicit zero and unspecified. Defaults to 1. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset",
     	"minReadySeconds": "Minimum number of seconds for which a newly created pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)",
     	"selector":        "Selector is a label query over pods that should match the replica count. If the selector is empty, it is defaulted to the labels present on the pod template. Label keys and values that must match in order to be controlled by this replica set. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors",
    -	"template":        "Template is the object that describes the pod that will be created if insufficient replicas are detected. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template",
    +	"template":        "Template is the object that describes the pod that will be created if insufficient replicas are detected. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset/#pod-template",
     }
     
     func (ReplicaSetSpec) SwaggerDoc() map[string]string {
    @@ -456,10 +457,11 @@ func (ReplicaSetSpec) SwaggerDoc() map[string]string {
     
     var map_ReplicaSetStatus = map[string]string{
     	"":                     "ReplicaSetStatus represents the current status of a ReplicaSet.",
    -	"replicas":             "Replicas is the most recently observed number of replicas. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller",
    -	"fullyLabeledReplicas": "The number of pods that have labels matching the labels of the pod template of the replicaset.",
    -	"readyReplicas":        "The number of ready replicas for this replica set.",
    -	"availableReplicas":    "The number of available replicas (ready for at least minReadySeconds) for this replica set.",
    +	"replicas":             "Replicas is the most recently observed number of non-terminating pods. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset",
    +	"fullyLabeledReplicas": "The number of non-terminating pods that have labels matching the labels of the pod template of the replicaset.",
    +	"readyReplicas":        "The number of non-terminating pods targeted by this ReplicaSet with a Ready Condition.",
    +	"availableReplicas":    "The number of available non-terminating pods (ready for at least minReadySeconds) for this replica set.",
    +	"terminatingReplicas":  "The number of terminating pods for this replica set. Terminating pods have a non-null .metadata.deletionTimestamp and have not yet reached the Failed or Succeeded .status.phase.\n\nThis is an alpha field. Enable DeploymentReplicaSetTerminatingReplicas to be able to use this field.",
     	"observedGeneration":   "ObservedGeneration reflects the generation of the most recently observed ReplicaSet.",
     	"conditions":           "Represents the latest available observations of a replica set's current state.",
     }
    @@ -480,7 +482,7 @@ func (RollbackConfig) SwaggerDoc() map[string]string {
     var map_RollingUpdateDaemonSet = map[string]string{
     	"":               "Spec to control the desired behavior of daemon set rolling update.",
     	"maxUnavailable": "The maximum number of DaemonSet pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of total number of DaemonSet pods at the start of the update (ex: 10%). Absolute number is calculated from percentage by rounding up. This cannot be 0 if MaxSurge is 0 Default value is 1. Example: when this is set to 30%, at most 30% of the total number of nodes that should be running the daemon pod (i.e. status.desiredNumberScheduled) can have their pods stopped for an update at any given time. The update starts by stopping at most 30% of those DaemonSet pods and then brings up new DaemonSet pods in their place. Once the new pods are available, it then proceeds onto other DaemonSet pods, thus ensuring that at least 70% of original number of DaemonSet pods are available at all times during the update.",
    -	"maxSurge":       "The maximum number of nodes with an existing available DaemonSet pod that can have an updated DaemonSet pod during during an update. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). This can not be 0 if MaxUnavailable is 0. Absolute number is calculated from percentage by rounding up to a minimum of 1. Default value is 0. Example: when this is set to 30%, at most 30% of the total number of nodes that should be running the daemon pod (i.e. status.desiredNumberScheduled) can have their a new pod created before the old pod is marked as deleted. The update starts by launching new pods on 30% of nodes. Once an updated pod is available (Ready for at least minReadySeconds) the old DaemonSet pod on that node is marked deleted. If the old pod becomes unavailable for any reason (Ready transitions to false, is evicted, or is drained) an updated pod is immediatedly created on that node without considering surge limits. Allowing surge implies the possibility that the resources consumed by the daemonset on any given node can double if the readiness check fails, and so resource intensive daemonsets should take into account that they may cause evictions during disruption. This is an alpha field and requires enabling DaemonSetUpdateSurge feature gate.",
    +	"maxSurge":       "The maximum number of nodes with an existing available DaemonSet pod that can have an updated DaemonSet pod during during an update. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). This can not be 0 if MaxUnavailable is 0. Absolute number is calculated from percentage by rounding up to a minimum of 1. Default value is 0. Example: when this is set to 30%, at most 30% of the total number of nodes that should be running the daemon pod (i.e. status.desiredNumberScheduled) can have their a new pod created before the old pod is marked as deleted. The update starts by launching new pods on 30% of nodes. Once an updated pod is available (Ready for at least minReadySeconds) the old DaemonSet pod on that node is marked deleted. If the old pod becomes unavailable for any reason (Ready transitions to false, is evicted, or is drained) an updated pod is immediately created on that node without considering surge limits. Allowing surge implies the possibility that the resources consumed by the daemonset on any given node can double if the readiness check fails, and so resource intensive daemonsets should take into account that they may cause evictions during disruption. This is an alpha field and requires enabling DaemonSetUpdateSurge feature gate.",
     }
     
     func (RollingUpdateDaemonSet) SwaggerDoc() map[string]string {
    diff --git a/vendor/k8s.io/api/extensions/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/extensions/v1beta1/zz_generated.deepcopy.go
    index 6b474ae48..2c7a8524e 100644
    --- a/vendor/k8s.io/api/extensions/v1beta1/zz_generated.deepcopy.go
    +++ b/vendor/k8s.io/api/extensions/v1beta1/zz_generated.deepcopy.go
    @@ -341,6 +341,11 @@ func (in *DeploymentSpec) DeepCopy() *DeploymentSpec {
     // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
     func (in *DeploymentStatus) DeepCopyInto(out *DeploymentStatus) {
     	*out = *in
    +	if in.TerminatingReplicas != nil {
    +		in, out := &in.TerminatingReplicas, &out.TerminatingReplicas
    +		*out = new(int32)
    +		**out = **in
    +	}
     	if in.Conditions != nil {
     		in, out := &in.Conditions, &out.Conditions
     		*out = make([]DeploymentCondition, len(*in))
    @@ -1045,6 +1050,11 @@ func (in *ReplicaSetSpec) DeepCopy() *ReplicaSetSpec {
     // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
     func (in *ReplicaSetStatus) DeepCopyInto(out *ReplicaSetStatus) {
     	*out = *in
    +	if in.TerminatingReplicas != nil {
    +		in, out := &in.TerminatingReplicas, &out.TerminatingReplicas
    +		*out = new(int32)
    +		**out = **in
    +	}
     	if in.Conditions != nil {
     		in, out := &in.Conditions, &out.Conditions
     		*out = make([]ReplicaSetCondition, len(*in))
    diff --git a/vendor/k8s.io/api/extensions/v1beta1/zz_generated.validations.go b/vendor/k8s.io/api/extensions/v1beta1/zz_generated.validations.go
    new file mode 100644
    index 000000000..6d2a1666a
    --- /dev/null
    +++ b/vendor/k8s.io/api/extensions/v1beta1/zz_generated.validations.go
    @@ -0,0 +1,78 @@
    +//go:build !ignore_autogenerated
    +// +build !ignore_autogenerated
    +
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by validation-gen. DO NOT EDIT.
    +
    +package v1beta1
    +
    +import (
    +	context "context"
    +	fmt "fmt"
    +
    +	operation "k8s.io/apimachinery/pkg/api/operation"
    +	safe "k8s.io/apimachinery/pkg/api/safe"
    +	validate "k8s.io/apimachinery/pkg/api/validate"
    +	runtime "k8s.io/apimachinery/pkg/runtime"
    +	field "k8s.io/apimachinery/pkg/util/validation/field"
    +)
    +
    +func init() { localSchemeBuilder.Register(RegisterValidations) }
    +
    +// RegisterValidations adds validation functions to the given scheme.
    +// Public to allow building arbitrary schemes.
    +func RegisterValidations(scheme *runtime.Scheme) error {
    +	scheme.AddValidationFunc((*Scale)(nil), func(ctx context.Context, op operation.Operation, obj, oldObj interface{}) field.ErrorList {
    +		switch op.Request.SubresourcePath() {
    +		case "/scale":
    +			return Validate_Scale(ctx, op, nil /* fldPath */, obj.(*Scale), safe.Cast[*Scale](oldObj))
    +		}
    +		return field.ErrorList{field.InternalError(nil, fmt.Errorf("no validation found for %T, subresource: %v", obj, op.Request.SubresourcePath()))}
    +	})
    +	return nil
    +}
    +
    +func Validate_Scale(ctx context.Context, op operation.Operation, fldPath *field.Path, obj, oldObj *Scale) (errs field.ErrorList) {
    +	// field Scale.TypeMeta has no validation
    +	// field Scale.ObjectMeta has no validation
    +
    +	// field Scale.Spec
    +	errs = append(errs,
    +		func(fldPath *field.Path, obj, oldObj *ScaleSpec) (errs field.ErrorList) {
    +			errs = append(errs, Validate_ScaleSpec(ctx, op, fldPath, obj, oldObj)...)
    +			return
    +		}(fldPath.Child("spec"), &obj.Spec, safe.Field(oldObj, func(oldObj *Scale) *ScaleSpec { return &oldObj.Spec }))...)
    +
    +	// field Scale.Status has no validation
    +	return errs
    +}
    +
    +func Validate_ScaleSpec(ctx context.Context, op operation.Operation, fldPath *field.Path, obj, oldObj *ScaleSpec) (errs field.ErrorList) {
    +	// field ScaleSpec.Replicas
    +	errs = append(errs,
    +		func(fldPath *field.Path, obj, oldObj *int32) (errs field.ErrorList) {
    +			// optional value-type fields with zero-value defaults are purely documentation
    +			if op.Type == operation.Update && (obj == oldObj || (obj != nil && oldObj != nil && *obj == *oldObj)) {
    +				return nil // no changes
    +			}
    +			errs = append(errs, validate.Minimum(ctx, op, fldPath, obj, oldObj, 0)...)
    +			return
    +		}(fldPath.Child("replicas"), &obj.Replicas, safe.Field(oldObj, func(oldObj *ScaleSpec) *int32 { return &oldObj.Replicas }))...)
    +
    +	return errs
    +}
    diff --git a/vendor/k8s.io/api/flowcontrol/v1/doc.go b/vendor/k8s.io/api/flowcontrol/v1/doc.go
    index c9e7db158..ad5f45791 100644
    --- a/vendor/k8s.io/api/flowcontrol/v1/doc.go
    +++ b/vendor/k8s.io/api/flowcontrol/v1/doc.go
    @@ -22,4 +22,4 @@ limitations under the License.
     // +groupName=flowcontrol.apiserver.k8s.io
     
     // Package v1 holds api types of version v1 for group "flowcontrol.apiserver.k8s.io".
    -package v1 // import "k8s.io/api/flowcontrol/v1"
    +package v1
    diff --git a/vendor/k8s.io/api/flowcontrol/v1beta1/doc.go b/vendor/k8s.io/api/flowcontrol/v1beta1/doc.go
    index 50897b7eb..20268c1f2 100644
    --- a/vendor/k8s.io/api/flowcontrol/v1beta1/doc.go
    +++ b/vendor/k8s.io/api/flowcontrol/v1beta1/doc.go
    @@ -22,4 +22,4 @@ limitations under the License.
     // +groupName=flowcontrol.apiserver.k8s.io
     
     // Package v1beta1 holds api types of version v1alpha1 for group "flowcontrol.apiserver.k8s.io".
    -package v1beta1 // import "k8s.io/api/flowcontrol/v1beta1"
    +package v1beta1
    diff --git a/vendor/k8s.io/api/flowcontrol/v1beta2/doc.go b/vendor/k8s.io/api/flowcontrol/v1beta2/doc.go
    index 53b460d37..2dcad11ad 100644
    --- a/vendor/k8s.io/api/flowcontrol/v1beta2/doc.go
    +++ b/vendor/k8s.io/api/flowcontrol/v1beta2/doc.go
    @@ -22,4 +22,4 @@ limitations under the License.
     // +groupName=flowcontrol.apiserver.k8s.io
     
     // Package v1beta2 holds api types of version v1alpha1 for group "flowcontrol.apiserver.k8s.io".
    -package v1beta2 // import "k8s.io/api/flowcontrol/v1beta2"
    +package v1beta2
    diff --git a/vendor/k8s.io/api/flowcontrol/v1beta3/doc.go b/vendor/k8s.io/api/flowcontrol/v1beta3/doc.go
    index cd60cfef7..95f4430d3 100644
    --- a/vendor/k8s.io/api/flowcontrol/v1beta3/doc.go
    +++ b/vendor/k8s.io/api/flowcontrol/v1beta3/doc.go
    @@ -22,4 +22,4 @@ limitations under the License.
     // +groupName=flowcontrol.apiserver.k8s.io
     
     // Package v1beta3 holds api types of version v1beta3 for group "flowcontrol.apiserver.k8s.io".
    -package v1beta3 // import "k8s.io/api/flowcontrol/v1beta3"
    +package v1beta3
    diff --git a/vendor/k8s.io/api/imagepolicy/v1alpha1/doc.go b/vendor/k8s.io/api/imagepolicy/v1alpha1/doc.go
    index 5db6d52d4..f5fbbdbf0 100644
    --- a/vendor/k8s.io/api/imagepolicy/v1alpha1/doc.go
    +++ b/vendor/k8s.io/api/imagepolicy/v1alpha1/doc.go
    @@ -20,4 +20,4 @@ limitations under the License.
     
     // +groupName=imagepolicy.k8s.io
     
    -package v1alpha1 // import "k8s.io/api/imagepolicy/v1alpha1"
    +package v1alpha1
    diff --git a/vendor/k8s.io/api/networking/v1/doc.go b/vendor/k8s.io/api/networking/v1/doc.go
    index 1d13e7bab..e2093b7df 100644
    --- a/vendor/k8s.io/api/networking/v1/doc.go
    +++ b/vendor/k8s.io/api/networking/v1/doc.go
    @@ -20,4 +20,4 @@ limitations under the License.
     // +k8s:prerelease-lifecycle-gen=true
     // +groupName=networking.k8s.io
     
    -package v1 // import "k8s.io/api/networking/v1"
    +package v1
    diff --git a/vendor/k8s.io/api/networking/v1/generated.pb.go b/vendor/k8s.io/api/networking/v1/generated.pb.go
    index 7c023e690..062382b63 100644
    --- a/vendor/k8s.io/api/networking/v1/generated.pb.go
    +++ b/vendor/k8s.io/api/networking/v1/generated.pb.go
    @@ -104,10 +104,94 @@ func (m *HTTPIngressRuleValue) XXX_DiscardUnknown() {
     
     var xxx_messageInfo_HTTPIngressRuleValue proto.InternalMessageInfo
     
    +func (m *IPAddress) Reset()      { *m = IPAddress{} }
    +func (*IPAddress) ProtoMessage() {}
    +func (*IPAddress) Descriptor() ([]byte, []int) {
    +	return fileDescriptor_2c41434372fec1d7, []int{2}
    +}
    +func (m *IPAddress) XXX_Unmarshal(b []byte) error {
    +	return m.Unmarshal(b)
    +}
    +func (m *IPAddress) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    +	b = b[:cap(b)]
    +	n, err := m.MarshalToSizedBuffer(b)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return b[:n], nil
    +}
    +func (m *IPAddress) XXX_Merge(src proto.Message) {
    +	xxx_messageInfo_IPAddress.Merge(m, src)
    +}
    +func (m *IPAddress) XXX_Size() int {
    +	return m.Size()
    +}
    +func (m *IPAddress) XXX_DiscardUnknown() {
    +	xxx_messageInfo_IPAddress.DiscardUnknown(m)
    +}
    +
    +var xxx_messageInfo_IPAddress proto.InternalMessageInfo
    +
    +func (m *IPAddressList) Reset()      { *m = IPAddressList{} }
    +func (*IPAddressList) ProtoMessage() {}
    +func (*IPAddressList) Descriptor() ([]byte, []int) {
    +	return fileDescriptor_2c41434372fec1d7, []int{3}
    +}
    +func (m *IPAddressList) XXX_Unmarshal(b []byte) error {
    +	return m.Unmarshal(b)
    +}
    +func (m *IPAddressList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    +	b = b[:cap(b)]
    +	n, err := m.MarshalToSizedBuffer(b)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return b[:n], nil
    +}
    +func (m *IPAddressList) XXX_Merge(src proto.Message) {
    +	xxx_messageInfo_IPAddressList.Merge(m, src)
    +}
    +func (m *IPAddressList) XXX_Size() int {
    +	return m.Size()
    +}
    +func (m *IPAddressList) XXX_DiscardUnknown() {
    +	xxx_messageInfo_IPAddressList.DiscardUnknown(m)
    +}
    +
    +var xxx_messageInfo_IPAddressList proto.InternalMessageInfo
    +
    +func (m *IPAddressSpec) Reset()      { *m = IPAddressSpec{} }
    +func (*IPAddressSpec) ProtoMessage() {}
    +func (*IPAddressSpec) Descriptor() ([]byte, []int) {
    +	return fileDescriptor_2c41434372fec1d7, []int{4}
    +}
    +func (m *IPAddressSpec) XXX_Unmarshal(b []byte) error {
    +	return m.Unmarshal(b)
    +}
    +func (m *IPAddressSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    +	b = b[:cap(b)]
    +	n, err := m.MarshalToSizedBuffer(b)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return b[:n], nil
    +}
    +func (m *IPAddressSpec) XXX_Merge(src proto.Message) {
    +	xxx_messageInfo_IPAddressSpec.Merge(m, src)
    +}
    +func (m *IPAddressSpec) XXX_Size() int {
    +	return m.Size()
    +}
    +func (m *IPAddressSpec) XXX_DiscardUnknown() {
    +	xxx_messageInfo_IPAddressSpec.DiscardUnknown(m)
    +}
    +
    +var xxx_messageInfo_IPAddressSpec proto.InternalMessageInfo
    +
     func (m *IPBlock) Reset()      { *m = IPBlock{} }
     func (*IPBlock) ProtoMessage() {}
     func (*IPBlock) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_2c41434372fec1d7, []int{2}
    +	return fileDescriptor_2c41434372fec1d7, []int{5}
     }
     func (m *IPBlock) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -135,7 +219,7 @@ var xxx_messageInfo_IPBlock proto.InternalMessageInfo
     func (m *Ingress) Reset()      { *m = Ingress{} }
     func (*Ingress) ProtoMessage() {}
     func (*Ingress) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_2c41434372fec1d7, []int{3}
    +	return fileDescriptor_2c41434372fec1d7, []int{6}
     }
     func (m *Ingress) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -163,7 +247,7 @@ var xxx_messageInfo_Ingress proto.InternalMessageInfo
     func (m *IngressBackend) Reset()      { *m = IngressBackend{} }
     func (*IngressBackend) ProtoMessage() {}
     func (*IngressBackend) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_2c41434372fec1d7, []int{4}
    +	return fileDescriptor_2c41434372fec1d7, []int{7}
     }
     func (m *IngressBackend) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -191,7 +275,7 @@ var xxx_messageInfo_IngressBackend proto.InternalMessageInfo
     func (m *IngressClass) Reset()      { *m = IngressClass{} }
     func (*IngressClass) ProtoMessage() {}
     func (*IngressClass) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_2c41434372fec1d7, []int{5}
    +	return fileDescriptor_2c41434372fec1d7, []int{8}
     }
     func (m *IngressClass) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -219,7 +303,7 @@ var xxx_messageInfo_IngressClass proto.InternalMessageInfo
     func (m *IngressClassList) Reset()      { *m = IngressClassList{} }
     func (*IngressClassList) ProtoMessage() {}
     func (*IngressClassList) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_2c41434372fec1d7, []int{6}
    +	return fileDescriptor_2c41434372fec1d7, []int{9}
     }
     func (m *IngressClassList) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -247,7 +331,7 @@ var xxx_messageInfo_IngressClassList proto.InternalMessageInfo
     func (m *IngressClassParametersReference) Reset()      { *m = IngressClassParametersReference{} }
     func (*IngressClassParametersReference) ProtoMessage() {}
     func (*IngressClassParametersReference) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_2c41434372fec1d7, []int{7}
    +	return fileDescriptor_2c41434372fec1d7, []int{10}
     }
     func (m *IngressClassParametersReference) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -275,7 +359,7 @@ var xxx_messageInfo_IngressClassParametersReference proto.InternalMessageInfo
     func (m *IngressClassSpec) Reset()      { *m = IngressClassSpec{} }
     func (*IngressClassSpec) ProtoMessage() {}
     func (*IngressClassSpec) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_2c41434372fec1d7, []int{8}
    +	return fileDescriptor_2c41434372fec1d7, []int{11}
     }
     func (m *IngressClassSpec) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -303,7 +387,7 @@ var xxx_messageInfo_IngressClassSpec proto.InternalMessageInfo
     func (m *IngressList) Reset()      { *m = IngressList{} }
     func (*IngressList) ProtoMessage() {}
     func (*IngressList) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_2c41434372fec1d7, []int{9}
    +	return fileDescriptor_2c41434372fec1d7, []int{12}
     }
     func (m *IngressList) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -331,7 +415,7 @@ var xxx_messageInfo_IngressList proto.InternalMessageInfo
     func (m *IngressLoadBalancerIngress) Reset()      { *m = IngressLoadBalancerIngress{} }
     func (*IngressLoadBalancerIngress) ProtoMessage() {}
     func (*IngressLoadBalancerIngress) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_2c41434372fec1d7, []int{10}
    +	return fileDescriptor_2c41434372fec1d7, []int{13}
     }
     func (m *IngressLoadBalancerIngress) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -359,7 +443,7 @@ var xxx_messageInfo_IngressLoadBalancerIngress proto.InternalMessageInfo
     func (m *IngressLoadBalancerStatus) Reset()      { *m = IngressLoadBalancerStatus{} }
     func (*IngressLoadBalancerStatus) ProtoMessage() {}
     func (*IngressLoadBalancerStatus) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_2c41434372fec1d7, []int{11}
    +	return fileDescriptor_2c41434372fec1d7, []int{14}
     }
     func (m *IngressLoadBalancerStatus) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -387,7 +471,7 @@ var xxx_messageInfo_IngressLoadBalancerStatus proto.InternalMessageInfo
     func (m *IngressPortStatus) Reset()      { *m = IngressPortStatus{} }
     func (*IngressPortStatus) ProtoMessage() {}
     func (*IngressPortStatus) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_2c41434372fec1d7, []int{12}
    +	return fileDescriptor_2c41434372fec1d7, []int{15}
     }
     func (m *IngressPortStatus) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -415,7 +499,7 @@ var xxx_messageInfo_IngressPortStatus proto.InternalMessageInfo
     func (m *IngressRule) Reset()      { *m = IngressRule{} }
     func (*IngressRule) ProtoMessage() {}
     func (*IngressRule) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_2c41434372fec1d7, []int{13}
    +	return fileDescriptor_2c41434372fec1d7, []int{16}
     }
     func (m *IngressRule) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -443,7 +527,7 @@ var xxx_messageInfo_IngressRule proto.InternalMessageInfo
     func (m *IngressRuleValue) Reset()      { *m = IngressRuleValue{} }
     func (*IngressRuleValue) ProtoMessage() {}
     func (*IngressRuleValue) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_2c41434372fec1d7, []int{14}
    +	return fileDescriptor_2c41434372fec1d7, []int{17}
     }
     func (m *IngressRuleValue) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -471,7 +555,7 @@ var xxx_messageInfo_IngressRuleValue proto.InternalMessageInfo
     func (m *IngressServiceBackend) Reset()      { *m = IngressServiceBackend{} }
     func (*IngressServiceBackend) ProtoMessage() {}
     func (*IngressServiceBackend) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_2c41434372fec1d7, []int{15}
    +	return fileDescriptor_2c41434372fec1d7, []int{18}
     }
     func (m *IngressServiceBackend) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -499,7 +583,7 @@ var xxx_messageInfo_IngressServiceBackend proto.InternalMessageInfo
     func (m *IngressSpec) Reset()      { *m = IngressSpec{} }
     func (*IngressSpec) ProtoMessage() {}
     func (*IngressSpec) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_2c41434372fec1d7, []int{16}
    +	return fileDescriptor_2c41434372fec1d7, []int{19}
     }
     func (m *IngressSpec) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -527,7 +611,7 @@ var xxx_messageInfo_IngressSpec proto.InternalMessageInfo
     func (m *IngressStatus) Reset()      { *m = IngressStatus{} }
     func (*IngressStatus) ProtoMessage() {}
     func (*IngressStatus) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_2c41434372fec1d7, []int{17}
    +	return fileDescriptor_2c41434372fec1d7, []int{20}
     }
     func (m *IngressStatus) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -555,7 +639,7 @@ var xxx_messageInfo_IngressStatus proto.InternalMessageInfo
     func (m *IngressTLS) Reset()      { *m = IngressTLS{} }
     func (*IngressTLS) ProtoMessage() {}
     func (*IngressTLS) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_2c41434372fec1d7, []int{18}
    +	return fileDescriptor_2c41434372fec1d7, []int{21}
     }
     func (m *IngressTLS) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -583,7 +667,7 @@ var xxx_messageInfo_IngressTLS proto.InternalMessageInfo
     func (m *NetworkPolicy) Reset()      { *m = NetworkPolicy{} }
     func (*NetworkPolicy) ProtoMessage() {}
     func (*NetworkPolicy) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_2c41434372fec1d7, []int{19}
    +	return fileDescriptor_2c41434372fec1d7, []int{22}
     }
     func (m *NetworkPolicy) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -611,7 +695,7 @@ var xxx_messageInfo_NetworkPolicy proto.InternalMessageInfo
     func (m *NetworkPolicyEgressRule) Reset()      { *m = NetworkPolicyEgressRule{} }
     func (*NetworkPolicyEgressRule) ProtoMessage() {}
     func (*NetworkPolicyEgressRule) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_2c41434372fec1d7, []int{20}
    +	return fileDescriptor_2c41434372fec1d7, []int{23}
     }
     func (m *NetworkPolicyEgressRule) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -639,7 +723,7 @@ var xxx_messageInfo_NetworkPolicyEgressRule proto.InternalMessageInfo
     func (m *NetworkPolicyIngressRule) Reset()      { *m = NetworkPolicyIngressRule{} }
     func (*NetworkPolicyIngressRule) ProtoMessage() {}
     func (*NetworkPolicyIngressRule) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_2c41434372fec1d7, []int{21}
    +	return fileDescriptor_2c41434372fec1d7, []int{24}
     }
     func (m *NetworkPolicyIngressRule) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -667,7 +751,7 @@ var xxx_messageInfo_NetworkPolicyIngressRule proto.InternalMessageInfo
     func (m *NetworkPolicyList) Reset()      { *m = NetworkPolicyList{} }
     func (*NetworkPolicyList) ProtoMessage() {}
     func (*NetworkPolicyList) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_2c41434372fec1d7, []int{22}
    +	return fileDescriptor_2c41434372fec1d7, []int{25}
     }
     func (m *NetworkPolicyList) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -695,7 +779,7 @@ var xxx_messageInfo_NetworkPolicyList proto.InternalMessageInfo
     func (m *NetworkPolicyPeer) Reset()      { *m = NetworkPolicyPeer{} }
     func (*NetworkPolicyPeer) ProtoMessage() {}
     func (*NetworkPolicyPeer) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_2c41434372fec1d7, []int{23}
    +	return fileDescriptor_2c41434372fec1d7, []int{26}
     }
     func (m *NetworkPolicyPeer) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -723,7 +807,7 @@ var xxx_messageInfo_NetworkPolicyPeer proto.InternalMessageInfo
     func (m *NetworkPolicyPort) Reset()      { *m = NetworkPolicyPort{} }
     func (*NetworkPolicyPort) ProtoMessage() {}
     func (*NetworkPolicyPort) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_2c41434372fec1d7, []int{24}
    +	return fileDescriptor_2c41434372fec1d7, []int{27}
     }
     func (m *NetworkPolicyPort) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -751,7 +835,7 @@ var xxx_messageInfo_NetworkPolicyPort proto.InternalMessageInfo
     func (m *NetworkPolicySpec) Reset()      { *m = NetworkPolicySpec{} }
     func (*NetworkPolicySpec) ProtoMessage() {}
     func (*NetworkPolicySpec) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_2c41434372fec1d7, []int{25}
    +	return fileDescriptor_2c41434372fec1d7, []int{28}
     }
     func (m *NetworkPolicySpec) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -776,10 +860,38 @@ func (m *NetworkPolicySpec) XXX_DiscardUnknown() {
     
     var xxx_messageInfo_NetworkPolicySpec proto.InternalMessageInfo
     
    +func (m *ParentReference) Reset()      { *m = ParentReference{} }
    +func (*ParentReference) ProtoMessage() {}
    +func (*ParentReference) Descriptor() ([]byte, []int) {
    +	return fileDescriptor_2c41434372fec1d7, []int{29}
    +}
    +func (m *ParentReference) XXX_Unmarshal(b []byte) error {
    +	return m.Unmarshal(b)
    +}
    +func (m *ParentReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    +	b = b[:cap(b)]
    +	n, err := m.MarshalToSizedBuffer(b)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return b[:n], nil
    +}
    +func (m *ParentReference) XXX_Merge(src proto.Message) {
    +	xxx_messageInfo_ParentReference.Merge(m, src)
    +}
    +func (m *ParentReference) XXX_Size() int {
    +	return m.Size()
    +}
    +func (m *ParentReference) XXX_DiscardUnknown() {
    +	xxx_messageInfo_ParentReference.DiscardUnknown(m)
    +}
    +
    +var xxx_messageInfo_ParentReference proto.InternalMessageInfo
    +
     func (m *ServiceBackendPort) Reset()      { *m = ServiceBackendPort{} }
     func (*ServiceBackendPort) ProtoMessage() {}
     func (*ServiceBackendPort) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_2c41434372fec1d7, []int{26}
    +	return fileDescriptor_2c41434372fec1d7, []int{30}
     }
     func (m *ServiceBackendPort) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -804,9 +916,124 @@ func (m *ServiceBackendPort) XXX_DiscardUnknown() {
     
     var xxx_messageInfo_ServiceBackendPort proto.InternalMessageInfo
     
    +func (m *ServiceCIDR) Reset()      { *m = ServiceCIDR{} }
    +func (*ServiceCIDR) ProtoMessage() {}
    +func (*ServiceCIDR) Descriptor() ([]byte, []int) {
    +	return fileDescriptor_2c41434372fec1d7, []int{31}
    +}
    +func (m *ServiceCIDR) XXX_Unmarshal(b []byte) error {
    +	return m.Unmarshal(b)
    +}
    +func (m *ServiceCIDR) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    +	b = b[:cap(b)]
    +	n, err := m.MarshalToSizedBuffer(b)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return b[:n], nil
    +}
    +func (m *ServiceCIDR) XXX_Merge(src proto.Message) {
    +	xxx_messageInfo_ServiceCIDR.Merge(m, src)
    +}
    +func (m *ServiceCIDR) XXX_Size() int {
    +	return m.Size()
    +}
    +func (m *ServiceCIDR) XXX_DiscardUnknown() {
    +	xxx_messageInfo_ServiceCIDR.DiscardUnknown(m)
    +}
    +
    +var xxx_messageInfo_ServiceCIDR proto.InternalMessageInfo
    +
    +func (m *ServiceCIDRList) Reset()      { *m = ServiceCIDRList{} }
    +func (*ServiceCIDRList) ProtoMessage() {}
    +func (*ServiceCIDRList) Descriptor() ([]byte, []int) {
    +	return fileDescriptor_2c41434372fec1d7, []int{32}
    +}
    +func (m *ServiceCIDRList) XXX_Unmarshal(b []byte) error {
    +	return m.Unmarshal(b)
    +}
    +func (m *ServiceCIDRList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    +	b = b[:cap(b)]
    +	n, err := m.MarshalToSizedBuffer(b)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return b[:n], nil
    +}
    +func (m *ServiceCIDRList) XXX_Merge(src proto.Message) {
    +	xxx_messageInfo_ServiceCIDRList.Merge(m, src)
    +}
    +func (m *ServiceCIDRList) XXX_Size() int {
    +	return m.Size()
    +}
    +func (m *ServiceCIDRList) XXX_DiscardUnknown() {
    +	xxx_messageInfo_ServiceCIDRList.DiscardUnknown(m)
    +}
    +
    +var xxx_messageInfo_ServiceCIDRList proto.InternalMessageInfo
    +
    +func (m *ServiceCIDRSpec) Reset()      { *m = ServiceCIDRSpec{} }
    +func (*ServiceCIDRSpec) ProtoMessage() {}
    +func (*ServiceCIDRSpec) Descriptor() ([]byte, []int) {
    +	return fileDescriptor_2c41434372fec1d7, []int{33}
    +}
    +func (m *ServiceCIDRSpec) XXX_Unmarshal(b []byte) error {
    +	return m.Unmarshal(b)
    +}
    +func (m *ServiceCIDRSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    +	b = b[:cap(b)]
    +	n, err := m.MarshalToSizedBuffer(b)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return b[:n], nil
    +}
    +func (m *ServiceCIDRSpec) XXX_Merge(src proto.Message) {
    +	xxx_messageInfo_ServiceCIDRSpec.Merge(m, src)
    +}
    +func (m *ServiceCIDRSpec) XXX_Size() int {
    +	return m.Size()
    +}
    +func (m *ServiceCIDRSpec) XXX_DiscardUnknown() {
    +	xxx_messageInfo_ServiceCIDRSpec.DiscardUnknown(m)
    +}
    +
    +var xxx_messageInfo_ServiceCIDRSpec proto.InternalMessageInfo
    +
    +func (m *ServiceCIDRStatus) Reset()      { *m = ServiceCIDRStatus{} }
    +func (*ServiceCIDRStatus) ProtoMessage() {}
    +func (*ServiceCIDRStatus) Descriptor() ([]byte, []int) {
    +	return fileDescriptor_2c41434372fec1d7, []int{34}
    +}
    +func (m *ServiceCIDRStatus) XXX_Unmarshal(b []byte) error {
    +	return m.Unmarshal(b)
    +}
    +func (m *ServiceCIDRStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    +	b = b[:cap(b)]
    +	n, err := m.MarshalToSizedBuffer(b)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return b[:n], nil
    +}
    +func (m *ServiceCIDRStatus) XXX_Merge(src proto.Message) {
    +	xxx_messageInfo_ServiceCIDRStatus.Merge(m, src)
    +}
    +func (m *ServiceCIDRStatus) XXX_Size() int {
    +	return m.Size()
    +}
    +func (m *ServiceCIDRStatus) XXX_DiscardUnknown() {
    +	xxx_messageInfo_ServiceCIDRStatus.DiscardUnknown(m)
    +}
    +
    +var xxx_messageInfo_ServiceCIDRStatus proto.InternalMessageInfo
    +
     func init() {
     	proto.RegisterType((*HTTPIngressPath)(nil), "k8s.io.api.networking.v1.HTTPIngressPath")
     	proto.RegisterType((*HTTPIngressRuleValue)(nil), "k8s.io.api.networking.v1.HTTPIngressRuleValue")
    +	proto.RegisterType((*IPAddress)(nil), "k8s.io.api.networking.v1.IPAddress")
    +	proto.RegisterType((*IPAddressList)(nil), "k8s.io.api.networking.v1.IPAddressList")
    +	proto.RegisterType((*IPAddressSpec)(nil), "k8s.io.api.networking.v1.IPAddressSpec")
     	proto.RegisterType((*IPBlock)(nil), "k8s.io.api.networking.v1.IPBlock")
     	proto.RegisterType((*Ingress)(nil), "k8s.io.api.networking.v1.Ingress")
     	proto.RegisterType((*IngressBackend)(nil), "k8s.io.api.networking.v1.IngressBackend")
    @@ -831,7 +1058,12 @@ func init() {
     	proto.RegisterType((*NetworkPolicyPeer)(nil), "k8s.io.api.networking.v1.NetworkPolicyPeer")
     	proto.RegisterType((*NetworkPolicyPort)(nil), "k8s.io.api.networking.v1.NetworkPolicyPort")
     	proto.RegisterType((*NetworkPolicySpec)(nil), "k8s.io.api.networking.v1.NetworkPolicySpec")
    +	proto.RegisterType((*ParentReference)(nil), "k8s.io.api.networking.v1.ParentReference")
     	proto.RegisterType((*ServiceBackendPort)(nil), "k8s.io.api.networking.v1.ServiceBackendPort")
    +	proto.RegisterType((*ServiceCIDR)(nil), "k8s.io.api.networking.v1.ServiceCIDR")
    +	proto.RegisterType((*ServiceCIDRList)(nil), "k8s.io.api.networking.v1.ServiceCIDRList")
    +	proto.RegisterType((*ServiceCIDRSpec)(nil), "k8s.io.api.networking.v1.ServiceCIDRSpec")
    +	proto.RegisterType((*ServiceCIDRStatus)(nil), "k8s.io.api.networking.v1.ServiceCIDRStatus")
     }
     
     func init() {
    @@ -839,111 +1071,125 @@ func init() {
     }
     
     var fileDescriptor_2c41434372fec1d7 = []byte{
    -	// 1652 bytes of a gzipped FileDescriptorProto
    -	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x58, 0x4b, 0x6f, 0x1b, 0x55,
    -	0x14, 0xce, 0x38, 0x71, 0xec, 0x1c, 0x27, 0x69, 0x72, 0x69, 0x85, 0x09, 0xc2, 0x0e, 0x23, 0xda,
    -	0x06, 0xda, 0xda, 0x34, 0xad, 0x10, 0x6c, 0x78, 0x4c, 0x9a, 0xa6, 0xa1, 0xa9, 0x63, 0x5d, 0x5b,
    -	0x45, 0x20, 0x1e, 0x9d, 0x8c, 0x6f, 0x9c, 0x69, 0xc6, 0x33, 0xa3, 0x3b, 0xd7, 0xa5, 0x95, 0x10,
    -	0x62, 0xc3, 0x82, 0x1d, 0x7f, 0x01, 0xf1, 0x0b, 0x10, 0x2c, 0x90, 0x10, 0x14, 0x36, 0xa8, 0xcb,
    -	0x4a, 0x6c, 0xba, 0xc1, 0xa2, 0xe6, 0x5f, 0x64, 0x85, 0xee, 0x63, 0x1e, 0x7e, 0xd5, 0xa6, 0xaa,
    -	0xb2, 0x4a, 0xee, 0x39, 0xe7, 0x7e, 0xe7, 0x71, 0xcf, 0x6b, 0x0c, 0x6b, 0x87, 0x6f, 0x06, 0x25,
    -	0xdb, 0x2b, 0x9b, 0xbe, 0x5d, 0x76, 0x09, 0xfb, 0xdc, 0xa3, 0x87, 0xb6, 0xdb, 0x2c, 0xdf, 0xb9,
    -	0x58, 0x6e, 0x12, 0x97, 0x50, 0x93, 0x91, 0x46, 0xc9, 0xa7, 0x1e, 0xf3, 0x50, 0x5e, 0x4a, 0x96,
    -	0x4c, 0xdf, 0x2e, 0xc5, 0x92, 0xa5, 0x3b, 0x17, 0x57, 0x2e, 0x34, 0x6d, 0x76, 0xd0, 0xde, 0x2b,
    -	0x59, 0x5e, 0xab, 0xdc, 0xf4, 0x9a, 0x5e, 0x59, 0x5c, 0xd8, 0x6b, 0xef, 0x8b, 0x93, 0x38, 0x88,
    -	0xff, 0x24, 0xd0, 0x8a, 0x9e, 0x50, 0x69, 0x79, 0x94, 0x0c, 0x51, 0xb6, 0x72, 0x39, 0x96, 0x69,
    -	0x99, 0xd6, 0x81, 0xed, 0x12, 0x7a, 0xaf, 0xec, 0x1f, 0x36, 0x39, 0x21, 0x28, 0xb7, 0x08, 0x33,
    -	0x87, 0xdd, 0x2a, 0x8f, 0xba, 0x45, 0xdb, 0x2e, 0xb3, 0x5b, 0x64, 0xe0, 0xc2, 0x1b, 0xe3, 0x2e,
    -	0x04, 0xd6, 0x01, 0x69, 0x99, 0x03, 0xf7, 0x2e, 0x8d, 0xba, 0xd7, 0x66, 0xb6, 0x53, 0xb6, 0x5d,
    -	0x16, 0x30, 0xda, 0x7f, 0x49, 0xff, 0x4d, 0x83, 0x13, 0xd7, 0xea, 0xf5, 0xea, 0xb6, 0xdb, 0xa4,
    -	0x24, 0x08, 0xaa, 0x26, 0x3b, 0x40, 0xab, 0x30, 0xe3, 0x9b, 0xec, 0x20, 0xaf, 0xad, 0x6a, 0x6b,
    -	0x73, 0xc6, 0xfc, 0x83, 0x4e, 0x71, 0xaa, 0xdb, 0x29, 0xce, 0x70, 0x1e, 0x16, 0x1c, 0x74, 0x19,
    -	0xb2, 0xfc, 0x6f, 0xfd, 0x9e, 0x4f, 0xf2, 0xd3, 0x42, 0x2a, 0xdf, 0xed, 0x14, 0xb3, 0x55, 0x45,
    -	0x3b, 0x4a, 0xfc, 0x8f, 0x23, 0x49, 0x54, 0x83, 0xcc, 0x9e, 0x69, 0x1d, 0x12, 0xb7, 0x91, 0x4f,
    -	0xad, 0x6a, 0x6b, 0xb9, 0xf5, 0xb5, 0xd2, 0xa8, 0xe7, 0x2b, 0x29, 0x7b, 0x0c, 0x29, 0x6f, 0x9c,
    -	0x50, 0x46, 0x64, 0x14, 0x01, 0x87, 0x48, 0xfa, 0x3e, 0x9c, 0x4c, 0xd8, 0x8f, 0xdb, 0x0e, 0xb9,
    -	0x69, 0x3a, 0x6d, 0x82, 0x2a, 0x90, 0xe6, 0x8a, 0x83, 0xbc, 0xb6, 0x3a, 0xbd, 0x96, 0x5b, 0x7f,
    -	0x75, 0xb4, 0xaa, 0x3e, 0xf7, 0x8d, 0x05, 0xa5, 0x2b, 0xcd, 0x4f, 0x01, 0x96, 0x30, 0xfa, 0x2e,
    -	0x64, 0xb6, 0xab, 0x86, 0xe3, 0x59, 0x87, 0x3c, 0x3e, 0x96, 0xdd, 0xa0, 0xfd, 0xf1, 0xd9, 0xd8,
    -	0xbe, 0x82, 0xb1, 0xe0, 0x20, 0x1d, 0x66, 0xc9, 0x5d, 0x8b, 0xf8, 0x2c, 0x9f, 0x5a, 0x9d, 0x5e,
    -	0x9b, 0x33, 0xa0, 0xdb, 0x29, 0xce, 0x6e, 0x0a, 0x0a, 0x56, 0x1c, 0xfd, 0xeb, 0x14, 0x64, 0x94,
    -	0x5a, 0x74, 0x0b, 0xb2, 0x3c, 0x7d, 0x1a, 0x26, 0x33, 0x05, 0x6a, 0x6e, 0xfd, 0xf5, 0x84, 0xbd,
    -	0xd1, 0x6b, 0x96, 0xfc, 0xc3, 0x26, 0x27, 0x04, 0x25, 0x2e, 0xcd, 0x6d, 0xdf, 0xdd, 0xbb, 0x4d,
    -	0x2c, 0x76, 0x83, 0x30, 0xd3, 0x40, 0xca, 0x0e, 0x88, 0x69, 0x38, 0x42, 0x45, 0x5b, 0x30, 0x13,
    -	0xf8, 0xc4, 0x52, 0x81, 0x3f, 0x3d, 0x36, 0xf0, 0x35, 0x9f, 0x58, 0xb1, 0x6b, 0xfc, 0x84, 0x05,
    -	0x00, 0xda, 0x85, 0xd9, 0x80, 0x99, 0xac, 0x1d, 0x88, 0x87, 0xcf, 0xad, 0x9f, 0x1d, 0x0f, 0x25,
    -	0xc4, 0x8d, 0x45, 0x05, 0x36, 0x2b, 0xcf, 0x58, 0xc1, 0xe8, 0x7f, 0x68, 0xb0, 0xd8, 0xfb, 0xda,
    -	0xe8, 0x26, 0x64, 0x02, 0x42, 0xef, 0xd8, 0x16, 0xc9, 0xcf, 0x08, 0x25, 0xe5, 0xf1, 0x4a, 0xa4,
    -	0x7c, 0x98, 0x2f, 0x39, 0x9e, 0x2b, 0x8a, 0x86, 0x43, 0x30, 0xf4, 0x01, 0x64, 0x29, 0x09, 0xbc,
    -	0x36, 0xb5, 0x88, 0xb2, 0xfe, 0x42, 0x12, 0x98, 0xd7, 0x3d, 0x87, 0xe4, 0xc9, 0xda, 0xd8, 0xf1,
    -	0x2c, 0xd3, 0x91, 0xa1, 0xc4, 0x64, 0x9f, 0x50, 0xe2, 0x5a, 0xc4, 0x98, 0xe7, 0x59, 0x8e, 0x15,
    -	0x04, 0x8e, 0xc0, 0x78, 0x15, 0xcd, 0x2b, 0x43, 0x36, 0x1c, 0xf3, 0x58, 0x1e, 0x74, 0xa7, 0xe7,
    -	0x41, 0x5f, 0x1b, 0x1b, 0x20, 0x61, 0xd7, 0xa8, 0x57, 0xd5, 0x7f, 0xd5, 0x60, 0x29, 0x29, 0xb8,
    -	0x63, 0x07, 0x0c, 0x7d, 0x3c, 0xe0, 0x44, 0x69, 0x32, 0x27, 0xf8, 0x6d, 0xe1, 0xc2, 0x92, 0x52,
    -	0x95, 0x0d, 0x29, 0x09, 0x07, 0xae, 0x43, 0xda, 0x66, 0xa4, 0x15, 0x88, 0x12, 0xc9, 0xad, 0x9f,
    -	0x99, 0xcc, 0x83, 0xb8, 0x3a, 0xb7, 0xf9, 0x65, 0x2c, 0x31, 0xf4, 0xbf, 0x35, 0x28, 0x26, 0xc5,
    -	0xaa, 0x26, 0x35, 0x5b, 0x84, 0x11, 0x1a, 0x44, 0x8f, 0x87, 0xd6, 0x20, 0x6b, 0x56, 0xb7, 0xb7,
    -	0xa8, 0xd7, 0xf6, 0xc3, 0xd2, 0xe5, 0xa6, 0xbd, 0xa7, 0x68, 0x38, 0xe2, 0xf2, 0x02, 0x3f, 0xb4,
    -	0x55, 0x97, 0x4a, 0x14, 0xf8, 0x75, 0xdb, 0x6d, 0x60, 0xc1, 0xe1, 0x12, 0xae, 0xd9, 0x0a, 0x9b,
    -	0x5f, 0x24, 0x51, 0x31, 0x5b, 0x04, 0x0b, 0x0e, 0x2a, 0x42, 0x3a, 0xb0, 0x3c, 0x5f, 0x66, 0xf0,
    -	0x9c, 0x31, 0xc7, 0x4d, 0xae, 0x71, 0x02, 0x96, 0x74, 0x74, 0x0e, 0xe6, 0xb8, 0x60, 0xe0, 0x9b,
    -	0x16, 0xc9, 0xa7, 0x85, 0xd0, 0x42, 0xb7, 0x53, 0x9c, 0xab, 0x84, 0x44, 0x1c, 0xf3, 0xf5, 0x1f,
    -	0xfa, 0xde, 0x87, 0x3f, 0x1d, 0x5a, 0x07, 0xb0, 0x3c, 0x97, 0x51, 0xcf, 0x71, 0x48, 0xd8, 0x8d,
    -	0xa2, 0xa4, 0xd9, 0x88, 0x38, 0x38, 0x21, 0x85, 0x6c, 0x00, 0x3f, 0x8a, 0x8d, 0x4a, 0x9e, 0xb7,
    -	0x26, 0x0b, 0xfd, 0x90, 0x98, 0x1a, 0x8b, 0x5c, 0x55, 0x82, 0x91, 0x00, 0xd7, 0x7f, 0xd4, 0x20,
    -	0xa7, 0xee, 0x1f, 0x43, 0x3a, 0x5d, 0xed, 0x4d, 0xa7, 0x97, 0xc7, 0x8f, 0x96, 0xe1, 0x99, 0xf4,
    -	0xb3, 0x06, 0x2b, 0xa1, 0xd5, 0x9e, 0xd9, 0x30, 0x4c, 0xc7, 0x74, 0x2d, 0x42, 0xc3, 0x4e, 0xbd,
    -	0x02, 0x29, 0x3b, 0x4c, 0x1f, 0x50, 0x00, 0xa9, 0xed, 0x2a, 0x4e, 0xd9, 0x3e, 0x3a, 0x0f, 0xd9,
    -	0x03, 0x2f, 0x60, 0x22, 0x31, 0x64, 0xea, 0x44, 0x06, 0x5f, 0x53, 0x74, 0x1c, 0x49, 0xa0, 0x2a,
    -	0xa4, 0x7d, 0x8f, 0xb2, 0x20, 0x3f, 0x23, 0x0c, 0x3e, 0x37, 0xd6, 0xe0, 0xaa, 0x47, 0x99, 0xea,
    -	0xa5, 0xf1, 0x88, 0xe2, 0x08, 0x58, 0x02, 0xe9, 0x5f, 0xc0, 0x0b, 0x43, 0x2c, 0x97, 0x57, 0xd0,
    -	0x67, 0x90, 0xb1, 0x25, 0x53, 0x4d, 0xc4, 0xcb, 0x63, 0x15, 0x0e, 0xf1, 0x3f, 0x1e, 0xc4, 0xe1,
    -	0xc0, 0x0d, 0x51, 0xf5, 0xef, 0x35, 0x58, 0x1e, 0xb0, 0x54, 0xec, 0x12, 0x1e, 0x65, 0x22, 0x62,
    -	0xe9, 0xc4, 0x2e, 0xe1, 0x51, 0x86, 0x05, 0x07, 0x5d, 0x87, 0xac, 0x58, 0x45, 0x2c, 0xcf, 0x51,
    -	0x51, 0x2b, 0x87, 0x51, 0xab, 0x2a, 0xfa, 0x51, 0xa7, 0xf8, 0xe2, 0xe0, 0x7e, 0x56, 0x0a, 0xd9,
    -	0x38, 0x02, 0xe0, 0x55, 0x47, 0x28, 0xf5, 0xa8, 0x2a, 0x4c, 0x51, 0x75, 0x9b, 0x9c, 0x80, 0x25,
    -	0x5d, 0xff, 0x2e, 0x4e, 0x4a, 0xbe, 0x2b, 0x70, 0xfb, 0xf8, 0x8b, 0xf4, 0xcf, 0x72, 0xfe, 0x5e,
    -	0x58, 0x70, 0x90, 0x0f, 0x4b, 0x76, 0xdf, 0x72, 0x31, 0x71, 0xd3, 0x8d, 0x6e, 0x18, 0x79, 0x85,
    -	0xbc, 0xd4, 0xcf, 0xc1, 0x03, 0xe8, 0xfa, 0x2d, 0x18, 0x90, 0xe2, 0xed, 0xfe, 0x80, 0x31, 0x7f,
    -	0x48, 0xe1, 0x8c, 0xde, 0x66, 0x62, 0xed, 0x59, 0xe1, 0x53, 0xbd, 0x5e, 0xc5, 0x02, 0x45, 0xff,
    -	0x46, 0x83, 0x53, 0x43, 0x07, 0x67, 0xd4, 0xd8, 0xb4, 0x91, 0x8d, 0xad, 0xa2, 0x5e, 0x54, 0xc6,
    -	0xe0, 0xfc, 0x68, 0x4b, 0x7a, 0x91, 0xf9, 0x8b, 0x0f, 0x7b, 0x7f, 0xfd, 0xcf, 0x54, 0xf4, 0x22,
    -	0xa2, 0xab, 0xbd, 0x1b, 0xc5, 0x5b, 0x74, 0x1d, 0xae, 0x59, 0xf5, 0xd0, 0x93, 0x89, 0xf8, 0x45,
    -	0x3c, 0x3c, 0x20, 0x8d, 0x1a, 0xb0, 0xd8, 0x20, 0xfb, 0x66, 0xdb, 0x61, 0x4a, 0xb7, 0x8a, 0xda,
    -	0xe4, 0xeb, 0x26, 0xea, 0x76, 0x8a, 0x8b, 0x57, 0x7a, 0x30, 0x70, 0x1f, 0x26, 0xda, 0x80, 0x69,
    -	0xe6, 0x84, 0xed, 0xe6, 0x95, 0xb1, 0xd0, 0xf5, 0x9d, 0x9a, 0x91, 0x53, 0xee, 0x4f, 0xd7, 0x77,
    -	0x6a, 0x98, 0xdf, 0x46, 0xef, 0x43, 0x9a, 0xb6, 0x1d, 0xc2, 0x97, 0xa9, 0xe9, 0x89, 0xf6, 0x32,
    -	0xfe, 0xa6, 0x71, 0xf9, 0xf3, 0x53, 0x80, 0x25, 0x84, 0xfe, 0x25, 0x2c, 0xf4, 0x6c, 0x5c, 0xa8,
    -	0x05, 0xf3, 0x4e, 0xa2, 0x84, 0x55, 0x14, 0x2e, 0xfd, 0xaf, 0xba, 0x57, 0x0d, 0xe7, 0xa4, 0xd2,
    -	0x38, 0x9f, 0xe4, 0xe1, 0x1e, 0x78, 0xdd, 0x04, 0x88, 0x7d, 0xe5, 0x95, 0xc8, 0xcb, 0x47, 0x76,
    -	0x1b, 0x55, 0x89, 0xbc, 0xaa, 0x02, 0x2c, 0xe9, 0x7c, 0x7a, 0x05, 0xc4, 0xa2, 0x84, 0x55, 0xe2,
    -	0x7e, 0x19, 0x4d, 0xaf, 0x5a, 0xc4, 0xc1, 0x09, 0x29, 0xfd, 0x77, 0x0d, 0x16, 0x2a, 0xd2, 0xe4,
    -	0xaa, 0xe7, 0xd8, 0xd6, 0xbd, 0x63, 0x58, 0xb4, 0x6e, 0xf4, 0x2c, 0x5a, 0x4f, 0x68, 0xd3, 0x3d,
    -	0x86, 0x8d, 0xdc, 0xb4, 0x7e, 0xd2, 0xe0, 0xf9, 0x1e, 0xc9, 0xcd, 0xb8, 0x19, 0x45, 0x23, 0x41,
    -	0x1b, 0x37, 0x12, 0x7a, 0x10, 0x44, 0x69, 0x0d, 0x1d, 0x09, 0x68, 0x0b, 0x52, 0xcc, 0x53, 0x39,
    -	0x3a, 0x31, 0x1c, 0x21, 0x34, 0x9e, 0x6d, 0x75, 0x0f, 0xa7, 0x98, 0xa7, 0xff, 0xa2, 0x41, 0xbe,
    -	0x47, 0x2a, 0xd9, 0x44, 0x9f, 0xbd, 0xdd, 0x37, 0x60, 0x66, 0x9f, 0x7a, 0xad, 0xa7, 0xb1, 0x3c,
    -	0x0a, 0xfa, 0x55, 0xea, 0xb5, 0xb0, 0x80, 0xd1, 0xef, 0x6b, 0xb0, 0xdc, 0x23, 0x79, 0x0c, 0x0b,
    -	0xc9, 0x4e, 0xef, 0x42, 0x72, 0x76, 0x42, 0x1f, 0x46, 0xac, 0x25, 0xf7, 0x53, 0x7d, 0x1e, 0x70,
    -	0x5f, 0xd1, 0x3e, 0xe4, 0x7c, 0xaf, 0x51, 0x23, 0x0e, 0xb1, 0x98, 0x37, 0xac, 0xc0, 0x9f, 0xe4,
    -	0x84, 0xb9, 0x47, 0x9c, 0xf0, 0xaa, 0x71, 0xa2, 0xdb, 0x29, 0xe6, 0xaa, 0x31, 0x16, 0x4e, 0x02,
    -	0xa3, 0xbb, 0xb0, 0x1c, 0xed, 0xa2, 0x91, 0xb6, 0xd4, 0xd3, 0x6b, 0x3b, 0xd5, 0xed, 0x14, 0x97,
    -	0x2b, 0xfd, 0x88, 0x78, 0x50, 0x09, 0xba, 0x06, 0x19, 0xdb, 0x17, 0x9f, 0xdd, 0xea, 0x8b, 0xed,
    -	0x49, 0x8b, 0x9d, 0xfc, 0x3e, 0x97, 0x1f, 0x7f, 0xea, 0x80, 0xc3, 0xeb, 0xfa, 0x5f, 0xfd, 0x39,
    -	0xc0, 0x13, 0x0e, 0x6d, 0x25, 0xb6, 0x0f, 0x39, 0xf3, 0xce, 0x3d, 0xdd, 0xe6, 0xd1, 0x3b, 0x16,
    -	0x47, 0x37, 0xa1, 0x36, 0xb3, 0x9d, 0x92, 0xfc, 0x31, 0xa6, 0xb4, 0xed, 0xb2, 0x5d, 0x5a, 0x63,
    -	0xd4, 0x76, 0x9b, 0x72, 0x44, 0x27, 0xd6, 0xa2, 0xd3, 0x90, 0x51, 0x53, 0x53, 0x38, 0x9e, 0x96,
    -	0x5e, 0x6d, 0x4a, 0x12, 0x0e, 0x79, 0xfa, 0x51, 0x7f, 0x5e, 0x88, 0x19, 0x7a, 0xfb, 0x99, 0xe5,
    -	0xc5, 0x73, 0x2a, 0x1b, 0x47, 0xe7, 0xc6, 0x27, 0xf1, 0x62, 0x29, 0x33, 0x7d, 0x7d, 0xc2, 0x4c,
    -	0x4f, 0x4e, 0xb4, 0x91, 0x6b, 0x25, 0xfa, 0x10, 0x66, 0x89, 0x44, 0x97, 0x23, 0xf2, 0xe2, 0x84,
    -	0xe8, 0x71, 0x5b, 0x8d, 0x7f, 0x79, 0x50, 0x34, 0x05, 0x88, 0xde, 0xe1, 0x51, 0xe2, 0xb2, 0xfc,
    -	0x83, 0x5f, 0xee, 0xe1, 0x73, 0xc6, 0x4b, 0xd2, 0xd9, 0x88, 0x7c, 0xc4, 0x3f, 0x70, 0xa2, 0x23,
    -	0x4e, 0xde, 0xd0, 0x3f, 0x05, 0x34, 0xb8, 0xe4, 0x4c, 0xb0, 0x42, 0x9d, 0x81, 0x59, 0xb7, 0xdd,
    -	0xda, 0x23, 0xb2, 0x86, 0xd2, 0xb1, 0x81, 0x15, 0x41, 0xc5, 0x8a, 0x6b, 0xbc, 0xfd, 0xe0, 0x71,
    -	0x61, 0xea, 0xe1, 0xe3, 0xc2, 0xd4, 0xa3, 0xc7, 0x85, 0xa9, 0xaf, 0xba, 0x05, 0xed, 0x41, 0xb7,
    -	0xa0, 0x3d, 0xec, 0x16, 0xb4, 0x47, 0xdd, 0x82, 0xf6, 0x4f, 0xb7, 0xa0, 0x7d, 0xfb, 0x6f, 0x61,
    -	0xea, 0xa3, 0xfc, 0xa8, 0x5f, 0x4b, 0xff, 0x0b, 0x00, 0x00, 0xff, 0xff, 0x24, 0x03, 0xec, 0x04,
    -	0x48, 0x15, 0x00, 0x00,
    +	// 1884 bytes of a gzipped FileDescriptorProto
    +	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x59, 0xcd, 0x8f, 0x1b, 0x49,
    +	0x15, 0x9f, 0xf6, 0x8c, 0x67, 0xec, 0xe7, 0xf9, 0xc8, 0x14, 0x59, 0x61, 0x06, 0x61, 0x87, 0x5e,
    +	0xb2, 0x3b, 0x4b, 0x76, 0x6d, 0x32, 0x1b, 0x21, 0xb8, 0x00, 0xdb, 0x93, 0x6c, 0xe2, 0xcd, 0xc4,
    +	0xb1, 0xca, 0x56, 0x10, 0x88, 0x8f, 0xed, 0x69, 0xd7, 0x78, 0x7a, 0xa7, 0xdd, 0xd5, 0xaa, 0x2e,
    +	0x87, 0x44, 0x42, 0x88, 0x0b, 0x07, 0x6e, 0xf0, 0x27, 0x20, 0xfe, 0x02, 0x04, 0xd2, 0xae, 0xb4,
    +	0x82, 0x85, 0x0b, 0xca, 0x71, 0x25, 0x2e, 0x7b, 0xc1, 0x22, 0xe6, 0xbf, 0xc8, 0x09, 0xd5, 0x47,
    +	0x7f, 0xd9, 0xee, 0xb1, 0x89, 0x22, 0x9f, 0xc6, 0xfd, 0xde, 0xab, 0xdf, 0x7b, 0xf5, 0xea, 0x7d,
    +	0x55, 0x0d, 0x1c, 0x5e, 0x7c, 0x27, 0x6c, 0xb8, 0xb4, 0x69, 0x07, 0x6e, 0xd3, 0x27, 0xfc, 0x17,
    +	0x94, 0x5d, 0xb8, 0xfe, 0xa0, 0xf9, 0xf8, 0x66, 0x73, 0x40, 0x7c, 0xc2, 0x6c, 0x4e, 0xfa, 0x8d,
    +	0x80, 0x51, 0x4e, 0x51, 0x55, 0x49, 0x36, 0xec, 0xc0, 0x6d, 0x24, 0x92, 0x8d, 0xc7, 0x37, 0x0f,
    +	0xde, 0x19, 0xb8, 0xfc, 0x7c, 0x74, 0xda, 0x70, 0xe8, 0xb0, 0x39, 0xa0, 0x03, 0xda, 0x94, 0x0b,
    +	0x4e, 0x47, 0x67, 0xf2, 0x4b, 0x7e, 0xc8, 0x5f, 0x0a, 0xe8, 0xc0, 0x4c, 0xa9, 0x74, 0x28, 0x23,
    +	0x73, 0x94, 0x1d, 0xdc, 0x4a, 0x64, 0x86, 0xb6, 0x73, 0xee, 0xfa, 0x84, 0x3d, 0x6d, 0x06, 0x17,
    +	0x03, 0x41, 0x08, 0x9b, 0x43, 0xc2, 0xed, 0x79, 0xab, 0x9a, 0x79, 0xab, 0xd8, 0xc8, 0xe7, 0xee,
    +	0x90, 0xcc, 0x2c, 0xf8, 0xf6, 0xa2, 0x05, 0xa1, 0x73, 0x4e, 0x86, 0xf6, 0xcc, 0xba, 0x77, 0xf3,
    +	0xd6, 0x8d, 0xb8, 0xeb, 0x35, 0x5d, 0x9f, 0x87, 0x9c, 0x4d, 0x2f, 0x32, 0xff, 0x66, 0xc0, 0xde,
    +	0xbd, 0x5e, 0xaf, 0xd3, 0xf2, 0x07, 0x8c, 0x84, 0x61, 0xc7, 0xe6, 0xe7, 0xe8, 0x1a, 0x6c, 0x04,
    +	0x36, 0x3f, 0xaf, 0x1a, 0xd7, 0x8c, 0xc3, 0xb2, 0xb5, 0xfd, 0x6c, 0x5c, 0x5f, 0x9b, 0x8c, 0xeb,
    +	0x1b, 0x82, 0x87, 0x25, 0x07, 0xdd, 0x82, 0x92, 0xf8, 0xdb, 0x7b, 0x1a, 0x90, 0xea, 0xba, 0x94,
    +	0xaa, 0x4e, 0xc6, 0xf5, 0x52, 0x47, 0xd3, 0x5e, 0xa4, 0x7e, 0xe3, 0x58, 0x12, 0x75, 0x61, 0xeb,
    +	0xd4, 0x76, 0x2e, 0x88, 0xdf, 0xaf, 0x16, 0xae, 0x19, 0x87, 0x95, 0xa3, 0xc3, 0x46, 0xde, 0xf1,
    +	0x35, 0xb4, 0x3d, 0x96, 0x92, 0xb7, 0xf6, 0xb4, 0x11, 0x5b, 0x9a, 0x80, 0x23, 0x24, 0xf3, 0x0c,
    +	0xae, 0xa6, 0xec, 0xc7, 0x23, 0x8f, 0x3c, 0xb2, 0xbd, 0x11, 0x41, 0x6d, 0x28, 0x0a, 0xc5, 0x61,
    +	0xd5, 0xb8, 0xb6, 0x7e, 0x58, 0x39, 0x7a, 0x2b, 0x5f, 0xd5, 0xd4, 0xf6, 0xad, 0x1d, 0xad, 0xab,
    +	0x28, 0xbe, 0x42, 0xac, 0x60, 0xcc, 0x4f, 0x0c, 0x28, 0xb7, 0x3a, 0xef, 0xf5, 0xfb, 0x42, 0x0e,
    +	0x7d, 0x08, 0x25, 0x71, 0xde, 0x7d, 0x9b, 0xdb, 0xd2, 0x4d, 0x95, 0xa3, 0x6f, 0xa5, 0x14, 0xc4,
    +	0xee, 0x6f, 0x04, 0x17, 0x03, 0x41, 0x08, 0x1b, 0x42, 0x5a, 0x28, 0x7b, 0x78, 0xfa, 0x11, 0x71,
    +	0xf8, 0x03, 0xc2, 0x6d, 0x0b, 0x69, 0x3d, 0x90, 0xd0, 0x70, 0x8c, 0x8a, 0x5a, 0xb0, 0x11, 0x06,
    +	0xc4, 0xd1, 0x9e, 0x7a, 0xf3, 0x12, 0x4f, 0x45, 0x46, 0x75, 0x03, 0xe2, 0x24, 0xa7, 0x25, 0xbe,
    +	0xb0, 0x84, 0x30, 0x3f, 0x36, 0x60, 0x27, 0x96, 0x3a, 0x71, 0x43, 0x8e, 0x7e, 0x32, 0x63, 0x7e,
    +	0x63, 0x39, 0xf3, 0xc5, 0x6a, 0x69, 0xfc, 0x15, 0xad, 0xa7, 0x14, 0x51, 0x52, 0xa6, 0xdf, 0x83,
    +	0xa2, 0xcb, 0xc9, 0x30, 0xac, 0x16, 0xa4, 0xeb, 0x5f, 0x5f, 0xc2, 0xf6, 0xc4, 0xe9, 0x2d, 0xb1,
    +	0x12, 0x2b, 0x00, 0x73, 0x90, 0x32, 0x5c, 0x6c, 0x08, 0x3d, 0x82, 0x72, 0x60, 0x33, 0xe2, 0x73,
    +	0x4c, 0xce, 0xb4, 0xe5, 0x97, 0x9c, 0x6c, 0x27, 0x12, 0x25, 0x8c, 0xf8, 0x0e, 0xb1, 0x76, 0x26,
    +	0xe3, 0x7a, 0x39, 0x26, 0xe2, 0x04, 0xca, 0x7c, 0x08, 0x5b, 0xad, 0x8e, 0xe5, 0x51, 0xe7, 0x42,
    +	0x44, 0xbf, 0xe3, 0xf6, 0xd9, 0x74, 0xf4, 0x1f, 0xb7, 0x6e, 0x63, 0x2c, 0x39, 0xc8, 0x84, 0x4d,
    +	0xf2, 0xc4, 0x21, 0x01, 0x97, 0x1b, 0x2c, 0x5b, 0x30, 0x19, 0xd7, 0x37, 0xef, 0x48, 0x0a, 0xd6,
    +	0x1c, 0xf3, 0x37, 0x05, 0xd8, 0xd2, 0x41, 0xb5, 0x82, 0x60, 0xb9, 0x9b, 0x09, 0x96, 0xeb, 0x0b,
    +	0xd3, 0x2a, 0x2f, 0x54, 0xd0, 0x43, 0xd8, 0x0c, 0xb9, 0xcd, 0x47, 0xa1, 0x4c, 0xeb, 0xcb, 0xe3,
    +	0x4e, 0x43, 0x49, 0x71, 0x6b, 0x57, 0x83, 0x6d, 0xaa, 0x6f, 0xac, 0x61, 0xcc, 0x7f, 0x18, 0xb0,
    +	0x9b, 0xcd, 0x65, 0xf4, 0x08, 0xb6, 0x42, 0xc2, 0x1e, 0xbb, 0x0e, 0xa9, 0x6e, 0x48, 0x25, 0xcd,
    +	0xc5, 0x4a, 0x94, 0x7c, 0x54, 0x0d, 0x2a, 0xa2, 0x12, 0x68, 0x1a, 0x8e, 0xc0, 0xd0, 0x0f, 0xa1,
    +	0xc4, 0x48, 0x48, 0x47, 0xcc, 0x21, 0xda, 0xfa, 0x77, 0xd2, 0xc0, 0xa2, 0xaa, 0x0b, 0x48, 0x51,
    +	0x8a, 0xfa, 0x27, 0xd4, 0xb1, 0x3d, 0xe5, 0xca, 0x24, 0x3c, 0xb6, 0x45, 0x3c, 0x63, 0x0d, 0x81,
    +	0x63, 0x30, 0x51, 0x23, 0xb7, 0xb5, 0x21, 0xc7, 0x9e, 0xbd, 0x92, 0x03, 0x3d, 0xc9, 0x1c, 0xe8,
    +	0x37, 0x17, 0x3a, 0x48, 0xda, 0x95, 0x5b, 0x00, 0xfe, 0x6a, 0xc0, 0x95, 0xb4, 0xe0, 0x0a, 0x6a,
    +	0xc0, 0xfd, 0x6c, 0x0d, 0x78, 0x63, 0xb9, 0x1d, 0xe4, 0x94, 0x81, 0x7f, 0x1b, 0x50, 0x4f, 0x8b,
    +	0x75, 0x6c, 0x66, 0x0f, 0x09, 0x27, 0x2c, 0x8c, 0x0f, 0x0f, 0x1d, 0x42, 0xc9, 0xee, 0xb4, 0xee,
    +	0x32, 0x3a, 0x0a, 0xa2, 0xd4, 0x15, 0xa6, 0xbd, 0xa7, 0x69, 0x38, 0xe6, 0x8a, 0x04, 0xbf, 0x70,
    +	0x75, 0x0f, 0x4a, 0x25, 0xf8, 0x7d, 0xd7, 0xef, 0x63, 0xc9, 0x11, 0x12, 0xbe, 0x3d, 0x8c, 0x5a,
    +	0x5b, 0x2c, 0xd1, 0xb6, 0x87, 0x04, 0x4b, 0x0e, 0xaa, 0x43, 0x31, 0x74, 0x68, 0xa0, 0x22, 0xb8,
    +	0x6c, 0x95, 0x85, 0xc9, 0x5d, 0x41, 0xc0, 0x8a, 0x8e, 0x6e, 0x40, 0x59, 0x08, 0x86, 0x81, 0xed,
    +	0x90, 0x6a, 0x51, 0x0a, 0xc9, 0xea, 0xd3, 0x8e, 0x88, 0x38, 0xe1, 0x9b, 0x7f, 0x9a, 0x3a, 0x1f,
    +	0x59, 0xea, 0x8e, 0x00, 0x1c, 0xea, 0x73, 0x46, 0x3d, 0x8f, 0x44, 0xd5, 0x28, 0x0e, 0x9a, 0xe3,
    +	0x98, 0x83, 0x53, 0x52, 0xc8, 0x05, 0x08, 0x62, 0xdf, 0xe8, 0xe0, 0xf9, 0xee, 0x72, 0xae, 0x9f,
    +	0xe3, 0x53, 0x6b, 0x57, 0xa8, 0x4a, 0x31, 0x52, 0xe0, 0xe6, 0x9f, 0x0d, 0xa8, 0xe8, 0xf5, 0x2b,
    +	0x08, 0xa7, 0xf7, 0xb3, 0xe1, 0xf4, 0xf5, 0xc5, 0x83, 0xc3, 0xfc, 0x48, 0xfa, 0xc4, 0x80, 0x83,
    +	0xc8, 0x6a, 0x6a, 0xf7, 0x2d, 0xdb, 0xb3, 0x7d, 0x87, 0xb0, 0xa8, 0x52, 0x1f, 0x40, 0xc1, 0x8d,
    +	0xc2, 0x07, 0x34, 0x40, 0xa1, 0xd5, 0xc1, 0x05, 0x37, 0x40, 0x6f, 0x43, 0xe9, 0x9c, 0x86, 0x5c,
    +	0x06, 0x86, 0x0a, 0x9d, 0xd8, 0xe0, 0x7b, 0x9a, 0x8e, 0x63, 0x09, 0xd4, 0x81, 0x62, 0x40, 0x19,
    +	0x0f, 0xab, 0x1b, 0xd2, 0xe0, 0x1b, 0x0b, 0x0d, 0xee, 0x50, 0xc6, 0x75, 0x2d, 0x4d, 0x06, 0x10,
    +	0x81, 0x80, 0x15, 0x90, 0xf9, 0x4b, 0xf8, 0xca, 0x1c, 0xcb, 0xd5, 0x12, 0xf4, 0x73, 0xd8, 0x72,
    +	0x15, 0x53, 0xcf, 0x3b, 0xb7, 0x16, 0x2a, 0x9c, 0xb3, 0xff, 0x64, 0xcc, 0x8a, 0xc6, 0xa9, 0x08,
    +	0xd5, 0xfc, 0xa3, 0x01, 0xfb, 0x33, 0x96, 0xca, 0x49, 0x91, 0x32, 0x2e, 0x3d, 0x56, 0x4c, 0x4d,
    +	0x8a, 0x94, 0x71, 0x2c, 0x39, 0xe8, 0x3e, 0x94, 0xe4, 0xa0, 0xe9, 0x50, 0x4f, 0x7b, 0xad, 0x19,
    +	0x79, 0xad, 0xa3, 0xe9, 0x2f, 0xc6, 0xf5, 0xaf, 0xce, 0x4e, 0xdf, 0x8d, 0x88, 0x8d, 0x63, 0x00,
    +	0x91, 0x75, 0x84, 0x31, 0xca, 0x74, 0x62, 0xca, 0xac, 0xbb, 0x23, 0x08, 0x58, 0xd1, 0xcd, 0x3f,
    +	0x24, 0x41, 0x29, 0x26, 0x41, 0x61, 0x9f, 0x38, 0x91, 0xe9, 0x5e, 0x2e, 0xce, 0x0b, 0x4b, 0x0e,
    +	0x0a, 0xe0, 0x8a, 0x3b, 0x35, 0x3a, 0x2e, 0x5d, 0x74, 0xe3, 0x15, 0x56, 0x55, 0x23, 0x5f, 0x99,
    +	0xe6, 0xe0, 0x19, 0x74, 0xf3, 0x43, 0x98, 0x91, 0x12, 0xe5, 0xfe, 0x9c, 0xf3, 0x60, 0x4e, 0xe2,
    +	0xe4, 0xcf, 0xaa, 0x89, 0xf6, 0x92, 0xdc, 0x53, 0xaf, 0xd7, 0xc1, 0x12, 0xc5, 0xfc, 0xad, 0x01,
    +	0xaf, 0xcd, 0x6d, 0x9c, 0x71, 0x61, 0x33, 0x72, 0x0b, 0x5b, 0x5b, 0x9f, 0xa8, 0xf2, 0xc1, 0xdb,
    +	0xf9, 0x96, 0x64, 0x91, 0xc5, 0x89, 0xcf, 0x3b, 0x7f, 0xf3, 0x9f, 0x85, 0xf8, 0x44, 0x64, 0x55,
    +	0xfb, 0x41, 0xec, 0x6f, 0x59, 0x75, 0x84, 0x66, 0x5d, 0x43, 0xaf, 0xa6, 0xfc, 0x17, 0xf3, 0xf0,
    +	0x8c, 0x34, 0xea, 0xc3, 0x6e, 0x9f, 0x9c, 0xd9, 0x23, 0x8f, 0x6b, 0xdd, 0xda, 0x6b, 0xcb, 0x5f,
    +	0x26, 0xd0, 0x64, 0x5c, 0xdf, 0xbd, 0x9d, 0xc1, 0xc0, 0x53, 0x98, 0xe8, 0x18, 0xd6, 0xb9, 0x17,
    +	0x95, 0x9b, 0x6f, 0x2c, 0x84, 0xee, 0x9d, 0x74, 0xad, 0x8a, 0xde, 0xfe, 0x7a, 0xef, 0xa4, 0x8b,
    +	0xc5, 0x6a, 0xf4, 0x01, 0x14, 0xd9, 0xc8, 0x23, 0x62, 0x98, 0x5a, 0x5f, 0x6a, 0x2e, 0x13, 0x67,
    +	0x9a, 0xa4, 0xbf, 0xf8, 0x0a, 0xb1, 0x82, 0x30, 0x7f, 0x05, 0x3b, 0x99, 0x89, 0x0b, 0x0d, 0x61,
    +	0xdb, 0x4b, 0xa5, 0xb0, 0xf6, 0xc2, 0xbb, 0xff, 0x57, 0xde, 0xeb, 0x82, 0x73, 0x55, 0x6b, 0xdc,
    +	0x4e, 0xf3, 0x70, 0x06, 0xde, 0xb4, 0x01, 0x92, 0xbd, 0x8a, 0x4c, 0x14, 0xe9, 0xa3, 0xaa, 0x8d,
    +	0xce, 0x44, 0x91, 0x55, 0x21, 0x56, 0x74, 0xd1, 0xbd, 0x42, 0xe2, 0x30, 0xc2, 0xdb, 0x49, 0xbd,
    +	0x8c, 0xbb, 0x57, 0x37, 0xe6, 0xe0, 0x94, 0x94, 0xf9, 0x77, 0x03, 0x76, 0xda, 0xca, 0xe4, 0x0e,
    +	0xf5, 0x5c, 0xe7, 0xe9, 0x0a, 0x06, 0xad, 0x07, 0x99, 0x41, 0xeb, 0x92, 0x32, 0x9d, 0x31, 0x2c,
    +	0x77, 0xd2, 0xfa, 0x8b, 0x01, 0x5f, 0xce, 0x48, 0xde, 0x49, 0x8a, 0x51, 0xdc, 0x12, 0x8c, 0x45,
    +	0x2d, 0x21, 0x83, 0x20, 0x53, 0x6b, 0x6e, 0x4b, 0x40, 0x77, 0xa1, 0xc0, 0xa9, 0x8e, 0xd1, 0xa5,
    +	0xe1, 0x08, 0x61, 0x49, 0x6f, 0xeb, 0x51, 0x5c, 0xe0, 0xd4, 0xfc, 0xd4, 0x80, 0x6a, 0x46, 0x2a,
    +	0x5d, 0x44, 0x5f, 0xbd, 0xdd, 0x0f, 0x60, 0xe3, 0x8c, 0xd1, 0xe1, 0xcb, 0x58, 0x1e, 0x3b, 0xfd,
    +	0x7d, 0x46, 0x87, 0x58, 0xc2, 0x98, 0x9f, 0x19, 0xb0, 0x9f, 0x91, 0x5c, 0xc1, 0x40, 0x72, 0x92,
    +	0x1d, 0x48, 0xde, 0x5c, 0x72, 0x0f, 0x39, 0x63, 0xc9, 0x67, 0x85, 0xa9, 0x1d, 0x88, 0xbd, 0xa2,
    +	0x33, 0xa8, 0x04, 0xb4, 0xdf, 0x25, 0x1e, 0x71, 0x38, 0x9d, 0x97, 0xe0, 0x97, 0x6d, 0xc2, 0x3e,
    +	0x25, 0x5e, 0xb4, 0xd4, 0xda, 0x9b, 0x8c, 0xeb, 0x95, 0x4e, 0x82, 0x85, 0xd3, 0xc0, 0xe8, 0x09,
    +	0xec, 0xc7, 0xb3, 0x68, 0xac, 0xad, 0xf0, 0xf2, 0xda, 0x5e, 0x9b, 0x8c, 0xeb, 0xfb, 0xed, 0x69,
    +	0x44, 0x3c, 0xab, 0x04, 0xdd, 0x83, 0x2d, 0x37, 0x90, 0xd7, 0x6e, 0x7d, 0x63, 0xbb, 0x6c, 0xb0,
    +	0x53, 0xf7, 0x73, 0x75, 0xf9, 0xd3, 0x1f, 0x38, 0x5a, 0x6e, 0xfe, 0x6b, 0x3a, 0x06, 0x44, 0xc0,
    +	0xa1, 0xbb, 0xa9, 0xe9, 0x43, 0xf5, 0xbc, 0x1b, 0x2f, 0x37, 0x79, 0x64, 0xdb, 0x62, 0x7e, 0x11,
    +	0x1a, 0x71, 0xd7, 0x6b, 0xa8, 0xa7, 0xb6, 0x46, 0xcb, 0xe7, 0x0f, 0x59, 0x97, 0x33, 0xd7, 0x1f,
    +	0xa8, 0x16, 0x9d, 0x1a, 0x8b, 0xae, 0xc3, 0x96, 0xee, 0x9a, 0x72, 0xe3, 0x45, 0xb5, 0xab, 0x3b,
    +	0x8a, 0x84, 0x23, 0x9e, 0xf9, 0x62, 0x3a, 0x2e, 0x64, 0x0f, 0xfd, 0xe8, 0x95, 0xc5, 0xc5, 0x97,
    +	0x74, 0x34, 0xe6, 0xc7, 0xc6, 0x4f, 0x93, 0xc1, 0x52, 0x45, 0xfa, 0xd1, 0x92, 0x91, 0x9e, 0xee,
    +	0x68, 0xb9, 0x63, 0x25, 0xfa, 0x11, 0x6c, 0x12, 0x85, 0xae, 0x5a, 0xe4, 0xcd, 0x25, 0xd1, 0x93,
    +	0xb2, 0x9a, 0xbc, 0x3c, 0x68, 0x9a, 0x06, 0x44, 0xdf, 0x17, 0x5e, 0x12, 0xb2, 0xe2, 0xc2, 0xaf,
    +	0xe6, 0xf0, 0xb2, 0xf5, 0x35, 0xb5, 0xd9, 0x98, 0xfc, 0x42, 0x5c, 0x70, 0xe2, 0x4f, 0x9c, 0x5e,
    +	0x61, 0x7e, 0x6c, 0xc0, 0xde, 0xd4, 0x0b, 0x12, 0x7a, 0x1d, 0x8a, 0x83, 0xd4, 0x15, 0x33, 0xce,
    +	0x66, 0x75, 0xc7, 0x54, 0x3c, 0x71, 0x53, 0x88, 0x1f, 0x22, 0xa6, 0x6e, 0x0a, 0xb3, 0xaf, 0x0b,
    +	0xa8, 0x99, 0xbe, 0x29, 0xaa, 0xc1, 0x76, 0x5f, 0x8b, 0xcf, 0xbd, 0x2d, 0xc6, 0x43, 0xdc, 0x46,
    +	0xde, 0x10, 0x67, 0xfe, 0x0c, 0xd0, 0xec, 0x78, 0xb6, 0xc4, 0xf0, 0xf7, 0x06, 0x6c, 0xfa, 0xa3,
    +	0xe1, 0x29, 0x51, 0xd9, 0x5f, 0x4c, 0x5c, 0xdb, 0x96, 0x54, 0xac, 0xb9, 0xe6, 0xef, 0x0b, 0x50,
    +	0xd1, 0x0a, 0x8e, 0x5b, 0xb7, 0xf1, 0x0a, 0xda, 0xf4, 0xfd, 0x4c, 0x9b, 0x7e, 0x6b, 0xe1, 0x58,
    +	0x2a, 0xcc, 0xca, 0x7d, 0xe4, 0xea, 0x4e, 0x3d, 0x72, 0xdd, 0x58, 0x0e, 0xee, 0xf2, 0x87, 0xae,
    +	0x4f, 0x0d, 0xd8, 0x4b, 0x49, 0xaf, 0xa0, 0x05, 0x7d, 0x90, 0x6d, 0x41, 0xd7, 0x97, 0xda, 0x45,
    +	0x4e, 0x03, 0x3a, 0xca, 0x18, 0x2f, 0xab, 0x4c, 0x1d, 0x8a, 0x8e, 0xdb, 0x67, 0x99, 0x11, 0x4f,
    +	0x30, 0x43, 0xac, 0xe8, 0xe6, 0x13, 0xd8, 0x9f, 0x71, 0x0f, 0x72, 0xe4, 0xab, 0x45, 0xdf, 0xe5,
    +	0x2e, 0xf5, 0xa3, 0x89, 0xa1, 0xb9, 0xdc, 0xa6, 0x8f, 0xa3, 0x75, 0x99, 0x67, 0x0e, 0x0d, 0x85,
    +	0x53, 0xb0, 0xd6, 0xf7, 0x9e, 0x3d, 0xaf, 0xad, 0x7d, 0xfe, 0xbc, 0xb6, 0xf6, 0xc5, 0xf3, 0xda,
    +	0xda, 0xaf, 0x27, 0x35, 0xe3, 0xd9, 0xa4, 0x66, 0x7c, 0x3e, 0xa9, 0x19, 0x5f, 0x4c, 0x6a, 0xc6,
    +	0x7f, 0x26, 0x35, 0xe3, 0x77, 0xff, 0xad, 0xad, 0xfd, 0xb8, 0x9a, 0xf7, 0x5f, 0xa4, 0xff, 0x05,
    +	0x00, 0x00, 0xff, 0xff, 0xb5, 0x6b, 0x8c, 0x52, 0x60, 0x1a, 0x00, 0x00,
     }
     
     func (m *HTTPIngressPath) Marshal() (dAtA []byte, err error) {
    @@ -1028,7 +1274,7 @@ func (m *HTTPIngressRuleValue) MarshalToSizedBuffer(dAtA []byte) (int, error) {
     	return len(dAtA) - i, nil
     }
     
    -func (m *IPBlock) Marshal() (dAtA []byte, err error) {
    +func (m *IPAddress) Marshal() (dAtA []byte, err error) {
     	size := m.Size()
     	dAtA = make([]byte, size)
     	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    @@ -1038,34 +1284,40 @@ func (m *IPBlock) Marshal() (dAtA []byte, err error) {
     	return dAtA[:n], nil
     }
     
    -func (m *IPBlock) MarshalTo(dAtA []byte) (int, error) {
    +func (m *IPAddress) MarshalTo(dAtA []byte) (int, error) {
     	size := m.Size()
     	return m.MarshalToSizedBuffer(dAtA[:size])
     }
     
    -func (m *IPBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    +func (m *IPAddress) MarshalToSizedBuffer(dAtA []byte) (int, error) {
     	i := len(dAtA)
     	_ = i
     	var l int
     	_ = l
    -	if len(m.Except) > 0 {
    -		for iNdEx := len(m.Except) - 1; iNdEx >= 0; iNdEx-- {
    -			i -= len(m.Except[iNdEx])
    -			copy(dAtA[i:], m.Except[iNdEx])
    -			i = encodeVarintGenerated(dAtA, i, uint64(len(m.Except[iNdEx])))
    -			i--
    -			dAtA[i] = 0x12
    +	{
    +		size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
    +		if err != nil {
    +			return 0, err
     		}
    +		i -= size
    +		i = encodeVarintGenerated(dAtA, i, uint64(size))
    +	}
    +	i--
    +	dAtA[i] = 0x12
    +	{
    +		size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
    +		if err != nil {
    +			return 0, err
    +		}
    +		i -= size
    +		i = encodeVarintGenerated(dAtA, i, uint64(size))
     	}
    -	i -= len(m.CIDR)
    -	copy(dAtA[i:], m.CIDR)
    -	i = encodeVarintGenerated(dAtA, i, uint64(len(m.CIDR)))
     	i--
     	dAtA[i] = 0xa
     	return len(dAtA) - i, nil
     }
     
    -func (m *Ingress) Marshal() (dAtA []byte, err error) {
    +func (m *IPAddressList) Marshal() (dAtA []byte, err error) {
     	size := m.Size()
     	dAtA = make([]byte, size)
     	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    @@ -1075,38 +1327,32 @@ func (m *Ingress) Marshal() (dAtA []byte, err error) {
     	return dAtA[:n], nil
     }
     
    -func (m *Ingress) MarshalTo(dAtA []byte) (int, error) {
    +func (m *IPAddressList) MarshalTo(dAtA []byte) (int, error) {
     	size := m.Size()
     	return m.MarshalToSizedBuffer(dAtA[:size])
     }
     
    -func (m *Ingress) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    +func (m *IPAddressList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
     	i := len(dAtA)
     	_ = i
     	var l int
     	_ = l
    -	{
    -		size, err := m.Status.MarshalToSizedBuffer(dAtA[:i])
    -		if err != nil {
    -			return 0, err
    -		}
    -		i -= size
    -		i = encodeVarintGenerated(dAtA, i, uint64(size))
    -	}
    -	i--
    -	dAtA[i] = 0x1a
    -	{
    -		size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
    -		if err != nil {
    -			return 0, err
    +	if len(m.Items) > 0 {
    +		for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
    +			{
    +				size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
    +				if err != nil {
    +					return 0, err
    +				}
    +				i -= size
    +				i = encodeVarintGenerated(dAtA, i, uint64(size))
    +			}
    +			i--
    +			dAtA[i] = 0x12
     		}
    -		i -= size
    -		i = encodeVarintGenerated(dAtA, i, uint64(size))
     	}
    -	i--
    -	dAtA[i] = 0x12
     	{
    -		size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
    +		size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
     		if err != nil {
     			return 0, err
     		}
    @@ -1118,7 +1364,7 @@ func (m *Ingress) MarshalToSizedBuffer(dAtA []byte) (int, error) {
     	return len(dAtA) - i, nil
     }
     
    -func (m *IngressBackend) Marshal() (dAtA []byte, err error) {
    +func (m *IPAddressSpec) Marshal() (dAtA []byte, err error) {
     	size := m.Size()
     	dAtA = make([]byte, size)
     	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    @@ -1128,19 +1374,19 @@ func (m *IngressBackend) Marshal() (dAtA []byte, err error) {
     	return dAtA[:n], nil
     }
     
    -func (m *IngressBackend) MarshalTo(dAtA []byte) (int, error) {
    +func (m *IPAddressSpec) MarshalTo(dAtA []byte) (int, error) {
     	size := m.Size()
     	return m.MarshalToSizedBuffer(dAtA[:size])
     }
     
    -func (m *IngressBackend) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    +func (m *IPAddressSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
     	i := len(dAtA)
     	_ = i
     	var l int
     	_ = l
    -	if m.Service != nil {
    +	if m.ParentRef != nil {
     		{
    -			size, err := m.Service.MarshalToSizedBuffer(dAtA[:i])
    +			size, err := m.ParentRef.MarshalToSizedBuffer(dAtA[:i])
     			if err != nil {
     				return 0, err
     			}
    @@ -1148,15 +1394,140 @@ func (m *IngressBackend) MarshalToSizedBuffer(dAtA []byte) (int, error) {
     			i = encodeVarintGenerated(dAtA, i, uint64(size))
     		}
     		i--
    -		dAtA[i] = 0x22
    +		dAtA[i] = 0xa
     	}
    -	if m.Resource != nil {
    -		{
    -			size, err := m.Resource.MarshalToSizedBuffer(dAtA[:i])
    -			if err != nil {
    -				return 0, err
    -			}
    -			i -= size
    +	return len(dAtA) - i, nil
    +}
    +
    +func (m *IPBlock) Marshal() (dAtA []byte, err error) {
    +	size := m.Size()
    +	dAtA = make([]byte, size)
    +	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    +	if err != nil {
    +		return nil, err
    +	}
    +	return dAtA[:n], nil
    +}
    +
    +func (m *IPBlock) MarshalTo(dAtA []byte) (int, error) {
    +	size := m.Size()
    +	return m.MarshalToSizedBuffer(dAtA[:size])
    +}
    +
    +func (m *IPBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    +	i := len(dAtA)
    +	_ = i
    +	var l int
    +	_ = l
    +	if len(m.Except) > 0 {
    +		for iNdEx := len(m.Except) - 1; iNdEx >= 0; iNdEx-- {
    +			i -= len(m.Except[iNdEx])
    +			copy(dAtA[i:], m.Except[iNdEx])
    +			i = encodeVarintGenerated(dAtA, i, uint64(len(m.Except[iNdEx])))
    +			i--
    +			dAtA[i] = 0x12
    +		}
    +	}
    +	i -= len(m.CIDR)
    +	copy(dAtA[i:], m.CIDR)
    +	i = encodeVarintGenerated(dAtA, i, uint64(len(m.CIDR)))
    +	i--
    +	dAtA[i] = 0xa
    +	return len(dAtA) - i, nil
    +}
    +
    +func (m *Ingress) Marshal() (dAtA []byte, err error) {
    +	size := m.Size()
    +	dAtA = make([]byte, size)
    +	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    +	if err != nil {
    +		return nil, err
    +	}
    +	return dAtA[:n], nil
    +}
    +
    +func (m *Ingress) MarshalTo(dAtA []byte) (int, error) {
    +	size := m.Size()
    +	return m.MarshalToSizedBuffer(dAtA[:size])
    +}
    +
    +func (m *Ingress) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    +	i := len(dAtA)
    +	_ = i
    +	var l int
    +	_ = l
    +	{
    +		size, err := m.Status.MarshalToSizedBuffer(dAtA[:i])
    +		if err != nil {
    +			return 0, err
    +		}
    +		i -= size
    +		i = encodeVarintGenerated(dAtA, i, uint64(size))
    +	}
    +	i--
    +	dAtA[i] = 0x1a
    +	{
    +		size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
    +		if err != nil {
    +			return 0, err
    +		}
    +		i -= size
    +		i = encodeVarintGenerated(dAtA, i, uint64(size))
    +	}
    +	i--
    +	dAtA[i] = 0x12
    +	{
    +		size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
    +		if err != nil {
    +			return 0, err
    +		}
    +		i -= size
    +		i = encodeVarintGenerated(dAtA, i, uint64(size))
    +	}
    +	i--
    +	dAtA[i] = 0xa
    +	return len(dAtA) - i, nil
    +}
    +
    +func (m *IngressBackend) Marshal() (dAtA []byte, err error) {
    +	size := m.Size()
    +	dAtA = make([]byte, size)
    +	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    +	if err != nil {
    +		return nil, err
    +	}
    +	return dAtA[:n], nil
    +}
    +
    +func (m *IngressBackend) MarshalTo(dAtA []byte) (int, error) {
    +	size := m.Size()
    +	return m.MarshalToSizedBuffer(dAtA[:size])
    +}
    +
    +func (m *IngressBackend) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    +	i := len(dAtA)
    +	_ = i
    +	var l int
    +	_ = l
    +	if m.Service != nil {
    +		{
    +			size, err := m.Service.MarshalToSizedBuffer(dAtA[:i])
    +			if err != nil {
    +				return 0, err
    +			}
    +			i -= size
    +			i = encodeVarintGenerated(dAtA, i, uint64(size))
    +		}
    +		i--
    +		dAtA[i] = 0x22
    +	}
    +	if m.Resource != nil {
    +		{
    +			size, err := m.Resource.MarshalToSizedBuffer(dAtA[:i])
    +			if err != nil {
    +				return 0, err
    +			}
    +			i -= size
     			i = encodeVarintGenerated(dAtA, i, uint64(size))
     		}
     		i--
    @@ -2137,6 +2508,49 @@ func (m *NetworkPolicySpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
     	return len(dAtA) - i, nil
     }
     
    +func (m *ParentReference) Marshal() (dAtA []byte, err error) {
    +	size := m.Size()
    +	dAtA = make([]byte, size)
    +	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    +	if err != nil {
    +		return nil, err
    +	}
    +	return dAtA[:n], nil
    +}
    +
    +func (m *ParentReference) MarshalTo(dAtA []byte) (int, error) {
    +	size := m.Size()
    +	return m.MarshalToSizedBuffer(dAtA[:size])
    +}
    +
    +func (m *ParentReference) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    +	i := len(dAtA)
    +	_ = i
    +	var l int
    +	_ = l
    +	i -= len(m.Name)
    +	copy(dAtA[i:], m.Name)
    +	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
    +	i--
    +	dAtA[i] = 0x22
    +	i -= len(m.Namespace)
    +	copy(dAtA[i:], m.Namespace)
    +	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace)))
    +	i--
    +	dAtA[i] = 0x1a
    +	i -= len(m.Resource)
    +	copy(dAtA[i:], m.Resource)
    +	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resource)))
    +	i--
    +	dAtA[i] = 0x12
    +	i -= len(m.Group)
    +	copy(dAtA[i:], m.Group)
    +	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group)))
    +	i--
    +	dAtA[i] = 0xa
    +	return len(dAtA) - i, nil
    +}
    +
     func (m *ServiceBackendPort) Marshal() (dAtA []byte, err error) {
     	size := m.Size()
     	dAtA = make([]byte, size)
    @@ -2168,72 +2582,284 @@ func (m *ServiceBackendPort) MarshalToSizedBuffer(dAtA []byte) (int, error) {
     	return len(dAtA) - i, nil
     }
     
    -func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int {
    -	offset -= sovGenerated(v)
    -	base := offset
    -	for v >= 1<<7 {
    -		dAtA[offset] = uint8(v&0x7f | 0x80)
    -		v >>= 7
    -		offset++
    +func (m *ServiceCIDR) Marshal() (dAtA []byte, err error) {
    +	size := m.Size()
    +	dAtA = make([]byte, size)
    +	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    +	if err != nil {
    +		return nil, err
     	}
    -	dAtA[offset] = uint8(v)
    -	return base
    +	return dAtA[:n], nil
     }
    -func (m *HTTPIngressPath) Size() (n int) {
    -	if m == nil {
    -		return 0
    -	}
    -	var l int
    -	_ = l
    -	l = len(m.Path)
    -	n += 1 + l + sovGenerated(uint64(l))
    -	l = m.Backend.Size()
    -	n += 1 + l + sovGenerated(uint64(l))
    -	if m.PathType != nil {
    -		l = len(*m.PathType)
    -		n += 1 + l + sovGenerated(uint64(l))
    -	}
    -	return n
    +
    +func (m *ServiceCIDR) MarshalTo(dAtA []byte) (int, error) {
    +	size := m.Size()
    +	return m.MarshalToSizedBuffer(dAtA[:size])
     }
     
    -func (m *HTTPIngressRuleValue) Size() (n int) {
    -	if m == nil {
    -		return 0
    -	}
    +func (m *ServiceCIDR) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    +	i := len(dAtA)
    +	_ = i
     	var l int
     	_ = l
    -	if len(m.Paths) > 0 {
    -		for _, e := range m.Paths {
    -			l = e.Size()
    -			n += 1 + l + sovGenerated(uint64(l))
    +	{
    +		size, err := m.Status.MarshalToSizedBuffer(dAtA[:i])
    +		if err != nil {
    +			return 0, err
     		}
    +		i -= size
    +		i = encodeVarintGenerated(dAtA, i, uint64(size))
     	}
    -	return n
    -}
    -
    -func (m *IPBlock) Size() (n int) {
    -	if m == nil {
    -		return 0
    +	i--
    +	dAtA[i] = 0x1a
    +	{
    +		size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
    +		if err != nil {
    +			return 0, err
    +		}
    +		i -= size
    +		i = encodeVarintGenerated(dAtA, i, uint64(size))
     	}
    -	var l int
    -	_ = l
    -	l = len(m.CIDR)
    -	n += 1 + l + sovGenerated(uint64(l))
    -	if len(m.Except) > 0 {
    -		for _, s := range m.Except {
    -			l = len(s)
    -			n += 1 + l + sovGenerated(uint64(l))
    +	i--
    +	dAtA[i] = 0x12
    +	{
    +		size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
    +		if err != nil {
    +			return 0, err
     		}
    +		i -= size
    +		i = encodeVarintGenerated(dAtA, i, uint64(size))
     	}
    -	return n
    +	i--
    +	dAtA[i] = 0xa
    +	return len(dAtA) - i, nil
     }
     
    -func (m *Ingress) Size() (n int) {
    -	if m == nil {
    -		return 0
    +func (m *ServiceCIDRList) Marshal() (dAtA []byte, err error) {
    +	size := m.Size()
    +	dAtA = make([]byte, size)
    +	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    +	if err != nil {
    +		return nil, err
     	}
    -	var l int
    -	_ = l
    +	return dAtA[:n], nil
    +}
    +
    +func (m *ServiceCIDRList) MarshalTo(dAtA []byte) (int, error) {
    +	size := m.Size()
    +	return m.MarshalToSizedBuffer(dAtA[:size])
    +}
    +
    +func (m *ServiceCIDRList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    +	i := len(dAtA)
    +	_ = i
    +	var l int
    +	_ = l
    +	if len(m.Items) > 0 {
    +		for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
    +			{
    +				size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
    +				if err != nil {
    +					return 0, err
    +				}
    +				i -= size
    +				i = encodeVarintGenerated(dAtA, i, uint64(size))
    +			}
    +			i--
    +			dAtA[i] = 0x12
    +		}
    +	}
    +	{
    +		size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
    +		if err != nil {
    +			return 0, err
    +		}
    +		i -= size
    +		i = encodeVarintGenerated(dAtA, i, uint64(size))
    +	}
    +	i--
    +	dAtA[i] = 0xa
    +	return len(dAtA) - i, nil
    +}
    +
    +func (m *ServiceCIDRSpec) Marshal() (dAtA []byte, err error) {
    +	size := m.Size()
    +	dAtA = make([]byte, size)
    +	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    +	if err != nil {
    +		return nil, err
    +	}
    +	return dAtA[:n], nil
    +}
    +
    +func (m *ServiceCIDRSpec) MarshalTo(dAtA []byte) (int, error) {
    +	size := m.Size()
    +	return m.MarshalToSizedBuffer(dAtA[:size])
    +}
    +
    +func (m *ServiceCIDRSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    +	i := len(dAtA)
    +	_ = i
    +	var l int
    +	_ = l
    +	if len(m.CIDRs) > 0 {
    +		for iNdEx := len(m.CIDRs) - 1; iNdEx >= 0; iNdEx-- {
    +			i -= len(m.CIDRs[iNdEx])
    +			copy(dAtA[i:], m.CIDRs[iNdEx])
    +			i = encodeVarintGenerated(dAtA, i, uint64(len(m.CIDRs[iNdEx])))
    +			i--
    +			dAtA[i] = 0xa
    +		}
    +	}
    +	return len(dAtA) - i, nil
    +}
    +
    +func (m *ServiceCIDRStatus) Marshal() (dAtA []byte, err error) {
    +	size := m.Size()
    +	dAtA = make([]byte, size)
    +	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    +	if err != nil {
    +		return nil, err
    +	}
    +	return dAtA[:n], nil
    +}
    +
    +func (m *ServiceCIDRStatus) MarshalTo(dAtA []byte) (int, error) {
    +	size := m.Size()
    +	return m.MarshalToSizedBuffer(dAtA[:size])
    +}
    +
    +func (m *ServiceCIDRStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    +	i := len(dAtA)
    +	_ = i
    +	var l int
    +	_ = l
    +	if len(m.Conditions) > 0 {
    +		for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- {
    +			{
    +				size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i])
    +				if err != nil {
    +					return 0, err
    +				}
    +				i -= size
    +				i = encodeVarintGenerated(dAtA, i, uint64(size))
    +			}
    +			i--
    +			dAtA[i] = 0xa
    +		}
    +	}
    +	return len(dAtA) - i, nil
    +}
    +
    +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int {
    +	offset -= sovGenerated(v)
    +	base := offset
    +	for v >= 1<<7 {
    +		dAtA[offset] = uint8(v&0x7f | 0x80)
    +		v >>= 7
    +		offset++
    +	}
    +	dAtA[offset] = uint8(v)
    +	return base
    +}
    +func (m *HTTPIngressPath) Size() (n int) {
    +	if m == nil {
    +		return 0
    +	}
    +	var l int
    +	_ = l
    +	l = len(m.Path)
    +	n += 1 + l + sovGenerated(uint64(l))
    +	l = m.Backend.Size()
    +	n += 1 + l + sovGenerated(uint64(l))
    +	if m.PathType != nil {
    +		l = len(*m.PathType)
    +		n += 1 + l + sovGenerated(uint64(l))
    +	}
    +	return n
    +}
    +
    +func (m *HTTPIngressRuleValue) Size() (n int) {
    +	if m == nil {
    +		return 0
    +	}
    +	var l int
    +	_ = l
    +	if len(m.Paths) > 0 {
    +		for _, e := range m.Paths {
    +			l = e.Size()
    +			n += 1 + l + sovGenerated(uint64(l))
    +		}
    +	}
    +	return n
    +}
    +
    +func (m *IPAddress) Size() (n int) {
    +	if m == nil {
    +		return 0
    +	}
    +	var l int
    +	_ = l
    +	l = m.ObjectMeta.Size()
    +	n += 1 + l + sovGenerated(uint64(l))
    +	l = m.Spec.Size()
    +	n += 1 + l + sovGenerated(uint64(l))
    +	return n
    +}
    +
    +func (m *IPAddressList) Size() (n int) {
    +	if m == nil {
    +		return 0
    +	}
    +	var l int
    +	_ = l
    +	l = m.ListMeta.Size()
    +	n += 1 + l + sovGenerated(uint64(l))
    +	if len(m.Items) > 0 {
    +		for _, e := range m.Items {
    +			l = e.Size()
    +			n += 1 + l + sovGenerated(uint64(l))
    +		}
    +	}
    +	return n
    +}
    +
    +func (m *IPAddressSpec) Size() (n int) {
    +	if m == nil {
    +		return 0
    +	}
    +	var l int
    +	_ = l
    +	if m.ParentRef != nil {
    +		l = m.ParentRef.Size()
    +		n += 1 + l + sovGenerated(uint64(l))
    +	}
    +	return n
    +}
    +
    +func (m *IPBlock) Size() (n int) {
    +	if m == nil {
    +		return 0
    +	}
    +	var l int
    +	_ = l
    +	l = len(m.CIDR)
    +	n += 1 + l + sovGenerated(uint64(l))
    +	if len(m.Except) > 0 {
    +		for _, s := range m.Except {
    +			l = len(s)
    +			n += 1 + l + sovGenerated(uint64(l))
    +		}
    +	}
    +	return n
    +}
    +
    +func (m *Ingress) Size() (n int) {
    +	if m == nil {
    +		return 0
    +	}
    +	var l int
    +	_ = l
     	l = m.ObjectMeta.Size()
     	n += 1 + l + sovGenerated(uint64(l))
     	l = m.Spec.Size()
    @@ -2635,6 +3261,23 @@ func (m *NetworkPolicySpec) Size() (n int) {
     	return n
     }
     
    +func (m *ParentReference) Size() (n int) {
    +	if m == nil {
    +		return 0
    +	}
    +	var l int
    +	_ = l
    +	l = len(m.Group)
    +	n += 1 + l + sovGenerated(uint64(l))
    +	l = len(m.Resource)
    +	n += 1 + l + sovGenerated(uint64(l))
    +	l = len(m.Namespace)
    +	n += 1 + l + sovGenerated(uint64(l))
    +	l = len(m.Name)
    +	n += 1 + l + sovGenerated(uint64(l))
    +	return n
    +}
    +
     func (m *ServiceBackendPort) Size() (n int) {
     	if m == nil {
     		return 0
    @@ -2647,39 +3290,138 @@ func (m *ServiceBackendPort) Size() (n int) {
     	return n
     }
     
    -func sovGenerated(x uint64) (n int) {
    -	return (math_bits.Len64(x|1) + 6) / 7
    -}
    -func sozGenerated(x uint64) (n int) {
    -	return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63))))
    -}
    -func (this *HTTPIngressPath) String() string {
    -	if this == nil {
    -		return "nil"
    +func (m *ServiceCIDR) Size() (n int) {
    +	if m == nil {
    +		return 0
     	}
    -	s := strings.Join([]string{`&HTTPIngressPath{`,
    -		`Path:` + fmt.Sprintf("%v", this.Path) + `,`,
    -		`Backend:` + strings.Replace(strings.Replace(this.Backend.String(), "IngressBackend", "IngressBackend", 1), `&`, ``, 1) + `,`,
    -		`PathType:` + valueToStringGenerated(this.PathType) + `,`,
    -		`}`,
    -	}, "")
    -	return s
    +	var l int
    +	_ = l
    +	l = m.ObjectMeta.Size()
    +	n += 1 + l + sovGenerated(uint64(l))
    +	l = m.Spec.Size()
    +	n += 1 + l + sovGenerated(uint64(l))
    +	l = m.Status.Size()
    +	n += 1 + l + sovGenerated(uint64(l))
    +	return n
     }
    -func (this *HTTPIngressRuleValue) String() string {
    -	if this == nil {
    -		return "nil"
    +
    +func (m *ServiceCIDRList) Size() (n int) {
    +	if m == nil {
    +		return 0
     	}
    -	repeatedStringForPaths := "[]HTTPIngressPath{"
    -	for _, f := range this.Paths {
    -		repeatedStringForPaths += strings.Replace(strings.Replace(f.String(), "HTTPIngressPath", "HTTPIngressPath", 1), `&`, ``, 1) + ","
    +	var l int
    +	_ = l
    +	l = m.ListMeta.Size()
    +	n += 1 + l + sovGenerated(uint64(l))
    +	if len(m.Items) > 0 {
    +		for _, e := range m.Items {
    +			l = e.Size()
    +			n += 1 + l + sovGenerated(uint64(l))
    +		}
     	}
    -	repeatedStringForPaths += "}"
    +	return n
    +}
    +
    +func (m *ServiceCIDRSpec) Size() (n int) {
    +	if m == nil {
    +		return 0
    +	}
    +	var l int
    +	_ = l
    +	if len(m.CIDRs) > 0 {
    +		for _, s := range m.CIDRs {
    +			l = len(s)
    +			n += 1 + l + sovGenerated(uint64(l))
    +		}
    +	}
    +	return n
    +}
    +
    +func (m *ServiceCIDRStatus) Size() (n int) {
    +	if m == nil {
    +		return 0
    +	}
    +	var l int
    +	_ = l
    +	if len(m.Conditions) > 0 {
    +		for _, e := range m.Conditions {
    +			l = e.Size()
    +			n += 1 + l + sovGenerated(uint64(l))
    +		}
    +	}
    +	return n
    +}
    +
    +func sovGenerated(x uint64) (n int) {
    +	return (math_bits.Len64(x|1) + 6) / 7
    +}
    +func sozGenerated(x uint64) (n int) {
    +	return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63))))
    +}
    +func (this *HTTPIngressPath) String() string {
    +	if this == nil {
    +		return "nil"
    +	}
    +	s := strings.Join([]string{`&HTTPIngressPath{`,
    +		`Path:` + fmt.Sprintf("%v", this.Path) + `,`,
    +		`Backend:` + strings.Replace(strings.Replace(this.Backend.String(), "IngressBackend", "IngressBackend", 1), `&`, ``, 1) + `,`,
    +		`PathType:` + valueToStringGenerated(this.PathType) + `,`,
    +		`}`,
    +	}, "")
    +	return s
    +}
    +func (this *HTTPIngressRuleValue) String() string {
    +	if this == nil {
    +		return "nil"
    +	}
    +	repeatedStringForPaths := "[]HTTPIngressPath{"
    +	for _, f := range this.Paths {
    +		repeatedStringForPaths += strings.Replace(strings.Replace(f.String(), "HTTPIngressPath", "HTTPIngressPath", 1), `&`, ``, 1) + ","
    +	}
    +	repeatedStringForPaths += "}"
     	s := strings.Join([]string{`&HTTPIngressRuleValue{`,
     		`Paths:` + repeatedStringForPaths + `,`,
     		`}`,
     	}, "")
     	return s
     }
    +func (this *IPAddress) String() string {
    +	if this == nil {
    +		return "nil"
    +	}
    +	s := strings.Join([]string{`&IPAddress{`,
    +		`ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
    +		`Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "IPAddressSpec", "IPAddressSpec", 1), `&`, ``, 1) + `,`,
    +		`}`,
    +	}, "")
    +	return s
    +}
    +func (this *IPAddressList) String() string {
    +	if this == nil {
    +		return "nil"
    +	}
    +	repeatedStringForItems := "[]IPAddress{"
    +	for _, f := range this.Items {
    +		repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "IPAddress", "IPAddress", 1), `&`, ``, 1) + ","
    +	}
    +	repeatedStringForItems += "}"
    +	s := strings.Join([]string{`&IPAddressList{`,
    +		`ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
    +		`Items:` + repeatedStringForItems + `,`,
    +		`}`,
    +	}, "")
    +	return s
    +}
    +func (this *IPAddressSpec) String() string {
    +	if this == nil {
    +		return "nil"
    +	}
    +	s := strings.Join([]string{`&IPAddressSpec{`,
    +		`ParentRef:` + strings.Replace(this.ParentRef.String(), "ParentReference", "ParentReference", 1) + `,`,
    +		`}`,
    +	}, "")
    +	return s
    +}
     func (this *IPBlock) String() string {
     	if this == nil {
     		return "nil"
    @@ -3018,6 +3760,19 @@ func (this *NetworkPolicySpec) String() string {
     	}, "")
     	return s
     }
    +func (this *ParentReference) String() string {
    +	if this == nil {
    +		return "nil"
    +	}
    +	s := strings.Join([]string{`&ParentReference{`,
    +		`Group:` + fmt.Sprintf("%v", this.Group) + `,`,
    +		`Resource:` + fmt.Sprintf("%v", this.Resource) + `,`,
    +		`Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`,
    +		`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
    +		`}`,
    +	}, "")
    +	return s
    +}
     func (this *ServiceBackendPort) String() string {
     	if this == nil {
     		return "nil"
    @@ -3029,6 +3784,59 @@ func (this *ServiceBackendPort) String() string {
     	}, "")
     	return s
     }
    +func (this *ServiceCIDR) String() string {
    +	if this == nil {
    +		return "nil"
    +	}
    +	s := strings.Join([]string{`&ServiceCIDR{`,
    +		`ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
    +		`Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ServiceCIDRSpec", "ServiceCIDRSpec", 1), `&`, ``, 1) + `,`,
    +		`Status:` + strings.Replace(strings.Replace(this.Status.String(), "ServiceCIDRStatus", "ServiceCIDRStatus", 1), `&`, ``, 1) + `,`,
    +		`}`,
    +	}, "")
    +	return s
    +}
    +func (this *ServiceCIDRList) String() string {
    +	if this == nil {
    +		return "nil"
    +	}
    +	repeatedStringForItems := "[]ServiceCIDR{"
    +	for _, f := range this.Items {
    +		repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ServiceCIDR", "ServiceCIDR", 1), `&`, ``, 1) + ","
    +	}
    +	repeatedStringForItems += "}"
    +	s := strings.Join([]string{`&ServiceCIDRList{`,
    +		`ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
    +		`Items:` + repeatedStringForItems + `,`,
    +		`}`,
    +	}, "")
    +	return s
    +}
    +func (this *ServiceCIDRSpec) String() string {
    +	if this == nil {
    +		return "nil"
    +	}
    +	s := strings.Join([]string{`&ServiceCIDRSpec{`,
    +		`CIDRs:` + fmt.Sprintf("%v", this.CIDRs) + `,`,
    +		`}`,
    +	}, "")
    +	return s
    +}
    +func (this *ServiceCIDRStatus) String() string {
    +	if this == nil {
    +		return "nil"
    +	}
    +	repeatedStringForConditions := "[]Condition{"
    +	for _, f := range this.Conditions {
    +		repeatedStringForConditions += fmt.Sprintf("%v", f) + ","
    +	}
    +	repeatedStringForConditions += "}"
    +	s := strings.Join([]string{`&ServiceCIDRStatus{`,
    +		`Conditions:` + repeatedStringForConditions + `,`,
    +		`}`,
    +	}, "")
    +	return s
    +}
     func valueToStringGenerated(v interface{}) string {
     	rv := reflect.ValueOf(v)
     	if rv.IsNil() {
    @@ -3269,7 +4077,7 @@ func (m *HTTPIngressRuleValue) Unmarshal(dAtA []byte) error {
     	}
     	return nil
     }
    -func (m *IPBlock) Unmarshal(dAtA []byte) error {
    +func (m *IPAddress) Unmarshal(dAtA []byte) error {
     	l := len(dAtA)
     	iNdEx := 0
     	for iNdEx < l {
    @@ -3292,17 +4100,17 @@ func (m *IPBlock) Unmarshal(dAtA []byte) error {
     		fieldNum := int32(wire >> 3)
     		wireType := int(wire & 0x7)
     		if wireType == 4 {
    -			return fmt.Errorf("proto: IPBlock: wiretype end group for non-group")
    +			return fmt.Errorf("proto: IPAddress: wiretype end group for non-group")
     		}
     		if fieldNum <= 0 {
    -			return fmt.Errorf("proto: IPBlock: illegal tag %d (wire type %d)", fieldNum, wire)
    +			return fmt.Errorf("proto: IPAddress: illegal tag %d (wire type %d)", fieldNum, wire)
     		}
     		switch fieldNum {
     		case 1:
     			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field CIDR", wireType)
    +				return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
     			}
    -			var stringLen uint64
    +			var msglen int
     			for shift := uint(0); ; shift += 7 {
     				if shift >= 64 {
     					return ErrIntOverflowGenerated
    @@ -3312,29 +4120,30 @@ func (m *IPBlock) Unmarshal(dAtA []byte) error {
     				}
     				b := dAtA[iNdEx]
     				iNdEx++
    -				stringLen |= uint64(b&0x7F) << shift
    +				msglen |= int(b&0x7F) << shift
     				if b < 0x80 {
     					break
     				}
     			}
    -			intStringLen := int(stringLen)
    -			if intStringLen < 0 {
    +			if msglen < 0 {
     				return ErrInvalidLengthGenerated
     			}
    -			postIndex := iNdEx + intStringLen
    +			postIndex := iNdEx + msglen
     			if postIndex < 0 {
     				return ErrInvalidLengthGenerated
     			}
     			if postIndex > l {
     				return io.ErrUnexpectedEOF
     			}
    -			m.CIDR = string(dAtA[iNdEx:postIndex])
    +			if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
     			iNdEx = postIndex
     		case 2:
     			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field Except", wireType)
    +				return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
     			}
    -			var stringLen uint64
    +			var msglen int
     			for shift := uint(0); ; shift += 7 {
     				if shift >= 64 {
     					return ErrIntOverflowGenerated
    @@ -3344,23 +4153,24 @@ func (m *IPBlock) Unmarshal(dAtA []byte) error {
     				}
     				b := dAtA[iNdEx]
     				iNdEx++
    -				stringLen |= uint64(b&0x7F) << shift
    +				msglen |= int(b&0x7F) << shift
     				if b < 0x80 {
     					break
     				}
     			}
    -			intStringLen := int(stringLen)
    -			if intStringLen < 0 {
    +			if msglen < 0 {
     				return ErrInvalidLengthGenerated
     			}
    -			postIndex := iNdEx + intStringLen
    +			postIndex := iNdEx + msglen
     			if postIndex < 0 {
     				return ErrInvalidLengthGenerated
     			}
     			if postIndex > l {
     				return io.ErrUnexpectedEOF
     			}
    -			m.Except = append(m.Except, string(dAtA[iNdEx:postIndex]))
    +			if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
     			iNdEx = postIndex
     		default:
     			iNdEx = preIndex
    @@ -3383,7 +4193,7 @@ func (m *IPBlock) Unmarshal(dAtA []byte) error {
     	}
     	return nil
     }
    -func (m *Ingress) Unmarshal(dAtA []byte) error {
    +func (m *IPAddressList) Unmarshal(dAtA []byte) error {
     	l := len(dAtA)
     	iNdEx := 0
     	for iNdEx < l {
    @@ -3406,15 +4216,15 @@ func (m *Ingress) Unmarshal(dAtA []byte) error {
     		fieldNum := int32(wire >> 3)
     		wireType := int(wire & 0x7)
     		if wireType == 4 {
    -			return fmt.Errorf("proto: Ingress: wiretype end group for non-group")
    +			return fmt.Errorf("proto: IPAddressList: wiretype end group for non-group")
     		}
     		if fieldNum <= 0 {
    -			return fmt.Errorf("proto: Ingress: illegal tag %d (wire type %d)", fieldNum, wire)
    +			return fmt.Errorf("proto: IPAddressList: illegal tag %d (wire type %d)", fieldNum, wire)
     		}
     		switch fieldNum {
     		case 1:
     			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
    +				return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
     			}
     			var msglen int
     			for shift := uint(0); ; shift += 7 {
    @@ -3441,46 +4251,13 @@ func (m *Ingress) Unmarshal(dAtA []byte) error {
     			if postIndex > l {
     				return io.ErrUnexpectedEOF
     			}
    -			if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +			if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
     				return err
     			}
     			iNdEx = postIndex
     		case 2:
     			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
    -			}
    -			var msglen int
    -			for shift := uint(0); ; shift += 7 {
    -				if shift >= 64 {
    -					return ErrIntOverflowGenerated
    -				}
    -				if iNdEx >= l {
    -					return io.ErrUnexpectedEOF
    -				}
    -				b := dAtA[iNdEx]
    -				iNdEx++
    -				msglen |= int(b&0x7F) << shift
    -				if b < 0x80 {
    -					break
    -				}
    -			}
    -			if msglen < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			postIndex := iNdEx + msglen
    -			if postIndex < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if postIndex > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    -				return err
    -			}
    -			iNdEx = postIndex
    -		case 3:
    -			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
    +				return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
     			}
     			var msglen int
     			for shift := uint(0); ; shift += 7 {
    @@ -3507,7 +4284,8 @@ func (m *Ingress) Unmarshal(dAtA []byte) error {
     			if postIndex > l {
     				return io.ErrUnexpectedEOF
     			}
    -			if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +			m.Items = append(m.Items, IPAddress{})
    +			if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
     				return err
     			}
     			iNdEx = postIndex
    @@ -3532,7 +4310,7 @@ func (m *Ingress) Unmarshal(dAtA []byte) error {
     	}
     	return nil
     }
    -func (m *IngressBackend) Unmarshal(dAtA []byte) error {
    +func (m *IPAddressSpec) Unmarshal(dAtA []byte) error {
     	l := len(dAtA)
     	iNdEx := 0
     	for iNdEx < l {
    @@ -3555,51 +4333,15 @@ func (m *IngressBackend) Unmarshal(dAtA []byte) error {
     		fieldNum := int32(wire >> 3)
     		wireType := int(wire & 0x7)
     		if wireType == 4 {
    -			return fmt.Errorf("proto: IngressBackend: wiretype end group for non-group")
    +			return fmt.Errorf("proto: IPAddressSpec: wiretype end group for non-group")
     		}
     		if fieldNum <= 0 {
    -			return fmt.Errorf("proto: IngressBackend: illegal tag %d (wire type %d)", fieldNum, wire)
    +			return fmt.Errorf("proto: IPAddressSpec: illegal tag %d (wire type %d)", fieldNum, wire)
     		}
     		switch fieldNum {
    -		case 3:
    -			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType)
    -			}
    -			var msglen int
    -			for shift := uint(0); ; shift += 7 {
    -				if shift >= 64 {
    -					return ErrIntOverflowGenerated
    -				}
    -				if iNdEx >= l {
    -					return io.ErrUnexpectedEOF
    -				}
    -				b := dAtA[iNdEx]
    -				iNdEx++
    -				msglen |= int(b&0x7F) << shift
    -				if b < 0x80 {
    -					break
    -				}
    -			}
    -			if msglen < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			postIndex := iNdEx + msglen
    -			if postIndex < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if postIndex > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			if m.Resource == nil {
    -				m.Resource = &v11.TypedLocalObjectReference{}
    -			}
    -			if err := m.Resource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    -				return err
    -			}
    -			iNdEx = postIndex
    -		case 4:
    +		case 1:
     			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field Service", wireType)
    +				return fmt.Errorf("proto: wrong wireType = %d for field ParentRef", wireType)
     			}
     			var msglen int
     			for shift := uint(0); ; shift += 7 {
    @@ -3626,10 +4368,10 @@ func (m *IngressBackend) Unmarshal(dAtA []byte) error {
     			if postIndex > l {
     				return io.ErrUnexpectedEOF
     			}
    -			if m.Service == nil {
    -				m.Service = &IngressServiceBackend{}
    +			if m.ParentRef == nil {
    +				m.ParentRef = &ParentReference{}
     			}
    -			if err := m.Service.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +			if err := m.ParentRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
     				return err
     			}
     			iNdEx = postIndex
    @@ -3654,7 +4396,7 @@ func (m *IngressBackend) Unmarshal(dAtA []byte) error {
     	}
     	return nil
     }
    -func (m *IngressClass) Unmarshal(dAtA []byte) error {
    +func (m *IPBlock) Unmarshal(dAtA []byte) error {
     	l := len(dAtA)
     	iNdEx := 0
     	for iNdEx < l {
    @@ -3677,17 +4419,17 @@ func (m *IngressClass) Unmarshal(dAtA []byte) error {
     		fieldNum := int32(wire >> 3)
     		wireType := int(wire & 0x7)
     		if wireType == 4 {
    -			return fmt.Errorf("proto: IngressClass: wiretype end group for non-group")
    +			return fmt.Errorf("proto: IPBlock: wiretype end group for non-group")
     		}
     		if fieldNum <= 0 {
    -			return fmt.Errorf("proto: IngressClass: illegal tag %d (wire type %d)", fieldNum, wire)
    +			return fmt.Errorf("proto: IPBlock: illegal tag %d (wire type %d)", fieldNum, wire)
     		}
     		switch fieldNum {
     		case 1:
     			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
    +				return fmt.Errorf("proto: wrong wireType = %d for field CIDR", wireType)
     			}
    -			var msglen int
    +			var stringLen uint64
     			for shift := uint(0); ; shift += 7 {
     				if shift >= 64 {
     					return ErrIntOverflowGenerated
    @@ -3697,30 +4439,29 @@ func (m *IngressClass) Unmarshal(dAtA []byte) error {
     				}
     				b := dAtA[iNdEx]
     				iNdEx++
    -				msglen |= int(b&0x7F) << shift
    +				stringLen |= uint64(b&0x7F) << shift
     				if b < 0x80 {
     					break
     				}
     			}
    -			if msglen < 0 {
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
     				return ErrInvalidLengthGenerated
     			}
    -			postIndex := iNdEx + msglen
    +			postIndex := iNdEx + intStringLen
     			if postIndex < 0 {
     				return ErrInvalidLengthGenerated
     			}
     			if postIndex > l {
     				return io.ErrUnexpectedEOF
     			}
    -			if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    -				return err
    -			}
    +			m.CIDR = string(dAtA[iNdEx:postIndex])
     			iNdEx = postIndex
     		case 2:
     			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
    +				return fmt.Errorf("proto: wrong wireType = %d for field Except", wireType)
     			}
    -			var msglen int
    +			var stringLen uint64
     			for shift := uint(0); ; shift += 7 {
     				if shift >= 64 {
     					return ErrIntOverflowGenerated
    @@ -3730,24 +4471,23 @@ func (m *IngressClass) Unmarshal(dAtA []byte) error {
     				}
     				b := dAtA[iNdEx]
     				iNdEx++
    -				msglen |= int(b&0x7F) << shift
    +				stringLen |= uint64(b&0x7F) << shift
     				if b < 0x80 {
     					break
     				}
     			}
    -			if msglen < 0 {
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
     				return ErrInvalidLengthGenerated
     			}
    -			postIndex := iNdEx + msglen
    +			postIndex := iNdEx + intStringLen
     			if postIndex < 0 {
     				return ErrInvalidLengthGenerated
     			}
     			if postIndex > l {
     				return io.ErrUnexpectedEOF
     			}
    -			if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    -				return err
    -			}
    +			m.Except = append(m.Except, string(dAtA[iNdEx:postIndex]))
     			iNdEx = postIndex
     		default:
     			iNdEx = preIndex
    @@ -3770,7 +4510,7 @@ func (m *IngressClass) Unmarshal(dAtA []byte) error {
     	}
     	return nil
     }
    -func (m *IngressClassList) Unmarshal(dAtA []byte) error {
    +func (m *Ingress) Unmarshal(dAtA []byte) error {
     	l := len(dAtA)
     	iNdEx := 0
     	for iNdEx < l {
    @@ -3793,15 +4533,15 @@ func (m *IngressClassList) Unmarshal(dAtA []byte) error {
     		fieldNum := int32(wire >> 3)
     		wireType := int(wire & 0x7)
     		if wireType == 4 {
    -			return fmt.Errorf("proto: IngressClassList: wiretype end group for non-group")
    +			return fmt.Errorf("proto: Ingress: wiretype end group for non-group")
     		}
     		if fieldNum <= 0 {
    -			return fmt.Errorf("proto: IngressClassList: illegal tag %d (wire type %d)", fieldNum, wire)
    +			return fmt.Errorf("proto: Ingress: illegal tag %d (wire type %d)", fieldNum, wire)
     		}
     		switch fieldNum {
     		case 1:
     			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
    +				return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
     			}
     			var msglen int
     			for shift := uint(0); ; shift += 7 {
    @@ -3828,13 +4568,13 @@ func (m *IngressClassList) Unmarshal(dAtA []byte) error {
     			if postIndex > l {
     				return io.ErrUnexpectedEOF
     			}
    -			if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +			if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
     				return err
     			}
     			iNdEx = postIndex
     		case 2:
     			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
    +				return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
     			}
     			var msglen int
     			for shift := uint(0); ; shift += 7 {
    @@ -3861,8 +4601,40 @@ func (m *IngressClassList) Unmarshal(dAtA []byte) error {
     			if postIndex > l {
     				return io.ErrUnexpectedEOF
     			}
    -			m.Items = append(m.Items, IngressClass{})
    -			if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +			if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		case 3:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
     				return err
     			}
     			iNdEx = postIndex
    @@ -3887,7 +4659,7 @@ func (m *IngressClassList) Unmarshal(dAtA []byte) error {
     	}
     	return nil
     }
    -func (m *IngressClassParametersReference) Unmarshal(dAtA []byte) error {
    +func (m *IngressBackend) Unmarshal(dAtA []byte) error {
     	l := len(dAtA)
     	iNdEx := 0
     	for iNdEx < l {
    @@ -3910,50 +4682,17 @@ func (m *IngressClassParametersReference) Unmarshal(dAtA []byte) error {
     		fieldNum := int32(wire >> 3)
     		wireType := int(wire & 0x7)
     		if wireType == 4 {
    -			return fmt.Errorf("proto: IngressClassParametersReference: wiretype end group for non-group")
    +			return fmt.Errorf("proto: IngressBackend: wiretype end group for non-group")
     		}
     		if fieldNum <= 0 {
    -			return fmt.Errorf("proto: IngressClassParametersReference: illegal tag %d (wire type %d)", fieldNum, wire)
    +			return fmt.Errorf("proto: IngressBackend: illegal tag %d (wire type %d)", fieldNum, wire)
     		}
     		switch fieldNum {
    -		case 1:
    -			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field APIGroup", wireType)
    -			}
    -			var stringLen uint64
    -			for shift := uint(0); ; shift += 7 {
    -				if shift >= 64 {
    -					return ErrIntOverflowGenerated
    -				}
    -				if iNdEx >= l {
    -					return io.ErrUnexpectedEOF
    -				}
    -				b := dAtA[iNdEx]
    -				iNdEx++
    -				stringLen |= uint64(b&0x7F) << shift
    -				if b < 0x80 {
    -					break
    -				}
    -			}
    -			intStringLen := int(stringLen)
    -			if intStringLen < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			postIndex := iNdEx + intStringLen
    -			if postIndex < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if postIndex > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			s := string(dAtA[iNdEx:postIndex])
    -			m.APIGroup = &s
    -			iNdEx = postIndex
    -		case 2:
    +		case 3:
     			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType)
    +				return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType)
     			}
    -			var stringLen uint64
    +			var msglen int
     			for shift := uint(0); ; shift += 7 {
     				if shift >= 64 {
     					return ErrIntOverflowGenerated
    @@ -3963,61 +4702,33 @@ func (m *IngressClassParametersReference) Unmarshal(dAtA []byte) error {
     				}
     				b := dAtA[iNdEx]
     				iNdEx++
    -				stringLen |= uint64(b&0x7F) << shift
    +				msglen |= int(b&0x7F) << shift
     				if b < 0x80 {
     					break
     				}
     			}
    -			intStringLen := int(stringLen)
    -			if intStringLen < 0 {
    +			if msglen < 0 {
     				return ErrInvalidLengthGenerated
     			}
    -			postIndex := iNdEx + intStringLen
    +			postIndex := iNdEx + msglen
     			if postIndex < 0 {
     				return ErrInvalidLengthGenerated
     			}
     			if postIndex > l {
     				return io.ErrUnexpectedEOF
     			}
    -			m.Kind = string(dAtA[iNdEx:postIndex])
    -			iNdEx = postIndex
    -		case 3:
    -			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
    -			}
    -			var stringLen uint64
    -			for shift := uint(0); ; shift += 7 {
    -				if shift >= 64 {
    -					return ErrIntOverflowGenerated
    -				}
    -				if iNdEx >= l {
    -					return io.ErrUnexpectedEOF
    -				}
    -				b := dAtA[iNdEx]
    -				iNdEx++
    -				stringLen |= uint64(b&0x7F) << shift
    -				if b < 0x80 {
    -					break
    -				}
    -			}
    -			intStringLen := int(stringLen)
    -			if intStringLen < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			postIndex := iNdEx + intStringLen
    -			if postIndex < 0 {
    -				return ErrInvalidLengthGenerated
    +			if m.Resource == nil {
    +				m.Resource = &v11.TypedLocalObjectReference{}
     			}
    -			if postIndex > l {
    -				return io.ErrUnexpectedEOF
    +			if err := m.Resource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
     			}
    -			m.Name = string(dAtA[iNdEx:postIndex])
     			iNdEx = postIndex
     		case 4:
     			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field Scope", wireType)
    +				return fmt.Errorf("proto: wrong wireType = %d for field Service", wireType)
     			}
    -			var stringLen uint64
    +			var msglen int
     			for shift := uint(0); ; shift += 7 {
     				if shift >= 64 {
     					return ErrIntOverflowGenerated
    @@ -4027,57 +4738,27 @@ func (m *IngressClassParametersReference) Unmarshal(dAtA []byte) error {
     				}
     				b := dAtA[iNdEx]
     				iNdEx++
    -				stringLen |= uint64(b&0x7F) << shift
    +				msglen |= int(b&0x7F) << shift
     				if b < 0x80 {
     					break
     				}
     			}
    -			intStringLen := int(stringLen)
    -			if intStringLen < 0 {
    +			if msglen < 0 {
     				return ErrInvalidLengthGenerated
     			}
    -			postIndex := iNdEx + intStringLen
    +			postIndex := iNdEx + msglen
     			if postIndex < 0 {
     				return ErrInvalidLengthGenerated
     			}
     			if postIndex > l {
     				return io.ErrUnexpectedEOF
     			}
    -			s := string(dAtA[iNdEx:postIndex])
    -			m.Scope = &s
    -			iNdEx = postIndex
    -		case 5:
    -			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType)
    -			}
    -			var stringLen uint64
    -			for shift := uint(0); ; shift += 7 {
    -				if shift >= 64 {
    -					return ErrIntOverflowGenerated
    -				}
    -				if iNdEx >= l {
    -					return io.ErrUnexpectedEOF
    -				}
    -				b := dAtA[iNdEx]
    -				iNdEx++
    -				stringLen |= uint64(b&0x7F) << shift
    -				if b < 0x80 {
    -					break
    -				}
    -			}
    -			intStringLen := int(stringLen)
    -			if intStringLen < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			postIndex := iNdEx + intStringLen
    -			if postIndex < 0 {
    -				return ErrInvalidLengthGenerated
    +			if m.Service == nil {
    +				m.Service = &IngressServiceBackend{}
     			}
    -			if postIndex > l {
    -				return io.ErrUnexpectedEOF
    +			if err := m.Service.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
     			}
    -			s := string(dAtA[iNdEx:postIndex])
    -			m.Namespace = &s
     			iNdEx = postIndex
     		default:
     			iNdEx = preIndex
    @@ -4100,7 +4781,7 @@ func (m *IngressClassParametersReference) Unmarshal(dAtA []byte) error {
     	}
     	return nil
     }
    -func (m *IngressClassSpec) Unmarshal(dAtA []byte) error {
    +func (m *IngressClass) Unmarshal(dAtA []byte) error {
     	l := len(dAtA)
     	iNdEx := 0
     	for iNdEx < l {
    @@ -4123,17 +4804,17 @@ func (m *IngressClassSpec) Unmarshal(dAtA []byte) error {
     		fieldNum := int32(wire >> 3)
     		wireType := int(wire & 0x7)
     		if wireType == 4 {
    -			return fmt.Errorf("proto: IngressClassSpec: wiretype end group for non-group")
    +			return fmt.Errorf("proto: IngressClass: wiretype end group for non-group")
     		}
     		if fieldNum <= 0 {
    -			return fmt.Errorf("proto: IngressClassSpec: illegal tag %d (wire type %d)", fieldNum, wire)
    +			return fmt.Errorf("proto: IngressClass: illegal tag %d (wire type %d)", fieldNum, wire)
     		}
     		switch fieldNum {
     		case 1:
     			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field Controller", wireType)
    +				return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
     			}
    -			var stringLen uint64
    +			var msglen int
     			for shift := uint(0); ; shift += 7 {
     				if shift >= 64 {
     					return ErrIntOverflowGenerated
    @@ -4143,27 +4824,28 @@ func (m *IngressClassSpec) Unmarshal(dAtA []byte) error {
     				}
     				b := dAtA[iNdEx]
     				iNdEx++
    -				stringLen |= uint64(b&0x7F) << shift
    +				msglen |= int(b&0x7F) << shift
     				if b < 0x80 {
     					break
     				}
     			}
    -			intStringLen := int(stringLen)
    -			if intStringLen < 0 {
    +			if msglen < 0 {
     				return ErrInvalidLengthGenerated
     			}
    -			postIndex := iNdEx + intStringLen
    +			postIndex := iNdEx + msglen
     			if postIndex < 0 {
     				return ErrInvalidLengthGenerated
     			}
     			if postIndex > l {
     				return io.ErrUnexpectedEOF
     			}
    -			m.Controller = string(dAtA[iNdEx:postIndex])
    +			if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
     			iNdEx = postIndex
     		case 2:
     			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field Parameters", wireType)
    +				return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
     			}
     			var msglen int
     			for shift := uint(0); ; shift += 7 {
    @@ -4190,10 +4872,7 @@ func (m *IngressClassSpec) Unmarshal(dAtA []byte) error {
     			if postIndex > l {
     				return io.ErrUnexpectedEOF
     			}
    -			if m.Parameters == nil {
    -				m.Parameters = &IngressClassParametersReference{}
    -			}
    -			if err := m.Parameters.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +			if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
     				return err
     			}
     			iNdEx = postIndex
    @@ -4218,7 +4897,7 @@ func (m *IngressClassSpec) Unmarshal(dAtA []byte) error {
     	}
     	return nil
     }
    -func (m *IngressList) Unmarshal(dAtA []byte) error {
    +func (m *IngressClassList) Unmarshal(dAtA []byte) error {
     	l := len(dAtA)
     	iNdEx := 0
     	for iNdEx < l {
    @@ -4241,10 +4920,10 @@ func (m *IngressList) Unmarshal(dAtA []byte) error {
     		fieldNum := int32(wire >> 3)
     		wireType := int(wire & 0x7)
     		if wireType == 4 {
    -			return fmt.Errorf("proto: IngressList: wiretype end group for non-group")
    +			return fmt.Errorf("proto: IngressClassList: wiretype end group for non-group")
     		}
     		if fieldNum <= 0 {
    -			return fmt.Errorf("proto: IngressList: illegal tag %d (wire type %d)", fieldNum, wire)
    +			return fmt.Errorf("proto: IngressClassList: illegal tag %d (wire type %d)", fieldNum, wire)
     		}
     		switch fieldNum {
     		case 1:
    @@ -4309,7 +4988,7 @@ func (m *IngressList) Unmarshal(dAtA []byte) error {
     			if postIndex > l {
     				return io.ErrUnexpectedEOF
     			}
    -			m.Items = append(m.Items, Ingress{})
    +			m.Items = append(m.Items, IngressClass{})
     			if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
     				return err
     			}
    @@ -4335,7 +5014,7 @@ func (m *IngressList) Unmarshal(dAtA []byte) error {
     	}
     	return nil
     }
    -func (m *IngressLoadBalancerIngress) Unmarshal(dAtA []byte) error {
    +func (m *IngressClassParametersReference) Unmarshal(dAtA []byte) error {
     	l := len(dAtA)
     	iNdEx := 0
     	for iNdEx < l {
    @@ -4358,15 +5037,15 @@ func (m *IngressLoadBalancerIngress) Unmarshal(dAtA []byte) error {
     		fieldNum := int32(wire >> 3)
     		wireType := int(wire & 0x7)
     		if wireType == 4 {
    -			return fmt.Errorf("proto: IngressLoadBalancerIngress: wiretype end group for non-group")
    +			return fmt.Errorf("proto: IngressClassParametersReference: wiretype end group for non-group")
     		}
     		if fieldNum <= 0 {
    -			return fmt.Errorf("proto: IngressLoadBalancerIngress: illegal tag %d (wire type %d)", fieldNum, wire)
    +			return fmt.Errorf("proto: IngressClassParametersReference: illegal tag %d (wire type %d)", fieldNum, wire)
     		}
     		switch fieldNum {
     		case 1:
     			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field IP", wireType)
    +				return fmt.Errorf("proto: wrong wireType = %d for field APIGroup", wireType)
     			}
     			var stringLen uint64
     			for shift := uint(0); ; shift += 7 {
    @@ -4394,11 +5073,12 @@ func (m *IngressLoadBalancerIngress) Unmarshal(dAtA []byte) error {
     			if postIndex > l {
     				return io.ErrUnexpectedEOF
     			}
    -			m.IP = string(dAtA[iNdEx:postIndex])
    +			s := string(dAtA[iNdEx:postIndex])
    +			m.APIGroup = &s
     			iNdEx = postIndex
     		case 2:
     			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field Hostname", wireType)
    +				return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType)
     			}
     			var stringLen uint64
     			for shift := uint(0); ; shift += 7 {
    @@ -4426,13 +5106,45 @@ func (m *IngressLoadBalancerIngress) Unmarshal(dAtA []byte) error {
     			if postIndex > l {
     				return io.ErrUnexpectedEOF
     			}
    -			m.Hostname = string(dAtA[iNdEx:postIndex])
    +			m.Kind = string(dAtA[iNdEx:postIndex])
    +			iNdEx = postIndex
    +		case 3:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.Name = string(dAtA[iNdEx:postIndex])
     			iNdEx = postIndex
     		case 4:
     			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType)
    +				return fmt.Errorf("proto: wrong wireType = %d for field Scope", wireType)
     			}
    -			var msglen int
    +			var stringLen uint64
     			for shift := uint(0); ; shift += 7 {
     				if shift >= 64 {
     					return ErrIntOverflowGenerated
    @@ -4442,25 +5154,57 @@ func (m *IngressLoadBalancerIngress) Unmarshal(dAtA []byte) error {
     				}
     				b := dAtA[iNdEx]
     				iNdEx++
    -				msglen |= int(b&0x7F) << shift
    +				stringLen |= uint64(b&0x7F) << shift
     				if b < 0x80 {
     					break
     				}
     			}
    -			if msglen < 0 {
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
     				return ErrInvalidLengthGenerated
     			}
    -			postIndex := iNdEx + msglen
    +			postIndex := iNdEx + intStringLen
     			if postIndex < 0 {
     				return ErrInvalidLengthGenerated
     			}
     			if postIndex > l {
     				return io.ErrUnexpectedEOF
     			}
    -			m.Ports = append(m.Ports, IngressPortStatus{})
    -			if err := m.Ports[len(m.Ports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    -				return err
    +			s := string(dAtA[iNdEx:postIndex])
    +			m.Scope = &s
    +			iNdEx = postIndex
    +		case 5:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
     			}
    +			s := string(dAtA[iNdEx:postIndex])
    +			m.Namespace = &s
     			iNdEx = postIndex
     		default:
     			iNdEx = preIndex
    @@ -4483,7 +5227,7 @@ func (m *IngressLoadBalancerIngress) Unmarshal(dAtA []byte) error {
     	}
     	return nil
     }
    -func (m *IngressLoadBalancerStatus) Unmarshal(dAtA []byte) error {
    +func (m *IngressClassSpec) Unmarshal(dAtA []byte) error {
     	l := len(dAtA)
     	iNdEx := 0
     	for iNdEx < l {
    @@ -4506,15 +5250,47 @@ func (m *IngressLoadBalancerStatus) Unmarshal(dAtA []byte) error {
     		fieldNum := int32(wire >> 3)
     		wireType := int(wire & 0x7)
     		if wireType == 4 {
    -			return fmt.Errorf("proto: IngressLoadBalancerStatus: wiretype end group for non-group")
    +			return fmt.Errorf("proto: IngressClassSpec: wiretype end group for non-group")
     		}
     		if fieldNum <= 0 {
    -			return fmt.Errorf("proto: IngressLoadBalancerStatus: illegal tag %d (wire type %d)", fieldNum, wire)
    +			return fmt.Errorf("proto: IngressClassSpec: illegal tag %d (wire type %d)", fieldNum, wire)
     		}
     		switch fieldNum {
     		case 1:
     			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field Ingress", wireType)
    +				return fmt.Errorf("proto: wrong wireType = %d for field Controller", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.Controller = string(dAtA[iNdEx:postIndex])
    +			iNdEx = postIndex
    +		case 2:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Parameters", wireType)
     			}
     			var msglen int
     			for shift := uint(0); ; shift += 7 {
    @@ -4541,8 +5317,10 @@ func (m *IngressLoadBalancerStatus) Unmarshal(dAtA []byte) error {
     			if postIndex > l {
     				return io.ErrUnexpectedEOF
     			}
    -			m.Ingress = append(m.Ingress, IngressLoadBalancerIngress{})
    -			if err := m.Ingress[len(m.Ingress)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +			if m.Parameters == nil {
    +				m.Parameters = &IngressClassParametersReference{}
    +			}
    +			if err := m.Parameters.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
     				return err
     			}
     			iNdEx = postIndex
    @@ -4567,7 +5345,7 @@ func (m *IngressLoadBalancerStatus) Unmarshal(dAtA []byte) error {
     	}
     	return nil
     }
    -func (m *IngressPortStatus) Unmarshal(dAtA []byte) error {
    +func (m *IngressList) Unmarshal(dAtA []byte) error {
     	l := len(dAtA)
     	iNdEx := 0
     	for iNdEx < l {
    @@ -4590,36 +5368,17 @@ func (m *IngressPortStatus) Unmarshal(dAtA []byte) error {
     		fieldNum := int32(wire >> 3)
     		wireType := int(wire & 0x7)
     		if wireType == 4 {
    -			return fmt.Errorf("proto: IngressPortStatus: wiretype end group for non-group")
    +			return fmt.Errorf("proto: IngressList: wiretype end group for non-group")
     		}
     		if fieldNum <= 0 {
    -			return fmt.Errorf("proto: IngressPortStatus: illegal tag %d (wire type %d)", fieldNum, wire)
    +			return fmt.Errorf("proto: IngressList: illegal tag %d (wire type %d)", fieldNum, wire)
     		}
     		switch fieldNum {
     		case 1:
    -			if wireType != 0 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType)
    -			}
    -			m.Port = 0
    -			for shift := uint(0); ; shift += 7 {
    -				if shift >= 64 {
    -					return ErrIntOverflowGenerated
    -				}
    -				if iNdEx >= l {
    -					return io.ErrUnexpectedEOF
    -				}
    -				b := dAtA[iNdEx]
    -				iNdEx++
    -				m.Port |= int32(b&0x7F) << shift
    -				if b < 0x80 {
    -					break
    -				}
    -			}
    -		case 2:
     			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType)
    +				return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
     			}
    -			var stringLen uint64
    +			var msglen int
     			for shift := uint(0); ; shift += 7 {
     				if shift >= 64 {
     					return ErrIntOverflowGenerated
    @@ -4629,29 +5388,30 @@ func (m *IngressPortStatus) Unmarshal(dAtA []byte) error {
     				}
     				b := dAtA[iNdEx]
     				iNdEx++
    -				stringLen |= uint64(b&0x7F) << shift
    +				msglen |= int(b&0x7F) << shift
     				if b < 0x80 {
     					break
     				}
     			}
    -			intStringLen := int(stringLen)
    -			if intStringLen < 0 {
    +			if msglen < 0 {
     				return ErrInvalidLengthGenerated
     			}
    -			postIndex := iNdEx + intStringLen
    +			postIndex := iNdEx + msglen
     			if postIndex < 0 {
     				return ErrInvalidLengthGenerated
     			}
     			if postIndex > l {
     				return io.ErrUnexpectedEOF
     			}
    -			m.Protocol = k8s_io_api_core_v1.Protocol(dAtA[iNdEx:postIndex])
    +			if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
     			iNdEx = postIndex
    -		case 3:
    +		case 2:
     			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType)
    +				return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
     			}
    -			var stringLen uint64
    +			var msglen int
     			for shift := uint(0); ; shift += 7 {
     				if shift >= 64 {
     					return ErrIntOverflowGenerated
    @@ -4661,24 +5421,25 @@ func (m *IngressPortStatus) Unmarshal(dAtA []byte) error {
     				}
     				b := dAtA[iNdEx]
     				iNdEx++
    -				stringLen |= uint64(b&0x7F) << shift
    +				msglen |= int(b&0x7F) << shift
     				if b < 0x80 {
     					break
     				}
     			}
    -			intStringLen := int(stringLen)
    -			if intStringLen < 0 {
    +			if msglen < 0 {
     				return ErrInvalidLengthGenerated
     			}
    -			postIndex := iNdEx + intStringLen
    +			postIndex := iNdEx + msglen
     			if postIndex < 0 {
     				return ErrInvalidLengthGenerated
     			}
     			if postIndex > l {
     				return io.ErrUnexpectedEOF
     			}
    -			s := string(dAtA[iNdEx:postIndex])
    -			m.Error = &s
    +			m.Items = append(m.Items, Ingress{})
    +			if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
     			iNdEx = postIndex
     		default:
     			iNdEx = preIndex
    @@ -4701,7 +5462,7 @@ func (m *IngressPortStatus) Unmarshal(dAtA []byte) error {
     	}
     	return nil
     }
    -func (m *IngressRule) Unmarshal(dAtA []byte) error {
    +func (m *IngressLoadBalancerIngress) Unmarshal(dAtA []byte) error {
     	l := len(dAtA)
     	iNdEx := 0
     	for iNdEx < l {
    @@ -4724,15 +5485,15 @@ func (m *IngressRule) Unmarshal(dAtA []byte) error {
     		fieldNum := int32(wire >> 3)
     		wireType := int(wire & 0x7)
     		if wireType == 4 {
    -			return fmt.Errorf("proto: IngressRule: wiretype end group for non-group")
    +			return fmt.Errorf("proto: IngressLoadBalancerIngress: wiretype end group for non-group")
     		}
     		if fieldNum <= 0 {
    -			return fmt.Errorf("proto: IngressRule: illegal tag %d (wire type %d)", fieldNum, wire)
    +			return fmt.Errorf("proto: IngressLoadBalancerIngress: illegal tag %d (wire type %d)", fieldNum, wire)
     		}
     		switch fieldNum {
     		case 1:
     			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field Host", wireType)
    +				return fmt.Errorf("proto: wrong wireType = %d for field IP", wireType)
     			}
     			var stringLen uint64
     			for shift := uint(0); ; shift += 7 {
    @@ -4760,11 +5521,43 @@ func (m *IngressRule) Unmarshal(dAtA []byte) error {
     			if postIndex > l {
     				return io.ErrUnexpectedEOF
     			}
    -			m.Host = string(dAtA[iNdEx:postIndex])
    +			m.IP = string(dAtA[iNdEx:postIndex])
     			iNdEx = postIndex
     		case 2:
     			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field IngressRuleValue", wireType)
    +				return fmt.Errorf("proto: wrong wireType = %d for field Hostname", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.Hostname = string(dAtA[iNdEx:postIndex])
    +			iNdEx = postIndex
    +		case 4:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType)
     			}
     			var msglen int
     			for shift := uint(0); ; shift += 7 {
    @@ -4791,7 +5584,8 @@ func (m *IngressRule) Unmarshal(dAtA []byte) error {
     			if postIndex > l {
     				return io.ErrUnexpectedEOF
     			}
    -			if err := m.IngressRuleValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +			m.Ports = append(m.Ports, IngressPortStatus{})
    +			if err := m.Ports[len(m.Ports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
     				return err
     			}
     			iNdEx = postIndex
    @@ -4816,7 +5610,7 @@ func (m *IngressRule) Unmarshal(dAtA []byte) error {
     	}
     	return nil
     }
    -func (m *IngressRuleValue) Unmarshal(dAtA []byte) error {
    +func (m *IngressLoadBalancerStatus) Unmarshal(dAtA []byte) error {
     	l := len(dAtA)
     	iNdEx := 0
     	for iNdEx < l {
    @@ -4839,15 +5633,15 @@ func (m *IngressRuleValue) Unmarshal(dAtA []byte) error {
     		fieldNum := int32(wire >> 3)
     		wireType := int(wire & 0x7)
     		if wireType == 4 {
    -			return fmt.Errorf("proto: IngressRuleValue: wiretype end group for non-group")
    +			return fmt.Errorf("proto: IngressLoadBalancerStatus: wiretype end group for non-group")
     		}
     		if fieldNum <= 0 {
    -			return fmt.Errorf("proto: IngressRuleValue: illegal tag %d (wire type %d)", fieldNum, wire)
    +			return fmt.Errorf("proto: IngressLoadBalancerStatus: illegal tag %d (wire type %d)", fieldNum, wire)
     		}
     		switch fieldNum {
     		case 1:
     			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field HTTP", wireType)
    +				return fmt.Errorf("proto: wrong wireType = %d for field Ingress", wireType)
     			}
     			var msglen int
     			for shift := uint(0); ; shift += 7 {
    @@ -4874,10 +5668,8 @@ func (m *IngressRuleValue) Unmarshal(dAtA []byte) error {
     			if postIndex > l {
     				return io.ErrUnexpectedEOF
     			}
    -			if m.HTTP == nil {
    -				m.HTTP = &HTTPIngressRuleValue{}
    -			}
    -			if err := m.HTTP.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +			m.Ingress = append(m.Ingress, IngressLoadBalancerIngress{})
    +			if err := m.Ingress[len(m.Ingress)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
     				return err
     			}
     			iNdEx = postIndex
    @@ -4902,7 +5694,7 @@ func (m *IngressRuleValue) Unmarshal(dAtA []byte) error {
     	}
     	return nil
     }
    -func (m *IngressServiceBackend) Unmarshal(dAtA []byte) error {
    +func (m *IngressPortStatus) Unmarshal(dAtA []byte) error {
     	l := len(dAtA)
     	iNdEx := 0
     	for iNdEx < l {
    @@ -4925,15 +5717,34 @@ func (m *IngressServiceBackend) Unmarshal(dAtA []byte) error {
     		fieldNum := int32(wire >> 3)
     		wireType := int(wire & 0x7)
     		if wireType == 4 {
    -			return fmt.Errorf("proto: IngressServiceBackend: wiretype end group for non-group")
    +			return fmt.Errorf("proto: IngressPortStatus: wiretype end group for non-group")
     		}
     		if fieldNum <= 0 {
    -			return fmt.Errorf("proto: IngressServiceBackend: illegal tag %d (wire type %d)", fieldNum, wire)
    +			return fmt.Errorf("proto: IngressPortStatus: illegal tag %d (wire type %d)", fieldNum, wire)
     		}
     		switch fieldNum {
     		case 1:
    +			if wireType != 0 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType)
    +			}
    +			m.Port = 0
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				m.Port |= int32(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +		case 2:
     			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
    +				return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType)
     			}
     			var stringLen uint64
     			for shift := uint(0); ; shift += 7 {
    @@ -4961,13 +5772,13 @@ func (m *IngressServiceBackend) Unmarshal(dAtA []byte) error {
     			if postIndex > l {
     				return io.ErrUnexpectedEOF
     			}
    -			m.Name = string(dAtA[iNdEx:postIndex])
    +			m.Protocol = k8s_io_api_core_v1.Protocol(dAtA[iNdEx:postIndex])
     			iNdEx = postIndex
    -		case 2:
    +		case 3:
     			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType)
    +				return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType)
     			}
    -			var msglen int
    +			var stringLen uint64
     			for shift := uint(0); ; shift += 7 {
     				if shift >= 64 {
     					return ErrIntOverflowGenerated
    @@ -4977,24 +5788,24 @@ func (m *IngressServiceBackend) Unmarshal(dAtA []byte) error {
     				}
     				b := dAtA[iNdEx]
     				iNdEx++
    -				msglen |= int(b&0x7F) << shift
    +				stringLen |= uint64(b&0x7F) << shift
     				if b < 0x80 {
     					break
     				}
     			}
    -			if msglen < 0 {
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
     				return ErrInvalidLengthGenerated
     			}
    -			postIndex := iNdEx + msglen
    +			postIndex := iNdEx + intStringLen
     			if postIndex < 0 {
     				return ErrInvalidLengthGenerated
     			}
     			if postIndex > l {
     				return io.ErrUnexpectedEOF
     			}
    -			if err := m.Port.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    -				return err
    -			}
    +			s := string(dAtA[iNdEx:postIndex])
    +			m.Error = &s
     			iNdEx = postIndex
     		default:
     			iNdEx = preIndex
    @@ -5017,7 +5828,7 @@ func (m *IngressServiceBackend) Unmarshal(dAtA []byte) error {
     	}
     	return nil
     }
    -func (m *IngressSpec) Unmarshal(dAtA []byte) error {
    +func (m *IngressRule) Unmarshal(dAtA []byte) error {
     	l := len(dAtA)
     	iNdEx := 0
     	for iNdEx < l {
    @@ -5040,17 +5851,17 @@ func (m *IngressSpec) Unmarshal(dAtA []byte) error {
     		fieldNum := int32(wire >> 3)
     		wireType := int(wire & 0x7)
     		if wireType == 4 {
    -			return fmt.Errorf("proto: IngressSpec: wiretype end group for non-group")
    +			return fmt.Errorf("proto: IngressRule: wiretype end group for non-group")
     		}
     		if fieldNum <= 0 {
    -			return fmt.Errorf("proto: IngressSpec: illegal tag %d (wire type %d)", fieldNum, wire)
    +			return fmt.Errorf("proto: IngressRule: illegal tag %d (wire type %d)", fieldNum, wire)
     		}
     		switch fieldNum {
     		case 1:
     			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field DefaultBackend", wireType)
    +				return fmt.Errorf("proto: wrong wireType = %d for field Host", wireType)
     			}
    -			var msglen int
    +			var stringLen uint64
     			for shift := uint(0); ; shift += 7 {
     				if shift >= 64 {
     					return ErrIntOverflowGenerated
    @@ -5060,65 +5871,27 @@ func (m *IngressSpec) Unmarshal(dAtA []byte) error {
     				}
     				b := dAtA[iNdEx]
     				iNdEx++
    -				msglen |= int(b&0x7F) << shift
    +				stringLen |= uint64(b&0x7F) << shift
     				if b < 0x80 {
     					break
     				}
     			}
    -			if msglen < 0 {
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
     				return ErrInvalidLengthGenerated
     			}
    -			postIndex := iNdEx + msglen
    +			postIndex := iNdEx + intStringLen
     			if postIndex < 0 {
     				return ErrInvalidLengthGenerated
     			}
     			if postIndex > l {
     				return io.ErrUnexpectedEOF
     			}
    -			if m.DefaultBackend == nil {
    -				m.DefaultBackend = &IngressBackend{}
    -			}
    -			if err := m.DefaultBackend.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    -				return err
    -			}
    +			m.Host = string(dAtA[iNdEx:postIndex])
     			iNdEx = postIndex
     		case 2:
     			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field TLS", wireType)
    -			}
    -			var msglen int
    -			for shift := uint(0); ; shift += 7 {
    -				if shift >= 64 {
    -					return ErrIntOverflowGenerated
    -				}
    -				if iNdEx >= l {
    -					return io.ErrUnexpectedEOF
    -				}
    -				b := dAtA[iNdEx]
    -				iNdEx++
    -				msglen |= int(b&0x7F) << shift
    -				if b < 0x80 {
    -					break
    -				}
    -			}
    -			if msglen < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			postIndex := iNdEx + msglen
    -			if postIndex < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if postIndex > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			m.TLS = append(m.TLS, IngressTLS{})
    -			if err := m.TLS[len(m.TLS)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    -				return err
    -			}
    -			iNdEx = postIndex
    -		case 3:
    -			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field Rules", wireType)
    +				return fmt.Errorf("proto: wrong wireType = %d for field IngressRuleValue", wireType)
     			}
     			var msglen int
     			for shift := uint(0); ; shift += 7 {
    @@ -5145,44 +5918,10 @@ func (m *IngressSpec) Unmarshal(dAtA []byte) error {
     			if postIndex > l {
     				return io.ErrUnexpectedEOF
     			}
    -			m.Rules = append(m.Rules, IngressRule{})
    -			if err := m.Rules[len(m.Rules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +			if err := m.IngressRuleValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
     				return err
     			}
     			iNdEx = postIndex
    -		case 4:
    -			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field IngressClassName", wireType)
    -			}
    -			var stringLen uint64
    -			for shift := uint(0); ; shift += 7 {
    -				if shift >= 64 {
    -					return ErrIntOverflowGenerated
    -				}
    -				if iNdEx >= l {
    -					return io.ErrUnexpectedEOF
    -				}
    -				b := dAtA[iNdEx]
    -				iNdEx++
    -				stringLen |= uint64(b&0x7F) << shift
    -				if b < 0x80 {
    -					break
    -				}
    -			}
    -			intStringLen := int(stringLen)
    -			if intStringLen < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			postIndex := iNdEx + intStringLen
    -			if postIndex < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if postIndex > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			s := string(dAtA[iNdEx:postIndex])
    -			m.IngressClassName = &s
    -			iNdEx = postIndex
     		default:
     			iNdEx = preIndex
     			skippy, err := skipGenerated(dAtA[iNdEx:])
    @@ -5204,7 +5943,7 @@ func (m *IngressSpec) Unmarshal(dAtA []byte) error {
     	}
     	return nil
     }
    -func (m *IngressStatus) Unmarshal(dAtA []byte) error {
    +func (m *IngressRuleValue) Unmarshal(dAtA []byte) error {
     	l := len(dAtA)
     	iNdEx := 0
     	for iNdEx < l {
    @@ -5227,15 +5966,15 @@ func (m *IngressStatus) Unmarshal(dAtA []byte) error {
     		fieldNum := int32(wire >> 3)
     		wireType := int(wire & 0x7)
     		if wireType == 4 {
    -			return fmt.Errorf("proto: IngressStatus: wiretype end group for non-group")
    +			return fmt.Errorf("proto: IngressRuleValue: wiretype end group for non-group")
     		}
     		if fieldNum <= 0 {
    -			return fmt.Errorf("proto: IngressStatus: illegal tag %d (wire type %d)", fieldNum, wire)
    +			return fmt.Errorf("proto: IngressRuleValue: illegal tag %d (wire type %d)", fieldNum, wire)
     		}
     		switch fieldNum {
     		case 1:
     			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field LoadBalancer", wireType)
    +				return fmt.Errorf("proto: wrong wireType = %d for field HTTP", wireType)
     			}
     			var msglen int
     			for shift := uint(0); ; shift += 7 {
    @@ -5262,7 +6001,10 @@ func (m *IngressStatus) Unmarshal(dAtA []byte) error {
     			if postIndex > l {
     				return io.ErrUnexpectedEOF
     			}
    -			if err := m.LoadBalancer.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +			if m.HTTP == nil {
    +				m.HTTP = &HTTPIngressRuleValue{}
    +			}
    +			if err := m.HTTP.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
     				return err
     			}
     			iNdEx = postIndex
    @@ -5287,7 +6029,7 @@ func (m *IngressStatus) Unmarshal(dAtA []byte) error {
     	}
     	return nil
     }
    -func (m *IngressTLS) Unmarshal(dAtA []byte) error {
    +func (m *IngressServiceBackend) Unmarshal(dAtA []byte) error {
     	l := len(dAtA)
     	iNdEx := 0
     	for iNdEx < l {
    @@ -5310,15 +6052,15 @@ func (m *IngressTLS) Unmarshal(dAtA []byte) error {
     		fieldNum := int32(wire >> 3)
     		wireType := int(wire & 0x7)
     		if wireType == 4 {
    -			return fmt.Errorf("proto: IngressTLS: wiretype end group for non-group")
    +			return fmt.Errorf("proto: IngressServiceBackend: wiretype end group for non-group")
     		}
     		if fieldNum <= 0 {
    -			return fmt.Errorf("proto: IngressTLS: illegal tag %d (wire type %d)", fieldNum, wire)
    +			return fmt.Errorf("proto: IngressServiceBackend: illegal tag %d (wire type %d)", fieldNum, wire)
     		}
     		switch fieldNum {
     		case 1:
     			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field Hosts", wireType)
    +				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
     			}
     			var stringLen uint64
     			for shift := uint(0); ; shift += 7 {
    @@ -5346,13 +6088,13 @@ func (m *IngressTLS) Unmarshal(dAtA []byte) error {
     			if postIndex > l {
     				return io.ErrUnexpectedEOF
     			}
    -			m.Hosts = append(m.Hosts, string(dAtA[iNdEx:postIndex]))
    +			m.Name = string(dAtA[iNdEx:postIndex])
     			iNdEx = postIndex
     		case 2:
     			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field SecretName", wireType)
    +				return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType)
     			}
    -			var stringLen uint64
    +			var msglen int
     			for shift := uint(0); ; shift += 7 {
     				if shift >= 64 {
     					return ErrIntOverflowGenerated
    @@ -5362,23 +6104,24 @@ func (m *IngressTLS) Unmarshal(dAtA []byte) error {
     				}
     				b := dAtA[iNdEx]
     				iNdEx++
    -				stringLen |= uint64(b&0x7F) << shift
    +				msglen |= int(b&0x7F) << shift
     				if b < 0x80 {
     					break
     				}
     			}
    -			intStringLen := int(stringLen)
    -			if intStringLen < 0 {
    +			if msglen < 0 {
     				return ErrInvalidLengthGenerated
     			}
    -			postIndex := iNdEx + intStringLen
    +			postIndex := iNdEx + msglen
     			if postIndex < 0 {
     				return ErrInvalidLengthGenerated
     			}
     			if postIndex > l {
     				return io.ErrUnexpectedEOF
     			}
    -			m.SecretName = string(dAtA[iNdEx:postIndex])
    +			if err := m.Port.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
     			iNdEx = postIndex
     		default:
     			iNdEx = preIndex
    @@ -5401,7 +6144,7 @@ func (m *IngressTLS) Unmarshal(dAtA []byte) error {
     	}
     	return nil
     }
    -func (m *NetworkPolicy) Unmarshal(dAtA []byte) error {
    +func (m *IngressSpec) Unmarshal(dAtA []byte) error {
     	l := len(dAtA)
     	iNdEx := 0
     	for iNdEx < l {
    @@ -5424,15 +6167,15 @@ func (m *NetworkPolicy) Unmarshal(dAtA []byte) error {
     		fieldNum := int32(wire >> 3)
     		wireType := int(wire & 0x7)
     		if wireType == 4 {
    -			return fmt.Errorf("proto: NetworkPolicy: wiretype end group for non-group")
    +			return fmt.Errorf("proto: IngressSpec: wiretype end group for non-group")
     		}
     		if fieldNum <= 0 {
    -			return fmt.Errorf("proto: NetworkPolicy: illegal tag %d (wire type %d)", fieldNum, wire)
    +			return fmt.Errorf("proto: IngressSpec: illegal tag %d (wire type %d)", fieldNum, wire)
     		}
     		switch fieldNum {
     		case 1:
     			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
    +				return fmt.Errorf("proto: wrong wireType = %d for field DefaultBackend", wireType)
     			}
     			var msglen int
     			for shift := uint(0); ; shift += 7 {
    @@ -5459,13 +6202,16 @@ func (m *NetworkPolicy) Unmarshal(dAtA []byte) error {
     			if postIndex > l {
     				return io.ErrUnexpectedEOF
     			}
    -			if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +			if m.DefaultBackend == nil {
    +				m.DefaultBackend = &IngressBackend{}
    +			}
    +			if err := m.DefaultBackend.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
     				return err
     			}
     			iNdEx = postIndex
     		case 2:
     			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
    +				return fmt.Errorf("proto: wrong wireType = %d for field TLS", wireType)
     			}
     			var msglen int
     			for shift := uint(0); ; shift += 7 {
    @@ -5492,63 +6238,14 @@ func (m *NetworkPolicy) Unmarshal(dAtA []byte) error {
     			if postIndex > l {
     				return io.ErrUnexpectedEOF
     			}
    -			if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +			m.TLS = append(m.TLS, IngressTLS{})
    +			if err := m.TLS[len(m.TLS)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
     				return err
     			}
     			iNdEx = postIndex
    -		default:
    -			iNdEx = preIndex
    -			skippy, err := skipGenerated(dAtA[iNdEx:])
    -			if err != nil {
    -				return err
    -			}
    -			if (skippy < 0) || (iNdEx+skippy) < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if (iNdEx + skippy) > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			iNdEx += skippy
    -		}
    -	}
    -
    -	if iNdEx > l {
    -		return io.ErrUnexpectedEOF
    -	}
    -	return nil
    -}
    -func (m *NetworkPolicyEgressRule) Unmarshal(dAtA []byte) error {
    -	l := len(dAtA)
    -	iNdEx := 0
    -	for iNdEx < l {
    -		preIndex := iNdEx
    -		var wire uint64
    -		for shift := uint(0); ; shift += 7 {
    -			if shift >= 64 {
    -				return ErrIntOverflowGenerated
    -			}
    -			if iNdEx >= l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			b := dAtA[iNdEx]
    -			iNdEx++
    -			wire |= uint64(b&0x7F) << shift
    -			if b < 0x80 {
    -				break
    -			}
    -		}
    -		fieldNum := int32(wire >> 3)
    -		wireType := int(wire & 0x7)
    -		if wireType == 4 {
    -			return fmt.Errorf("proto: NetworkPolicyEgressRule: wiretype end group for non-group")
    -		}
    -		if fieldNum <= 0 {
    -			return fmt.Errorf("proto: NetworkPolicyEgressRule: illegal tag %d (wire type %d)", fieldNum, wire)
    -		}
    -		switch fieldNum {
    -		case 1:
    +		case 3:
     			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType)
    +				return fmt.Errorf("proto: wrong wireType = %d for field Rules", wireType)
     			}
     			var msglen int
     			for shift := uint(0); ; shift += 7 {
    @@ -5575,16 +6272,99 @@ func (m *NetworkPolicyEgressRule) Unmarshal(dAtA []byte) error {
     			if postIndex > l {
     				return io.ErrUnexpectedEOF
     			}
    -			m.Ports = append(m.Ports, NetworkPolicyPort{})
    -			if err := m.Ports[len(m.Ports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +			m.Rules = append(m.Rules, IngressRule{})
    +			if err := m.Rules[len(m.Rules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
     				return err
     			}
     			iNdEx = postIndex
    -		case 2:
    +		case 4:
     			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field To", wireType)
    +				return fmt.Errorf("proto: wrong wireType = %d for field IngressClassName", wireType)
     			}
    -			var msglen int
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			s := string(dAtA[iNdEx:postIndex])
    +			m.IngressClassName = &s
    +			iNdEx = postIndex
    +		default:
    +			iNdEx = preIndex
    +			skippy, err := skipGenerated(dAtA[iNdEx:])
    +			if err != nil {
    +				return err
    +			}
    +			if (skippy < 0) || (iNdEx+skippy) < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if (iNdEx + skippy) > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			iNdEx += skippy
    +		}
    +	}
    +
    +	if iNdEx > l {
    +		return io.ErrUnexpectedEOF
    +	}
    +	return nil
    +}
    +func (m *IngressStatus) Unmarshal(dAtA []byte) error {
    +	l := len(dAtA)
    +	iNdEx := 0
    +	for iNdEx < l {
    +		preIndex := iNdEx
    +		var wire uint64
    +		for shift := uint(0); ; shift += 7 {
    +			if shift >= 64 {
    +				return ErrIntOverflowGenerated
    +			}
    +			if iNdEx >= l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			b := dAtA[iNdEx]
    +			iNdEx++
    +			wire |= uint64(b&0x7F) << shift
    +			if b < 0x80 {
    +				break
    +			}
    +		}
    +		fieldNum := int32(wire >> 3)
    +		wireType := int(wire & 0x7)
    +		if wireType == 4 {
    +			return fmt.Errorf("proto: IngressStatus: wiretype end group for non-group")
    +		}
    +		if fieldNum <= 0 {
    +			return fmt.Errorf("proto: IngressStatus: illegal tag %d (wire type %d)", fieldNum, wire)
    +		}
    +		switch fieldNum {
    +		case 1:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field LoadBalancer", wireType)
    +			}
    +			var msglen int
     			for shift := uint(0); ; shift += 7 {
     				if shift >= 64 {
     					return ErrIntOverflowGenerated
    @@ -5609,8 +6389,7 @@ func (m *NetworkPolicyEgressRule) Unmarshal(dAtA []byte) error {
     			if postIndex > l {
     				return io.ErrUnexpectedEOF
     			}
    -			m.To = append(m.To, NetworkPolicyPeer{})
    -			if err := m.To[len(m.To)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +			if err := m.LoadBalancer.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
     				return err
     			}
     			iNdEx = postIndex
    @@ -5635,7 +6414,7 @@ func (m *NetworkPolicyEgressRule) Unmarshal(dAtA []byte) error {
     	}
     	return nil
     }
    -func (m *NetworkPolicyIngressRule) Unmarshal(dAtA []byte) error {
    +func (m *IngressTLS) Unmarshal(dAtA []byte) error {
     	l := len(dAtA)
     	iNdEx := 0
     	for iNdEx < l {
    @@ -5658,15 +6437,129 @@ func (m *NetworkPolicyIngressRule) Unmarshal(dAtA []byte) error {
     		fieldNum := int32(wire >> 3)
     		wireType := int(wire & 0x7)
     		if wireType == 4 {
    -			return fmt.Errorf("proto: NetworkPolicyIngressRule: wiretype end group for non-group")
    +			return fmt.Errorf("proto: IngressTLS: wiretype end group for non-group")
     		}
     		if fieldNum <= 0 {
    -			return fmt.Errorf("proto: NetworkPolicyIngressRule: illegal tag %d (wire type %d)", fieldNum, wire)
    +			return fmt.Errorf("proto: IngressTLS: illegal tag %d (wire type %d)", fieldNum, wire)
     		}
     		switch fieldNum {
     		case 1:
     			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType)
    +				return fmt.Errorf("proto: wrong wireType = %d for field Hosts", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.Hosts = append(m.Hosts, string(dAtA[iNdEx:postIndex]))
    +			iNdEx = postIndex
    +		case 2:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field SecretName", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.SecretName = string(dAtA[iNdEx:postIndex])
    +			iNdEx = postIndex
    +		default:
    +			iNdEx = preIndex
    +			skippy, err := skipGenerated(dAtA[iNdEx:])
    +			if err != nil {
    +				return err
    +			}
    +			if (skippy < 0) || (iNdEx+skippy) < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if (iNdEx + skippy) > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			iNdEx += skippy
    +		}
    +	}
    +
    +	if iNdEx > l {
    +		return io.ErrUnexpectedEOF
    +	}
    +	return nil
    +}
    +func (m *NetworkPolicy) Unmarshal(dAtA []byte) error {
    +	l := len(dAtA)
    +	iNdEx := 0
    +	for iNdEx < l {
    +		preIndex := iNdEx
    +		var wire uint64
    +		for shift := uint(0); ; shift += 7 {
    +			if shift >= 64 {
    +				return ErrIntOverflowGenerated
    +			}
    +			if iNdEx >= l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			b := dAtA[iNdEx]
    +			iNdEx++
    +			wire |= uint64(b&0x7F) << shift
    +			if b < 0x80 {
    +				break
    +			}
    +		}
    +		fieldNum := int32(wire >> 3)
    +		wireType := int(wire & 0x7)
    +		if wireType == 4 {
    +			return fmt.Errorf("proto: NetworkPolicy: wiretype end group for non-group")
    +		}
    +		if fieldNum <= 0 {
    +			return fmt.Errorf("proto: NetworkPolicy: illegal tag %d (wire type %d)", fieldNum, wire)
    +		}
    +		switch fieldNum {
    +		case 1:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
     			}
     			var msglen int
     			for shift := uint(0); ; shift += 7 {
    @@ -5693,14 +6586,13 @@ func (m *NetworkPolicyIngressRule) Unmarshal(dAtA []byte) error {
     			if postIndex > l {
     				return io.ErrUnexpectedEOF
     			}
    -			m.Ports = append(m.Ports, NetworkPolicyPort{})
    -			if err := m.Ports[len(m.Ports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +			if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
     				return err
     			}
     			iNdEx = postIndex
     		case 2:
     			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field From", wireType)
    +				return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
     			}
     			var msglen int
     			for shift := uint(0); ; shift += 7 {
    @@ -5727,10 +6619,1020 @@ func (m *NetworkPolicyIngressRule) Unmarshal(dAtA []byte) error {
     			if postIndex > l {
     				return io.ErrUnexpectedEOF
     			}
    -			m.From = append(m.From, NetworkPolicyPeer{})
    -			if err := m.From[len(m.From)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +			if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		default:
    +			iNdEx = preIndex
    +			skippy, err := skipGenerated(dAtA[iNdEx:])
    +			if err != nil {
     				return err
     			}
    +			if (skippy < 0) || (iNdEx+skippy) < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if (iNdEx + skippy) > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			iNdEx += skippy
    +		}
    +	}
    +
    +	if iNdEx > l {
    +		return io.ErrUnexpectedEOF
    +	}
    +	return nil
    +}
    +func (m *NetworkPolicyEgressRule) Unmarshal(dAtA []byte) error {
    +	l := len(dAtA)
    +	iNdEx := 0
    +	for iNdEx < l {
    +		preIndex := iNdEx
    +		var wire uint64
    +		for shift := uint(0); ; shift += 7 {
    +			if shift >= 64 {
    +				return ErrIntOverflowGenerated
    +			}
    +			if iNdEx >= l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			b := dAtA[iNdEx]
    +			iNdEx++
    +			wire |= uint64(b&0x7F) << shift
    +			if b < 0x80 {
    +				break
    +			}
    +		}
    +		fieldNum := int32(wire >> 3)
    +		wireType := int(wire & 0x7)
    +		if wireType == 4 {
    +			return fmt.Errorf("proto: NetworkPolicyEgressRule: wiretype end group for non-group")
    +		}
    +		if fieldNum <= 0 {
    +			return fmt.Errorf("proto: NetworkPolicyEgressRule: illegal tag %d (wire type %d)", fieldNum, wire)
    +		}
    +		switch fieldNum {
    +		case 1:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.Ports = append(m.Ports, NetworkPolicyPort{})
    +			if err := m.Ports[len(m.Ports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		case 2:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field To", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.To = append(m.To, NetworkPolicyPeer{})
    +			if err := m.To[len(m.To)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		default:
    +			iNdEx = preIndex
    +			skippy, err := skipGenerated(dAtA[iNdEx:])
    +			if err != nil {
    +				return err
    +			}
    +			if (skippy < 0) || (iNdEx+skippy) < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if (iNdEx + skippy) > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			iNdEx += skippy
    +		}
    +	}
    +
    +	if iNdEx > l {
    +		return io.ErrUnexpectedEOF
    +	}
    +	return nil
    +}
    +func (m *NetworkPolicyIngressRule) Unmarshal(dAtA []byte) error {
    +	l := len(dAtA)
    +	iNdEx := 0
    +	for iNdEx < l {
    +		preIndex := iNdEx
    +		var wire uint64
    +		for shift := uint(0); ; shift += 7 {
    +			if shift >= 64 {
    +				return ErrIntOverflowGenerated
    +			}
    +			if iNdEx >= l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			b := dAtA[iNdEx]
    +			iNdEx++
    +			wire |= uint64(b&0x7F) << shift
    +			if b < 0x80 {
    +				break
    +			}
    +		}
    +		fieldNum := int32(wire >> 3)
    +		wireType := int(wire & 0x7)
    +		if wireType == 4 {
    +			return fmt.Errorf("proto: NetworkPolicyIngressRule: wiretype end group for non-group")
    +		}
    +		if fieldNum <= 0 {
    +			return fmt.Errorf("proto: NetworkPolicyIngressRule: illegal tag %d (wire type %d)", fieldNum, wire)
    +		}
    +		switch fieldNum {
    +		case 1:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.Ports = append(m.Ports, NetworkPolicyPort{})
    +			if err := m.Ports[len(m.Ports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		case 2:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field From", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.From = append(m.From, NetworkPolicyPeer{})
    +			if err := m.From[len(m.From)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		default:
    +			iNdEx = preIndex
    +			skippy, err := skipGenerated(dAtA[iNdEx:])
    +			if err != nil {
    +				return err
    +			}
    +			if (skippy < 0) || (iNdEx+skippy) < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if (iNdEx + skippy) > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			iNdEx += skippy
    +		}
    +	}
    +
    +	if iNdEx > l {
    +		return io.ErrUnexpectedEOF
    +	}
    +	return nil
    +}
    +func (m *NetworkPolicyList) Unmarshal(dAtA []byte) error {
    +	l := len(dAtA)
    +	iNdEx := 0
    +	for iNdEx < l {
    +		preIndex := iNdEx
    +		var wire uint64
    +		for shift := uint(0); ; shift += 7 {
    +			if shift >= 64 {
    +				return ErrIntOverflowGenerated
    +			}
    +			if iNdEx >= l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			b := dAtA[iNdEx]
    +			iNdEx++
    +			wire |= uint64(b&0x7F) << shift
    +			if b < 0x80 {
    +				break
    +			}
    +		}
    +		fieldNum := int32(wire >> 3)
    +		wireType := int(wire & 0x7)
    +		if wireType == 4 {
    +			return fmt.Errorf("proto: NetworkPolicyList: wiretype end group for non-group")
    +		}
    +		if fieldNum <= 0 {
    +			return fmt.Errorf("proto: NetworkPolicyList: illegal tag %d (wire type %d)", fieldNum, wire)
    +		}
    +		switch fieldNum {
    +		case 1:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		case 2:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.Items = append(m.Items, NetworkPolicy{})
    +			if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		default:
    +			iNdEx = preIndex
    +			skippy, err := skipGenerated(dAtA[iNdEx:])
    +			if err != nil {
    +				return err
    +			}
    +			if (skippy < 0) || (iNdEx+skippy) < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if (iNdEx + skippy) > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			iNdEx += skippy
    +		}
    +	}
    +
    +	if iNdEx > l {
    +		return io.ErrUnexpectedEOF
    +	}
    +	return nil
    +}
    +func (m *NetworkPolicyPeer) Unmarshal(dAtA []byte) error {
    +	l := len(dAtA)
    +	iNdEx := 0
    +	for iNdEx < l {
    +		preIndex := iNdEx
    +		var wire uint64
    +		for shift := uint(0); ; shift += 7 {
    +			if shift >= 64 {
    +				return ErrIntOverflowGenerated
    +			}
    +			if iNdEx >= l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			b := dAtA[iNdEx]
    +			iNdEx++
    +			wire |= uint64(b&0x7F) << shift
    +			if b < 0x80 {
    +				break
    +			}
    +		}
    +		fieldNum := int32(wire >> 3)
    +		wireType := int(wire & 0x7)
    +		if wireType == 4 {
    +			return fmt.Errorf("proto: NetworkPolicyPeer: wiretype end group for non-group")
    +		}
    +		if fieldNum <= 0 {
    +			return fmt.Errorf("proto: NetworkPolicyPeer: illegal tag %d (wire type %d)", fieldNum, wire)
    +		}
    +		switch fieldNum {
    +		case 1:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field PodSelector", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			if m.PodSelector == nil {
    +				m.PodSelector = &v1.LabelSelector{}
    +			}
    +			if err := m.PodSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		case 2:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field NamespaceSelector", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			if m.NamespaceSelector == nil {
    +				m.NamespaceSelector = &v1.LabelSelector{}
    +			}
    +			if err := m.NamespaceSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		case 3:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field IPBlock", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			if m.IPBlock == nil {
    +				m.IPBlock = &IPBlock{}
    +			}
    +			if err := m.IPBlock.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		default:
    +			iNdEx = preIndex
    +			skippy, err := skipGenerated(dAtA[iNdEx:])
    +			if err != nil {
    +				return err
    +			}
    +			if (skippy < 0) || (iNdEx+skippy) < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if (iNdEx + skippy) > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			iNdEx += skippy
    +		}
    +	}
    +
    +	if iNdEx > l {
    +		return io.ErrUnexpectedEOF
    +	}
    +	return nil
    +}
    +func (m *NetworkPolicyPort) Unmarshal(dAtA []byte) error {
    +	l := len(dAtA)
    +	iNdEx := 0
    +	for iNdEx < l {
    +		preIndex := iNdEx
    +		var wire uint64
    +		for shift := uint(0); ; shift += 7 {
    +			if shift >= 64 {
    +				return ErrIntOverflowGenerated
    +			}
    +			if iNdEx >= l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			b := dAtA[iNdEx]
    +			iNdEx++
    +			wire |= uint64(b&0x7F) << shift
    +			if b < 0x80 {
    +				break
    +			}
    +		}
    +		fieldNum := int32(wire >> 3)
    +		wireType := int(wire & 0x7)
    +		if wireType == 4 {
    +			return fmt.Errorf("proto: NetworkPolicyPort: wiretype end group for non-group")
    +		}
    +		if fieldNum <= 0 {
    +			return fmt.Errorf("proto: NetworkPolicyPort: illegal tag %d (wire type %d)", fieldNum, wire)
    +		}
    +		switch fieldNum {
    +		case 1:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			s := k8s_io_api_core_v1.Protocol(dAtA[iNdEx:postIndex])
    +			m.Protocol = &s
    +			iNdEx = postIndex
    +		case 2:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			if m.Port == nil {
    +				m.Port = &intstr.IntOrString{}
    +			}
    +			if err := m.Port.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		case 3:
    +			if wireType != 0 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field EndPort", wireType)
    +			}
    +			var v int32
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				v |= int32(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			m.EndPort = &v
    +		default:
    +			iNdEx = preIndex
    +			skippy, err := skipGenerated(dAtA[iNdEx:])
    +			if err != nil {
    +				return err
    +			}
    +			if (skippy < 0) || (iNdEx+skippy) < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if (iNdEx + skippy) > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			iNdEx += skippy
    +		}
    +	}
    +
    +	if iNdEx > l {
    +		return io.ErrUnexpectedEOF
    +	}
    +	return nil
    +}
    +func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error {
    +	l := len(dAtA)
    +	iNdEx := 0
    +	for iNdEx < l {
    +		preIndex := iNdEx
    +		var wire uint64
    +		for shift := uint(0); ; shift += 7 {
    +			if shift >= 64 {
    +				return ErrIntOverflowGenerated
    +			}
    +			if iNdEx >= l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			b := dAtA[iNdEx]
    +			iNdEx++
    +			wire |= uint64(b&0x7F) << shift
    +			if b < 0x80 {
    +				break
    +			}
    +		}
    +		fieldNum := int32(wire >> 3)
    +		wireType := int(wire & 0x7)
    +		if wireType == 4 {
    +			return fmt.Errorf("proto: NetworkPolicySpec: wiretype end group for non-group")
    +		}
    +		if fieldNum <= 0 {
    +			return fmt.Errorf("proto: NetworkPolicySpec: illegal tag %d (wire type %d)", fieldNum, wire)
    +		}
    +		switch fieldNum {
    +		case 1:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field PodSelector", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			if err := m.PodSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		case 2:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Ingress", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.Ingress = append(m.Ingress, NetworkPolicyIngressRule{})
    +			if err := m.Ingress[len(m.Ingress)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		case 3:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Egress", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.Egress = append(m.Egress, NetworkPolicyEgressRule{})
    +			if err := m.Egress[len(m.Egress)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		case 4:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field PolicyTypes", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.PolicyTypes = append(m.PolicyTypes, PolicyType(dAtA[iNdEx:postIndex]))
    +			iNdEx = postIndex
    +		default:
    +			iNdEx = preIndex
    +			skippy, err := skipGenerated(dAtA[iNdEx:])
    +			if err != nil {
    +				return err
    +			}
    +			if (skippy < 0) || (iNdEx+skippy) < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if (iNdEx + skippy) > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			iNdEx += skippy
    +		}
    +	}
    +
    +	if iNdEx > l {
    +		return io.ErrUnexpectedEOF
    +	}
    +	return nil
    +}
    +func (m *ParentReference) Unmarshal(dAtA []byte) error {
    +	l := len(dAtA)
    +	iNdEx := 0
    +	for iNdEx < l {
    +		preIndex := iNdEx
    +		var wire uint64
    +		for shift := uint(0); ; shift += 7 {
    +			if shift >= 64 {
    +				return ErrIntOverflowGenerated
    +			}
    +			if iNdEx >= l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			b := dAtA[iNdEx]
    +			iNdEx++
    +			wire |= uint64(b&0x7F) << shift
    +			if b < 0x80 {
    +				break
    +			}
    +		}
    +		fieldNum := int32(wire >> 3)
    +		wireType := int(wire & 0x7)
    +		if wireType == 4 {
    +			return fmt.Errorf("proto: ParentReference: wiretype end group for non-group")
    +		}
    +		if fieldNum <= 0 {
    +			return fmt.Errorf("proto: ParentReference: illegal tag %d (wire type %d)", fieldNum, wire)
    +		}
    +		switch fieldNum {
    +		case 1:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.Group = string(dAtA[iNdEx:postIndex])
    +			iNdEx = postIndex
    +		case 2:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.Resource = string(dAtA[iNdEx:postIndex])
    +			iNdEx = postIndex
    +		case 3:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.Namespace = string(dAtA[iNdEx:postIndex])
    +			iNdEx = postIndex
    +		case 4:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.Name = string(dAtA[iNdEx:postIndex])
     			iNdEx = postIndex
     		default:
     			iNdEx = preIndex
    @@ -5753,7 +7655,7 @@ func (m *NetworkPolicyIngressRule) Unmarshal(dAtA []byte) error {
     	}
     	return nil
     }
    -func (m *NetworkPolicyList) Unmarshal(dAtA []byte) error {
    +func (m *ServiceBackendPort) Unmarshal(dAtA []byte) error {
     	l := len(dAtA)
     	iNdEx := 0
     	for iNdEx < l {
    @@ -5776,17 +7678,17 @@ func (m *NetworkPolicyList) Unmarshal(dAtA []byte) error {
     		fieldNum := int32(wire >> 3)
     		wireType := int(wire & 0x7)
     		if wireType == 4 {
    -			return fmt.Errorf("proto: NetworkPolicyList: wiretype end group for non-group")
    +			return fmt.Errorf("proto: ServiceBackendPort: wiretype end group for non-group")
     		}
     		if fieldNum <= 0 {
    -			return fmt.Errorf("proto: NetworkPolicyList: illegal tag %d (wire type %d)", fieldNum, wire)
    +			return fmt.Errorf("proto: ServiceBackendPort: illegal tag %d (wire type %d)", fieldNum, wire)
     		}
     		switch fieldNum {
     		case 1:
     			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
    +				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
     			}
    -			var msglen int
    +			var stringLen uint64
     			for shift := uint(0); ; shift += 7 {
     				if shift >= 64 {
     					return ErrIntOverflowGenerated
    @@ -5796,30 +7698,29 @@ func (m *NetworkPolicyList) Unmarshal(dAtA []byte) error {
     				}
     				b := dAtA[iNdEx]
     				iNdEx++
    -				msglen |= int(b&0x7F) << shift
    +				stringLen |= uint64(b&0x7F) << shift
     				if b < 0x80 {
     					break
     				}
     			}
    -			if msglen < 0 {
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
     				return ErrInvalidLengthGenerated
     			}
    -			postIndex := iNdEx + msglen
    +			postIndex := iNdEx + intStringLen
     			if postIndex < 0 {
     				return ErrInvalidLengthGenerated
     			}
     			if postIndex > l {
     				return io.ErrUnexpectedEOF
     			}
    -			if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    -				return err
    -			}
    +			m.Name = string(dAtA[iNdEx:postIndex])
     			iNdEx = postIndex
     		case 2:
    -			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
    +			if wireType != 0 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Number", wireType)
     			}
    -			var msglen int
    +			m.Number = 0
     			for shift := uint(0); ; shift += 7 {
     				if shift >= 64 {
     					return ErrIntOverflowGenerated
    @@ -5829,26 +7730,11 @@ func (m *NetworkPolicyList) Unmarshal(dAtA []byte) error {
     				}
     				b := dAtA[iNdEx]
     				iNdEx++
    -				msglen |= int(b&0x7F) << shift
    +				m.Number |= int32(b&0x7F) << shift
     				if b < 0x80 {
     					break
     				}
     			}
    -			if msglen < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			postIndex := iNdEx + msglen
    -			if postIndex < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if postIndex > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			m.Items = append(m.Items, NetworkPolicy{})
    -			if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    -				return err
    -			}
    -			iNdEx = postIndex
     		default:
     			iNdEx = preIndex
     			skippy, err := skipGenerated(dAtA[iNdEx:])
    @@ -5870,7 +7756,7 @@ func (m *NetworkPolicyList) Unmarshal(dAtA []byte) error {
     	}
     	return nil
     }
    -func (m *NetworkPolicyPeer) Unmarshal(dAtA []byte) error {
    +func (m *ServiceCIDR) Unmarshal(dAtA []byte) error {
     	l := len(dAtA)
     	iNdEx := 0
     	for iNdEx < l {
    @@ -5893,15 +7779,15 @@ func (m *NetworkPolicyPeer) Unmarshal(dAtA []byte) error {
     		fieldNum := int32(wire >> 3)
     		wireType := int(wire & 0x7)
     		if wireType == 4 {
    -			return fmt.Errorf("proto: NetworkPolicyPeer: wiretype end group for non-group")
    +			return fmt.Errorf("proto: ServiceCIDR: wiretype end group for non-group")
     		}
     		if fieldNum <= 0 {
    -			return fmt.Errorf("proto: NetworkPolicyPeer: illegal tag %d (wire type %d)", fieldNum, wire)
    +			return fmt.Errorf("proto: ServiceCIDR: illegal tag %d (wire type %d)", fieldNum, wire)
     		}
     		switch fieldNum {
     		case 1:
     			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field PodSelector", wireType)
    +				return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
     			}
     			var msglen int
     			for shift := uint(0); ; shift += 7 {
    @@ -5928,16 +7814,13 @@ func (m *NetworkPolicyPeer) Unmarshal(dAtA []byte) error {
     			if postIndex > l {
     				return io.ErrUnexpectedEOF
     			}
    -			if m.PodSelector == nil {
    -				m.PodSelector = &v1.LabelSelector{}
    -			}
    -			if err := m.PodSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +			if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
     				return err
     			}
     			iNdEx = postIndex
     		case 2:
     			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field NamespaceSelector", wireType)
    +				return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
     			}
     			var msglen int
     			for shift := uint(0); ; shift += 7 {
    @@ -5964,16 +7847,13 @@ func (m *NetworkPolicyPeer) Unmarshal(dAtA []byte) error {
     			if postIndex > l {
     				return io.ErrUnexpectedEOF
     			}
    -			if m.NamespaceSelector == nil {
    -				m.NamespaceSelector = &v1.LabelSelector{}
    -			}
    -			if err := m.NamespaceSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +			if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
     				return err
     			}
     			iNdEx = postIndex
     		case 3:
     			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field IPBlock", wireType)
    +				return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
     			}
     			var msglen int
     			for shift := uint(0); ; shift += 7 {
    @@ -6000,10 +7880,7 @@ func (m *NetworkPolicyPeer) Unmarshal(dAtA []byte) error {
     			if postIndex > l {
     				return io.ErrUnexpectedEOF
     			}
    -			if m.IPBlock == nil {
    -				m.IPBlock = &IPBlock{}
    -			}
    -			if err := m.IPBlock.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +			if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
     				return err
     			}
     			iNdEx = postIndex
    @@ -6028,7 +7905,7 @@ func (m *NetworkPolicyPeer) Unmarshal(dAtA []byte) error {
     	}
     	return nil
     }
    -func (m *NetworkPolicyPort) Unmarshal(dAtA []byte) error {
    +func (m *ServiceCIDRList) Unmarshal(dAtA []byte) error {
     	l := len(dAtA)
     	iNdEx := 0
     	for iNdEx < l {
    @@ -6051,17 +7928,17 @@ func (m *NetworkPolicyPort) Unmarshal(dAtA []byte) error {
     		fieldNum := int32(wire >> 3)
     		wireType := int(wire & 0x7)
     		if wireType == 4 {
    -			return fmt.Errorf("proto: NetworkPolicyPort: wiretype end group for non-group")
    +			return fmt.Errorf("proto: ServiceCIDRList: wiretype end group for non-group")
     		}
     		if fieldNum <= 0 {
    -			return fmt.Errorf("proto: NetworkPolicyPort: illegal tag %d (wire type %d)", fieldNum, wire)
    +			return fmt.Errorf("proto: ServiceCIDRList: illegal tag %d (wire type %d)", fieldNum, wire)
     		}
     		switch fieldNum {
     		case 1:
     			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType)
    +				return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
     			}
    -			var stringLen uint64
    +			var msglen int
     			for shift := uint(0); ; shift += 7 {
     				if shift >= 64 {
     					return ErrIntOverflowGenerated
    @@ -6071,28 +7948,28 @@ func (m *NetworkPolicyPort) Unmarshal(dAtA []byte) error {
     				}
     				b := dAtA[iNdEx]
     				iNdEx++
    -				stringLen |= uint64(b&0x7F) << shift
    +				msglen |= int(b&0x7F) << shift
     				if b < 0x80 {
     					break
     				}
     			}
    -			intStringLen := int(stringLen)
    -			if intStringLen < 0 {
    +			if msglen < 0 {
     				return ErrInvalidLengthGenerated
     			}
    -			postIndex := iNdEx + intStringLen
    +			postIndex := iNdEx + msglen
     			if postIndex < 0 {
     				return ErrInvalidLengthGenerated
     			}
     			if postIndex > l {
     				return io.ErrUnexpectedEOF
     			}
    -			s := k8s_io_api_core_v1.Protocol(dAtA[iNdEx:postIndex])
    -			m.Protocol = &s
    +			if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
     			iNdEx = postIndex
     		case 2:
     			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType)
    +				return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
     			}
     			var msglen int
     			for shift := uint(0); ; shift += 7 {
    @@ -6119,33 +7996,11 @@ func (m *NetworkPolicyPort) Unmarshal(dAtA []byte) error {
     			if postIndex > l {
     				return io.ErrUnexpectedEOF
     			}
    -			if m.Port == nil {
    -				m.Port = &intstr.IntOrString{}
    -			}
    -			if err := m.Port.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +			m.Items = append(m.Items, ServiceCIDR{})
    +			if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
     				return err
     			}
     			iNdEx = postIndex
    -		case 3:
    -			if wireType != 0 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field EndPort", wireType)
    -			}
    -			var v int32
    -			for shift := uint(0); ; shift += 7 {
    -				if shift >= 64 {
    -					return ErrIntOverflowGenerated
    -				}
    -				if iNdEx >= l {
    -					return io.ErrUnexpectedEOF
    -				}
    -				b := dAtA[iNdEx]
    -				iNdEx++
    -				v |= int32(b&0x7F) << shift
    -				if b < 0x80 {
    -					break
    -				}
    -			}
    -			m.EndPort = &v
     		default:
     			iNdEx = preIndex
     			skippy, err := skipGenerated(dAtA[iNdEx:])
    @@ -6167,7 +8022,7 @@ func (m *NetworkPolicyPort) Unmarshal(dAtA []byte) error {
     	}
     	return nil
     }
    -func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error {
    +func (m *ServiceCIDRSpec) Unmarshal(dAtA []byte) error {
     	l := len(dAtA)
     	iNdEx := 0
     	for iNdEx < l {
    @@ -6190,116 +8045,15 @@ func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error {
     		fieldNum := int32(wire >> 3)
     		wireType := int(wire & 0x7)
     		if wireType == 4 {
    -			return fmt.Errorf("proto: NetworkPolicySpec: wiretype end group for non-group")
    +			return fmt.Errorf("proto: ServiceCIDRSpec: wiretype end group for non-group")
     		}
     		if fieldNum <= 0 {
    -			return fmt.Errorf("proto: NetworkPolicySpec: illegal tag %d (wire type %d)", fieldNum, wire)
    +			return fmt.Errorf("proto: ServiceCIDRSpec: illegal tag %d (wire type %d)", fieldNum, wire)
     		}
     		switch fieldNum {
     		case 1:
     			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field PodSelector", wireType)
    -			}
    -			var msglen int
    -			for shift := uint(0); ; shift += 7 {
    -				if shift >= 64 {
    -					return ErrIntOverflowGenerated
    -				}
    -				if iNdEx >= l {
    -					return io.ErrUnexpectedEOF
    -				}
    -				b := dAtA[iNdEx]
    -				iNdEx++
    -				msglen |= int(b&0x7F) << shift
    -				if b < 0x80 {
    -					break
    -				}
    -			}
    -			if msglen < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			postIndex := iNdEx + msglen
    -			if postIndex < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if postIndex > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			if err := m.PodSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    -				return err
    -			}
    -			iNdEx = postIndex
    -		case 2:
    -			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field Ingress", wireType)
    -			}
    -			var msglen int
    -			for shift := uint(0); ; shift += 7 {
    -				if shift >= 64 {
    -					return ErrIntOverflowGenerated
    -				}
    -				if iNdEx >= l {
    -					return io.ErrUnexpectedEOF
    -				}
    -				b := dAtA[iNdEx]
    -				iNdEx++
    -				msglen |= int(b&0x7F) << shift
    -				if b < 0x80 {
    -					break
    -				}
    -			}
    -			if msglen < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			postIndex := iNdEx + msglen
    -			if postIndex < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if postIndex > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			m.Ingress = append(m.Ingress, NetworkPolicyIngressRule{})
    -			if err := m.Ingress[len(m.Ingress)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    -				return err
    -			}
    -			iNdEx = postIndex
    -		case 3:
    -			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field Egress", wireType)
    -			}
    -			var msglen int
    -			for shift := uint(0); ; shift += 7 {
    -				if shift >= 64 {
    -					return ErrIntOverflowGenerated
    -				}
    -				if iNdEx >= l {
    -					return io.ErrUnexpectedEOF
    -				}
    -				b := dAtA[iNdEx]
    -				iNdEx++
    -				msglen |= int(b&0x7F) << shift
    -				if b < 0x80 {
    -					break
    -				}
    -			}
    -			if msglen < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			postIndex := iNdEx + msglen
    -			if postIndex < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if postIndex > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			m.Egress = append(m.Egress, NetworkPolicyEgressRule{})
    -			if err := m.Egress[len(m.Egress)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    -				return err
    -			}
    -			iNdEx = postIndex
    -		case 4:
    -			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field PolicyTypes", wireType)
    +				return fmt.Errorf("proto: wrong wireType = %d for field CIDRs", wireType)
     			}
     			var stringLen uint64
     			for shift := uint(0); ; shift += 7 {
    @@ -6327,7 +8081,7 @@ func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error {
     			if postIndex > l {
     				return io.ErrUnexpectedEOF
     			}
    -			m.PolicyTypes = append(m.PolicyTypes, PolicyType(dAtA[iNdEx:postIndex]))
    +			m.CIDRs = append(m.CIDRs, string(dAtA[iNdEx:postIndex]))
     			iNdEx = postIndex
     		default:
     			iNdEx = preIndex
    @@ -6350,7 +8104,7 @@ func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error {
     	}
     	return nil
     }
    -func (m *ServiceBackendPort) Unmarshal(dAtA []byte) error {
    +func (m *ServiceCIDRStatus) Unmarshal(dAtA []byte) error {
     	l := len(dAtA)
     	iNdEx := 0
     	for iNdEx < l {
    @@ -6373,17 +8127,17 @@ func (m *ServiceBackendPort) Unmarshal(dAtA []byte) error {
     		fieldNum := int32(wire >> 3)
     		wireType := int(wire & 0x7)
     		if wireType == 4 {
    -			return fmt.Errorf("proto: ServiceBackendPort: wiretype end group for non-group")
    +			return fmt.Errorf("proto: ServiceCIDRStatus: wiretype end group for non-group")
     		}
     		if fieldNum <= 0 {
    -			return fmt.Errorf("proto: ServiceBackendPort: illegal tag %d (wire type %d)", fieldNum, wire)
    +			return fmt.Errorf("proto: ServiceCIDRStatus: illegal tag %d (wire type %d)", fieldNum, wire)
     		}
     		switch fieldNum {
     		case 1:
     			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
    +				return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType)
     			}
    -			var stringLen uint64
    +			var msglen int
     			for shift := uint(0); ; shift += 7 {
     				if shift >= 64 {
     					return ErrIntOverflowGenerated
    @@ -6393,43 +8147,26 @@ func (m *ServiceBackendPort) Unmarshal(dAtA []byte) error {
     				}
     				b := dAtA[iNdEx]
     				iNdEx++
    -				stringLen |= uint64(b&0x7F) << shift
    +				msglen |= int(b&0x7F) << shift
     				if b < 0x80 {
     					break
     				}
     			}
    -			intStringLen := int(stringLen)
    -			if intStringLen < 0 {
    +			if msglen < 0 {
     				return ErrInvalidLengthGenerated
     			}
    -			postIndex := iNdEx + intStringLen
    +			postIndex := iNdEx + msglen
     			if postIndex < 0 {
     				return ErrInvalidLengthGenerated
     			}
     			if postIndex > l {
     				return io.ErrUnexpectedEOF
     			}
    -			m.Name = string(dAtA[iNdEx:postIndex])
    -			iNdEx = postIndex
    -		case 2:
    -			if wireType != 0 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field Number", wireType)
    -			}
    -			m.Number = 0
    -			for shift := uint(0); ; shift += 7 {
    -				if shift >= 64 {
    -					return ErrIntOverflowGenerated
    -				}
    -				if iNdEx >= l {
    -					return io.ErrUnexpectedEOF
    -				}
    -				b := dAtA[iNdEx]
    -				iNdEx++
    -				m.Number |= int32(b&0x7F) << shift
    -				if b < 0x80 {
    -					break
    -				}
    +			m.Conditions = append(m.Conditions, v1.Condition{})
    +			if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
     			}
    +			iNdEx = postIndex
     		default:
     			iNdEx = preIndex
     			skippy, err := skipGenerated(dAtA[iNdEx:])
    diff --git a/vendor/k8s.io/api/networking/v1/generated.proto b/vendor/k8s.io/api/networking/v1/generated.proto
    index c72fdc8f3..16a2792aa 100644
    --- a/vendor/k8s.io/api/networking/v1/generated.proto
    +++ b/vendor/k8s.io/api/networking/v1/generated.proto
    @@ -72,6 +72,44 @@ message HTTPIngressRuleValue {
       repeated HTTPIngressPath paths = 1;
     }
     
    +// IPAddress represents a single IP of a single IP Family. The object is designed to be used by APIs
    +// that operate on IP addresses. The object is used by the Service core API for allocation of IP addresses.
    +// An IP address can be represented in different formats, to guarantee the uniqueness of the IP,
    +// the name of the object is the IP address in canonical format, four decimal digits separated
    +// by dots suppressing leading zeros for IPv4 and the representation defined by RFC 5952 for IPv6.
    +// Valid: 192.168.1.5 or 2001:db8::1 or 2001:db8:aaaa:bbbb:cccc:dddd:eeee:1
    +// Invalid: 10.01.2.3 or 2001:db8:0:0:0::1
    +message IPAddress {
    +  // Standard object's metadata.
    +  // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
    +  // +optional
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
    +
    +  // spec is the desired state of the IPAddress.
    +  // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
    +  // +optional
    +  optional IPAddressSpec spec = 2;
    +}
    +
    +// IPAddressList contains a list of IPAddress.
    +message IPAddressList {
    +  // Standard object's metadata.
    +  // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
    +  // +optional
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
    +
    +  // items is the list of IPAddresses.
    +  repeated IPAddress items = 2;
    +}
    +
    +// IPAddressSpec describe the attributes in an IP Address.
    +message IPAddressSpec {
    +  // ParentRef references the resource that an IPAddress is attached to.
    +  // An IPAddress must reference a parent object.
    +  // +required
    +  optional ParentReference parentRef = 1;
    +}
    +
     // IPBlock describes a particular CIDR (Ex. "192.168.1.0/24","2001:db8::/64") that is allowed
     // to the pods matched by a NetworkPolicySpec's podSelector. The except entry describes CIDRs
     // that should not be included within this rule.
    @@ -496,11 +534,12 @@ message NetworkPolicyPort {
     // NetworkPolicySpec provides the specification of a NetworkPolicy
     message NetworkPolicySpec {
       // podSelector selects the pods to which this NetworkPolicy object applies.
    -  // The array of ingress rules is applied to any pods selected by this field.
    +  // The array of rules is applied to any pods selected by this field. An empty
    +  // selector matches all pods in the policy's namespace.
       // Multiple network policies can select the same set of pods. In this case,
       // the ingress rules for each are combined additively.
    -  // This field is NOT optional and follows standard label selector semantics.
    -  // An empty podSelector matches all pods in this namespace.
    +  // This field is optional. If it is not specified, it defaults to an empty selector.
    +  // +optional
       optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector podSelector = 1;
     
       // ingress is a list of ingress rules to be applied to the selected pods.
    @@ -540,6 +579,25 @@ message NetworkPolicySpec {
       repeated string policyTypes = 4;
     }
     
    +// ParentReference describes a reference to a parent object.
    +message ParentReference {
    +  // Group is the group of the object being referenced.
    +  // +optional
    +  optional string group = 1;
    +
    +  // Resource is the resource of the object being referenced.
    +  // +required
    +  optional string resource = 2;
    +
    +  // Namespace is the namespace of the object being referenced.
    +  // +optional
    +  optional string namespace = 3;
    +
    +  // Name is the name of the object being referenced.
    +  // +required
    +  optional string name = 4;
    +}
    +
     // ServiceBackendPort is the service port being referenced.
     // +structType=atomic
     message ServiceBackendPort {
    @@ -554,3 +612,55 @@ message ServiceBackendPort {
       optional int32 number = 2;
     }
     
    +// ServiceCIDR defines a range of IP addresses using CIDR format (e.g. 192.168.0.0/24 or 2001:db2::/64).
    +// This range is used to allocate ClusterIPs to Service objects.
    +message ServiceCIDR {
    +  // Standard object's metadata.
    +  // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
    +  // +optional
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
    +
    +  // spec is the desired state of the ServiceCIDR.
    +  // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
    +  // +optional
    +  optional ServiceCIDRSpec spec = 2;
    +
    +  // status represents the current state of the ServiceCIDR.
    +  // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
    +  // +optional
    +  optional ServiceCIDRStatus status = 3;
    +}
    +
    +// ServiceCIDRList contains a list of ServiceCIDR objects.
    +message ServiceCIDRList {
    +  // Standard object's metadata.
    +  // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
    +  // +optional
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
    +
    +  // items is the list of ServiceCIDRs.
    +  repeated ServiceCIDR items = 2;
    +}
    +
    +// ServiceCIDRSpec define the CIDRs the user wants to use for allocating ClusterIPs for Services.
    +message ServiceCIDRSpec {
    +  // CIDRs defines the IP blocks in CIDR notation (e.g. "192.168.0.0/24" or "2001:db8::/64")
    +  // from which to assign service cluster IPs. Max of two CIDRs is allowed, one of each IP family.
    +  // This field is immutable.
    +  // +optional
    +  // +listType=atomic
    +  repeated string cidrs = 1;
    +}
    +
    +// ServiceCIDRStatus describes the current state of the ServiceCIDR.
    +message ServiceCIDRStatus {
    +  // conditions holds an array of metav1.Condition that describe the state of the ServiceCIDR.
    +  // Current service state
    +  // +optional
    +  // +patchMergeKey=type
    +  // +patchStrategy=merge
    +  // +listType=map
    +  // +listMapKey=type
    +  repeated .k8s.io.apimachinery.pkg.apis.meta.v1.Condition conditions = 1;
    +}
    +
    diff --git a/vendor/k8s.io/api/networking/v1/register.go b/vendor/k8s.io/api/networking/v1/register.go
    index a200d5437..b9bdcb78c 100644
    --- a/vendor/k8s.io/api/networking/v1/register.go
    +++ b/vendor/k8s.io/api/networking/v1/register.go
    @@ -50,6 +50,10 @@ func addKnownTypes(scheme *runtime.Scheme) error {
     		&IngressClassList{},
     		&NetworkPolicy{},
     		&NetworkPolicyList{},
    +		&IPAddress{},
    +		&IPAddressList{},
    +		&ServiceCIDR{},
    +		&ServiceCIDRList{},
     	)
     
     	metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
    diff --git a/vendor/k8s.io/api/networking/v1/types.go b/vendor/k8s.io/api/networking/v1/types.go
    index d75e27558..7d9a4fc94 100644
    --- a/vendor/k8s.io/api/networking/v1/types.go
    +++ b/vendor/k8s.io/api/networking/v1/types.go
    @@ -60,11 +60,12 @@ const (
     // NetworkPolicySpec provides the specification of a NetworkPolicy
     type NetworkPolicySpec struct {
     	// podSelector selects the pods to which this NetworkPolicy object applies.
    -	// The array of ingress rules is applied to any pods selected by this field.
    +	// The array of rules is applied to any pods selected by this field. An empty
    +	// selector matches all pods in the policy's namespace.
     	// Multiple network policies can select the same set of pods. In this case,
     	// the ingress rules for each are combined additively.
    -	// This field is NOT optional and follows standard label selector semantics.
    -	// An empty podSelector matches all pods in this namespace.
    +	// This field is optional. If it is not specified, it defaults to an empty selector.
    +	// +optional
     	PodSelector metav1.LabelSelector `json:"podSelector" protobuf:"bytes,1,opt,name=podSelector"`
     
     	// ingress is a list of ingress rules to be applied to the selected pods.
    @@ -635,3 +636,133 @@ type IngressClassList struct {
     	// items is the list of IngressClasses.
     	Items []IngressClass `json:"items" protobuf:"bytes,2,rep,name=items"`
     }
    +
    +// +genclient
    +// +genclient:nonNamespaced
    +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    +// +k8s:prerelease-lifecycle-gen:introduced=1.33
    +
    +// IPAddress represents a single IP of a single IP Family. The object is designed to be used by APIs
    +// that operate on IP addresses. The object is used by the Service core API for allocation of IP addresses.
    +// An IP address can be represented in different formats, to guarantee the uniqueness of the IP,
    +// the name of the object is the IP address in canonical format, four decimal digits separated
    +// by dots suppressing leading zeros for IPv4 and the representation defined by RFC 5952 for IPv6.
    +// Valid: 192.168.1.5 or 2001:db8::1 or 2001:db8:aaaa:bbbb:cccc:dddd:eeee:1
    +// Invalid: 10.01.2.3 or 2001:db8:0:0:0::1
    +type IPAddress struct {
    +	metav1.TypeMeta `json:",inline"`
    +	// Standard object's metadata.
    +	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
    +	// +optional
    +	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
    +	// spec is the desired state of the IPAddress.
    +	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
    +	// +optional
    +	Spec IPAddressSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
    +}
    +
    +// IPAddressSpec describe the attributes in an IP Address.
    +type IPAddressSpec struct {
    +	// ParentRef references the resource that an IPAddress is attached to.
    +	// An IPAddress must reference a parent object.
    +	// +required
    +	ParentRef *ParentReference `json:"parentRef,omitempty" protobuf:"bytes,1,opt,name=parentRef"`
    +}
    +
    +// ParentReference describes a reference to a parent object.
    +type ParentReference struct {
    +	// Group is the group of the object being referenced.
    +	// +optional
    +	Group string `json:"group,omitempty" protobuf:"bytes,1,opt,name=group"`
    +	// Resource is the resource of the object being referenced.
    +	// +required
    +	Resource string `json:"resource,omitempty" protobuf:"bytes,2,opt,name=resource"`
    +	// Namespace is the namespace of the object being referenced.
    +	// +optional
    +	Namespace string `json:"namespace,omitempty" protobuf:"bytes,3,opt,name=namespace"`
    +	// Name is the name of the object being referenced.
    +	// +required
    +	Name string `json:"name,omitempty" protobuf:"bytes,4,opt,name=name"`
    +}
    +
    +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    +// +k8s:prerelease-lifecycle-gen:introduced=1.33
    +
    +// IPAddressList contains a list of IPAddress.
    +type IPAddressList struct {
    +	metav1.TypeMeta `json:",inline"`
    +	// Standard object's metadata.
    +	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
    +	// +optional
    +	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
    +	// items is the list of IPAddresses.
    +	Items []IPAddress `json:"items" protobuf:"bytes,2,rep,name=items"`
    +}
    +
    +// +genclient
    +// +genclient:nonNamespaced
    +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    +// +k8s:prerelease-lifecycle-gen:introduced=1.33
    +
    +// ServiceCIDR defines a range of IP addresses using CIDR format (e.g. 192.168.0.0/24 or 2001:db2::/64).
    +// This range is used to allocate ClusterIPs to Service objects.
    +type ServiceCIDR struct {
    +	metav1.TypeMeta `json:",inline"`
    +	// Standard object's metadata.
    +	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
    +	// +optional
    +	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
    +	// spec is the desired state of the ServiceCIDR.
    +	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
    +	// +optional
    +	Spec ServiceCIDRSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
    +	// status represents the current state of the ServiceCIDR.
    +	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
    +	// +optional
    +	Status ServiceCIDRStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
    +}
    +
    +// ServiceCIDRSpec define the CIDRs the user wants to use for allocating ClusterIPs for Services.
    +type ServiceCIDRSpec struct {
    +	// CIDRs defines the IP blocks in CIDR notation (e.g. "192.168.0.0/24" or "2001:db8::/64")
    +	// from which to assign service cluster IPs. Max of two CIDRs is allowed, one of each IP family.
    +	// This field is immutable.
    +	// +optional
    +	// +listType=atomic
    +	CIDRs []string `json:"cidrs,omitempty" protobuf:"bytes,1,opt,name=cidrs"`
    +}
    +
    +const (
    +	// ServiceCIDRConditionReady represents status of a ServiceCIDR that is ready to be used by the
    +	// apiserver to allocate ClusterIPs for Services.
    +	ServiceCIDRConditionReady = "Ready"
    +	// ServiceCIDRReasonTerminating represents a reason where a ServiceCIDR is not ready because it is
    +	// being deleted.
    +	ServiceCIDRReasonTerminating = "Terminating"
    +)
    +
    +// ServiceCIDRStatus describes the current state of the ServiceCIDR.
    +type ServiceCIDRStatus struct {
    +	// conditions holds an array of metav1.Condition that describe the state of the ServiceCIDR.
    +	// Current service state
    +	// +optional
    +	// +patchMergeKey=type
    +	// +patchStrategy=merge
    +	// +listType=map
    +	// +listMapKey=type
    +	Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"`
    +}
    +
    +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    +// +k8s:prerelease-lifecycle-gen:introduced=1.33
    +
    +// ServiceCIDRList contains a list of ServiceCIDR objects.
    +type ServiceCIDRList struct {
    +	metav1.TypeMeta `json:",inline"`
    +	// Standard object's metadata.
    +	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
    +	// +optional
    +	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
    +	// items is the list of ServiceCIDRs.
    +	Items []ServiceCIDR `json:"items" protobuf:"bytes,2,rep,name=items"`
    +}
    diff --git a/vendor/k8s.io/api/networking/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/networking/v1/types_swagger_doc_generated.go
    index ff080540d..6210bb7a5 100644
    --- a/vendor/k8s.io/api/networking/v1/types_swagger_doc_generated.go
    +++ b/vendor/k8s.io/api/networking/v1/types_swagger_doc_generated.go
    @@ -47,6 +47,35 @@ func (HTTPIngressRuleValue) SwaggerDoc() map[string]string {
     	return map_HTTPIngressRuleValue
     }
     
    +var map_IPAddress = map[string]string{
    +	"":         "IPAddress represents a single IP of a single IP Family. The object is designed to be used by APIs that operate on IP addresses. The object is used by the Service core API for allocation of IP addresses. An IP address can be represented in different formats, to guarantee the uniqueness of the IP, the name of the object is the IP address in canonical format, four decimal digits separated by dots suppressing leading zeros for IPv4 and the representation defined by RFC 5952 for IPv6. Valid: 192.168.1.5 or 2001:db8::1 or 2001:db8:aaaa:bbbb:cccc:dddd:eeee:1 Invalid: 10.01.2.3 or 2001:db8:0:0:0::1",
    +	"metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
    +	"spec":     "spec is the desired state of the IPAddress. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status",
    +}
    +
    +func (IPAddress) SwaggerDoc() map[string]string {
    +	return map_IPAddress
    +}
    +
    +var map_IPAddressList = map[string]string{
    +	"":         "IPAddressList contains a list of IPAddress.",
    +	"metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
    +	"items":    "items is the list of IPAddresses.",
    +}
    +
    +func (IPAddressList) SwaggerDoc() map[string]string {
    +	return map_IPAddressList
    +}
    +
    +var map_IPAddressSpec = map[string]string{
    +	"":          "IPAddressSpec describe the attributes in an IP Address.",
    +	"parentRef": "ParentRef references the resource that an IPAddress is attached to. An IPAddress must reference a parent object.",
    +}
    +
    +func (IPAddressSpec) SwaggerDoc() map[string]string {
    +	return map_IPAddressSpec
    +}
    +
     var map_IPBlock = map[string]string{
     	"":       "IPBlock describes a particular CIDR (Ex. \"192.168.1.0/24\",\"2001:db8::/64\") that is allowed to the pods matched by a NetworkPolicySpec's podSelector. The except entry describes CIDRs that should not be included within this rule.",
     	"cidr":   "cidr is a string representing the IPBlock Valid examples are \"192.168.1.0/24\" or \"2001:db8::/64\"",
    @@ -284,7 +313,7 @@ func (NetworkPolicyPort) SwaggerDoc() map[string]string {
     
     var map_NetworkPolicySpec = map[string]string{
     	"":            "NetworkPolicySpec provides the specification of a NetworkPolicy",
    -	"podSelector": "podSelector selects the pods to which this NetworkPolicy object applies. The array of ingress rules is applied to any pods selected by this field. Multiple network policies can select the same set of pods. In this case, the ingress rules for each are combined additively. This field is NOT optional and follows standard label selector semantics. An empty podSelector matches all pods in this namespace.",
    +	"podSelector": "podSelector selects the pods to which this NetworkPolicy object applies. The array of rules is applied to any pods selected by this field. An empty selector matches all pods in the policy's namespace. Multiple network policies can select the same set of pods. In this case, the ingress rules for each are combined additively. This field is optional. If it is not specified, it defaults to an empty selector.",
     	"ingress":     "ingress is a list of ingress rules to be applied to the selected pods. Traffic is allowed to a pod if there are no NetworkPolicies selecting the pod (and cluster policy otherwise allows the traffic), OR if the traffic source is the pod's local node, OR if the traffic matches at least one ingress rule across all of the NetworkPolicy objects whose podSelector matches the pod. If this field is empty then this NetworkPolicy does not allow any traffic (and serves solely to ensure that the pods it selects are isolated by default)",
     	"egress":      "egress is a list of egress rules to be applied to the selected pods. Outgoing traffic is allowed if there are no NetworkPolicies selecting the pod (and cluster policy otherwise allows the traffic), OR if the traffic matches at least one egress rule across all of the NetworkPolicy objects whose podSelector matches the pod. If this field is empty then this NetworkPolicy limits all outgoing traffic (and serves solely to ensure that the pods it selects are isolated by default). This field is beta-level in 1.8",
     	"policyTypes": "policyTypes is a list of rule types that the NetworkPolicy relates to. Valid options are [\"Ingress\"], [\"Egress\"], or [\"Ingress\", \"Egress\"]. If this field is not specified, it will default based on the existence of ingress or egress rules; policies that contain an egress section are assumed to affect egress, and all policies (whether or not they contain an ingress section) are assumed to affect ingress. If you want to write an egress-only policy, you must explicitly specify policyTypes [ \"Egress\" ]. Likewise, if you want to write a policy that specifies that no egress is allowed, you must specify a policyTypes value that include \"Egress\" (since such a policy would not include an egress section and would otherwise default to just [ \"Ingress\" ]). This field is beta-level in 1.8",
    @@ -294,6 +323,18 @@ func (NetworkPolicySpec) SwaggerDoc() map[string]string {
     	return map_NetworkPolicySpec
     }
     
    +var map_ParentReference = map[string]string{
    +	"":          "ParentReference describes a reference to a parent object.",
    +	"group":     "Group is the group of the object being referenced.",
    +	"resource":  "Resource is the resource of the object being referenced.",
    +	"namespace": "Namespace is the namespace of the object being referenced.",
    +	"name":      "Name is the name of the object being referenced.",
    +}
    +
    +func (ParentReference) SwaggerDoc() map[string]string {
    +	return map_ParentReference
    +}
    +
     var map_ServiceBackendPort = map[string]string{
     	"":       "ServiceBackendPort is the service port being referenced.",
     	"name":   "name is the name of the port on the Service. This is a mutually exclusive setting with \"Number\".",
    @@ -304,4 +345,43 @@ func (ServiceBackendPort) SwaggerDoc() map[string]string {
     	return map_ServiceBackendPort
     }
     
    +var map_ServiceCIDR = map[string]string{
    +	"":         "ServiceCIDR defines a range of IP addresses using CIDR format (e.g. 192.168.0.0/24 or 2001:db2::/64). This range is used to allocate ClusterIPs to Service objects.",
    +	"metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
    +	"spec":     "spec is the desired state of the ServiceCIDR. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status",
    +	"status":   "status represents the current state of the ServiceCIDR. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status",
    +}
    +
    +func (ServiceCIDR) SwaggerDoc() map[string]string {
    +	return map_ServiceCIDR
    +}
    +
    +var map_ServiceCIDRList = map[string]string{
    +	"":         "ServiceCIDRList contains a list of ServiceCIDR objects.",
    +	"metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
    +	"items":    "items is the list of ServiceCIDRs.",
    +}
    +
    +func (ServiceCIDRList) SwaggerDoc() map[string]string {
    +	return map_ServiceCIDRList
    +}
    +
    +var map_ServiceCIDRSpec = map[string]string{
    +	"":      "ServiceCIDRSpec define the CIDRs the user wants to use for allocating ClusterIPs for Services.",
    +	"cidrs": "CIDRs defines the IP blocks in CIDR notation (e.g. \"192.168.0.0/24\" or \"2001:db8::/64\") from which to assign service cluster IPs. Max of two CIDRs is allowed, one of each IP family. This field is immutable.",
    +}
    +
    +func (ServiceCIDRSpec) SwaggerDoc() map[string]string {
    +	return map_ServiceCIDRSpec
    +}
    +
    +var map_ServiceCIDRStatus = map[string]string{
    +	"":           "ServiceCIDRStatus describes the current state of the ServiceCIDR.",
    +	"conditions": "conditions holds an array of metav1.Condition that describe the state of the ServiceCIDR. Current service state",
    +}
    +
    +func (ServiceCIDRStatus) SwaggerDoc() map[string]string {
    +	return map_ServiceCIDRStatus
    +}
    +
     // AUTO-GENERATED FUNCTIONS END HERE
    diff --git a/vendor/k8s.io/api/networking/v1alpha1/well_known_labels.go b/vendor/k8s.io/api/networking/v1/well_known_labels.go
    similarity index 98%
    rename from vendor/k8s.io/api/networking/v1alpha1/well_known_labels.go
    rename to vendor/k8s.io/api/networking/v1/well_known_labels.go
    index 5f9c23f70..28e2e8f3f 100644
    --- a/vendor/k8s.io/api/networking/v1alpha1/well_known_labels.go
    +++ b/vendor/k8s.io/api/networking/v1/well_known_labels.go
    @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
     limitations under the License.
     */
     
    -package v1alpha1
    +package v1
     
     const (
     
    diff --git a/vendor/k8s.io/api/networking/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/networking/v1/zz_generated.deepcopy.go
    index 540873833..9ce6435a4 100644
    --- a/vendor/k8s.io/api/networking/v1/zz_generated.deepcopy.go
    +++ b/vendor/k8s.io/api/networking/v1/zz_generated.deepcopy.go
    @@ -73,6 +73,87 @@ func (in *HTTPIngressRuleValue) DeepCopy() *HTTPIngressRuleValue {
     	return out
     }
     
    +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
    +func (in *IPAddress) DeepCopyInto(out *IPAddress) {
    +	*out = *in
    +	out.TypeMeta = in.TypeMeta
    +	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
    +	in.Spec.DeepCopyInto(&out.Spec)
    +	return
    +}
    +
    +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPAddress.
    +func (in *IPAddress) DeepCopy() *IPAddress {
    +	if in == nil {
    +		return nil
    +	}
    +	out := new(IPAddress)
    +	in.DeepCopyInto(out)
    +	return out
    +}
    +
    +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
    +func (in *IPAddress) DeepCopyObject() runtime.Object {
    +	if c := in.DeepCopy(); c != nil {
    +		return c
    +	}
    +	return nil
    +}
    +
    +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
    +func (in *IPAddressList) DeepCopyInto(out *IPAddressList) {
    +	*out = *in
    +	out.TypeMeta = in.TypeMeta
    +	in.ListMeta.DeepCopyInto(&out.ListMeta)
    +	if in.Items != nil {
    +		in, out := &in.Items, &out.Items
    +		*out = make([]IPAddress, len(*in))
    +		for i := range *in {
    +			(*in)[i].DeepCopyInto(&(*out)[i])
    +		}
    +	}
    +	return
    +}
    +
    +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPAddressList.
    +func (in *IPAddressList) DeepCopy() *IPAddressList {
    +	if in == nil {
    +		return nil
    +	}
    +	out := new(IPAddressList)
    +	in.DeepCopyInto(out)
    +	return out
    +}
    +
    +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
    +func (in *IPAddressList) DeepCopyObject() runtime.Object {
    +	if c := in.DeepCopy(); c != nil {
    +		return c
    +	}
    +	return nil
    +}
    +
    +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
    +func (in *IPAddressSpec) DeepCopyInto(out *IPAddressSpec) {
    +	*out = *in
    +	if in.ParentRef != nil {
    +		in, out := &in.ParentRef, &out.ParentRef
    +		*out = new(ParentReference)
    +		**out = **in
    +	}
    +	return
    +}
    +
    +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPAddressSpec.
    +func (in *IPAddressSpec) DeepCopy() *IPAddressSpec {
    +	if in == nil {
    +		return nil
    +	}
    +	out := new(IPAddressSpec)
    +	in.DeepCopyInto(out)
    +	return out
    +}
    +
     // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
     func (in *IPBlock) DeepCopyInto(out *IPBlock) {
     	*out = *in
    @@ -711,6 +792,22 @@ func (in *NetworkPolicySpec) DeepCopy() *NetworkPolicySpec {
     	return out
     }
     
    +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
    +func (in *ParentReference) DeepCopyInto(out *ParentReference) {
    +	*out = *in
    +	return
    +}
    +
    +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ParentReference.
    +func (in *ParentReference) DeepCopy() *ParentReference {
    +	if in == nil {
    +		return nil
    +	}
    +	out := new(ParentReference)
    +	in.DeepCopyInto(out)
    +	return out
    +}
    +
     // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
     func (in *ServiceBackendPort) DeepCopyInto(out *ServiceBackendPort) {
     	*out = *in
    @@ -726,3 +823,108 @@ func (in *ServiceBackendPort) DeepCopy() *ServiceBackendPort {
     	in.DeepCopyInto(out)
     	return out
     }
    +
    +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
    +func (in *ServiceCIDR) DeepCopyInto(out *ServiceCIDR) {
    +	*out = *in
    +	out.TypeMeta = in.TypeMeta
    +	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
    +	in.Spec.DeepCopyInto(&out.Spec)
    +	in.Status.DeepCopyInto(&out.Status)
    +	return
    +}
    +
    +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCIDR.
    +func (in *ServiceCIDR) DeepCopy() *ServiceCIDR {
    +	if in == nil {
    +		return nil
    +	}
    +	out := new(ServiceCIDR)
    +	in.DeepCopyInto(out)
    +	return out
    +}
    +
    +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
    +func (in *ServiceCIDR) DeepCopyObject() runtime.Object {
    +	if c := in.DeepCopy(); c != nil {
    +		return c
    +	}
    +	return nil
    +}
    +
    +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
    +func (in *ServiceCIDRList) DeepCopyInto(out *ServiceCIDRList) {
    +	*out = *in
    +	out.TypeMeta = in.TypeMeta
    +	in.ListMeta.DeepCopyInto(&out.ListMeta)
    +	if in.Items != nil {
    +		in, out := &in.Items, &out.Items
    +		*out = make([]ServiceCIDR, len(*in))
    +		for i := range *in {
    +			(*in)[i].DeepCopyInto(&(*out)[i])
    +		}
    +	}
    +	return
    +}
    +
    +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCIDRList.
    +func (in *ServiceCIDRList) DeepCopy() *ServiceCIDRList {
    +	if in == nil {
    +		return nil
    +	}
    +	out := new(ServiceCIDRList)
    +	in.DeepCopyInto(out)
    +	return out
    +}
    +
    +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
    +func (in *ServiceCIDRList) DeepCopyObject() runtime.Object {
    +	if c := in.DeepCopy(); c != nil {
    +		return c
    +	}
    +	return nil
    +}
    +
    +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
    +func (in *ServiceCIDRSpec) DeepCopyInto(out *ServiceCIDRSpec) {
    +	*out = *in
    +	if in.CIDRs != nil {
    +		in, out := &in.CIDRs, &out.CIDRs
    +		*out = make([]string, len(*in))
    +		copy(*out, *in)
    +	}
    +	return
    +}
    +
    +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCIDRSpec.
    +func (in *ServiceCIDRSpec) DeepCopy() *ServiceCIDRSpec {
    +	if in == nil {
    +		return nil
    +	}
    +	out := new(ServiceCIDRSpec)
    +	in.DeepCopyInto(out)
    +	return out
    +}
    +
    +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
    +func (in *ServiceCIDRStatus) DeepCopyInto(out *ServiceCIDRStatus) {
    +	*out = *in
    +	if in.Conditions != nil {
    +		in, out := &in.Conditions, &out.Conditions
    +		*out = make([]metav1.Condition, len(*in))
    +		for i := range *in {
    +			(*in)[i].DeepCopyInto(&(*out)[i])
    +		}
    +	}
    +	return
    +}
    +
    +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCIDRStatus.
    +func (in *ServiceCIDRStatus) DeepCopy() *ServiceCIDRStatus {
    +	if in == nil {
    +		return nil
    +	}
    +	out := new(ServiceCIDRStatus)
    +	in.DeepCopyInto(out)
    +	return out
    +}
    diff --git a/vendor/k8s.io/api/networking/v1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/networking/v1/zz_generated.prerelease-lifecycle.go
    index 21e8c671a..6894d8c53 100644
    --- a/vendor/k8s.io/api/networking/v1/zz_generated.prerelease-lifecycle.go
    +++ b/vendor/k8s.io/api/networking/v1/zz_generated.prerelease-lifecycle.go
    @@ -21,6 +21,18 @@ limitations under the License.
     
     package v1
     
    +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
    +func (in *IPAddress) APILifecycleIntroduced() (major, minor int) {
    +	return 1, 33
    +}
    +
    +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
    +func (in *IPAddressList) APILifecycleIntroduced() (major, minor int) {
    +	return 1, 33
    +}
    +
     // APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
     // It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
     func (in *Ingress) APILifecycleIntroduced() (major, minor int) {
    @@ -56,3 +68,15 @@ func (in *NetworkPolicy) APILifecycleIntroduced() (major, minor int) {
     func (in *NetworkPolicyList) APILifecycleIntroduced() (major, minor int) {
     	return 1, 19
     }
    +
    +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
    +func (in *ServiceCIDR) APILifecycleIntroduced() (major, minor int) {
    +	return 1, 33
    +}
    +
    +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
    +func (in *ServiceCIDRList) APILifecycleIntroduced() (major, minor int) {
    +	return 1, 33
    +}
    diff --git a/vendor/k8s.io/api/networking/v1alpha1/doc.go b/vendor/k8s.io/api/networking/v1alpha1/doc.go
    deleted file mode 100644
    index 3827b0418..000000000
    --- a/vendor/k8s.io/api/networking/v1alpha1/doc.go
    +++ /dev/null
    @@ -1,23 +0,0 @@
    -/*
    -Copyright 2022 The Kubernetes Authors.
    -
    -Licensed under the Apache License, Version 2.0 (the "License");
    -you may not use this file except in compliance with the License.
    -You may obtain a copy of the License at
    -
    -    http://www.apache.org/licenses/LICENSE-2.0
    -
    -Unless required by applicable law or agreed to in writing, software
    -distributed under the License is distributed on an "AS IS" BASIS,
    -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    -See the License for the specific language governing permissions and
    -limitations under the License.
    -*/
    -
    -// +k8s:deepcopy-gen=package
    -// +k8s:protobuf-gen=package
    -// +k8s:openapi-gen=true
    -// +k8s:prerelease-lifecycle-gen=true
    -// +groupName=networking.k8s.io
    -
    -package v1alpha1 // import "k8s.io/api/networking/v1alpha1"
    diff --git a/vendor/k8s.io/api/networking/v1alpha1/generated.pb.go b/vendor/k8s.io/api/networking/v1alpha1/generated.pb.go
    deleted file mode 100644
    index 0d4203483..000000000
    --- a/vendor/k8s.io/api/networking/v1alpha1/generated.pb.go
    +++ /dev/null
    @@ -1,1929 +0,0 @@
    -/*
    -Copyright The Kubernetes Authors.
    -
    -Licensed under the Apache License, Version 2.0 (the "License");
    -you may not use this file except in compliance with the License.
    -You may obtain a copy of the License at
    -
    -    http://www.apache.org/licenses/LICENSE-2.0
    -
    -Unless required by applicable law or agreed to in writing, software
    -distributed under the License is distributed on an "AS IS" BASIS,
    -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    -See the License for the specific language governing permissions and
    -limitations under the License.
    -*/
    -
    -// Code generated by protoc-gen-gogo. DO NOT EDIT.
    -// source: k8s.io/api/networking/v1alpha1/generated.proto
    -
    -package v1alpha1
    -
    -import (
    -	fmt "fmt"
    -
    -	io "io"
    -
    -	proto "github.com/gogo/protobuf/proto"
    -	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    -
    -	math "math"
    -	math_bits "math/bits"
    -	reflect "reflect"
    -	strings "strings"
    -)
    -
    -// Reference imports to suppress errors if they are not otherwise used.
    -var _ = proto.Marshal
    -var _ = fmt.Errorf
    -var _ = math.Inf
    -
    -// This is a compile-time assertion to ensure that this generated file
    -// is compatible with the proto package it is being compiled against.
    -// A compilation error at this line likely means your copy of the
    -// proto package needs to be updated.
    -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
    -
    -func (m *IPAddress) Reset()      { *m = IPAddress{} }
    -func (*IPAddress) ProtoMessage() {}
    -func (*IPAddress) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_c1cb39e7b48ce50d, []int{0}
    -}
    -func (m *IPAddress) XXX_Unmarshal(b []byte) error {
    -	return m.Unmarshal(b)
    -}
    -func (m *IPAddress) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    -	b = b[:cap(b)]
    -	n, err := m.MarshalToSizedBuffer(b)
    -	if err != nil {
    -		return nil, err
    -	}
    -	return b[:n], nil
    -}
    -func (m *IPAddress) XXX_Merge(src proto.Message) {
    -	xxx_messageInfo_IPAddress.Merge(m, src)
    -}
    -func (m *IPAddress) XXX_Size() int {
    -	return m.Size()
    -}
    -func (m *IPAddress) XXX_DiscardUnknown() {
    -	xxx_messageInfo_IPAddress.DiscardUnknown(m)
    -}
    -
    -var xxx_messageInfo_IPAddress proto.InternalMessageInfo
    -
    -func (m *IPAddressList) Reset()      { *m = IPAddressList{} }
    -func (*IPAddressList) ProtoMessage() {}
    -func (*IPAddressList) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_c1cb39e7b48ce50d, []int{1}
    -}
    -func (m *IPAddressList) XXX_Unmarshal(b []byte) error {
    -	return m.Unmarshal(b)
    -}
    -func (m *IPAddressList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    -	b = b[:cap(b)]
    -	n, err := m.MarshalToSizedBuffer(b)
    -	if err != nil {
    -		return nil, err
    -	}
    -	return b[:n], nil
    -}
    -func (m *IPAddressList) XXX_Merge(src proto.Message) {
    -	xxx_messageInfo_IPAddressList.Merge(m, src)
    -}
    -func (m *IPAddressList) XXX_Size() int {
    -	return m.Size()
    -}
    -func (m *IPAddressList) XXX_DiscardUnknown() {
    -	xxx_messageInfo_IPAddressList.DiscardUnknown(m)
    -}
    -
    -var xxx_messageInfo_IPAddressList proto.InternalMessageInfo
    -
    -func (m *IPAddressSpec) Reset()      { *m = IPAddressSpec{} }
    -func (*IPAddressSpec) ProtoMessage() {}
    -func (*IPAddressSpec) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_c1cb39e7b48ce50d, []int{2}
    -}
    -func (m *IPAddressSpec) XXX_Unmarshal(b []byte) error {
    -	return m.Unmarshal(b)
    -}
    -func (m *IPAddressSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    -	b = b[:cap(b)]
    -	n, err := m.MarshalToSizedBuffer(b)
    -	if err != nil {
    -		return nil, err
    -	}
    -	return b[:n], nil
    -}
    -func (m *IPAddressSpec) XXX_Merge(src proto.Message) {
    -	xxx_messageInfo_IPAddressSpec.Merge(m, src)
    -}
    -func (m *IPAddressSpec) XXX_Size() int {
    -	return m.Size()
    -}
    -func (m *IPAddressSpec) XXX_DiscardUnknown() {
    -	xxx_messageInfo_IPAddressSpec.DiscardUnknown(m)
    -}
    -
    -var xxx_messageInfo_IPAddressSpec proto.InternalMessageInfo
    -
    -func (m *ParentReference) Reset()      { *m = ParentReference{} }
    -func (*ParentReference) ProtoMessage() {}
    -func (*ParentReference) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_c1cb39e7b48ce50d, []int{3}
    -}
    -func (m *ParentReference) XXX_Unmarshal(b []byte) error {
    -	return m.Unmarshal(b)
    -}
    -func (m *ParentReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    -	b = b[:cap(b)]
    -	n, err := m.MarshalToSizedBuffer(b)
    -	if err != nil {
    -		return nil, err
    -	}
    -	return b[:n], nil
    -}
    -func (m *ParentReference) XXX_Merge(src proto.Message) {
    -	xxx_messageInfo_ParentReference.Merge(m, src)
    -}
    -func (m *ParentReference) XXX_Size() int {
    -	return m.Size()
    -}
    -func (m *ParentReference) XXX_DiscardUnknown() {
    -	xxx_messageInfo_ParentReference.DiscardUnknown(m)
    -}
    -
    -var xxx_messageInfo_ParentReference proto.InternalMessageInfo
    -
    -func (m *ServiceCIDR) Reset()      { *m = ServiceCIDR{} }
    -func (*ServiceCIDR) ProtoMessage() {}
    -func (*ServiceCIDR) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_c1cb39e7b48ce50d, []int{4}
    -}
    -func (m *ServiceCIDR) XXX_Unmarshal(b []byte) error {
    -	return m.Unmarshal(b)
    -}
    -func (m *ServiceCIDR) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    -	b = b[:cap(b)]
    -	n, err := m.MarshalToSizedBuffer(b)
    -	if err != nil {
    -		return nil, err
    -	}
    -	return b[:n], nil
    -}
    -func (m *ServiceCIDR) XXX_Merge(src proto.Message) {
    -	xxx_messageInfo_ServiceCIDR.Merge(m, src)
    -}
    -func (m *ServiceCIDR) XXX_Size() int {
    -	return m.Size()
    -}
    -func (m *ServiceCIDR) XXX_DiscardUnknown() {
    -	xxx_messageInfo_ServiceCIDR.DiscardUnknown(m)
    -}
    -
    -var xxx_messageInfo_ServiceCIDR proto.InternalMessageInfo
    -
    -func (m *ServiceCIDRList) Reset()      { *m = ServiceCIDRList{} }
    -func (*ServiceCIDRList) ProtoMessage() {}
    -func (*ServiceCIDRList) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_c1cb39e7b48ce50d, []int{5}
    -}
    -func (m *ServiceCIDRList) XXX_Unmarshal(b []byte) error {
    -	return m.Unmarshal(b)
    -}
    -func (m *ServiceCIDRList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    -	b = b[:cap(b)]
    -	n, err := m.MarshalToSizedBuffer(b)
    -	if err != nil {
    -		return nil, err
    -	}
    -	return b[:n], nil
    -}
    -func (m *ServiceCIDRList) XXX_Merge(src proto.Message) {
    -	xxx_messageInfo_ServiceCIDRList.Merge(m, src)
    -}
    -func (m *ServiceCIDRList) XXX_Size() int {
    -	return m.Size()
    -}
    -func (m *ServiceCIDRList) XXX_DiscardUnknown() {
    -	xxx_messageInfo_ServiceCIDRList.DiscardUnknown(m)
    -}
    -
    -var xxx_messageInfo_ServiceCIDRList proto.InternalMessageInfo
    -
    -func (m *ServiceCIDRSpec) Reset()      { *m = ServiceCIDRSpec{} }
    -func (*ServiceCIDRSpec) ProtoMessage() {}
    -func (*ServiceCIDRSpec) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_c1cb39e7b48ce50d, []int{6}
    -}
    -func (m *ServiceCIDRSpec) XXX_Unmarshal(b []byte) error {
    -	return m.Unmarshal(b)
    -}
    -func (m *ServiceCIDRSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    -	b = b[:cap(b)]
    -	n, err := m.MarshalToSizedBuffer(b)
    -	if err != nil {
    -		return nil, err
    -	}
    -	return b[:n], nil
    -}
    -func (m *ServiceCIDRSpec) XXX_Merge(src proto.Message) {
    -	xxx_messageInfo_ServiceCIDRSpec.Merge(m, src)
    -}
    -func (m *ServiceCIDRSpec) XXX_Size() int {
    -	return m.Size()
    -}
    -func (m *ServiceCIDRSpec) XXX_DiscardUnknown() {
    -	xxx_messageInfo_ServiceCIDRSpec.DiscardUnknown(m)
    -}
    -
    -var xxx_messageInfo_ServiceCIDRSpec proto.InternalMessageInfo
    -
    -func (m *ServiceCIDRStatus) Reset()      { *m = ServiceCIDRStatus{} }
    -func (*ServiceCIDRStatus) ProtoMessage() {}
    -func (*ServiceCIDRStatus) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_c1cb39e7b48ce50d, []int{7}
    -}
    -func (m *ServiceCIDRStatus) XXX_Unmarshal(b []byte) error {
    -	return m.Unmarshal(b)
    -}
    -func (m *ServiceCIDRStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    -	b = b[:cap(b)]
    -	n, err := m.MarshalToSizedBuffer(b)
    -	if err != nil {
    -		return nil, err
    -	}
    -	return b[:n], nil
    -}
    -func (m *ServiceCIDRStatus) XXX_Merge(src proto.Message) {
    -	xxx_messageInfo_ServiceCIDRStatus.Merge(m, src)
    -}
    -func (m *ServiceCIDRStatus) XXX_Size() int {
    -	return m.Size()
    -}
    -func (m *ServiceCIDRStatus) XXX_DiscardUnknown() {
    -	xxx_messageInfo_ServiceCIDRStatus.DiscardUnknown(m)
    -}
    -
    -var xxx_messageInfo_ServiceCIDRStatus proto.InternalMessageInfo
    -
    -func init() {
    -	proto.RegisterType((*IPAddress)(nil), "k8s.io.api.networking.v1alpha1.IPAddress")
    -	proto.RegisterType((*IPAddressList)(nil), "k8s.io.api.networking.v1alpha1.IPAddressList")
    -	proto.RegisterType((*IPAddressSpec)(nil), "k8s.io.api.networking.v1alpha1.IPAddressSpec")
    -	proto.RegisterType((*ParentReference)(nil), "k8s.io.api.networking.v1alpha1.ParentReference")
    -	proto.RegisterType((*ServiceCIDR)(nil), "k8s.io.api.networking.v1alpha1.ServiceCIDR")
    -	proto.RegisterType((*ServiceCIDRList)(nil), "k8s.io.api.networking.v1alpha1.ServiceCIDRList")
    -	proto.RegisterType((*ServiceCIDRSpec)(nil), "k8s.io.api.networking.v1alpha1.ServiceCIDRSpec")
    -	proto.RegisterType((*ServiceCIDRStatus)(nil), "k8s.io.api.networking.v1alpha1.ServiceCIDRStatus")
    -}
    -
    -func init() {
    -	proto.RegisterFile("k8s.io/api/networking/v1alpha1/generated.proto", fileDescriptor_c1cb39e7b48ce50d)
    -}
    -
    -var fileDescriptor_c1cb39e7b48ce50d = []byte{
    -	// 634 bytes of a gzipped FileDescriptorProto
    -	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x55, 0xcd, 0x6e, 0xd3, 0x4a,
    -	0x18, 0x8d, 0xdb, 0xa4, 0xaa, 0x27, 0xb7, 0xb7, 0xb7, 0x5e, 0x45, 0x5d, 0x38, 0x91, 0xef, 0xa6,
    -	0x08, 0x3a, 0x26, 0x11, 0x42, 0x6c, 0x71, 0x2b, 0xa1, 0x4a, 0xd0, 0x96, 0xe9, 0x0a, 0xd4, 0x05,
    -	0xd3, 0xc9, 0x57, 0x67, 0x08, 0xfe, 0xd1, 0xcc, 0x24, 0xc0, 0x8e, 0x47, 0xe0, 0x05, 0x78, 0x0e,
    -	0x56, 0x20, 0xb1, 0xeb, 0xb2, 0xcb, 0xae, 0x2a, 0x6a, 0x5e, 0x04, 0xcd, 0xd8, 0xb1, 0x93, 0x46,
    -	0xfd, 0xdb, 0x74, 0xe7, 0xef, 0xcc, 0x39, 0x67, 0xbe, 0xf3, 0xcd, 0x8c, 0x8c, 0xf0, 0xf0, 0x99,
    -	0xc4, 0x3c, 0xf1, 0x69, 0xca, 0xfd, 0x18, 0xd4, 0xc7, 0x44, 0x0c, 0x79, 0x1c, 0xfa, 0xe3, 0x2e,
    -	0xfd, 0x90, 0x0e, 0x68, 0xd7, 0x0f, 0x21, 0x06, 0x41, 0x15, 0xf4, 0x71, 0x2a, 0x12, 0x95, 0x38,
    -	0x6e, 0xce, 0xc7, 0x34, 0xe5, 0xb8, 0xe2, 0xe3, 0x09, 0x7f, 0x7d, 0x33, 0xe4, 0x6a, 0x30, 0x3a,
    -	0xc2, 0x2c, 0x89, 0xfc, 0x30, 0x09, 0x13, 0xdf, 0xc8, 0x8e, 0x46, 0xc7, 0xa6, 0x32, 0x85, 0xf9,
    -	0xca, 0xed, 0xd6, 0x9f, 0x54, 0xdb, 0x47, 0x94, 0x0d, 0x78, 0x0c, 0xe2, 0xb3, 0x9f, 0x0e, 0x43,
    -	0x0d, 0x48, 0x3f, 0x02, 0x45, 0xfd, 0xf1, 0x5c, 0x13, 0xeb, 0xfe, 0x55, 0x2a, 0x31, 0x8a, 0x15,
    -	0x8f, 0x60, 0x4e, 0xf0, 0xf4, 0x26, 0x81, 0x64, 0x03, 0x88, 0xe8, 0x65, 0x9d, 0xf7, 0xd3, 0x42,
    -	0xf6, 0xce, 0xfe, 0xf3, 0x7e, 0x5f, 0x80, 0x94, 0xce, 0x3b, 0xb4, 0xac, 0x3b, 0xea, 0x53, 0x45,
    -	0x5b, 0x56, 0xc7, 0xda, 0x68, 0xf6, 0x1e, 0xe3, 0x6a, 0x1c, 0xa5, 0x31, 0x4e, 0x87, 0xa1, 0x06,
    -	0x24, 0xd6, 0x6c, 0x3c, 0xee, 0xe2, 0xbd, 0xa3, 0xf7, 0xc0, 0xd4, 0x2b, 0x50, 0x34, 0x70, 0x4e,
    -	0xce, 0xdb, 0xb5, 0xec, 0xbc, 0x8d, 0x2a, 0x8c, 0x94, 0xae, 0xce, 0x1e, 0xaa, 0xcb, 0x14, 0x58,
    -	0x6b, 0xc1, 0xb8, 0x6f, 0xe2, 0xeb, 0x87, 0x8d, 0xcb, 0xd6, 0x0e, 0x52, 0x60, 0xc1, 0x3f, 0x85,
    -	0x75, 0x5d, 0x57, 0xc4, 0x18, 0x79, 0x3f, 0x2c, 0xb4, 0x52, 0xb2, 0x5e, 0x72, 0xa9, 0x9c, 0xc3,
    -	0xb9, 0x10, 0xf8, 0x76, 0x21, 0xb4, 0xda, 0x44, 0xf8, 0xaf, 0xd8, 0x67, 0x79, 0x82, 0x4c, 0x05,
    -	0xd8, 0x45, 0x0d, 0xae, 0x20, 0x92, 0xad, 0x85, 0xce, 0xe2, 0x46, 0xb3, 0xf7, 0xe0, 0xd6, 0x09,
    -	0x82, 0x95, 0xc2, 0xb5, 0xb1, 0xa3, 0xf5, 0x24, 0xb7, 0xf1, 0xa2, 0xa9, 0xf6, 0x75, 0x2c, 0xe7,
    -	0x10, 0xd9, 0x29, 0x15, 0x10, 0x2b, 0x02, 0xc7, 0x45, 0xff, 0xfe, 0x4d, 0x9b, 0xec, 0x4f, 0x04,
    -	0x20, 0x20, 0x66, 0x10, 0xac, 0x64, 0xe7, 0x6d, 0xbb, 0x04, 0x49, 0x65, 0xe8, 0x7d, 0xb7, 0xd0,
    -	0xea, 0x25, 0xb6, 0xf3, 0x3f, 0x6a, 0x84, 0x22, 0x19, 0xa5, 0x66, 0x37, 0xbb, 0xea, 0xf3, 0x85,
    -	0x06, 0x49, 0xbe, 0xe6, 0x3c, 0x42, 0xcb, 0x02, 0x64, 0x32, 0x12, 0x0c, 0xcc, 0xe1, 0xd9, 0xd5,
    -	0x94, 0x48, 0x81, 0x93, 0x92, 0xe1, 0xf8, 0xc8, 0x8e, 0x69, 0x04, 0x32, 0xa5, 0x0c, 0x5a, 0x8b,
    -	0x86, 0xbe, 0x56, 0xd0, 0xed, 0xdd, 0xc9, 0x02, 0xa9, 0x38, 0x4e, 0x07, 0xd5, 0x75, 0xd1, 0xaa,
    -	0x1b, 0x6e, 0x79, 0xd0, 0x9a, 0x4b, 0xcc, 0x8a, 0xf7, 0x6d, 0x01, 0x35, 0x0f, 0x40, 0x8c, 0x39,
    -	0x83, 0xad, 0x9d, 0x6d, 0x72, 0x0f, 0x77, 0xf5, 0xf5, 0xcc, 0x5d, 0xbd, 0xf1, 0x10, 0xa6, 0x9a,
    -	0xbb, 0xea, 0xb6, 0x3a, 0x6f, 0xd0, 0x92, 0x54, 0x54, 0x8d, 0xa4, 0x19, 0x4a, 0xb3, 0xd7, 0xbd,
    -	0x8b, 0xa9, 0x11, 0x06, 0xff, 0x16, 0xb6, 0x4b, 0x79, 0x4d, 0x0a, 0x43, 0xef, 0x97, 0x85, 0x56,
    -	0xa7, 0xd8, 0xf7, 0xf0, 0x14, 0xf6, 0x67, 0x9f, 0xc2, 0xc3, 0x3b, 0x64, 0xb9, 0xe2, 0x31, 0xf4,
    -	0x66, 0x22, 0x98, 0xe7, 0xd0, 0x46, 0x0d, 0xc6, 0xfb, 0x42, 0xb6, 0xac, 0xce, 0xe2, 0x86, 0x1d,
    -	0xd8, 0x5a, 0xa3, 0x17, 0x25, 0xc9, 0x71, 0xef, 0x13, 0x5a, 0x9b, 0x1b, 0x92, 0xc3, 0x10, 0x62,
    -	0x49, 0xdc, 0xe7, 0x8a, 0x27, 0x71, 0x2e, 0x9d, 0x3d, 0xc0, 0x6b, 0xa2, 0x6f, 0x4d, 0x74, 0xd5,
    -	0xed, 0x28, 0x21, 0x49, 0xa6, 0x6c, 0x83, 0xed, 0x93, 0x0b, 0xb7, 0x76, 0x7a, 0xe1, 0xd6, 0xce,
    -	0x2e, 0xdc, 0xda, 0x97, 0xcc, 0xb5, 0x4e, 0x32, 0xd7, 0x3a, 0xcd, 0x5c, 0xeb, 0x2c, 0x73, 0xad,
    -	0xdf, 0x99, 0x6b, 0x7d, 0xfd, 0xe3, 0xd6, 0xde, 0xba, 0xd7, 0xff, 0x7f, 0xfe, 0x06, 0x00, 0x00,
    -	0xff, 0xff, 0xb1, 0xd0, 0x33, 0x02, 0xa0, 0x06, 0x00, 0x00,
    -}
    -
    -func (m *IPAddress) Marshal() (dAtA []byte, err error) {
    -	size := m.Size()
    -	dAtA = make([]byte, size)
    -	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    -	if err != nil {
    -		return nil, err
    -	}
    -	return dAtA[:n], nil
    -}
    -
    -func (m *IPAddress) MarshalTo(dAtA []byte) (int, error) {
    -	size := m.Size()
    -	return m.MarshalToSizedBuffer(dAtA[:size])
    -}
    -
    -func (m *IPAddress) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    -	i := len(dAtA)
    -	_ = i
    -	var l int
    -	_ = l
    -	{
    -		size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
    -		if err != nil {
    -			return 0, err
    -		}
    -		i -= size
    -		i = encodeVarintGenerated(dAtA, i, uint64(size))
    -	}
    -	i--
    -	dAtA[i] = 0x12
    -	{
    -		size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
    -		if err != nil {
    -			return 0, err
    -		}
    -		i -= size
    -		i = encodeVarintGenerated(dAtA, i, uint64(size))
    -	}
    -	i--
    -	dAtA[i] = 0xa
    -	return len(dAtA) - i, nil
    -}
    -
    -func (m *IPAddressList) Marshal() (dAtA []byte, err error) {
    -	size := m.Size()
    -	dAtA = make([]byte, size)
    -	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    -	if err != nil {
    -		return nil, err
    -	}
    -	return dAtA[:n], nil
    -}
    -
    -func (m *IPAddressList) MarshalTo(dAtA []byte) (int, error) {
    -	size := m.Size()
    -	return m.MarshalToSizedBuffer(dAtA[:size])
    -}
    -
    -func (m *IPAddressList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    -	i := len(dAtA)
    -	_ = i
    -	var l int
    -	_ = l
    -	if len(m.Items) > 0 {
    -		for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
    -			{
    -				size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
    -				if err != nil {
    -					return 0, err
    -				}
    -				i -= size
    -				i = encodeVarintGenerated(dAtA, i, uint64(size))
    -			}
    -			i--
    -			dAtA[i] = 0x12
    -		}
    -	}
    -	{
    -		size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
    -		if err != nil {
    -			return 0, err
    -		}
    -		i -= size
    -		i = encodeVarintGenerated(dAtA, i, uint64(size))
    -	}
    -	i--
    -	dAtA[i] = 0xa
    -	return len(dAtA) - i, nil
    -}
    -
    -func (m *IPAddressSpec) Marshal() (dAtA []byte, err error) {
    -	size := m.Size()
    -	dAtA = make([]byte, size)
    -	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    -	if err != nil {
    -		return nil, err
    -	}
    -	return dAtA[:n], nil
    -}
    -
    -func (m *IPAddressSpec) MarshalTo(dAtA []byte) (int, error) {
    -	size := m.Size()
    -	return m.MarshalToSizedBuffer(dAtA[:size])
    -}
    -
    -func (m *IPAddressSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    -	i := len(dAtA)
    -	_ = i
    -	var l int
    -	_ = l
    -	if m.ParentRef != nil {
    -		{
    -			size, err := m.ParentRef.MarshalToSizedBuffer(dAtA[:i])
    -			if err != nil {
    -				return 0, err
    -			}
    -			i -= size
    -			i = encodeVarintGenerated(dAtA, i, uint64(size))
    -		}
    -		i--
    -		dAtA[i] = 0xa
    -	}
    -	return len(dAtA) - i, nil
    -}
    -
    -func (m *ParentReference) Marshal() (dAtA []byte, err error) {
    -	size := m.Size()
    -	dAtA = make([]byte, size)
    -	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    -	if err != nil {
    -		return nil, err
    -	}
    -	return dAtA[:n], nil
    -}
    -
    -func (m *ParentReference) MarshalTo(dAtA []byte) (int, error) {
    -	size := m.Size()
    -	return m.MarshalToSizedBuffer(dAtA[:size])
    -}
    -
    -func (m *ParentReference) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    -	i := len(dAtA)
    -	_ = i
    -	var l int
    -	_ = l
    -	i -= len(m.Name)
    -	copy(dAtA[i:], m.Name)
    -	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
    -	i--
    -	dAtA[i] = 0x22
    -	i -= len(m.Namespace)
    -	copy(dAtA[i:], m.Namespace)
    -	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace)))
    -	i--
    -	dAtA[i] = 0x1a
    -	i -= len(m.Resource)
    -	copy(dAtA[i:], m.Resource)
    -	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resource)))
    -	i--
    -	dAtA[i] = 0x12
    -	i -= len(m.Group)
    -	copy(dAtA[i:], m.Group)
    -	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group)))
    -	i--
    -	dAtA[i] = 0xa
    -	return len(dAtA) - i, nil
    -}
    -
    -func (m *ServiceCIDR) Marshal() (dAtA []byte, err error) {
    -	size := m.Size()
    -	dAtA = make([]byte, size)
    -	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    -	if err != nil {
    -		return nil, err
    -	}
    -	return dAtA[:n], nil
    -}
    -
    -func (m *ServiceCIDR) MarshalTo(dAtA []byte) (int, error) {
    -	size := m.Size()
    -	return m.MarshalToSizedBuffer(dAtA[:size])
    -}
    -
    -func (m *ServiceCIDR) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    -	i := len(dAtA)
    -	_ = i
    -	var l int
    -	_ = l
    -	{
    -		size, err := m.Status.MarshalToSizedBuffer(dAtA[:i])
    -		if err != nil {
    -			return 0, err
    -		}
    -		i -= size
    -		i = encodeVarintGenerated(dAtA, i, uint64(size))
    -	}
    -	i--
    -	dAtA[i] = 0x1a
    -	{
    -		size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
    -		if err != nil {
    -			return 0, err
    -		}
    -		i -= size
    -		i = encodeVarintGenerated(dAtA, i, uint64(size))
    -	}
    -	i--
    -	dAtA[i] = 0x12
    -	{
    -		size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
    -		if err != nil {
    -			return 0, err
    -		}
    -		i -= size
    -		i = encodeVarintGenerated(dAtA, i, uint64(size))
    -	}
    -	i--
    -	dAtA[i] = 0xa
    -	return len(dAtA) - i, nil
    -}
    -
    -func (m *ServiceCIDRList) Marshal() (dAtA []byte, err error) {
    -	size := m.Size()
    -	dAtA = make([]byte, size)
    -	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    -	if err != nil {
    -		return nil, err
    -	}
    -	return dAtA[:n], nil
    -}
    -
    -func (m *ServiceCIDRList) MarshalTo(dAtA []byte) (int, error) {
    -	size := m.Size()
    -	return m.MarshalToSizedBuffer(dAtA[:size])
    -}
    -
    -func (m *ServiceCIDRList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    -	i := len(dAtA)
    -	_ = i
    -	var l int
    -	_ = l
    -	if len(m.Items) > 0 {
    -		for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
    -			{
    -				size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
    -				if err != nil {
    -					return 0, err
    -				}
    -				i -= size
    -				i = encodeVarintGenerated(dAtA, i, uint64(size))
    -			}
    -			i--
    -			dAtA[i] = 0x12
    -		}
    -	}
    -	{
    -		size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
    -		if err != nil {
    -			return 0, err
    -		}
    -		i -= size
    -		i = encodeVarintGenerated(dAtA, i, uint64(size))
    -	}
    -	i--
    -	dAtA[i] = 0xa
    -	return len(dAtA) - i, nil
    -}
    -
    -func (m *ServiceCIDRSpec) Marshal() (dAtA []byte, err error) {
    -	size := m.Size()
    -	dAtA = make([]byte, size)
    -	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    -	if err != nil {
    -		return nil, err
    -	}
    -	return dAtA[:n], nil
    -}
    -
    -func (m *ServiceCIDRSpec) MarshalTo(dAtA []byte) (int, error) {
    -	size := m.Size()
    -	return m.MarshalToSizedBuffer(dAtA[:size])
    -}
    -
    -func (m *ServiceCIDRSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    -	i := len(dAtA)
    -	_ = i
    -	var l int
    -	_ = l
    -	if len(m.CIDRs) > 0 {
    -		for iNdEx := len(m.CIDRs) - 1; iNdEx >= 0; iNdEx-- {
    -			i -= len(m.CIDRs[iNdEx])
    -			copy(dAtA[i:], m.CIDRs[iNdEx])
    -			i = encodeVarintGenerated(dAtA, i, uint64(len(m.CIDRs[iNdEx])))
    -			i--
    -			dAtA[i] = 0xa
    -		}
    -	}
    -	return len(dAtA) - i, nil
    -}
    -
    -func (m *ServiceCIDRStatus) Marshal() (dAtA []byte, err error) {
    -	size := m.Size()
    -	dAtA = make([]byte, size)
    -	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    -	if err != nil {
    -		return nil, err
    -	}
    -	return dAtA[:n], nil
    -}
    -
    -func (m *ServiceCIDRStatus) MarshalTo(dAtA []byte) (int, error) {
    -	size := m.Size()
    -	return m.MarshalToSizedBuffer(dAtA[:size])
    -}
    -
    -func (m *ServiceCIDRStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    -	i := len(dAtA)
    -	_ = i
    -	var l int
    -	_ = l
    -	if len(m.Conditions) > 0 {
    -		for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- {
    -			{
    -				size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i])
    -				if err != nil {
    -					return 0, err
    -				}
    -				i -= size
    -				i = encodeVarintGenerated(dAtA, i, uint64(size))
    -			}
    -			i--
    -			dAtA[i] = 0xa
    -		}
    -	}
    -	return len(dAtA) - i, nil
    -}
    -
    -func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int {
    -	offset -= sovGenerated(v)
    -	base := offset
    -	for v >= 1<<7 {
    -		dAtA[offset] = uint8(v&0x7f | 0x80)
    -		v >>= 7
    -		offset++
    -	}
    -	dAtA[offset] = uint8(v)
    -	return base
    -}
    -func (m *IPAddress) Size() (n int) {
    -	if m == nil {
    -		return 0
    -	}
    -	var l int
    -	_ = l
    -	l = m.ObjectMeta.Size()
    -	n += 1 + l + sovGenerated(uint64(l))
    -	l = m.Spec.Size()
    -	n += 1 + l + sovGenerated(uint64(l))
    -	return n
    -}
    -
    -func (m *IPAddressList) Size() (n int) {
    -	if m == nil {
    -		return 0
    -	}
    -	var l int
    -	_ = l
    -	l = m.ListMeta.Size()
    -	n += 1 + l + sovGenerated(uint64(l))
    -	if len(m.Items) > 0 {
    -		for _, e := range m.Items {
    -			l = e.Size()
    -			n += 1 + l + sovGenerated(uint64(l))
    -		}
    -	}
    -	return n
    -}
    -
    -func (m *IPAddressSpec) Size() (n int) {
    -	if m == nil {
    -		return 0
    -	}
    -	var l int
    -	_ = l
    -	if m.ParentRef != nil {
    -		l = m.ParentRef.Size()
    -		n += 1 + l + sovGenerated(uint64(l))
    -	}
    -	return n
    -}
    -
    -func (m *ParentReference) Size() (n int) {
    -	if m == nil {
    -		return 0
    -	}
    -	var l int
    -	_ = l
    -	l = len(m.Group)
    -	n += 1 + l + sovGenerated(uint64(l))
    -	l = len(m.Resource)
    -	n += 1 + l + sovGenerated(uint64(l))
    -	l = len(m.Namespace)
    -	n += 1 + l + sovGenerated(uint64(l))
    -	l = len(m.Name)
    -	n += 1 + l + sovGenerated(uint64(l))
    -	return n
    -}
    -
    -func (m *ServiceCIDR) Size() (n int) {
    -	if m == nil {
    -		return 0
    -	}
    -	var l int
    -	_ = l
    -	l = m.ObjectMeta.Size()
    -	n += 1 + l + sovGenerated(uint64(l))
    -	l = m.Spec.Size()
    -	n += 1 + l + sovGenerated(uint64(l))
    -	l = m.Status.Size()
    -	n += 1 + l + sovGenerated(uint64(l))
    -	return n
    -}
    -
    -func (m *ServiceCIDRList) Size() (n int) {
    -	if m == nil {
    -		return 0
    -	}
    -	var l int
    -	_ = l
    -	l = m.ListMeta.Size()
    -	n += 1 + l + sovGenerated(uint64(l))
    -	if len(m.Items) > 0 {
    -		for _, e := range m.Items {
    -			l = e.Size()
    -			n += 1 + l + sovGenerated(uint64(l))
    -		}
    -	}
    -	return n
    -}
    -
    -func (m *ServiceCIDRSpec) Size() (n int) {
    -	if m == nil {
    -		return 0
    -	}
    -	var l int
    -	_ = l
    -	if len(m.CIDRs) > 0 {
    -		for _, s := range m.CIDRs {
    -			l = len(s)
    -			n += 1 + l + sovGenerated(uint64(l))
    -		}
    -	}
    -	return n
    -}
    -
    -func (m *ServiceCIDRStatus) Size() (n int) {
    -	if m == nil {
    -		return 0
    -	}
    -	var l int
    -	_ = l
    -	if len(m.Conditions) > 0 {
    -		for _, e := range m.Conditions {
    -			l = e.Size()
    -			n += 1 + l + sovGenerated(uint64(l))
    -		}
    -	}
    -	return n
    -}
    -
    -func sovGenerated(x uint64) (n int) {
    -	return (math_bits.Len64(x|1) + 6) / 7
    -}
    -func sozGenerated(x uint64) (n int) {
    -	return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63))))
    -}
    -func (this *IPAddress) String() string {
    -	if this == nil {
    -		return "nil"
    -	}
    -	s := strings.Join([]string{`&IPAddress{`,
    -		`ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
    -		`Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "IPAddressSpec", "IPAddressSpec", 1), `&`, ``, 1) + `,`,
    -		`}`,
    -	}, "")
    -	return s
    -}
    -func (this *IPAddressList) String() string {
    -	if this == nil {
    -		return "nil"
    -	}
    -	repeatedStringForItems := "[]IPAddress{"
    -	for _, f := range this.Items {
    -		repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "IPAddress", "IPAddress", 1), `&`, ``, 1) + ","
    -	}
    -	repeatedStringForItems += "}"
    -	s := strings.Join([]string{`&IPAddressList{`,
    -		`ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
    -		`Items:` + repeatedStringForItems + `,`,
    -		`}`,
    -	}, "")
    -	return s
    -}
    -func (this *IPAddressSpec) String() string {
    -	if this == nil {
    -		return "nil"
    -	}
    -	s := strings.Join([]string{`&IPAddressSpec{`,
    -		`ParentRef:` + strings.Replace(this.ParentRef.String(), "ParentReference", "ParentReference", 1) + `,`,
    -		`}`,
    -	}, "")
    -	return s
    -}
    -func (this *ParentReference) String() string {
    -	if this == nil {
    -		return "nil"
    -	}
    -	s := strings.Join([]string{`&ParentReference{`,
    -		`Group:` + fmt.Sprintf("%v", this.Group) + `,`,
    -		`Resource:` + fmt.Sprintf("%v", this.Resource) + `,`,
    -		`Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`,
    -		`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
    -		`}`,
    -	}, "")
    -	return s
    -}
    -func (this *ServiceCIDR) String() string {
    -	if this == nil {
    -		return "nil"
    -	}
    -	s := strings.Join([]string{`&ServiceCIDR{`,
    -		`ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
    -		`Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ServiceCIDRSpec", "ServiceCIDRSpec", 1), `&`, ``, 1) + `,`,
    -		`Status:` + strings.Replace(strings.Replace(this.Status.String(), "ServiceCIDRStatus", "ServiceCIDRStatus", 1), `&`, ``, 1) + `,`,
    -		`}`,
    -	}, "")
    -	return s
    -}
    -func (this *ServiceCIDRList) String() string {
    -	if this == nil {
    -		return "nil"
    -	}
    -	repeatedStringForItems := "[]ServiceCIDR{"
    -	for _, f := range this.Items {
    -		repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ServiceCIDR", "ServiceCIDR", 1), `&`, ``, 1) + ","
    -	}
    -	repeatedStringForItems += "}"
    -	s := strings.Join([]string{`&ServiceCIDRList{`,
    -		`ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
    -		`Items:` + repeatedStringForItems + `,`,
    -		`}`,
    -	}, "")
    -	return s
    -}
    -func (this *ServiceCIDRSpec) String() string {
    -	if this == nil {
    -		return "nil"
    -	}
    -	s := strings.Join([]string{`&ServiceCIDRSpec{`,
    -		`CIDRs:` + fmt.Sprintf("%v", this.CIDRs) + `,`,
    -		`}`,
    -	}, "")
    -	return s
    -}
    -func (this *ServiceCIDRStatus) String() string {
    -	if this == nil {
    -		return "nil"
    -	}
    -	repeatedStringForConditions := "[]Condition{"
    -	for _, f := range this.Conditions {
    -		repeatedStringForConditions += fmt.Sprintf("%v", f) + ","
    -	}
    -	repeatedStringForConditions += "}"
    -	s := strings.Join([]string{`&ServiceCIDRStatus{`,
    -		`Conditions:` + repeatedStringForConditions + `,`,
    -		`}`,
    -	}, "")
    -	return s
    -}
    -func valueToStringGenerated(v interface{}) string {
    -	rv := reflect.ValueOf(v)
    -	if rv.IsNil() {
    -		return "nil"
    -	}
    -	pv := reflect.Indirect(rv).Interface()
    -	return fmt.Sprintf("*%v", pv)
    -}
    -func (m *IPAddress) Unmarshal(dAtA []byte) error {
    -	l := len(dAtA)
    -	iNdEx := 0
    -	for iNdEx < l {
    -		preIndex := iNdEx
    -		var wire uint64
    -		for shift := uint(0); ; shift += 7 {
    -			if shift >= 64 {
    -				return ErrIntOverflowGenerated
    -			}
    -			if iNdEx >= l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			b := dAtA[iNdEx]
    -			iNdEx++
    -			wire |= uint64(b&0x7F) << shift
    -			if b < 0x80 {
    -				break
    -			}
    -		}
    -		fieldNum := int32(wire >> 3)
    -		wireType := int(wire & 0x7)
    -		if wireType == 4 {
    -			return fmt.Errorf("proto: IPAddress: wiretype end group for non-group")
    -		}
    -		if fieldNum <= 0 {
    -			return fmt.Errorf("proto: IPAddress: illegal tag %d (wire type %d)", fieldNum, wire)
    -		}
    -		switch fieldNum {
    -		case 1:
    -			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
    -			}
    -			var msglen int
    -			for shift := uint(0); ; shift += 7 {
    -				if shift >= 64 {
    -					return ErrIntOverflowGenerated
    -				}
    -				if iNdEx >= l {
    -					return io.ErrUnexpectedEOF
    -				}
    -				b := dAtA[iNdEx]
    -				iNdEx++
    -				msglen |= int(b&0x7F) << shift
    -				if b < 0x80 {
    -					break
    -				}
    -			}
    -			if msglen < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			postIndex := iNdEx + msglen
    -			if postIndex < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if postIndex > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    -				return err
    -			}
    -			iNdEx = postIndex
    -		case 2:
    -			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
    -			}
    -			var msglen int
    -			for shift := uint(0); ; shift += 7 {
    -				if shift >= 64 {
    -					return ErrIntOverflowGenerated
    -				}
    -				if iNdEx >= l {
    -					return io.ErrUnexpectedEOF
    -				}
    -				b := dAtA[iNdEx]
    -				iNdEx++
    -				msglen |= int(b&0x7F) << shift
    -				if b < 0x80 {
    -					break
    -				}
    -			}
    -			if msglen < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			postIndex := iNdEx + msglen
    -			if postIndex < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if postIndex > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    -				return err
    -			}
    -			iNdEx = postIndex
    -		default:
    -			iNdEx = preIndex
    -			skippy, err := skipGenerated(dAtA[iNdEx:])
    -			if err != nil {
    -				return err
    -			}
    -			if (skippy < 0) || (iNdEx+skippy) < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if (iNdEx + skippy) > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			iNdEx += skippy
    -		}
    -	}
    -
    -	if iNdEx > l {
    -		return io.ErrUnexpectedEOF
    -	}
    -	return nil
    -}
    -func (m *IPAddressList) Unmarshal(dAtA []byte) error {
    -	l := len(dAtA)
    -	iNdEx := 0
    -	for iNdEx < l {
    -		preIndex := iNdEx
    -		var wire uint64
    -		for shift := uint(0); ; shift += 7 {
    -			if shift >= 64 {
    -				return ErrIntOverflowGenerated
    -			}
    -			if iNdEx >= l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			b := dAtA[iNdEx]
    -			iNdEx++
    -			wire |= uint64(b&0x7F) << shift
    -			if b < 0x80 {
    -				break
    -			}
    -		}
    -		fieldNum := int32(wire >> 3)
    -		wireType := int(wire & 0x7)
    -		if wireType == 4 {
    -			return fmt.Errorf("proto: IPAddressList: wiretype end group for non-group")
    -		}
    -		if fieldNum <= 0 {
    -			return fmt.Errorf("proto: IPAddressList: illegal tag %d (wire type %d)", fieldNum, wire)
    -		}
    -		switch fieldNum {
    -		case 1:
    -			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
    -			}
    -			var msglen int
    -			for shift := uint(0); ; shift += 7 {
    -				if shift >= 64 {
    -					return ErrIntOverflowGenerated
    -				}
    -				if iNdEx >= l {
    -					return io.ErrUnexpectedEOF
    -				}
    -				b := dAtA[iNdEx]
    -				iNdEx++
    -				msglen |= int(b&0x7F) << shift
    -				if b < 0x80 {
    -					break
    -				}
    -			}
    -			if msglen < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			postIndex := iNdEx + msglen
    -			if postIndex < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if postIndex > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    -				return err
    -			}
    -			iNdEx = postIndex
    -		case 2:
    -			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
    -			}
    -			var msglen int
    -			for shift := uint(0); ; shift += 7 {
    -				if shift >= 64 {
    -					return ErrIntOverflowGenerated
    -				}
    -				if iNdEx >= l {
    -					return io.ErrUnexpectedEOF
    -				}
    -				b := dAtA[iNdEx]
    -				iNdEx++
    -				msglen |= int(b&0x7F) << shift
    -				if b < 0x80 {
    -					break
    -				}
    -			}
    -			if msglen < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			postIndex := iNdEx + msglen
    -			if postIndex < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if postIndex > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			m.Items = append(m.Items, IPAddress{})
    -			if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    -				return err
    -			}
    -			iNdEx = postIndex
    -		default:
    -			iNdEx = preIndex
    -			skippy, err := skipGenerated(dAtA[iNdEx:])
    -			if err != nil {
    -				return err
    -			}
    -			if (skippy < 0) || (iNdEx+skippy) < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if (iNdEx + skippy) > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			iNdEx += skippy
    -		}
    -	}
    -
    -	if iNdEx > l {
    -		return io.ErrUnexpectedEOF
    -	}
    -	return nil
    -}
    -func (m *IPAddressSpec) Unmarshal(dAtA []byte) error {
    -	l := len(dAtA)
    -	iNdEx := 0
    -	for iNdEx < l {
    -		preIndex := iNdEx
    -		var wire uint64
    -		for shift := uint(0); ; shift += 7 {
    -			if shift >= 64 {
    -				return ErrIntOverflowGenerated
    -			}
    -			if iNdEx >= l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			b := dAtA[iNdEx]
    -			iNdEx++
    -			wire |= uint64(b&0x7F) << shift
    -			if b < 0x80 {
    -				break
    -			}
    -		}
    -		fieldNum := int32(wire >> 3)
    -		wireType := int(wire & 0x7)
    -		if wireType == 4 {
    -			return fmt.Errorf("proto: IPAddressSpec: wiretype end group for non-group")
    -		}
    -		if fieldNum <= 0 {
    -			return fmt.Errorf("proto: IPAddressSpec: illegal tag %d (wire type %d)", fieldNum, wire)
    -		}
    -		switch fieldNum {
    -		case 1:
    -			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field ParentRef", wireType)
    -			}
    -			var msglen int
    -			for shift := uint(0); ; shift += 7 {
    -				if shift >= 64 {
    -					return ErrIntOverflowGenerated
    -				}
    -				if iNdEx >= l {
    -					return io.ErrUnexpectedEOF
    -				}
    -				b := dAtA[iNdEx]
    -				iNdEx++
    -				msglen |= int(b&0x7F) << shift
    -				if b < 0x80 {
    -					break
    -				}
    -			}
    -			if msglen < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			postIndex := iNdEx + msglen
    -			if postIndex < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if postIndex > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			if m.ParentRef == nil {
    -				m.ParentRef = &ParentReference{}
    -			}
    -			if err := m.ParentRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    -				return err
    -			}
    -			iNdEx = postIndex
    -		default:
    -			iNdEx = preIndex
    -			skippy, err := skipGenerated(dAtA[iNdEx:])
    -			if err != nil {
    -				return err
    -			}
    -			if (skippy < 0) || (iNdEx+skippy) < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if (iNdEx + skippy) > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			iNdEx += skippy
    -		}
    -	}
    -
    -	if iNdEx > l {
    -		return io.ErrUnexpectedEOF
    -	}
    -	return nil
    -}
    -func (m *ParentReference) Unmarshal(dAtA []byte) error {
    -	l := len(dAtA)
    -	iNdEx := 0
    -	for iNdEx < l {
    -		preIndex := iNdEx
    -		var wire uint64
    -		for shift := uint(0); ; shift += 7 {
    -			if shift >= 64 {
    -				return ErrIntOverflowGenerated
    -			}
    -			if iNdEx >= l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			b := dAtA[iNdEx]
    -			iNdEx++
    -			wire |= uint64(b&0x7F) << shift
    -			if b < 0x80 {
    -				break
    -			}
    -		}
    -		fieldNum := int32(wire >> 3)
    -		wireType := int(wire & 0x7)
    -		if wireType == 4 {
    -			return fmt.Errorf("proto: ParentReference: wiretype end group for non-group")
    -		}
    -		if fieldNum <= 0 {
    -			return fmt.Errorf("proto: ParentReference: illegal tag %d (wire type %d)", fieldNum, wire)
    -		}
    -		switch fieldNum {
    -		case 1:
    -			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType)
    -			}
    -			var stringLen uint64
    -			for shift := uint(0); ; shift += 7 {
    -				if shift >= 64 {
    -					return ErrIntOverflowGenerated
    -				}
    -				if iNdEx >= l {
    -					return io.ErrUnexpectedEOF
    -				}
    -				b := dAtA[iNdEx]
    -				iNdEx++
    -				stringLen |= uint64(b&0x7F) << shift
    -				if b < 0x80 {
    -					break
    -				}
    -			}
    -			intStringLen := int(stringLen)
    -			if intStringLen < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			postIndex := iNdEx + intStringLen
    -			if postIndex < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if postIndex > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			m.Group = string(dAtA[iNdEx:postIndex])
    -			iNdEx = postIndex
    -		case 2:
    -			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType)
    -			}
    -			var stringLen uint64
    -			for shift := uint(0); ; shift += 7 {
    -				if shift >= 64 {
    -					return ErrIntOverflowGenerated
    -				}
    -				if iNdEx >= l {
    -					return io.ErrUnexpectedEOF
    -				}
    -				b := dAtA[iNdEx]
    -				iNdEx++
    -				stringLen |= uint64(b&0x7F) << shift
    -				if b < 0x80 {
    -					break
    -				}
    -			}
    -			intStringLen := int(stringLen)
    -			if intStringLen < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			postIndex := iNdEx + intStringLen
    -			if postIndex < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if postIndex > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			m.Resource = string(dAtA[iNdEx:postIndex])
    -			iNdEx = postIndex
    -		case 3:
    -			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType)
    -			}
    -			var stringLen uint64
    -			for shift := uint(0); ; shift += 7 {
    -				if shift >= 64 {
    -					return ErrIntOverflowGenerated
    -				}
    -				if iNdEx >= l {
    -					return io.ErrUnexpectedEOF
    -				}
    -				b := dAtA[iNdEx]
    -				iNdEx++
    -				stringLen |= uint64(b&0x7F) << shift
    -				if b < 0x80 {
    -					break
    -				}
    -			}
    -			intStringLen := int(stringLen)
    -			if intStringLen < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			postIndex := iNdEx + intStringLen
    -			if postIndex < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if postIndex > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			m.Namespace = string(dAtA[iNdEx:postIndex])
    -			iNdEx = postIndex
    -		case 4:
    -			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
    -			}
    -			var stringLen uint64
    -			for shift := uint(0); ; shift += 7 {
    -				if shift >= 64 {
    -					return ErrIntOverflowGenerated
    -				}
    -				if iNdEx >= l {
    -					return io.ErrUnexpectedEOF
    -				}
    -				b := dAtA[iNdEx]
    -				iNdEx++
    -				stringLen |= uint64(b&0x7F) << shift
    -				if b < 0x80 {
    -					break
    -				}
    -			}
    -			intStringLen := int(stringLen)
    -			if intStringLen < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			postIndex := iNdEx + intStringLen
    -			if postIndex < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if postIndex > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			m.Name = string(dAtA[iNdEx:postIndex])
    -			iNdEx = postIndex
    -		default:
    -			iNdEx = preIndex
    -			skippy, err := skipGenerated(dAtA[iNdEx:])
    -			if err != nil {
    -				return err
    -			}
    -			if (skippy < 0) || (iNdEx+skippy) < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if (iNdEx + skippy) > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			iNdEx += skippy
    -		}
    -	}
    -
    -	if iNdEx > l {
    -		return io.ErrUnexpectedEOF
    -	}
    -	return nil
    -}
    -func (m *ServiceCIDR) Unmarshal(dAtA []byte) error {
    -	l := len(dAtA)
    -	iNdEx := 0
    -	for iNdEx < l {
    -		preIndex := iNdEx
    -		var wire uint64
    -		for shift := uint(0); ; shift += 7 {
    -			if shift >= 64 {
    -				return ErrIntOverflowGenerated
    -			}
    -			if iNdEx >= l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			b := dAtA[iNdEx]
    -			iNdEx++
    -			wire |= uint64(b&0x7F) << shift
    -			if b < 0x80 {
    -				break
    -			}
    -		}
    -		fieldNum := int32(wire >> 3)
    -		wireType := int(wire & 0x7)
    -		if wireType == 4 {
    -			return fmt.Errorf("proto: ServiceCIDR: wiretype end group for non-group")
    -		}
    -		if fieldNum <= 0 {
    -			return fmt.Errorf("proto: ServiceCIDR: illegal tag %d (wire type %d)", fieldNum, wire)
    -		}
    -		switch fieldNum {
    -		case 1:
    -			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
    -			}
    -			var msglen int
    -			for shift := uint(0); ; shift += 7 {
    -				if shift >= 64 {
    -					return ErrIntOverflowGenerated
    -				}
    -				if iNdEx >= l {
    -					return io.ErrUnexpectedEOF
    -				}
    -				b := dAtA[iNdEx]
    -				iNdEx++
    -				msglen |= int(b&0x7F) << shift
    -				if b < 0x80 {
    -					break
    -				}
    -			}
    -			if msglen < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			postIndex := iNdEx + msglen
    -			if postIndex < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if postIndex > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    -				return err
    -			}
    -			iNdEx = postIndex
    -		case 2:
    -			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
    -			}
    -			var msglen int
    -			for shift := uint(0); ; shift += 7 {
    -				if shift >= 64 {
    -					return ErrIntOverflowGenerated
    -				}
    -				if iNdEx >= l {
    -					return io.ErrUnexpectedEOF
    -				}
    -				b := dAtA[iNdEx]
    -				iNdEx++
    -				msglen |= int(b&0x7F) << shift
    -				if b < 0x80 {
    -					break
    -				}
    -			}
    -			if msglen < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			postIndex := iNdEx + msglen
    -			if postIndex < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if postIndex > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    -				return err
    -			}
    -			iNdEx = postIndex
    -		case 3:
    -			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
    -			}
    -			var msglen int
    -			for shift := uint(0); ; shift += 7 {
    -				if shift >= 64 {
    -					return ErrIntOverflowGenerated
    -				}
    -				if iNdEx >= l {
    -					return io.ErrUnexpectedEOF
    -				}
    -				b := dAtA[iNdEx]
    -				iNdEx++
    -				msglen |= int(b&0x7F) << shift
    -				if b < 0x80 {
    -					break
    -				}
    -			}
    -			if msglen < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			postIndex := iNdEx + msglen
    -			if postIndex < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if postIndex > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    -				return err
    -			}
    -			iNdEx = postIndex
    -		default:
    -			iNdEx = preIndex
    -			skippy, err := skipGenerated(dAtA[iNdEx:])
    -			if err != nil {
    -				return err
    -			}
    -			if (skippy < 0) || (iNdEx+skippy) < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if (iNdEx + skippy) > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			iNdEx += skippy
    -		}
    -	}
    -
    -	if iNdEx > l {
    -		return io.ErrUnexpectedEOF
    -	}
    -	return nil
    -}
    -func (m *ServiceCIDRList) Unmarshal(dAtA []byte) error {
    -	l := len(dAtA)
    -	iNdEx := 0
    -	for iNdEx < l {
    -		preIndex := iNdEx
    -		var wire uint64
    -		for shift := uint(0); ; shift += 7 {
    -			if shift >= 64 {
    -				return ErrIntOverflowGenerated
    -			}
    -			if iNdEx >= l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			b := dAtA[iNdEx]
    -			iNdEx++
    -			wire |= uint64(b&0x7F) << shift
    -			if b < 0x80 {
    -				break
    -			}
    -		}
    -		fieldNum := int32(wire >> 3)
    -		wireType := int(wire & 0x7)
    -		if wireType == 4 {
    -			return fmt.Errorf("proto: ServiceCIDRList: wiretype end group for non-group")
    -		}
    -		if fieldNum <= 0 {
    -			return fmt.Errorf("proto: ServiceCIDRList: illegal tag %d (wire type %d)", fieldNum, wire)
    -		}
    -		switch fieldNum {
    -		case 1:
    -			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
    -			}
    -			var msglen int
    -			for shift := uint(0); ; shift += 7 {
    -				if shift >= 64 {
    -					return ErrIntOverflowGenerated
    -				}
    -				if iNdEx >= l {
    -					return io.ErrUnexpectedEOF
    -				}
    -				b := dAtA[iNdEx]
    -				iNdEx++
    -				msglen |= int(b&0x7F) << shift
    -				if b < 0x80 {
    -					break
    -				}
    -			}
    -			if msglen < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			postIndex := iNdEx + msglen
    -			if postIndex < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if postIndex > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    -				return err
    -			}
    -			iNdEx = postIndex
    -		case 2:
    -			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
    -			}
    -			var msglen int
    -			for shift := uint(0); ; shift += 7 {
    -				if shift >= 64 {
    -					return ErrIntOverflowGenerated
    -				}
    -				if iNdEx >= l {
    -					return io.ErrUnexpectedEOF
    -				}
    -				b := dAtA[iNdEx]
    -				iNdEx++
    -				msglen |= int(b&0x7F) << shift
    -				if b < 0x80 {
    -					break
    -				}
    -			}
    -			if msglen < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			postIndex := iNdEx + msglen
    -			if postIndex < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if postIndex > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			m.Items = append(m.Items, ServiceCIDR{})
    -			if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    -				return err
    -			}
    -			iNdEx = postIndex
    -		default:
    -			iNdEx = preIndex
    -			skippy, err := skipGenerated(dAtA[iNdEx:])
    -			if err != nil {
    -				return err
    -			}
    -			if (skippy < 0) || (iNdEx+skippy) < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if (iNdEx + skippy) > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			iNdEx += skippy
    -		}
    -	}
    -
    -	if iNdEx > l {
    -		return io.ErrUnexpectedEOF
    -	}
    -	return nil
    -}
    -func (m *ServiceCIDRSpec) Unmarshal(dAtA []byte) error {
    -	l := len(dAtA)
    -	iNdEx := 0
    -	for iNdEx < l {
    -		preIndex := iNdEx
    -		var wire uint64
    -		for shift := uint(0); ; shift += 7 {
    -			if shift >= 64 {
    -				return ErrIntOverflowGenerated
    -			}
    -			if iNdEx >= l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			b := dAtA[iNdEx]
    -			iNdEx++
    -			wire |= uint64(b&0x7F) << shift
    -			if b < 0x80 {
    -				break
    -			}
    -		}
    -		fieldNum := int32(wire >> 3)
    -		wireType := int(wire & 0x7)
    -		if wireType == 4 {
    -			return fmt.Errorf("proto: ServiceCIDRSpec: wiretype end group for non-group")
    -		}
    -		if fieldNum <= 0 {
    -			return fmt.Errorf("proto: ServiceCIDRSpec: illegal tag %d (wire type %d)", fieldNum, wire)
    -		}
    -		switch fieldNum {
    -		case 1:
    -			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field CIDRs", wireType)
    -			}
    -			var stringLen uint64
    -			for shift := uint(0); ; shift += 7 {
    -				if shift >= 64 {
    -					return ErrIntOverflowGenerated
    -				}
    -				if iNdEx >= l {
    -					return io.ErrUnexpectedEOF
    -				}
    -				b := dAtA[iNdEx]
    -				iNdEx++
    -				stringLen |= uint64(b&0x7F) << shift
    -				if b < 0x80 {
    -					break
    -				}
    -			}
    -			intStringLen := int(stringLen)
    -			if intStringLen < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			postIndex := iNdEx + intStringLen
    -			if postIndex < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if postIndex > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			m.CIDRs = append(m.CIDRs, string(dAtA[iNdEx:postIndex]))
    -			iNdEx = postIndex
    -		default:
    -			iNdEx = preIndex
    -			skippy, err := skipGenerated(dAtA[iNdEx:])
    -			if err != nil {
    -				return err
    -			}
    -			if (skippy < 0) || (iNdEx+skippy) < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if (iNdEx + skippy) > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			iNdEx += skippy
    -		}
    -	}
    -
    -	if iNdEx > l {
    -		return io.ErrUnexpectedEOF
    -	}
    -	return nil
    -}
    -func (m *ServiceCIDRStatus) Unmarshal(dAtA []byte) error {
    -	l := len(dAtA)
    -	iNdEx := 0
    -	for iNdEx < l {
    -		preIndex := iNdEx
    -		var wire uint64
    -		for shift := uint(0); ; shift += 7 {
    -			if shift >= 64 {
    -				return ErrIntOverflowGenerated
    -			}
    -			if iNdEx >= l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			b := dAtA[iNdEx]
    -			iNdEx++
    -			wire |= uint64(b&0x7F) << shift
    -			if b < 0x80 {
    -				break
    -			}
    -		}
    -		fieldNum := int32(wire >> 3)
    -		wireType := int(wire & 0x7)
    -		if wireType == 4 {
    -			return fmt.Errorf("proto: ServiceCIDRStatus: wiretype end group for non-group")
    -		}
    -		if fieldNum <= 0 {
    -			return fmt.Errorf("proto: ServiceCIDRStatus: illegal tag %d (wire type %d)", fieldNum, wire)
    -		}
    -		switch fieldNum {
    -		case 1:
    -			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType)
    -			}
    -			var msglen int
    -			for shift := uint(0); ; shift += 7 {
    -				if shift >= 64 {
    -					return ErrIntOverflowGenerated
    -				}
    -				if iNdEx >= l {
    -					return io.ErrUnexpectedEOF
    -				}
    -				b := dAtA[iNdEx]
    -				iNdEx++
    -				msglen |= int(b&0x7F) << shift
    -				if b < 0x80 {
    -					break
    -				}
    -			}
    -			if msglen < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			postIndex := iNdEx + msglen
    -			if postIndex < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if postIndex > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			m.Conditions = append(m.Conditions, v1.Condition{})
    -			if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    -				return err
    -			}
    -			iNdEx = postIndex
    -		default:
    -			iNdEx = preIndex
    -			skippy, err := skipGenerated(dAtA[iNdEx:])
    -			if err != nil {
    -				return err
    -			}
    -			if (skippy < 0) || (iNdEx+skippy) < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if (iNdEx + skippy) > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			iNdEx += skippy
    -		}
    -	}
    -
    -	if iNdEx > l {
    -		return io.ErrUnexpectedEOF
    -	}
    -	return nil
    -}
    -func skipGenerated(dAtA []byte) (n int, err error) {
    -	l := len(dAtA)
    -	iNdEx := 0
    -	depth := 0
    -	for iNdEx < l {
    -		var wire uint64
    -		for shift := uint(0); ; shift += 7 {
    -			if shift >= 64 {
    -				return 0, ErrIntOverflowGenerated
    -			}
    -			if iNdEx >= l {
    -				return 0, io.ErrUnexpectedEOF
    -			}
    -			b := dAtA[iNdEx]
    -			iNdEx++
    -			wire |= (uint64(b) & 0x7F) << shift
    -			if b < 0x80 {
    -				break
    -			}
    -		}
    -		wireType := int(wire & 0x7)
    -		switch wireType {
    -		case 0:
    -			for shift := uint(0); ; shift += 7 {
    -				if shift >= 64 {
    -					return 0, ErrIntOverflowGenerated
    -				}
    -				if iNdEx >= l {
    -					return 0, io.ErrUnexpectedEOF
    -				}
    -				iNdEx++
    -				if dAtA[iNdEx-1] < 0x80 {
    -					break
    -				}
    -			}
    -		case 1:
    -			iNdEx += 8
    -		case 2:
    -			var length int
    -			for shift := uint(0); ; shift += 7 {
    -				if shift >= 64 {
    -					return 0, ErrIntOverflowGenerated
    -				}
    -				if iNdEx >= l {
    -					return 0, io.ErrUnexpectedEOF
    -				}
    -				b := dAtA[iNdEx]
    -				iNdEx++
    -				length |= (int(b) & 0x7F) << shift
    -				if b < 0x80 {
    -					break
    -				}
    -			}
    -			if length < 0 {
    -				return 0, ErrInvalidLengthGenerated
    -			}
    -			iNdEx += length
    -		case 3:
    -			depth++
    -		case 4:
    -			if depth == 0 {
    -				return 0, ErrUnexpectedEndOfGroupGenerated
    -			}
    -			depth--
    -		case 5:
    -			iNdEx += 4
    -		default:
    -			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
    -		}
    -		if iNdEx < 0 {
    -			return 0, ErrInvalidLengthGenerated
    -		}
    -		if depth == 0 {
    -			return iNdEx, nil
    -		}
    -	}
    -	return 0, io.ErrUnexpectedEOF
    -}
    -
    -var (
    -	ErrInvalidLengthGenerated        = fmt.Errorf("proto: negative length found during unmarshaling")
    -	ErrIntOverflowGenerated          = fmt.Errorf("proto: integer overflow")
    -	ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group")
    -)
    diff --git a/vendor/k8s.io/api/networking/v1alpha1/generated.proto b/vendor/k8s.io/api/networking/v1alpha1/generated.proto
    deleted file mode 100644
    index 80ec6af73..000000000
    --- a/vendor/k8s.io/api/networking/v1alpha1/generated.proto
    +++ /dev/null
    @@ -1,142 +0,0 @@
    -/*
    -Copyright The Kubernetes Authors.
    -
    -Licensed under the Apache License, Version 2.0 (the "License");
    -you may not use this file except in compliance with the License.
    -You may obtain a copy of the License at
    -
    -    http://www.apache.org/licenses/LICENSE-2.0
    -
    -Unless required by applicable law or agreed to in writing, software
    -distributed under the License is distributed on an "AS IS" BASIS,
    -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    -See the License for the specific language governing permissions and
    -limitations under the License.
    -*/
    -
    -
    -// This file was autogenerated by go-to-protobuf. Do not edit it manually!
    -
    -syntax = "proto2";
    -
    -package k8s.io.api.networking.v1alpha1;
    -
    -import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto";
    -import "k8s.io/apimachinery/pkg/runtime/generated.proto";
    -import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto";
    -
    -// Package-wide variables from generator "generated".
    -option go_package = "k8s.io/api/networking/v1alpha1";
    -
    -// IPAddress represents a single IP of a single IP Family. The object is designed to be used by APIs
    -// that operate on IP addresses. The object is used by the Service core API for allocation of IP addresses.
    -// An IP address can be represented in different formats, to guarantee the uniqueness of the IP,
    -// the name of the object is the IP address in canonical format, four decimal digits separated
    -// by dots suppressing leading zeros for IPv4 and the representation defined by RFC 5952 for IPv6.
    -// Valid: 192.168.1.5 or 2001:db8::1 or 2001:db8:aaaa:bbbb:cccc:dddd:eeee:1
    -// Invalid: 10.01.2.3 or 2001:db8:0:0:0::1
    -message IPAddress {
    -  // Standard object's metadata.
    -  // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
    -  // +optional
    -  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
    -
    -  // spec is the desired state of the IPAddress.
    -  // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
    -  // +optional
    -  optional IPAddressSpec spec = 2;
    -}
    -
    -// IPAddressList contains a list of IPAddress.
    -message IPAddressList {
    -  // Standard object's metadata.
    -  // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
    -  // +optional
    -  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
    -
    -  // items is the list of IPAddresses.
    -  repeated IPAddress items = 2;
    -}
    -
    -// IPAddressSpec describe the attributes in an IP Address.
    -message IPAddressSpec {
    -  // ParentRef references the resource that an IPAddress is attached to.
    -  // An IPAddress must reference a parent object.
    -  // +required
    -  optional ParentReference parentRef = 1;
    -}
    -
    -// ParentReference describes a reference to a parent object.
    -message ParentReference {
    -  // Group is the group of the object being referenced.
    -  // +optional
    -  optional string group = 1;
    -
    -  // Resource is the resource of the object being referenced.
    -  // +required
    -  optional string resource = 2;
    -
    -  // Namespace is the namespace of the object being referenced.
    -  // +optional
    -  optional string namespace = 3;
    -
    -  // Name is the name of the object being referenced.
    -  // +required
    -  optional string name = 4;
    -}
    -
    -// ServiceCIDR defines a range of IP addresses using CIDR format (e.g. 192.168.0.0/24 or 2001:db2::/64).
    -// This range is used to allocate ClusterIPs to Service objects.
    -message ServiceCIDR {
    -  // Standard object's metadata.
    -  // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
    -  // +optional
    -  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
    -
    -  // spec is the desired state of the ServiceCIDR.
    -  // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
    -  // +optional
    -  optional ServiceCIDRSpec spec = 2;
    -
    -  // status represents the current state of the ServiceCIDR.
    -  // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
    -  // +optional
    -  optional ServiceCIDRStatus status = 3;
    -}
    -
    -// ServiceCIDRList contains a list of ServiceCIDR objects.
    -message ServiceCIDRList {
    -  // Standard object's metadata.
    -  // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
    -  // +optional
    -  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
    -
    -  // items is the list of ServiceCIDRs.
    -  repeated ServiceCIDR items = 2;
    -}
    -
    -// ServiceCIDRSpec define the CIDRs the user wants to use for allocating ClusterIPs for Services.
    -message ServiceCIDRSpec {
    -  // CIDRs defines the IP blocks in CIDR notation (e.g. "192.168.0.0/24" or "2001:db8::/64")
    -  // from which to assign service cluster IPs. Max of two CIDRs is allowed, one of each IP family.
    -  // The network address of each CIDR, the address that identifies the subnet of a host, is reserved
    -  // and will not be allocated. The broadcast address for IPv4 CIDRs is also reserved and will not be
    -  // allocated.
    -  // This field is immutable.
    -  // +optional
    -  // +listType=atomic
    -  repeated string cidrs = 1;
    -}
    -
    -// ServiceCIDRStatus describes the current state of the ServiceCIDR.
    -message ServiceCIDRStatus {
    -  // conditions holds an array of metav1.Condition that describe the state of the ServiceCIDR.
    -  // Current service state
    -  // +optional
    -  // +patchMergeKey=type
    -  // +patchStrategy=merge
    -  // +listType=map
    -  // +listMapKey=type
    -  repeated .k8s.io.apimachinery.pkg.apis.meta.v1.Condition conditions = 1;
    -}
    -
    diff --git a/vendor/k8s.io/api/networking/v1alpha1/register.go b/vendor/k8s.io/api/networking/v1alpha1/register.go
    deleted file mode 100644
    index c8f5856b5..000000000
    --- a/vendor/k8s.io/api/networking/v1alpha1/register.go
    +++ /dev/null
    @@ -1,62 +0,0 @@
    -/*
    -Copyright 2022 The Kubernetes Authors.
    -
    -Licensed under the Apache License, Version 2.0 (the "License");
    -you may not use this file except in compliance with the License.
    -You may obtain a copy of the License at
    -
    -    http://www.apache.org/licenses/LICENSE-2.0
    -
    -Unless required by applicable law or agreed to in writing, software
    -distributed under the License is distributed on an "AS IS" BASIS,
    -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    -See the License for the specific language governing permissions and
    -limitations under the License.
    -*/
    -
    -package v1alpha1
    -
    -import (
    -	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    -	"k8s.io/apimachinery/pkg/runtime"
    -	"k8s.io/apimachinery/pkg/runtime/schema"
    -)
    -
    -// GroupName is the group name used in this package.
    -const GroupName = "networking.k8s.io"
    -
    -// SchemeGroupVersion is group version used to register objects in this package.
    -var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"}
    -
    -// Kind takes an unqualified kind and returns a Group qualified GroupKind.
    -func Kind(kind string) schema.GroupKind {
    -	return SchemeGroupVersion.WithKind(kind).GroupKind()
    -}
    -
    -// Resource takes an unqualified resource and returns a Group qualified GroupResource.
    -func Resource(resource string) schema.GroupResource {
    -	return SchemeGroupVersion.WithResource(resource).GroupResource()
    -}
    -
    -var (
    -	// SchemeBuilder holds functions that add things to a scheme.
    -	// TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api.
    -	// localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes.
    -	SchemeBuilder      = runtime.NewSchemeBuilder(addKnownTypes)
    -	localSchemeBuilder = &SchemeBuilder
    -
    -	// AddToScheme adds the types of this group into the given scheme.
    -	AddToScheme = localSchemeBuilder.AddToScheme
    -)
    -
    -// Adds the list of known types to the given scheme.
    -func addKnownTypes(scheme *runtime.Scheme) error {
    -	scheme.AddKnownTypes(SchemeGroupVersion,
    -		&IPAddress{},
    -		&IPAddressList{},
    -		&ServiceCIDR{},
    -		&ServiceCIDRList{},
    -	)
    -	metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
    -	return nil
    -}
    diff --git a/vendor/k8s.io/api/networking/v1alpha1/types.go b/vendor/k8s.io/api/networking/v1alpha1/types.go
    deleted file mode 100644
    index 0e454f026..000000000
    --- a/vendor/k8s.io/api/networking/v1alpha1/types.go
    +++ /dev/null
    @@ -1,154 +0,0 @@
    -/*
    -Copyright 2022 The Kubernetes Authors.
    -
    -Licensed under the Apache License, Version 2.0 (the "License");
    -you may not use this file except in compliance with the License.
    -You may obtain a copy of the License at
    -
    -    http://www.apache.org/licenses/LICENSE-2.0
    -
    -Unless required by applicable law or agreed to in writing, software
    -distributed under the License is distributed on an "AS IS" BASIS,
    -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    -See the License for the specific language governing permissions and
    -limitations under the License.
    -*/
    -
    -package v1alpha1
    -
    -import (
    -	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    -)
    -
    -// +genclient
    -// +genclient:nonNamespaced
    -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    -// +k8s:prerelease-lifecycle-gen:introduced=1.27
    -
    -// IPAddress represents a single IP of a single IP Family. The object is designed to be used by APIs
    -// that operate on IP addresses. The object is used by the Service core API for allocation of IP addresses.
    -// An IP address can be represented in different formats, to guarantee the uniqueness of the IP,
    -// the name of the object is the IP address in canonical format, four decimal digits separated
    -// by dots suppressing leading zeros for IPv4 and the representation defined by RFC 5952 for IPv6.
    -// Valid: 192.168.1.5 or 2001:db8::1 or 2001:db8:aaaa:bbbb:cccc:dddd:eeee:1
    -// Invalid: 10.01.2.3 or 2001:db8:0:0:0::1
    -type IPAddress struct {
    -	metav1.TypeMeta `json:",inline"`
    -	// Standard object's metadata.
    -	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
    -	// +optional
    -	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
    -	// spec is the desired state of the IPAddress.
    -	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
    -	// +optional
    -	Spec IPAddressSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
    -}
    -
    -// IPAddressSpec describe the attributes in an IP Address.
    -type IPAddressSpec struct {
    -	// ParentRef references the resource that an IPAddress is attached to.
    -	// An IPAddress must reference a parent object.
    -	// +required
    -	ParentRef *ParentReference `json:"parentRef,omitempty" protobuf:"bytes,1,opt,name=parentRef"`
    -}
    -
    -// ParentReference describes a reference to a parent object.
    -type ParentReference struct {
    -	// Group is the group of the object being referenced.
    -	// +optional
    -	Group string `json:"group,omitempty" protobuf:"bytes,1,opt,name=group"`
    -	// Resource is the resource of the object being referenced.
    -	// +required
    -	Resource string `json:"resource,omitempty" protobuf:"bytes,2,opt,name=resource"`
    -	// Namespace is the namespace of the object being referenced.
    -	// +optional
    -	Namespace string `json:"namespace,omitempty" protobuf:"bytes,3,opt,name=namespace"`
    -	// Name is the name of the object being referenced.
    -	// +required
    -	Name string `json:"name,omitempty" protobuf:"bytes,4,opt,name=name"`
    -}
    -
    -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    -// +k8s:prerelease-lifecycle-gen:introduced=1.27
    -
    -// IPAddressList contains a list of IPAddress.
    -type IPAddressList struct {
    -	metav1.TypeMeta `json:",inline"`
    -	// Standard object's metadata.
    -	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
    -	// +optional
    -	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
    -	// items is the list of IPAddresses.
    -	Items []IPAddress `json:"items" protobuf:"bytes,2,rep,name=items"`
    -}
    -
    -// +genclient
    -// +genclient:nonNamespaced
    -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    -// +k8s:prerelease-lifecycle-gen:introduced=1.27
    -
    -// ServiceCIDR defines a range of IP addresses using CIDR format (e.g. 192.168.0.0/24 or 2001:db2::/64).
    -// This range is used to allocate ClusterIPs to Service objects.
    -type ServiceCIDR struct {
    -	metav1.TypeMeta `json:",inline"`
    -	// Standard object's metadata.
    -	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
    -	// +optional
    -	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
    -	// spec is the desired state of the ServiceCIDR.
    -	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
    -	// +optional
    -	Spec ServiceCIDRSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
    -	// status represents the current state of the ServiceCIDR.
    -	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
    -	// +optional
    -	Status ServiceCIDRStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
    -}
    -
    -// ServiceCIDRSpec define the CIDRs the user wants to use for allocating ClusterIPs for Services.
    -type ServiceCIDRSpec struct {
    -	// CIDRs defines the IP blocks in CIDR notation (e.g. "192.168.0.0/24" or "2001:db8::/64")
    -	// from which to assign service cluster IPs. Max of two CIDRs is allowed, one of each IP family.
    -	// The network address of each CIDR, the address that identifies the subnet of a host, is reserved
    -	// and will not be allocated. The broadcast address for IPv4 CIDRs is also reserved and will not be
    -	// allocated.
    -	// This field is immutable.
    -	// +optional
    -	// +listType=atomic
    -	CIDRs []string `json:"cidrs,omitempty" protobuf:"bytes,1,opt,name=cidrs"`
    -}
    -
    -const (
    -	// ServiceCIDRConditionReady represents status of a ServiceCIDR that is ready to be used by the
    -	// apiserver to allocate ClusterIPs for Services.
    -	ServiceCIDRConditionReady = "Ready"
    -	// ServiceCIDRReasonTerminating represents a reason where a ServiceCIDR is not ready because it is
    -	// being deleted.
    -	ServiceCIDRReasonTerminating = "Terminating"
    -)
    -
    -// ServiceCIDRStatus describes the current state of the ServiceCIDR.
    -type ServiceCIDRStatus struct {
    -	// conditions holds an array of metav1.Condition that describe the state of the ServiceCIDR.
    -	// Current service state
    -	// +optional
    -	// +patchMergeKey=type
    -	// +patchStrategy=merge
    -	// +listType=map
    -	// +listMapKey=type
    -	Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"`
    -}
    -
    -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    -// +k8s:prerelease-lifecycle-gen:introduced=1.27
    -
    -// ServiceCIDRList contains a list of ServiceCIDR objects.
    -type ServiceCIDRList struct {
    -	metav1.TypeMeta `json:",inline"`
    -	// Standard object's metadata.
    -	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
    -	// +optional
    -	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
    -	// items is the list of ServiceCIDRs.
    -	Items []ServiceCIDR `json:"items" protobuf:"bytes,2,rep,name=items"`
    -}
    diff --git a/vendor/k8s.io/api/networking/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/networking/v1alpha1/types_swagger_doc_generated.go
    deleted file mode 100644
    index 4c8eb57a7..000000000
    --- a/vendor/k8s.io/api/networking/v1alpha1/types_swagger_doc_generated.go
    +++ /dev/null
    @@ -1,110 +0,0 @@
    -/*
    -Copyright The Kubernetes Authors.
    -
    -Licensed under the Apache License, Version 2.0 (the "License");
    -you may not use this file except in compliance with the License.
    -You may obtain a copy of the License at
    -
    -    http://www.apache.org/licenses/LICENSE-2.0
    -
    -Unless required by applicable law or agreed to in writing, software
    -distributed under the License is distributed on an "AS IS" BASIS,
    -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    -See the License for the specific language governing permissions and
    -limitations under the License.
    -*/
    -
    -package v1alpha1
    -
    -// This file contains a collection of methods that can be used from go-restful to
    -// generate Swagger API documentation for its models. Please read this PR for more
    -// information on the implementation: https://github.com/emicklei/go-restful/pull/215
    -//
    -// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if
    -// they are on one line! For multiple line or blocks that you want to ignore use ---.
    -// Any context after a --- is ignored.
    -//
    -// Those methods can be generated by using hack/update-codegen.sh
    -
    -// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT.
    -var map_IPAddress = map[string]string{
    -	"":         "IPAddress represents a single IP of a single IP Family. The object is designed to be used by APIs that operate on IP addresses. The object is used by the Service core API for allocation of IP addresses. An IP address can be represented in different formats, to guarantee the uniqueness of the IP, the name of the object is the IP address in canonical format, four decimal digits separated by dots suppressing leading zeros for IPv4 and the representation defined by RFC 5952 for IPv6. Valid: 192.168.1.5 or 2001:db8::1 or 2001:db8:aaaa:bbbb:cccc:dddd:eeee:1 Invalid: 10.01.2.3 or 2001:db8:0:0:0::1",
    -	"metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
    -	"spec":     "spec is the desired state of the IPAddress. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status",
    -}
    -
    -func (IPAddress) SwaggerDoc() map[string]string {
    -	return map_IPAddress
    -}
    -
    -var map_IPAddressList = map[string]string{
    -	"":         "IPAddressList contains a list of IPAddress.",
    -	"metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
    -	"items":    "items is the list of IPAddresses.",
    -}
    -
    -func (IPAddressList) SwaggerDoc() map[string]string {
    -	return map_IPAddressList
    -}
    -
    -var map_IPAddressSpec = map[string]string{
    -	"":          "IPAddressSpec describe the attributes in an IP Address.",
    -	"parentRef": "ParentRef references the resource that an IPAddress is attached to. An IPAddress must reference a parent object.",
    -}
    -
    -func (IPAddressSpec) SwaggerDoc() map[string]string {
    -	return map_IPAddressSpec
    -}
    -
    -var map_ParentReference = map[string]string{
    -	"":          "ParentReference describes a reference to a parent object.",
    -	"group":     "Group is the group of the object being referenced.",
    -	"resource":  "Resource is the resource of the object being referenced.",
    -	"namespace": "Namespace is the namespace of the object being referenced.",
    -	"name":      "Name is the name of the object being referenced.",
    -}
    -
    -func (ParentReference) SwaggerDoc() map[string]string {
    -	return map_ParentReference
    -}
    -
    -var map_ServiceCIDR = map[string]string{
    -	"":         "ServiceCIDR defines a range of IP addresses using CIDR format (e.g. 192.168.0.0/24 or 2001:db2::/64). This range is used to allocate ClusterIPs to Service objects.",
    -	"metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
    -	"spec":     "spec is the desired state of the ServiceCIDR. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status",
    -	"status":   "status represents the current state of the ServiceCIDR. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status",
    -}
    -
    -func (ServiceCIDR) SwaggerDoc() map[string]string {
    -	return map_ServiceCIDR
    -}
    -
    -var map_ServiceCIDRList = map[string]string{
    -	"":         "ServiceCIDRList contains a list of ServiceCIDR objects.",
    -	"metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
    -	"items":    "items is the list of ServiceCIDRs.",
    -}
    -
    -func (ServiceCIDRList) SwaggerDoc() map[string]string {
    -	return map_ServiceCIDRList
    -}
    -
    -var map_ServiceCIDRSpec = map[string]string{
    -	"":      "ServiceCIDRSpec define the CIDRs the user wants to use for allocating ClusterIPs for Services.",
    -	"cidrs": "CIDRs defines the IP blocks in CIDR notation (e.g. \"192.168.0.0/24\" or \"2001:db8::/64\") from which to assign service cluster IPs. Max of two CIDRs is allowed, one of each IP family. The network address of each CIDR, the address that identifies the subnet of a host, is reserved and will not be allocated. The broadcast address for IPv4 CIDRs is also reserved and will not be allocated. This field is immutable.",
    -}
    -
    -func (ServiceCIDRSpec) SwaggerDoc() map[string]string {
    -	return map_ServiceCIDRSpec
    -}
    -
    -var map_ServiceCIDRStatus = map[string]string{
    -	"":           "ServiceCIDRStatus describes the current state of the ServiceCIDR.",
    -	"conditions": "conditions holds an array of metav1.Condition that describe the state of the ServiceCIDR. Current service state",
    -}
    -
    -func (ServiceCIDRStatus) SwaggerDoc() map[string]string {
    -	return map_ServiceCIDRStatus
    -}
    -
    -// AUTO-GENERATED FUNCTIONS END HERE
    diff --git a/vendor/k8s.io/api/networking/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/api/networking/v1alpha1/zz_generated.deepcopy.go
    deleted file mode 100644
    index 5c8f697ba..000000000
    --- a/vendor/k8s.io/api/networking/v1alpha1/zz_generated.deepcopy.go
    +++ /dev/null
    @@ -1,229 +0,0 @@
    -//go:build !ignore_autogenerated
    -// +build !ignore_autogenerated
    -
    -/*
    -Copyright The Kubernetes Authors.
    -
    -Licensed under the Apache License, Version 2.0 (the "License");
    -you may not use this file except in compliance with the License.
    -You may obtain a copy of the License at
    -
    -    http://www.apache.org/licenses/LICENSE-2.0
    -
    -Unless required by applicable law or agreed to in writing, software
    -distributed under the License is distributed on an "AS IS" BASIS,
    -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    -See the License for the specific language governing permissions and
    -limitations under the License.
    -*/
    -
    -// Code generated by deepcopy-gen. DO NOT EDIT.
    -
    -package v1alpha1
    -
    -import (
    -	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    -	runtime "k8s.io/apimachinery/pkg/runtime"
    -)
    -
    -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
    -func (in *IPAddress) DeepCopyInto(out *IPAddress) {
    -	*out = *in
    -	out.TypeMeta = in.TypeMeta
    -	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
    -	in.Spec.DeepCopyInto(&out.Spec)
    -	return
    -}
    -
    -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPAddress.
    -func (in *IPAddress) DeepCopy() *IPAddress {
    -	if in == nil {
    -		return nil
    -	}
    -	out := new(IPAddress)
    -	in.DeepCopyInto(out)
    -	return out
    -}
    -
    -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
    -func (in *IPAddress) DeepCopyObject() runtime.Object {
    -	if c := in.DeepCopy(); c != nil {
    -		return c
    -	}
    -	return nil
    -}
    -
    -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
    -func (in *IPAddressList) DeepCopyInto(out *IPAddressList) {
    -	*out = *in
    -	out.TypeMeta = in.TypeMeta
    -	in.ListMeta.DeepCopyInto(&out.ListMeta)
    -	if in.Items != nil {
    -		in, out := &in.Items, &out.Items
    -		*out = make([]IPAddress, len(*in))
    -		for i := range *in {
    -			(*in)[i].DeepCopyInto(&(*out)[i])
    -		}
    -	}
    -	return
    -}
    -
    -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPAddressList.
    -func (in *IPAddressList) DeepCopy() *IPAddressList {
    -	if in == nil {
    -		return nil
    -	}
    -	out := new(IPAddressList)
    -	in.DeepCopyInto(out)
    -	return out
    -}
    -
    -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
    -func (in *IPAddressList) DeepCopyObject() runtime.Object {
    -	if c := in.DeepCopy(); c != nil {
    -		return c
    -	}
    -	return nil
    -}
    -
    -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
    -func (in *IPAddressSpec) DeepCopyInto(out *IPAddressSpec) {
    -	*out = *in
    -	if in.ParentRef != nil {
    -		in, out := &in.ParentRef, &out.ParentRef
    -		*out = new(ParentReference)
    -		**out = **in
    -	}
    -	return
    -}
    -
    -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPAddressSpec.
    -func (in *IPAddressSpec) DeepCopy() *IPAddressSpec {
    -	if in == nil {
    -		return nil
    -	}
    -	out := new(IPAddressSpec)
    -	in.DeepCopyInto(out)
    -	return out
    -}
    -
    -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
    -func (in *ParentReference) DeepCopyInto(out *ParentReference) {
    -	*out = *in
    -	return
    -}
    -
    -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ParentReference.
    -func (in *ParentReference) DeepCopy() *ParentReference {
    -	if in == nil {
    -		return nil
    -	}
    -	out := new(ParentReference)
    -	in.DeepCopyInto(out)
    -	return out
    -}
    -
    -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
    -func (in *ServiceCIDR) DeepCopyInto(out *ServiceCIDR) {
    -	*out = *in
    -	out.TypeMeta = in.TypeMeta
    -	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
    -	in.Spec.DeepCopyInto(&out.Spec)
    -	in.Status.DeepCopyInto(&out.Status)
    -	return
    -}
    -
    -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCIDR.
    -func (in *ServiceCIDR) DeepCopy() *ServiceCIDR {
    -	if in == nil {
    -		return nil
    -	}
    -	out := new(ServiceCIDR)
    -	in.DeepCopyInto(out)
    -	return out
    -}
    -
    -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
    -func (in *ServiceCIDR) DeepCopyObject() runtime.Object {
    -	if c := in.DeepCopy(); c != nil {
    -		return c
    -	}
    -	return nil
    -}
    -
    -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
    -func (in *ServiceCIDRList) DeepCopyInto(out *ServiceCIDRList) {
    -	*out = *in
    -	out.TypeMeta = in.TypeMeta
    -	in.ListMeta.DeepCopyInto(&out.ListMeta)
    -	if in.Items != nil {
    -		in, out := &in.Items, &out.Items
    -		*out = make([]ServiceCIDR, len(*in))
    -		for i := range *in {
    -			(*in)[i].DeepCopyInto(&(*out)[i])
    -		}
    -	}
    -	return
    -}
    -
    -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCIDRList.
    -func (in *ServiceCIDRList) DeepCopy() *ServiceCIDRList {
    -	if in == nil {
    -		return nil
    -	}
    -	out := new(ServiceCIDRList)
    -	in.DeepCopyInto(out)
    -	return out
    -}
    -
    -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
    -func (in *ServiceCIDRList) DeepCopyObject() runtime.Object {
    -	if c := in.DeepCopy(); c != nil {
    -		return c
    -	}
    -	return nil
    -}
    -
    -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
    -func (in *ServiceCIDRSpec) DeepCopyInto(out *ServiceCIDRSpec) {
    -	*out = *in
    -	if in.CIDRs != nil {
    -		in, out := &in.CIDRs, &out.CIDRs
    -		*out = make([]string, len(*in))
    -		copy(*out, *in)
    -	}
    -	return
    -}
    -
    -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCIDRSpec.
    -func (in *ServiceCIDRSpec) DeepCopy() *ServiceCIDRSpec {
    -	if in == nil {
    -		return nil
    -	}
    -	out := new(ServiceCIDRSpec)
    -	in.DeepCopyInto(out)
    -	return out
    -}
    -
    -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
    -func (in *ServiceCIDRStatus) DeepCopyInto(out *ServiceCIDRStatus) {
    -	*out = *in
    -	if in.Conditions != nil {
    -		in, out := &in.Conditions, &out.Conditions
    -		*out = make([]v1.Condition, len(*in))
    -		for i := range *in {
    -			(*in)[i].DeepCopyInto(&(*out)[i])
    -		}
    -	}
    -	return
    -}
    -
    -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCIDRStatus.
    -func (in *ServiceCIDRStatus) DeepCopy() *ServiceCIDRStatus {
    -	if in == nil {
    -		return nil
    -	}
    -	out := new(ServiceCIDRStatus)
    -	in.DeepCopyInto(out)
    -	return out
    -}
    diff --git a/vendor/k8s.io/api/networking/v1alpha1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/networking/v1alpha1/zz_generated.prerelease-lifecycle.go
    deleted file mode 100644
    index 714e7b625..000000000
    --- a/vendor/k8s.io/api/networking/v1alpha1/zz_generated.prerelease-lifecycle.go
    +++ /dev/null
    @@ -1,94 +0,0 @@
    -//go:build !ignore_autogenerated
    -// +build !ignore_autogenerated
    -
    -/*
    -Copyright The Kubernetes Authors.
    -
    -Licensed under the Apache License, Version 2.0 (the "License");
    -you may not use this file except in compliance with the License.
    -You may obtain a copy of the License at
    -
    -    http://www.apache.org/licenses/LICENSE-2.0
    -
    -Unless required by applicable law or agreed to in writing, software
    -distributed under the License is distributed on an "AS IS" BASIS,
    -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    -See the License for the specific language governing permissions and
    -limitations under the License.
    -*/
    -
    -// Code generated by prerelease-lifecycle-gen. DO NOT EDIT.
    -
    -package v1alpha1
    -
    -// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
    -// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
    -func (in *IPAddress) APILifecycleIntroduced() (major, minor int) {
    -	return 1, 27
    -}
    -
    -// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison.
    -// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or  "k8s:prerelease-lifecycle-gen:introduced" plus three minor.
    -func (in *IPAddress) APILifecycleDeprecated() (major, minor int) {
    -	return 1, 30
    -}
    -
    -// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison.
    -// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or  "k8s:prerelease-lifecycle-gen:deprecated" plus three minor.
    -func (in *IPAddress) APILifecycleRemoved() (major, minor int) {
    -	return 1, 33
    -}
    -
    -// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
    -// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
    -func (in *IPAddressList) APILifecycleIntroduced() (major, minor int) {
    -	return 1, 27
    -}
    -
    -// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison.
    -// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or  "k8s:prerelease-lifecycle-gen:introduced" plus three minor.
    -func (in *IPAddressList) APILifecycleDeprecated() (major, minor int) {
    -	return 1, 30
    -}
    -
    -// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison.
    -// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or  "k8s:prerelease-lifecycle-gen:deprecated" plus three minor.
    -func (in *IPAddressList) APILifecycleRemoved() (major, minor int) {
    -	return 1, 33
    -}
    -
    -// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
    -// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
    -func (in *ServiceCIDR) APILifecycleIntroduced() (major, minor int) {
    -	return 1, 27
    -}
    -
    -// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison.
    -// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or  "k8s:prerelease-lifecycle-gen:introduced" plus three minor.
    -func (in *ServiceCIDR) APILifecycleDeprecated() (major, minor int) {
    -	return 1, 30
    -}
    -
    -// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison.
    -// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or  "k8s:prerelease-lifecycle-gen:deprecated" plus three minor.
    -func (in *ServiceCIDR) APILifecycleRemoved() (major, minor int) {
    -	return 1, 33
    -}
    -
    -// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
    -// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
    -func (in *ServiceCIDRList) APILifecycleIntroduced() (major, minor int) {
    -	return 1, 27
    -}
    -
    -// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison.
    -// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or  "k8s:prerelease-lifecycle-gen:introduced" plus three minor.
    -func (in *ServiceCIDRList) APILifecycleDeprecated() (major, minor int) {
    -	return 1, 30
    -}
    -
    -// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison.
    -// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or  "k8s:prerelease-lifecycle-gen:deprecated" plus three minor.
    -func (in *ServiceCIDRList) APILifecycleRemoved() (major, minor int) {
    -	return 1, 33
    -}
    diff --git a/vendor/k8s.io/api/networking/v1beta1/doc.go b/vendor/k8s.io/api/networking/v1beta1/doc.go
    index fa6d01cea..c5a03e04e 100644
    --- a/vendor/k8s.io/api/networking/v1beta1/doc.go
    +++ b/vendor/k8s.io/api/networking/v1beta1/doc.go
    @@ -20,4 +20,4 @@ limitations under the License.
     // +k8s:prerelease-lifecycle-gen=true
     // +groupName=networking.k8s.io
     
    -package v1beta1 // import "k8s.io/api/networking/v1beta1"
    +package v1beta1
    diff --git a/vendor/k8s.io/api/node/v1/doc.go b/vendor/k8s.io/api/node/v1/doc.go
    index 57ca52445..3239af703 100644
    --- a/vendor/k8s.io/api/node/v1/doc.go
    +++ b/vendor/k8s.io/api/node/v1/doc.go
    @@ -20,4 +20,4 @@ limitations under the License.
     // +k8s:prerelease-lifecycle-gen=true
     // +groupName=node.k8s.io
     
    -package v1 // import "k8s.io/api/node/v1"
    +package v1
    diff --git a/vendor/k8s.io/api/node/v1alpha1/doc.go b/vendor/k8s.io/api/node/v1alpha1/doc.go
    index dfe99540b..2f3d46ac2 100644
    --- a/vendor/k8s.io/api/node/v1alpha1/doc.go
    +++ b/vendor/k8s.io/api/node/v1alpha1/doc.go
    @@ -20,4 +20,4 @@ limitations under the License.
     
     // +groupName=node.k8s.io
     
    -package v1alpha1 // import "k8s.io/api/node/v1alpha1"
    +package v1alpha1
    diff --git a/vendor/k8s.io/api/node/v1beta1/doc.go b/vendor/k8s.io/api/node/v1beta1/doc.go
    index c76ba89c4..7b47c8df6 100644
    --- a/vendor/k8s.io/api/node/v1beta1/doc.go
    +++ b/vendor/k8s.io/api/node/v1beta1/doc.go
    @@ -21,4 +21,4 @@ limitations under the License.
     
     // +groupName=node.k8s.io
     
    -package v1beta1 // import "k8s.io/api/node/v1beta1"
    +package v1beta1
    diff --git a/vendor/k8s.io/api/policy/v1/doc.go b/vendor/k8s.io/api/policy/v1/doc.go
    index c51e02685..ff47e7fd4 100644
    --- a/vendor/k8s.io/api/policy/v1/doc.go
    +++ b/vendor/k8s.io/api/policy/v1/doc.go
    @@ -22,4 +22,4 @@ limitations under the License.
     // Package policy is for any kind of policy object.  Suitable examples, even if
     // they aren't all here, are PodDisruptionBudget,
     // NetworkPolicy, etc.
    -package v1 // import "k8s.io/api/policy/v1"
    +package v1
    diff --git a/vendor/k8s.io/api/policy/v1/generated.proto b/vendor/k8s.io/api/policy/v1/generated.proto
    index 57128e811..953489072 100644
    --- a/vendor/k8s.io/api/policy/v1/generated.proto
    +++ b/vendor/k8s.io/api/policy/v1/generated.proto
    @@ -115,9 +115,6 @@ message PodDisruptionBudgetSpec {
       // Additional policies may be added in the future.
       // Clients making eviction decisions should disallow eviction of unhealthy pods
       // if they encounter an unrecognized policy in this field.
    -  //
    -  // This field is beta-level. The eviction API uses this field when
    -  // the feature gate PDBUnhealthyPodEvictionPolicy is enabled (enabled by default).
       // +optional
       optional string unhealthyPodEvictionPolicy = 4;
     }
    diff --git a/vendor/k8s.io/api/policy/v1/types.go b/vendor/k8s.io/api/policy/v1/types.go
    index f05367ebe..4e7436789 100644
    --- a/vendor/k8s.io/api/policy/v1/types.go
    +++ b/vendor/k8s.io/api/policy/v1/types.go
    @@ -70,9 +70,6 @@ type PodDisruptionBudgetSpec struct {
     	// Additional policies may be added in the future.
     	// Clients making eviction decisions should disallow eviction of unhealthy pods
     	// if they encounter an unrecognized policy in this field.
    -	//
    -	// This field is beta-level. The eviction API uses this field when
    -	// the feature gate PDBUnhealthyPodEvictionPolicy is enabled (enabled by default).
     	// +optional
     	UnhealthyPodEvictionPolicy *UnhealthyPodEvictionPolicyType `json:"unhealthyPodEvictionPolicy,omitempty" protobuf:"bytes,4,opt,name=unhealthyPodEvictionPolicy"`
     }
    diff --git a/vendor/k8s.io/api/policy/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/policy/v1/types_swagger_doc_generated.go
    index 799b0794a..9b2f5b945 100644
    --- a/vendor/k8s.io/api/policy/v1/types_swagger_doc_generated.go
    +++ b/vendor/k8s.io/api/policy/v1/types_swagger_doc_generated.go
    @@ -63,7 +63,7 @@ var map_PodDisruptionBudgetSpec = map[string]string{
     	"minAvailable":               "An eviction is allowed if at least \"minAvailable\" pods selected by \"selector\" will still be available after the eviction, i.e. even in the absence of the evicted pod.  So for example you can prevent all voluntary evictions by specifying \"100%\".",
     	"selector":                   "Label query over pods whose evictions are managed by the disruption budget. A null selector will match no pods, while an empty ({}) selector will select all pods within the namespace.",
     	"maxUnavailable":             "An eviction is allowed if at most \"maxUnavailable\" pods selected by \"selector\" are unavailable after the eviction, i.e. even in absence of the evicted pod. For example, one can prevent all voluntary evictions by specifying 0. This is a mutually exclusive setting with \"minAvailable\".",
    -	"unhealthyPodEvictionPolicy": "UnhealthyPodEvictionPolicy defines the criteria for when unhealthy pods should be considered for eviction. Current implementation considers healthy pods, as pods that have status.conditions item with type=\"Ready\",status=\"True\".\n\nValid policies are IfHealthyBudget and AlwaysAllow. If no policy is specified, the default behavior will be used, which corresponds to the IfHealthyBudget policy.\n\nIfHealthyBudget policy means that running pods (status.phase=\"Running\"), but not yet healthy can be evicted only if the guarded application is not disrupted (status.currentHealthy is at least equal to status.desiredHealthy). Healthy pods will be subject to the PDB for eviction.\n\nAlwaysAllow policy means that all running pods (status.phase=\"Running\"), but not yet healthy are considered disrupted and can be evicted regardless of whether the criteria in a PDB is met. This means perspective running pods of a disrupted application might not get a chance to become healthy. Healthy pods will be subject to the PDB for eviction.\n\nAdditional policies may be added in the future. Clients making eviction decisions should disallow eviction of unhealthy pods if they encounter an unrecognized policy in this field.\n\nThis field is beta-level. The eviction API uses this field when the feature gate PDBUnhealthyPodEvictionPolicy is enabled (enabled by default).",
    +	"unhealthyPodEvictionPolicy": "UnhealthyPodEvictionPolicy defines the criteria for when unhealthy pods should be considered for eviction. Current implementation considers healthy pods, as pods that have status.conditions item with type=\"Ready\",status=\"True\".\n\nValid policies are IfHealthyBudget and AlwaysAllow. If no policy is specified, the default behavior will be used, which corresponds to the IfHealthyBudget policy.\n\nIfHealthyBudget policy means that running pods (status.phase=\"Running\"), but not yet healthy can be evicted only if the guarded application is not disrupted (status.currentHealthy is at least equal to status.desiredHealthy). Healthy pods will be subject to the PDB for eviction.\n\nAlwaysAllow policy means that all running pods (status.phase=\"Running\"), but not yet healthy are considered disrupted and can be evicted regardless of whether the criteria in a PDB is met. This means perspective running pods of a disrupted application might not get a chance to become healthy. Healthy pods will be subject to the PDB for eviction.\n\nAdditional policies may be added in the future. Clients making eviction decisions should disallow eviction of unhealthy pods if they encounter an unrecognized policy in this field.",
     }
     
     func (PodDisruptionBudgetSpec) SwaggerDoc() map[string]string {
    diff --git a/vendor/k8s.io/api/policy/v1beta1/doc.go b/vendor/k8s.io/api/policy/v1beta1/doc.go
    index 76da54b4c..777106c60 100644
    --- a/vendor/k8s.io/api/policy/v1beta1/doc.go
    +++ b/vendor/k8s.io/api/policy/v1beta1/doc.go
    @@ -22,4 +22,4 @@ limitations under the License.
     // Package policy is for any kind of policy object.  Suitable examples, even if
     // they aren't all here, are PodDisruptionBudget,
     // NetworkPolicy, etc.
    -package v1beta1 // import "k8s.io/api/policy/v1beta1"
    +package v1beta1
    diff --git a/vendor/k8s.io/api/policy/v1beta1/generated.proto b/vendor/k8s.io/api/policy/v1beta1/generated.proto
    index 91e33f233..e0cbe00f1 100644
    --- a/vendor/k8s.io/api/policy/v1beta1/generated.proto
    +++ b/vendor/k8s.io/api/policy/v1beta1/generated.proto
    @@ -115,9 +115,6 @@ message PodDisruptionBudgetSpec {
       // Additional policies may be added in the future.
       // Clients making eviction decisions should disallow eviction of unhealthy pods
       // if they encounter an unrecognized policy in this field.
    -  //
    -  // This field is beta-level. The eviction API uses this field when
    -  // the feature gate PDBUnhealthyPodEvictionPolicy is enabled (enabled by default).
       // +optional
       optional string unhealthyPodEvictionPolicy = 4;
     }
    diff --git a/vendor/k8s.io/api/policy/v1beta1/types.go b/vendor/k8s.io/api/policy/v1beta1/types.go
    index bc5f970d2..9bba454f9 100644
    --- a/vendor/k8s.io/api/policy/v1beta1/types.go
    +++ b/vendor/k8s.io/api/policy/v1beta1/types.go
    @@ -67,9 +67,6 @@ type PodDisruptionBudgetSpec struct {
     	// Additional policies may be added in the future.
     	// Clients making eviction decisions should disallow eviction of unhealthy pods
     	// if they encounter an unrecognized policy in this field.
    -	//
    -	// This field is beta-level. The eviction API uses this field when
    -	// the feature gate PDBUnhealthyPodEvictionPolicy is enabled (enabled by default).
     	// +optional
     	UnhealthyPodEvictionPolicy *UnhealthyPodEvictionPolicyType `json:"unhealthyPodEvictionPolicy,omitempty" protobuf:"bytes,4,opt,name=unhealthyPodEvictionPolicy"`
     }
    diff --git a/vendor/k8s.io/api/policy/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/policy/v1beta1/types_swagger_doc_generated.go
    index 4a79d7594..cffc9a548 100644
    --- a/vendor/k8s.io/api/policy/v1beta1/types_swagger_doc_generated.go
    +++ b/vendor/k8s.io/api/policy/v1beta1/types_swagger_doc_generated.go
    @@ -63,7 +63,7 @@ var map_PodDisruptionBudgetSpec = map[string]string{
     	"minAvailable":               "An eviction is allowed if at least \"minAvailable\" pods selected by \"selector\" will still be available after the eviction, i.e. even in the absence of the evicted pod.  So for example you can prevent all voluntary evictions by specifying \"100%\".",
     	"selector":                   "Label query over pods whose evictions are managed by the disruption budget. A null selector selects no pods. An empty selector ({}) also selects no pods, which differs from standard behavior of selecting all pods. In policy/v1, an empty selector will select all pods in the namespace.",
     	"maxUnavailable":             "An eviction is allowed if at most \"maxUnavailable\" pods selected by \"selector\" are unavailable after the eviction, i.e. even in absence of the evicted pod. For example, one can prevent all voluntary evictions by specifying 0. This is a mutually exclusive setting with \"minAvailable\".",
    -	"unhealthyPodEvictionPolicy": "UnhealthyPodEvictionPolicy defines the criteria for when unhealthy pods should be considered for eviction. Current implementation considers healthy pods, as pods that have status.conditions item with type=\"Ready\",status=\"True\".\n\nValid policies are IfHealthyBudget and AlwaysAllow. If no policy is specified, the default behavior will be used, which corresponds to the IfHealthyBudget policy.\n\nIfHealthyBudget policy means that running pods (status.phase=\"Running\"), but not yet healthy can be evicted only if the guarded application is not disrupted (status.currentHealthy is at least equal to status.desiredHealthy). Healthy pods will be subject to the PDB for eviction.\n\nAlwaysAllow policy means that all running pods (status.phase=\"Running\"), but not yet healthy are considered disrupted and can be evicted regardless of whether the criteria in a PDB is met. This means perspective running pods of a disrupted application might not get a chance to become healthy. Healthy pods will be subject to the PDB for eviction.\n\nAdditional policies may be added in the future. Clients making eviction decisions should disallow eviction of unhealthy pods if they encounter an unrecognized policy in this field.\n\nThis field is beta-level. The eviction API uses this field when the feature gate PDBUnhealthyPodEvictionPolicy is enabled (enabled by default).",
    +	"unhealthyPodEvictionPolicy": "UnhealthyPodEvictionPolicy defines the criteria for when unhealthy pods should be considered for eviction. Current implementation considers healthy pods, as pods that have status.conditions item with type=\"Ready\",status=\"True\".\n\nValid policies are IfHealthyBudget and AlwaysAllow. If no policy is specified, the default behavior will be used, which corresponds to the IfHealthyBudget policy.\n\nIfHealthyBudget policy means that running pods (status.phase=\"Running\"), but not yet healthy can be evicted only if the guarded application is not disrupted (status.currentHealthy is at least equal to status.desiredHealthy). Healthy pods will be subject to the PDB for eviction.\n\nAlwaysAllow policy means that all running pods (status.phase=\"Running\"), but not yet healthy are considered disrupted and can be evicted regardless of whether the criteria in a PDB is met. This means perspective running pods of a disrupted application might not get a chance to become healthy. Healthy pods will be subject to the PDB for eviction.\n\nAdditional policies may be added in the future. Clients making eviction decisions should disallow eviction of unhealthy pods if they encounter an unrecognized policy in this field.",
     }
     
     func (PodDisruptionBudgetSpec) SwaggerDoc() map[string]string {
    diff --git a/vendor/k8s.io/api/rbac/v1/doc.go b/vendor/k8s.io/api/rbac/v1/doc.go
    index b0e4e5b5b..408546274 100644
    --- a/vendor/k8s.io/api/rbac/v1/doc.go
    +++ b/vendor/k8s.io/api/rbac/v1/doc.go
    @@ -20,4 +20,4 @@ limitations under the License.
     // +k8s:prerelease-lifecycle-gen=true
     // +groupName=rbac.authorization.k8s.io
     
    -package v1 // import "k8s.io/api/rbac/v1"
    +package v1
    diff --git a/vendor/k8s.io/api/rbac/v1alpha1/doc.go b/vendor/k8s.io/api/rbac/v1alpha1/doc.go
    index 918b8a337..70d3c0e97 100644
    --- a/vendor/k8s.io/api/rbac/v1alpha1/doc.go
    +++ b/vendor/k8s.io/api/rbac/v1alpha1/doc.go
    @@ -20,4 +20,4 @@ limitations under the License.
     
     // +groupName=rbac.authorization.k8s.io
     
    -package v1alpha1 // import "k8s.io/api/rbac/v1alpha1"
    +package v1alpha1
    diff --git a/vendor/k8s.io/api/rbac/v1beta1/doc.go b/vendor/k8s.io/api/rbac/v1beta1/doc.go
    index 156f273e6..504a58d8b 100644
    --- a/vendor/k8s.io/api/rbac/v1beta1/doc.go
    +++ b/vendor/k8s.io/api/rbac/v1beta1/doc.go
    @@ -21,4 +21,4 @@ limitations under the License.
     
     // +groupName=rbac.authorization.k8s.io
     
    -package v1beta1 // import "k8s.io/api/rbac/v1beta1"
    +package v1beta1
    diff --git a/vendor/k8s.io/api/resource/v1/devicetaint.go b/vendor/k8s.io/api/resource/v1/devicetaint.go
    new file mode 100644
    index 000000000..a5c2e20a6
    --- /dev/null
    +++ b/vendor/k8s.io/api/resource/v1/devicetaint.go
    @@ -0,0 +1,35 @@
    +/*
    +Copyright 2025 The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +package v1
    +
    +import "fmt"
    +
    +var _ fmt.Stringer = DeviceTaint{}
    +
    +// String converts to a string in the format '=:', '=:', ':', or ''.
    +func (t DeviceTaint) String() string {
    +	if len(t.Effect) == 0 {
    +		if len(t.Value) == 0 {
    +			return fmt.Sprintf("%v", t.Key)
    +		}
    +		return fmt.Sprintf("%v=%v:", t.Key, t.Value)
    +	}
    +	if len(t.Value) == 0 {
    +		return fmt.Sprintf("%v:%v", t.Key, t.Effect)
    +	}
    +	return fmt.Sprintf("%v=%v:%v", t.Key, t.Value, t.Effect)
    +}
    diff --git a/vendor/k8s.io/api/resource/v1/doc.go b/vendor/k8s.io/api/resource/v1/doc.go
    new file mode 100644
    index 000000000..c94ca75dd
    --- /dev/null
    +++ b/vendor/k8s.io/api/resource/v1/doc.go
    @@ -0,0 +1,24 @@
    +/*
    +Copyright 2025 The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// +k8s:openapi-gen=true
    +// +k8s:deepcopy-gen=package
    +// +k8s:protobuf-gen=package
    +// +k8s:prerelease-lifecycle-gen=true
    +// +groupName=resource.k8s.io
    +
    +// Package v1 is the v1 version of the resource API.
    +package v1
    diff --git a/vendor/k8s.io/api/resource/v1/generated.pb.go b/vendor/k8s.io/api/resource/v1/generated.pb.go
    new file mode 100644
    index 000000000..5695e2c7e
    --- /dev/null
    +++ b/vendor/k8s.io/api/resource/v1/generated.pb.go
    @@ -0,0 +1,12777 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by protoc-gen-gogo. DO NOT EDIT.
    +// source: k8s.io/api/resource/v1/generated.proto
    +
    +package v1
    +
    +import (
    +	fmt "fmt"
    +
    +	io "io"
    +
    +	proto "github.com/gogo/protobuf/proto"
    +	github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys"
    +	v11 "k8s.io/api/core/v1"
    +	resource "k8s.io/apimachinery/pkg/api/resource"
    +	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	runtime "k8s.io/apimachinery/pkg/runtime"
    +
    +	math "math"
    +	math_bits "math/bits"
    +	reflect "reflect"
    +	strings "strings"
    +
    +	k8s_io_apimachinery_pkg_types "k8s.io/apimachinery/pkg/types"
    +)
    +
    +// Reference imports to suppress errors if they are not otherwise used.
    +var _ = proto.Marshal
    +var _ = fmt.Errorf
    +var _ = math.Inf
    +
    +// This is a compile-time assertion to ensure that this generated file
    +// is compatible with the proto package it is being compiled against.
    +// A compilation error at this line likely means your copy of the
    +// proto package needs to be updated.
    +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
    +
    +func (m *AllocatedDeviceStatus) Reset()      { *m = AllocatedDeviceStatus{} }
    +func (*AllocatedDeviceStatus) ProtoMessage() {}
    +func (*AllocatedDeviceStatus) Descriptor() ([]byte, []int) {
    +	return fileDescriptor_f4fc532aec02d243, []int{0}
    +}
    +func (m *AllocatedDeviceStatus) XXX_Unmarshal(b []byte) error {
    +	return m.Unmarshal(b)
    +}
    +func (m *AllocatedDeviceStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    +	b = b[:cap(b)]
    +	n, err := m.MarshalToSizedBuffer(b)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return b[:n], nil
    +}
    +func (m *AllocatedDeviceStatus) XXX_Merge(src proto.Message) {
    +	xxx_messageInfo_AllocatedDeviceStatus.Merge(m, src)
    +}
    +func (m *AllocatedDeviceStatus) XXX_Size() int {
    +	return m.Size()
    +}
    +func (m *AllocatedDeviceStatus) XXX_DiscardUnknown() {
    +	xxx_messageInfo_AllocatedDeviceStatus.DiscardUnknown(m)
    +}
    +
    +var xxx_messageInfo_AllocatedDeviceStatus proto.InternalMessageInfo
    +
    +func (m *AllocationResult) Reset()      { *m = AllocationResult{} }
    +func (*AllocationResult) ProtoMessage() {}
    +func (*AllocationResult) Descriptor() ([]byte, []int) {
    +	return fileDescriptor_f4fc532aec02d243, []int{1}
    +}
    +func (m *AllocationResult) XXX_Unmarshal(b []byte) error {
    +	return m.Unmarshal(b)
    +}
    +func (m *AllocationResult) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    +	b = b[:cap(b)]
    +	n, err := m.MarshalToSizedBuffer(b)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return b[:n], nil
    +}
    +func (m *AllocationResult) XXX_Merge(src proto.Message) {
    +	xxx_messageInfo_AllocationResult.Merge(m, src)
    +}
    +func (m *AllocationResult) XXX_Size() int {
    +	return m.Size()
    +}
    +func (m *AllocationResult) XXX_DiscardUnknown() {
    +	xxx_messageInfo_AllocationResult.DiscardUnknown(m)
    +}
    +
    +var xxx_messageInfo_AllocationResult proto.InternalMessageInfo
    +
    +func (m *CELDeviceSelector) Reset()      { *m = CELDeviceSelector{} }
    +func (*CELDeviceSelector) ProtoMessage() {}
    +func (*CELDeviceSelector) Descriptor() ([]byte, []int) {
    +	return fileDescriptor_f4fc532aec02d243, []int{2}
    +}
    +func (m *CELDeviceSelector) XXX_Unmarshal(b []byte) error {
    +	return m.Unmarshal(b)
    +}
    +func (m *CELDeviceSelector) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    +	b = b[:cap(b)]
    +	n, err := m.MarshalToSizedBuffer(b)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return b[:n], nil
    +}
    +func (m *CELDeviceSelector) XXX_Merge(src proto.Message) {
    +	xxx_messageInfo_CELDeviceSelector.Merge(m, src)
    +}
    +func (m *CELDeviceSelector) XXX_Size() int {
    +	return m.Size()
    +}
    +func (m *CELDeviceSelector) XXX_DiscardUnknown() {
    +	xxx_messageInfo_CELDeviceSelector.DiscardUnknown(m)
    +}
    +
    +var xxx_messageInfo_CELDeviceSelector proto.InternalMessageInfo
    +
    +func (m *CapacityRequestPolicy) Reset()      { *m = CapacityRequestPolicy{} }
    +func (*CapacityRequestPolicy) ProtoMessage() {}
    +func (*CapacityRequestPolicy) Descriptor() ([]byte, []int) {
    +	return fileDescriptor_f4fc532aec02d243, []int{3}
    +}
    +func (m *CapacityRequestPolicy) XXX_Unmarshal(b []byte) error {
    +	return m.Unmarshal(b)
    +}
    +func (m *CapacityRequestPolicy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    +	b = b[:cap(b)]
    +	n, err := m.MarshalToSizedBuffer(b)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return b[:n], nil
    +}
    +func (m *CapacityRequestPolicy) XXX_Merge(src proto.Message) {
    +	xxx_messageInfo_CapacityRequestPolicy.Merge(m, src)
    +}
    +func (m *CapacityRequestPolicy) XXX_Size() int {
    +	return m.Size()
    +}
    +func (m *CapacityRequestPolicy) XXX_DiscardUnknown() {
    +	xxx_messageInfo_CapacityRequestPolicy.DiscardUnknown(m)
    +}
    +
    +var xxx_messageInfo_CapacityRequestPolicy proto.InternalMessageInfo
    +
    +func (m *CapacityRequestPolicyRange) Reset()      { *m = CapacityRequestPolicyRange{} }
    +func (*CapacityRequestPolicyRange) ProtoMessage() {}
    +func (*CapacityRequestPolicyRange) Descriptor() ([]byte, []int) {
    +	return fileDescriptor_f4fc532aec02d243, []int{4}
    +}
    +func (m *CapacityRequestPolicyRange) XXX_Unmarshal(b []byte) error {
    +	return m.Unmarshal(b)
    +}
    +func (m *CapacityRequestPolicyRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    +	b = b[:cap(b)]
    +	n, err := m.MarshalToSizedBuffer(b)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return b[:n], nil
    +}
    +func (m *CapacityRequestPolicyRange) XXX_Merge(src proto.Message) {
    +	xxx_messageInfo_CapacityRequestPolicyRange.Merge(m, src)
    +}
    +func (m *CapacityRequestPolicyRange) XXX_Size() int {
    +	return m.Size()
    +}
    +func (m *CapacityRequestPolicyRange) XXX_DiscardUnknown() {
    +	xxx_messageInfo_CapacityRequestPolicyRange.DiscardUnknown(m)
    +}
    +
    +var xxx_messageInfo_CapacityRequestPolicyRange proto.InternalMessageInfo
    +
    +func (m *CapacityRequirements) Reset()      { *m = CapacityRequirements{} }
    +func (*CapacityRequirements) ProtoMessage() {}
    +func (*CapacityRequirements) Descriptor() ([]byte, []int) {
    +	return fileDescriptor_f4fc532aec02d243, []int{5}
    +}
    +func (m *CapacityRequirements) XXX_Unmarshal(b []byte) error {
    +	return m.Unmarshal(b)
    +}
    +func (m *CapacityRequirements) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    +	b = b[:cap(b)]
    +	n, err := m.MarshalToSizedBuffer(b)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return b[:n], nil
    +}
    +func (m *CapacityRequirements) XXX_Merge(src proto.Message) {
    +	xxx_messageInfo_CapacityRequirements.Merge(m, src)
    +}
    +func (m *CapacityRequirements) XXX_Size() int {
    +	return m.Size()
    +}
    +func (m *CapacityRequirements) XXX_DiscardUnknown() {
    +	xxx_messageInfo_CapacityRequirements.DiscardUnknown(m)
    +}
    +
    +var xxx_messageInfo_CapacityRequirements proto.InternalMessageInfo
    +
    +func (m *Counter) Reset()      { *m = Counter{} }
    +func (*Counter) ProtoMessage() {}
    +func (*Counter) Descriptor() ([]byte, []int) {
    +	return fileDescriptor_f4fc532aec02d243, []int{6}
    +}
    +func (m *Counter) XXX_Unmarshal(b []byte) error {
    +	return m.Unmarshal(b)
    +}
    +func (m *Counter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    +	b = b[:cap(b)]
    +	n, err := m.MarshalToSizedBuffer(b)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return b[:n], nil
    +}
    +func (m *Counter) XXX_Merge(src proto.Message) {
    +	xxx_messageInfo_Counter.Merge(m, src)
    +}
    +func (m *Counter) XXX_Size() int {
    +	return m.Size()
    +}
    +func (m *Counter) XXX_DiscardUnknown() {
    +	xxx_messageInfo_Counter.DiscardUnknown(m)
    +}
    +
    +var xxx_messageInfo_Counter proto.InternalMessageInfo
    +
    +func (m *CounterSet) Reset()      { *m = CounterSet{} }
    +func (*CounterSet) ProtoMessage() {}
    +func (*CounterSet) Descriptor() ([]byte, []int) {
    +	return fileDescriptor_f4fc532aec02d243, []int{7}
    +}
    +func (m *CounterSet) XXX_Unmarshal(b []byte) error {
    +	return m.Unmarshal(b)
    +}
    +func (m *CounterSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    +	b = b[:cap(b)]
    +	n, err := m.MarshalToSizedBuffer(b)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return b[:n], nil
    +}
    +func (m *CounterSet) XXX_Merge(src proto.Message) {
    +	xxx_messageInfo_CounterSet.Merge(m, src)
    +}
    +func (m *CounterSet) XXX_Size() int {
    +	return m.Size()
    +}
    +func (m *CounterSet) XXX_DiscardUnknown() {
    +	xxx_messageInfo_CounterSet.DiscardUnknown(m)
    +}
    +
    +var xxx_messageInfo_CounterSet proto.InternalMessageInfo
    +
    +func (m *Device) Reset()      { *m = Device{} }
    +func (*Device) ProtoMessage() {}
    +func (*Device) Descriptor() ([]byte, []int) {
    +	return fileDescriptor_f4fc532aec02d243, []int{8}
    +}
    +func (m *Device) XXX_Unmarshal(b []byte) error {
    +	return m.Unmarshal(b)
    +}
    +func (m *Device) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    +	b = b[:cap(b)]
    +	n, err := m.MarshalToSizedBuffer(b)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return b[:n], nil
    +}
    +func (m *Device) XXX_Merge(src proto.Message) {
    +	xxx_messageInfo_Device.Merge(m, src)
    +}
    +func (m *Device) XXX_Size() int {
    +	return m.Size()
    +}
    +func (m *Device) XXX_DiscardUnknown() {
    +	xxx_messageInfo_Device.DiscardUnknown(m)
    +}
    +
    +var xxx_messageInfo_Device proto.InternalMessageInfo
    +
    +func (m *DeviceAllocationConfiguration) Reset()      { *m = DeviceAllocationConfiguration{} }
    +func (*DeviceAllocationConfiguration) ProtoMessage() {}
    +func (*DeviceAllocationConfiguration) Descriptor() ([]byte, []int) {
    +	return fileDescriptor_f4fc532aec02d243, []int{9}
    +}
    +func (m *DeviceAllocationConfiguration) XXX_Unmarshal(b []byte) error {
    +	return m.Unmarshal(b)
    +}
    +func (m *DeviceAllocationConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    +	b = b[:cap(b)]
    +	n, err := m.MarshalToSizedBuffer(b)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return b[:n], nil
    +}
    +func (m *DeviceAllocationConfiguration) XXX_Merge(src proto.Message) {
    +	xxx_messageInfo_DeviceAllocationConfiguration.Merge(m, src)
    +}
    +func (m *DeviceAllocationConfiguration) XXX_Size() int {
    +	return m.Size()
    +}
    +func (m *DeviceAllocationConfiguration) XXX_DiscardUnknown() {
    +	xxx_messageInfo_DeviceAllocationConfiguration.DiscardUnknown(m)
    +}
    +
    +var xxx_messageInfo_DeviceAllocationConfiguration proto.InternalMessageInfo
    +
    +func (m *DeviceAllocationResult) Reset()      { *m = DeviceAllocationResult{} }
    +func (*DeviceAllocationResult) ProtoMessage() {}
    +func (*DeviceAllocationResult) Descriptor() ([]byte, []int) {
    +	return fileDescriptor_f4fc532aec02d243, []int{10}
    +}
    +func (m *DeviceAllocationResult) XXX_Unmarshal(b []byte) error {
    +	return m.Unmarshal(b)
    +}
    +func (m *DeviceAllocationResult) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    +	b = b[:cap(b)]
    +	n, err := m.MarshalToSizedBuffer(b)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return b[:n], nil
    +}
    +func (m *DeviceAllocationResult) XXX_Merge(src proto.Message) {
    +	xxx_messageInfo_DeviceAllocationResult.Merge(m, src)
    +}
    +func (m *DeviceAllocationResult) XXX_Size() int {
    +	return m.Size()
    +}
    +func (m *DeviceAllocationResult) XXX_DiscardUnknown() {
    +	xxx_messageInfo_DeviceAllocationResult.DiscardUnknown(m)
    +}
    +
    +var xxx_messageInfo_DeviceAllocationResult proto.InternalMessageInfo
    +
    +func (m *DeviceAttribute) Reset()      { *m = DeviceAttribute{} }
    +func (*DeviceAttribute) ProtoMessage() {}
    +func (*DeviceAttribute) Descriptor() ([]byte, []int) {
    +	return fileDescriptor_f4fc532aec02d243, []int{11}
    +}
    +func (m *DeviceAttribute) XXX_Unmarshal(b []byte) error {
    +	return m.Unmarshal(b)
    +}
    +func (m *DeviceAttribute) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    +	b = b[:cap(b)]
    +	n, err := m.MarshalToSizedBuffer(b)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return b[:n], nil
    +}
    +func (m *DeviceAttribute) XXX_Merge(src proto.Message) {
    +	xxx_messageInfo_DeviceAttribute.Merge(m, src)
    +}
    +func (m *DeviceAttribute) XXX_Size() int {
    +	return m.Size()
    +}
    +func (m *DeviceAttribute) XXX_DiscardUnknown() {
    +	xxx_messageInfo_DeviceAttribute.DiscardUnknown(m)
    +}
    +
    +var xxx_messageInfo_DeviceAttribute proto.InternalMessageInfo
    +
    +func (m *DeviceCapacity) Reset()      { *m = DeviceCapacity{} }
    +func (*DeviceCapacity) ProtoMessage() {}
    +func (*DeviceCapacity) Descriptor() ([]byte, []int) {
    +	return fileDescriptor_f4fc532aec02d243, []int{12}
    +}
    +func (m *DeviceCapacity) XXX_Unmarshal(b []byte) error {
    +	return m.Unmarshal(b)
    +}
    +func (m *DeviceCapacity) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    +	b = b[:cap(b)]
    +	n, err := m.MarshalToSizedBuffer(b)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return b[:n], nil
    +}
    +func (m *DeviceCapacity) XXX_Merge(src proto.Message) {
    +	xxx_messageInfo_DeviceCapacity.Merge(m, src)
    +}
    +func (m *DeviceCapacity) XXX_Size() int {
    +	return m.Size()
    +}
    +func (m *DeviceCapacity) XXX_DiscardUnknown() {
    +	xxx_messageInfo_DeviceCapacity.DiscardUnknown(m)
    +}
    +
    +var xxx_messageInfo_DeviceCapacity proto.InternalMessageInfo
    +
    +func (m *DeviceClaim) Reset()      { *m = DeviceClaim{} }
    +func (*DeviceClaim) ProtoMessage() {}
    +func (*DeviceClaim) Descriptor() ([]byte, []int) {
    +	return fileDescriptor_f4fc532aec02d243, []int{13}
    +}
    +func (m *DeviceClaim) XXX_Unmarshal(b []byte) error {
    +	return m.Unmarshal(b)
    +}
    +func (m *DeviceClaim) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    +	b = b[:cap(b)]
    +	n, err := m.MarshalToSizedBuffer(b)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return b[:n], nil
    +}
    +func (m *DeviceClaim) XXX_Merge(src proto.Message) {
    +	xxx_messageInfo_DeviceClaim.Merge(m, src)
    +}
    +func (m *DeviceClaim) XXX_Size() int {
    +	return m.Size()
    +}
    +func (m *DeviceClaim) XXX_DiscardUnknown() {
    +	xxx_messageInfo_DeviceClaim.DiscardUnknown(m)
    +}
    +
    +var xxx_messageInfo_DeviceClaim proto.InternalMessageInfo
    +
    +func (m *DeviceClaimConfiguration) Reset()      { *m = DeviceClaimConfiguration{} }
    +func (*DeviceClaimConfiguration) ProtoMessage() {}
    +func (*DeviceClaimConfiguration) Descriptor() ([]byte, []int) {
    +	return fileDescriptor_f4fc532aec02d243, []int{14}
    +}
    +func (m *DeviceClaimConfiguration) XXX_Unmarshal(b []byte) error {
    +	return m.Unmarshal(b)
    +}
    +func (m *DeviceClaimConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    +	b = b[:cap(b)]
    +	n, err := m.MarshalToSizedBuffer(b)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return b[:n], nil
    +}
    +func (m *DeviceClaimConfiguration) XXX_Merge(src proto.Message) {
    +	xxx_messageInfo_DeviceClaimConfiguration.Merge(m, src)
    +}
    +func (m *DeviceClaimConfiguration) XXX_Size() int {
    +	return m.Size()
    +}
    +func (m *DeviceClaimConfiguration) XXX_DiscardUnknown() {
    +	xxx_messageInfo_DeviceClaimConfiguration.DiscardUnknown(m)
    +}
    +
    +var xxx_messageInfo_DeviceClaimConfiguration proto.InternalMessageInfo
    +
    +func (m *DeviceClass) Reset()      { *m = DeviceClass{} }
    +func (*DeviceClass) ProtoMessage() {}
    +func (*DeviceClass) Descriptor() ([]byte, []int) {
    +	return fileDescriptor_f4fc532aec02d243, []int{15}
    +}
    +func (m *DeviceClass) XXX_Unmarshal(b []byte) error {
    +	return m.Unmarshal(b)
    +}
    +func (m *DeviceClass) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    +	b = b[:cap(b)]
    +	n, err := m.MarshalToSizedBuffer(b)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return b[:n], nil
    +}
    +func (m *DeviceClass) XXX_Merge(src proto.Message) {
    +	xxx_messageInfo_DeviceClass.Merge(m, src)
    +}
    +func (m *DeviceClass) XXX_Size() int {
    +	return m.Size()
    +}
    +func (m *DeviceClass) XXX_DiscardUnknown() {
    +	xxx_messageInfo_DeviceClass.DiscardUnknown(m)
    +}
    +
    +var xxx_messageInfo_DeviceClass proto.InternalMessageInfo
    +
    +func (m *DeviceClassConfiguration) Reset()      { *m = DeviceClassConfiguration{} }
    +func (*DeviceClassConfiguration) ProtoMessage() {}
    +func (*DeviceClassConfiguration) Descriptor() ([]byte, []int) {
    +	return fileDescriptor_f4fc532aec02d243, []int{16}
    +}
    +func (m *DeviceClassConfiguration) XXX_Unmarshal(b []byte) error {
    +	return m.Unmarshal(b)
    +}
    +func (m *DeviceClassConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    +	b = b[:cap(b)]
    +	n, err := m.MarshalToSizedBuffer(b)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return b[:n], nil
    +}
    +func (m *DeviceClassConfiguration) XXX_Merge(src proto.Message) {
    +	xxx_messageInfo_DeviceClassConfiguration.Merge(m, src)
    +}
    +func (m *DeviceClassConfiguration) XXX_Size() int {
    +	return m.Size()
    +}
    +func (m *DeviceClassConfiguration) XXX_DiscardUnknown() {
    +	xxx_messageInfo_DeviceClassConfiguration.DiscardUnknown(m)
    +}
    +
    +var xxx_messageInfo_DeviceClassConfiguration proto.InternalMessageInfo
    +
    +func (m *DeviceClassList) Reset()      { *m = DeviceClassList{} }
    +func (*DeviceClassList) ProtoMessage() {}
    +func (*DeviceClassList) Descriptor() ([]byte, []int) {
    +	return fileDescriptor_f4fc532aec02d243, []int{17}
    +}
    +func (m *DeviceClassList) XXX_Unmarshal(b []byte) error {
    +	return m.Unmarshal(b)
    +}
    +func (m *DeviceClassList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    +	b = b[:cap(b)]
    +	n, err := m.MarshalToSizedBuffer(b)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return b[:n], nil
    +}
    +func (m *DeviceClassList) XXX_Merge(src proto.Message) {
    +	xxx_messageInfo_DeviceClassList.Merge(m, src)
    +}
    +func (m *DeviceClassList) XXX_Size() int {
    +	return m.Size()
    +}
    +func (m *DeviceClassList) XXX_DiscardUnknown() {
    +	xxx_messageInfo_DeviceClassList.DiscardUnknown(m)
    +}
    +
    +var xxx_messageInfo_DeviceClassList proto.InternalMessageInfo
    +
    +func (m *DeviceClassSpec) Reset()      { *m = DeviceClassSpec{} }
    +func (*DeviceClassSpec) ProtoMessage() {}
    +func (*DeviceClassSpec) Descriptor() ([]byte, []int) {
    +	return fileDescriptor_f4fc532aec02d243, []int{18}
    +}
    +func (m *DeviceClassSpec) XXX_Unmarshal(b []byte) error {
    +	return m.Unmarshal(b)
    +}
    +func (m *DeviceClassSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    +	b = b[:cap(b)]
    +	n, err := m.MarshalToSizedBuffer(b)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return b[:n], nil
    +}
    +func (m *DeviceClassSpec) XXX_Merge(src proto.Message) {
    +	xxx_messageInfo_DeviceClassSpec.Merge(m, src)
    +}
    +func (m *DeviceClassSpec) XXX_Size() int {
    +	return m.Size()
    +}
    +func (m *DeviceClassSpec) XXX_DiscardUnknown() {
    +	xxx_messageInfo_DeviceClassSpec.DiscardUnknown(m)
    +}
    +
    +var xxx_messageInfo_DeviceClassSpec proto.InternalMessageInfo
    +
    +func (m *DeviceConfiguration) Reset()      { *m = DeviceConfiguration{} }
    +func (*DeviceConfiguration) ProtoMessage() {}
    +func (*DeviceConfiguration) Descriptor() ([]byte, []int) {
    +	return fileDescriptor_f4fc532aec02d243, []int{19}
    +}
    +func (m *DeviceConfiguration) XXX_Unmarshal(b []byte) error {
    +	return m.Unmarshal(b)
    +}
    +func (m *DeviceConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    +	b = b[:cap(b)]
    +	n, err := m.MarshalToSizedBuffer(b)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return b[:n], nil
    +}
    +func (m *DeviceConfiguration) XXX_Merge(src proto.Message) {
    +	xxx_messageInfo_DeviceConfiguration.Merge(m, src)
    +}
    +func (m *DeviceConfiguration) XXX_Size() int {
    +	return m.Size()
    +}
    +func (m *DeviceConfiguration) XXX_DiscardUnknown() {
    +	xxx_messageInfo_DeviceConfiguration.DiscardUnknown(m)
    +}
    +
    +var xxx_messageInfo_DeviceConfiguration proto.InternalMessageInfo
    +
    +func (m *DeviceConstraint) Reset()      { *m = DeviceConstraint{} }
    +func (*DeviceConstraint) ProtoMessage() {}
    +func (*DeviceConstraint) Descriptor() ([]byte, []int) {
    +	return fileDescriptor_f4fc532aec02d243, []int{20}
    +}
    +func (m *DeviceConstraint) XXX_Unmarshal(b []byte) error {
    +	return m.Unmarshal(b)
    +}
    +func (m *DeviceConstraint) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    +	b = b[:cap(b)]
    +	n, err := m.MarshalToSizedBuffer(b)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return b[:n], nil
    +}
    +func (m *DeviceConstraint) XXX_Merge(src proto.Message) {
    +	xxx_messageInfo_DeviceConstraint.Merge(m, src)
    +}
    +func (m *DeviceConstraint) XXX_Size() int {
    +	return m.Size()
    +}
    +func (m *DeviceConstraint) XXX_DiscardUnknown() {
    +	xxx_messageInfo_DeviceConstraint.DiscardUnknown(m)
    +}
    +
    +var xxx_messageInfo_DeviceConstraint proto.InternalMessageInfo
    +
    +func (m *DeviceCounterConsumption) Reset()      { *m = DeviceCounterConsumption{} }
    +func (*DeviceCounterConsumption) ProtoMessage() {}
    +func (*DeviceCounterConsumption) Descriptor() ([]byte, []int) {
    +	return fileDescriptor_f4fc532aec02d243, []int{21}
    +}
    +func (m *DeviceCounterConsumption) XXX_Unmarshal(b []byte) error {
    +	return m.Unmarshal(b)
    +}
    +func (m *DeviceCounterConsumption) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    +	b = b[:cap(b)]
    +	n, err := m.MarshalToSizedBuffer(b)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return b[:n], nil
    +}
    +func (m *DeviceCounterConsumption) XXX_Merge(src proto.Message) {
    +	xxx_messageInfo_DeviceCounterConsumption.Merge(m, src)
    +}
    +func (m *DeviceCounterConsumption) XXX_Size() int {
    +	return m.Size()
    +}
    +func (m *DeviceCounterConsumption) XXX_DiscardUnknown() {
    +	xxx_messageInfo_DeviceCounterConsumption.DiscardUnknown(m)
    +}
    +
    +var xxx_messageInfo_DeviceCounterConsumption proto.InternalMessageInfo
    +
    +func (m *DeviceRequest) Reset()      { *m = DeviceRequest{} }
    +func (*DeviceRequest) ProtoMessage() {}
    +func (*DeviceRequest) Descriptor() ([]byte, []int) {
    +	return fileDescriptor_f4fc532aec02d243, []int{22}
    +}
    +func (m *DeviceRequest) XXX_Unmarshal(b []byte) error {
    +	return m.Unmarshal(b)
    +}
    +func (m *DeviceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    +	b = b[:cap(b)]
    +	n, err := m.MarshalToSizedBuffer(b)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return b[:n], nil
    +}
    +func (m *DeviceRequest) XXX_Merge(src proto.Message) {
    +	xxx_messageInfo_DeviceRequest.Merge(m, src)
    +}
    +func (m *DeviceRequest) XXX_Size() int {
    +	return m.Size()
    +}
    +func (m *DeviceRequest) XXX_DiscardUnknown() {
    +	xxx_messageInfo_DeviceRequest.DiscardUnknown(m)
    +}
    +
    +var xxx_messageInfo_DeviceRequest proto.InternalMessageInfo
    +
    +func (m *DeviceRequestAllocationResult) Reset()      { *m = DeviceRequestAllocationResult{} }
    +func (*DeviceRequestAllocationResult) ProtoMessage() {}
    +func (*DeviceRequestAllocationResult) Descriptor() ([]byte, []int) {
    +	return fileDescriptor_f4fc532aec02d243, []int{23}
    +}
    +func (m *DeviceRequestAllocationResult) XXX_Unmarshal(b []byte) error {
    +	return m.Unmarshal(b)
    +}
    +func (m *DeviceRequestAllocationResult) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    +	b = b[:cap(b)]
    +	n, err := m.MarshalToSizedBuffer(b)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return b[:n], nil
    +}
    +func (m *DeviceRequestAllocationResult) XXX_Merge(src proto.Message) {
    +	xxx_messageInfo_DeviceRequestAllocationResult.Merge(m, src)
    +}
    +func (m *DeviceRequestAllocationResult) XXX_Size() int {
    +	return m.Size()
    +}
    +func (m *DeviceRequestAllocationResult) XXX_DiscardUnknown() {
    +	xxx_messageInfo_DeviceRequestAllocationResult.DiscardUnknown(m)
    +}
    +
    +var xxx_messageInfo_DeviceRequestAllocationResult proto.InternalMessageInfo
    +
    +func (m *DeviceSelector) Reset()      { *m = DeviceSelector{} }
    +func (*DeviceSelector) ProtoMessage() {}
    +func (*DeviceSelector) Descriptor() ([]byte, []int) {
    +	return fileDescriptor_f4fc532aec02d243, []int{24}
    +}
    +func (m *DeviceSelector) XXX_Unmarshal(b []byte) error {
    +	return m.Unmarshal(b)
    +}
    +func (m *DeviceSelector) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    +	b = b[:cap(b)]
    +	n, err := m.MarshalToSizedBuffer(b)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return b[:n], nil
    +}
    +func (m *DeviceSelector) XXX_Merge(src proto.Message) {
    +	xxx_messageInfo_DeviceSelector.Merge(m, src)
    +}
    +func (m *DeviceSelector) XXX_Size() int {
    +	return m.Size()
    +}
    +func (m *DeviceSelector) XXX_DiscardUnknown() {
    +	xxx_messageInfo_DeviceSelector.DiscardUnknown(m)
    +}
    +
    +var xxx_messageInfo_DeviceSelector proto.InternalMessageInfo
    +
    +func (m *DeviceSubRequest) Reset()      { *m = DeviceSubRequest{} }
    +func (*DeviceSubRequest) ProtoMessage() {}
    +func (*DeviceSubRequest) Descriptor() ([]byte, []int) {
    +	return fileDescriptor_f4fc532aec02d243, []int{25}
    +}
    +func (m *DeviceSubRequest) XXX_Unmarshal(b []byte) error {
    +	return m.Unmarshal(b)
    +}
    +func (m *DeviceSubRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    +	b = b[:cap(b)]
    +	n, err := m.MarshalToSizedBuffer(b)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return b[:n], nil
    +}
    +func (m *DeviceSubRequest) XXX_Merge(src proto.Message) {
    +	xxx_messageInfo_DeviceSubRequest.Merge(m, src)
    +}
    +func (m *DeviceSubRequest) XXX_Size() int {
    +	return m.Size()
    +}
    +func (m *DeviceSubRequest) XXX_DiscardUnknown() {
    +	xxx_messageInfo_DeviceSubRequest.DiscardUnknown(m)
    +}
    +
    +var xxx_messageInfo_DeviceSubRequest proto.InternalMessageInfo
    +
    +func (m *DeviceTaint) Reset()      { *m = DeviceTaint{} }
    +func (*DeviceTaint) ProtoMessage() {}
    +func (*DeviceTaint) Descriptor() ([]byte, []int) {
    +	return fileDescriptor_f4fc532aec02d243, []int{26}
    +}
    +func (m *DeviceTaint) XXX_Unmarshal(b []byte) error {
    +	return m.Unmarshal(b)
    +}
    +func (m *DeviceTaint) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    +	b = b[:cap(b)]
    +	n, err := m.MarshalToSizedBuffer(b)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return b[:n], nil
    +}
    +func (m *DeviceTaint) XXX_Merge(src proto.Message) {
    +	xxx_messageInfo_DeviceTaint.Merge(m, src)
    +}
    +func (m *DeviceTaint) XXX_Size() int {
    +	return m.Size()
    +}
    +func (m *DeviceTaint) XXX_DiscardUnknown() {
    +	xxx_messageInfo_DeviceTaint.DiscardUnknown(m)
    +}
    +
    +var xxx_messageInfo_DeviceTaint proto.InternalMessageInfo
    +
    +func (m *DeviceToleration) Reset()      { *m = DeviceToleration{} }
    +func (*DeviceToleration) ProtoMessage() {}
    +func (*DeviceToleration) Descriptor() ([]byte, []int) {
    +	return fileDescriptor_f4fc532aec02d243, []int{27}
    +}
    +func (m *DeviceToleration) XXX_Unmarshal(b []byte) error {
    +	return m.Unmarshal(b)
    +}
    +func (m *DeviceToleration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    +	b = b[:cap(b)]
    +	n, err := m.MarshalToSizedBuffer(b)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return b[:n], nil
    +}
    +func (m *DeviceToleration) XXX_Merge(src proto.Message) {
    +	xxx_messageInfo_DeviceToleration.Merge(m, src)
    +}
    +func (m *DeviceToleration) XXX_Size() int {
    +	return m.Size()
    +}
    +func (m *DeviceToleration) XXX_DiscardUnknown() {
    +	xxx_messageInfo_DeviceToleration.DiscardUnknown(m)
    +}
    +
    +var xxx_messageInfo_DeviceToleration proto.InternalMessageInfo
    +
    +func (m *ExactDeviceRequest) Reset()      { *m = ExactDeviceRequest{} }
    +func (*ExactDeviceRequest) ProtoMessage() {}
    +func (*ExactDeviceRequest) Descriptor() ([]byte, []int) {
    +	return fileDescriptor_f4fc532aec02d243, []int{28}
    +}
    +func (m *ExactDeviceRequest) XXX_Unmarshal(b []byte) error {
    +	return m.Unmarshal(b)
    +}
    +func (m *ExactDeviceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    +	b = b[:cap(b)]
    +	n, err := m.MarshalToSizedBuffer(b)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return b[:n], nil
    +}
    +func (m *ExactDeviceRequest) XXX_Merge(src proto.Message) {
    +	xxx_messageInfo_ExactDeviceRequest.Merge(m, src)
    +}
    +func (m *ExactDeviceRequest) XXX_Size() int {
    +	return m.Size()
    +}
    +func (m *ExactDeviceRequest) XXX_DiscardUnknown() {
    +	xxx_messageInfo_ExactDeviceRequest.DiscardUnknown(m)
    +}
    +
    +var xxx_messageInfo_ExactDeviceRequest proto.InternalMessageInfo
    +
    +func (m *NetworkDeviceData) Reset()      { *m = NetworkDeviceData{} }
    +func (*NetworkDeviceData) ProtoMessage() {}
    +func (*NetworkDeviceData) Descriptor() ([]byte, []int) {
    +	return fileDescriptor_f4fc532aec02d243, []int{29}
    +}
    +func (m *NetworkDeviceData) XXX_Unmarshal(b []byte) error {
    +	return m.Unmarshal(b)
    +}
    +func (m *NetworkDeviceData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    +	b = b[:cap(b)]
    +	n, err := m.MarshalToSizedBuffer(b)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return b[:n], nil
    +}
    +func (m *NetworkDeviceData) XXX_Merge(src proto.Message) {
    +	xxx_messageInfo_NetworkDeviceData.Merge(m, src)
    +}
    +func (m *NetworkDeviceData) XXX_Size() int {
    +	return m.Size()
    +}
    +func (m *NetworkDeviceData) XXX_DiscardUnknown() {
    +	xxx_messageInfo_NetworkDeviceData.DiscardUnknown(m)
    +}
    +
    +var xxx_messageInfo_NetworkDeviceData proto.InternalMessageInfo
    +
    +func (m *OpaqueDeviceConfiguration) Reset()      { *m = OpaqueDeviceConfiguration{} }
    +func (*OpaqueDeviceConfiguration) ProtoMessage() {}
    +func (*OpaqueDeviceConfiguration) Descriptor() ([]byte, []int) {
    +	return fileDescriptor_f4fc532aec02d243, []int{30}
    +}
    +func (m *OpaqueDeviceConfiguration) XXX_Unmarshal(b []byte) error {
    +	return m.Unmarshal(b)
    +}
    +func (m *OpaqueDeviceConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    +	b = b[:cap(b)]
    +	n, err := m.MarshalToSizedBuffer(b)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return b[:n], nil
    +}
    +func (m *OpaqueDeviceConfiguration) XXX_Merge(src proto.Message) {
    +	xxx_messageInfo_OpaqueDeviceConfiguration.Merge(m, src)
    +}
    +func (m *OpaqueDeviceConfiguration) XXX_Size() int {
    +	return m.Size()
    +}
    +func (m *OpaqueDeviceConfiguration) XXX_DiscardUnknown() {
    +	xxx_messageInfo_OpaqueDeviceConfiguration.DiscardUnknown(m)
    +}
    +
    +var xxx_messageInfo_OpaqueDeviceConfiguration proto.InternalMessageInfo
    +
    +func (m *ResourceClaim) Reset()      { *m = ResourceClaim{} }
    +func (*ResourceClaim) ProtoMessage() {}
    +func (*ResourceClaim) Descriptor() ([]byte, []int) {
    +	return fileDescriptor_f4fc532aec02d243, []int{31}
    +}
    +func (m *ResourceClaim) XXX_Unmarshal(b []byte) error {
    +	return m.Unmarshal(b)
    +}
    +func (m *ResourceClaim) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    +	b = b[:cap(b)]
    +	n, err := m.MarshalToSizedBuffer(b)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return b[:n], nil
    +}
    +func (m *ResourceClaim) XXX_Merge(src proto.Message) {
    +	xxx_messageInfo_ResourceClaim.Merge(m, src)
    +}
    +func (m *ResourceClaim) XXX_Size() int {
    +	return m.Size()
    +}
    +func (m *ResourceClaim) XXX_DiscardUnknown() {
    +	xxx_messageInfo_ResourceClaim.DiscardUnknown(m)
    +}
    +
    +var xxx_messageInfo_ResourceClaim proto.InternalMessageInfo
    +
    +func (m *ResourceClaimConsumerReference) Reset()      { *m = ResourceClaimConsumerReference{} }
    +func (*ResourceClaimConsumerReference) ProtoMessage() {}
    +func (*ResourceClaimConsumerReference) Descriptor() ([]byte, []int) {
    +	return fileDescriptor_f4fc532aec02d243, []int{32}
    +}
    +func (m *ResourceClaimConsumerReference) XXX_Unmarshal(b []byte) error {
    +	return m.Unmarshal(b)
    +}
    +func (m *ResourceClaimConsumerReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    +	b = b[:cap(b)]
    +	n, err := m.MarshalToSizedBuffer(b)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return b[:n], nil
    +}
    +func (m *ResourceClaimConsumerReference) XXX_Merge(src proto.Message) {
    +	xxx_messageInfo_ResourceClaimConsumerReference.Merge(m, src)
    +}
    +func (m *ResourceClaimConsumerReference) XXX_Size() int {
    +	return m.Size()
    +}
    +func (m *ResourceClaimConsumerReference) XXX_DiscardUnknown() {
    +	xxx_messageInfo_ResourceClaimConsumerReference.DiscardUnknown(m)
    +}
    +
    +var xxx_messageInfo_ResourceClaimConsumerReference proto.InternalMessageInfo
    +
    +func (m *ResourceClaimList) Reset()      { *m = ResourceClaimList{} }
    +func (*ResourceClaimList) ProtoMessage() {}
    +func (*ResourceClaimList) Descriptor() ([]byte, []int) {
    +	return fileDescriptor_f4fc532aec02d243, []int{33}
    +}
    +func (m *ResourceClaimList) XXX_Unmarshal(b []byte) error {
    +	return m.Unmarshal(b)
    +}
    +func (m *ResourceClaimList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    +	b = b[:cap(b)]
    +	n, err := m.MarshalToSizedBuffer(b)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return b[:n], nil
    +}
    +func (m *ResourceClaimList) XXX_Merge(src proto.Message) {
    +	xxx_messageInfo_ResourceClaimList.Merge(m, src)
    +}
    +func (m *ResourceClaimList) XXX_Size() int {
    +	return m.Size()
    +}
    +func (m *ResourceClaimList) XXX_DiscardUnknown() {
    +	xxx_messageInfo_ResourceClaimList.DiscardUnknown(m)
    +}
    +
    +var xxx_messageInfo_ResourceClaimList proto.InternalMessageInfo
    +
    +func (m *ResourceClaimSpec) Reset()      { *m = ResourceClaimSpec{} }
    +func (*ResourceClaimSpec) ProtoMessage() {}
    +func (*ResourceClaimSpec) Descriptor() ([]byte, []int) {
    +	return fileDescriptor_f4fc532aec02d243, []int{34}
    +}
    +func (m *ResourceClaimSpec) XXX_Unmarshal(b []byte) error {
    +	return m.Unmarshal(b)
    +}
    +func (m *ResourceClaimSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    +	b = b[:cap(b)]
    +	n, err := m.MarshalToSizedBuffer(b)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return b[:n], nil
    +}
    +func (m *ResourceClaimSpec) XXX_Merge(src proto.Message) {
    +	xxx_messageInfo_ResourceClaimSpec.Merge(m, src)
    +}
    +func (m *ResourceClaimSpec) XXX_Size() int {
    +	return m.Size()
    +}
    +func (m *ResourceClaimSpec) XXX_DiscardUnknown() {
    +	xxx_messageInfo_ResourceClaimSpec.DiscardUnknown(m)
    +}
    +
    +var xxx_messageInfo_ResourceClaimSpec proto.InternalMessageInfo
    +
    +func (m *ResourceClaimStatus) Reset()      { *m = ResourceClaimStatus{} }
    +func (*ResourceClaimStatus) ProtoMessage() {}
    +func (*ResourceClaimStatus) Descriptor() ([]byte, []int) {
    +	return fileDescriptor_f4fc532aec02d243, []int{35}
    +}
    +func (m *ResourceClaimStatus) XXX_Unmarshal(b []byte) error {
    +	return m.Unmarshal(b)
    +}
    +func (m *ResourceClaimStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    +	b = b[:cap(b)]
    +	n, err := m.MarshalToSizedBuffer(b)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return b[:n], nil
    +}
    +func (m *ResourceClaimStatus) XXX_Merge(src proto.Message) {
    +	xxx_messageInfo_ResourceClaimStatus.Merge(m, src)
    +}
    +func (m *ResourceClaimStatus) XXX_Size() int {
    +	return m.Size()
    +}
    +func (m *ResourceClaimStatus) XXX_DiscardUnknown() {
    +	xxx_messageInfo_ResourceClaimStatus.DiscardUnknown(m)
    +}
    +
    +var xxx_messageInfo_ResourceClaimStatus proto.InternalMessageInfo
    +
    +func (m *ResourceClaimTemplate) Reset()      { *m = ResourceClaimTemplate{} }
    +func (*ResourceClaimTemplate) ProtoMessage() {}
    +func (*ResourceClaimTemplate) Descriptor() ([]byte, []int) {
    +	return fileDescriptor_f4fc532aec02d243, []int{36}
    +}
    +func (m *ResourceClaimTemplate) XXX_Unmarshal(b []byte) error {
    +	return m.Unmarshal(b)
    +}
    +func (m *ResourceClaimTemplate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    +	b = b[:cap(b)]
    +	n, err := m.MarshalToSizedBuffer(b)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return b[:n], nil
    +}
    +func (m *ResourceClaimTemplate) XXX_Merge(src proto.Message) {
    +	xxx_messageInfo_ResourceClaimTemplate.Merge(m, src)
    +}
    +func (m *ResourceClaimTemplate) XXX_Size() int {
    +	return m.Size()
    +}
    +func (m *ResourceClaimTemplate) XXX_DiscardUnknown() {
    +	xxx_messageInfo_ResourceClaimTemplate.DiscardUnknown(m)
    +}
    +
    +var xxx_messageInfo_ResourceClaimTemplate proto.InternalMessageInfo
    +
    +func (m *ResourceClaimTemplateList) Reset()      { *m = ResourceClaimTemplateList{} }
    +func (*ResourceClaimTemplateList) ProtoMessage() {}
    +func (*ResourceClaimTemplateList) Descriptor() ([]byte, []int) {
    +	return fileDescriptor_f4fc532aec02d243, []int{37}
    +}
    +func (m *ResourceClaimTemplateList) XXX_Unmarshal(b []byte) error {
    +	return m.Unmarshal(b)
    +}
    +func (m *ResourceClaimTemplateList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    +	b = b[:cap(b)]
    +	n, err := m.MarshalToSizedBuffer(b)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return b[:n], nil
    +}
    +func (m *ResourceClaimTemplateList) XXX_Merge(src proto.Message) {
    +	xxx_messageInfo_ResourceClaimTemplateList.Merge(m, src)
    +}
    +func (m *ResourceClaimTemplateList) XXX_Size() int {
    +	return m.Size()
    +}
    +func (m *ResourceClaimTemplateList) XXX_DiscardUnknown() {
    +	xxx_messageInfo_ResourceClaimTemplateList.DiscardUnknown(m)
    +}
    +
    +var xxx_messageInfo_ResourceClaimTemplateList proto.InternalMessageInfo
    +
    +func (m *ResourceClaimTemplateSpec) Reset()      { *m = ResourceClaimTemplateSpec{} }
    +func (*ResourceClaimTemplateSpec) ProtoMessage() {}
    +func (*ResourceClaimTemplateSpec) Descriptor() ([]byte, []int) {
    +	return fileDescriptor_f4fc532aec02d243, []int{38}
    +}
    +func (m *ResourceClaimTemplateSpec) XXX_Unmarshal(b []byte) error {
    +	return m.Unmarshal(b)
    +}
    +func (m *ResourceClaimTemplateSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    +	b = b[:cap(b)]
    +	n, err := m.MarshalToSizedBuffer(b)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return b[:n], nil
    +}
    +func (m *ResourceClaimTemplateSpec) XXX_Merge(src proto.Message) {
    +	xxx_messageInfo_ResourceClaimTemplateSpec.Merge(m, src)
    +}
    +func (m *ResourceClaimTemplateSpec) XXX_Size() int {
    +	return m.Size()
    +}
    +func (m *ResourceClaimTemplateSpec) XXX_DiscardUnknown() {
    +	xxx_messageInfo_ResourceClaimTemplateSpec.DiscardUnknown(m)
    +}
    +
    +var xxx_messageInfo_ResourceClaimTemplateSpec proto.InternalMessageInfo
    +
    +func (m *ResourcePool) Reset()      { *m = ResourcePool{} }
    +func (*ResourcePool) ProtoMessage() {}
    +func (*ResourcePool) Descriptor() ([]byte, []int) {
    +	return fileDescriptor_f4fc532aec02d243, []int{39}
    +}
    +func (m *ResourcePool) XXX_Unmarshal(b []byte) error {
    +	return m.Unmarshal(b)
    +}
    +func (m *ResourcePool) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    +	b = b[:cap(b)]
    +	n, err := m.MarshalToSizedBuffer(b)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return b[:n], nil
    +}
    +func (m *ResourcePool) XXX_Merge(src proto.Message) {
    +	xxx_messageInfo_ResourcePool.Merge(m, src)
    +}
    +func (m *ResourcePool) XXX_Size() int {
    +	return m.Size()
    +}
    +func (m *ResourcePool) XXX_DiscardUnknown() {
    +	xxx_messageInfo_ResourcePool.DiscardUnknown(m)
    +}
    +
    +var xxx_messageInfo_ResourcePool proto.InternalMessageInfo
    +
    +func (m *ResourceSlice) Reset()      { *m = ResourceSlice{} }
    +func (*ResourceSlice) ProtoMessage() {}
    +func (*ResourceSlice) Descriptor() ([]byte, []int) {
    +	return fileDescriptor_f4fc532aec02d243, []int{40}
    +}
    +func (m *ResourceSlice) XXX_Unmarshal(b []byte) error {
    +	return m.Unmarshal(b)
    +}
    +func (m *ResourceSlice) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    +	b = b[:cap(b)]
    +	n, err := m.MarshalToSizedBuffer(b)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return b[:n], nil
    +}
    +func (m *ResourceSlice) XXX_Merge(src proto.Message) {
    +	xxx_messageInfo_ResourceSlice.Merge(m, src)
    +}
    +func (m *ResourceSlice) XXX_Size() int {
    +	return m.Size()
    +}
    +func (m *ResourceSlice) XXX_DiscardUnknown() {
    +	xxx_messageInfo_ResourceSlice.DiscardUnknown(m)
    +}
    +
    +var xxx_messageInfo_ResourceSlice proto.InternalMessageInfo
    +
    +func (m *ResourceSliceList) Reset()      { *m = ResourceSliceList{} }
    +func (*ResourceSliceList) ProtoMessage() {}
    +func (*ResourceSliceList) Descriptor() ([]byte, []int) {
    +	return fileDescriptor_f4fc532aec02d243, []int{41}
    +}
    +func (m *ResourceSliceList) XXX_Unmarshal(b []byte) error {
    +	return m.Unmarshal(b)
    +}
    +func (m *ResourceSliceList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    +	b = b[:cap(b)]
    +	n, err := m.MarshalToSizedBuffer(b)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return b[:n], nil
    +}
    +func (m *ResourceSliceList) XXX_Merge(src proto.Message) {
    +	xxx_messageInfo_ResourceSliceList.Merge(m, src)
    +}
    +func (m *ResourceSliceList) XXX_Size() int {
    +	return m.Size()
    +}
    +func (m *ResourceSliceList) XXX_DiscardUnknown() {
    +	xxx_messageInfo_ResourceSliceList.DiscardUnknown(m)
    +}
    +
    +var xxx_messageInfo_ResourceSliceList proto.InternalMessageInfo
    +
    +func (m *ResourceSliceSpec) Reset()      { *m = ResourceSliceSpec{} }
    +func (*ResourceSliceSpec) ProtoMessage() {}
    +func (*ResourceSliceSpec) Descriptor() ([]byte, []int) {
    +	return fileDescriptor_f4fc532aec02d243, []int{42}
    +}
    +func (m *ResourceSliceSpec) XXX_Unmarshal(b []byte) error {
    +	return m.Unmarshal(b)
    +}
    +func (m *ResourceSliceSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    +	b = b[:cap(b)]
    +	n, err := m.MarshalToSizedBuffer(b)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return b[:n], nil
    +}
    +func (m *ResourceSliceSpec) XXX_Merge(src proto.Message) {
    +	xxx_messageInfo_ResourceSliceSpec.Merge(m, src)
    +}
    +func (m *ResourceSliceSpec) XXX_Size() int {
    +	return m.Size()
    +}
    +func (m *ResourceSliceSpec) XXX_DiscardUnknown() {
    +	xxx_messageInfo_ResourceSliceSpec.DiscardUnknown(m)
    +}
    +
    +var xxx_messageInfo_ResourceSliceSpec proto.InternalMessageInfo
    +
    +func init() {
    +	proto.RegisterType((*AllocatedDeviceStatus)(nil), "k8s.io.api.resource.v1.AllocatedDeviceStatus")
    +	proto.RegisterType((*AllocationResult)(nil), "k8s.io.api.resource.v1.AllocationResult")
    +	proto.RegisterType((*CELDeviceSelector)(nil), "k8s.io.api.resource.v1.CELDeviceSelector")
    +	proto.RegisterType((*CapacityRequestPolicy)(nil), "k8s.io.api.resource.v1.CapacityRequestPolicy")
    +	proto.RegisterType((*CapacityRequestPolicyRange)(nil), "k8s.io.api.resource.v1.CapacityRequestPolicyRange")
    +	proto.RegisterType((*CapacityRequirements)(nil), "k8s.io.api.resource.v1.CapacityRequirements")
    +	proto.RegisterMapType((map[QualifiedName]resource.Quantity)(nil), "k8s.io.api.resource.v1.CapacityRequirements.RequestsEntry")
    +	proto.RegisterType((*Counter)(nil), "k8s.io.api.resource.v1.Counter")
    +	proto.RegisterType((*CounterSet)(nil), "k8s.io.api.resource.v1.CounterSet")
    +	proto.RegisterMapType((map[string]Counter)(nil), "k8s.io.api.resource.v1.CounterSet.CountersEntry")
    +	proto.RegisterType((*Device)(nil), "k8s.io.api.resource.v1.Device")
    +	proto.RegisterMapType((map[QualifiedName]DeviceAttribute)(nil), "k8s.io.api.resource.v1.Device.AttributesEntry")
    +	proto.RegisterMapType((map[QualifiedName]DeviceCapacity)(nil), "k8s.io.api.resource.v1.Device.CapacityEntry")
    +	proto.RegisterType((*DeviceAllocationConfiguration)(nil), "k8s.io.api.resource.v1.DeviceAllocationConfiguration")
    +	proto.RegisterType((*DeviceAllocationResult)(nil), "k8s.io.api.resource.v1.DeviceAllocationResult")
    +	proto.RegisterType((*DeviceAttribute)(nil), "k8s.io.api.resource.v1.DeviceAttribute")
    +	proto.RegisterType((*DeviceCapacity)(nil), "k8s.io.api.resource.v1.DeviceCapacity")
    +	proto.RegisterType((*DeviceClaim)(nil), "k8s.io.api.resource.v1.DeviceClaim")
    +	proto.RegisterType((*DeviceClaimConfiguration)(nil), "k8s.io.api.resource.v1.DeviceClaimConfiguration")
    +	proto.RegisterType((*DeviceClass)(nil), "k8s.io.api.resource.v1.DeviceClass")
    +	proto.RegisterType((*DeviceClassConfiguration)(nil), "k8s.io.api.resource.v1.DeviceClassConfiguration")
    +	proto.RegisterType((*DeviceClassList)(nil), "k8s.io.api.resource.v1.DeviceClassList")
    +	proto.RegisterType((*DeviceClassSpec)(nil), "k8s.io.api.resource.v1.DeviceClassSpec")
    +	proto.RegisterType((*DeviceConfiguration)(nil), "k8s.io.api.resource.v1.DeviceConfiguration")
    +	proto.RegisterType((*DeviceConstraint)(nil), "k8s.io.api.resource.v1.DeviceConstraint")
    +	proto.RegisterType((*DeviceCounterConsumption)(nil), "k8s.io.api.resource.v1.DeviceCounterConsumption")
    +	proto.RegisterMapType((map[string]Counter)(nil), "k8s.io.api.resource.v1.DeviceCounterConsumption.CountersEntry")
    +	proto.RegisterType((*DeviceRequest)(nil), "k8s.io.api.resource.v1.DeviceRequest")
    +	proto.RegisterType((*DeviceRequestAllocationResult)(nil), "k8s.io.api.resource.v1.DeviceRequestAllocationResult")
    +	proto.RegisterMapType((map[QualifiedName]resource.Quantity)(nil), "k8s.io.api.resource.v1.DeviceRequestAllocationResult.ConsumedCapacityEntry")
    +	proto.RegisterType((*DeviceSelector)(nil), "k8s.io.api.resource.v1.DeviceSelector")
    +	proto.RegisterType((*DeviceSubRequest)(nil), "k8s.io.api.resource.v1.DeviceSubRequest")
    +	proto.RegisterType((*DeviceTaint)(nil), "k8s.io.api.resource.v1.DeviceTaint")
    +	proto.RegisterType((*DeviceToleration)(nil), "k8s.io.api.resource.v1.DeviceToleration")
    +	proto.RegisterType((*ExactDeviceRequest)(nil), "k8s.io.api.resource.v1.ExactDeviceRequest")
    +	proto.RegisterType((*NetworkDeviceData)(nil), "k8s.io.api.resource.v1.NetworkDeviceData")
    +	proto.RegisterType((*OpaqueDeviceConfiguration)(nil), "k8s.io.api.resource.v1.OpaqueDeviceConfiguration")
    +	proto.RegisterType((*ResourceClaim)(nil), "k8s.io.api.resource.v1.ResourceClaim")
    +	proto.RegisterType((*ResourceClaimConsumerReference)(nil), "k8s.io.api.resource.v1.ResourceClaimConsumerReference")
    +	proto.RegisterType((*ResourceClaimList)(nil), "k8s.io.api.resource.v1.ResourceClaimList")
    +	proto.RegisterType((*ResourceClaimSpec)(nil), "k8s.io.api.resource.v1.ResourceClaimSpec")
    +	proto.RegisterType((*ResourceClaimStatus)(nil), "k8s.io.api.resource.v1.ResourceClaimStatus")
    +	proto.RegisterType((*ResourceClaimTemplate)(nil), "k8s.io.api.resource.v1.ResourceClaimTemplate")
    +	proto.RegisterType((*ResourceClaimTemplateList)(nil), "k8s.io.api.resource.v1.ResourceClaimTemplateList")
    +	proto.RegisterType((*ResourceClaimTemplateSpec)(nil), "k8s.io.api.resource.v1.ResourceClaimTemplateSpec")
    +	proto.RegisterType((*ResourcePool)(nil), "k8s.io.api.resource.v1.ResourcePool")
    +	proto.RegisterType((*ResourceSlice)(nil), "k8s.io.api.resource.v1.ResourceSlice")
    +	proto.RegisterType((*ResourceSliceList)(nil), "k8s.io.api.resource.v1.ResourceSliceList")
    +	proto.RegisterType((*ResourceSliceSpec)(nil), "k8s.io.api.resource.v1.ResourceSliceSpec")
    +}
    +
    +func init() {
    +	proto.RegisterFile("k8s.io/api/resource/v1/generated.proto", fileDescriptor_f4fc532aec02d243)
    +}
    +
    +var fileDescriptor_f4fc532aec02d243 = []byte{
    +	// 3028 bytes of a gzipped FileDescriptorProto
    +	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x5b, 0x4d, 0x6c, 0x24, 0x47,
    +	0xf5, 0x77, 0xcf, 0xcc, 0x8e, 0xc7, 0x6f, 0x6c, 0xaf, 0x5d, 0xbb, 0xeb, 0x4c, 0xfc, 0xff, 0xc7,
    +	0xe3, 0xf4, 0x92, 0xc4, 0x49, 0x76, 0xc7, 0x6b, 0x8b, 0x44, 0x51, 0x12, 0x10, 0x1e, 0xdb, 0x9b,
    +	0x38, 0xfb, 0x11, 0xa7, 0xc6, 0x6b, 0x36, 0x28, 0x84, 0xb4, 0x7b, 0xca, 0x76, 0xe3, 0x9e, 0xee,
    +	0x49, 0x77, 0x8d, 0x77, 0xcd, 0x29, 0xe2, 0x00, 0x57, 0x04, 0x12, 0x02, 0x24, 0x24, 0x94, 0x03,
    +	0x12, 0x17, 0x84, 0x38, 0x11, 0x04, 0x28, 0xc7, 0x08, 0x29, 0x28, 0x17, 0xa4, 0x20, 0xa1, 0x81,
    +	0x1d, 0x4e, 0x48, 0x08, 0x89, 0x0b, 0x07, 0x1f, 0x10, 0xaa, 0xea, 0xaa, 0xfe, 0x9a, 0x6e, 0x4f,
    +	0xdb, 0x59, 0xaf, 0x96, 0x9b, 0xe7, 0xd5, 0x7b, 0xbf, 0xaa, 0x7a, 0xf5, 0xbe, 0xea, 0x75, 0x19,
    +	0x9e, 0xdc, 0x7b, 0xc1, 0xad, 0x19, 0xf6, 0xbc, 0xd6, 0x36, 0xe6, 0x1d, 0xe2, 0xda, 0x1d, 0x47,
    +	0x27, 0xf3, 0xfb, 0x0b, 0xf3, 0x3b, 0xc4, 0x22, 0x8e, 0x46, 0x49, 0xb3, 0xd6, 0x76, 0x6c, 0x6a,
    +	0xa3, 0x29, 0x8f, 0xaf, 0xa6, 0xb5, 0x8d, 0x9a, 0xe4, 0xab, 0xed, 0x2f, 0x4c, 0x5f, 0xde, 0x31,
    +	0xe8, 0x6e, 0x67, 0xab, 0xa6, 0xdb, 0xad, 0xf9, 0x1d, 0x7b, 0xc7, 0x9e, 0xe7, 0xec, 0x5b, 0x9d,
    +	0x6d, 0xfe, 0x8b, 0xff, 0xe0, 0x7f, 0x79, 0x30, 0xd3, 0x6a, 0x68, 0x3a, 0xdd, 0x76, 0x92, 0xa6,
    +	0x9a, 0xfe, 0x7c, 0xc0, 0xd3, 0xd2, 0xf4, 0x5d, 0xc3, 0x22, 0xce, 0xc1, 0x7c, 0x7b, 0x6f, 0x27,
    +	0xba, 0xc6, 0xe3, 0x48, 0xb9, 0xf3, 0x2d, 0x42, 0xb5, 0xa4, 0xb9, 0xe6, 0xd3, 0xa4, 0x9c, 0x8e,
    +	0x45, 0x8d, 0x56, 0xff, 0x34, 0xcf, 0x0f, 0x12, 0x70, 0xf5, 0x5d, 0xd2, 0xd2, 0xe2, 0x72, 0xea,
    +	0x87, 0x79, 0xb8, 0xb0, 0x64, 0x9a, 0xb6, 0xce, 0x68, 0x2b, 0x64, 0xdf, 0xd0, 0x49, 0x83, 0x6a,
    +	0xb4, 0xe3, 0xa2, 0x27, 0xa1, 0xd8, 0x74, 0x8c, 0x7d, 0xe2, 0x54, 0x94, 0x59, 0x65, 0x6e, 0xa4,
    +	0x3e, 0xfe, 0x51, 0xb7, 0x3a, 0xd4, 0xeb, 0x56, 0x8b, 0x2b, 0x9c, 0x8a, 0xc5, 0x28, 0x9a, 0x85,
    +	0x42, 0xdb, 0xb6, 0xcd, 0x4a, 0x8e, 0x73, 0x8d, 0x0a, 0xae, 0xc2, 0xba, 0x6d, 0x9b, 0x98, 0x8f,
    +	0x70, 0x24, 0x8e, 0x5c, 0xc9, 0xc7, 0x90, 0x38, 0x15, 0x8b, 0x51, 0xf4, 0x04, 0x0c, 0xbb, 0xbb,
    +	0x9a, 0x43, 0xd6, 0x56, 0x2a, 0xc3, 0x9c, 0xb1, 0xdc, 0xeb, 0x56, 0x87, 0x1b, 0x1e, 0x09, 0xcb,
    +	0x31, 0xa4, 0x03, 0xe8, 0xb6, 0xd5, 0x34, 0xa8, 0x61, 0x5b, 0x6e, 0xa5, 0x30, 0x9b, 0x9f, 0x2b,
    +	0x2f, 0xce, 0xd7, 0x02, 0x3b, 0xf0, 0xf7, 0x5f, 0x6b, 0xef, 0xed, 0x30, 0x82, 0x5b, 0x63, 0x6a,
    +	0xae, 0xed, 0x2f, 0xd4, 0x96, 0xa5, 0x5c, 0x1d, 0x89, 0x35, 0x80, 0x4f, 0x72, 0x71, 0x08, 0x16,
    +	0x5d, 0x83, 0x42, 0x53, 0xa3, 0x5a, 0xe5, 0xcc, 0xac, 0x32, 0x57, 0x5e, 0xbc, 0x9c, 0x0a, 0x2f,
    +	0xd4, 0x5b, 0xc3, 0xda, 0x9d, 0xd5, 0xbb, 0x94, 0x58, 0x2e, 0x03, 0x2f, 0x31, 0x05, 0xac, 0x68,
    +	0x54, 0xc3, 0x1c, 0x04, 0xbd, 0x05, 0x65, 0x8b, 0xd0, 0x3b, 0xb6, 0xb3, 0xc7, 0x88, 0x95, 0x22,
    +	0xc7, 0x7c, 0xba, 0x96, 0x6c, 0xba, 0xb5, 0x9b, 0x82, 0x95, 0x2b, 0x85, 0x09, 0xd4, 0xcf, 0xf6,
    +	0xba, 0xd5, 0xf2, 0xcd, 0x00, 0x01, 0x87, 0xe1, 0xd4, 0xdf, 0xe4, 0x60, 0x42, 0x1c, 0xa1, 0x61,
    +	0x5b, 0x98, 0xb8, 0x1d, 0x93, 0xa2, 0x37, 0x61, 0xd8, 0xd3, 0xaa, 0xcb, 0x8f, 0xaf, 0xbc, 0x58,
    +	0x4b, 0x9b, 0xce, 0x9b, 0x27, 0x0e, 0x50, 0x3f, 0x2b, 0x14, 0x34, 0xec, 0x8d, 0xbb, 0x58, 0xe2,
    +	0xa1, 0x4d, 0x18, 0xb5, 0xec, 0x26, 0x69, 0x10, 0x93, 0xe8, 0xd4, 0x76, 0xf8, 0xa1, 0x96, 0x17,
    +	0x67, 0xc3, 0xf8, 0xcc, 0x85, 0xf8, 0x56, 0x42, 0x7c, 0xf5, 0x89, 0x5e, 0xb7, 0x3a, 0x1a, 0xa6,
    +	0xe0, 0x08, 0x0e, 0xea, 0xc0, 0x39, 0xcd, 0x5f, 0xc5, 0x86, 0xd1, 0x22, 0x2e, 0xd5, 0x5a, 0x6d,
    +	0x71, 0x02, 0xcf, 0x64, 0x3b, 0x60, 0x26, 0x56, 0x7f, 0xa4, 0xd7, 0xad, 0x9e, 0x5b, 0xea, 0x87,
    +	0xc2, 0x49, 0xf8, 0xea, 0x2b, 0x30, 0xb9, 0xbc, 0x7a, 0x5d, 0x98, 0xbe, 0x5c, 0xcb, 0x22, 0x00,
    +	0xb9, 0xdb, 0x76, 0x88, 0xcb, 0xce, 0x53, 0x38, 0x80, 0x6f, 0x32, 0xab, 0xfe, 0x08, 0x0e, 0x71,
    +	0xa9, 0x1f, 0xe4, 0xe0, 0xc2, 0xb2, 0xd6, 0xd6, 0x74, 0x83, 0x1e, 0x60, 0xf2, 0x6e, 0x87, 0xb8,
    +	0x74, 0xdd, 0x36, 0x0d, 0xfd, 0x00, 0xdd, 0x62, 0x87, 0xb1, 0xad, 0x75, 0x4c, 0x9a, 0x70, 0x18,
    +	0x7d, 0xbb, 0x09, 0x4e, 0xe7, 0x8d, 0x8e, 0x66, 0x51, 0x83, 0x1e, 0x78, 0x8e, 0xb0, 0xe2, 0x41,
    +	0x60, 0x89, 0x85, 0x08, 0x94, 0xf7, 0x35, 0xd3, 0x68, 0x6e, 0x6a, 0x66, 0x87, 0xb8, 0x95, 0x3c,
    +	0xf7, 0x84, 0xe3, 0x42, 0x9f, 0x13, 0xbb, 0x2a, 0x6f, 0x06, 0x50, 0x38, 0x8c, 0x8b, 0xb6, 0x00,
    +	0xf8, 0x4f, 0xac, 0x59, 0x3b, 0xa4, 0x52, 0xe0, 0x1b, 0x58, 0x4c, 0xb3, 0xa6, 0x44, 0x05, 0x70,
    +	0xc9, 0xfa, 0x38, 0xd3, 0xdd, 0xa6, 0x8f, 0x84, 0x43, 0xa8, 0xea, 0x7b, 0x39, 0x98, 0x4e, 0x17,
    +	0x45, 0x6b, 0x90, 0x6f, 0x19, 0xd6, 0x09, 0x95, 0x37, 0xdc, 0xeb, 0x56, 0xf3, 0x37, 0x0c, 0x0b,
    +	0x33, 0x0c, 0x0e, 0xa5, 0xdd, 0xe5, 0xd1, 0xea, 0xa4, 0x50, 0xda, 0x5d, 0xcc, 0x30, 0xd0, 0x75,
    +	0x28, 0xb8, 0x94, 0xb4, 0x85, 0x03, 0x1c, 0x17, 0x8b, 0x07, 0x89, 0x06, 0x25, 0x6d, 0xcc, 0x51,
    +	0xd4, 0xff, 0x28, 0x70, 0x3e, 0xac, 0x02, 0xc3, 0x21, 0x2d, 0x62, 0x51, 0x17, 0x1d, 0x40, 0xc9,
    +	0xf1, 0x54, 0xc2, 0x7c, 0x99, 0x9d, 0xf1, 0x8b, 0x59, 0xb4, 0x2f, 0xe5, 0x6b, 0x42, 0x9f, 0xee,
    +	0xaa, 0x45, 0x9d, 0x83, 0xfa, 0xe3, 0xe2, 0xbc, 0x4b, 0x92, 0xfc, 0xcd, 0xbf, 0x54, 0xc7, 0xde,
    +	0xe8, 0x68, 0xa6, 0xb1, 0x6d, 0x90, 0xe6, 0x4d, 0xad, 0x45, 0xb0, 0x3f, 0xdd, 0xf4, 0x1e, 0x8c,
    +	0x45, 0xa4, 0xd1, 0x04, 0xe4, 0xf7, 0xc8, 0x81, 0xe7, 0x10, 0x98, 0xfd, 0x89, 0x56, 0xe0, 0xcc,
    +	0x3e, 0xb3, 0x93, 0x93, 0x69, 0x14, 0x7b, 0xc2, 0x2f, 0xe6, 0x5e, 0x50, 0xd4, 0xb7, 0x61, 0x78,
    +	0xd9, 0xee, 0x58, 0x94, 0x38, 0xa8, 0x21, 0x41, 0x4f, 0x76, 0xe2, 0x63, 0x62, 0x8f, 0x67, 0xb8,
    +	0x05, 0x8b, 0x39, 0xd4, 0x7f, 0x28, 0x00, 0x62, 0x82, 0x06, 0xa1, 0x2c, 0x6f, 0x59, 0x5a, 0x8b,
    +	0x08, 0xe7, 0xf6, 0xf3, 0x16, 0xd7, 0x00, 0x1f, 0x41, 0x6f, 0x43, 0x49, 0xf7, 0xf8, 0xdd, 0x4a,
    +	0x8e, 0x2b, 0xfe, 0x4a, 0xaa, 0xe2, 0x7d, 0x5c, 0xf9, 0xa7, 0x50, 0xf7, 0x84, 0x54, 0xb7, 0x24,
    +	0x63, 0x1f, 0x73, 0xfa, 0x2d, 0x18, 0x8b, 0x30, 0x27, 0x68, 0xf7, 0xb9, 0xa8, 0x76, 0xab, 0x03,
    +	0xe6, 0x0f, 0xab, 0xf3, 0xdf, 0x25, 0x10, 0x09, 0x36, 0xc3, 0x56, 0x5d, 0x00, 0x8d, 0x52, 0xc7,
    +	0xd8, 0xea, 0x50, 0x22, 0x37, 0x3b, 0x20, 0x63, 0xd4, 0x96, 0x7c, 0x01, 0x6f, 0xab, 0x17, 0x65,
    +	0x7c, 0x0c, 0x06, 0xfa, 0x6d, 0x2b, 0x34, 0x0d, 0xda, 0x83, 0x92, 0x2e, 0x0c, 0x56, 0x04, 0xaf,
    +	0x4b, 0x03, 0xa6, 0x94, 0xf6, 0x1d, 0x33, 0x65, 0x49, 0x4e, 0x30, 0x65, 0x39, 0x01, 0xda, 0x87,
    +	0x09, 0xdd, 0xb6, 0xdc, 0x4e, 0x8b, 0xb8, 0x52, 0xe9, 0xa2, 0x76, 0xb8, 0x72, 0xf4, 0xa4, 0x82,
    +	0x7b, 0x99, 0x0b, 0xb7, 0x79, 0xf1, 0x50, 0x11, 0x13, 0x4f, 0x2c, 0xc7, 0x10, 0x71, 0xdf, 0x1c,
    +	0x68, 0x0e, 0x4a, 0x2c, 0xcb, 0xb1, 0xd5, 0xf0, 0x54, 0x36, 0x52, 0x1f, 0x65, 0x4b, 0xbe, 0x29,
    +	0x68, 0xd8, 0x1f, 0xed, 0xcb, 0xab, 0xc5, 0xfb, 0x94, 0x57, 0xe7, 0xa0, 0xa4, 0x99, 0x26, 0x63,
    +	0x70, 0x79, 0x5d, 0x55, 0xf2, 0x56, 0xb0, 0x24, 0x68, 0xd8, 0x1f, 0x45, 0xd7, 0xa0, 0x48, 0x35,
    +	0xc3, 0xa2, 0x6e, 0xa5, 0xc4, 0x35, 0x73, 0xf1, 0x68, 0xcd, 0x6c, 0x30, 0xde, 0xa0, 0x9a, 0xe3,
    +	0x3f, 0x5d, 0x2c, 0x20, 0xd0, 0x02, 0x94, 0xb7, 0x0c, 0xab, 0xe9, 0x6e, 0xd8, 0x0c, 0xbc, 0x32,
    +	0xc2, 0x67, 0xe6, 0x95, 0x4c, 0x3d, 0x20, 0xe3, 0x30, 0x0f, 0x5a, 0x86, 0x49, 0xf6, 0xd3, 0xb0,
    +	0x76, 0x82, 0xaa, 0xac, 0x02, 0xb3, 0xf9, 0xb9, 0x91, 0xfa, 0x85, 0x5e, 0xb7, 0x3a, 0x59, 0x8f,
    +	0x0f, 0xe2, 0x7e, 0x7e, 0x74, 0x1b, 0x2a, 0x82, 0x78, 0x55, 0x33, 0xcc, 0x8e, 0x43, 0x42, 0x58,
    +	0x65, 0x8e, 0xf5, 0xff, 0xbd, 0x6e, 0xb5, 0x52, 0x4f, 0xe1, 0xc1, 0xa9, 0xd2, 0x0c, 0x99, 0x15,
    +	0x10, 0x77, 0x6e, 0x74, 0x4c, 0x6a, 0xb4, 0xcd, 0x50, 0xcd, 0xe4, 0x56, 0x46, 0xf9, 0xf6, 0x38,
    +	0xf2, 0x52, 0x0a, 0x0f, 0x4e, 0x95, 0x9e, 0xde, 0x86, 0xb3, 0x31, 0x6f, 0x4a, 0x88, 0x05, 0x5f,
    +	0x88, 0xc6, 0x82, 0xa7, 0x06, 0x14, 0x74, 0x12, 0x2f, 0x14, 0x13, 0xa6, 0x75, 0x18, 0x8b, 0xb8,
    +	0x50, 0xc2, 0x2c, 0x2f, 0x47, 0x67, 0x79, 0x72, 0x80, 0x73, 0xc8, 0x84, 0x13, 0x0a, 0x3c, 0xdf,
    +	0xce, 0xc1, 0x63, 0xf1, 0xa2, 0x72, 0xd9, 0xb6, 0xb6, 0x8d, 0x9d, 0x8e, 0xc3, 0x7f, 0xa0, 0x2f,
    +	0x41, 0xd1, 0x03, 0x12, 0x11, 0x69, 0x4e, 0x9a, 0x50, 0x83, 0x53, 0x0f, 0xbb, 0xd5, 0xa9, 0xb8,
    +	0xa8, 0x37, 0x82, 0x85, 0x1c, 0xb3, 0x69, 0x3f, 0x27, 0xe6, 0xf8, 0xa1, 0x8e, 0x86, 0x73, 0x5a,
    +	0x90, 0xc2, 0xd0, 0x37, 0xe0, 0x5c, 0x53, 0xf8, 0x71, 0x68, 0x09, 0x22, 0x67, 0x3f, 0x3b, 0xc8,
    +	0xf5, 0x43, 0x22, 0xf5, 0xff, 0x13, 0xab, 0x3c, 0x97, 0x30, 0x88, 0x93, 0x26, 0x51, 0xff, 0xa4,
    +	0xc0, 0x54, 0x72, 0x79, 0x8d, 0xde, 0x81, 0x61, 0x87, 0xff, 0x25, 0x73, 0xfa, 0x73, 0x47, 0x2f,
    +	0x45, 0xec, 0x2c, 0xbd, 0x4c, 0xf7, 0x7e, 0xbb, 0x58, 0xc2, 0xa2, 0xaf, 0x42, 0x51, 0xe7, 0xab,
    +	0x11, 0xe1, 0xfc, 0xb9, 0xac, 0x17, 0x80, 0xe8, 0xae, 0x7d, 0xf7, 0xf6, 0xc8, 0x58, 0x80, 0xaa,
    +	0x3f, 0x53, 0xe0, 0x6c, 0xcc, 0xd2, 0xd0, 0x0c, 0xe4, 0x0d, 0x8b, 0x72, 0xcb, 0xc9, 0x7b, 0x07,
    +	0xb2, 0x66, 0x51, 0x2f, 0x07, 0xb3, 0x01, 0xf4, 0x38, 0x14, 0xb6, 0xd8, 0x55, 0x31, 0xcf, 0x9d,
    +	0x65, 0xac, 0xd7, 0xad, 0x8e, 0xd4, 0x6d, 0xdb, 0xf4, 0x38, 0xf8, 0x10, 0x7a, 0x0a, 0x8a, 0x2e,
    +	0x75, 0x0c, 0x6b, 0x87, 0x17, 0x9a, 0x23, 0x5e, 0xc0, 0x68, 0x70, 0x8a, 0xc7, 0x26, 0x86, 0xd1,
    +	0x33, 0x30, 0xbc, 0x4f, 0x1c, 0x5e, 0x9e, 0x7b, 0x61, 0x95, 0x87, 0xc1, 0x4d, 0x8f, 0xe4, 0xb1,
    +	0x4a, 0x06, 0xf5, 0x63, 0x05, 0xc6, 0xa3, 0xf6, 0x7a, 0x2a, 0x15, 0x06, 0xda, 0x86, 0x31, 0x27,
    +	0x5c, 0xbc, 0x0a, 0x1f, 0xba, 0x7c, 0xac, 0x62, 0xb9, 0x3e, 0xd9, 0xeb, 0x56, 0xc7, 0xa2, 0x45,
    +	0x70, 0x14, 0x56, 0xfd, 0x71, 0x0e, 0xca, 0x62, 0x3f, 0xa6, 0x66, 0xb4, 0x50, 0xa3, 0xaf, 0x42,
    +	0x7c, 0x22, 0x93, 0x35, 0x05, 0xd5, 0x49, 0x82, 0xe3, 0x7c, 0x0d, 0xca, 0x2c, 0x99, 0x51, 0xc7,
    +	0xcb, 0x08, 0x9e, 0x11, 0xcd, 0x0d, 0x74, 0x18, 0x21, 0x10, 0xdc, 0x2b, 0x02, 0x9a, 0x8b, 0xc3,
    +	0x88, 0xe8, 0xb6, 0x6f, 0xa0, 0xf9, 0x4c, 0x79, 0x98, 0x6d, 0x35, 0x9b, 0x6d, 0x7e, 0xa8, 0x40,
    +	0x25, 0x4d, 0x28, 0x12, 0x3a, 0x94, 0x93, 0x84, 0x8e, 0xdc, 0x83, 0x08, 0x1d, 0xbf, 0x56, 0x42,
    +	0x47, 0xec, 0xba, 0xe8, 0x1d, 0x28, 0xb1, 0x3b, 0x2e, 0xef, 0x49, 0x78, 0x26, 0x7b, 0x25, 0xdb,
    +	0x8d, 0xf8, 0xf5, 0xad, 0xaf, 0x13, 0x9d, 0xde, 0x20, 0x54, 0x0b, 0x2e, 0xb0, 0x01, 0x0d, 0xfb,
    +	0xa8, 0x68, 0x0d, 0x0a, 0x6e, 0x9b, 0xe8, 0xd9, 0xb2, 0x0b, 0x5f, 0x54, 0xa3, 0x4d, 0xf4, 0xa0,
    +	0x9a, 0x64, 0xbf, 0x30, 0x87, 0x50, 0xbf, 0x1f, 0xd6, 0xbf, 0xeb, 0x46, 0xf5, 0x9f, 0xa2, 0x55,
    +	0xe5, 0x41, 0x68, 0xf5, 0x03, 0x3f, 0x68, 0xf1, 0x85, 0x5d, 0x37, 0x5c, 0x8a, 0xde, 0xea, 0xd3,
    +	0x6c, 0x2d, 0x9b, 0x66, 0x99, 0x34, 0xd7, 0xab, 0xef, 0x45, 0x92, 0x12, 0xd2, 0xea, 0xab, 0x70,
    +	0xc6, 0xa0, 0xa4, 0x25, 0xfd, 0xe7, 0x62, 0x06, 0xb5, 0x06, 0xc1, 0x65, 0x8d, 0x49, 0x62, 0x0f,
    +	0x40, 0xfd, 0x6e, 0x2e, 0xb2, 0x76, 0xa6, 0x6e, 0xf4, 0x65, 0x18, 0x71, 0x45, 0x99, 0x27, 0x3d,
    +	0x7f, 0x40, 0xc2, 0xf6, 0xab, 0xc6, 0x49, 0x31, 0xc9, 0x88, 0xa4, 0xb8, 0x38, 0xc0, 0x0a, 0xf9,
    +	0x66, 0x2e, 0xa3, 0x6f, 0xc6, 0x8e, 0x39, 0xcd, 0x37, 0xd1, 0x75, 0x38, 0x4f, 0xee, 0x52, 0x62,
    +	0x35, 0x49, 0x13, 0x0b, 0x1c, 0x5e, 0x1b, 0x7b, 0xe1, 0xbe, 0xd2, 0xeb, 0x56, 0xcf, 0xaf, 0x26,
    +	0x8c, 0xe3, 0x44, 0x29, 0xd5, 0x84, 0xa4, 0xc3, 0x47, 0xb7, 0xa0, 0x68, 0xb7, 0xb5, 0x77, 0xfd,
    +	0xf0, 0xbe, 0x90, 0xb6, 0xfc, 0xd7, 0x39, 0x57, 0x92, 0x71, 0x01, 0x5b, 0xbb, 0x37, 0x8c, 0x05,
    +	0x98, 0xfa, 0x77, 0x05, 0x26, 0xe2, 0x81, 0xee, 0x18, 0xf1, 0x64, 0x1d, 0xc6, 0x5b, 0x1a, 0xd5,
    +	0x77, 0xfd, 0x84, 0x29, 0x7a, 0xa6, 0x73, 0xbd, 0x6e, 0x75, 0xfc, 0x46, 0x64, 0xe4, 0xb0, 0x5b,
    +	0x45, 0x57, 0x3b, 0xa6, 0x79, 0x10, 0xbd, 0xce, 0xc4, 0xe4, 0xd1, 0x9b, 0x30, 0xd9, 0x34, 0x5c,
    +	0x6a, 0x58, 0x3a, 0x0d, 0x40, 0xbd, 0x26, 0xeb, 0xb3, 0xac, 0x60, 0x5e, 0x89, 0x0f, 0xa6, 0xe0,
    +	0xf6, 0xa3, 0xa8, 0x3f, 0xca, 0xf9, 0x3e, 0xdc, 0x77, 0x01, 0x42, 0x8b, 0x00, 0xba, 0x7f, 0xe3,
    +	0x8d, 0xb7, 0xc7, 0x82, 0xbb, 0x30, 0x0e, 0x71, 0x21, 0xb3, 0xef, 0x36, 0xfd, 0xc5, 0xe3, 0x5e,
    +	0xbc, 0x1e, 0x9a, 0xbb, 0xf5, 0x3f, 0x15, 0x18, 0x8b, 0x64, 0xd2, 0x0c, 0x57, 0xec, 0x37, 0x60,
    +	0x98, 0xdc, 0xd5, 0x74, 0x6a, 0xca, 0xb2, 0xe0, 0x99, 0xb4, 0x09, 0x57, 0x19, 0x5b, 0x34, 0x51,
    +	0xf3, 0x06, 0xe0, 0xaa, 0x27, 0x8e, 0x25, 0x0e, 0xda, 0x85, 0xf1, 0x6d, 0xc3, 0x71, 0xe9, 0xd2,
    +	0xbe, 0x66, 0x98, 0xda, 0x96, 0x49, 0x44, 0x26, 0x1d, 0x90, 0xa5, 0x1b, 0x9d, 0x2d, 0x89, 0x3b,
    +	0x25, 0x16, 0x3a, 0x7e, 0x35, 0x82, 0x83, 0x63, 0xb8, 0xea, 0x1f, 0x8b, 0xb2, 0xa6, 0x4f, 0x29,
    +	0x44, 0xd1, 0xd3, 0xac, 0xa0, 0xe5, 0x43, 0x42, 0x07, 0xa1, 0xca, 0x94, 0x93, 0xb1, 0x1c, 0x0f,
    +	0x7d, 0x59, 0xc8, 0x65, 0xfa, 0xb2, 0x90, 0xcf, 0xf0, 0x65, 0xa1, 0x70, 0xe4, 0x97, 0x85, 0x05,
    +	0x28, 0x6b, 0xcd, 0x96, 0x61, 0x2d, 0xe9, 0x3a, 0x71, 0x5d, 0x5e, 0x30, 0x8a, 0xbb, 0xe8, 0x52,
    +	0x40, 0xc6, 0x61, 0x1e, 0x56, 0xfe, 0x50, 0xdb, 0x24, 0x8e, 0xb8, 0xdf, 0x15, 0xb3, 0x28, 0x76,
    +	0xc3, 0x17, 0x08, 0xca, 0x9f, 0x80, 0xe6, 0xe2, 0x30, 0x62, 0xf2, 0x65, 0x77, 0xf8, 0x3e, 0x5e,
    +	0x76, 0x4b, 0x9f, 0xe9, 0xb2, 0xfb, 0x5a, 0xf0, 0x31, 0x66, 0x84, 0xeb, 0xf6, 0x4a, 0xe8, 0x63,
    +	0xcc, 0x61, 0xb7, 0xfa, 0x78, 0xda, 0x07, 0x27, 0x7a, 0xd0, 0x26, 0x6e, 0xed, 0x56, 0xf8, 0x8b,
    +	0xcd, 0xfb, 0x8a, 0xdf, 0x7c, 0x69, 0xca, 0x9a, 0x97, 0xdf, 0xeb, 0xcb, 0x8b, 0xd7, 0x4e, 0x74,
    +	0xed, 0xa9, 0x2d, 0xc7, 0xd0, 0xbc, 0x80, 0xf0, 0x74, 0xac, 0x2f, 0xd3, 0x4c, 0x6f, 0x0c, 0xf5,
    +	0xad, 0x67, 0xda, 0x85, 0x0b, 0x89, 0xa8, 0xa7, 0xda, 0xf3, 0xdc, 0x94, 0x17, 0x13, 0xbf, 0x5b,
    +	0xb3, 0x02, 0x79, 0x9d, 0x98, 0x22, 0x6f, 0xa5, 0x7e, 0x23, 0xea, 0xfb, 0x62, 0xe1, 0xb5, 0xa6,
    +	0x97, 0x57, 0xaf, 0x63, 0x26, 0xae, 0x7e, 0xab, 0x20, 0x33, 0x55, 0xe0, 0xec, 0x19, 0x62, 0xd4,
    +	0x12, 0x9c, 0x6d, 0x06, 0x09, 0x9d, 0xe7, 0x65, 0xcf, 0x45, 0x1f, 0x11, 0xcc, 0xe1, 0x0a, 0x84,
    +	0xcb, 0xc5, 0xf9, 0xa3, 0x25, 0x49, 0xfe, 0x3e, 0x96, 0x24, 0x9b, 0x30, 0x1e, 0x7c, 0xbe, 0xb9,
    +	0x61, 0x37, 0xa5, 0xcf, 0xd7, 0x64, 0x08, 0x5b, 0x8a, 0x8c, 0x1e, 0x76, 0xab, 0xe7, 0xe3, 0x37,
    +	0x5b, 0x46, 0xc7, 0x31, 0x14, 0x74, 0x11, 0xce, 0xf0, 0xac, 0xc1, 0xa3, 0x42, 0x3e, 0x28, 0xbe,
    +	0x78, 0xd8, 0xc7, 0xde, 0xd8, 0xe9, 0x47, 0x83, 0xcd, 0x50, 0x2f, 0x74, 0x98, 0x9f, 0xfd, 0xa5,
    +	0xe3, 0x34, 0xf9, 0xbd, 0x9a, 0xc3, 0x1f, 0xf1, 0xb1, 0xd4, 0x7f, 0xf9, 0xf7, 0x08, 0xde, 0x9e,
    +	0x43, 0x8f, 0x85, 0x8c, 0xb9, 0x5e, 0x16, 0xcb, 0xca, 0x5f, 0x23, 0x07, 0x9e, 0x65, 0x5f, 0x0c,
    +	0x5b, 0xf6, 0x48, 0xca, 0x35, 0xf7, 0x25, 0x28, 0x92, 0xed, 0x6d, 0xa2, 0x53, 0x11, 0x99, 0x65,
    +	0xe3, 0xb7, 0xb8, 0xca, 0xa9, 0x87, 0xac, 0xf0, 0x08, 0xa6, 0xf4, 0x88, 0x58, 0x88, 0x30, 0xfb,
    +	0xa0, 0x46, 0x8b, 0x2c, 0x35, 0x9b, 0xa4, 0x29, 0x3e, 0x26, 0x1d, 0xe7, 0xdb, 0x1e, 0x6f, 0x1a,
    +	0x6c, 0x48, 0x00, 0x1c, 0x60, 0xbd, 0x58, 0xfa, 0xc1, 0x4f, 0xaa, 0x43, 0xef, 0xfd, 0x79, 0x76,
    +	0x48, 0x7d, 0x3f, 0x27, 0x8d, 0x3f, 0x50, 0xf7, 0xa0, 0x8d, 0xbf, 0x0a, 0x25, 0xbb, 0xcd, 0x78,
    +	0x6d, 0x99, 0x95, 0x2e, 0xc9, 0xea, 0xe2, 0x75, 0x41, 0x3f, 0xec, 0x56, 0x2b, 0x71, 0x58, 0x39,
    +	0x86, 0x7d, 0xe9, 0x40, 0x85, 0xf9, 0x4c, 0x2a, 0x2c, 0x1c, 0x5f, 0x85, 0xcb, 0x30, 0x19, 0x98,
    +	0x4e, 0x83, 0xe8, 0xb6, 0xd5, 0x74, 0x85, 0xf5, 0xf2, 0xcc, 0xb1, 0x11, 0x1f, 0xc4, 0xfd, 0xfc,
    +	0xea, 0x0f, 0x0b, 0x80, 0xfa, 0x0b, 0x8d, 0xa4, 0x08, 0xa0, 0x7c, 0x96, 0x08, 0x90, 0x3b, 0xd5,
    +	0x08, 0x90, 0xbf, 0xbf, 0x11, 0xa0, 0x70, 0x44, 0x04, 0x78, 0x18, 0x4b, 0x88, 0xd3, 0x0a, 0x1a,
    +	0x3f, 0x57, 0x60, 0xb2, 0xef, 0x15, 0x02, 0x7a, 0x09, 0xc6, 0x0c, 0x56, 0x08, 0x6f, 0x6b, 0xe2,
    +	0xca, 0xe6, 0x19, 0xc6, 0x05, 0xb1, 0xcc, 0xb1, 0xb5, 0xf0, 0x20, 0x8e, 0xf2, 0xa2, 0x47, 0x21,
    +	0x6f, 0xb4, 0x65, 0xaf, 0x96, 0xe7, 0xaa, 0xb5, 0x75, 0x17, 0x33, 0x1a, 0x33, 0xb9, 0x5d, 0xcd,
    +	0x69, 0xde, 0xd1, 0x1c, 0xe6, 0xc9, 0x0e, 0xd3, 0x6e, 0x3e, 0x6a, 0x72, 0xaf, 0x46, 0x87, 0x71,
    +	0x9c, 0x5f, 0xfd, 0xa9, 0x02, 0x8f, 0xa6, 0x5e, 0xe5, 0x32, 0xbf, 0x64, 0xd1, 0x00, 0xda, 0x9a,
    +	0xa3, 0xb5, 0x88, 0xb8, 0xa3, 0x9c, 0xe0, 0xe5, 0x87, 0x7f, 0x09, 0x5a, 0xf7, 0x81, 0x70, 0x08,
    +	0x54, 0xfd, 0x5e, 0x0e, 0xc6, 0xe4, 0x05, 0xd6, 0xeb, 0xdd, 0x9d, 0x7e, 0x63, 0xe7, 0x5a, 0xa4,
    +	0xb1, 0x93, 0x5a, 0x52, 0x44, 0x96, 0x95, 0xd6, 0xda, 0x41, 0x0d, 0x28, 0xba, 0xfc, 0x7d, 0xd0,
    +	0xa0, 0x0e, 0x7a, 0x14, 0x8e, 0x8b, 0x04, 0x8a, 0xf7, 0x7e, 0x63, 0x01, 0xa5, 0xf6, 0x14, 0x98,
    +	0x89, 0xf0, 0x8b, 0x42, 0xcc, 0xc1, 0x64, 0x9b, 0x38, 0xc4, 0xd2, 0x09, 0xba, 0x04, 0x25, 0xad,
    +	0x6d, 0xbc, 0xe2, 0xd8, 0x9d, 0xb6, 0x38, 0x45, 0xff, 0xf6, 0xb7, 0xb4, 0xbe, 0xc6, 0xe9, 0xd8,
    +	0xe7, 0x60, 0xdc, 0x72, 0x2d, 0xc2, 0x96, 0x42, 0x9d, 0x4e, 0x8f, 0x8e, 0x7d, 0x0e, 0xbf, 0x2e,
    +	0x2a, 0xa4, 0xd6, 0x45, 0x75, 0xc8, 0x77, 0x8c, 0xa6, 0x68, 0x34, 0x5f, 0x91, 0xc9, 0xe3, 0x56,
    +	0xd6, 0x42, 0x98, 0x09, 0xab, 0xbf, 0x55, 0x60, 0x32, 0xb2, 0xc9, 0x07, 0xd0, 0x7d, 0x7a, 0x2d,
    +	0xda, 0x7d, 0x7a, 0x22, 0xd3, 0x61, 0xa5, 0xf4, 0x9f, 0xf4, 0xd8, 0xf2, 0x79, 0x03, 0xea, 0x66,
    +	0xfc, 0x99, 0xd1, 0xc5, 0x0c, 0x4d, 0xdc, 0xf4, 0xb7, 0x45, 0xea, 0xaf, 0x72, 0x70, 0x2e, 0xc1,
    +	0x72, 0xd0, 0x6d, 0x80, 0x20, 0x68, 0x8b, 0xa9, 0x52, 0x23, 0x69, 0xdf, 0x47, 0x12, 0xfe, 0xf2,
    +	0x24, 0x44, 0x0d, 0x61, 0xa1, 0x16, 0x94, 0x1d, 0xe2, 0x12, 0x67, 0x9f, 0x34, 0xaf, 0xf2, 0xdc,
    +	0xcf, 0x14, 0xf5, 0x7c, 0x26, 0x45, 0xf5, 0x59, 0x69, 0x10, 0xb2, 0x71, 0x00, 0x89, 0xc3, 0xf8,
    +	0xe8, 0x76, 0xa0, 0x30, 0xef, 0xeb, 0xf3, 0xe5, 0x01, 0xbb, 0x88, 0xbe, 0xca, 0x3b, 0x42, 0x75,
    +	0x7f, 0x50, 0xe0, 0x42, 0x64, 0x79, 0x1b, 0xa4, 0xd5, 0x36, 0x35, 0x4a, 0x1e, 0x40, 0x88, 0x69,
    +	0x44, 0x42, 0xcc, 0x42, 0x26, 0xed, 0xc9, 0xe5, 0xa5, 0x76, 0x91, 0x3f, 0x56, 0xe0, 0xd1, 0x44,
    +	0x89, 0x07, 0xe0, 0x38, 0x38, 0xea, 0x38, 0x97, 0x8f, 0xb5, 0xa3, 0x14, 0x07, 0xfa, 0x7d, 0xda,
    +	0x7e, 0xb8, 0x27, 0xfd, 0x6f, 0xe5, 0x01, 0xf5, 0x17, 0x0a, 0x8c, 0x4a, 0xce, 0x75, 0xdb, 0x36,
    +	0x33, 0x5c, 0x2e, 0x17, 0x01, 0xc4, 0xeb, 0x53, 0xf9, 0x15, 0x25, 0x1f, 0xac, 0xf8, 0x15, 0x7f,
    +	0x04, 0x87, 0xb8, 0xd0, 0x6b, 0x80, 0xe4, 0xda, 0x1a, 0xa6, 0xec, 0x09, 0xf2, 0x90, 0x9e, 0xaf,
    +	0x4f, 0x0b, 0x59, 0x84, 0xfb, 0x38, 0x70, 0x82, 0x94, 0xfa, 0x3b, 0x25, 0xc8, 0xbd, 0x9c, 0xfc,
    +	0xf0, 0xe9, 0x9c, 0x2f, 0x2b, 0x55, 0xe7, 0xe1, 0x0c, 0xc2, 0x39, 0x1f, 0xc2, 0x0c, 0xc2, 0xd7,
    +	0x95, 0xe2, 0x00, 0xbf, 0x2c, 0xc4, 0xd6, 0xcf, 0x0d, 0x3f, 0x6b, 0x75, 0x76, 0x35, 0xf4, 0xce,
    +	0xb8, 0xbc, 0xf8, 0xb9, 0x41, 0x0b, 0x61, 0x46, 0x99, 0xd8, 0x33, 0x0c, 0x3f, 0xc8, 0xc9, 0x1f,
    +	0xeb, 0x41, 0x4e, 0xe1, 0x14, 0x1e, 0xe4, 0x9c, 0x39, 0xf2, 0x41, 0xce, 0x5a, 0x90, 0x2d, 0xbc,
    +	0xdb, 0xc3, 0xcc, 0xd1, 0xe9, 0xf5, 0x88, 0x57, 0xbb, 0x18, 0xa6, 0xda, 0xc4, 0xf1, 0xc8, 0xc1,
    +	0xda, 0x98, 0x27, 0x7a, 0x6f, 0x82, 0xa6, 0x7b, 0xdd, 0xea, 0xd4, 0x7a, 0x22, 0x07, 0x4e, 0x91,
    +	0x44, 0x5b, 0x30, 0xce, 0x5b, 0x7c, 0x4d, 0xff, 0x45, 0x95, 0xf7, 0x6e, 0x48, 0x1d, 0xfc, 0x4c,
    +	0x2e, 0xe8, 0x3c, 0x37, 0x22, 0x08, 0x38, 0x86, 0x58, 0x7f, 0xf9, 0xa3, 0x7b, 0x33, 0x43, 0x9f,
    +	0xdc, 0x9b, 0x19, 0xfa, 0xf4, 0xde, 0xcc, 0xd0, 0x7b, 0xbd, 0x19, 0xe5, 0xa3, 0xde, 0x8c, 0xf2,
    +	0x49, 0x6f, 0x46, 0xf9, 0xb4, 0x37, 0xa3, 0xfc, 0xb5, 0x37, 0xa3, 0x7c, 0xe7, 0x6f, 0x33, 0x43,
    +	0x5f, 0x99, 0x4a, 0xfe, 0x77, 0x81, 0xff, 0x06, 0x00, 0x00, 0xff, 0xff, 0x0d, 0x0c, 0xec, 0x16,
    +	0x47, 0x30, 0x00, 0x00,
    +}
    +
    +func (m *AllocatedDeviceStatus) Marshal() (dAtA []byte, err error) {
    +	size := m.Size()
    +	dAtA = make([]byte, size)
    +	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    +	if err != nil {
    +		return nil, err
    +	}
    +	return dAtA[:n], nil
    +}
    +
    +func (m *AllocatedDeviceStatus) MarshalTo(dAtA []byte) (int, error) {
    +	size := m.Size()
    +	return m.MarshalToSizedBuffer(dAtA[:size])
    +}
    +
    +func (m *AllocatedDeviceStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    +	i := len(dAtA)
    +	_ = i
    +	var l int
    +	_ = l
    +	if m.ShareID != nil {
    +		i -= len(*m.ShareID)
    +		copy(dAtA[i:], *m.ShareID)
    +		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ShareID)))
    +		i--
    +		dAtA[i] = 0x3a
    +	}
    +	if m.NetworkData != nil {
    +		{
    +			size, err := m.NetworkData.MarshalToSizedBuffer(dAtA[:i])
    +			if err != nil {
    +				return 0, err
    +			}
    +			i -= size
    +			i = encodeVarintGenerated(dAtA, i, uint64(size))
    +		}
    +		i--
    +		dAtA[i] = 0x32
    +	}
    +	if m.Data != nil {
    +		{
    +			size, err := m.Data.MarshalToSizedBuffer(dAtA[:i])
    +			if err != nil {
    +				return 0, err
    +			}
    +			i -= size
    +			i = encodeVarintGenerated(dAtA, i, uint64(size))
    +		}
    +		i--
    +		dAtA[i] = 0x2a
    +	}
    +	if len(m.Conditions) > 0 {
    +		for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- {
    +			{
    +				size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i])
    +				if err != nil {
    +					return 0, err
    +				}
    +				i -= size
    +				i = encodeVarintGenerated(dAtA, i, uint64(size))
    +			}
    +			i--
    +			dAtA[i] = 0x22
    +		}
    +	}
    +	i -= len(m.Device)
    +	copy(dAtA[i:], m.Device)
    +	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Device)))
    +	i--
    +	dAtA[i] = 0x1a
    +	i -= len(m.Pool)
    +	copy(dAtA[i:], m.Pool)
    +	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Pool)))
    +	i--
    +	dAtA[i] = 0x12
    +	i -= len(m.Driver)
    +	copy(dAtA[i:], m.Driver)
    +	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Driver)))
    +	i--
    +	dAtA[i] = 0xa
    +	return len(dAtA) - i, nil
    +}
    +
    +func (m *AllocationResult) Marshal() (dAtA []byte, err error) {
    +	size := m.Size()
    +	dAtA = make([]byte, size)
    +	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    +	if err != nil {
    +		return nil, err
    +	}
    +	return dAtA[:n], nil
    +}
    +
    +func (m *AllocationResult) MarshalTo(dAtA []byte) (int, error) {
    +	size := m.Size()
    +	return m.MarshalToSizedBuffer(dAtA[:size])
    +}
    +
    +func (m *AllocationResult) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    +	i := len(dAtA)
    +	_ = i
    +	var l int
    +	_ = l
    +	if m.AllocationTimestamp != nil {
    +		{
    +			size, err := m.AllocationTimestamp.MarshalToSizedBuffer(dAtA[:i])
    +			if err != nil {
    +				return 0, err
    +			}
    +			i -= size
    +			i = encodeVarintGenerated(dAtA, i, uint64(size))
    +		}
    +		i--
    +		dAtA[i] = 0x2a
    +	}
    +	if m.NodeSelector != nil {
    +		{
    +			size, err := m.NodeSelector.MarshalToSizedBuffer(dAtA[:i])
    +			if err != nil {
    +				return 0, err
    +			}
    +			i -= size
    +			i = encodeVarintGenerated(dAtA, i, uint64(size))
    +		}
    +		i--
    +		dAtA[i] = 0x1a
    +	}
    +	{
    +		size, err := m.Devices.MarshalToSizedBuffer(dAtA[:i])
    +		if err != nil {
    +			return 0, err
    +		}
    +		i -= size
    +		i = encodeVarintGenerated(dAtA, i, uint64(size))
    +	}
    +	i--
    +	dAtA[i] = 0xa
    +	return len(dAtA) - i, nil
    +}
    +
    +func (m *CELDeviceSelector) Marshal() (dAtA []byte, err error) {
    +	size := m.Size()
    +	dAtA = make([]byte, size)
    +	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    +	if err != nil {
    +		return nil, err
    +	}
    +	return dAtA[:n], nil
    +}
    +
    +func (m *CELDeviceSelector) MarshalTo(dAtA []byte) (int, error) {
    +	size := m.Size()
    +	return m.MarshalToSizedBuffer(dAtA[:size])
    +}
    +
    +func (m *CELDeviceSelector) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    +	i := len(dAtA)
    +	_ = i
    +	var l int
    +	_ = l
    +	i -= len(m.Expression)
    +	copy(dAtA[i:], m.Expression)
    +	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Expression)))
    +	i--
    +	dAtA[i] = 0xa
    +	return len(dAtA) - i, nil
    +}
    +
    +func (m *CapacityRequestPolicy) Marshal() (dAtA []byte, err error) {
    +	size := m.Size()
    +	dAtA = make([]byte, size)
    +	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    +	if err != nil {
    +		return nil, err
    +	}
    +	return dAtA[:n], nil
    +}
    +
    +func (m *CapacityRequestPolicy) MarshalTo(dAtA []byte) (int, error) {
    +	size := m.Size()
    +	return m.MarshalToSizedBuffer(dAtA[:size])
    +}
    +
    +func (m *CapacityRequestPolicy) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    +	i := len(dAtA)
    +	_ = i
    +	var l int
    +	_ = l
    +	if m.ValidRange != nil {
    +		{
    +			size, err := m.ValidRange.MarshalToSizedBuffer(dAtA[:i])
    +			if err != nil {
    +				return 0, err
    +			}
    +			i -= size
    +			i = encodeVarintGenerated(dAtA, i, uint64(size))
    +		}
    +		i--
    +		dAtA[i] = 0x22
    +	}
    +	if len(m.ValidValues) > 0 {
    +		for iNdEx := len(m.ValidValues) - 1; iNdEx >= 0; iNdEx-- {
    +			{
    +				size, err := m.ValidValues[iNdEx].MarshalToSizedBuffer(dAtA[:i])
    +				if err != nil {
    +					return 0, err
    +				}
    +				i -= size
    +				i = encodeVarintGenerated(dAtA, i, uint64(size))
    +			}
    +			i--
    +			dAtA[i] = 0x1a
    +		}
    +	}
    +	if m.Default != nil {
    +		{
    +			size, err := m.Default.MarshalToSizedBuffer(dAtA[:i])
    +			if err != nil {
    +				return 0, err
    +			}
    +			i -= size
    +			i = encodeVarintGenerated(dAtA, i, uint64(size))
    +		}
    +		i--
    +		dAtA[i] = 0xa
    +	}
    +	return len(dAtA) - i, nil
    +}
    +
    +func (m *CapacityRequestPolicyRange) Marshal() (dAtA []byte, err error) {
    +	size := m.Size()
    +	dAtA = make([]byte, size)
    +	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    +	if err != nil {
    +		return nil, err
    +	}
    +	return dAtA[:n], nil
    +}
    +
    +func (m *CapacityRequestPolicyRange) MarshalTo(dAtA []byte) (int, error) {
    +	size := m.Size()
    +	return m.MarshalToSizedBuffer(dAtA[:size])
    +}
    +
    +func (m *CapacityRequestPolicyRange) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    +	i := len(dAtA)
    +	_ = i
    +	var l int
    +	_ = l
    +	if m.Step != nil {
    +		{
    +			size, err := m.Step.MarshalToSizedBuffer(dAtA[:i])
    +			if err != nil {
    +				return 0, err
    +			}
    +			i -= size
    +			i = encodeVarintGenerated(dAtA, i, uint64(size))
    +		}
    +		i--
    +		dAtA[i] = 0x1a
    +	}
    +	if m.Max != nil {
    +		{
    +			size, err := m.Max.MarshalToSizedBuffer(dAtA[:i])
    +			if err != nil {
    +				return 0, err
    +			}
    +			i -= size
    +			i = encodeVarintGenerated(dAtA, i, uint64(size))
    +		}
    +		i--
    +		dAtA[i] = 0x12
    +	}
    +	if m.Min != nil {
    +		{
    +			size, err := m.Min.MarshalToSizedBuffer(dAtA[:i])
    +			if err != nil {
    +				return 0, err
    +			}
    +			i -= size
    +			i = encodeVarintGenerated(dAtA, i, uint64(size))
    +		}
    +		i--
    +		dAtA[i] = 0xa
    +	}
    +	return len(dAtA) - i, nil
    +}
    +
    +func (m *CapacityRequirements) Marshal() (dAtA []byte, err error) {
    +	size := m.Size()
    +	dAtA = make([]byte, size)
    +	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    +	if err != nil {
    +		return nil, err
    +	}
    +	return dAtA[:n], nil
    +}
    +
    +func (m *CapacityRequirements) MarshalTo(dAtA []byte) (int, error) {
    +	size := m.Size()
    +	return m.MarshalToSizedBuffer(dAtA[:size])
    +}
    +
    +func (m *CapacityRequirements) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    +	i := len(dAtA)
    +	_ = i
    +	var l int
    +	_ = l
    +	if len(m.Requests) > 0 {
    +		keysForRequests := make([]string, 0, len(m.Requests))
    +		for k := range m.Requests {
    +			keysForRequests = append(keysForRequests, string(k))
    +		}
    +		github_com_gogo_protobuf_sortkeys.Strings(keysForRequests)
    +		for iNdEx := len(keysForRequests) - 1; iNdEx >= 0; iNdEx-- {
    +			v := m.Requests[QualifiedName(keysForRequests[iNdEx])]
    +			baseI := i
    +			{
    +				size, err := (&v).MarshalToSizedBuffer(dAtA[:i])
    +				if err != nil {
    +					return 0, err
    +				}
    +				i -= size
    +				i = encodeVarintGenerated(dAtA, i, uint64(size))
    +			}
    +			i--
    +			dAtA[i] = 0x12
    +			i -= len(keysForRequests[iNdEx])
    +			copy(dAtA[i:], keysForRequests[iNdEx])
    +			i = encodeVarintGenerated(dAtA, i, uint64(len(keysForRequests[iNdEx])))
    +			i--
    +			dAtA[i] = 0xa
    +			i = encodeVarintGenerated(dAtA, i, uint64(baseI-i))
    +			i--
    +			dAtA[i] = 0xa
    +		}
    +	}
    +	return len(dAtA) - i, nil
    +}
    +
    +func (m *Counter) Marshal() (dAtA []byte, err error) {
    +	size := m.Size()
    +	dAtA = make([]byte, size)
    +	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    +	if err != nil {
    +		return nil, err
    +	}
    +	return dAtA[:n], nil
    +}
    +
    +func (m *Counter) MarshalTo(dAtA []byte) (int, error) {
    +	size := m.Size()
    +	return m.MarshalToSizedBuffer(dAtA[:size])
    +}
    +
    +func (m *Counter) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    +	i := len(dAtA)
    +	_ = i
    +	var l int
    +	_ = l
    +	{
    +		size, err := m.Value.MarshalToSizedBuffer(dAtA[:i])
    +		if err != nil {
    +			return 0, err
    +		}
    +		i -= size
    +		i = encodeVarintGenerated(dAtA, i, uint64(size))
    +	}
    +	i--
    +	dAtA[i] = 0xa
    +	return len(dAtA) - i, nil
    +}
    +
    +func (m *CounterSet) Marshal() (dAtA []byte, err error) {
    +	size := m.Size()
    +	dAtA = make([]byte, size)
    +	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    +	if err != nil {
    +		return nil, err
    +	}
    +	return dAtA[:n], nil
    +}
    +
    +func (m *CounterSet) MarshalTo(dAtA []byte) (int, error) {
    +	size := m.Size()
    +	return m.MarshalToSizedBuffer(dAtA[:size])
    +}
    +
    +func (m *CounterSet) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    +	i := len(dAtA)
    +	_ = i
    +	var l int
    +	_ = l
    +	if len(m.Counters) > 0 {
    +		keysForCounters := make([]string, 0, len(m.Counters))
    +		for k := range m.Counters {
    +			keysForCounters = append(keysForCounters, string(k))
    +		}
    +		github_com_gogo_protobuf_sortkeys.Strings(keysForCounters)
    +		for iNdEx := len(keysForCounters) - 1; iNdEx >= 0; iNdEx-- {
    +			v := m.Counters[string(keysForCounters[iNdEx])]
    +			baseI := i
    +			{
    +				size, err := (&v).MarshalToSizedBuffer(dAtA[:i])
    +				if err != nil {
    +					return 0, err
    +				}
    +				i -= size
    +				i = encodeVarintGenerated(dAtA, i, uint64(size))
    +			}
    +			i--
    +			dAtA[i] = 0x12
    +			i -= len(keysForCounters[iNdEx])
    +			copy(dAtA[i:], keysForCounters[iNdEx])
    +			i = encodeVarintGenerated(dAtA, i, uint64(len(keysForCounters[iNdEx])))
    +			i--
    +			dAtA[i] = 0xa
    +			i = encodeVarintGenerated(dAtA, i, uint64(baseI-i))
    +			i--
    +			dAtA[i] = 0x12
    +		}
    +	}
    +	i -= len(m.Name)
    +	copy(dAtA[i:], m.Name)
    +	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
    +	i--
    +	dAtA[i] = 0xa
    +	return len(dAtA) - i, nil
    +}
    +
    +func (m *Device) Marshal() (dAtA []byte, err error) {
    +	size := m.Size()
    +	dAtA = make([]byte, size)
    +	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    +	if err != nil {
    +		return nil, err
    +	}
    +	return dAtA[:n], nil
    +}
    +
    +func (m *Device) MarshalTo(dAtA []byte) (int, error) {
    +	size := m.Size()
    +	return m.MarshalToSizedBuffer(dAtA[:size])
    +}
    +
    +func (m *Device) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    +	i := len(dAtA)
    +	_ = i
    +	var l int
    +	_ = l
    +	if m.AllowMultipleAllocations != nil {
    +		i--
    +		if *m.AllowMultipleAllocations {
    +			dAtA[i] = 1
    +		} else {
    +			dAtA[i] = 0
    +		}
    +		i--
    +		dAtA[i] = 0x60
    +	}
    +	if len(m.BindingFailureConditions) > 0 {
    +		for iNdEx := len(m.BindingFailureConditions) - 1; iNdEx >= 0; iNdEx-- {
    +			i -= len(m.BindingFailureConditions[iNdEx])
    +			copy(dAtA[i:], m.BindingFailureConditions[iNdEx])
    +			i = encodeVarintGenerated(dAtA, i, uint64(len(m.BindingFailureConditions[iNdEx])))
    +			i--
    +			dAtA[i] = 0x5a
    +		}
    +	}
    +	if len(m.BindingConditions) > 0 {
    +		for iNdEx := len(m.BindingConditions) - 1; iNdEx >= 0; iNdEx-- {
    +			i -= len(m.BindingConditions[iNdEx])
    +			copy(dAtA[i:], m.BindingConditions[iNdEx])
    +			i = encodeVarintGenerated(dAtA, i, uint64(len(m.BindingConditions[iNdEx])))
    +			i--
    +			dAtA[i] = 0x52
    +		}
    +	}
    +	if m.BindsToNode != nil {
    +		i--
    +		if *m.BindsToNode {
    +			dAtA[i] = 1
    +		} else {
    +			dAtA[i] = 0
    +		}
    +		i--
    +		dAtA[i] = 0x48
    +	}
    +	if len(m.Taints) > 0 {
    +		for iNdEx := len(m.Taints) - 1; iNdEx >= 0; iNdEx-- {
    +			{
    +				size, err := m.Taints[iNdEx].MarshalToSizedBuffer(dAtA[:i])
    +				if err != nil {
    +					return 0, err
    +				}
    +				i -= size
    +				i = encodeVarintGenerated(dAtA, i, uint64(size))
    +			}
    +			i--
    +			dAtA[i] = 0x42
    +		}
    +	}
    +	if m.AllNodes != nil {
    +		i--
    +		if *m.AllNodes {
    +			dAtA[i] = 1
    +		} else {
    +			dAtA[i] = 0
    +		}
    +		i--
    +		dAtA[i] = 0x38
    +	}
    +	if m.NodeSelector != nil {
    +		{
    +			size, err := m.NodeSelector.MarshalToSizedBuffer(dAtA[:i])
    +			if err != nil {
    +				return 0, err
    +			}
    +			i -= size
    +			i = encodeVarintGenerated(dAtA, i, uint64(size))
    +		}
    +		i--
    +		dAtA[i] = 0x32
    +	}
    +	if m.NodeName != nil {
    +		i -= len(*m.NodeName)
    +		copy(dAtA[i:], *m.NodeName)
    +		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.NodeName)))
    +		i--
    +		dAtA[i] = 0x2a
    +	}
    +	if len(m.ConsumesCounters) > 0 {
    +		for iNdEx := len(m.ConsumesCounters) - 1; iNdEx >= 0; iNdEx-- {
    +			{
    +				size, err := m.ConsumesCounters[iNdEx].MarshalToSizedBuffer(dAtA[:i])
    +				if err != nil {
    +					return 0, err
    +				}
    +				i -= size
    +				i = encodeVarintGenerated(dAtA, i, uint64(size))
    +			}
    +			i--
    +			dAtA[i] = 0x22
    +		}
    +	}
    +	if len(m.Capacity) > 0 {
    +		keysForCapacity := make([]string, 0, len(m.Capacity))
    +		for k := range m.Capacity {
    +			keysForCapacity = append(keysForCapacity, string(k))
    +		}
    +		github_com_gogo_protobuf_sortkeys.Strings(keysForCapacity)
    +		for iNdEx := len(keysForCapacity) - 1; iNdEx >= 0; iNdEx-- {
    +			v := m.Capacity[QualifiedName(keysForCapacity[iNdEx])]
    +			baseI := i
    +			{
    +				size, err := (&v).MarshalToSizedBuffer(dAtA[:i])
    +				if err != nil {
    +					return 0, err
    +				}
    +				i -= size
    +				i = encodeVarintGenerated(dAtA, i, uint64(size))
    +			}
    +			i--
    +			dAtA[i] = 0x12
    +			i -= len(keysForCapacity[iNdEx])
    +			copy(dAtA[i:], keysForCapacity[iNdEx])
    +			i = encodeVarintGenerated(dAtA, i, uint64(len(keysForCapacity[iNdEx])))
    +			i--
    +			dAtA[i] = 0xa
    +			i = encodeVarintGenerated(dAtA, i, uint64(baseI-i))
    +			i--
    +			dAtA[i] = 0x1a
    +		}
    +	}
    +	if len(m.Attributes) > 0 {
    +		keysForAttributes := make([]string, 0, len(m.Attributes))
    +		for k := range m.Attributes {
    +			keysForAttributes = append(keysForAttributes, string(k))
    +		}
    +		github_com_gogo_protobuf_sortkeys.Strings(keysForAttributes)
    +		for iNdEx := len(keysForAttributes) - 1; iNdEx >= 0; iNdEx-- {
    +			v := m.Attributes[QualifiedName(keysForAttributes[iNdEx])]
    +			baseI := i
    +			{
    +				size, err := (&v).MarshalToSizedBuffer(dAtA[:i])
    +				if err != nil {
    +					return 0, err
    +				}
    +				i -= size
    +				i = encodeVarintGenerated(dAtA, i, uint64(size))
    +			}
    +			i--
    +			dAtA[i] = 0x12
    +			i -= len(keysForAttributes[iNdEx])
    +			copy(dAtA[i:], keysForAttributes[iNdEx])
    +			i = encodeVarintGenerated(dAtA, i, uint64(len(keysForAttributes[iNdEx])))
    +			i--
    +			dAtA[i] = 0xa
    +			i = encodeVarintGenerated(dAtA, i, uint64(baseI-i))
    +			i--
    +			dAtA[i] = 0x12
    +		}
    +	}
    +	i -= len(m.Name)
    +	copy(dAtA[i:], m.Name)
    +	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
    +	i--
    +	dAtA[i] = 0xa
    +	return len(dAtA) - i, nil
    +}
    +
    +func (m *DeviceAllocationConfiguration) Marshal() (dAtA []byte, err error) {
    +	size := m.Size()
    +	dAtA = make([]byte, size)
    +	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    +	if err != nil {
    +		return nil, err
    +	}
    +	return dAtA[:n], nil
    +}
    +
    +func (m *DeviceAllocationConfiguration) MarshalTo(dAtA []byte) (int, error) {
    +	size := m.Size()
    +	return m.MarshalToSizedBuffer(dAtA[:size])
    +}
    +
    +func (m *DeviceAllocationConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    +	i := len(dAtA)
    +	_ = i
    +	var l int
    +	_ = l
    +	{
    +		size, err := m.DeviceConfiguration.MarshalToSizedBuffer(dAtA[:i])
    +		if err != nil {
    +			return 0, err
    +		}
    +		i -= size
    +		i = encodeVarintGenerated(dAtA, i, uint64(size))
    +	}
    +	i--
    +	dAtA[i] = 0x1a
    +	if len(m.Requests) > 0 {
    +		for iNdEx := len(m.Requests) - 1; iNdEx >= 0; iNdEx-- {
    +			i -= len(m.Requests[iNdEx])
    +			copy(dAtA[i:], m.Requests[iNdEx])
    +			i = encodeVarintGenerated(dAtA, i, uint64(len(m.Requests[iNdEx])))
    +			i--
    +			dAtA[i] = 0x12
    +		}
    +	}
    +	i -= len(m.Source)
    +	copy(dAtA[i:], m.Source)
    +	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Source)))
    +	i--
    +	dAtA[i] = 0xa
    +	return len(dAtA) - i, nil
    +}
    +
    +func (m *DeviceAllocationResult) Marshal() (dAtA []byte, err error) {
    +	size := m.Size()
    +	dAtA = make([]byte, size)
    +	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    +	if err != nil {
    +		return nil, err
    +	}
    +	return dAtA[:n], nil
    +}
    +
    +func (m *DeviceAllocationResult) MarshalTo(dAtA []byte) (int, error) {
    +	size := m.Size()
    +	return m.MarshalToSizedBuffer(dAtA[:size])
    +}
    +
    +func (m *DeviceAllocationResult) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    +	i := len(dAtA)
    +	_ = i
    +	var l int
    +	_ = l
    +	if len(m.Config) > 0 {
    +		for iNdEx := len(m.Config) - 1; iNdEx >= 0; iNdEx-- {
    +			{
    +				size, err := m.Config[iNdEx].MarshalToSizedBuffer(dAtA[:i])
    +				if err != nil {
    +					return 0, err
    +				}
    +				i -= size
    +				i = encodeVarintGenerated(dAtA, i, uint64(size))
    +			}
    +			i--
    +			dAtA[i] = 0x12
    +		}
    +	}
    +	if len(m.Results) > 0 {
    +		for iNdEx := len(m.Results) - 1; iNdEx >= 0; iNdEx-- {
    +			{
    +				size, err := m.Results[iNdEx].MarshalToSizedBuffer(dAtA[:i])
    +				if err != nil {
    +					return 0, err
    +				}
    +				i -= size
    +				i = encodeVarintGenerated(dAtA, i, uint64(size))
    +			}
    +			i--
    +			dAtA[i] = 0xa
    +		}
    +	}
    +	return len(dAtA) - i, nil
    +}
    +
    +func (m *DeviceAttribute) Marshal() (dAtA []byte, err error) {
    +	size := m.Size()
    +	dAtA = make([]byte, size)
    +	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    +	if err != nil {
    +		return nil, err
    +	}
    +	return dAtA[:n], nil
    +}
    +
    +func (m *DeviceAttribute) MarshalTo(dAtA []byte) (int, error) {
    +	size := m.Size()
    +	return m.MarshalToSizedBuffer(dAtA[:size])
    +}
    +
    +func (m *DeviceAttribute) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    +	i := len(dAtA)
    +	_ = i
    +	var l int
    +	_ = l
    +	if m.VersionValue != nil {
    +		i -= len(*m.VersionValue)
    +		copy(dAtA[i:], *m.VersionValue)
    +		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.VersionValue)))
    +		i--
    +		dAtA[i] = 0x2a
    +	}
    +	if m.StringValue != nil {
    +		i -= len(*m.StringValue)
    +		copy(dAtA[i:], *m.StringValue)
    +		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.StringValue)))
    +		i--
    +		dAtA[i] = 0x22
    +	}
    +	if m.BoolValue != nil {
    +		i--
    +		if *m.BoolValue {
    +			dAtA[i] = 1
    +		} else {
    +			dAtA[i] = 0
    +		}
    +		i--
    +		dAtA[i] = 0x18
    +	}
    +	if m.IntValue != nil {
    +		i = encodeVarintGenerated(dAtA, i, uint64(*m.IntValue))
    +		i--
    +		dAtA[i] = 0x10
    +	}
    +	return len(dAtA) - i, nil
    +}
    +
    +func (m *DeviceCapacity) Marshal() (dAtA []byte, err error) {
    +	size := m.Size()
    +	dAtA = make([]byte, size)
    +	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    +	if err != nil {
    +		return nil, err
    +	}
    +	return dAtA[:n], nil
    +}
    +
    +func (m *DeviceCapacity) MarshalTo(dAtA []byte) (int, error) {
    +	size := m.Size()
    +	return m.MarshalToSizedBuffer(dAtA[:size])
    +}
    +
    +func (m *DeviceCapacity) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    +	i := len(dAtA)
    +	_ = i
    +	var l int
    +	_ = l
    +	if m.RequestPolicy != nil {
    +		{
    +			size, err := m.RequestPolicy.MarshalToSizedBuffer(dAtA[:i])
    +			if err != nil {
    +				return 0, err
    +			}
    +			i -= size
    +			i = encodeVarintGenerated(dAtA, i, uint64(size))
    +		}
    +		i--
    +		dAtA[i] = 0x12
    +	}
    +	{
    +		size, err := m.Value.MarshalToSizedBuffer(dAtA[:i])
    +		if err != nil {
    +			return 0, err
    +		}
    +		i -= size
    +		i = encodeVarintGenerated(dAtA, i, uint64(size))
    +	}
    +	i--
    +	dAtA[i] = 0xa
    +	return len(dAtA) - i, nil
    +}
    +
    +func (m *DeviceClaim) Marshal() (dAtA []byte, err error) {
    +	size := m.Size()
    +	dAtA = make([]byte, size)
    +	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    +	if err != nil {
    +		return nil, err
    +	}
    +	return dAtA[:n], nil
    +}
    +
    +func (m *DeviceClaim) MarshalTo(dAtA []byte) (int, error) {
    +	size := m.Size()
    +	return m.MarshalToSizedBuffer(dAtA[:size])
    +}
    +
    +func (m *DeviceClaim) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    +	i := len(dAtA)
    +	_ = i
    +	var l int
    +	_ = l
    +	if len(m.Config) > 0 {
    +		for iNdEx := len(m.Config) - 1; iNdEx >= 0; iNdEx-- {
    +			{
    +				size, err := m.Config[iNdEx].MarshalToSizedBuffer(dAtA[:i])
    +				if err != nil {
    +					return 0, err
    +				}
    +				i -= size
    +				i = encodeVarintGenerated(dAtA, i, uint64(size))
    +			}
    +			i--
    +			dAtA[i] = 0x1a
    +		}
    +	}
    +	if len(m.Constraints) > 0 {
    +		for iNdEx := len(m.Constraints) - 1; iNdEx >= 0; iNdEx-- {
    +			{
    +				size, err := m.Constraints[iNdEx].MarshalToSizedBuffer(dAtA[:i])
    +				if err != nil {
    +					return 0, err
    +				}
    +				i -= size
    +				i = encodeVarintGenerated(dAtA, i, uint64(size))
    +			}
    +			i--
    +			dAtA[i] = 0x12
    +		}
    +	}
    +	if len(m.Requests) > 0 {
    +		for iNdEx := len(m.Requests) - 1; iNdEx >= 0; iNdEx-- {
    +			{
    +				size, err := m.Requests[iNdEx].MarshalToSizedBuffer(dAtA[:i])
    +				if err != nil {
    +					return 0, err
    +				}
    +				i -= size
    +				i = encodeVarintGenerated(dAtA, i, uint64(size))
    +			}
    +			i--
    +			dAtA[i] = 0xa
    +		}
    +	}
    +	return len(dAtA) - i, nil
    +}
    +
    +func (m *DeviceClaimConfiguration) Marshal() (dAtA []byte, err error) {
    +	size := m.Size()
    +	dAtA = make([]byte, size)
    +	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    +	if err != nil {
    +		return nil, err
    +	}
    +	return dAtA[:n], nil
    +}
    +
    +func (m *DeviceClaimConfiguration) MarshalTo(dAtA []byte) (int, error) {
    +	size := m.Size()
    +	return m.MarshalToSizedBuffer(dAtA[:size])
    +}
    +
    +func (m *DeviceClaimConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    +	i := len(dAtA)
    +	_ = i
    +	var l int
    +	_ = l
    +	{
    +		size, err := m.DeviceConfiguration.MarshalToSizedBuffer(dAtA[:i])
    +		if err != nil {
    +			return 0, err
    +		}
    +		i -= size
    +		i = encodeVarintGenerated(dAtA, i, uint64(size))
    +	}
    +	i--
    +	dAtA[i] = 0x12
    +	if len(m.Requests) > 0 {
    +		for iNdEx := len(m.Requests) - 1; iNdEx >= 0; iNdEx-- {
    +			i -= len(m.Requests[iNdEx])
    +			copy(dAtA[i:], m.Requests[iNdEx])
    +			i = encodeVarintGenerated(dAtA, i, uint64(len(m.Requests[iNdEx])))
    +			i--
    +			dAtA[i] = 0xa
    +		}
    +	}
    +	return len(dAtA) - i, nil
    +}
    +
    +func (m *DeviceClass) Marshal() (dAtA []byte, err error) {
    +	size := m.Size()
    +	dAtA = make([]byte, size)
    +	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    +	if err != nil {
    +		return nil, err
    +	}
    +	return dAtA[:n], nil
    +}
    +
    +func (m *DeviceClass) MarshalTo(dAtA []byte) (int, error) {
    +	size := m.Size()
    +	return m.MarshalToSizedBuffer(dAtA[:size])
    +}
    +
    +func (m *DeviceClass) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    +	i := len(dAtA)
    +	_ = i
    +	var l int
    +	_ = l
    +	{
    +		size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
    +		if err != nil {
    +			return 0, err
    +		}
    +		i -= size
    +		i = encodeVarintGenerated(dAtA, i, uint64(size))
    +	}
    +	i--
    +	dAtA[i] = 0x12
    +	{
    +		size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
    +		if err != nil {
    +			return 0, err
    +		}
    +		i -= size
    +		i = encodeVarintGenerated(dAtA, i, uint64(size))
    +	}
    +	i--
    +	dAtA[i] = 0xa
    +	return len(dAtA) - i, nil
    +}
    +
    +func (m *DeviceClassConfiguration) Marshal() (dAtA []byte, err error) {
    +	size := m.Size()
    +	dAtA = make([]byte, size)
    +	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    +	if err != nil {
    +		return nil, err
    +	}
    +	return dAtA[:n], nil
    +}
    +
    +func (m *DeviceClassConfiguration) MarshalTo(dAtA []byte) (int, error) {
    +	size := m.Size()
    +	return m.MarshalToSizedBuffer(dAtA[:size])
    +}
    +
    +func (m *DeviceClassConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    +	i := len(dAtA)
    +	_ = i
    +	var l int
    +	_ = l
    +	{
    +		size, err := m.DeviceConfiguration.MarshalToSizedBuffer(dAtA[:i])
    +		if err != nil {
    +			return 0, err
    +		}
    +		i -= size
    +		i = encodeVarintGenerated(dAtA, i, uint64(size))
    +	}
    +	i--
    +	dAtA[i] = 0xa
    +	return len(dAtA) - i, nil
    +}
    +
    +func (m *DeviceClassList) Marshal() (dAtA []byte, err error) {
    +	size := m.Size()
    +	dAtA = make([]byte, size)
    +	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    +	if err != nil {
    +		return nil, err
    +	}
    +	return dAtA[:n], nil
    +}
    +
    +func (m *DeviceClassList) MarshalTo(dAtA []byte) (int, error) {
    +	size := m.Size()
    +	return m.MarshalToSizedBuffer(dAtA[:size])
    +}
    +
    +func (m *DeviceClassList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    +	i := len(dAtA)
    +	_ = i
    +	var l int
    +	_ = l
    +	if len(m.Items) > 0 {
    +		for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
    +			{
    +				size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
    +				if err != nil {
    +					return 0, err
    +				}
    +				i -= size
    +				i = encodeVarintGenerated(dAtA, i, uint64(size))
    +			}
    +			i--
    +			dAtA[i] = 0x12
    +		}
    +	}
    +	{
    +		size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
    +		if err != nil {
    +			return 0, err
    +		}
    +		i -= size
    +		i = encodeVarintGenerated(dAtA, i, uint64(size))
    +	}
    +	i--
    +	dAtA[i] = 0xa
    +	return len(dAtA) - i, nil
    +}
    +
    +func (m *DeviceClassSpec) Marshal() (dAtA []byte, err error) {
    +	size := m.Size()
    +	dAtA = make([]byte, size)
    +	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    +	if err != nil {
    +		return nil, err
    +	}
    +	return dAtA[:n], nil
    +}
    +
    +func (m *DeviceClassSpec) MarshalTo(dAtA []byte) (int, error) {
    +	size := m.Size()
    +	return m.MarshalToSizedBuffer(dAtA[:size])
    +}
    +
    +func (m *DeviceClassSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    +	i := len(dAtA)
    +	_ = i
    +	var l int
    +	_ = l
    +	if m.ExtendedResourceName != nil {
    +		i -= len(*m.ExtendedResourceName)
    +		copy(dAtA[i:], *m.ExtendedResourceName)
    +		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ExtendedResourceName)))
    +		i--
    +		dAtA[i] = 0x22
    +	}
    +	if len(m.Config) > 0 {
    +		for iNdEx := len(m.Config) - 1; iNdEx >= 0; iNdEx-- {
    +			{
    +				size, err := m.Config[iNdEx].MarshalToSizedBuffer(dAtA[:i])
    +				if err != nil {
    +					return 0, err
    +				}
    +				i -= size
    +				i = encodeVarintGenerated(dAtA, i, uint64(size))
    +			}
    +			i--
    +			dAtA[i] = 0x12
    +		}
    +	}
    +	if len(m.Selectors) > 0 {
    +		for iNdEx := len(m.Selectors) - 1; iNdEx >= 0; iNdEx-- {
    +			{
    +				size, err := m.Selectors[iNdEx].MarshalToSizedBuffer(dAtA[:i])
    +				if err != nil {
    +					return 0, err
    +				}
    +				i -= size
    +				i = encodeVarintGenerated(dAtA, i, uint64(size))
    +			}
    +			i--
    +			dAtA[i] = 0xa
    +		}
    +	}
    +	return len(dAtA) - i, nil
    +}
    +
    +func (m *DeviceConfiguration) Marshal() (dAtA []byte, err error) {
    +	size := m.Size()
    +	dAtA = make([]byte, size)
    +	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    +	if err != nil {
    +		return nil, err
    +	}
    +	return dAtA[:n], nil
    +}
    +
    +func (m *DeviceConfiguration) MarshalTo(dAtA []byte) (int, error) {
    +	size := m.Size()
    +	return m.MarshalToSizedBuffer(dAtA[:size])
    +}
    +
    +func (m *DeviceConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    +	i := len(dAtA)
    +	_ = i
    +	var l int
    +	_ = l
    +	if m.Opaque != nil {
    +		{
    +			size, err := m.Opaque.MarshalToSizedBuffer(dAtA[:i])
    +			if err != nil {
    +				return 0, err
    +			}
    +			i -= size
    +			i = encodeVarintGenerated(dAtA, i, uint64(size))
    +		}
    +		i--
    +		dAtA[i] = 0xa
    +	}
    +	return len(dAtA) - i, nil
    +}
    +
    +func (m *DeviceConstraint) Marshal() (dAtA []byte, err error) {
    +	size := m.Size()
    +	dAtA = make([]byte, size)
    +	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    +	if err != nil {
    +		return nil, err
    +	}
    +	return dAtA[:n], nil
    +}
    +
    +func (m *DeviceConstraint) MarshalTo(dAtA []byte) (int, error) {
    +	size := m.Size()
    +	return m.MarshalToSizedBuffer(dAtA[:size])
    +}
    +
    +func (m *DeviceConstraint) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    +	i := len(dAtA)
    +	_ = i
    +	var l int
    +	_ = l
    +	if m.DistinctAttribute != nil {
    +		i -= len(*m.DistinctAttribute)
    +		copy(dAtA[i:], *m.DistinctAttribute)
    +		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.DistinctAttribute)))
    +		i--
    +		dAtA[i] = 0x1a
    +	}
    +	if m.MatchAttribute != nil {
    +		i -= len(*m.MatchAttribute)
    +		copy(dAtA[i:], *m.MatchAttribute)
    +		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.MatchAttribute)))
    +		i--
    +		dAtA[i] = 0x12
    +	}
    +	if len(m.Requests) > 0 {
    +		for iNdEx := len(m.Requests) - 1; iNdEx >= 0; iNdEx-- {
    +			i -= len(m.Requests[iNdEx])
    +			copy(dAtA[i:], m.Requests[iNdEx])
    +			i = encodeVarintGenerated(dAtA, i, uint64(len(m.Requests[iNdEx])))
    +			i--
    +			dAtA[i] = 0xa
    +		}
    +	}
    +	return len(dAtA) - i, nil
    +}
    +
    +func (m *DeviceCounterConsumption) Marshal() (dAtA []byte, err error) {
    +	size := m.Size()
    +	dAtA = make([]byte, size)
    +	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    +	if err != nil {
    +		return nil, err
    +	}
    +	return dAtA[:n], nil
    +}
    +
    +func (m *DeviceCounterConsumption) MarshalTo(dAtA []byte) (int, error) {
    +	size := m.Size()
    +	return m.MarshalToSizedBuffer(dAtA[:size])
    +}
    +
    +func (m *DeviceCounterConsumption) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    +	i := len(dAtA)
    +	_ = i
    +	var l int
    +	_ = l
    +	if len(m.Counters) > 0 {
    +		keysForCounters := make([]string, 0, len(m.Counters))
    +		for k := range m.Counters {
    +			keysForCounters = append(keysForCounters, string(k))
    +		}
    +		github_com_gogo_protobuf_sortkeys.Strings(keysForCounters)
    +		for iNdEx := len(keysForCounters) - 1; iNdEx >= 0; iNdEx-- {
    +			v := m.Counters[string(keysForCounters[iNdEx])]
    +			baseI := i
    +			{
    +				size, err := (&v).MarshalToSizedBuffer(dAtA[:i])
    +				if err != nil {
    +					return 0, err
    +				}
    +				i -= size
    +				i = encodeVarintGenerated(dAtA, i, uint64(size))
    +			}
    +			i--
    +			dAtA[i] = 0x12
    +			i -= len(keysForCounters[iNdEx])
    +			copy(dAtA[i:], keysForCounters[iNdEx])
    +			i = encodeVarintGenerated(dAtA, i, uint64(len(keysForCounters[iNdEx])))
    +			i--
    +			dAtA[i] = 0xa
    +			i = encodeVarintGenerated(dAtA, i, uint64(baseI-i))
    +			i--
    +			dAtA[i] = 0x12
    +		}
    +	}
    +	i -= len(m.CounterSet)
    +	copy(dAtA[i:], m.CounterSet)
    +	i = encodeVarintGenerated(dAtA, i, uint64(len(m.CounterSet)))
    +	i--
    +	dAtA[i] = 0xa
    +	return len(dAtA) - i, nil
    +}
    +
    +func (m *DeviceRequest) Marshal() (dAtA []byte, err error) {
    +	size := m.Size()
    +	dAtA = make([]byte, size)
    +	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    +	if err != nil {
    +		return nil, err
    +	}
    +	return dAtA[:n], nil
    +}
    +
    +func (m *DeviceRequest) MarshalTo(dAtA []byte) (int, error) {
    +	size := m.Size()
    +	return m.MarshalToSizedBuffer(dAtA[:size])
    +}
    +
    +func (m *DeviceRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    +	i := len(dAtA)
    +	_ = i
    +	var l int
    +	_ = l
    +	if len(m.FirstAvailable) > 0 {
    +		for iNdEx := len(m.FirstAvailable) - 1; iNdEx >= 0; iNdEx-- {
    +			{
    +				size, err := m.FirstAvailable[iNdEx].MarshalToSizedBuffer(dAtA[:i])
    +				if err != nil {
    +					return 0, err
    +				}
    +				i -= size
    +				i = encodeVarintGenerated(dAtA, i, uint64(size))
    +			}
    +			i--
    +			dAtA[i] = 0x1a
    +		}
    +	}
    +	if m.Exactly != nil {
    +		{
    +			size, err := m.Exactly.MarshalToSizedBuffer(dAtA[:i])
    +			if err != nil {
    +				return 0, err
    +			}
    +			i -= size
    +			i = encodeVarintGenerated(dAtA, i, uint64(size))
    +		}
    +		i--
    +		dAtA[i] = 0x12
    +	}
    +	i -= len(m.Name)
    +	copy(dAtA[i:], m.Name)
    +	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
    +	i--
    +	dAtA[i] = 0xa
    +	return len(dAtA) - i, nil
    +}
    +
    +func (m *DeviceRequestAllocationResult) Marshal() (dAtA []byte, err error) {
    +	size := m.Size()
    +	dAtA = make([]byte, size)
    +	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    +	if err != nil {
    +		return nil, err
    +	}
    +	return dAtA[:n], nil
    +}
    +
    +func (m *DeviceRequestAllocationResult) MarshalTo(dAtA []byte) (int, error) {
    +	size := m.Size()
    +	return m.MarshalToSizedBuffer(dAtA[:size])
    +}
    +
    +func (m *DeviceRequestAllocationResult) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    +	i := len(dAtA)
    +	_ = i
    +	var l int
    +	_ = l
    +	if len(m.ConsumedCapacity) > 0 {
    +		keysForConsumedCapacity := make([]string, 0, len(m.ConsumedCapacity))
    +		for k := range m.ConsumedCapacity {
    +			keysForConsumedCapacity = append(keysForConsumedCapacity, string(k))
    +		}
    +		github_com_gogo_protobuf_sortkeys.Strings(keysForConsumedCapacity)
    +		for iNdEx := len(keysForConsumedCapacity) - 1; iNdEx >= 0; iNdEx-- {
    +			v := m.ConsumedCapacity[QualifiedName(keysForConsumedCapacity[iNdEx])]
    +			baseI := i
    +			{
    +				size, err := (&v).MarshalToSizedBuffer(dAtA[:i])
    +				if err != nil {
    +					return 0, err
    +				}
    +				i -= size
    +				i = encodeVarintGenerated(dAtA, i, uint64(size))
    +			}
    +			i--
    +			dAtA[i] = 0x12
    +			i -= len(keysForConsumedCapacity[iNdEx])
    +			copy(dAtA[i:], keysForConsumedCapacity[iNdEx])
    +			i = encodeVarintGenerated(dAtA, i, uint64(len(keysForConsumedCapacity[iNdEx])))
    +			i--
    +			dAtA[i] = 0xa
    +			i = encodeVarintGenerated(dAtA, i, uint64(baseI-i))
    +			i--
    +			dAtA[i] = 0x52
    +		}
    +	}
    +	if m.ShareID != nil {
    +		i -= len(*m.ShareID)
    +		copy(dAtA[i:], *m.ShareID)
    +		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ShareID)))
    +		i--
    +		dAtA[i] = 0x4a
    +	}
    +	if len(m.BindingFailureConditions) > 0 {
    +		for iNdEx := len(m.BindingFailureConditions) - 1; iNdEx >= 0; iNdEx-- {
    +			i -= len(m.BindingFailureConditions[iNdEx])
    +			copy(dAtA[i:], m.BindingFailureConditions[iNdEx])
    +			i = encodeVarintGenerated(dAtA, i, uint64(len(m.BindingFailureConditions[iNdEx])))
    +			i--
    +			dAtA[i] = 0x42
    +		}
    +	}
    +	if len(m.BindingConditions) > 0 {
    +		for iNdEx := len(m.BindingConditions) - 1; iNdEx >= 0; iNdEx-- {
    +			i -= len(m.BindingConditions[iNdEx])
    +			copy(dAtA[i:], m.BindingConditions[iNdEx])
    +			i = encodeVarintGenerated(dAtA, i, uint64(len(m.BindingConditions[iNdEx])))
    +			i--
    +			dAtA[i] = 0x3a
    +		}
    +	}
    +	if len(m.Tolerations) > 0 {
    +		for iNdEx := len(m.Tolerations) - 1; iNdEx >= 0; iNdEx-- {
    +			{
    +				size, err := m.Tolerations[iNdEx].MarshalToSizedBuffer(dAtA[:i])
    +				if err != nil {
    +					return 0, err
    +				}
    +				i -= size
    +				i = encodeVarintGenerated(dAtA, i, uint64(size))
    +			}
    +			i--
    +			dAtA[i] = 0x32
    +		}
    +	}
    +	if m.AdminAccess != nil {
    +		i--
    +		if *m.AdminAccess {
    +			dAtA[i] = 1
    +		} else {
    +			dAtA[i] = 0
    +		}
    +		i--
    +		dAtA[i] = 0x28
    +	}
    +	i -= len(m.Device)
    +	copy(dAtA[i:], m.Device)
    +	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Device)))
    +	i--
    +	dAtA[i] = 0x22
    +	i -= len(m.Pool)
    +	copy(dAtA[i:], m.Pool)
    +	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Pool)))
    +	i--
    +	dAtA[i] = 0x1a
    +	i -= len(m.Driver)
    +	copy(dAtA[i:], m.Driver)
    +	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Driver)))
    +	i--
    +	dAtA[i] = 0x12
    +	i -= len(m.Request)
    +	copy(dAtA[i:], m.Request)
    +	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Request)))
    +	i--
    +	dAtA[i] = 0xa
    +	return len(dAtA) - i, nil
    +}
    +
    +func (m *DeviceSelector) Marshal() (dAtA []byte, err error) {
    +	size := m.Size()
    +	dAtA = make([]byte, size)
    +	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    +	if err != nil {
    +		return nil, err
    +	}
    +	return dAtA[:n], nil
    +}
    +
    +func (m *DeviceSelector) MarshalTo(dAtA []byte) (int, error) {
    +	size := m.Size()
    +	return m.MarshalToSizedBuffer(dAtA[:size])
    +}
    +
    +func (m *DeviceSelector) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    +	i := len(dAtA)
    +	_ = i
    +	var l int
    +	_ = l
    +	if m.CEL != nil {
    +		{
    +			size, err := m.CEL.MarshalToSizedBuffer(dAtA[:i])
    +			if err != nil {
    +				return 0, err
    +			}
    +			i -= size
    +			i = encodeVarintGenerated(dAtA, i, uint64(size))
    +		}
    +		i--
    +		dAtA[i] = 0xa
    +	}
    +	return len(dAtA) - i, nil
    +}
    +
    +func (m *DeviceSubRequest) Marshal() (dAtA []byte, err error) {
    +	size := m.Size()
    +	dAtA = make([]byte, size)
    +	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    +	if err != nil {
    +		return nil, err
    +	}
    +	return dAtA[:n], nil
    +}
    +
    +func (m *DeviceSubRequest) MarshalTo(dAtA []byte) (int, error) {
    +	size := m.Size()
    +	return m.MarshalToSizedBuffer(dAtA[:size])
    +}
    +
    +func (m *DeviceSubRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    +	i := len(dAtA)
    +	_ = i
    +	var l int
    +	_ = l
    +	if m.Capacity != nil {
    +		{
    +			size, err := m.Capacity.MarshalToSizedBuffer(dAtA[:i])
    +			if err != nil {
    +				return 0, err
    +			}
    +			i -= size
    +			i = encodeVarintGenerated(dAtA, i, uint64(size))
    +		}
    +		i--
    +		dAtA[i] = 0x3a
    +	}
    +	if len(m.Tolerations) > 0 {
    +		for iNdEx := len(m.Tolerations) - 1; iNdEx >= 0; iNdEx-- {
    +			{
    +				size, err := m.Tolerations[iNdEx].MarshalToSizedBuffer(dAtA[:i])
    +				if err != nil {
    +					return 0, err
    +				}
    +				i -= size
    +				i = encodeVarintGenerated(dAtA, i, uint64(size))
    +			}
    +			i--
    +			dAtA[i] = 0x32
    +		}
    +	}
    +	i = encodeVarintGenerated(dAtA, i, uint64(m.Count))
    +	i--
    +	dAtA[i] = 0x28
    +	i -= len(m.AllocationMode)
    +	copy(dAtA[i:], m.AllocationMode)
    +	i = encodeVarintGenerated(dAtA, i, uint64(len(m.AllocationMode)))
    +	i--
    +	dAtA[i] = 0x22
    +	if len(m.Selectors) > 0 {
    +		for iNdEx := len(m.Selectors) - 1; iNdEx >= 0; iNdEx-- {
    +			{
    +				size, err := m.Selectors[iNdEx].MarshalToSizedBuffer(dAtA[:i])
    +				if err != nil {
    +					return 0, err
    +				}
    +				i -= size
    +				i = encodeVarintGenerated(dAtA, i, uint64(size))
    +			}
    +			i--
    +			dAtA[i] = 0x1a
    +		}
    +	}
    +	i -= len(m.DeviceClassName)
    +	copy(dAtA[i:], m.DeviceClassName)
    +	i = encodeVarintGenerated(dAtA, i, uint64(len(m.DeviceClassName)))
    +	i--
    +	dAtA[i] = 0x12
    +	i -= len(m.Name)
    +	copy(dAtA[i:], m.Name)
    +	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
    +	i--
    +	dAtA[i] = 0xa
    +	return len(dAtA) - i, nil
    +}
    +
    +func (m *DeviceTaint) Marshal() (dAtA []byte, err error) {
    +	size := m.Size()
    +	dAtA = make([]byte, size)
    +	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    +	if err != nil {
    +		return nil, err
    +	}
    +	return dAtA[:n], nil
    +}
    +
    +func (m *DeviceTaint) MarshalTo(dAtA []byte) (int, error) {
    +	size := m.Size()
    +	return m.MarshalToSizedBuffer(dAtA[:size])
    +}
    +
    +func (m *DeviceTaint) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    +	i := len(dAtA)
    +	_ = i
    +	var l int
    +	_ = l
    +	if m.TimeAdded != nil {
    +		{
    +			size, err := m.TimeAdded.MarshalToSizedBuffer(dAtA[:i])
    +			if err != nil {
    +				return 0, err
    +			}
    +			i -= size
    +			i = encodeVarintGenerated(dAtA, i, uint64(size))
    +		}
    +		i--
    +		dAtA[i] = 0x22
    +	}
    +	i -= len(m.Effect)
    +	copy(dAtA[i:], m.Effect)
    +	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Effect)))
    +	i--
    +	dAtA[i] = 0x1a
    +	i -= len(m.Value)
    +	copy(dAtA[i:], m.Value)
    +	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Value)))
    +	i--
    +	dAtA[i] = 0x12
    +	i -= len(m.Key)
    +	copy(dAtA[i:], m.Key)
    +	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key)))
    +	i--
    +	dAtA[i] = 0xa
    +	return len(dAtA) - i, nil
    +}
    +
    +func (m *DeviceToleration) Marshal() (dAtA []byte, err error) {
    +	size := m.Size()
    +	dAtA = make([]byte, size)
    +	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    +	if err != nil {
    +		return nil, err
    +	}
    +	return dAtA[:n], nil
    +}
    +
    +func (m *DeviceToleration) MarshalTo(dAtA []byte) (int, error) {
    +	size := m.Size()
    +	return m.MarshalToSizedBuffer(dAtA[:size])
    +}
    +
    +func (m *DeviceToleration) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    +	i := len(dAtA)
    +	_ = i
    +	var l int
    +	_ = l
    +	if m.TolerationSeconds != nil {
    +		i = encodeVarintGenerated(dAtA, i, uint64(*m.TolerationSeconds))
    +		i--
    +		dAtA[i] = 0x28
    +	}
    +	i -= len(m.Effect)
    +	copy(dAtA[i:], m.Effect)
    +	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Effect)))
    +	i--
    +	dAtA[i] = 0x22
    +	i -= len(m.Value)
    +	copy(dAtA[i:], m.Value)
    +	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Value)))
    +	i--
    +	dAtA[i] = 0x1a
    +	i -= len(m.Operator)
    +	copy(dAtA[i:], m.Operator)
    +	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Operator)))
    +	i--
    +	dAtA[i] = 0x12
    +	i -= len(m.Key)
    +	copy(dAtA[i:], m.Key)
    +	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key)))
    +	i--
    +	dAtA[i] = 0xa
    +	return len(dAtA) - i, nil
    +}
    +
    +func (m *ExactDeviceRequest) Marshal() (dAtA []byte, err error) {
    +	size := m.Size()
    +	dAtA = make([]byte, size)
    +	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    +	if err != nil {
    +		return nil, err
    +	}
    +	return dAtA[:n], nil
    +}
    +
    +func (m *ExactDeviceRequest) MarshalTo(dAtA []byte) (int, error) {
    +	size := m.Size()
    +	return m.MarshalToSizedBuffer(dAtA[:size])
    +}
    +
    +func (m *ExactDeviceRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    +	i := len(dAtA)
    +	_ = i
    +	var l int
    +	_ = l
    +	if m.Capacity != nil {
    +		{
    +			size, err := m.Capacity.MarshalToSizedBuffer(dAtA[:i])
    +			if err != nil {
    +				return 0, err
    +			}
    +			i -= size
    +			i = encodeVarintGenerated(dAtA, i, uint64(size))
    +		}
    +		i--
    +		dAtA[i] = 0x3a
    +	}
    +	if len(m.Tolerations) > 0 {
    +		for iNdEx := len(m.Tolerations) - 1; iNdEx >= 0; iNdEx-- {
    +			{
    +				size, err := m.Tolerations[iNdEx].MarshalToSizedBuffer(dAtA[:i])
    +				if err != nil {
    +					return 0, err
    +				}
    +				i -= size
    +				i = encodeVarintGenerated(dAtA, i, uint64(size))
    +			}
    +			i--
    +			dAtA[i] = 0x32
    +		}
    +	}
    +	if m.AdminAccess != nil {
    +		i--
    +		if *m.AdminAccess {
    +			dAtA[i] = 1
    +		} else {
    +			dAtA[i] = 0
    +		}
    +		i--
    +		dAtA[i] = 0x28
    +	}
    +	i = encodeVarintGenerated(dAtA, i, uint64(m.Count))
    +	i--
    +	dAtA[i] = 0x20
    +	i -= len(m.AllocationMode)
    +	copy(dAtA[i:], m.AllocationMode)
    +	i = encodeVarintGenerated(dAtA, i, uint64(len(m.AllocationMode)))
    +	i--
    +	dAtA[i] = 0x1a
    +	if len(m.Selectors) > 0 {
    +		for iNdEx := len(m.Selectors) - 1; iNdEx >= 0; iNdEx-- {
    +			{
    +				size, err := m.Selectors[iNdEx].MarshalToSizedBuffer(dAtA[:i])
    +				if err != nil {
    +					return 0, err
    +				}
    +				i -= size
    +				i = encodeVarintGenerated(dAtA, i, uint64(size))
    +			}
    +			i--
    +			dAtA[i] = 0x12
    +		}
    +	}
    +	i -= len(m.DeviceClassName)
    +	copy(dAtA[i:], m.DeviceClassName)
    +	i = encodeVarintGenerated(dAtA, i, uint64(len(m.DeviceClassName)))
    +	i--
    +	dAtA[i] = 0xa
    +	return len(dAtA) - i, nil
    +}
    +
    +func (m *NetworkDeviceData) Marshal() (dAtA []byte, err error) {
    +	size := m.Size()
    +	dAtA = make([]byte, size)
    +	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    +	if err != nil {
    +		return nil, err
    +	}
    +	return dAtA[:n], nil
    +}
    +
    +func (m *NetworkDeviceData) MarshalTo(dAtA []byte) (int, error) {
    +	size := m.Size()
    +	return m.MarshalToSizedBuffer(dAtA[:size])
    +}
    +
    +func (m *NetworkDeviceData) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    +	i := len(dAtA)
    +	_ = i
    +	var l int
    +	_ = l
    +	i -= len(m.HardwareAddress)
    +	copy(dAtA[i:], m.HardwareAddress)
    +	i = encodeVarintGenerated(dAtA, i, uint64(len(m.HardwareAddress)))
    +	i--
    +	dAtA[i] = 0x1a
    +	if len(m.IPs) > 0 {
    +		for iNdEx := len(m.IPs) - 1; iNdEx >= 0; iNdEx-- {
    +			i -= len(m.IPs[iNdEx])
    +			copy(dAtA[i:], m.IPs[iNdEx])
    +			i = encodeVarintGenerated(dAtA, i, uint64(len(m.IPs[iNdEx])))
    +			i--
    +			dAtA[i] = 0x12
    +		}
    +	}
    +	i -= len(m.InterfaceName)
    +	copy(dAtA[i:], m.InterfaceName)
    +	i = encodeVarintGenerated(dAtA, i, uint64(len(m.InterfaceName)))
    +	i--
    +	dAtA[i] = 0xa
    +	return len(dAtA) - i, nil
    +}
    +
    +func (m *OpaqueDeviceConfiguration) Marshal() (dAtA []byte, err error) {
    +	size := m.Size()
    +	dAtA = make([]byte, size)
    +	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    +	if err != nil {
    +		return nil, err
    +	}
    +	return dAtA[:n], nil
    +}
    +
    +func (m *OpaqueDeviceConfiguration) MarshalTo(dAtA []byte) (int, error) {
    +	size := m.Size()
    +	return m.MarshalToSizedBuffer(dAtA[:size])
    +}
    +
    +func (m *OpaqueDeviceConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    +	i := len(dAtA)
    +	_ = i
    +	var l int
    +	_ = l
    +	{
    +		size, err := m.Parameters.MarshalToSizedBuffer(dAtA[:i])
    +		if err != nil {
    +			return 0, err
    +		}
    +		i -= size
    +		i = encodeVarintGenerated(dAtA, i, uint64(size))
    +	}
    +	i--
    +	dAtA[i] = 0x12
    +	i -= len(m.Driver)
    +	copy(dAtA[i:], m.Driver)
    +	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Driver)))
    +	i--
    +	dAtA[i] = 0xa
    +	return len(dAtA) - i, nil
    +}
    +
    +func (m *ResourceClaim) Marshal() (dAtA []byte, err error) {
    +	size := m.Size()
    +	dAtA = make([]byte, size)
    +	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    +	if err != nil {
    +		return nil, err
    +	}
    +	return dAtA[:n], nil
    +}
    +
    +func (m *ResourceClaim) MarshalTo(dAtA []byte) (int, error) {
    +	size := m.Size()
    +	return m.MarshalToSizedBuffer(dAtA[:size])
    +}
    +
    +func (m *ResourceClaim) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    +	i := len(dAtA)
    +	_ = i
    +	var l int
    +	_ = l
    +	{
    +		size, err := m.Status.MarshalToSizedBuffer(dAtA[:i])
    +		if err != nil {
    +			return 0, err
    +		}
    +		i -= size
    +		i = encodeVarintGenerated(dAtA, i, uint64(size))
    +	}
    +	i--
    +	dAtA[i] = 0x1a
    +	{
    +		size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
    +		if err != nil {
    +			return 0, err
    +		}
    +		i -= size
    +		i = encodeVarintGenerated(dAtA, i, uint64(size))
    +	}
    +	i--
    +	dAtA[i] = 0x12
    +	{
    +		size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
    +		if err != nil {
    +			return 0, err
    +		}
    +		i -= size
    +		i = encodeVarintGenerated(dAtA, i, uint64(size))
    +	}
    +	i--
    +	dAtA[i] = 0xa
    +	return len(dAtA) - i, nil
    +}
    +
    +func (m *ResourceClaimConsumerReference) Marshal() (dAtA []byte, err error) {
    +	size := m.Size()
    +	dAtA = make([]byte, size)
    +	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    +	if err != nil {
    +		return nil, err
    +	}
    +	return dAtA[:n], nil
    +}
    +
    +func (m *ResourceClaimConsumerReference) MarshalTo(dAtA []byte) (int, error) {
    +	size := m.Size()
    +	return m.MarshalToSizedBuffer(dAtA[:size])
    +}
    +
    +func (m *ResourceClaimConsumerReference) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    +	i := len(dAtA)
    +	_ = i
    +	var l int
    +	_ = l
    +	i -= len(m.UID)
    +	copy(dAtA[i:], m.UID)
    +	i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID)))
    +	i--
    +	dAtA[i] = 0x2a
    +	i -= len(m.Name)
    +	copy(dAtA[i:], m.Name)
    +	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
    +	i--
    +	dAtA[i] = 0x22
    +	i -= len(m.Resource)
    +	copy(dAtA[i:], m.Resource)
    +	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resource)))
    +	i--
    +	dAtA[i] = 0x1a
    +	i -= len(m.APIGroup)
    +	copy(dAtA[i:], m.APIGroup)
    +	i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIGroup)))
    +	i--
    +	dAtA[i] = 0xa
    +	return len(dAtA) - i, nil
    +}
    +
    +func (m *ResourceClaimList) Marshal() (dAtA []byte, err error) {
    +	size := m.Size()
    +	dAtA = make([]byte, size)
    +	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    +	if err != nil {
    +		return nil, err
    +	}
    +	return dAtA[:n], nil
    +}
    +
    +func (m *ResourceClaimList) MarshalTo(dAtA []byte) (int, error) {
    +	size := m.Size()
    +	return m.MarshalToSizedBuffer(dAtA[:size])
    +}
    +
    +func (m *ResourceClaimList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    +	i := len(dAtA)
    +	_ = i
    +	var l int
    +	_ = l
    +	if len(m.Items) > 0 {
    +		for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
    +			{
    +				size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
    +				if err != nil {
    +					return 0, err
    +				}
    +				i -= size
    +				i = encodeVarintGenerated(dAtA, i, uint64(size))
    +			}
    +			i--
    +			dAtA[i] = 0x12
    +		}
    +	}
    +	{
    +		size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
    +		if err != nil {
    +			return 0, err
    +		}
    +		i -= size
    +		i = encodeVarintGenerated(dAtA, i, uint64(size))
    +	}
    +	i--
    +	dAtA[i] = 0xa
    +	return len(dAtA) - i, nil
    +}
    +
    +func (m *ResourceClaimSpec) Marshal() (dAtA []byte, err error) {
    +	size := m.Size()
    +	dAtA = make([]byte, size)
    +	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    +	if err != nil {
    +		return nil, err
    +	}
    +	return dAtA[:n], nil
    +}
    +
    +func (m *ResourceClaimSpec) MarshalTo(dAtA []byte) (int, error) {
    +	size := m.Size()
    +	return m.MarshalToSizedBuffer(dAtA[:size])
    +}
    +
    +func (m *ResourceClaimSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    +	i := len(dAtA)
    +	_ = i
    +	var l int
    +	_ = l
    +	{
    +		size, err := m.Devices.MarshalToSizedBuffer(dAtA[:i])
    +		if err != nil {
    +			return 0, err
    +		}
    +		i -= size
    +		i = encodeVarintGenerated(dAtA, i, uint64(size))
    +	}
    +	i--
    +	dAtA[i] = 0xa
    +	return len(dAtA) - i, nil
    +}
    +
    +func (m *ResourceClaimStatus) Marshal() (dAtA []byte, err error) {
    +	size := m.Size()
    +	dAtA = make([]byte, size)
    +	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    +	if err != nil {
    +		return nil, err
    +	}
    +	return dAtA[:n], nil
    +}
    +
    +func (m *ResourceClaimStatus) MarshalTo(dAtA []byte) (int, error) {
    +	size := m.Size()
    +	return m.MarshalToSizedBuffer(dAtA[:size])
    +}
    +
    +func (m *ResourceClaimStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    +	i := len(dAtA)
    +	_ = i
    +	var l int
    +	_ = l
    +	if len(m.Devices) > 0 {
    +		for iNdEx := len(m.Devices) - 1; iNdEx >= 0; iNdEx-- {
    +			{
    +				size, err := m.Devices[iNdEx].MarshalToSizedBuffer(dAtA[:i])
    +				if err != nil {
    +					return 0, err
    +				}
    +				i -= size
    +				i = encodeVarintGenerated(dAtA, i, uint64(size))
    +			}
    +			i--
    +			dAtA[i] = 0x22
    +		}
    +	}
    +	if len(m.ReservedFor) > 0 {
    +		for iNdEx := len(m.ReservedFor) - 1; iNdEx >= 0; iNdEx-- {
    +			{
    +				size, err := m.ReservedFor[iNdEx].MarshalToSizedBuffer(dAtA[:i])
    +				if err != nil {
    +					return 0, err
    +				}
    +				i -= size
    +				i = encodeVarintGenerated(dAtA, i, uint64(size))
    +			}
    +			i--
    +			dAtA[i] = 0x12
    +		}
    +	}
    +	if m.Allocation != nil {
    +		{
    +			size, err := m.Allocation.MarshalToSizedBuffer(dAtA[:i])
    +			if err != nil {
    +				return 0, err
    +			}
    +			i -= size
    +			i = encodeVarintGenerated(dAtA, i, uint64(size))
    +		}
    +		i--
    +		dAtA[i] = 0xa
    +	}
    +	return len(dAtA) - i, nil
    +}
    +
    +func (m *ResourceClaimTemplate) Marshal() (dAtA []byte, err error) {
    +	size := m.Size()
    +	dAtA = make([]byte, size)
    +	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    +	if err != nil {
    +		return nil, err
    +	}
    +	return dAtA[:n], nil
    +}
    +
    +func (m *ResourceClaimTemplate) MarshalTo(dAtA []byte) (int, error) {
    +	size := m.Size()
    +	return m.MarshalToSizedBuffer(dAtA[:size])
    +}
    +
    +func (m *ResourceClaimTemplate) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    +	i := len(dAtA)
    +	_ = i
    +	var l int
    +	_ = l
    +	{
    +		size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
    +		if err != nil {
    +			return 0, err
    +		}
    +		i -= size
    +		i = encodeVarintGenerated(dAtA, i, uint64(size))
    +	}
    +	i--
    +	dAtA[i] = 0x12
    +	{
    +		size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
    +		if err != nil {
    +			return 0, err
    +		}
    +		i -= size
    +		i = encodeVarintGenerated(dAtA, i, uint64(size))
    +	}
    +	i--
    +	dAtA[i] = 0xa
    +	return len(dAtA) - i, nil
    +}
    +
    +func (m *ResourceClaimTemplateList) Marshal() (dAtA []byte, err error) {
    +	size := m.Size()
    +	dAtA = make([]byte, size)
    +	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    +	if err != nil {
    +		return nil, err
    +	}
    +	return dAtA[:n], nil
    +}
    +
    +func (m *ResourceClaimTemplateList) MarshalTo(dAtA []byte) (int, error) {
    +	size := m.Size()
    +	return m.MarshalToSizedBuffer(dAtA[:size])
    +}
    +
    +func (m *ResourceClaimTemplateList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    +	i := len(dAtA)
    +	_ = i
    +	var l int
    +	_ = l
    +	if len(m.Items) > 0 {
    +		for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
    +			{
    +				size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
    +				if err != nil {
    +					return 0, err
    +				}
    +				i -= size
    +				i = encodeVarintGenerated(dAtA, i, uint64(size))
    +			}
    +			i--
    +			dAtA[i] = 0x12
    +		}
    +	}
    +	{
    +		size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
    +		if err != nil {
    +			return 0, err
    +		}
    +		i -= size
    +		i = encodeVarintGenerated(dAtA, i, uint64(size))
    +	}
    +	i--
    +	dAtA[i] = 0xa
    +	return len(dAtA) - i, nil
    +}
    +
    +func (m *ResourceClaimTemplateSpec) Marshal() (dAtA []byte, err error) {
    +	size := m.Size()
    +	dAtA = make([]byte, size)
    +	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    +	if err != nil {
    +		return nil, err
    +	}
    +	return dAtA[:n], nil
    +}
    +
    +func (m *ResourceClaimTemplateSpec) MarshalTo(dAtA []byte) (int, error) {
    +	size := m.Size()
    +	return m.MarshalToSizedBuffer(dAtA[:size])
    +}
    +
    +func (m *ResourceClaimTemplateSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    +	i := len(dAtA)
    +	_ = i
    +	var l int
    +	_ = l
    +	{
    +		size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
    +		if err != nil {
    +			return 0, err
    +		}
    +		i -= size
    +		i = encodeVarintGenerated(dAtA, i, uint64(size))
    +	}
    +	i--
    +	dAtA[i] = 0x12
    +	{
    +		size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
    +		if err != nil {
    +			return 0, err
    +		}
    +		i -= size
    +		i = encodeVarintGenerated(dAtA, i, uint64(size))
    +	}
    +	i--
    +	dAtA[i] = 0xa
    +	return len(dAtA) - i, nil
    +}
    +
    +func (m *ResourcePool) Marshal() (dAtA []byte, err error) {
    +	size := m.Size()
    +	dAtA = make([]byte, size)
    +	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    +	if err != nil {
    +		return nil, err
    +	}
    +	return dAtA[:n], nil
    +}
    +
    +func (m *ResourcePool) MarshalTo(dAtA []byte) (int, error) {
    +	size := m.Size()
    +	return m.MarshalToSizedBuffer(dAtA[:size])
    +}
    +
    +func (m *ResourcePool) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    +	i := len(dAtA)
    +	_ = i
    +	var l int
    +	_ = l
    +	i = encodeVarintGenerated(dAtA, i, uint64(m.ResourceSliceCount))
    +	i--
    +	dAtA[i] = 0x18
    +	i = encodeVarintGenerated(dAtA, i, uint64(m.Generation))
    +	i--
    +	dAtA[i] = 0x10
    +	i -= len(m.Name)
    +	copy(dAtA[i:], m.Name)
    +	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
    +	i--
    +	dAtA[i] = 0xa
    +	return len(dAtA) - i, nil
    +}
    +
    +func (m *ResourceSlice) Marshal() (dAtA []byte, err error) {
    +	size := m.Size()
    +	dAtA = make([]byte, size)
    +	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    +	if err != nil {
    +		return nil, err
    +	}
    +	return dAtA[:n], nil
    +}
    +
    +func (m *ResourceSlice) MarshalTo(dAtA []byte) (int, error) {
    +	size := m.Size()
    +	return m.MarshalToSizedBuffer(dAtA[:size])
    +}
    +
    +func (m *ResourceSlice) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    +	i := len(dAtA)
    +	_ = i
    +	var l int
    +	_ = l
    +	{
    +		size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
    +		if err != nil {
    +			return 0, err
    +		}
    +		i -= size
    +		i = encodeVarintGenerated(dAtA, i, uint64(size))
    +	}
    +	i--
    +	dAtA[i] = 0x12
    +	{
    +		size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
    +		if err != nil {
    +			return 0, err
    +		}
    +		i -= size
    +		i = encodeVarintGenerated(dAtA, i, uint64(size))
    +	}
    +	i--
    +	dAtA[i] = 0xa
    +	return len(dAtA) - i, nil
    +}
    +
    +func (m *ResourceSliceList) Marshal() (dAtA []byte, err error) {
    +	size := m.Size()
    +	dAtA = make([]byte, size)
    +	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    +	if err != nil {
    +		return nil, err
    +	}
    +	return dAtA[:n], nil
    +}
    +
    +func (m *ResourceSliceList) MarshalTo(dAtA []byte) (int, error) {
    +	size := m.Size()
    +	return m.MarshalToSizedBuffer(dAtA[:size])
    +}
    +
    +func (m *ResourceSliceList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    +	i := len(dAtA)
    +	_ = i
    +	var l int
    +	_ = l
    +	if len(m.Items) > 0 {
    +		for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
    +			{
    +				size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
    +				if err != nil {
    +					return 0, err
    +				}
    +				i -= size
    +				i = encodeVarintGenerated(dAtA, i, uint64(size))
    +			}
    +			i--
    +			dAtA[i] = 0x12
    +		}
    +	}
    +	{
    +		size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
    +		if err != nil {
    +			return 0, err
    +		}
    +		i -= size
    +		i = encodeVarintGenerated(dAtA, i, uint64(size))
    +	}
    +	i--
    +	dAtA[i] = 0xa
    +	return len(dAtA) - i, nil
    +}
    +
    +func (m *ResourceSliceSpec) Marshal() (dAtA []byte, err error) {
    +	size := m.Size()
    +	dAtA = make([]byte, size)
    +	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    +	if err != nil {
    +		return nil, err
    +	}
    +	return dAtA[:n], nil
    +}
    +
    +func (m *ResourceSliceSpec) MarshalTo(dAtA []byte) (int, error) {
    +	size := m.Size()
    +	return m.MarshalToSizedBuffer(dAtA[:size])
    +}
    +
    +func (m *ResourceSliceSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    +	i := len(dAtA)
    +	_ = i
    +	var l int
    +	_ = l
    +	if len(m.SharedCounters) > 0 {
    +		for iNdEx := len(m.SharedCounters) - 1; iNdEx >= 0; iNdEx-- {
    +			{
    +				size, err := m.SharedCounters[iNdEx].MarshalToSizedBuffer(dAtA[:i])
    +				if err != nil {
    +					return 0, err
    +				}
    +				i -= size
    +				i = encodeVarintGenerated(dAtA, i, uint64(size))
    +			}
    +			i--
    +			dAtA[i] = 0x42
    +		}
    +	}
    +	if m.PerDeviceNodeSelection != nil {
    +		i--
    +		if *m.PerDeviceNodeSelection {
    +			dAtA[i] = 1
    +		} else {
    +			dAtA[i] = 0
    +		}
    +		i--
    +		dAtA[i] = 0x38
    +	}
    +	if len(m.Devices) > 0 {
    +		for iNdEx := len(m.Devices) - 1; iNdEx >= 0; iNdEx-- {
    +			{
    +				size, err := m.Devices[iNdEx].MarshalToSizedBuffer(dAtA[:i])
    +				if err != nil {
    +					return 0, err
    +				}
    +				i -= size
    +				i = encodeVarintGenerated(dAtA, i, uint64(size))
    +			}
    +			i--
    +			dAtA[i] = 0x32
    +		}
    +	}
    +	if m.AllNodes != nil {
    +		i--
    +		if *m.AllNodes {
    +			dAtA[i] = 1
    +		} else {
    +			dAtA[i] = 0
    +		}
    +		i--
    +		dAtA[i] = 0x28
    +	}
    +	if m.NodeSelector != nil {
    +		{
    +			size, err := m.NodeSelector.MarshalToSizedBuffer(dAtA[:i])
    +			if err != nil {
    +				return 0, err
    +			}
    +			i -= size
    +			i = encodeVarintGenerated(dAtA, i, uint64(size))
    +		}
    +		i--
    +		dAtA[i] = 0x22
    +	}
    +	if m.NodeName != nil {
    +		i -= len(*m.NodeName)
    +		copy(dAtA[i:], *m.NodeName)
    +		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.NodeName)))
    +		i--
    +		dAtA[i] = 0x1a
    +	}
    +	{
    +		size, err := m.Pool.MarshalToSizedBuffer(dAtA[:i])
    +		if err != nil {
    +			return 0, err
    +		}
    +		i -= size
    +		i = encodeVarintGenerated(dAtA, i, uint64(size))
    +	}
    +	i--
    +	dAtA[i] = 0x12
    +	i -= len(m.Driver)
    +	copy(dAtA[i:], m.Driver)
    +	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Driver)))
    +	i--
    +	dAtA[i] = 0xa
    +	return len(dAtA) - i, nil
    +}
    +
    +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int {
    +	offset -= sovGenerated(v)
    +	base := offset
    +	for v >= 1<<7 {
    +		dAtA[offset] = uint8(v&0x7f | 0x80)
    +		v >>= 7
    +		offset++
    +	}
    +	dAtA[offset] = uint8(v)
    +	return base
    +}
    +func (m *AllocatedDeviceStatus) Size() (n int) {
    +	if m == nil {
    +		return 0
    +	}
    +	var l int
    +	_ = l
    +	l = len(m.Driver)
    +	n += 1 + l + sovGenerated(uint64(l))
    +	l = len(m.Pool)
    +	n += 1 + l + sovGenerated(uint64(l))
    +	l = len(m.Device)
    +	n += 1 + l + sovGenerated(uint64(l))
    +	if len(m.Conditions) > 0 {
    +		for _, e := range m.Conditions {
    +			l = e.Size()
    +			n += 1 + l + sovGenerated(uint64(l))
    +		}
    +	}
    +	if m.Data != nil {
    +		l = m.Data.Size()
    +		n += 1 + l + sovGenerated(uint64(l))
    +	}
    +	if m.NetworkData != nil {
    +		l = m.NetworkData.Size()
    +		n += 1 + l + sovGenerated(uint64(l))
    +	}
    +	if m.ShareID != nil {
    +		l = len(*m.ShareID)
    +		n += 1 + l + sovGenerated(uint64(l))
    +	}
    +	return n
    +}
    +
    +func (m *AllocationResult) Size() (n int) {
    +	if m == nil {
    +		return 0
    +	}
    +	var l int
    +	_ = l
    +	l = m.Devices.Size()
    +	n += 1 + l + sovGenerated(uint64(l))
    +	if m.NodeSelector != nil {
    +		l = m.NodeSelector.Size()
    +		n += 1 + l + sovGenerated(uint64(l))
    +	}
    +	if m.AllocationTimestamp != nil {
    +		l = m.AllocationTimestamp.Size()
    +		n += 1 + l + sovGenerated(uint64(l))
    +	}
    +	return n
    +}
    +
    +func (m *CELDeviceSelector) Size() (n int) {
    +	if m == nil {
    +		return 0
    +	}
    +	var l int
    +	_ = l
    +	l = len(m.Expression)
    +	n += 1 + l + sovGenerated(uint64(l))
    +	return n
    +}
    +
    +func (m *CapacityRequestPolicy) Size() (n int) {
    +	if m == nil {
    +		return 0
    +	}
    +	var l int
    +	_ = l
    +	if m.Default != nil {
    +		l = m.Default.Size()
    +		n += 1 + l + sovGenerated(uint64(l))
    +	}
    +	if len(m.ValidValues) > 0 {
    +		for _, e := range m.ValidValues {
    +			l = e.Size()
    +			n += 1 + l + sovGenerated(uint64(l))
    +		}
    +	}
    +	if m.ValidRange != nil {
    +		l = m.ValidRange.Size()
    +		n += 1 + l + sovGenerated(uint64(l))
    +	}
    +	return n
    +}
    +
    +func (m *CapacityRequestPolicyRange) Size() (n int) {
    +	if m == nil {
    +		return 0
    +	}
    +	var l int
    +	_ = l
    +	if m.Min != nil {
    +		l = m.Min.Size()
    +		n += 1 + l + sovGenerated(uint64(l))
    +	}
    +	if m.Max != nil {
    +		l = m.Max.Size()
    +		n += 1 + l + sovGenerated(uint64(l))
    +	}
    +	if m.Step != nil {
    +		l = m.Step.Size()
    +		n += 1 + l + sovGenerated(uint64(l))
    +	}
    +	return n
    +}
    +
    +func (m *CapacityRequirements) Size() (n int) {
    +	if m == nil {
    +		return 0
    +	}
    +	var l int
    +	_ = l
    +	if len(m.Requests) > 0 {
    +		for k, v := range m.Requests {
    +			_ = k
    +			_ = v
    +			l = v.Size()
    +			mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l))
    +			n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
    +		}
    +	}
    +	return n
    +}
    +
    +func (m *Counter) Size() (n int) {
    +	if m == nil {
    +		return 0
    +	}
    +	var l int
    +	_ = l
    +	l = m.Value.Size()
    +	n += 1 + l + sovGenerated(uint64(l))
    +	return n
    +}
    +
    +func (m *CounterSet) Size() (n int) {
    +	if m == nil {
    +		return 0
    +	}
    +	var l int
    +	_ = l
    +	l = len(m.Name)
    +	n += 1 + l + sovGenerated(uint64(l))
    +	if len(m.Counters) > 0 {
    +		for k, v := range m.Counters {
    +			_ = k
    +			_ = v
    +			l = v.Size()
    +			mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l))
    +			n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
    +		}
    +	}
    +	return n
    +}
    +
    +func (m *Device) Size() (n int) {
    +	if m == nil {
    +		return 0
    +	}
    +	var l int
    +	_ = l
    +	l = len(m.Name)
    +	n += 1 + l + sovGenerated(uint64(l))
    +	if len(m.Attributes) > 0 {
    +		for k, v := range m.Attributes {
    +			_ = k
    +			_ = v
    +			l = v.Size()
    +			mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l))
    +			n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
    +		}
    +	}
    +	if len(m.Capacity) > 0 {
    +		for k, v := range m.Capacity {
    +			_ = k
    +			_ = v
    +			l = v.Size()
    +			mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l))
    +			n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
    +		}
    +	}
    +	if len(m.ConsumesCounters) > 0 {
    +		for _, e := range m.ConsumesCounters {
    +			l = e.Size()
    +			n += 1 + l + sovGenerated(uint64(l))
    +		}
    +	}
    +	if m.NodeName != nil {
    +		l = len(*m.NodeName)
    +		n += 1 + l + sovGenerated(uint64(l))
    +	}
    +	if m.NodeSelector != nil {
    +		l = m.NodeSelector.Size()
    +		n += 1 + l + sovGenerated(uint64(l))
    +	}
    +	if m.AllNodes != nil {
    +		n += 2
    +	}
    +	if len(m.Taints) > 0 {
    +		for _, e := range m.Taints {
    +			l = e.Size()
    +			n += 1 + l + sovGenerated(uint64(l))
    +		}
    +	}
    +	if m.BindsToNode != nil {
    +		n += 2
    +	}
    +	if len(m.BindingConditions) > 0 {
    +		for _, s := range m.BindingConditions {
    +			l = len(s)
    +			n += 1 + l + sovGenerated(uint64(l))
    +		}
    +	}
    +	if len(m.BindingFailureConditions) > 0 {
    +		for _, s := range m.BindingFailureConditions {
    +			l = len(s)
    +			n += 1 + l + sovGenerated(uint64(l))
    +		}
    +	}
    +	if m.AllowMultipleAllocations != nil {
    +		n += 2
    +	}
    +	return n
    +}
    +
    +func (m *DeviceAllocationConfiguration) Size() (n int) {
    +	if m == nil {
    +		return 0
    +	}
    +	var l int
    +	_ = l
    +	l = len(m.Source)
    +	n += 1 + l + sovGenerated(uint64(l))
    +	if len(m.Requests) > 0 {
    +		for _, s := range m.Requests {
    +			l = len(s)
    +			n += 1 + l + sovGenerated(uint64(l))
    +		}
    +	}
    +	l = m.DeviceConfiguration.Size()
    +	n += 1 + l + sovGenerated(uint64(l))
    +	return n
    +}
    +
    +func (m *DeviceAllocationResult) Size() (n int) {
    +	if m == nil {
    +		return 0
    +	}
    +	var l int
    +	_ = l
    +	if len(m.Results) > 0 {
    +		for _, e := range m.Results {
    +			l = e.Size()
    +			n += 1 + l + sovGenerated(uint64(l))
    +		}
    +	}
    +	if len(m.Config) > 0 {
    +		for _, e := range m.Config {
    +			l = e.Size()
    +			n += 1 + l + sovGenerated(uint64(l))
    +		}
    +	}
    +	return n
    +}
    +
    +func (m *DeviceAttribute) Size() (n int) {
    +	if m == nil {
    +		return 0
    +	}
    +	var l int
    +	_ = l
    +	if m.IntValue != nil {
    +		n += 1 + sovGenerated(uint64(*m.IntValue))
    +	}
    +	if m.BoolValue != nil {
    +		n += 2
    +	}
    +	if m.StringValue != nil {
    +		l = len(*m.StringValue)
    +		n += 1 + l + sovGenerated(uint64(l))
    +	}
    +	if m.VersionValue != nil {
    +		l = len(*m.VersionValue)
    +		n += 1 + l + sovGenerated(uint64(l))
    +	}
    +	return n
    +}
    +
    +func (m *DeviceCapacity) Size() (n int) {
    +	if m == nil {
    +		return 0
    +	}
    +	var l int
    +	_ = l
    +	l = m.Value.Size()
    +	n += 1 + l + sovGenerated(uint64(l))
    +	if m.RequestPolicy != nil {
    +		l = m.RequestPolicy.Size()
    +		n += 1 + l + sovGenerated(uint64(l))
    +	}
    +	return n
    +}
    +
    +func (m *DeviceClaim) Size() (n int) {
    +	if m == nil {
    +		return 0
    +	}
    +	var l int
    +	_ = l
    +	if len(m.Requests) > 0 {
    +		for _, e := range m.Requests {
    +			l = e.Size()
    +			n += 1 + l + sovGenerated(uint64(l))
    +		}
    +	}
    +	if len(m.Constraints) > 0 {
    +		for _, e := range m.Constraints {
    +			l = e.Size()
    +			n += 1 + l + sovGenerated(uint64(l))
    +		}
    +	}
    +	if len(m.Config) > 0 {
    +		for _, e := range m.Config {
    +			l = e.Size()
    +			n += 1 + l + sovGenerated(uint64(l))
    +		}
    +	}
    +	return n
    +}
    +
    +func (m *DeviceClaimConfiguration) Size() (n int) {
    +	if m == nil {
    +		return 0
    +	}
    +	var l int
    +	_ = l
    +	if len(m.Requests) > 0 {
    +		for _, s := range m.Requests {
    +			l = len(s)
    +			n += 1 + l + sovGenerated(uint64(l))
    +		}
    +	}
    +	l = m.DeviceConfiguration.Size()
    +	n += 1 + l + sovGenerated(uint64(l))
    +	return n
    +}
    +
    +func (m *DeviceClass) Size() (n int) {
    +	if m == nil {
    +		return 0
    +	}
    +	var l int
    +	_ = l
    +	l = m.ObjectMeta.Size()
    +	n += 1 + l + sovGenerated(uint64(l))
    +	l = m.Spec.Size()
    +	n += 1 + l + sovGenerated(uint64(l))
    +	return n
    +}
    +
    +func (m *DeviceClassConfiguration) Size() (n int) {
    +	if m == nil {
    +		return 0
    +	}
    +	var l int
    +	_ = l
    +	l = m.DeviceConfiguration.Size()
    +	n += 1 + l + sovGenerated(uint64(l))
    +	return n
    +}
    +
    +func (m *DeviceClassList) Size() (n int) {
    +	if m == nil {
    +		return 0
    +	}
    +	var l int
    +	_ = l
    +	l = m.ListMeta.Size()
    +	n += 1 + l + sovGenerated(uint64(l))
    +	if len(m.Items) > 0 {
    +		for _, e := range m.Items {
    +			l = e.Size()
    +			n += 1 + l + sovGenerated(uint64(l))
    +		}
    +	}
    +	return n
    +}
    +
    +func (m *DeviceClassSpec) Size() (n int) {
    +	if m == nil {
    +		return 0
    +	}
    +	var l int
    +	_ = l
    +	if len(m.Selectors) > 0 {
    +		for _, e := range m.Selectors {
    +			l = e.Size()
    +			n += 1 + l + sovGenerated(uint64(l))
    +		}
    +	}
    +	if len(m.Config) > 0 {
    +		for _, e := range m.Config {
    +			l = e.Size()
    +			n += 1 + l + sovGenerated(uint64(l))
    +		}
    +	}
    +	if m.ExtendedResourceName != nil {
    +		l = len(*m.ExtendedResourceName)
    +		n += 1 + l + sovGenerated(uint64(l))
    +	}
    +	return n
    +}
    +
    +func (m *DeviceConfiguration) Size() (n int) {
    +	if m == nil {
    +		return 0
    +	}
    +	var l int
    +	_ = l
    +	if m.Opaque != nil {
    +		l = m.Opaque.Size()
    +		n += 1 + l + sovGenerated(uint64(l))
    +	}
    +	return n
    +}
    +
    +func (m *DeviceConstraint) Size() (n int) {
    +	if m == nil {
    +		return 0
    +	}
    +	var l int
    +	_ = l
    +	if len(m.Requests) > 0 {
    +		for _, s := range m.Requests {
    +			l = len(s)
    +			n += 1 + l + sovGenerated(uint64(l))
    +		}
    +	}
    +	if m.MatchAttribute != nil {
    +		l = len(*m.MatchAttribute)
    +		n += 1 + l + sovGenerated(uint64(l))
    +	}
    +	if m.DistinctAttribute != nil {
    +		l = len(*m.DistinctAttribute)
    +		n += 1 + l + sovGenerated(uint64(l))
    +	}
    +	return n
    +}
    +
    +func (m *DeviceCounterConsumption) Size() (n int) {
    +	if m == nil {
    +		return 0
    +	}
    +	var l int
    +	_ = l
    +	l = len(m.CounterSet)
    +	n += 1 + l + sovGenerated(uint64(l))
    +	if len(m.Counters) > 0 {
    +		for k, v := range m.Counters {
    +			_ = k
    +			_ = v
    +			l = v.Size()
    +			mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l))
    +			n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
    +		}
    +	}
    +	return n
    +}
    +
    +func (m *DeviceRequest) Size() (n int) {
    +	if m == nil {
    +		return 0
    +	}
    +	var l int
    +	_ = l
    +	l = len(m.Name)
    +	n += 1 + l + sovGenerated(uint64(l))
    +	if m.Exactly != nil {
    +		l = m.Exactly.Size()
    +		n += 1 + l + sovGenerated(uint64(l))
    +	}
    +	if len(m.FirstAvailable) > 0 {
    +		for _, e := range m.FirstAvailable {
    +			l = e.Size()
    +			n += 1 + l + sovGenerated(uint64(l))
    +		}
    +	}
    +	return n
    +}
    +
    +func (m *DeviceRequestAllocationResult) Size() (n int) {
    +	if m == nil {
    +		return 0
    +	}
    +	var l int
    +	_ = l
    +	l = len(m.Request)
    +	n += 1 + l + sovGenerated(uint64(l))
    +	l = len(m.Driver)
    +	n += 1 + l + sovGenerated(uint64(l))
    +	l = len(m.Pool)
    +	n += 1 + l + sovGenerated(uint64(l))
    +	l = len(m.Device)
    +	n += 1 + l + sovGenerated(uint64(l))
    +	if m.AdminAccess != nil {
    +		n += 2
    +	}
    +	if len(m.Tolerations) > 0 {
    +		for _, e := range m.Tolerations {
    +			l = e.Size()
    +			n += 1 + l + sovGenerated(uint64(l))
    +		}
    +	}
    +	if len(m.BindingConditions) > 0 {
    +		for _, s := range m.BindingConditions {
    +			l = len(s)
    +			n += 1 + l + sovGenerated(uint64(l))
    +		}
    +	}
    +	if len(m.BindingFailureConditions) > 0 {
    +		for _, s := range m.BindingFailureConditions {
    +			l = len(s)
    +			n += 1 + l + sovGenerated(uint64(l))
    +		}
    +	}
    +	if m.ShareID != nil {
    +		l = len(*m.ShareID)
    +		n += 1 + l + sovGenerated(uint64(l))
    +	}
    +	if len(m.ConsumedCapacity) > 0 {
    +		for k, v := range m.ConsumedCapacity {
    +			_ = k
    +			_ = v
    +			l = v.Size()
    +			mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l))
    +			n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
    +		}
    +	}
    +	return n
    +}
    +
    +func (m *DeviceSelector) Size() (n int) {
    +	if m == nil {
    +		return 0
    +	}
    +	var l int
    +	_ = l
    +	if m.CEL != nil {
    +		l = m.CEL.Size()
    +		n += 1 + l + sovGenerated(uint64(l))
    +	}
    +	return n
    +}
    +
    +func (m *DeviceSubRequest) Size() (n int) {
    +	if m == nil {
    +		return 0
    +	}
    +	var l int
    +	_ = l
    +	l = len(m.Name)
    +	n += 1 + l + sovGenerated(uint64(l))
    +	l = len(m.DeviceClassName)
    +	n += 1 + l + sovGenerated(uint64(l))
    +	if len(m.Selectors) > 0 {
    +		for _, e := range m.Selectors {
    +			l = e.Size()
    +			n += 1 + l + sovGenerated(uint64(l))
    +		}
    +	}
    +	l = len(m.AllocationMode)
    +	n += 1 + l + sovGenerated(uint64(l))
    +	n += 1 + sovGenerated(uint64(m.Count))
    +	if len(m.Tolerations) > 0 {
    +		for _, e := range m.Tolerations {
    +			l = e.Size()
    +			n += 1 + l + sovGenerated(uint64(l))
    +		}
    +	}
    +	if m.Capacity != nil {
    +		l = m.Capacity.Size()
    +		n += 1 + l + sovGenerated(uint64(l))
    +	}
    +	return n
    +}
    +
    +func (m *DeviceTaint) Size() (n int) {
    +	if m == nil {
    +		return 0
    +	}
    +	var l int
    +	_ = l
    +	l = len(m.Key)
    +	n += 1 + l + sovGenerated(uint64(l))
    +	l = len(m.Value)
    +	n += 1 + l + sovGenerated(uint64(l))
    +	l = len(m.Effect)
    +	n += 1 + l + sovGenerated(uint64(l))
    +	if m.TimeAdded != nil {
    +		l = m.TimeAdded.Size()
    +		n += 1 + l + sovGenerated(uint64(l))
    +	}
    +	return n
    +}
    +
    +func (m *DeviceToleration) Size() (n int) {
    +	if m == nil {
    +		return 0
    +	}
    +	var l int
    +	_ = l
    +	l = len(m.Key)
    +	n += 1 + l + sovGenerated(uint64(l))
    +	l = len(m.Operator)
    +	n += 1 + l + sovGenerated(uint64(l))
    +	l = len(m.Value)
    +	n += 1 + l + sovGenerated(uint64(l))
    +	l = len(m.Effect)
    +	n += 1 + l + sovGenerated(uint64(l))
    +	if m.TolerationSeconds != nil {
    +		n += 1 + sovGenerated(uint64(*m.TolerationSeconds))
    +	}
    +	return n
    +}
    +
    +func (m *ExactDeviceRequest) Size() (n int) {
    +	if m == nil {
    +		return 0
    +	}
    +	var l int
    +	_ = l
    +	l = len(m.DeviceClassName)
    +	n += 1 + l + sovGenerated(uint64(l))
    +	if len(m.Selectors) > 0 {
    +		for _, e := range m.Selectors {
    +			l = e.Size()
    +			n += 1 + l + sovGenerated(uint64(l))
    +		}
    +	}
    +	l = len(m.AllocationMode)
    +	n += 1 + l + sovGenerated(uint64(l))
    +	n += 1 + sovGenerated(uint64(m.Count))
    +	if m.AdminAccess != nil {
    +		n += 2
    +	}
    +	if len(m.Tolerations) > 0 {
    +		for _, e := range m.Tolerations {
    +			l = e.Size()
    +			n += 1 + l + sovGenerated(uint64(l))
    +		}
    +	}
    +	if m.Capacity != nil {
    +		l = m.Capacity.Size()
    +		n += 1 + l + sovGenerated(uint64(l))
    +	}
    +	return n
    +}
    +
    +func (m *NetworkDeviceData) Size() (n int) {
    +	if m == nil {
    +		return 0
    +	}
    +	var l int
    +	_ = l
    +	l = len(m.InterfaceName)
    +	n += 1 + l + sovGenerated(uint64(l))
    +	if len(m.IPs) > 0 {
    +		for _, s := range m.IPs {
    +			l = len(s)
    +			n += 1 + l + sovGenerated(uint64(l))
    +		}
    +	}
    +	l = len(m.HardwareAddress)
    +	n += 1 + l + sovGenerated(uint64(l))
    +	return n
    +}
    +
    +func (m *OpaqueDeviceConfiguration) Size() (n int) {
    +	if m == nil {
    +		return 0
    +	}
    +	var l int
    +	_ = l
    +	l = len(m.Driver)
    +	n += 1 + l + sovGenerated(uint64(l))
    +	l = m.Parameters.Size()
    +	n += 1 + l + sovGenerated(uint64(l))
    +	return n
    +}
    +
    +func (m *ResourceClaim) Size() (n int) {
    +	if m == nil {
    +		return 0
    +	}
    +	var l int
    +	_ = l
    +	l = m.ObjectMeta.Size()
    +	n += 1 + l + sovGenerated(uint64(l))
    +	l = m.Spec.Size()
    +	n += 1 + l + sovGenerated(uint64(l))
    +	l = m.Status.Size()
    +	n += 1 + l + sovGenerated(uint64(l))
    +	return n
    +}
    +
    +func (m *ResourceClaimConsumerReference) Size() (n int) {
    +	if m == nil {
    +		return 0
    +	}
    +	var l int
    +	_ = l
    +	l = len(m.APIGroup)
    +	n += 1 + l + sovGenerated(uint64(l))
    +	l = len(m.Resource)
    +	n += 1 + l + sovGenerated(uint64(l))
    +	l = len(m.Name)
    +	n += 1 + l + sovGenerated(uint64(l))
    +	l = len(m.UID)
    +	n += 1 + l + sovGenerated(uint64(l))
    +	return n
    +}
    +
    +func (m *ResourceClaimList) Size() (n int) {
    +	if m == nil {
    +		return 0
    +	}
    +	var l int
    +	_ = l
    +	l = m.ListMeta.Size()
    +	n += 1 + l + sovGenerated(uint64(l))
    +	if len(m.Items) > 0 {
    +		for _, e := range m.Items {
    +			l = e.Size()
    +			n += 1 + l + sovGenerated(uint64(l))
    +		}
    +	}
    +	return n
    +}
    +
    +func (m *ResourceClaimSpec) Size() (n int) {
    +	if m == nil {
    +		return 0
    +	}
    +	var l int
    +	_ = l
    +	l = m.Devices.Size()
    +	n += 1 + l + sovGenerated(uint64(l))
    +	return n
    +}
    +
    +func (m *ResourceClaimStatus) Size() (n int) {
    +	if m == nil {
    +		return 0
    +	}
    +	var l int
    +	_ = l
    +	if m.Allocation != nil {
    +		l = m.Allocation.Size()
    +		n += 1 + l + sovGenerated(uint64(l))
    +	}
    +	if len(m.ReservedFor) > 0 {
    +		for _, e := range m.ReservedFor {
    +			l = e.Size()
    +			n += 1 + l + sovGenerated(uint64(l))
    +		}
    +	}
    +	if len(m.Devices) > 0 {
    +		for _, e := range m.Devices {
    +			l = e.Size()
    +			n += 1 + l + sovGenerated(uint64(l))
    +		}
    +	}
    +	return n
    +}
    +
    +func (m *ResourceClaimTemplate) Size() (n int) {
    +	if m == nil {
    +		return 0
    +	}
    +	var l int
    +	_ = l
    +	l = m.ObjectMeta.Size()
    +	n += 1 + l + sovGenerated(uint64(l))
    +	l = m.Spec.Size()
    +	n += 1 + l + sovGenerated(uint64(l))
    +	return n
    +}
    +
    +func (m *ResourceClaimTemplateList) Size() (n int) {
    +	if m == nil {
    +		return 0
    +	}
    +	var l int
    +	_ = l
    +	l = m.ListMeta.Size()
    +	n += 1 + l + sovGenerated(uint64(l))
    +	if len(m.Items) > 0 {
    +		for _, e := range m.Items {
    +			l = e.Size()
    +			n += 1 + l + sovGenerated(uint64(l))
    +		}
    +	}
    +	return n
    +}
    +
    +func (m *ResourceClaimTemplateSpec) Size() (n int) {
    +	if m == nil {
    +		return 0
    +	}
    +	var l int
    +	_ = l
    +	l = m.ObjectMeta.Size()
    +	n += 1 + l + sovGenerated(uint64(l))
    +	l = m.Spec.Size()
    +	n += 1 + l + sovGenerated(uint64(l))
    +	return n
    +}
    +
    +func (m *ResourcePool) Size() (n int) {
    +	if m == nil {
    +		return 0
    +	}
    +	var l int
    +	_ = l
    +	l = len(m.Name)
    +	n += 1 + l + sovGenerated(uint64(l))
    +	n += 1 + sovGenerated(uint64(m.Generation))
    +	n += 1 + sovGenerated(uint64(m.ResourceSliceCount))
    +	return n
    +}
    +
    +func (m *ResourceSlice) Size() (n int) {
    +	if m == nil {
    +		return 0
    +	}
    +	var l int
    +	_ = l
    +	l = m.ObjectMeta.Size()
    +	n += 1 + l + sovGenerated(uint64(l))
    +	l = m.Spec.Size()
    +	n += 1 + l + sovGenerated(uint64(l))
    +	return n
    +}
    +
    +func (m *ResourceSliceList) Size() (n int) {
    +	if m == nil {
    +		return 0
    +	}
    +	var l int
    +	_ = l
    +	l = m.ListMeta.Size()
    +	n += 1 + l + sovGenerated(uint64(l))
    +	if len(m.Items) > 0 {
    +		for _, e := range m.Items {
    +			l = e.Size()
    +			n += 1 + l + sovGenerated(uint64(l))
    +		}
    +	}
    +	return n
    +}
    +
    +func (m *ResourceSliceSpec) Size() (n int) {
    +	if m == nil {
    +		return 0
    +	}
    +	var l int
    +	_ = l
    +	l = len(m.Driver)
    +	n += 1 + l + sovGenerated(uint64(l))
    +	l = m.Pool.Size()
    +	n += 1 + l + sovGenerated(uint64(l))
    +	if m.NodeName != nil {
    +		l = len(*m.NodeName)
    +		n += 1 + l + sovGenerated(uint64(l))
    +	}
    +	if m.NodeSelector != nil {
    +		l = m.NodeSelector.Size()
    +		n += 1 + l + sovGenerated(uint64(l))
    +	}
    +	if m.AllNodes != nil {
    +		n += 2
    +	}
    +	if len(m.Devices) > 0 {
    +		for _, e := range m.Devices {
    +			l = e.Size()
    +			n += 1 + l + sovGenerated(uint64(l))
    +		}
    +	}
    +	if m.PerDeviceNodeSelection != nil {
    +		n += 2
    +	}
    +	if len(m.SharedCounters) > 0 {
    +		for _, e := range m.SharedCounters {
    +			l = e.Size()
    +			n += 1 + l + sovGenerated(uint64(l))
    +		}
    +	}
    +	return n
    +}
    +
    +func sovGenerated(x uint64) (n int) {
    +	return (math_bits.Len64(x|1) + 6) / 7
    +}
    +func sozGenerated(x uint64) (n int) {
    +	return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63))))
    +}
    +func (this *AllocatedDeviceStatus) String() string {
    +	if this == nil {
    +		return "nil"
    +	}
    +	repeatedStringForConditions := "[]Condition{"
    +	for _, f := range this.Conditions {
    +		repeatedStringForConditions += fmt.Sprintf("%v", f) + ","
    +	}
    +	repeatedStringForConditions += "}"
    +	s := strings.Join([]string{`&AllocatedDeviceStatus{`,
    +		`Driver:` + fmt.Sprintf("%v", this.Driver) + `,`,
    +		`Pool:` + fmt.Sprintf("%v", this.Pool) + `,`,
    +		`Device:` + fmt.Sprintf("%v", this.Device) + `,`,
    +		`Conditions:` + repeatedStringForConditions + `,`,
    +		`Data:` + strings.Replace(fmt.Sprintf("%v", this.Data), "RawExtension", "runtime.RawExtension", 1) + `,`,
    +		`NetworkData:` + strings.Replace(this.NetworkData.String(), "NetworkDeviceData", "NetworkDeviceData", 1) + `,`,
    +		`ShareID:` + valueToStringGenerated(this.ShareID) + `,`,
    +		`}`,
    +	}, "")
    +	return s
    +}
    +func (this *AllocationResult) String() string {
    +	if this == nil {
    +		return "nil"
    +	}
    +	s := strings.Join([]string{`&AllocationResult{`,
    +		`Devices:` + strings.Replace(strings.Replace(this.Devices.String(), "DeviceAllocationResult", "DeviceAllocationResult", 1), `&`, ``, 1) + `,`,
    +		`NodeSelector:` + strings.Replace(fmt.Sprintf("%v", this.NodeSelector), "NodeSelector", "v11.NodeSelector", 1) + `,`,
    +		`AllocationTimestamp:` + strings.Replace(fmt.Sprintf("%v", this.AllocationTimestamp), "Time", "v1.Time", 1) + `,`,
    +		`}`,
    +	}, "")
    +	return s
    +}
    +func (this *CELDeviceSelector) String() string {
    +	if this == nil {
    +		return "nil"
    +	}
    +	s := strings.Join([]string{`&CELDeviceSelector{`,
    +		`Expression:` + fmt.Sprintf("%v", this.Expression) + `,`,
    +		`}`,
    +	}, "")
    +	return s
    +}
    +func (this *CapacityRequestPolicy) String() string {
    +	if this == nil {
    +		return "nil"
    +	}
    +	repeatedStringForValidValues := "[]Quantity{"
    +	for _, f := range this.ValidValues {
    +		repeatedStringForValidValues += fmt.Sprintf("%v", f) + ","
    +	}
    +	repeatedStringForValidValues += "}"
    +	s := strings.Join([]string{`&CapacityRequestPolicy{`,
    +		`Default:` + strings.Replace(fmt.Sprintf("%v", this.Default), "Quantity", "resource.Quantity", 1) + `,`,
    +		`ValidValues:` + repeatedStringForValidValues + `,`,
    +		`ValidRange:` + strings.Replace(this.ValidRange.String(), "CapacityRequestPolicyRange", "CapacityRequestPolicyRange", 1) + `,`,
    +		`}`,
    +	}, "")
    +	return s
    +}
    +func (this *CapacityRequestPolicyRange) String() string {
    +	if this == nil {
    +		return "nil"
    +	}
    +	s := strings.Join([]string{`&CapacityRequestPolicyRange{`,
    +		`Min:` + strings.Replace(fmt.Sprintf("%v", this.Min), "Quantity", "resource.Quantity", 1) + `,`,
    +		`Max:` + strings.Replace(fmt.Sprintf("%v", this.Max), "Quantity", "resource.Quantity", 1) + `,`,
    +		`Step:` + strings.Replace(fmt.Sprintf("%v", this.Step), "Quantity", "resource.Quantity", 1) + `,`,
    +		`}`,
    +	}, "")
    +	return s
    +}
    +func (this *CapacityRequirements) String() string {
    +	if this == nil {
    +		return "nil"
    +	}
    +	keysForRequests := make([]string, 0, len(this.Requests))
    +	for k := range this.Requests {
    +		keysForRequests = append(keysForRequests, string(k))
    +	}
    +	github_com_gogo_protobuf_sortkeys.Strings(keysForRequests)
    +	mapStringForRequests := "map[QualifiedName]resource.Quantity{"
    +	for _, k := range keysForRequests {
    +		mapStringForRequests += fmt.Sprintf("%v: %v,", k, this.Requests[QualifiedName(k)])
    +	}
    +	mapStringForRequests += "}"
    +	s := strings.Join([]string{`&CapacityRequirements{`,
    +		`Requests:` + mapStringForRequests + `,`,
    +		`}`,
    +	}, "")
    +	return s
    +}
    +func (this *Counter) String() string {
    +	if this == nil {
    +		return "nil"
    +	}
    +	s := strings.Join([]string{`&Counter{`,
    +		`Value:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Value), "Quantity", "resource.Quantity", 1), `&`, ``, 1) + `,`,
    +		`}`,
    +	}, "")
    +	return s
    +}
    +func (this *CounterSet) String() string {
    +	if this == nil {
    +		return "nil"
    +	}
    +	keysForCounters := make([]string, 0, len(this.Counters))
    +	for k := range this.Counters {
    +		keysForCounters = append(keysForCounters, k)
    +	}
    +	github_com_gogo_protobuf_sortkeys.Strings(keysForCounters)
    +	mapStringForCounters := "map[string]Counter{"
    +	for _, k := range keysForCounters {
    +		mapStringForCounters += fmt.Sprintf("%v: %v,", k, this.Counters[k])
    +	}
    +	mapStringForCounters += "}"
    +	s := strings.Join([]string{`&CounterSet{`,
    +		`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
    +		`Counters:` + mapStringForCounters + `,`,
    +		`}`,
    +	}, "")
    +	return s
    +}
    +func (this *Device) String() string {
    +	if this == nil {
    +		return "nil"
    +	}
    +	repeatedStringForConsumesCounters := "[]DeviceCounterConsumption{"
    +	for _, f := range this.ConsumesCounters {
    +		repeatedStringForConsumesCounters += strings.Replace(strings.Replace(f.String(), "DeviceCounterConsumption", "DeviceCounterConsumption", 1), `&`, ``, 1) + ","
    +	}
    +	repeatedStringForConsumesCounters += "}"
    +	repeatedStringForTaints := "[]DeviceTaint{"
    +	for _, f := range this.Taints {
    +		repeatedStringForTaints += fmt.Sprintf("%v", f) + ","
    +	}
    +	repeatedStringForTaints += "}"
    +	keysForAttributes := make([]string, 0, len(this.Attributes))
    +	for k := range this.Attributes {
    +		keysForAttributes = append(keysForAttributes, string(k))
    +	}
    +	github_com_gogo_protobuf_sortkeys.Strings(keysForAttributes)
    +	mapStringForAttributes := "map[QualifiedName]DeviceAttribute{"
    +	for _, k := range keysForAttributes {
    +		mapStringForAttributes += fmt.Sprintf("%v: %v,", k, this.Attributes[QualifiedName(k)])
    +	}
    +	mapStringForAttributes += "}"
    +	keysForCapacity := make([]string, 0, len(this.Capacity))
    +	for k := range this.Capacity {
    +		keysForCapacity = append(keysForCapacity, string(k))
    +	}
    +	github_com_gogo_protobuf_sortkeys.Strings(keysForCapacity)
    +	mapStringForCapacity := "map[QualifiedName]DeviceCapacity{"
    +	for _, k := range keysForCapacity {
    +		mapStringForCapacity += fmt.Sprintf("%v: %v,", k, this.Capacity[QualifiedName(k)])
    +	}
    +	mapStringForCapacity += "}"
    +	s := strings.Join([]string{`&Device{`,
    +		`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
    +		`Attributes:` + mapStringForAttributes + `,`,
    +		`Capacity:` + mapStringForCapacity + `,`,
    +		`ConsumesCounters:` + repeatedStringForConsumesCounters + `,`,
    +		`NodeName:` + valueToStringGenerated(this.NodeName) + `,`,
    +		`NodeSelector:` + strings.Replace(fmt.Sprintf("%v", this.NodeSelector), "NodeSelector", "v11.NodeSelector", 1) + `,`,
    +		`AllNodes:` + valueToStringGenerated(this.AllNodes) + `,`,
    +		`Taints:` + repeatedStringForTaints + `,`,
    +		`BindsToNode:` + valueToStringGenerated(this.BindsToNode) + `,`,
    +		`BindingConditions:` + fmt.Sprintf("%v", this.BindingConditions) + `,`,
    +		`BindingFailureConditions:` + fmt.Sprintf("%v", this.BindingFailureConditions) + `,`,
    +		`AllowMultipleAllocations:` + valueToStringGenerated(this.AllowMultipleAllocations) + `,`,
    +		`}`,
    +	}, "")
    +	return s
    +}
    +func (this *DeviceAllocationConfiguration) String() string {
    +	if this == nil {
    +		return "nil"
    +	}
    +	s := strings.Join([]string{`&DeviceAllocationConfiguration{`,
    +		`Source:` + fmt.Sprintf("%v", this.Source) + `,`,
    +		`Requests:` + fmt.Sprintf("%v", this.Requests) + `,`,
    +		`DeviceConfiguration:` + strings.Replace(strings.Replace(this.DeviceConfiguration.String(), "DeviceConfiguration", "DeviceConfiguration", 1), `&`, ``, 1) + `,`,
    +		`}`,
    +	}, "")
    +	return s
    +}
    +func (this *DeviceAllocationResult) String() string {
    +	if this == nil {
    +		return "nil"
    +	}
    +	repeatedStringForResults := "[]DeviceRequestAllocationResult{"
    +	for _, f := range this.Results {
    +		repeatedStringForResults += strings.Replace(strings.Replace(f.String(), "DeviceRequestAllocationResult", "DeviceRequestAllocationResult", 1), `&`, ``, 1) + ","
    +	}
    +	repeatedStringForResults += "}"
    +	repeatedStringForConfig := "[]DeviceAllocationConfiguration{"
    +	for _, f := range this.Config {
    +		repeatedStringForConfig += strings.Replace(strings.Replace(f.String(), "DeviceAllocationConfiguration", "DeviceAllocationConfiguration", 1), `&`, ``, 1) + ","
    +	}
    +	repeatedStringForConfig += "}"
    +	s := strings.Join([]string{`&DeviceAllocationResult{`,
    +		`Results:` + repeatedStringForResults + `,`,
    +		`Config:` + repeatedStringForConfig + `,`,
    +		`}`,
    +	}, "")
    +	return s
    +}
    +func (this *DeviceAttribute) String() string {
    +	if this == nil {
    +		return "nil"
    +	}
    +	s := strings.Join([]string{`&DeviceAttribute{`,
    +		`IntValue:` + valueToStringGenerated(this.IntValue) + `,`,
    +		`BoolValue:` + valueToStringGenerated(this.BoolValue) + `,`,
    +		`StringValue:` + valueToStringGenerated(this.StringValue) + `,`,
    +		`VersionValue:` + valueToStringGenerated(this.VersionValue) + `,`,
    +		`}`,
    +	}, "")
    +	return s
    +}
    +func (this *DeviceCapacity) String() string {
    +	if this == nil {
    +		return "nil"
    +	}
    +	s := strings.Join([]string{`&DeviceCapacity{`,
    +		`Value:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Value), "Quantity", "resource.Quantity", 1), `&`, ``, 1) + `,`,
    +		`RequestPolicy:` + strings.Replace(this.RequestPolicy.String(), "CapacityRequestPolicy", "CapacityRequestPolicy", 1) + `,`,
    +		`}`,
    +	}, "")
    +	return s
    +}
    +func (this *DeviceClaim) String() string {
    +	if this == nil {
    +		return "nil"
    +	}
    +	repeatedStringForRequests := "[]DeviceRequest{"
    +	for _, f := range this.Requests {
    +		repeatedStringForRequests += strings.Replace(strings.Replace(f.String(), "DeviceRequest", "DeviceRequest", 1), `&`, ``, 1) + ","
    +	}
    +	repeatedStringForRequests += "}"
    +	repeatedStringForConstraints := "[]DeviceConstraint{"
    +	for _, f := range this.Constraints {
    +		repeatedStringForConstraints += strings.Replace(strings.Replace(f.String(), "DeviceConstraint", "DeviceConstraint", 1), `&`, ``, 1) + ","
    +	}
    +	repeatedStringForConstraints += "}"
    +	repeatedStringForConfig := "[]DeviceClaimConfiguration{"
    +	for _, f := range this.Config {
    +		repeatedStringForConfig += strings.Replace(strings.Replace(f.String(), "DeviceClaimConfiguration", "DeviceClaimConfiguration", 1), `&`, ``, 1) + ","
    +	}
    +	repeatedStringForConfig += "}"
    +	s := strings.Join([]string{`&DeviceClaim{`,
    +		`Requests:` + repeatedStringForRequests + `,`,
    +		`Constraints:` + repeatedStringForConstraints + `,`,
    +		`Config:` + repeatedStringForConfig + `,`,
    +		`}`,
    +	}, "")
    +	return s
    +}
    +func (this *DeviceClaimConfiguration) String() string {
    +	if this == nil {
    +		return "nil"
    +	}
    +	s := strings.Join([]string{`&DeviceClaimConfiguration{`,
    +		`Requests:` + fmt.Sprintf("%v", this.Requests) + `,`,
    +		`DeviceConfiguration:` + strings.Replace(strings.Replace(this.DeviceConfiguration.String(), "DeviceConfiguration", "DeviceConfiguration", 1), `&`, ``, 1) + `,`,
    +		`}`,
    +	}, "")
    +	return s
    +}
    +func (this *DeviceClass) String() string {
    +	if this == nil {
    +		return "nil"
    +	}
    +	s := strings.Join([]string{`&DeviceClass{`,
    +		`ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
    +		`Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "DeviceClassSpec", "DeviceClassSpec", 1), `&`, ``, 1) + `,`,
    +		`}`,
    +	}, "")
    +	return s
    +}
    +func (this *DeviceClassConfiguration) String() string {
    +	if this == nil {
    +		return "nil"
    +	}
    +	s := strings.Join([]string{`&DeviceClassConfiguration{`,
    +		`DeviceConfiguration:` + strings.Replace(strings.Replace(this.DeviceConfiguration.String(), "DeviceConfiguration", "DeviceConfiguration", 1), `&`, ``, 1) + `,`,
    +		`}`,
    +	}, "")
    +	return s
    +}
    +func (this *DeviceClassList) String() string {
    +	if this == nil {
    +		return "nil"
    +	}
    +	repeatedStringForItems := "[]DeviceClass{"
    +	for _, f := range this.Items {
    +		repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "DeviceClass", "DeviceClass", 1), `&`, ``, 1) + ","
    +	}
    +	repeatedStringForItems += "}"
    +	s := strings.Join([]string{`&DeviceClassList{`,
    +		`ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
    +		`Items:` + repeatedStringForItems + `,`,
    +		`}`,
    +	}, "")
    +	return s
    +}
    +func (this *DeviceClassSpec) String() string {
    +	if this == nil {
    +		return "nil"
    +	}
    +	repeatedStringForSelectors := "[]DeviceSelector{"
    +	for _, f := range this.Selectors {
    +		repeatedStringForSelectors += strings.Replace(strings.Replace(f.String(), "DeviceSelector", "DeviceSelector", 1), `&`, ``, 1) + ","
    +	}
    +	repeatedStringForSelectors += "}"
    +	repeatedStringForConfig := "[]DeviceClassConfiguration{"
    +	for _, f := range this.Config {
    +		repeatedStringForConfig += strings.Replace(strings.Replace(f.String(), "DeviceClassConfiguration", "DeviceClassConfiguration", 1), `&`, ``, 1) + ","
    +	}
    +	repeatedStringForConfig += "}"
    +	s := strings.Join([]string{`&DeviceClassSpec{`,
    +		`Selectors:` + repeatedStringForSelectors + `,`,
    +		`Config:` + repeatedStringForConfig + `,`,
    +		`ExtendedResourceName:` + valueToStringGenerated(this.ExtendedResourceName) + `,`,
    +		`}`,
    +	}, "")
    +	return s
    +}
    +func (this *DeviceConfiguration) String() string {
    +	if this == nil {
    +		return "nil"
    +	}
    +	s := strings.Join([]string{`&DeviceConfiguration{`,
    +		`Opaque:` + strings.Replace(this.Opaque.String(), "OpaqueDeviceConfiguration", "OpaqueDeviceConfiguration", 1) + `,`,
    +		`}`,
    +	}, "")
    +	return s
    +}
    +func (this *DeviceConstraint) String() string {
    +	if this == nil {
    +		return "nil"
    +	}
    +	s := strings.Join([]string{`&DeviceConstraint{`,
    +		`Requests:` + fmt.Sprintf("%v", this.Requests) + `,`,
    +		`MatchAttribute:` + valueToStringGenerated(this.MatchAttribute) + `,`,
    +		`DistinctAttribute:` + valueToStringGenerated(this.DistinctAttribute) + `,`,
    +		`}`,
    +	}, "")
    +	return s
    +}
    +func (this *DeviceCounterConsumption) String() string {
    +	if this == nil {
    +		return "nil"
    +	}
    +	keysForCounters := make([]string, 0, len(this.Counters))
    +	for k := range this.Counters {
    +		keysForCounters = append(keysForCounters, k)
    +	}
    +	github_com_gogo_protobuf_sortkeys.Strings(keysForCounters)
    +	mapStringForCounters := "map[string]Counter{"
    +	for _, k := range keysForCounters {
    +		mapStringForCounters += fmt.Sprintf("%v: %v,", k, this.Counters[k])
    +	}
    +	mapStringForCounters += "}"
    +	s := strings.Join([]string{`&DeviceCounterConsumption{`,
    +		`CounterSet:` + fmt.Sprintf("%v", this.CounterSet) + `,`,
    +		`Counters:` + mapStringForCounters + `,`,
    +		`}`,
    +	}, "")
    +	return s
    +}
    +func (this *DeviceRequest) String() string {
    +	if this == nil {
    +		return "nil"
    +	}
    +	repeatedStringForFirstAvailable := "[]DeviceSubRequest{"
    +	for _, f := range this.FirstAvailable {
    +		repeatedStringForFirstAvailable += strings.Replace(strings.Replace(f.String(), "DeviceSubRequest", "DeviceSubRequest", 1), `&`, ``, 1) + ","
    +	}
    +	repeatedStringForFirstAvailable += "}"
    +	s := strings.Join([]string{`&DeviceRequest{`,
    +		`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
    +		`Exactly:` + strings.Replace(this.Exactly.String(), "ExactDeviceRequest", "ExactDeviceRequest", 1) + `,`,
    +		`FirstAvailable:` + repeatedStringForFirstAvailable + `,`,
    +		`}`,
    +	}, "")
    +	return s
    +}
    +func (this *DeviceRequestAllocationResult) String() string {
    +	if this == nil {
    +		return "nil"
    +	}
    +	repeatedStringForTolerations := "[]DeviceToleration{"
    +	for _, f := range this.Tolerations {
    +		repeatedStringForTolerations += strings.Replace(strings.Replace(f.String(), "DeviceToleration", "DeviceToleration", 1), `&`, ``, 1) + ","
    +	}
    +	repeatedStringForTolerations += "}"
    +	keysForConsumedCapacity := make([]string, 0, len(this.ConsumedCapacity))
    +	for k := range this.ConsumedCapacity {
    +		keysForConsumedCapacity = append(keysForConsumedCapacity, string(k))
    +	}
    +	github_com_gogo_protobuf_sortkeys.Strings(keysForConsumedCapacity)
    +	mapStringForConsumedCapacity := "map[QualifiedName]resource.Quantity{"
    +	for _, k := range keysForConsumedCapacity {
    +		mapStringForConsumedCapacity += fmt.Sprintf("%v: %v,", k, this.ConsumedCapacity[QualifiedName(k)])
    +	}
    +	mapStringForConsumedCapacity += "}"
    +	s := strings.Join([]string{`&DeviceRequestAllocationResult{`,
    +		`Request:` + fmt.Sprintf("%v", this.Request) + `,`,
    +		`Driver:` + fmt.Sprintf("%v", this.Driver) + `,`,
    +		`Pool:` + fmt.Sprintf("%v", this.Pool) + `,`,
    +		`Device:` + fmt.Sprintf("%v", this.Device) + `,`,
    +		`AdminAccess:` + valueToStringGenerated(this.AdminAccess) + `,`,
    +		`Tolerations:` + repeatedStringForTolerations + `,`,
    +		`BindingConditions:` + fmt.Sprintf("%v", this.BindingConditions) + `,`,
    +		`BindingFailureConditions:` + fmt.Sprintf("%v", this.BindingFailureConditions) + `,`,
    +		`ShareID:` + valueToStringGenerated(this.ShareID) + `,`,
    +		`ConsumedCapacity:` + mapStringForConsumedCapacity + `,`,
    +		`}`,
    +	}, "")
    +	return s
    +}
    +func (this *DeviceSelector) String() string {
    +	if this == nil {
    +		return "nil"
    +	}
    +	s := strings.Join([]string{`&DeviceSelector{`,
    +		`CEL:` + strings.Replace(this.CEL.String(), "CELDeviceSelector", "CELDeviceSelector", 1) + `,`,
    +		`}`,
    +	}, "")
    +	return s
    +}
    +func (this *DeviceSubRequest) String() string {
    +	if this == nil {
    +		return "nil"
    +	}
    +	repeatedStringForSelectors := "[]DeviceSelector{"
    +	for _, f := range this.Selectors {
    +		repeatedStringForSelectors += strings.Replace(strings.Replace(f.String(), "DeviceSelector", "DeviceSelector", 1), `&`, ``, 1) + ","
    +	}
    +	repeatedStringForSelectors += "}"
    +	repeatedStringForTolerations := "[]DeviceToleration{"
    +	for _, f := range this.Tolerations {
    +		repeatedStringForTolerations += strings.Replace(strings.Replace(f.String(), "DeviceToleration", "DeviceToleration", 1), `&`, ``, 1) + ","
    +	}
    +	repeatedStringForTolerations += "}"
    +	s := strings.Join([]string{`&DeviceSubRequest{`,
    +		`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
    +		`DeviceClassName:` + fmt.Sprintf("%v", this.DeviceClassName) + `,`,
    +		`Selectors:` + repeatedStringForSelectors + `,`,
    +		`AllocationMode:` + fmt.Sprintf("%v", this.AllocationMode) + `,`,
    +		`Count:` + fmt.Sprintf("%v", this.Count) + `,`,
    +		`Tolerations:` + repeatedStringForTolerations + `,`,
    +		`Capacity:` + strings.Replace(this.Capacity.String(), "CapacityRequirements", "CapacityRequirements", 1) + `,`,
    +		`}`,
    +	}, "")
    +	return s
    +}
    +func (this *DeviceToleration) String() string {
    +	if this == nil {
    +		return "nil"
    +	}
    +	s := strings.Join([]string{`&DeviceToleration{`,
    +		`Key:` + fmt.Sprintf("%v", this.Key) + `,`,
    +		`Operator:` + fmt.Sprintf("%v", this.Operator) + `,`,
    +		`Value:` + fmt.Sprintf("%v", this.Value) + `,`,
    +		`Effect:` + fmt.Sprintf("%v", this.Effect) + `,`,
    +		`TolerationSeconds:` + valueToStringGenerated(this.TolerationSeconds) + `,`,
    +		`}`,
    +	}, "")
    +	return s
    +}
    +func (this *ExactDeviceRequest) String() string {
    +	if this == nil {
    +		return "nil"
    +	}
    +	repeatedStringForSelectors := "[]DeviceSelector{"
    +	for _, f := range this.Selectors {
    +		repeatedStringForSelectors += strings.Replace(strings.Replace(f.String(), "DeviceSelector", "DeviceSelector", 1), `&`, ``, 1) + ","
    +	}
    +	repeatedStringForSelectors += "}"
    +	repeatedStringForTolerations := "[]DeviceToleration{"
    +	for _, f := range this.Tolerations {
    +		repeatedStringForTolerations += strings.Replace(strings.Replace(f.String(), "DeviceToleration", "DeviceToleration", 1), `&`, ``, 1) + ","
    +	}
    +	repeatedStringForTolerations += "}"
    +	s := strings.Join([]string{`&ExactDeviceRequest{`,
    +		`DeviceClassName:` + fmt.Sprintf("%v", this.DeviceClassName) + `,`,
    +		`Selectors:` + repeatedStringForSelectors + `,`,
    +		`AllocationMode:` + fmt.Sprintf("%v", this.AllocationMode) + `,`,
    +		`Count:` + fmt.Sprintf("%v", this.Count) + `,`,
    +		`AdminAccess:` + valueToStringGenerated(this.AdminAccess) + `,`,
    +		`Tolerations:` + repeatedStringForTolerations + `,`,
    +		`Capacity:` + strings.Replace(this.Capacity.String(), "CapacityRequirements", "CapacityRequirements", 1) + `,`,
    +		`}`,
    +	}, "")
    +	return s
    +}
    +func (this *NetworkDeviceData) String() string {
    +	if this == nil {
    +		return "nil"
    +	}
    +	s := strings.Join([]string{`&NetworkDeviceData{`,
    +		`InterfaceName:` + fmt.Sprintf("%v", this.InterfaceName) + `,`,
    +		`IPs:` + fmt.Sprintf("%v", this.IPs) + `,`,
    +		`HardwareAddress:` + fmt.Sprintf("%v", this.HardwareAddress) + `,`,
    +		`}`,
    +	}, "")
    +	return s
    +}
    +func (this *OpaqueDeviceConfiguration) String() string {
    +	if this == nil {
    +		return "nil"
    +	}
    +	s := strings.Join([]string{`&OpaqueDeviceConfiguration{`,
    +		`Driver:` + fmt.Sprintf("%v", this.Driver) + `,`,
    +		`Parameters:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Parameters), "RawExtension", "runtime.RawExtension", 1), `&`, ``, 1) + `,`,
    +		`}`,
    +	}, "")
    +	return s
    +}
    +func (this *ResourceClaim) String() string {
    +	if this == nil {
    +		return "nil"
    +	}
    +	s := strings.Join([]string{`&ResourceClaim{`,
    +		`ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
    +		`Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ResourceClaimSpec", "ResourceClaimSpec", 1), `&`, ``, 1) + `,`,
    +		`Status:` + strings.Replace(strings.Replace(this.Status.String(), "ResourceClaimStatus", "ResourceClaimStatus", 1), `&`, ``, 1) + `,`,
    +		`}`,
    +	}, "")
    +	return s
    +}
    +func (this *ResourceClaimConsumerReference) String() string {
    +	if this == nil {
    +		return "nil"
    +	}
    +	s := strings.Join([]string{`&ResourceClaimConsumerReference{`,
    +		`APIGroup:` + fmt.Sprintf("%v", this.APIGroup) + `,`,
    +		`Resource:` + fmt.Sprintf("%v", this.Resource) + `,`,
    +		`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
    +		`UID:` + fmt.Sprintf("%v", this.UID) + `,`,
    +		`}`,
    +	}, "")
    +	return s
    +}
    +func (this *ResourceClaimList) String() string {
    +	if this == nil {
    +		return "nil"
    +	}
    +	repeatedStringForItems := "[]ResourceClaim{"
    +	for _, f := range this.Items {
    +		repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ResourceClaim", "ResourceClaim", 1), `&`, ``, 1) + ","
    +	}
    +	repeatedStringForItems += "}"
    +	s := strings.Join([]string{`&ResourceClaimList{`,
    +		`ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
    +		`Items:` + repeatedStringForItems + `,`,
    +		`}`,
    +	}, "")
    +	return s
    +}
    +func (this *ResourceClaimSpec) String() string {
    +	if this == nil {
    +		return "nil"
    +	}
    +	s := strings.Join([]string{`&ResourceClaimSpec{`,
    +		`Devices:` + strings.Replace(strings.Replace(this.Devices.String(), "DeviceClaim", "DeviceClaim", 1), `&`, ``, 1) + `,`,
    +		`}`,
    +	}, "")
    +	return s
    +}
    +func (this *ResourceClaimStatus) String() string {
    +	if this == nil {
    +		return "nil"
    +	}
    +	repeatedStringForReservedFor := "[]ResourceClaimConsumerReference{"
    +	for _, f := range this.ReservedFor {
    +		repeatedStringForReservedFor += strings.Replace(strings.Replace(f.String(), "ResourceClaimConsumerReference", "ResourceClaimConsumerReference", 1), `&`, ``, 1) + ","
    +	}
    +	repeatedStringForReservedFor += "}"
    +	repeatedStringForDevices := "[]AllocatedDeviceStatus{"
    +	for _, f := range this.Devices {
    +		repeatedStringForDevices += strings.Replace(strings.Replace(f.String(), "AllocatedDeviceStatus", "AllocatedDeviceStatus", 1), `&`, ``, 1) + ","
    +	}
    +	repeatedStringForDevices += "}"
    +	s := strings.Join([]string{`&ResourceClaimStatus{`,
    +		`Allocation:` + strings.Replace(this.Allocation.String(), "AllocationResult", "AllocationResult", 1) + `,`,
    +		`ReservedFor:` + repeatedStringForReservedFor + `,`,
    +		`Devices:` + repeatedStringForDevices + `,`,
    +		`}`,
    +	}, "")
    +	return s
    +}
    +func (this *ResourceClaimTemplate) String() string {
    +	if this == nil {
    +		return "nil"
    +	}
    +	s := strings.Join([]string{`&ResourceClaimTemplate{`,
    +		`ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
    +		`Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ResourceClaimTemplateSpec", "ResourceClaimTemplateSpec", 1), `&`, ``, 1) + `,`,
    +		`}`,
    +	}, "")
    +	return s
    +}
    +func (this *ResourceClaimTemplateList) String() string {
    +	if this == nil {
    +		return "nil"
    +	}
    +	repeatedStringForItems := "[]ResourceClaimTemplate{"
    +	for _, f := range this.Items {
    +		repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ResourceClaimTemplate", "ResourceClaimTemplate", 1), `&`, ``, 1) + ","
    +	}
    +	repeatedStringForItems += "}"
    +	s := strings.Join([]string{`&ResourceClaimTemplateList{`,
    +		`ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
    +		`Items:` + repeatedStringForItems + `,`,
    +		`}`,
    +	}, "")
    +	return s
    +}
    +func (this *ResourceClaimTemplateSpec) String() string {
    +	if this == nil {
    +		return "nil"
    +	}
    +	s := strings.Join([]string{`&ResourceClaimTemplateSpec{`,
    +		`ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
    +		`Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ResourceClaimSpec", "ResourceClaimSpec", 1), `&`, ``, 1) + `,`,
    +		`}`,
    +	}, "")
    +	return s
    +}
    +func (this *ResourcePool) String() string {
    +	if this == nil {
    +		return "nil"
    +	}
    +	s := strings.Join([]string{`&ResourcePool{`,
    +		`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
    +		`Generation:` + fmt.Sprintf("%v", this.Generation) + `,`,
    +		`ResourceSliceCount:` + fmt.Sprintf("%v", this.ResourceSliceCount) + `,`,
    +		`}`,
    +	}, "")
    +	return s
    +}
    +func (this *ResourceSlice) String() string {
    +	if this == nil {
    +		return "nil"
    +	}
    +	s := strings.Join([]string{`&ResourceSlice{`,
    +		`ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
    +		`Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ResourceSliceSpec", "ResourceSliceSpec", 1), `&`, ``, 1) + `,`,
    +		`}`,
    +	}, "")
    +	return s
    +}
    +func (this *ResourceSliceList) String() string {
    +	if this == nil {
    +		return "nil"
    +	}
    +	repeatedStringForItems := "[]ResourceSlice{"
    +	for _, f := range this.Items {
    +		repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ResourceSlice", "ResourceSlice", 1), `&`, ``, 1) + ","
    +	}
    +	repeatedStringForItems += "}"
    +	s := strings.Join([]string{`&ResourceSliceList{`,
    +		`ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
    +		`Items:` + repeatedStringForItems + `,`,
    +		`}`,
    +	}, "")
    +	return s
    +}
    +func (this *ResourceSliceSpec) String() string {
    +	if this == nil {
    +		return "nil"
    +	}
    +	repeatedStringForDevices := "[]Device{"
    +	for _, f := range this.Devices {
    +		repeatedStringForDevices += strings.Replace(strings.Replace(f.String(), "Device", "Device", 1), `&`, ``, 1) + ","
    +	}
    +	repeatedStringForDevices += "}"
    +	repeatedStringForSharedCounters := "[]CounterSet{"
    +	for _, f := range this.SharedCounters {
    +		repeatedStringForSharedCounters += strings.Replace(strings.Replace(f.String(), "CounterSet", "CounterSet", 1), `&`, ``, 1) + ","
    +	}
    +	repeatedStringForSharedCounters += "}"
    +	s := strings.Join([]string{`&ResourceSliceSpec{`,
    +		`Driver:` + fmt.Sprintf("%v", this.Driver) + `,`,
    +		`Pool:` + strings.Replace(strings.Replace(this.Pool.String(), "ResourcePool", "ResourcePool", 1), `&`, ``, 1) + `,`,
    +		`NodeName:` + valueToStringGenerated(this.NodeName) + `,`,
    +		`NodeSelector:` + strings.Replace(fmt.Sprintf("%v", this.NodeSelector), "NodeSelector", "v11.NodeSelector", 1) + `,`,
    +		`AllNodes:` + valueToStringGenerated(this.AllNodes) + `,`,
    +		`Devices:` + repeatedStringForDevices + `,`,
    +		`PerDeviceNodeSelection:` + valueToStringGenerated(this.PerDeviceNodeSelection) + `,`,
    +		`SharedCounters:` + repeatedStringForSharedCounters + `,`,
    +		`}`,
    +	}, "")
    +	return s
    +}
    +func valueToStringGenerated(v interface{}) string {
    +	rv := reflect.ValueOf(v)
    +	if rv.IsNil() {
    +		return "nil"
    +	}
    +	pv := reflect.Indirect(rv).Interface()
    +	return fmt.Sprintf("*%v", pv)
    +}
    +func (m *AllocatedDeviceStatus) Unmarshal(dAtA []byte) error {
    +	l := len(dAtA)
    +	iNdEx := 0
    +	for iNdEx < l {
    +		preIndex := iNdEx
    +		var wire uint64
    +		for shift := uint(0); ; shift += 7 {
    +			if shift >= 64 {
    +				return ErrIntOverflowGenerated
    +			}
    +			if iNdEx >= l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			b := dAtA[iNdEx]
    +			iNdEx++
    +			wire |= uint64(b&0x7F) << shift
    +			if b < 0x80 {
    +				break
    +			}
    +		}
    +		fieldNum := int32(wire >> 3)
    +		wireType := int(wire & 0x7)
    +		if wireType == 4 {
    +			return fmt.Errorf("proto: AllocatedDeviceStatus: wiretype end group for non-group")
    +		}
    +		if fieldNum <= 0 {
    +			return fmt.Errorf("proto: AllocatedDeviceStatus: illegal tag %d (wire type %d)", fieldNum, wire)
    +		}
    +		switch fieldNum {
    +		case 1:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Driver", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.Driver = string(dAtA[iNdEx:postIndex])
    +			iNdEx = postIndex
    +		case 2:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Pool", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.Pool = string(dAtA[iNdEx:postIndex])
    +			iNdEx = postIndex
    +		case 3:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Device", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.Device = string(dAtA[iNdEx:postIndex])
    +			iNdEx = postIndex
    +		case 4:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.Conditions = append(m.Conditions, v1.Condition{})
    +			if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		case 5:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			if m.Data == nil {
    +				m.Data = &runtime.RawExtension{}
    +			}
    +			if err := m.Data.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		case 6:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field NetworkData", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			if m.NetworkData == nil {
    +				m.NetworkData = &NetworkDeviceData{}
    +			}
    +			if err := m.NetworkData.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		case 7:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field ShareID", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			s := string(dAtA[iNdEx:postIndex])
    +			m.ShareID = &s
    +			iNdEx = postIndex
    +		default:
    +			iNdEx = preIndex
    +			skippy, err := skipGenerated(dAtA[iNdEx:])
    +			if err != nil {
    +				return err
    +			}
    +			if (skippy < 0) || (iNdEx+skippy) < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if (iNdEx + skippy) > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			iNdEx += skippy
    +		}
    +	}
    +
    +	if iNdEx > l {
    +		return io.ErrUnexpectedEOF
    +	}
    +	return nil
    +}
    +func (m *AllocationResult) Unmarshal(dAtA []byte) error {
    +	l := len(dAtA)
    +	iNdEx := 0
    +	for iNdEx < l {
    +		preIndex := iNdEx
    +		var wire uint64
    +		for shift := uint(0); ; shift += 7 {
    +			if shift >= 64 {
    +				return ErrIntOverflowGenerated
    +			}
    +			if iNdEx >= l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			b := dAtA[iNdEx]
    +			iNdEx++
    +			wire |= uint64(b&0x7F) << shift
    +			if b < 0x80 {
    +				break
    +			}
    +		}
    +		fieldNum := int32(wire >> 3)
    +		wireType := int(wire & 0x7)
    +		if wireType == 4 {
    +			return fmt.Errorf("proto: AllocationResult: wiretype end group for non-group")
    +		}
    +		if fieldNum <= 0 {
    +			return fmt.Errorf("proto: AllocationResult: illegal tag %d (wire type %d)", fieldNum, wire)
    +		}
    +		switch fieldNum {
    +		case 1:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Devices", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			if err := m.Devices.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		case 3:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field NodeSelector", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			if m.NodeSelector == nil {
    +				m.NodeSelector = &v11.NodeSelector{}
    +			}
    +			if err := m.NodeSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		case 5:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field AllocationTimestamp", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			if m.AllocationTimestamp == nil {
    +				m.AllocationTimestamp = &v1.Time{}
    +			}
    +			if err := m.AllocationTimestamp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		default:
    +			iNdEx = preIndex
    +			skippy, err := skipGenerated(dAtA[iNdEx:])
    +			if err != nil {
    +				return err
    +			}
    +			if (skippy < 0) || (iNdEx+skippy) < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if (iNdEx + skippy) > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			iNdEx += skippy
    +		}
    +	}
    +
    +	if iNdEx > l {
    +		return io.ErrUnexpectedEOF
    +	}
    +	return nil
    +}
    +func (m *CELDeviceSelector) Unmarshal(dAtA []byte) error {
    +	l := len(dAtA)
    +	iNdEx := 0
    +	for iNdEx < l {
    +		preIndex := iNdEx
    +		var wire uint64
    +		for shift := uint(0); ; shift += 7 {
    +			if shift >= 64 {
    +				return ErrIntOverflowGenerated
    +			}
    +			if iNdEx >= l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			b := dAtA[iNdEx]
    +			iNdEx++
    +			wire |= uint64(b&0x7F) << shift
    +			if b < 0x80 {
    +				break
    +			}
    +		}
    +		fieldNum := int32(wire >> 3)
    +		wireType := int(wire & 0x7)
    +		if wireType == 4 {
    +			return fmt.Errorf("proto: CELDeviceSelector: wiretype end group for non-group")
    +		}
    +		if fieldNum <= 0 {
    +			return fmt.Errorf("proto: CELDeviceSelector: illegal tag %d (wire type %d)", fieldNum, wire)
    +		}
    +		switch fieldNum {
    +		case 1:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Expression", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.Expression = string(dAtA[iNdEx:postIndex])
    +			iNdEx = postIndex
    +		default:
    +			iNdEx = preIndex
    +			skippy, err := skipGenerated(dAtA[iNdEx:])
    +			if err != nil {
    +				return err
    +			}
    +			if (skippy < 0) || (iNdEx+skippy) < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if (iNdEx + skippy) > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			iNdEx += skippy
    +		}
    +	}
    +
    +	if iNdEx > l {
    +		return io.ErrUnexpectedEOF
    +	}
    +	return nil
    +}
    +func (m *CapacityRequestPolicy) Unmarshal(dAtA []byte) error {
    +	l := len(dAtA)
    +	iNdEx := 0
    +	for iNdEx < l {
    +		preIndex := iNdEx
    +		var wire uint64
    +		for shift := uint(0); ; shift += 7 {
    +			if shift >= 64 {
    +				return ErrIntOverflowGenerated
    +			}
    +			if iNdEx >= l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			b := dAtA[iNdEx]
    +			iNdEx++
    +			wire |= uint64(b&0x7F) << shift
    +			if b < 0x80 {
    +				break
    +			}
    +		}
    +		fieldNum := int32(wire >> 3)
    +		wireType := int(wire & 0x7)
    +		if wireType == 4 {
    +			return fmt.Errorf("proto: CapacityRequestPolicy: wiretype end group for non-group")
    +		}
    +		if fieldNum <= 0 {
    +			return fmt.Errorf("proto: CapacityRequestPolicy: illegal tag %d (wire type %d)", fieldNum, wire)
    +		}
    +		switch fieldNum {
    +		case 1:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Default", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			if m.Default == nil {
    +				m.Default = &resource.Quantity{}
    +			}
    +			if err := m.Default.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		case 3:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field ValidValues", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.ValidValues = append(m.ValidValues, resource.Quantity{})
    +			if err := m.ValidValues[len(m.ValidValues)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		case 4:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field ValidRange", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			if m.ValidRange == nil {
    +				m.ValidRange = &CapacityRequestPolicyRange{}
    +			}
    +			if err := m.ValidRange.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		default:
    +			iNdEx = preIndex
    +			skippy, err := skipGenerated(dAtA[iNdEx:])
    +			if err != nil {
    +				return err
    +			}
    +			if (skippy < 0) || (iNdEx+skippy) < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if (iNdEx + skippy) > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			iNdEx += skippy
    +		}
    +	}
    +
    +	if iNdEx > l {
    +		return io.ErrUnexpectedEOF
    +	}
    +	return nil
    +}
    +func (m *CapacityRequestPolicyRange) Unmarshal(dAtA []byte) error {
    +	l := len(dAtA)
    +	iNdEx := 0
    +	for iNdEx < l {
    +		preIndex := iNdEx
    +		var wire uint64
    +		for shift := uint(0); ; shift += 7 {
    +			if shift >= 64 {
    +				return ErrIntOverflowGenerated
    +			}
    +			if iNdEx >= l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			b := dAtA[iNdEx]
    +			iNdEx++
    +			wire |= uint64(b&0x7F) << shift
    +			if b < 0x80 {
    +				break
    +			}
    +		}
    +		fieldNum := int32(wire >> 3)
    +		wireType := int(wire & 0x7)
    +		if wireType == 4 {
    +			return fmt.Errorf("proto: CapacityRequestPolicyRange: wiretype end group for non-group")
    +		}
    +		if fieldNum <= 0 {
    +			return fmt.Errorf("proto: CapacityRequestPolicyRange: illegal tag %d (wire type %d)", fieldNum, wire)
    +		}
    +		switch fieldNum {
    +		case 1:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Min", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			if m.Min == nil {
    +				m.Min = &resource.Quantity{}
    +			}
    +			if err := m.Min.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		case 2:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Max", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			if m.Max == nil {
    +				m.Max = &resource.Quantity{}
    +			}
    +			if err := m.Max.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		case 3:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Step", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			if m.Step == nil {
    +				m.Step = &resource.Quantity{}
    +			}
    +			if err := m.Step.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		default:
    +			iNdEx = preIndex
    +			skippy, err := skipGenerated(dAtA[iNdEx:])
    +			if err != nil {
    +				return err
    +			}
    +			if (skippy < 0) || (iNdEx+skippy) < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if (iNdEx + skippy) > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			iNdEx += skippy
    +		}
    +	}
    +
    +	if iNdEx > l {
    +		return io.ErrUnexpectedEOF
    +	}
    +	return nil
    +}
    +func (m *CapacityRequirements) Unmarshal(dAtA []byte) error {
    +	l := len(dAtA)
    +	iNdEx := 0
    +	for iNdEx < l {
    +		preIndex := iNdEx
    +		var wire uint64
    +		for shift := uint(0); ; shift += 7 {
    +			if shift >= 64 {
    +				return ErrIntOverflowGenerated
    +			}
    +			if iNdEx >= l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			b := dAtA[iNdEx]
    +			iNdEx++
    +			wire |= uint64(b&0x7F) << shift
    +			if b < 0x80 {
    +				break
    +			}
    +		}
    +		fieldNum := int32(wire >> 3)
    +		wireType := int(wire & 0x7)
    +		if wireType == 4 {
    +			return fmt.Errorf("proto: CapacityRequirements: wiretype end group for non-group")
    +		}
    +		if fieldNum <= 0 {
    +			return fmt.Errorf("proto: CapacityRequirements: illegal tag %d (wire type %d)", fieldNum, wire)
    +		}
    +		switch fieldNum {
    +		case 1:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Requests", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			if m.Requests == nil {
    +				m.Requests = make(map[QualifiedName]resource.Quantity)
    +			}
    +			var mapkey QualifiedName
    +			mapvalue := &resource.Quantity{}
    +			for iNdEx < postIndex {
    +				entryPreIndex := iNdEx
    +				var wire uint64
    +				for shift := uint(0); ; shift += 7 {
    +					if shift >= 64 {
    +						return ErrIntOverflowGenerated
    +					}
    +					if iNdEx >= l {
    +						return io.ErrUnexpectedEOF
    +					}
    +					b := dAtA[iNdEx]
    +					iNdEx++
    +					wire |= uint64(b&0x7F) << shift
    +					if b < 0x80 {
    +						break
    +					}
    +				}
    +				fieldNum := int32(wire >> 3)
    +				if fieldNum == 1 {
    +					var stringLenmapkey uint64
    +					for shift := uint(0); ; shift += 7 {
    +						if shift >= 64 {
    +							return ErrIntOverflowGenerated
    +						}
    +						if iNdEx >= l {
    +							return io.ErrUnexpectedEOF
    +						}
    +						b := dAtA[iNdEx]
    +						iNdEx++
    +						stringLenmapkey |= uint64(b&0x7F) << shift
    +						if b < 0x80 {
    +							break
    +						}
    +					}
    +					intStringLenmapkey := int(stringLenmapkey)
    +					if intStringLenmapkey < 0 {
    +						return ErrInvalidLengthGenerated
    +					}
    +					postStringIndexmapkey := iNdEx + intStringLenmapkey
    +					if postStringIndexmapkey < 0 {
    +						return ErrInvalidLengthGenerated
    +					}
    +					if postStringIndexmapkey > l {
    +						return io.ErrUnexpectedEOF
    +					}
    +					mapkey = QualifiedName(dAtA[iNdEx:postStringIndexmapkey])
    +					iNdEx = postStringIndexmapkey
    +				} else if fieldNum == 2 {
    +					var mapmsglen int
    +					for shift := uint(0); ; shift += 7 {
    +						if shift >= 64 {
    +							return ErrIntOverflowGenerated
    +						}
    +						if iNdEx >= l {
    +							return io.ErrUnexpectedEOF
    +						}
    +						b := dAtA[iNdEx]
    +						iNdEx++
    +						mapmsglen |= int(b&0x7F) << shift
    +						if b < 0x80 {
    +							break
    +						}
    +					}
    +					if mapmsglen < 0 {
    +						return ErrInvalidLengthGenerated
    +					}
    +					postmsgIndex := iNdEx + mapmsglen
    +					if postmsgIndex < 0 {
    +						return ErrInvalidLengthGenerated
    +					}
    +					if postmsgIndex > l {
    +						return io.ErrUnexpectedEOF
    +					}
    +					mapvalue = &resource.Quantity{}
    +					if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil {
    +						return err
    +					}
    +					iNdEx = postmsgIndex
    +				} else {
    +					iNdEx = entryPreIndex
    +					skippy, err := skipGenerated(dAtA[iNdEx:])
    +					if err != nil {
    +						return err
    +					}
    +					if (skippy < 0) || (iNdEx+skippy) < 0 {
    +						return ErrInvalidLengthGenerated
    +					}
    +					if (iNdEx + skippy) > postIndex {
    +						return io.ErrUnexpectedEOF
    +					}
    +					iNdEx += skippy
    +				}
    +			}
    +			m.Requests[QualifiedName(mapkey)] = *mapvalue
    +			iNdEx = postIndex
    +		default:
    +			iNdEx = preIndex
    +			skippy, err := skipGenerated(dAtA[iNdEx:])
    +			if err != nil {
    +				return err
    +			}
    +			if (skippy < 0) || (iNdEx+skippy) < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if (iNdEx + skippy) > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			iNdEx += skippy
    +		}
    +	}
    +
    +	if iNdEx > l {
    +		return io.ErrUnexpectedEOF
    +	}
    +	return nil
    +}
    +func (m *Counter) Unmarshal(dAtA []byte) error {
    +	l := len(dAtA)
    +	iNdEx := 0
    +	for iNdEx < l {
    +		preIndex := iNdEx
    +		var wire uint64
    +		for shift := uint(0); ; shift += 7 {
    +			if shift >= 64 {
    +				return ErrIntOverflowGenerated
    +			}
    +			if iNdEx >= l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			b := dAtA[iNdEx]
    +			iNdEx++
    +			wire |= uint64(b&0x7F) << shift
    +			if b < 0x80 {
    +				break
    +			}
    +		}
    +		fieldNum := int32(wire >> 3)
    +		wireType := int(wire & 0x7)
    +		if wireType == 4 {
    +			return fmt.Errorf("proto: Counter: wiretype end group for non-group")
    +		}
    +		if fieldNum <= 0 {
    +			return fmt.Errorf("proto: Counter: illegal tag %d (wire type %d)", fieldNum, wire)
    +		}
    +		switch fieldNum {
    +		case 1:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			if err := m.Value.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		default:
    +			iNdEx = preIndex
    +			skippy, err := skipGenerated(dAtA[iNdEx:])
    +			if err != nil {
    +				return err
    +			}
    +			if (skippy < 0) || (iNdEx+skippy) < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if (iNdEx + skippy) > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			iNdEx += skippy
    +		}
    +	}
    +
    +	if iNdEx > l {
    +		return io.ErrUnexpectedEOF
    +	}
    +	return nil
    +}
    +func (m *CounterSet) Unmarshal(dAtA []byte) error {
    +	l := len(dAtA)
    +	iNdEx := 0
    +	for iNdEx < l {
    +		preIndex := iNdEx
    +		var wire uint64
    +		for shift := uint(0); ; shift += 7 {
    +			if shift >= 64 {
    +				return ErrIntOverflowGenerated
    +			}
    +			if iNdEx >= l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			b := dAtA[iNdEx]
    +			iNdEx++
    +			wire |= uint64(b&0x7F) << shift
    +			if b < 0x80 {
    +				break
    +			}
    +		}
    +		fieldNum := int32(wire >> 3)
    +		wireType := int(wire & 0x7)
    +		if wireType == 4 {
    +			return fmt.Errorf("proto: CounterSet: wiretype end group for non-group")
    +		}
    +		if fieldNum <= 0 {
    +			return fmt.Errorf("proto: CounterSet: illegal tag %d (wire type %d)", fieldNum, wire)
    +		}
    +		switch fieldNum {
    +		case 1:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.Name = string(dAtA[iNdEx:postIndex])
    +			iNdEx = postIndex
    +		case 2:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Counters", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			if m.Counters == nil {
    +				m.Counters = make(map[string]Counter)
    +			}
    +			var mapkey string
    +			mapvalue := &Counter{}
    +			for iNdEx < postIndex {
    +				entryPreIndex := iNdEx
    +				var wire uint64
    +				for shift := uint(0); ; shift += 7 {
    +					if shift >= 64 {
    +						return ErrIntOverflowGenerated
    +					}
    +					if iNdEx >= l {
    +						return io.ErrUnexpectedEOF
    +					}
    +					b := dAtA[iNdEx]
    +					iNdEx++
    +					wire |= uint64(b&0x7F) << shift
    +					if b < 0x80 {
    +						break
    +					}
    +				}
    +				fieldNum := int32(wire >> 3)
    +				if fieldNum == 1 {
    +					var stringLenmapkey uint64
    +					for shift := uint(0); ; shift += 7 {
    +						if shift >= 64 {
    +							return ErrIntOverflowGenerated
    +						}
    +						if iNdEx >= l {
    +							return io.ErrUnexpectedEOF
    +						}
    +						b := dAtA[iNdEx]
    +						iNdEx++
    +						stringLenmapkey |= uint64(b&0x7F) << shift
    +						if b < 0x80 {
    +							break
    +						}
    +					}
    +					intStringLenmapkey := int(stringLenmapkey)
    +					if intStringLenmapkey < 0 {
    +						return ErrInvalidLengthGenerated
    +					}
    +					postStringIndexmapkey := iNdEx + intStringLenmapkey
    +					if postStringIndexmapkey < 0 {
    +						return ErrInvalidLengthGenerated
    +					}
    +					if postStringIndexmapkey > l {
    +						return io.ErrUnexpectedEOF
    +					}
    +					mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
    +					iNdEx = postStringIndexmapkey
    +				} else if fieldNum == 2 {
    +					var mapmsglen int
    +					for shift := uint(0); ; shift += 7 {
    +						if shift >= 64 {
    +							return ErrIntOverflowGenerated
    +						}
    +						if iNdEx >= l {
    +							return io.ErrUnexpectedEOF
    +						}
    +						b := dAtA[iNdEx]
    +						iNdEx++
    +						mapmsglen |= int(b&0x7F) << shift
    +						if b < 0x80 {
    +							break
    +						}
    +					}
    +					if mapmsglen < 0 {
    +						return ErrInvalidLengthGenerated
    +					}
    +					postmsgIndex := iNdEx + mapmsglen
    +					if postmsgIndex < 0 {
    +						return ErrInvalidLengthGenerated
    +					}
    +					if postmsgIndex > l {
    +						return io.ErrUnexpectedEOF
    +					}
    +					mapvalue = &Counter{}
    +					if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil {
    +						return err
    +					}
    +					iNdEx = postmsgIndex
    +				} else {
    +					iNdEx = entryPreIndex
    +					skippy, err := skipGenerated(dAtA[iNdEx:])
    +					if err != nil {
    +						return err
    +					}
    +					if (skippy < 0) || (iNdEx+skippy) < 0 {
    +						return ErrInvalidLengthGenerated
    +					}
    +					if (iNdEx + skippy) > postIndex {
    +						return io.ErrUnexpectedEOF
    +					}
    +					iNdEx += skippy
    +				}
    +			}
    +			m.Counters[mapkey] = *mapvalue
    +			iNdEx = postIndex
    +		default:
    +			iNdEx = preIndex
    +			skippy, err := skipGenerated(dAtA[iNdEx:])
    +			if err != nil {
    +				return err
    +			}
    +			if (skippy < 0) || (iNdEx+skippy) < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if (iNdEx + skippy) > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			iNdEx += skippy
    +		}
    +	}
    +
    +	if iNdEx > l {
    +		return io.ErrUnexpectedEOF
    +	}
    +	return nil
    +}
    +func (m *Device) Unmarshal(dAtA []byte) error {
    +	l := len(dAtA)
    +	iNdEx := 0
    +	for iNdEx < l {
    +		preIndex := iNdEx
    +		var wire uint64
    +		for shift := uint(0); ; shift += 7 {
    +			if shift >= 64 {
    +				return ErrIntOverflowGenerated
    +			}
    +			if iNdEx >= l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			b := dAtA[iNdEx]
    +			iNdEx++
    +			wire |= uint64(b&0x7F) << shift
    +			if b < 0x80 {
    +				break
    +			}
    +		}
    +		fieldNum := int32(wire >> 3)
    +		wireType := int(wire & 0x7)
    +		if wireType == 4 {
    +			return fmt.Errorf("proto: Device: wiretype end group for non-group")
    +		}
    +		if fieldNum <= 0 {
    +			return fmt.Errorf("proto: Device: illegal tag %d (wire type %d)", fieldNum, wire)
    +		}
    +		switch fieldNum {
    +		case 1:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.Name = string(dAtA[iNdEx:postIndex])
    +			iNdEx = postIndex
    +		case 2:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			if m.Attributes == nil {
    +				m.Attributes = make(map[QualifiedName]DeviceAttribute)
    +			}
    +			var mapkey QualifiedName
    +			mapvalue := &DeviceAttribute{}
    +			for iNdEx < postIndex {
    +				entryPreIndex := iNdEx
    +				var wire uint64
    +				for shift := uint(0); ; shift += 7 {
    +					if shift >= 64 {
    +						return ErrIntOverflowGenerated
    +					}
    +					if iNdEx >= l {
    +						return io.ErrUnexpectedEOF
    +					}
    +					b := dAtA[iNdEx]
    +					iNdEx++
    +					wire |= uint64(b&0x7F) << shift
    +					if b < 0x80 {
    +						break
    +					}
    +				}
    +				fieldNum := int32(wire >> 3)
    +				if fieldNum == 1 {
    +					var stringLenmapkey uint64
    +					for shift := uint(0); ; shift += 7 {
    +						if shift >= 64 {
    +							return ErrIntOverflowGenerated
    +						}
    +						if iNdEx >= l {
    +							return io.ErrUnexpectedEOF
    +						}
    +						b := dAtA[iNdEx]
    +						iNdEx++
    +						stringLenmapkey |= uint64(b&0x7F) << shift
    +						if b < 0x80 {
    +							break
    +						}
    +					}
    +					intStringLenmapkey := int(stringLenmapkey)
    +					if intStringLenmapkey < 0 {
    +						return ErrInvalidLengthGenerated
    +					}
    +					postStringIndexmapkey := iNdEx + intStringLenmapkey
    +					if postStringIndexmapkey < 0 {
    +						return ErrInvalidLengthGenerated
    +					}
    +					if postStringIndexmapkey > l {
    +						return io.ErrUnexpectedEOF
    +					}
    +					mapkey = QualifiedName(dAtA[iNdEx:postStringIndexmapkey])
    +					iNdEx = postStringIndexmapkey
    +				} else if fieldNum == 2 {
    +					var mapmsglen int
    +					for shift := uint(0); ; shift += 7 {
    +						if shift >= 64 {
    +							return ErrIntOverflowGenerated
    +						}
    +						if iNdEx >= l {
    +							return io.ErrUnexpectedEOF
    +						}
    +						b := dAtA[iNdEx]
    +						iNdEx++
    +						mapmsglen |= int(b&0x7F) << shift
    +						if b < 0x80 {
    +							break
    +						}
    +					}
    +					if mapmsglen < 0 {
    +						return ErrInvalidLengthGenerated
    +					}
    +					postmsgIndex := iNdEx + mapmsglen
    +					if postmsgIndex < 0 {
    +						return ErrInvalidLengthGenerated
    +					}
    +					if postmsgIndex > l {
    +						return io.ErrUnexpectedEOF
    +					}
    +					mapvalue = &DeviceAttribute{}
    +					if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil {
    +						return err
    +					}
    +					iNdEx = postmsgIndex
    +				} else {
    +					iNdEx = entryPreIndex
    +					skippy, err := skipGenerated(dAtA[iNdEx:])
    +					if err != nil {
    +						return err
    +					}
    +					if (skippy < 0) || (iNdEx+skippy) < 0 {
    +						return ErrInvalidLengthGenerated
    +					}
    +					if (iNdEx + skippy) > postIndex {
    +						return io.ErrUnexpectedEOF
    +					}
    +					iNdEx += skippy
    +				}
    +			}
    +			m.Attributes[QualifiedName(mapkey)] = *mapvalue
    +			iNdEx = postIndex
    +		case 3:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Capacity", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			if m.Capacity == nil {
    +				m.Capacity = make(map[QualifiedName]DeviceCapacity)
    +			}
    +			var mapkey QualifiedName
    +			mapvalue := &DeviceCapacity{}
    +			for iNdEx < postIndex {
    +				entryPreIndex := iNdEx
    +				var wire uint64
    +				for shift := uint(0); ; shift += 7 {
    +					if shift >= 64 {
    +						return ErrIntOverflowGenerated
    +					}
    +					if iNdEx >= l {
    +						return io.ErrUnexpectedEOF
    +					}
    +					b := dAtA[iNdEx]
    +					iNdEx++
    +					wire |= uint64(b&0x7F) << shift
    +					if b < 0x80 {
    +						break
    +					}
    +				}
    +				fieldNum := int32(wire >> 3)
    +				if fieldNum == 1 {
    +					var stringLenmapkey uint64
    +					for shift := uint(0); ; shift += 7 {
    +						if shift >= 64 {
    +							return ErrIntOverflowGenerated
    +						}
    +						if iNdEx >= l {
    +							return io.ErrUnexpectedEOF
    +						}
    +						b := dAtA[iNdEx]
    +						iNdEx++
    +						stringLenmapkey |= uint64(b&0x7F) << shift
    +						if b < 0x80 {
    +							break
    +						}
    +					}
    +					intStringLenmapkey := int(stringLenmapkey)
    +					if intStringLenmapkey < 0 {
    +						return ErrInvalidLengthGenerated
    +					}
    +					postStringIndexmapkey := iNdEx + intStringLenmapkey
    +					if postStringIndexmapkey < 0 {
    +						return ErrInvalidLengthGenerated
    +					}
    +					if postStringIndexmapkey > l {
    +						return io.ErrUnexpectedEOF
    +					}
    +					mapkey = QualifiedName(dAtA[iNdEx:postStringIndexmapkey])
    +					iNdEx = postStringIndexmapkey
    +				} else if fieldNum == 2 {
    +					var mapmsglen int
    +					for shift := uint(0); ; shift += 7 {
    +						if shift >= 64 {
    +							return ErrIntOverflowGenerated
    +						}
    +						if iNdEx >= l {
    +							return io.ErrUnexpectedEOF
    +						}
    +						b := dAtA[iNdEx]
    +						iNdEx++
    +						mapmsglen |= int(b&0x7F) << shift
    +						if b < 0x80 {
    +							break
    +						}
    +					}
    +					if mapmsglen < 0 {
    +						return ErrInvalidLengthGenerated
    +					}
    +					postmsgIndex := iNdEx + mapmsglen
    +					if postmsgIndex < 0 {
    +						return ErrInvalidLengthGenerated
    +					}
    +					if postmsgIndex > l {
    +						return io.ErrUnexpectedEOF
    +					}
    +					mapvalue = &DeviceCapacity{}
    +					if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil {
    +						return err
    +					}
    +					iNdEx = postmsgIndex
    +				} else {
    +					iNdEx = entryPreIndex
    +					skippy, err := skipGenerated(dAtA[iNdEx:])
    +					if err != nil {
    +						return err
    +					}
    +					if (skippy < 0) || (iNdEx+skippy) < 0 {
    +						return ErrInvalidLengthGenerated
    +					}
    +					if (iNdEx + skippy) > postIndex {
    +						return io.ErrUnexpectedEOF
    +					}
    +					iNdEx += skippy
    +				}
    +			}
    +			m.Capacity[QualifiedName(mapkey)] = *mapvalue
    +			iNdEx = postIndex
    +		case 4:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field ConsumesCounters", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.ConsumesCounters = append(m.ConsumesCounters, DeviceCounterConsumption{})
    +			if err := m.ConsumesCounters[len(m.ConsumesCounters)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		case 5:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field NodeName", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			s := string(dAtA[iNdEx:postIndex])
    +			m.NodeName = &s
    +			iNdEx = postIndex
    +		case 6:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field NodeSelector", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			if m.NodeSelector == nil {
    +				m.NodeSelector = &v11.NodeSelector{}
    +			}
    +			if err := m.NodeSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		case 7:
    +			if wireType != 0 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field AllNodes", wireType)
    +			}
    +			var v int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				v |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			b := bool(v != 0)
    +			m.AllNodes = &b
    +		case 8:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Taints", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.Taints = append(m.Taints, DeviceTaint{})
    +			if err := m.Taints[len(m.Taints)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		case 9:
    +			if wireType != 0 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field BindsToNode", wireType)
    +			}
    +			var v int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				v |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			b := bool(v != 0)
    +			m.BindsToNode = &b
    +		case 10:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field BindingConditions", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.BindingConditions = append(m.BindingConditions, string(dAtA[iNdEx:postIndex]))
    +			iNdEx = postIndex
    +		case 11:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field BindingFailureConditions", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.BindingFailureConditions = append(m.BindingFailureConditions, string(dAtA[iNdEx:postIndex]))
    +			iNdEx = postIndex
    +		case 12:
    +			if wireType != 0 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field AllowMultipleAllocations", wireType)
    +			}
    +			var v int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				v |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			b := bool(v != 0)
    +			m.AllowMultipleAllocations = &b
    +		default:
    +			iNdEx = preIndex
    +			skippy, err := skipGenerated(dAtA[iNdEx:])
    +			if err != nil {
    +				return err
    +			}
    +			if (skippy < 0) || (iNdEx+skippy) < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if (iNdEx + skippy) > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			iNdEx += skippy
    +		}
    +	}
    +
    +	if iNdEx > l {
    +		return io.ErrUnexpectedEOF
    +	}
    +	return nil
    +}
    +func (m *DeviceAllocationConfiguration) Unmarshal(dAtA []byte) error {
    +	l := len(dAtA)
    +	iNdEx := 0
    +	for iNdEx < l {
    +		preIndex := iNdEx
    +		var wire uint64
    +		for shift := uint(0); ; shift += 7 {
    +			if shift >= 64 {
    +				return ErrIntOverflowGenerated
    +			}
    +			if iNdEx >= l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			b := dAtA[iNdEx]
    +			iNdEx++
    +			wire |= uint64(b&0x7F) << shift
    +			if b < 0x80 {
    +				break
    +			}
    +		}
    +		fieldNum := int32(wire >> 3)
    +		wireType := int(wire & 0x7)
    +		if wireType == 4 {
    +			return fmt.Errorf("proto: DeviceAllocationConfiguration: wiretype end group for non-group")
    +		}
    +		if fieldNum <= 0 {
    +			return fmt.Errorf("proto: DeviceAllocationConfiguration: illegal tag %d (wire type %d)", fieldNum, wire)
    +		}
    +		switch fieldNum {
    +		case 1:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Source", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.Source = AllocationConfigSource(dAtA[iNdEx:postIndex])
    +			iNdEx = postIndex
    +		case 2:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Requests", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.Requests = append(m.Requests, string(dAtA[iNdEx:postIndex]))
    +			iNdEx = postIndex
    +		case 3:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field DeviceConfiguration", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			if err := m.DeviceConfiguration.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		default:
    +			iNdEx = preIndex
    +			skippy, err := skipGenerated(dAtA[iNdEx:])
    +			if err != nil {
    +				return err
    +			}
    +			if (skippy < 0) || (iNdEx+skippy) < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if (iNdEx + skippy) > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			iNdEx += skippy
    +		}
    +	}
    +
    +	if iNdEx > l {
    +		return io.ErrUnexpectedEOF
    +	}
    +	return nil
    +}
    +func (m *DeviceAllocationResult) Unmarshal(dAtA []byte) error {
    +	l := len(dAtA)
    +	iNdEx := 0
    +	for iNdEx < l {
    +		preIndex := iNdEx
    +		var wire uint64
    +		for shift := uint(0); ; shift += 7 {
    +			if shift >= 64 {
    +				return ErrIntOverflowGenerated
    +			}
    +			if iNdEx >= l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			b := dAtA[iNdEx]
    +			iNdEx++
    +			wire |= uint64(b&0x7F) << shift
    +			if b < 0x80 {
    +				break
    +			}
    +		}
    +		fieldNum := int32(wire >> 3)
    +		wireType := int(wire & 0x7)
    +		if wireType == 4 {
    +			return fmt.Errorf("proto: DeviceAllocationResult: wiretype end group for non-group")
    +		}
    +		if fieldNum <= 0 {
    +			return fmt.Errorf("proto: DeviceAllocationResult: illegal tag %d (wire type %d)", fieldNum, wire)
    +		}
    +		switch fieldNum {
    +		case 1:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Results", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.Results = append(m.Results, DeviceRequestAllocationResult{})
    +			if err := m.Results[len(m.Results)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		case 2:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Config", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.Config = append(m.Config, DeviceAllocationConfiguration{})
    +			if err := m.Config[len(m.Config)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		default:
    +			iNdEx = preIndex
    +			skippy, err := skipGenerated(dAtA[iNdEx:])
    +			if err != nil {
    +				return err
    +			}
    +			if (skippy < 0) || (iNdEx+skippy) < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if (iNdEx + skippy) > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			iNdEx += skippy
    +		}
    +	}
    +
    +	if iNdEx > l {
    +		return io.ErrUnexpectedEOF
    +	}
    +	return nil
    +}
    +func (m *DeviceAttribute) Unmarshal(dAtA []byte) error {
    +	l := len(dAtA)
    +	iNdEx := 0
    +	for iNdEx < l {
    +		preIndex := iNdEx
    +		var wire uint64
    +		for shift := uint(0); ; shift += 7 {
    +			if shift >= 64 {
    +				return ErrIntOverflowGenerated
    +			}
    +			if iNdEx >= l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			b := dAtA[iNdEx]
    +			iNdEx++
    +			wire |= uint64(b&0x7F) << shift
    +			if b < 0x80 {
    +				break
    +			}
    +		}
    +		fieldNum := int32(wire >> 3)
    +		wireType := int(wire & 0x7)
    +		if wireType == 4 {
    +			return fmt.Errorf("proto: DeviceAttribute: wiretype end group for non-group")
    +		}
    +		if fieldNum <= 0 {
    +			return fmt.Errorf("proto: DeviceAttribute: illegal tag %d (wire type %d)", fieldNum, wire)
    +		}
    +		switch fieldNum {
    +		case 2:
    +			if wireType != 0 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field IntValue", wireType)
    +			}
    +			var v int64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				v |= int64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			m.IntValue = &v
    +		case 3:
    +			if wireType != 0 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field BoolValue", wireType)
    +			}
    +			var v int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				v |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			b := bool(v != 0)
    +			m.BoolValue = &b
    +		case 4:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field StringValue", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			s := string(dAtA[iNdEx:postIndex])
    +			m.StringValue = &s
    +			iNdEx = postIndex
    +		case 5:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field VersionValue", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			s := string(dAtA[iNdEx:postIndex])
    +			m.VersionValue = &s
    +			iNdEx = postIndex
    +		default:
    +			iNdEx = preIndex
    +			skippy, err := skipGenerated(dAtA[iNdEx:])
    +			if err != nil {
    +				return err
    +			}
    +			if (skippy < 0) || (iNdEx+skippy) < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if (iNdEx + skippy) > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			iNdEx += skippy
    +		}
    +	}
    +
    +	if iNdEx > l {
    +		return io.ErrUnexpectedEOF
    +	}
    +	return nil
    +}
    +func (m *DeviceCapacity) Unmarshal(dAtA []byte) error {
    +	l := len(dAtA)
    +	iNdEx := 0
    +	for iNdEx < l {
    +		preIndex := iNdEx
    +		var wire uint64
    +		for shift := uint(0); ; shift += 7 {
    +			if shift >= 64 {
    +				return ErrIntOverflowGenerated
    +			}
    +			if iNdEx >= l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			b := dAtA[iNdEx]
    +			iNdEx++
    +			wire |= uint64(b&0x7F) << shift
    +			if b < 0x80 {
    +				break
    +			}
    +		}
    +		fieldNum := int32(wire >> 3)
    +		wireType := int(wire & 0x7)
    +		if wireType == 4 {
    +			return fmt.Errorf("proto: DeviceCapacity: wiretype end group for non-group")
    +		}
    +		if fieldNum <= 0 {
    +			return fmt.Errorf("proto: DeviceCapacity: illegal tag %d (wire type %d)", fieldNum, wire)
    +		}
    +		switch fieldNum {
    +		case 1:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			if err := m.Value.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		case 2:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field RequestPolicy", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			if m.RequestPolicy == nil {
    +				m.RequestPolicy = &CapacityRequestPolicy{}
    +			}
    +			if err := m.RequestPolicy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		default:
    +			iNdEx = preIndex
    +			skippy, err := skipGenerated(dAtA[iNdEx:])
    +			if err != nil {
    +				return err
    +			}
    +			if (skippy < 0) || (iNdEx+skippy) < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if (iNdEx + skippy) > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			iNdEx += skippy
    +		}
    +	}
    +
    +	if iNdEx > l {
    +		return io.ErrUnexpectedEOF
    +	}
    +	return nil
    +}
    +func (m *DeviceClaim) Unmarshal(dAtA []byte) error {
    +	l := len(dAtA)
    +	iNdEx := 0
    +	for iNdEx < l {
    +		preIndex := iNdEx
    +		var wire uint64
    +		for shift := uint(0); ; shift += 7 {
    +			if shift >= 64 {
    +				return ErrIntOverflowGenerated
    +			}
    +			if iNdEx >= l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			b := dAtA[iNdEx]
    +			iNdEx++
    +			wire |= uint64(b&0x7F) << shift
    +			if b < 0x80 {
    +				break
    +			}
    +		}
    +		fieldNum := int32(wire >> 3)
    +		wireType := int(wire & 0x7)
    +		if wireType == 4 {
    +			return fmt.Errorf("proto: DeviceClaim: wiretype end group for non-group")
    +		}
    +		if fieldNum <= 0 {
    +			return fmt.Errorf("proto: DeviceClaim: illegal tag %d (wire type %d)", fieldNum, wire)
    +		}
    +		switch fieldNum {
    +		case 1:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Requests", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.Requests = append(m.Requests, DeviceRequest{})
    +			if err := m.Requests[len(m.Requests)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		case 2:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Constraints", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.Constraints = append(m.Constraints, DeviceConstraint{})
    +			if err := m.Constraints[len(m.Constraints)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		case 3:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Config", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.Config = append(m.Config, DeviceClaimConfiguration{})
    +			if err := m.Config[len(m.Config)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		default:
    +			iNdEx = preIndex
    +			skippy, err := skipGenerated(dAtA[iNdEx:])
    +			if err != nil {
    +				return err
    +			}
    +			if (skippy < 0) || (iNdEx+skippy) < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if (iNdEx + skippy) > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			iNdEx += skippy
    +		}
    +	}
    +
    +	if iNdEx > l {
    +		return io.ErrUnexpectedEOF
    +	}
    +	return nil
    +}
    +func (m *DeviceClaimConfiguration) Unmarshal(dAtA []byte) error {
    +	l := len(dAtA)
    +	iNdEx := 0
    +	for iNdEx < l {
    +		preIndex := iNdEx
    +		var wire uint64
    +		for shift := uint(0); ; shift += 7 {
    +			if shift >= 64 {
    +				return ErrIntOverflowGenerated
    +			}
    +			if iNdEx >= l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			b := dAtA[iNdEx]
    +			iNdEx++
    +			wire |= uint64(b&0x7F) << shift
    +			if b < 0x80 {
    +				break
    +			}
    +		}
    +		fieldNum := int32(wire >> 3)
    +		wireType := int(wire & 0x7)
    +		if wireType == 4 {
    +			return fmt.Errorf("proto: DeviceClaimConfiguration: wiretype end group for non-group")
    +		}
    +		if fieldNum <= 0 {
    +			return fmt.Errorf("proto: DeviceClaimConfiguration: illegal tag %d (wire type %d)", fieldNum, wire)
    +		}
    +		switch fieldNum {
    +		case 1:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Requests", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.Requests = append(m.Requests, string(dAtA[iNdEx:postIndex]))
    +			iNdEx = postIndex
    +		case 2:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field DeviceConfiguration", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			if err := m.DeviceConfiguration.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		default:
    +			iNdEx = preIndex
    +			skippy, err := skipGenerated(dAtA[iNdEx:])
    +			if err != nil {
    +				return err
    +			}
    +			if (skippy < 0) || (iNdEx+skippy) < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if (iNdEx + skippy) > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			iNdEx += skippy
    +		}
    +	}
    +
    +	if iNdEx > l {
    +		return io.ErrUnexpectedEOF
    +	}
    +	return nil
    +}
    +func (m *DeviceClass) Unmarshal(dAtA []byte) error {
    +	l := len(dAtA)
    +	iNdEx := 0
    +	for iNdEx < l {
    +		preIndex := iNdEx
    +		var wire uint64
    +		for shift := uint(0); ; shift += 7 {
    +			if shift >= 64 {
    +				return ErrIntOverflowGenerated
    +			}
    +			if iNdEx >= l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			b := dAtA[iNdEx]
    +			iNdEx++
    +			wire |= uint64(b&0x7F) << shift
    +			if b < 0x80 {
    +				break
    +			}
    +		}
    +		fieldNum := int32(wire >> 3)
    +		wireType := int(wire & 0x7)
    +		if wireType == 4 {
    +			return fmt.Errorf("proto: DeviceClass: wiretype end group for non-group")
    +		}
    +		if fieldNum <= 0 {
    +			return fmt.Errorf("proto: DeviceClass: illegal tag %d (wire type %d)", fieldNum, wire)
    +		}
    +		switch fieldNum {
    +		case 1:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		case 2:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		default:
    +			iNdEx = preIndex
    +			skippy, err := skipGenerated(dAtA[iNdEx:])
    +			if err != nil {
    +				return err
    +			}
    +			if (skippy < 0) || (iNdEx+skippy) < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if (iNdEx + skippy) > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			iNdEx += skippy
    +		}
    +	}
    +
    +	if iNdEx > l {
    +		return io.ErrUnexpectedEOF
    +	}
    +	return nil
    +}
    +func (m *DeviceClassConfiguration) Unmarshal(dAtA []byte) error {
    +	l := len(dAtA)
    +	iNdEx := 0
    +	for iNdEx < l {
    +		preIndex := iNdEx
    +		var wire uint64
    +		for shift := uint(0); ; shift += 7 {
    +			if shift >= 64 {
    +				return ErrIntOverflowGenerated
    +			}
    +			if iNdEx >= l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			b := dAtA[iNdEx]
    +			iNdEx++
    +			wire |= uint64(b&0x7F) << shift
    +			if b < 0x80 {
    +				break
    +			}
    +		}
    +		fieldNum := int32(wire >> 3)
    +		wireType := int(wire & 0x7)
    +		if wireType == 4 {
    +			return fmt.Errorf("proto: DeviceClassConfiguration: wiretype end group for non-group")
    +		}
    +		if fieldNum <= 0 {
    +			return fmt.Errorf("proto: DeviceClassConfiguration: illegal tag %d (wire type %d)", fieldNum, wire)
    +		}
    +		switch fieldNum {
    +		case 1:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field DeviceConfiguration", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			if err := m.DeviceConfiguration.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		default:
    +			iNdEx = preIndex
    +			skippy, err := skipGenerated(dAtA[iNdEx:])
    +			if err != nil {
    +				return err
    +			}
    +			if (skippy < 0) || (iNdEx+skippy) < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if (iNdEx + skippy) > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			iNdEx += skippy
    +		}
    +	}
    +
    +	if iNdEx > l {
    +		return io.ErrUnexpectedEOF
    +	}
    +	return nil
    +}
    +func (m *DeviceClassList) Unmarshal(dAtA []byte) error {
    +	l := len(dAtA)
    +	iNdEx := 0
    +	for iNdEx < l {
    +		preIndex := iNdEx
    +		var wire uint64
    +		for shift := uint(0); ; shift += 7 {
    +			if shift >= 64 {
    +				return ErrIntOverflowGenerated
    +			}
    +			if iNdEx >= l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			b := dAtA[iNdEx]
    +			iNdEx++
    +			wire |= uint64(b&0x7F) << shift
    +			if b < 0x80 {
    +				break
    +			}
    +		}
    +		fieldNum := int32(wire >> 3)
    +		wireType := int(wire & 0x7)
    +		if wireType == 4 {
    +			return fmt.Errorf("proto: DeviceClassList: wiretype end group for non-group")
    +		}
    +		if fieldNum <= 0 {
    +			return fmt.Errorf("proto: DeviceClassList: illegal tag %d (wire type %d)", fieldNum, wire)
    +		}
    +		switch fieldNum {
    +		case 1:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		case 2:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.Items = append(m.Items, DeviceClass{})
    +			if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		default:
    +			iNdEx = preIndex
    +			skippy, err := skipGenerated(dAtA[iNdEx:])
    +			if err != nil {
    +				return err
    +			}
    +			if (skippy < 0) || (iNdEx+skippy) < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if (iNdEx + skippy) > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			iNdEx += skippy
    +		}
    +	}
    +
    +	if iNdEx > l {
    +		return io.ErrUnexpectedEOF
    +	}
    +	return nil
    +}
    +func (m *DeviceClassSpec) Unmarshal(dAtA []byte) error {
    +	l := len(dAtA)
    +	iNdEx := 0
    +	for iNdEx < l {
    +		preIndex := iNdEx
    +		var wire uint64
    +		for shift := uint(0); ; shift += 7 {
    +			if shift >= 64 {
    +				return ErrIntOverflowGenerated
    +			}
    +			if iNdEx >= l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			b := dAtA[iNdEx]
    +			iNdEx++
    +			wire |= uint64(b&0x7F) << shift
    +			if b < 0x80 {
    +				break
    +			}
    +		}
    +		fieldNum := int32(wire >> 3)
    +		wireType := int(wire & 0x7)
    +		if wireType == 4 {
    +			return fmt.Errorf("proto: DeviceClassSpec: wiretype end group for non-group")
    +		}
    +		if fieldNum <= 0 {
    +			return fmt.Errorf("proto: DeviceClassSpec: illegal tag %d (wire type %d)", fieldNum, wire)
    +		}
    +		switch fieldNum {
    +		case 1:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Selectors", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.Selectors = append(m.Selectors, DeviceSelector{})
    +			if err := m.Selectors[len(m.Selectors)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		case 2:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Config", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.Config = append(m.Config, DeviceClassConfiguration{})
    +			if err := m.Config[len(m.Config)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		case 4:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field ExtendedResourceName", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			s := string(dAtA[iNdEx:postIndex])
    +			m.ExtendedResourceName = &s
    +			iNdEx = postIndex
    +		default:
    +			iNdEx = preIndex
    +			skippy, err := skipGenerated(dAtA[iNdEx:])
    +			if err != nil {
    +				return err
    +			}
    +			if (skippy < 0) || (iNdEx+skippy) < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if (iNdEx + skippy) > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			iNdEx += skippy
    +		}
    +	}
    +
    +	if iNdEx > l {
    +		return io.ErrUnexpectedEOF
    +	}
    +	return nil
    +}
    +func (m *DeviceConfiguration) Unmarshal(dAtA []byte) error {
    +	l := len(dAtA)
    +	iNdEx := 0
    +	for iNdEx < l {
    +		preIndex := iNdEx
    +		var wire uint64
    +		for shift := uint(0); ; shift += 7 {
    +			if shift >= 64 {
    +				return ErrIntOverflowGenerated
    +			}
    +			if iNdEx >= l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			b := dAtA[iNdEx]
    +			iNdEx++
    +			wire |= uint64(b&0x7F) << shift
    +			if b < 0x80 {
    +				break
    +			}
    +		}
    +		fieldNum := int32(wire >> 3)
    +		wireType := int(wire & 0x7)
    +		if wireType == 4 {
    +			return fmt.Errorf("proto: DeviceConfiguration: wiretype end group for non-group")
    +		}
    +		if fieldNum <= 0 {
    +			return fmt.Errorf("proto: DeviceConfiguration: illegal tag %d (wire type %d)", fieldNum, wire)
    +		}
    +		switch fieldNum {
    +		case 1:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Opaque", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			if m.Opaque == nil {
    +				m.Opaque = &OpaqueDeviceConfiguration{}
    +			}
    +			if err := m.Opaque.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		default:
    +			iNdEx = preIndex
    +			skippy, err := skipGenerated(dAtA[iNdEx:])
    +			if err != nil {
    +				return err
    +			}
    +			if (skippy < 0) || (iNdEx+skippy) < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if (iNdEx + skippy) > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			iNdEx += skippy
    +		}
    +	}
    +
    +	if iNdEx > l {
    +		return io.ErrUnexpectedEOF
    +	}
    +	return nil
    +}
    +func (m *DeviceConstraint) Unmarshal(dAtA []byte) error {
    +	l := len(dAtA)
    +	iNdEx := 0
    +	for iNdEx < l {
    +		preIndex := iNdEx
    +		var wire uint64
    +		for shift := uint(0); ; shift += 7 {
    +			if shift >= 64 {
    +				return ErrIntOverflowGenerated
    +			}
    +			if iNdEx >= l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			b := dAtA[iNdEx]
    +			iNdEx++
    +			wire |= uint64(b&0x7F) << shift
    +			if b < 0x80 {
    +				break
    +			}
    +		}
    +		fieldNum := int32(wire >> 3)
    +		wireType := int(wire & 0x7)
    +		if wireType == 4 {
    +			return fmt.Errorf("proto: DeviceConstraint: wiretype end group for non-group")
    +		}
    +		if fieldNum <= 0 {
    +			return fmt.Errorf("proto: DeviceConstraint: illegal tag %d (wire type %d)", fieldNum, wire)
    +		}
    +		switch fieldNum {
    +		case 1:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Requests", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.Requests = append(m.Requests, string(dAtA[iNdEx:postIndex]))
    +			iNdEx = postIndex
    +		case 2:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field MatchAttribute", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			s := FullyQualifiedName(dAtA[iNdEx:postIndex])
    +			m.MatchAttribute = &s
    +			iNdEx = postIndex
    +		case 3:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field DistinctAttribute", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			s := FullyQualifiedName(dAtA[iNdEx:postIndex])
    +			m.DistinctAttribute = &s
    +			iNdEx = postIndex
    +		default:
    +			iNdEx = preIndex
    +			skippy, err := skipGenerated(dAtA[iNdEx:])
    +			if err != nil {
    +				return err
    +			}
    +			if (skippy < 0) || (iNdEx+skippy) < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if (iNdEx + skippy) > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			iNdEx += skippy
    +		}
    +	}
    +
    +	if iNdEx > l {
    +		return io.ErrUnexpectedEOF
    +	}
    +	return nil
    +}
    +func (m *DeviceCounterConsumption) Unmarshal(dAtA []byte) error {
    +	l := len(dAtA)
    +	iNdEx := 0
    +	for iNdEx < l {
    +		preIndex := iNdEx
    +		var wire uint64
    +		for shift := uint(0); ; shift += 7 {
    +			if shift >= 64 {
    +				return ErrIntOverflowGenerated
    +			}
    +			if iNdEx >= l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			b := dAtA[iNdEx]
    +			iNdEx++
    +			wire |= uint64(b&0x7F) << shift
    +			if b < 0x80 {
    +				break
    +			}
    +		}
    +		fieldNum := int32(wire >> 3)
    +		wireType := int(wire & 0x7)
    +		if wireType == 4 {
    +			return fmt.Errorf("proto: DeviceCounterConsumption: wiretype end group for non-group")
    +		}
    +		if fieldNum <= 0 {
    +			return fmt.Errorf("proto: DeviceCounterConsumption: illegal tag %d (wire type %d)", fieldNum, wire)
    +		}
    +		switch fieldNum {
    +		case 1:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field CounterSet", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.CounterSet = string(dAtA[iNdEx:postIndex])
    +			iNdEx = postIndex
    +		case 2:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Counters", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			if m.Counters == nil {
    +				m.Counters = make(map[string]Counter)
    +			}
    +			var mapkey string
    +			mapvalue := &Counter{}
    +			for iNdEx < postIndex {
    +				entryPreIndex := iNdEx
    +				var wire uint64
    +				for shift := uint(0); ; shift += 7 {
    +					if shift >= 64 {
    +						return ErrIntOverflowGenerated
    +					}
    +					if iNdEx >= l {
    +						return io.ErrUnexpectedEOF
    +					}
    +					b := dAtA[iNdEx]
    +					iNdEx++
    +					wire |= uint64(b&0x7F) << shift
    +					if b < 0x80 {
    +						break
    +					}
    +				}
    +				fieldNum := int32(wire >> 3)
    +				if fieldNum == 1 {
    +					var stringLenmapkey uint64
    +					for shift := uint(0); ; shift += 7 {
    +						if shift >= 64 {
    +							return ErrIntOverflowGenerated
    +						}
    +						if iNdEx >= l {
    +							return io.ErrUnexpectedEOF
    +						}
    +						b := dAtA[iNdEx]
    +						iNdEx++
    +						stringLenmapkey |= uint64(b&0x7F) << shift
    +						if b < 0x80 {
    +							break
    +						}
    +					}
    +					intStringLenmapkey := int(stringLenmapkey)
    +					if intStringLenmapkey < 0 {
    +						return ErrInvalidLengthGenerated
    +					}
    +					postStringIndexmapkey := iNdEx + intStringLenmapkey
    +					if postStringIndexmapkey < 0 {
    +						return ErrInvalidLengthGenerated
    +					}
    +					if postStringIndexmapkey > l {
    +						return io.ErrUnexpectedEOF
    +					}
    +					mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
    +					iNdEx = postStringIndexmapkey
    +				} else if fieldNum == 2 {
    +					var mapmsglen int
    +					for shift := uint(0); ; shift += 7 {
    +						if shift >= 64 {
    +							return ErrIntOverflowGenerated
    +						}
    +						if iNdEx >= l {
    +							return io.ErrUnexpectedEOF
    +						}
    +						b := dAtA[iNdEx]
    +						iNdEx++
    +						mapmsglen |= int(b&0x7F) << shift
    +						if b < 0x80 {
    +							break
    +						}
    +					}
    +					if mapmsglen < 0 {
    +						return ErrInvalidLengthGenerated
    +					}
    +					postmsgIndex := iNdEx + mapmsglen
    +					if postmsgIndex < 0 {
    +						return ErrInvalidLengthGenerated
    +					}
    +					if postmsgIndex > l {
    +						return io.ErrUnexpectedEOF
    +					}
    +					mapvalue = &Counter{}
    +					if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil {
    +						return err
    +					}
    +					iNdEx = postmsgIndex
    +				} else {
    +					iNdEx = entryPreIndex
    +					skippy, err := skipGenerated(dAtA[iNdEx:])
    +					if err != nil {
    +						return err
    +					}
    +					if (skippy < 0) || (iNdEx+skippy) < 0 {
    +						return ErrInvalidLengthGenerated
    +					}
    +					if (iNdEx + skippy) > postIndex {
    +						return io.ErrUnexpectedEOF
    +					}
    +					iNdEx += skippy
    +				}
    +			}
    +			m.Counters[mapkey] = *mapvalue
    +			iNdEx = postIndex
    +		default:
    +			iNdEx = preIndex
    +			skippy, err := skipGenerated(dAtA[iNdEx:])
    +			if err != nil {
    +				return err
    +			}
    +			if (skippy < 0) || (iNdEx+skippy) < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if (iNdEx + skippy) > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			iNdEx += skippy
    +		}
    +	}
    +
    +	if iNdEx > l {
    +		return io.ErrUnexpectedEOF
    +	}
    +	return nil
    +}
    +func (m *DeviceRequest) Unmarshal(dAtA []byte) error {
    +	l := len(dAtA)
    +	iNdEx := 0
    +	for iNdEx < l {
    +		preIndex := iNdEx
    +		var wire uint64
    +		for shift := uint(0); ; shift += 7 {
    +			if shift >= 64 {
    +				return ErrIntOverflowGenerated
    +			}
    +			if iNdEx >= l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			b := dAtA[iNdEx]
    +			iNdEx++
    +			wire |= uint64(b&0x7F) << shift
    +			if b < 0x80 {
    +				break
    +			}
    +		}
    +		fieldNum := int32(wire >> 3)
    +		wireType := int(wire & 0x7)
    +		if wireType == 4 {
    +			return fmt.Errorf("proto: DeviceRequest: wiretype end group for non-group")
    +		}
    +		if fieldNum <= 0 {
    +			return fmt.Errorf("proto: DeviceRequest: illegal tag %d (wire type %d)", fieldNum, wire)
    +		}
    +		switch fieldNum {
    +		case 1:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.Name = string(dAtA[iNdEx:postIndex])
    +			iNdEx = postIndex
    +		case 2:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Exactly", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			if m.Exactly == nil {
    +				m.Exactly = &ExactDeviceRequest{}
    +			}
    +			if err := m.Exactly.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		case 3:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field FirstAvailable", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.FirstAvailable = append(m.FirstAvailable, DeviceSubRequest{})
    +			if err := m.FirstAvailable[len(m.FirstAvailable)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		default:
    +			iNdEx = preIndex
    +			skippy, err := skipGenerated(dAtA[iNdEx:])
    +			if err != nil {
    +				return err
    +			}
    +			if (skippy < 0) || (iNdEx+skippy) < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if (iNdEx + skippy) > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			iNdEx += skippy
    +		}
    +	}
    +
    +	if iNdEx > l {
    +		return io.ErrUnexpectedEOF
    +	}
    +	return nil
    +}
    +// Unmarshal decodes the protobuf wire format in dAtA into m.
    +// Machine-generated by protoc-gen-gogo — do not edit by hand; regenerate instead.
    +// Wire fields: 1=Request, 2=Driver, 3=Pool, 4=Device (strings),
    +// 5=AdminAccess (*bool from varint), 6=Tolerations (repeated message),
    +// 7=BindingConditions, 8=BindingFailureConditions (repeated string),
    +// 9=ShareID (*UID), 10=ConsumedCapacity (map<QualifiedName, resource.Quantity>).
    +func (m *DeviceRequestAllocationResult) Unmarshal(dAtA []byte) error {
    +	l := len(dAtA)
    +	iNdEx := 0
    +	for iNdEx < l {
    +		preIndex := iNdEx
    +		var wire uint64
    +		for shift := uint(0); ; shift += 7 {
    +			if shift >= 64 {
    +				return ErrIntOverflowGenerated
    +			}
    +			if iNdEx >= l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			b := dAtA[iNdEx]
    +			iNdEx++
    +			wire |= uint64(b&0x7F) << shift
    +			if b < 0x80 {
    +				break
    +			}
    +		}
    +		fieldNum := int32(wire >> 3)
    +		wireType := int(wire & 0x7)
    +		if wireType == 4 {
    +			return fmt.Errorf("proto: DeviceRequestAllocationResult: wiretype end group for non-group")
    +		}
    +		if fieldNum <= 0 {
    +			return fmt.Errorf("proto: DeviceRequestAllocationResult: illegal tag %d (wire type %d)", fieldNum, wire)
    +		}
    +		switch fieldNum {
    +		// Field 1: Request — length-delimited string.
    +		case 1:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Request", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.Request = string(dAtA[iNdEx:postIndex])
    +			iNdEx = postIndex
    +		// Field 2: Driver — length-delimited string.
    +		case 2:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Driver", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.Driver = string(dAtA[iNdEx:postIndex])
    +			iNdEx = postIndex
    +		// Field 3: Pool — length-delimited string.
    +		case 3:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Pool", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.Pool = string(dAtA[iNdEx:postIndex])
    +			iNdEx = postIndex
    +		// Field 4: Device — length-delimited string.
    +		case 4:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Device", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.Device = string(dAtA[iNdEx:postIndex])
    +			iNdEx = postIndex
    +		// Field 5: AdminAccess — varint decoded to a *bool (any nonzero value is true).
    +		case 5:
    +			if wireType != 0 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field AdminAccess", wireType)
    +			}
    +			var v int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				v |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			b := bool(v != 0)
    +			m.AdminAccess = &b
    +		// Field 6: Tolerations — repeated DeviceToleration; one element appended per wire occurrence.
    +		case 6:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Tolerations", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.Tolerations = append(m.Tolerations, DeviceToleration{})
    +			if err := m.Tolerations[len(m.Tolerations)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		// Field 7: BindingConditions — repeated string.
    +		case 7:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field BindingConditions", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.BindingConditions = append(m.BindingConditions, string(dAtA[iNdEx:postIndex]))
    +			iNdEx = postIndex
    +		// Field 8: BindingFailureConditions — repeated string.
    +		case 8:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field BindingFailureConditions", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.BindingFailureConditions = append(m.BindingFailureConditions, string(dAtA[iNdEx:postIndex]))
    +			iNdEx = postIndex
    +		// Field 9: ShareID — string decoded into an optional apimachinery UID (*UID).
    +		case 9:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field ShareID", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			s := k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex])
    +			m.ShareID = &s
    +			iNdEx = postIndex
    +		// Field 10: ConsumedCapacity — map entry: inner field 1 is the key (QualifiedName),
    +		// inner field 2 is the value (resource.Quantity); unknown inner fields are skipped.
    +		case 10:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field ConsumedCapacity", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			if m.ConsumedCapacity == nil {
    +				m.ConsumedCapacity = make(map[QualifiedName]resource.Quantity)
    +			}
    +			var mapkey QualifiedName
    +			mapvalue := &resource.Quantity{}
    +			for iNdEx < postIndex {
    +				entryPreIndex := iNdEx
    +				var wire uint64
    +				for shift := uint(0); ; shift += 7 {
    +					if shift >= 64 {
    +						return ErrIntOverflowGenerated
    +					}
    +					if iNdEx >= l {
    +						return io.ErrUnexpectedEOF
    +					}
    +					b := dAtA[iNdEx]
    +					iNdEx++
    +					wire |= uint64(b&0x7F) << shift
    +					if b < 0x80 {
    +						break
    +					}
    +				}
    +				fieldNum := int32(wire >> 3)
    +				if fieldNum == 1 {
    +					var stringLenmapkey uint64
    +					for shift := uint(0); ; shift += 7 {
    +						if shift >= 64 {
    +							return ErrIntOverflowGenerated
    +						}
    +						if iNdEx >= l {
    +							return io.ErrUnexpectedEOF
    +						}
    +						b := dAtA[iNdEx]
    +						iNdEx++
    +						stringLenmapkey |= uint64(b&0x7F) << shift
    +						if b < 0x80 {
    +							break
    +						}
    +					}
    +					intStringLenmapkey := int(stringLenmapkey)
    +					if intStringLenmapkey < 0 {
    +						return ErrInvalidLengthGenerated
    +					}
    +					postStringIndexmapkey := iNdEx + intStringLenmapkey
    +					if postStringIndexmapkey < 0 {
    +						return ErrInvalidLengthGenerated
    +					}
    +					if postStringIndexmapkey > l {
    +						return io.ErrUnexpectedEOF
    +					}
    +					mapkey = QualifiedName(dAtA[iNdEx:postStringIndexmapkey])
    +					iNdEx = postStringIndexmapkey
    +				} else if fieldNum == 2 {
    +					var mapmsglen int
    +					for shift := uint(0); ; shift += 7 {
    +						if shift >= 64 {
    +							return ErrIntOverflowGenerated
    +						}
    +						if iNdEx >= l {
    +							return io.ErrUnexpectedEOF
    +						}
    +						b := dAtA[iNdEx]
    +						iNdEx++
    +						mapmsglen |= int(b&0x7F) << shift
    +						if b < 0x80 {
    +							break
    +						}
    +					}
    +					if mapmsglen < 0 {
    +						return ErrInvalidLengthGenerated
    +					}
    +					postmsgIndex := iNdEx + mapmsglen
    +					if postmsgIndex < 0 {
    +						return ErrInvalidLengthGenerated
    +					}
    +					if postmsgIndex > l {
    +						return io.ErrUnexpectedEOF
    +					}
    +					mapvalue = &resource.Quantity{}
    +					if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil {
    +						return err
    +					}
    +					iNdEx = postmsgIndex
    +				} else {
    +					iNdEx = entryPreIndex
    +					skippy, err := skipGenerated(dAtA[iNdEx:])
    +					if err != nil {
    +						return err
    +					}
    +					if (skippy < 0) || (iNdEx+skippy) < 0 {
    +						return ErrInvalidLengthGenerated
    +					}
    +					if (iNdEx + skippy) > postIndex {
    +						return io.ErrUnexpectedEOF
    +					}
    +					iNdEx += skippy
    +				}
    +			}
    +			m.ConsumedCapacity[QualifiedName(mapkey)] = *mapvalue
    +			iNdEx = postIndex
    +		// Unknown fields are skipped (forward compatibility).
    +		default:
    +			iNdEx = preIndex
    +			skippy, err := skipGenerated(dAtA[iNdEx:])
    +			if err != nil {
    +				return err
    +			}
    +			if (skippy < 0) || (iNdEx+skippy) < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if (iNdEx + skippy) > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			iNdEx += skippy
    +		}
    +	}
    +
    +	if iNdEx > l {
    +		return io.ErrUnexpectedEOF
    +	}
    +	return nil
    +}
    +// Unmarshal decodes the protobuf wire format in dAtA into m.
    +// Machine-generated by protoc-gen-gogo — do not edit by hand; regenerate instead.
    +// Wire fields: 1=CEL (embedded CELDeviceSelector, lazily allocated).
    +func (m *DeviceSelector) Unmarshal(dAtA []byte) error {
    +	l := len(dAtA)
    +	iNdEx := 0
    +	for iNdEx < l {
    +		preIndex := iNdEx
    +		var wire uint64
    +		for shift := uint(0); ; shift += 7 {
    +			if shift >= 64 {
    +				return ErrIntOverflowGenerated
    +			}
    +			if iNdEx >= l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			b := dAtA[iNdEx]
    +			iNdEx++
    +			wire |= uint64(b&0x7F) << shift
    +			if b < 0x80 {
    +				break
    +			}
    +		}
    +		fieldNum := int32(wire >> 3)
    +		wireType := int(wire & 0x7)
    +		if wireType == 4 {
    +			return fmt.Errorf("proto: DeviceSelector: wiretype end group for non-group")
    +		}
    +		if fieldNum <= 0 {
    +			return fmt.Errorf("proto: DeviceSelector: illegal tag %d (wire type %d)", fieldNum, wire)
    +		}
    +		switch fieldNum {
    +		// Field 1: CEL — embedded CELDeviceSelector message.
    +		case 1:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field CEL", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			if m.CEL == nil {
    +				m.CEL = &CELDeviceSelector{}
    +			}
    +			if err := m.CEL.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		// Unknown fields are skipped (forward compatibility).
    +		default:
    +			iNdEx = preIndex
    +			skippy, err := skipGenerated(dAtA[iNdEx:])
    +			if err != nil {
    +				return err
    +			}
    +			if (skippy < 0) || (iNdEx+skippy) < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if (iNdEx + skippy) > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			iNdEx += skippy
    +		}
    +	}
    +
    +	if iNdEx > l {
    +		return io.ErrUnexpectedEOF
    +	}
    +	return nil
    +}
    +// Unmarshal decodes the protobuf wire format in dAtA into m.
    +// Machine-generated by protoc-gen-gogo — do not edit by hand; regenerate instead.
    +// Wire fields: 1=Name, 2=DeviceClassName (strings), 3=Selectors (repeated message),
    +// 4=AllocationMode (string enum), 5=Count (int64 varint), 6=Tolerations (repeated message),
    +// 7=Capacity (embedded CapacityRequirements, lazily allocated).
    +func (m *DeviceSubRequest) Unmarshal(dAtA []byte) error {
    +	l := len(dAtA)
    +	iNdEx := 0
    +	for iNdEx < l {
    +		preIndex := iNdEx
    +		var wire uint64
    +		for shift := uint(0); ; shift += 7 {
    +			if shift >= 64 {
    +				return ErrIntOverflowGenerated
    +			}
    +			if iNdEx >= l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			b := dAtA[iNdEx]
    +			iNdEx++
    +			wire |= uint64(b&0x7F) << shift
    +			if b < 0x80 {
    +				break
    +			}
    +		}
    +		fieldNum := int32(wire >> 3)
    +		wireType := int(wire & 0x7)
    +		if wireType == 4 {
    +			return fmt.Errorf("proto: DeviceSubRequest: wiretype end group for non-group")
    +		}
    +		if fieldNum <= 0 {
    +			return fmt.Errorf("proto: DeviceSubRequest: illegal tag %d (wire type %d)", fieldNum, wire)
    +		}
    +		switch fieldNum {
    +		// Field 1: Name — length-delimited string.
    +		case 1:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.Name = string(dAtA[iNdEx:postIndex])
    +			iNdEx = postIndex
    +		// Field 2: DeviceClassName — length-delimited string.
    +		case 2:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field DeviceClassName", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.DeviceClassName = string(dAtA[iNdEx:postIndex])
    +			iNdEx = postIndex
    +		// Field 3: Selectors — repeated DeviceSelector; one element appended per wire occurrence.
    +		case 3:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Selectors", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.Selectors = append(m.Selectors, DeviceSelector{})
    +			if err := m.Selectors[len(m.Selectors)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		// Field 4: AllocationMode — string-backed enum DeviceAllocationMode.
    +		case 4:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field AllocationMode", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.AllocationMode = DeviceAllocationMode(dAtA[iNdEx:postIndex])
    +			iNdEx = postIndex
    +		// Field 5: Count — int64 varint accumulated directly into the field.
    +		case 5:
    +			if wireType != 0 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Count", wireType)
    +			}
    +			m.Count = 0
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				m.Count |= int64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +		// Field 6: Tolerations — repeated DeviceToleration; one element appended per wire occurrence.
    +		case 6:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Tolerations", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.Tolerations = append(m.Tolerations, DeviceToleration{})
    +			if err := m.Tolerations[len(m.Tolerations)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		// Field 7: Capacity — embedded CapacityRequirements message.
    +		case 7:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Capacity", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			if m.Capacity == nil {
    +				m.Capacity = &CapacityRequirements{}
    +			}
    +			if err := m.Capacity.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		// Unknown fields are skipped (forward compatibility).
    +		default:
    +			iNdEx = preIndex
    +			skippy, err := skipGenerated(dAtA[iNdEx:])
    +			if err != nil {
    +				return err
    +			}
    +			if (skippy < 0) || (iNdEx+skippy) < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if (iNdEx + skippy) > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			iNdEx += skippy
    +		}
    +	}
    +
    +	if iNdEx > l {
    +		return io.ErrUnexpectedEOF
    +	}
    +	return nil
    +}
    +func (m *DeviceTaint) Unmarshal(dAtA []byte) error {
    +	l := len(dAtA)
    +	iNdEx := 0
    +	for iNdEx < l {
    +		preIndex := iNdEx
    +		var wire uint64
    +		for shift := uint(0); ; shift += 7 {
    +			if shift >= 64 {
    +				return ErrIntOverflowGenerated
    +			}
    +			if iNdEx >= l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			b := dAtA[iNdEx]
    +			iNdEx++
    +			wire |= uint64(b&0x7F) << shift
    +			if b < 0x80 {
    +				break
    +			}
    +		}
    +		fieldNum := int32(wire >> 3)
    +		wireType := int(wire & 0x7)
    +		if wireType == 4 {
    +			return fmt.Errorf("proto: DeviceTaint: wiretype end group for non-group")
    +		}
    +		if fieldNum <= 0 {
    +			return fmt.Errorf("proto: DeviceTaint: illegal tag %d (wire type %d)", fieldNum, wire)
    +		}
    +		switch fieldNum {
    +		case 1:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.Key = string(dAtA[iNdEx:postIndex])
    +			iNdEx = postIndex
    +		case 2:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.Value = string(dAtA[iNdEx:postIndex])
    +			iNdEx = postIndex
    +		case 3:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Effect", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.Effect = DeviceTaintEffect(dAtA[iNdEx:postIndex])
    +			iNdEx = postIndex
    +		case 4:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field TimeAdded", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			if m.TimeAdded == nil {
    +				m.TimeAdded = &v1.Time{}
    +			}
    +			if err := m.TimeAdded.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		default:
    +			iNdEx = preIndex
    +			skippy, err := skipGenerated(dAtA[iNdEx:])
    +			if err != nil {
    +				return err
    +			}
    +			if (skippy < 0) || (iNdEx+skippy) < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if (iNdEx + skippy) > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			iNdEx += skippy
    +		}
    +	}
    +
    +	if iNdEx > l {
    +		return io.ErrUnexpectedEOF
    +	}
    +	return nil
    +}
    +func (m *DeviceToleration) Unmarshal(dAtA []byte) error {
    +	l := len(dAtA)
    +	iNdEx := 0
    +	for iNdEx < l {
    +		preIndex := iNdEx
    +		var wire uint64
    +		for shift := uint(0); ; shift += 7 {
    +			if shift >= 64 {
    +				return ErrIntOverflowGenerated
    +			}
    +			if iNdEx >= l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			b := dAtA[iNdEx]
    +			iNdEx++
    +			wire |= uint64(b&0x7F) << shift
    +			if b < 0x80 {
    +				break
    +			}
    +		}
    +		fieldNum := int32(wire >> 3)
    +		wireType := int(wire & 0x7)
    +		if wireType == 4 {
    +			return fmt.Errorf("proto: DeviceToleration: wiretype end group for non-group")
    +		}
    +		if fieldNum <= 0 {
    +			return fmt.Errorf("proto: DeviceToleration: illegal tag %d (wire type %d)", fieldNum, wire)
    +		}
    +		switch fieldNum {
    +		case 1:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.Key = string(dAtA[iNdEx:postIndex])
    +			iNdEx = postIndex
    +		case 2:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Operator", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.Operator = DeviceTolerationOperator(dAtA[iNdEx:postIndex])
    +			iNdEx = postIndex
    +		case 3:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.Value = string(dAtA[iNdEx:postIndex])
    +			iNdEx = postIndex
    +		case 4:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Effect", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.Effect = DeviceTaintEffect(dAtA[iNdEx:postIndex])
    +			iNdEx = postIndex
    +		case 5:
    +			if wireType != 0 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field TolerationSeconds", wireType)
    +			}
    +			var v int64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				v |= int64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			m.TolerationSeconds = &v
    +		default:
    +			iNdEx = preIndex
    +			skippy, err := skipGenerated(dAtA[iNdEx:])
    +			if err != nil {
    +				return err
    +			}
    +			if (skippy < 0) || (iNdEx+skippy) < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if (iNdEx + skippy) > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			iNdEx += skippy
    +		}
    +	}
    +
    +	if iNdEx > l {
    +		return io.ErrUnexpectedEOF
    +	}
    +	return nil
    +}
    +func (m *ExactDeviceRequest) Unmarshal(dAtA []byte) error {
    +	l := len(dAtA)
    +	iNdEx := 0
    +	for iNdEx < l {
    +		preIndex := iNdEx
    +		var wire uint64
    +		for shift := uint(0); ; shift += 7 {
    +			if shift >= 64 {
    +				return ErrIntOverflowGenerated
    +			}
    +			if iNdEx >= l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			b := dAtA[iNdEx]
    +			iNdEx++
    +			wire |= uint64(b&0x7F) << shift
    +			if b < 0x80 {
    +				break
    +			}
    +		}
    +		fieldNum := int32(wire >> 3)
    +		wireType := int(wire & 0x7)
    +		if wireType == 4 {
    +			return fmt.Errorf("proto: ExactDeviceRequest: wiretype end group for non-group")
    +		}
    +		if fieldNum <= 0 {
    +			return fmt.Errorf("proto: ExactDeviceRequest: illegal tag %d (wire type %d)", fieldNum, wire)
    +		}
    +		switch fieldNum {
    +		case 1:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field DeviceClassName", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.DeviceClassName = string(dAtA[iNdEx:postIndex])
    +			iNdEx = postIndex
    +		case 2:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Selectors", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.Selectors = append(m.Selectors, DeviceSelector{})
    +			if err := m.Selectors[len(m.Selectors)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		case 3:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field AllocationMode", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.AllocationMode = DeviceAllocationMode(dAtA[iNdEx:postIndex])
    +			iNdEx = postIndex
    +		case 4:
    +			if wireType != 0 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Count", wireType)
    +			}
    +			m.Count = 0
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				m.Count |= int64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +		case 5:
    +			if wireType != 0 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field AdminAccess", wireType)
    +			}
    +			var v int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				v |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			b := bool(v != 0)
    +			m.AdminAccess = &b
    +		case 6:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Tolerations", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.Tolerations = append(m.Tolerations, DeviceToleration{})
    +			if err := m.Tolerations[len(m.Tolerations)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		case 7:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Capacity", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			if m.Capacity == nil {
    +				m.Capacity = &CapacityRequirements{}
    +			}
    +			if err := m.Capacity.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		default:
    +			iNdEx = preIndex
    +			skippy, err := skipGenerated(dAtA[iNdEx:])
    +			if err != nil {
    +				return err
    +			}
    +			if (skippy < 0) || (iNdEx+skippy) < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if (iNdEx + skippy) > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			iNdEx += skippy
    +		}
    +	}
    +
    +	if iNdEx > l {
    +		return io.ErrUnexpectedEOF
    +	}
    +	return nil
    +}
    +func (m *NetworkDeviceData) Unmarshal(dAtA []byte) error {
    +	l := len(dAtA)
    +	iNdEx := 0
    +	for iNdEx < l {
    +		preIndex := iNdEx
    +		var wire uint64
    +		for shift := uint(0); ; shift += 7 {
    +			if shift >= 64 {
    +				return ErrIntOverflowGenerated
    +			}
    +			if iNdEx >= l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			b := dAtA[iNdEx]
    +			iNdEx++
    +			wire |= uint64(b&0x7F) << shift
    +			if b < 0x80 {
    +				break
    +			}
    +		}
    +		fieldNum := int32(wire >> 3)
    +		wireType := int(wire & 0x7)
    +		if wireType == 4 {
    +			return fmt.Errorf("proto: NetworkDeviceData: wiretype end group for non-group")
    +		}
    +		if fieldNum <= 0 {
    +			return fmt.Errorf("proto: NetworkDeviceData: illegal tag %d (wire type %d)", fieldNum, wire)
    +		}
    +		switch fieldNum {
    +		case 1:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field InterfaceName", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.InterfaceName = string(dAtA[iNdEx:postIndex])
    +			iNdEx = postIndex
    +		case 2:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field IPs", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.IPs = append(m.IPs, string(dAtA[iNdEx:postIndex]))
    +			iNdEx = postIndex
    +		case 3:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field HardwareAddress", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.HardwareAddress = string(dAtA[iNdEx:postIndex])
    +			iNdEx = postIndex
    +		default:
    +			iNdEx = preIndex
    +			skippy, err := skipGenerated(dAtA[iNdEx:])
    +			if err != nil {
    +				return err
    +			}
    +			if (skippy < 0) || (iNdEx+skippy) < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if (iNdEx + skippy) > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			iNdEx += skippy
    +		}
    +	}
    +
    +	if iNdEx > l {
    +		return io.ErrUnexpectedEOF
    +	}
    +	return nil
    +}
    +func (m *OpaqueDeviceConfiguration) Unmarshal(dAtA []byte) error {
    +	l := len(dAtA)
    +	iNdEx := 0
    +	for iNdEx < l {
    +		preIndex := iNdEx
    +		var wire uint64
    +		for shift := uint(0); ; shift += 7 {
    +			if shift >= 64 {
    +				return ErrIntOverflowGenerated
    +			}
    +			if iNdEx >= l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			b := dAtA[iNdEx]
    +			iNdEx++
    +			wire |= uint64(b&0x7F) << shift
    +			if b < 0x80 {
    +				break
    +			}
    +		}
    +		fieldNum := int32(wire >> 3)
    +		wireType := int(wire & 0x7)
    +		if wireType == 4 {
    +			return fmt.Errorf("proto: OpaqueDeviceConfiguration: wiretype end group for non-group")
    +		}
    +		if fieldNum <= 0 {
    +			return fmt.Errorf("proto: OpaqueDeviceConfiguration: illegal tag %d (wire type %d)", fieldNum, wire)
    +		}
    +		switch fieldNum {
    +		case 1:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Driver", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.Driver = string(dAtA[iNdEx:postIndex])
    +			iNdEx = postIndex
    +		case 2:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Parameters", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			if err := m.Parameters.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		default:
    +			iNdEx = preIndex
    +			skippy, err := skipGenerated(dAtA[iNdEx:])
    +			if err != nil {
    +				return err
    +			}
    +			if (skippy < 0) || (iNdEx+skippy) < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if (iNdEx + skippy) > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			iNdEx += skippy
    +		}
    +	}
    +
    +	if iNdEx > l {
    +		return io.ErrUnexpectedEOF
    +	}
    +	return nil
    +}
    +func (m *ResourceClaim) Unmarshal(dAtA []byte) error {
    +	l := len(dAtA)
    +	iNdEx := 0
    +	for iNdEx < l {
    +		preIndex := iNdEx
    +		var wire uint64
    +		for shift := uint(0); ; shift += 7 {
    +			if shift >= 64 {
    +				return ErrIntOverflowGenerated
    +			}
    +			if iNdEx >= l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			b := dAtA[iNdEx]
    +			iNdEx++
    +			wire |= uint64(b&0x7F) << shift
    +			if b < 0x80 {
    +				break
    +			}
    +		}
    +		fieldNum := int32(wire >> 3)
    +		wireType := int(wire & 0x7)
    +		if wireType == 4 {
    +			return fmt.Errorf("proto: ResourceClaim: wiretype end group for non-group")
    +		}
    +		if fieldNum <= 0 {
    +			return fmt.Errorf("proto: ResourceClaim: illegal tag %d (wire type %d)", fieldNum, wire)
    +		}
    +		switch fieldNum {
    +		case 1:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		case 2:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		case 3:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		default:
    +			iNdEx = preIndex
    +			skippy, err := skipGenerated(dAtA[iNdEx:])
    +			if err != nil {
    +				return err
    +			}
    +			if (skippy < 0) || (iNdEx+skippy) < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if (iNdEx + skippy) > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			iNdEx += skippy
    +		}
    +	}
    +
    +	if iNdEx > l {
    +		return io.ErrUnexpectedEOF
    +	}
    +	return nil
    +}
    +func (m *ResourceClaimConsumerReference) Unmarshal(dAtA []byte) error {
    +	l := len(dAtA)
    +	iNdEx := 0
    +	for iNdEx < l {
    +		preIndex := iNdEx
    +		var wire uint64
    +		for shift := uint(0); ; shift += 7 {
    +			if shift >= 64 {
    +				return ErrIntOverflowGenerated
    +			}
    +			if iNdEx >= l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			b := dAtA[iNdEx]
    +			iNdEx++
    +			wire |= uint64(b&0x7F) << shift
    +			if b < 0x80 {
    +				break
    +			}
    +		}
    +		fieldNum := int32(wire >> 3)
    +		wireType := int(wire & 0x7)
    +		if wireType == 4 {
    +			return fmt.Errorf("proto: ResourceClaimConsumerReference: wiretype end group for non-group")
    +		}
    +		if fieldNum <= 0 {
    +			return fmt.Errorf("proto: ResourceClaimConsumerReference: illegal tag %d (wire type %d)", fieldNum, wire)
    +		}
    +		switch fieldNum {
    +		case 1:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field APIGroup", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.APIGroup = string(dAtA[iNdEx:postIndex])
    +			iNdEx = postIndex
    +		case 3:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.Resource = string(dAtA[iNdEx:postIndex])
    +			iNdEx = postIndex
    +		case 4:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.Name = string(dAtA[iNdEx:postIndex])
    +			iNdEx = postIndex
    +		case 5:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.UID = k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex])
    +			iNdEx = postIndex
    +		default:
    +			iNdEx = preIndex
    +			skippy, err := skipGenerated(dAtA[iNdEx:])
    +			if err != nil {
    +				return err
    +			}
    +			if (skippy < 0) || (iNdEx+skippy) < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if (iNdEx + skippy) > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			iNdEx += skippy
    +		}
    +	}
    +
    +	if iNdEx > l {
    +		return io.ErrUnexpectedEOF
    +	}
    +	return nil
    +}
    +func (m *ResourceClaimList) Unmarshal(dAtA []byte) error {
    +	l := len(dAtA)
    +	iNdEx := 0
    +	for iNdEx < l {
    +		preIndex := iNdEx
    +		var wire uint64
    +		for shift := uint(0); ; shift += 7 {
    +			if shift >= 64 {
    +				return ErrIntOverflowGenerated
    +			}
    +			if iNdEx >= l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			b := dAtA[iNdEx]
    +			iNdEx++
    +			wire |= uint64(b&0x7F) << shift
    +			if b < 0x80 {
    +				break
    +			}
    +		}
    +		fieldNum := int32(wire >> 3)
    +		wireType := int(wire & 0x7)
    +		if wireType == 4 {
    +			return fmt.Errorf("proto: ResourceClaimList: wiretype end group for non-group")
    +		}
    +		if fieldNum <= 0 {
    +			return fmt.Errorf("proto: ResourceClaimList: illegal tag %d (wire type %d)", fieldNum, wire)
    +		}
    +		switch fieldNum {
    +		case 1:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		case 2:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.Items = append(m.Items, ResourceClaim{})
    +			if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		default:
    +			iNdEx = preIndex
    +			skippy, err := skipGenerated(dAtA[iNdEx:])
    +			if err != nil {
    +				return err
    +			}
    +			if (skippy < 0) || (iNdEx+skippy) < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if (iNdEx + skippy) > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			iNdEx += skippy
    +		}
    +	}
    +
    +	if iNdEx > l {
    +		return io.ErrUnexpectedEOF
    +	}
    +	return nil
    +}
    +func (m *ResourceClaimSpec) Unmarshal(dAtA []byte) error {
    +	l := len(dAtA)
    +	iNdEx := 0
    +	for iNdEx < l {
    +		preIndex := iNdEx
    +		var wire uint64
    +		for shift := uint(0); ; shift += 7 {
    +			if shift >= 64 {
    +				return ErrIntOverflowGenerated
    +			}
    +			if iNdEx >= l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			b := dAtA[iNdEx]
    +			iNdEx++
    +			wire |= uint64(b&0x7F) << shift
    +			if b < 0x80 {
    +				break
    +			}
    +		}
    +		fieldNum := int32(wire >> 3)
    +		wireType := int(wire & 0x7)
    +		if wireType == 4 {
    +			return fmt.Errorf("proto: ResourceClaimSpec: wiretype end group for non-group")
    +		}
    +		if fieldNum <= 0 {
    +			return fmt.Errorf("proto: ResourceClaimSpec: illegal tag %d (wire type %d)", fieldNum, wire)
    +		}
    +		switch fieldNum {
    +		case 1:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Devices", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			if err := m.Devices.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		default:
    +			iNdEx = preIndex
    +			skippy, err := skipGenerated(dAtA[iNdEx:])
    +			if err != nil {
    +				return err
    +			}
    +			if (skippy < 0) || (iNdEx+skippy) < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if (iNdEx + skippy) > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			iNdEx += skippy
    +		}
    +	}
    +
    +	if iNdEx > l {
    +		return io.ErrUnexpectedEOF
    +	}
    +	return nil
    +}
    +func (m *ResourceClaimStatus) Unmarshal(dAtA []byte) error {
    +	l := len(dAtA)
    +	iNdEx := 0
    +	for iNdEx < l {
    +		preIndex := iNdEx
    +		var wire uint64
    +		for shift := uint(0); ; shift += 7 {
    +			if shift >= 64 {
    +				return ErrIntOverflowGenerated
    +			}
    +			if iNdEx >= l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			b := dAtA[iNdEx]
    +			iNdEx++
    +			wire |= uint64(b&0x7F) << shift
    +			if b < 0x80 {
    +				break
    +			}
    +		}
    +		fieldNum := int32(wire >> 3)
    +		wireType := int(wire & 0x7)
    +		if wireType == 4 {
    +			return fmt.Errorf("proto: ResourceClaimStatus: wiretype end group for non-group")
    +		}
    +		if fieldNum <= 0 {
    +			return fmt.Errorf("proto: ResourceClaimStatus: illegal tag %d (wire type %d)", fieldNum, wire)
    +		}
    +		switch fieldNum {
    +		case 1:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Allocation", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			if m.Allocation == nil {
    +				m.Allocation = &AllocationResult{}
    +			}
    +			if err := m.Allocation.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		case 2:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field ReservedFor", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.ReservedFor = append(m.ReservedFor, ResourceClaimConsumerReference{})
    +			if err := m.ReservedFor[len(m.ReservedFor)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		case 4:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Devices", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.Devices = append(m.Devices, AllocatedDeviceStatus{})
    +			if err := m.Devices[len(m.Devices)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		default:
    +			iNdEx = preIndex
    +			skippy, err := skipGenerated(dAtA[iNdEx:])
    +			if err != nil {
    +				return err
    +			}
    +			if (skippy < 0) || (iNdEx+skippy) < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if (iNdEx + skippy) > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			iNdEx += skippy
    +		}
    +	}
    +
    +	if iNdEx > l {
    +		return io.ErrUnexpectedEOF
    +	}
    +	return nil
    +}
    +func (m *ResourceClaimTemplate) Unmarshal(dAtA []byte) error {
    +	l := len(dAtA)
    +	iNdEx := 0
    +	for iNdEx < l {
    +		preIndex := iNdEx
    +		var wire uint64
    +		for shift := uint(0); ; shift += 7 {
    +			if shift >= 64 {
    +				return ErrIntOverflowGenerated
    +			}
    +			if iNdEx >= l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			b := dAtA[iNdEx]
    +			iNdEx++
    +			wire |= uint64(b&0x7F) << shift
    +			if b < 0x80 {
    +				break
    +			}
    +		}
    +		fieldNum := int32(wire >> 3)
    +		wireType := int(wire & 0x7)
    +		if wireType == 4 {
    +			return fmt.Errorf("proto: ResourceClaimTemplate: wiretype end group for non-group")
    +		}
    +		if fieldNum <= 0 {
    +			return fmt.Errorf("proto: ResourceClaimTemplate: illegal tag %d (wire type %d)", fieldNum, wire)
    +		}
    +		switch fieldNum {
    +		case 1:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		case 2:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		default:
    +			iNdEx = preIndex
    +			skippy, err := skipGenerated(dAtA[iNdEx:])
    +			if err != nil {
    +				return err
    +			}
    +			if (skippy < 0) || (iNdEx+skippy) < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if (iNdEx + skippy) > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			iNdEx += skippy
    +		}
    +	}
    +
    +	if iNdEx > l {
    +		return io.ErrUnexpectedEOF
    +	}
    +	return nil
    +}
    +func (m *ResourceClaimTemplateList) Unmarshal(dAtA []byte) error {
    +	l := len(dAtA)
    +	iNdEx := 0
    +	for iNdEx < l {
    +		preIndex := iNdEx
    +		var wire uint64
    +		for shift := uint(0); ; shift += 7 {
    +			if shift >= 64 {
    +				return ErrIntOverflowGenerated
    +			}
    +			if iNdEx >= l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			b := dAtA[iNdEx]
    +			iNdEx++
    +			wire |= uint64(b&0x7F) << shift
    +			if b < 0x80 {
    +				break
    +			}
    +		}
    +		fieldNum := int32(wire >> 3)
    +		wireType := int(wire & 0x7)
    +		if wireType == 4 {
    +			return fmt.Errorf("proto: ResourceClaimTemplateList: wiretype end group for non-group")
    +		}
    +		if fieldNum <= 0 {
    +			return fmt.Errorf("proto: ResourceClaimTemplateList: illegal tag %d (wire type %d)", fieldNum, wire)
    +		}
    +		switch fieldNum {
    +		case 1:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		case 2:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.Items = append(m.Items, ResourceClaimTemplate{})
    +			if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		default:
    +			iNdEx = preIndex
    +			skippy, err := skipGenerated(dAtA[iNdEx:])
    +			if err != nil {
    +				return err
    +			}
    +			if (skippy < 0) || (iNdEx+skippy) < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if (iNdEx + skippy) > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			iNdEx += skippy
    +		}
    +	}
    +
    +	if iNdEx > l {
    +		return io.ErrUnexpectedEOF
    +	}
    +	return nil
    +}
    +func (m *ResourceClaimTemplateSpec) Unmarshal(dAtA []byte) error {
    +	l := len(dAtA)
    +	iNdEx := 0
    +	for iNdEx < l {
    +		preIndex := iNdEx
    +		var wire uint64
    +		for shift := uint(0); ; shift += 7 {
    +			if shift >= 64 {
    +				return ErrIntOverflowGenerated
    +			}
    +			if iNdEx >= l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			b := dAtA[iNdEx]
    +			iNdEx++
    +			wire |= uint64(b&0x7F) << shift
    +			if b < 0x80 {
    +				break
    +			}
    +		}
    +		fieldNum := int32(wire >> 3)
    +		wireType := int(wire & 0x7)
    +		if wireType == 4 {
    +			return fmt.Errorf("proto: ResourceClaimTemplateSpec: wiretype end group for non-group")
    +		}
    +		if fieldNum <= 0 {
    +			return fmt.Errorf("proto: ResourceClaimTemplateSpec: illegal tag %d (wire type %d)", fieldNum, wire)
    +		}
    +		switch fieldNum {
    +		case 1:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		case 2:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		default:
    +			iNdEx = preIndex
    +			skippy, err := skipGenerated(dAtA[iNdEx:])
    +			if err != nil {
    +				return err
    +			}
    +			if (skippy < 0) || (iNdEx+skippy) < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if (iNdEx + skippy) > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			iNdEx += skippy
    +		}
    +	}
    +
    +	if iNdEx > l {
    +		return io.ErrUnexpectedEOF
    +	}
    +	return nil
    +}
    +func (m *ResourcePool) Unmarshal(dAtA []byte) error {
    +	l := len(dAtA)
    +	iNdEx := 0
    +	for iNdEx < l {
    +		preIndex := iNdEx
    +		var wire uint64
    +		for shift := uint(0); ; shift += 7 {
    +			if shift >= 64 {
    +				return ErrIntOverflowGenerated
    +			}
    +			if iNdEx >= l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			b := dAtA[iNdEx]
    +			iNdEx++
    +			wire |= uint64(b&0x7F) << shift
    +			if b < 0x80 {
    +				break
    +			}
    +		}
    +		fieldNum := int32(wire >> 3)
    +		wireType := int(wire & 0x7)
    +		if wireType == 4 {
    +			return fmt.Errorf("proto: ResourcePool: wiretype end group for non-group")
    +		}
    +		if fieldNum <= 0 {
    +			return fmt.Errorf("proto: ResourcePool: illegal tag %d (wire type %d)", fieldNum, wire)
    +		}
    +		switch fieldNum {
    +		case 1:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.Name = string(dAtA[iNdEx:postIndex])
    +			iNdEx = postIndex
    +		case 2:
    +			if wireType != 0 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Generation", wireType)
    +			}
    +			m.Generation = 0
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				m.Generation |= int64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +		case 3:
    +			if wireType != 0 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field ResourceSliceCount", wireType)
    +			}
    +			m.ResourceSliceCount = 0
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				m.ResourceSliceCount |= int64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +		default:
    +			iNdEx = preIndex
    +			skippy, err := skipGenerated(dAtA[iNdEx:])
    +			if err != nil {
    +				return err
    +			}
    +			if (skippy < 0) || (iNdEx+skippy) < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if (iNdEx + skippy) > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			iNdEx += skippy
    +		}
    +	}
    +
    +	if iNdEx > l {
    +		return io.ErrUnexpectedEOF
    +	}
    +	return nil
    +}
    +func (m *ResourceSlice) Unmarshal(dAtA []byte) error {
    +	l := len(dAtA)
    +	iNdEx := 0
    +	for iNdEx < l {
    +		preIndex := iNdEx
    +		var wire uint64
    +		for shift := uint(0); ; shift += 7 {
    +			if shift >= 64 {
    +				return ErrIntOverflowGenerated
    +			}
    +			if iNdEx >= l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			b := dAtA[iNdEx]
    +			iNdEx++
    +			wire |= uint64(b&0x7F) << shift
    +			if b < 0x80 {
    +				break
    +			}
    +		}
    +		fieldNum := int32(wire >> 3)
    +		wireType := int(wire & 0x7)
    +		if wireType == 4 {
    +			return fmt.Errorf("proto: ResourceSlice: wiretype end group for non-group")
    +		}
    +		if fieldNum <= 0 {
    +			return fmt.Errorf("proto: ResourceSlice: illegal tag %d (wire type %d)", fieldNum, wire)
    +		}
    +		switch fieldNum {
    +		case 1:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		case 2:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		default:
    +			iNdEx = preIndex
    +			skippy, err := skipGenerated(dAtA[iNdEx:])
    +			if err != nil {
    +				return err
    +			}
    +			if (skippy < 0) || (iNdEx+skippy) < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if (iNdEx + skippy) > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			iNdEx += skippy
    +		}
    +	}
    +
    +	if iNdEx > l {
    +		return io.ErrUnexpectedEOF
    +	}
    +	return nil
    +}
    +func (m *ResourceSliceList) Unmarshal(dAtA []byte) error {
    +	l := len(dAtA)
    +	iNdEx := 0
    +	for iNdEx < l {
    +		preIndex := iNdEx
    +		var wire uint64
    +		for shift := uint(0); ; shift += 7 {
    +			if shift >= 64 {
    +				return ErrIntOverflowGenerated
    +			}
    +			if iNdEx >= l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			b := dAtA[iNdEx]
    +			iNdEx++
    +			wire |= uint64(b&0x7F) << shift
    +			if b < 0x80 {
    +				break
    +			}
    +		}
    +		fieldNum := int32(wire >> 3)
    +		wireType := int(wire & 0x7)
    +		if wireType == 4 {
    +			return fmt.Errorf("proto: ResourceSliceList: wiretype end group for non-group")
    +		}
    +		if fieldNum <= 0 {
    +			return fmt.Errorf("proto: ResourceSliceList: illegal tag %d (wire type %d)", fieldNum, wire)
    +		}
    +		switch fieldNum {
    +		case 1:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		case 2:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.Items = append(m.Items, ResourceSlice{})
    +			if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		default:
    +			iNdEx = preIndex
    +			skippy, err := skipGenerated(dAtA[iNdEx:])
    +			if err != nil {
    +				return err
    +			}
    +			if (skippy < 0) || (iNdEx+skippy) < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if (iNdEx + skippy) > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			iNdEx += skippy
    +		}
    +	}
    +
    +	if iNdEx > l {
    +		return io.ErrUnexpectedEOF
    +	}
    +	return nil
    +}
    +func (m *ResourceSliceSpec) Unmarshal(dAtA []byte) error {
    +	l := len(dAtA)
    +	iNdEx := 0
    +	for iNdEx < l {
    +		preIndex := iNdEx
    +		var wire uint64
    +		for shift := uint(0); ; shift += 7 {
    +			if shift >= 64 {
    +				return ErrIntOverflowGenerated
    +			}
    +			if iNdEx >= l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			b := dAtA[iNdEx]
    +			iNdEx++
    +			wire |= uint64(b&0x7F) << shift
    +			if b < 0x80 {
    +				break
    +			}
    +		}
    +		fieldNum := int32(wire >> 3)
    +		wireType := int(wire & 0x7)
    +		if wireType == 4 {
    +			return fmt.Errorf("proto: ResourceSliceSpec: wiretype end group for non-group")
    +		}
    +		if fieldNum <= 0 {
    +			return fmt.Errorf("proto: ResourceSliceSpec: illegal tag %d (wire type %d)", fieldNum, wire)
    +		}
    +		switch fieldNum {
    +		case 1:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Driver", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.Driver = string(dAtA[iNdEx:postIndex])
    +			iNdEx = postIndex
    +		case 2:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Pool", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			if err := m.Pool.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		case 3:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field NodeName", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			s := string(dAtA[iNdEx:postIndex])
    +			m.NodeName = &s
    +			iNdEx = postIndex
    +		case 4:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field NodeSelector", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			if m.NodeSelector == nil {
    +				m.NodeSelector = &v11.NodeSelector{}
    +			}
    +			if err := m.NodeSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		case 5:
    +			if wireType != 0 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field AllNodes", wireType)
    +			}
    +			var v int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				v |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			b := bool(v != 0)
    +			m.AllNodes = &b
    +		case 6:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Devices", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.Devices = append(m.Devices, Device{})
    +			if err := m.Devices[len(m.Devices)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		case 7:
    +			if wireType != 0 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field PerDeviceNodeSelection", wireType)
    +			}
    +			var v int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				v |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			b := bool(v != 0)
    +			m.PerDeviceNodeSelection = &b
    +		case 8:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field SharedCounters", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.SharedCounters = append(m.SharedCounters, CounterSet{})
    +			if err := m.SharedCounters[len(m.SharedCounters)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		default:
    +			iNdEx = preIndex
    +			skippy, err := skipGenerated(dAtA[iNdEx:])
    +			if err != nil {
    +				return err
    +			}
    +			if (skippy < 0) || (iNdEx+skippy) < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if (iNdEx + skippy) > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			iNdEx += skippy
    +		}
    +	}
    +
    +	if iNdEx > l {
    +		return io.ErrUnexpectedEOF
    +	}
    +	return nil
    +}
    +// skipGenerated advances past one complete protobuf field (key varint plus
    +// payload) in the wire-format buffer dAtA, returning how many bytes were
    +// consumed, or an error if the framing is malformed. The generated Unmarshal
    +// methods call it to skip unknown fields.
    +// NOTE(review): this file is auto-generated; do not hand-edit — regenerate
    +// with the protobuf code generator instead.
    +func skipGenerated(dAtA []byte) (n int, err error) {
    +	l := len(dAtA)
    +	iNdEx := 0
    +	depth := 0
    +	for iNdEx < l {
    +		var wire uint64
    +		// Decode the field key varint: (field number << 3) | wire type.
    +		for shift := uint(0); ; shift += 7 {
    +			if shift >= 64 {
    +				return 0, ErrIntOverflowGenerated
    +			}
    +			if iNdEx >= l {
    +				return 0, io.ErrUnexpectedEOF
    +			}
    +			b := dAtA[iNdEx]
    +			iNdEx++
    +			wire |= (uint64(b) & 0x7F) << shift
    +			if b < 0x80 {
    +				break
    +			}
    +		}
    +		wireType := int(wire & 0x7)
    +		switch wireType {
    +		case 0:
    +			// Varint payload: consume bytes until one without the
    +			// continuation bit (0x80) is seen.
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return 0, ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return 0, io.ErrUnexpectedEOF
    +				}
    +				iNdEx++
    +				if dAtA[iNdEx-1] < 0x80 {
    +					break
    +				}
    +			}
    +		case 1:
    +			// Fixed 64-bit payload.
    +			iNdEx += 8
    +		case 2:
    +			// Length-delimited payload: decode the length varint, then
    +			// skip that many bytes.
    +			var length int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return 0, ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return 0, io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				length |= (int(b) & 0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if length < 0 {
    +				return 0, ErrInvalidLengthGenerated
    +			}
    +			iNdEx += length
    +		case 3:
    +			// Start of a (deprecated) group: its inner fields are consumed
    +			// by this loop until the matching end-group tag is reached.
    +			depth++
    +		case 4:
    +			// End of a group; must pair with a prior start-group tag.
    +			if depth == 0 {
    +				return 0, ErrUnexpectedEndOfGroupGenerated
    +			}
    +			depth--
    +		case 5:
    +			// Fixed 32-bit payload.
    +			iNdEx += 4
    +		default:
    +			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
    +		}
    +		// A negative index means an attacker-controlled length overflowed int.
    +		if iNdEx < 0 {
    +			return 0, ErrInvalidLengthGenerated
    +		}
    +		// depth == 0 means one complete top-level field (including any
    +		// nested groups) has been skipped.
    +		if depth == 0 {
    +			return iNdEx, nil
    +		}
    +	}
    +	return 0, io.ErrUnexpectedEOF
    +}
    +
    +// Sentinel errors returned by the generated Unmarshal methods and
    +// skipGenerated when the protobuf wire-format input is malformed.
    +var (
    +	ErrInvalidLengthGenerated        = fmt.Errorf("proto: negative length found during unmarshaling")
    +	ErrIntOverflowGenerated          = fmt.Errorf("proto: integer overflow")
    +	ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group")
    +)
    diff --git a/vendor/k8s.io/api/resource/v1/generated.proto b/vendor/k8s.io/api/resource/v1/generated.proto
    new file mode 100644
    index 000000000..816a430c2
    --- /dev/null
    +++ b/vendor/k8s.io/api/resource/v1/generated.proto
    @@ -0,0 +1,1589 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +
    +// This file was autogenerated by go-to-protobuf. Do not edit it manually!
    +
    +syntax = "proto2";
    +
    +package k8s.io.api.resource.v1;
    +
    +import "k8s.io/api/core/v1/generated.proto";
    +import "k8s.io/apimachinery/pkg/api/resource/generated.proto";
    +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto";
    +import "k8s.io/apimachinery/pkg/runtime/generated.proto";
    +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto";
    +
    +// Package-wide variables from generator "generated".
    +option go_package = "k8s.io/api/resource/v1";
    +
    +// AllocatedDeviceStatus contains the status of an allocated device, if the
    +// driver chooses to report it. This may include driver-specific information.
    +//
    +// The combination of Driver, Pool, Device, and ShareID must match the corresponding key
    +// in Status.Allocation.Devices.
    +message AllocatedDeviceStatus {
    +  // Driver specifies the name of the DRA driver whose kubelet
    +  // plugin should be invoked to process the allocation once the claim is
    +  // needed on a node.
    +  //
    +  // Must be a DNS subdomain and should end with a DNS domain owned by the
    +  // vendor of the driver.
    +  //
    +  // +required
    +  optional string driver = 1;
    +
    +  // This name together with the driver name and the device name field
    +  // identify which device was allocated (`//`).
    +  //
    +  // Must not be longer than 253 characters and may contain one or more
    +  // DNS sub-domains separated by slashes.
    +  //
    +  // +required
    +  optional string pool = 2;
    +
    +  // Device references one device instance via its name in the driver's
    +  // resource pool. It must be a DNS label.
    +  //
    +  // +required
    +  optional string device = 3;
    +
    +  // ShareID uniquely identifies an individual allocation share of the device.
    +  //
    +  // +optional
    +  // +featureGate=DRAConsumableCapacity
    +  optional string shareID = 7;
    +
    +  // Conditions contains the latest observation of the device's state.
    +  // If the device has been configured according to the class and claim
    +  // config references, the `Ready` condition should be True.
    +  //
    +  // Must not contain more than 8 entries.
    +  //
    +  // +optional
    +  // +listType=map
    +  // +listMapKey=type
    +  repeated .k8s.io.apimachinery.pkg.apis.meta.v1.Condition conditions = 4;
    +
    +  // Data contains arbitrary driver-specific data.
    +  //
    +  // The length of the raw data must be smaller or equal to 10 Ki.
    +  //
    +  // +optional
    +  optional .k8s.io.apimachinery.pkg.runtime.RawExtension data = 5;
    +
    +  // NetworkData contains network-related information specific to the device.
    +  //
    +  // +optional
    +  optional NetworkDeviceData networkData = 6;
    +}
    +
    +// AllocationResult contains attributes of an allocated resource.
    +message AllocationResult {
    +  // Devices is the result of allocating devices.
    +  //
    +  // +optional
    +  optional DeviceAllocationResult devices = 1;
    +
    +  // NodeSelector defines where the allocated resources are available. If
    +  // unset, they are available everywhere.
    +  //
    +  // +optional
    +  optional .k8s.io.api.core.v1.NodeSelector nodeSelector = 3;
    +
    +  // AllocationTimestamp stores the time when the resources were allocated.
    +  // This field is not guaranteed to be set, in which case that time is unknown.
    +  //
    +  // This is an alpha field and requires enabling the DRADeviceBindingConditions and DRAResourceClaimDeviceStatus
    +  // feature gate.
    +  //
    +  // +optional
    +  // +featureGate=DRADeviceBindingConditions,DRAResourceClaimDeviceStatus
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time allocationTimestamp = 5;
    +}
    +
    +// CELDeviceSelector contains a CEL expression for selecting a device.
    +message CELDeviceSelector {
    +  // Expression is a CEL expression which evaluates a single device. It
    +  // must evaluate to true when the device under consideration satisfies
    +  // the desired criteria, and false when it does not. Any other result
    +  // is an error and causes allocation of devices to abort.
    +  //
    +  // The expression's input is an object named "device", which carries
    +  // the following properties:
    +  //  - driver (string): the name of the driver which defines this device.
    +  //  - attributes (map[string]object): the device's attributes, grouped by prefix
    +  //    (e.g. device.attributes["dra.example.com"] evaluates to an object with all
    +  //    of the attributes which were prefixed by "dra.example.com".
    +  //  - capacity (map[string]object): the device's capacities, grouped by prefix.
    +  //  - allowMultipleAllocations (bool): the allowMultipleAllocations property of the device
    +  //    (v1.34+ with the DRAConsumableCapacity feature enabled).
    +  //
    +  // Example: Consider a device with driver="dra.example.com", which exposes
    +  // two attributes named "model" and "ext.example.com/family" and which
    +  // exposes one capacity named "modules". This input to this expression
    +  // would have the following fields:
    +  //
    +  //     device.driver
    +  //     device.attributes["dra.example.com"].model
    +  //     device.attributes["ext.example.com"].family
    +  //     device.capacity["dra.example.com"].modules
    +  //
    +  // The device.driver field can be used to check for a specific driver,
    +  // either as a high-level precondition (i.e. you only want to consider
    +  // devices from this driver) or as part of a multi-clause expression
    +  // that is meant to consider devices from different drivers.
    +  //
    +  // The value type of each attribute is defined by the device
    +  // definition, and users who write these expressions must consult the
    +  // documentation for their specific drivers. The value type of each
    +  // capacity is Quantity.
    +  //
    +  // If an unknown prefix is used as a lookup in either device.attributes
    +  // or device.capacity, an empty map will be returned. Any reference to
    +  // an unknown field will cause an evaluation error and allocation to
    +  // abort.
    +  //
    +  // A robust expression should check for the existence of attributes
    +  // before referencing them.
    +  //
    +  // For ease of use, the cel.bind() function is enabled, and can be used
    +  // to simplify expressions that access multiple attributes with the
    +  // same domain. For example:
    +  //
    +  //     cel.bind(dra, device.attributes["dra.example.com"], dra.someBool && dra.anotherBool)
    +  //
    +  // The length of the expression must be smaller or equal to 10 Ki. The
    +  // cost of evaluating it is also limited based on the estimated number
    +  // of logical steps.
    +  //
    +  // +required
    +  optional string expression = 1;
    +}
    +
    +// CapacityRequestPolicy defines how requests consume device capacity.
    +//
    +// Must not set more than one ValidRequestValues.
    +message CapacityRequestPolicy {
    +  // Default specifies how much of this capacity is consumed by a request
    +  // that does not contain an entry for it in DeviceRequest's Capacity.
    +  //
    +  // +optional
    +  optional .k8s.io.apimachinery.pkg.api.resource.Quantity default = 1;
    +
    +  // ValidValues defines a set of acceptable quantity values in consuming requests.
    +  //
    +  // Must not contain more than 10 entries.
    +  // Must be sorted in ascending order.
    +  //
    +  // If this field is set,
    +  // Default must be defined and it must be included in ValidValues list.
    +  //
    +  // If the requested amount does not match any valid value but smaller than some valid values,
    +  // the scheduler calculates the smallest valid value that is greater than or equal to the request.
    +  // That is: min(ceil(requestedValue) ∈ validValues), where requestedValue ≤ max(validValues).
    +  //
    +  // If the requested amount exceeds all valid values, the request violates the policy,
    +  // and this device cannot be allocated.
    +  //
    +  // +optional
    +  // +listType=atomic
    +  // +oneOf=ValidRequestValues
    +  repeated .k8s.io.apimachinery.pkg.api.resource.Quantity validValues = 3;
    +
    +  // ValidRange defines an acceptable quantity value range in consuming requests.
    +  //
    +  // If this field is set,
    +  // Default must be defined and it must fall within the defined ValidRange.
    +  //
    +  // If the requested amount does not fall within the defined range, the request violates the policy,
    +  // and this device cannot be allocated.
    +  //
    +  // If the request doesn't contain this capacity entry, Default value is used.
    +  //
    +  // +optional
    +  // +oneOf=ValidRequestValues
    +  optional CapacityRequestPolicyRange validRange = 4;
    +}
    +
    +// CapacityRequestPolicyRange defines a valid range for consumable capacity values.
    +//
    +//   - If the requested amount is less than Min, it is rounded up to the Min value.
    +//   - If Step is set and the requested amount is between Min and Max but not aligned with Step,
    +//     it will be rounded up to the next value equal to Min + (n * Step).
    +//   - If Step is not set, the requested amount is used as-is if it falls within the range Min to Max (if set).
    +//   - If the requested or rounded amount exceeds Max (if set), the request does not satisfy the policy,
    +//     and the device cannot be allocated.
    +message CapacityRequestPolicyRange {
    +  // Min specifies the minimum capacity allowed for a consumption request.
    +  //
    +  // Min must be greater than or equal to zero,
    +  // and less than or equal to the capacity value.
    +  // requestPolicy.default must be more than or equal to the minimum.
    +  //
    +  // +required
    +  optional .k8s.io.apimachinery.pkg.api.resource.Quantity min = 1;
    +
    +  // Max defines the upper limit for capacity that can be requested.
    +  //
    +  // Max must be less than or equal to the capacity value.
    +  // Min and requestPolicy.default must be less than or equal to the maximum.
    +  //
    +  // +optional
    +  optional .k8s.io.apimachinery.pkg.api.resource.Quantity max = 2;
    +
    +  // Step defines the step size between valid capacity amounts within the range.
    +  //
    +  // Max (if set) and requestPolicy.default must be a multiple of Step.
    +  // Min + Step must be less than or equal to the capacity value.
    +  //
    +  // +optional
    +  optional .k8s.io.apimachinery.pkg.api.resource.Quantity step = 3;
    +}
    +
    +// CapacityRequirements defines the capacity requirements for a specific device request.
    +message CapacityRequirements {
    +  // Requests represent individual device resource requests for distinct resources,
    +  // all of which must be provided by the device.
    +  //
    +  // This value is used as an additional filtering condition against the available capacity on the device.
    +  // This is semantically equivalent to a CEL selector with
    +  // `device.capacity[]..compareTo(quantity()) >= 0`.
    +  // For example, device.capacity['test-driver.cdi.k8s.io'].counters.compareTo(quantity('2')) >= 0.
    +  //
    +  // When a requestPolicy is defined, the requested amount is adjusted upward
    +  // to the nearest valid value based on the policy.
    +  // If the requested amount cannot be adjusted to a valid value—because it exceeds what the requestPolicy allows—
    +  // the device is considered ineligible for allocation.
    +  //
    +  // For any capacity that is not explicitly requested:
    +  // - If no requestPolicy is set, the default consumed capacity is equal to the full device capacity
    +  //   (i.e., the whole device is claimed).
    +  // - If a requestPolicy is set, the default consumed capacity is determined according to that policy.
    +  //
    +  // If the device allows multiple allocation,
    +  // the aggregated amount across all requests must not exceed the capacity value.
    +  // The consumed capacity, which may be adjusted based on the requestPolicy if defined,
    +  // is recorded in the resource claim’s status.devices[*].consumedCapacity field.
    +  //
    +  // +optional
    +  map requests = 1;
    +}
    +
    +// Counter describes a quantity associated with a device.
    +message Counter {
    +  // Value defines how much of a certain device counter is available.
    +  //
    +  // +required
    +  optional .k8s.io.apimachinery.pkg.api.resource.Quantity value = 1;
    +}
    +
    +// CounterSet defines a named set of counters
    +// that are available to be used by devices defined in the
    +// ResourceSlice.
    +//
    +// The counters are not allocatable by themselves, but
    +// can be referenced by devices. When a device is allocated,
    +// the portion of counters it uses will no longer be available for use
    +// by other devices.
    +message CounterSet {
    +  // Name defines the name of the counter set.
    +  // It must be a DNS label.
    +  //
    +  // +required
    +  optional string name = 1;
    +
    +  // Counters defines the set of counters for this CounterSet
    +  // The name of each counter must be unique in that set and must be a DNS label.
    +  //
    +  // The maximum number of counters in all sets is 32.
    +  //
    +  // +required
    +  map counters = 2;
    +}
    +
    +// Device represents one individual hardware instance that can be selected based
    +// on its attributes. Besides the name, exactly one field must be set.
    +message Device {
    +  // Name is unique identifier among all devices managed by
    +  // the driver in the pool. It must be a DNS label.
    +  //
    +  // +required
    +  optional string name = 1;
    +
    +  // Attributes defines the set of attributes for this device.
    +  // The name of each attribute must be unique in that set.
    +  //
    +  // The maximum number of attributes and capacities combined is 32.
    +  //
    +  // +optional
    +  map attributes = 2;
    +
    +  // Capacity defines the set of capacities for this device.
    +  // The name of each capacity must be unique in that set.
    +  //
    +  // The maximum number of attributes and capacities combined is 32.
    +  //
    +  // +optional
    +  map capacity = 3;
    +
    +  // ConsumesCounters defines a list of references to sharedCounters
    +  // and the set of counters that the device will
    +  // consume from those counter sets.
    +  //
    +  // There can only be a single entry per counterSet.
    +  //
    +  // The total number of device counter consumption entries
    +  // must be <= 32. In addition, the total number in the
    +  // entire ResourceSlice must be <= 1024 (for example,
    +  // 64 devices with 16 counters each).
    +  //
    +  // +optional
    +  // +listType=atomic
    +  // +featureGate=DRAPartitionableDevices
    +  repeated DeviceCounterConsumption consumesCounters = 4;
    +
    +  // NodeName identifies the node where the device is available.
    +  //
    +  // Must only be set if Spec.PerDeviceNodeSelection is set to true.
    +  // At most one of NodeName, NodeSelector and AllNodes can be set.
    +  //
    +  // +optional
    +  // +oneOf=DeviceNodeSelection
    +  // +featureGate=DRAPartitionableDevices
    +  optional string nodeName = 5;
    +
    +  // NodeSelector defines the nodes where the device is available.
    +  //
    +  // Must use exactly one term.
    +  //
    +  // Must only be set if Spec.PerDeviceNodeSelection is set to true.
    +  // At most one of NodeName, NodeSelector and AllNodes can be set.
    +  //
    +  // +optional
    +  // +oneOf=DeviceNodeSelection
    +  // +featureGate=DRAPartitionableDevices
    +  optional .k8s.io.api.core.v1.NodeSelector nodeSelector = 6;
    +
    +  // AllNodes indicates that all nodes have access to the device.
    +  //
    +  // Must only be set if Spec.PerDeviceNodeSelection is set to true.
    +  // At most one of NodeName, NodeSelector and AllNodes can be set.
    +  //
    +  // +optional
    +  // +oneOf=DeviceNodeSelection
    +  // +featureGate=DRAPartitionableDevices
    +  optional bool allNodes = 7;
    +
    +  // If specified, these are the driver-defined taints.
    +  //
    +  // The maximum number of taints is 4.
    +  //
    +  // This is an alpha field and requires enabling the DRADeviceTaints
    +  // feature gate.
    +  //
    +  // +optional
    +  // +listType=atomic
    +  // +featureGate=DRADeviceTaints
    +  repeated DeviceTaint taints = 8;
    +
    +  // BindsToNode indicates if the usage of an allocation involving this device
    +  // has to be limited to exactly the node that was chosen when allocating the claim.
    +  // If set to true, the scheduler will set the ResourceClaim.Status.Allocation.NodeSelector
    +  // to match the node where the allocation was made.
    +  //
    +  // This is an alpha field and requires enabling the DRADeviceBindingConditions and DRAResourceClaimDeviceStatus
    +  // feature gates.
    +  //
    +  // +optional
    +  // +featureGate=DRADeviceBindingConditions,DRAResourceClaimDeviceStatus
    +  optional bool bindsToNode = 9;
    +
    +  // BindingConditions defines the conditions for proceeding with binding.
    +  // All of these conditions must be set in the per-device status
    +  // conditions with a value of True to proceed with binding the pod to the node
    +  // while scheduling the pod.
    +  //
    +  // The maximum number of binding conditions is 4.
    +  //
    +  // The conditions must be a valid condition type string.
    +  //
    +  // This is an alpha field and requires enabling the DRADeviceBindingConditions and DRAResourceClaimDeviceStatus
    +  // feature gates.
    +  //
    +  // +optional
    +  // +listType=atomic
    +  // +featureGate=DRADeviceBindingConditions,DRAResourceClaimDeviceStatus
    +  repeated string bindingConditions = 10;
    +
    +  // BindingFailureConditions defines the conditions for binding failure.
    +  // They may be set in the per-device status conditions.
    +  // If any is set to "True", a binding failure occurred.
    +  //
    +  // The maximum number of binding failure conditions is 4.
    +  //
    +  // The conditions must be a valid condition type string.
    +  //
    +  // This is an alpha field and requires enabling the DRADeviceBindingConditions and DRAResourceClaimDeviceStatus
    +  // feature gates.
    +  //
    +  // +optional
    +  // +listType=atomic
    +  // +featureGate=DRADeviceBindingConditions,DRAResourceClaimDeviceStatus
    +  repeated string bindingFailureConditions = 11;
    +
    +  // AllowMultipleAllocations marks whether the device is allowed to be allocated to multiple DeviceRequests.
    +  //
    +  // If AllowMultipleAllocations is set to true, the device can be allocated more than once,
    +  // and all of its capacity is consumable, regardless of whether the requestPolicy is defined or not.
    +  //
    +  // +optional
    +  // +featureGate=DRAConsumableCapacity
    +  optional bool allowMultipleAllocations = 12;
    +}
    +
    +// DeviceAllocationConfiguration gets embedded in an AllocationResult.
    +message DeviceAllocationConfiguration {
    +  // Source records whether the configuration comes from a class and thus
    +  // is not something that a normal user would have been able to set
    +  // or from a claim.
    +  //
    +  // +required
    +  optional string source = 1;
    +
    +  // Requests lists the names of requests where the configuration applies.
    +  // If empty, its applies to all requests.
    +  //
    +  // References to subrequests must include the name of the main request
    +  // and may include the subrequest using the format 
    [/]. If just + // the main request is given, the configuration applies to all subrequests. + // + // +optional + // +listType=atomic + repeated string requests = 2; + + optional DeviceConfiguration deviceConfiguration = 3; +} + +// DeviceAllocationResult is the result of allocating devices. +message DeviceAllocationResult { + // Results lists all allocated devices. + // + // +optional + // +listType=atomic + repeated DeviceRequestAllocationResult results = 1; + + // This field is a combination of all the claim and class configuration parameters. + // Drivers can distinguish between those based on a flag. + // + // This includes configuration parameters for drivers which have no allocated + // devices in the result because it is up to the drivers which configuration + // parameters they support. They can silently ignore unknown configuration + // parameters. + // + // +optional + // +listType=atomic + repeated DeviceAllocationConfiguration config = 2; +} + +// DeviceAttribute must have exactly one field set. +message DeviceAttribute { + // IntValue is a number. + // + // +optional + // +oneOf=ValueType + optional int64 int = 2; + + // BoolValue is a true/false value. + // + // +optional + // +oneOf=ValueType + optional bool bool = 3; + + // StringValue is a string. Must not be longer than 64 characters. + // + // +optional + // +oneOf=ValueType + optional string string = 4; + + // VersionValue is a semantic version according to semver.org spec 2.0.0. + // Must not be longer than 64 characters. + // + // +optional + // +oneOf=ValueType + optional string version = 5; +} + +// DeviceCapacity describes a quantity associated with a device. +message DeviceCapacity { + // Value defines how much of a certain capacity that device has. + // + // This field reflects the fixed total capacity and does not change. + // The consumed amount is tracked separately by scheduler + // and does not affect this value. 
+ // + // +required + optional .k8s.io.apimachinery.pkg.api.resource.Quantity value = 1; + + // RequestPolicy defines how this DeviceCapacity must be consumed + // when the device is allowed to be shared by multiple allocations. + // + // The Device must have allowMultipleAllocations set to true in order to set a requestPolicy. + // + // If unset, capacity requests are unconstrained: + // requests can consume any amount of capacity, as long as the total consumed + // across all allocations does not exceed the device's defined capacity. + // If request is also unset, default is the full capacity value. + // + // +optional + // +featureGate=DRAConsumableCapacity + optional CapacityRequestPolicy requestPolicy = 2; +} + +// DeviceClaim defines how to request devices with a ResourceClaim. +message DeviceClaim { + // Requests represent individual requests for distinct devices which + // must all be satisfied. If empty, nothing needs to be allocated. + // + // +optional + // +listType=atomic + repeated DeviceRequest requests = 1; + + // These constraints must be satisfied by the set of devices that get + // allocated for the claim. + // + // +optional + // +listType=atomic + repeated DeviceConstraint constraints = 2; + + // This field holds configuration for multiple potential drivers which + // could satisfy requests in this claim. It is ignored while allocating + // the claim. + // + // +optional + // +listType=atomic + repeated DeviceClaimConfiguration config = 3; +} + +// DeviceClaimConfiguration is used for configuration parameters in DeviceClaim. +message DeviceClaimConfiguration { + // Requests lists the names of requests where the configuration applies. + // If empty, it applies to all requests. + // + // References to subrequests must include the name of the main request + // and may include the subrequest using the format
    [/]. If just + // the main request is given, the configuration applies to all subrequests. + // + // +optional + // +listType=atomic + repeated string requests = 1; + + optional DeviceConfiguration deviceConfiguration = 2; +} + +// DeviceClass is a vendor- or admin-provided resource that contains +// device configuration and selectors. It can be referenced in +// the device requests of a claim to apply these presets. +// Cluster scoped. +// +// This is an alpha type and requires enabling the DynamicResourceAllocation +// feature gate. +message DeviceClass { + // Standard object metadata + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Spec defines what can be allocated and how to configure it. + // + // This is mutable. Consumers have to be prepared for classes changing + // at any time, either because they get updated or replaced. Claim + // allocations are done once based on whatever was set in classes at + // the time of allocation. + // + // Changing the spec automatically increments the metadata.generation number. + optional DeviceClassSpec spec = 2; +} + +// DeviceClassConfiguration is used in DeviceClass. +message DeviceClassConfiguration { + optional DeviceConfiguration deviceConfiguration = 1; +} + +// DeviceClassList is a collection of classes. +message DeviceClassList { + // Standard list metadata + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is the list of resource classes. + repeated DeviceClass items = 2; +} + +// DeviceClassSpec is used in a [DeviceClass] to define what can be allocated +// and how to configure it. +message DeviceClassSpec { + // Each selector must be satisfied by a device which is claimed via this class. + // + // +optional + // +listType=atomic + repeated DeviceSelector selectors = 1; + + // Config defines configuration parameters that apply to each device that is claimed via this class. 
+ // Some classses may potentially be satisfied by multiple drivers, so each instance of a vendor + // configuration applies to exactly one driver. + // + // They are passed to the driver, but are not considered while allocating the claim. + // + // +optional + // +listType=atomic + repeated DeviceClassConfiguration config = 2; + + // ExtendedResourceName is the extended resource name for the devices of this class. + // The devices of this class can be used to satisfy a pod's extended resource requests. + // It has the same format as the name of a pod's extended resource. + // It should be unique among all the device classes in a cluster. + // If two device classes have the same name, then the class created later + // is picked to satisfy a pod's extended resource requests. + // If two classes are created at the same time, then the name of the class + // lexicographically sorted first is picked. + // + // This is an alpha field. + // +optional + // +featureGate=DRAExtendedResource + optional string extendedResourceName = 4; +} + +// DeviceConfiguration must have exactly one field set. It gets embedded +// inline in some other structs which have other fields, so field names must +// not conflict with those. +message DeviceConfiguration { + // Opaque provides driver-specific configuration parameters. + // + // +optional + // +oneOf=ConfigurationType + optional OpaqueDeviceConfiguration opaque = 1; +} + +// DeviceConstraint must have exactly one field set besides Requests. +message DeviceConstraint { + // Requests is a list of the one or more requests in this claim which + // must co-satisfy this constraint. If a request is fulfilled by + // multiple devices, then all of the devices must satisfy the + // constraint. If this is not specified, this constraint applies to all + // requests in this claim. + // + // References to subrequests must include the name of the main request + // and may include the subrequest using the format
    [/]. If just + // the main request is given, the constraint applies to all subrequests. + // + // +optional + // +listType=atomic + repeated string requests = 1; + + // MatchAttribute requires that all devices in question have this + // attribute and that its type and value are the same across those + // devices. + // + // For example, if you specified "dra.example.com/numa" (a hypothetical example!), + // then only devices in the same NUMA node will be chosen. A device which + // does not have that attribute will not be chosen. All devices should + // use a value of the same type for this attribute because that is part of + // its specification, but if one device doesn't, then it also will not be + // chosen. + // + // Must include the domain qualifier. + // + // +optional + // +oneOf=ConstraintType + optional string matchAttribute = 2; + + // DistinctAttribute requires that all devices in question have this + // attribute and that its type and value are unique across those devices. + // + // This acts as the inverse of MatchAttribute. + // + // This constraint is used to avoid allocating multiple requests to the same device + // by ensuring attribute-level differentiation. + // + // This is useful for scenarios where resource requests must be fulfilled by separate physical devices. + // For example, a container requests two network interfaces that must be allocated from two different physical NICs. + // + // +optional + // +oneOf=ConstraintType + // +featureGate=DRAConsumableCapacity + optional string distinctAttribute = 3; +} + +// DeviceCounterConsumption defines a set of counters that +// a device will consume from a CounterSet. +message DeviceCounterConsumption { + // CounterSet is the name of the set from which the + // counters defined will be consumed. + // + // +required + optional string counterSet = 1; + + // Counters defines the counters that will be consumed by the device. + // + // The maximum number counters in a device is 32. 
+ // In addition, the maximum number of all counters + // in all devices is 1024 (for example, 64 devices with + // 16 counters each). + // + // +required + map counters = 2; +} + +// DeviceRequest is a request for devices required for a claim. +// This is typically a request for a single resource like a device, but can +// also ask for several identical devices. With FirstAvailable it is also +// possible to provide a prioritized list of requests. +message DeviceRequest { + // Name can be used to reference this request in a pod.spec.containers[].resources.claims + // entry and in a constraint of the claim. + // + // References using the name in the DeviceRequest will uniquely + // identify a request when the Exactly field is set. When the + // FirstAvailable field is set, a reference to the name of the + // DeviceRequest will match whatever subrequest is chosen by the + // scheduler. + // + // Must be a DNS label. + // + // +required + optional string name = 1; + + // Exactly specifies the details for a single request that must + // be met exactly for the request to be satisfied. + // + // One of Exactly or FirstAvailable must be set. + // + // +optional + // +oneOf=deviceRequestType + optional ExactDeviceRequest exactly = 2; + + // FirstAvailable contains subrequests, of which exactly one will be + // selected by the scheduler. It tries to + // satisfy them in the order in which they are listed here. So if + // there are two entries in the list, the scheduler will only check + // the second one if it determines that the first one can not be used. + // + // DRA does not yet implement scoring, so the scheduler will + // select the first set of devices that satisfies all the + // requests in the claim. And if the requirements can + // be satisfied on more than one node, other scheduling features + // will determine which node is chosen. This means that the set of + // devices allocated to a claim might not be the optimal set + // available to the cluster. 
Scoring will be implemented later. + // + // +optional + // +oneOf=deviceRequestType + // +listType=atomic + // +featureGate=DRAPrioritizedList + repeated DeviceSubRequest firstAvailable = 3; +} + +// DeviceRequestAllocationResult contains the allocation result for one request. +message DeviceRequestAllocationResult { + // Request is the name of the request in the claim which caused this + // device to be allocated. If it references a subrequest in the + // firstAvailable list on a DeviceRequest, this field must + // include both the name of the main request and the subrequest + // using the format
    /. + // + // Multiple devices may have been allocated per request. + // + // +required + optional string request = 1; + + // Driver specifies the name of the DRA driver whose kubelet + // plugin should be invoked to process the allocation once the claim is + // needed on a node. + // + // Must be a DNS subdomain and should end with a DNS domain owned by the + // vendor of the driver. + // + // +required + optional string driver = 2; + + // This name together with the driver name and the device name field + // identify which device was allocated (`//`). + // + // Must not be longer than 253 characters and may contain one or more + // DNS sub-domains separated by slashes. + // + // +required + optional string pool = 3; + + // Device references one device instance via its name in the driver's + // resource pool. It must be a DNS label. + // + // +required + optional string device = 4; + + // AdminAccess indicates that this device was allocated for + // administrative access. See the corresponding request field + // for a definition of mode. + // + // This is an alpha field and requires enabling the DRAAdminAccess + // feature gate. Admin access is disabled if this field is unset or + // set to false, otherwise it is enabled. + // + // +optional + // +featureGate=DRAAdminAccess + optional bool adminAccess = 5; + + // A copy of all tolerations specified in the request at the time + // when the device got allocated. + // + // The maximum number of tolerations is 16. + // + // This is an alpha field and requires enabling the DRADeviceTaints + // feature gate. + // + // +optional + // +listType=atomic + // +featureGate=DRADeviceTaints + repeated DeviceToleration tolerations = 6; + + // BindingConditions contains a copy of the BindingConditions + // from the corresponding ResourceSlice at the time of allocation. + // + // This is an alpha field and requires enabling the DRADeviceBindingConditions and DRAResourceClaimDeviceStatus + // feature gates. 
+ // + // +optional + // +listType=atomic + // +featureGate=DRADeviceBindingConditions,DRAResourceClaimDeviceStatus + repeated string bindingConditions = 7; + + // BindingFailureConditions contains a copy of the BindingFailureConditions + // from the corresponding ResourceSlice at the time of allocation. + // + // This is an alpha field and requires enabling the DRADeviceBindingConditions and DRAResourceClaimDeviceStatus + // feature gates. + // + // +optional + // +listType=atomic + // +featureGate=DRADeviceBindingConditions,DRAResourceClaimDeviceStatus + repeated string bindingFailureConditions = 8; + + // ShareID uniquely identifies an individual allocation share of the device, + // used when the device supports multiple simultaneous allocations. + // It serves as an additional map key to differentiate concurrent shares + // of the same device. + // + // +optional + // +featureGate=DRAConsumableCapacity + optional string shareID = 9; + + // ConsumedCapacity tracks the amount of capacity consumed per device as part of the claim request. + // The consumed amount may differ from the requested amount: it is rounded up to the nearest valid + // value based on the device’s requestPolicy if applicable (i.e., may not be less than the requested amount). + // + // The total consumed capacity for each device must not exceed the DeviceCapacity's Value. + // + // This field is populated only for devices that allow multiple allocations. + // All capacity entries are included, even if the consumed amount is zero. + // + // +optional + // +featureGate=DRAConsumableCapacity + map consumedCapacity = 10; +} + +// DeviceSelector must have exactly one field set. +message DeviceSelector { + // CEL contains a CEL expression for selecting a device. + // + // +optional + // +oneOf=SelectorType + optional CELDeviceSelector cel = 1; +} + +// DeviceSubRequest describes a request for device provided in the +// claim.spec.devices.requests[].firstAvailable array. 
Each +// is typically a request for a single resource like a device, but can +// also ask for several identical devices. +// +// DeviceSubRequest is similar to ExactDeviceRequest, but doesn't expose the +// AdminAccess field as that one is only supported when requesting a +// specific device. +message DeviceSubRequest { + // Name can be used to reference this subrequest in the list of constraints + // or the list of configurations for the claim. References must use the + // format
    /. + // + // Must be a DNS label. + // + // +required + optional string name = 1; + + // DeviceClassName references a specific DeviceClass, which can define + // additional configuration and selectors to be inherited by this + // subrequest. + // + // A class is required. Which classes are available depends on the cluster. + // + // Administrators may use this to restrict which devices may get + // requested by only installing classes with selectors for permitted + // devices. If users are free to request anything without restrictions, + // then administrators can create an empty DeviceClass for users + // to reference. + // + // +required + optional string deviceClassName = 2; + + // Selectors define criteria which must be satisfied by a specific + // device in order for that device to be considered for this + // subrequest. All selectors must be satisfied for a device to be + // considered. + // + // +optional + // +listType=atomic + repeated DeviceSelector selectors = 3; + + // AllocationMode and its related fields define how devices are allocated + // to satisfy this subrequest. Supported values are: + // + // - ExactCount: This request is for a specific number of devices. + // This is the default. The exact number is provided in the + // count field. + // + // - All: This subrequest is for all of the matching devices in a pool. + // Allocation will fail if some devices are already allocated, + // unless adminAccess is requested. + // + // If AllocationMode is not specified, the default mode is ExactCount. If + // the mode is ExactCount and count is not specified, the default count is + // one. Any other subrequests must specify this field. + // + // More modes may get added in the future. Clients must refuse to handle + // requests with unknown modes. + // + // +optional + optional string allocationMode = 4; + + // Count is used only when the count mode is "ExactCount". Must be greater than zero. 
+ // If AllocationMode is ExactCount and this field is not specified, the default is one. + // + // +optional + // +oneOf=AllocationMode + optional int64 count = 5; + + // If specified, the request's tolerations. + // + // Tolerations for NoSchedule are required to allocate a + // device which has a taint with that effect. The same applies + // to NoExecute. + // + // In addition, should any of the allocated devices get tainted + // with NoExecute after allocation and that effect is not tolerated, + // then all pods consuming the ResourceClaim get deleted to evict + // them. The scheduler will not let new pods reserve the claim while + // it has these tainted devices. Once all pods are evicted, the + // claim will get deallocated. + // + // The maximum number of tolerations is 16. + // + // This is an alpha field and requires enabling the DRADeviceTaints + // feature gate. + // + // +optional + // +listType=atomic + // +featureGate=DRADeviceTaints + repeated DeviceToleration tolerations = 6; + + // Capacity define resource requirements against each capacity. + // + // If this field is unset and the device supports multiple allocations, + // the default value will be applied to each capacity according to requestPolicy. + // For the capacity that has no requestPolicy, default is the full capacity value. + // + // Applies to each device allocation. + // If Count > 1, + // the request fails if there aren't enough devices that meet the requirements. + // If AllocationMode is set to All, + // the request fails if there are devices that otherwise match the request, + // and have this capacity, with a value >= the requested amount, but which cannot be allocated to this request. + // + // +optional + // +featureGate=DRAConsumableCapacity + optional CapacityRequirements capacity = 7; +} + +// The device this taint is attached to has the "effect" on +// any claim which does not tolerate the taint and, through the claim, +// to pods using the claim. 
+// +// +protobuf.options.(gogoproto.goproto_stringer)=false +message DeviceTaint { + // The taint key to be applied to a device. + // Must be a label name. + // + // +required + optional string key = 1; + + // The taint value corresponding to the taint key. + // Must be a label value. + // + // +optional + optional string value = 2; + + // The effect of the taint on claims that do not tolerate the taint + // and through such claims on the pods using them. + // Valid effects are NoSchedule and NoExecute. PreferNoSchedule as used for + // nodes is not valid here. + // + // +required + optional string effect = 3; + + // TimeAdded represents the time at which the taint was added. + // Added automatically during create or update if not set. + // + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time timeAdded = 4; +} + +// The ResourceClaim this DeviceToleration is attached to tolerates any taint that matches +// the triple using the matching operator . +message DeviceToleration { + // Key is the taint key that the toleration applies to. Empty means match all taint keys. + // If the key is empty, operator must be Exists; this combination means to match all values and all keys. + // Must be a label name. + // + // +optional + optional string key = 1; + + // Operator represents a key's relationship to the value. + // Valid operators are Exists and Equal. Defaults to Equal. + // Exists is equivalent to wildcard for value, so that a ResourceClaim can + // tolerate all taints of a particular category. + // + // +optional + // +default="Equal" + optional string operator = 2; + + // Value is the taint value the toleration matches to. + // If the operator is Exists, the value must be empty, otherwise just a regular string. + // Must be a label value. + // + // +optional + optional string value = 3; + + // Effect indicates the taint effect to match. Empty means match all taint effects. + // When specified, allowed values are NoSchedule and NoExecute. 
+ // + // +optional + optional string effect = 4; + + // TolerationSeconds represents the period of time the toleration (which must be + // of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, + // it is not set, which means tolerate the taint forever (do not evict). Zero and + // negative values will be treated as 0 (evict immediately) by the system. + // If larger than zero, the time when the pod needs to be evicted is calculated as